Merge branch 'develop' of https://github.com/coreemu/core into develop

Blake Harnden 2019-10-21 09:37:26 -07:00
commit 104ac4cdc7
18 changed files with 124 additions and 188 deletions

View file

@ -881,7 +881,7 @@ class CoreGrpcServer(core_pb2_grpc.CoreApiServicer):
session = self.get_session(request.session_id, context)
node = self.get_node(session, request.node_id, context)
try:
output = node.node_net_cmd(request.command)
output = node.cmd(request.command)
except CoreCommandError as e:
output = e.stderr
return core_pb2.NodeCommandResponse(output=output)

View file

@ -885,7 +885,7 @@ class CoreHandler(socketserver.BaseRequestHandler):
status = e.returncode
else:
try:
res = node.node_net_cmd(command)
res = node.cmd(command)
status = 0
except CoreCommandError as e:
res = e.stderr
@ -911,7 +911,7 @@ class CoreHandler(socketserver.BaseRequestHandler):
if message.flags & MessageFlags.LOCAL.value:
utils.mute_detach(command)
else:
node.node_net_cmd(command, wait=False)
node.cmd(command, wait=False)
except CoreError:
logging.exception("error getting object: %s", node_num)
# XXX wait and queue this message to try again later

View file

@ -583,7 +583,7 @@ class EmaneManager(ModelManager):
log_file = os.path.join(path, f"emane{n}.log")
platform_xml = os.path.join(path, f"platform{n}.xml")
args = f"{emanecmd} -f {log_file} {platform_xml}"
output = node.node_net_cmd(args)
output = node.cmd(args)
logging.info("node(%s) emane daemon running: %s", node.name, args)
logging.info("node(%s) emane daemon output: %s", node.name, output)
@ -613,7 +613,7 @@ class EmaneManager(ModelManager):
continue
if node.up:
node.node_net_cmd(kill_emaned, wait=False)
node.cmd(kill_emaned, wait=False)
# TODO: RJ45 node
if stop_emane_on_host:
@ -813,7 +813,7 @@ class EmaneManager(ModelManager):
"""
args = "pkill -0 -x emane"
try:
node.node_net_cmd(args)
node.cmd(args)
result = True
except CoreCommandError:
result = False

View file

@ -1914,4 +1914,4 @@ class Session(object):
utils.mute_detach(data)
else:
node = self.get_node(node_id)
node.node_net_cmd(data, wait=False)
node.cmd(data, wait=False)

View file

@ -61,7 +61,7 @@ class NodeBase(object):
self.position = Position()
use_ovs = session.options.get_config("ovs") == "True"
self.net_client = get_net_client(use_ovs, self.net_cmd)
self.net_client = get_net_client(use_ovs, self.host_cmd)
def startup(self):
"""
@ -79,10 +79,9 @@ class NodeBase(object):
"""
raise NotImplementedError
def net_cmd(self, args, env=None, cwd=None, wait=True):
def host_cmd(self, args, env=None, cwd=None, wait=True):
"""
Runs a command that is used to configure and setup the network on the host
system.
Runs a command on the host system or distributed server.
:param str args: command to run
:param dict env: environment to run command with
@ -265,7 +264,7 @@ class CoreNodeBase(NodeBase):
"""
if self.nodedir is None:
self.nodedir = os.path.join(self.session.session_dir, self.name + ".conf")
self.net_cmd(f"mkdir -p {self.nodedir}")
self.host_cmd(f"mkdir -p {self.nodedir}")
self.tmpnodedir = True
else:
self.tmpnodedir = False
@ -281,7 +280,7 @@ class CoreNodeBase(NodeBase):
return
if self.tmpnodedir:
self.net_cmd(f"rm -rf {self.nodedir}")
self.host_cmd(f"rm -rf {self.nodedir}")
def addnetif(self, netif, ifindex):
"""
@ -383,10 +382,9 @@ class CoreNodeBase(NodeBase):
return common
def node_net_cmd(self, args, wait=True):
def cmd(self, args, wait=True):
"""
Runs a command that is used to configure and setup the network within a
node.
Runs a command within a node container.
:param str args: command to run
:param bool wait: True to wait for status, False otherwise
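The renames above draw a cleaner line between the two execution paths: **cmd** runs inside the node's namespace or container, while **host_cmd** runs on the hosting system or its distributed server. A minimal sketch of using both against a started node, assuming a live session; the node object and addresses are illustrative, and the **CoreCommandError** import path may vary by version:

```python
from core.errors import CoreCommandError  # assumed import path

# runs inside the node (network namespace / container)
output = node.cmd("ip address show")

# runs on the host system (or distributed server) backing the node
node.host_cmd(f"mkdir -p {node.nodedir}")

# failed commands raise CoreCommandError with returncode and stderr attached
try:
    node.cmd("ping -c 1 10.0.0.2")
except CoreCommandError as e:
    print(e.returncode, e.stderr)

# wait=False detaches instead of blocking on command completion
node.cmd("iperf -s -D", wait=False)
```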
@ -462,7 +460,7 @@ class CoreNode(CoreNodeBase):
:param bool use_ovs: True for OVS bridges, False for Linux bridges
:return: node network client
"""
return get_net_client(use_ovs, self.node_net_cmd)
return get_net_client(use_ovs, self.cmd)
def alive(self):
"""
@ -472,7 +470,7 @@ class CoreNode(CoreNodeBase):
:rtype: bool
"""
try:
self.net_cmd(f"kill -0 {self.pid}")
self.host_cmd(f"kill -0 {self.pid}")
except CoreCommandError:
return False
@ -502,7 +500,7 @@ class CoreNode(CoreNodeBase):
env["NODE_NUMBER"] = str(self.id)
env["NODE_NAME"] = str(self.name)
output = self.net_cmd(vnoded, env=env)
output = self.host_cmd(vnoded, env=env)
self.pid = int(output)
logging.debug("node(%s) pid: %s", self.name, self.pid)
@ -546,13 +544,13 @@ class CoreNode(CoreNodeBase):
# kill node process if present
try:
self.net_cmd(f"kill -9 {self.pid}")
self.host_cmd(f"kill -9 {self.pid}")
except CoreCommandError:
logging.exception("error killing process")
# remove node directory if present
try:
self.net_cmd(f"rm -rf {self.ctrlchnlname}")
self.host_cmd(f"rm -rf {self.ctrlchnlname}")
except CoreCommandError:
logging.exception("error removing node directory")
@ -565,7 +563,7 @@ class CoreNode(CoreNodeBase):
finally:
self.rmnodedir()
def node_net_cmd(self, args, wait=True):
def cmd(self, args, wait=True):
"""
Runs a command that is used to configure and setup the network within a
node.
@ -607,7 +605,7 @@ class CoreNode(CoreNodeBase):
hostpath = os.path.join(
self.nodedir, os.path.normpath(path).strip("/").replace("/", ".")
)
self.net_cmd(f"mkdir -p {hostpath}")
self.host_cmd(f"mkdir -p {hostpath}")
self.mount(hostpath, path)
def mount(self, source, target):
@ -621,8 +619,8 @@ class CoreNode(CoreNodeBase):
"""
source = os.path.abspath(source)
logging.debug("node(%s) mounting: %s at %s", self.name, source, target)
self.node_net_cmd(f"mkdir -p {target}")
self.node_net_cmd(f"{MOUNT_BIN} -n --bind {source} {target}")
self.cmd(f"mkdir -p {target}")
self.cmd(f"{MOUNT_BIN} -n --bind {source} {target}")
self._mounts.append((source, target))
def newifindex(self):
@ -846,7 +844,7 @@ class CoreNode(CoreNodeBase):
self.client.check_cmd(f"mv {srcname} {filename}")
self.client.check_cmd("sync")
else:
self.net_cmd(f"mkdir -p {directory}")
self.host_cmd(f"mkdir -p {directory}")
self.server.remote_put(srcname, filename)
def hostfilename(self, filename):
@ -883,9 +881,9 @@ class CoreNode(CoreNodeBase):
open_file.write(contents)
os.chmod(open_file.name, mode)
else:
self.net_cmd(f"mkdir -m {0o755:o} -p {dirname}")
self.host_cmd(f"mkdir -m {0o755:o} -p {dirname}")
self.server.remote_put_temp(hostfilename, contents)
self.net_cmd(f"chmod {mode:o} {hostfilename}")
self.host_cmd(f"chmod {mode:o} {hostfilename}")
logging.debug(
"node(%s) added file: %s; mode: 0%o", self.name, hostfilename, mode
)
@ -906,7 +904,7 @@ class CoreNode(CoreNodeBase):
else:
self.server.remote_put(srcfilename, hostfilename)
if mode is not None:
self.net_cmd(f"chmod {mode:o} {hostfilename}")
self.host_cmd(f"chmod {mode:o} {hostfilename}")
logging.info(
"node(%s) copied file: %s; mode: %s", self.name, hostfilename, mode
)

View file

@ -131,7 +131,7 @@ class DockerNode(CoreNode):
if self.up:
raise ValueError("starting a node that is already up")
self.makenodedir()
self.client = DockerClient(self.name, self.image, self.net_cmd)
self.client = DockerClient(self.name, self.image, self.host_cmd)
self.pid = self.client.create_container()
self.up = True
@ -176,7 +176,7 @@ class DockerNode(CoreNode):
"""
logging.debug("creating node dir: %s", path)
args = f"mkdir -p {path}"
self.node_net_cmd(args)
self.cmd(args)
def mount(self, source, target):
"""
@ -206,13 +206,13 @@ class DockerNode(CoreNode):
temp.close()
if directory:
self.node_net_cmd(f"mkdir -m {0o755:o} -p {directory}")
self.cmd(f"mkdir -m {0o755:o} -p {directory}")
if self.server is not None:
self.server.remote_put(temp.name, temp.name)
self.client.copy_file(temp.name, filename)
self.node_net_cmd(f"chmod {mode:o} {filename}")
self.cmd(f"chmod {mode:o} {filename}")
if self.server is not None:
self.net_cmd(f"rm -f {temp.name}")
self.host_cmd(f"rm -f {temp.name}")
os.unlink(temp.name)
logging.debug(
"node(%s) added file: %s; mode: 0%o", self.name, filename, mode
@ -232,7 +232,7 @@ class DockerNode(CoreNode):
"node file copy file(%s) source(%s) mode(%s)", filename, srcfilename, mode
)
directory = os.path.dirname(filename)
self.node_net_cmd(f"mkdir -p {directory}")
self.cmd(f"mkdir -p {directory}")
if self.server is None:
source = srcfilename
@ -242,4 +242,4 @@ class DockerNode(CoreNode):
self.server.remote_put(source, temp.name)
self.client.copy_file(source, filename)
self.node_net_cmd(f"chmod {mode:o} {filename}")
self.cmd(f"chmod {mode:o} {filename}")

View file

@ -17,7 +17,7 @@ class CoreInterface(object):
def __init__(self, session, node, name, mtu, server=None):
"""
Creates a PyCoreNetIf instance.
Creates a CoreInterface instance.
:param core.emulator.session.Session session: core session instance
:param core.nodes.base.CoreNode node: node for interface
@ -46,11 +46,11 @@ class CoreInterface(object):
self.flow_id = None
self.server = server
use_ovs = session.options.get_config("ovs") == "True"
self.net_client = get_net_client(use_ovs, self.net_cmd)
self.net_client = get_net_client(use_ovs, self.host_cmd)
def net_cmd(self, args, env=None, cwd=None, wait=True):
def host_cmd(self, args, env=None, cwd=None, wait=True):
"""
Runs a command on the host system or distributed servers.
Runs a command on the host system or distributed server.
:param str args: command to run
:param dict env: environment to run command with

View file

@ -112,7 +112,7 @@ class LxcNode(CoreNode):
if self.up:
raise ValueError("starting a node that is already up")
self.makenodedir()
self.client = LxdClient(self.name, self.image, self.net_cmd)
self.client = LxdClient(self.name, self.image, self.host_cmd)
self.pid = self.client.create_container()
self.up = True
@ -149,7 +149,7 @@ class LxcNode(CoreNode):
"""
logging.info("creating node dir: %s", path)
args = f"mkdir -p {path}"
return self.node_net_cmd(args)
return self.cmd(args)
def mount(self, source, target):
"""
@ -180,13 +180,13 @@ class LxcNode(CoreNode):
temp.close()
if directory:
self.node_net_cmd(f"mkdir -m {0o755:o} -p {directory}")
self.cmd(f"mkdir -m {0o755:o} -p {directory}")
if self.server is not None:
self.server.remote_put(temp.name, temp.name)
self.client.copy_file(temp.name, filename)
self.node_net_cmd(f"chmod {mode:o} {filename}")
self.cmd(f"chmod {mode:o} {filename}")
if self.server is not None:
self.net_cmd(f"rm -f {temp.name}")
self.host_cmd(f"rm -f {temp.name}")
os.unlink(temp.name)
logging.debug("node(%s) added file: %s; mode: 0%o", self.name, filename, mode)
@ -204,7 +204,7 @@ class LxcNode(CoreNode):
"node file copy file(%s) source(%s) mode(%s)", filename, srcfilename, mode
)
directory = os.path.dirname(filename)
self.node_net_cmd(f"mkdir -p {directory}")
self.cmd(f"mkdir -p {directory}")
if self.server is None:
source = srcfilename
@ -214,7 +214,7 @@ class LxcNode(CoreNode):
self.server.remote_put(source, temp.name)
self.client.copy_file(source, filename)
self.node_net_cmd(f"chmod {mode:o} {filename}")
self.cmd(f"chmod {mode:o} {filename}")
def addnetif(self, netif, ifindex):
super(LxcNode, self).addnetif(netif, ifindex)

View file

@ -162,20 +162,20 @@ class EbtablesQueue(object):
"""
# save kernel ebtables snapshot to a file
args = self.ebatomiccmd("--atomic-save")
wlan.net_cmd(args)
wlan.host_cmd(args)
# modify the table file using queued ebtables commands
for c in self.cmds:
args = self.ebatomiccmd(c)
wlan.net_cmd(args)
wlan.host_cmd(args)
self.cmds = []
# commit the table file to the kernel
args = self.ebatomiccmd("--atomic-commit")
wlan.net_cmd(args)
wlan.host_cmd(args)
try:
wlan.net_cmd(f"rm -f {self.atomic_file}")
wlan.host_cmd(f"rm -f {self.atomic_file}")
except CoreCommandError:
logging.exception("error removing atomic file: %s", self.atomic_file)
@ -270,7 +270,7 @@ class CoreNetwork(CoreNetworkBase):
self.startup()
ebq.startupdateloop(self)
def net_cmd(self, args, env=None, cwd=None, wait=True):
def host_cmd(self, args, env=None, cwd=None, wait=True):
"""
Runs a command that is used to configure and setup the network on the host
system and all configured distributed servers.
@ -302,7 +302,7 @@ class CoreNetwork(CoreNetworkBase):
f"{EBTABLES_BIN} -N {self.brname} -P {self.policy}",
f"{EBTABLES_BIN} -A FORWARD --logical-in {self.brname} -j {self.brname}",
]
ebtablescmds(self.net_cmd, cmds)
ebtablescmds(self.host_cmd, cmds)
self.up = True
@ -323,7 +323,7 @@ class CoreNetwork(CoreNetworkBase):
f"{EBTABLES_BIN} -D FORWARD --logical-in {self.brname} -j {self.brname}",
f"{EBTABLES_BIN} -X {self.brname}",
]
ebtablescmds(self.net_cmd, cmds)
ebtablescmds(self.host_cmd, cmds)
except CoreCommandError:
logging.exception("error during shutdown")
@ -462,13 +462,13 @@ class CoreNetwork(CoreNetworkBase):
if bw > 0:
if self.up:
cmd = f"{tc} {parent} handle 1: {tbf}"
netif.net_cmd(cmd)
netif.host_cmd(cmd)
netif.setparam("has_tbf", True)
changed = True
elif netif.getparam("has_tbf") and bw <= 0:
if self.up:
cmd = f"{TC_BIN} qdisc delete dev {devname} {parent}"
netif.net_cmd(cmd)
netif.host_cmd(cmd)
netif.setparam("has_tbf", False)
# removing the parent removes the child
netif.setparam("has_netem", False)
@ -510,14 +510,14 @@ class CoreNetwork(CoreNetworkBase):
return
if self.up:
cmd = f"{TC_BIN} qdisc delete dev {devname} {parent} handle 10:"
netif.net_cmd(cmd)
netif.host_cmd(cmd)
netif.setparam("has_netem", False)
elif len(netem) > 1:
if self.up:
cmd = (
f"{TC_BIN} qdisc replace dev {devname} {parent} handle 10: {netem}"
)
netif.net_cmd(cmd)
netif.host_cmd(cmd)
netif.setparam("has_netem", True)
def linknet(self, net):
@ -802,7 +802,7 @@ class CtrlNet(CoreNetwork):
self.brname,
self.updown_script,
)
self.net_cmd(f"{self.updown_script} {self.brname} startup")
self.host_cmd(f"{self.updown_script} {self.brname} startup")
if self.serverintf:
self.net_client.create_interface(self.brname, self.serverintf)
@ -830,7 +830,7 @@ class CtrlNet(CoreNetwork):
self.brname,
self.updown_script,
)
self.net_cmd(f"{self.updown_script} {self.brname} shutdown")
self.host_cmd(f"{self.updown_script} {self.brname} shutdown")
except CoreCommandError:
logging.exception("error issuing shutdown script shutdown")

View file

@ -188,13 +188,13 @@ class PhysicalNode(CoreNodeBase):
source = os.path.abspath(source)
logging.info("mounting %s at %s", source, target)
os.makedirs(target)
self.net_cmd(f"{MOUNT_BIN} --bind {source} {target}", cwd=self.nodedir)
self.host_cmd(f"{MOUNT_BIN} --bind {source} {target}", cwd=self.nodedir)
self._mounts.append((source, target))
def umount(self, target):
logging.info("unmounting '%s'", target)
try:
self.net_cmd(f"{UMOUNT_BIN} -l {target}", cwd=self.nodedir)
self.host_cmd(f"{UMOUNT_BIN} -l {target}", cwd=self.nodedir)
except CoreCommandError:
logging.exception("unmounting failed for %s", target)
@ -220,8 +220,8 @@ class PhysicalNode(CoreNodeBase):
os.chmod(node_file.name, mode)
logging.info("created nodefile: '%s'; mode: 0%o", node_file.name, mode)
def node_net_cmd(self, args, wait=True):
return self.net_cmd(args, wait=wait)
def cmd(self, args, wait=True):
return self.host_cmd(args, wait=wait)
class Rj45Node(CoreNodeBase, CoreInterface):

View file

@ -598,7 +598,7 @@ class CoreServices(object):
for cmd in cmds:
logging.debug("validating service(%s) using: %s", service.name, cmd)
try:
node.node_net_cmd(cmd)
node.cmd(cmd)
except CoreCommandError as e:
logging.debug(
"node(%s) service(%s) validate failed", node.name, service.name
@ -631,7 +631,7 @@ class CoreServices(object):
status = 0
for args in service.shutdown:
try:
node.node_net_cmd(args)
node.cmd(args)
except CoreCommandError:
logging.exception("error running stop command %s", args)
status = -1
@ -729,7 +729,7 @@ class CoreServices(object):
status = 0
for cmd in cmds:
try:
node.node_net_cmd(cmd, wait)
node.cmd(cmd, wait)
except CoreCommandError:
logging.exception("error starting command")
status = -1

View file

@ -42,12 +42,12 @@ def example(options):
last_node = session.get_node(options.nodes + 1)
logging.info("starting iperf server on node: %s", first_node.name)
first_node.node_net_cmd("iperf -s -D")
first_node.cmd("iperf -s -D")
first_node_address = prefixes.ip4_address(first_node)
logging.info("node %s connecting to %s", last_node.name, first_node_address)
output = last_node.node_net_cmd(f"iperf -t {options.time} -c {first_node_address}")
output = last_node.cmd(f"iperf -t {options.time} -c {first_node_address}")
logging.info(output)
first_node.node_net_cmd("killall -9 iperf")
first_node.cmd("killall -9 iperf")
# shutdown session
coreemu.shutdown()

View file

@ -46,11 +46,11 @@ def example(options):
last_node = session.get_node(options.nodes + 1)
logging.info("starting iperf server on node: %s", first_node.name)
first_node.node_net_cmd("iperf -s -D")
first_node.cmd("iperf -s -D")
address = prefixes.ip4_address(first_node)
logging.info("node %s connecting to %s", last_node.name, address)
last_node.node_net_cmd(f"iperf -t {options.time} -c {address}")
first_node.node_net_cmd("killall -9 iperf")
last_node.cmd(f"iperf -t {options.time} -c {address}")
first_node.cmd("killall -9 iperf")
# shutdown session
coreemu.shutdown()

View file

@ -27,7 +27,7 @@ _DIR = os.path.dirname(os.path.abspath(__file__))
def ping(from_node, to_node, ip_prefixes, count=3):
address = ip_prefixes.ip4_address(to_node)
try:
from_node.node_net_cmd(f"ping -c {count} {address}")
from_node.cmd(f"ping -c {count} {address}")
status = 0
except CoreCommandError as e:
status = e.returncode

View file

@ -20,7 +20,7 @@ _WIRED = [NodeTypes.PEER_TO_PEER, NodeTypes.HUB, NodeTypes.SWITCH]
def ping(from_node, to_node, ip_prefixes):
address = ip_prefixes.ip4_address(to_node)
try:
from_node.node_net_cmd(f"ping -c 3 {address}")
from_node.cmd(f"ping -c 3 {address}")
status = 0
except CoreCommandError as e:
status = e.returncode

View file

@ -30,7 +30,7 @@ class TestNodes:
assert os.path.exists(node.nodedir)
assert node.alive()
assert node.up
assert node.node_net_cmd("ip address show lo")
assert node.cmd("ip address show lo")
def test_node_update(self, session):
# given

View file

@ -9,39 +9,17 @@ A large emulation scenario can be deployed on multiple emulation servers and
controlled by a single GUI. The GUI, representing the entire topology, can be
run on one of the emulation servers or on a separate machine.
Each machine that will act as an emulation server would ideally have the
same version of CORE installed. It is not important to have the GUI component
but the CORE Python daemon **core-daemon** needs to be installed.
**NOTE: The server that the GUI connects with is referred to as
the master server.**
Each machine that will act as an emulation server will require the installation of a distributed CORE package and
some configuration to allow SSH as root.
## Configuring Listen Address
## Configuring SSH
First we need to configure the **core-daemon** on all servers to listen on an
interface over the network. The simplest way would be updating the core
configuration file to listen on all interfaces. Alternatively, configure it to
listen to the specific interface you desire by supplying the correct address.
Distributed CORE uses the Python fabric library to run commands on remote servers over SSH.
The **listenaddr** configuration should be set to the address of the interface
that should receive CORE API control commands from the other servers;
setting **listenaddr = 0.0.0.0** causes the Python daemon to listen on all
interfaces. CORE uses TCP port **4038** by default to communicate from the
controlling machine (with GUI) to the emulation servers. Make sure that
firewall rules are configured as necessary to allow this traffic.
### Remote GUI Terminals
```shell
# open configuration file
vi /etc/core/core.conf
# within core.conf
[core-daemon]
listenaddr = 0.0.0.0
```
## Enabling Remote SSH Shells
### Update GUI Terminal Program
You need to have the same user defined on each server, since the user used
for these remote shells is the same user that is running the CORE GUI.
**Edit -> Preferences... -> Terminal program:**
@ -54,31 +32,51 @@ You may need to install xterm, if not already installed.
sudo apt install xterm
```
### Setup SSH
### Distributed Server SSH Configuration
In order to easily open shells on the emulation servers, the servers should be
running an SSH server, and public key login should be enabled. This is
accomplished by generating an SSH key for your user on all servers being used
for distributed emulation, if you do not already have one, and then copying your
master server's public key to the authorized_keys file on all other servers that
will be used to help drive the distributed emulation. When double-clicking on a
node during runtime, instead of opening a local shell, the GUI will attempt to
SSH to the emulation server to run an interactive shell.
First, the distributed servers must be configured to allow passwordless root login over SSH.
On distributed server:
```shell
# install openssh-server
sudo apt install openssh-server
# generate ssh key if needed
ssh-keygen -o -t rsa -b 4096
# open sshd config
vi /etc/ssh/sshd_config
# verify these configurations in file
PermitRootLogin yes
PasswordAuthentication yes
# restart sshd
sudo systemctl restart sshd
```
On master server:
```shell
# install package if needed
sudo apt install openssh-client
# generate ssh key if needed
ssh-keygen -o -t rsa -b 4096 -f ~/.ssh/core
# copy public key to authorized_keys file
ssh-copy-id user@server
# or
scp ~/.ssh/id_rsa.pub username@server:~/.ssh/authorized_keys
ssh-copy-id -i ~/.ssh/core root@server
# configure fabric to use the core ssh key
sudo vi /etc/fabric.yml
```
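The referenced **/etc/fabric.yml** follows Fabric's YAML configuration format; a minimal sketch, assuming the key generated above and Fabric's standard **connect_kwargs** option (passed through to the underlying SSH library):

```shell
# within /etc/fabric.yml
connect_kwargs: {"key_filename": "/home/user/.ssh/core"}
```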
On distributed server:
```shell
# open sshd config
vi /etc/ssh/sshd_config
# change configuration for root login to without password
PermitRootLogin without-password
# restart sshd
sudo systemctl restart sshd
```
## Add Emulation Servers in GUI
@ -155,27 +153,16 @@ The names before the addresses need to match the servers configured in
controlnet = core1:172.16.1.0/24 core2:172.16.2.0/24 core3:172.16.3.0/24 core4:172.16.4.0/24 core5:172.16.5.0/24
```
EMANE appears to require location events to be synced across all EMANE
instances for nodes to find each other. Using an EMANE EEL file for your
scenario can help clear this up, which might be desired anyway.
* https://github.com/adjacentlink/emane/wiki/EEL-Generator
You can also move nodes within the GUI to help trigger location events from
CORE when the **core.conf** setting below is used, assuming the nodes
did not find each other by default and you are not using an EEL file.
```shell
emane_event_generate = True
```
## Distributed Checklist
1. Install the same version of the CORE daemon on all servers.
1. Set the **listenaddr** configuration in all of the servers' core.conf files,
then start (or restart) the daemon.
1. Install CORE on master server
1. Install distributed CORE package on all servers needed
1. Install and configure public-key SSH access on all servers (if you want to use
double-click shells or Widgets) for both the GUI user (for terminals) and root for running CORE commands.
1. Choose the servers that participate in distributed emulation.
1. Assign nodes to desired servers, empty for master server.
1. Press the **Start** button to launch the distributed emulation.

View file

@ -1,60 +1,11 @@
# Using the gRPC API
The gRPC API is currently not turned on by default. There are a couple of ways it can be enabled
for use.
gRPC is the main API for interfacing with CORE.
## Enabling gRPC
### HTTP Proxy
## HTTP Proxy
Since gRPC is HTTP2 based, proxy configurations can cause issues. Clear out your proxy when running, if needed.
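For example, assuming the proxy is set through environment variables, it can be cleared for the current shell before starting the daemon or client:

```shell
# clear proxy settings for this shell session
unset http_proxy https_proxy HTTP_PROXY HTTPS_PROXY
```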
### Daemon Options
The gRPC API is enabled through options provided to the **core-daemon**.
```shell
usage: core-daemon [-h] [-f CONFIGFILE] [-p PORT] [-n NUMTHREADS] [--ovs]
[--grpc] [--grpc-port GRPCPORT]
[--grpc-address GRPCADDRESS]
CORE daemon v.5.3.0 instantiates Linux network namespace nodes.
optional arguments:
-h, --help show this help message and exit
-f CONFIGFILE, --configfile CONFIGFILE
read config from specified file; default =
/etc/core/core.conf
-p PORT, --port PORT port number to listen on; default = 4038
-n NUMTHREADS, --numthreads NUMTHREADS
number of server threads; default = 1
--ovs enable experimental ovs mode, default is false
--grpc enable grpc api, default is false
--grpc-port GRPCPORT grpc port to listen on; default 50051
--grpc-address GRPCADDRESS
grpc address to listen on; default localhost
```
### Enabling in Service Files
Modify service files to append the **--grpc** option as desired.
For sysv services, edit /etc/init.d/core-daemon:
```shell
CMD="PYTHONPATH=/usr/lib/python3.6/site-packages python3 /usr/bin/$NAME --grpc"
```
For the systemd service, edit /lib/systemd/system/core-daemon.service:
```shell
ExecStart=@PYTHON@ @bindir@/core-daemon --grpc
```
### Enabling from Command Line
```shell
sudo core-daemon --grpc
```
## Python Client
A Python client wrapper is provided at **core.api.grpc.client.CoreGrpcClient**.
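A short sketch of driving the daemon through that wrapper; method names reflect the client around the time of this commit and may differ across releases, and the node id is illustrative. **node_command** corresponds to the NodeCommand handler shown at the top of this commit:

```python
from core.api.grpc import client

core = client.CoreGrpcClient()
core.connect()  # assumed default address: localhost:50051

# create a new session on the daemon
response = core.create_session()
session_id = response.session_id

# ... add nodes and links, then start the session ...

# run a command inside a node; the server side maps this to node.cmd()
response = core.node_command(session_id, node_id=1, command="ip address show")
print(response.output)
```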