Merge branch 'develop' of https://github.com/coreemu/core into develop
commit 6e25388ad9

54 changed files with 3019 additions and 1625 deletions

Makefile.am (13 lines changed)
@@ -46,9 +46,11 @@ MAINTAINERCLEANFILES = .version \

if PYTHON3
PYTHON_DEP = python3 >= 3.0
PYTHON_DEB_DEP = python3 >= 3.0
PYTHON_RPM_DEP = python3 >= 3.0
else
PYTHON_DEP = python >= 2.7, python < 3.0
PYTHON_DEB_DEP = python (>= 2.7), python (<< 3.0)
PYTHON_RPM_DEP = python >= 2.7, python < 3.0
endif

define fpm-rpm =

@@ -61,6 +63,7 @@ fpm -s dir -t rpm -n core \
-p core_$(PYTHON)_VERSION_ARCH.rpm \
-v $(PACKAGE_VERSION) \
--rpm-init scripts/core-daemon \
--config-files "/etc/core" \
-d "ethtool" \
-d "tcl" \
-d "tk" \

@@ -71,7 +74,7 @@ fpm -s dir -t rpm -n core \
-d "iproute" \
-d "libev" \
-d "net-tools" \
-d "$(PYTHON_DEP)" \
-d "$(PYTHON_RPM_DEP)" \
-C $(DESTDIR)
endef

@@ -85,6 +88,8 @@ fpm -s dir -t deb -n core \
-p core_$(PYTHON)_VERSION_ARCH.deb \
-v $(PACKAGE_VERSION) \
--deb-systemd scripts/core-daemon.service \
--deb-no-default-config-files \
--config-files "/etc/core" \
-d "ethtool" \
-d "tcl" \
-d "tk" \

@@ -96,7 +101,7 @@ fpm -s dir -t deb -n core \
-d "ebtables" \
-d "iproute2" \
-d "libev4" \
-d "$(PYTHON_DEP)" \
-d "$(PYTHON_DEB_DEP)" \
-C $(DESTDIR)
endef
@@ -2,7 +2,7 @@
# Process this file with autoconf to produce a configure script.

# this defines the CORE version number, must be static for AC_INIT
AC_INIT(core, 5.3.0)
AC_INIT(core, 5.3.1)

# autoconf and automake initialization
AC_CONFIG_SRCDIR([netns/version.h.in])
@@ -73,11 +73,9 @@ public class NodeType {
return ID_LOOKUP.values().stream()
.filter(nodeType -> {
boolean sameType = nodeType.getValue() == type;
boolean sameModel;
if (model != null) {
boolean sameModel = true;
if (!model.isEmpty()) {
sameModel = model.equals(nodeType.getModel());
} else {
sameModel = nodeType.getModel() == null;
}
return sameType && sameModel;
})
@@ -6,22 +6,45 @@ import inet.ipaddr.IPAddressString;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

import java.beans.IndexedPropertyDescriptor;
import java.util.Comparator;
import java.util.HashSet;
import java.util.Queue;
import java.util.Set;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.atomic.AtomicBoolean;

public class CoreAddresses {
private static final Logger logger = LogManager.getLogger();
private IPAddress currentSubnet = new IPAddressString("10.0.0.0/24").getAddress();
private AtomicBoolean firstSubnet = new AtomicBoolean(true);
private IPAddress currentSubnet = new IPAddressString("10.0.0.0/24").getAddress().toPrefixBlock();
private Queue<IPAddress> deleted = new LinkedBlockingQueue<>();
private Set<IPAddress> usedSubnets = new HashSet<>();

public void usedAddress(IPAddress address) {
logger.info("adding used address: {} - {}", address, address.toPrefixBlock());
usedSubnets.add(address.toPrefixBlock());
logger.info("used subnets: {}", usedSubnets);
}

public void reuseSubnet(IPAddress subnet) {
deleted.add(subnet);
}

public IPAddress nextSubnet() {
logger.info("getting next subnet: {}", currentSubnet);
if (!firstSubnet.getAndSet(false)) {
// skip existing subnets, when loaded from file
while (usedSubnets.contains(currentSubnet)) {
currentSubnet = currentSubnet.incrementBoundary(1).toPrefixBlock();
}
logger.info("getting updated boundary: {}", currentSubnet);
return currentSubnet;

// re-use any deleted subnets
IPAddress next = deleted.poll();
if (next == null) {
next = currentSubnet;
currentSubnet = currentSubnet.incrementBoundary(1).toPrefixBlock();
}
return next;
}

public IPAddress findSubnet(Set<CoreInterface> interfaces) {

@@ -43,6 +66,12 @@ public class CoreAddresses {
.orElseGet(() -> currentSubnet);
}

public void reset() {
deleted.clear();
usedSubnets.clear();
currentSubnet = new IPAddressString("10.0.0.0/24").getAddress().toPrefixBlock();
}

public static void main(String... args) {
IPAddress addresses = new IPAddressString("10.0.0.0/16").getAddress();
System.out.println(String.format("address: %s", addresses.increment(257)));
@@ -248,6 +248,7 @@ public class NetworkGraph {
}
nodeMap.clear();
graphViewer.repaint();
coreAddresses.reset();
}

public void updatePositions() {

@@ -285,6 +286,13 @@ public class NetworkGraph {
private void handleEdgeAdded(GraphEvent.Edge<CoreNode, CoreLink> edgeEvent) {
CoreLink link = edgeEvent.getEdge();
if (link.isLoaded()) {
// load addresses to avoid duplication
if (link.getInterfaceOne().getIp4() != null) {
coreAddresses.usedAddress(link.getInterfaceOne().getIp4());
}
if (link.getInterfaceTwo().getIp4() != null) {
coreAddresses.usedAddress(link.getInterfaceTwo().getIp4());
}
return;
}
Pair<CoreNode> endpoints = graph.getEndpoints(link);

@@ -417,6 +425,38 @@ public class NetworkGraph {
private void handleEdgeRemoved(GraphEvent.Edge<CoreNode, CoreLink> edgeEvent) {
CoreLink link = edgeEvent.getEdge();
logger.info("removed edge: {}", link);
CoreNode nodeOne = getVertex(link.getNodeOne());
CoreInterface interfaceOne = link.getInterfaceOne();
CoreNode nodeTwo = getVertex(link.getNodeTwo());
CoreInterface interfaceTwo = link.getInterfaceTwo();
boolean nodeOneIsDefault = isNode(nodeOne);
boolean nodeTwoIsDefault = isNode(nodeTwo);

// check what we are unlinking
Set<CoreInterface> interfaces;
IPAddress subnet = null;
if (nodeOneIsDefault && nodeTwoIsDefault) {
subnet = interfaceOne.getIp4().toPrefixBlock();
logger.info("unlinking node to node reuse subnet: {}", subnet);
} else if (nodeOneIsDefault) {
interfaces = getNetworkInterfaces(nodeTwo, new HashSet<>());
if (interfaces.isEmpty()) {
subnet = interfaceOne.getIp4().toPrefixBlock();
logger.info("unlinking node one from network reuse subnet: {}", subnet);
}
} else if (nodeTwoIsDefault) {
interfaces = getNetworkInterfaces(nodeOne, new HashSet<>());
if (interfaces.isEmpty()) {
subnet = interfaceTwo.getIp4().toPrefixBlock();
logger.info("unlinking node two from network reuse subnet: {}", subnet);
}
} else {
logger.info("nothing to do when unlinking networks");
}

if (subnet != null) {
coreAddresses.reuseSubnet(subnet);
}
}

private void handleVertexAdded(GraphEvent.Vertex<CoreNode, CoreLink> vertexEvent) {

@@ -475,7 +515,7 @@ public class NetworkGraph {
}

private boolean isWirelessNode(CoreNode node) {
return node.getType() == NodeType.EMANE || node.getType() == NodeType.WLAN;
return node != null && (node.getType() == NodeType.EMANE || node.getType() == NodeType.WLAN);
}

private boolean checkForWirelessNode(CoreNode nodeOne, CoreNode nodeTwo) {
@@ -1 +1 @@
../../../../daemon/proto/core.proto
../../../../daemon/proto/core/api/grpc/core.proto
@@ -17,6 +17,7 @@ from core.emulator.emudata import NodeOptions, InterfaceData, LinkOptions
from core.emulator.enumerations import NodeTypes, EventTypes, LinkTypes
from core.location.mobility import BasicRangeModel, Ns2ScriptedMobility
from core.nodes import nodeutils
from core.nodes.base import CoreNetworkBase
from core.nodes.ipaddress import MacAddress
from core.services.coreservices import ServiceManager

@@ -73,18 +74,24 @@ def convert_link(session, link_data):
interface_one = None
if link_data.interface1_id is not None:
node = session.get_node(link_data.node1_id)
interface = node.netif(link_data.interface1_id)
interface_name = None
if not isinstance(node, CoreNetworkBase):
interface = node.netif(link_data.interface1_id)
interface_name = interface.name
interface_one = core_pb2.Interface(
id=link_data.interface1_id, name=interface.name, mac=convert_value(link_data.interface1_mac),
id=link_data.interface1_id, name=interface_name, mac=convert_value(link_data.interface1_mac),
ip4=convert_value(link_data.interface1_ip4), ip4mask=link_data.interface1_ip4_mask,
ip6=convert_value(link_data.interface1_ip6), ip6mask=link_data.interface1_ip6_mask)

interface_two = None
if link_data.interface2_id is not None:
node = session.get_node(link_data.node2_id)
interface = node.netif(link_data.interface2_id)
interface_name = None
if not isinstance(node, CoreNetworkBase):
interface = node.netif(link_data.interface2_id)
interface_name = interface.name
interface_two = core_pb2.Interface(
id=link_data.interface2_id, name=interface.name, mac=convert_value(link_data.interface2_mac),
id=link_data.interface2_id, name=interface_name, mac=convert_value(link_data.interface2_mac),
ip4=convert_value(link_data.interface2_ip4), ip4mask=link_data.interface2_ip4_mask,
ip6=convert_value(link_data.interface2_ip6), ip6mask=link_data.interface2_ip6_mask)
@@ -10,6 +10,7 @@ import select
import socket
import threading

from core import utils
from core.api.tlv import coreapi
from core.nodes.base import CoreNodeBase, CoreNetworkBase
from core.emulator.enumerations import ConfigDataTypes

@@ -121,7 +122,6 @@ class CoreBroker(object):
self.physical_nodes = set()
# allows for other message handlers to process API messages (e.g. EMANE)
self.handlers = set()
self.handlers.add(self.handle_distributed)
# dict with tunnel key to tunnel device mapping
self.tunnels = {}
self.dorecvloop = False

@@ -388,12 +388,13 @@ class CoreBroker(object):
:return: tunnel key for the node pair
:rtype: int
"""
logging.debug("creating tunnel key for: %s, %s", n1num, n2num)
sid = self.session_id_master
if sid is None:
# this is the master session
sid = self.session.id

key = (sid << 16) ^ hash(n1num) ^ (hash(n2num) << 8)
key = (sid << 16) ^ utils.hashkey(n1num) ^ (utils.hashkey(n2num) << 8)
return key & 0xFFFFFFFF

def addtunnel(self, remoteip, n1num, n2num, localnum):

@@ -1049,62 +1050,3 @@ class CoreBroker(object):
if not server.instantiation_complete:
return False
return True

def handle_distributed(self, message):
"""
Handle the session options config message as it has reached the
broker. Options requiring modification for distributed operation should
be handled here.

:param message: message to handle
:return: nothing
"""
if not self.session.master:
return

if message.message_type != MessageTypes.CONFIG.value or message.get_tlv(ConfigTlvs.OBJECT.value) != "session":
return

values_str = message.get_tlv(ConfigTlvs.VALUES.value)
if values_str is None:
return

value_strings = values_str.split("|")
for value_string in value_strings:
key, _value = value_string.split("=", 1)
if key == "controlnet":
self.handle_distributed_control_net(message, value_strings, value_strings.index(value_string))

def handle_distributed_control_net(self, message, values, index):
"""
Modify Config Message if multiple control network prefixes are
defined. Map server names to prefixes and repack the message before
it is forwarded to slave servers.

:param message: message to handle
:param list values: values to handle
:param int index: index ti get key value from
:return: nothing
"""
key_value = values[index]
_key, value = key_value.split("=", 1)
control_nets = value.split()

if len(control_nets) < 2:
logging.warning("multiple controlnet prefixes do not exist")
return

servers = self.session.broker.getservernames()
if len(servers) < 2:
logging.warning("not distributed")
return

servers.remove("localhost")
# master always gets first prefix
servers.insert(0, "localhost")
# create list of "server1:ctrlnet1 server2:ctrlnet2 ..."
control_nets = map(lambda x: "%s:%s" % (x[0], x[1]), zip(servers, control_nets))
values[index] = "controlnet=%s" % (" ".join(control_nets))
values_str = "|".join(values)
message.tlv_data[ConfigTlvs.VALUES.value] = values_str
message.repack()
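The tunnel-key hunk above replaces the builtin `hash()` with `utils.hashkey()`. In Python 3 the builtin string hash is randomized per process, so two distributed daemons could derive different keys for the same node pair; a digest-based hash keeps the key identical everywhere. The snippet below is a minimal standalone sketch of that idea, mirroring the broker's formula rather than reproducing its code.

```python
import hashlib


def hashkey(value):
    # digest-based hash: stable across processes, unlike builtin hash() in Python 3
    if isinstance(value, int):
        value = str(value)
    return int(hashlib.sha256(value.encode("utf-8")).hexdigest(), 16)


def tunnel_key(session_id, n1num, n2num):
    # same folding as the broker method above: session id and both node numbers into 32 bits
    key = (session_id << 16) ^ hashkey(n1num) ^ (hashkey(n2num) << 8)
    return key & 0xFFFFFFFF


print(hex(tunnel_key(1, 2, 3)))  # identical on every host and every run
```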
@@ -510,7 +510,6 @@ class CoreHandler(socketserver.BaseRequestHandler):
:param message: message for replies
:return: nothing
"""
logging.debug("dispatching replies: %s", replies)
for reply in replies:
message_type, message_flags, message_length = coreapi.CoreMessage.unpack_header(reply)
try:

@@ -524,7 +523,7 @@ class CoreHandler(socketserver.BaseRequestHandler):
reply_message = "CoreMessage (type %d flags %d length %d)" % (
message_type, message_flags, message_length)

logging.debug("dispatch reply:\n%s", reply_message)
logging.debug("sending reply:\n%s", reply_message)

try:
self.sendall(reply)

@@ -629,7 +628,7 @@ class CoreHandler(socketserver.BaseRequestHandler):
"""
Node Message handler

:param core.api.coreapi.CoreNodeMessage message: node message
:param core.api.tlv.coreapi.CoreNodeMessage message: node message
:return: replies to node message
"""
replies = []

@@ -860,7 +859,7 @@ class CoreHandler(socketserver.BaseRequestHandler):
raise
else:
thread = threading.Thread(
target=execfile,
target=utils.execute_file,
args=(file_name, {"__file__": file_name, "coreemu": self.coreemu})
)
thread.daemon = True

@@ -1036,8 +1035,10 @@ class CoreHandler(socketserver.BaseRequestHandler):
if message_type == ConfigFlags.REQUEST:
node_id = config_data.node
metadata_configs = self.session.metadata.get_configs()
if metadata_configs is None:
metadata_configs = {}
data_values = "|".join(["%s=%s" % (x, metadata_configs[x]) for x in metadata_configs])
data_types = tuple(ConfigDataTypes.STRING.value for _ in self.session.metadata.get_configs())
data_types = tuple(ConfigDataTypes.STRING.value for _ in metadata_configs)
config_response = ConfigData(
message_type=0,
node=node_id,

@@ -1397,7 +1398,7 @@ class CoreHandler(socketserver.BaseRequestHandler):
open_file.write(data)
return ()

self.session.node_add_file(node_num, source_name, file_name, data)
self.session.add_node_file(node_num, source_name, file_name, data)
else:
raise NotImplementedError
@@ -2,6 +2,7 @@
emane.py: definition of an Emane class for implementing configuration control of an EMANE emulation.
"""

import copy
import logging
import os
import threading

@@ -443,10 +444,13 @@ class EmaneManager(ModelManager):
continue

platformid += 1

# create temporary config for updating distributed nodes
typeflags = ConfigFlags.UPDATE.value
self.set_config("platform_id_start", str(platformid))
self.set_config("nem_id_start", str(nemid))
config_data = ConfigShim.config_data(0, None, typeflags, self.emane_config, self.get_configs())
config = copy.deepcopy(self.get_configs())
config["platform_id_start"] = str(platformid)
config["nem_id_start"] = str(nemid)
config_data = ConfigShim.config_data(0, None, typeflags, self.emane_config, config)
message = dataconversion.convert_config(config_data)
server.sock.send(message)
# increment nemid for next server by number of interfaces

@@ -477,26 +481,30 @@ class EmaneManager(ModelManager):
be configured. This generates configuration for slave control nets
using the default list of prefixes.
"""
session = self.session
# slave server
session = self.session
if not session.master:
return

servers = session.broker.getservernames()
# not distributed
servers = session.broker.getservernames()
if len(servers) < 2:
return

prefix = session.options.get_config("controlnet")
prefixes = prefix.split()
# normal Config messaging will distribute controlnets
if len(prefixes) >= len(servers):
return
prefix = session.options.get_config("controlnet", default="")
prefixes = prefix.split()
if len(prefixes) < len(servers):
logging.info("setting up default controlnet prefixes for distributed (%d configured)", len(prefixes))
prefix = ctrlnet.DEFAULT_PREFIX_LIST[0]
prefixes = prefix.split()
servers.remove("localhost")
servers.insert(0, "localhost")
prefix = " ".join("%s:%s" % (s, prefixes[i]) for i, s in enumerate(servers))

# this generates a config message having controlnet prefix assignments
logging.info("Setting up default controlnet prefixes for distributed (%d configured)" % len(prefixes))
prefixes = ctrlnet.DEFAULT_PREFIX_LIST[0]
vals = 'controlnet="%s"' % prefixes
logging.info("setting up controlnet prefixes for distributed: %s", prefix)
vals = "controlnet=%s" % prefix
tlvdata = b""
tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.OBJECT.value, "session")
tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.TYPE.value, 0)

@@ -504,6 +512,7 @@ class EmaneManager(ModelManager):
rawmsg = coreapi.CoreConfMessage.pack(0, tlvdata)
msghdr = rawmsg[:coreapi.CoreMessage.header_len]
msg = coreapi.CoreConfMessage(flags=0, hdr=msghdr, data=rawmsg[coreapi.CoreMessage.header_len:])
logging.debug("sending controlnet message:\n%s", msg)
self.session.broker.handle_message(msg)

def check_node_models(self):

@@ -660,12 +669,12 @@ class EmaneManager(ModelManager):

# multicast route is needed for OTA data
args = [constants.IP_BIN, "route", "add", otagroup, "dev", otadev]
node.check_cmd(args)
node.network_cmd(args)

# multicast route is also needed for event data if on control network
if eventservicenetidx >= 0 and eventgroup != otagroup:
args = [constants.IP_BIN, "route", "add", eventgroup, "dev", eventdev]
node.check_cmd(args)
node.network_cmd(args)

# start emane
args = emanecmd + ["-f", os.path.join(path, "emane%d.log" % n), os.path.join(path, "platform%d.xml" % n)]
@@ -165,16 +165,16 @@ class EmaneNode(EmaneNet):
nemid = self.getnemid(netif)
ifname = netif.localname
if nemid is None:
logging.info("nemid for %s is unknown" % ifname)
logging.info("nemid for %s is unknown", ifname)
return
lat, long, alt = self.session.location.getgeo(x, y, z)
logging.info("setnemposition %s (%s) x,y,z=(%d,%d,%s)(%.6f,%.6f,%.6f)", ifname, nemid, x, y, z, lat, long, alt)
lat, lon, alt = self.session.location.getgeo(x, y, z)
logging.info("setnemposition %s (%s) x,y,z=(%d,%d,%s)(%.6f,%.6f,%.6f)", ifname, nemid, x, y, z, lat, lon, alt)
event = LocationEvent()

# altitude must be an integer or warning is printed
# unused: yaw, pitch, roll, azimuth, elevation, velocity
alt = int(round(alt))
event.append(nemid, latitude=lat, longitude=long, altitude=alt)
event.append(nemid, latitude=lat, longitude=lon, altitude=alt)
self.session.emane.service.publish(0, event)

def setnempositions(self, moved_netifs):

@@ -199,12 +199,12 @@ class EmaneNode(EmaneNet):
logging.info("nemid for %s is unknown" % ifname)
continue
x, y, z = netif.node.getposition()
lat, long, alt = self.session.location.getgeo(x, y, z)
lat, lon, alt = self.session.location.getgeo(x, y, z)
logging.info("setnempositions %d %s (%s) x,y,z=(%d,%d,%s)(%.6f,%.6f,%.6f)",
i, ifname, nemid, x, y, z, lat, long, alt)
i, ifname, nemid, x, y, z, lat, lon, alt)
# altitude must be an integer or warning is printed
alt = int(round(alt))
event.append(nemid, latitude=lat, longitude=long, altitude=alt)
event.append(nemid, latitude=lat, longitude=lon, altitude=alt)
i += 1

self.session.emane.service.publish(0, event)
@@ -79,12 +79,13 @@ class NodeOptions(object):
Options for creating and updating nodes within core.
"""

def __init__(self, name=None, model="PC"):
def __init__(self, name=None, model="PC", image=None):
"""
Create a NodeOptions object.

:param str name: name of node, defaults to node class name postfix with its id
:param str model: defines services for default and physical nodes, defaults to "router"
:param str image: image to use for docker nodes
"""
self.name = name
self.model = model

@@ -99,6 +100,7 @@ class NodeOptions(object):
self.alt = None
self.emulation_id = None
self.emulation_server = None
self.image = image

def set_position(self, x, y):
"""
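For illustration, a short hedged sketch of the new parameter in use; `image` and `set_position` are both defined in the hunks above, and the node types that actually consume `image` appear in the session.py changes further down.

```python
from core.emulator.emudata import NodeOptions

# image is only consumed by Docker/LXC nodes; other node types ignore it
options = NodeOptions(name="d1", image="ubuntu")
options.set_position(100, 100)
assert options.image == "ubuntu"
```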
@@ -81,6 +81,8 @@ class NodeTypes(Enum):
PEER_TO_PEER = 12
CONTROL_NET = 13
EMANE_NET = 14
DOCKER = 15
LXC = 16


class Rj45Models(Enum):
@@ -22,10 +22,10 @@ from core.api.tlv.broker import CoreBroker
from core.emane.emanemanager import EmaneManager
from core.emulator.data import EventData, NodeData
from core.emulator.data import ExceptionData
from core.emulator.emudata import LinkOptions, NodeOptions
from core.emulator.emudata import IdGen
from core.emulator.emudata import is_net_node
from core.emulator.emudata import LinkOptions, NodeOptions
from core.emulator.emudata import create_interface
from core.emulator.emudata import is_net_node
from core.emulator.emudata import link_config
from core.emulator.enumerations import EventTypes, LinkTypes
from core.emulator.enumerations import ExceptionLevels

@@ -361,6 +361,18 @@ class Session(object):
self.delete_node(net_one.id)
node_one.delnetif(interface_one.netindex)
node_two.delnetif(interface_two.netindex)
elif node_one and net_one:
interface = node_one.netif(interface_one_id)
logging.info("deleting link node(%s):interface(%s) node(%s)",
node_one.name, interface.name, net_one.name)
interface.detachnet()
node_one.delnetif(interface.netindex)
elif node_two and net_one:
interface = node_two.netif(interface_two_id)
logging.info("deleting link node(%s):interface(%s) node(%s)",
node_two.name, interface.name, net_one.name)
interface.detachnet()
node_two.delnetif(interface.netindex)
finally:
if node_one:
node_one.lock.release()

@@ -488,7 +500,10 @@ class Session(object):

# create node
logging.info("creating node(%s) id(%s) name(%s) start(%s)", node_class.__name__, _id, name, start)
node = self.create_node(cls=node_class, _id=_id, name=name, start=start)
if _type in [NodeTypes.DOCKER, NodeTypes.LXC]:
node = self.create_node(cls=node_class, _id=_id, name=name, start=start, image=node_options.image)
else:
node = self.create_node(cls=node_class, _id=_id, name=name, start=start)

# set node attributes
node.icon = node_options.icon

@@ -499,7 +514,7 @@ class Session(object):
self.set_node_position(node, node_options)

# add services to default and physical nodes only
if _type in [NodeTypes.DEFAULT, NodeTypes.PHYSICAL]:
if _type in [NodeTypes.DEFAULT, NodeTypes.PHYSICAL, NodeTypes.DOCKER, NodeTypes.LXC]:
node.type = node_options.model
logging.debug("set node type: %s", node.type)
self.services.add_services(node, node.type, node_options.services)

@@ -616,6 +631,9 @@ class Session(object):
# clear out existing session
self.clear()

if start:
self.set_state(EventTypes.CONFIGURATION_STATE)

# write out xml file
CoreXmlReader(self).read(file_name)

@@ -1168,7 +1186,7 @@ class Session(object):
with self._nodes_lock:
file_path = os.path.join(self.session_dir, "nodes")
with open(file_path, "w") as f:
for _id in sorted(self.nodes.keys()):
for _id in self.nodes.keys():
node = self.nodes[_id]
f.write("%s %s %s %s\n" % (_id, node.name, node.apitype, type(node)))
except IOError:

@@ -1214,19 +1232,19 @@ class Session(object):
# write current nodes out to session directory file
self.write_nodes()

# controlnet may be needed by some EMANE models
# create control net interfaces and broker network tunnels
# which need to exist for emane to sync on location events
# in distributed scenarios
self.add_remove_control_interface(node=None, remove=False)
self.broker.startup()

# instantiate will be invoked again upon Emane configure
if self.emane.startup() == self.emane.NOT_READY:
return

# start feature helpers
self.broker.startup()
self.mobility.startup()

# boot the services on each node
# boot node services and then start mobility
self.boot_nodes()
self.mobility.startup()

# set broker local instantiation to complete
self.broker.local_instantiation_complete()

@@ -1349,7 +1367,7 @@ class Session(object):
# TODO: PyCoreNode is not the type to check
if isinstance(node, CoreNodeBase) and not nodeutils.is_node(node, NodeTypes.RJ45):
# add a control interface if configured
logging.info("booting node: %s", node.name)
logging.info("booting node(%s): %s", node.name, node.services)
self.add_remove_control_interface(node=node, remove=False)
result = pool.apply_async(self.services.boot_services, (node,))
results.append(result)

@@ -1498,7 +1516,7 @@ class Session(object):
break

if not prefix:
logging.error("Control network prefix not found for server '%s'" % servers[0])
logging.error("control network prefix not found for server: %s", servers[0])
assign_address = False
try:
prefix = prefixes[0].split(':', 1)[1]
@@ -53,6 +53,14 @@ class MobilityManager(ModelManager):
self.physnets = {}
self.session.broker.handlers.add(self.physnodehandlelink)

def reset(self):
"""
Clear out all current configurations.

:return: nothing
"""
self.config_reset()

def startup(self, node_ids=None):
"""
Session is transitioning from instantiation to runtime state.
@@ -493,11 +493,11 @@ class CoreNode(CoreNodeBase):

# bring up the loopback interface
logging.debug("bringing up loopback interface")
self.check_cmd([constants.IP_BIN, "link", "set", "lo", "up"])
self.network_cmd([constants.IP_BIN, "link", "set", "lo", "up"])

# set hostname for node
logging.debug("setting hostname: %s", self.name)
self.check_cmd(["hostname", self.name])
self.network_cmd(["hostname", self.name])

# mark node as up
self.up = True

@@ -572,6 +572,17 @@ class CoreNode(CoreNodeBase):
"""
return self.client.cmd_output(args)

def network_cmd(self, args):
"""
Runs a command for a node that is used to configure and setup network interfaces.

:param list[str]|str args: command to run
:return: combined stdout and stderr
:rtype: str
:raises CoreCommandError: when a non-zero exit status occurs
"""
return self.check_cmd(args)

def check_cmd(self, args):
"""
Runs shell command on node.

@@ -667,15 +678,15 @@ class CoreNode(CoreNodeBase):

if self.up:
utils.check_cmd([constants.IP_BIN, "link", "set", veth.name, "netns", str(self.pid)])
self.check_cmd([constants.IP_BIN, "link", "set", veth.name, "name", ifname])
self.check_cmd([constants.ETHTOOL_BIN, "-K", ifname, "rx", "off", "tx", "off"])
self.network_cmd([constants.IP_BIN, "link", "set", veth.name, "name", ifname])
self.network_cmd([constants.ETHTOOL_BIN, "-K", ifname, "rx", "off", "tx", "off"])

veth.name = ifname

if self.up:
# TODO: potentially find better way to query interface ID
# retrieve interface information
output = self.check_cmd(["ip", "link", "show", veth.name])
output = self.network_cmd([constants.IP_BIN, "link", "show", veth.name])
logging.debug("interface command output: %s", output)
output = output.split("\n")
veth.flow_id = int(output[0].strip().split(":")[0]) + 1

@@ -736,7 +747,7 @@ class CoreNode(CoreNodeBase):
self._netif[ifindex].sethwaddr(addr)
if self.up:
args = [constants.IP_BIN, "link", "set", "dev", self.ifname(ifindex), "address", str(addr)]
self.check_cmd(args)
self.network_cmd(args)

def addaddr(self, ifindex, addr):
"""

@@ -750,10 +761,10 @@ class CoreNode(CoreNodeBase):
# check if addr is ipv6
if ":" in str(addr):
args = [constants.IP_BIN, "addr", "add", str(addr), "dev", self.ifname(ifindex)]
self.check_cmd(args)
self.network_cmd(args)
else:
args = [constants.IP_BIN, "addr", "add", str(addr), "broadcast", "+", "dev", self.ifname(ifindex)]
self.check_cmd(args)
self.network_cmd(args)

self._netif[ifindex].addaddr(addr)

@@ -772,7 +783,7 @@ class CoreNode(CoreNodeBase):
logging.exception("trying to delete unknown address: %s" % addr)

if self.up:
self.check_cmd([constants.IP_BIN, "addr", "del", str(addr), "dev", self.ifname(ifindex)])
self.network_cmd([constants.IP_BIN, "addr", "del", str(addr), "dev", self.ifname(ifindex)])

def delalladdr(self, ifindex, address_types=None):
"""

@@ -806,7 +817,7 @@ class CoreNode(CoreNodeBase):
:return: nothing
"""
if self.up:
self.check_cmd([constants.IP_BIN, "link", "set", self.ifname(ifindex), "up"])
self.network_cmd([constants.IP_BIN, "link", "set", self.ifname(ifindex), "up"])

def newnetif(self, net=None, addrlist=None, hwaddr=None, ifindex=None, ifname=None):
"""

@@ -867,12 +878,12 @@ class CoreNode(CoreNodeBase):
utils.check_cmd([constants.IP_BIN, "link", "add", "name", tmp1, "type", "veth", "peer", "name", tmp2])

utils.check_cmd([constants.IP_BIN, "link", "set", tmp1, "netns", str(self.pid)])
self.check_cmd([constants.IP_BIN, "link", "set", tmp1, "name", ifname])
self.network_cmd([constants.IP_BIN, "link", "set", tmp1, "name", ifname])
interface = CoreInterface(node=self, name=ifname, mtu=_DEFAULT_MTU)
self.addnetif(interface, self.newifindex())

utils.check_cmd([constants.IP_BIN, "link", "set", tmp2, "netns", str(othernode.pid)])
othernode.check_cmd([constants.IP_BIN, "link", "set", tmp2, "name", otherifname])
othernode.network_cmd([constants.IP_BIN, "link", "set", tmp2, "name", otherifname])
other_interface = CoreInterface(node=othernode, name=otherifname, mtu=_DEFAULT_MTU)
othernode.addnetif(other_interface, othernode.newifindex())
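The repeated check_cmd to network_cmd swap above funnels all interface, address, and route setup through a single hook: on the namespace-backed CoreNode it simply defers to check_cmd, while the container nodes introduced below override it to run inside the container's namespaces, or to skip the command while the node is down. A hedged sketch of the pattern only, not the CORE classes themselves:

```python
class NamespaceBackedNode:
    def check_cmd(self, args):
        # placeholder for the real per-node command execution
        raise NotImplementedError

    def network_cmd(self, args):
        # default: network setup runs like any other node command
        return self.check_cmd(args)


class ContainerBackedNode(NamespaceBackedNode):
    up = False

    def ns_cmd(self, args):
        # placeholder for running a command inside the container namespaces
        raise NotImplementedError

    def network_cmd(self, args):
        # containers run network setup via their namespaces, and only once up
        if not self.up:
            return 0
        return self.ns_cmd(args)
```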
daemon/core/nodes/docker.py (new file, 297 lines)

@@ -0,0 +1,297 @@
import json
import logging
import os

from core import utils, CoreCommandError
from core.emulator.enumerations import NodeTypes
from core.nodes.base import CoreNode


class DockerClient(object):
def __init__(self, name, image):
self.name = name
self.image = image
self.pid = None
self._addr = {}

def create_container(self):
utils.check_cmd(
"docker run -td --init --net=none --hostname {name} --name {name} "
"--sysctl net.ipv6.conf.all.disable_ipv6=0 "
"{image} /bin/bash".format(
name=self.name,
image=self.image
))
self.pid = self.get_pid()
return self.pid

def get_info(self):
args = "docker inspect {name}".format(name=self.name)
status, output = utils.cmd_output(args)
if status:
raise CoreCommandError(status, args, output)
data = json.loads(output)
if not data:
raise CoreCommandError(status, args, "docker({name}) not present".format(name=self.name))
return data[0]

def is_alive(self):
try:
data = self.get_info()
return data["State"]["Running"]
except CoreCommandError:
return False

def stop_container(self):
utils.check_cmd("docker rm -f {name}".format(
name=self.name
))

def cmd(self, cmd, wait=True):
if isinstance(cmd, list):
cmd = " ".join(cmd)
logging.info("docker cmd wait(%s): %s", wait, cmd)
return utils.cmd("docker exec {name} {cmd}".format(
name=self.name,
cmd=cmd
), wait)

def cmd_output(self, cmd):
if isinstance(cmd, list):
cmd = " ".join(cmd)
logging.info("docker cmd output: %s", cmd)
return utils.cmd_output("docker exec {name} {cmd}".format(
name=self.name,
cmd=cmd
))

def ns_cmd(self, cmd):
if isinstance(cmd, list):
cmd = " ".join(cmd)
args = "nsenter -t {pid} -u -i -p -n {cmd}".format(
pid=self.pid,
cmd=cmd
)
logging.info("ns cmd: %s", args)
return utils.cmd_output(args)

def get_pid(self):
args = "docker inspect -f '{{{{.State.Pid}}}}' {name}".format(name=self.name)
status, output = utils.cmd_output(args)
if status:
raise CoreCommandError(status, args, output)
self.pid = output
logging.debug("node(%s) pid: %s", self.name, self.pid)
return output

def copy_file(self, source, destination):
args = "docker cp {source} {name}:{destination}".format(
source=source,
name=self.name,
destination=destination
)
status, output = utils.cmd_output(args)
if status:
raise CoreCommandError(status, args, output)

def getaddr(self, ifname, rescan=False):
"""
Get address for interface on node.

:param str ifname: interface name to get address for
:param bool rescan: rescan flag
:return: interface information
:rtype: dict
"""
if ifname in self._addr and not rescan:
return self._addr[ifname]

interface = {"ether": [], "inet": [], "inet6": [], "inet6link": []}
args = ["ip", "addr", "show", "dev", ifname]
status, output = self.ns_cmd(args)
for line in output:
line = line.strip().split()
if line[0] == "link/ether":
interface["ether"].append(line[1])
elif line[0] == "inet":
interface["inet"].append(line[1])
elif line[0] == "inet6":
if line[3] == "global":
interface["inet6"].append(line[1])
elif line[3] == "link":
interface["inet6link"].append(line[1])
else:
logging.warning("unknown scope: %s" % line[3])

if status:
logging.warning("nonzero exist status (%s) for cmd: %s", status, args)
self._addr[ifname] = interface
return interface


class DockerNode(CoreNode):
apitype = NodeTypes.DOCKER.value

def __init__(self, session, _id=None, name=None, nodedir=None, bootsh="boot.sh", start=True, image=None):
"""
Create a DockerNode instance.

:param core.emulator.session.Session session: core session instance
:param int _id: object id
:param str name: object name
:param str nodedir: node directory
:param str bootsh: boot shell to use
:param bool start: start flag
:param str image: image to start container with
"""
if image is None:
image = "ubuntu"
self.image = image
super(DockerNode, self).__init__(session, _id, name, nodedir, bootsh, start)

def alive(self):
"""
Check if the node is alive.

:return: True if node is alive, False otherwise
:rtype: bool
"""
return self.client.is_alive()

def startup(self):
"""
Start a new namespace node by invoking the vnoded process that
allocates a new namespace. Bring up the loopback device and set
the hostname.

:return: nothing
"""
with self.lock:
if self.up:
raise ValueError("starting a node that is already up")
self.makenodedir()
self.client = DockerClient(self.name, self.image)
self.pid = self.client.create_container()
self.up = True

def shutdown(self):
"""
Shutdown logic.

:return: nothing
"""
# nothing to do if node is not up
if not self.up:
return

with self.lock:
self._netif.clear()
self.client.stop_container()
self.up = False

def cmd(self, args, wait=True):
"""
Runs shell command on node, with option to not wait for a result.

:param list[str]|str args: command to run
:param bool wait: wait for command to exit, defaults to True
:return: exit status for command
:rtype: int
"""
return self.client.cmd(args, wait)

def cmd_output(self, args):
"""
Runs shell command on node and get exit status and output.

:param list[str]|str args: command to run
:return: exit status and combined stdout and stderr
:rtype: tuple[int, str]
"""
return self.client.cmd_output(args)

def check_cmd(self, args):
"""
Runs shell command on node.

:param list[str]|str args: command to run
:return: combined stdout and stderr
:rtype: str
:raises CoreCommandError: when a non-zero exit status occurs
"""
status, output = self.client.cmd_output(args)
if status:
raise CoreCommandError(status, args, output)
return output

def network_cmd(self, args):
if not self.up:
logging.debug("node down, not running network command: %s", args)
return 0

status, output = self.client.ns_cmd(args)
if status:
raise CoreCommandError(status, args, output)
return output

def termcmdstring(self, sh="/bin/sh"):
"""
Create a terminal command string.

:param str sh: shell to execute command in
:return: str
"""
return "docker exec -it {name} bash".format(name=self.name)

def privatedir(self, path):
"""
Create a private directory.

:param str path: path to create
:return: nothing
"""
logging.info("creating node dir: %s", path)
args = "mkdir -p {path}".format(path=path)
self.check_cmd(args)

def mount(self, source, target):
"""
Create and mount a directory.

:param str source: source directory to mount
:param str target: target directory to create
:return: nothing
:raises CoreCommandError: when a non-zero exit status occurs
"""
logging.info("mounting source(%s) target(%s)", source, target)
raise Exception("not supported")

def nodefile(self, filename, contents, mode=0o644):
"""
Create a node file with a given mode.

:param str filename: name of file to create
:param contents: contents of file
:param int mode: mode for file
:return: nothing
"""
logging.info("node dir(%s) ctrlchannel(%s)", self.nodedir, self.ctrlchnlname)
logging.info("nodefile filename(%s) mode(%s)", filename, mode)
file_path = os.path.join(self.nodedir, filename)
with open(file_path, "w") as f:
os.chmod(f.name, mode)
f.write(contents)
self.client.copy_file(file_path, filename)

def nodefilecopy(self, filename, srcfilename, mode=None):
"""
Copy a file to a node, following symlinks and preserving metadata.
Change file mode if specified.

:param str filename: file name to copy file to
:param str srcfilename: file to copy
:param int mode: mode to copy to
:return: nothing
"""
logging.info("node file copy file(%s) source(%s) mode(%s)", filename, srcfilename, mode)
raise Exception("not supported")
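A hedged sketch of driving the new DockerClient directly; it assumes a local Docker daemon and an available `ubuntu` image, and in normal use DockerNode creates and owns the client for you.

```python
from core.nodes.docker import DockerClient

client = DockerClient("d1", "ubuntu")
pid = client.create_container()                          # docker run ... --net=none
print("container pid:", pid)
status, output = client.ns_cmd(["ip", "addr", "show"])   # nsenter into the container
print(status, output)
client.stop_container()                                  # docker rm -f d1
```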
@@ -237,7 +237,7 @@ class Veth(CoreInterface):

if self.node:
try:
self.node.check_cmd([constants.IP_BIN, "-6", "addr", "flush", "dev", self.name])
self.node.network_cmd([constants.IP_BIN, "-6", "addr", "flush", "dev", self.name])
except CoreCommandError:
logging.exception("error shutting down interface")

@@ -245,7 +245,7 @@ class Veth(CoreInterface):
try:
utils.check_cmd([constants.IP_BIN, "link", "delete", self.localname])
except CoreCommandError:
logging.exception("error deleting link")
logging.info("link already removed: %s", self.localname)

self.up = False

@@ -298,7 +298,7 @@ class TunTap(CoreInterface):
return

try:
self.node.check_cmd([constants.IP_BIN, "-6", "addr", "flush", "dev", self.name])
self.node.network_cmd([constants.IP_BIN, "-6", "addr", "flush", "dev", self.name])
except CoreCommandError:
logging.exception("error shutting down tunnel tap")

@@ -361,7 +361,11 @@ class TunTap(CoreInterface):

def nodedevexists():
args = [constants.IP_BIN, "link", "show", self.name]
return self.node.cmd(args)
try:
self.node.network_cmd(args)
return 0
except CoreCommandError:
return 1

count = 0
while True:

@@ -393,8 +397,8 @@ class TunTap(CoreInterface):
self.waitfordevicelocal()
netns = str(self.node.pid)
utils.check_cmd([constants.IP_BIN, "link", "set", self.localname, "netns", netns])
self.node.check_cmd([constants.IP_BIN, "link", "set", self.localname, "name", self.name])
self.node.check_cmd([constants.IP_BIN, "link", "set", self.name, "up"])
self.node.network_cmd([constants.IP_BIN, "link", "set", self.localname, "name", self.name])
self.node.network_cmd([constants.IP_BIN, "link", "set", self.name, "up"])

def setaddrs(self):
"""

@@ -404,7 +408,7 @@ class TunTap(CoreInterface):
"""
self.waitfordevicenode()
for addr in self.addrlist:
self.node.check_cmd([constants.IP_BIN, "addr", "add", str(addr), "dev", self.name])
self.node.network_cmd([constants.IP_BIN, "addr", "add", str(addr), "dev", self.name])


class GreTap(CoreInterface):
daemon/core/nodes/lxd.py (new file, 301 lines)

@@ -0,0 +1,301 @@
import json
import logging
import os
import time

from core import utils, CoreCommandError
from core.emulator.enumerations import NodeTypes
from core.nodes.base import CoreNode


class LxdClient(object):
def __init__(self, name, image):
self.name = name
self.image = image
self.pid = None
self._addr = {}

def create_container(self):
utils.check_cmd("lxc launch {image} {name}".format(
name=self.name,
image=self.image
))
data = self.get_info()
self.pid = data["state"]["pid"]
return self.pid

def get_info(self):
args = "lxc list {name} --format json".format(name=self.name)
status, output = utils.cmd_output(args)
if status:
raise CoreCommandError(status, args, output)
data = json.loads(output)
if not data:
raise CoreCommandError(status, args, "LXC({name}) not present".format(name=self.name))
return data[0]

def is_alive(self):
try:
data = self.get_info()
return data["state"]["status"] == "Running"
except CoreCommandError:
return False

def stop_container(self):
utils.check_cmd("lxc delete --force {name}".format(
name=self.name
))

def _cmd_args(self, cmd):
return "lxc exec -nT {name} -- {cmd}".format(
name=self.name,
cmd=cmd
)

def cmd_output(self, cmd):
if isinstance(cmd, list):
cmd = " ".join(cmd)
args = self._cmd_args(cmd)
logging.info("lxc cmd output: %s", args)
return utils.cmd_output(args)

def cmd(self, cmd, wait=True):
if isinstance(cmd, list):
cmd = " ".join(cmd)
args = self._cmd_args(cmd)
logging.info("lxc cmd: %s", args)
return utils.cmd(args, wait)

def _ns_args(self, cmd):
return "nsenter -t {pid} -m -u -i -p -n {cmd}".format(
pid=self.pid,
cmd=cmd
)

def ns_cmd_output(self, cmd):
if isinstance(cmd, list):
cmd = " ".join(cmd)
args = self._ns_args(cmd)
logging.info("ns cmd: %s", args)
return utils.cmd_output(args)

def ns_cmd(self, cmd, wait=True):
if isinstance(cmd, list):
cmd = " ".join(cmd)
args = self._ns_args(cmd)
logging.info("ns cmd: %s", args)
return utils.cmd(args, wait)

def copy_file(self, source, destination):
if destination[0] != "/":
destination = os.path.join("/root/", destination)

args = "lxc file push {source} {name}/{destination}".format(
source=source,
name=self.name,
destination=destination
)
status, output = utils.cmd_output(args)
if status:
raise CoreCommandError(status, args, output)

def getaddr(self, ifname, rescan=False):
"""
Get address for interface on node.

:param str ifname: interface name to get address for
:param bool rescan: rescan flag
:return: interface information
:rtype: dict
"""
if ifname in self._addr and not rescan:
return self._addr[ifname]

interface = {"ether": [], "inet": [], "inet6": [], "inet6link": []}
args = ["ip", "addr", "show", "dev", ifname]
status, output = self.ns_cmd_output(args)
for line in output:
line = line.strip().split()
if line[0] == "link/ether":
interface["ether"].append(line[1])
elif line[0] == "inet":
interface["inet"].append(line[1])
elif line[0] == "inet6":
if line[3] == "global":
interface["inet6"].append(line[1])
elif line[3] == "link":
interface["inet6link"].append(line[1])
else:
logging.warning("unknown scope: %s" % line[3])

if status:
logging.warning("nonzero exist status (%s) for cmd: %s", status, args)
self._addr[ifname] = interface
return interface


class LxcNode(CoreNode):
apitype = NodeTypes.LXC.value

def __init__(self, session, _id=None, name=None, nodedir=None, bootsh="boot.sh", start=True, image=None):
"""
Create a LxcNode instance.

:param core.emulator.session.Session session: core session instance
:param int _id: object id
:param str name: object name
:param str nodedir: node directory
:param str bootsh: boot shell to use
:param bool start: start flag
:param str image: image to start container with
"""
if image is None:
image = "ubuntu"
self.image = image
super(LxcNode, self).__init__(session, _id, name, nodedir, bootsh, start)

def alive(self):
"""
Check if the node is alive.

:return: True if node is alive, False otherwise
:rtype: bool
"""
return self.client.is_alive()

def startup(self):
"""
Startup logic.

:return: nothing
"""
with self.lock:
if self.up:
raise ValueError("starting a node that is already up")
self.makenodedir()
self.client = LxdClient(self.name, self.image)
self.pid = self.client.create_container()
self.up = True

def shutdown(self):
"""
Shutdown logic.

:return: nothing
"""
# nothing to do if node is not up
if not self.up:
return

with self.lock:
self._netif.clear()
self.client.stop_container()
self.up = False

def cmd(self, args, wait=True):
"""
Runs shell command on node, with option to not wait for a result.

:param list[str]|str args: command to run
:param bool wait: wait for command to exit, defaults to True
:return: exit status for command
:rtype: int
"""
return self.client.cmd(args, wait)

def cmd_output(self, args):
"""
Runs shell command on node and get exit status and output.

:param list[str]|str args: command to run
:return: exit status and combined stdout and stderr
:rtype: tuple[int, str]
"""
return self.client.cmd_output(args)

def check_cmd(self, args):
"""
Runs shell command on node.

:param list[str]|str args: command to run
:return: combined stdout and stderr
:rtype: str
:raises CoreCommandError: when a non-zero exit status occurs
"""
status, output = self.client.cmd_output(args)
if status:
raise CoreCommandError(status, args, output)
return output

def network_cmd(self, args):
if not self.up:
logging.debug("node down, not running network command: %s", args)
return 0
return self.check_cmd(args)

def termcmdstring(self, sh="/bin/sh"):
"""
Create a terminal command string.

:param str sh: shell to execute command in
:return: str
"""
return "lxc exec {name} -- bash".format(name=self.name)

def privatedir(self, path):
"""
Create a private directory.

:param str path: path to create
:return: nothing
"""
logging.info("creating node dir: %s", path)
args = "mkdir -p {path}".format(path=path)
self.check_cmd(args)

def mount(self, source, target):
"""
Create and mount a directory.

:param str source: source directory to mount
:param str target: target directory to create
:return: nothing
:raises CoreCommandError: when a non-zero exit status occurs
"""
logging.info("mounting source(%s) target(%s)", source, target)
raise Exception("not supported")

def nodefile(self, filename, contents, mode=0o644):
"""
Create a node file with a given mode.

:param str filename: name of file to create
:param contents: contents of file
:param int mode: mode for file
:return: nothing
"""
logging.info("node dir(%s) ctrlchannel(%s)", self.nodedir, self.ctrlchnlname)
logging.info("nodefile filename(%s) mode(%s)", filename, mode)
file_path = os.path.join(self.nodedir, filename)
with open(file_path, "w") as f:
os.chmod(f.name, mode)
f.write(contents)
self.client.copy_file(file_path, filename)

def nodefilecopy(self, filename, srcfilename, mode=None):
"""
Copy a file to a node, following symlinks and preserving metadata.
Change file mode if specified.

:param str filename: file name to copy file to
:param str srcfilename: file to copy
:param int mode: mode to copy to
:return: nothing
"""
logging.info("node file copy file(%s) source(%s) mode(%s)", filename, srcfilename, mode)
raise Exception("not supported")

def addnetif(self, netif, ifindex):
super(LxcNode, self).addnetif(netif, ifindex)
# adding small delay to allow time for adding addresses to work correctly
time.sleep(0.5)
@@ -459,7 +459,7 @@ class CoreNetwork(CoreNetworkBase):
netem = ["netem"]
changed = max(changed, netif.setparam("delay", delay))
if loss is not None:
loss = int(loss)
loss = float(loss)
changed = max(changed, netif.setparam("loss", loss))
if duplicate is not None:
duplicate = int(duplicate)
@@ -2,6 +2,8 @@
Provides default node maps that can be used to run core with.
"""
import core.nodes.base
import core.nodes.docker
import core.nodes.lxd
import core.nodes.network
import core.nodes.physical
from core.emane.nodes import EmaneNet

@@ -25,5 +27,7 @@ NODES = {
NodeTypes.EMANE_NET: EmaneNet,
NodeTypes.TAP_BRIDGE: GreTapBridge,
NodeTypes.PEER_TO_PEER: core.nodes.network.PtpNet,
NodeTypes.CONTROL_NET: core.nodes.network.CtrlNet
NodeTypes.CONTROL_NET: core.nodes.network.CtrlNet,
NodeTypes.DOCKER: core.nodes.docker.DockerNode,
NodeTypes.LXC: core.nodes.lxd.LxcNode
}
@@ -225,7 +225,7 @@ class OvsNet(CoreNetworkBase):
delay_changed = netif.setparam("delay", delay)

if loss is not None:
loss = int(loss)
loss = float(loss)
loss_changed = netif.setparam("loss", loss)

if duplicate is not None:
@ -3,6 +3,7 @@ Miscellaneous utility functions, wrappers around some subprocess procedures.
|
|||
"""
|
||||
|
||||
import fcntl
|
||||
import hashlib
|
||||
import importlib
|
||||
import inspect
|
||||
import logging
|
||||
|
@ -17,6 +18,43 @@ from core import CoreCommandError
|
|||
DEVNULL = open(os.devnull, "wb")
|
||||
|
||||
|
||||
def execute_file(path, exec_globals=None, exec_locals=None):
|
||||
"""
|
||||
Provides an alternative way to run execfile to be compatible for
|
||||
both python2/3.
|
||||
|
||||
:param str path: path of file to execute
|
||||
:param dict exec_globals: globals values to pass to execution
|
||||
:param dict exec_locals: local values to pass to execution
|
||||
:return: nothing
|
||||
"""
|
||||
if exec_globals is None:
|
||||
exec_globals = {}
|
||||
exec_globals.update({
|
||||
"__file__": path,
|
||||
"__name__": "__main__"
|
||||
})
|
||||
with open(path, "rb") as f:
|
||||
data = compile(f.read(), path, "exec")
|
||||
exec(data, exec_globals, exec_locals)
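As a quick illustration (not part of the change itself), the helper can run an
arbitrary script and expose whatever the script defined through the globals
dict passed in; the script path is hypothetical and this assumes the function
lives in core.utils, as the surrounding hunk suggests.

```python
from core import utils

# hypothetical script path; any readable python file will do
context = {}
utils.execute_file("/tmp/custom_scenario.py", exec_globals=context)
# module-level names defined by the script now live in context, and
# __name__ was forced to "__main__" while it executed
print(context.get("__name__"))
```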
|
||||
|
||||
|
||||
def hashkey(value):
|
||||
"""
|
||||
Provide a consistent hash that can be used in place
of the builtin hash, which no longer behaves consistently
in Python 3.
|
||||
|
||||
:param str/int value: value to hash
|
||||
:return: hash value
|
||||
:rtype: int
|
||||
"""
|
||||
if isinstance(value, int):
|
||||
value = str(value)
|
||||
value = value.encode("utf-8")
|
||||
return int(hashlib.sha256(value).hexdigest(), 16)
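A small sketch of the intent (illustrative only): the digest-based key is stable
across processes, unlike the builtin hash(), which Python 3 randomizes per run,
and integer inputs are coerced to their string form first.

```python
from core import utils

# the same input always yields the same key, even across separate runs
assert utils.hashkey("n1") == utils.hashkey("n1")
# ints are converted to strings before hashing, so these two match
assert utils.hashkey(42) == utils.hashkey("42")
```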
|
||||
|
||||
|
||||
def _detach_init():
|
||||
"""
|
||||
Fork a child process and exit.
|
||||
|
|
|
@ -782,7 +782,7 @@ class CoreXmlReader(object):
|
|||
link_options.mburst = get_int(options_element, "mburst")
|
||||
link_options.jitter = get_int(options_element, "jitter")
|
||||
link_options.key = get_int(options_element, "key")
|
||||
link_options.per = get_int(options_element, "per")
|
||||
link_options.per = get_float(options_element, "per")
|
||||
link_options.unidirectional = get_int(options_element, "unidirectional")
|
||||
link_options.session = options_element.get("session")
|
||||
link_options.emulation_id = get_int(options_element, "emulation_id")
|
||||
|
|
|
@ -108,7 +108,7 @@ def build_node_platform_xml(emane_manager, control_net, node, nem_id, platform_x
|
|||
:return: the next nem id that can be used for creating platform xml files
|
||||
:rtype: int
|
||||
"""
|
||||
logging.debug("building emane platform xml for node(%s): %s", node, node.name)
|
||||
logging.debug("building emane platform xml for node(%s) nem_id(%s): %s", node, nem_id, node.name)
|
||||
nem_entries = {}
|
||||
|
||||
if node.model is None:
|
||||
|
@ -116,6 +116,7 @@ def build_node_platform_xml(emane_manager, control_net, node, nem_id, platform_x
|
|||
return nem_entries
|
||||
|
||||
for netif in node.netifs():
|
||||
logging.debug("building platform xml for interface(%s) nem_id(%s)", netif.name, nem_id)
|
||||
# build nem xml
|
||||
nem_definition = nem_file_name(node.model, netif)
|
||||
nem_element = etree.Element("nem", id=str(nem_id), name=netif.localname, definition=nem_definition)
|
||||
|
|
30
daemon/examples/docker/README.md
Normal file
|
@ -0,0 +1,30 @@
|
|||
# Docker Support
|
||||
|
||||
Information on how Docker can be leveraged to create nodes based on
Docker containers and images that can interface with existing CORE
nodes, when needed.
|
||||
|
||||
# Installation
|
||||
|
||||
```shell
|
||||
sudo apt install docker.io
|
||||
```
|
||||
|
||||
# Configuration
|
||||
|
||||
Custom configuration is required to avoid iptables rules being added and to
remove the need for the default Docker network, since CORE will be
orchestrating connections between nodes.
|
||||
|
||||
Place the file below in **/etc/docker/**
|
||||
* daemon.json
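After placing the file, restart the Docker service so it picks up the new
settings; on a systemd-based install this would typically be:

```shell
sudo systemctl restart docker
```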
|
||||
|
||||
# Tools and Versions Tested With
|
||||
|
||||
* Docker version 18.09.5, build e8ff056
|
||||
* nsenter from util-linux 2.31.1
|
||||
|
||||
# Examples
|
||||
|
||||
This directory provides a few small examples that create Docker nodes
and link them to each other or to standard CORE nodes.
|
5
daemon/examples/docker/daemon.json
Normal file
|
@ -0,0 +1,5 @@
|
|||
{
|
||||
"bridge": "none",
|
||||
"iptables": false
|
||||
|
||||
}
|
33
daemon/examples/docker/docker2core.py
Normal file
|
@ -0,0 +1,33 @@
|
|||
import logging
|
||||
|
||||
from core.emulator.coreemu import CoreEmu
|
||||
from core.emulator.emudata import IpPrefixes, NodeOptions
|
||||
from core.emulator.enumerations import NodeTypes, EventTypes
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
logging.basicConfig(level=logging.DEBUG)
|
||||
coreemu = CoreEmu()
|
||||
session = coreemu.create_session()
|
||||
session.set_state(EventTypes.CONFIGURATION_STATE)
|
||||
|
||||
try:
|
||||
prefixes = IpPrefixes(ip4_prefix="10.83.0.0/16")
|
||||
options = NodeOptions(model=None, image="ubuntu")
|
||||
|
||||
# create node one
|
||||
node_one = session.add_node(_type=NodeTypes.DOCKER, node_options=options)
|
||||
interface_one = prefixes.create_interface(node_one)
|
||||
|
||||
# create node two
|
||||
node_two = session.add_node()
|
||||
interface_two = prefixes.create_interface(node_two)
|
||||
|
||||
# add link
|
||||
session.add_link(node_one.id, node_two.id, interface_one, interface_two)
|
||||
|
||||
# instantiate
|
||||
session.instantiate()
|
||||
finally:
|
||||
input("continue to shutdown")
|
||||
coreemu.shutdown()
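Assuming a system-wide CORE install, the example scripts in this directory are
typically run directly as root (the interpreter name depends on how the daemon
was installed):

```shell
sudo python3 docker2core.py
```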
|
35
daemon/examples/docker/docker2docker.py
Normal file
|
@ -0,0 +1,35 @@
|
|||
import logging
|
||||
|
||||
from core.emulator.coreemu import CoreEmu
|
||||
from core.emulator.emudata import IpPrefixes, NodeOptions
|
||||
from core.emulator.enumerations import NodeTypes, EventTypes
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
logging.basicConfig(level=logging.DEBUG)
|
||||
|
||||
coreemu = CoreEmu()
|
||||
session = coreemu.create_session()
|
||||
session.set_state(EventTypes.CONFIGURATION_STATE)
|
||||
|
||||
# create nodes and interfaces
|
||||
try:
|
||||
prefixes = IpPrefixes(ip4_prefix="10.83.0.0/16")
|
||||
options = NodeOptions(model=None, image="ubuntu")
|
||||
|
||||
# create node one
|
||||
node_one = session.add_node(_type=NodeTypes.DOCKER, node_options=options)
|
||||
interface_one = prefixes.create_interface(node_one)
|
||||
|
||||
# create node two
|
||||
node_two = session.add_node(_type=NodeTypes.DOCKER, node_options=options)
|
||||
interface_two = prefixes.create_interface(node_two)
|
||||
|
||||
# add link
|
||||
session.add_link(node_one.id, node_two.id, interface_one, interface_two)
|
||||
|
||||
# instantiate
|
||||
session.instantiate()
|
||||
finally:
|
||||
input("continue to shutdown")
|
||||
coreemu.shutdown()
|
43
daemon/examples/docker/switch.py
Normal file
|
@ -0,0 +1,43 @@
|
|||
import logging
|
||||
|
||||
from core.emulator.coreemu import CoreEmu
|
||||
from core.emulator.emudata import IpPrefixes, NodeOptions
|
||||
from core.emulator.enumerations import NodeTypes, EventTypes
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
logging.basicConfig(level=logging.DEBUG)
|
||||
|
||||
coreemu = CoreEmu()
|
||||
session = coreemu.create_session()
|
||||
session.set_state(EventTypes.CONFIGURATION_STATE)
|
||||
|
||||
try:
|
||||
prefixes = IpPrefixes(ip4_prefix="10.83.0.0/16")
|
||||
options = NodeOptions(model=None, image="ubuntu")
|
||||
|
||||
# create switch
|
||||
switch = session.add_node(_type=NodeTypes.SWITCH)
|
||||
|
||||
# node one
|
||||
node_one = session.add_node(_type=NodeTypes.DOCKER, node_options=options)
|
||||
interface_one = prefixes.create_interface(node_one)
|
||||
|
||||
# node two
|
||||
node_two = session.add_node(_type=NodeTypes.DOCKER, node_options=options)
|
||||
interface_two = prefixes.create_interface(node_two)
|
||||
|
||||
# node three
|
||||
node_three = session.add_node()
|
||||
interface_three = prefixes.create_interface(node_three)
|
||||
|
||||
# add links
|
||||
session.add_link(node_one.id, switch.id, interface_one)
|
||||
session.add_link(node_two.id, switch.id, interface_two)
|
||||
session.add_link(node_three.id, switch.id, interface_three)
|
||||
|
||||
# instantiate
|
||||
session.instantiate()
|
||||
finally:
|
||||
input("continue to shutdown")
|
||||
coreemu.shutdown()
|
29
daemon/examples/lxd/README.md
Normal file
|
@ -0,0 +1,29 @@
|
|||
# LXD Support
|
||||
|
||||
Information on how LXD can be leveraged to create nodes based on LXC
containers and images that can interface with existing CORE nodes,
when needed.
|
||||
|
||||
# Installation
|
||||
|
||||
```shell
|
||||
sudo snap install lxd
|
||||
```
|
||||
|
||||
# Configuration
|
||||
|
||||
Initialize LXD and say no to adding a default bridge.
|
||||
|
||||
```shell
|
||||
sudo lxd init
|
||||
```
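As a quick sanity check (not part of the original instructions), listing
containers should succeed once initialization is done:

```shell
lxc list
```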
|
||||
|
||||
# Tools and Versions Tested With
|
||||
|
||||
* LXD 3.14
|
||||
* nsenter from util-linux 2.31.1
|
||||
|
||||
# Examples
|
||||
|
||||
This directory provides a few small examples that create LXC nodes
using LXD and link them to each other or to standard CORE nodes.
|
33
daemon/examples/lxd/lxd2core.py
Normal file
|
@ -0,0 +1,33 @@
|
|||
import logging
|
||||
|
||||
from core.emulator.coreemu import CoreEmu
|
||||
from core.emulator.emudata import IpPrefixes, NodeOptions
|
||||
from core.emulator.enumerations import NodeTypes, EventTypes
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
logging.basicConfig(level=logging.DEBUG)
|
||||
coreemu = CoreEmu()
|
||||
session = coreemu.create_session()
|
||||
session.set_state(EventTypes.CONFIGURATION_STATE)
|
||||
|
||||
try:
|
||||
prefixes = IpPrefixes(ip4_prefix="10.83.0.0/16")
|
||||
options = NodeOptions(image="ubuntu")
|
||||
|
||||
# create node one
|
||||
node_one = session.add_node(_type=NodeTypes.LXC, node_options=options)
|
||||
interface_one = prefixes.create_interface(node_one)
|
||||
|
||||
# create node two
|
||||
node_two = session.add_node()
|
||||
interface_two = prefixes.create_interface(node_two)
|
||||
|
||||
# add link
|
||||
session.add_link(node_one.id, node_two.id, interface_one, interface_two)
|
||||
|
||||
# instantiate
|
||||
session.instantiate()
|
||||
finally:
|
||||
input("continue to shutdown")
|
||||
coreemu.shutdown()
|
35
daemon/examples/lxd/lxd2lxd.py
Normal file
|
@ -0,0 +1,35 @@
|
|||
import logging
|
||||
|
||||
from core.emulator.coreemu import CoreEmu
|
||||
from core.emulator.emudata import IpPrefixes, NodeOptions
|
||||
from core.emulator.enumerations import NodeTypes, EventTypes
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
logging.basicConfig(level=logging.DEBUG)
|
||||
|
||||
coreemu = CoreEmu()
|
||||
session = coreemu.create_session()
|
||||
session.set_state(EventTypes.CONFIGURATION_STATE)
|
||||
|
||||
# create nodes and interfaces
|
||||
try:
|
||||
prefixes = IpPrefixes(ip4_prefix="10.83.0.0/16")
|
||||
options = NodeOptions(image="ubuntu")
|
||||
|
||||
# create node one
|
||||
node_one = session.add_node(_type=NodeTypes.LXC, node_options=options)
|
||||
interface_one = prefixes.create_interface(node_one)
|
||||
|
||||
# create node two
|
||||
node_two = session.add_node(_type=NodeTypes.LXC, node_options=options)
|
||||
interface_two = prefixes.create_interface(node_two)
|
||||
|
||||
# add link
|
||||
session.add_link(node_one.id, node_two.id, interface_one, interface_two)
|
||||
|
||||
# instantiate
|
||||
session.instantiate()
|
||||
finally:
|
||||
input("continue to shutdown")
|
||||
coreemu.shutdown()
|
43
daemon/examples/lxd/switch.py
Normal file
|
@ -0,0 +1,43 @@
|
|||
import logging
|
||||
|
||||
from core.emulator.coreemu import CoreEmu
|
||||
from core.emulator.emudata import IpPrefixes, NodeOptions
|
||||
from core.emulator.enumerations import NodeTypes, EventTypes
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
logging.basicConfig(level=logging.DEBUG)
|
||||
|
||||
coreemu = CoreEmu()
|
||||
session = coreemu.create_session()
|
||||
session.set_state(EventTypes.CONFIGURATION_STATE)
|
||||
|
||||
try:
|
||||
prefixes = IpPrefixes(ip4_prefix="10.83.0.0/16")
|
||||
options = NodeOptions(image="ubuntu")
|
||||
|
||||
# create switch
|
||||
switch = session.add_node(_type=NodeTypes.SWITCH)
|
||||
|
||||
# node one
|
||||
node_one = session.add_node(_type=NodeTypes.LXC, node_options=options)
|
||||
interface_one = prefixes.create_interface(node_one)
|
||||
|
||||
# node two
|
||||
node_two = session.add_node(_type=NodeTypes.LXC, node_options=options)
|
||||
interface_two = prefixes.create_interface(node_two)
|
||||
|
||||
# node three
|
||||
node_three = session.add_node()
|
||||
interface_three = prefixes.create_interface(node_three)
|
||||
|
||||
# add links
|
||||
session.add_link(node_one.id, switch.id, interface_one)
|
||||
session.add_link(node_two.id, switch.id, interface_two)
|
||||
session.add_link(node_three.id, switch.id, interface_three)
|
||||
|
||||
# instantiate
|
||||
session.instantiate()
|
||||
finally:
|
||||
input("continue to shutdown")
|
||||
coreemu.shutdown()
|
|
@ -1,48 +1,54 @@
|
|||
"""
|
||||
Example custom emane model.
|
||||
"""
|
||||
|
||||
from core.emane import emanemanifest
|
||||
from core.emane import emanemodel
|
||||
|
||||
|
||||
## Custom EMANE Model
|
||||
class ExampleModel(emanemodel.EmaneModel):
|
||||
### MAC Definition
|
||||
"""
|
||||
Custom emane model.
|
||||
|
||||
:var str name: defines the emane model name that will show up in the GUI
|
||||
|
||||
Mac Definition:
|
||||
:var str mac_library: defines the mac library that the model will reference
|
||||
:var str mac_xml: defines the mac manifest file that will be parsed to obtain configuration options,
|
||||
that will be displayed within the GUI
|
||||
:var dict mac_defaults: allows you to override options that are maintained within the manifest file above
|
||||
:var list mac_config: parses the manifest file and converts configurations into core supported formats
|
||||
|
||||
Phy Definition:
|
||||
NOTE: phy configuration will default to the universal model as seen below and the below section does not
|
||||
have to be included
|
||||
:var str phy_library: defines the phy library that the model will reference, used if you need to
|
||||
provide a custom phy
|
||||
:var str phy_xml: defines the phy manifest file that will be parsed to obtain configuration options,
|
||||
that will be displayed within the GUI
|
||||
:var dict phy_defaults: allows you to override options that are maintained within the manifest file above
|
||||
or for the default universal model
|
||||
:var list phy_config: parses the manifest file and converts configurations into core supported formats
|
||||
|
||||
Custom Override Options:
|
||||
NOTE: these options default to what's seen below and do not have to be included
|
||||
:var set config_ignore: allows you to ignore options within phy/mac, used typically if you needed to add
|
||||
a custom option for display within the gui
|
||||
"""
|
||||
|
||||
# Defines the emane model name that will show up in the GUI.
|
||||
name = "emane_example"
|
||||
|
||||
# Defines the mac library that the model will reference.
|
||||
mac_library = "rfpipemaclayer"
|
||||
# Defines the mac manifest file that will be parsed to obtain configuration options, that will be displayed
|
||||
# within the GUI.
|
||||
mac_xml = "/usr/share/emane/manifest/rfpipemaclayer.xml"
|
||||
# Allows you to override options that are maintained within the manifest file above.
|
||||
mac_defaults = {
|
||||
"pcrcurveuri": "/usr/share/emane/xml/models/mac/rfpipe/rfpipepcr.xml",
|
||||
}
|
||||
# Parses the manifest file and converts configurations into core supported formats.
|
||||
mac_config = emanemanifest.parse(mac_xml, mac_defaults)
|
||||
|
||||
### PHY Definition
|
||||
# **NOTE: phy configuration will default to the universal model as seen below and the below section does not
|
||||
# have to be included.**
|
||||
|
||||
# Defines the phy library that the model will reference, used if you need to provide a custom phy.
|
||||
phy_library = None
|
||||
# Defines the phy manifest file that will be parsed to obtain configuration options, that will be displayed
|
||||
# within the GUI.
|
||||
phy_xml = "/usr/share/emane/manifest/emanephy.xml"
|
||||
# Allows you to override options that are maintained within the manifest file above or for the default universal
|
||||
# model.
|
||||
phy_defaults = {
|
||||
"subid": "1",
|
||||
"propagationmodel": "2ray",
|
||||
"noisemode": "none"
|
||||
}
|
||||
# Parses the manifest file and converts configurations into core supported formats.
|
||||
phy_config = emanemanifest.parse(phy_xml, phy_defaults)
|
||||
|
||||
### Custom override options
|
||||
# **NOTE: these options default to what's seen below and do not have to be included.**
|
||||
|
||||
# Allows you to ignore options within phy/mac, used typically if you needed to add a custom option for display
|
||||
# within the gui.
|
||||
config_ignore = set()
|
||||
|
|
|
@ -1,26 +0,0 @@
|
|||
This directory contains a sample custom service that you can use as a template
|
||||
for creating your own services.
|
||||
|
||||
Follow these steps to add your own services:
|
||||
|
||||
1. Modify the sample service MyService to do what you want. It could generate
|
||||
config/script files, mount per-node directories, start processes/scripts,
|
||||
etc. sample.py is a Python file that defines one or more classes to be
|
||||
imported. You can create multiple Python files that will be imported.
|
||||
Add any new filenames to the __init__.py file.
|
||||
|
||||
2. Put these files in a directory such as /home/username/.core/myservices
|
||||
Note that the last component of this directory name 'myservices' should not
|
||||
be named something like 'services' which conflicts with an existing Python
|
||||
name (the syntax 'from myservices import *' is used).
|
||||
|
||||
3. Add a 'custom_services_dir = /home/username/.core/myservices' entry to the
|
||||
/etc/core/core.conf file.
|
||||
|
||||
4. Restart the CORE daemon (core-daemon). Any import errors (Python syntax)
|
||||
should be displayed in the /var/log/core-daemon.log log file (or on screen).
|
||||
|
||||
5. Start using your custom service on your nodes. You can create a new node
|
||||
type that uses your service, or change the default services for an existing
|
||||
node type, or change individual nodes.
|
||||
|
|
@ -1,64 +1,81 @@
|
|||
"""
|
||||
Sample user-defined service.
|
||||
Simple example for a user-defined service.
|
||||
"""
|
||||
|
||||
from core.services.coreservices import CoreService
|
||||
from core.services.coreservices import ServiceMode
|
||||
|
||||
|
||||
## Custom CORE Service
|
||||
class MyService(CoreService):
|
||||
### Service Attributes
|
||||
"""
|
||||
Custom CORE Service
|
||||
|
||||
# Name used as a unique ID for this service and is required, no spaces.
|
||||
:var str name: name used as a unique ID for this service and is required, no spaces
|
||||
:var str group: allows you to group services within the GUI under a common name
|
||||
:var tuple executables: executables this service depends on to function, if executable is
|
||||
not on the path, service will not be loaded
|
||||
:var tuple dependencies: services that this service depends on for startup, tuple of service names
|
||||
:var tuple dirs: directories that this service will create within a node
|
||||
:var tuple configs: files that this service will generate, without a full path this file goes in
|
||||
the node's directory e.g. /tmp/pycore.12345/n1.conf/myfile
|
||||
:var tuple startup: commands used to start this service, any non-zero exit code will cause a failure
|
||||
:var tuple validate: commands used to validate that a service was started, any non-zero exit code
|
||||
will cause a failure
|
||||
:var ServiceMode validation_mode: validation mode, used to determine startup success.
|
||||
NON_BLOCKING - runs startup commands, and validates success with validation commands
|
||||
BLOCKING - runs startup commands, and validates success with the startup commands themselves
|
||||
TIMER - runs startup commands, and validates success by waiting for "validation_timer" alone
|
||||
:var int validation_timer: time in seconds for a service to wait for validation, before determining
|
||||
success in TIMER/NON_BLOCKING modes.
|
||||
:var float validation_period: period in seconds to wait before retrying validation,
|
||||
only used in NON_BLOCKING mode
|
||||
:var tuple shutdown: shutdown commands to stop this service
|
||||
"""
|
||||
name = "MyService"
|
||||
# Allows you to group services within the GUI under a common name.
|
||||
group = "Utility"
|
||||
# Executables this service depends on to function, if executable is not on the path, service will not be loaded.
|
||||
executables = ()
|
||||
# Services that this service depends on for startup, tuple of service names.
|
||||
dependencies = ()
|
||||
# Directories that this service will create within a node.
|
||||
dirs = ()
|
||||
# Files that this service will generate, without a full path this file goes in the node's directory.
|
||||
# e.g. /tmp/pycore.12345/n1.conf/myfile
|
||||
configs = ("myservice1.sh", "myservice2.sh")
|
||||
# Commands used to start this service, any non-zero exit code will cause a failure.
|
||||
startup = ("sh %s" % configs[0], "sh %s" % configs[1])
|
||||
# Commands used to validate that a service was started, any non-zero exit code will cause a failure.
|
||||
validate = ()
|
||||
# Validation mode, used to determine startup success.
|
||||
#
|
||||
# * NON_BLOCKING - runs startup commands, and validates success with validation commands
|
||||
# * BLOCKING - runs startup commands, and validates success with the startup commands themselves
|
||||
# * TIMER - runs startup commands, and validates success by waiting for "validation_timer" alone
|
||||
validation_mode = ServiceMode.NON_BLOCKING
|
||||
# Time in seconds for a service to wait for validation, before determining success in TIMER/NON_BLOCKING modes.
|
||||
validation_timer = 5
|
||||
# Period in seconds to wait before retrying validation, only used in NON_BLOCKING mode.
|
||||
validation_period = 0.5
|
||||
# Shutdown commands to stop this service.
|
||||
shutdown = ()
|
||||
|
||||
### On Load
|
||||
@classmethod
|
||||
def on_load(cls):
|
||||
# Provides a way to run some arbitrary logic when the service is loaded, possibly to help facilitate
|
||||
# dynamic settings for the environment.
|
||||
"""
|
||||
Provides a way to run some arbitrary logic when the service is loaded, possibly to help facilitate
|
||||
dynamic settings for the environment.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
pass
|
||||
|
||||
### Get Configs
|
||||
@classmethod
|
||||
def get_configs(cls, node):
|
||||
# Provides a way to dynamically generate the config files from the node a service will run.
|
||||
# Defaults to the class definition and can be left out entirely if not needed.
|
||||
"""
|
||||
Provides a way to dynamically generate the config files from the node a service will run.
|
||||
Defaults to the class definition and can be left out entirely if not needed.
|
||||
|
||||
:param node: core node that the service is being run on
|
||||
:return: tuple of config files to create
|
||||
"""
|
||||
return cls.configs
|
||||
|
||||
### Generate Config
|
||||
@classmethod
|
||||
def generate_config(cls, node, filename):
|
||||
# Returns a string representation for a file, given the node the service is starting on the config filename
|
||||
# that this information will be used for. This must be defined, if "configs" are defined.
|
||||
"""
|
||||
Returns a string representation for a file, given the node the service is starting on the config filename
|
||||
that this information will be used for. This must be defined, if "configs" are defined.
|
||||
|
||||
:param node: core node that the service is being run on
|
||||
:param str filename: configuration file to generate
|
||||
:return: configuration file content
|
||||
:rtype: str
|
||||
"""
|
||||
cfg = "#!/bin/sh\n"
|
||||
|
||||
if filename == cls.configs[0]:
|
||||
|
@ -70,16 +87,24 @@ class MyService(CoreService):
|
|||
|
||||
return cfg
|
||||
|
||||
### Get Startup
|
||||
@classmethod
|
||||
def get_startup(cls, node):
|
||||
# Provides a way to dynamically generate the startup commands from the node a service will run.
|
||||
# Defaults to the class definition and can be left out entirely if not needed.
|
||||
"""
|
||||
Provides a way to dynamically generate the startup commands from the node a service will run.
|
||||
Defaults to the class definition and can be left out entirely if not needed.
|
||||
|
||||
:param node: core node that the service is being run on
|
||||
:return: tuple of startup commands to run
|
||||
"""
|
||||
return cls.startup
|
||||
|
||||
### Get Validate
|
||||
@classmethod
|
||||
def get_validate(cls, node):
|
||||
# Provides a way to dynamically generate the validate commands from the node a service will run.
|
||||
# Defaults to the class definition and can be left out entirely if not needed.
|
||||
"""
|
||||
Provides a way to dynamically generate the validate commands from the node a service will run.
|
||||
Defaults to the class definition and can be left out entirely if not needed.
|
||||
|
||||
:param node: core node that the service is being run on
|
||||
:return: tuple of commands to validate service startup with
|
||||
"""
|
||||
return cls.validate
|
||||
|
|
|
@ -791,7 +791,7 @@ message LinkOptions {
|
|||
int32 key = 3;
|
||||
int32 mburst = 4;
|
||||
int32 mer = 5;
|
||||
int32 per = 6;
|
||||
float per = 6;
|
||||
int64 bandwidth = 7;
|
||||
int32 burst = 8;
|
||||
int64 delay = 9;
|
||||
|
|
|
@ -9,134 +9,22 @@ import time
|
|||
import pytest
|
||||
from mock.mock import MagicMock
|
||||
|
||||
from core.api.tlv.coreapi import CoreConfMessage
|
||||
from core.api.tlv.coreapi import CoreEventMessage
|
||||
from core.api.tlv.coreapi import CoreExecMessage
|
||||
from core.api.tlv.coreapi import CoreLinkMessage
|
||||
from core.api.tlv.coreapi import CoreNodeMessage
|
||||
from core.api.grpc.client import InterfaceHelper
|
||||
from core.api.grpc.server import CoreGrpcServer
|
||||
from core.api.tlv.coreapi import CoreConfMessage, CoreEventMessage
|
||||
from core.api.tlv.corehandlers import CoreHandler
|
||||
from core.api.tlv.coreserver import CoreServer
|
||||
from core.emulator.coreemu import CoreEmu
|
||||
from core.emulator.emudata import IpPrefixes
|
||||
from core.emulator.enumerations import CORE_API_PORT
|
||||
from core.emulator.enumerations import CORE_API_PORT, EventTlvs
|
||||
from core.emulator.enumerations import ConfigTlvs
|
||||
from core.emulator.enumerations import EventTlvs
|
||||
from core.emulator.enumerations import EventTypes
|
||||
from core.emulator.enumerations import ExecuteTlvs
|
||||
from core.emulator.enumerations import LinkTlvs
|
||||
from core.emulator.enumerations import LinkTypes
|
||||
from core.emulator.enumerations import MessageFlags
|
||||
from core.emulator.enumerations import NodeTlvs
|
||||
from core.emulator.enumerations import NodeTypes
|
||||
from core.api.grpc.client import InterfaceHelper
|
||||
from core.api.grpc.server import CoreGrpcServer
|
||||
from core.nodes import ipaddress
|
||||
from core.nodes.ipaddress import MacAddress
|
||||
from core.services.coreservices import ServiceManager
|
||||
|
||||
EMANE_SERVICES = "zebra|OSPFv3MDR|IPForward"
|
||||
|
||||
|
||||
def node_message(_id, name, emulation_server=None, node_type=NodeTypes.DEFAULT, model=None):
|
||||
"""
|
||||
Convenience method for creating a node TLV message.
|
||||
|
||||
:param int _id: node id
|
||||
:param str name: node name
|
||||
:param str emulation_server: distributed server name, if desired
|
||||
:param core.enumerations.NodeTypes node_type: node type
|
||||
:param str model: model for node
|
||||
:return: tlv message
|
||||
:rtype: core.api.coreapi.CoreNodeMessage
|
||||
"""
|
||||
values = [
|
||||
(NodeTlvs.NUMBER, _id),
|
||||
(NodeTlvs.TYPE, node_type.value),
|
||||
(NodeTlvs.NAME, name),
|
||||
(NodeTlvs.EMULATION_SERVER, emulation_server),
|
||||
]
|
||||
|
||||
if model:
|
||||
values.append((NodeTlvs.MODEL, model))
|
||||
|
||||
return CoreNodeMessage.create(MessageFlags.ADD.value, values)
|
||||
|
||||
|
||||
def link_message(n1, n2, intf_one=None, address_one=None, intf_two=None, address_two=None, key=None):
|
||||
"""
|
||||
Convenience method for creating link TLV messages.
|
||||
|
||||
:param int n1: node one id
|
||||
:param int n2: node two id
|
||||
:param int intf_one: node one interface id
|
||||
:param core.misc.ipaddress.IpAddress address_one: node one ip4 address
|
||||
:param int intf_two: node two interface id
|
||||
:param core.misc.ipaddress.IpAddress address_two: node two ip4 address
|
||||
:param int key: tunnel key for link if needed
|
||||
:return: tlv message
|
||||
:rtype: core.api.coreapi.CoreLinkMessage
|
||||
"""
|
||||
mac_one, mac_two = None, None
|
||||
if address_one:
|
||||
mac_one = MacAddress.random()
|
||||
if address_two:
|
||||
mac_two = MacAddress.random()
|
||||
|
||||
values = [
|
||||
(LinkTlvs.N1_NUMBER, n1),
|
||||
(LinkTlvs.N2_NUMBER, n2),
|
||||
(LinkTlvs.DELAY, 0),
|
||||
(LinkTlvs.BANDWIDTH, 0),
|
||||
(LinkTlvs.PER, "0"),
|
||||
(LinkTlvs.DUP, "0"),
|
||||
(LinkTlvs.JITTER, 0),
|
||||
(LinkTlvs.TYPE, LinkTypes.WIRED.value),
|
||||
(LinkTlvs.INTERFACE1_NUMBER, intf_one),
|
||||
(LinkTlvs.INTERFACE1_IP4, address_one),
|
||||
(LinkTlvs.INTERFACE1_IP4_MASK, 24),
|
||||
(LinkTlvs.INTERFACE1_MAC, mac_one),
|
||||
(LinkTlvs.INTERFACE2_NUMBER, intf_two),
|
||||
(LinkTlvs.INTERFACE2_IP4, address_two),
|
||||
(LinkTlvs.INTERFACE2_IP4_MASK, 24),
|
||||
(LinkTlvs.INTERFACE2_MAC, mac_two),
|
||||
]
|
||||
|
||||
if key:
|
||||
values.append((LinkTlvs.KEY, key))
|
||||
|
||||
return CoreLinkMessage.create(MessageFlags.ADD.value, values)
|
||||
|
||||
|
||||
def command_message(node, command):
|
||||
"""
|
||||
Create an execute command TLV message.
|
||||
|
||||
:param node: node to execute command for
|
||||
:param command: command to execute
|
||||
:return: tlv message
|
||||
:rtype: core.api.coreapi.CoreExecMessage
|
||||
"""
|
||||
flags = MessageFlags.STRING.value | MessageFlags.TEXT.value
|
||||
return CoreExecMessage.create(flags, [
|
||||
(ExecuteTlvs.NODE, node.id),
|
||||
(ExecuteTlvs.NUMBER, 1),
|
||||
(ExecuteTlvs.COMMAND, command)
|
||||
])
|
||||
|
||||
|
||||
def state_message(state):
|
||||
"""
|
||||
Create an event TLV message for a new state.
|
||||
|
||||
:param core.enumerations.EventTypes state: state to create message for
|
||||
:return: tlv message
|
||||
:rtype: core.api.coreapi.CoreEventMessage
|
||||
"""
|
||||
return CoreEventMessage.create(0, [
|
||||
(EventTlvs.TYPE, state.value)
|
||||
])
|
||||
|
||||
|
||||
class CoreServerTest(object):
|
||||
def __init__(self, port=CORE_API_PORT):
|
||||
self.host = "localhost"
|
||||
|
@ -152,13 +40,20 @@ class CoreServerTest(object):
|
|||
self.session = None
|
||||
self.request_handler = None
|
||||
|
||||
def setup(self, distributed_address, port):
|
||||
def setup_handler(self):
|
||||
self.session = self.server.coreemu.create_session(1)
|
||||
request_mock = MagicMock()
|
||||
request_mock.fileno = MagicMock(return_value=1)
|
||||
self.request_handler = CoreHandler(request_mock, "", self.server)
|
||||
self.request_handler.session = self.session
|
||||
self.request_handler.add_session_handlers()
|
||||
|
||||
def setup(self, distributed_address):
|
||||
# validate address
|
||||
assert distributed_address, "distributed server address was not provided"
|
||||
|
||||
# create session
|
||||
self.session = self.server.coreemu.create_session(1)
|
||||
self.session.master = True
|
||||
|
||||
# create request handler
|
||||
request_mock = MagicMock()
|
||||
|
@ -170,11 +65,11 @@ class CoreServerTest(object):
|
|||
|
||||
# have broker handle a configuration state change
|
||||
self.session.set_state(EventTypes.DEFINITION_STATE)
|
||||
message = state_message(EventTypes.CONFIGURATION_STATE)
|
||||
message = CoreEventMessage.create(0, [(EventTlvs.TYPE, EventTypes.CONFIGURATION_STATE.value)])
|
||||
self.request_handler.handle_message(message)
|
||||
|
||||
# add broker server for distributed core
|
||||
distributed = "%s:%s:%s" % (self.distributed_server, distributed_address, port)
|
||||
distributed = "%s:%s:%s" % (self.distributed_server, distributed_address, self.port)
|
||||
message = CoreConfMessage.create(0, [
|
||||
(ConfigTlvs.OBJECT, "broker"),
|
||||
(ConfigTlvs.TYPE, 0),
|
||||
|
@ -204,7 +99,6 @@ class CoreServerTest(object):
|
|||
|
||||
def shutdown(self):
|
||||
self.server.coreemu.shutdown()
|
||||
self.server.shutdown()
|
||||
self.server.server_close()
|
||||
|
||||
|
||||
|
@ -268,6 +162,20 @@ def cored():
|
|||
ServiceManager.services.clear()
|
||||
|
||||
|
||||
@pytest.fixture()
|
||||
def coreserver():
|
||||
# create and return server
|
||||
server = CoreServerTest()
|
||||
server.setup_handler()
|
||||
yield server
|
||||
|
||||
# cleanup
|
||||
server.shutdown()
|
||||
|
||||
# cleanup services
|
||||
ServiceManager.services.clear()
|
||||
|
||||
|
||||
def ping(from_node, to_node, ip_prefixes, count=3):
|
||||
address = ip_prefixes.ip4_address(to_node)
|
||||
return from_node.cmd(["ping", "-c", str(count), address])
|
||||
|
|
|
@ -1,15 +1,125 @@
|
|||
"""
|
||||
Unit tests for testing CORE with distributed networks.
|
||||
"""
|
||||
from core.emane.ieee80211abg import EmaneIeee80211abgModel
|
||||
|
||||
import conftest
|
||||
|
||||
from core.api.tlv.coreapi import CoreExecMessage
|
||||
from core.emulator.enumerations import EventTypes
|
||||
from core.api.tlv.coreapi import CoreExecMessage, CoreNodeMessage, CoreLinkMessage, CoreEventMessage, CoreConfMessage
|
||||
from core.emulator.enumerations import EventTypes, NodeTlvs, LinkTlvs, LinkTypes, EventTlvs, ConfigTlvs, ConfigFlags
|
||||
from core.emulator.enumerations import ExecuteTlvs
|
||||
from core.emulator.enumerations import MessageFlags
|
||||
from core.emulator.enumerations import NodeTypes
|
||||
from core.nodes.ipaddress import IpAddress
|
||||
from core.nodes.ipaddress import IpAddress, MacAddress, Ipv4Prefix
|
||||
|
||||
|
||||
def set_emane_model(node_id, model):
|
||||
return CoreConfMessage.create(0, [
|
||||
(ConfigTlvs.NODE, node_id),
|
||||
(ConfigTlvs.OBJECT, model),
|
||||
(ConfigTlvs.TYPE, ConfigFlags.UPDATE.value),
|
||||
])
|
||||
|
||||
|
||||
def node_message(_id, name, emulation_server=None, node_type=NodeTypes.DEFAULT, model=None):
|
||||
"""
|
||||
Convenience method for creating a node TLV message.
|
||||
|
||||
:param int _id: node id
|
||||
:param str name: node name
|
||||
:param str emulation_server: distributed server name, if desired
|
||||
:param core.emulator.enumerations.NodeTypes node_type: node type
|
||||
:param str model: model for node
|
||||
:return: tlv message
|
||||
:rtype: core.api.tlv.coreapi.CoreNodeMessage
|
||||
"""
|
||||
values = [
|
||||
(NodeTlvs.NUMBER, _id),
|
||||
(NodeTlvs.TYPE, node_type.value),
|
||||
(NodeTlvs.NAME, name),
|
||||
(NodeTlvs.EMULATION_SERVER, emulation_server),
|
||||
(NodeTlvs.X_POSITION, 0),
|
||||
(NodeTlvs.Y_POSITION, 0),
|
||||
]
|
||||
|
||||
if model:
|
||||
values.append((NodeTlvs.MODEL, model))
|
||||
|
||||
return CoreNodeMessage.create(MessageFlags.ADD.value, values)
|
||||
|
||||
|
||||
def link_message(n1, n2, intf_one=None, address_one=None, intf_two=None, address_two=None, key=None, mask=24):
|
||||
"""
|
||||
Convenience method for creating link TLV messages.
|
||||
|
||||
:param int n1: node one id
|
||||
:param int n2: node two id
|
||||
:param int intf_one: node one interface id
|
||||
:param core.nodes.ipaddress.IpAddress address_one: node one ip4 address
|
||||
:param int intf_two: node two interface id
|
||||
:param core.nodes.ipaddress.IpAddress address_two: node two ip4 address
|
||||
:param int key: tunnel key for link if needed
|
||||
:param int mask: ip4 mask to use for link
|
||||
:return: tlv message
|
||||
:rtype: core.api.tlv.coreapi.CoreLinkMessage
|
||||
"""
|
||||
mac_one, mac_two = None, None
|
||||
if address_one:
|
||||
mac_one = MacAddress.random()
|
||||
if address_two:
|
||||
mac_two = MacAddress.random()
|
||||
|
||||
values = [
|
||||
(LinkTlvs.N1_NUMBER, n1),
|
||||
(LinkTlvs.N2_NUMBER, n2),
|
||||
(LinkTlvs.DELAY, 0),
|
||||
(LinkTlvs.BANDWIDTH, 0),
|
||||
(LinkTlvs.PER, "0"),
|
||||
(LinkTlvs.DUP, "0"),
|
||||
(LinkTlvs.JITTER, 0),
|
||||
(LinkTlvs.TYPE, LinkTypes.WIRED.value),
|
||||
(LinkTlvs.INTERFACE1_NUMBER, intf_one),
|
||||
(LinkTlvs.INTERFACE1_IP4, address_one),
|
||||
(LinkTlvs.INTERFACE1_IP4_MASK, mask),
|
||||
(LinkTlvs.INTERFACE1_MAC, mac_one),
|
||||
(LinkTlvs.INTERFACE2_NUMBER, intf_two),
|
||||
(LinkTlvs.INTERFACE2_IP4, address_two),
|
||||
(LinkTlvs.INTERFACE2_IP4_MASK, mask),
|
||||
(LinkTlvs.INTERFACE2_MAC, mac_two),
|
||||
]
|
||||
|
||||
if key:
|
||||
values.append((LinkTlvs.KEY, key))
|
||||
|
||||
return CoreLinkMessage.create(MessageFlags.ADD.value, values)
|
||||
|
||||
|
||||
def command_message(node, command):
|
||||
"""
|
||||
Create an execute command TLV message.
|
||||
|
||||
:param node: node to execute command for
|
||||
:param command: command to execute
|
||||
:return: tlv message
|
||||
:rtype: core.api.tlv.coreapi.CoreExecMessage
|
||||
"""
|
||||
flags = MessageFlags.STRING.value | MessageFlags.TEXT.value
|
||||
return CoreExecMessage.create(flags, [
|
||||
(ExecuteTlvs.NODE, node.id),
|
||||
(ExecuteTlvs.NUMBER, 1),
|
||||
(ExecuteTlvs.COMMAND, command)
|
||||
])
|
||||
|
||||
|
||||
def state_message(state):
|
||||
"""
|
||||
Create an event TLV message for a new state.
|
||||
|
||||
:param core.enumerations.EventTypes state: state to create message for
|
||||
:return: tlv message
|
||||
:rtype: core.api.tlv.coreapi.CoreEventMessage
|
||||
"""
|
||||
return CoreEventMessage.create(0, [
|
||||
(EventTlvs.TYPE, state.value)
|
||||
])
|
||||
|
||||
|
||||
def validate_response(replies, _):
|
||||
|
@ -28,18 +138,18 @@ def validate_response(replies, _):
|
|||
|
||||
|
||||
class TestDistributed:
|
||||
def test_distributed(self, cored, distributed_address):
|
||||
def test_switch(self, cored, distributed_address):
|
||||
"""
|
||||
Test creating a distributed network.
|
||||
Test creating a distributed switch network.
|
||||
|
||||
:param core.coreserver.CoreServer conftest.Core cored: core daemon server to test with
|
||||
:param core.api.tlv.coreserver.CoreServer conftest.Core cored: core daemon server to test with
|
||||
:param str distributed_address: distributed server to test against
|
||||
"""
|
||||
# initialize server for testing
|
||||
cored.setup(distributed_address)
|
||||
|
||||
# create local node
|
||||
message = conftest.node_message(
|
||||
message = node_message(
|
||||
_id=1,
|
||||
name="n1",
|
||||
model="host"
|
||||
|
@ -47,7 +157,7 @@ class TestDistributed:
|
|||
cored.request_handler.handle_message(message)
|
||||
|
||||
# create distributed node and assign to distributed server
|
||||
message = conftest.node_message(
|
||||
message = node_message(
|
||||
_id=2,
|
||||
name="n2",
|
||||
emulation_server=cored.distributed_server,
|
||||
|
@ -56,17 +166,16 @@ class TestDistributed:
|
|||
cored.request_handler.handle_message(message)
|
||||
|
||||
# create distributed switch and assign to distributed server
|
||||
message = conftest.node_message(
|
||||
message = node_message(
|
||||
_id=3,
|
||||
name="n3",
|
||||
emulation_server=cored.distributed_server,
|
||||
node_type=NodeTypes.SWITCH
|
||||
)
|
||||
cored.request_handler.handle_message(message)
|
||||
|
||||
# link message one
|
||||
ip4_address = cored.prefix.addr(1)
|
||||
message = conftest.link_message(
|
||||
message = link_message(
|
||||
n1=1,
|
||||
n2=3,
|
||||
intf_one=0,
|
||||
|
@ -76,7 +185,7 @@ class TestDistributed:
|
|||
|
||||
# link message two
|
||||
ip4_address = cored.prefix.addr(2)
|
||||
message = conftest.link_message(
|
||||
message = link_message(
|
||||
n1=3,
|
||||
n2=2,
|
||||
intf_two=0,
|
||||
|
@ -85,12 +194,86 @@ class TestDistributed:
|
|||
cored.request_handler.handle_message(message)
|
||||
|
||||
# change session to instantiation state
|
||||
message = conftest.state_message(EventTypes.INSTANTIATION_STATE)
|
||||
message = state_message(EventTypes.INSTANTIATION_STATE)
|
||||
cored.request_handler.handle_message(message)
|
||||
|
||||
# test a ping command
|
||||
node_one = cored.session.get_node(1)
|
||||
message = conftest.command_message(node_one, "ping -c 5 %s" % ip4_address)
|
||||
message = command_message(node_one, "ping -c 5 %s" % ip4_address)
|
||||
cored.request_handler.dispatch_replies = validate_response
|
||||
cored.request_handler.handle_message(message)
|
||||
|
||||
def test_emane(self, cored, distributed_address):
|
||||
"""
|
||||
Test creating a distributed emane network.
|
||||
|
||||
:param core.api.tlv.coreserver.CoreServer conftest.Core cored: core daemon server to test with
|
||||
:param str distributed_address: distributed server to test against
|
||||
"""
|
||||
# initialize server for testing
|
||||
cored.setup(distributed_address)
|
||||
|
||||
# configure required controlnet
|
||||
cored.session.options.set_config("controlnet", "core1:172.16.1.0/24 core2:172.16.2.0/24")
|
||||
|
||||
# create local node
|
||||
message = node_message(
|
||||
_id=1,
|
||||
name="n1",
|
||||
model="mdr"
|
||||
)
|
||||
cored.request_handler.handle_message(message)
|
||||
|
||||
# create distributed node and assign to distributed server
|
||||
message = node_message(
|
||||
_id=2,
|
||||
name="n2",
|
||||
emulation_server=cored.distributed_server,
|
||||
model="mdr"
|
||||
)
|
||||
cored.request_handler.handle_message(message)
|
||||
|
||||
# create distributed switch and assign to distributed server
|
||||
message = node_message(
|
||||
_id=3,
|
||||
name="n3",
|
||||
node_type=NodeTypes.EMANE
|
||||
)
|
||||
cored.request_handler.handle_message(message)
|
||||
|
||||
# set emane model
|
||||
message = set_emane_model(3, EmaneIeee80211abgModel.name)
|
||||
cored.request_handler.handle_message(message)
|
||||
|
||||
# link message one
|
||||
ip4_address = cored.prefix.addr(1)
|
||||
message = link_message(
|
||||
n1=1,
|
||||
n2=3,
|
||||
intf_one=0,
|
||||
address_one=ip4_address,
|
||||
mask=32
|
||||
)
|
||||
cored.request_handler.handle_message(message)
|
||||
|
||||
# link message two
|
||||
ip4_address = cored.prefix.addr(2)
|
||||
message = link_message(
|
||||
n1=2,
|
||||
n2=3,
|
||||
intf_one=0,
|
||||
address_one=ip4_address,
|
||||
mask=32
|
||||
)
|
||||
cored.request_handler.handle_message(message)
|
||||
|
||||
# change session to instantiation state
|
||||
message = state_message(EventTypes.INSTANTIATION_STATE)
|
||||
cored.request_handler.handle_message(message)
|
||||
|
||||
# test a ping command
|
||||
node_one = cored.session.get_node(1)
|
||||
message = command_message(node_one, "ping -c 5 %s" % ip4_address)
|
||||
cored.request_handler.dispatch_replies = validate_response
|
||||
cored.request_handler.handle_message(message)
|
||||
|
||||
|
@ -98,14 +281,14 @@ class TestDistributed:
|
|||
"""
|
||||
Test creating a distributed prouter node.
|
||||
|
||||
:param core.coreserver.CoreServer conftest.Core cored: core daemon server to test with
|
||||
:param core.coreserver.CoreServer Core cored: core daemon server to test with
|
||||
:param str distributed_address: distributed server to test against
|
||||
"""
|
||||
# initialize server for testing
|
||||
cored.setup(distributed_address)
|
||||
|
||||
# create local node
|
||||
message = conftest.node_message(
|
||||
message = node_message(
|
||||
_id=1,
|
||||
name="n1",
|
||||
model="host"
|
||||
|
@ -113,7 +296,7 @@ class TestDistributed:
|
|||
cored.request_handler.handle_message(message)
|
||||
|
||||
# create distributed node and assign to distributed server
|
||||
message = conftest.node_message(
|
||||
message = node_message(
|
||||
_id=2,
|
||||
name="n2",
|
||||
emulation_server=cored.distributed_server,
|
||||
|
@ -123,7 +306,7 @@ class TestDistributed:
|
|||
cored.request_handler.handle_message(message)
|
||||
|
||||
# create distributed switch and assign to distributed server
|
||||
message = conftest.node_message(
|
||||
message = node_message(
|
||||
_id=3,
|
||||
name="n3",
|
||||
node_type=NodeTypes.SWITCH
|
||||
|
@ -132,7 +315,7 @@ class TestDistributed:
|
|||
|
||||
# link message one
|
||||
ip4_address = cored.prefix.addr(1)
|
||||
message = conftest.link_message(
|
||||
message = link_message(
|
||||
n1=1,
|
||||
n2=3,
|
||||
intf_one=0,
|
||||
|
@ -142,7 +325,7 @@ class TestDistributed:
|
|||
|
||||
# link message two
|
||||
ip4_address = cored.prefix.addr(2)
|
||||
message = conftest.link_message(
|
||||
message = link_message(
|
||||
n1=3,
|
||||
n2=2,
|
||||
intf_two=0,
|
||||
|
@ -151,12 +334,12 @@ class TestDistributed:
|
|||
cored.request_handler.handle_message(message)
|
||||
|
||||
# change session to instantiation state
|
||||
message = conftest.state_message(EventTypes.INSTANTIATION_STATE)
|
||||
message = state_message(EventTypes.INSTANTIATION_STATE)
|
||||
cored.request_handler.handle_message(message)
|
||||
|
||||
# test a ping command
|
||||
node_one = cored.session.get_node(1)
|
||||
message = conftest.command_message(node_one, "ping -c 5 %s" % ip4_address)
|
||||
message = command_message(node_one, "ping -c 5 %s" % ip4_address)
|
||||
cored.request_handler.dispatch_replies = validate_response
|
||||
cored.request_handler.handle_message(message)
|
||||
cored.request_handler.handle_message(message)
|
||||
|
@ -165,14 +348,14 @@ class TestDistributed:
|
|||
"""
|
||||
Test session broker creation.
|
||||
|
||||
:param core.coreserver.CoreServer conftest.Core cored: core daemon server to test with
|
||||
:param core.coreserver.CoreServer Core cored: core daemon server to test with
|
||||
:param str distributed_address: distributed server to test against
|
||||
"""
|
||||
# initialize server for testing
|
||||
cored.setup(distributed_address)
|
||||
|
||||
# create local node
|
||||
message = conftest.node_message(
|
||||
message = node_message(
|
||||
_id=1,
|
||||
name="n1",
|
||||
model="host"
|
||||
|
@ -180,7 +363,7 @@ class TestDistributed:
|
|||
cored.request_handler.handle_message(message)
|
||||
|
||||
# create distributed node and assign to distributed server
|
||||
message = conftest.node_message(
|
||||
message = node_message(
|
||||
_id=2,
|
||||
name=distributed_address,
|
||||
emulation_server=cored.distributed_server,
|
||||
|
@ -191,7 +374,7 @@ class TestDistributed:
|
|||
# link message one
|
||||
ip4_address = cored.prefix.addr(1)
|
||||
address_two = IpAddress.from_string(distributed_address)
|
||||
message = conftest.link_message(
|
||||
message = link_message(
|
||||
n1=1,
|
||||
n2=2,
|
||||
intf_one=0,
|
||||
|
@ -203,5 +386,5 @@ class TestDistributed:
|
|||
cored.request_handler.handle_message(message)
|
||||
|
||||
# change session to instantiation state
|
||||
message = conftest.state_message(EventTypes.INSTANTIATION_STATE)
|
||||
message = state_message(EventTypes.INSTANTIATION_STATE)
|
||||
cored.request_handler.handle_message(message)
|
||||
|
|
|
@ -1,174 +1,851 @@
|
|||
"""
|
||||
Unit tests for testing with a CORE switch.
|
||||
Tests for TLV message handling.
|
||||
"""
|
||||
import os
|
||||
import time
|
||||
|
||||
import threading
|
||||
import mock
|
||||
import pytest
|
||||
|
||||
from core.api.tlv import coreapi, dataconversion
|
||||
from core.api.tlv.coreapi import CoreExecuteTlv
|
||||
from core.emulator.enumerations import CORE_API_PORT, NodeTypes
|
||||
from core.emulator.enumerations import EventTlvs
|
||||
from core.emulator.enumerations import EventTypes
|
||||
from core.api.tlv import coreapi
|
||||
from core.emane.ieee80211abg import EmaneIeee80211abgModel
|
||||
from core.emulator.enumerations import EventTlvs, SessionTlvs, EventTypes, FileTlvs, RegisterTlvs, ConfigTlvs, \
|
||||
ConfigFlags
|
||||
from core.emulator.enumerations import ExecuteTlvs
|
||||
from core.emulator.enumerations import LinkTlvs
|
||||
from core.emulator.enumerations import LinkTypes
|
||||
from core.emulator.enumerations import MessageFlags
|
||||
from core.emulator.enumerations import MessageTypes
|
||||
from core.nodes import ipaddress
|
||||
from core.emulator.enumerations import NodeTypes, NodeTlvs
|
||||
from core.location.mobility import BasicRangeModel
|
||||
from core.nodes.ipaddress import Ipv4Prefix
|
||||
|
||||
|
||||
def command_message(node, command):
|
||||
"""
|
||||
Create an execute command TLV message.
|
||||
|
||||
:param node: node to execute command for
|
||||
:param command: command to execute
|
||||
:return: packed execute message
|
||||
"""
|
||||
tlv_data = CoreExecuteTlv.pack(ExecuteTlvs.NODE.value, node.id)
|
||||
tlv_data += CoreExecuteTlv.pack(ExecuteTlvs.NUMBER.value, 1)
|
||||
tlv_data += CoreExecuteTlv.pack(ExecuteTlvs.COMMAND.value, command)
|
||||
return coreapi.CoreExecMessage.pack(MessageFlags.STRING.value | MessageFlags.TEXT.value, tlv_data)
|
||||
|
||||
|
||||
def state_message(state):
|
||||
"""
|
||||
Create an event TLV message for a new state.
|
||||
|
||||
:param core.enumerations.EventTypes state: state to create message for
|
||||
:return: packed event message
|
||||
"""
|
||||
tlv_data = coreapi.CoreEventTlv.pack(EventTlvs.TYPE.value, state.value)
|
||||
return coreapi.CoreEventMessage.pack(0, tlv_data)
|
||||
|
||||
|
||||
def switch_link_message(switch, node, address, prefix_len):
|
||||
"""
|
||||
Create a link TLV message for node to a switch, with the provided address and prefix length.
|
||||
|
||||
:param switch: switch for link
|
||||
:param node: node for link
|
||||
:param address: address node on link
|
||||
:param prefix_len: prefix length of address
|
||||
:return: packed link message
|
||||
"""
|
||||
tlv_data = coreapi.CoreLinkTlv.pack(LinkTlvs.N1_NUMBER.value, switch.id)
|
||||
tlv_data += coreapi.CoreLinkTlv.pack(LinkTlvs.N2_NUMBER.value, node.id)
|
||||
tlv_data += coreapi.CoreLinkTlv.pack(LinkTlvs.TYPE.value, LinkTypes.WIRED.value)
|
||||
tlv_data += coreapi.CoreLinkTlv.pack(LinkTlvs.INTERFACE2_NUMBER.value, 0)
|
||||
tlv_data += coreapi.CoreLinkTlv.pack(LinkTlvs.INTERFACE2_IP4.value, address)
|
||||
tlv_data += coreapi.CoreLinkTlv.pack(LinkTlvs.INTERFACE2_IP4_MASK.value, prefix_len)
|
||||
return coreapi.CoreLinkMessage.pack(MessageFlags.ADD.value, tlv_data)
|
||||
|
||||
|
||||
def run_cmd(node, exec_cmd):
|
||||
"""
|
||||
Convenience method for sending commands to a node using the legacy API.
|
||||
|
||||
:param node: The node the command should be issued too
|
||||
:param exec_cmd: A string with the command to be run
|
||||
:return: Returns the result of the command
|
||||
"""
|
||||
# Set up the command api message
|
||||
# tlv_data = CoreExecuteTlv.pack(ExecuteTlvs.NODE.value, node.id)
|
||||
# tlv_data += CoreExecuteTlv.pack(ExecuteTlvs.NUMBER.value, 1)
|
||||
# tlv_data += CoreExecuteTlv.pack(ExecuteTlvs.COMMAND.value, exec_cmd)
|
||||
# message = coreapi.CoreExecMessage.pack(MessageFlags.STRING.value | MessageFlags.TEXT.value, tlv_data)
|
||||
message = command_message(node, exec_cmd)
|
||||
node.session.broker.handlerawmsg(message)
|
||||
|
||||
# Now wait for the response
|
||||
server = node.session.broker.servers["localhost"]
|
||||
server.sock.settimeout(50.0)
|
||||
|
||||
# receive messages until we get our execute response
|
||||
result = None
|
||||
status = False
|
||||
while True:
|
||||
message_header = server.sock.recv(coreapi.CoreMessage.header_len)
|
||||
message_type, message_flags, message_length = coreapi.CoreMessage.unpack_header(message_header)
|
||||
message_data = server.sock.recv(message_length)
|
||||
|
||||
# If we get the right response return the results
|
||||
print("received response message: %s" % message_type)
|
||||
if message_type == MessageTypes.EXECUTE.value:
|
||||
message = coreapi.CoreExecMessage(message_flags, message_header, message_data)
|
||||
result = message.get_tlv(ExecuteTlvs.RESULT.value)
|
||||
status = message.get_tlv(ExecuteTlvs.STATUS.value)
|
||||
break
|
||||
|
||||
return result, status
|
||||
def dict_to_str(values):
|
||||
return "|".join("%s=%s" % (x, values[x]) for x in values)
|
||||
|
||||
|
||||
class TestGui:
|
||||
def test_broker(self, cored):
|
||||
"""
|
||||
Test session broker creation.
|
||||
@pytest.mark.parametrize("node_type, model", [
|
||||
(NodeTypes.DEFAULT, "PC"),
|
||||
(NodeTypes.EMANE, None),
|
||||
(NodeTypes.HUB, None),
|
||||
(NodeTypes.SWITCH, None),
|
||||
(NodeTypes.WIRELESS_LAN, None),
|
||||
(NodeTypes.TUNNEL, None),
|
||||
(NodeTypes.RJ45, None),
|
||||
])
|
||||
def test_node_add(self, coreserver, node_type, model):
|
||||
node_id = 1
|
||||
message = coreapi.CoreNodeMessage.create(MessageFlags.ADD.value, [
|
||||
(NodeTlvs.NUMBER, node_id),
|
||||
(NodeTlvs.TYPE, node_type.value),
|
||||
(NodeTlvs.NAME, "n1"),
|
||||
(NodeTlvs.X_POSITION, 0),
|
||||
(NodeTlvs.Y_POSITION, 0),
|
||||
(NodeTlvs.MODEL, model),
|
||||
])
|
||||
|
||||
:param core.emulator.coreemu.EmuSession session: session for test
|
||||
:param cored: cored daemon server to test with
|
||||
"""
|
||||
coreserver.request_handler.handle_message(message)
|
||||
|
||||
# set core daemon to run in the background
|
||||
thread = threading.Thread(target=cored.server.serve_forever)
|
||||
thread.daemon = True
|
||||
thread.start()
|
||||
assert coreserver.session.get_node(node_id) is not None
|
||||
|
||||
# ip prefix for nodes
|
||||
prefix = ipaddress.Ipv4Prefix("10.83.0.0/16")
|
||||
daemon = "localhost"
|
||||
def test_node_update(self, coreserver):
|
||||
node_id = 1
|
||||
coreserver.session.add_node(_id=node_id)
|
||||
x = 50
|
||||
y = 100
|
||||
message = coreapi.CoreNodeMessage.create(0, [
|
||||
(NodeTlvs.NUMBER, node_id),
|
||||
(NodeTlvs.X_POSITION, x),
|
||||
(NodeTlvs.Y_POSITION, y),
|
||||
])
|
||||
|
||||
# add server
|
||||
session = cored.server.coreemu.create_session()
|
||||
session.broker.addserver(daemon, "127.0.0.1", CORE_API_PORT)
|
||||
coreserver.request_handler.handle_message(message)
|
||||
|
||||
# setup server
|
||||
session.broker.setupserver(daemon)
|
||||
node = coreserver.session.get_node(node_id)
|
||||
assert node is not None
|
||||
assert node.position.x == x
|
||||
assert node.position.y == y
|
||||
|
||||
# do not want the recvloop running as we will deal ourselves
|
||||
session.broker.dorecvloop = False
|
||||
def test_node_delete(self, coreserver):
|
||||
node_id = 1
|
||||
coreserver.session.add_node(_id=node_id)
|
||||
message = coreapi.CoreNodeMessage.create(MessageFlags.DELETE.value, [
|
||||
(NodeTlvs.NUMBER, node_id),
|
||||
])
|
||||
|
||||
# have broker handle a configuration state change
|
||||
session.set_state(EventTypes.CONFIGURATION_STATE)
|
||||
event_message = state_message(EventTypes.CONFIGURATION_STATE)
|
||||
session.broker.handlerawmsg(event_message)
|
||||
coreserver.request_handler.handle_message(message)
|
||||
|
||||
# create a switch node
|
||||
switch = session.add_node(_type=NodeTypes.SWITCH)
|
||||
switch.setposition(x=80, y=50)
|
||||
switch.server = daemon
|
||||
with pytest.raises(KeyError):
|
||||
coreserver.session.get_node(node_id)
|
||||
|
||||
# retrieve switch data representation, create a switch message for broker to handle
|
||||
switch_data = switch.data(MessageFlags.ADD.value)
|
||||
switch_message = dataconversion.convert_node(switch_data)
|
||||
session.broker.handlerawmsg(switch_message)
|
||||
def test_link_add_node_to_net(self, coreserver):
|
||||
node_one = 1
|
||||
coreserver.session.add_node(_id=node_one)
|
||||
switch = 2
|
||||
coreserver.session.add_node(_id=switch, _type=NodeTypes.SWITCH)
|
||||
ip_prefix = Ipv4Prefix("10.0.0.0/24")
|
||||
interface_one = ip_prefix.addr(node_one)
|
||||
message = coreapi.CoreLinkMessage.create(MessageFlags.ADD.value, [
|
||||
(LinkTlvs.N1_NUMBER, node_one),
|
||||
(LinkTlvs.N2_NUMBER, switch),
|
||||
(LinkTlvs.INTERFACE1_NUMBER, 0),
|
||||
(LinkTlvs.INTERFACE1_IP4, interface_one),
|
||||
(LinkTlvs.INTERFACE1_IP4_MASK, 24),
|
||||
])
|
||||
|
||||
# create node one
|
||||
node_one = session.add_node()
|
||||
node_one.server = daemon
|
||||
coreserver.request_handler.handle_message(message)
|
||||
|
||||
# create node two
|
||||
node_two = session.add_node()
|
||||
node_two.server = daemon
|
||||
switch_node = coreserver.session.get_node(switch)
|
||||
all_links = switch_node.all_link_data(0)
|
||||
assert len(all_links) == 1
|
||||
|
||||
# create node messages for the broker to handle
|
||||
for node in [node_one, node_two]:
|
||||
node_data = node.data(MessageFlags.ADD.value)
|
||||
node_message = dataconversion.convert_node(node_data)
|
||||
session.broker.handlerawmsg(node_message)
|
||||
def test_link_add_net_to_node(self, coreserver):
|
||||
node_one = 1
|
||||
coreserver.session.add_node(_id=node_one)
|
||||
switch = 2
|
||||
coreserver.session.add_node(_id=switch, _type=NodeTypes.SWITCH)
|
||||
ip_prefix = Ipv4Prefix("10.0.0.0/24")
|
||||
interface_one = ip_prefix.addr(node_one)
|
||||
message = coreapi.CoreLinkMessage.create(MessageFlags.ADD.value, [
|
||||
(LinkTlvs.N1_NUMBER, switch),
|
||||
(LinkTlvs.N2_NUMBER, node_one),
|
||||
(LinkTlvs.INTERFACE2_NUMBER, 0),
|
||||
(LinkTlvs.INTERFACE2_IP4, interface_one),
|
||||
(LinkTlvs.INTERFACE2_IP4_MASK, 24),
|
||||
])
|
||||
|
||||
# create links to switch from nodes for broker to handle
|
||||
for index, node in enumerate([node_one, node_two], start=1):
|
||||
ip4_address = prefix.addr(index)
|
||||
link_message = switch_link_message(switch, node, ip4_address, prefix.prefixlen)
|
||||
session.broker.handlerawmsg(link_message)
|
||||
coreserver.request_handler.handle_message(message)
|
||||
|
||||
# change session to instantiation state
|
||||
event_message = state_message(EventTypes.INSTANTIATION_STATE)
|
||||
session.broker.handlerawmsg(event_message)
|
||||
switch_node = coreserver.session.get_node(switch)
|
||||
all_links = switch_node.all_link_data(0)
|
||||
assert len(all_links) == 1
|
||||
|
||||
# Get the ip or last node and ping it from the first
|
||||
output, status = run_cmd(node_one, "ip -4 -o addr show dev eth0")
|
||||
pingip = output.split()[3].split("/")[0]
|
||||
output, status = run_cmd(node_two, "ping -c 5 " + pingip)
|
||||
assert not status
|
||||
def test_link_add_node_to_node(self, coreserver):
|
||||
node_one = 1
|
||||
coreserver.session.add_node(_id=node_one)
|
||||
node_two = 2
|
||||
coreserver.session.add_node(_id=node_two)
|
||||
ip_prefix = Ipv4Prefix("10.0.0.0/24")
|
||||
interface_one = ip_prefix.addr(node_one)
|
||||
interface_two = ip_prefix.addr(node_two)
|
||||
message = coreapi.CoreLinkMessage.create(MessageFlags.ADD.value, [
|
||||
(LinkTlvs.N1_NUMBER, node_one),
|
||||
(LinkTlvs.N2_NUMBER, node_two),
|
||||
(LinkTlvs.INTERFACE1_NUMBER, 0),
|
||||
(LinkTlvs.INTERFACE1_IP4, interface_one),
|
||||
(LinkTlvs.INTERFACE1_IP4_MASK, 24),
|
||||
(LinkTlvs.INTERFACE2_NUMBER, 0),
|
||||
(LinkTlvs.INTERFACE2_IP4, interface_two),
|
||||
(LinkTlvs.INTERFACE2_IP4_MASK, 24),
|
||||
])
|
||||
|
||||
coreserver.request_handler.handle_message(message)
|
||||
|
||||
all_links = []
|
||||
for node_id in coreserver.session.nodes:
|
||||
node = coreserver.session.nodes[node_id]
|
||||
all_links += node.all_link_data(0)
|
||||
assert len(all_links) == 1
|
||||
|
||||
def test_link_update(self, coreserver):
|
||||
node_one = 1
|
||||
coreserver.session.add_node(_id=node_one)
|
||||
switch = 2
|
||||
coreserver.session.add_node(_id=switch, _type=NodeTypes.SWITCH)
|
||||
ip_prefix = Ipv4Prefix("10.0.0.0/24")
|
||||
interface_one = ip_prefix.addr(node_one)
|
||||
message = coreapi.CoreLinkMessage.create(MessageFlags.ADD.value, [
|
||||
(LinkTlvs.N1_NUMBER, node_one),
|
||||
(LinkTlvs.N2_NUMBER, switch),
|
||||
(LinkTlvs.INTERFACE1_NUMBER, 0),
|
||||
(LinkTlvs.INTERFACE1_IP4, interface_one),
|
||||
(LinkTlvs.INTERFACE1_IP4_MASK, 24),
|
||||
])
|
||||
coreserver.request_handler.handle_message(message)
|
||||
switch_node = coreserver.session.get_node(switch)
|
||||
all_links = switch_node.all_link_data(0)
|
||||
assert len(all_links) == 1
|
||||
link = all_links[0]
|
||||
assert link.bandwidth is None
|
||||
|
||||
bandwidth = 50000
|
||||
message = coreapi.CoreLinkMessage.create(0, [
|
||||
(LinkTlvs.N1_NUMBER, node_one),
|
||||
(LinkTlvs.N2_NUMBER, switch),
|
||||
(LinkTlvs.INTERFACE1_NUMBER, 0),
|
||||
(LinkTlvs.BANDWIDTH, bandwidth),
|
||||
])
|
||||
coreserver.request_handler.handle_message(message)
|
||||
|
||||
switch_node = coreserver.session.get_node(switch)
|
||||
all_links = switch_node.all_link_data(0)
|
||||
assert len(all_links) == 1
|
||||
link = all_links[0]
|
||||
assert link.bandwidth == bandwidth
|
||||
|
||||
def test_link_delete_node_to_node(self, coreserver):
|
||||
node_one = 1
|
||||
coreserver.session.add_node(_id=node_one)
|
||||
node_two = 2
|
||||
coreserver.session.add_node(_id=node_two)
|
||||
ip_prefix = Ipv4Prefix("10.0.0.0/24")
|
||||
interface_one = ip_prefix.addr(node_one)
|
||||
interface_two = ip_prefix.addr(node_two)
|
||||
message = coreapi.CoreLinkMessage.create(MessageFlags.ADD.value, [
|
||||
(LinkTlvs.N1_NUMBER, node_one),
|
||||
(LinkTlvs.N2_NUMBER, node_two),
|
||||
(LinkTlvs.INTERFACE1_NUMBER, 0),
|
||||
(LinkTlvs.INTERFACE1_IP4, interface_one),
|
||||
(LinkTlvs.INTERFACE1_IP4_MASK, 24),
|
||||
(LinkTlvs.INTERFACE2_IP4, interface_two),
|
||||
(LinkTlvs.INTERFACE2_IP4_MASK, 24),
|
||||
])
|
||||
coreserver.request_handler.handle_message(message)
|
||||
all_links = []
|
||||
for node_id in coreserver.session.nodes:
|
||||
node = coreserver.session.nodes[node_id]
|
||||
all_links += node.all_link_data(0)
|
||||
assert len(all_links) == 1
|
||||
|
||||
message = coreapi.CoreLinkMessage.create(MessageFlags.DELETE.value, [
|
||||
(LinkTlvs.N1_NUMBER, node_one),
|
||||
(LinkTlvs.N2_NUMBER, node_two),
|
||||
(LinkTlvs.INTERFACE1_NUMBER, 0),
|
||||
(LinkTlvs.INTERFACE2_NUMBER, 0),
|
||||
])
|
||||
coreserver.request_handler.handle_message(message)
|
||||
|
||||
all_links = []
|
||||
for node_id in coreserver.session.nodes:
|
||||
node = coreserver.session.nodes[node_id]
|
||||
all_links += node.all_link_data(0)
|
||||
assert len(all_links) == 0
|
||||
|
||||
def test_link_delete_node_to_net(self, coreserver):
|
||||
node_one = 1
|
||||
coreserver.session.add_node(_id=node_one)
|
||||
switch = 2
|
||||
coreserver.session.add_node(_id=switch, _type=NodeTypes.SWITCH)
|
||||
ip_prefix = Ipv4Prefix("10.0.0.0/24")
|
||||
interface_one = ip_prefix.addr(node_one)
|
||||
message = coreapi.CoreLinkMessage.create(MessageFlags.ADD.value, [
|
||||
(LinkTlvs.N1_NUMBER, node_one),
|
||||
(LinkTlvs.N2_NUMBER, switch),
|
||||
(LinkTlvs.INTERFACE1_NUMBER, 0),
|
||||
(LinkTlvs.INTERFACE1_IP4, interface_one),
|
||||
(LinkTlvs.INTERFACE1_IP4_MASK, 24),
|
||||
])
|
||||
coreserver.request_handler.handle_message(message)
|
||||
switch_node = coreserver.session.get_node(switch)
|
||||
all_links = switch_node.all_link_data(0)
|
||||
assert len(all_links) == 1
|
||||
|
||||
message = coreapi.CoreLinkMessage.create(MessageFlags.DELETE.value, [
|
||||
(LinkTlvs.N1_NUMBER, node_one),
|
||||
(LinkTlvs.N2_NUMBER, switch),
|
||||
(LinkTlvs.INTERFACE1_NUMBER, 0),
|
||||
])
|
||||
coreserver.request_handler.handle_message(message)
|
||||
|
||||
switch_node = coreserver.session.get_node(switch)
|
||||
all_links = switch_node.all_link_data(0)
|
||||
assert len(all_links) == 0
|
||||
|
||||
def test_link_delete_net_to_node(self, coreserver):
|
||||
node_one = 1
|
||||
coreserver.session.add_node(_id=node_one)
|
||||
switch = 2
|
||||
coreserver.session.add_node(_id=switch, _type=NodeTypes.SWITCH)
|
||||
ip_prefix = Ipv4Prefix("10.0.0.0/24")
|
||||
interface_one = ip_prefix.addr(node_one)
|
||||
message = coreapi.CoreLinkMessage.create(MessageFlags.ADD.value, [
|
||||
(LinkTlvs.N1_NUMBER, node_one),
|
||||
(LinkTlvs.N2_NUMBER, switch),
|
||||
(LinkTlvs.INTERFACE1_NUMBER, 0),
|
||||
(LinkTlvs.INTERFACE1_IP4, interface_one),
|
||||
(LinkTlvs.INTERFACE1_IP4_MASK, 24),
|
||||
])
|
||||
coreserver.request_handler.handle_message(message)
|
||||
switch_node = coreserver.session.get_node(switch)
|
||||
all_links = switch_node.all_link_data(0)
|
||||
assert len(all_links) == 1
|
||||
|
||||
message = coreapi.CoreLinkMessage.create(MessageFlags.DELETE.value, [
|
||||
(LinkTlvs.N1_NUMBER, switch),
|
||||
(LinkTlvs.N2_NUMBER, node_one),
|
||||
(LinkTlvs.INTERFACE2_NUMBER, 0),
|
||||
])
|
||||
coreserver.request_handler.handle_message(message)
|
||||
|
||||
switch_node = coreserver.session.get_node(switch)
|
||||
all_links = switch_node.all_link_data(0)
|
||||
assert len(all_links) == 0
|
||||
|
||||
def test_session_update(self, coreserver):
|
||||
session_id = coreserver.session.id
|
||||
name = "test"
|
||||
message = coreapi.CoreSessionMessage.create(0, [
|
||||
(SessionTlvs.NUMBER, str(session_id)),
|
||||
(SessionTlvs.NAME, name),
|
||||
])
|
||||
|
||||
coreserver.request_handler.handle_message(message)
|
||||
|
||||
assert coreserver.session.name == name
|
||||
|
||||
def test_session_query(self, coreserver):
|
||||
coreserver.request_handler.dispatch_replies = mock.MagicMock()
|
||||
message = coreapi.CoreSessionMessage.create(MessageFlags.STRING.value, [])
|
||||
|
||||
coreserver.request_handler.handle_message(message)
|
||||
|
||||
args, _ = coreserver.request_handler.dispatch_replies.call_args
|
||||
replies = args[0]
|
||||
assert len(replies) == 1
|
||||
|
||||
def test_session_join(self, coreserver):
|
||||
coreserver.request_handler.dispatch_replies = mock.MagicMock()
|
||||
session_id = coreserver.session.id
|
||||
message = coreapi.CoreSessionMessage.create(MessageFlags.ADD.value, [
|
||||
(SessionTlvs.NUMBER, str(session_id)),
|
||||
])
|
||||
|
||||
coreserver.request_handler.handle_message(message)
|
||||
|
||||
assert coreserver.request_handler.session.id == session_id
|
||||
|
||||
def test_session_delete(self, coreserver):
|
||||
assert len(coreserver.server.coreemu.sessions) == 1
|
||||
session_id = coreserver.session.id
|
||||
message = coreapi.CoreSessionMessage.create(MessageFlags.DELETE.value, [
|
||||
(SessionTlvs.NUMBER, str(session_id)),
|
||||
])
|
||||
|
||||
coreserver.request_handler.handle_message(message)
|
||||
|
||||
assert len(coreserver.server.coreemu.sessions) == 0
|
||||
|
||||
def test_file_hook_add(self, coreserver):
|
||||
state = EventTypes.DATACOLLECT_STATE.value
|
||||
assert coreserver.session._hooks.get(state) is None
|
||||
file_name = "test.sh"
|
||||
file_data = "echo hello"
|
||||
message = coreapi.CoreFileMessage.create(MessageFlags.ADD.value, [
|
||||
(FileTlvs.TYPE, "hook:%s" % state),
|
||||
(FileTlvs.NAME, file_name),
|
||||
(FileTlvs.DATA, file_data),
|
||||
])
|
||||
|
||||
coreserver.request_handler.handle_message(message)
|
||||
|
||||
hooks = coreserver.session._hooks.get(state)
|
||||
assert len(hooks) == 1
|
||||
name, data = hooks[0]
|
||||
assert file_name == name
|
||||
assert file_data == data
|
||||
|
||||
def test_file_service_file_set(self, coreserver):
|
||||
node = coreserver.session.add_node()
|
||||
service = "DefaultRoute"
|
||||
file_name = "defaultroute.sh"
|
||||
file_data = "echo hello"
|
||||
message = coreapi.CoreFileMessage.create(MessageFlags.ADD.value, [
|
||||
(FileTlvs.NODE, node.id),
|
||||
(FileTlvs.TYPE, "service:%s" % service),
|
||||
(FileTlvs.NAME, file_name),
|
||||
(FileTlvs.DATA, file_data),
|
||||
])
|
||||
|
||||
coreserver.request_handler.handle_message(message)
|
||||
|
||||
service_file = coreserver.session.services.get_service_file(node, service, file_name)
|
||||
assert file_data == service_file.data
|
||||
|
||||
def test_file_node_file_copy(self, coreserver):
|
||||
file_name = "/var/log/test/node.log"
|
||||
node = coreserver.session.add_node()
|
||||
node.makenodedir()
|
||||
file_data = "echo hello"
|
||||
message = coreapi.CoreFileMessage.create(MessageFlags.ADD.value, [
|
||||
(FileTlvs.NODE, node.id),
|
||||
(FileTlvs.NAME, file_name),
|
||||
(FileTlvs.DATA, file_data),
|
||||
])
|
||||
|
||||
coreserver.request_handler.handle_message(message)
|
||||
|
||||
directory, basename = os.path.split(file_name)
|
||||
created_directory = directory[1:].replace("/", ".")
|
||||
create_path = os.path.join(node.nodedir, created_directory, basename)
|
||||
assert os.path.exists(create_path)
|
||||
|
||||
def test_exec_node_tty(self, coreserver):
|
||||
coreserver.request_handler.dispatch_replies = mock.MagicMock()
|
||||
node = coreserver.session.add_node()
|
||||
node.startup()
|
||||
message = coreapi.CoreExecMessage.create(MessageFlags.TTY.value, [
|
||||
(ExecuteTlvs.NODE, node.id),
|
||||
(ExecuteTlvs.NUMBER, 1),
|
||||
(ExecuteTlvs.COMMAND, "bash")
|
||||
])
|
||||
|
||||
coreserver.request_handler.handle_message(message)
|
||||
|
||||
args, _ = coreserver.request_handler.dispatch_replies.call_args
|
||||
replies = args[0]
|
||||
assert len(replies) == 1
|
||||
|
||||
def test_exec_local_command(self, coreserver):
|
||||
coreserver.request_handler.dispatch_replies = mock.MagicMock()
|
||||
node = coreserver.session.add_node()
|
||||
node.startup()
|
||||
message = coreapi.CoreExecMessage.create(
|
||||
MessageFlags.TEXT.value | MessageFlags.LOCAL.value, [
|
||||
(ExecuteTlvs.NODE, node.id),
|
||||
(ExecuteTlvs.NUMBER, 1),
|
||||
(ExecuteTlvs.COMMAND, "echo hello")
|
||||
])
|
||||
|
||||
coreserver.request_handler.handle_message(message)
|
||||
|
||||
args, _ = coreserver.request_handler.dispatch_replies.call_args
|
||||
replies = args[0]
|
||||
assert len(replies) == 1
|
||||
|
||||
def test_exec_node_command(self, coreserver):
|
||||
coreserver.request_handler.dispatch_replies = mock.MagicMock()
|
||||
node = coreserver.session.add_node()
|
||||
node.startup()
|
||||
message = coreapi.CoreExecMessage.create(
|
||||
MessageFlags.TEXT.value, [
|
||||
(ExecuteTlvs.NODE, node.id),
|
||||
(ExecuteTlvs.NUMBER, 1),
|
||||
(ExecuteTlvs.COMMAND, "echo hello")
|
||||
])
|
||||
|
||||
coreserver.request_handler.handle_message(message)
|
||||
|
||||
args, _ = coreserver.request_handler.dispatch_replies.call_args
|
||||
replies = args[0]
|
||||
assert len(replies) == 1
|
||||
|
||||
@pytest.mark.parametrize("state", [
|
||||
EventTypes.SHUTDOWN_STATE,
|
||||
EventTypes.RUNTIME_STATE,
|
||||
EventTypes.DATACOLLECT_STATE,
|
||||
EventTypes.CONFIGURATION_STATE,
|
||||
EventTypes.DEFINITION_STATE
|
||||
])
|
||||
def test_event_state(self, coreserver, state):
|
||||
message = coreapi.CoreEventMessage.create(0, [
|
||||
(EventTlvs.TYPE, state.value),
|
||||
])
|
||||
|
||||
coreserver.request_handler.handle_message(message)
|
||||
|
||||
assert coreserver.session.state == state.value
|
||||
|
||||
def test_event_schedule(self, coreserver):
|
||||
coreserver.session.add_event = mock.MagicMock()
|
||||
node = coreserver.session.add_node()
|
||||
message = coreapi.CoreEventMessage.create(MessageFlags.ADD.value, [
|
||||
(EventTlvs.TYPE, EventTypes.SCHEDULED.value),
|
||||
(EventTlvs.TIME, str(time.time() + 100)),
|
||||
(EventTlvs.NODE, node.id),
|
||||
(EventTlvs.NAME, "event"),
|
||||
(EventTlvs.DATA, "data"),
|
||||
])
|
||||
|
||||
coreserver.request_handler.handle_message(message)
|
||||
|
||||
coreserver.session.add_event.assert_called_once()
|
||||
|
||||
def test_event_save_xml(self, coreserver, tmpdir):
|
||||
xml_file = tmpdir.join("session.xml")
|
||||
file_path = xml_file.strpath
|
||||
coreserver.session.add_node()
|
||||
message = coreapi.CoreEventMessage.create(0, [
|
||||
(EventTlvs.TYPE, EventTypes.FILE_SAVE.value),
|
||||
(EventTlvs.NAME, file_path),
|
||||
])
|
||||
|
||||
coreserver.request_handler.handle_message(message)
|
||||
|
||||
assert os.path.exists(file_path)
|
||||
|
||||
def test_event_open_xml(self, coreserver, tmpdir):
|
||||
xml_file = tmpdir.join("session.xml")
|
||||
file_path = xml_file.strpath
|
||||
node = coreserver.session.add_node()
|
||||
coreserver.session.save_xml(file_path)
|
||||
coreserver.session.delete_node(node.id)
|
||||
message = coreapi.CoreEventMessage.create(0, [
|
||||
(EventTlvs.TYPE, EventTypes.FILE_OPEN.value),
|
||||
(EventTlvs.NAME, file_path),
|
||||
])
|
||||
|
||||
coreserver.request_handler.handle_message(message)
|
||||
|
||||
assert coreserver.session.get_node(node.id)
|
||||
|
||||
@pytest.mark.parametrize("state", [
|
||||
EventTypes.START,
|
||||
EventTypes.STOP,
|
||||
EventTypes.RESTART,
|
||||
EventTypes.PAUSE,
|
||||
EventTypes.RECONFIGURE
|
||||
])
|
||||
def test_event_service(self, coreserver, state):
|
||||
coreserver.session.broadcast_event = mock.MagicMock()
|
||||
node = coreserver.session.add_node()
|
||||
node.startup()
|
||||
message = coreapi.CoreEventMessage.create(0, [
|
||||
(EventTlvs.TYPE, state.value),
|
||||
(EventTlvs.NODE, node.id),
|
||||
(EventTlvs.NAME, "service:DefaultRoute"),
|
||||
])
|
||||
|
||||
coreserver.request_handler.handle_message(message)
|
||||
|
||||
coreserver.session.broadcast_event.assert_called_once()
|
||||
|
||||
@pytest.mark.parametrize("state", [
|
||||
EventTypes.START,
|
||||
EventTypes.STOP,
|
||||
EventTypes.RESTART,
|
||||
EventTypes.PAUSE,
|
||||
EventTypes.RECONFIGURE
|
||||
])
|
||||
def test_event_mobility(self, coreserver, state):
|
||||
message = coreapi.CoreEventMessage.create(0, [
|
||||
(EventTlvs.TYPE, state.value),
|
||||
(EventTlvs.NAME, "mobility:ns2script"),
|
||||
])
|
||||
|
||||
coreserver.request_handler.handle_message(message)
|
||||
|
||||
def test_register_gui(self, coreserver):
|
||||
coreserver.request_handler.master = False
|
||||
message = coreapi.CoreRegMessage.create(0, [
|
||||
(RegisterTlvs.GUI, "gui"),
|
||||
])
|
||||
|
||||
coreserver.request_handler.handle_message(message)
|
||||
|
||||
assert coreserver.request_handler.master is True
|
||||
|
||||
def test_register_xml(self, coreserver, tmpdir):
|
||||
xml_file = tmpdir.join("session.xml")
|
||||
file_path = xml_file.strpath
|
||||
node = coreserver.session.add_node()
|
||||
coreserver.session.save_xml(file_path)
|
||||
coreserver.session.delete_node(node.id)
|
||||
message = coreapi.CoreRegMessage.create(0, [
|
||||
(RegisterTlvs.EXECUTE_SERVER, file_path),
|
||||
])
|
||||
coreserver.session.instantiate()
|
||||
|
||||
coreserver.request_handler.handle_message(message)
|
||||
|
||||
assert coreserver.server.coreemu.sessions[2].get_node(node.id)
|
||||
|
||||
def test_register_python(self, coreserver, tmpdir):
|
||||
xml_file = tmpdir.join("test.py")
|
||||
file_path = xml_file.strpath
|
||||
with open(file_path, "w") as f:
|
||||
f.write("coreemu = globals()['coreemu']\n")
|
||||
f.write("session = coreemu.sessions[1]\n")
|
||||
f.write("session.add_node()\n")
|
||||
message = coreapi.CoreRegMessage.create(0, [
|
||||
(RegisterTlvs.EXECUTE_SERVER, file_path),
|
||||
])
|
||||
coreserver.session.instantiate()
|
||||
|
||||
coreserver.request_handler.handle_message(message)
|
||||
|
||||
assert len(coreserver.session.nodes) == 1
|
||||
|
||||
def test_config_all(self, coreserver):
|
||||
node = coreserver.session.add_node()
|
||||
message = coreapi.CoreConfMessage.create(MessageFlags.ADD.value, [
|
||||
(ConfigTlvs.OBJECT, "all"),
|
||||
(ConfigTlvs.NODE, node.id),
|
||||
(ConfigTlvs.TYPE, ConfigFlags.RESET.value),
|
||||
])
|
||||
coreserver.session.location.reset = mock.MagicMock()
|
||||
|
||||
coreserver.request_handler.handle_message(message)
|
||||
|
||||
coreserver.session.location.reset.assert_called_once()
|
||||
|
||||
def test_config_options_request(self, coreserver):
|
||||
message = coreapi.CoreConfMessage.create(0, [
|
||||
(ConfigTlvs.OBJECT, "session"),
|
||||
(ConfigTlvs.TYPE, ConfigFlags.REQUEST.value),
|
||||
])
|
||||
coreserver.request_handler.handle_broadcast_config = mock.MagicMock()
|
||||
|
||||
coreserver.request_handler.handle_message(message)
|
||||
|
||||
coreserver.request_handler.handle_broadcast_config.assert_called_once()
|
||||
|
||||
def test_config_options_update(self, coreserver):
|
||||
test_key = "test"
|
||||
test_value = "test"
|
||||
values = {
|
||||
test_key: test_value
|
||||
}
|
||||
message = coreapi.CoreConfMessage.create(0, [
|
||||
(ConfigTlvs.OBJECT, "session"),
|
||||
(ConfigTlvs.TYPE, ConfigFlags.UPDATE.value),
|
||||
(ConfigTlvs.VALUES, dict_to_str(values)),
|
||||
])
|
||||
|
||||
coreserver.request_handler.handle_message(message)
|
||||
|
||||
assert coreserver.session.options.get_config(test_key) == test_value
|
||||
|
||||
def test_config_location_reset(self, coreserver):
|
||||
message = coreapi.CoreConfMessage.create(0, [
|
||||
(ConfigTlvs.OBJECT, "location"),
|
||||
(ConfigTlvs.TYPE, ConfigFlags.RESET.value),
|
||||
])
|
||||
coreserver.session.location.refxyz = (10, 10, 10)
|
||||
|
||||
coreserver.request_handler.handle_message(message)
|
||||
|
||||
assert coreserver.session.location.refxyz == (0, 0, 0)
|
||||
|
||||
def test_config_location_update(self, coreserver):
|
||||
message = coreapi.CoreConfMessage.create(0, [
|
||||
(ConfigTlvs.OBJECT, "location"),
|
||||
(ConfigTlvs.TYPE, ConfigFlags.UPDATE.value),
|
||||
(ConfigTlvs.VALUES, "10|10|70|50|0|0.5"),
|
||||
])
|
||||
|
||||
coreserver.request_handler.handle_message(message)
|
||||
|
||||
assert coreserver.session.location.refxyz == (10, 10, 0.0)
|
||||
assert coreserver.session.location.refgeo == (70, 50, 0)
|
||||
assert coreserver.session.location.refscale == 0.5
|
||||
|
||||
def test_config_metadata_request(self, coreserver):
|
||||
message = coreapi.CoreConfMessage.create(0, [
|
||||
(ConfigTlvs.OBJECT, "metadata"),
|
||||
(ConfigTlvs.TYPE, ConfigFlags.REQUEST.value),
|
||||
])
|
||||
coreserver.request_handler.handle_broadcast_config = mock.MagicMock()
|
||||
|
||||
coreserver.request_handler.handle_message(message)
|
||||
|
||||
coreserver.request_handler.handle_broadcast_config.assert_called_once()
|
||||
|
||||
def test_config_metadata_update(self, coreserver):
|
||||
test_key = "test"
|
||||
test_value = "test"
|
||||
values = {
|
||||
test_key: test_value
|
||||
}
|
||||
message = coreapi.CoreConfMessage.create(0, [
|
||||
(ConfigTlvs.OBJECT, "metadata"),
|
||||
(ConfigTlvs.TYPE, ConfigFlags.UPDATE.value),
|
||||
(ConfigTlvs.VALUES, dict_to_str(values)),
|
||||
])
|
||||
|
||||
coreserver.request_handler.handle_message(message)
|
||||
|
||||
assert coreserver.session.metadata.get_config(test_key) == test_value
|
||||
|
||||
def test_config_broker_request(self, coreserver):
|
||||
server = "test"
|
||||
host = "10.0.0.1"
|
||||
port = 50000
|
||||
message = coreapi.CoreConfMessage.create(0, [
|
||||
(ConfigTlvs.OBJECT, "broker"),
|
||||
(ConfigTlvs.TYPE, ConfigFlags.UPDATE.value),
|
||||
(ConfigTlvs.VALUES, "%s:%s:%s" % (server, host, port)),
|
||||
])
|
||||
coreserver.session.broker.addserver = mock.MagicMock()
|
||||
coreserver.session.broker.setupserver = mock.MagicMock()
|
||||
|
||||
coreserver.request_handler.handle_message(message)
|
||||
|
||||
coreserver.session.broker.addserver.assert_called_once_with(server, host, port)
|
||||
coreserver.session.broker.setupserver.assert_called_once_with(server)
|
||||
|
||||
def test_config_services_request_all(self, coreserver):
|
||||
message = coreapi.CoreConfMessage.create(0, [
|
||||
(ConfigTlvs.OBJECT, "services"),
|
||||
(ConfigTlvs.TYPE, ConfigFlags.REQUEST.value),
|
||||
])
|
||||
coreserver.request_handler.handle_broadcast_config = mock.MagicMock()
|
||||
|
||||
coreserver.request_handler.handle_message(message)
|
||||
|
||||
coreserver.request_handler.handle_broadcast_config.assert_called_once()
|
||||
|
||||
def test_config_services_request_specific(self, coreserver):
|
||||
node = coreserver.session.add_node()
|
||||
message = coreapi.CoreConfMessage.create(0, [
|
||||
(ConfigTlvs.NODE, node.id),
|
||||
(ConfigTlvs.OBJECT, "services"),
|
||||
(ConfigTlvs.TYPE, ConfigFlags.REQUEST.value),
|
||||
(ConfigTlvs.OPAQUE, "service:DefaultRoute"),
|
||||
])
|
||||
coreserver.request_handler.handle_broadcast_config = mock.MagicMock()
|
||||
|
||||
coreserver.request_handler.handle_message(message)
|
||||
|
||||
coreserver.request_handler.handle_broadcast_config.assert_called_once()
|
||||
|
||||
def test_config_services_request_specific_file(self, coreserver):
|
||||
node = coreserver.session.add_node()
|
||||
message = coreapi.CoreConfMessage.create(0, [
|
||||
(ConfigTlvs.NODE, node.id),
|
||||
(ConfigTlvs.OBJECT, "services"),
|
||||
(ConfigTlvs.TYPE, ConfigFlags.REQUEST.value),
|
||||
(ConfigTlvs.OPAQUE, "service:DefaultRoute:defaultroute.sh"),
|
||||
])
|
||||
coreserver.session.broadcast_file = mock.MagicMock()
|
||||
|
||||
coreserver.request_handler.handle_message(message)
|
||||
|
||||
coreserver.session.broadcast_file.assert_called_once()
|
||||
|
||||
def test_config_services_reset(self, coreserver):
|
||||
node = coreserver.session.add_node()
|
||||
service = "DefaultRoute"
|
||||
coreserver.session.services.set_service(node.id, service)
|
||||
message = coreapi.CoreConfMessage.create(0, [
|
||||
(ConfigTlvs.OBJECT, "services"),
|
||||
(ConfigTlvs.TYPE, ConfigFlags.RESET.value),
|
||||
])
|
||||
assert coreserver.session.services.get_service(node.id, service) is not None
|
||||
|
||||
coreserver.request_handler.handle_message(message)
|
||||
|
||||
assert coreserver.session.services.get_service(node.id, service) is None
|
||||
|
||||
def test_config_services_set(self, coreserver):
|
||||
node = coreserver.session.add_node()
|
||||
service = "DefaultRoute"
|
||||
values = {
|
||||
"meta": "metadata"
|
||||
}
|
||||
message = coreapi.CoreConfMessage.create(0, [
|
||||
(ConfigTlvs.NODE, node.id),
|
||||
(ConfigTlvs.OBJECT, "services"),
|
||||
(ConfigTlvs.TYPE, ConfigFlags.UPDATE.value),
|
||||
(ConfigTlvs.OPAQUE, "service:%s" % service),
|
||||
(ConfigTlvs.VALUES, dict_to_str(values)),
|
||||
])
|
||||
assert coreserver.session.services.get_service(node.id, service) is None
|
||||
|
||||
coreserver.request_handler.handle_message(message)
|
||||
|
||||
assert coreserver.session.services.get_service(node.id, service) is not None
|
||||
|
||||
def test_config_mobility_reset(self, coreserver):
|
||||
wlan = coreserver.session.add_node(_type=NodeTypes.WIRELESS_LAN)
|
||||
message = coreapi.CoreConfMessage.create(0, [
|
||||
(ConfigTlvs.OBJECT, "MobilityManager"),
|
||||
(ConfigTlvs.TYPE, ConfigFlags.RESET.value),
|
||||
])
|
||||
coreserver.session.mobility.set_model_config(wlan.id, BasicRangeModel.name, {})
|
||||
assert len(coreserver.session.mobility.node_configurations) == 1
|
||||
|
||||
coreserver.request_handler.handle_message(message)
|
||||
|
||||
assert len(coreserver.session.mobility.node_configurations) == 0
|
||||
|
||||
def test_config_mobility_model_request(self, coreserver):
|
||||
wlan = coreserver.session.add_node(_type=NodeTypes.WIRELESS_LAN)
|
||||
message = coreapi.CoreConfMessage.create(0, [
|
||||
(ConfigTlvs.NODE, wlan.id),
|
||||
(ConfigTlvs.OBJECT, BasicRangeModel.name),
|
||||
(ConfigTlvs.TYPE, ConfigFlags.REQUEST.value),
|
||||
])
|
||||
coreserver.request_handler.handle_broadcast_config = mock.MagicMock()
|
||||
|
||||
coreserver.request_handler.handle_message(message)
|
||||
|
||||
coreserver.request_handler.handle_broadcast_config.assert_called_once()
|
||||
|
||||
def test_config_mobility_model_update(self, coreserver):
|
||||
wlan = coreserver.session.add_node(_type=NodeTypes.WIRELESS_LAN)
|
||||
config_key = "range"
|
||||
config_value = "1000"
|
||||
values = {
|
||||
config_key: config_value
|
||||
}
|
||||
message = coreapi.CoreConfMessage.create(0, [
|
||||
(ConfigTlvs.NODE, wlan.id),
|
||||
(ConfigTlvs.OBJECT, BasicRangeModel.name),
|
||||
(ConfigTlvs.TYPE, ConfigFlags.UPDATE.value),
|
||||
(ConfigTlvs.VALUES, dict_to_str(values)),
|
||||
])
|
||||
|
||||
coreserver.request_handler.handle_message(message)
|
||||
|
||||
config = coreserver.session.mobility.get_model_config(wlan.id, BasicRangeModel.name)
|
||||
assert config[config_key] == config_value
|
||||
|
||||
def test_config_emane_model_request(self, coreserver):
|
||||
wlan = coreserver.session.add_node(_type=NodeTypes.WIRELESS_LAN)
|
||||
message = coreapi.CoreConfMessage.create(0, [
|
||||
(ConfigTlvs.NODE, wlan.id),
|
||||
(ConfigTlvs.OBJECT, EmaneIeee80211abgModel.name),
|
||||
(ConfigTlvs.TYPE, ConfigFlags.REQUEST.value),
|
||||
])
|
||||
coreserver.request_handler.handle_broadcast_config = mock.MagicMock()
|
||||
|
||||
coreserver.request_handler.handle_message(message)
|
||||
|
||||
coreserver.request_handler.handle_broadcast_config.assert_called_once()
|
||||
|
||||
def test_config_emane_model_update(self, coreserver):
|
||||
wlan = coreserver.session.add_node(_type=NodeTypes.WIRELESS_LAN)
|
||||
config_key = "distance"
|
||||
config_value = "50051"
|
||||
values = {
|
||||
config_key: config_value
|
||||
}
|
||||
message = coreapi.CoreConfMessage.create(0, [
|
||||
(ConfigTlvs.NODE, wlan.id),
|
||||
(ConfigTlvs.OBJECT, EmaneIeee80211abgModel.name),
|
||||
(ConfigTlvs.TYPE, ConfigFlags.UPDATE.value),
|
||||
(ConfigTlvs.VALUES, dict_to_str(values)),
|
||||
])
|
||||
|
||||
coreserver.request_handler.handle_message(message)
|
||||
|
||||
config = coreserver.session.emane.get_model_config(wlan.id, EmaneIeee80211abgModel.name)
|
||||
assert config[config_key] == config_value
|
||||
|
||||
def test_config_emane_request(self, coreserver):
|
||||
message = coreapi.CoreConfMessage.create(0, [
|
||||
(ConfigTlvs.OBJECT, "emane"),
|
||||
(ConfigTlvs.TYPE, ConfigFlags.REQUEST.value),
|
||||
])
|
||||
coreserver.request_handler.handle_broadcast_config = mock.MagicMock()
|
||||
|
||||
coreserver.request_handler.handle_message(message)
|
||||
|
||||
coreserver.request_handler.handle_broadcast_config.assert_called_once()
|
||||
|
||||
def test_config_emane_update(self, coreserver):
|
||||
config_key = "eventservicedevice"
|
||||
config_value = "eth4"
|
||||
values = {
|
||||
config_key: config_value
|
||||
}
|
||||
message = coreapi.CoreConfMessage.create(0, [
|
||||
(ConfigTlvs.OBJECT, "emane"),
|
||||
(ConfigTlvs.TYPE, ConfigFlags.UPDATE.value),
|
||||
(ConfigTlvs.VALUES, dict_to_str(values)),
|
||||
])
|
||||
|
||||
coreserver.request_handler.handle_message(message)
|
||||
|
||||
config = coreserver.session.emane.get_configs()
|
||||
assert config[config_key] == config_value
|
||||
|
|
|
@ -350,7 +350,7 @@ class TestXml:
|
|||
|
||||
# create link
|
||||
link_options = LinkOptions()
|
||||
link_options.per = 20
|
||||
link_options.per = 10.5
|
||||
link_options.bandwidth = 50000
|
||||
link_options.jitter = 10
|
||||
link_options.delay = 30
|
||||
|
@ -415,7 +415,7 @@ class TestXml:
|
|||
|
||||
# create link
|
||||
link_options = LinkOptions()
|
||||
link_options.per = 20
|
||||
link_options.per = 10.5
|
||||
link_options.bandwidth = 50000
|
||||
link_options.jitter = 10
|
||||
link_options.delay = 30
|
||||
|
@ -483,7 +483,7 @@ class TestXml:
|
|||
link_options_one.unidirectional = 1
|
||||
link_options_one.bandwidth = 5000
|
||||
link_options_one.delay = 10
|
||||
link_options_one.per = 5
|
||||
link_options_one.per = 10.5
|
||||
link_options_one.dup = 5
|
||||
link_options_one.jitter = 5
|
||||
session.add_link(node_one.id, node_two.id, interface_one, interface_two, link_options_one)
|
||||
|
|
|
@ -26,7 +26,7 @@ A CORE node is a lightweight virtual machine. The CORE framework runs on Linux.

### Linux

Linux network namespaces (also known as netns, LXC, or [Linux containers](http://lxc.sourceforge.net/)) is the primary virtualization technique used by CORE. LXC has been part of the mainline Linux kernel since 2.6.24. Most recent Linux distributions have namespaces-enabled kernels out of the box. A namespace is created using the ```clone()``` system call. Each namespace has its own process environment and private network stack. Network namespaces share the same filesystem in CORE.
Linux network namespaces (also known as netns) are the primary virtualization technique used by CORE. Most recent Linux distributions have namespaces-enabled kernels out of the box. A namespace is created using the ```clone()``` system call. Each namespace has its own process environment and private network stack. Network namespaces share the same filesystem in CORE.

CORE combines these namespaces with Linux Ethernet bridging to form networks. Link characteristics are applied using Linux Netem queuing disciplines. Ebtables provides Ethernet frame filtering on Linux bridges. Wireless networks are emulated by controlling which interfaces can send and receive with ebtables rules.
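
To make the namespace mechanics concrete, here is a minimal, illustrative sketch (not CORE code) that creates two namespaces and joins them with a veth pair using the iproute2 `ip` tool, which is conceptually what CORE automates with bridges and Netem. It assumes root privileges and that iproute2 is installed.

```python
# Illustrative only: build two namespaces joined by a veth pair, roughly the
# primitives CORE automates. Requires root and iproute2.
import subprocess

def run(cmd):
    # run a shell command and raise if it fails
    subprocess.check_call(cmd, shell=True)

for ns in ("demo1", "demo2"):
    run("ip netns add %s" % ns)

run("ip link add veth0 type veth peer name veth1")
run("ip link set veth0 netns demo1")
run("ip link set veth1 netns demo2")
run("ip netns exec demo1 ip addr add 10.0.0.1/24 dev veth0")
run("ip netns exec demo2 ip addr add 10.0.0.2/24 dev veth1")
run("ip netns exec demo1 ip link set veth0 up")
run("ip netns exec demo2 ip link set veth1 up")
run("ip netns exec demo1 ping -c 1 10.0.0.2")

# clean up the demo namespaces
for ns in ("demo1", "demo2"):
    run("ip netns delete %s" % ns)
```
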
180
docs/distributed.md
Normal file
|
@ -0,0 +1,180 @@
# CORE - Distributed Emulation

* Table of Contents
{:toc}

## Overview

A large emulation scenario can be deployed on multiple emulation servers and
controlled by a single GUI. The GUI, representing the entire topology, can be
run on one of the emulation servers or on a separate machine.

Each machine that will act as an emulation server would ideally have the
same version of CORE installed. The GUI component is not required on the
servers, but the CORE Python daemon **core-daemon** must be installed.

**NOTE: The server that the GUI connects with is referred to as
the master server.**

## Configuring Listen Address

First we need to configure the **core-daemon** on all servers to listen on an
interface over the network. The simplest approach is to update the core
configuration file to listen on all interfaces. Alternatively, configure it to
listen on the specific interface you desire by supplying the correct address.

The **listenaddr** configuration should be set to the address of the interface
that should receive CORE API control commands from the other servers;
setting **listenaddr = 0.0.0.0** causes the Python daemon to listen on all
interfaces. CORE uses TCP port **4038** by default to communicate from the
controlling machine (with GUI) to the emulation servers. Make sure that
firewall rules are configured as necessary to allow this traffic.

```shell
# open configuration file
vi /etc/core/core.conf

# within core.conf
[core-daemon]
listenaddr = 0.0.0.0
```
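
After restarting the daemons, a quick TCP connect test confirms that each server is reachable on the listen port. The snippet below is an illustrative sketch only (the server names and addresses are placeholders), not part of CORE.

```python
# Illustrative reachability check for remote core-daemons on TCP port 4038.
# Server names and addresses are placeholders for your own hosts.
import socket

SERVERS = {"server2": "192.168.0.2", "server3": "192.168.0.3"}

for name, host in sorted(SERVERS.items()):
    try:
        sock = socket.create_connection((host, 4038), timeout=3)
        sock.close()
        print("%s (%s): core-daemon reachable" % (name, host))
    except (socket.timeout, socket.error) as exc:
        print("%s (%s): not reachable - %s" % (name, host, exc))
```
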
## Enabling Remote SSH Shells

### Update GUI Terminal Program

**Edit -> Preferences... -> Terminal program:**

We currently recommend setting this to **xterm -e**, as the default
**gnome-terminal** will not work.

You may need to install xterm if it is not already installed.

```shell
sudo apt install xterm
```

### Setup SSH

In order to easily open shells on the emulation servers, the servers should be
running an SSH server, and public key login should be enabled. This is
accomplished by generating an SSH key for your user on all servers being used
for distributed emulation, if you do not already have one, and then copying
your master server's public key to the authorized_keys file on all other
servers that will help drive the distributed emulation. When double-clicking on
a node during runtime, instead of opening a local shell, the GUI will attempt
to SSH to the emulation server to run an interactive shell.

You need to have the same user defined on each server, since the user used
for these remote shells is the same user that is running the CORE GUI.

```shell
# install openssh-server
sudo apt install openssh-server

# generate an ssh key if needed
ssh-keygen -o -t rsa -b 4096

# copy the public key to the authorized_keys file
ssh-copy-id user@server
# or (note: this replaces any existing authorized_keys on the server)
scp ~/.ssh/id_rsa.pub username@server:~/.ssh/authorized_keys
```
## Add Emulation Servers in GUI

Within the core-gui navigate to the menu option:

**Session -> Emulation servers...**

Within the dialog box presented, add a new server, or modify an existing one,
using the name, address, and port of the server you plan to use.

Server configurations are loaded from and written to a configuration file for
the GUI.

**~/.core/servers.conf**
```conf
# name address port
server2 192.168.0.2 4038
```
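
If you script parts of your workflow, the file is simple enough to parse directly. The helper below is a hypothetical sketch (not a CORE API) that reads the `name address port` entries shown above.

```python
# Hypothetical helper: read ~/.core/servers.conf entries ("name address port")
# into a dict of name -> (address, port). Not part of the CORE GUI.
import os

def load_servers(path=os.path.expanduser("~/.core/servers.conf")):
    servers = {}
    with open(path) as conf:
        for line in conf:
            line = line.strip()
            if not line or line.startswith("#"):
                continue  # skip blank lines and comments
            name, address, port = line.split()[:3]
            servers[name] = (address, int(port))
    return servers

print(load_servers())  # e.g. {'server2': ('192.168.0.2', 4038)}
```
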
## Assigning Nodes

The user needs to assign nodes to emulation servers in the scenario. Making no
assignment means the node will be emulated on the master server.
In the configuration window of every node, a drop-down box located between
the *Node name* and the *Image* button selects the name of the emulation
server. By default, this menu shows *(none)*, indicating that the node will
be emulated locally on the master. When entering Execute mode, the CORE GUI
will deploy the node on its assigned emulation server.

Another way to assign emulation servers is to select one or more nodes using
the select tool (shift-click to select multiple), then right-click one of the
nodes and choose *Assign to...*.

The **CORE emulation servers** dialog box may also be used to assign nodes to
servers. The assigned server name appears in parentheses next to the node name.
To assign all nodes to one of the servers, click on the server name and then
the **all nodes** button. Servers that have assigned nodes are shown in blue in
the server list. Another option is to first select a subset of nodes, then open
the **CORE emulation servers** box and use the **selected nodes** button.

**IMPORTANT: Leave the nodes unassigned if they are to be run on the master
server. Do not explicitly assign the nodes to the master server.**

## GUI Visualization

If there is a link between two nodes residing on different servers, the GUI
will draw the link with a dashed line.
## Concerns and Limitations

Wireless nodes, i.e. those connected to a WLAN node, can be assigned to
different emulation servers and participate in the same wireless network
only if an EMANE model is used for the WLAN. The basic range model does
not work across multiple servers due to the Linux bridging and ebtables
rules that are used.

**NOTE: The basic range wireless model does not support distributed emulation,
but EMANE does.**

When nodes are linked across servers, the **core-daemons** will automatically
create the necessary tunnels between the nodes when executed. Care should be
taken to arrange the topology such that the number of tunnels is minimized. The
tunnels carry data between servers to connect nodes as specified in the topology.
These tunnels are created using GRE tunneling, similar to the Tunnel Tool.
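
For reference, a GRE tap tunnel endpoint similar to what the daemons build can be created by hand with iproute2. The sketch below is illustrative only (the addresses and the `key` value are placeholders), and is not the exact sequence CORE runs.

```python
# Illustrative only: one endpoint of a gretap tunnel between two servers,
# similar in spirit to the tunnels core-daemon creates automatically.
# 203.0.113.1 is this server and 203.0.113.2 the remote server (placeholders).
import subprocess

def run(cmd):
    subprocess.check_call(cmd, shell=True)

run("ip link add gt0 type gretap remote 203.0.113.2 local 203.0.113.1 key 1")
run("ip link set gt0 up")
# the matching commands run on the remote server with local/remote swapped
```
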
### EMANE Configuration and Issues

EMANE needs to have controlnet configured in **core.conf** in order to start up correctly.
The names before the addresses need to match the servers configured in
**~/.core/servers.conf** previously.

```shell
controlnet = core1:172.16.1.0/24 core2:172.16.2.0/24 core3:172.16.3.0/24 core4:172.16.4.0/24 core5:172.16.5.0/24
```
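
The value is a space-separated list of `server:subnet` pairs. A quick sanity check such as the sketch below (illustrative only, with assumed server names) confirms that every name also appears in **~/.core/servers.conf**.

```python
# Illustrative sanity check for a distributed controlnet value.
controlnet = "core1:172.16.1.0/24 core2:172.16.2.0/24 core3:172.16.3.0/24"

subnets = dict(entry.split(":", 1) for entry in controlnet.split())
print(subnets)  # {'core1': '172.16.1.0/24', ...}

# names that must also appear in ~/.core/servers.conf (assumed for this example)
configured = {"core1", "core2", "core3"}
missing = set(subnets) - configured
if missing:
    print("controlnet references unknown servers: %s" % ", ".join(sorted(missing)))
```
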
EMANE appears to require location events to be synced across all EMANE
instances for nodes to find each other. Using an EMANE EEL file for your
scenario can help clear this up, which might be desired anyway.

* https://github.com/adjacentlink/emane/wiki/EEL-Generator

You can also move nodes within the GUI to help trigger location events from
CORE when the **core.conf** setting below is used, assuming the nodes did not
find each other by default and you are not using an EEL file.

```shell
emane_event_generate = True
```

## Distributed Checklist

1. Install the same version of the CORE daemon on all servers.
1. Set the **listenaddr** configuration in all of the servers' core.conf files,
   then start (or restart) the daemon.
1. Install and configure public-key SSH access on all servers (if you want to
   use double-click shells or Widgets).
1. Assign nodes to their desired servers; leave a node unassigned to run it on
   the master server.
1. Press the **Start** button to launch the distributed emulation.
@ -68,9 +68,7 @@ sudo ln -s /usr/local/share/emane /usr/share/emane

CORE supports custom developed EMANE models by way of dynamically loading user-created Python files that represent the model. Custom EMANE models should be placed within the path defined by **emane_models_dir** in the CORE configuration file. This path cannot end in **/emane**.

Here is an example model with documentation describing functionality:
[Example Model](examplemodel.html)

[Example Model](/daemon/examples/myemane/examplemodel.py)

## Single PC with EMANE

@ -1,239 +0,0 @@
docs/examplemodel.html (pycco-generated HTML page, deleted): it documented the example EMANE model now linked directly at daemon/examples/myemane/examplemodel.py. Recovered source, with its annotations as comments:

from core.emane import emanemanifest
from core.emane import emanemodel


# Custom EMANE Model
class ExampleModel(emanemodel.EmaneModel):
    # MAC Definition
    # Defines the emane model name that will show up in the GUI.
    name = "emane_example"
    # Defines the mac library that the model will reference.
    mac_library = "rfpipemaclayer"
    # Defines the mac manifest file that will be parsed to obtain configuration options,
    # that will be displayed within the GUI.
    mac_xml = "/usr/share/emane/manifest/rfpipemaclayer.xml"
    # Allows you to override options that are maintained within the manifest file above.
    mac_defaults = {
        "pcrcurveuri": "/usr/share/emane/xml/models/mac/rfpipe/rfpipepcr.xml",
    }
    # Parses the manifest file and converts configurations into core supported formats.
    mac_config = emanemanifest.parse(mac_xml, mac_defaults)

    # PHY Definition
    # NOTE: phy configuration will default to the universal model as seen below and the
    # below section does not have to be included.
    # Defines the phy library that the model will reference, used if you need to provide
    # a custom phy.
    phy_library = None
    # Defines the phy manifest file that will be parsed to obtain configuration options,
    # that will be displayed within the GUI.
    phy_xml = "/usr/share/emane/manifest/emanephy.xml"
    # Allows you to override options that are maintained within the manifest file above
    # or for the default universal model.
    phy_defaults = {
        "subid": "1",
        "propagationmodel": "2ray",
        "noisemode": "none"
    }
    # Parses the manifest file and converts configurations into core supported formats.
    phy_config = emanemanifest.parse(phy_xml, phy_defaults)

    # Custom override options
    # NOTE: these options default to what's seen below and do not have to be included.
    # Allows you to ignore options within phy/mac, used typically if you needed to add
    # a custom option for display within the gui.
    config_ignore = set()
    # Allows you to override how options are displayed with the GUI, using the GUI format
    # of "name:1-2|othername:3-4". This will be parsed into tabs, split by "|" and account
    # for items based on the indexed numbers after ":" for including values in each tab.
    config_groups_override = None
    # Allows you to override the default config matrix list. This value by default is the
    # mac_config + phy_config, in that order.
    config_matrix_override = None

@ -1,344 +0,0 @@
A second pycco-generated HTML page (titled sample.py, deleted): it documented a sample user-defined service. Recovered source, with its annotations as comments:

from core.service import CoreService
from core.service import ServiceMode


# Custom CORE Service
class MyService(CoreService):
    # Service Attributes
    # Name used as a unique ID for this service and is required, no spaces.
    name = "MyService"
    # Allows you to group services within the GUI under a common name.
    group = "Utility"
    # Executables this service depends on to function, if executable is not on the path,
    # service will not be loaded.
    executables = ()
    # Services that this service depends on for startup, tuple of service names.
    dependencies = ()
    # Directories that this service will create within a node.
    dirs = ()
    # Files that this service will generate, without a full path this file goes in the
    # node's directory. e.g. /tmp/pycore.12345/n1.conf/myfile
    configs = ("myservice1.sh", "myservice2.sh")
    # Commands used to start this service, any non-zero exit code will cause a failure.
    startup = ("sh %s" % configs[0], "sh %s" % configs[1])
    # Commands used to validate that a service was started, any non-zero exit code will
    # cause a failure.
    validate = ()
    # Validation mode, used to determine startup success.
    # * NON_BLOCKING - runs startup commands, and validates success with validation commands
    # * BLOCKING - runs startup commands, and validates success with the startup commands themselves
    # * TIMER - runs startup commands, and validates success by waiting for "validation_timer" alone
    validation_mode = ServiceMode.NON_BLOCKING
    # Time in seconds for a service to wait for validation, before determining success
    # in TIMER/NON_BLOCKING modes.
    validation_timer = 5
    # Period in seconds to wait before retrying validation, only used in NON_BLOCKING mode.
    validation_period = 0.5
    # Shutdown commands to stop this service.
    shutdown = ()

    # On Load
    @classmethod
    def on_load(cls):
        # Provides a way to run some arbitrary logic when the service is loaded, possibly
        # to help facilitate dynamic settings for the environment.
        pass
|
||||
<div class='octowrap'>
|
||||
<a class='octothorpe' href='#section-17'>#</a>
|
||||
</div>
|
||||
<h2>Get Configs</h2>
|
||||
</div>
|
||||
<div class='code'>
|
||||
<div class="highlight"><pre> <span class="nd">@classmethod</span>
|
||||
<span class="k">def</span> <span class="nf">get_configs</span><span class="p">(</span><span class="bp">cls</span><span class="p">,</span> <span class="n">node</span><span class="p">):</span></pre></div>
|
||||
</div>
|
||||
</div>
|
||||
<div class='clearall'></div>
|
||||
<div class='section' id='section-18'>
|
||||
<div class='docs'>
|
||||
<div class='octowrap'>
|
||||
<a class='octothorpe' href='#section-18'>#</a>
|
||||
</div>
|
||||
<p>Provides a way to dynamically generate the config files from the node a service will run on.
|
||||
Defaults to the class definition and can be left out entirely if not needed.</p>
|
||||
</div>
|
||||
<div class='code'>
|
||||
<div class="highlight"><pre> <span class="k">return</span> <span class="bp">cls</span><span class="o">.</span><span class="n">configs</span></pre></div>
|
||||
</div>
|
||||
</div>
|
||||
<div class='clearall'></div>
|
||||
<div class='section' id='section-19'>
|
||||
<div class='docs'>
|
||||
<div class='octowrap'>
|
||||
<a class='octothorpe' href='#section-19'>#</a>
|
||||
</div>
|
||||
<h2>Generate Config</h2>
|
||||
</div>
|
||||
<div class='code'>
|
||||
<div class="highlight"><pre> <span class="nd">@classmethod</span>
|
||||
<span class="k">def</span> <span class="nf">generate_config</span><span class="p">(</span><span class="bp">cls</span><span class="p">,</span> <span class="n">node</span><span class="p">,</span> <span class="n">filename</span><span class="p">):</span></pre></div>
|
||||
</div>
|
||||
</div>
|
||||
<div class='clearall'></div>
|
||||
<div class='section' id='section-20'>
|
||||
<div class='docs'>
|
||||
<div class='octowrap'>
|
||||
<a class='octothorpe' href='#section-20'>#</a>
|
||||
</div>
|
||||
<p>Returns a string representation for a file, given the node the service is starting on and the config filename
|
||||
that this information will be used for. This must be defined if “configs” are defined.</p>
|
||||
</div>
|
||||
<div class='code'>
|
||||
<div class="highlight"><pre> <span class="n">cfg</span> <span class="o">=</span> <span class="s2">"#!/bin/sh</span><span class="se">\n</span><span class="s2">"</span>
|
||||
|
||||
<span class="k">if</span> <span class="n">filename</span> <span class="o">==</span> <span class="bp">cls</span><span class="o">.</span><span class="n">configs</span><span class="p">[</span><span class="mi">0</span><span class="p">]:</span>
|
||||
<span class="n">cfg</span> <span class="o">+=</span> <span class="s2">"# auto-generated by MyService (sample.py)</span><span class="se">\n</span><span class="s2">"</span>
|
||||
<span class="k">for</span> <span class="n">ifc</span> <span class="ow">in</span> <span class="n">node</span><span class="o">.</span><span class="n">netifs</span><span class="p">():</span>
|
||||
<span class="n">cfg</span> <span class="o">+=</span> <span class="s1">'echo "Node </span><span class="si">%s</span><span class="s1"> has interface </span><span class="si">%s</span><span class="s1">"</span><span class="se">\n</span><span class="s1">'</span> <span class="o">%</span> <span class="p">(</span><span class="n">node</span><span class="o">.</span><span class="n">name</span><span class="p">,</span> <span class="n">ifc</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
|
||||
<span class="k">elif</span> <span class="n">filename</span> <span class="o">==</span> <span class="bp">cls</span><span class="o">.</span><span class="n">configs</span><span class="p">[</span><span class="mi">1</span><span class="p">]:</span>
|
||||
<span class="n">cfg</span> <span class="o">+=</span> <span class="s2">"echo hello"</span>
|
||||
|
||||
<span class="k">return</span> <span class="n">cfg</span></pre></div>
|
||||
</div>
|
||||
</div>
|
||||
<div class='clearall'></div>
|
||||
<div class='section' id='section-21'>
|
||||
<div class='docs'>
|
||||
<div class='octowrap'>
|
||||
<a class='octothorpe' href='#section-21'>#</a>
|
||||
</div>
|
||||
<h2>Get Startup</h2>
|
||||
</div>
|
||||
<div class='code'>
|
||||
<div class="highlight"><pre> <span class="nd">@classmethod</span>
|
||||
<span class="k">def</span> <span class="nf">get_startup</span><span class="p">(</span><span class="bp">cls</span><span class="p">,</span> <span class="n">node</span><span class="p">):</span></pre></div>
|
||||
</div>
|
||||
</div>
|
||||
<div class='clearall'></div>
|
||||
<div class='section' id='section-22'>
|
||||
<div class='docs'>
|
||||
<div class='octowrap'>
|
||||
<a class='octothorpe' href='#section-22'>#</a>
|
||||
</div>
|
||||
<p>Provides a way to dynamically generate the startup commands from the node a service will run on.
|
||||
Defaults to the class definition and can be left out entirely if not needed.</p>
|
||||
</div>
|
||||
<div class='code'>
|
||||
<div class="highlight"><pre> <span class="k">return</span> <span class="bp">cls</span><span class="o">.</span><span class="n">startup</span></pre></div>
|
||||
</div>
|
||||
</div>
|
||||
<div class='clearall'></div>
|
||||
<div class='section' id='section-23'>
|
||||
<div class='docs'>
|
||||
<div class='octowrap'>
|
||||
<a class='octothorpe' href='#section-23'>#</a>
|
||||
</div>
|
||||
<h2>Get Validate</h2>
|
||||
</div>
|
||||
<div class='code'>
|
||||
<div class="highlight"><pre> <span class="nd">@classmethod</span>
|
||||
<span class="k">def</span> <span class="nf">get_validate</span><span class="p">(</span><span class="bp">cls</span><span class="p">,</span> <span class="n">node</span><span class="p">):</span></pre></div>
|
||||
</div>
|
||||
</div>
|
||||
<div class='clearall'></div>
|
||||
<div class='section' id='section-24'>
|
||||
<div class='docs'>
|
||||
<div class='octowrap'>
|
||||
<a class='octothorpe' href='#section-24'>#</a>
|
||||
</div>
|
||||
<p>Provides a way to dynamically generate the validate commands from the node a service will run on.
|
||||
Defaults to the class definition and can be left out entirely if not needed.</p>
|
||||
</div>
|
||||
<div class='code'>
|
||||
<div class="highlight"><pre> <span class="k">return</span> <span class="bp">cls</span><span class="o">.</span><span class="n">validate</span>
|
||||
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
<div class='clearall'></div>
|
||||
</div>
|
||||
</body>
|
|
@ -23,6 +23,7 @@ networking scenarios, security studies, and increasing the size of physical test
|
|||
|[Architecture](architecture.md)|Overview of the architecture|
|
||||
|[Installation](install.md)|Installing from source, packages, & other dependencies|
|
||||
|[Using the GUI](usage.md)|Details on the different node types and options in the GUI|
|
||||
|[Distributed](distributed.md)|Overview and details for running CORE across multiple servers|
|
||||
|[Python Scripting](scripting.md)|How to write python scripts for creating a CORE session|
|
||||
|[gRPC API](grpc.md)|How to enable and use the gRPC API|
|
||||
|[Node Types](machine.md)|Overview of node types supported within CORE|
|
||||
|
|
|
@ -46,15 +46,19 @@ Install Path | Description
|
|||
|
||||
The newly added gRPC API depends on the python library grpcio, which is not commonly found within system repos.
|
||||
To account for this, it is recommended to install the python dependencies using the **requirements.txt** found in
|
||||
the latest release.
|
||||
the latest [CORE Release](https://github.com/coreemu/core/releases).
|
||||
|
||||
```shell
|
||||
sudo pip install -r requirements.txt
|
||||
# for python 2
|
||||
sudo python -m pip install -r requirements.txt
|
||||
# for python 3
|
||||
sudo python3 -m pip install -r requirements.txt
|
||||
```
|
||||
|
||||
## Ubuntu 19.04
|
||||
|
||||
Ubuntu 19.04 can provide all the packages needed at the system level and can be installed as follows:
|
||||
|
||||
```shell
|
||||
# python 2
|
||||
sudo apt install python-configparser python-enum34 python-future python-grpcio python-lxml
|
||||
|
@ -62,6 +66,17 @@ sudo apt install python-configparser python-enum34 python-future python-grpcio p
|
|||
sudo apt install python3-configparser python3-enum34 python3-future python3-grpcio python3-lxml
|
||||
```
|
||||
|
||||
## Other Distros
|
||||
|
||||
The newly added gRPC API depends on the python library grpcio, which is not commonly found within system repos.
|
||||
To account for this, it is recommended to install the python dependencies using the **requirements.txt** found in
|
||||
the latest release.
|
||||
|
||||
```shell
|
||||
# will need to use pip3 for python3 usage
|
||||
sudo pip install -r requirements.txt
|
||||
```
|
||||
|
||||
# Pre-Req Installing OSPF MDR
|
||||
|
||||
Virtual networks generally require some form of routing in order to work (e.g. to automatically populate routing
|
||||
|
@ -86,10 +101,14 @@ sudo dpkg -i quagga-mr_0.99.21mr2.2_amd64.deb
|
|||
Requires building from source, from the latest nightly snapshot.
|
||||
|
||||
```shell
|
||||
# packages needed beyond what's normally required to build core on ubuntu
|
||||
sudo apt install libtool libreadline-dev
|
||||
|
||||
wget https://downloads.pf.itd.nrl.navy.mil/ospf-manet/nightly_snapshots/quagga-svnsnap.tgz
|
||||
tar xzf quagga-svnsnap.tgz
|
||||
cd quagga
|
||||
./configure --enable-user=root --enable-group=root --with-cflags=-ggdb \
|
||||
./bootstrap.sh
|
||||
./configure --disable-doc --enable-user=root --enable-group=root --with-cflags=-ggdb \
|
||||
--sysconfdir=/usr/local/etc/quagga --enable-vtysh \
|
||||
--localstatedir=/var/run/quagga
|
||||
make
|
||||
|
@ -121,9 +140,9 @@ Ubuntu package defaults to using systemd for running as a service.
|
|||
|
||||
```shell
|
||||
# python2
|
||||
sudo apt ./core_python_$VERSION_amd64.deb
|
||||
sudo apt install ./core_python_$VERSION_amd64.deb
|
||||
# python3
|
||||
sudo apt ./core_python3_$VERSION_amd64.deb
|
||||
sudo apt install ./core_python3_$VERSION_amd64.deb
|
||||
```
|
||||
|
||||
Run the CORE GUI as a normal user:
|
||||
|
@ -202,7 +221,11 @@ After running the *core-gui* command, a GUI should appear with a canvas for draw
|
|||
This option is listed here for developers and advanced users who are comfortable patching and building source code.
|
||||
Please consider using the binary packages instead for a simplified install experience.
|
||||
|
||||
## Pre-Req All
|
||||
## Download and Extract Source Code
|
||||
|
||||
You can obtain the CORE source from the [CORE GitHub](https://github.com/coreemu/core) page.
|
||||
|
||||
## Install grpcio-tools
|
||||
|
||||
Python module grpcio-tools is currently needed to generate code from the CORE protobuf file during the build.
|
||||
|
||||
|
@ -213,19 +236,21 @@ pip2 install grpcio-tools
|
|||
pip3 install grpcio-tools
|
||||
```
|
||||
|
||||
## Pre-Reqs Ubuntu 18.04
|
||||
## Distro Requirements
|
||||
|
||||
### Ubuntu 18.04 Requirements
|
||||
|
||||
```shell
|
||||
sudo apt install automake pkg-config gcc libev-dev bridge-utils ebtables python-dev python-setuptools tk libtk-img
|
||||
```
|
||||
|
||||
## Pre-Reqs Ubuntu 16.04
|
||||
### Ubuntu 16.04 Requirements
|
||||
|
||||
```shell
|
||||
sudo apt-get install automake bridge-utils ebtables python-dev libev-dev python-setuptools libtk-img
|
||||
```
|
||||
|
||||
## Pre-Reqs CentOS 7
|
||||
### CentOS 7 with Gnome Desktop Requirements
|
||||
|
||||
```shell
|
||||
sudo yum -y install automake gcc python-devel libev-devel tk
|
||||
|
@ -235,15 +260,13 @@ sudo yum -y install automake gcc python-devel libev-devel tk
|
|||
|
||||
```shell
|
||||
./bootstrap.sh
|
||||
# for python2
|
||||
PYTHON=python2 ./configure
|
||||
# for python3
|
||||
PYTHON=python3 ./configure
|
||||
# use python2 or python3 depending on desired version
|
||||
PYTHON=$VERSION ./configure
|
||||
make
|
||||
sudo make install
|
||||
```
|
||||
|
||||
## Build Documentation
|
||||
# Building Documentation
|
||||
|
||||
Building documentation requires python-sphinx, which is not noted above.
|
||||
|
||||
|
@ -256,14 +279,12 @@ sudo apt install python3-sphinx
|
|||
sudo yum install python3-sphinx
|
||||
|
||||
./bootstrap.sh
|
||||
# for python2
|
||||
PYTHON=python2 ./configure
|
||||
# for python3
|
||||
PYTHON=python3 ./configure
|
||||
# use python2 or python3 depending on desired version
|
||||
PYTHON=$VERSION ./configure
|
||||
make doc
|
||||
```
|
||||
|
||||
## Build Packages
|
||||
# Building Packages
|
||||
Build package commands; DESTDIR is used as the directory to make install into, which is then used for packaging by fpm.
|
||||
|
||||
**NOTE: clean the DESTDIR if re-using the same directory**
|
||||
|
@ -272,10 +293,8 @@ Build package commands, DESTDIR is used to make install into and then for packag
|
|||
|
||||
```shell
|
||||
./bootstrap.sh
|
||||
# for python2
|
||||
PYTHON=python2 ./configure
|
||||
# for python3
|
||||
PYTHON=python3 ./configure
|
||||
# use python2 or python3 depending on desired version
|
||||
PYTHON=$VERSION ./configure
|
||||
make
|
||||
mkdir /tmp/core-build
|
||||
make fpm DESTDIR=/tmp/core-build
|
||||
|
|
190
docs/pycco.css
190
docs/pycco.css
|
@ -1,190 +0,0 @@
|
|||
/*--------------------- Layout and Typography ----------------------------*/
|
||||
body {
|
||||
font-family: 'Palatino Linotype', 'Book Antiqua', Palatino, FreeSerif, serif;
|
||||
font-size: 16px;
|
||||
line-height: 24px;
|
||||
color: #252519;
|
||||
margin: 0; padding: 0;
|
||||
background: #f5f5ff;
|
||||
}
|
||||
a {
|
||||
color: #261a3b;
|
||||
}
|
||||
a:visited {
|
||||
color: #261a3b;
|
||||
}
|
||||
p {
|
||||
margin: 0 0 15px 0;
|
||||
}
|
||||
h1, h2, h3, h4, h5, h6 {
|
||||
margin: 40px 0 15px 0;
|
||||
}
|
||||
h2, h3, h4, h5, h6 {
|
||||
margin-top: 0;
|
||||
}
|
||||
#container {
|
||||
background: white;
|
||||
}
|
||||
#container, div.section {
|
||||
position: relative;
|
||||
}
|
||||
#background {
|
||||
position: absolute;
|
||||
top: 0; left: 580px; right: 0; bottom: 0;
|
||||
background: #f5f5ff;
|
||||
border-left: 1px solid #e5e5ee;
|
||||
z-index: 0;
|
||||
}
|
||||
#jump_to, #jump_page {
|
||||
background: white;
|
||||
-webkit-box-shadow: 0 0 25px #777; -moz-box-shadow: 0 0 25px #777;
|
||||
-webkit-border-bottom-left-radius: 5px; -moz-border-radius-bottomleft: 5px;
|
||||
font: 10px Arial;
|
||||
text-transform: uppercase;
|
||||
cursor: pointer;
|
||||
text-align: right;
|
||||
}
|
||||
#jump_to, #jump_wrapper {
|
||||
position: fixed;
|
||||
right: 0; top: 0;
|
||||
padding: 5px 10px;
|
||||
}
|
||||
#jump_wrapper {
|
||||
padding: 0;
|
||||
display: none;
|
||||
}
|
||||
#jump_to:hover #jump_wrapper {
|
||||
display: block;
|
||||
}
|
||||
#jump_page {
|
||||
padding: 5px 0 3px;
|
||||
margin: 0 0 25px 25px;
|
||||
}
|
||||
#jump_page .source {
|
||||
display: block;
|
||||
padding: 5px 10px;
|
||||
text-decoration: none;
|
||||
border-top: 1px solid #eee;
|
||||
}
|
||||
#jump_page .source:hover {
|
||||
background: #f5f5ff;
|
||||
}
|
||||
#jump_page .source:first-child {
|
||||
}
|
||||
div.docs {
|
||||
float: left;
|
||||
max-width: 500px;
|
||||
min-width: 500px;
|
||||
min-height: 5px;
|
||||
padding: 10px 25px 1px 50px;
|
||||
vertical-align: top;
|
||||
text-align: left;
|
||||
}
|
||||
.docs pre {
|
||||
margin: 15px 0 15px;
|
||||
padding-left: 15px;
|
||||
}
|
||||
.docs p tt, .docs p code {
|
||||
background: #f8f8ff;
|
||||
border: 1px solid #dedede;
|
||||
font-size: 12px;
|
||||
padding: 0 0.2em;
|
||||
}
|
||||
.octowrap {
|
||||
position: relative;
|
||||
}
|
||||
.octothorpe {
|
||||
font: 12px Arial;
|
||||
text-decoration: none;
|
||||
color: #454545;
|
||||
position: absolute;
|
||||
top: 3px; left: -20px;
|
||||
padding: 1px 2px;
|
||||
opacity: 0;
|
||||
-webkit-transition: opacity 0.2s linear;
|
||||
}
|
||||
div.docs:hover .octothorpe {
|
||||
opacity: 1;
|
||||
}
|
||||
div.code {
|
||||
margin-left: 580px;
|
||||
padding: 14px 15px 16px 50px;
|
||||
vertical-align: top;
|
||||
}
|
||||
.code pre, .docs p code {
|
||||
font-size: 12px;
|
||||
}
|
||||
pre, tt, code {
|
||||
line-height: 18px;
|
||||
font-family: Monaco, Consolas, "Lucida Console", monospace;
|
||||
margin: 0; padding: 0;
|
||||
}
|
||||
div.clearall {
|
||||
clear: both;
|
||||
}
|
||||
|
||||
|
||||
/*---------------------- Syntax Highlighting -----------------------------*/
|
||||
td.linenos { background-color: #f0f0f0; padding-right: 10px; }
|
||||
span.lineno { background-color: #f0f0f0; padding: 0 5px 0 5px; }
|
||||
body .hll { background-color: #ffffcc }
|
||||
body .c { color: #408080; font-style: italic } /* Comment */
|
||||
body .err { border: 1px solid #FF0000 } /* Error */
|
||||
body .k { color: #954121 } /* Keyword */
|
||||
body .o { color: #666666 } /* Operator */
|
||||
body .cm { color: #408080; font-style: italic } /* Comment.Multiline */
|
||||
body .cp { color: #BC7A00 } /* Comment.Preproc */
|
||||
body .c1 { color: #408080; font-style: italic } /* Comment.Single */
|
||||
body .cs { color: #408080; font-style: italic } /* Comment.Special */
|
||||
body .gd { color: #A00000 } /* Generic.Deleted */
|
||||
body .ge { font-style: italic } /* Generic.Emph */
|
||||
body .gr { color: #FF0000 } /* Generic.Error */
|
||||
body .gh { color: #000080; font-weight: bold } /* Generic.Heading */
|
||||
body .gi { color: #00A000 } /* Generic.Inserted */
|
||||
body .go { color: #808080 } /* Generic.Output */
|
||||
body .gp { color: #000080; font-weight: bold } /* Generic.Prompt */
|
||||
body .gs { font-weight: bold } /* Generic.Strong */
|
||||
body .gu { color: #800080; font-weight: bold } /* Generic.Subheading */
|
||||
body .gt { color: #0040D0 } /* Generic.Traceback */
|
||||
body .kc { color: #954121 } /* Keyword.Constant */
|
||||
body .kd { color: #954121; font-weight: bold } /* Keyword.Declaration */
|
||||
body .kn { color: #954121; font-weight: bold } /* Keyword.Namespace */
|
||||
body .kp { color: #954121 } /* Keyword.Pseudo */
|
||||
body .kr { color: #954121; font-weight: bold } /* Keyword.Reserved */
|
||||
body .kt { color: #B00040 } /* Keyword.Type */
|
||||
body .m { color: #666666 } /* Literal.Number */
|
||||
body .s { color: #219161 } /* Literal.String */
|
||||
body .na { color: #7D9029 } /* Name.Attribute */
|
||||
body .nb { color: #954121 } /* Name.Builtin */
|
||||
body .nc { color: #0000FF; font-weight: bold } /* Name.Class */
|
||||
body .no { color: #880000 } /* Name.Constant */
|
||||
body .nd { color: #AA22FF } /* Name.Decorator */
|
||||
body .ni { color: #999999; font-weight: bold } /* Name.Entity */
|
||||
body .ne { color: #D2413A; font-weight: bold } /* Name.Exception */
|
||||
body .nf { color: #0000FF } /* Name.Function */
|
||||
body .nl { color: #A0A000 } /* Name.Label */
|
||||
body .nn { color: #0000FF; font-weight: bold } /* Name.Namespace */
|
||||
body .nt { color: #954121; font-weight: bold } /* Name.Tag */
|
||||
body .nv { color: #19469D } /* Name.Variable */
|
||||
body .ow { color: #AA22FF; font-weight: bold } /* Operator.Word */
|
||||
body .w { color: #bbbbbb } /* Text.Whitespace */
|
||||
body .mf { color: #666666 } /* Literal.Number.Float */
|
||||
body .mh { color: #666666 } /* Literal.Number.Hex */
|
||||
body .mi { color: #666666 } /* Literal.Number.Integer */
|
||||
body .mo { color: #666666 } /* Literal.Number.Oct */
|
||||
body .sb { color: #219161 } /* Literal.String.Backtick */
|
||||
body .sc { color: #219161 } /* Literal.String.Char */
|
||||
body .sd { color: #219161; font-style: italic } /* Literal.String.Doc */
|
||||
body .s2 { color: #219161 } /* Literal.String.Double */
|
||||
body .se { color: #BB6622; font-weight: bold } /* Literal.String.Escape */
|
||||
body .sh { color: #219161 } /* Literal.String.Heredoc */
|
||||
body .si { color: #BB6688; font-weight: bold } /* Literal.String.Interpol */
|
||||
body .sx { color: #954121 } /* Literal.String.Other */
|
||||
body .sr { color: #BB6688 } /* Literal.String.Regex */
|
||||
body .s1 { color: #219161 } /* Literal.String.Single */
|
||||
body .ss { color: #19469D } /* Literal.String.Symbol */
|
||||
body .bp { color: #954121 } /* Name.Builtin.Pseudo */
|
||||
body .vc { color: #19469D } /* Name.Variable.Class */
|
||||
body .vg { color: #19469D } /* Name.Variable.Global */
|
||||
body .vi { color: #19469D } /* Name.Variable.Instance */
|
||||
body .il { color: #666666 } /* Literal.Number.Integer.Long */
|
460
docs/services.md
460
docs/services.md
|
@ -3,11 +3,459 @@
|
|||
* Table of Contents
|
||||
{:toc}
|
||||
|
||||
## Custom Services
|
||||
## Services
|
||||
|
||||
CORE supports custom developed services by way of dynamically loading user created python files.
|
||||
Custom services should be placed within the path defined by **custom_services_dir** in the CORE
|
||||
configuration file. This path cannot end in **/services**.
|
||||
CORE uses the concept of services to specify what processes or scripts run on a
|
||||
node when it is started. Layer-3 nodes such as routers and PCs are defined by
|
||||
the services that they run.
|
||||
|
||||
Here is an example service with documentation describing functionality:
|
||||
[Example Service](exampleservice.html)
|
||||
Services may be customized for each node, or new custom services can be
|
||||
created. New node types can be created each having a different name, icon, and
|
||||
set of default services. Each service defines the per-node directories,
|
||||
configuration files, startup index, starting commands, validation commands,
|
||||
shutdown commands, and meta-data associated with a node.
|
||||
|
||||
**NOTE:**
|
||||
Network namespace nodes do not undergo the normal Linux boot process
|
||||
using the **init**, **upstart**, or **systemd** frameworks. These
|
||||
lightweight nodes use configured CORE *services*.
|
||||
|
||||
## Default Services and Node Types
|
||||
|
||||
Here are the default node types and their services:
|
||||
|
||||
* *router* - zebra, OSFPv2, OSPFv3, and IPForward services for IGP
|
||||
link-state routing.
|
||||
* *host* - DefaultRoute and SSH services, representing an SSH server having a
|
||||
default route when connected directly to a router.
|
||||
* *PC* - DefaultRoute service for having a default route when connected
|
||||
directly to a router.
|
||||
* *mdr* - zebra, OSPFv3MDR, and IPForward services for
|
||||
wireless-optimized MANET Designated Router routing.
|
||||
* *prouter* - a physical router, having the same default services as the
|
||||
*router* node type; for incorporating Linux testbed machines into an
|
||||
emulation.
|
||||
|
||||
Configuration files can be automatically generated by each service. For
|
||||
example, CORE automatically generates routing protocol configuration for the
|
||||
router nodes in order to simplify the creation of virtual networks.
|
||||
|
||||
To change the services associated with a node, double-click on the node to
|
||||
invoke its configuration dialog and click on the *Services...* button,
|
||||
or right-click a node and choose *Services...* from the menu.
|
||||
Services are enabled or disabled by clicking on their names. The button next to
|
||||
each service name allows you to customize all aspects of this service for this
|
||||
node. For example, special route redistribution commands could be inserted
|
||||
into the Quagga routing configuration associated with the zebra service.
|
||||
|
||||
To change the default services associated with a node type, use the Node Types
|
||||
dialog available from the *Edit* button at the end of the Layer-3 nodes
|
||||
toolbar, or choose *Node types...* from the *Session* menu. Note that
|
||||
any new services selected are not applied to existing nodes if the nodes have
|
||||
been customized.
|
||||
|
||||
The node types are saved in a **~/.core/nodes.conf** file, not with the
|
||||
**.imn** file. Keep this in mind when changing the default services for
|
||||
existing node types; it may be better to simply create a new node type. It is
|
||||
recommended that you do not change the default built-in node types. The
|
||||
**nodes.conf** file can be copied between CORE machines to save your custom
|
||||
types.
|
||||
|
||||
## Customizing a Service
|
||||
|
||||
A service can be fully customized for a particular node. From the node's
|
||||
configuration dialog, click on the button next to the service name to invoke
|
||||
the service customization dialog for that service.
|
||||
The dialog has three tabs for configuring the different aspects of the service:
|
||||
files, directories, and startup/shutdown.
|
||||
|
||||
**NOTE:**
|
||||
A **yellow** customize icon next to a service indicates that service
|
||||
requires customization (e.g. the *Firewall* service).
|
||||
A **green** customize icon indicates that a custom configuration exists.
|
||||
Click the *Defaults* button when customizing a service to remove any
|
||||
customizations.
|
||||
|
||||
The Files tab is used to display or edit the configuration files or scripts that
|
||||
are used for this service. Files can be selected from a drop-down list, and
|
||||
their contents are displayed in a text entry below. The file contents are
|
||||
generated by the CORE daemon based on the network topology that exists at
|
||||
the time the customization dialog is invoked.
|
||||
|
||||
The Directories tab shows the per-node directories for this service. For the
|
||||
default types, CORE nodes share the same filesystem tree, except for these
|
||||
per-node directories that are defined by the services. For example, the
|
||||
**/var/run/quagga** directory needs to be unique for each node running
|
||||
the Zebra service, because Quagga running on each node needs to write separate
|
||||
PID files to that directory.
|
||||
|
||||
**NOTE:**
|
||||
The **/var/log** and **/var/run** directories are
|
||||
mounted uniquely per-node by default.
|
||||
Per-node mount targets can be found in **/tmp/pycore.nnnnn/nN.conf/**
|
||||
(where *nnnnn* is the session number and *N* is the node number.)
|
||||
|
||||
The Startup/shutdown tab lists commands that are used to start and stop this
|
||||
service. The startup index allows configuring when this service starts relative
|
||||
to the other services enabled for this node; a service with a lower startup
|
||||
index value is started before those with higher values. Because shell scripts
|
||||
generated by the Files tab will not have execute permissions set, the startup
|
||||
commands should include the shell name, with
|
||||
something like ```sh script.sh```.
|
||||
|
||||
Shutdown commands optionally terminate the process(es) associated with this
|
||||
service. Generally they send a kill signal to the running process using the
|
||||
*kill* or *killall* commands. If the service does not terminate
|
||||
the running processes using a shutdown command, the processes will be killed
|
||||
when the *vnoded* daemon is terminated (with *kill -9*) and
|
||||
the namespace destroyed. It is a good practice to
|
||||
specify shutdown commands, which will allow for proper process termination, and
|
||||
for run-time control of stopping and restarting services.
|
||||
|
||||
Validate commands are executed following the startup commands. A validate
|
||||
command can execute a process or script that should return zero if the service
|
||||
has started successfully, and have a non-zero return value for services that
|
||||
have had a problem starting. For example, the *pidof* command will check
|
||||
if a process is running and return zero when found. When a validate command
|
||||
produces a non-zero return value, an exception is generated, which will cause
|
||||
an error to be displayed in the Check Emulation Light.
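
To make this concrete, a hypothetical service wrapping a daemon named *mydaemon* (the daemon and script names are placeholders, not part of CORE) could use command tuples along these lines; the same commands can equally be typed into the Startup/shutdown tab of the customization dialog.

```python
# Illustrative command tuples for a hypothetical daemon named "mydaemon":
# startup runs the generated script through an explicit shell, validate uses
# pidof so a non-zero exit marks the service as failed, and shutdown lets the
# service be stopped or restarted cleanly at run-time.
startup = ("sh mydaemon.sh",)
validate = ("pidof mydaemon",)
shutdown = ("killall mydaemon",)
```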
|
||||
|
||||
**TIP:**
|
||||
To start, stop, and restart services during run-time, right-click a
|
||||
node and use the *Services...* menu.
|
||||
|
||||
## New Services
|
||||
|
||||
Services can save time required to configure nodes, especially if a number
|
||||
of nodes require similar configuration procedures. New services can be
|
||||
introduced to automate tasks.
|
||||
|
||||
### Leveraging UserDefined
|
||||
|
||||
The easiest way to capture the configuration of a new process into a service
|
||||
is by using the **UserDefined** service. This is a blank service where any
|
||||
aspect may be customized. The UserDefined service is convenient for testing
|
||||
ideas for a service before adding a new service type.
|
||||
|
||||
### Creating New Service
|
||||
|
||||
1. Modify the [Example Service File](/daemon/examples/myservices/sample.py)
|
||||
to do what you want. It could generate config/script files, mount per-node
|
||||
directories, start processes/scripts, etc. sample.py is a Python file that
|
||||
defines one or more classes to be imported. You can create multiple Python
|
||||
files that will be imported. Add any new filenames to the __init__.py file (a minimal sketch of such a service file is shown after this list).
|
||||
|
||||
2. Put these files in a directory such as /home/username/.core/myservices.
|
||||
Note that the last component of this directory name **myservices** should not
|
||||
be named something like **services** which conflicts with an existing Python
|
||||
name (the syntax 'from myservices import *' is used).
|
||||
|
||||
3. Add a **custom_services_dir = /home/username/.core/myservices** entry to the
|
||||
/etc/core/core.conf file.
|
||||
|
||||
**NOTE:**
|
||||
The directory name used in **custom_services_dir** should be unique and
|
||||
should not correspond to
|
||||
any existing Python module name. For example, don't use the name **subprocess**
|
||||
or **services**.
|
||||
|
||||
4. Restart the CORE daemon (core-daemon). Any import errors (Python syntax)
|
||||
should be displayed in the /var/log/core-daemon.log log file (or on screen).
|
||||
|
||||
5. Start using your custom service on your nodes. You can create a new node
|
||||
type that uses your service, or change the default services for an existing
|
||||
node type, or change individual nodes.
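
For reference, a new service file could look roughly like the sketch below. This is only a sketch modeled on the bundled sample.py: the import path and the *name*/*group* attributes are assumptions that may differ between CORE releases, and *mydaemon* is a placeholder, so compare against the example file shipped with your version.

```python
# myservices/mydaemonservice.py - minimal custom service sketch (hypothetical).
# The import path below is an assumption; verify it against the bundled sample.py.
from core.service import CoreService, ServiceMode


class MyDaemonService(CoreService):
    name = "MyDaemon"                  # assumed attribute: name shown in the GUI
    group = "Custom"                   # assumed attribute: service group in the GUI
    configs = ("mydaemon.sh",)         # generated file, placed in the node directory
    startup = ("sh mydaemon.sh",)      # generated scripts are not executable, so use sh
    validate = ("pidof mydaemon",)     # non-zero exit marks the service as failed
    shutdown = ("killall mydaemon",)
    validation_mode = ServiceMode.NON_BLOCKING

    @classmethod
    def generate_config(cls, node, filename):
        # build the script contents from the node's current interfaces,
        # mirroring what the example service does
        cfg = "#!/bin/sh\n"
        for ifc in node.netifs():
            cfg += 'echo "starting mydaemon on %s:%s"\n' % (node.name, ifc.name)
        cfg += "mydaemon &\n"
        return cfg
```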
|
||||
|
||||
If you have created a new service type that may be useful to others, please
|
||||
consider contributing it to the CORE project.
|
||||
|
||||
## Available Services
|
||||
|
||||
### BIRD Internet Routing Daemon
|
||||
The [BIRD Internet Routing Daemon](https://bird.network.cz/) is a routing daemon; i.e., software responsible for managing kernel packet forwarding tables. It aims to provide a dynamic IP routing daemon with full support for all modern routing protocols, an easy-to-use configuration interface and a powerful route filtering language, primarily targeted at (but not limited to) Linux and other UNIX-like systems, and distributed under the GNU General Public License. BIRD has a free implementation of several well known and common routing and router-supplemental protocols, namely RIP, RIPng, OSPFv2, OSPFv3, BGP, BFD, and NDP/RA. BIRD supports the IPv4 and IPv6 address families, the Linux kernel and several BSD variants (tested on FreeBSD, NetBSD and OpenBSD). BIRD consists of the bird daemon and the birdc interactive CLI client used for supervision.
|
||||
|
||||
In order to be able to use the BIRD Internet Routing Protocol, you must first install the project on your machine.
|
||||
|
||||
|
||||
#### BIRD Package Install
|
||||
```shell
|
||||
sudo apt-get install bird
|
||||
```
|
||||
|
||||
#### BIRD Source Code Install
|
||||
You can download the BIRD source code from its [official repository](https://gitlab.labs.nic.cz/labs/bird/).
|
||||
```shell
|
||||
./configure
|
||||
make
|
||||
su
|
||||
make install
|
||||
vi /etc/bird/bird.conf
|
||||
```
|
||||
The installation will place the bird directory inside */etc* where you will also find its config file.
|
||||
|
||||
In order to be able to use the BIRD Internet Routing Daemon, you must modify *bird.conf*, because the provided configuration file does nothing beyond allowing the bird daemon to start, which means that nothing else will happen if you run it. Keeran Marquis has a very detailed example on [Configuring BGP using Bird on Ubuntu](https://blog.marquis.co/configuring-bgp-using-bird-on-ubuntu-14-04lts/) which can be used as a building block to implement your custom routing daemon.
|
||||
|
||||
|
||||
### FRRouting
|
||||
FRRouting is a routing software package that provides TCP/IP based routing services with routing protocols support such as BGP, RIP, OSPF, IS-IS and more. FRR also supports special BGP Route Reflector and Route Server behavior. In addition to traditional IPv4 routing protocols, FRR also supports IPv6 routing protocols. With an SNMP daemon that supports the AgentX protocol, FRR provides routing protocol MIB read-only access (SNMP Support).
|
||||
|
||||
FRR currently supports the following protocols:
|
||||
* BGP
|
||||
* OSPFv2
|
||||
* OSPFv3
|
||||
* RIPv1
|
||||
* RIPv2
|
||||
* RIPng
|
||||
* IS-IS
|
||||
* PIM-SM/MSDP
|
||||
* LDP
|
||||
* BFD
|
||||
* Babel
|
||||
* PBR
|
||||
* OpenFabric
|
||||
* EIGRP (alpha)
|
||||
* NHRP (alpha)
|
||||
|
||||
#### FRRouting Package Install
|
||||
```shell
|
||||
sudo apt install curl
|
||||
curl -s https://deb.frrouting.org/frr/keys.asc | sudo apt-key add -
|
||||
FRRVER="frr-stable"
|
||||
echo deb https://deb.frrouting.org/frr $(lsb_release -s -c) $FRRVER | sudo tee -a /etc/apt/sources.list.d/frr.list
|
||||
sudo apt update && sudo apt install frr frr-pythontools
|
||||
```
|
||||
|
||||
#### FRRouting Source Code Install
|
||||
Building FRR from source is the best way to ensure you have the latest features and bug fixes. Details for each supported platform, including dependency package listings, permissions, and other gotchas, are in the developer’s documentation.
|
||||
|
||||
FRR’s source is available on the project [GitHub page](https://github.com/FRRouting/frr).
|
||||
```shell
|
||||
git clone https://github.com/FRRouting/frr.git
|
||||
```
|
||||
|
||||
Change into your FRR source directory and issue:
|
||||
```shell
|
||||
./bootstrap.sh
|
||||
```
|
||||
Then, choose the configuration options that you wish to use for the installation. You can find these options on FRR's [official webpage](http://docs.frrouting.org/en/latest/installation.html). Once you have chosen your configure options, run the configure script and pass the options you chose:
|
||||
```shell
|
||||
./configure \
|
||||
--prefix=/usr \
|
||||
--enable-exampledir=/usr/share/doc/frr/examples/ \
|
||||
--localstatedir=/var/run/frr \
|
||||
--sbindir=/usr/lib/frr \
|
||||
--sysconfdir=/etc/frr \
|
||||
--enable-pimd \
|
||||
--enable-watchfrr \
|
||||
...
|
||||
```
|
||||
After configuring the software, you are ready to build and install it in your system.
|
||||
```shell
|
||||
make && sudo make install
|
||||
```
|
||||
If everything finishes successfully, FRR should be installed.
|
||||
|
||||
|
||||
### Docker
|
||||
The Docker service allows running docker containers within CORE nodes.
|
||||
Running Docker within a CORE node allows for additional extensibility to
|
||||
the CORE services. This allows network applications and protocols to be easily
|
||||
packaged and run on any node.
|
||||
|
||||
This service will add a new group to the services list. The group contains a service called Docker, which just starts the docker daemon within the node but does not run anything itself. It will also scan all docker images on the host machine. If any are tagged with 'core', then they will be added as services to the Docker group, and the corresponding image will be run automatically if that service is selected.
|
||||
|
||||
This requires a recent version of Docker. This was tested using a PPA on Ubuntu with version 1.2.0. The version in the standard Ubuntu repo is too old for this purpose (we need --net host).
|
||||
|
||||
#### Docker Installation
|
||||
To use Docker services, you must first install Docker and the docker-py python library. The library is used to interface with Docker from the python service.
|
||||
|
||||
```shell
|
||||
sudo apt-get install docker.io
|
||||
sudo apt-get install python-pip
|
||||
pip install docker-py
|
||||
```
|
||||
Once everything runs successfully, a Docker group under services will appear. An example use case is to pull an image from [Docker](https://hub.docker.com/). A test image has been uploaded for this purpose:
|
||||
```shell
|
||||
sudo docker pull stuartmarsden/multicastping
|
||||
```
|
||||
This downloads an image which is based on Ubuntu 14.04 with python and twisted. It runs a simple program that sends a multicast ping, and listens for and records any pings it receives. In order for this to appear as a docker service, it must be tagged with core.
|
||||
Find out the id by running 'sudo docker images'. You should see all installed images and the one you want looks like this:
|
||||
```shell
|
||||
stuartmarsden/multicastping latest 4833487e66d2 20 hours
|
||||
ago 487 MB
|
||||
```
|
||||
The id will be different on your machine so use it in the following command:
|
||||
```shell
|
||||
sudo docker tag 4833487e66d2 stuartmarsden/multicastping:core
|
||||
```
|
||||
This image will be listed in the services after we restart the core-daemon:
|
||||
```shell
|
||||
sudo service core-daemon restart
|
||||
```
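
As an optional sanity check (a sketch only, assuming the legacy docker-py client installed above and the default Docker unix socket), the snippet below lists the locally available images carrying a *:core* tag, which mirrors the scan the Docker service performs when it builds its service group:

```python
# List images tagged for CORE using the docker-py (pre-2.0) client installed above.
from docker import Client

client = Client(base_url="unix://var/run/docker.sock")
core_images = [
    image for image in client.images()
    if any(tag.endswith(":core") for tag in (image.get("RepoTags") or []))
]
for image in core_images:
    print(image["RepoTags"])
```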
|
||||
|
||||
### NRL Services
|
||||
The Protean Protocol Prototyping Library (ProtoLib) is a cross-platform library that allows applications to be built while supporting a variety of platforms including Linux, Windows, WinCE/PocketPC, MacOS, FreeBSD, Solaris, etc as well as the simulation environments of NS2 and Opnet. The goal of the Protolib is to provide a set of simple, cross-platform C++ classes that allow development of network protocols and applications that can run on different platforms and in network simulation environments. While Protolib provides an overall framework for developing working protocol implementations, applications, and simulation modules, the individual classes are designed for use as stand-alone components when possible. Although Protolib is principally for research purposes, the code has been constructed to provide robust, efficient performance and adaptability to real applications. In some cases, the code consists of data structures, etc useful in protocol implementations and, in other cases, provides common, cross-platform interfaces to system services and functions (e.g., sockets, timers, routing tables, etc).
|
||||
|
||||
Currently the Naval Research Laboratory uses this library to develop a wide variety of protocols. The NRL Protolib currently supports the following protocols:
|
||||
* MGEN_Sink
|
||||
* NHDP
|
||||
* SMF
|
||||
* OLSR
|
||||
* OLSRv2
|
||||
* OLSRORG
|
||||
* MgenActor
|
||||
* arouted
|
||||
|
||||
#### NRL Installation
|
||||
In order to be able to use the different protocols that NRL offers, you must first download the support library itself. You can get the source code from their [official nightly snapshots website](https://downloads.pf.itd.nrl.navy.mil/protolib/nightly_snapshots/).
|
||||
|
||||
#### Multi-Generator (MGEN)
|
||||
Download MGEN from the [NRL MGEN nightly snapshots](https://downloads.pf.itd.nrl.navy.mil/mgen/nightly_snapshots/), unpack it and copy the protolib library into the main folder *mgen*. Execute the following commands to build the protocol.
|
||||
```shell
|
||||
cd mgen/makefiles
|
||||
make -f Makefile.{os} mgen
|
||||
```
|
||||
|
||||
#### Neighborhood Discovery Protocol (NHDP)
|
||||
Download NHDP from the [NRL NHDP nightly snapshots](https://downloads.pf.itd.nrl.navy.mil/nhdp/nightly_snapshots/).
|
||||
```shell
|
||||
sudo apt-get install libpcap-dev libboost-all-dev
|
||||
wget https://github.com/protocolbuffers/protobuf/releases/download/v3.8.0/protoc-3.8.0-linux-x86_64.zip
|
||||
unzip protoc-3.8.0-linux-x86_64.zip
|
||||
```
|
||||
Then place the binaries in your $PATH. To see the directories currently in your $PATH, you can issue the following command
|
||||
```shell
|
||||
echo $PATH
|
||||
```
|
||||
Go to the downloaded *NHDP* tarball, unpack it and place the protolib library inside the NHDP main folder. Now, compile the NHDP Protocol.
|
||||
```shell
|
||||
cd nhdp/unix
|
||||
make -f Makefile.{os}
|
||||
```
|
||||
|
||||
#### Simplified Multicast Forwarding (SMF)
|
||||
Download SMF from the [NRL SMF nightly snapshot](https://downloads.pf.itd.nrl.navy.mil/smf/nightly_snapshots/), unpack it and place the protolib library inside the *smf* main folder.
|
||||
```shell
|
||||
cd smf/makefiles
|
||||
make -f Makefile.{os}
|
||||
```
|
||||
|
||||
#### Optimized Link State Routing Protocol (OLSR)
|
||||
To install the OLSR protocol, download their source code from their [nightly snapshots](https://downloads.pf.itd.nrl.navy.mil/olsr/nightly_snapshots/nrlolsr-svnsnap.tgz). Unpack it and place the previously downloaded protolib library inside the *nrlolsr* main directory. Then execute the following commands:
|
||||
```shell
|
||||
cd ./unix
|
||||
make -f Makefile.{os}
|
||||
```
|
||||
|
||||
### Quagga Routing Suite
|
||||
Quagga is a routing software suite, providing implementations of OSPFv2, OSPFv3, RIP v1 and v2, RIPng and BGP-4 for Unix platforms, particularly FreeBSD, Linux, Solaris and NetBSD. Quagga is a fork of GNU Zebra which was developed by Kunihiro Ishiguro.
|
||||
The Quagga architecture consists of a core daemon, zebra, which acts as an abstraction layer to the underlying Unix kernel and presents the Zserv API over a Unix or TCP stream to Quagga clients. It is these Zserv clients which typically implement a routing protocol and communicate routing updates to the zebra daemon.
|
||||
|
||||
#### Quagga Package Install
|
||||
```shell
|
||||
sudo apt-get install quagga
|
||||
```
|
||||
|
||||
#### Quagga Source Install
|
||||
First, download the source code from their [official webpage](https://www.quagga.net/).
|
||||
```shell
|
||||
sudo apt-get install gawk
|
||||
```
|
||||
Extract the tarball, go to the directory of your currently extracted code and issue the following commands.
|
||||
```shell
|
||||
./configure
|
||||
make
|
||||
sudo make install
|
||||
```
|
||||
|
||||
### Software Defined Networking
|
||||
Ryu is a component-based software defined networking framework. Ryu provides software components with a well-defined API that makes it easy for developers to create new network management and control applications. Ryu supports various protocols for managing network devices, such as OpenFlow, Netconf, OF-config, etc. For OpenFlow, Ryu fully supports versions 1.0, 1.2, 1.3, 1.4, 1.5 and the Nicira Extensions. All of the code is freely available under the Apache 2.0 license.
|
||||
|
||||
|
||||
#### Installation
|
||||
##### Prerequisites
|
||||
```shell
|
||||
sudo apt-get install gcc python-dev libffi-dev libssl-dev libxml2-dev libxslt1-dev zlib1g-dev
|
||||
```
|
||||
##### Ryu Package Install
|
||||
```shell
|
||||
pip install ryu
|
||||
```
|
||||
##### Ryu Source Install
|
||||
```shell
|
||||
git clone git://github.com/osrg/ryu.git
|
||||
cd ryu; pip install .
|
||||
```
|
||||
|
||||
### Security Services
|
||||
The security services offer a wide variety of protocols capable of satisfying most use cases. They include the IP security (IPsec) protocols, which provide security at the IP layer through authentication and encryption of IP network packets. Virtual Private Networks (VPNs) and firewalls are also available to the user.
|
||||
|
||||
#### Installation
|
||||
```shell
|
||||
sudo apt-get install ipsec-tools racoon openvpn
|
||||
```
|
||||
|
||||
### UCARP
|
||||
UCARP allows a couple of hosts to share common virtual IP addresses in order to provide automatic failover. It is a portable userland implementation of the secure and patent-free Common Address Redundancy Protocol (CARP, OpenBSD's alternative to the patents-bloated VRRP).
|
||||
|
||||
Strong points of the CARP protocol are: very low overhead, cryptographically signed messages, interoperability between different operating systems and no need for any dedicated extra network link between redundant hosts.
|
||||
|
||||
#### Installation
|
||||
```shell
|
||||
sudo apt-get install ucarp
|
||||
```
|
||||
|
||||
### Utilities Services
|
||||
The following services are provided as utilities:
|
||||
* Default Routing
|
||||
* Default Multicast Routing
|
||||
* Static Routing
|
||||
* SSH
|
||||
* DHCP
|
||||
* DHCP Client
|
||||
* FTP
|
||||
* HTTP
|
||||
* PCAP
|
||||
* RADVD
|
||||
* ATD
|
||||
|
||||
#### Installation
|
||||
To install the functionality of the previously mentioned services you can run the following command:
|
||||
```shell
|
||||
sudo apt-get install isc-dhcp-server apache2 libpcap-dev radvd at
|
||||
```
|
||||
|
||||
### XORP routing suite
|
||||
XORP is an open networking platform that supports OSPF, RIP, BGP, OLSR, VRRP, PIM, IGMP (Multicast) and other routing protocols. Most protocols support IPv4 and IPv6 where applicable. It is known to work on various Linux distributions and flavors of BSD.
|
||||
|
||||
XORP started life as a project at the ICSI Center for Open Networking (ICON) at the International Computer Science Institute in Berkeley, California, USA, and spent some time with the team at XORP, Inc. It is now maintained and improved on a volunteer basis by a core of long-term XORP developers and some newer contributors.
|
||||
|
||||
XORP's primary goal is to be an open platform for networking protocol implementations and an alternative to proprietary and closed networking products in the marketplace today. It is the only open source platform to offer integrated multicast capability.
|
||||
|
||||
XORP design philosophy is:
|
||||
* modularity
|
||||
* extensibility
|
||||
* performance
|
||||
* robustness
|
||||
This is achieved by carefully separating functionalities into independent modules, and by providing an API for each module.
|
||||
|
||||
XORP divides into two subsystems. The higher-level ("user-level") subsystem consists of the routing protocols. The lower-level ("kernel") manages the forwarding path, and provides APIs for the higher-level to access.
|
||||
|
||||
User-level XORP uses a multi-process architecture with one process per routing protocol, and a novel inter-process communication mechanism called XRL (XORP Resource Locator).
|
||||
|
||||
The lower-level subsystem can use traditional UNIX kernel forwarding, or the Click modular router. The modularity and independence of the lower-level subsystem from the user-level subsystem allows it to be easily replaced with other solutions, including high-end hardware-based forwarding engines.
|
||||
|
||||
#### Installation
|
||||
In order to install the XORP Routing Suite, you must first install scons, which is needed to compile it.
|
||||
```shell
|
||||
sudo apt-get install scons
|
||||
```
|
||||
Then, download XORP from its official [release web page](http://www.xorp.org/releases/current/).
|
||||
```shell
|
||||
# download and extract a release tarball from http://www.xorp.org/releases/current/
|
||||
cd xorp
|
||||
sudo apt-get install libssl-dev ncurses-dev
|
||||
scons
|
||||
scons install
|
||||
```
|
||||
|
|
311
docs/usage.md
311
docs/usage.md
|
@ -23,7 +23,7 @@ __Note: The CORE GUI is currently in a state of transition. The replacement can
|
|||
|
||||
## Prerequisites
|
||||
|
||||
Beyond instaling CORE, you must have the CORE daemon running. This is done on the command line with either Systemd or SysV
|
||||
Beyond installing CORE, you must have the CORE daemon running. This is done on the command line with either Systemd or SysV
|
||||
```shell
|
||||
# systemd
|
||||
sudo systemctl daemon-reload
|
||||
|
@ -69,51 +69,51 @@ The toolbar is a row of buttons that runs vertically along the left side of the
|
|||
|
||||
When CORE is in Edit mode (the default), the vertical Editing Toolbar exists on the left side of the CORE window. Below are brief descriptions for each toolbar item, starting from the top. Most of the tools are grouped into related sub-menus, which appear when you click on their group icon.
|
||||
|
||||
* |select| *Selection Tool* - default tool for selecting, moving, configuring nodes
|
||||
* |start| *Start button* - starts Execute mode, instantiates the emulation
|
||||
* |link| *Link* - the Link Tool allows network links to be drawn between two nodes by clicking and dragging the mouse
|
||||
* |router| *Network-layer virtual nodes*
|
||||
* |router| *Router* - runs Quagga OSPFv2 and OSPFv3 routing to forward packets
|
||||
* |host| *Host* - emulated server machine having a default route, runs SSH server
|
||||
* |pc| *PC* - basic emulated machine having a default route, runs no processes by default
|
||||
* |mdr| *MDR* - runs Quagga OSPFv3 MDR routing for MANET-optimized routing
|
||||
* |router_green| *PRouter* - physical router represents a real testbed machine
|
||||
* |document_properties| *Edit* - edit node types button invokes the CORE Node Types dialog. New types of nodes may be created having different icons and names. The default services that are started with each node type can be changed here.
|
||||
* |hub| *Link-layer nodes*
|
||||
* |hub| *Hub* - the Ethernet hub forwards incoming packets to every connected node
|
||||
* |lanswitch| *Switch* - the Ethernet switch intelligently forwards incoming packets to attached hosts using an Ethernet address hash table
|
||||
* |wlan| *Wireless LAN* - when routers are connected to this WLAN node, they join a wireless network and an antenna is drawn instead of a connecting line; the WLAN node typically controls connectivity between attached wireless nodes based on the distance between them
|
||||
* |rj45| *RJ45* - with the RJ45 Physical Interface Tool, emulated nodes can be linked to real physical interfaces; using this tool, real networks and devices can be physically connected to the live-running emulation
|
||||
* |tunnel| *Tunnel* - the Tunnel Tool allows connecting together more than one CORE emulation using GRE tunnels
|
||||
*  *Selection Tool* - default tool for selecting, moving, configuring nodes
|
||||
*  *Start button* - starts Execute mode, instantiates the emulation
|
||||
*  *Link* - the Link Tool allows network links to be drawn between two nodes by clicking and dragging the mouse
|
||||
*  *Network-layer virtual nodes*
|
||||
*  *Router* - runs Quagga OSPFv2 and OSPFv3 routing to forward packets
|
||||
*  *Host* - emulated server machine having a default route, runs SSH server
|
||||
*  *PC* - basic emulated machine having a default route, runs no processes by default
|
||||
*  *MDR* - runs Quagga OSPFv3 MDR routing for MANET-optimized routing
|
||||
*  *PRouter* - physical router represents a real testbed machine
|
||||
*  *Edit* - edit node types button invokes the CORE Node Types dialog. New types of nodes may be created having different icons and names. The default services that are started with each node type can be changed here.
|
||||
*  *Link-layer nodes*
|
||||
*  *Hub* - the Ethernet hub forwards incoming packets to every connected node
|
||||
*  *Switch* - the Ethernet switch intelligently forwards incoming packets to attached hosts using an Ethernet address hash table
|
||||
*  *Wireless LAN* - when routers are connected to this WLAN node, they join a wireless network and an antenna is drawn instead of a connecting line; the WLAN node typically controls connectivity between attached wireless nodes based on the distance between them
|
||||
*  *RJ45* - with the RJ45 Physical Interface Tool, emulated nodes can be linked to real physical interfaces; using this tool, real networks and devices can be physically connected to the live-running emulation
|
||||
*  *Tunnel* - the Tunnel Tool allows connecting together more than one CORE emulation using GRE tunnels
|
||||
* *Annotation Tools*
|
||||
* |marker| *Marker* - for drawing marks on the canvas
|
||||
* |oval| *Oval* - for drawing circles on the canvas that appear in the background
|
||||
* |rectangle| *Rectangle* - for drawing rectangles on the canvas that appear in the background
|
||||
* |text| *Text* - for placing text captions on the canvas
|
||||
*  *Marker* - for drawing marks on the canvas
|
||||
*  *Oval* - for drawing circles on the canvas that appear in the background
|
||||
*  *Rectangle* - for drawing rectangles on the canvas that appear in the background
|
||||
*  *Text* - for placing text captions on the canvas
|
||||
|
||||
### Execution Toolbar
|
||||
|
||||
When the Start button is pressed, CORE switches to Execute mode, and the Edit toolbar on the left of the CORE window is replaced with the Execution toolbar. Below are the items on this toolbar, starting from the top.
|
||||
|
||||
* |select| *Selection Tool* - in Execute mode, the Selection Tool can be used for moving nodes around the canvas, and double-clicking on a node will open a shell window for that node; right-clicking on a node invokes a pop-up menu of run-time options for that node
* |stop| *Stop button* - stops Execute mode, terminates the emulation, returns CORE to edit mode.
* |observe| *Observer Widgets Tool* - clicking on this magnifying glass icon invokes a menu for easily selecting an Observer Widget. The icon has a darker gray background when an Observer Widget is active, during which time moving the mouse over a node will pop up an information display for that node.
* |plot| *Plot Tool* - with this tool enabled, clicking on any link will activate the Throughput Widget and draw a small, scrolling throughput plot on the canvas. The plot shows the real-time kbps traffic for that link. The plots may be dragged around the canvas; right-click on a plot to remove it.
* |marker| *Marker* - for drawing freehand lines on the canvas, useful during demonstrations; markings are not saved
* |twonode| *Two-node Tool* - click to choose a starting and ending node, and run a one-time *traceroute* between those nodes or a continuous *ping -R* between nodes. The output is displayed in real time in a results box, while the IP addresses are parsed and the complete network path is highlighted on the CORE display.
* |run| *Run Tool* - this tool allows easily running a command on all or a subset of all nodes. A list box allows selecting any of the nodes. A text entry box allows entering any command. The command should return immediately, otherwise the display will block awaiting response. The *ping* command, for

@ -728,261 +728,6 @@

pseudo-link will be drawn, representing the link between the two nodes on different canvases. Double-clicking on the label at the end of the arrow will jump to the canvas that it links.

## Distributed Emulation

A large emulation scenario can be deployed on multiple emulation servers and controlled by a single GUI. The GUI, representing the entire topology, can be run on one of the emulation servers or on a separate machine. Emulations can be distributed on Linux.

Each machine that will act as an emulation server needs to have CORE installed. The GUI component is not required, but the CORE Python daemon **core-daemon** must be installed. Set the **listenaddr** line in the **/etc/core/core.conf** configuration file so that the CORE Python daemon will respond to commands from other servers:

```shell
### core-daemon configuration options ###
[core-daemon]
pidfile = /var/run/core-daemon.pid
logfile = /var/log/core-daemon.log
listenaddr = 0.0.0.0
```

The **listenaddr** should be set to the address of the interface that should receive CORE API control commands from the other servers; setting **listenaddr = 0.0.0.0** causes the Python daemon to listen on all interfaces. CORE uses TCP port 4038 by default to communicate from the controlling machine (with GUI) to the emulation servers. Make sure that firewall rules are configured as necessary to allow this traffic.

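As a hedged illustration (not a step from the CORE documentation itself), the control port can be opened with whichever firewall front end the server happens to use; the port number 4038 comes from the text above, while the specific firewall tooling is an assumption about your environment:

```shell
# pick the variant matching the firewall in use on each emulation server
sudo ufw allow 4038/tcp                                     # ufw (Debian/Ubuntu)
sudo firewall-cmd --permanent --add-port=4038/tcp && sudo firewall-cmd --reload   # firewalld (RHEL/CentOS)
sudo iptables -A INPUT -p tcp --dport 4038 -j ACCEPT        # plain iptables
```
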
|
||||
|
||||
In order to easily open shells on the emulation servers, the servers should be
|
||||
running an SSH server, and public key login should be enabled. This is
|
||||
accomplished by generating an SSH key for your user if you do not already have
|
||||
one (use **ssh-keygen -t rsa**), and then copying your public key to the
|
||||
authorized_keys file on the server (for example, **ssh-copy-id user@server** or
|
||||
**scp ~/.ssh/id_rsa.pub server:.ssh/authorized_keys**.) When double-clicking on
|
||||
a node during runtime, instead of opening a local shell, the GUI will attempt
|
||||
to SSH to the emulation server to run an interactive shell. The user name used
|
||||
for these remote shells is the same user that is running the CORE GUI.
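Consolidated, the key setup described above looks like the following; **user@server** is a placeholder for your account on each emulation server:

```shell
# generate a key pair once, if you do not already have one
ssh-keygen -t rsa
# install the public key on each emulation server
ssh-copy-id user@server
```
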
**HINT: Here is a quick distributed emulation checklist.**

1. Install the CORE daemon on all servers.
2. Configure public-key SSH access to all servers (if you want to use double-click shells or Widgets).
3. Set **listenaddr=0.0.0.0** in all of the servers' core.conf files, then start (or restart) the daemon (see the example below this list).
4. Select nodes, right-click them, and choose *Assign to* to assign the servers (add servers through *Session*, *Emulation Servers...*).
5. Press the *Start* button to launch the distributed emulation.

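For step 3, restarting the daemon after editing **core.conf** might look like this; the **core-daemon** service name assumes the systemd unit shipped with CORE (use the init script on non-systemd distributions):

```shell
# apply a changed /etc/core/core.conf by restarting the daemon
sudo systemctl restart core-daemon
```
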
Servers are configured by choosing *Emulation servers...* from the *Session* menu. Server parameters are configured in the list below and stored in a *servers.conf* file for use in different scenarios. The IP address and port of the server must be specified. The name of each server will be saved in the topology file as each node's location.

**NOTE:**
The server that the GUI connects with is referred to as the master server.

The user needs to assign nodes to emulation servers in the scenario. Making no assignment means the node will be emulated on the master server. In the configuration window of every node, a drop-down box located between the *Node name* and the *Image* button will select the name of the emulation server. By default, this menu shows *(none)*, indicating that the node will be emulated locally on the master. When entering Execute mode, the CORE GUI will deploy the node on its assigned emulation server.

Another way to assign emulation servers is to select one or more nodes using the select tool (shift-click to select multiple), then right-click one of the nodes and choose *Assign to...*.

The *CORE emulation servers* dialog box may also be used to assign nodes to servers. The assigned server name appears in parentheses next to the node name. To assign all nodes to one of the servers, click on the server name and then the *all nodes* button. Servers that have assigned nodes are shown in blue in the server list. Another option is to first select a subset of nodes, then open the *CORE emulation servers* box and use the *selected nodes* button.

**IMPORTANT:**
Leave the nodes unassigned if they are to be run on the master server. Do not explicitly assign the nodes to the master server.

The emulation server machines should be reachable on the specified port and via SSH. SSH is used when double-clicking a node to open a shell: the GUI will open an SSH prompt to that node's emulation server. Public-key authentication should be configured so that SSH passwords are not needed.

If there is a link between two nodes residing on different servers, the GUI will draw the link with a dashed line, and automatically create necessary tunnels between the nodes when executed. Care should be taken to arrange the topology such that the number of tunnels is minimized. The tunnels carry data between servers to connect nodes as specified in the topology. These tunnels are created using GRE tunneling, similar to the Tunnel Tool.

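For readers unfamiliar with GRE, the sketch below shows what an equivalent Ethernet-over-GRE (gretap) link between two servers looks like when created by hand with iproute2; CORE builds these tunnels automatically, and the interface name and addresses here are placeholders:

```shell
# on server A (192.168.1.10), tunneling to server B (192.168.1.20)
ip link add gretap0 type gretap local 192.168.1.10 remote 192.168.1.20
ip link set gretap0 up
# the gretap interface can then be attached to a bridge carrying node traffic
```
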
Wireless nodes, i.e. those connected to a WLAN node, can be assigned to different emulation servers and participate in the same wireless network only if an EMANE model is used for the WLAN. The basic range model does not work across multiple servers due to the Linux bridging and ebtables rules that are used.

**NOTE:**
The basic range wireless model does not support distributed emulation, but EMANE does.

## Services

CORE uses the concept of services to specify what processes or scripts run on a node when it is started. Layer-3 nodes such as routers and PCs are defined by the services that they run.

Services may be customized for each node, or new custom services can be created. New node types can be created, each having a different name, icon, and set of default services. Each service defines the per-node directories, configuration files, startup index, starting commands, validation commands, shutdown commands, and meta-data associated with a node.

**NOTE:**
Network namespace nodes do not undergo the normal Linux boot process using the **init**, **upstart**, or **systemd** frameworks. These lightweight nodes use configured CORE *services*.

### Default Services and Node Types

Here are the default node types and their services:

* *router* - zebra, OSPFv2, OSPFv3, and IPForward services for IGP link-state routing.
* *host* - DefaultRoute and SSH services, representing an SSH server having a default route when connected directly to a router.
* *PC* - DefaultRoute service for having a default route when connected directly to a router.
* *mdr* - zebra, OSPFv3MDR, and IPForward services for wireless-optimized MANET Designated Router routing.
* *prouter* - a physical router, having the same default services as the *router* node type; for incorporating Linux testbed machines into an emulation.

Configuration files can be automatically generated by each service. For example, CORE automatically generates routing protocol configuration for the router nodes in order to simplify the creation of virtual networks.

To change the services associated with a node, double-click on the node to invoke its configuration dialog and click on the *Services...* button, or right-click a node and choose *Services...* from the menu. Services are enabled or disabled by clicking on their names. The button next to each service name allows you to customize all aspects of this service for this node. For example, special route redistribution commands could be inserted into the Quagga routing configuration associated with the zebra service.

To change the default services associated with a node type, use the Node Types dialog available from the *Edit* button at the end of the Layer-3 nodes toolbar, or choose *Node types...* from the *Session* menu. Note that any new services selected are not applied to existing nodes if the nodes have been customized.

The node types are saved in a **~/.core/nodes.conf** file, not with the **.imn** file. Keep this in mind when changing the default services for existing node types; it may be better to simply create a new node type. It is recommended that you do not change the default built-in node types. The **nodes.conf** file can be copied between CORE machines to save your custom types.

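Copying custom types between machines is an ordinary file copy; the host name below is a placeholder:

```shell
# carry customized node types over to another CORE machine
scp ~/.core/nodes.conf another-core-host:~/.core/nodes.conf
```
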
### Customizing a Service

A service can be fully customized for a particular node. From the node's configuration dialog, click on the button next to the service name to invoke the service customization dialog for that service. The dialog has three tabs for configuring the different aspects of the service: files, directories, and startup/shutdown.

**NOTE:**
A **yellow** customize icon next to a service indicates that the service requires customization (e.g. the *Firewall* service). A **green** customize icon indicates that a custom configuration exists. Click the *Defaults* button when customizing a service to remove any customizations.

The Files tab is used to display or edit the configuration files or scripts that are used for this service. Files can be selected from a drop-down list, and their contents are displayed in a text entry below. The file contents are generated by the CORE daemon based on the network topology that exists at the time the customization dialog is invoked.

The Directories tab shows the per-node directories for this service. For the default types, CORE nodes share the same filesystem tree, except for these per-node directories that are defined by the services. For example, the **/var/run/quagga** directory needs to be unique for each node running the Zebra service, because Quagga running on each node needs to write separate PID files to that directory.

**NOTE:**
The **/var/log** and **/var/run** directories are mounted uniquely per-node by default. Per-node mount targets can be found in **/tmp/pycore.nnnnn/nN.conf/** (where *nnnnn* is the session number and *N* is the node number).

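To see these per-node mounts on a running system, something like the following can be used; the node name **n1** and the wildcarded session number are illustrative:

```shell
# list the unique directories mounted for node n1 in the current session
ls /tmp/pycore.*/n1.conf/
```
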
The Startup/shutdown tab lists commands that are used to start and stop this service. The startup index allows configuring when this service starts relative to the other services enabled for this node; a service with a lower startup index value is started before those with higher values. Because shell scripts generated by the Files tab will not have execute permissions set, the startup commands should include the shell name, with something like ```sh script.sh```.

Shutdown commands optionally terminate the process(es) associated with this service. Generally they send a kill signal to the running process using the *kill* or *killall* commands. If the service does not terminate the running processes using a shutdown command, the processes will be killed when the *vnoded* daemon is terminated (with *kill -9*) and the namespace destroyed. It is a good practice to specify shutdown commands, which will allow for proper process termination, and for run-time control of stopping and restarting services.

Validate commands are executed following the startup commands. A validate command can execute a process or script that should return zero if the service has started successfully, and have a non-zero return value for services that have had a problem starting. For example, the *pidof* command will check if a process is running and return zero when found. When a validate command produces a non-zero return value, an exception is generated, which will cause an error to be displayed in the Check Emulation Light.

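Putting the three command types together, entries for a hypothetical service wrapping a process called **myproc** might look like this (the script and process names are illustrative, not CORE defaults):

```shell
# startup  : invoke the generated script through the shell (it is not executable)
sh myproc.sh
# validate : returns 0 only if the process is running
pidof myproc
# shutdown : terminate the process so it can be cleanly stopped or restarted
killall myproc
```
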
**TIP:**
To start, stop, and restart services during run-time, right-click a node and use the *Services...* menu.

### Creating new Services

Services can save the time required to configure nodes, especially if a number of nodes require similar configuration procedures. New services can be introduced to automate tasks.

The easiest way to capture the configuration of a new process into a service is by using the **UserDefined** service. This is a blank service where any aspect may be customized. The UserDefined service is convenient for testing ideas for a service before adding a new service type.

To introduce new service types, a **myservices/** directory exists in the user's CORE configuration directory, at **~/.core/myservices/**. A detailed **README.txt** file exists in that directory to outline the steps necessary for adding a new service. First, you need to create a small Python file that defines the service; then the **custom_services_dir** entry must be set in the **/etc/core/core.conf** configuration file. A sample is provided in the **myservices/** directory.

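As a sketch, the corresponding **core.conf** entry might look like the following; the path is an example and should point at your own **myservices/** directory, and placing it under the `[core-daemon]` section is an assumption about the configuration layout:

```shell
# /etc/core/core.conf
[core-daemon]
custom_services_dir = /home/username/.core/myservices
```
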
**NOTE:**
The directory name used in **custom_services_dir** should be unique and should not correspond to any existing Python module name. For example, don't use the name **subprocess** or **services**.

If you have created a new service type that may be useful to others, please consider contributing it to the CORE project.

## Check Emulation Light

The |cel| Check Emulation Light, or CEL, is located in the bottom right-hand corner

@ -20,7 +20,7 @@

```diff
 NAME=`basename $0`
 PIDFILE="@CORE_STATE_DIR@/run/$NAME.pid"
 LOG="@CORE_STATE_DIR@/log/$NAME.log"
-CMD="@PYTHON@ @bindir@/$NAME"
+CMD="@bindir@/$NAME"

 get_pid() {
     cat "$PIDFILE"
```

@ -4,7 +4,7 @@ After=network.target

```diff
 [Service]
 Type=simple
-ExecStart=@PYTHON@ @bindir@/core-daemon
+ExecStart=@bindir@/core-daemon
 TasksMax=infinity

 [Install]
```
