commit 5f1a261e29
128 changed files with 7314 additions and 7682 deletions

Jenkinsfile (vendored, normal file, 20 lines added)
@@ -0,0 +1,20 @@
pipeline {
    agent any
    stages {
        stage('build core') {
            steps {
                sh './bootstrap.sh'
                sh './configure'
                sh 'make'
                sh 'sudo make install'
            }
        }
        stage('test core') {
            steps {
                sh 'pytest daemon/tests/test_core.py'
                sh 'pytest daemon/tests/test_gui.py'
                sh 'pytest daemon/tests/test_emane.py'
            }
        }
    }
}
Makefile.am (18 lines changed)

@@ -1,14 +1,9 @@
# CORE
# (c)2010-2012 the Boeing Company.
# See the LICENSE file included in this distribution.
#
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
#
# Top-level Makefile for CORE project.
#

if WANT_DOCS
DOCS = doc
DOCS = docs man
endif

if WANT_GUI

@@ -171,26 +166,15 @@ change-files:
	$(call change-files,scripts/core-daemon)
	$(call change-files,daemon/core/constants.py)

CORE_DOC_HTML = core-html-$(PACKAGE_VERSION)
CORE_DOC_PDF = core-manual-$(PACKAGE_VERSION)
CORE_DOC_SRC = core-python-$(PACKAGE_VERSION)
.PHONY: doc
doc: doc-clean
	$(MAKE) -C doc html
	mv doc/_build/html doc/$(CORE_DOC_HTML)
	tar -C doc -czf $(CORE_DOC_HTML).tgz $(CORE_DOC_HTML)
	$(MAKE) -C doc latexpdf
	mv doc/_build/latex/CORE.pdf $(CORE_DOC_PDF).pdf
	$(MAKE) -C daemon/doc html
	mv daemon/doc/_build/html daemon/doc/$(CORE_DOC_SRC)
	tar -C daemon/doc -czf $(CORE_DOC_SRC).tgz $(CORE_DOC_SRC)

.PHONY: doc-clean
doc-clean:
	-rm -rf doc/_build
	-rm -rf doc/$(CORE_DOC_HTML)
	-rm -rf daemon/doc/_build
	-rm -rf daemon/doc/$(CORE_DOC_SRC)
	-rm -f $(CORE_DOC_HTML).tgz
	-rm -f $(CORE_DOC_SRC).tgz
	-rm -f $(CORE_DOC_PDF).pdf

configure.ac (18 lines changed)

@@ -2,7 +2,7 @@
# Process this file with autoconf to produce a configure script.

# this defines the CORE version number, must be static for AC_INIT
AC_INIT(core, 5.1, core-dev@nrl.navy.mil)
AC_INIT(core, 5.2, core-dev@nrl.navy.mil)

# autoconf and automake initialization
AC_CONFIG_SRCDIR([netns/version.h.in])

@@ -66,7 +66,7 @@ AC_PROG_SED

want_python=no
want_linux_netns=no
if test "x$enable_daemon" = "xyes"; then
if test "x$enable_daemon" = "xyes"; then
want_python=yes
want_linux_netns=yes

@@ -208,10 +208,8 @@ AC_CONFIG_FILES([Makefile
gui/icons/Makefile
scripts/Makefile
scripts/perf/Makefile
doc/Makefile
doc/conf.py
doc/man/Makefile
doc/figures/Makefile
man/Makefile
docs/Makefile
daemon/Makefile
daemon/doc/Makefile
daemon/doc/conf.py

@@ -228,8 +226,8 @@ ${PACKAGE_STRING} Configuration:
Build:
Host System Type: ${host}
C Compiler and flags: ${CC} ${CFLAGS}
Prefix: ${prefix}
Exec Prefix: ${exec_prefix}
Prefix: ${prefix}
Exec Prefix: ${exec_prefix}

GUI:
GUI path: ${CORE_LIB_DIR}

@@ -241,11 +239,11 @@ Daemon:
Python modules: ${pythondir}
Logs: ${CORE_STATE_DIR}/log

Startup: ${with_startup}
Startup: ${with_startup}

Features to build:
Build GUI: ${enable_gui}
Build Daemon: ${enable_daemon}
Build Daemon: ${enable_daemon}
Documentation: ${want_docs}

------------------------------------------------------------------------"

@@ -15,7 +15,7 @@ if WANT_DOCS
endif

SCRIPT_FILES := $(notdir $(wildcard scripts/*))
MAN_FILES := $(notdir $(wildcard ../doc/man/*.1))
MAN_FILES := $(notdir $(wildcard ../man/*.1))

# Python package build
noinst_SCRIPTS = build

@@ -2,21 +2,19 @@
Converts CORE data objects into legacy API messages.
"""

from core import logger
from core.api import coreapi
from core.enumerations import ConfigTlvs
from core.enumerations import NodeTlvs
from core.misc import structutils


def convert_node(node_data):
    """
    Callback to handle an node broadcast out from a session.
    Convenience method for converting NodeData to a packed TLV message.

    :param core.data.NodeData node_data: node data to handle
    :param core.data.NodeData node_data: node data to convert
    :return: packed node message
    """
    logger.debug("converting node data to message: %s", node_data)

    tlv_data = structutils.pack_values(coreapi.CoreNodeTlv, [
        (NodeTlvs.NUMBER, node_data.id),
        (NodeTlvs.TYPE, node_data.node_type),

@@ -39,5 +37,29 @@ def convert_node(node_data):
        (NodeTlvs.ICON, node_data.icon),
        (NodeTlvs.OPAQUE, node_data.opaque)
    ])

    return coreapi.CoreNodeMessage.pack(node_data.message_type, tlv_data)


def convert_config(config_data):
    """
    Convenience method for converting ConfigData to a packed TLV message.

    :param core.data.ConfigData config_data: config data to convert
    :return: packed message
    """
    tlv_data = structutils.pack_values(coreapi.CoreConfigTlv, [
        (ConfigTlvs.NODE, config_data.node),
        (ConfigTlvs.OBJECT, config_data.object),
        (ConfigTlvs.TYPE, config_data.type),
        (ConfigTlvs.DATA_TYPES, config_data.data_types),
        (ConfigTlvs.VALUES, config_data.data_values),
        (ConfigTlvs.CAPTIONS, config_data.captions),
        (ConfigTlvs.BITMAP, config_data.bitmap),
        (ConfigTlvs.POSSIBLE_VALUES, config_data.possible_values),
        (ConfigTlvs.GROUPS, config_data.groups),
        (ConfigTlvs.SESSION, config_data.session),
        (ConfigTlvs.INTERFACE_NUMBER, config_data.interface_number),
        (ConfigTlvs.NETWORK_ID, config_data.network_id),
        (ConfigTlvs.OPAQUE, config_data.opaque),
    ])
    return coreapi.CoreConfMessage.pack(config_data.message_type, tlv_data)

@@ -11,7 +11,6 @@ import threading
|
|||
|
||||
from core import logger
|
||||
from core.api import coreapi
|
||||
from core.conf import ConfigurableManager
|
||||
from core.coreobj import PyCoreNet
|
||||
from core.coreobj import PyCoreNode
|
||||
from core.enumerations import ConfigDataTypes
|
||||
|
@@ -81,7 +80,7 @@ class CoreDistributedServer(object):
|
|||
self.sock = None
|
||||
|
||||
|
||||
class CoreBroker(ConfigurableManager):
|
||||
class CoreBroker(object):
|
||||
"""
|
||||
Helps with brokering messages between CORE daemon servers.
|
||||
"""
|
||||
|
@@ -100,7 +99,7 @@ class CoreBroker(ConfigurableManager):
|
|||
:return: nothing
|
||||
"""
|
||||
|
||||
ConfigurableManager.__init__(self)
|
||||
# ConfigurableManager.__init__(self)
|
||||
self.session = session
|
||||
self.session_clients = []
|
||||
self.session_id_master = None
|
||||
|
@@ -611,62 +610,6 @@ class CoreBroker(ConfigurableManager):
|
|||
"""
|
||||
self.physical_nodes.add(nodenum)
|
||||
|
||||
def configure_reset(self, config_data):
|
||||
"""
|
||||
Ignore reset messages, because node delete responses may still
|
||||
arrive and require the use of nodecounts.
|
||||
|
||||
:param core.conf.ConfigData config_data: configuration data for carrying out a configuration
|
||||
:return: nothing
|
||||
"""
|
||||
return None
|
||||
|
||||
def configure_values(self, config_data):
|
||||
"""
|
||||
Receive configuration message with a list of server:host:port
|
||||
combinations that we"ll need to connect with.
|
||||
|
||||
:param core.conf.ConfigData config_data: configuration data for carrying out a configuration
|
||||
:return: nothing
|
||||
"""
|
||||
values = config_data.data_values
|
||||
session_id = config_data.session
|
||||
|
||||
if values is None:
|
||||
logger.info("emulation server data missing")
|
||||
return None
|
||||
values = values.split("|")
|
||||
|
||||
# string of "server:ip:port,server:ip:port,..."
|
||||
server_strings = values[0]
|
||||
server_list = server_strings.split(",")
|
||||
|
||||
for server in server_list:
|
||||
server_items = server.split(":")
|
||||
(name, host, port) = server_items[:3]
|
||||
|
||||
if host == "":
|
||||
host = None
|
||||
|
||||
if port == "":
|
||||
port = None
|
||||
else:
|
||||
port = int(port)
|
||||
|
||||
if session_id is not None:
|
||||
# receive session ID and my IP from master
|
||||
self.session_id_master = int(session_id.split("|")[0])
|
||||
self.myip = host
|
||||
host = None
|
||||
port = None
|
||||
|
||||
# this connects to the server immediately; maybe we should wait
|
||||
# or spin off a new "client" thread here
|
||||
self.addserver(name, host, port)
|
||||
self.setupserver(name)
|
||||
|
||||
return None
|
||||
|
||||
def handle_message(self, message):
|
||||
"""
|
||||
Handle an API message. Determine whether this needs to be handled
|
||||
|
@@ -733,6 +676,7 @@ class CoreBroker(ConfigurableManager):
|
|||
if server is None:
|
||||
logger.warn("ignoring unknown server: %s", servername)
|
||||
return
|
||||
|
||||
if server.sock is None or server.host is None or server.port is None:
|
||||
logger.info("ignoring disconnected server: %s", servername)
|
||||
return
|
||||
|
|
|
@@ -2,492 +2,397 @@
|
|||
Common support for configurable CORE objects.
|
||||
"""
|
||||
|
||||
import string
|
||||
from collections import OrderedDict
|
||||
|
||||
from core import logger
|
||||
from core.data import ConfigData
|
||||
from core.enumerations import ConfigDataTypes
|
||||
from core.enumerations import ConfigFlags
|
||||
|
||||
|
||||
class ConfigurableManager(object):
|
||||
class ConfigShim(object):
|
||||
"""
|
||||
A generic class for managing Configurables. This class can register
|
||||
with a session to receive Config Messages for setting some parameters
|
||||
for itself or for the Configurables that it manages.
|
||||
Provides helper methods for converting newer configuration values into TLV compatible formats.
|
||||
"""
|
||||
# name corresponds to configuration object field
|
||||
name = ""
|
||||
|
||||
# type corresponds with register message types
|
||||
config_type = None
|
||||
|
||||
def __init__(self):
|
||||
"""
|
||||
Creates a ConfigurableManager instance.
|
||||
"""
|
||||
# configurable key=values, indexed by node number
|
||||
self.configs = {}
|
||||
|
||||
# TODO: fix the need for this and isolate to the mobility class that wants it
|
||||
self._modelclsmap = {}
|
||||
|
||||
def configure(self, session, config_data):
|
||||
"""
|
||||
Handle configure messages. The configuration message sent to a
|
||||
ConfigurableManager usually is used to:
|
||||
1. Request a list of Configurables (request flag)
|
||||
2. Reset manager and clear configs (reset flag)
|
||||
3. Send values that configure the manager or one of its Configurables
|
||||
|
||||
:param core.session.Session session: CORE session object
|
||||
:param ConfigData config_data: configuration data for carrying out a configuration
|
||||
:return: response messages
|
||||
"""
|
||||
|
||||
if config_data.type == ConfigFlags.REQUEST.value:
|
||||
return self.configure_request(config_data)
|
||||
elif config_data.type == ConfigFlags.RESET.value:
|
||||
return self.configure_reset(config_data)
|
||||
else:
|
||||
return self.configure_values(config_data)
|
||||
|
||||
def configure_request(self, config_data):
|
||||
"""
|
||||
Request configuration data.
|
||||
|
||||
:param ConfigData config_data: configuration data for carrying out a configuration
|
||||
:return: nothing
|
||||
"""
|
||||
return None
|
||||
|
||||
def configure_reset(self, config_data):
|
||||
"""
|
||||
By default, resets this manager to clear configs.
|
||||
|
||||
:param ConfigData config_data: configuration data for carrying out a configuration
|
||||
:return: reset response messages, or None
|
||||
"""
|
||||
return self.reset()
|
||||
|
||||
def configure_values(self, config_data):
|
||||
"""
|
||||
Values have been sent to this manager.
|
||||
|
||||
:param ConfigData config_data: configuration data for carrying out a configuration
|
||||
:return: nothing
|
||||
"""
|
||||
return None
|
||||
|
||||
def configure_values_keyvalues(self, config_data, target, keys):
|
||||
"""
|
||||
Helper that can be used for configure_values for parsing in
|
||||
'key=value' strings from a values field. The key name must be
|
||||
in the keys list, and target.key=value is set.
|
||||
|
||||
:param ConfigData config_data: configuration data for carrying out a configuration
|
||||
:param target: target to set attribute values on
|
||||
:param keys: list of keys to verify validity
|
||||
:return: nothing
|
||||
"""
|
||||
values = config_data.data_values
|
||||
|
||||
if values is None:
|
||||
return None
|
||||
|
||||
kvs = values.split('|')
|
||||
for kv in kvs:
|
||||
try:
|
||||
key, value = kv.split('=', 1)
|
||||
if value is not None and not value.strip():
|
||||
value = None
|
||||
except ValueError:
|
||||
# value only
|
||||
key = keys[kvs.index(kv)]
|
||||
value = kv
|
||||
if key not in keys:
|
||||
raise ValueError("invalid key: %s" % key)
|
||||
if value is not None:
|
||||
setattr(target, key, value)
|
||||
|
||||
return None
|
||||
|
||||
def reset(self):
|
||||
"""
|
||||
Reset functionality for the configurable class.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
return None
|
||||
|
||||
def setconfig(self, nodenum, conftype, values):
|
||||
"""
|
||||
Add configuration values for a node to a dictionary; values are
|
||||
usually received from a Configuration Message, and may refer to a
|
||||
node for which no object exists yet
|
||||
|
||||
:param int nodenum: node id
|
||||
:param conftype: configuration types
|
||||
:param values: configuration values
|
||||
:return: nothing
|
||||
"""
|
||||
logger.info("setting config for node(%s): %s - %s", nodenum, conftype, values)
|
||||
conflist = []
|
||||
if nodenum in self.configs:
|
||||
oldlist = self.configs[nodenum]
|
||||
found = False
|
||||
for t, v in oldlist:
|
||||
if t == conftype:
|
||||
# replace existing config
|
||||
found = True
|
||||
conflist.append((conftype, values))
|
||||
else:
|
||||
conflist.append((t, v))
|
||||
if not found:
|
||||
conflist.append((conftype, values))
|
||||
else:
|
||||
conflist.append((conftype, values))
|
||||
self.configs[nodenum] = conflist
|
||||
|
||||
def getconfig(self, nodenum, conftype, defaultvalues):
|
||||
"""
|
||||
Get configuration values for a node; if the values don't exist in
|
||||
our dictionary then return the default values supplied
|
||||
|
||||
:param int nodenum: node id
|
||||
:param conftype: configuration type
|
||||
:param defaultvalues: default values
|
||||
:return: configuration type and default values
|
||||
:type: tuple
|
||||
"""
|
||||
logger.info("getting config for node(%s): %s - default(%s)",
|
||||
nodenum, conftype, defaultvalues)
|
||||
if nodenum in self.configs:
|
||||
# return configured values
|
||||
conflist = self.configs[nodenum]
|
||||
for t, v in conflist:
|
||||
if conftype is None or t == conftype:
|
||||
return t, v
|
||||
# return default values provided (may be None)
|
||||
return conftype, defaultvalues
|
||||
|
||||
def getallconfigs(self, use_clsmap=True):
|
||||
"""
|
||||
Return (nodenum, conftype, values) tuples for all stored configs.
|
||||
Used when reconnecting to a session.
|
||||
|
||||
:param bool use_clsmap: should a class map be used, default to True
|
||||
:return: list of all configurations
|
||||
:rtype: list
|
||||
"""
|
||||
r = []
|
||||
for nodenum in self.configs:
|
||||
for t, v in self.configs[nodenum]:
|
||||
if use_clsmap:
|
||||
t = self._modelclsmap[t]
|
||||
r.append((nodenum, t, v))
|
||||
return r
|
||||
|
||||
def clearconfig(self, nodenum):
|
||||
"""
|
||||
remove configuration values for the specified node;
|
||||
when nodenum is None, remove all configuration values
|
||||
|
||||
:param int nodenum: node id
|
||||
:return: nothing
|
||||
"""
|
||||
if nodenum is None:
|
||||
self.configs = {}
|
||||
return
|
||||
if nodenum in self.configs:
|
||||
self.configs.pop(nodenum)
|
||||
|
||||
def setconfig_keyvalues(self, nodenum, conftype, keyvalues):
|
||||
"""
|
||||
Key values list of tuples for a node.
|
||||
|
||||
:param int nodenum: node id
|
||||
:param conftype: configuration type
|
||||
:param keyvalues: key values
|
||||
:return: nothing
|
||||
"""
|
||||
if conftype not in self._modelclsmap:
|
||||
logger.warn("unknown model type '%s'", conftype)
|
||||
return
|
||||
model = self._modelclsmap[conftype]
|
||||
keys = model.getnames()
|
||||
# defaults are merged with supplied values here
|
||||
values = list(model.getdefaultvalues())
|
||||
for key, value in keyvalues:
|
||||
if key not in keys:
|
||||
logger.warn("Skipping unknown configuration key for %s: '%s'", conftype, key)
|
||||
continue
|
||||
i = keys.index(key)
|
||||
values[i] = value
|
||||
self.setconfig(nodenum, conftype, values)
|
||||
|
||||
def getmodels(self, n):
|
||||
"""
|
||||
Return a list of model classes and values for a net if one has been
|
||||
configured. This is invoked when exporting a session to XML.
|
||||
This assumes self.configs contains an iterable of (model-names, values)
|
||||
and a self._modelclsmapdict exists.
|
||||
|
||||
:param n: network node to get models for
|
||||
:return: list of model and values tuples for the network node
|
||||
:rtype: list
|
||||
"""
|
||||
r = []
|
||||
if n.objid in self.configs:
|
||||
v = self.configs[n.objid]
|
||||
for model in v:
|
||||
cls = self._modelclsmap[model[0]]
|
||||
vals = model[1]
|
||||
r.append((cls, vals))
|
||||
return r
|
||||
|
||||
|
||||
class Configurable(object):
|
||||
"""
|
||||
A generic class for managing configuration parameters.
|
||||
Parameters are sent via Configuration Messages, which allow the GUI
|
||||
to build dynamic dialogs depending on what is being configured.
|
||||
"""
|
||||
name = ""
|
||||
# Configuration items:
|
||||
# ('name', 'type', 'default', 'possible-value-list', 'caption')
|
||||
config_matrix = []
|
||||
config_groups = None
|
||||
bitmap = None
|
||||
|
||||
def __init__(self, session=None, object_id=None):
|
||||
"""
|
||||
Creates a Configurable instance.
|
||||
|
||||
:param core.session.Session session: session for this configurable
|
||||
:param object_id:
|
||||
"""
|
||||
self.session = session
|
||||
self.object_id = object_id
|
||||
|
||||
def reset(self):
|
||||
"""
|
||||
Reset method.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
pass
|
||||
|
||||
def register(self):
|
||||
"""
|
||||
Register method.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
pass
|
||||
|
||||
@classmethod
|
||||
def getdefaultvalues(cls):
|
||||
def str_to_dict(cls, key_values):
|
||||
"""
|
||||
Retrieve default values from configuration matrix.
|
||||
Converts a TLV key/value string into an ordered mapping.
|
||||
|
||||
:return: tuple of default values
|
||||
:rtype: tuple
|
||||
:param str key_values:
|
||||
:return: ordered mapping of key/value pairs
|
||||
:rtype: OrderedDict
|
||||
"""
|
||||
return tuple(map(lambda x: x[2], cls.config_matrix))
|
||||
key_values = key_values.split("|")
|
||||
values = OrderedDict()
|
||||
for key_value in key_values:
|
||||
key, value = key_value.split("=", 1)
|
||||
values[key] = value
|
||||
return values
|
||||
|
||||
@classmethod
|
||||
def getnames(cls):
|
||||
def groups_to_str(cls, config_groups):
|
||||
"""
|
||||
Retrieve name values from configuration matrix.
|
||||
Converts configuration groups to a TLV formatted string.
|
||||
|
||||
:return: tuple of name values
|
||||
:rtype: tuple
|
||||
:param list[ConfigGroup] config_groups: configuration groups to format
|
||||
:return: TLV configuration group string
|
||||
:rtype: str
|
||||
"""
|
||||
return tuple(map(lambda x: x[0], cls.config_matrix))
|
||||
group_strings = []
|
||||
for config_group in config_groups:
|
||||
group_string = "%s:%s-%s" % (config_group.name, config_group.start, config_group.stop)
|
||||
group_strings.append(group_string)
|
||||
return "|".join(group_strings)
|
||||
|
||||
@classmethod
|
||||
def configure(cls, manager, config_data):
|
||||
"""
|
||||
Handle configuration messages for this object.
|
||||
|
||||
:param ConfigurableManager manager: configuration manager
|
||||
:param config_data: configuration data
|
||||
:return: configuration data object
|
||||
:rtype: ConfigData
|
||||
"""
|
||||
reply = None
|
||||
node_id = config_data.node
|
||||
object_name = config_data.object
|
||||
config_type = config_data.type
|
||||
interface_id = config_data.interface_number
|
||||
values_str = config_data.data_values
|
||||
|
||||
if interface_id is not None:
|
||||
node_id = node_id * 1000 + interface_id
|
||||
|
||||
logger.debug("received configure message for %s nodenum:%s", cls.name, str(node_id))
|
||||
if config_type == ConfigFlags.REQUEST.value:
|
||||
logger.info("replying to configure request for %s model", cls.name)
|
||||
# when object name is "all", the reply to this request may be None
|
||||
# if this node has not been configured for this model; otherwise we
|
||||
# reply with the defaults for this model
|
||||
if object_name == "all":
|
||||
defaults = None
|
||||
typeflags = ConfigFlags.UPDATE.value
|
||||
else:
|
||||
defaults = cls.getdefaultvalues()
|
||||
typeflags = ConfigFlags.NONE.value
|
||||
values = manager.getconfig(node_id, cls.name, defaults)[1]
|
||||
if values is None:
|
||||
logger.warn("no active configuration for node (%s), ignoring request")
|
||||
# node has no active config for this model (don't send defaults)
|
||||
return None
|
||||
# reply with config options
|
||||
reply = cls.config_data(0, node_id, typeflags, values)
|
||||
elif config_type == ConfigFlags.RESET.value:
|
||||
if object_name == "all":
|
||||
manager.clearconfig(node_id)
|
||||
# elif conftype == coreapi.CONF_TYPE_FLAGS_UPDATE:
|
||||
else:
|
||||
# store the configuration values for later use, when the node
|
||||
# object has been created
|
||||
if object_name is None:
|
||||
logger.info("no configuration object for node %s", node_id)
|
||||
return None
|
||||
defaults = cls.getdefaultvalues()
|
||||
if values_str is None:
|
||||
# use default or preconfigured values
|
||||
values = manager.getconfig(node_id, cls.name, defaults)[1]
|
||||
else:
|
||||
# use new values supplied from the conf message
|
||||
values = values_str.split('|')
|
||||
# determine new or old style config
|
||||
new = cls.haskeyvalues(values)
|
||||
if new:
|
||||
new_values = list(defaults)
|
||||
keys = cls.getnames()
|
||||
for v in values:
|
||||
key, value = v.split('=', 1)
|
||||
try:
|
||||
new_values[keys.index(key)] = value
|
||||
except ValueError:
|
||||
logger.info("warning: ignoring invalid key '%s'" % key)
|
||||
values = new_values
|
||||
manager.setconfig(node_id, object_name, values)
|
||||
|
||||
return reply
|
||||
|
||||
@classmethod
|
||||
def config_data(cls, flags, node_id, type_flags, values):
|
||||
def config_data(cls, flags, node_id, type_flags, configurable_options, config):
|
||||
"""
|
||||
Convert this class to a Config API message. Some TLVs are defined
|
||||
by the class, but node number, conf type flags, and values must
|
||||
be passed in.
|
||||
|
||||
:param flags: message flags
|
||||
:param int flags: message flags
|
||||
:param int node_id: node id
|
||||
:param type_flags: type flags
|
||||
:param values: values
|
||||
:param int type_flags: type flags
|
||||
:param ConfigurableOptions configurable_options: options to create config data for
|
||||
:param dict config: configuration values for options
|
||||
:return: configuration data object
|
||||
:rtype: ConfigData
|
||||
"""
|
||||
keys = cls.getnames()
|
||||
keyvalues = map(lambda a, b: "%s=%s" % (a, b), keys, values)
|
||||
values_str = string.join(keyvalues, '|')
|
||||
datatypes = tuple(map(lambda x: x[1], cls.config_matrix))
|
||||
captions = reduce(lambda a, b: a + '|' + b, map(lambda x: x[4], cls.config_matrix))
|
||||
possible_valuess = reduce(lambda a, b: a + '|' + b, map(lambda x: x[3], cls.config_matrix))
|
||||
key_values = None
|
||||
captions = None
|
||||
data_types = []
|
||||
possible_values = []
|
||||
logger.debug("configurable: %s", configurable_options)
|
||||
logger.debug("configuration options: %s", configurable_options.configurations)
|
||||
logger.debug("configuration data: %s", config)
|
||||
for configuration in configurable_options.configurations():
|
||||
if not captions:
|
||||
captions = configuration.label
|
||||
else:
|
||||
captions += "|%s" % configuration.label
|
||||
|
||||
data_types.append(configuration.type.value)
|
||||
|
||||
options = ",".join(configuration.options)
|
||||
possible_values.append(options)
|
||||
|
||||
_id = configuration.id
|
||||
config_value = config.get(_id, configuration.default)
|
||||
key_value = "%s=%s" % (_id, config_value)
|
||||
if not key_values:
|
||||
key_values = key_value
|
||||
else:
|
||||
key_values += "|%s" % key_value
|
||||
|
||||
groups_str = cls.groups_to_str(configurable_options.config_groups())
|
||||
return ConfigData(
|
||||
message_type=flags,
|
||||
node=node_id,
|
||||
object=cls.name,
|
||||
object=configurable_options.name,
|
||||
type=type_flags,
|
||||
data_types=datatypes,
|
||||
data_values=values_str,
|
||||
data_types=tuple(data_types),
|
||||
data_values=key_values,
|
||||
captions=captions,
|
||||
possible_values=possible_valuess,
|
||||
bitmap=cls.bitmap,
|
||||
groups=cls.config_groups
|
||||
possible_values="|".join(possible_values),
|
||||
bitmap=configurable_options.bitmap,
|
||||
groups=groups_str
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def booltooffon(value):
|
||||
"""
|
||||
Convenience helper turns bool into on (True) or off (False) string.
|
||||
|
||||
:param str value: value to retrieve on/off value for
|
||||
:return: on or off string
|
||||
:rtype: str
|
||||
"""
|
||||
if value == "1" or value == "true" or value == "on":
|
||||
return "on"
|
||||
else:
|
||||
return "off"
|
||||
class Configuration(object):
|
||||
"""
|
||||
Represents a configuration option.
|
||||
"""
|
||||
|
||||
@staticmethod
|
||||
def offontobool(value):
|
||||
def __init__(self, _id, _type, label=None, default="", options=None):
|
||||
"""
|
||||
Convenience helper for converting an on/off string to a integer.
|
||||
Creates a Configuration object.
|
||||
|
||||
:param str value: on/off string
|
||||
:return: on/off integer value
|
||||
:rtype: int
|
||||
:param str _id: unique name for configuration
|
||||
:param core.enumerations.ConfigDataTypes _type: configuration data type
|
||||
:param str label: configuration label for display
|
||||
:param str default: default value for configuration
|
||||
:param list options: list options if this is a configuration with a combobox
|
||||
"""
|
||||
if type(value) == str:
|
||||
if value.lower() == "on":
|
||||
return 1
|
||||
elif value.lower() == "off":
|
||||
return 0
|
||||
return value
|
||||
self.id = _id
|
||||
self.type = _type
|
||||
self.default = default
|
||||
if not options:
|
||||
options = []
|
||||
self.options = options
|
||||
if not label:
|
||||
label = _id
|
||||
self.label = label
|
||||
|
||||
@classmethod
|
||||
def valueof(cls, name, values):
|
||||
def __str__(self):
|
||||
return "%s(id=%s, type=%s, default=%s, options=%s)" % (
|
||||
self.__class__.__name__, self.id, self.type, self.default, self.options)
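
Not part of the diff: a Configuration is just a named bundle of id, type, label, default and optional combobox values. A hedged example of declaring a couple of options, assuming the core package from this commit is importable (the option names "ssid" and "mode" are purely illustrative):

from core.conf import Configuration
from core.enumerations import ConfigDataTypes

options = [
    Configuration(_id="ssid", _type=ConfigDataTypes.STRING, default="core", label="SSID"),
    Configuration(_id="mode", _type=ConfigDataTypes.STRING, default="g",
                  options=["a", "b", "g"], label="802.11 mode"),
]
for option in options:
    # __str__ above reports id, type, default and options
    print(option)
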
|
||||
|
||||
|
||||
class ConfigurableManager(object):
|
||||
"""
|
||||
Provides convenience methods for storing and retrieving configuration options for nodes.
|
||||
"""
|
||||
_default_node = -1
|
||||
_default_type = _default_node
|
||||
|
||||
def __init__(self):
|
||||
"""
|
||||
Helper to return a value by the name defined in confmatrix.
|
||||
Checks if it is boolean
|
||||
|
||||
:param str name: name to get value of
|
||||
:param values: values to get value from
|
||||
:return: value for name
|
||||
Creates a ConfigurableManager object.
|
||||
"""
|
||||
i = cls.getnames().index(name)
|
||||
if cls.config_matrix[i][1] == ConfigDataTypes.BOOL.value and values[i] != "":
|
||||
return cls.booltooffon(values[i])
|
||||
else:
|
||||
return values[i]
|
||||
self.node_configurations = {}
|
||||
|
||||
@staticmethod
|
||||
def haskeyvalues(values):
|
||||
def nodes(self):
|
||||
"""
|
||||
Helper to check for list of key=value pairs versus a plain old
|
||||
list of values. Returns True if all elements are "key=value".
|
||||
Retrieves the ids of all node configurations known by this manager.
|
||||
|
||||
:param values: items to check for key/value pairs
|
||||
:return: True if all values are key/value pairs, False otherwise
|
||||
:rtype: bool
|
||||
"""
|
||||
if len(values) == 0:
|
||||
return False
|
||||
for v in values:
|
||||
if "=" not in v:
|
||||
return False
|
||||
return True
|
||||
|
||||
def getkeyvaluelist(self):
|
||||
"""
|
||||
Helper to return a list of (key, value) tuples. Keys come from
|
||||
configuration matrix and values are instance attributes.
|
||||
|
||||
:return: tuples of key value pairs
|
||||
:return: list of node ids
|
||||
:rtype: list
|
||||
"""
|
||||
key_values = []
|
||||
return [node_id for node_id in self.node_configurations.iterkeys() if node_id != self._default_node]
|
||||
|
||||
for name in self.getnames():
|
||||
if hasattr(self, name):
|
||||
value = getattr(self, name)
|
||||
key_values.append((name, value))
|
||||
def config_reset(self, node_id=None):
|
||||
"""
|
||||
Clears all configurations or configuration for a specific node.
|
||||
|
||||
return key_values
|
||||
:param int node_id: node id to clear configurations for, default is None and clears all configurations
|
||||
:return: nothing
|
||||
"""
|
||||
logger.debug("resetting all configurations: %s", self.__class__.__name__)
|
||||
if not node_id:
|
||||
self.node_configurations.clear()
|
||||
elif node_id in self.node_configurations:
|
||||
self.node_configurations.pop(node_id)
|
||||
|
||||
def set_config(self, _id, value, node_id=_default_node, config_type=_default_type):
|
||||
"""
|
||||
Set a specific configuration value for a node and configuration type.
|
||||
|
||||
:param str _id: configuration key
|
||||
:param str value: configuration value
|
||||
:param int node_id: node id to store configuration for
|
||||
:param str config_type: configuration type to store configuration for
|
||||
:return: nothing
|
||||
"""
|
||||
logger.debug("setting config for node(%s) type(%s): %s=%s", node_id, config_type, _id, value)
|
||||
node_configs = self.node_configurations.setdefault(node_id, OrderedDict())
|
||||
node_type_configs = node_configs.setdefault(config_type, OrderedDict())
|
||||
node_type_configs[_id] = value
|
||||
|
||||
def set_configs(self, config, node_id=_default_node, config_type=_default_type):
|
||||
"""
|
||||
Set configurations for a node and configuration type.
|
||||
|
||||
:param dict config: configurations to set
|
||||
:param int node_id: node id to store configuration for
|
||||
:param str config_type: configuration type to store configuration for
|
||||
:return: nothing
|
||||
"""
|
||||
logger.debug("setting config for node(%s) type(%s): %s", node_id, config_type, config)
|
||||
node_configs = self.node_configurations.setdefault(node_id, OrderedDict())
|
||||
node_configs[config_type] = config
|
||||
|
||||
def get_config(self, _id, node_id=_default_node, config_type=_default_type, default=None):
|
||||
"""
|
||||
Retrieves a specific configuration for a node and configuration type.
|
||||
|
||||
:param str _id: specific configuration to retrieve
|
||||
:param int node_id: node id to store configuration for
|
||||
:param str config_type: configuration type to store configuration for
|
||||
:param default: default value to return when value is not found
|
||||
:return: configuration value
|
||||
:rtype: str
|
||||
"""
|
||||
logger.debug("getting config for node(%s) type(%s): %s", node_id, config_type, _id)
|
||||
result = default
|
||||
node_type_configs = self.get_configs(node_id, config_type)
|
||||
if node_type_configs:
|
||||
result = node_type_configs.get(_id, default)
|
||||
return result
|
||||
|
||||
def get_configs(self, node_id=_default_node, config_type=_default_type):
|
||||
"""
|
||||
Retrieve configurations for a node and configuration type.
|
||||
|
||||
:param int node_id: node id to store configuration for
|
||||
:param str config_type: configuration type to store configuration for
|
||||
:return: configurations
|
||||
:rtype: dict
|
||||
"""
|
||||
logger.debug("getting configs for node(%s) type(%s)", node_id, config_type)
|
||||
result = None
|
||||
node_configs = self.node_configurations.get(node_id)
|
||||
if node_configs:
|
||||
result = node_configs.get(config_type)
|
||||
return result
|
||||
|
||||
def get_all_configs(self, node_id=_default_node):
|
||||
"""
|
||||
Retrieve all current configuration types for a node.
|
||||
|
||||
:param int node_id: node id to retrieve configurations for
|
||||
:return: all configuration types for a node
|
||||
:rtype: dict
|
||||
"""
|
||||
logger.debug("getting all configs for node(%s)", node_id)
|
||||
return self.node_configurations.get(node_id)
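
Not part of the diff: the new ConfigurableManager is essentially a two-level dictionary keyed by node id and configuration type, with sentinel ids (-1) for session-wide values. A usage sketch of the storage API defined above, in the daemon's Python 2 style; the keys and values are made up, and it assumes the core package from this commit is importable:

from core.conf import ConfigurableManager

manager = ConfigurableManager()

# session-wide value, stored under the _default_node/_default_type sentinels
manager.set_config("controlnet", "172.16.0.0/24")
print(manager.get_config("controlnet"))  # 172.16.0.0/24

# per-node, per-type values
manager.set_configs({"speed": "10.0", "delay": "25000"}, node_id=3, config_type="ns2script")
print(manager.get_configs(node_id=3, config_type="ns2script"))  # {'speed': '10.0', 'delay': '25000'}
print(manager.nodes())  # [3] - the sentinel node id is filtered out
manager.config_reset(node_id=3)
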
|
||||
|
||||
|
||||
class ConfigGroup(object):
|
||||
"""
|
||||
Defines configuration group tabs used for display by ConfigurationOptions.
|
||||
"""
|
||||
|
||||
def __init__(self, name, start, stop):
|
||||
"""
|
||||
Creates a ConfigGroup object.
|
||||
|
||||
:param str name: configuration group display name
|
||||
:param int start: configurations start index for this group
|
||||
:param int stop: configurations stop index for this group
|
||||
"""
|
||||
self.name = name
|
||||
self.start = start
|
||||
self.stop = stop
|
||||
|
||||
|
||||
class ConfigurableOptions(object):
|
||||
"""
|
||||
Provides a base for defining configuration options within CORE.
|
||||
"""
|
||||
name = None
|
||||
bitmap = None
|
||||
options = []
|
||||
|
||||
@classmethod
|
||||
def configurations(cls):
|
||||
"""
|
||||
Provides the configurations for this class.
|
||||
|
||||
:return: configurations
|
||||
:rtype: list[Configuration]
|
||||
"""
|
||||
return cls.options
|
||||
|
||||
@classmethod
|
||||
def config_groups(cls):
|
||||
"""
|
||||
Defines how configurations are grouped.
|
||||
|
||||
:return: configuration group definition
|
||||
:rtype: list[ConfigGroup]
|
||||
"""
|
||||
return [
|
||||
ConfigGroup("Options", 1, len(cls.configurations()))
|
||||
]
|
||||
|
||||
@classmethod
|
||||
def default_values(cls):
|
||||
"""
|
||||
Provides an ordered mapping of configuration keys to default values.
|
||||
|
||||
:return: ordered configuration mapping default values
|
||||
:rtype: OrderedDict
|
||||
"""
|
||||
return OrderedDict([(config.id, config.default) for config in cls.configurations()])
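
Not part of the diff: ConfigurableOptions is the declarative replacement for the old config_matrix tuples; a subclass only lists Configuration objects and inherits config_groups() and default_values(). A hedged sketch of such a subclass (the class name, option names and defaults are invented; assumes the core package from this commit is importable):

from core.conf import Configuration, ConfigurableOptions
from core.enumerations import ConfigDataTypes


class ExampleOptions(ConfigurableOptions):
    # hypothetical options class, not part of this commit
    name = "exampleoptions"
    options = [
        Configuration(_id="range", _type=ConfigDataTypes.STRING, default="275.0", label="range (pixels)"),
        Configuration(_id="enabled", _type=ConfigDataTypes.BOOL, default="1", label="enabled"),
    ]


print(ExampleOptions.default_values())
# OrderedDict([('range', '275.0'), ('enabled', '1')])
for group in ExampleOptions.config_groups():
    print("%s: %s-%s" % (group.name, group.start, group.stop))  # Options: 1-2
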
|
||||
|
||||
|
||||
class ModelManager(ConfigurableManager):
|
||||
"""
|
||||
Helps handle setting models for nodes and managing their model configurations.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
"""
|
||||
Creates a ModelManager object.
|
||||
"""
|
||||
super(ModelManager, self).__init__()
|
||||
self.models = {}
|
||||
self.node_models = {}
|
||||
|
||||
def set_model_config(self, node_id, model_name, config=None):
|
||||
"""
|
||||
Set configuration data for a model.
|
||||
|
||||
:param int node_id: node id to set model configuration for
|
||||
:param str model_name: model to set configuration for
|
||||
:param dict config: configuration data to set for model
|
||||
:return: nothing
|
||||
"""
|
||||
# get model class to configure
|
||||
model_class = self.models.get(model_name)
|
||||
if not model_class:
|
||||
raise ValueError("%s is an invalid model" % model_name)
|
||||
|
||||
# retrieve default values
|
||||
model_config = self.get_model_config(node_id, model_name)
|
||||
if not config:
|
||||
config = {}
|
||||
for key, value in config.iteritems():
|
||||
model_config[key] = value
|
||||
|
||||
# set as node model for startup
|
||||
self.node_models[node_id] = model_name
|
||||
|
||||
# set configuration
|
||||
self.set_configs(model_config, node_id=node_id, config_type=model_name)
|
||||
|
||||
def get_model_config(self, node_id, model_name):
|
||||
"""
|
||||
Retrieve configuration data for a model.
|
||||
|
||||
:param int node_id: node id to set model configuration for
|
||||
:param str model_name: model to set configuration for
|
||||
:return: current model configuration for node
|
||||
:rtype: dict
|
||||
"""
|
||||
# get model class to configure
|
||||
model_class = self.models.get(model_name)
|
||||
if not model_class:
|
||||
raise ValueError("%s is an invalid model" % model_name)
|
||||
|
||||
config = self.get_configs(node_id=node_id, config_type=model_name)
|
||||
if not config:
|
||||
# set default values, when not already set
|
||||
config = model_class.default_values()
|
||||
self.set_configs(config, node_id=node_id, config_type=model_name)
|
||||
|
||||
return config
|
||||
|
||||
def set_model(self, node, model_class, config=None):
|
||||
"""
|
||||
Set model and model configuration for node.
|
||||
|
||||
:param node: node to set model for
|
||||
:param model_class: model class to set for node
|
||||
:param dict config: model configuration, None for default configuration
|
||||
:return: nothing
|
||||
"""
|
||||
logger.info("setting mobility model(%s) for node(%s): %s", model_class.name, node.objid, config)
|
||||
self.set_model_config(node.objid, model_class.name, config)
|
||||
config = self.get_model_config(node.objid, model_class.name)
|
||||
node.setmodel(model_class, config)
|
||||
|
||||
def get_models(self, node):
|
||||
"""
|
||||
Return a list of model classes and values for a net if one has been
|
||||
configured. This is invoked when exporting a session to XML.
|
||||
|
||||
:param node: network node to get models for
|
||||
:return: list of model and values tuples for the network node
|
||||
:rtype: list
|
||||
"""
|
||||
all_configs = self.get_all_configs(node.objid)
|
||||
if not all_configs:
|
||||
all_configs = {}
|
||||
|
||||
models = []
|
||||
for model_name, config in all_configs.iteritems():
|
||||
if model_name == ModelManager._default_node:
|
||||
continue
|
||||
model_class = self.models[model_name]
|
||||
models.append((model_class, config))
|
||||
|
||||
logger.debug("models for node(%s): %s", node.objid, models)
|
||||
return models
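
Not part of the diff: ModelManager ties the generic store to concrete model classes - models maps names to classes, node_models remembers which model a node uses, and set_model_config/get_model_config merge user values over each model's default_values(). A standalone sketch using a stand-in model class (everything named Fake* is invented for illustration):

from collections import OrderedDict

from core.conf import ModelManager


class FakeModel(object):
    # stand-in for a mobility/EMANE model; only what ModelManager needs here
    name = "fakemodel"

    @classmethod
    def default_values(cls):
        return OrderedDict([("range", "275.0"), ("bandwidth", "54000000")])


manager = ModelManager()
manager.models[FakeModel.name] = FakeModel

# user values are merged over the defaults, and the node's model is remembered
manager.set_model_config(node_id=1, model_name="fakemodel", config={"range": "500.0"})
print(manager.get_model_config(node_id=1, model_name="fakemodel"))
# OrderedDict([('range', '500.0'), ('bandwidth', '54000000')])
print(manager.node_models)  # {1: 'fakemodel'}
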
|
||||
|
|
|
@@ -10,14 +10,20 @@ import shutil
|
|||
import sys
|
||||
import threading
|
||||
import time
|
||||
from itertools import repeat
|
||||
|
||||
from core import logger
|
||||
from core.api import coreapi
|
||||
from core.data import ConfigData
|
||||
from core.api import dataconversion
|
||||
from core.conf import ConfigShim
|
||||
from core.data import ConfigData, ExceptionData
|
||||
from core.data import EventData
|
||||
from core.data import FileData
|
||||
from core.emulator.emudata import InterfaceData
|
||||
from core.emulator.emudata import LinkOptions
|
||||
from core.emulator.emudata import NodeOptions
|
||||
from core.enumerations import ConfigDataTypes
|
||||
from core.enumerations import ConfigFlags
|
||||
from core.enumerations import ConfigTlvs
|
||||
from core.enumerations import EventTlvs
|
||||
from core.enumerations import EventTypes
|
||||
|
@@ -35,6 +41,8 @@ from core.enumerations import SessionTlvs
|
|||
from core.misc import nodeutils
|
||||
from core.misc import structutils
|
||||
from core.misc import utils
|
||||
from core.service import ServiceManager
|
||||
from core.service import ServiceShim
|
||||
|
||||
|
||||
class CoreHandler(SocketServer.BaseRequestHandler):
|
||||
|
@@ -261,24 +269,7 @@ class CoreHandler(SocketServer.BaseRequestHandler):
|
|||
:return: nothing
|
||||
"""
|
||||
logger.debug("handling broadcast config: %s", config_data)
|
||||
|
||||
tlv_data = structutils.pack_values(coreapi.CoreConfigTlv, [
|
||||
(ConfigTlvs.NODE, config_data.node),
|
||||
(ConfigTlvs.OBJECT, config_data.object),
|
||||
(ConfigTlvs.TYPE, config_data.type),
|
||||
(ConfigTlvs.DATA_TYPES, config_data.data_types),
|
||||
(ConfigTlvs.VALUES, config_data.data_values),
|
||||
(ConfigTlvs.CAPTIONS, config_data.captions),
|
||||
(ConfigTlvs.BITMAP, config_data.bitmap),
|
||||
(ConfigTlvs.POSSIBLE_VALUES, config_data.possible_values),
|
||||
(ConfigTlvs.GROUPS, config_data.groups),
|
||||
(ConfigTlvs.SESSION, config_data.session),
|
||||
(ConfigTlvs.INTERFACE_NUMBER, config_data.interface_number),
|
||||
(ConfigTlvs.NETWORK_ID, config_data.network_id),
|
||||
(ConfigTlvs.OPAQUE, config_data.opaque),
|
||||
])
|
||||
message = coreapi.CoreConfMessage.pack(config_data.message_type, tlv_data)
|
||||
|
||||
message = dataconversion.convert_config(config_data)
|
||||
try:
|
||||
self.sendall(message)
|
||||
except IOError:
|
||||
|
@@ -315,31 +306,7 @@ class CoreHandler(SocketServer.BaseRequestHandler):
|
|||
:return: nothing
|
||||
"""
|
||||
logger.debug("handling broadcast node: %s", node_data)
|
||||
|
||||
tlv_data = structutils.pack_values(coreapi.CoreNodeTlv, [
|
||||
(NodeTlvs.NUMBER, node_data.id),
|
||||
(NodeTlvs.TYPE, node_data.node_type),
|
||||
(NodeTlvs.NAME, node_data.name),
|
||||
(NodeTlvs.IP_ADDRESS, node_data.ip_address),
|
||||
(NodeTlvs.MAC_ADDRESS, node_data.mac_address),
|
||||
(NodeTlvs.IP6_ADDRESS, node_data.ip6_address),
|
||||
(NodeTlvs.MODEL, node_data.model),
|
||||
(NodeTlvs.EMULATION_ID, node_data.emulation_id),
|
||||
(NodeTlvs.EMULATION_SERVER, node_data.emulation_server),
|
||||
(NodeTlvs.SESSION, node_data.session),
|
||||
(NodeTlvs.X_POSITION, node_data.x_position),
|
||||
(NodeTlvs.Y_POSITION, node_data.y_position),
|
||||
(NodeTlvs.CANVAS, node_data.canvas),
|
||||
(NodeTlvs.NETWORK_ID, node_data.network_id),
|
||||
(NodeTlvs.SERVICES, node_data.services),
|
||||
(NodeTlvs.LATITUDE, node_data.latitude),
|
||||
(NodeTlvs.LONGITUDE, node_data.longitude),
|
||||
(NodeTlvs.ALTITUDE, node_data.altitude),
|
||||
(NodeTlvs.ICON, node_data.icon),
|
||||
(NodeTlvs.OPAQUE, node_data.opaque)
|
||||
])
|
||||
message = coreapi.CoreNodeMessage.pack(node_data.message_type, tlv_data)
|
||||
|
||||
message = dataconversion.convert_node(node_data)
|
||||
try:
|
||||
self.sendall(message)
|
||||
except IOError:
|
||||
|
@@ -407,12 +374,17 @@ class CoreHandler(SocketServer.BaseRequestHandler):
|
|||
tlv_data = ""
|
||||
tlv_data += coreapi.CoreRegisterTlv.pack(RegisterTlvs.EXECUTE_SERVER.value, "core-daemon")
|
||||
tlv_data += coreapi.CoreRegisterTlv.pack(RegisterTlvs.EMULATION_SERVER.value, "core-daemon")
|
||||
|
||||
# get config objects for session
|
||||
for name in self.session.config_objects:
|
||||
config_type, callback = self.session.config_objects[name]
|
||||
# type must be in coreapi.reg_tlvs
|
||||
tlv_data += coreapi.CoreRegisterTlv.pack(config_type, name)
|
||||
tlv_data += coreapi.CoreRegisterTlv.pack(self.session.broker.config_type, self.session.broker.name)
|
||||
tlv_data += coreapi.CoreRegisterTlv.pack(self.session.location.config_type, self.session.location.name)
|
||||
tlv_data += coreapi.CoreRegisterTlv.pack(self.session.mobility.config_type, self.session.mobility.name)
|
||||
for model_class in self.session.mobility.models.itervalues():
|
||||
tlv_data += coreapi.CoreRegisterTlv.pack(model_class.config_type, model_class.name)
|
||||
tlv_data += coreapi.CoreRegisterTlv.pack(self.session.services.config_type, self.session.services.name)
|
||||
tlv_data += coreapi.CoreRegisterTlv.pack(self.session.emane.config_type, self.session.emane.name)
|
||||
for model_class in self.session.emane.models.itervalues():
|
||||
tlv_data += coreapi.CoreRegisterTlv.pack(model_class.config_type, model_class.name)
|
||||
tlv_data += coreapi.CoreRegisterTlv.pack(self.session.options.config_type, self.session.options.name)
|
||||
tlv_data += coreapi.CoreRegisterTlv.pack(self.session.metadata.config_type, self.session.metadata.name)
|
||||
|
||||
return coreapi.CoreRegMessage.pack(MessageFlags.ADD.value, tlv_data)
|
||||
|
||||
|
@@ -606,6 +578,26 @@ class CoreHandler(SocketServer.BaseRequestHandler):
|
|||
logger.debug("BROADCAST TO OTHER CLIENT: %s", client)
|
||||
client.sendall(message.raw_message)
|
||||
|
||||
def send_exception(self, level, source, text, node=None):
|
||||
"""
|
||||
Sends an exception for display within the GUI.
|
||||
|
||||
:param core.enumerations.ExceptionLevel level: level for exception
|
||||
:param str source: source where exception came from
|
||||
:param str text: details about exception
|
||||
:param int node: node id, if related to a specific node
|
||||
:return: nothing
|
||||
"""
|
||||
exception_data = ExceptionData(
|
||||
session=str(self.session.session_id),
|
||||
node=node,
|
||||
date=time.ctime(),
|
||||
level=level.value,
|
||||
source=source,
|
||||
text=text
|
||||
)
|
||||
self.handle_broadcast_exception(exception_data)
|
||||
|
||||
def add_session_handlers(self):
|
||||
logger.debug("adding session broadcast handlers")
|
||||
self.session.event_handlers.append(self.handle_broadcast_event)
|
||||
|
@@ -653,11 +645,16 @@ class CoreHandler(SocketServer.BaseRequestHandler):
|
|||
y=message.get_tlv(NodeTlvs.Y_POSITION.value)
|
||||
)
|
||||
|
||||
node_options.set_location(
|
||||
lat=message.get_tlv(NodeTlvs.LATITUDE.value),
|
||||
lon=message.get_tlv(NodeTlvs.LONGITUDE.value),
|
||||
alt=message.get_tlv(NodeTlvs.ALTITUDE.value)
|
||||
)
|
||||
lat = message.get_tlv(NodeTlvs.LATITUDE.value)
|
||||
if lat is not None:
|
||||
lat = float(lat)
|
||||
lon = message.get_tlv(NodeTlvs.LONGITUDE.value)
|
||||
if lon is not None:
|
||||
lon = float(lon)
|
||||
alt = message.get_tlv(NodeTlvs.ALTITUDE.value)
|
||||
if alt is not None:
|
||||
alt = float(alt)
|
||||
node_options.set_location(lat=lat, lon=lon, alt=alt)
|
||||
|
||||
node_options.icon = message.get_tlv(NodeTlvs.ICON.value)
|
||||
node_options.canvas = message.get_tlv(NodeTlvs.CANVAS.value)
|
||||
|
@@ -941,15 +938,395 @@ class CoreHandler(SocketServer.BaseRequestHandler):
|
|||
opaque=message.get_tlv(ConfigTlvs.OPAQUE.value)
|
||||
)
|
||||
logger.debug("configuration message for %s node %s", config_data.object, config_data.node)
|
||||
message_type = ConfigFlags(config_data.type)
|
||||
|
||||
# dispatch to any registered callback for this object type
|
||||
replies = self.session.config_object(config_data)
|
||||
replies = []
|
||||
|
||||
# handle session configuration
|
||||
if config_data.object == "all":
|
||||
replies = self.handle_config_all(message_type, config_data)
|
||||
elif config_data.object == self.session.options.name:
|
||||
replies = self.handle_config_session(message_type, config_data)
|
||||
elif config_data.object == self.session.location.name:
|
||||
self.handle_config_location(message_type, config_data)
|
||||
elif config_data.object == self.session.metadata.name:
|
||||
replies = self.handle_config_metadata(message_type, config_data)
|
||||
elif config_data.object == self.session.broker.name:
|
||||
self.handle_config_broker(message_type, config_data)
|
||||
elif config_data.object == self.session.services.name:
|
||||
replies = self.handle_config_services(message_type, config_data)
|
||||
elif config_data.object == self.session.mobility.name:
|
||||
self.handle_config_mobility(message_type, config_data)
|
||||
elif config_data.object in self.session.mobility.models:
|
||||
replies = self.handle_config_mobility_models(message_type, config_data)
|
||||
elif config_data.object == self.session.emane.name:
|
||||
replies = self.handle_config_emane(message_type, config_data)
|
||||
elif config_data.object in self.session.emane.models:
|
||||
replies = self.handle_config_emane_models(message_type, config_data)
|
||||
else:
|
||||
raise Exception("no handler for configuration: %s", config_data.object)
|
||||
|
||||
for reply in replies:
|
||||
self.handle_broadcast_config(reply)
|
||||
|
||||
return []
|
||||
|
||||
def handle_config_all(self, message_type, config_data):
|
||||
replies = []
|
||||
|
||||
if message_type == ConfigFlags.RESET:
|
||||
node_id = config_data.node
|
||||
self.session.location.reset()
|
||||
self.session.services.reset()
|
||||
self.session.mobility.config_reset(node_id)
|
||||
self.session.emane.config_reset(node_id)
|
||||
else:
|
||||
raise Exception("cant handle config all: %s" % message_type)
|
||||
|
||||
return replies
|
||||
|
||||
def handle_config_session(self, message_type, config_data):
|
||||
replies = []
|
||||
if message_type == ConfigFlags.REQUEST:
|
||||
type_flags = ConfigFlags.NONE.value
|
||||
config = self.session.options.get_configs()
|
||||
config_response = ConfigShim.config_data(0, None, type_flags, self.session.options, config)
|
||||
replies.append(config_response)
|
||||
elif message_type != ConfigFlags.RESET and config_data.data_values:
|
||||
values = ConfigShim.str_to_dict(config_data.data_values)
|
||||
for key, value in values.iteritems():
|
||||
self.session.options.set_config(key, value)
|
||||
return replies
|
||||
|
||||
def handle_config_location(self, message_type, config_data):
|
||||
if message_type == ConfigFlags.RESET:
|
||||
self.session.location.reset()
|
||||
else:
|
||||
if not config_data.data_values:
|
||||
logger.warn("location data missing")
|
||||
else:
|
||||
values = config_data.data_values.split("|")
|
||||
|
||||
# Cartesian coordinate reference point
|
||||
refx, refy = map(lambda x: float(x), values[0:2])
|
||||
refz = 0.0
|
||||
lat, lon, alt = map(lambda x: float(x), values[2:5])
|
||||
# xyz point
|
||||
self.session.location.refxyz = (refx, refy, refz)
|
||||
# geographic reference point
|
||||
self.session.location.setrefgeo(lat, lon, alt)
|
||||
self.session.location.refscale = float(values[5])
|
||||
logger.info("location configured: %s = %s scale=%s", self.session.location.refxyz,
|
||||
self.session.location.refgeo, self.session.location.refscale)
|
||||
logger.info("location configured: UTM%s", self.session.location.refutm)
|
||||
|
||||
def handle_config_metadata(self, message_type, config_data):
|
||||
replies = []
|
||||
if message_type == ConfigFlags.REQUEST:
|
||||
node_id = config_data.node
|
||||
data_values = "|".join(["%s=%s" % item for item in self.session.metadata.get_configs().iteritems()])
|
||||
data_types = tuple(ConfigDataTypes.STRING.value for _ in self.session.metadata.get_configs())
|
||||
config_response = ConfigData(
|
||||
message_type=0,
|
||||
node=node_id,
|
||||
object=self.session.metadata.name,
|
||||
type=ConfigFlags.NONE.value,
|
||||
data_types=data_types,
|
||||
data_values=data_values
|
||||
)
|
||||
replies.append(config_response)
|
||||
elif message_type != ConfigFlags.RESET and config_data.data_values:
|
||||
values = ConfigShim.str_to_dict(config_data.data_values)
|
||||
for key, value in values.iteritems():
|
||||
self.session.metadata.set_config(key, value)
|
||||
return replies
|
||||
|
||||
def handle_config_broker(self, message_type, config_data):
|
||||
if message_type not in [ConfigFlags.REQUEST, ConfigFlags.RESET]:
|
||||
session_id = config_data.session
|
||||
if not config_data.data_values:
|
||||
logger.info("emulation server data missing")
|
||||
else:
|
||||
values = config_data.data_values.split("|")
|
||||
|
||||
# string of "server:ip:port,server:ip:port,..."
|
||||
server_strings = values[0]
|
||||
server_list = server_strings.split(",")
|
||||
|
||||
for server in server_list:
|
||||
server_items = server.split(":")
|
||||
name, host, port = server_items[:3]
|
||||
|
||||
if host == "":
|
||||
host = None
|
||||
|
||||
if port == "":
|
||||
port = None
|
||||
else:
|
||||
port = int(port)
|
||||
|
||||
if session_id is not None:
|
||||
# receive session ID and my IP from master
|
||||
self.session.broker.session_id_master = int(session_id.split("|")[0])
|
||||
self.session.broker.myip = host
|
||||
host = None
|
||||
port = None
|
||||
|
||||
# this connects to the server immediately; maybe we should wait
|
||||
# or spin off a new "client" thread here
|
||||
self.session.broker.addserver(name, host, port)
|
||||
self.session.broker.setupserver(name)
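
Not part of the diff: the broker configuration keeps the old wire format - the first value is a comma-separated list of server:host:port entries, where empty host/port fields mean the address is not yet known. A standalone sketch of that parsing (server names invented):

# value format handled by handle_config_broker above: "server:ip:port,server:ip:port,..."
values = "core2:10.0.0.2:4038,core3::".split("|")
server_strings = values[0]

for server in server_strings.split(","):
    name, host, port = server.split(":")[:3]
    host = host or None
    port = int(port) if port else None
    print((name, host, port))
# ('core2', '10.0.0.2', 4038)
# ('core3', None, None)
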
|
||||
|
||||
def handle_config_services(self, message_type, config_data):
|
||||
replies = []
|
||||
node_id = config_data.node
|
||||
opaque = config_data.opaque
|
||||
|
||||
if message_type == ConfigFlags.REQUEST:
|
||||
session_id = config_data.session
|
||||
opaque = config_data.opaque
|
||||
|
||||
logger.debug("configuration request: node(%s) session(%s) opaque(%s)", node_id, session_id, opaque)
|
||||
|
||||
# send back a list of available services
|
||||
if opaque is None:
|
||||
type_flag = ConfigFlags.NONE.value
|
||||
data_types = tuple(repeat(ConfigDataTypes.BOOL.value, len(ServiceManager.services)))
|
||||
|
||||
# sort groups by name and map services to groups
|
||||
groups = set()
|
||||
group_map = {}
|
||||
for service_name in ServiceManager.services.itervalues():
|
||||
group = service_name.group
|
||||
groups.add(group)
|
||||
group_map.setdefault(group, []).append(service_name)
|
||||
groups = sorted(groups, key=lambda x: x.lower())
|
||||
|
||||
# define tlv values in proper order
|
||||
captions = []
|
||||
possible_values = []
|
||||
values = []
|
||||
group_strings = []
|
||||
start_index = 1
|
||||
logger.info("sorted groups: %s", groups)
|
||||
for group in groups:
|
||||
services = sorted(group_map[group], key=lambda x: x.name.lower())
|
||||
logger.info("sorted services for group(%s): %s", group, services)
|
||||
end_index = start_index + len(services) - 1
|
||||
group_strings.append("%s:%s-%s" % (group, start_index, end_index))
|
||||
start_index += len(services)
|
||||
for service_name in services:
|
||||
captions.append(service_name.name)
|
||||
values.append("0")
|
||||
if service_name.custom_needed:
|
||||
possible_values.append("1")
|
||||
else:
|
||||
possible_values.append("")
|
||||
|
||||
# format for tlv
|
||||
captions = "|".join(captions)
|
||||
possible_values = "|".join(possible_values)
|
||||
values = "|".join(values)
|
||||
groups = "|".join(group_strings)
|
||||
# send back the properties for this service
|
||||
else:
|
||||
if not node_id:
|
||||
return replies
|
||||
|
||||
node = self.session.get_object(node_id)
|
||||
if node is None:
|
||||
logger.warn("request to configure service for unknown node %s", node_id)
|
||||
return replies
|
||||
|
||||
services = ServiceShim.servicesfromopaque(opaque)
|
||||
if not services:
|
||||
return replies
|
||||
|
||||
servicesstring = opaque.split(":")
|
||||
if len(servicesstring) == 3:
|
||||
# a file request: e.g. "service:zebra:quagga.conf"
|
||||
file_name = servicesstring[2]
|
||||
service_name = services[0]
|
||||
file_data = self.session.services.get_service_file(node, service_name, file_name)
|
||||
self.session.broadcast_file(file_data)
|
||||
# short circuit this request early to avoid returning response below
|
||||
return replies
|
||||
|
||||
# the first service in the list is the one being configured
|
||||
service_name = services[0]
|
||||
# send back:
|
||||
# dirs, configs, startindex, startup, shutdown, metadata, config
|
||||
type_flag = ConfigFlags.UPDATE.value
|
||||
data_types = tuple(repeat(ConfigDataTypes.STRING.value, len(ServiceShim.keys)))
|
||||
service = self.session.services.get_service(node_id, service_name, default_service=True)
|
||||
values = ServiceShim.tovaluelist(node, service)
|
||||
captions = None
|
||||
possible_values = None
|
||||
groups = None
|
||||
|
||||
config_response = ConfigData(
|
||||
message_type=0,
|
||||
node=node_id,
|
||||
object=self.session.services.name,
|
||||
type=type_flag,
|
||||
data_types=data_types,
|
||||
data_values=values,
|
||||
captions=captions,
|
||||
possible_values=possible_values,
|
||||
groups=groups,
|
||||
session=session_id,
|
||||
opaque=opaque
|
||||
)
|
||||
replies.append(config_response)
|
||||
elif message_type == ConfigFlags.RESET:
|
||||
self.session.services.reset()
|
||||
else:
|
||||
data_types = config_data.data_types
|
||||
values = config_data.data_values
|
||||
|
||||
error_message = "services config message that I don't know how to handle"
|
||||
if values is None:
|
||||
logger.error(error_message)
|
||||
else:
|
||||
if opaque is None:
|
||||
values = values.split("|")
|
||||
# store default services for a node type in self.defaultservices[]
|
||||
if data_types is None or data_types[0] != ConfigDataTypes.STRING.value:
|
||||
logger.info(error_message)
|
||||
return None
|
||||
key = values.pop(0)
|
||||
self.session.services.default_services[key] = values
|
||||
logger.debug("default services for type %s set to %s", key, values)
|
||||
elif node_id:
|
||||
services = ServiceShim.servicesfromopaque(opaque)
|
||||
if services:
|
||||
service_name = services[0]
|
||||
|
||||
# set custom service for node
|
||||
self.session.services.set_service(node_id, service_name)
|
||||
|
||||
# set custom values for custom service
|
||||
service = self.session.services.get_service(node_id, service_name)
|
||||
if not service:
|
||||
raise ValueError("custom service(%s) for node(%s) does not exist", service_name, node_id)
|
||||
|
||||
values = ConfigShim.str_to_dict(values)
|
||||
for name, value in values.iteritems():
|
||||
ServiceShim.setvalue(service, name, value)
|
||||
|
||||
return replies
|
||||
|
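For reference, the service-list reply built above packs everything into pipe-delimited strings: captions are the service names, values are per-service flags, and groups are encoded as "Name:start-end" index ranges into the caption list. A minimal standalone sketch of that grouping, using made-up group and service names rather than the real registry:

# hedged sketch: reproduce the "Group:start-end" encoding used in the reply above
services_by_group = {"Utility": ["DefaultRoute", "SSH"], "Quagga": ["OSPFv2"]}

captions = []
group_strings = []
start_index = 1
for group in sorted(services_by_group):
    names = sorted(services_by_group[group], key=lambda x: x.lower())
    end_index = start_index + len(names) - 1
    group_strings.append("%s:%s-%s" % (group, start_index, end_index))
    start_index += len(names)
    captions.extend(names)

print("|".join(captions))       # OSPFv2|DefaultRoute|SSH
print("|".join(group_strings))  # Quagga:1-1|Utility:2-3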
||||
def handle_config_mobility(self, message_type, _):
|
||||
if message_type == ConfigFlags.RESET:
|
||||
self.session.mobility.reset()
|
||||
|
||||
def handle_config_mobility_models(self, message_type, config_data):
|
||||
replies = []
|
||||
node_id = config_data.node
|
||||
object_name = config_data.object
|
||||
interface_id = config_data.interface_number
|
||||
values_str = config_data.data_values
|
||||
|
||||
if interface_id is not None:
|
||||
node_id = node_id * 1000 + interface_id
|
||||
|
||||
logger.debug("received configure message for %s nodenum: %s", object_name, node_id)
|
||||
if message_type == ConfigFlags.REQUEST:
|
||||
logger.info("replying to configure request for model: %s", object_name)
|
||||
typeflags = ConfigFlags.NONE.value
|
||||
|
||||
model_class = self.session.mobility.models.get(object_name)
|
||||
if not model_class:
|
||||
logger.warn("model class does not exist: %s", object_name)
|
||||
return []
|
||||
|
||||
config = self.session.mobility.get_model_config(node_id, object_name)
|
||||
config_response = ConfigShim.config_data(0, node_id, typeflags, model_class, config)
|
||||
replies.append(config_response)
|
||||
elif message_type != ConfigFlags.RESET:
|
||||
# store the configuration values for later use, when the node
|
||||
if not object_name:
|
||||
logger.warn("no configuration object for node: %s", node_id)
|
||||
return []
|
||||
|
||||
parsed_config = {}
|
||||
if values_str:
|
||||
parsed_config = ConfigShim.str_to_dict(values_str)
|
||||
|
||||
self.session.mobility.set_model_config(node_id, object_name, parsed_config)
|
||||
|
||||
return replies
|
||||
|
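The data_values handled here arrive as one pipe-delimited string of key=value pairs (the format ConfigShim.str_to_dict is expected to consume), and interface-specific configuration reuses the node id space as node_id * 1000 + interface_id, as shown above. A small illustrative parser, not the actual ConfigShim implementation:

def parse_config_values(values_str):
    # "emane_platform_port=8100|emane_transform_port=8200" -> dict
    config = {}
    for pair in values_str.split("|"):
        name, _, value = pair.partition("=")
        config[name] = value
    return config

# interface-specific configs reuse the node id space: node 4, interface 2 -> 4002
node_id, interface_id = 4, 2
config_key = node_id * 1000 + interface_id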
||||
def handle_config_emane(self, message_type, config_data):
|
||||
replies = []
|
||||
node_id = config_data.node
|
||||
object_name = config_data.object
|
||||
interface_id = config_data.interface_number
|
||||
values_str = config_data.data_values
|
||||
|
||||
if interface_id is not None:
|
||||
node_id = node_id * 1000 + interface_id
|
||||
|
||||
logger.debug("received configure message for %s nodenum: %s", object_name, node_id)
|
||||
if message_type == ConfigFlags.REQUEST:
|
||||
logger.info("replying to configure request for %s model", object_name)
|
||||
typeflags = ConfigFlags.NONE.value
|
||||
config = self.session.emane.get_configs()
|
||||
config_response = ConfigShim.config_data(0, node_id, typeflags, self.session.emane.emane_config, config)
|
||||
replies.append(config_response)
|
||||
elif message_type != ConfigFlags.RESET:
|
||||
if not object_name:
|
||||
logger.info("no configuration object for node %s", node_id)
|
||||
return []
|
||||
|
||||
if values_str:
|
||||
config = ConfigShim.str_to_dict(values_str)
|
||||
self.session.emane.set_configs(config)
|
||||
|
||||
# extra logic to start slave Emane object after nemid has been configured from the master
|
||||
if message_type == ConfigFlags.UPDATE and self.session.master is False:
|
||||
# instantiation was previously delayed by setup returning Emane.NOT_READY
|
||||
self.session.instantiate()
|
||||
|
||||
return replies
|
||||
|
||||
def handle_config_emane_models(self, message_type, config_data):
|
||||
replies = []
|
||||
node_id = config_data.node
|
||||
object_name = config_data.object
|
||||
interface_id = config_data.interface_number
|
||||
values_str = config_data.data_values
|
||||
|
||||
if interface_id is not None:
|
||||
node_id = node_id * 1000 + interface_id
|
||||
|
||||
logger.debug("received configure message for %s nodenum: %s", object_name, node_id)
|
||||
if message_type == ConfigFlags.REQUEST:
|
||||
logger.info("replying to configure request for model: %s", object_name)
|
||||
typeflags = ConfigFlags.NONE.value
|
||||
|
||||
model_class = self.session.emane.models.get(object_name)
|
||||
if not model_class:
|
||||
logger.warn("model class does not exist: %s", object_name)
|
||||
return []
|
||||
|
||||
config = self.session.emane.get_model_config(node_id, object_name)
|
||||
config_response = ConfigShim.config_data(0, node_id, typeflags, model_class, config)
|
||||
replies.append(config_response)
|
||||
elif message_type != ConfigFlags.RESET:
|
||||
# store the configuration values for later use, when the node
|
||||
if not object_name:
|
||||
logger.warn("no configuration object for node: %s", node_id)
|
||||
return []
|
||||
|
||||
parsed_config = {}
|
||||
if values_str:
|
||||
parsed_config = ConfigShim.str_to_dict(values_str)
|
||||
|
||||
self.session.emane.set_model_config(node_id, object_name, parsed_config)
|
||||
|
||||
return replies
|
||||
|
||||
def handle_file_message(self, message):
|
||||
"""
|
||||
File Message handler
|
||||
|
@ -978,7 +1355,7 @@ class CoreHandler(SocketServer.BaseRequestHandler):
|
|||
if file_type is not None:
|
||||
if file_type.startswith("service:"):
|
||||
_, service_name = file_type.split(':')[:2]
|
||||
self.session.add_node_service_file(node_num, service_name, file_name, source_name, data)
|
||||
self.session.services.set_service_file(node_num, service_name, file_name, data)
|
||||
return ()
|
||||
elif file_type.startswith("hook:"):
|
||||
_, state = file_type.split(':')[:2]
|
||||
|
@ -1084,7 +1461,7 @@ class CoreHandler(SocketServer.BaseRequestHandler):
|
|||
# TODO: register system for event message handlers,
|
||||
# like confobjs
|
||||
if name.startswith("service:"):
|
||||
self.session.services_event(event_data)
|
||||
self.handle_service_event(event_data)
|
||||
handled = True
|
||||
elif name.startswith("mobility:"):
|
||||
self.session.mobility_event(event_data)
|
||||
|
@ -1094,11 +1471,12 @@ class CoreHandler(SocketServer.BaseRequestHandler):
|
|||
elif event_type == EventTypes.FILE_OPEN:
|
||||
filename = event_data.name
|
||||
self.session.open_xml(filename, start=False)
|
||||
self.session.send_objects()
|
||||
self.send_objects()
|
||||
return ()
|
||||
elif event_type == EventTypes.FILE_SAVE:
|
||||
filename = event_data.name
|
||||
self.session.save_xml(filename, self.session.config["xmlfilever"])
|
||||
xml_version = self.session.options.get_config("xmlfilever")
|
||||
self.session.save_xml(filename, xml_version)
|
||||
elif event_type == EventTypes.SCHEDULED:
|
||||
etime = event_data.time
|
||||
node = event_data.node
|
||||
|
@ -1116,6 +1494,72 @@ class CoreHandler(SocketServer.BaseRequestHandler):
|
|||
|
||||
return ()
|
||||
|
||||
def handle_service_event(self, event_data):
|
||||
"""
|
||||
Handle an Event Message used to start, stop, restart, or validate
|
||||
a service on a given node.
|
||||
|
||||
:param EventData event_data: event data to handle
|
||||
:return: nothing
|
||||
"""
|
||||
event_type = event_data.event_type
|
||||
node_id = event_data.node
|
||||
name = event_data.name
|
||||
|
||||
try:
|
||||
node = self.session.get_object(node_id)
|
||||
except KeyError:
|
||||
logger.warn("ignoring event for service '%s', unknown node '%s'", name, node_id)
|
||||
return
|
||||
|
||||
fail = ""
|
||||
unknown = []
|
||||
services = ServiceShim.servicesfromopaque(name)
|
||||
for service_name in services:
|
||||
service = self.session.services.get_service(node_id, service_name, default_service=True)
|
||||
if not service:
|
||||
unknown.append(service_name)
|
||||
continue
|
||||
|
||||
if event_type == EventTypes.STOP.value or event_type == EventTypes.RESTART.value:
|
||||
status = self.session.services.stop_service(node, service)
|
||||
if status:
|
||||
fail += "Stop %s," % service.name
|
||||
if event_type == EventTypes.START.value or event_type == EventTypes.RESTART.value:
|
||||
status = self.session.services.startup_service(node, service)
|
||||
if status:
|
||||
fail += "Start %s(%s)," % service.name
|
||||
if event_type == EventTypes.PAUSE.value:
|
||||
status = self.session.services.validate_service(node, service)
|
||||
if status:
|
||||
fail += "%s," % service.name
|
||||
if event_type == EventTypes.RECONFIGURE.value:
|
||||
self.session.services.service_reconfigure(node, service)
|
||||
|
||||
fail_data = ""
|
||||
if len(fail) > 0:
|
||||
fail_data += "Fail:" + fail
|
||||
unknown_data = ""
|
||||
num = len(unknown)
|
||||
if num > 0:
|
||||
for u in unknown:
|
||||
unknown_data += u
|
||||
if num > 1:
|
||||
unknown_data += ", "
|
||||
num -= 1
|
||||
logger.warn("Event requested for unknown service(s): %s", unknown_data)
|
||||
unknown_data = "Unknown:" + unknown_data
|
||||
|
||||
event_data = EventData(
|
||||
node=node_id,
|
||||
event_type=event_type,
|
||||
name=name,
|
||||
data=fail_data + ";" + unknown_data,
|
||||
time="%s" % time.time()
|
||||
)
|
||||
|
||||
self.session.broadcast_event(event_data)
|
||||
|
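The reply assembled above reports per-service results as plain strings: a "Fail:" prefix followed by the accumulated failures, a semicolon, then "Unknown:" with any unrecognized service names. A hedged sketch of that payload assembly outside the handler, with invented service names:

failed = ["Stop zebra,", "Start OSPFv2,"]
unknown = ["NotAService"]

fail_data = "Fail:" + "".join(failed) if failed else ""
unknown_data = "Unknown:" + ", ".join(unknown) if unknown else ""
payload = fail_data + ";" + unknown_data
# -> "Fail:Stop zebra,Start OSPFv2,;Unknown:NotAService"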
||||
def handle_session_message(self, message):
|
||||
"""
|
||||
Session Message handler
|
||||
|
@ -1196,7 +1640,7 @@ class CoreHandler(SocketServer.BaseRequestHandler):
|
|||
self.session.set_user(user)
|
||||
|
||||
if message.flags & MessageFlags.STRING.value:
|
||||
self.session.send_objects()
|
||||
self.send_objects()
|
||||
elif message.flags & MessageFlags.DELETE.value:
|
||||
# shut down the specified session(s)
|
||||
logger.info("request to terminate session %s" % session_id)
|
||||
|
@ -1225,3 +1669,105 @@ class CoreHandler(SocketServer.BaseRequestHandler):
|
|||
logger.exception("error sending node emulation id message: %s", node_id)
|
||||
|
||||
del self.node_status_request[node_id]
|
||||
|
||||
def send_objects(self):
|
||||
"""
|
||||
Return API messages that describe the current session.
|
||||
"""
|
||||
# find all nodes and links
|
||||
|
||||
nodes_data = []
|
||||
links_data = []
|
||||
with self.session._objects_lock:
|
||||
for obj in self.session.objects.itervalues():
|
||||
node_data = obj.data(message_type=MessageFlags.ADD.value)
|
||||
if node_data:
|
||||
nodes_data.append(node_data)
|
||||
|
||||
node_links = obj.all_link_data(flags=MessageFlags.ADD.value)
|
||||
for link_data in node_links:
|
||||
links_data.append(link_data)
|
||||
|
||||
# send all nodes first, so that they will exist for any links
|
||||
for node_data in nodes_data:
|
||||
self.session.broadcast_node(node_data)
|
||||
|
||||
for link_data in links_data:
|
||||
self.session.broadcast_link(link_data)
|
||||
|
||||
# send mobility model info
|
||||
for node_id in self.session.mobility.nodes():
|
||||
for model_name, config in self.session.mobility.get_all_configs(node_id).iteritems():
|
||||
model_class = self.session.mobility.models[model_name]
|
||||
logger.debug("mobility config: node(%s) class(%s) values(%s)", node_id, model_class, config)
|
||||
config_data = ConfigShim.config_data(0, node_id, ConfigFlags.UPDATE.value, model_class, config)
|
||||
self.session.broadcast_config(config_data)
|
||||
|
||||
# send emane model info
|
||||
for node_id in self.session.emane.nodes():
|
||||
for model_name, config in self.session.emane.get_all_configs(node_id).iteritems():
|
||||
model_class = self.session.emane.models[model_name]
|
||||
logger.debug("emane config: node(%s) class(%s) values(%s)", node_id, model_class, config)
|
||||
config_data = ConfigShim.config_data(0, node_id, ConfigFlags.UPDATE.value, model_class, config)
|
||||
self.session.broadcast_config(config_data)
|
||||
|
||||
# service customizations
|
||||
service_configs = self.session.services.all_configs()
|
||||
for node_id, service in service_configs:
|
||||
opaque = "service:%s" % service.name
|
||||
data_types = tuple(repeat(ConfigDataTypes.STRING.value, len(ServiceShim.keys)))
|
||||
node = self.session.get_object(node_id)
|
||||
values = ServiceShim.tovaluelist(node, service)
|
||||
config_data = ConfigData(
|
||||
message_type=0,
|
||||
node=node_id,
|
||||
object=self.session.services.name,
|
||||
type=ConfigFlags.UPDATE.value,
|
||||
data_types=data_types,
|
||||
data_values=values,
|
||||
session=str(self.session.session_id),
|
||||
opaque=opaque
|
||||
)
|
||||
self.session.broadcast_config(config_data)
|
||||
|
||||
for file_name, config_data in self.session.services.all_files(service):
|
||||
file_data = FileData(
|
||||
message_type=MessageFlags.ADD.value,
|
||||
node=node_id,
|
||||
name=str(file_name),
|
||||
type=opaque,
|
||||
data=str(config_data)
|
||||
)
|
||||
self.session.broadcast_file(file_data)
|
||||
|
||||
# TODO: send location info
|
||||
|
||||
# send hook scripts
|
||||
for state in sorted(self.session._hooks.keys()):
|
||||
for file_name, config_data in self.session._hooks[state]:
|
||||
file_data = FileData(
|
||||
message_type=MessageFlags.ADD.value,
|
||||
name=str(file_name),
|
||||
type="hook:%s" % state,
|
||||
data=str(config_data)
|
||||
)
|
||||
self.session.broadcast_file(file_data)
|
||||
|
||||
# send session configuration
|
||||
session_config = self.session.options.get_configs()
|
||||
config_data = ConfigShim.config_data(0, None, ConfigFlags.UPDATE.value, self.session.options, session_config)
|
||||
self.session.broadcast_config(config_data)
|
||||
|
||||
# send session metadata
|
||||
data_values = "|".join(["%s=%s" % item for item in self.session.metadata.get_configs().iteritems()])
|
||||
data_types = tuple(ConfigDataTypes.STRING.value for _ in self.session.metadata.get_configs())
|
||||
config_data = ConfigData(
|
||||
message_type=0,
|
||||
object=self.session.metadata.name,
|
||||
type=ConfigFlags.NONE.value,
|
||||
data_types=data_types,
|
||||
data_values=data_values
|
||||
)
|
||||
self.session.broadcast_config(config_data)
|
||||
|
||||
logger.info("informed GUI about %d nodes and %d links", len(nodes_data), len(links_data))
|
||||
|
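The session metadata broadcast above serializes each key/value pair as key=value joined with pipes, with every entry typed as a string TLV. A minimal sketch of that encoding with hypothetical metadata values:

metadata = {"canvas": "c1", "shapes": "[]"}

data_values = "|".join("%s=%s" % item for item in metadata.items())
data_types = tuple("string" for _ in metadata)  # stands in for ConfigDataTypes.STRING.value
# data_values -> e.g. "canvas=c1|shapes=[]"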
|
|
@ -195,9 +195,9 @@ class PyCoreObj(object):
|
|||
Build a data object for this node.
|
||||
|
||||
:param message_type: purpose for the data object we are creating
|
||||
:param float lat: latitude
|
||||
:param float lon: longitude
|
||||
:param float alt: altitude
|
||||
:param str lat: latitude
|
||||
:param str lon: longitude
|
||||
:param str alt: altitude
|
||||
:return: node data object
|
||||
:rtype: core.data.NodeData
|
||||
"""
|
||||
|
@ -218,7 +218,7 @@ class PyCoreObj(object):
|
|||
if hasattr(self, "services") and len(self.services) != 0:
|
||||
nodeservices = []
|
||||
for s in self.services:
|
||||
nodeservices.append(s._name)
|
||||
nodeservices.append(s.name)
|
||||
services = "|".join(nodeservices)
|
||||
|
||||
node_data = NodeData(
|
||||
|
@ -305,8 +305,8 @@ class PyCoreNode(PyCoreObj):
|
|||
|
||||
:return: nothing
|
||||
"""
|
||||
preserve = getattr(self.session.options, "preservedir", None)
|
||||
if preserve == "1":
|
||||
preserve = self.session.options.get_config("preservedir") == "1"
|
||||
if preserve:
|
||||
return
|
||||
|
||||
if self.tmpnodedir:
|
||||
|
|
|
@ -1,7 +1,8 @@
|
|||
"""
|
||||
EMANE Bypass model for CORE
|
||||
"""
|
||||
|
||||
from core.conf import ConfigGroup
|
||||
from core.conf import Configuration
|
||||
from core.emane import emanemodel
|
||||
from core.enumerations import ConfigDataTypes
|
||||
|
||||
|
@ -15,13 +16,22 @@ class EmaneBypassModel(emanemodel.EmaneModel):
|
|||
# mac definitions
|
||||
mac_library = "bypassmaclayer"
|
||||
mac_config = [
|
||||
("none", ConfigDataTypes.BOOL.value, "0", "True,False",
|
||||
"There are no parameters for the bypass model."),
|
||||
Configuration(
|
||||
_id="none",
|
||||
_type=ConfigDataTypes.BOOL,
|
||||
default="0",
|
||||
options=["True", "False"],
|
||||
label="There are no parameters for the bypass model."
|
||||
)
|
||||
]
|
||||
|
||||
# phy definitions
|
||||
phy_library = "bypassphylayer"
|
||||
phy_config = []
|
||||
|
||||
# override gui display tabs
|
||||
config_groups_override = "Bypass Parameters:1-1"
|
||||
# override config groups
|
||||
@classmethod
|
||||
def config_groups(cls):
|
||||
return [
|
||||
ConfigGroup("Bypass Parameters", 1, 1),
|
||||
]
|
||||
|
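The bypass model above illustrates the new declarative style: options are Configuration objects and GUI tabs come from config_groups() returning ConfigGroup ranges, replacing the old "Name:start-end" override strings. A hedged sketch of a custom EMANE model written the same way; the model, library, and option names are hypothetical:

from core.conf import ConfigGroup
from core.conf import Configuration
from core.emane import emanemodel
from core.enumerations import ConfigDataTypes


class ExampleEmaneModel(emanemodel.EmaneModel):
    name = "emane_example"

    # mac definitions (hypothetical library and option)
    mac_library = "examplemaclayer"
    mac_config = [
        Configuration(
            _id="txpower",
            _type=ConfigDataTypes.FLOAT,
            default="0.0",
            label="Transmit power (dBm)"
        ),
    ]

    @classmethod
    def config_groups(cls):
        return [
            ConfigGroup("Example MAC Parameters", 1, len(cls.mac_config)),
        ]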
|
|
@ -2,9 +2,15 @@
|
|||
commeffect.py: EMANE CommEffect model for CORE
|
||||
"""
|
||||
|
||||
import os
|
||||
|
||||
from lxml import etree
|
||||
|
||||
from core import logger
|
||||
from core.conf import ConfigGroup
|
||||
from core.emane import emanemanifest
|
||||
from core.emane import emanemodel
|
||||
from core.xml import emanexml
|
||||
|
||||
try:
|
||||
from emane.events.commeffectevent import CommEffectEvent
|
||||
|
@ -35,58 +41,67 @@ class EmaneCommEffectModel(emanemodel.EmaneModel):
|
|||
shim_defaults = {}
|
||||
config_shim = emanemanifest.parse(shim_xml, shim_defaults)
|
||||
|
||||
config_groups_override = "CommEffect SHIM Parameters:1-%d" % len(config_shim)
|
||||
config_matrix_override = config_shim
|
||||
# comm effect does not need the default phy and external configurations
|
||||
phy_config = ()
|
||||
external_config = ()
|
||||
|
||||
def build_xml_files(self, emane_manager, interface):
|
||||
@classmethod
|
||||
def configurations(cls):
|
||||
return cls.config_shim
|
||||
|
||||
@classmethod
|
||||
def config_groups(cls):
|
||||
return [
|
||||
ConfigGroup("CommEffect SHIM Parameters", 1, len(cls.configurations()))
|
||||
]
|
||||
|
||||
def build_xml_files(self, config, interface=None):
|
||||
"""
|
||||
Build the necessary nem and commeffect XMLs in the given path.
|
||||
If an individual NEM has a nonstandard config, we need to build
|
||||
that file also. Otherwise the WLAN-wide
|
||||
nXXemane_commeffectnem.xml, nXXemane_commeffectshim.xml are used.
|
||||
|
||||
:param core.emane.emanemanager.EmaneManager emane_manager: core emane manager
|
||||
:param dict config: emane model configuration for the node and interface
|
||||
:param interface: interface for the emane node
|
||||
:return: nothing
|
||||
"""
|
||||
values = emane_manager.getifcconfig(self.object_id, self.name, self.getdefaultvalues(), interface)
|
||||
if values is None:
|
||||
return
|
||||
|
||||
# retrieve xml names
|
||||
nem_name = self.nem_name(interface)
|
||||
shim_name = self.shim_name(interface)
|
||||
nem_name = emanexml.nem_file_name(self, interface)
|
||||
shim_name = emanexml.shim_file_name(self, interface)
|
||||
|
||||
nem_document = emane_manager.xmldoc("nem")
|
||||
nem_element = nem_document.getElementsByTagName("nem").pop()
|
||||
nem_element.setAttribute("name", "%s NEM" % self.name)
|
||||
nem_element.setAttribute("type", "unstructured")
|
||||
emane_manager.appendtransporttonem(nem_document, nem_element, self.object_id, interface)
|
||||
# create and write nem document
|
||||
nem_element = etree.Element("nem", name="%s NEM" % self.name, type="unstructured")
|
||||
transport_type = "virtual"
|
||||
if interface and interface.transport_type == "raw":
|
||||
transport_type = "raw"
|
||||
transport_file = emanexml.transport_file_name(self.object_id, transport_type)
|
||||
etree.SubElement(nem_element, "transport", definition=transport_file)
|
||||
|
||||
shim_xml = emane_manager.xmlshimdefinition(nem_document, shim_name)
|
||||
nem_element.appendChild(shim_xml)
|
||||
emane_manager.xmlwrite(nem_document, nem_name)
|
||||
# set shim configuration
|
||||
etree.SubElement(nem_element, "shim", definition=shim_name)
|
||||
|
||||
names = self.getnames()
|
||||
shim_names = list(names)
|
||||
shim_names.remove("filterfile")
|
||||
nem_file = os.path.join(self.session.session_dir, nem_name)
|
||||
emanexml.create_file(nem_element, "nem", nem_file)
|
||||
|
||||
shim_document = emane_manager.xmldoc("shim")
|
||||
shim_element = shim_document.getElementsByTagName("shim").pop()
|
||||
shim_element.setAttribute("name", "%s SHIM" % self.name)
|
||||
shim_element.setAttribute("library", self.shim_library)
|
||||
# create and write shim document
|
||||
shim_element = etree.Element("shim", name="%s SHIM" % self.name, library=self.shim_library)
|
||||
|
||||
# append all shim options (except filterfile) to shimdoc
|
||||
for name in shim_names:
|
||||
value = self.valueof(name, values)
|
||||
param = emane_manager.xmlparam(shim_document, name, value)
|
||||
shim_element.appendChild(param)
|
||||
for configuration in self.config_shim:
|
||||
name = configuration.id
|
||||
if name == "filterfile":
|
||||
continue
|
||||
value = config[name]
|
||||
emanexml.add_param(shim_element, name, value)
|
||||
|
||||
# empty filterfile is not allowed
|
||||
ff = self.valueof("filterfile", values)
|
||||
ff = config["filterfile"]
|
||||
if ff.strip() != "":
|
||||
shim_element.appendChild(emane_manager.xmlparam(shim_document, "filterfile", ff))
|
||||
emane_manager.xmlwrite(shim_document, shim_name)
|
||||
emanexml.add_param(shim_element, "filterfile", ff)
|
||||
|
||||
shim_file = os.path.join(self.session.session_dir, shim_name)
|
||||
emanexml.create_file(shim_element, "shim", shim_file)
|
||||
|
||||
def linkconfig(self, netif, bw=None, delay=None, loss=None, duplicate=None, jitter=None, netif2=None):
|
||||
"""
|
||||
|
|
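The commeffect changes above move XML generation from xml.dom.minidom to lxml.etree, with emanexml helpers writing the resulting documents into the session directory. A small illustrative fragment of the same element-building pattern in plain lxml; the referenced file names are made up:

from lxml import etree

nem_element = etree.Element("nem", name="Example NEM", type="unstructured")
etree.SubElement(nem_element, "transport", definition="transvirtual.xml")
etree.SubElement(nem_element, "shim", definition="exampleshim.xml")

# pretty-printed output roughly matching what emanexml.create_file() writes out
print(etree.tostring(nem_element, pretty_print=True))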
|
@ -4,13 +4,16 @@ emane.py: definition of an Emane class for implementing configuration control of
|
|||
|
||||
import os
|
||||
import threading
|
||||
from xml.dom.minidom import parseString
|
||||
|
||||
from core import CoreCommandError
|
||||
from core import constants
|
||||
from core import logger
|
||||
from core.api import coreapi
|
||||
from core.conf import ConfigurableManager
|
||||
from core.api import dataconversion
|
||||
from core.conf import ConfigGroup
|
||||
from core.conf import ConfigShim
|
||||
from core.conf import Configuration
|
||||
from core.conf import ModelManager
|
||||
from core.emane import emanemanifest
|
||||
from core.emane.bypass import EmaneBypassModel
|
||||
from core.emane.commeffect import EmaneCommEffectModel
|
||||
|
@ -18,16 +21,16 @@ from core.emane.emanemodel import EmaneModel
|
|||
from core.emane.ieee80211abg import EmaneIeee80211abgModel
|
||||
from core.emane.rfpipe import EmaneRfPipeModel
|
||||
from core.emane.tdma import EmaneTdmaModel
|
||||
from core.enumerations import ConfigDataTypes, NodeTypes
|
||||
from core.enumerations import ConfigDataTypes
|
||||
from core.enumerations import ConfigFlags
|
||||
from core.enumerations import ConfigTlvs
|
||||
from core.enumerations import MessageFlags
|
||||
from core.enumerations import MessageTypes
|
||||
from core.enumerations import NodeTypes
|
||||
from core.enumerations import RegisterTlvs
|
||||
from core.misc import nodeutils
|
||||
from core.misc import utils
|
||||
from core.misc.ipaddress import MacAddress
|
||||
from core.xml import xmlutils
|
||||
from core.xml import emanexml
|
||||
|
||||
try:
|
||||
from emane.events import EventService
|
||||
|
@ -50,7 +53,7 @@ EMANE_MODELS = [
|
|||
]
|
||||
|
||||
|
||||
class EmaneManager(ConfigurableManager):
|
||||
class EmaneManager(ModelManager):
|
||||
"""
|
||||
EMANE controller object. Lives in a Session instance and is used for
|
||||
building EMANE config files from all of the EmaneNode objects in this
|
||||
|
@ -58,7 +61,6 @@ class EmaneManager(ConfigurableManager):
|
|||
"""
|
||||
name = "emane"
|
||||
config_type = RegisterTlvs.EMULATION_SERVER.value
|
||||
_hwaddr_prefix = "02:02"
|
||||
SUCCESS, NOT_NEEDED, NOT_READY = (0, 1, 2)
|
||||
EVENTCFGVAR = "LIBEMANEEVENTSERVICECONFIG"
|
||||
DEFAULT_LOG_LEVEL = 3
|
||||
|
@ -70,30 +72,72 @@ class EmaneManager(ConfigurableManager):
|
|||
:param core.session.Session session: session this manager is tied to
|
||||
:return: nothing
|
||||
"""
|
||||
ConfigurableManager.__init__(self)
|
||||
super(EmaneManager, self).__init__()
|
||||
self.session = session
|
||||
self._emane_nodes = {}
|
||||
self._emane_node_lock = threading.Lock()
|
||||
self._ifccounts = {}
|
||||
self._ifccountslock = threading.Lock()
|
||||
# Port numbers are allocated from these counters
|
||||
self.platformport = self.session.get_config_item_int("emane_platform_port", 8100)
|
||||
self.transformport = self.session.get_config_item_int("emane_transform_port", 8200)
|
||||
# port numbers are allocated from these counters
|
||||
self.platformport = self.session.options.get_config_int("emane_platform_port", 8100)
|
||||
self.transformport = self.session.options.get_config_int("emane_transform_port", 8200)
|
||||
self.doeventloop = False
|
||||
self.eventmonthread = None
|
||||
|
||||
# model for global EMANE configuration options
|
||||
self.emane_config = EmaneGlobalModel(session, None)
|
||||
self.emane_config = EmaneGlobalModel(session)
|
||||
self.set_configs(self.emane_config.default_values())
|
||||
|
||||
session.broker.handlers.add(self.handledistributed)
|
||||
self.service = None
|
||||
self.event_device = None
|
||||
self._modelclsmap = {
|
||||
self.emane_config.name: self.emane_config
|
||||
}
|
||||
|
||||
self.service = None
|
||||
self.emane_check()
|
||||
|
||||
def getifcconfig(self, node_id, interface, model_name):
|
||||
"""
|
||||
Retrieve interface configuration or node configuration if not provided.
|
||||
|
||||
:param int node_id: node id
|
||||
:param interface: node interface
|
||||
:param str model_name: model to get configuration for
|
||||
:return: node/interface model configuration
|
||||
:rtype: dict
|
||||
"""
|
||||
# use the network-wide config values or interface(NEM)-specific values?
|
||||
if interface is None:
|
||||
return self.get_configs(node_id=node_id, config_type=model_name)
|
||||
else:
|
||||
# don"t use default values when interface config is the same as net
|
||||
# note here that using ifc.node.objid as key allows for only one type
|
||||
# of each model per node;
|
||||
# TODO: use both node and interface as key
|
||||
|
||||
# Adamson change: first check for iface config keyed by "node:ifc.name"
|
||||
# (so that nodes w/ multiple interfaces of same conftype can have
|
||||
# different configs for each separate interface)
|
||||
key = 1000 * interface.node.objid
|
||||
if interface.netindex is not None:
|
||||
key += interface.netindex
|
||||
|
||||
# try retrieve interface specific configuration, avoid getting defaults
|
||||
config = self.get_configs(node_id=key, config_type=model_name)
|
||||
|
||||
# otherwise retrieve the interfaces node configuration, avoid using defaults
|
||||
if not config:
|
||||
config = self.get_configs(node_id=interface.node.objid, config_type=model_name)
|
||||
|
||||
# get non interface config, when none found
|
||||
if not config:
|
||||
# with EMANE 0.9.2+, we need an extra NEM XML from
|
||||
# model.buildnemxmlfiles(), so defaults are returned here
|
||||
config = self.get_configs(node_id=node_id, config_type=model_name)
|
||||
|
||||
return config
|
||||
|
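getifcconfig above resolves model configuration in three steps: an interface-specific entry keyed by node.objid * 1000 + netindex, then the node-wide entry, then defaults. A hedged sketch of that lookup order against a plain dict, not the real ConfigurableManager storage:

def lookup_model_config(configs, node_id, netindex, model_name, defaults):
    # interface-specific key, e.g. node 7 / interface 0 -> 7000
    interface_key = node_id * 1000 + netindex
    for key in (interface_key, node_id):
        config = configs.get((key, model_name))
        if config:
            return config
    # fall back to model defaults when nothing was explicitly configured
    return defaults

configs = {(7, "emane_rfpipe"): {"datarate": "1M"}}
print(lookup_model_config(configs, 7, 0, "emane_rfpipe", {"datarate": "10M"}))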
||||
def config_reset(self, node_id=None):
|
||||
super(EmaneManager, self).config_reset(node_id)
|
||||
self.set_configs(self.emane_config.default_values())
|
||||
|
||||
def emane_check(self):
|
||||
"""
|
||||
Check if emane is installed and load models.
|
||||
|
@ -109,7 +153,7 @@ class EmaneManager(ConfigurableManager):
|
|||
self.load_models(EMANE_MODELS)
|
||||
|
||||
# load custom models
|
||||
custom_models_path = self.session.config.get("emane_models_dir")
|
||||
custom_models_path = self.session.options.get_config("emane_models_dir")
|
||||
if custom_models_path:
|
||||
emane_models = utils.load_classes(custom_models_path, EmaneModel)
|
||||
self.load_models(emane_models)
|
||||
|
@ -138,9 +182,8 @@ class EmaneManager(ConfigurableManager):
|
|||
return
|
||||
|
||||
# Get the control network to be used for events
|
||||
values = self.getconfig(None, "emane", self.emane_config.getdefaultvalues())[1]
|
||||
group, port = self.emane_config.valueof("eventservicegroup", values).split(":")
|
||||
self.event_device = self.emane_config.valueof("eventservicedevice", values)
|
||||
group, port = self.get_config("eventservicegroup").split(":")
|
||||
self.event_device = self.get_config("eventservicedevice")
|
||||
eventnetidx = self.session.get_control_net_index(self.event_device)
|
||||
if eventnetidx < 0:
|
||||
logger.error("invalid emane event service device provided: %s", self.event_device)
|
||||
|
@ -165,12 +208,11 @@ class EmaneManager(ConfigurableManager):
|
|||
|
||||
def load_models(self, emane_models):
|
||||
"""
|
||||
load EMANE models and make them available.
|
||||
Load EMANE models and make them available.
|
||||
"""
|
||||
for emane_model in emane_models:
|
||||
logger.info("loading emane model: %s", emane_model.__name__)
|
||||
self._modelclsmap[emane_model.name] = emane_model
|
||||
self.session.add_config_object(emane_model.name, emane_model.config_type, emane_model.configure_emane)
|
||||
self.models[emane_model.name] = emane_model
|
||||
|
||||
def add_node(self, emane_node):
|
||||
"""
|
||||
|
@ -196,50 +238,6 @@ class EmaneManager(ConfigurableManager):
|
|||
nodes.add(netif.node)
|
||||
return nodes
|
||||
|
||||
def getmodels(self, n):
|
||||
"""
|
||||
Used with XML export; see ConfigurableManager.getmodels()
|
||||
"""
|
||||
r = ConfigurableManager.getmodels(self, n)
|
||||
# EMANE global params are stored with first EMANE node (if non-default
|
||||
# values are configured)
|
||||
sorted_ids = sorted(self.configs.keys())
|
||||
if None in self.configs and len(sorted_ids) > 1 and n.objid == sorted_ids[1]:
|
||||
v = self.configs[None]
|
||||
for model in v:
|
||||
cls = self._modelclsmap[model[0]]
|
||||
vals = model[1]
|
||||
r.append((cls, vals))
|
||||
return r
|
||||
|
||||
def getifcconfig(self, nodenum, conftype, defaultvalues, ifc):
|
||||
# use the network-wide config values or interface(NEM)-specific values?
|
||||
if ifc is None:
|
||||
return self.getconfig(nodenum, conftype, defaultvalues)[1]
|
||||
else:
|
||||
# don"t use default values when interface config is the same as net
|
||||
# note here that using ifc.node.objid as key allows for only one type
|
||||
# of each model per node;
|
||||
# TODO: use both node and interface as key
|
||||
|
||||
# Adamson change: first check for iface config keyed by "node:ifc.name"
|
||||
# (so that nodes w/ multiple interfaces of same conftype can have
|
||||
# different configs for each separate interface)
|
||||
key = 1000 * ifc.node.objid
|
||||
if ifc.netindex is not None:
|
||||
key += ifc.netindex
|
||||
|
||||
values = self.getconfig(key, conftype, None)[1]
|
||||
if not values:
|
||||
values = self.getconfig(ifc.node.objid, conftype, None)[1]
|
||||
|
||||
if not values and ifc.transport_type == "raw":
|
||||
# with EMANE 0.9.2+, we need an extra NEM XML from
|
||||
# model.buildnemxmlfiles(), so defaults are returned here
|
||||
values = self.getconfig(nodenum, conftype, defaultvalues)[1]
|
||||
|
||||
return values
|
||||
|
||||
def setup(self):
|
||||
"""
|
||||
Populate self._objs with EmaneNodes; perform distributed setup;
|
||||
|
@ -264,9 +262,7 @@ class EmaneManager(ConfigurableManager):
|
|||
# - needs to be configured before checkdistributed() for distributed
|
||||
# - needs to exist when eventservice binds to it (initeventservice)
|
||||
if self.session.master:
|
||||
values = self.getconfig(None, self.emane_config.name, self.emane_config.getdefaultvalues())[1]
|
||||
logger.debug("emane config default values: %s", values)
|
||||
otadev = self.emane_config.valueof("otamanagerdevice", values)
|
||||
otadev = self.get_config("otamanagerdevice")
|
||||
netidx = self.session.get_control_net_index(otadev)
|
||||
logger.debug("emane ota manager device: index(%s) otadev(%s)", netidx, otadev)
|
||||
if netidx < 0:
|
||||
|
@ -275,7 +271,7 @@ class EmaneManager(ConfigurableManager):
|
|||
|
||||
ctrlnet = self.session.add_remove_control_net(net_index=netidx, remove=False, conf_required=False)
|
||||
self.distributedctrlnet(ctrlnet)
|
||||
eventdev = self.emane_config.valueof("eventservicedevice", values)
|
||||
eventdev = self.get_config("eventservicedevice")
|
||||
logger.debug("emane event service device: eventdev(%s)", eventdev)
|
||||
if eventdev != otadev:
|
||||
netidx = self.session.get_control_net_index(eventdev)
|
||||
|
@ -288,13 +284,14 @@ class EmaneManager(ConfigurableManager):
|
|||
self.distributedctrlnet(ctrlnet)
|
||||
|
||||
if self.checkdistributed():
|
||||
# we are slave, but haven"t received a platformid yet
|
||||
cfgval = self.getconfig(None, self.emane_config.name, self.emane_config.getdefaultvalues())[1]
|
||||
i = self.emane_config.getnames().index("platform_id_start")
|
||||
if cfgval[i] == self.emane_config.getdefaultvalues()[i]:
|
||||
# we are slave, but haven't received a platformid yet
|
||||
platform_id_start = "platform_id_start"
|
||||
default_values = self.emane_config.default_values()
|
||||
value = self.get_config(platform_id_start)
|
||||
if value == default_values[platform_id_start]:
|
||||
return EmaneManager.NOT_READY
|
||||
|
||||
self.setnodemodels()
|
||||
self.check_node_models()
|
||||
return EmaneManager.SUCCESS
|
||||
|
||||
def startup(self):
|
||||
|
@ -318,7 +315,7 @@ class EmaneManager(ConfigurableManager):
|
|||
|
||||
if self.numnems() > 0:
|
||||
self.startdaemons()
|
||||
self.installnetifs(do_netns=False)
|
||||
self.installnetifs()
|
||||
|
||||
for emane_node in self._emane_nodes.itervalues():
|
||||
for netif in emane_node.netifs():
|
||||
|
@ -346,7 +343,7 @@ class EmaneManager(ConfigurableManager):
|
|||
for key in sorted(self._emane_nodes.keys()):
|
||||
emane_node = self._emane_nodes[key]
|
||||
logger.debug("post startup for emane node: %s - %s", emane_node.objid, emane_node.name)
|
||||
emane_node.model.post_startup(self)
|
||||
emane_node.model.post_startup()
|
||||
for netif in emane_node.netifs():
|
||||
x, y, z = netif.node.position.get()
|
||||
emane_node.setnemposition(netif, x, y, z)
|
||||
|
@ -359,9 +356,9 @@ class EmaneManager(ConfigurableManager):
|
|||
with self._emane_node_lock:
|
||||
self._emane_nodes.clear()
|
||||
|
||||
# don"t clear self._ifccounts here; NEM counts are needed for buildxml
|
||||
self.platformport = self.session.get_config_item_int("emane_platform_port", 8100)
|
||||
self.transformport = self.session.get_config_item_int("emane_transform_port", 8200)
|
||||
# don't clear self._ifccounts here; NEM counts are needed for buildxml
|
||||
self.platformport = self.session.options.get_config_int("emane_platform_port", 8100)
|
||||
self.transformport = self.session.options.get_config_int("emane_transform_port", 8200)
|
||||
|
||||
def shutdown(self):
|
||||
"""
|
||||
|
@ -416,20 +413,16 @@ class EmaneManager(ConfigurableManager):
|
|||
if not master:
|
||||
return True
|
||||
|
||||
cfgval = self.getconfig(None, self.emane_config.name, self.emane_config.getdefaultvalues())[1]
|
||||
values = list(cfgval)
|
||||
|
||||
nemcount = 0
|
||||
with self._emane_node_lock:
|
||||
for key in self._emane_nodes:
|
||||
emane_node = self._emane_nodes[key]
|
||||
nemcount += emane_node.numnetif()
|
||||
|
||||
nemid = int(self.emane_config.valueof("nem_id_start", values))
|
||||
nemid = int(self.get_config("nem_id_start"))
|
||||
nemid += nemcount
|
||||
|
||||
platformid = int(self.emane_config.valueof("platform_id_start", values))
|
||||
names = list(self.emane_config.getnames())
|
||||
platformid = int(self.get_config("platform_id_start"))
|
||||
|
||||
# build an ordered list of servers so platform ID is deterministic
|
||||
servers = []
|
||||
|
@ -448,10 +441,11 @@ class EmaneManager(ConfigurableManager):
|
|||
|
||||
platformid += 1
|
||||
typeflags = ConfigFlags.UPDATE.value
|
||||
values[names.index("platform_id_start")] = str(platformid)
|
||||
values[names.index("nem_id_start")] = str(nemid)
|
||||
msg = EmaneGlobalModel.config_data(flags=0, node_id=None, type_flags=typeflags, values=values)
|
||||
server.sock.send(msg)
|
||||
self.set_config("platform_id_start", str(platformid))
|
||||
self.set_config("nem_id_start", str(nemid))
|
||||
config_data = ConfigShim.config_data(0, None, typeflags, self.emane_config, self.get_configs())
|
||||
message = dataconversion.convert_config(config_data)
|
||||
server.sock.send(message)
|
||||
# increment nemid for next server by number of interfaces
|
||||
with self._ifccountslock:
|
||||
if server in self._ifccounts:
|
||||
|
@ -473,6 +467,7 @@ class EmaneManager(ConfigurableManager):
|
|||
self.buildnemxml()
|
||||
self.buildeventservicexml()
|
||||
|
||||
# TODO: remove need for tlv messaging
|
||||
def distributedctrlnet(self, ctrlnet):
|
||||
"""
|
||||
Distributed EMANE requires multiple control network prefixes to
|
||||
|
@ -489,8 +484,7 @@ class EmaneManager(ConfigurableManager):
|
|||
if len(servers) < 2:
|
||||
return
|
||||
|
||||
prefix = session.config.get("controlnet")
|
||||
prefix = getattr(session.options, "controlnet", prefix)
|
||||
prefix = session.options.get_config("controlnet")
|
||||
prefixes = prefix.split()
|
||||
# normal Config messaging will distribute controlnets
|
||||
if len(prefixes) >= len(servers):
|
||||
|
@ -509,76 +503,30 @@ class EmaneManager(ConfigurableManager):
|
|||
msg = coreapi.CoreConfMessage(flags=0, hdr=msghdr, data=rawmsg[coreapi.CoreMessage.header_len:])
|
||||
self.session.broker.handle_message(msg)
|
||||
|
||||
def xmldoc(self, doctype):
|
||||
"""
|
||||
Returns an XML xml.minidom.Document with a DOCTYPE tag set to the
|
||||
provided doctype string, and an initial element having the same
|
||||
name.
|
||||
"""
|
||||
# we hack in the DOCTYPE using the parser
|
||||
docstr = """<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!DOCTYPE %s SYSTEM "file:///usr/share/emane/dtd/%s.dtd">
|
||||
<%s/>""" % (doctype, doctype, doctype)
|
||||
# normally this would be: doc = Document()
|
||||
return parseString(docstr)
|
||||
|
||||
def xmlparam(self, doc, name, value):
|
||||
"""
|
||||
Convenience function for building a parameter tag of the format:
|
||||
<param name="name" value="value" />
|
||||
"""
|
||||
p = doc.createElement("param")
|
||||
p.setAttribute("name", name)
|
||||
p.setAttribute("value", value)
|
||||
return p
|
||||
|
||||
def xmlshimdefinition(self, doc, name):
|
||||
"""
|
||||
Convenience function for building a definition tag of the format:
|
||||
<shim definition="name" />
|
||||
"""
|
||||
p = doc.createElement("shim")
|
||||
p.setAttribute("definition", name)
|
||||
return p
|
||||
|
||||
def xmlwrite(self, doc, filename):
|
||||
"""
|
||||
Write the given XML document to the specified filename.
|
||||
"""
|
||||
pathname = os.path.join(self.session.session_dir, filename)
|
||||
with open(pathname, "w") as xml_file:
|
||||
doc.writexml(writer=xml_file, indent="", addindent=" ", newl="\n", encoding="UTF-8")
|
||||
|
||||
def setnodemodels(self):
|
||||
def check_node_models(self):
|
||||
"""
|
||||
Associate EmaneModel classes with EmaneNode nodes. The model
|
||||
configurations are stored in self.configs.
|
||||
"""
|
||||
for key in self._emane_nodes:
|
||||
self.setnodemodel(key)
|
||||
for node_id in self._emane_nodes:
|
||||
emane_node = self._emane_nodes[node_id]
|
||||
logger.debug("checking emane model for node: %s", node_id)
|
||||
|
||||
def setnodemodel(self, key):
|
||||
logger.debug("setting emane node model: %s", key)
|
||||
emane_node = self._emane_nodes[key]
|
||||
if key not in self.configs:
|
||||
logger.debug("no emane node model configuration, leaving")
|
||||
return False
|
||||
|
||||
for t, v in self.configs[key]:
|
||||
logger.debug("configuration: key(%s) value(%s)", t, v)
|
||||
if t is None:
|
||||
continue
|
||||
if t == self.emane_config.name:
|
||||
# skip nodes that already have a model set
|
||||
if emane_node.model:
|
||||
logger.debug("node(%s) already has model(%s)", emane_node.objid, emane_node.model.name)
|
||||
continue
|
||||
|
||||
# only use the first valid EmaneModel
|
||||
# convert model name to class (e.g. emane_rfpipe -> EmaneRfPipe)
|
||||
cls = self._modelclsmap[t]
|
||||
emane_node.setmodel(cls, v)
|
||||
return True
|
||||
# set model configured for node, due to legacy messaging configuration before nodes exist
|
||||
model_name = self.node_models.get(node_id)
|
||||
if not model_name:
|
||||
logger.error("emane node(%s) has no node model", node_id)
|
||||
raise ValueError("emane node has no model set")
|
||||
|
||||
# no model has been configured for this EmaneNode
|
||||
return False
|
||||
config = self.get_model_config(node_id=node_id, model_name=model_name)
|
||||
logger.debug("setting emane model(%s) config(%s)", model_name, config)
|
||||
model_class = self.models[model_name]
|
||||
emane_node.setmodel(model_class, config)
|
||||
|
||||
def nemlookup(self, nemid):
|
||||
"""
|
||||
|
@ -588,8 +536,8 @@ class EmaneManager(ConfigurableManager):
|
|||
emane_node = None
|
||||
netif = None
|
||||
|
||||
for key in self._emane_nodes:
|
||||
emane_node = self._emane_nodes[key]
|
||||
for node_id in self._emane_nodes:
|
||||
emane_node = self._emane_nodes[node_id]
|
||||
netif = emane_node.getnemnetif(nemid)
|
||||
if netif is not None:
|
||||
break
|
||||
|
@ -607,75 +555,17 @@ class EmaneManager(ConfigurableManager):
|
|||
count += len(emane_node.netifs())
|
||||
return count
|
||||
|
||||
def newplatformxmldoc(self, values, otadev=None, eventdev=None):
|
||||
"""
|
||||
Start a new platform XML file. Use global EMANE config values
|
||||
as keys. Override OTA manager and event service devices if
|
||||
specified (in order to support Raw Transport).
|
||||
"""
|
||||
doc = self.xmldoc("platform")
|
||||
plat = doc.getElementsByTagName("platform").pop()
|
||||
names = list(self.emane_config.getnames())
|
||||
platform_names = names[:len(self.emane_config.emulator_config)]
|
||||
platform_names.remove("platform_id_start")
|
||||
platform_values = list(values)
|
||||
if otadev:
|
||||
i = platform_names.index("otamanagerdevice")
|
||||
platform_values[i] = otadev
|
||||
|
||||
if eventdev:
|
||||
i = platform_names.index("eventservicedevice")
|
||||
platform_values[i] = eventdev
|
||||
|
||||
# append all platform options (except starting id) to doc
|
||||
for name in platform_names:
|
||||
value = self.emane_config.valueof(name, platform_values)
|
||||
param = self.xmlparam(doc, name, value)
|
||||
plat.appendChild(param)
|
||||
|
||||
return doc
|
||||
|
||||
def buildplatformxml(self, ctrlnet):
|
||||
"""
|
||||
Build a platform.xml file now that all nodes are configured.
|
||||
"""
|
||||
values = self.getconfig(None, "emane", self.emane_config.getdefaultvalues())[1]
|
||||
nemid = int(self.emane_config.valueof("nem_id_start", values))
|
||||
platformxmls = {}
|
||||
nemid = int(self.get_config("nem_id_start"))
|
||||
platform_xmls = {}
|
||||
|
||||
# assume self._objslock is already held here
|
||||
for key in sorted(self._emane_nodes.keys()):
|
||||
emane_node = self._emane_nodes[key]
|
||||
nems = emane_node.buildplatformxmlentry(self.xmldoc("platform"))
|
||||
for netif in sorted(nems, key=lambda x: x.node.objid):
|
||||
nementry = nems[netif]
|
||||
nementry.setAttribute("id", "%d" % nemid)
|
||||
key = netif.node.objid
|
||||
if netif.transport_type == "raw":
|
||||
key = "host"
|
||||
otadev = ctrlnet.brname
|
||||
eventdev = ctrlnet.brname
|
||||
else:
|
||||
otadev = None
|
||||
eventdev = None
|
||||
|
||||
if key not in platformxmls:
|
||||
platformxmls[key] = self.newplatformxmldoc(values, otadev, eventdev)
|
||||
|
||||
doc = platformxmls[key]
|
||||
plat = doc.getElementsByTagName("platform").pop()
|
||||
plat.appendChild(nementry)
|
||||
emane_node.setnemid(netif, nemid)
|
||||
macstr = self._hwaddr_prefix + ":00:00:"
|
||||
macstr += "%02X:%02X" % ((nemid >> 8) & 0xFF, nemid & 0xFF)
|
||||
netif.sethwaddr(MacAddress.from_string(macstr))
|
||||
nemid += 1
|
||||
|
||||
for key in sorted(platformxmls.keys()):
|
||||
if key == "host":
|
||||
self.xmlwrite(platformxmls["host"], "platform.xml")
|
||||
continue
|
||||
self.xmlwrite(platformxmls[key], "platform%d.xml" % key)
|
||||
nemid = emanexml.build_node_platform_xml(self, ctrlnet, emane_node, nemid, platform_xmls)
|
||||
|
||||
def buildnemxml(self):
|
||||
"""
|
||||
|
@ -684,23 +574,7 @@ class EmaneManager(ConfigurableManager):
|
|||
"""
|
||||
for key in sorted(self._emane_nodes.keys()):
|
||||
emane_node = self._emane_nodes[key]
|
||||
emane_node.build_xml_files(self)
|
||||
|
||||
def appendtransporttonem(self, doc, nem, nodenum, ifc=None):
|
||||
"""
|
||||
Given a nem XML node and EMANE WLAN node number, append
|
||||
a <transport/> tag to the NEM definition, required for using
|
||||
EMANE"s internal transport.
|
||||
"""
|
||||
emane_node = self._emane_nodes[nodenum]
|
||||
transtag = doc.createElement("transport")
|
||||
transtypestr = "virtual"
|
||||
|
||||
if ifc and ifc.transport_type == "raw":
|
||||
transtypestr = "raw"
|
||||
|
||||
transtag.setAttribute("definition", emane_node.transportxmlname(transtypestr))
|
||||
nem.appendChild(transtag)
|
||||
emanexml.build_xml_files(self, emane_node)
|
||||
|
||||
def buildtransportxml(self):
|
||||
"""
|
||||
|
@ -713,13 +587,11 @@ class EmaneManager(ConfigurableManager):
|
|||
Build the libemaneeventservice.xml file if event service options
|
||||
were changed in the global config.
|
||||
"""
|
||||
defaults = self.emane_config.getdefaultvalues()
|
||||
values = self.getconfig(None, "emane", self.emane_config.getdefaultvalues())[1]
|
||||
need_xml = False
|
||||
keys = ("eventservicegroup", "eventservicedevice")
|
||||
for k in keys:
|
||||
a = self.emane_config.valueof(k, defaults)
|
||||
b = self.emane_config.valueof(k, values)
|
||||
default_values = self.emane_config.default_values()
|
||||
for name in ["eventservicegroup", "eventservicedevice"]:
|
||||
a = default_values[name]
|
||||
b = self.get_config(name)
|
||||
if a != b:
|
||||
need_xml = True
|
||||
|
||||
|
@ -729,20 +601,14 @@ class EmaneManager(ConfigurableManager):
|
|||
return
|
||||
|
||||
try:
|
||||
group, port = self.emane_config.valueof("eventservicegroup", values).split(":")
|
||||
group, port = self.get_config("eventservicegroup").split(":")
|
||||
except ValueError:
|
||||
logger.exception("invalid eventservicegroup in EMANE config")
|
||||
return
|
||||
|
||||
dev = self.emane_config.valueof("eventservicedevice", values)
|
||||
doc = self.xmldoc("emaneeventmsgsvc")
|
||||
es = doc.getElementsByTagName("emaneeventmsgsvc").pop()
|
||||
kvs = (("group", group), ("port", port), ("device", dev), ("mcloop", "1"), ("ttl", "32"))
|
||||
xmlutils.add_text_elements_from_tuples(doc, es, kvs)
|
||||
filename = "libemaneeventservice.xml"
|
||||
self.xmlwrite(doc, filename)
|
||||
pathname = os.path.join(self.session.session_dir, filename)
|
||||
self.initeventservice(filename=pathname)
|
||||
dev = self.get_config("eventservicedevice")
|
||||
|
||||
emanexml.create_event_service_xml(group, port, dev, self.session.session_dir)
|
||||
|
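buildeventservicexml only regenerates libemaneeventservice.xml when the event-service group or device differs from the defaults; the file itself is a flat list of text elements (group, port, device, mcloop, ttl), as the removed minidom code above shows. A hedged lxml sketch approximating what create_event_service_xml is expected to emit; the group and port values are examples, and the device mirrors the eventservicedevice default:

from lxml import etree

root = etree.Element("emaneeventmsgsvc")
for name, value in [("group", "224.1.2.8"), ("port", "45703"),
                    ("device", "ctrl0"), ("mcloop", "1"), ("ttl", "32")]:
    element = etree.SubElement(root, name)
    element.text = value
print(etree.tostring(root, pretty_print=True))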
||||
def startdaemons(self):
|
||||
"""
|
||||
|
@ -751,8 +617,8 @@ class EmaneManager(ConfigurableManager):
|
|||
"""
|
||||
logger.info("starting emane daemons...")
|
||||
loglevel = str(EmaneManager.DEFAULT_LOG_LEVEL)
|
||||
cfgloglevel = self.session.get_config_item_int("emane_log_level")
|
||||
realtime = self.session.get_config_item_bool("emane_realtime", True)
|
||||
cfgloglevel = self.session.options.get_config_int("emane_log_level")
|
||||
realtime = self.session.options.get_config_bool("emane_realtime", default=True)
|
||||
if cfgloglevel:
|
||||
logger.info("setting user-defined EMANE log level: %d", cfgloglevel)
|
||||
loglevel = str(cfgloglevel)
|
||||
|
@ -761,13 +627,12 @@ class EmaneManager(ConfigurableManager):
|
|||
if realtime:
|
||||
emanecmd += "-r",
|
||||
|
||||
values = self.getconfig(None, "emane", self.emane_config.getdefaultvalues())[1]
|
||||
otagroup, otaport = self.emane_config.valueof("otamanagergroup", values).split(":")
|
||||
otadev = self.emane_config.valueof("otamanagerdevice", values)
|
||||
otagroup, otaport = self.get_config("otamanagergroup").split(":")
|
||||
otadev = self.get_config("otamanagerdevice")
|
||||
otanetidx = self.session.get_control_net_index(otadev)
|
||||
|
||||
eventgroup, eventport = self.emane_config.valueof("eventservicegroup", values).split(":")
|
||||
eventdev = self.emane_config.valueof("eventservicedevice", values)
|
||||
eventgroup, eventport = self.get_config("eventservicegroup").split(":")
|
||||
eventdev = self.get_config("eventservicedevice")
|
||||
eventservicenetidx = self.session.get_control_net_index(eventdev)
|
||||
|
||||
run_emane_on_host = False
|
||||
|
@ -799,8 +664,7 @@ class EmaneManager(ConfigurableManager):
|
|||
node.check_cmd(args)
|
||||
|
||||
# start emane
|
||||
args = emanecmd + ["-f", os.path.join(path, "emane%d.log" % n),
|
||||
os.path.join(path, "platform%d.xml" % n)]
|
||||
args = emanecmd + ["-f", os.path.join(path, "emane%d.log" % n), os.path.join(path, "platform%d.xml" % n)]
|
||||
output = node.check_cmd(args)
|
||||
logger.info("node(%s) emane daemon running: %s", node.name, args)
|
||||
logger.info("node(%s) emane daemon output: %s", node.name, output)
|
||||
|
@ -837,7 +701,7 @@ class EmaneManager(ConfigurableManager):
|
|||
except CoreCommandError:
|
||||
logger.exception("error shutting down emane daemons")
|
||||
|
||||
def installnetifs(self, do_netns=True):
|
||||
def installnetifs(self):
|
||||
"""
|
||||
Install TUN/TAP virtual interfaces into their proper namespaces
|
||||
now that the EMANE daemons are running.
|
||||
|
@ -845,7 +709,7 @@ class EmaneManager(ConfigurableManager):
|
|||
for key in sorted(self._emane_nodes.keys()):
|
||||
emane_node = self._emane_nodes[key]
|
||||
logger.info("emane install netifs for node: %d", key)
|
||||
emane_node.installnetifs(do_netns)
|
||||
emane_node.installnetifs()
|
||||
|
||||
def deinstallnetifs(self):
|
||||
"""
|
||||
|
@ -855,30 +719,13 @@ class EmaneManager(ConfigurableManager):
|
|||
emane_node = self._emane_nodes[key]
|
||||
emane_node.deinstallnetifs()
|
||||
|
||||
def configure(self, session, config_data):
|
||||
"""
|
||||
Handle configuration messages for global EMANE config.
|
||||
|
||||
:param core.conf.ConfigData config_data: configuration data for carrying out a configuration
|
||||
"""
|
||||
r = self.emane_config.configure_emane(session, config_data)
|
||||
|
||||
# extra logic to start slave Emane object after nemid has been configured from the master
|
||||
config_type = config_data.type
|
||||
if config_type == ConfigFlags.UPDATE.value and self.session.master is False:
|
||||
# instantiation was previously delayed by self.setup()
|
||||
# returning Emane.NOT_READY
|
||||
self.session.instantiate()
|
||||
|
||||
return r
|
||||
|
||||
def doeventmonitor(self):
|
||||
"""
|
||||
Returns boolean whether or not EMANE events will be monitored.
|
||||
"""
|
||||
# this support must be explicitly turned on; by default, CORE will
|
||||
# generate the EMANE events when nodes are moved
|
||||
return self.session.get_config_item_bool("emane_event_monitor", False)
|
||||
return self.session.options.get_config_bool("emane_event_monitor")
|
||||
|
||||
def genlocationevents(self):
|
||||
"""
|
||||
|
@ -886,7 +733,7 @@ class EmaneManager(ConfigurableManager):
|
|||
"""
|
||||
# By default, CORE generates EMANE location events when nodes
|
||||
# are moved; this can be explicitly disabled in core.conf
|
||||
tmp = self.session.get_config_item_bool("emane_event_generate")
|
||||
tmp = self.session.options.get_config_bool("emane_event_generate")
|
||||
if tmp is None:
|
||||
tmp = not self.doeventmonitor()
|
||||
return tmp
|
||||
|
@ -900,11 +747,10 @@ class EmaneManager(ConfigurableManager):
|
|||
return
|
||||
|
||||
if self.service is None:
|
||||
errmsg = "Warning: EMANE events will not be generated " \
|
||||
"because the emaneeventservice\n binding was " \
|
||||
"unable to load " \
|
||||
"(install the python-emaneeventservice bindings)"
|
||||
logger.error(errmsg)
|
||||
logger.error("Warning: EMANE events will not be generated "
|
||||
"because the emaneeventservice\n binding was "
|
||||
"unable to load "
|
||||
"(install the python-emaneeventservice bindings)")
|
||||
return
|
||||
self.doeventloop = True
|
||||
self.eventmonthread = threading.Thread(target=self.eventmonitorloop)
|
||||
|
@ -962,9 +808,10 @@ class EmaneManager(ConfigurableManager):
|
|||
|
||||
# yaw,pitch,roll,azimuth,elevation,velocity are unhandled
|
||||
lat = attrs["latitude"]
|
||||
long = attrs["longitude"]
|
||||
lon = attrs["longitude"]
|
||||
alt = attrs["altitude"]
|
||||
self.handlelocationeventtoxyz(txnemid, lat, long, alt)
|
||||
logger.debug("emane location event: %s,%s,%s", lat, lon, alt)
|
||||
self.handlelocationeventtoxyz(txnemid, lat, lon, alt)
|
||||
|
||||
def handlelocationeventtoxyz(self, nemid, lat, lon, alt):
|
||||
"""
|
||||
|
@ -1002,7 +849,7 @@ class EmaneManager(ConfigurableManager):
|
|||
|
||||
# don"t use node.setposition(x,y,z) which generates an event
|
||||
node.position.set(x, y, z)
|
||||
node_data = node.data(message_type=0, lat=lat, lon=lon, alt=alt)
|
||||
node_data = node.data(message_type=0, lat=str(lat), lon=str(lon), alt=str(alt))
|
||||
self.session.broadcast_node(node_data)
|
||||
return True
|
||||
|
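Note that the handler above now passes lat/lon/alt to node.data() as strings, matching the docstring change to PyCoreObj.data earlier in this diff. A hedged sketch of turning an EMANE location event's attributes into a broadcastable node update, with the attribute names taken from the handler above:

def location_to_node_data(node, attrs):
    lat = attrs["latitude"]
    lon = attrs["longitude"]
    alt = attrs["altitude"]
    # geo coordinates are sent along as strings; the position itself is set
    # separately to avoid generating another event
    return node.data(message_type=0, lat=str(lat), lon=str(lon), alt=str(alt))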
||||
|
@ -1019,6 +866,7 @@ class EmaneGlobalModel(EmaneModel):
|
|||
"""
|
||||
Global EMANE configuration options.
|
||||
"""
|
||||
|
||||
_DEFAULT_DEV = "ctrl0"
|
||||
|
||||
name = "emane"
|
||||
|
@ -1033,22 +881,30 @@ class EmaneGlobalModel(EmaneModel):
|
|||
emulator_config = emanemanifest.parse(emulator_xml, emulator_defaults)
|
||||
emulator_config.insert(
|
||||
0,
|
||||
("platform_id_start", ConfigDataTypes.INT32.value, "1", "", "Starting Platform ID (core)")
|
||||
Configuration(_id="platform_id_start", _type=ConfigDataTypes.INT32, default="1",
|
||||
label="Starting Platform ID (core)")
|
||||
)
|
||||
|
||||
nem_config = [
|
||||
("nem_id_start", ConfigDataTypes.INT32.value, "1", "", "Starting NEM ID (core)"),
|
||||
Configuration(_id="nem_id_start", _type=ConfigDataTypes.INT32, default="1",
|
||||
label="Starting NEM ID (core)")
|
||||
]
|
||||
|
||||
config_matrix_override = emulator_config + nem_config
|
||||
config_groups_override = "Platform Attributes:1-%d|NEM Parameters:%d-%d" % (
|
||||
len(emulator_config), len(emulator_config) + 1, len(config_matrix_override))
|
||||
@classmethod
|
||||
def configurations(cls):
|
||||
return cls.emulator_config + cls.nem_config
|
||||
|
||||
@classmethod
|
||||
def config_groups(cls):
|
||||
emulator_len = len(cls.emulator_config)
|
||||
config_len = len(cls.configurations())
|
||||
return [
|
||||
ConfigGroup("Platform Attributes", 1, emulator_len),
|
||||
ConfigGroup("NEM Parameters", emulator_len + 1, config_len)
|
||||
]
|
||||
|
||||
def __init__(self, session, object_id=None):
|
||||
EmaneModel.__init__(self, session, object_id)
|
||||
super(EmaneGlobalModel, self).__init__(session, object_id)
|
||||
|
||||
def build_xml_files(self, emane_manager, interface):
|
||||
"""
|
||||
Build the necessary nem, mac, and phy XMLs in the given path.
|
||||
"""
|
||||
def build_xml_files(self, config, interface=None):
|
||||
raise NotImplementedError
|
||||
|
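EmaneGlobalModel.config_groups() above carves the combined configurations() list into two 1-based index ranges: the platform options first, then the NEM options appended after them. A quick sketch of how those ranges line up, using hypothetical option lists:

emulator_config = ["platform_id_start", "otamanagerdevice", "eventservicedevice"]
nem_config = ["nem_id_start"]
configurations = emulator_config + nem_config

emulator_len = len(emulator_config)
groups = [
    ("Platform Attributes", 1, emulator_len),                  # entries 1-3
    ("NEM Parameters", emulator_len + 1, len(configurations))  # entry 4
]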
|
|
@ -1,4 +1,5 @@
|
|||
from core import logger
|
||||
from core.conf import Configuration
|
||||
from core.enumerations import ConfigDataTypes
|
||||
|
||||
manifest = None
|
||||
|
@ -23,7 +24,7 @@ def _type_value(config_type):
|
|||
config_type = "FLOAT"
|
||||
elif config_type == "INETADDR":
|
||||
config_type = "STRING"
|
||||
return ConfigDataTypes[config_type].value
|
||||
return ConfigDataTypes[config_type]
|
||||
|
||||
|
||||
def _get_possible(config_type, config_regex):
|
||||
|
@ -33,17 +34,16 @@ def _get_possible(config_type, config_regex):
|
|||
:param str config_type: emane configuration type
|
||||
:param str config_regex: emane configuration regex
|
||||
:return: a string listing comma delimited values, if needed, empty string otherwise
|
||||
:rtype: str
|
||||
:rtype: list
|
||||
"""
|
||||
if config_type == "bool":
|
||||
return "On,Off"
|
||||
return ["On", "Off"]
|
||||
|
||||
if config_type == "string" and config_regex:
|
||||
possible = config_regex[2:-2]
|
||||
possible = possible.replace("|", ",")
|
||||
return possible
|
||||
return possible.split("|")
|
||||
|
||||
return ""
|
||||
return []
|
||||
|
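_get_possible above now returns a list instead of a comma-delimited string: booleans always map to ["On", "Off"], and a constrained string regex such as "^(foo|bar)$" has its first and last two characters stripped before splitting on "|". For illustration, with made-up option values:

# mirrors the behavior of _get_possible() defined above
assert _get_possible("bool", None) == ["On", "Off"]
assert _get_possible("string", "^(vhf|uhf)$") == ["vhf", "uhf"]
assert _get_possible("uint32", None) == []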
||||
|
||||
def _get_default(config_type_name, config_value):
|
||||
|
@ -116,7 +116,13 @@ def parse(manifest_path, defaults):
|
|||
if config_name.endswith("uri"):
|
||||
config_descriptions = "%s file" % config_descriptions
|
||||
|
||||
config_tuple = (config_name, config_type_value, config_default, possible, config_descriptions)
|
||||
configurations.append(config_tuple)
|
||||
configuration = Configuration(
|
||||
_id=config_name,
|
||||
_type=config_type_value,
|
||||
default=config_default,
|
||||
options=possible,
|
||||
label=config_descriptions
|
||||
)
|
||||
configurations.append(configuration)
|
||||
|
||||
return configurations
|
||||
|
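parse() now yields Configuration objects straight from an EMANE manifest, so callers can hand the result directly to model classes. A hedged usage sketch; the manifest path, defaults, and attribute access are illustrative assumptions rather than confirmed API details:

from core.emane import emanemanifest

# defaults override the manifest-provided values for selected options (hypothetical)
defaults = {"flowcontrolenable": "0"}
configurations = emanemanifest.parse("rfpipemaclayer.xml", defaults)
for configuration in configurations:
    print("%s=%s" % (configuration.id, configuration.default))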
|
|
@ -1,74 +1,15 @@
|
|||
"""
|
||||
Defines Emane Models used within CORE.
|
||||
"""
|
||||
import os
|
||||
|
||||
from core import logger
|
||||
from core.conf import ConfigGroup
|
||||
from core.conf import Configuration
|
||||
from core.emane import emanemanifest
|
||||
from core.misc import utils
|
||||
from core.enumerations import ConfigDataTypes
|
||||
from core.mobility import WirelessModel
|
||||
from core.xml import xmlutils
|
||||
|
||||
|
||||
def value_to_params(doc, name, value):
|
||||
"""
|
||||
Helper to convert a parameter to a paramlist. Returns an XML paramlist, or None if the value does not expand to
|
||||
multiple values.
|
||||
|
||||
:param xml.dom.minidom.Document doc: xml document
|
||||
:param name: name of element for params
|
||||
:param str value: value string to convert to tuple
|
||||
:return: xml document with added params or None, when an invalid value has been provided
|
||||
"""
|
||||
try:
|
||||
values = utils.make_tuple_fromstr(value, str)
|
||||
except SyntaxError:
|
||||
logger.exception("error in value string to param list")
|
||||
return None
|
||||
|
||||
if not hasattr(values, "__iter__"):
|
||||
return None
|
||||
|
||||
if len(values) < 2:
|
||||
return None
|
||||
|
||||
return xmlutils.add_param_list_to_parent(doc, parent=None, name=name, values=values)
|
||||
|
||||
|
||||
class EmaneModelMetaClass(type):
|
||||
"""
|
||||
Hack into making class level properties to streamline emane model creation, until the Configurable class is
|
||||
removed or refactored.
|
||||
"""
|
||||
|
||||
@property
|
||||
def config_matrix(cls):
|
||||
"""
|
||||
Convenience method for creating the config matrix, allow for a custom override.
|
||||
|
||||
:param EmaneModel cls: emane class
|
||||
:return: config matrix value
|
||||
:rtype: list
|
||||
"""
|
||||
if cls.config_matrix_override:
|
||||
return cls.config_matrix_override
|
||||
else:
|
||||
return cls.mac_config + cls.phy_config
|
||||
|
||||
@property
|
||||
def config_groups(cls):
|
||||
"""
|
||||
Convenience method for creating the config groups, allow for a custom override.
|
||||
|
||||
:param EmaneModel cls: emane class
|
||||
:return: config groups value
|
||||
:rtype: str
|
||||
"""
|
||||
if cls.config_groups_override:
|
||||
return cls.config_groups_override
|
||||
else:
|
||||
mac_len = len(cls.mac_config)
|
||||
config_len = len(cls.config_matrix)
|
||||
return "MAC Parameters:1-%d|PHY Parameters:%d-%d" % (mac_len, mac_len + 1, config_len)
|
||||
from core.xml import emanexml
|
||||
|
||||
|
||||
class EmaneModel(WirelessModel):
|
||||
|
@ -77,8 +18,6 @@ class EmaneModel(WirelessModel):
|
|||
handling configuration messages based on the list of
|
||||
configurable parameters. Helper functions also live here.
|
||||
"""
|
||||
__metaclass__ = EmaneModelMetaClass
|
||||
|
||||
# default mac configuration settings
|
||||
mac_library = None
|
||||
mac_xml = None
|
||||
|
@ -95,277 +34,69 @@ class EmaneModel(WirelessModel):
|
|||
}
|
||||
phy_config = emanemanifest.parse(phy_xml, phy_defaults)
|
||||
|
||||
# support for external configurations
|
||||
external_config = [
|
||||
Configuration("external", ConfigDataTypes.BOOL, default="0"),
|
||||
Configuration("platformendpoint", ConfigDataTypes.STRING, default="127.0.0.1:40001"),
|
||||
Configuration("transportendpoint", ConfigDataTypes.STRING, default="127.0.0.1:50002")
|
||||
]
|
||||
|
||||
config_ignore = set()
|
||||
config_groups_override = None
|
||||
config_matrix_override = None
|
||||
|
||||
def __init__(self, session, object_id=None):
|
||||
WirelessModel.__init__(self, session, object_id)
|
||||
@classmethod
|
||||
def configurations(cls):
|
||||
return cls.mac_config + cls.phy_config + cls.external_config
|
||||
|
||||
def build_xml_files(self, emane_manager, interface):
|
||||
@classmethod
|
||||
def config_groups(cls):
|
||||
mac_len = len(cls.mac_config)
|
||||
phy_len = len(cls.phy_config) + mac_len
|
||||
config_len = len(cls.configurations())
|
||||
return [
|
||||
ConfigGroup("MAC Parameters", 1, mac_len),
|
||||
ConfigGroup("PHY Parameters", mac_len + 1, phy_len),
|
||||
ConfigGroup("External Parameters", phy_len + 1, config_len)
|
||||
]
|
||||
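The group ranges above are 1-based and inclusive over the flat configurations() list; a small self-contained sketch with illustrative option counts (not taken from this changeset) shows how the boundaries line up:

# hypothetical option counts for some EMANE model
mac_len, phy_len, external_len = 12, 8, 3
groups = [
    ("MAC Parameters", 1, mac_len),
    ("PHY Parameters", mac_len + 1, mac_len + phy_len),
    ("External Parameters", mac_len + phy_len + 1, mac_len + phy_len + external_len),
]
print(groups)
# [('MAC Parameters', 1, 12), ('PHY Parameters', 13, 20), ('External Parameters', 21, 23)]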
|
||||
def build_xml_files(self, config, interface=None):
|
||||
"""
|
||||
Builds xml files for emane. Includes a nem.xml file that points to both mac.xml and phy.xml definitions.
|
||||
Builds xml files for this emane model. Creates a nem.xml file that points to both mac.xml and phy.xml
|
||||
definitions.
|
||||
|
||||
:param core.emane.emanemanager.EmaneManager emane_manager: core emane manager
|
||||
:param dict config: emane model configuration for the node and interface
|
||||
:param interface: interface for the emane node
|
||||
:return: nothing
|
||||
"""
|
||||
# retrieve configuration values
|
||||
values = emane_manager.getifcconfig(self.object_id, self.name, self.getdefaultvalues(), interface)
|
||||
if values is None:
|
||||
return
|
||||
nem_name = emanexml.nem_file_name(self, interface)
|
||||
mac_name = emanexml.mac_file_name(self, interface)
|
||||
phy_name = emanexml.phy_file_name(self, interface)
|
||||
|
||||
# create document and write to disk
|
||||
nem_name = self.nem_name(interface)
|
||||
nem_document = self.create_nem_doc(emane_manager, interface)
|
||||
emane_manager.xmlwrite(nem_document, nem_name)
|
||||
# check if this is external
|
||||
transport_type = "virtual"
|
||||
if interface and interface.transport_type == "raw":
|
||||
transport_type = "raw"
|
||||
transport_name = emanexml.transport_file_name(self.object_id, transport_type)
|
||||
|
||||
# create mac document and write to disk
|
||||
mac_name = self.mac_name(interface)
|
||||
mac_document = self.create_mac_doc(emane_manager, values)
|
||||
emane_manager.xmlwrite(mac_document, mac_name)
|
||||
# create nem xml file
|
||||
nem_file = os.path.join(self.session.session_dir, nem_name)
|
||||
emanexml.create_nem_xml(self, config, nem_file, transport_name, mac_name, phy_name)
|
||||
|
||||
# create phy document and write to disk
|
||||
phy_name = self.phy_name(interface)
|
||||
phy_document = self.create_phy_doc(emane_manager, values)
|
||||
emane_manager.xmlwrite(phy_document, phy_name)
|
||||
# create mac xml file
|
||||
mac_file = os.path.join(self.session.session_dir, mac_name)
|
||||
emanexml.create_mac_xml(self, config, mac_file)
|
||||
|
||||
def create_nem_doc(self, emane_manager, interface):
|
||||
"""
|
||||
Create the nem xml document.
|
||||
# create phy xml file
|
||||
phy_file = os.path.join(self.session.session_dir, phy_name)
|
||||
emanexml.create_phy_xml(self, config, phy_file)
|
||||
|
||||
:param core.emane.emanemanager.EmaneManager emane_manager: core emane manager
|
||||
:param interface: interface for the emane node
|
||||
:return: nem document
|
||||
:rtype: xml.dom.minidom.Document
|
||||
"""
|
||||
mac_name = self.mac_name(interface)
|
||||
phy_name = self.phy_name(interface)
|
||||
|
||||
nem_document = emane_manager.xmldoc("nem")
|
||||
nem_element = nem_document.getElementsByTagName("nem").pop()
|
||||
nem_element.setAttribute("name", "%s NEM" % self.name)
|
||||
emane_manager.appendtransporttonem(nem_document, nem_element, self.object_id, interface)
|
||||
|
||||
mac_element = nem_document.createElement("mac")
|
||||
mac_element.setAttribute("definition", mac_name)
|
||||
nem_element.appendChild(mac_element)
|
||||
|
||||
phy_element = nem_document.createElement("phy")
|
||||
phy_element.setAttribute("definition", phy_name)
|
||||
nem_element.appendChild(phy_element)
|
||||
|
||||
return nem_document
|
||||
|
||||
def create_mac_doc(self, emane_manager, values):
|
||||
"""
|
||||
Create the mac xml document.
|
||||
|
||||
:param core.emane.emanemanager.EmaneManager emane_manager: core emane manager
|
||||
:param tuple values: all current configuration values, mac + phy
|
||||
:return: nem document
|
||||
:rtype: xml.dom.minidom.Document
|
||||
"""
|
||||
names = list(self.getnames())
|
||||
mac_names = names[:len(self.mac_config)]
|
||||
|
||||
mac_document = emane_manager.xmldoc("mac")
|
||||
mac_element = mac_document.getElementsByTagName("mac").pop()
|
||||
mac_element.setAttribute("name", "%s MAC" % self.name)
|
||||
|
||||
if not self.mac_library:
|
||||
raise ValueError("must define emane model library")
|
||||
mac_element.setAttribute("library", self.mac_library)
|
||||
|
||||
for name in mac_names:
|
||||
# ignore custom configurations
|
||||
if name in self.config_ignore:
|
||||
continue
|
||||
|
||||
# check if value is a multi param
|
||||
value = self.valueof(name, values)
|
||||
param = value_to_params(mac_document, name, value)
|
||||
if not param:
|
||||
param = emane_manager.xmlparam(mac_document, name, value)
|
||||
|
||||
mac_element.appendChild(param)
|
||||
|
||||
return mac_document
|
||||
|
||||
def create_phy_doc(self, emane_manager, values):
|
||||
"""
|
||||
Create the phy xml document.
|
||||
|
||||
:param core.emane.emanemanager.EmaneManager emane_manager: core emane manager
|
||||
:param tuple values: all current configuration values, mac + phy
|
||||
:return: nem document
|
||||
:rtype: xml.dom.minidom.Document
|
||||
"""
|
||||
names = list(self.getnames())
|
||||
phy_names = names[len(self.mac_config):]
|
||||
|
||||
phy_document = emane_manager.xmldoc("phy")
|
||||
phy_element = phy_document.getElementsByTagName("phy").pop()
|
||||
phy_element.setAttribute("name", "%s PHY" % self.name)
|
||||
|
||||
if self.phy_library:
|
||||
phy_element.setAttribute("library", self.phy_library)
|
||||
|
||||
# append all phy options
|
||||
for name in phy_names:
|
||||
# ignore custom configurations
|
||||
if name in self.config_ignore:
|
||||
continue
|
||||
|
||||
# check if value is a multi param
|
||||
value = self.valueof(name, values)
|
||||
param = value_to_params(phy_document, name, value)
|
||||
if not param:
|
||||
param = emane_manager.xmlparam(phy_document, name, value)
|
||||
|
||||
phy_element.appendChild(param)
|
||||
|
||||
return phy_document
|
||||
|
||||
@classmethod
|
||||
def configure_emane(cls, session, config_data):
|
||||
"""
|
||||
Handle configuration messages for configuring an emane model.
|
||||
|
||||
:param core.session.Session session: session to configure emane
|
||||
:param core.conf.ConfigData config_data: configuration data for carrying out a configuration
|
||||
"""
|
||||
return cls.configure(session.emane, config_data)
|
||||
|
||||
def post_startup(self, emane_manager):
|
||||
def post_startup(self):
|
||||
"""
|
||||
Logic to execute after the emane manager is finished with startup.
|
||||
|
||||
:param core.emane.emanemanager.EmaneManager emane_manager: emane manager for the session
|
||||
:return: nothing
|
||||
"""
|
||||
logger.info("emane model(%s) has no post setup tasks", self.name)
|
||||
|
||||
def build_nem_xml(self, doc, emane_node, interface):
|
||||
"""
|
||||
Build the NEM definition that goes into the platform.xml file.
|
||||
|
||||
This returns an XML element that will be added to the <platform/> element.
|
||||
|
||||
This default method supports per-interface config (e.g. <nem definition="n2_0_63emane_rfpipe.xml" id="1">
|
||||
or per-EmaneNode config (e.g. <nem definition="n1emane_rfpipe.xml" id="1">.
|
||||
|
||||
This can be overridden by a model for NEM flexibility; n is the EmaneNode.
|
||||
|
||||
<nem name="NODE-001" definition="rfpipenem.xml">
|
||||
|
||||
:param xml.dom.minidom.Document doc: xml document
|
||||
:param core.emane.nodes.EmaneNode emane_node: emane node to get information from
|
||||
:param interface: interface for the emane node
|
||||
:return: created platform xml
|
||||
"""
|
||||
# if this netif contains a non-standard (per-interface) config,
|
||||
# then we need to use a more specific xml file here
|
||||
nem_name = self.nem_name(interface)
|
||||
nem = doc.createElement("nem")
|
||||
nem.setAttribute("name", interface.localname)
|
||||
nem.setAttribute("definition", nem_name)
|
||||
return nem
|
||||
|
||||
def build_transport_xml(self, doc, emane_node, interface):
|
||||
"""
|
||||
Build the transport definition that goes into the platform.xml file.
|
||||
This returns an XML element that will be added to the nem definition.
|
||||
This default method supports raw and virtual transport types, but may be
|
||||
overridden by a model to support, e.g., a pluggable virtual transport.
|
||||
|
||||
<transport definition="transvirtual.xml" group="1">
|
||||
<param name="device" value="n1.0.158" />
|
||||
</transport>
|
||||
|
||||
:param xml.dom.minidom.Document doc: xml document
|
||||
:param core.emane.nodes.EmaneNode emane_node: emane node to get information from
|
||||
:param interface: interface for the emane node
|
||||
:return: created transport xml
|
||||
"""
|
||||
transport_type = interface.transport_type
|
||||
if not transport_type:
|
||||
logger.info("warning: %s interface type unsupported!", interface.name)
|
||||
transport_type = "raw"
|
||||
transport_name = emane_node.transportxmlname(transport_type)
|
||||
|
||||
transport = doc.createElement("transport")
|
||||
transport.setAttribute("definition", transport_name)
|
||||
|
||||
param = doc.createElement("param")
|
||||
param.setAttribute("name", "device")
|
||||
param.setAttribute("value", interface.name)
|
||||
|
||||
transport.appendChild(param)
|
||||
return transport
|
||||
|
||||
def _basename(self, interface=None):
|
||||
"""
|
||||
Create name that is leveraged for configuration file creation.
|
||||
|
||||
:param interface: interface for this model
|
||||
:return: basename used for file creation
|
||||
:rtype: str
|
||||
"""
|
||||
name = "n%s" % self.object_id
|
||||
emane_manager = self.session.emane
|
||||
|
||||
if interface:
|
||||
node_id = interface.node.objid
|
||||
if emane_manager.getifcconfig(node_id, self.name, None, interface) is not None:
|
||||
name = interface.localname.replace(".", "_")
|
||||
|
||||
return "%s%s" % (name, self.name)
|
||||
|
||||
def nem_name(self, interface=None):
|
||||
"""
|
||||
Return the string name for the NEM XML file, e.g. "n3rfpipenem.xml"
|
||||
|
||||
:param interface: interface for this model
|
||||
:return: nem xml filename
|
||||
:rtype: str
|
||||
"""
|
||||
basename = self._basename(interface)
|
||||
append = ""
|
||||
if interface and interface.transport_type == "raw":
|
||||
append = "_raw"
|
||||
return "%snem%s.xml" % (basename, append)
|
||||
|
||||
def shim_name(self, interface=None):
|
||||
"""
|
||||
Return the string name for the SHIM XML file, e.g. "commeffectshim.xml"
|
||||
|
||||
:param interface: interface for this model
|
||||
:return: shim xml filename
|
||||
:rtype: str
|
||||
"""
|
||||
return "%sshim.xml" % self._basename(interface)
|
||||
|
||||
def mac_name(self, interface=None):
|
||||
"""
|
||||
Return the string name for the MAC XML file, e.g. "n3rfpipemac.xml"
|
||||
|
||||
:param interface: interface for this model
|
||||
:return: mac xml filename
|
||||
:rtype: str
|
||||
"""
|
||||
return "%smac.xml" % self._basename(interface)
|
||||
|
||||
def phy_name(self, interface=None):
|
||||
"""
|
||||
Return the string name for the PHY XML file, e.g. "n3rfpipephy.xml"
|
||||
|
||||
:param interface: interface for this model
|
||||
:return: phy xml filename
|
||||
:rtype: str
|
||||
"""
|
||||
return "%sphy.xml" % self._basename(interface)
|
||||
|
||||
def update(self, moved, moved_netifs):
|
||||
"""
|
||||
Invoked from MobilityModel when nodes are moved; this causes
|
||||
|
|
|
@ -4,8 +4,6 @@ control of an EMANE emulation. An EmaneNode has several attached NEMs that
|
|||
share the same MAC+PHY model.
|
||||
"""
|
||||
|
||||
import os
|
||||
|
||||
from core import logger
|
||||
from core.coreobj import PyCoreNet
|
||||
from core.enumerations import LinkTypes
|
||||
|
@ -67,6 +65,12 @@ class EmaneNode(EmaneNet):
|
|||
def unlink(self, netif1, netif2):
|
||||
pass
|
||||
|
||||
def updatemodel(self, config):
|
||||
if not self.model:
|
||||
raise ValueError("no model set to update for node(%s)", self.objid)
|
||||
logger.info("node(%s) updating model(%s): %s", self.objid, self.model.name, config)
|
||||
self.model.set_configs(config, node_id=self.objid)
|
||||
|
||||
def setmodel(self, model, config):
|
||||
"""
|
||||
set the EmaneModel associated with this node
|
||||
|
@ -76,8 +80,10 @@ class EmaneNode(EmaneNet):
|
|||
# EmaneModel really uses values from ConfigurableManager
|
||||
# when buildnemxml() is called, not during init()
|
||||
self.model = model(session=self.session, object_id=self.objid)
|
||||
self.model.update_config(config)
|
||||
elif model.config_type == RegisterTlvs.MOBILITY.value:
|
||||
self.mobility = model(session=self.session, object_id=self.objid, values=config)
|
||||
self.mobility = model(session=self.session, object_id=self.objid)
|
||||
self.mobility.update_config(config)
|
||||
|
||||
def setnemid(self, netif, nemid):
|
||||
"""
|
||||
|
@ -111,95 +117,7 @@ class EmaneNode(EmaneNet):
|
|||
"""
|
||||
return sorted(self._netif.values(), key=lambda ifc: ifc.node.objid)
|
||||
|
||||
def buildplatformxmlentry(self, doc):
|
||||
"""
|
||||
Return a dictionary of XML elements describing the NEMs
|
||||
connected to this EmaneNode for inclusion in the platform.xml file.
|
||||
"""
|
||||
ret = {}
|
||||
if self.model is None:
|
||||
logger.info("warning: EmaneNode %s has no associated model", self.name)
|
||||
return ret
|
||||
|
||||
for netif in self.netifs():
|
||||
nementry = self.model.build_nem_xml(doc, self, netif)
|
||||
trans = self.model.build_transport_xml(doc, self, netif)
|
||||
nementry.appendChild(trans)
|
||||
ret[netif] = nementry
|
||||
|
||||
return ret
|
||||
|
||||
def build_xml_files(self, emane_manager):
|
||||
"""
|
||||
Let the configured model build the necessary nem, mac, and phy XMLs.
|
||||
|
||||
:param core.emane.emanemanager.EmaneManager emane_manager: core emane manager
|
||||
:return: nothing
|
||||
"""
|
||||
if self.model is None:
|
||||
return
|
||||
|
||||
# build XML for overall network (EmaneNode) configs
|
||||
self.model.build_xml_files(emane_manager, interface=None)
|
||||
|
||||
# build XML for specific interface (NEM) configs
|
||||
need_virtual = False
|
||||
need_raw = False
|
||||
vtype = "virtual"
|
||||
rtype = "raw"
|
||||
|
||||
for netif in self.netifs():
|
||||
self.model.build_xml_files(emane_manager, netif)
|
||||
if "virtual" in netif.transport_type:
|
||||
need_virtual = True
|
||||
vtype = netif.transport_type
|
||||
else:
|
||||
need_raw = True
|
||||
rtype = netif.transport_type
|
||||
|
||||
# build transport XML files depending on type of interfaces involved
|
||||
if need_virtual:
|
||||
self.buildtransportxml(emane_manager, vtype)
|
||||
|
||||
if need_raw:
|
||||
self.buildtransportxml(emane_manager, rtype)
|
||||
|
||||
def buildtransportxml(self, emane, transport_type):
|
||||
"""
|
||||
Write a transport XML file for the Virtual or Raw Transport.
|
||||
"""
|
||||
transdoc = emane.xmldoc("transport")
|
||||
trans = transdoc.getElementsByTagName("transport").pop()
|
||||
trans.setAttribute("name", "%s Transport" % transport_type.capitalize())
|
||||
trans.setAttribute("library", "trans%s" % transport_type.lower())
|
||||
trans.appendChild(emane.xmlparam(transdoc, "bitrate", "0"))
|
||||
|
||||
flowcontrol = False
|
||||
names = self.model.getnames()
|
||||
values = emane.getconfig(self.objid, self.model.name, self.model.getdefaultvalues())[1]
|
||||
|
||||
if "flowcontrolenable" in names and values:
|
||||
i = names.index("flowcontrolenable")
|
||||
if self.model.booltooffon(values[i]) == "on":
|
||||
flowcontrol = True
|
||||
|
||||
if "virtual" in transport_type.lower():
|
||||
if os.path.exists("/dev/net/tun_flowctl"):
|
||||
trans.appendChild(emane.xmlparam(transdoc, "devicepath", "/dev/net/tun_flowctl"))
|
||||
else:
|
||||
trans.appendChild(emane.xmlparam(transdoc, "devicepath", "/dev/net/tun"))
|
||||
if flowcontrol:
|
||||
trans.appendChild(emane.xmlparam(transdoc, "flowcontrolenable", "on"))
|
||||
|
||||
emane.xmlwrite(transdoc, self.transportxmlname(transport_type.lower()))
|
||||
|
||||
def transportxmlname(self, type):
|
||||
"""
|
||||
Return the string name for the Transport XML file, e.g. 'n3transvirtual.xml'
|
||||
"""
|
||||
return "n%strans%s.xml" % (self.objid, type)
|
||||
|
||||
def installnetifs(self, do_netns=True):
|
||||
def installnetifs(self):
|
||||
"""
|
||||
Install TAP devices into their namespaces. This is done after
|
||||
EMANE daemons have been started, because that is their only chance
|
||||
|
@ -211,12 +129,14 @@ class EmaneNode(EmaneNet):
|
|||
logger.error(warntxt)
|
||||
|
||||
for netif in self.netifs():
|
||||
if do_netns and "virtual" in netif.transport_type.lower():
|
||||
netif.install()
|
||||
netif.setaddrs()
|
||||
external = self.session.emane.get_config("external", self.objid, self.model.name)
|
||||
if external == "0":
|
||||
netif.setaddrs()
|
||||
|
||||
if not self.session.emane.genlocationevents():
|
||||
netif.poshook = None
|
||||
continue
|
||||
|
||||
# at this point we register location handlers for generating
|
||||
# EMANE location events
|
||||
netif.poshook = self.setnemposition
|
||||
|
|
|
@ -6,6 +6,7 @@ import os
|
|||
|
||||
from core import constants
|
||||
from core import logger
|
||||
from core.conf import Configuration
|
||||
from core.emane import emanemanifest
|
||||
from core.emane import emanemodel
|
||||
from core.enumerations import ConfigDataTypes
|
||||
|
@ -29,24 +30,29 @@ class EmaneTdmaModel(emanemodel.EmaneModel):
|
|||
default_schedule = os.path.join(constants.CORE_DATA_DIR, "examples", "tdma", "schedule.xml")
|
||||
mac_config.insert(
|
||||
0,
|
||||
(schedule_name, ConfigDataTypes.STRING.value, default_schedule, "", "TDMA schedule file (core)")
|
||||
Configuration(
|
||||
_id=schedule_name,
|
||||
_type=ConfigDataTypes.STRING,
|
||||
default=default_schedule,
|
||||
label="TDMA schedule file (core)"
|
||||
)
|
||||
)
|
||||
config_ignore = {schedule_name}
|
||||
|
||||
def post_startup(self, emane_manager):
|
||||
def post_startup(self):
|
||||
"""
|
||||
Logic to execute after the emane manager is finished with startup.
|
||||
|
||||
:param core.emane.emanemanager.EmaneManager emane_manager: emane manager for the session
|
||||
:return: nothing
|
||||
"""
|
||||
# get configured schedule
|
||||
values = emane_manager.getconfig(self.object_id, self.name, self.getdefaultvalues())[1]
|
||||
if values is None:
|
||||
config = self.session.emane.get_configs(node_id=self.object_id, config_type=self.name)
|
||||
if not config:
|
||||
return
|
||||
schedule = self.valueof(self.schedule_name, values)
|
||||
schedule = config[self.schedule_name]
|
||||
|
||||
event_device = emane_manager.event_device
|
||||
# get the set event device
|
||||
event_device = self.session.emane.event_device
|
||||
|
||||
# initiate tdma schedule
|
||||
logger.info("setting up tdma schedule: schedule(%s) device(%s)", schedule, event_device)
|
||||
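For reference, a hedged sketch of supplying a custom schedule before startup; the set_configs call mirrors the get_configs call above and its exact signature is an assumption, not taken from this changeset, and the schedule path is hypothetical:

session.emane.set_configs(
    {EmaneTdmaModel.schedule_name: "/tmp/custom_schedule.xml"},  # hypothetical path
    node_id=emane_net.objid,
    config_type=EmaneTdmaModel.name,
)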
|
|
|
@ -15,9 +15,9 @@ from core.enumerations import LinkTypes
|
|||
from core.enumerations import NodeTypes
|
||||
from core.misc import nodemaps
|
||||
from core.misc import nodeutils
|
||||
from core.service import ServiceManager
|
||||
from core.session import Session
|
||||
from core.xml.xmlparser import core_document_parser
|
||||
from core.xml.xmlwriter import core_document_writer
|
||||
from core.xml.corexml import CoreXmlReader, CoreXmlWriter
|
||||
|
||||
|
||||
def signal_handler(signal_number, _):
|
||||
|
@ -125,7 +125,7 @@ class EmuSession(Session):
|
|||
self.node_id_gen = IdGen()
|
||||
|
||||
# set default services
|
||||
self.services.defaultservices = {
|
||||
self.services.default_services = {
|
||||
"mdr": ("zebra", "OSPFv3MDR", "IPForward"),
|
||||
"PC": ("DefaultRoute",),
|
||||
"prouter": ("zebra", "OSPFv2", "OSPFv3", "IPForward"),
|
||||
|
@ -198,7 +198,7 @@ class EmuSession(Session):
|
|||
objects = [x for x in objects if x]
|
||||
if len(objects) < 2:
|
||||
raise ValueError("wireless link failure: %s", objects)
|
||||
logger.debug("handling wireless linking objects(%) connect(%s)", objects, connect)
|
||||
logger.debug("handling wireless linking objects(%s) connect(%s)", objects, connect)
|
||||
common_networks = objects[0].commonnets(objects[1])
|
||||
if not common_networks:
|
||||
raise ValueError("no common network found for wireless link/unlink")
|
||||
|
@ -325,9 +325,6 @@ class EmuSession(Session):
|
|||
:param core.enumerations.LinkTypes link_type: link type to delete
|
||||
:return: nothing
|
||||
"""
|
||||
# interface data
|
||||
# interface_one_data, interface_two_data = get_interfaces(link_data)
|
||||
|
||||
# get node objects identified by link data
|
||||
node_one, node_two, net_one, net_two, tunnel = self._link_nodes(node_one_id, node_two_id)
|
||||
|
||||
|
@ -378,7 +375,8 @@ class EmuSession(Session):
|
|||
if node_two:
|
||||
node_two.lock.release()
|
||||
|
||||
def update_link(self, node_one_id, node_two_id, interface_one_id=None, interface_two_id=None, link_options=LinkOptions()):
|
||||
def update_link(self, node_one_id, node_two_id, interface_one_id=None, interface_two_id=None,
|
||||
link_options=LinkOptions()):
|
||||
"""
|
||||
Update link information between nodes.
|
||||
|
||||
|
@ -389,9 +387,6 @@ class EmuSession(Session):
|
|||
:param core.emulator.emudata.LinkOptions link_options: data to update link with
|
||||
:return: nothing
|
||||
"""
|
||||
# interface data
|
||||
# interface_one_data, interface_two_data = get_interfaces(link_data)
|
||||
|
||||
# get node objects identified by link data
|
||||
node_one, node_two, net_one, net_two, tunnel = self._link_nodes(node_one_id, node_two_id)
|
||||
|
||||
|
@ -480,7 +475,7 @@ class EmuSession(Session):
|
|||
|
||||
# set node start based on current session state, override and check when rj45
|
||||
start = self.state > EventTypes.DEFINITION_STATE.value
|
||||
enable_rj45 = getattr(self.options, "enablerj45", "0") == "1"
|
||||
enable_rj45 = self.options.get_config("enablerj45") == "1"
|
||||
if _type == NodeTypes.RJ45 and not enable_rj45:
|
||||
start = False
|
||||
|
||||
|
@ -512,17 +507,14 @@ class EmuSession(Session):
|
|||
if _type in [NodeTypes.DEFAULT, NodeTypes.PHYSICAL]:
|
||||
node.type = node_options.model
|
||||
logger.debug("set node type: %s", node.type)
|
||||
services = "|".join(node_options.services) or None
|
||||
self.services.addservicestonode(node, node.type, services)
|
||||
self.services.add_services(node, node.type, node_options.services)
|
||||
|
||||
# boot nodes if created after runtime, LcxNodes, Physical, and RJ45 are all PyCoreNodes
|
||||
is_boot_node = isinstance(node, PyCoreNode) and not nodeutils.is_node(node, NodeTypes.RJ45)
|
||||
if self.state == EventTypes.RUNTIME_STATE.value and is_boot_node:
|
||||
self.write_objects()
|
||||
self.add_remove_control_interface(node=node, remove=False)
|
||||
|
||||
# TODO: common method to both Physical and LxcNodes, but not the common PyCoreNode
|
||||
node.boot()
|
||||
self.services.boot_services(node)
|
||||
|
||||
return node
|
||||
|
||||
|
@ -550,7 +542,7 @@ class EmuSession(Session):
|
|||
# set node as updated successfully
|
||||
result = True
|
||||
except KeyError:
|
||||
logger.error("failure to update node that does not exist: %s", node_options.id)
|
||||
logger.error("failure to update node that does not exist: %s", node_id)
|
||||
|
||||
return result
|
||||
|
||||
|
@ -668,10 +660,10 @@ class EmuSession(Session):
|
|||
# clear out existing session
|
||||
self.clear()
|
||||
|
||||
# set default node class when one is not provided
|
||||
node_class = nodeutils.get_node_class(NodeTypes.DEFAULT)
|
||||
options = {"start": start, "nodecls": node_class}
|
||||
core_document_parser(self, file_name, options)
|
||||
# write out xml file
|
||||
CoreXmlReader(self).read(file_name)
|
||||
|
||||
# start session if needed
|
||||
if start:
|
||||
self.name = os.path.basename(file_name)
|
||||
self.file_name = file_name
|
||||
|
@ -685,8 +677,7 @@ class EmuSession(Session):
|
|||
:param str version: xml version type
|
||||
:return: nothing
|
||||
"""
|
||||
doc = core_document_writer(self, version)
|
||||
doc.writexml(file_name)
|
||||
CoreXmlWriter(self).write(file_name)
|
||||
|
||||
def add_hook(self, state, file_name, source_name, data):
|
||||
"""
|
||||
|
@ -702,21 +693,6 @@ class EmuSession(Session):
|
|||
state = ":%s" % state
|
||||
self.set_hook(state, file_name, source_name, data)
|
||||
|
||||
def add_node_service_file(self, node_id, service_name, file_name, source_name, data):
|
||||
"""
|
||||
Add a service file for a node.
|
||||
|
||||
:param int node_id: node to add service file to
|
||||
:param str service_name: service file to add
|
||||
:param str file_name: file name to use
|
||||
:param str source_name: source file
|
||||
:param str data: file data to save
|
||||
:return: nothing
|
||||
"""
|
||||
# hack to conform with old logic until updated
|
||||
service_name = ":%s" % service_name
|
||||
self.services.setservicefile(node_id, service_name, file_name, source_name, data)
|
||||
|
||||
def add_node_file(self, node_id, source_name, file_name, data):
|
||||
"""
|
||||
Add a file to a node.
|
||||
|
@ -744,6 +720,7 @@ class EmuSession(Session):
|
|||
self.delete_objects()
|
||||
self.del_hooks()
|
||||
self.broker.reset()
|
||||
self.emane.reset()
|
||||
|
||||
def start_events(self):
|
||||
"""
|
||||
|
@ -753,15 +730,6 @@ class EmuSession(Session):
|
|||
"""
|
||||
self.event_loop.run()
|
||||
|
||||
def services_event(self, event_data):
|
||||
"""
|
||||
Handle a service event.
|
||||
|
||||
:param core.data.EventData event_data: event data to handle
|
||||
:return:
|
||||
"""
|
||||
self.services.handleevent(event_data)
|
||||
|
||||
def mobility_event(self, event_data):
|
||||
"""
|
||||
Handle a mobility event.
|
||||
|
@ -783,7 +751,7 @@ class EmuSession(Session):
|
|||
node_options.model = "mdr"
|
||||
return self.add_node(_type=NodeTypes.DEFAULT, _id=_id, node_options=node_options)
|
||||
|
||||
def create_emane_network(self, model, geo_reference, geo_scale=None, node_options=NodeOptions()):
|
||||
def create_emane_network(self, model, geo_reference, geo_scale=None, node_options=NodeOptions(), config=None):
|
||||
"""
|
||||
Convenience method for creating an emane network.
|
||||
|
||||
|
@ -791,6 +759,7 @@ class EmuSession(Session):
|
|||
:param geo_reference: geo reference point to use for emane node locations
|
||||
:param geo_scale: geo scale to use for emane node locations, defaults to 1.0
|
||||
:param core.emulator.emudata.NodeOptions node_options: options for emane node being created
|
||||
:param dict config: emane model configuration
|
||||
:return: create emane network
|
||||
"""
|
||||
# required to be set for emane to function properly
|
||||
|
@ -800,31 +769,9 @@ class EmuSession(Session):
|
|||
|
||||
# create and return network
|
||||
emane_network = self.add_node(_type=NodeTypes.EMANE, node_options=node_options)
|
||||
self.set_emane_model(emane_network, model)
|
||||
self.emane.set_model(emane_network, model, config)
|
||||
return emane_network
|
||||
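A usage sketch of the updated signature; the model class path, the option name, the coordinates, and the pre-existing session object are illustrative assumptions:

from core.emane.ieee80211abg import EmaneIeee80211abgModel
from core.emulator.emudata import NodeOptions

emane_net = session.create_emane_network(
    model=EmaneIeee80211abgModel,
    geo_reference=(47.57917, -122.13232, 2.0),  # lat, lon, alt reference point
    node_options=NodeOptions(),
    config={"unicastrate": "3"},  # hypothetical model option
)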
|
||||
def set_emane_model(self, emane_node, emane_model):
|
||||
"""
|
||||
Set emane model for a given emane node.
|
||||
|
||||
:param emane_node: emane node to set model for
|
||||
:param emane_model: emane model to set
|
||||
:return: nothing
|
||||
"""
|
||||
values = list(emane_model.getdefaultvalues())
|
||||
self.emane.setconfig(emane_node.objid, emane_model.name, values)
|
||||
|
||||
def set_wireless_model(self, node, model):
|
||||
"""
|
||||
Convenience method for setting a wireless model.
|
||||
|
||||
:param node: node to set wireless model for
|
||||
:param core.mobility.WirelessModel model: wireless model to set node to
|
||||
:return: nothing
|
||||
"""
|
||||
values = list(model.getdefaultvalues())
|
||||
node.setmodel(model, values)
|
||||
|
||||
def wireless_link_all(self, network, nodes):
|
||||
"""
|
||||
Link all nodes to the provided wireless network.
|
||||
|
@ -851,8 +798,10 @@ class CoreEmu(object):
|
|||
"""
|
||||
# set umask 0
|
||||
os.umask(0)
|
||||
|
||||
|
||||
# configuration
|
||||
if not config:
|
||||
config = {}
|
||||
self.config = config
|
||||
|
||||
# session management
|
||||
|
@ -863,12 +812,26 @@ class CoreEmu(object):
|
|||
node_map = nodemaps.NODES
|
||||
nodeutils.set_node_map(node_map)
|
||||
|
||||
# load default services
|
||||
core.services.load()
|
||||
# load services
|
||||
self.service_errors = []
|
||||
self.load_services()
|
||||
|
||||
# catch exit event
|
||||
atexit.register(self.shutdown)
|
||||
|
||||
def load_services(self):
|
||||
# load default services
|
||||
self.service_errors = core.services.load()
|
||||
|
||||
# load custom services
|
||||
service_paths = self.config.get("custom_services_dir")
|
||||
logger.debug("custom service paths: %s", service_paths)
|
||||
if service_paths:
|
||||
for service_path in service_paths.split(','):
|
||||
service_path = service_path.strip()
|
||||
custom_service_errors = ServiceManager.add_services(service_path)
|
||||
self.service_errors.extend(custom_service_errors)
|
||||
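A minimal sketch of how the custom_services_dir option reaches load_services (directory paths are hypothetical):

# comma-separated list of directories holding custom service modules
config = {"custom_services_dir": "/home/user/.core/myservices,/opt/core/services"}
coreemu = CoreEmu(config)
# services that failed to import are collected instead of raising
print(coreemu.service_errors)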
|
||||
def update_nodes(self, node_map):
|
||||
"""
|
||||
Updates node map used by core.
|
||||
|
@ -890,12 +853,13 @@ class CoreEmu(object):
|
|||
for session in sessions.itervalues():
|
||||
session.shutdown()
|
||||
|
||||
def create_session(self, _id=None, master=True):
|
||||
def create_session(self, _id=None, master=True, _cls=EmuSession):
|
||||
"""
|
||||
Create a new CORE session, set to master if running standalone.
|
||||
|
||||
:param int _id: session id for new session
|
||||
:param bool master: sets session to master
|
||||
:param class _cls: EmuSession class to use
|
||||
:return: created session
|
||||
:rtype: EmuSession
|
||||
"""
|
||||
|
@ -907,7 +871,7 @@ class CoreEmu(object):
|
|||
if session_id not in self.sessions:
|
||||
break
|
||||
|
||||
session = EmuSession(session_id, config=self.config)
|
||||
session = _cls(session_id, config=self.config)
|
||||
logger.info("created session: %s", session_id)
|
||||
if master:
|
||||
session.master = True
|
||||
|
|
|
@ -34,8 +34,8 @@ class NodeOptions(object):
|
|||
"""
|
||||
Convenience method for setting position.
|
||||
|
||||
:param int x: x position
|
||||
:param int y: y position
|
||||
:param float x: x position
|
||||
:param float y: y position
|
||||
:return: nothing
|
||||
"""
|
||||
self.x = x
|
||||
|
@ -161,7 +161,7 @@ class IpPrefixes(object):
|
|||
|
||||
# random mac
|
||||
if not mac:
|
||||
mac = str(MacAddress.random())
|
||||
mac = MacAddress.random()
|
||||
|
||||
return InterfaceData(
|
||||
_id=inteface_id,
|
||||
|
|
|
@ -6,19 +6,15 @@ https://pypi.python.org/pypi/utm (version 0.3.0).
|
|||
"""
|
||||
|
||||
from core import logger
|
||||
from core.conf import ConfigurableManager
|
||||
from core.enumerations import RegisterTlvs
|
||||
from core.misc import utm
|
||||
|
||||
|
||||
class CoreLocation(ConfigurableManager):
|
||||
class CoreLocation(object):
|
||||
"""
|
||||
Member of session class for handling global location data. This keeps
|
||||
track of a latitude/longitude/altitude reference point and scale in
|
||||
order to convert between X,Y and geo coordinates.
|
||||
|
||||
TODO: this could be updated to use more generic
|
||||
Configurable/ConfigurableManager code like other Session objects
|
||||
"""
|
||||
name = "location"
|
||||
config_type = RegisterTlvs.UTILITY.value
|
||||
|
@ -29,7 +25,7 @@ class CoreLocation(ConfigurableManager):
|
|||
|
||||
:return: nothing
|
||||
"""
|
||||
ConfigurableManager.__init__(self)
|
||||
# ConfigurableManager.__init__(self)
|
||||
self.reset()
|
||||
self.zonemap = {}
|
||||
self.refxyz = (0.0, 0.0, 0.0)
|
||||
|
@ -52,35 +48,6 @@ class CoreLocation(ConfigurableManager):
|
|||
# cached distance to refpt in other zones
|
||||
self.zoneshifts = {}
|
||||
|
||||
def configure_values(self, config_data):
|
||||
"""
|
||||
Receive configuration message for setting the reference point
|
||||
and scale.
|
||||
|
||||
:param core.conf.ConfigData config_data: configuration data for carrying out a configuration
|
||||
:return: nothing
|
||||
"""
|
||||
values = config_data.data_values
|
||||
|
||||
if values is None:
|
||||
logger.warn("location data missing")
|
||||
return None
|
||||
values = values.split('|')
|
||||
|
||||
# Cartesian coordinate reference point
|
||||
refx, refy = map(lambda x: float(x), values[0:2])
|
||||
refz = 0.0
|
||||
self.refxyz = (refx, refy, refz)
|
||||
# Geographic reference point
|
||||
lat, lon, alt = map(lambda x: float(x), values[2:5])
|
||||
self.setrefgeo(lat, lon, alt)
|
||||
self.refscale = float(values[5])
|
||||
logger.info("location configured: (%.2f,%.2f,%.2f) = (%.5f,%.5f,%.5f) scale=%.2f" %
|
||||
(self.refxyz[0], self.refxyz[1], self.refxyz[2], self.refgeo[0],
|
||||
self.refgeo[1], self.refgeo[2], self.refscale))
|
||||
logger.info("location configured: UTM(%.5f,%.5f,%.5f)" %
|
||||
(self.refutm[1], self.refutm[2], self.refutm[3]))
|
||||
|
||||
def px2m(self, val):
|
||||
"""
|
||||
Convert the specified value in pixels to meters using the
|
||||
|
|
|
@ -126,13 +126,17 @@ def make_tuple_fromstr(s, value_type):
"""
Create a tuple from a string.

:param str s: string to convert to a tuple
:param str|unicode s: string to convert to a tuple
:param value_type: type of values to be contained within tuple
:return: tuple from string
:rtype: tuple
"""
# remove tuple braces and strip commas and space from all values in the tuple string
values = [x.strip("' ") for x in s.strip("(), ").split(",")]
values = []
for x in s.strip("(), ").split(","):
x = x.strip("' ")
if x:
values.append(x)
return tuple(value_type(i) for i in values)
|
||||
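A quick usage sketch of the rewritten helper (values are illustrative; it assumes the function lives in core.misc.utils, the module imported elsewhere in this changeset); empty entries produced by stray commas are now filtered out before conversion:

from core.misc.utils import make_tuple_fromstr

assert make_tuple_fromstr("('a', 'b')", str) == ("a", "b")
assert make_tuple_fromstr("(1, 2, 3)", int) == (1, 2, 3)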
|
||||
|
||||
|
@ -144,7 +148,7 @@ def split_args(args):
|
|||
:return: shell-like syntax list
|
||||
:rtype: list
|
||||
"""
|
||||
if type(args) == str:
|
||||
if isinstance(args, basestring):
|
||||
args = shlex.split(args)
|
||||
return args
|
||||
|
||||
|
|
|
@ -9,10 +9,13 @@ import threading
|
|||
import time
|
||||
|
||||
from core import logger
|
||||
from core.conf import Configurable
|
||||
from core.conf import ConfigurableManager
|
||||
from core.conf import ConfigGroup
|
||||
from core.conf import ConfigurableOptions
|
||||
from core.conf import Configuration
|
||||
from core.conf import ModelManager
|
||||
from core.coreobj import PyCoreNode
|
||||
from core.data import EventData, LinkData
|
||||
from core.data import EventData
|
||||
from core.data import LinkData
|
||||
from core.enumerations import ConfigDataTypes
|
||||
from core.enumerations import EventTypes
|
||||
from core.enumerations import LinkTypes
|
||||
|
@ -24,7 +27,7 @@ from core.misc import utils
|
|||
from core.misc.ipaddress import IpAddress
|
||||
|
||||
|
||||
class MobilityManager(ConfigurableManager):
|
||||
class MobilityManager(ModelManager):
|
||||
"""
|
||||
Member of session class for handling configuration data for mobility and
|
||||
range models.
|
||||
|
@ -38,15 +41,11 @@ class MobilityManager(ConfigurableManager):
|
|||
|
||||
:param core.session.Session session: session this manager is tied to
|
||||
"""
|
||||
ConfigurableManager.__init__(self)
|
||||
super(MobilityManager, self).__init__()
|
||||
self.session = session
|
||||
# configurations for basic range, indexed by WLAN node number, are
|
||||
# stored in self.configs
|
||||
# mapping from model names to their classes
|
||||
self._modelclsmap = {
|
||||
BasicRangeModel.name: BasicRangeModel,
|
||||
Ns2ScriptedMobility.name: Ns2ScriptedMobility
|
||||
}
|
||||
self.models[BasicRangeModel.name] = BasicRangeModel
|
||||
self.models[Ns2ScriptedMobility.name] = Ns2ScriptedMobility
|
||||
|
||||
# dummy node objects for tracking position of nodes on other servers
|
||||
self.phys = {}
|
||||
self.physnets = {}
|
||||
|
@ -61,31 +60,24 @@ class MobilityManager(ConfigurableManager):
|
|||
:return: nothing
|
||||
"""
|
||||
if node_ids is None:
|
||||
node_ids = self.configs.keys()
|
||||
node_ids = self.nodes()
|
||||
|
||||
for node_id in node_ids:
|
||||
logger.info("checking mobility startup for node: %s", node_id)
|
||||
logger.info("node mobility configurations: %s", self.get_all_configs(node_id))
|
||||
|
||||
try:
|
||||
node = self.session.get_object(node_id)
|
||||
except KeyError:
|
||||
logger.warn("skipping mobility configuration for unknown node %d." % node_id)
|
||||
logger.warn("skipping mobility configuration for unknown node: %s", node_id)
|
||||
continue
|
||||
|
||||
if node_id not in self.configs:
|
||||
logger.warn("missing mobility configuration for node %d." % node_id)
|
||||
continue
|
||||
|
||||
v = self.configs[node_id]
|
||||
|
||||
for model in v:
|
||||
try:
|
||||
logger.info("setting mobility model to node: %s", model)
|
||||
cls = self._modelclsmap[model[0]]
|
||||
node.setmodel(cls, model[1])
|
||||
except KeyError:
|
||||
logger.warn("skipping mobility configuration for unknown model '%s'" % model[0])
|
||||
for model_name in self.models.iterkeys():
|
||||
config = self.get_configs(node_id, model_name)
|
||||
if not config:
|
||||
continue
|
||||
model_class = self.models[model_name]
|
||||
self.set_model(node, model_class, config)
|
||||
|
||||
if self.session.master:
|
||||
self.installphysnodes(node)
|
||||
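A hedged sketch of the resulting flow, using only calls visible in this hunk (the WLAN node object is assumed to already exist in the session):

config = session.mobility.get_configs(wlan_node.objid, BasicRangeModel.name)
if config:
    session.mobility.set_model(wlan_node, BasicRangeModel, config)
session.mobility.startup(node_ids=[wlan_node.objid])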
|
@ -93,33 +85,6 @@ class MobilityManager(ConfigurableManager):
|
|||
if node.mobility:
|
||||
self.session.event_loop.add_event(0.0, node.mobility.startup)
|
||||
|
||||
def reset(self):
|
||||
"""
|
||||
Reset all configs.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
self.clearconfig(nodenum=None)
|
||||
|
||||
def setconfig(self, node_id, config_type, values):
|
||||
"""
|
||||
Normal setconfig() with check for run-time updates for WLANs.
|
||||
|
||||
:param int node_id: node id
|
||||
:param config_type: configuration type
|
||||
:param values: configuration value
|
||||
:return: nothing
|
||||
"""
|
||||
super(MobilityManager, self).setconfig(node_id, config_type, values)
|
||||
if self.session is None:
|
||||
return
|
||||
if self.session.state == EventTypes.RUNTIME_STATE.value:
|
||||
try:
|
||||
node = self.session.get_object(node_id)
|
||||
node.updatemodel(config_type, values)
|
||||
except KeyError:
|
||||
logger.exception("Skipping mobility configuration for unknown node %d.", node_id)
|
||||
|
||||
def handleevent(self, event_data):
|
||||
"""
|
||||
Handle an Event Message used to start, stop, or pause
|
||||
|
@ -142,7 +107,7 @@ class MobilityManager(ConfigurableManager):
|
|||
models = name[9:].split(',')
|
||||
for model in models:
|
||||
try:
|
||||
cls = self._modelclsmap[model]
|
||||
cls = self.models[model]
|
||||
except KeyError:
|
||||
logger.warn("Ignoring event for unknown model '%s'", model)
|
||||
continue
|
||||
|
@ -206,13 +171,13 @@ class MobilityManager(ConfigurableManager):
|
|||
:param list moved_netifs: moved network interfaces
|
||||
:return: nothing
|
||||
"""
|
||||
for nodenum in self.configs:
|
||||
for node_id in self.nodes():
|
||||
try:
|
||||
n = self.session.get_object(nodenum)
|
||||
node = self.session.get_object(node_id)
|
||||
except KeyError:
|
||||
continue
|
||||
if n.model:
|
||||
n.model.update(moved, moved_netifs)
|
||||
if node.model:
|
||||
node.model.update(moved, moved_netifs)
|
||||
|
||||
def addphys(self, netnum, node):
|
||||
"""
|
||||
|
@ -222,14 +187,15 @@ class MobilityManager(ConfigurableManager):
|
|||
:param core.coreobj.PyCoreNode node: node to add physical network to
|
||||
:return: nothing
|
||||
"""
|
||||
nodenum = node.objid
|
||||
self.phys[nodenum] = node
|
||||
node_id = node.objid
|
||||
self.phys[node_id] = node
|
||||
if netnum not in self.physnets:
|
||||
self.physnets[netnum] = [nodenum, ]
|
||||
self.physnets[netnum] = [node_id, ]
|
||||
else:
|
||||
self.physnets[netnum].append(nodenum)
|
||||
self.physnets[netnum].append(node_id)
|
||||
|
||||
# TODO: remove need for handling old style message
|
||||
|
||||
# TODO: remove need for handling old style message
|
||||
def physnodehandlelink(self, message):
|
||||
"""
|
||||
Broker handler. Snoop Link add messages to get
|
||||
|
@ -247,8 +213,7 @@ class MobilityManager(ConfigurableManager):
|
|||
return
|
||||
if nn[1] in self.session.broker.physical_nodes:
|
||||
# record the fact that this PhysicalNode is linked to a net
|
||||
dummy = PyCoreNode(session=self.session, objid=nn[1],
|
||||
name="n%d" % nn[1], start=False)
|
||||
dummy = PyCoreNode(session=self.session, objid=nn[1], name="n%d" % nn[1], start=False)
|
||||
self.addphys(nn[0], dummy)
|
||||
|
||||
# TODO: remove need to handling old style messages
|
||||
|
@ -291,7 +256,7 @@ class MobilityManager(ConfigurableManager):
|
|||
netif.poshook(netif, x, y, z)
|
||||
|
||||
|
||||
class WirelessModel(Configurable):
|
||||
class WirelessModel(ConfigurableOptions):
|
||||
"""
|
||||
Base class used by EMANE models and the basic range model.
|
||||
Used for managing arbitrary configuration parameters.
|
||||
|
@ -300,17 +265,16 @@ class WirelessModel(Configurable):
|
|||
bitmap = None
|
||||
position_callback = None
|
||||
|
||||
def __init__(self, session, object_id, values=None):
|
||||
def __init__(self, session, object_id):
|
||||
"""
|
||||
Create a WirelessModel instance.
|
||||
|
||||
:param core.session.Session session: core session we are tied to
|
||||
:param int object_id: object id
|
||||
:param values: values
|
||||
:param dict config: values
|
||||
"""
|
||||
Configurable.__init__(self, session, object_id)
|
||||
# 'values' can be retrieved from a ConfigurableManager, or used here
|
||||
# during initialization, depending on the model.
|
||||
self.session = session
|
||||
self.object_id = object_id
|
||||
|
||||
def all_link_data(self, flags):
|
||||
"""
|
||||
|
@ -333,16 +297,15 @@ class WirelessModel(Configurable):
|
|||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def updateconfig(self, values):
|
||||
def update_config(self, config):
|
||||
"""
|
||||
For run-time updates of model config. Returns True when position callback and set link
|
||||
parameters should be invoked.
|
||||
|
||||
:param values: value to update
|
||||
:return: False
|
||||
:rtype: bool
|
||||
:param dict config: configuration values to update
|
||||
:return: nothing
|
||||
"""
|
||||
return False
|
||||
pass
|
||||
|
||||
|
||||
class BasicRangeModel(WirelessModel):
|
||||
|
@ -352,82 +315,63 @@ class BasicRangeModel(WirelessModel):
|
|||
the GUI.
|
||||
"""
|
||||
name = "basic_range"
|
||||
|
||||
# configuration parameters are
|
||||
# ( 'name', 'type', 'default', 'possible-value-list', 'caption')
|
||||
config_matrix = [
|
||||
("range", ConfigDataTypes.UINT32.value, '275',
|
||||
'', 'wireless range (pixels)'),
|
||||
("bandwidth", ConfigDataTypes.UINT32.value, '54000',
|
||||
'', 'bandwidth (bps)'),
|
||||
("jitter", ConfigDataTypes.FLOAT.value, '0.0',
|
||||
'', 'transmission jitter (usec)'),
|
||||
("delay", ConfigDataTypes.FLOAT.value, '5000.0',
|
||||
'', 'transmission delay (usec)'),
|
||||
("error", ConfigDataTypes.FLOAT.value, '0.0',
|
||||
'', 'error rate (%)'),
|
||||
options = [
|
||||
Configuration(_id="range", _type=ConfigDataTypes.UINT32, default="275", label="wireless range (pixels)"),
|
||||
Configuration(_id="bandwidth", _type=ConfigDataTypes.UINT32, default="54000", label="bandwidth (bps)"),
|
||||
Configuration(_id="jitter", _type=ConfigDataTypes.FLOAT, default="0.0", label="transmission jitter (usec)"),
|
||||
Configuration(_id="delay", _type=ConfigDataTypes.FLOAT, default="5000.0",
|
||||
label="transmission delay (usec)"),
|
||||
Configuration(_id="error", _type=ConfigDataTypes.FLOAT, default="0.0", label="error rate (%)")
|
||||
]
|
||||
|
||||
# value groupings
|
||||
config_groups = "Basic Range Parameters:1-%d" % len(config_matrix)
|
||||
@classmethod
|
||||
def config_groups(cls):
|
||||
return [
|
||||
ConfigGroup("Basic Range Parameters", 1, len(cls.configurations()))
|
||||
]
|
||||
|
||||
def __init__(self, session, object_id, values=None):
|
||||
def __init__(self, session, object_id):
|
||||
"""
|
||||
Create a BasicRangeModel instance.
|
||||
|
||||
:param core.session.Session session: related core session
|
||||
:param int object_id: object id
|
||||
:param values: values
|
||||
:param dict config: values
|
||||
"""
|
||||
super(BasicRangeModel, self).__init__(session=session, object_id=object_id)
|
||||
self.session = session
|
||||
self.wlan = session.get_object(object_id)
|
||||
self._netifs = {}
|
||||
self._netifslock = threading.Lock()
|
||||
if values is None:
|
||||
values = session.mobility.getconfig(object_id, self.name, self.getdefaultvalues())[1]
|
||||
self.range = float(self.valueof("range", values))
|
||||
logger.info("Basic range model configured for WLAN %d using range %d", object_id, self.range)
|
||||
self.valuestolinkparams(values)
|
||||
|
||||
# link parameters
|
||||
self.range = None
|
||||
self.bw = None
|
||||
self.delay = None
|
||||
self.loss = None
|
||||
self.jitter = None
|
||||
|
||||
def valuestolinkparams(self, values):
|
||||
def values_from_config(self, config):
|
||||
"""
|
||||
Values to convert to link parameters.
|
||||
|
||||
:param values: values to convert
|
||||
:param dict config: values to convert
|
||||
:return: nothing
|
||||
"""
|
||||
self.bw = int(self.valueof("bandwidth", values))
|
||||
self.range = float(config["range"])
|
||||
logger.info("basic range model configured for WLAN %d using range %d", self.wlan.objid, self.range)
|
||||
self.bw = int(config["bandwidth"])
|
||||
if self.bw == 0.0:
|
||||
self.bw = None
|
||||
self.delay = float(self.valueof("delay", values))
|
||||
self.delay = float(config["delay"])
|
||||
if self.delay == 0.0:
|
||||
self.delay = None
|
||||
self.loss = float(self.valueof("error", values))
|
||||
self.loss = float(config["error"])
|
||||
if self.loss == 0.0:
|
||||
self.loss = None
|
||||
self.jitter = float(self.valueof("jitter", values))
|
||||
self.jitter = float(config["jitter"])
|
||||
if self.jitter == 0.0:
|
||||
self.jitter = None
|
||||
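To illustrate the dict-based configuration values_from_config now expects, a sketch using the defaults declared above (string values, as delivered by the GUI; the model instance is assumed):

config = {
    "range": "275",
    "bandwidth": "54000",
    "jitter": "0.0",
    "delay": "5000.0",
    "error": "0.0",
}
model.values_from_config(config)
# jitter/error of 0.0 leave those link parameters unset (None), as handled above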
|
||||
@classmethod
|
||||
def configure_mob(cls, session, config_data):
|
||||
"""
|
||||
Handle configuration messages for setting up a model.
|
||||
Pass the MobilityManager object as the manager object.
|
||||
|
||||
:param core.session.Session session: current session calling function
|
||||
:param core.conf.ConfigData config_data: configuration data for carrying out a configuration
|
||||
:return: configuration data
|
||||
:rtype: core.data.ConfigData
|
||||
"""
|
||||
return cls.configure(session.mobility, config_data)
|
||||
|
||||
def setlinkparams(self):
|
||||
"""
|
||||
Apply link parameters to all interfaces. This is invoked from
|
||||
|
@ -435,8 +379,7 @@ class BasicRangeModel(WirelessModel):
|
|||
"""
|
||||
with self._netifslock:
|
||||
for netif in self._netifs:
|
||||
self.wlan.linkconfig(netif, bw=self.bw, delay=self.delay,
|
||||
loss=self.loss, duplicate=None,
|
||||
self.wlan.linkconfig(netif, bw=self.bw, delay=self.delay, loss=self.loss, duplicate=None,
|
||||
jitter=self.jitter)
|
||||
|
||||
def get_position(self, netif):
|
||||
|
@ -461,7 +404,6 @@ class BasicRangeModel(WirelessModel):
|
|||
:param z: z position
|
||||
:return: nothing
|
||||
"""
|
||||
# print "set_position(%s, x=%s, y=%s, z=%s)" % (netif.localname, x, y, z)
|
||||
self._netifslock.acquire()
|
||||
self._netifs[netif] = (x, y, z)
|
||||
if x is None or y is None:
|
||||
|
@ -487,7 +429,7 @@ class BasicRangeModel(WirelessModel):
|
|||
with self._netifslock:
|
||||
while len(moved_netifs):
|
||||
netif = moved_netifs.pop()
|
||||
(nx, ny, nz) = netif.node.getposition()
|
||||
nx, ny, nz = netif.node.getposition()
|
||||
if netif in self._netifs:
|
||||
self._netifs[netif] = (nx, ny, nz)
|
||||
for netif2 in self._netifs:
|
||||
|
@ -529,12 +471,12 @@ class BasicRangeModel(WirelessModel):
|
|||
a.name, b.name, linked, d, self.range)
|
||||
if d > self.range:
|
||||
if linked:
|
||||
logger.info("was linked, unlinking")
|
||||
logger.debug("was linked, unlinking")
|
||||
self.wlan.unlink(a, b)
|
||||
self.sendlinkmsg(a, b, unlink=True)
|
||||
else:
|
||||
if not linked:
|
||||
logger.info("was not linked, linking")
|
||||
logger.debug("was not linked, linking")
|
||||
self.wlan.link(a, b)
|
||||
self.sendlinkmsg(a, b)
|
||||
except KeyError:
|
||||
|
@ -557,18 +499,15 @@ class BasicRangeModel(WirelessModel):
|
|||
c = p1[2] - p2[2]
|
||||
return math.hypot(math.hypot(a, b), c)
|
||||
|
||||
def updateconfig(self, values):
|
||||
def update_config(self, config):
|
||||
"""
|
||||
Configuration has changed during runtime.
|
||||
MobilityManager.setconfig() -> WlanNode.updatemodel() ->
|
||||
WirelessModel.updateconfig()
|
||||
|
||||
:param values: values to update configuration
|
||||
:return: was update successful
|
||||
:rtype: bool
|
||||
:param dict config: values to update configuration
|
||||
:return: nothing
|
||||
"""
|
||||
self.valuestolinkparams(values)
|
||||
self.range = float(self.valueof("range", values))
|
||||
self.values_from_config(config)
|
||||
self.setlinkparams()
|
||||
return True
|
||||
|
||||
def create_link_data(self, interface1, interface2, message_type):
|
||||
|
@ -581,7 +520,6 @@ class BasicRangeModel(WirelessModel):
|
|||
:return: link data
|
||||
:rtype: LinkData
|
||||
"""
|
||||
|
||||
return LinkData(
|
||||
message_type=message_type,
|
||||
node1_id=interface1.node.objid,
|
||||
|
@ -668,16 +606,16 @@ class WayPointMobility(WirelessModel):
|
|||
STATE_RUNNING = 1
|
||||
STATE_PAUSED = 2
|
||||
|
||||
def __init__(self, session, object_id, values=None):
|
||||
def __init__(self, session, object_id):
|
||||
"""
|
||||
Create a WayPointMobility instance.
|
||||
|
||||
:param core.session.Session session: CORE session instance
|
||||
:param int object_id: object id
|
||||
:param values: values for this model
|
||||
:return:
|
||||
"""
|
||||
super(WayPointMobility, self).__init__(session=session, object_id=object_id, values=values)
|
||||
super(WayPointMobility, self).__init__(session=session, object_id=object_id)
|
||||
|
||||
self.state = self.STATE_STOPPED
|
||||
self.queue = []
|
||||
self.queue_copy = []
|
||||
|
@ -705,7 +643,6 @@ class WayPointMobility(WirelessModel):
|
|||
self.lasttime = time.time()
|
||||
now = self.lasttime - self.timezero
|
||||
dt = self.lasttime - t
|
||||
# print "runround(now=%.2f, dt=%.2f)" % (now, dt)
|
||||
|
||||
# keep current waypoints up-to-date
|
||||
self.updatepoints(now)
|
||||
|
@ -741,7 +678,6 @@ class WayPointMobility(WirelessModel):
|
|||
moved_netifs.append(netif)
|
||||
|
||||
# calculate all ranges after moving nodes; this saves calculations
|
||||
# self.wlan.model.update(moved)
|
||||
self.session.mobility.updatewlans(moved, moved_netifs)
|
||||
|
||||
# TODO: check session state
|
||||
|
@ -806,7 +742,6 @@ class WayPointMobility(WirelessModel):
|
|||
self.endtime = self.lasttime - self.timezero
|
||||
del self.points[node.objid]
|
||||
return False
|
||||
# print "node %s dx,dy= <%s, %d>" % (node.name, dx, dy)
|
||||
if (x1 + dx) < 0.0:
|
||||
dx = 0.0 - x1
|
||||
if (y1 + dy) < 0.0:
|
||||
|
@ -826,11 +761,10 @@ class WayPointMobility(WirelessModel):
|
|||
node = netif.node
|
||||
if node.objid not in self.initial:
|
||||
continue
|
||||
(x, y, z) = self.initial[node.objid].coords
|
||||
x, y, z = self.initial[node.objid].coords
|
||||
self.setnodeposition(node, x, y, z)
|
||||
moved.append(node)
|
||||
moved_netifs.append(netif)
|
||||
# self.wlan.model.update(moved)
|
||||
self.session.mobility.updatewlans(moved, moved_netifs)
|
||||
|
||||
def addwaypoint(self, time, nodenum, x, y, z, speed):
|
||||
|
@ -845,7 +779,6 @@ class WayPointMobility(WirelessModel):
|
|||
:param speed: speed
|
||||
:return: nothing
|
||||
"""
|
||||
# print "addwaypoint: %s %s %s,%s,%s %s" % (time, nodenum, x, y, z, speed)
|
||||
wp = WayPoint(time, nodenum, coords=(x, y, z), speed=speed)
|
||||
heapq.heappush(self.queue, wp)
|
||||
|
||||
|
@ -905,7 +838,6 @@ class WayPointMobility(WirelessModel):
|
|||
:return: nothing
|
||||
"""
|
||||
# this would cause PyCoreNetIf.poshook() callback (range calculation)
|
||||
# node.setposition(x, y, z)
|
||||
node.position.set(x, y, z)
|
||||
node_data = node.data(message_type=0)
|
||||
self.session.broadcast_node(node_data)
|
||||
|
@ -975,64 +907,58 @@ class Ns2ScriptedMobility(WayPointMobility):
|
|||
BonnMotion.
|
||||
"""
|
||||
name = "ns2script"
|
||||
|
||||
config_matrix = [
|
||||
("file", ConfigDataTypes.STRING.value, '',
|
||||
'', 'mobility script file'),
|
||||
("refresh_ms", ConfigDataTypes.UINT32.value, '50',
|
||||
'', 'refresh time (ms)'),
|
||||
("loop", ConfigDataTypes.BOOL.value, '1',
|
||||
'On,Off', 'loop'),
|
||||
("autostart", ConfigDataTypes.STRING.value, '',
|
||||
'', 'auto-start seconds (0.0 for runtime)'),
|
||||
("map", ConfigDataTypes.STRING.value, '',
|
||||
'', 'node mapping (optional, e.g. 0:1,1:2,2:3)'),
|
||||
("script_start", ConfigDataTypes.STRING.value, '',
|
||||
'', 'script file to run upon start'),
|
||||
("script_pause", ConfigDataTypes.STRING.value, '',
|
||||
'', 'script file to run upon pause'),
|
||||
("script_stop", ConfigDataTypes.STRING.value, '',
|
||||
'', 'script file to run upon stop'),
|
||||
options = [
|
||||
Configuration(_id="file", _type=ConfigDataTypes.STRING, label="mobility script file"),
|
||||
Configuration(_id="refresh_ms", _type=ConfigDataTypes.UINT32, default="50", label="mobility script file"),
|
||||
Configuration(_id="loop", _type=ConfigDataTypes.BOOL, default="1", options=["On", "Off"], label="loop"),
|
||||
Configuration(_id="autostart", _type=ConfigDataTypes.STRING, label="auto-start seconds (0.0 for runtime)"),
|
||||
Configuration(_id="map", _type=ConfigDataTypes.STRING, label="node mapping (optional, e.g. 0:1,1:2,2:3)"),
|
||||
Configuration(_id="script_start", _type=ConfigDataTypes.STRING, label="script file to run upon start"),
|
||||
Configuration(_id="script_pause", _type=ConfigDataTypes.STRING, label="script file to run upon pause"),
|
||||
Configuration(_id="script_stop", _type=ConfigDataTypes.STRING, label="script file to run upon stop")
|
||||
]
|
||||
config_groups = "ns-2 Mobility Script Parameters:1-%d" % len(config_matrix)
|
||||
|
||||
def __init__(self, session, object_id, values=None):
|
||||
@classmethod
|
||||
def config_groups(cls):
|
||||
return [
|
||||
ConfigGroup("ns-2 Mobility Script Parameters", 1, len(cls.configurations()))
|
||||
]
|
||||
|
||||
def __init__(self, session, object_id):
|
||||
"""
|
||||
Creates a Ns2ScriptedMobility instance.
|
||||
|
||||
:param core.session.Session session: CORE session instance
|
||||
:param int object_id: object id
|
||||
:param values: values
|
||||
:param config: values
|
||||
"""
|
||||
super(Ns2ScriptedMobility, self).__init__(session=session, object_id=object_id, values=values)
|
||||
super(Ns2ScriptedMobility, self).__init__(session=session, object_id=object_id)
|
||||
self._netifs = {}
|
||||
self._netifslock = threading.Lock()
|
||||
if values is None:
|
||||
values = session.mobility.getconfig(object_id, self.name, self.getdefaultvalues())[1]
|
||||
self.file = self.valueof("file", values)
|
||||
self.refresh_ms = int(self.valueof("refresh_ms", values))
|
||||
self.loop = self.valueof("loop", values).lower() == "on"
|
||||
self.autostart = self.valueof("autostart", values)
|
||||
self.parsemap(self.valueof("map", values))
|
||||
self.script_start = self.valueof("script_start", values)
|
||||
self.script_pause = self.valueof("script_pause", values)
|
||||
self.script_stop = self.valueof("script_stop", values)
|
||||
logger.info("ns-2 scripted mobility configured for WLAN %d using file: %s", object_id, self.file)
|
||||
|
||||
self.file = None
|
||||
self.refresh_ms = None
|
||||
self.loop = None
|
||||
self.autostart = None
|
||||
self.nodemap = {}
|
||||
self.script_start = None
|
||||
self.script_pause = None
|
||||
self.script_stop = None
|
||||
|
||||
def update_config(self, config):
|
||||
self.file = config["file"]
|
||||
logger.info("ns-2 scripted mobility configured for WLAN %d using file: %s", self.object_id, self.file)
|
||||
self.refresh_ms = int(config["refresh_ms"])
|
||||
self.loop = config["loop"].lower() == "on"
|
||||
self.autostart = config["autostart"]
|
||||
self.parsemap(config["map"])
|
||||
self.script_start = config["script_start"]
|
||||
self.script_pause = config["script_pause"]
|
||||
self.script_stop = config["script_stop"]
|
||||
self.readscriptfile()
|
||||
self.copywaypoints()
|
||||
self.setendtime()
|
||||
|
||||
@classmethod
|
||||
def configure_mob(cls, session, config_data):
|
||||
"""
|
||||
Handle configuration messages for setting up a model.
|
||||
Pass the MobilityManager object as the manager object.
|
||||
|
||||
:param core.session.Session session: current session calling function
|
||||
:param core.conf.ConfigData config_data: configuration data for carrying out a configuration
|
||||
"""
|
||||
return cls.configure(session.mobility, config_data)
|
||||
|
||||
def readscriptfile(self):
|
||||
"""
|
||||
Read in mobility script from a file. This adds waypoints to a
|
||||
|
@ -1043,9 +969,9 @@ class Ns2ScriptedMobility(WayPointMobility):
|
|||
"""
|
||||
filename = self.findfile(self.file)
|
||||
try:
|
||||
f = open(filename, 'r')
|
||||
f = open(filename, "r")
|
||||
except IOError:
|
||||
logger.exception("ns-2 scripted mobility failed to load file '%s'", self.file)
|
||||
logger.exception("ns-2 scripted mobility failed to load file: %s", self.file)
|
||||
return
|
||||
logger.info("reading ns-2 script file: %s" % filename)
|
||||
ln = 0
|
||||
|
|
|
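The options list and config_groups() classmethod above replace the old config_matrix and config_groups string. A hedged sketch of another model declared in this style, written as if it lived next to Ns2ScriptedMobility in core/mobility.py so Configuration, ConfigGroup, ConfigDataTypes and WayPointMobility are already in scope; the model itself and the assumption that configurations() is supplied by the base class come from how this diff uses them:

```python
class ExampleScriptedMobility(WayPointMobility):
    """
    Hypothetical model declaring its options with the new Configuration API.
    """
    name = "examplescript"

    options = [
        Configuration(_id="file", _type=ConfigDataTypes.STRING, label="mobility script file"),
        Configuration(_id="loop", _type=ConfigDataTypes.BOOL, default="1",
                      options=["On", "Off"], label="loop"),
    ]

    @classmethod
    def config_groups(cls):
        # one group covering every declared option, mirroring Ns2ScriptedMobility
        return [ConfigGroup("Example Script Parameters", 1, len(cls.configurations()))]
```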
@ -382,12 +382,13 @@ class WlanNode(LxBrNet):
|
|||
Sets the mobility and wireless model.
|
||||
|
||||
:param core.mobility.WirelessModel.cls model: wireless model to set to
|
||||
:param config: model configuration
|
||||
:param dict config: configuration for model being set
|
||||
:return: nothing
|
||||
"""
|
||||
logger.info("adding model: %s", model.name)
|
||||
if model.config_type == RegisterTlvs.WIRELESS.value:
|
||||
self.model = model(session=self.session, object_id=self.objid, values=config)
|
||||
self.model = model(session=self.session, object_id=self.objid)
|
||||
self.model.update_config(config)
|
||||
if self.model.position_callback:
|
||||
for netif in self.netifs():
|
||||
netif.poshook = self.model.position_callback
|
||||
|
@ -396,33 +397,26 @@ class WlanNode(LxBrNet):
|
|||
netif.poshook(netif, x, y, z)
|
||||
self.model.setlinkparams()
|
||||
elif model.config_type == RegisterTlvs.MOBILITY.value:
|
||||
self.mobility = model(session=self.session, object_id=self.objid, values=config)
|
||||
self.mobility = model(session=self.session, object_id=self.objid)
|
||||
self.mobility.update_config(config)
|
||||
|
||||
def updatemodel(self, model_name, values):
|
||||
"""
|
||||
Allow for model updates during runtime (similar to setmodel().)
|
||||
def update_mobility(self, config):
|
||||
if not self.mobility:
|
||||
raise ValueError("no mobility set to update for node(%s)", self.objid)
|
||||
self.mobility.set_configs(config, node_id=self.objid)
|
||||
|
||||
:param model_name: model name to update
|
||||
:param values: values to update model with
|
||||
:return: nothing
|
||||
"""
|
||||
logger.info("updating model %s" % model_name)
|
||||
if self.model is None or self.model.name != model_name:
|
||||
return
|
||||
|
||||
model = self.model
|
||||
if model.config_type == RegisterTlvs.WIRELESS.value:
|
||||
if not model.updateconfig(values):
|
||||
return
|
||||
|
||||
if self.model.position_callback:
|
||||
for netif in self.netifs():
|
||||
netif.poshook = self.model.position_callback
|
||||
if netif.node is not None:
|
||||
(x, y, z) = netif.node.position.get()
|
||||
netif.poshook(netif, x, y, z)
|
||||
|
||||
self.model.setlinkparams()
|
||||
def updatemodel(self, config):
|
||||
if not self.model:
|
||||
raise ValueError("no model set to update for node(%s)", self.objid)
|
||||
logger.info("node(%s) updating model(%s): %s", self.objid, self.model.name, config)
|
||||
self.model.set_configs(config, node_id=self.objid)
|
||||
if self.model.position_callback:
|
||||
for netif in self.netifs():
|
||||
netif.poshook = self.model.position_callback
|
||||
if netif.node is not None:
|
||||
x, y, z = netif.node.position.get()
|
||||
netif.poshook(netif, x, y, z)
|
||||
self.model.updateconfig()
|
||||
|
||||
def all_link_data(self, flags):
|
||||
"""
|
||||
|
|
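With the change above, WlanNode.setmodel() instantiates the model without a config and applies it through update_config(), while updatemodel() now takes only a config dict. A hedged usage sketch; the wlan argument is an existing WlanNode, BasicRangeModel is assumed as the wireless model name, and the config keys are illustrative:

```python
def apply_range_model(wlan, model_class):
    """
    Usage sketch: wlan is an existing WlanNode, model_class a wireless model
    class such as core.mobility.BasicRangeModel (name assumed).
    """
    # setmodel() instantiates the model, then pushes the initial values through update_config()
    wlan.setmodel(model_class, {"range": "275", "bandwidth": "54000000"})
    # updatemodel() now takes only the changed values at runtime
    wlan.updatemodel({"range": "500"})
```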
|
@ -185,55 +185,55 @@ class OvsNet(PyCoreNet):
|
|||
|
||||
ebtables_queue.ebchange(self)
|
||||
|
||||
def linkconfig(self, interface, bw=None, delay=None, loss=None, duplicate=None,
|
||||
def linkconfig(self, netif, bw=None, delay=None, loss=None, duplicate=None,
|
||||
jitter=None, netif2=None, devname=None):
|
||||
"""
|
||||
Configure link parameters by applying tc queuing disciplines on the
|
||||
interface.
|
||||
"""
|
||||
if not devname:
|
||||
devname = interface.localname
|
||||
devname = netif.localname
|
||||
|
||||
tc = [constants.TC_BIN, "qdisc", "replace", "dev", devname]
|
||||
parent = ["root"]
|
||||
|
||||
# attempt to set bandwidth and update as needed if value changed
|
||||
bandwidth_changed = interface.setparam("bw", bw)
|
||||
bandwidth_changed = netif.setparam("bw", bw)
|
||||
if bandwidth_changed:
|
||||
# from tc-tbf(8): minimum value for burst is rate / kernel_hz
|
||||
if bw > 0:
|
||||
if self.up:
|
||||
burst = max(2 * interface.mtu, bw / 1000)
|
||||
burst = max(2 * netif.mtu, bw / 1000)
|
||||
limit = 0xffff # max IP payload
|
||||
tbf = ["tbf", "rate", str(bw), "burst", str(burst), "limit", str(limit)]
|
||||
logger.info("linkconfig: %s" % [tc + parent + ["handle", "1:"] + tbf])
|
||||
utils.check_cmd(tc + parent + ["handle", "1:"] + tbf)
|
||||
interface.setparam("has_tbf", True)
|
||||
elif interface.getparam("has_tbf") and bw <= 0:
|
||||
netif.setparam("has_tbf", True)
|
||||
elif netif.getparam("has_tbf") and bw <= 0:
|
||||
tcd = [] + tc
|
||||
tcd[2] = "delete"
|
||||
|
||||
if self.up:
|
||||
utils.check_cmd(tcd + parent)
|
||||
|
||||
interface.setparam("has_tbf", False)
|
||||
netif.setparam("has_tbf", False)
|
||||
# removing the parent removes the child
|
||||
interface.setparam("has_netem", False)
|
||||
netif.setparam("has_netem", False)
|
||||
|
||||
if interface.getparam("has_tbf"):
|
||||
if netif.getparam("has_tbf"):
|
||||
parent = ["parent", "1:1"]
|
||||
|
||||
netem = ["netem"]
|
||||
delay_changed = interface.setparam("delay", delay)
|
||||
delay_changed = netif.setparam("delay", delay)
|
||||
|
||||
if loss is not None:
|
||||
loss = float(loss)
|
||||
loss_changed = interface.setparam("loss", loss)
|
||||
loss_changed = netif.setparam("loss", loss)
|
||||
|
||||
if duplicate is not None:
|
||||
duplicate = float(duplicate)
|
||||
duplicate_changed = interface.setparam("duplicate", duplicate)
|
||||
jitter_changed = interface.setparam("jitter", jitter)
|
||||
duplicate_changed = netif.setparam("duplicate", duplicate)
|
||||
jitter_changed = netif.setparam("jitter", jitter)
|
||||
|
||||
# if nothing changed return
|
||||
if not any([bandwidth_changed, delay_changed, loss_changed, duplicate_changed, jitter_changed]):
|
||||
|
@ -256,7 +256,7 @@ class OvsNet(PyCoreNet):
|
|||
|
||||
if delay <= 0 and jitter <= 0 and loss <= 0 and duplicate <= 0:
|
||||
# possibly remove netem if it exists and parent queue wasn't removed
|
||||
if not interface.getparam("has_netem"):
|
||||
if not netif.getparam("has_netem"):
|
||||
return
|
||||
|
||||
tc[2] = "delete"
|
||||
|
@ -264,12 +264,12 @@ class OvsNet(PyCoreNet):
|
|||
if self.up:
|
||||
logger.info("linkconfig: %s" % ([tc + parent + ["handle", "10:"]],))
|
||||
utils.check_cmd(tc + parent + ["handle", "10:"])
|
||||
interface.setparam("has_netem", False)
|
||||
netif.setparam("has_netem", False)
|
||||
elif len(netem) > 1:
|
||||
if self.up:
|
||||
logger.info("linkconfig: %s" % ([tc + parent + ["handle", "10:"] + netem],))
|
||||
utils.check_cmd(tc + parent + ["handle", "10:"] + netem)
|
||||
interface.setparam("has_netem", True)
|
||||
netif.setparam("has_netem", True)
|
||||
|
||||
def linknet(self, network):
|
||||
"""
|
||||
|
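linkconfig() builds the same tc invocations whether the parameter is called interface or netif: a root tbf qdisc (handle 1:) for the bandwidth cap and a child netem qdisc (handle 10:) for delay, loss, duplicate and jitter. A standalone sketch of the argument lists it assembles (device name and values are made up; "tc" stands in for constants.TC_BIN, and nothing is executed, the commands are only printed):

```python
TC_BIN = "tc"        # stand-in for constants.TC_BIN
devname = "veth0.1"  # hypothetical device name

bw = 54000000        # bits per second
mtu = 1500
burst = max(2 * mtu, bw / 1000)  # tc-tbf(8): burst must be at least rate / kernel_hz
limit = 0xffff                   # max IP payload

tc = [TC_BIN, "qdisc", "replace", "dev", devname]

# root token bucket filter for the bandwidth cap
tbf = ["tbf", "rate", str(bw), "burst", str(burst), "limit", str(limit)]
root_cmd = tc + ["root", "handle", "1:"] + tbf

# child netem qdisc hanging off 1:1 for delay/loss
netem = ["netem", "delay", "20000us", "loss", "1%"]
child_cmd = tc + ["parent", "1:1", "handle", "10:"] + netem

# in the real code each list is passed to utils.check_cmd()
print(" ".join(root_cmd))
print(" ".join(child_cmd))
```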
@ -305,7 +305,7 @@ class OvsNet(PyCoreNet):
|
|||
utils.check_cmd([constants.OVS_BIN, "add-port", network.bridge_name, interface.name])
|
||||
utils.check_cmd([constants.IP_BIN, "link", "set", interface.name, "up"])
|
||||
|
||||
# TODO: is there a native method for this? see if this causes issues
|
||||
# TODO: is there a native method for this? see if this causes issues
|
||||
# i = network.newifindex()
|
||||
# network._netif[i] = interface
|
||||
# with network._linked_lock:
|
||||
|
@ -351,12 +351,12 @@ class OvsCtrlNet(OvsNet):
|
|||
|
||||
def __init__(self, session, objid="ctrlnet", name=None, prefix=None, hostid=None,
|
||||
start=True, assign_address=True, updown_script=None, serverintf=None):
|
||||
OvsNet.__init__(self, session, objid=objid, name=name, start=start)
|
||||
self.prefix = ipaddress.Ipv4Prefix(prefix)
|
||||
self.hostid = hostid
|
||||
self.assign_address = assign_address
|
||||
self.updown_script = updown_script
|
||||
self.serverintf = serverintf
|
||||
OvsNet.__init__(self, session, objid=objid, name=name, start=start)
|
||||
|
||||
def startup(self):
|
||||
if self.detectoldbridge():
|
||||
|
@ -593,14 +593,14 @@ class OvsWlanNode(OvsNet):
|
|||
interface.setposition(x, y, z)
|
||||
# self.model.setlinkparams()
|
||||
|
||||
def setmodel(self, model, config):
|
||||
def setmodel(self, model, config=None):
|
||||
"""
|
||||
Mobility and wireless model.
|
||||
"""
|
||||
logger.info("adding model %s", model.name)
|
||||
|
||||
if model.type == RegisterTlvs.WIRELESS.value:
|
||||
self.model = model(session=self.session, object_id=self.objid, values=config)
|
||||
self.model = model(session=self.session, object_id=self.objid, config=config)
|
||||
if self.model.position_callback:
|
||||
for interface in self.netifs():
|
||||
interface.poshook = self.model.position_callback
|
||||
|
@ -609,31 +609,20 @@ class OvsWlanNode(OvsNet):
|
|||
interface.poshook(interface, x, y, z)
|
||||
self.model.setlinkparams()
|
||||
elif model.type == RegisterTlvs.MOBILITY.value:
|
||||
self.mobility = model(session=self.session, object_id=self.objid, values=config)
|
||||
self.mobility = model(session=self.session, object_id=self.objid, config=config)
|
||||
|
||||
def updatemodel(self, model_name, values):
|
||||
"""
|
||||
Allow for model updates during runtime (similar to setmodel().)
|
||||
"""
|
||||
logger.info("updating model %s", model_name)
|
||||
if self.model is None or self.model.name != model_name:
|
||||
logger.info(
|
||||
"failure to update model, model doesn't exist or invalid name: model(%s) - name(%s)",
|
||||
self.model, model_name
|
||||
)
|
||||
return
|
||||
|
||||
model = self.model
|
||||
if model.type == RegisterTlvs.WIRELESS.value:
|
||||
if not model.updateconfig(values):
|
||||
return
|
||||
if self.model.position_callback:
|
||||
for interface in self.netifs():
|
||||
interface.poshook = self.model.position_callback
|
||||
if interface.node is not None:
|
||||
x, y, z = interface.node.position.get()
|
||||
interface.poshook(interface, x, y, z)
|
||||
self.model.setlinkparams()
|
||||
def updatemodel(self, config):
|
||||
if not self.model:
|
||||
raise ValueError("no model set to update for node(%s)", self.objid)
|
||||
logger.info("node(%s) updating model(%s): %s", self.objid, self.model.name, config)
|
||||
self.model.set_configs(config, node_id=self.objid)
|
||||
if self.model.position_callback:
|
||||
for netif in self.netifs():
|
||||
netif.poshook = self.model.position_callback
|
||||
if netif.node is not None:
|
||||
x, y, z = netif.node.position.get()
|
||||
netif.poshook(netif, x, y, z)
|
||||
self.model.updateconfig()
|
||||
|
||||
def all_link_data(self, flags):
|
||||
all_links = OvsNet.all_link_data(self, flags)
|
||||
|
|
|
@ -169,6 +169,7 @@ class TunTap(PyCoreNetIf):
|
|||
:return: wait for device local response
|
||||
:rtype: int
|
||||
"""
|
||||
logger.debug("waiting for device local: %s", self.localname)
|
||||
|
||||
def localdevexists():
|
||||
args = [constants.IP_BIN, "link", "show", self.localname]
|
||||
|
@ -182,6 +183,7 @@ class TunTap(PyCoreNetIf):
|
|||
|
||||
:return: nothing
|
||||
"""
|
||||
logger.debug("waiting for device node: %s", self.name)
|
||||
|
||||
def nodedevexists():
|
||||
args = [constants.IP_BIN, "link", "show", self.name]
|
||||
|
@ -272,7 +274,7 @@ class GreTap(PyCoreNetIf):
|
|||
|
||||
if remoteip is None:
|
||||
raise ValueError, "missing remote IP required for GRE TAP device"
|
||||
args = ["ip", "link", "add", self.localname, "type", "gretap",
|
||||
args = [constants.IP_BIN, "link", "add", self.localname, "type", "gretap",
|
||||
"remote", str(remoteip)]
|
||||
if localip:
|
||||
args += ["local", str(localip)]
|
||||
|
@ -281,7 +283,7 @@ class GreTap(PyCoreNetIf):
|
|||
if key:
|
||||
args += ["key", str(key)]
|
||||
utils.check_cmd(args)
|
||||
args = ["ip", "link", "set", self.localname, "up"]
|
||||
args = [constants.IP_BIN, "link", "set", self.localname, "up"]
|
||||
utils.check_cmd(args)
|
||||
self.up = True
|
||||
|
||||
|
@ -293,9 +295,9 @@ class GreTap(PyCoreNetIf):
|
|||
"""
|
||||
if self.localname:
|
||||
try:
|
||||
args = ["ip", "link", "set", self.localname, "down"]
|
||||
args = [constants.IP_BIN, "link", "set", self.localname, "down"]
|
||||
utils.check_cmd(args)
|
||||
args = ["ip", "link", "del", self.localname]
|
||||
args = [constants.IP_BIN, "link", "del", self.localname]
|
||||
utils.check_cmd(args)
|
||||
except CoreCommandError:
|
||||
logger.exception("error during shutdown")
|
||||
|
|
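The GreTap changes above swap the hard-coded "ip" for constants.IP_BIN in both startup and shutdown. The command sequence is equivalent to this standalone sketch (device name and addresses are illustrative; the lists are only printed here, the real code passes each one to utils.check_cmd()):

```python
IP_BIN = "/sbin/ip"   # stand-in for constants.IP_BIN
localname = "gt.1.1"  # hypothetical GRE TAP device name
remoteip, localip = "10.0.0.2", "10.0.0.1"

startup = [
    [IP_BIN, "link", "add", localname, "type", "gretap",
     "remote", str(remoteip), "local", str(localip)],
    [IP_BIN, "link", "set", localname, "up"],
]
shutdown = [
    [IP_BIN, "link", "set", localname, "down"],
    [IP_BIN, "link", "del", localname],
]

for args in startup + shutdown:
    print(" ".join(args))
```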
|
@ -440,7 +440,7 @@ class LxBrNet(PyCoreNet):
|
|||
"burst", str(burst), "limit", str(limit)]
|
||||
if bw > 0:
|
||||
if self.up:
|
||||
logger.info("linkconfig: %s" % ([tc + parent + ["handle", "1:"] + tbf],))
|
||||
logger.debug("linkconfig: %s" % ([tc + parent + ["handle", "1:"] + tbf],))
|
||||
utils.check_cmd(tc + parent + ["handle", "1:"] + tbf)
|
||||
netif.setparam("has_tbf", True)
|
||||
changed = True
|
||||
|
@ -485,12 +485,12 @@ class LxBrNet(PyCoreNet):
|
|||
return
|
||||
tc[2] = "delete"
|
||||
if self.up:
|
||||
logger.info("linkconfig: %s" % ([tc + parent + ["handle", "10:"]],))
|
||||
logger.debug("linkconfig: %s" % ([tc + parent + ["handle", "10:"]],))
|
||||
utils.check_cmd(tc + parent + ["handle", "10:"])
|
||||
netif.setparam("has_netem", False)
|
||||
elif len(netem) > 1:
|
||||
if self.up:
|
||||
logger.info("linkconfig: %s" % ([tc + parent + ["handle", "10:"] + netem],))
|
||||
logger.debug("linkconfig: %s" % ([tc + parent + ["handle", "10:"] + netem],))
|
||||
utils.check_cmd(tc + parent + ["handle", "10:"] + netem)
|
||||
netif.setparam("has_netem", True)
|
||||
|
||||
|
|
|
@ -156,14 +156,6 @@ class SimpleLxcNode(PyCoreNode):
|
|||
self.client.close()
|
||||
self.up = False
|
||||
|
||||
def boot(self):
|
||||
"""
|
||||
Boot logic.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
return None
|
||||
|
||||
def cmd(self, args, wait=True):
|
||||
"""
|
||||
Runs shell command on node, with option to not wait for a result.
|
||||
|
@ -222,7 +214,6 @@ class SimpleLxcNode(PyCoreNode):
|
|||
raise CoreCommandError(status, cmd, output)
|
||||
self._mounts.append((source, target))
|
||||
|
||||
|
||||
def newifindex(self):
|
||||
"""
|
||||
Retrieve a new interface index.
|
||||
|
@ -511,22 +502,6 @@ class LxcNode(SimpleLxcNode):
|
|||
if start:
|
||||
self.startup()
|
||||
|
||||
def boot(self):
|
||||
"""
|
||||
Boot the node.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
self.session.services.bootnodeservices(self)
|
||||
|
||||
def validate(self):
|
||||
"""
|
||||
Validate the node.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
self.session.services.validatenodeservices(self)
|
||||
|
||||
def startup(self):
|
||||
"""
|
||||
Startup logic for the node.
|
||||
|
|
|
@ -25,12 +25,6 @@ class PhysicalNode(PyCoreNode):
|
|||
if start:
|
||||
self.startup()
|
||||
|
||||
def boot(self):
|
||||
self.session.services.bootnodeservices(self)
|
||||
|
||||
def validate(self):
|
||||
self.session.services.validatenodeservices(self)
|
||||
|
||||
def startup(self):
|
||||
with self.lock:
|
||||
self.makenodedir()
|
||||
|
|
|
@ -118,12 +118,7 @@ class Sdt(object):
|
|||
:return: True if enabled, False otherwise
|
||||
:rtype: bool
|
||||
"""
|
||||
if not hasattr(self.session.options, "enablesdt"):
|
||||
return False
|
||||
enabled = self.session.options.enablesdt
|
||||
if enabled in ("1", "true", 1, True):
|
||||
return True
|
||||
return False
|
||||
return self.session.options.get_config("enablesdt") == "1"
|
||||
|
||||
def seturl(self):
|
||||
"""
|
||||
|
@ -132,11 +127,8 @@ class Sdt(object):
|
|||
|
||||
:return: nothing
|
||||
"""
|
||||
url = None
|
||||
if hasattr(self.session.options, "sdturl"):
|
||||
if self.session.options.sdturl != "":
|
||||
url = self.session.options.sdturl
|
||||
if url is None or url == "":
|
||||
url = self.session.options.get_config("sdturl")
|
||||
if not url:
|
||||
url = self.DEFAULT_SDT_URL
|
||||
self.url = urlparse(url)
|
||||
self.address = (self.url.hostname, self.url.port)
|
||||
|
|
File diff suppressed because it is too large
|
@ -15,6 +15,7 @@ def load():
|
|||
"""
|
||||
Loads all services from the modules that reside under core.services.
|
||||
|
||||
:return: nothing
|
||||
:return: list of services that failed to load
|
||||
:rtype: list[str]
|
||||
"""
|
||||
ServiceManager.add_services(_PATH)
|
||||
return ServiceManager.add_services(_PATH)
|
||||
|
|
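load() now surfaces the return value of add_services() instead of discarding it. A small hedged sketch of how a caller might act on that list, assuming load() lives in the core.services package as its docstring suggests; logging setup is omitted:

```python
import logging

from core import services


def load_core_services():
    # load() returns the names of services that failed to import
    failed = services.load()
    if failed:
        logging.warning("services failed to load: %s", ", ".join(failed))
    return failed
```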
|
@ -9,23 +9,22 @@ class Bird(CoreService):
|
|||
"""
|
||||
Bird router support
|
||||
"""
|
||||
_name = "bird"
|
||||
_group = "BIRD"
|
||||
_depends = ()
|
||||
_dirs = ("/etc/bird",)
|
||||
_configs = ("/etc/bird/bird.conf",)
|
||||
_startindex = 35
|
||||
_startup = ("bird -c %s" % (_configs[0]),)
|
||||
_shutdown = ("killall bird",)
|
||||
_validate = ("pidof bird",)
|
||||
name = "bird"
|
||||
executables = ("bird",)
|
||||
group = "BIRD"
|
||||
dirs = ("/etc/bird",)
|
||||
configs = ("/etc/bird/bird.conf",)
|
||||
startup = ("bird -c %s" % (configs[0]),)
|
||||
shutdown = ("killall bird",)
|
||||
validate = ("pidof bird",)
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
def generate_config(cls, node, filename):
|
||||
"""
|
||||
Return the bird.conf file contents.
|
||||
"""
|
||||
if filename == cls._configs[0]:
|
||||
return cls.generateBirdConf(node, services)
|
||||
if filename == cls.configs[0]:
|
||||
return cls.generateBirdConf(node)
|
||||
else:
|
||||
raise ValueError
|
||||
|
||||
|
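The Bird rewrite above shows the refactored CoreService attributes: the underscore prefixes are gone, executables declares the binaries a service depends on, _startindex disappears, and generateconfig(cls, node, filename, services) becomes generate_config(cls, node, filename) with peer services reachable through node.services. A minimal hedged sketch of a custom service in the new style (the service and its files are hypothetical):

```python
from core.service import CoreService


class ExampleDaemon(CoreService):
    """
    Hypothetical service written against the refactored attribute names.
    """
    name = "ExampleDaemon"
    executables = ("exampled",)
    group = "Utility"
    dependencies = ()
    dirs = ("/etc/exampled",)
    configs = ("/etc/exampled/exampled.conf",)
    startup = ("exampled -c %s" % configs[0],)
    validate = ("pidof exampled",)
    shutdown = ("killall exampled",)

    @classmethod
    def generate_config(cls, node, filename):
        if filename == cls.configs[0]:
            # peer services are read from the node rather than passed in
            peers = ", ".join(s.name for s in node.services)
            return "# exampled.conf generated for %s (services: %s)\n" % (node.name, peers)
        raise ValueError("unknown configuration file: %s" % filename)
```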
@ -35,7 +34,7 @@ class Bird(CoreService):
|
|||
Helper to return the first IPv4 address of a node as its router ID.
|
||||
"""
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
if hasattr(ifc, 'control') and ifc.control is True:
|
||||
continue
|
||||
for a in ifc.addrlist:
|
||||
if a.find(".") >= 0:
|
||||
|
@ -44,7 +43,7 @@ class Bird(CoreService):
|
|||
return "0.0.0.0"
|
||||
|
||||
@classmethod
|
||||
def generateBirdConf(cls, node, services):
|
||||
def generateBirdConf(cls, node):
|
||||
"""
|
||||
Returns configuration file text. Other services that depend on bird
|
||||
will have generatebirdifcconfig() and generatebirdconfig()
|
||||
|
@ -73,11 +72,11 @@ protocol device {
|
|||
scan time 10; # Scan interfaces every 10 seconds
|
||||
}
|
||||
|
||||
""" % (cls._name, cls.routerid(node))
|
||||
""" % (cls.name, cls.routerid(node))
|
||||
|
||||
# Generate protocol specific configurations
|
||||
for s in services:
|
||||
if cls._name not in s._depends:
|
||||
for s in node.services:
|
||||
if cls.name not in s.dependencies:
|
||||
continue
|
||||
cfg += s.generatebirdconfig(node)
|
||||
|
||||
|
@ -90,15 +89,15 @@ class BirdService(CoreService):
|
|||
common to Bird's routing daemons.
|
||||
"""
|
||||
|
||||
_name = None
|
||||
_group = "BIRD"
|
||||
_depends = ("bird",)
|
||||
_dirs = ()
|
||||
_configs = ()
|
||||
_startindex = 40
|
||||
_startup = ()
|
||||
_shutdown = ()
|
||||
_meta = "The config file for this service can be found in the bird service."
|
||||
name = None
|
||||
executables = ("bird",)
|
||||
group = "BIRD"
|
||||
dependencies = ("bird",)
|
||||
dirs = ()
|
||||
configs = ()
|
||||
startup = ()
|
||||
shutdown = ()
|
||||
meta = "The config file for this service can be found in the bird service."
|
||||
|
||||
@classmethod
|
||||
def generatebirdconfig(cls, node):
|
||||
|
@ -106,14 +105,15 @@ class BirdService(CoreService):
|
|||
|
||||
@classmethod
|
||||
def generatebirdifcconfig(cls, node):
|
||||
''' Use only bare interfaces descriptions in generated protocol
|
||||
"""
|
||||
Use only bare interfaces descriptions in generated protocol
|
||||
configurations. This has the slight advantage of being the same
|
||||
everywhere.
|
||||
'''
|
||||
"""
|
||||
cfg = ""
|
||||
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
if hasattr(ifc, 'control') and ifc.control is True:
|
||||
continue
|
||||
cfg += ' interface "%s";\n' % ifc.name
|
||||
|
||||
|
@ -125,8 +125,8 @@ class BirdBgp(BirdService):
|
|||
BGP BIRD Service (configuration generation)
|
||||
"""
|
||||
|
||||
_name = "BIRD_BGP"
|
||||
_custom_needed = True
|
||||
name = "BIRD_BGP"
|
||||
custom_needed = True
|
||||
|
||||
@classmethod
|
||||
def generatebirdconfig(cls, node):
|
||||
|
@ -156,7 +156,7 @@ class BirdOspf(BirdService):
|
|||
OSPF BIRD Service (configuration generation)
|
||||
"""
|
||||
|
||||
_name = "BIRD_OSPFv2"
|
||||
name = "BIRD_OSPFv2"
|
||||
|
||||
@classmethod
|
||||
def generatebirdconfig(cls, node):
|
||||
|
@ -181,7 +181,7 @@ class BirdRadv(BirdService):
|
|||
RADV BIRD Service (configuration generation)
|
||||
"""
|
||||
|
||||
_name = "BIRD_RADV"
|
||||
name = "BIRD_RADV"
|
||||
|
||||
@classmethod
|
||||
def generatebirdconfig(cls, node):
|
||||
|
@ -209,7 +209,7 @@ class BirdRip(BirdService):
|
|||
RIP BIRD Service (configuration generation)
|
||||
"""
|
||||
|
||||
_name = "BIRD_RIP"
|
||||
name = "BIRD_RIP"
|
||||
|
||||
@classmethod
|
||||
def generatebirdconfig(cls, node):
|
||||
|
@ -231,8 +231,8 @@ class BirdStatic(BirdService):
|
|||
Static Bird Service (configuration generation)
|
||||
"""
|
||||
|
||||
_name = "BIRD_static"
|
||||
_custom_needed = True
|
||||
name = "BIRD_static"
|
||||
custom_needed = True
|
||||
|
||||
@classmethod
|
||||
def generatebirdconfig(cls, node):
|
||||
|
|
|
@ -104,7 +104,7 @@ from core.service import ServiceManager
|
|||
try:
|
||||
from docker import Client
|
||||
except ImportError:
|
||||
logger.error("failure to import docker")
|
||||
logger.warn("missing python docker bindings")
|
||||
|
||||
|
||||
class DockerService(CoreService):
|
||||
|
@ -112,19 +112,18 @@ class DockerService(CoreService):
|
|||
This is a service which will allow running docker containers in a CORE
|
||||
node.
|
||||
"""
|
||||
_name = "Docker"
|
||||
_group = "Docker"
|
||||
_depends = ()
|
||||
_dirs = ('/var/lib/docker/containers/', '/run/shm', '/run/resolvconf',)
|
||||
_configs = ('docker.sh',)
|
||||
_startindex = 50
|
||||
_startup = ('sh docker.sh',)
|
||||
_shutdown = ('service docker stop',)
|
||||
name = "Docker"
|
||||
executables = ("docker",)
|
||||
group = "Docker"
|
||||
dirs = ('/var/lib/docker/containers/', '/run/shm', '/run/resolvconf',)
|
||||
configs = ('docker.sh',)
|
||||
startup = ('sh docker.sh',)
|
||||
shutdown = ('service docker stop',)
|
||||
# Container image to start
|
||||
_image = ""
|
||||
image = ""
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
def generate_config(cls, node, filename):
|
||||
"""
|
||||
Returns a string having contents of a docker.sh script that
|
||||
can be modified to start a specific docker image.
|
||||
|
@ -139,7 +138,7 @@ class DockerService(CoreService):
|
|||
# distros may just be docker
|
||||
cfg += 'service docker start\n'
|
||||
cfg += "# you could add a command to start a image here eg:\n"
|
||||
if not cls._image:
|
||||
if not cls.image:
|
||||
cfg += "# docker run -d --net host --name coreDock <imagename>\n"
|
||||
else:
|
||||
cfg += """\
|
||||
|
@ -150,7 +149,7 @@ until [ $result -eq 0 ]; do
|
|||
# this is to alleviate contention to docker's SQLite database
|
||||
sleep 0.3
|
||||
done
|
||||
""" % (cls._image,)
|
||||
""" % (cls.image,)
|
||||
return cfg
|
||||
|
||||
@classmethod
|
||||
|
|
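DockerService leaves image empty and only emits a commented-out docker run hint; when image is set, generate_config() interpolates it into the retry loop of the generated docker.sh. A hedged sketch of a subclass pinning an image, written as if it lived alongside DockerService in the same module (the image name is illustrative):

```python
class UbuntuDocker(DockerService):
    """
    Hypothetical subclass that starts a specific container image.
    """
    name = "UbuntuDocker"
    # interpolated into the docker run retry loop of the generated docker.sh
    image = "ubuntu:16.04"
```

With image set, the generated script starts that container instead of printing the commented-out example command.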
37
daemon/core/services/emaneservices.py
Normal file
|
@ -0,0 +1,37 @@
|
|||
from core.enumerations import NodeTypes
|
||||
from core.misc import nodeutils
|
||||
from core.service import CoreService
|
||||
from core.xml import emanexml
|
||||
|
||||
|
||||
class EmaneTransportService(CoreService):
|
||||
name = "transportd"
|
||||
executables = ("emanetransportd", "emanegentransportxml")
|
||||
group = "EMANE"
|
||||
dependencies = ()
|
||||
dirs = ()
|
||||
configs = ("emanetransport.sh",)
|
||||
startup = ("sh %s" % configs[0],)
|
||||
validate = ("pidof %s" % executables[0],)
|
||||
validation_timer = 0.5
|
||||
shutdown = ("killall %s" % executables[0],)
|
||||
|
||||
@classmethod
|
||||
def generate_config(cls, node, filename):
|
||||
if filename == cls.configs[0]:
|
||||
transport_commands = []
|
||||
for interface in node.netifs(sort=True):
|
||||
network_node = node.session.get_object(interface.net.objid)
|
||||
if nodeutils.is_node(network_node, NodeTypes.EMANE):
|
||||
config = node.session.emane.get_configs(network_node.objid, network_node.model.name)
|
||||
if config and emanexml.is_external(config):
|
||||
nem_id = network_node.getnemid(interface)
|
||||
command = "emanetransportd -r -l 0 -d ../transportdaemon%s.xml" % nem_id
|
||||
transport_commands.append(command)
|
||||
transport_commands = "\n".join(transport_commands)
|
||||
return """
|
||||
emanegentransportxml -o ../ ../platform%s.xml
|
||||
%s
|
||||
""" % (node.objid, transport_commands)
|
||||
else:
|
||||
raise ValueError
|
|
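For a node whose EMANE interfaces use an external transport, generate_config() above produces a short emanetransport.sh. A sketch that rebuilds the same string outside the service (node id and NEM id are made-up values):

```python
# Made-up ids; mirrors the string built by EmaneTransportService.generate_config()
node_id, nem_id = 1, 1
transport_commands = "\n".join(
    ["emanetransportd -r -l 0 -d ../transportdaemon%s.xml" % nem_id])
script = """
emanegentransportxml -o ../ ../platform%s.xml
%s
""" % (node_id, transport_commands)
print(script)
```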
@ -13,17 +13,15 @@ class NrlService(CoreService):
|
|||
Parent class for NRL services. Defines properties and methods
|
||||
common to NRL's routing daemons.
|
||||
"""""
|
||||
_name = None
|
||||
_group = "ProtoSvc"
|
||||
_depends = ()
|
||||
_dirs = ()
|
||||
_configs = ()
|
||||
_startindex = 45
|
||||
_startup = ()
|
||||
_shutdown = ()
|
||||
name = None
|
||||
group = "ProtoSvc"
|
||||
dirs = ()
|
||||
configs = ()
|
||||
startup = ()
|
||||
shutdown = ()
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
def generate_config(cls, node, filename):
|
||||
return ""
|
||||
|
||||
@staticmethod
|
||||
|
@ -34,7 +32,7 @@ class NrlService(CoreService):
|
|||
interface's prefix length, so e.g. '/32' can turn into '/24'.
|
||||
"""
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
if hasattr(ifc, 'control') and ifc.control is True:
|
||||
continue
|
||||
for a in ifc.addrlist:
|
||||
if a.find(".") >= 0:
|
||||
|
@ -46,15 +44,15 @@ class NrlService(CoreService):
|
|||
|
||||
|
||||
class MgenSinkService(NrlService):
|
||||
_name = "MGEN_Sink"
|
||||
_configs = ("sink.mgen",)
|
||||
_startindex = 5
|
||||
_startup = ("mgen input sink.mgen",)
|
||||
_validate = ("pidof mgen",)
|
||||
_shutdown = ("killall mgen",)
|
||||
name = "MGEN_Sink"
|
||||
executables = ("mgen",)
|
||||
configs = ("sink.mgen",)
|
||||
startup = ("mgen input sink.mgen",)
|
||||
validate = ("pidof mgen",)
|
||||
shutdown = ("killall mgen",)
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
def generate_config(cls, node, filename):
|
||||
cfg = "0.0 LISTEN UDP 5000\n"
|
||||
for ifc in node.netifs():
|
||||
name = utils.sysctl_devname(ifc.name)
|
||||
|
@ -62,8 +60,8 @@ class MgenSinkService(NrlService):
|
|||
return cfg
|
||||
|
||||
@classmethod
|
||||
def getstartup(cls, node, services):
|
||||
cmd = cls._startup[0]
|
||||
def get_startup(cls, node):
|
||||
cmd = cls.startup[0]
|
||||
cmd += " output /tmp/mgen_%s.log" % node.name
|
||||
return cmd,
|
||||
|
||||
|
@ -72,27 +70,27 @@ class NrlNhdp(NrlService):
|
|||
"""
|
||||
NeighborHood Discovery Protocol for MANET networks.
|
||||
"""
|
||||
_name = "NHDP"
|
||||
_startup = ("nrlnhdp",)
|
||||
_shutdown = ("killall nrlnhdp",)
|
||||
_validate = ("pidof nrlnhdp",)
|
||||
name = "NHDP"
|
||||
executables = ("nrlnhdp",)
|
||||
startup = ("nrlnhdp",)
|
||||
shutdown = ("killall nrlnhdp",)
|
||||
validate = ("pidof nrlnhdp",)
|
||||
|
||||
@classmethod
|
||||
def getstartup(cls, node, services):
|
||||
def get_startup(cls, node):
|
||||
"""
|
||||
Generate the appropriate command-line based on node interfaces.
|
||||
"""
|
||||
cmd = cls._startup[0]
|
||||
cmd = cls.startup[0]
|
||||
cmd += " -l /var/log/nrlnhdp.log"
|
||||
cmd += " -rpipe %s_nhdp" % node.name
|
||||
|
||||
servicenames = map(lambda x: x._name, services)
|
||||
servicenames = map(lambda x: x.name, node.services)
|
||||
if "SMF" in servicenames:
|
||||
cmd += " -flooding ecds"
|
||||
cmd += " -smfClient %s_smf" % node.name
|
||||
|
||||
netifs = filter(lambda x: not getattr(x, 'control', False), \
|
||||
node.netifs())
|
||||
netifs = filter(lambda x: not getattr(x, 'control', False), node.netifs())
|
||||
if len(netifs) > 0:
|
||||
interfacenames = map(lambda x: x.name, netifs)
|
||||
cmd += " -i "
|
||||
|
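get_startup() above assembles the nrlnhdp command line from the node name, its non-control interfaces, and whether SMF appears in node.services. A standalone sketch of the result for a node named "n1" with one interface and SMF enabled; the hunk truncates after the " -i " fragment, so how interface names are joined is an assumption:

```python
name = "n1"            # node name
interfaces = ["eth0"]  # non-control interfaces
smf_enabled = True     # "SMF" present among node.services names

cmd = "nrlnhdp"
cmd += " -l /var/log/nrlnhdp.log"
cmd += " -rpipe %s_nhdp" % name
if smf_enabled:
    cmd += " -flooding ecds"
    cmd += " -smfClient %s_smf" % name
if interfaces:
    # joining with " -i " is an assumption; the hunk stops at the flag itself
    cmd += " -i " + " -i ".join(interfaces)

print(cmd)  # nrlnhdp -l /var/log/nrlnhdp.log -rpipe n1_nhdp -flooding ecds -smfClient n1_smf -i eth0
```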
@ -105,14 +103,15 @@ class NrlSmf(NrlService):
|
|||
"""
|
||||
Simplified Multicast Forwarding for MANET networks.
|
||||
"""
|
||||
_name = "SMF"
|
||||
_startup = ("sh startsmf.sh",)
|
||||
_shutdown = ("killall nrlsmf",)
|
||||
_validate = ("pidof nrlsmf",)
|
||||
_configs = ("startsmf.sh",)
|
||||
name = "SMF"
|
||||
executables = ("nrlsmf",)
|
||||
startup = ("sh startsmf.sh",)
|
||||
shutdown = ("killall nrlsmf",)
|
||||
validate = ("pidof nrlsmf",)
|
||||
configs = ("startsmf.sh",)
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
def generate_config(cls, node, filename):
|
||||
"""
|
||||
Generate a startup script for SMF. Because nrlsmf does not
|
||||
daemonize, it can cause problems in some situations when launched
|
||||
|
@ -123,7 +122,7 @@ class NrlSmf(NrlService):
|
|||
comments = ""
|
||||
cmd = "nrlsmf instance %s_smf" % node.name
|
||||
|
||||
servicenames = map(lambda x: x._name, services)
|
||||
servicenames = map(lambda x: x.name, node.services)
|
||||
netifs = filter(lambda x: not getattr(x, 'control', False), node.netifs())
|
||||
if len(netifs) == 0:
|
||||
return ""
|
||||
|
@ -156,17 +155,18 @@ class NrlOlsr(NrlService):
|
|||
"""
|
||||
Optimized Link State Routing protocol for MANET networks.
|
||||
"""
|
||||
_name = "OLSR"
|
||||
_startup = ("nrlolsrd",)
|
||||
_shutdown = ("killall nrlolsrd",)
|
||||
_validate = ("pidof nrlolsrd",)
|
||||
name = "OLSR"
|
||||
executables = ("nrlolsrd",)
|
||||
startup = ("nrlolsrd",)
|
||||
shutdown = ("killall nrlolsrd",)
|
||||
validate = ("pidof nrlolsrd",)
|
||||
|
||||
@classmethod
|
||||
def getstartup(cls, node, services):
|
||||
def get_startup(cls, node):
|
||||
"""
|
||||
Generate the appropriate command-line based on node interfaces.
|
||||
"""
|
||||
cmd = cls._startup[0]
|
||||
cmd = cls.startup[0]
|
||||
# are multiple interfaces supported? No.
|
||||
netifs = list(node.netifs())
|
||||
if len(netifs) > 0:
|
||||
|
@ -175,7 +175,7 @@ class NrlOlsr(NrlService):
|
|||
cmd += " -l /var/log/nrlolsrd.log"
|
||||
cmd += " -rpipe %s_olsr" % node.name
|
||||
|
||||
servicenames = map(lambda x: x._name, services)
|
||||
servicenames = map(lambda x: x.name, node.services)
|
||||
if "SMF" in servicenames and not "NHDP" in servicenames:
|
||||
cmd += " -flooding s-mpr"
|
||||
cmd += " -smfClient %s_smf" % node.name
|
||||
|
@ -189,21 +189,22 @@ class NrlOlsrv2(NrlService):
|
|||
"""
|
||||
Optimized Link State Routing protocol version 2 for MANET networks.
|
||||
"""
|
||||
_name = "OLSRv2"
|
||||
_startup = ("nrlolsrv2",)
|
||||
_shutdown = ("killall nrlolsrv2",)
|
||||
_validate = ("pidof nrlolsrv2",)
|
||||
name = "OLSRv2"
|
||||
executables = ("nrlolsrv2",)
|
||||
startup = ("nrlolsrv2",)
|
||||
shutdown = ("killall nrlolsrv2",)
|
||||
validate = ("pidof nrlolsrv2",)
|
||||
|
||||
@classmethod
|
||||
def getstartup(cls, node, services):
|
||||
def get_startup(cls, node):
|
||||
"""
|
||||
Generate the appropriate command-line based on node interfaces.
|
||||
"""
|
||||
cmd = cls._startup[0]
|
||||
cmd = cls.startup[0]
|
||||
cmd += " -l /var/log/nrlolsrv2.log"
|
||||
cmd += " -rpipe %s_olsrv2" % node.name
|
||||
|
||||
servicenames = map(lambda x: x._name, services)
|
||||
servicenames = map(lambda x: x.name, node.services)
|
||||
if "SMF" in servicenames:
|
||||
cmd += " -flooding ecds"
|
||||
cmd += " -smfClient %s_smf" % node.name
|
||||
|
@ -223,19 +224,20 @@ class OlsrOrg(NrlService):
|
|||
"""
|
||||
Optimized Link State Routing protocol from olsr.org for MANET networks.
|
||||
"""
|
||||
_name = "OLSRORG"
|
||||
_configs = ("/etc/olsrd/olsrd.conf",)
|
||||
_dirs = ("/etc/olsrd",)
|
||||
_startup = ("olsrd",)
|
||||
_shutdown = ("killall olsrd",)
|
||||
_validate = ("pidof olsrd",)
|
||||
name = "OLSRORG"
|
||||
executables = ("olsrd",)
|
||||
configs = ("/etc/olsrd/olsrd.conf",)
|
||||
dirs = ("/etc/olsrd",)
|
||||
startup = ("olsrd",)
|
||||
shutdown = ("killall olsrd",)
|
||||
validate = ("pidof olsrd",)
|
||||
|
||||
@classmethod
|
||||
def getstartup(cls, node, services):
|
||||
def get_startup(cls, node):
|
||||
"""
|
||||
Generate the appropriate command-line based on node interfaces.
|
||||
"""
|
||||
cmd = cls._startup[0]
|
||||
cmd = cls.startup[0]
|
||||
netifs = filter(lambda x: not getattr(x, 'control', False), node.netifs())
|
||||
if len(netifs) > 0:
|
||||
interfacenames = map(lambda x: x.name, netifs)
|
||||
|
@ -245,7 +247,7 @@ class OlsrOrg(NrlService):
|
|||
return cmd,
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
def generate_config(cls, node, filename):
|
||||
"""
|
||||
Generate a default olsrd config file to use the broadcast address of 255.255.255.255.
|
||||
"""
|
||||
|
@ -572,27 +574,24 @@ class MgenActor(NrlService):
|
|||
"""
|
||||
|
||||
# a unique name is required, without spaces
|
||||
_name = "MgenActor"
|
||||
name = "MgenActor"
|
||||
executables = ("mgen",)
|
||||
# you can create your own group here
|
||||
_group = "ProtoSvc"
|
||||
# list of other services this service depends on
|
||||
_depends = ()
|
||||
group = "ProtoSvc"
|
||||
# per-node directories
|
||||
_dirs = ()
|
||||
dirs = ()
|
||||
# generated files (without a full path this file goes in the node's dir,
|
||||
# e.g. /tmp/pycore.12345/n1.conf/)
|
||||
_configs = ('start_mgen_actor.sh',)
|
||||
# this controls the starting order vs other enabled services
|
||||
_startindex = 50
|
||||
configs = ('start_mgen_actor.sh',)
|
||||
# list of startup commands, also may be generated during startup
|
||||
_startup = ("sh start_mgen_actor.sh",)
|
||||
startup = ("sh start_mgen_actor.sh",)
|
||||
# list of validation commands
|
||||
_validate = ("pidof mgen",)
|
||||
validate = ("pidof mgen",)
|
||||
# list of shutdown commands
|
||||
_shutdown = ("killall mgen",)
|
||||
shutdown = ("killall mgen",)
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
def generate_config(cls, node, filename):
|
||||
"""
|
||||
Generate a startup script for MgenActor. Because mgenActor does not
|
||||
daemonize, it can cause problems in some situations when launched
|
||||
|
@ -603,7 +602,7 @@ class MgenActor(NrlService):
|
|||
comments = ""
|
||||
cmd = "mgenBasicActor.py -n %s -a 0.0.0.0" % node.name
|
||||
|
||||
servicenames = map(lambda x: x._name, services)
|
||||
servicenames = map(lambda x: x.name, node.services)
|
||||
netifs = filter(lambda x: not getattr(x, 'control', False), node.netifs())
|
||||
if len(netifs) == 0:
|
||||
return ""
|
||||
|
@ -616,15 +615,15 @@ class Arouted(NrlService):
|
|||
"""
|
||||
Adaptive Routing
|
||||
"""
|
||||
_name = "arouted"
|
||||
_configs = ("startarouted.sh",)
|
||||
_startindex = NrlService._startindex + 10
|
||||
_startup = ("sh startarouted.sh",)
|
||||
_shutdown = ("pkill arouted",)
|
||||
_validate = ("pidof arouted",)
|
||||
name = "arouted"
|
||||
executables = ("arouted",)
|
||||
configs = ("startarouted.sh",)
|
||||
startup = ("sh startarouted.sh",)
|
||||
shutdown = ("pkill arouted",)
|
||||
validate = ("pidof arouted",)
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
def generate_config(cls, node, filename):
|
||||
"""
|
||||
Return the Quagga.conf or quaggaboot.sh file contents.
|
||||
"""
|
||||
|
|
|
@ -10,43 +10,41 @@ from core.service import CoreService
|
|||
|
||||
|
||||
class Zebra(CoreService):
|
||||
_name = "zebra"
|
||||
_group = "Quagga"
|
||||
_dirs = ("/usr/local/etc/quagga", "/var/run/quagga")
|
||||
_configs = (
|
||||
name = "zebra"
|
||||
group = "Quagga"
|
||||
dirs = ("/usr/local/etc/quagga", "/var/run/quagga")
|
||||
configs = (
|
||||
"/usr/local/etc/quagga/Quagga.conf",
|
||||
"quaggaboot.sh",
|
||||
"/usr/local/etc/quagga/vtysh.conf"
|
||||
)
|
||||
_startindex = 35
|
||||
_startup = ("sh quaggaboot.sh zebra",)
|
||||
_shutdown = ("killall zebra",)
|
||||
_validate = ("pidof zebra",)
|
||||
startup = ("sh quaggaboot.sh zebra",)
|
||||
shutdown = ("killall zebra",)
|
||||
validate = ("pidof zebra",)
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
def generate_config(cls, node, filename):
|
||||
"""
|
||||
Return the Quagga.conf or quaggaboot.sh file contents.
|
||||
"""
|
||||
if filename == cls._configs[0]:
|
||||
return cls.generateQuaggaConf(node, services)
|
||||
elif filename == cls._configs[1]:
|
||||
return cls.generateQuaggaBoot(node, services)
|
||||
elif filename == cls._configs[2]:
|
||||
return cls.generateVtyshConf(node, services)
|
||||
if filename == cls.configs[0]:
|
||||
return cls.generateQuaggaConf(node)
|
||||
elif filename == cls.configs[1]:
|
||||
return cls.generateQuaggaBoot(node)
|
||||
elif filename == cls.configs[2]:
|
||||
return cls.generateVtyshConf(node)
|
||||
else:
|
||||
raise ValueError("file name (%s) is not a known configuration: %s",
|
||||
filename, cls._configs)
|
||||
raise ValueError("file name (%s) is not a known configuration: %s", filename, cls.configs)
|
||||
|
||||
@classmethod
|
||||
def generateVtyshConf(cls, node, services):
|
||||
def generateVtyshConf(cls, node):
|
||||
"""
|
||||
Returns configuration file text.
|
||||
"""
|
||||
return "service integrated-vtysh-config\n"
|
||||
|
||||
@classmethod
|
||||
def generateQuaggaConf(cls, node, services):
|
||||
def generateQuaggaConf(cls, node):
|
||||
"""
|
||||
Returns configuration file text. Other services that depend on zebra
|
||||
will have generatequaggaifcconfig() and generatequaggaconfig()
|
||||
|
@ -66,13 +64,13 @@ class Zebra(CoreService):
|
|||
cfgv6 = ""
|
||||
want_ipv4 = False
|
||||
want_ipv6 = False
|
||||
for s in services:
|
||||
if cls._name not in s._depends:
|
||||
for s in node.services:
|
||||
if cls.name not in s.dependencies:
|
||||
continue
|
||||
ifccfg = s.generatequaggaifcconfig(node, ifc)
|
||||
if s._ipv4_routing:
|
||||
if s.ipv4_routing:
|
||||
want_ipv4 = True
|
||||
if s._ipv6_routing:
|
||||
if s.ipv6_routing:
|
||||
want_ipv6 = True
|
||||
cfgv6 += ifccfg
|
||||
else:
|
||||
|
@ -92,8 +90,8 @@ class Zebra(CoreService):
|
|||
cfg += cfgv6
|
||||
cfg += "!\n"
|
||||
|
||||
for s in services:
|
||||
if cls._name not in s._depends:
|
||||
for s in node.services:
|
||||
if cls.name not in s.dependencies:
|
||||
continue
|
||||
cfg += s.generatequaggaconfig(node)
|
||||
return cfg
|
||||
|
@ -111,16 +109,14 @@ class Zebra(CoreService):
|
|||
raise ValueError("invalid address: %s", x)
|
||||
|
||||
@classmethod
|
||||
def generateQuaggaBoot(cls, node, services):
|
||||
def generateQuaggaBoot(cls, node):
|
||||
"""
|
||||
Generate a shell script used to boot the Quagga daemons.
|
||||
"""
|
||||
try:
|
||||
quagga_bin_search = node.session.config['quagga_bin_search']
|
||||
quagga_sbin_search = node.session.config['quagga_sbin_search']
|
||||
except KeyError:
|
||||
quagga_bin_search = '"/usr/local/bin /usr/bin /usr/lib/quagga"'
|
||||
quagga_sbin_search = '"/usr/local/sbin /usr/sbin /usr/lib/quagga"'
|
||||
quagga_bin_search = node.session.options.get_config("quagga_bin_search",
|
||||
default='"/usr/local/bin /usr/bin /usr/lib/quagga"')
|
||||
quagga_sbin_search = node.session.options.get_config('quagga_sbin_search',
|
||||
default='"/usr/local/sbin /usr/sbin /usr/lib/quagga"')
|
||||
return """\
|
||||
#!/bin/sh
|
||||
# auto-generated by zebra service (quagga.py)
|
||||
|
@ -214,7 +210,7 @@ if [ "$1" != "zebra" ]; then
|
|||
fi
|
||||
confcheck
|
||||
bootquagga
|
||||
""" % (cls._configs[0], quagga_sbin_search, quagga_bin_search, constants.QUAGGA_STATE_DIR)
|
||||
""" % (cls.configs[0], quagga_sbin_search, quagga_bin_search, constants.QUAGGA_STATE_DIR)
|
||||
|
||||
|
||||
class QuaggaService(CoreService):
|
||||
|
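generateQuaggaBoot() above replaces the try/except around node.session.config[...] with session option lookups that carry their defaults inline. A hedged sketch of that pattern in isolation; the option names and default strings come from the hunk above, the session object is assumed:

```python
def quagga_search_paths(session):
    """Return (bin, sbin) search paths, falling back to the built-in defaults."""
    quagga_bin_search = session.options.get_config(
        "quagga_bin_search", default='"/usr/local/bin /usr/bin /usr/lib/quagga"')
    quagga_sbin_search = session.options.get_config(
        "quagga_sbin_search", default='"/usr/local/sbin /usr/sbin /usr/lib/quagga"')
    return quagga_bin_search, quagga_sbin_search
```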
@ -222,18 +218,17 @@ class QuaggaService(CoreService):
|
|||
Parent class for Quagga services. Defines properties and methods
|
||||
common to Quagga's routing daemons.
|
||||
"""
|
||||
_name = None
|
||||
_group = "Quagga"
|
||||
_depends = ("zebra",)
|
||||
_dirs = ()
|
||||
_configs = ()
|
||||
_startindex = 40
|
||||
_startup = ()
|
||||
_shutdown = ()
|
||||
_meta = "The config file for this service can be found in the Zebra service."
|
||||
name = None
|
||||
group = "Quagga"
|
||||
dependencies = ("zebra",)
|
||||
dirs = ()
|
||||
configs = ()
|
||||
startup = ()
|
||||
shutdown = ()
|
||||
meta = "The config file for this service can be found in the Zebra service."
|
||||
|
||||
_ipv4_routing = False
|
||||
_ipv6_routing = False
|
||||
ipv4_routing = False
|
||||
ipv6_routing = False
|
||||
|
||||
@staticmethod
|
||||
def routerid(node):
|
||||
|
@ -241,7 +236,7 @@ class QuaggaService(CoreService):
|
|||
Helper to return the first IPv4 address of a node as its router ID.
|
||||
"""
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
if hasattr(ifc, 'control') and ifc.control is True:
|
||||
continue
|
||||
for a in ifc.addrlist:
|
||||
if a.find(".") >= 0:
|
||||
|
@ -264,7 +259,7 @@ class QuaggaService(CoreService):
|
|||
return False
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
def generate_config(cls, node, filename):
|
||||
return ""
|
||||
|
||||
@classmethod
|
||||
|
@ -282,11 +277,11 @@ class Ospfv2(QuaggaService):
|
|||
not build its own configuration file but has hooks for adding to the
|
||||
unified Quagga.conf file.
|
||||
"""
|
||||
_name = "OSPFv2"
|
||||
_startup = ()
|
||||
_shutdown = ("killall ospfd",)
|
||||
_validate = ("pidof ospfd",)
|
||||
_ipv4_routing = True
|
||||
name = "OSPFv2"
|
||||
startup = ()
|
||||
shutdown = ("killall ospfd",)
|
||||
validate = ("pidof ospfd",)
|
||||
ipv4_routing = True
|
||||
|
||||
@staticmethod
|
||||
def mtucheck(ifc):
|
||||
|
@ -357,12 +352,12 @@ class Ospfv3(QuaggaService):
|
|||
not build its own configuration file but has hooks for adding to the
|
||||
unified Quagga.conf file.
|
||||
"""
|
||||
_name = "OSPFv3"
|
||||
_startup = ()
|
||||
_shutdown = ("killall ospf6d",)
|
||||
_validate = ("pidof ospf6d",)
|
||||
_ipv4_routing = True
|
||||
_ipv6_routing = True
|
||||
name = "OSPFv3"
|
||||
startup = ()
|
||||
shutdown = ("killall ospf6d",)
|
||||
validate = ("pidof ospf6d",)
|
||||
ipv4_routing = True
|
||||
ipv6_routing = True
|
||||
|
||||
@staticmethod
|
||||
def minmtu(ifc):
|
||||
|
@ -438,8 +433,8 @@ class Ospfv3mdr(Ospfv3):
|
|||
configuration file but has hooks for adding to the
|
||||
unified Quagga.conf file.
|
||||
"""
|
||||
_name = "OSPFv3MDR"
|
||||
_ipv4_routing = True
|
||||
name = "OSPFv3MDR"
|
||||
ipv4_routing = True
|
||||
|
||||
@classmethod
|
||||
def generatequaggaifcconfig(cls, node, ifc):
|
||||
|
@ -466,13 +461,13 @@ class Bgp(QuaggaService):
|
|||
Peers must be manually configured, with a full mesh for those
|
||||
having the same AS number.
|
||||
"""
|
||||
_name = "BGP"
|
||||
_startup = ()
|
||||
_shutdown = ("killall bgpd",)
|
||||
_validate = ("pidof bgpd",)
|
||||
_custom_needed = True
|
||||
_ipv4_routing = True
|
||||
_ipv6_routing = True
|
||||
name = "BGP"
|
||||
startup = ()
|
||||
shutdown = ("killall bgpd",)
|
||||
validate = ("pidof bgpd",)
|
||||
custom_needed = True
|
||||
ipv4_routing = True
|
||||
ipv6_routing = True
|
||||
|
||||
@classmethod
|
||||
def generatequaggaconfig(cls, node):
|
||||
|
@ -491,11 +486,11 @@ class Rip(QuaggaService):
|
|||
"""
|
||||
The RIP service provides IPv4 routing for wired networks.
|
||||
"""
|
||||
_name = "RIP"
|
||||
_startup = ()
|
||||
_shutdown = ("killall ripd",)
|
||||
_validate = ("pidof ripd",)
|
||||
_ipv4_routing = True
|
||||
name = "RIP"
|
||||
startup = ()
|
||||
shutdown = ("killall ripd",)
|
||||
validate = ("pidof ripd",)
|
||||
ipv4_routing = True
|
||||
|
||||
@classmethod
|
||||
def generatequaggaconfig(cls, node):
|
||||
|
@ -514,11 +509,11 @@ class Ripng(QuaggaService):
|
|||
"""
|
||||
The RIP NG service provides IPv6 routing for wired networks.
|
||||
"""
|
||||
_name = "RIPNG"
|
||||
_startup = ()
|
||||
_shutdown = ("killall ripngd",)
|
||||
_validate = ("pidof ripngd",)
|
||||
_ipv6_routing = True
|
||||
name = "RIPNG"
|
||||
startup = ()
|
||||
shutdown = ("killall ripngd",)
|
||||
validate = ("pidof ripngd",)
|
||||
ipv6_routing = True
|
||||
|
||||
@classmethod
|
||||
def generatequaggaconfig(cls, node):
|
||||
|
@ -538,11 +533,11 @@ class Babel(QuaggaService):
|
|||
The Babel service provides a loop-avoiding distance-vector routing
|
||||
protocol for IPv6 and IPv4 with fast convergence properties.
|
||||
"""
|
||||
_name = "Babel"
|
||||
_startup = ()
|
||||
_shutdown = ("killall babeld",)
|
||||
_validate = ("pidof babeld",)
|
||||
_ipv6_routing = True
|
||||
name = "Babel"
|
||||
startup = ()
|
||||
shutdown = ("killall babeld",)
|
||||
validate = ("pidof babeld",)
|
||||
ipv6_routing = True
|
||||
|
||||
@classmethod
|
||||
def generatequaggaconfig(cls, node):
|
||||
|
@ -567,11 +562,11 @@ class Xpimd(QuaggaService):
|
|||
"""
|
||||
PIM multicast routing based on XORP.
|
||||
"""
|
||||
_name = 'Xpimd'
|
||||
_startup = ()
|
||||
_shutdown = ('killall xpimd',)
|
||||
_validate = ('pidof xpimd',)
|
||||
_ipv4_routing = True
|
||||
name = 'Xpimd'
|
||||
startup = ()
|
||||
shutdown = ('killall xpimd',)
|
||||
validate = ('pidof xpimd',)
|
||||
ipv4_routing = True
|
||||
|
||||
@classmethod
|
||||
def generatequaggaconfig(cls, node):
|
||||
|
|
|
@ -11,42 +11,34 @@ class SdnService(CoreService):
|
|||
"""
|
||||
Parent class for SDN services.
|
||||
"""
|
||||
_name = None
|
||||
_group = "SDN"
|
||||
_depends = ()
|
||||
_dirs = ()
|
||||
_configs = ()
|
||||
_startindex = 50
|
||||
_startup = ()
|
||||
_shutdown = ()
|
||||
group = "SDN"
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
def generate_config(cls, node, filename):
|
||||
return ""
|
||||
|
||||
|
||||
class OvsService(SdnService):
|
||||
_name = "OvsService"
|
||||
_group = "SDN"
|
||||
_depends = ()
|
||||
_dirs = ("/etc/openvswitch", "/var/run/openvswitch", "/var/log/openvswitch")
|
||||
_configs = ('OvsService.sh',)
|
||||
_startindex = 50
|
||||
_startup = ('sh OvsService.sh',)
|
||||
_shutdown = ('killall ovs-vswitchd', 'killall ovsdb-server')
|
||||
name = "OvsService"
|
||||
executables = ("ovs-ofctl", "ovs-vsctl")
|
||||
group = "SDN"
|
||||
dirs = ("/etc/openvswitch", "/var/run/openvswitch", "/var/log/openvswitch")
|
||||
configs = ('OvsService.sh',)
|
||||
startup = ('sh OvsService.sh',)
|
||||
shutdown = ('killall ovs-vswitchd', 'killall ovsdb-server')
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
def generate_config(cls, node, filename):
|
||||
# Check whether the node is running zebra
|
||||
has_zebra = 0
|
||||
for s in services:
|
||||
if s._name == "zebra":
|
||||
for s in node.services:
|
||||
if s.name == "zebra":
|
||||
has_zebra = 1
|
||||
|
||||
# Check whether the node is running an SDN controller
|
||||
has_sdn_ctrlr = 0
|
||||
for s in services:
|
||||
if s._name == "ryuService":
|
||||
for s in node.services:
|
||||
if s.name == "ryuService":
|
||||
has_sdn_ctrlr = 1
|
||||
|
||||
cfg = "#!/bin/sh\n"
|
||||
|
@ -100,17 +92,16 @@ class OvsService(SdnService):
|
|||
|
||||
|
||||
class RyuService(SdnService):
|
||||
_name = "ryuService"
|
||||
_group = "SDN"
|
||||
_depends = ()
|
||||
_dirs = ()
|
||||
_configs = ('ryuService.sh',)
|
||||
_startindex = 50
|
||||
_startup = ('sh ryuService.sh',)
|
||||
_shutdown = ('killall ryu-manager',)
|
||||
name = "ryuService"
|
||||
executables = ("ryu-manager",)
|
||||
group = "SDN"
|
||||
dirs = ()
|
||||
configs = ('ryuService.sh',)
|
||||
startup = ('sh ryuService.sh',)
|
||||
shutdown = ('killall ryu-manager',)
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
def generate_config(cls, node, filename):
|
||||
"""
|
||||
Return a string that will be written to filename, or sent to the
|
||||
GUI for user customization.
|
||||
|
|
|
@ -9,17 +9,16 @@ from core.service import CoreService
|
|||
|
||||
|
||||
class VPNClient(CoreService):
|
||||
_name = "VPNClient"
|
||||
_group = "Security"
|
||||
_configs = ('vpnclient.sh',)
|
||||
_startindex = 60
|
||||
_startup = ('sh vpnclient.sh',)
|
||||
_shutdown = ("killall openvpn",)
|
||||
_validate = ("pidof openvpn",)
|
||||
_custom_needed = True
|
||||
name = "VPNClient"
|
||||
group = "Security"
|
||||
configs = ('vpnclient.sh',)
|
||||
startup = ('sh vpnclient.sh',)
|
||||
shutdown = ("killall openvpn",)
|
||||
validate = ("pidof openvpn",)
|
||||
custom_needed = True
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
def generate_config(cls, node, filename):
|
||||
"""
|
||||
Return the client.conf and vpnclient.sh file contents to
|
||||
"""
|
||||
|
@ -36,17 +35,16 @@ class VPNClient(CoreService):
|
|||
|
||||
|
||||
class VPNServer(CoreService):
|
||||
_name = "VPNServer"
|
||||
_group = "Security"
|
||||
_configs = ('vpnserver.sh',)
|
||||
_startindex = 50
|
||||
_startup = ('sh vpnserver.sh',)
|
||||
_shutdown = ("killall openvpn",)
|
||||
_validate = ("pidof openvpn",)
|
||||
_custom_needed = True
|
||||
name = "VPNServer"
|
||||
group = "Security"
|
||||
configs = ('vpnserver.sh',)
|
||||
startup = ('sh vpnserver.sh',)
|
||||
shutdown = ("killall openvpn",)
|
||||
validate = ("pidof openvpn",)
|
||||
custom_needed = True
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
def generate_config(cls, node, filename):
|
||||
"""
|
||||
Return the sample server.conf and vpnserver.sh file contents to
|
||||
GUI for user customization.
|
||||
|
@ -64,16 +62,15 @@ class VPNServer(CoreService):
|
|||
|
||||
|
||||
class IPsec(CoreService):
|
||||
_name = "IPsec"
|
||||
_group = "Security"
|
||||
_configs = ('ipsec.sh',)
|
||||
_startindex = 60
|
||||
_startup = ('sh ipsec.sh',)
|
||||
_shutdown = ("killall racoon",)
|
||||
_custom_needed = True
|
||||
name = "IPsec"
|
||||
group = "Security"
|
||||
configs = ('ipsec.sh',)
|
||||
startup = ('sh ipsec.sh',)
|
||||
shutdown = ("killall racoon",)
|
||||
custom_needed = True
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
def generate_config(cls, node, filename):
|
||||
"""
|
||||
Return the ipsec.conf and racoon.conf file contents to
|
||||
GUI for user customization.
|
||||
|
@ -92,15 +89,14 @@ class IPsec(CoreService):
|
|||
|
||||
|
||||
class Firewall(CoreService):
|
||||
_name = "Firewall"
|
||||
_group = "Security"
|
||||
_configs = ('firewall.sh',)
|
||||
_startindex = 20
|
||||
_startup = ('sh firewall.sh',)
|
||||
_custom_needed = True
|
||||
name = "Firewall"
|
||||
group = "Security"
|
||||
configs = ('firewall.sh',)
|
||||
startup = ('sh firewall.sh',)
|
||||
custom_needed = True
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
def generate_config(cls, node, filename):
|
||||
"""
|
||||
Return the firewall rule examples to GUI for user customization.
|
||||
"""
|
||||
|
|
|
@ -1,38 +0,0 @@
|
|||
from inspect import isclass
|
||||
from sys import maxint
|
||||
|
||||
from core.service import CoreService
|
||||
|
||||
|
||||
class Startup(CoreService):
|
||||
"""
|
||||
A CORE service to start other services in order, serially
|
||||
"""
|
||||
_name = 'startup'
|
||||
_group = 'Utility'
|
||||
_depends = ()
|
||||
_dirs = ()
|
||||
_configs = ('startup.sh',)
|
||||
_startindex = maxint
|
||||
_startup = ('sh startup.sh',)
|
||||
_shutdown = ()
|
||||
_validate = ()
|
||||
|
||||
@staticmethod
|
||||
def is_startup_service(s):
|
||||
return isinstance(s, Startup) or (isclass(s) and issubclass(s, Startup))
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
if filename != cls._configs[0]:
|
||||
return ''
|
||||
script = '#!/bin/sh\n' \
|
||||
'# auto-generated by Startup (startup.py)\n\n' \
|
||||
'exec > startup.log 2>&1\n\n'
|
||||
for s in sorted(services, key=lambda x: x._startindex):
|
||||
if cls.is_startup_service(s) or len(str(s._starttime)) > 0:
|
||||
continue
|
||||
start = '\n'.join(s.getstartup(node, services))
|
||||
if start:
|
||||
script += start + '\n'
|
||||
return script
|
|
@ -8,35 +8,33 @@ UCARP_ETC = "/usr/local/etc/ucarp"
|
|||
|
||||
|
||||
class Ucarp(CoreService):
|
||||
_name = "ucarp"
|
||||
_group = "Utility"
|
||||
_depends = ( )
|
||||
_dirs = (UCARP_ETC,)
|
||||
_configs = (
|
||||
name = "ucarp"
|
||||
group = "Utility"
|
||||
dirs = (UCARP_ETC,)
|
||||
configs = (
|
||||
UCARP_ETC + "/default.sh", UCARP_ETC + "/default-up.sh", UCARP_ETC + "/default-down.sh", "ucarpboot.sh",)
|
||||
_startindex = 65
|
||||
_startup = ("sh ucarpboot.sh",)
|
||||
_shutdown = ("killall ucarp",)
|
||||
_validate = ("pidof ucarp",)
|
||||
startup = ("sh ucarpboot.sh",)
|
||||
shutdown = ("killall ucarp",)
|
||||
validate = ("pidof ucarp",)
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
def generate_config(cls, node, filename):
|
||||
"""
|
||||
Return the default file contents
|
||||
"""
|
||||
if filename == cls._configs[0]:
|
||||
return cls.generateUcarpConf(node, services)
|
||||
elif filename == cls._configs[1]:
|
||||
return cls.generateVipUp(node, services)
|
||||
elif filename == cls._configs[2]:
|
||||
return cls.generateVipDown(node, services)
|
||||
elif filename == cls._configs[3]:
|
||||
return cls.generateUcarpBoot(node, services)
|
||||
if filename == cls.configs[0]:
|
||||
return cls.generateUcarpConf(node)
|
||||
elif filename == cls.configs[1]:
|
||||
return cls.generateVipUp(node)
|
||||
elif filename == cls.configs[2]:
|
||||
return cls.generateVipDown(node)
|
||||
elif filename == cls.configs[3]:
|
||||
return cls.generateUcarpBoot(node)
|
||||
else:
|
||||
raise ValueError
|
||||
|
||||
@classmethod
|
||||
def generateUcarpConf(cls, node, services):
|
||||
def generateUcarpConf(cls, node):
|
||||
"""
|
||||
Returns configuration file text.
|
||||
"""
|
||||
|
@ -105,7 +103,7 @@ ${UCARP_EXEC} -B ${UCARP_OPTS}
|
|||
""" % (ucarp_bin, UCARP_ETC)
|
||||
|
||||
@classmethod
|
||||
def generateUcarpBoot(cls, node, services):
|
||||
def generateUcarpBoot(cls, node):
|
||||
"""
|
||||
Generate a shell script used to boot the Ucarp daemons.
|
||||
"""
|
||||
|
@ -127,7 +125,7 @@ ${UCARP_CFGDIR}/default.sh
|
|||
""" % UCARP_ETC
|
||||
|
||||
@classmethod
|
||||
def generateVipUp(cls, node, services):
|
||||
def generateVipUp(cls, node):
|
||||
"""
|
||||
Generate a shell script used to start the virtual ip
|
||||
"""
|
||||
|
@ -154,7 +152,7 @@ fi
|
|||
"""
|
||||
|
||||
@classmethod
|
||||
def generateVipDown(cls, node, services):
|
||||
def generateVipDown(cls, node):
|
||||
"""
|
||||
Generate a shell script used to stop the virtual ip
|
||||
"""
|
||||
|
|
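A short sketch of driving the per-file generator under the new signature: each entry in Ucarp.configs is rendered by one generate_config(node, filename) call, and unknown names now raise ValueError per the else branch above. Here node stands in for a booted CORE node object and the loop itself is illustrative, not code from this commit:

rendered = {}
for filename in Ucarp.configs:
    # default.sh, default-up.sh, default-down.sh and ucarpboot.sh in turn
    rendered[filename] = Ucarp.generate_config(node, filename)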
|
@ -16,35 +16,32 @@ class UtilService(CoreService):
|
|||
"""
|
||||
Parent class for utility services.
|
||||
"""
|
||||
_name = None
|
||||
_group = "Utility"
|
||||
_depends = ()
|
||||
_dirs = ()
|
||||
_configs = ()
|
||||
_startindex = 80
|
||||
_startup = ()
|
||||
_shutdown = ()
|
||||
name = None
|
||||
group = "Utility"
|
||||
dirs = ()
|
||||
configs = ()
|
||||
startup = ()
|
||||
shutdown = ()
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
def generate_config(cls, node, filename):
|
||||
return ""
|
||||
|
||||
|
||||
class IPForwardService(UtilService):
|
||||
_name = "IPForward"
|
||||
_configs = ("ipforward.sh",)
|
||||
_startindex = 5
|
||||
_startup = ("sh ipforward.sh",)
|
||||
name = "IPForward"
|
||||
configs = ("ipforward.sh",)
|
||||
startup = ("sh ipforward.sh",)
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
def generate_config(cls, node, filename):
|
||||
if os.uname()[0] == "Linux":
|
||||
return cls.generateconfiglinux(node, filename, services)
|
||||
return cls.generateconfiglinux(node, filename)
|
||||
else:
|
||||
raise Exception("unknown platform")
|
||||
|
||||
@classmethod
|
||||
def generateconfiglinux(cls, node, filename, services):
|
||||
def generateconfiglinux(cls, node, filename):
|
||||
cfg = """\
|
||||
#!/bin/sh
|
||||
# auto-generated by IPForward service (utility.py)
|
||||
|
@ -67,12 +64,12 @@ class IPForwardService(UtilService):
|
|||
|
||||
|
||||
class DefaultRouteService(UtilService):
|
||||
_name = "DefaultRoute"
|
||||
_configs = ("defaultroute.sh",)
|
||||
_startup = ("sh defaultroute.sh",)
|
||||
name = "DefaultRoute"
|
||||
configs = ("defaultroute.sh",)
|
||||
startup = ("sh defaultroute.sh",)
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
def generate_config(cls, node, filename):
|
||||
cfg = "#!/bin/sh\n"
|
||||
cfg += "# auto-generated by DefaultRoute service (utility.py)\n"
|
||||
for ifc in node.netifs():
|
||||
|
@ -101,19 +98,19 @@ class DefaultRouteService(UtilService):
|
|||
|
||||
|
||||
class DefaultMulticastRouteService(UtilService):
|
||||
_name = "DefaultMulticastRoute"
|
||||
_configs = ("defaultmroute.sh",)
|
||||
_startup = ("sh defaultmroute.sh",)
|
||||
name = "DefaultMulticastRoute"
|
||||
configs = ("defaultmroute.sh",)
|
||||
startup = ("sh defaultmroute.sh",)
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
def generate_config(cls, node, filename):
|
||||
cfg = "#!/bin/sh\n"
|
||||
cfg += "# auto-generated by DefaultMulticastRoute service (utility.py)\n"
|
||||
cfg += "# the first interface is chosen below; please change it "
|
||||
cfg += "as needed\n"
|
||||
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
if hasattr(ifc, 'control') and ifc.control is True:
|
||||
continue
|
||||
if os.uname()[0] == "Linux":
|
||||
rtcmd = "ip route add 224.0.0.0/4 dev"
|
||||
|
@ -126,13 +123,13 @@ class DefaultMulticastRouteService(UtilService):
|
|||
|
||||
|
||||
class StaticRouteService(UtilService):
|
||||
_name = "StaticRoute"
|
||||
_configs = ("staticroute.sh",)
|
||||
_startup = ("sh staticroute.sh",)
|
||||
_custom_needed = True
|
||||
name = "StaticRoute"
|
||||
configs = ("staticroute.sh",)
|
||||
startup = ("sh staticroute.sh",)
|
||||
custom_needed = True
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
def generate_config(cls, node, filename):
|
||||
cfg = "#!/bin/sh\n"
|
||||
cfg += "# auto-generated by StaticRoute service (utility.py)\n#\n"
|
||||
cfg += "# NOTE: this service must be customized to be of any use\n"
|
||||
|
@ -165,21 +162,21 @@ class StaticRouteService(UtilService):
|
|||
|
||||
|
||||
class SshService(UtilService):
|
||||
_name = "SSH"
|
||||
_configs = ("startsshd.sh", "/etc/ssh/sshd_config",)
|
||||
_dirs = ("/etc/ssh", "/var/run/sshd",)
|
||||
_startup = ("sh startsshd.sh",)
|
||||
_shutdown = ("killall sshd",)
|
||||
_validate = ()
|
||||
name = "SSH"
|
||||
configs = ("startsshd.sh", "/etc/ssh/sshd_config",)
|
||||
dirs = ("/etc/ssh", "/var/run/sshd",)
|
||||
startup = ("sh startsshd.sh",)
|
||||
shutdown = ("killall sshd",)
|
||||
validate = ()
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
def generate_config(cls, node, filename):
|
||||
"""
|
||||
Use a startup script for launching sshd in order to wait for host
|
||||
key generation.
|
||||
"""
|
||||
sshcfgdir = cls._dirs[0]
|
||||
sshstatedir = cls._dirs[1]
|
||||
sshcfgdir = cls.dirs[0]
|
||||
sshstatedir = cls.dirs[1]
|
||||
sshlibdir = "/usr/lib/openssh"
|
||||
if filename == "startsshd.sh":
|
||||
return """\
|
||||
|
@ -233,15 +230,15 @@ UseDNS no
|
|||
|
||||
|
||||
class DhcpService(UtilService):
|
||||
_name = "DHCP"
|
||||
_configs = ("/etc/dhcp/dhcpd.conf",)
|
||||
_dirs = ("/etc/dhcp",)
|
||||
_startup = ("dhcpd",)
|
||||
_shutdown = ("killall dhcpd",)
|
||||
_validate = ("pidof dhcpd",)
|
||||
name = "DHCP"
|
||||
configs = ("/etc/dhcp/dhcpd.conf",)
|
||||
dirs = ("/etc/dhcp",)
|
||||
startup = ("dhcpd",)
|
||||
shutdown = ("killall dhcpd",)
|
||||
validate = ("pidof dhcpd",)
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
def generate_config(cls, node, filename):
|
||||
"""
|
||||
Generate a dhcpd config file using the network address of
|
||||
each interface.
|
||||
|
@ -296,14 +293,14 @@ class DhcpClientService(UtilService):
|
|||
"""
|
||||
Use a DHCP client for all interfaces for addressing.
|
||||
"""
|
||||
_name = "DHCPClient"
|
||||
_configs = ("startdhcpclient.sh",)
|
||||
_startup = ("sh startdhcpclient.sh",)
|
||||
_shutdown = ("killall dhclient",)
|
||||
_validate = ("pidof dhclient",)
|
||||
name = "DHCPClient"
|
||||
configs = ("startdhcpclient.sh",)
|
||||
startup = ("sh startdhcpclient.sh",)
|
||||
shutdown = ("killall dhclient",)
|
||||
validate = ("pidof dhclient",)
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
def generate_config(cls, node, filename):
|
||||
"""
|
||||
Generate a script to invoke dhclient on all interfaces.
|
||||
"""
|
||||
|
@ -314,7 +311,7 @@ class DhcpClientService(UtilService):
|
|||
cfg += "#mkdir -p /var/run/resolvconf/interface\n"
|
||||
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
if hasattr(ifc, 'control') and ifc.control is True:
|
||||
continue
|
||||
cfg += "#ln -s /var/run/resolvconf/interface/%s.dhclient" % ifc.name
|
||||
cfg += " /var/run/resolvconf/resolv.conf\n"
|
||||
|
@ -327,15 +324,15 @@ class FtpService(UtilService):
|
|||
"""
|
||||
Start a vsftpd server.
|
||||
"""
|
||||
_name = "FTP"
|
||||
_configs = ("vsftpd.conf",)
|
||||
_dirs = ("/var/run/vsftpd/empty", "/var/ftp",)
|
||||
_startup = ("vsftpd ./vsftpd.conf",)
|
||||
_shutdown = ("killall vsftpd",)
|
||||
_validate = ("pidof vsftpd",)
|
||||
name = "FTP"
|
||||
configs = ("vsftpd.conf",)
|
||||
dirs = ("/var/run/vsftpd/empty", "/var/ftp",)
|
||||
startup = ("vsftpd ./vsftpd.conf",)
|
||||
shutdown = ("killall vsftpd",)
|
||||
validate = ("pidof vsftpd",)
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
def generate_config(cls, node, filename):
|
||||
"""
|
||||
Generate a vsftpd.conf configuration file.
|
||||
"""
|
||||
|
@ -359,28 +356,28 @@ class HttpService(UtilService):
|
|||
"""
|
||||
Start an apache server.
|
||||
"""
|
||||
_name = "HTTP"
|
||||
_configs = ("/etc/apache2/apache2.conf", "/etc/apache2/envvars",
|
||||
"/var/www/index.html",)
|
||||
_dirs = ("/etc/apache2", "/var/run/apache2", "/var/log/apache2",
|
||||
"/run/lock", "/var/lock/apache2", "/var/www",)
|
||||
_startup = ("chown www-data /var/lock/apache2", "apache2ctl start",)
|
||||
_shutdown = ("apache2ctl stop",)
|
||||
_validate = ("pidof apache2",)
|
||||
name = "HTTP"
|
||||
configs = ("/etc/apache2/apache2.conf", "/etc/apache2/envvars",
|
||||
"/var/www/index.html",)
|
||||
dirs = ("/etc/apache2", "/var/run/apache2", "/var/log/apache2",
|
||||
"/run/lock", "/var/lock/apache2", "/var/www",)
|
||||
startup = ("chown www-data /var/lock/apache2", "apache2ctl start",)
|
||||
shutdown = ("apache2ctl stop",)
|
||||
validate = ("pidof apache2",)
|
||||
|
||||
APACHEVER22, APACHEVER24 = (22, 24)
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
def generate_config(cls, node, filename):
|
||||
"""
|
||||
Generate an apache2.conf configuration file.
|
||||
"""
|
||||
if filename == cls._configs[0]:
|
||||
return cls.generateapache2conf(node, filename, services)
|
||||
elif filename == cls._configs[1]:
|
||||
return cls.generateenvvars(node, filename, services)
|
||||
elif filename == cls._configs[2]:
|
||||
return cls.generatehtml(node, filename, services)
|
||||
if filename == cls.configs[0]:
|
||||
return cls.generateapache2conf(node, filename)
|
||||
elif filename == cls.configs[1]:
|
||||
return cls.generateenvvars(node, filename)
|
||||
elif filename == cls.configs[2]:
|
||||
return cls.generatehtml(node, filename)
|
||||
else:
|
||||
return ""
|
||||
|
||||
|
@ -400,7 +397,7 @@ class HttpService(UtilService):
|
|||
return cls.APACHEVER22
|
||||
|
||||
@classmethod
|
||||
def generateapache2conf(cls, node, filename, services):
|
||||
def generateapache2conf(cls, node, filename):
|
||||
lockstr = {cls.APACHEVER22:
|
||||
'LockFile ${APACHE_LOCK_DIR}/accept.lock\n',
|
||||
cls.APACHEVER24:
|
||||
|
@ -538,7 +535,7 @@ TraceEnable Off
|
|||
return cfg
|
||||
|
||||
@classmethod
|
||||
def generateenvvars(cls, node, filename, services):
|
||||
def generateenvvars(cls, node, filename):
|
||||
return """\
|
||||
# this file is used by apache2ctl - generated by utility.py:HttpService
|
||||
# these settings come from a default Ubuntu apache2 installation
|
||||
|
@ -553,7 +550,7 @@ export LANG
|
|||
"""
|
||||
|
||||
@classmethod
|
||||
def generatehtml(cls, node, filename, services):
|
||||
def generatehtml(cls, node, filename):
|
||||
body = """\
|
||||
<!-- generated by utility.py:HttpService -->
|
||||
<h1>%s web server</h1>
|
||||
|
@ -561,7 +558,7 @@ export LANG
|
|||
<p>The web server software is running but no content has been added, yet.</p>
|
||||
""" % node.name
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
if hasattr(ifc, 'control') and ifc.control is True:
|
||||
continue
|
||||
body += "<li>%s - %s</li>\n" % (ifc.name, ifc.addrlist)
|
||||
return "<html><body>%s</body></html>" % body
|
||||
|
@ -571,17 +568,16 @@ class PcapService(UtilService):
|
|||
"""
|
||||
Pcap service for logging packets.
|
||||
"""
|
||||
_name = "pcap"
|
||||
_configs = ("pcap.sh",)
|
||||
_dirs = ()
|
||||
_startindex = 1
|
||||
_startup = ("sh pcap.sh start",)
|
||||
_shutdown = ("sh pcap.sh stop",)
|
||||
_validate = ("pidof tcpdump",)
|
||||
_meta = "logs network traffic to pcap packet capture files"
|
||||
name = "pcap"
|
||||
configs = ("pcap.sh",)
|
||||
dirs = ()
|
||||
startup = ("sh pcap.sh start",)
|
||||
shutdown = ("sh pcap.sh stop",)
|
||||
validate = ("pidof tcpdump",)
|
||||
meta = "logs network traffic to pcap packet capture files"
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
def generate_config(cls, node, filename):
|
||||
"""
|
||||
Generate a startpcap.sh traffic logging script.
|
||||
"""
|
||||
|
@ -595,7 +591,7 @@ if [ "x$1" = "xstart" ]; then
|
|||
|
||||
"""
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
if hasattr(ifc, 'control') and ifc.control is True:
|
||||
cfg += '# '
|
||||
redir = "< /dev/null"
|
||||
cfg += "tcpdump ${DUMPOPTS} -w %s.%s.pcap -i %s %s &\n" % \
|
||||
|
@ -611,22 +607,22 @@ fi;
|
|||
|
||||
|
||||
class RadvdService(UtilService):
|
||||
_name = "radvd"
|
||||
_configs = ("/etc/radvd/radvd.conf",)
|
||||
_dirs = ("/etc/radvd",)
|
||||
_startup = ("radvd -C /etc/radvd/radvd.conf -m logfile -l /var/log/radvd.log",)
|
||||
_shutdown = ("pkill radvd",)
|
||||
_validate = ("pidof radvd",)
|
||||
name = "radvd"
|
||||
configs = ("/etc/radvd/radvd.conf",)
|
||||
dirs = ("/etc/radvd",)
|
||||
startup = ("radvd -C /etc/radvd/radvd.conf -m logfile -l /var/log/radvd.log",)
|
||||
shutdown = ("pkill radvd",)
|
||||
validate = ("pidof radvd",)
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
def generate_config(cls, node, filename):
|
||||
"""
|
||||
Generate a RADVD router advertisement daemon config file
|
||||
using the network address of each interface.
|
||||
"""
|
||||
cfg = "# auto-generated by RADVD service (utility.py)\n"
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
if hasattr(ifc, 'control') and ifc.control is True:
|
||||
continue
|
||||
prefixes = map(cls.subnetentry, ifc.addrlist)
|
||||
if len(prefixes) < 1:
|
||||
|
@ -671,14 +667,14 @@ class AtdService(UtilService):
|
|||
"""
|
||||
Atd service for scheduling at jobs
|
||||
"""
|
||||
_name = "atd"
|
||||
_configs = ("startatd.sh",)
|
||||
_dirs = ("/var/spool/cron/atjobs", "/var/spool/cron/atspool")
|
||||
_startup = ("sh startatd.sh",)
|
||||
_shutdown = ("pkill atd",)
|
||||
name = "atd"
|
||||
configs = ("startatd.sh",)
|
||||
dirs = ("/var/spool/cron/atjobs", "/var/spool/cron/atspool")
|
||||
startup = ("sh startatd.sh",)
|
||||
shutdown = ("pkill atd",)
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
def generate_config(cls, node, filename):
|
||||
return """
|
||||
#!/bin/sh
|
||||
echo 00001 > /var/spool/cron/atjobs/.SEQ
|
||||
|
@ -692,6 +688,5 @@ class UserDefinedService(UtilService):
|
|||
"""
|
||||
Dummy service allowing customization of anything.
|
||||
"""
|
||||
_name = "UserDefined"
|
||||
_startindex = 50
|
||||
_meta = "Customize this service to do anything upon startup."
|
||||
name = "UserDefined"
|
||||
meta = "Customize this service to do anything upon startup."
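Several of the utility services above repeat the same guard, now written as ifc.control is True, to skip control interfaces when walking node.netifs(). Pulled out as a helper purely for illustration (hypothetical, not part of the commit):

def data_interfaces(node):
    """Yield only experiment-facing interfaces, skipping control interfaces."""
    for ifc in node.netifs():
        if getattr(ifc, "control", False) is True:
            continue
        yield ifc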
|
||||
|
|
|
@ -11,18 +11,17 @@ class XorpRtrmgr(CoreService):
|
|||
XORP router manager service builds a config.boot file based on other
|
||||
enabled XORP services, and launches necessary daemons upon startup.
|
||||
"""
|
||||
_name = "xorp_rtrmgr"
|
||||
_group = "XORP"
|
||||
_depends = ()
|
||||
_dirs = ("/etc/xorp",)
|
||||
_configs = ("/etc/xorp/config.boot",)
|
||||
_startindex = 35
|
||||
_startup = ("xorp_rtrmgr -d -b %s -l /var/log/%s.log -P /var/run/%s.pid" % (_configs[0], _name, _name),)
|
||||
_shutdown = ("killall xorp_rtrmgr",)
|
||||
_validate = ("pidof xorp_rtrmgr",)
|
||||
name = "xorp_rtrmgr"
|
||||
executables = ("xorp_rtrmgr",)
|
||||
group = "XORP"
|
||||
dirs = ("/etc/xorp",)
|
||||
configs = ("/etc/xorp/config.boot",)
|
||||
startup = ("xorp_rtrmgr -d -b %s -l /var/log/%s.log -P /var/run/%s.pid" % (configs[0], name, name),)
|
||||
shutdown = ("killall xorp_rtrmgr",)
|
||||
validate = ("pidof xorp_rtrmgr",)
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
def generate_config(cls, node, filename):
|
||||
"""
|
||||
Returns config.boot configuration file text. Other services that
|
||||
depend on this will have generatexorpconfig() hooks that are
|
||||
|
@ -38,12 +37,12 @@ class XorpRtrmgr(CoreService):
|
|||
cfg += " }\n"
|
||||
cfg += "}\n\n"
|
||||
|
||||
for s in services:
|
||||
for s in node.services:
|
||||
try:
|
||||
s._depends.index(cls._name)
|
||||
s.dependencies.index(cls.name)
|
||||
cfg += s.generatexorpconfig(node)
|
||||
except ValueError:
|
||||
logger.exception("error getting value from service: %s", cls._name)
|
||||
logger.exception("error getting value from service: %s", cls.name)
|
||||
|
||||
return cfg
|
||||
|
||||
|
@ -74,15 +73,15 @@ class XorpService(CoreService):
|
|||
Parent class for XORP services. Defines properties and methods
|
||||
common to XORP's routing daemons.
|
||||
"""
|
||||
_name = None
|
||||
_group = "XORP"
|
||||
_depends = ("xorp_rtrmgr",)
|
||||
_dirs = ()
|
||||
_configs = ()
|
||||
_startindex = 40
|
||||
_startup = ()
|
||||
_shutdown = ()
|
||||
_meta = "The config file for this service can be found in the xorp_rtrmgr service."
|
||||
name = None
|
||||
executables = ("xorp_rtrmgr",)
|
||||
group = "XORP"
|
||||
dependencies = ("xorp_rtrmgr",)
|
||||
dirs = ()
|
||||
configs = ()
|
||||
startup = ()
|
||||
shutdown = ()
|
||||
meta = "The config file for this service can be found in the xorp_rtrmgr service."
|
||||
|
||||
@staticmethod
|
||||
def fea(forwarding):
|
||||
|
@ -151,7 +150,7 @@ class XorpService(CoreService):
|
|||
return "0.0.0.0"
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
def generate_config(cls, node, filename):
|
||||
return ""
|
||||
|
||||
@classmethod
|
||||
|
@ -165,7 +164,7 @@ class XorpOspfv2(XorpService):
|
|||
not build its own configuration file but has hooks for adding to the
|
||||
unified XORP configuration file.
|
||||
"""
|
||||
_name = "XORP_OSPFv2"
|
||||
name = "XORP_OSPFv2"
|
||||
|
||||
@classmethod
|
||||
def generatexorpconfig(cls, node):
|
||||
|
@ -200,7 +199,7 @@ class XorpOspfv3(XorpService):
|
|||
not build its own configuration file but has hooks for adding to the
|
||||
unified XORP configuration file.
|
||||
"""
|
||||
_name = "XORP_OSPFv3"
|
||||
name = "XORP_OSPFv3"
|
||||
|
||||
@classmethod
|
||||
def generatexorpconfig(cls, node):
|
||||
|
@ -227,8 +226,8 @@ class XorpBgp(XorpService):
|
|||
"""
|
||||
IPv4 inter-domain routing. AS numbers and peers must be customized.
|
||||
"""
|
||||
_name = "XORP_BGP"
|
||||
_custom_needed = True
|
||||
name = "XORP_BGP"
|
||||
custom_needed = True
|
||||
|
||||
@classmethod
|
||||
def generatexorpconfig(cls, node):
|
||||
|
@ -257,7 +256,7 @@ class XorpRip(XorpService):
|
|||
RIP IPv4 unicast routing.
|
||||
"""
|
||||
|
||||
_name = "XORP_RIP"
|
||||
name = "XORP_RIP"
|
||||
|
||||
@classmethod
|
||||
def generatexorpconfig(cls, node):
|
||||
|
@ -289,7 +288,7 @@ class XorpRipng(XorpService):
|
|||
"""
|
||||
RIP NG IPv6 unicast routing.
|
||||
"""
|
||||
_name = "XORP_RIPNG"
|
||||
name = "XORP_RIPNG"
|
||||
|
||||
@classmethod
|
||||
def generatexorpconfig(cls, node):
|
||||
|
@ -324,7 +323,7 @@ class XorpPimSm4(XorpService):
|
|||
"""
|
||||
PIM Sparse Mode IPv4 multicast routing.
|
||||
"""
|
||||
_name = "XORP_PIMSM4"
|
||||
name = "XORP_PIMSM4"
|
||||
|
||||
@classmethod
|
||||
def generatexorpconfig(cls, node):
|
||||
|
@ -383,7 +382,7 @@ class XorpPimSm6(XorpService):
|
|||
"""
|
||||
PIM Sparse Mode IPv6 multicast routing.
|
||||
"""
|
||||
_name = "XORP_PIMSM6"
|
||||
name = "XORP_PIMSM6"
|
||||
|
||||
@classmethod
|
||||
def generatexorpconfig(cls, node):
|
||||
|
@ -442,7 +441,7 @@ class XorpOlsr(XorpService):
|
|||
"""
|
||||
OLSR IPv4 unicast MANET routing.
|
||||
"""
|
||||
_name = "XORP_OLSR"
|
||||
name = "XORP_OLSR"
|
||||
|
||||
@classmethod
|
||||
def generatexorpconfig(cls, node):
|
||||
|
|
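The XorpRtrmgr.generate_config change above now reads node.services directly: any service on the node whose dependencies tuple names "xorp_rtrmgr" contributes a generatexorpconfig() fragment to the unified config.boot. The commit does this with dependencies.index() inside a try/except ValueError; the same idea written with a membership test, as an illustrative sketch:

def collect_xorp_fragments(node):
    cfg = ""
    for service in node.services:
        if "xorp_rtrmgr" in getattr(service, "dependencies", ()):
            cfg += service.generatexorpconfig(node)
    return cfg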
|
@ -4,13 +4,13 @@ that manages a CORE session.
|
|||
"""
|
||||
|
||||
import os
|
||||
import pprint
|
||||
import random
|
||||
import shutil
|
||||
import subprocess
|
||||
import tempfile
|
||||
import threading
|
||||
import time
|
||||
from multiprocessing.pool import ThreadPool
|
||||
|
||||
import pwd
|
||||
|
||||
|
@ -18,18 +18,15 @@ from core import constants
|
|||
from core import logger
|
||||
from core.api import coreapi
|
||||
from core.broker import CoreBroker
|
||||
from core.conf import Configurable
|
||||
from core.conf import ConfigurableManager
|
||||
from core.data import ConfigData
|
||||
from core.conf import ConfigurableOptions
|
||||
from core.conf import Configuration
|
||||
from core.data import EventData
|
||||
from core.data import ExceptionData
|
||||
from core.data import FileData
|
||||
from core.emane.emanemanager import EmaneManager
|
||||
from core.enumerations import ConfigDataTypes
|
||||
from core.enumerations import ConfigFlags
|
||||
from core.enumerations import EventTypes
|
||||
from core.enumerations import ExceptionLevels
|
||||
from core.enumerations import MessageFlags
|
||||
from core.enumerations import NodeTypes
|
||||
from core.enumerations import RegisterTlvs
|
||||
from core.location import CoreLocation
|
||||
|
@ -37,13 +34,11 @@ from core.misc import nodeutils
|
|||
from core.misc import utils
|
||||
from core.misc.event import EventLoop
|
||||
from core.misc.ipaddress import MacAddress
|
||||
from core.mobility import BasicRangeModel
|
||||
from core.mobility import MobilityManager
|
||||
from core.mobility import Ns2ScriptedMobility
|
||||
from core.netns import nodes
|
||||
from core.sdt import Sdt
|
||||
from core.service import CoreServices
|
||||
from core.xml.xmlsession import save_session_xml
|
||||
from core.xml import corexml, corexmldeployment
|
||||
|
||||
|
||||
class Session(object):
|
||||
|
@ -61,11 +56,6 @@ class Session(object):
|
|||
"""
|
||||
self.session_id = session_id
|
||||
|
||||
# dict of configuration items from /etc/core/core.conf config file
|
||||
if not config:
|
||||
config = {}
|
||||
self.config = config
|
||||
|
||||
# define and create session directory when desired
|
||||
self.session_dir = os.path.join(tempfile.gettempdir(), "pycore.%s" % self.session_id)
|
||||
if mkdir:
|
||||
|
@ -81,10 +71,6 @@ class Session(object):
|
|||
self.objects = {}
|
||||
self._objects_lock = threading.Lock()
|
||||
|
||||
# dict of configurable objects
|
||||
self.config_objects = {}
|
||||
self._config_objects_lock = threading.Lock()
|
||||
|
||||
# TODO: should the default state be definition?
|
||||
self.state = EventTypes.NONE.value
|
||||
self._state_time = time.time()
|
||||
|
@ -106,60 +92,36 @@ class Session(object):
|
|||
self.config_handlers = []
|
||||
self.shutdown_handlers = []
|
||||
|
||||
# setup broker
|
||||
self.broker = CoreBroker(session=self)
|
||||
self.add_config_object(CoreBroker.name, CoreBroker.config_type, self.broker.configure)
|
||||
|
||||
# setup location
|
||||
self.location = CoreLocation()
|
||||
self.add_config_object(CoreLocation.name, CoreLocation.config_type, self.location.configure)
|
||||
|
||||
# setup mobiliy
|
||||
self.mobility = MobilityManager(session=self)
|
||||
self.add_config_object(MobilityManager.name, MobilityManager.config_type, self.mobility.configure)
|
||||
self.add_config_object(BasicRangeModel.name, BasicRangeModel.config_type, BasicRangeModel.configure_mob)
|
||||
self.add_config_object(Ns2ScriptedMobility.name, Ns2ScriptedMobility.config_type,
|
||||
Ns2ScriptedMobility.configure_mob)
|
||||
|
||||
# setup services
|
||||
self.services = CoreServices(session=self)
|
||||
self.add_config_object(CoreServices.name, CoreServices.config_type, self.services.configure)
|
||||
|
||||
# setup emane
|
||||
self.emane = EmaneManager(session=self)
|
||||
self.add_config_object(EmaneManager.name, EmaneManager.config_type, self.emane.configure)
|
||||
|
||||
# setup sdt
|
||||
self.sdt = Sdt(session=self)
|
||||
|
||||
# future parameters set by the GUI may go here
|
||||
self.options = SessionConfig(session=self)
|
||||
self.add_config_object(SessionConfig.name, SessionConfig.config_type, self.options.configure)
|
||||
# session options/metadata
|
||||
self.options = SessionConfig()
|
||||
if not config:
|
||||
config = {}
|
||||
for key, value in config.iteritems():
|
||||
self.options.set_config(key, value)
|
||||
self.metadata = SessionMetaData()
|
||||
self.add_config_object(SessionMetaData.name, SessionMetaData.config_type, self.metadata.configure)
|
||||
|
||||
# initialize session feature helpers
|
||||
self.broker = CoreBroker(session=self)
|
||||
self.location = CoreLocation()
|
||||
self.mobility = MobilityManager(session=self)
|
||||
self.services = CoreServices(session=self)
|
||||
self.emane = EmaneManager(session=self)
|
||||
self.sdt = Sdt(session=self)
|
||||
|
||||
def shutdown(self):
|
||||
"""
|
||||
Shutdown all emulation objects and remove the session directory.
|
||||
"""
|
||||
|
||||
# shutdown emane
|
||||
# shutdown/cleanup feature helpers
|
||||
self.emane.shutdown()
|
||||
|
||||
# shutdown broker
|
||||
self.broker.shutdown()
|
||||
|
||||
# shutdown NRL's SDT3D
|
||||
self.sdt.shutdown()
|
||||
|
||||
# delete all current objects
|
||||
self.delete_objects()
|
||||
|
||||
preserve = False
|
||||
if hasattr(self.options, "preservedir") and self.options.preservedir == "1":
|
||||
preserve = True
|
||||
|
||||
# remove this sessions working directory
|
||||
preserve = self.options.get_config("preservedir") == "1"
|
||||
if not preserve:
|
||||
shutil.rmtree(self.session_dir, ignore_errors=True)
|
||||
|
||||
|
@ -379,12 +341,7 @@ class Session(object):
|
|||
except:
|
||||
message = "exception occured when running %s state hook: %s" % (coreapi.state_name(state), hook)
|
||||
logger.exception(message)
|
||||
self.exception(
|
||||
ExceptionLevels.ERROR,
|
||||
"Session.run_state_hooks",
|
||||
None,
|
||||
message
|
||||
)
|
||||
self.exception(ExceptionLevels.ERROR, "Session.run_state_hooks", None, message)
|
||||
|
||||
def add_state_hook(self, state, hook):
|
||||
"""
|
||||
|
@ -421,10 +378,12 @@ class Session(object):
|
|||
"""
|
||||
if state == EventTypes.RUNTIME_STATE.value:
|
||||
self.emane.poststartup()
|
||||
xml_file_version = self.get_config_item("xmlfilever")
|
||||
if xml_file_version in ('1.0',):
|
||||
xml_file_version = self.options.get_config("xmlfilever")
|
||||
if xml_file_version in ("1.0",):
|
||||
xml_file_name = os.path.join(self.session_dir, "session-deployed.xml")
|
||||
save_session_xml(self, xml_file_name, xml_file_version)
|
||||
xml_writer = corexml.CoreXmlWriter(self)
|
||||
corexmldeployment.CoreXmlDeployment(self, xml_writer.scenario)
|
||||
xml_writer.write(xml_file_name)
|
||||
|
||||
def get_environment(self, state=True):
|
||||
"""
|
||||
|
@ -597,64 +556,6 @@ class Session(object):
|
|||
except IOError:
|
||||
logger.exception("error writing nodes file")
|
||||
|
||||
def add_config_object(self, name, object_type, callback):
|
||||
"""
|
||||
Objects can register configuration objects that are included in
|
||||
the Register Message and may be configured via the Configure
|
||||
Message. The callback is invoked when receiving a Configure Message.
|
||||
|
||||
:param str name: name of configuration object to add
|
||||
:param int object_type: register tlv type
|
||||
:param func callback: callback function for object
|
||||
:return: nothing
|
||||
"""
|
||||
register_tlv = RegisterTlvs(object_type)
|
||||
logger.debug("adding config object callback: %s - %s", name, register_tlv)
|
||||
with self._config_objects_lock:
|
||||
self.config_objects[name] = (object_type, callback)
|
||||
|
||||
def config_object(self, config_data):
|
||||
"""
|
||||
Invoke the callback for an object upon receipt of configuration data for that object.
|
||||
A no-op if the object doesn't exist.
|
||||
|
||||
:param core.data.ConfigData config_data: configuration data to execute against
|
||||
:return: responses to the configuration data
|
||||
:rtype: list
|
||||
"""
|
||||
name = config_data.object
|
||||
logger.info("session(%s) setting config(%s)", self.session_id, name)
|
||||
for key, value in config_data.__dict__.iteritems():
|
||||
logger.debug("%s = %s", key, value)
|
||||
|
||||
replies = []
|
||||
|
||||
if name == "all":
|
||||
with self._config_objects_lock:
|
||||
for name in self.config_objects:
|
||||
config_type, callback = self.config_objects[name]
|
||||
reply = callback(self, config_data)
|
||||
|
||||
if reply:
|
||||
replies.append(reply)
|
||||
|
||||
return replies
|
||||
|
||||
if name in self.config_objects:
|
||||
with self._config_objects_lock:
|
||||
config_type, callback = self.config_objects[name]
|
||||
|
||||
reply = callback(self, config_data)
|
||||
|
||||
if reply:
|
||||
replies.append(reply)
|
||||
|
||||
return replies
|
||||
else:
|
||||
logger.info("session object doesn't own model '%s', ignoring", name)
|
||||
|
||||
return replies
|
||||
|
||||
def dump_session(self):
|
||||
"""
|
||||
Log information about the session in its current state.
|
||||
|
@ -685,46 +586,6 @@ class Session(object):
|
|||
|
||||
self.broadcast_exception(exception_data)
|
||||
|
||||
def get_config_item(self, name):
|
||||
"""
|
||||
Return an entry from the configuration dictionary that comes from
|
||||
command-line arguments and/or the core.conf config file.
|
||||
|
||||
:param str name: name of configuration to retrieve
|
||||
:return: config value
|
||||
"""
|
||||
return self.config.get(name)
|
||||
|
||||
def get_config_item_bool(self, name, default=None):
|
||||
"""
|
||||
Return a boolean entry from the configuration dictionary, may
|
||||
return None if undefined.
|
||||
|
||||
:param str name: configuration item name
|
||||
:param default: default value to return if not found
|
||||
:return: boolean value of the configuration item
|
||||
:rtype: bool
|
||||
"""
|
||||
item = self.get_config_item(name)
|
||||
if item is None:
|
||||
return default
|
||||
return bool(item.lower() == "true")
|
||||
|
||||
def get_config_item_int(self, name, default=None):
|
||||
"""
|
||||
Return an integer entry from the configuration dictionary, may
|
||||
return None if undefined.
|
||||
|
||||
:param str name: configuration item name
|
||||
:param default: default value to return if not found
|
||||
:return: integer value of the configuration item
|
||||
:rtype: int
|
||||
"""
|
||||
item = self.get_config_item(name)
|
||||
if item is None:
|
||||
return default
|
||||
return int(item)
|
||||
|
||||
def instantiate(self):
|
||||
"""
|
||||
We have entered the instantiation state, invoke startup methods
|
||||
|
@ -742,21 +603,13 @@ class Session(object):
|
|||
if self.emane.startup() == self.emane.NOT_READY:
|
||||
return
|
||||
|
||||
# startup broker
|
||||
# start feature helpers
|
||||
self.broker.startup()
|
||||
|
||||
# startup mobility
|
||||
self.mobility.startup()
|
||||
|
||||
# boot the services on each node
|
||||
self.boot_nodes()
|
||||
|
||||
# allow time for processes to start
|
||||
time.sleep(0.125)
|
||||
|
||||
# validate nodes
|
||||
self.validate_nodes()
|
||||
|
||||
# set broker local instantiation to complete
|
||||
self.broker.local_instantiation_complete()
|
||||
|
||||
|
@ -822,7 +675,7 @@ class Session(object):
|
|||
for obj in self.objects.itervalues():
|
||||
# TODO: determine if checking for CoreNode alone is ok
|
||||
if isinstance(obj, nodes.PyCoreNode):
|
||||
self.services.stopnodeservices(obj)
|
||||
self.services.stop_services(obj)
|
||||
|
||||
# shutdown emane
|
||||
self.emane.shutdown()
|
||||
|
@ -866,34 +719,27 @@ class Session(object):
|
|||
request flag.
|
||||
"""
|
||||
with self._objects_lock:
|
||||
pool = ThreadPool()
|
||||
results = []
|
||||
|
||||
start = time.time()
|
||||
for obj in self.objects.itervalues():
|
||||
# TODO: PyCoreNode is not the type to check
|
||||
if isinstance(obj, nodes.PyCoreNode) and not nodeutils.is_node(obj, NodeTypes.RJ45):
|
||||
# add a control interface if configured
|
||||
logger.info("booting node: %s", obj.name)
|
||||
self.add_remove_control_interface(node=obj, remove=False)
|
||||
obj.boot()
|
||||
result = pool.apply_async(self.services.boot_services, (obj,))
|
||||
results.append(result)
|
||||
|
||||
pool.close()
|
||||
pool.join()
|
||||
for result in results:
|
||||
result.get()
|
||||
logger.debug("boot run time: %s", time.time() - start)
|
||||
|
||||
self.update_control_interface_hosts()
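boot_nodes() above now fans service startup out over a multiprocessing.pool.ThreadPool: one apply_async(self.services.boot_services, (obj,)) per node, then close/join, with result.get() re-raising any exception from a worker thread. The pattern reduced to a standalone sketch (boot_one and nodes are stand-ins for the real session objects):

from multiprocessing.pool import ThreadPool


def boot_all(nodes, boot_one):
    pool = ThreadPool()
    results = [pool.apply_async(boot_one, (node,)) for node in nodes]
    pool.close()
    pool.join()
    for result in results:
        # get() re-raises exceptions raised inside the worker threads
        result.get()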
|
||||
|
||||
def validate_nodes(self):
|
||||
"""
|
||||
Validate all nodes that are known by the session.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
with self._objects_lock:
|
||||
for obj in self.objects.itervalues():
|
||||
# TODO: issues with checking PyCoreNode alone, validate is not a method
|
||||
# such as vnoded process, bridges, etc.
|
||||
if not isinstance(obj, nodes.PyCoreNode):
|
||||
continue
|
||||
|
||||
if nodeutils.is_node(obj, NodeTypes.RJ45):
|
||||
continue
|
||||
|
||||
obj.validate()
|
||||
|
||||
def get_control_net_prefixes(self):
|
||||
"""
|
||||
Retrieve control net prefixes.
|
||||
|
@ -901,11 +747,11 @@ class Session(object):
|
|||
:return: control net prefix list
|
||||
:rtype: list
|
||||
"""
|
||||
p = getattr(self.options, "controlnet", self.config.get("controlnet"))
|
||||
p0 = getattr(self.options, "controlnet0", self.config.get("controlnet0"))
|
||||
p1 = getattr(self.options, "controlnet1", self.config.get("controlnet1"))
|
||||
p2 = getattr(self.options, "controlnet2", self.config.get("controlnet2"))
|
||||
p3 = getattr(self.options, "controlnet3", self.config.get("controlnet3"))
|
||||
p = self.options.get_config("controlnet")
|
||||
p0 = self.options.get_config("controlnet0")
|
||||
p1 = self.options.get_config("controlnet1")
|
||||
p2 = self.options.get_config("controlnet2")
|
||||
p3 = self.options.get_config("controlnet3")
|
||||
|
||||
if not p0 and p:
|
||||
p0 = p
|
||||
|
@ -919,12 +765,12 @@ class Session(object):
|
|||
:return: list of control net server interfaces
|
||||
:rtype: list
|
||||
"""
|
||||
d0 = self.config.get("controlnetif0")
|
||||
d0 = self.options.get_config("controlnetif0")
|
||||
if d0:
|
||||
logger.error("controlnet0 cannot be assigned with a host interface")
|
||||
d1 = self.config.get("controlnetif1")
|
||||
d2 = self.config.get("controlnetif2")
|
||||
d3 = self.config.get("controlnetif3")
|
||||
d1 = self.options.get_config("controlnetif1")
|
||||
d2 = self.options.get_config("controlnetif2")
|
||||
d3 = self.options.get_config("controlnetif3")
|
||||
return [None, d1, d2, d3]
|
||||
|
||||
def get_control_net_index(self, dev):
|
||||
|
@ -995,15 +841,10 @@ class Session(object):
|
|||
updown_script = None
|
||||
|
||||
if net_index == 0:
|
||||
updown_script = self.config.get("controlnet_updown_script")
|
||||
updown_script = self.options.get_config("controlnet_updown_script")
|
||||
if not updown_script:
|
||||
logger.warning("controlnet updown script not configured")
|
||||
|
||||
# check if session option set, overwrite if so
|
||||
options_updown_script = getattr(self.options, "controlnet_updown_script", None)
|
||||
if options_updown_script:
|
||||
updown_script = options_updown_script
|
||||
|
||||
prefixes = prefix_spec.split()
|
||||
if len(prefixes) > 1:
|
||||
# a list of per-host prefixes is provided
|
||||
|
@ -1112,7 +953,7 @@ class Session(object):
|
|||
:param bool remove: flag to check if it should be removed
|
||||
:return: nothing
|
||||
"""
|
||||
if not self.get_config_item_bool("update_etc_hosts", False):
|
||||
if not self.options.get_config_bool("update_etc_hosts", default=False):
|
||||
return
|
||||
|
||||
try:
|
||||
|
@ -1194,165 +1035,51 @@ class Session(object):
|
|||
node = self.get_object(node_id)
|
||||
node.cmd(data, wait=False)
|
||||
|
||||
def send_objects(self):
|
||||
"""
|
||||
Return API messages that describe the current session.
|
||||
"""
|
||||
# find all nodes and links
|
||||
nodes_data = []
|
||||
links_data = []
|
||||
with self._objects_lock:
|
||||
for obj in self.objects.itervalues():
|
||||
node_data = obj.data(message_type=MessageFlags.ADD.value)
|
||||
if node_data:
|
||||
nodes_data.append(node_data)
|
||||
|
||||
node_links = obj.all_link_data(flags=MessageFlags.ADD.value)
|
||||
for link_data in node_links:
|
||||
links_data.append(link_data)
|
||||
|
||||
# send all nodes first, so that they will exist for any links
|
||||
logger.info("sending nodes:")
|
||||
for node_data in nodes_data:
|
||||
logger.info(pprint.pformat(dict(node_data._asdict())))
|
||||
self.broadcast_node(node_data)
|
||||
|
||||
logger.info("sending links:")
|
||||
for link_data in links_data:
|
||||
logger.info(pprint.pformat(dict(link_data._asdict())))
|
||||
self.broadcast_link(link_data)
|
||||
|
||||
# send model info
|
||||
configs = self.mobility.getallconfigs()
|
||||
configs += self.emane.getallconfigs()
|
||||
logger.info("sending model configs:")
|
||||
for node_number, cls, values in configs:
|
||||
logger.info("config: node(%s) class(%s) values(%s)", node_number, cls, values)
|
||||
config_data = cls.config_data(
|
||||
flags=0,
|
||||
node_id=node_number,
|
||||
type_flags=ConfigFlags.UPDATE.value,
|
||||
values=values
|
||||
)
|
||||
logger.info(pprint.pformat(dict(config_data._asdict())))
|
||||
self.broadcast_config(config_data)
|
||||
|
||||
# service customizations
|
||||
service_configs = self.services.getallconfigs()
|
||||
for node_number, service in service_configs:
|
||||
opaque = "service:%s" % service._name
|
||||
config_data = ConfigData(
|
||||
node=node_number,
|
||||
opaque=opaque
|
||||
)
|
||||
config_response = self.services.configure_request(config_data)
|
||||
self.broadcast_config(config_response)
|
||||
|
||||
for file_name, config_data in self.services.getallfiles(service):
|
||||
file_data = FileData(
|
||||
message_type=MessageFlags.ADD.value,
|
||||
node=node_number,
|
||||
name=str(file_name),
|
||||
type=opaque,
|
||||
data=str(config_data)
|
||||
)
|
||||
self.broadcast_file(file_data)
|
||||
|
||||
# TODO: send location info
|
||||
|
||||
# send hook scripts
|
||||
for state in sorted(self._hooks.keys()):
|
||||
for file_name, config_data in self._hooks[state]:
|
||||
file_data = FileData(
|
||||
message_type=MessageFlags.ADD.value,
|
||||
name=str(file_name),
|
||||
type="hook:%s" % state,
|
||||
data=str(config_data)
|
||||
)
|
||||
self.broadcast_file(file_data)
|
||||
|
||||
config_data = ConfigData()
|
||||
|
||||
# retrieve session configuration data
|
||||
options_config = self.options.configure_request(config_data, type_flags=ConfigFlags.UPDATE.value)
|
||||
self.broadcast_config(options_config)
|
||||
|
||||
# retrieve session metadata
|
||||
metadata_config = self.metadata.configure_request(config_data, type_flags=ConfigFlags.UPDATE.value)
|
||||
self.broadcast_config(metadata_config)
|
||||
|
||||
logger.info("informed GUI about %d nodes and %d links", len(nodes_data), len(links_data))
|
||||
|
||||
|
||||
class SessionConfig(ConfigurableManager, Configurable):
|
||||
class SessionConfig(ConfigurableManager, ConfigurableOptions):
|
||||
"""
|
||||
Session configuration object.
|
||||
"""
|
||||
name = "session"
|
||||
config_type = RegisterTlvs.UTILITY.value
|
||||
config_matrix = [
|
||||
("controlnet", ConfigDataTypes.STRING.value, "", "", "Control network"),
|
||||
("controlnet_updown_script", ConfigDataTypes.STRING.value, "", "", "Control network script"),
|
||||
("enablerj45", ConfigDataTypes.BOOL.value, "1", "On,Off", "Enable RJ45s"),
|
||||
("preservedir", ConfigDataTypes.BOOL.value, "0", "On,Off", "Preserve session dir"),
|
||||
("enablesdt", ConfigDataTypes.BOOL.value, "0", "On,Off", "Enable SDT3D output"),
|
||||
("sdturl", ConfigDataTypes.STRING.value, Sdt.DEFAULT_SDT_URL, "", "SDT3D URL"),
|
||||
options = [
|
||||
Configuration(_id="controlnet", _type=ConfigDataTypes.STRING, label="Control Network"),
|
||||
Configuration(_id="controlnet0", _type=ConfigDataTypes.STRING, label="Control Network 0"),
|
||||
Configuration(_id="controlnet1", _type=ConfigDataTypes.STRING, label="Control Network 1"),
|
||||
Configuration(_id="controlnet2", _type=ConfigDataTypes.STRING, label="Control Network 2"),
|
||||
Configuration(_id="controlnet3", _type=ConfigDataTypes.STRING, label="Control Network 3"),
|
||||
Configuration(_id="controlnet_updown_script", _type=ConfigDataTypes.STRING, label="Control Network Script"),
|
||||
Configuration(_id="enablerj45", _type=ConfigDataTypes.BOOL, default="1", options=["On", "Off"],
|
||||
label="Enable RJ45s"),
|
||||
Configuration(_id="preservedir", _type=ConfigDataTypes.BOOL, default="0", options=["On", "Off"],
|
||||
label="Preserve session dir"),
|
||||
Configuration(_id="enablesdt", _type=ConfigDataTypes.BOOL, default="0", options=["On", "Off"],
|
||||
label="Enable SDT3D output"),
|
||||
Configuration(_id="sdturl", _type=ConfigDataTypes.STRING, default=Sdt.DEFAULT_SDT_URL, label="SDT3D URL")
|
||||
]
|
||||
config_groups = "Options:1-%d" % len(config_matrix)
|
||||
config_type = RegisterTlvs.UTILITY.value
|
||||
|
||||
def __init__(self, session):
|
||||
"""
|
||||
Creates a SessionConfig instance.
|
||||
def __init__(self):
|
||||
super(SessionConfig, self).__init__()
|
||||
self.set_configs(self.default_values())
|
||||
|
||||
:param core.session.Session session: session this manager is tied to
|
||||
:return: nothing
|
||||
"""
|
||||
ConfigurableManager.__init__(self)
|
||||
self.session = session
|
||||
self.reset()
|
||||
def get_config(self, _id, node_id=ConfigurableManager._default_node,
|
||||
config_type=ConfigurableManager._default_type, default=None):
|
||||
value = super(SessionConfig, self).get_config(_id, node_id, config_type, default)
|
||||
if value == "":
|
||||
value = default
|
||||
return value
|
||||
|
||||
def reset(self):
|
||||
"""
|
||||
Reset the session configuration.
|
||||
def get_config_bool(self, name, default=None):
|
||||
value = self.get_config(name)
|
||||
if value is None:
|
||||
return default
|
||||
return value.lower() == "true"
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
defaults = self.getdefaultvalues()
|
||||
for key in self.getnames():
|
||||
# value may come from config file
|
||||
value = self.session.get_config_item(key)
|
||||
if value is None:
|
||||
value = self.valueof(key, defaults)
|
||||
value = self.offontobool(value)
|
||||
setattr(self, key, value)
|
||||
|
||||
def configure_values(self, config_data):
|
||||
"""
|
||||
Handle configuration values.
|
||||
|
||||
:param core.conf.ConfigData config_data: configuration data for carrying out a configuration
|
||||
:return: None
|
||||
"""
|
||||
return self.configure_values_keyvalues(config_data, self, self.getnames())
|
||||
|
||||
def configure_request(self, config_data, type_flags=ConfigFlags.NONE.value):
|
||||
"""
|
||||
Handle a configuration request.
|
||||
|
||||
:param core.conf.ConfigData config_data: configuration data for carrying out a configuration
|
||||
:param type_flags:
|
||||
:return:
|
||||
"""
|
||||
node_id = config_data.node
|
||||
values = []
|
||||
|
||||
for key in self.getnames():
|
||||
value = getattr(self, key)
|
||||
if value is None:
|
||||
value = ""
|
||||
values.append("%s" % value)
|
||||
|
||||
return self.config_data(0, node_id, type_flags, values)
|
||||
def get_config_int(self, name, default=None):
|
||||
value = self.get_config(name, default=default)
|
||||
if value is not None:
|
||||
value = int(value)
|
||||
return value
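With the config_objects plumbing removed, session options are read through the reworked SessionConfig shown above: get_config() (empty strings fall back to the default), plus the get_config_bool() and get_config_int() helpers. A usage sketch against an existing Session instance (the variable name session is a stand-in):

prefix = session.options.get_config("controlnet0")
update_hosts = session.options.get_config_bool("update_etc_hosts", default=False)
sdt_enabled = session.options.get_config_bool("enablesdt", default=False)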
|
||||
|
||||
|
||||
class SessionMetaData(ConfigurableManager):
|
||||
|
@ -1363,92 +1090,3 @@ class SessionMetaData(ConfigurableManager):
|
|||
"""
|
||||
name = "metadata"
|
||||
config_type = RegisterTlvs.UTILITY.value
|
||||
|
||||
def configure_values(self, config_data):
|
||||
"""
|
||||
Handle configuration values.
|
||||
|
||||
:param core.conf.ConfigData config_data: configuration data for carrying out a configuration
|
||||
:return: None
|
||||
"""
|
||||
values = config_data.data_values
|
||||
if values is None:
|
||||
return None
|
||||
|
||||
key_values = values.split('|')
|
||||
for key_value in key_values:
|
||||
try:
|
||||
key, value = key_value.split('=', 1)
|
||||
except ValueError:
|
||||
raise ValueError("invalid key in metdata: %s", key_value)
|
||||
|
||||
self.add_item(key, value)
|
||||
|
||||
return None
|
||||
|
||||
def configure_request(self, config_data, type_flags=ConfigFlags.NONE.value):
|
||||
"""
|
||||
Handle a configuration request.
|
||||
|
||||
:param core.conf.ConfigData config_data: configuration data for carrying out a configuration
|
||||
:param int type_flags: configuration request flag value
|
||||
:return: configuration data
|
||||
:rtype: ConfigData
|
||||
"""
|
||||
node_number = config_data.node
|
||||
values_str = "|".join(map(lambda item: "%s=%s" % item, self.items()))
|
||||
return self.config_data(0, node_number, type_flags, values_str)
|
||||
|
||||
def config_data(self, flags, node_id, type_flags, values_str):
|
||||
"""
|
||||
Retrieve configuration data object, leveraging provided data.
|
||||
|
||||
:param flags: configuration data flags
|
||||
:param int node_id: node id
|
||||
:param type_flags: type flags
|
||||
:param values_str: values string
|
||||
:return: configuration data
|
||||
:rtype: ConfigData
|
||||
"""
|
||||
data_types = tuple(map(lambda (k, v): ConfigDataTypes.STRING.value, self.items()))
|
||||
|
||||
return ConfigData(
|
||||
message_type=flags,
|
||||
node=node_id,
|
||||
object=self.name,
|
||||
type=type_flags,
|
||||
data_types=data_types,
|
||||
data_values=values_str
|
||||
)
|
||||
|
||||
def add_item(self, key, value):
|
||||
"""
|
||||
Add configuration key/value pair.
|
||||
|
||||
:param key: configuration key
|
||||
:param value: configuration value
|
||||
:return: nothing
|
||||
"""
|
||||
self.configs[key] = value
|
||||
|
||||
def get_item(self, key):
|
||||
"""
|
||||
Retrieve configuration value.
|
||||
|
||||
:param key: key for configuration value to retrieve
|
||||
:return: configuration value
|
||||
"""
|
||||
try:
|
||||
return self.configs[key]
|
||||
except KeyError:
|
||||
logger.exception("error retrieving item from configs: %s", key)
|
||||
|
||||
return None
|
||||
|
||||
def items(self):
|
||||
"""
|
||||
Retrieve configuration items.
|
||||
|
||||
:return: configuration items iterator
|
||||
"""
|
||||
return self.configs.iteritems()
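SessionMetaData.configure_values() above parses a flat "key=value|key=value" string from the Configure Message and stores each pair via add_item(). The parsing step in isolation, with made-up sample values:

values_str = "canvas=1|shapes=[]"
for key_value in values_str.split("|"):
    key, value = key_value.split("=", 1)
    # each pair lands in self.configs via add_item(key, value)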
|
||||
|
|
938
daemon/core/xml/corexml.py
Normal file
|
@ -0,0 +1,938 @@
|
|||
from lxml import etree
|
||||
|
||||
from core import coreobj
|
||||
from core import logger
|
||||
from core.emulator.emudata import InterfaceData
|
||||
from core.emulator.emudata import LinkOptions
|
||||
from core.emulator.emudata import NodeOptions
|
||||
from core.enumerations import NodeTypes
|
||||
from core.misc import ipaddress
|
||||
from core.misc import nodeutils
|
||||
from core.misc.ipaddress import MacAddress
|
||||
from core.netns import nodes
|
||||
|
||||
|
||||
def write_xml_file(xml_element, file_path, doctype=None):
|
||||
xml_data = etree.tostring(xml_element, xml_declaration=True, pretty_print=True, encoding="UTF-8", doctype=doctype)
|
||||
with open(file_path, "w") as xml_file:
|
||||
xml_file.write(xml_data)
|
||||
|
||||
|
||||
def get_type(element, name, _type):
|
||||
value = element.get(name)
|
||||
if value is not None:
|
||||
value = _type(value)
|
||||
return value
|
||||
|
||||
|
||||
def get_float(element, name):
|
||||
return get_type(element, name, float)
|
||||
|
||||
|
||||
def get_int(element, name):
|
||||
return get_type(element, name, int)
|
||||
|
||||
|
||||
def add_attribute(element, name, value):
|
||||
if value is not None:
|
||||
element.set(name, str(value))
|
||||
|
||||
|
||||
def create_interface_data(interface_element):
|
||||
interface_id = int(interface_element.get("id"))
|
||||
name = interface_element.get("name")
|
||||
mac = interface_element.get("mac")
|
||||
if mac:
|
||||
mac = MacAddress.from_string(mac)
|
||||
ip4 = interface_element.get("ip4")
|
||||
ip4_mask = get_int(interface_element, "ip4_mask")
|
||||
ip6 = interface_element.get("ip6")
|
||||
ip6_mask = get_int(interface_element, "ip6_mask")
|
||||
return InterfaceData(interface_id, name, mac, ip4, ip4_mask, ip6, ip6_mask)
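create_interface_data() above maps one <interface> element onto an InterfaceData tuple. The attribute names it reads are id, name, mac, ip4, ip4_mask, ip6 and ip6_mask; a parsing sketch with made-up values, assuming it runs alongside the corexml module:

from lxml import etree

interface_xml = etree.fromstring(
    '<interface id="0" name="eth0" mac="00:00:00:aa:00:01" '
    'ip4="10.0.0.1" ip4_mask="24" ip6="2001::1" ip6_mask="64"/>'
)
interface_data = create_interface_data(interface_xml)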
|
||||
|
||||
|
||||
def create_emane_config(node_id, emane_config, config):
|
||||
emane_configuration = etree.Element("emane_configuration")
|
||||
add_attribute(emane_configuration, "node", node_id)
|
||||
add_attribute(emane_configuration, "model", "emane")
|
||||
|
||||
emulator_element = etree.SubElement(emane_configuration, "emulator")
|
||||
for emulator_config in emane_config.emulator_config:
|
||||
value = config[emulator_config.id]
|
||||
add_configuration(emulator_element, emulator_config.id, value)
|
||||
|
||||
nem_element = etree.SubElement(emane_configuration, "nem")
|
||||
for nem_config in emane_config.nem_config:
|
||||
value = config[nem_config.id]
|
||||
add_configuration(nem_element, nem_config.id, value)
|
||||
|
||||
return emane_configuration
|
||||
|
||||
|
||||
def create_emane_model_config(node_id, model, config):
|
||||
emane_element = etree.Element("emane_configuration")
|
||||
add_attribute(emane_element, "node", node_id)
|
||||
add_attribute(emane_element, "model", model.name)
|
||||
|
||||
mac_element = etree.SubElement(emane_element, "mac")
|
||||
for mac_config in model.mac_config:
|
||||
value = config[mac_config.id]
|
||||
add_configuration(mac_element, mac_config.id, value)
|
||||
|
||||
phy_element = etree.SubElement(emane_element, "phy")
|
||||
for phy_config in model.phy_config:
|
||||
value = config[phy_config.id]
|
||||
add_configuration(phy_element, phy_config.id, value)
|
||||
|
||||
external_element = etree.SubElement(emane_element, "external")
|
||||
for external_config in model.external_config:
|
||||
value = config[external_config.id]
|
||||
add_configuration(external_element, external_config.id, value)
|
||||
|
||||
return emane_element
|
||||
|
||||
|
||||
def add_configuration(parent, name, value):
|
||||
config_element = etree.SubElement(parent, "configuration")
|
||||
add_attribute(config_element, "name", name)
|
||||
add_attribute(config_element, "value", value)
|
||||
|
||||
|
||||
def get_endpoints(node):
|
||||
endpoints = []
|
||||
for interface in node.netifs(sort=True):
|
||||
endpoint = get_endpoint(node, interface)
|
||||
endpoints.append(endpoint)
|
||||
return endpoints
|
||||
|
||||
|
||||
def get_endpoint(node, interface):
|
||||
l2devport = None
|
||||
othernet = getattr(interface, "othernet", None)
|
||||
|
||||
# reference interface of node that is part of this network
|
||||
if interface.net.objid == node.objid and interface.node:
|
||||
params = interface.getparams()
|
||||
if nodeutils.is_node(interface.net, (NodeTypes.HUB, NodeTypes.SWITCH)):
|
||||
l2devport = "%s/e%s" % (interface.net.name, interface.netindex)
|
||||
endpoint_id = "%s/%s" % (interface.node.name, interface.name)
|
||||
endpoint = Endpoint(
|
||||
node,
|
||||
interface,
|
||||
"interface",
|
||||
endpoint_id,
|
||||
l2devport,
|
||||
params
|
||||
)
|
||||
# references another node connected to this network
|
||||
elif othernet and othernet.objid == node.objid:
|
||||
interface.swapparams("_params_up")
|
||||
params = interface.getparams()
|
||||
interface.swapparams("_params_up")
|
||||
l2devport = "%s/e%s" % (othernet.name, interface.netindex)
|
||||
endpoint_id = "%s/%s/%s" % (node.name, interface.node.name, interface.netindex)
|
||||
endpoint = Endpoint(
|
||||
interface.net,
|
||||
interface,
|
||||
"interface",
|
||||
endpoint_id,
|
||||
l2devport,
|
||||
params
|
||||
)
|
||||
else:
|
||||
endpoint = Endpoint(
|
||||
node,
|
||||
interface,
|
||||
)
|
||||
|
||||
return endpoint
|
||||
|
||||
|
||||
def get_downstream_l2_devices(node):
|
||||
all_endpoints = []
|
||||
l2_devices = [node]
|
||||
current_endpoint = get_endpoints(node)
|
||||
all_endpoints.extend(current_endpoint)
|
||||
for endpoint in current_endpoint:
|
||||
if endpoint.type and endpoint.network.objid != node.objid:
|
||||
new_l2_devices, new_endpoints = get_downstream_l2_devices(endpoint.network)
|
||||
l2_devices.extend(new_l2_devices)
|
||||
all_endpoints.extend(new_endpoints)
|
||||
return l2_devices, all_endpoints
|
||||
|
||||
|
||||
class Endpoint(object):
|
||||
def __init__(self, network, interface, _type=None, _id=None, l2devport=None, params=None):
|
||||
self.network = network
|
||||
self.interface = interface
|
||||
self.type = _type
|
||||
self.id = _id
|
||||
self.l2devport = l2devport
|
||||
self.params = params
|
||||
|
||||
|
||||
class NodeElement(object):
|
||||
def __init__(self, session, node, element_name):
|
||||
self.session = session
|
||||
self.node = node
|
||||
self.element = etree.Element(element_name)
|
||||
add_attribute(self.element, "id", node.objid)
|
||||
add_attribute(self.element, "name", node.name)
|
||||
add_attribute(self.element, "icon", node.icon)
|
||||
add_attribute(self.element, "canvas", node.canvas)
|
||||
self.add_position()
|
||||
|
||||
def add_position(self):
|
||||
x = self.node.position.x
|
||||
y = self.node.position.y
|
||||
z = self.node.position.z
|
||||
lat, lon, alt = None, None, None
|
||||
if x is not None and y is not None:
|
||||
lat, lon, alt = self.session.location.getgeo(x, y, z)
|
||||
position = etree.SubElement(self.element, "position")
|
||||
add_attribute(position, "x", x)
|
||||
add_attribute(position, "y", y)
|
||||
add_attribute(position, "z", z)
|
||||
add_attribute(position, "lat", lat)
|
||||
add_attribute(position, "lon", lon)
|
||||
add_attribute(position, "alt", alt)
|
||||
|
||||
|
||||
class InterfaceElement(object):
|
||||
def __init__(self, session, node, interface):
|
||||
self.session = session
|
||||
self.node = node
|
||||
self.interface = interface
|
||||
self.element = etree.Element("interface")
|
||||
add_attribute(self.element, "id", interface.netindex)
|
||||
add_attribute(self.element, "name", interface.name)
|
||||
mac = etree.SubElement(self.element, "mac")
|
||||
mac.text = str(interface.hwaddr)
|
||||
self.add_mtu()
|
||||
self.addresses = etree.SubElement(self.element, "addresses")
|
||||
self.add_addresses()
|
||||
self.add_model()
|
||||
|
||||
def add_mtu(self):
|
||||
# check to add mtu
|
||||
if self.interface.mtu and self.interface.mtu != 1500:
|
||||
add_attribute(self.element, "mtu", self.interface.mtu)
|
||||
|
||||
def add_model(self):
|
||||
# check for emane specific interface configuration
|
||||
net_model = None
|
||||
if self.interface.net and hasattr(self.interface.net, "model"):
|
||||
net_model = self.interface.net.model
|
||||
|
||||
if net_model and net_model.name.startswith("emane_"):
|
||||
config = self.session.emane.getifcconfig(self.node.objid, self.interface, net_model.name)
|
||||
if config:
|
||||
emane_element = create_emane_model_config(self.node.objid, net_model, config)
|
||||
self.element.append(emane_element)
|
||||
|
||||
def add_addresses(self):
|
||||
for address in self.interface.addrlist:
|
||||
ip, mask = address.split("/")
|
||||
if ipaddress.is_ipv4_address(ip):
|
||||
address_type = "IPv4"
|
||||
else:
|
||||
address_type = "IPv6"
|
||||
address_element = etree.SubElement(self.addresses, "address")
|
||||
add_attribute(address_element, "type", address_type)
|
||||
address_element.text = str(address)
|
||||
|
||||
|
||||
class ServiceElement(object):
|
||||
def __init__(self, service):
|
||||
self.service = service
|
||||
self.element = etree.Element("service")
|
||||
add_attribute(self.element, "name", service.name)
|
||||
self.add_directories()
|
||||
self.add_startup()
|
||||
self.add_validate()
|
||||
self.add_shutdown()
|
||||
self.add_files()
|
||||
|
||||
def add_directories(self):
|
||||
# get custom directories
|
||||
directories = etree.Element("directories")
|
||||
for directory in self.service.dirs:
|
||||
directory_element = etree.SubElement(directories, "directory")
|
||||
directory_element.text = directory
|
||||
|
||||
if directories.getchildren():
|
||||
self.element.append(directories)
|
||||
|
||||
def add_files(self):
|
||||
# get custom files
|
||||
file_elements = etree.Element("files")
|
||||
for file_name, data in self.service.config_data.iteritems():
|
||||
file_element = etree.SubElement(file_elements, "file")
|
||||
add_attribute(file_element, "name", file_name)
|
||||
file_element.text = data
|
||||
|
||||
if file_elements.getchildren():
|
||||
self.element.append(file_elements)
|
||||
|
||||
def add_startup(self):
|
||||
# get custom startup
|
||||
startup_elements = etree.Element("startups")
|
||||
for startup in self.service.startup:
|
||||
startup_element = etree.SubElement(startup_elements, "startup")
|
||||
startup_element.text = startup
|
||||
|
||||
if startup_elements.getchildren():
|
||||
self.element.append(startup_elements)
|
||||
|
||||
def add_validate(self):
|
||||
# get custom validate
|
||||
validate_elements = etree.Element("validates")
|
||||
for validate in self.service.validate:
|
||||
validate_element = etree.SubElement(validate_elements, "validate")
|
||||
validate_element.text = validate
|
||||
|
||||
if validate_elements.getchildren():
|
||||
self.element.append(validate_elements)
|
||||
|
||||
def add_shutdown(self):
|
||||
# get custom shutdown
|
||||
shutdown_elements = etree.Element("shutdowns")
|
||||
for shutdown in self.service.shutdown:
|
||||
shutdown_element = etree.SubElement(shutdown_elements, "shutdown")
|
||||
shutdown_element.text = shutdown
|
||||
|
||||
if shutdown_elements.getchildren():
|
||||
self.element.append(shutdown_elements)
|
||||
|
||||
|
||||
class DeviceElement(NodeElement):
|
||||
def __init__(self, session, node):
|
||||
super(DeviceElement, self).__init__(session, node, "device")
|
||||
add_attribute(self.element, "type", node.type)
|
||||
# self.add_interfaces()
|
||||
self.add_services()
|
||||
|
||||
def add_services(self):
|
||||
service_elements = etree.Element("services")
|
||||
for service in self.node.services:
|
||||
etree.SubElement(service_elements, "service", name=service.name)
|
||||
|
||||
if service_elements.getchildren():
|
||||
self.element.append(service_elements)
|
||||
|
||||
def add_interfaces(self):
|
||||
interfaces = etree.Element("interfaces")
|
||||
for interface in self.node.netifs(sort=True):
|
||||
interface_element = InterfaceElement(self.session, self.node, interface)
|
||||
interfaces.append(interface_element.element)
|
||||
|
||||
if interfaces.getchildren():
|
||||
self.element.append(interfaces)
|
||||
|
||||
|
||||
class NetworkElement(NodeElement):
|
||||
def __init__(self, session, node):
|
||||
super(NetworkElement, self).__init__(session, node, "network")
|
||||
model = getattr(self.node, "model", None)
|
||||
if model:
|
||||
add_attribute(self.element, "model", model.name)
|
||||
mobility = getattr(self.node, "mobility", None)
|
||||
if mobility:
|
||||
add_attribute(self.element, "mobility", mobility.name)
|
||||
grekey = getattr(self.node, "grekey", None)
|
||||
if grekey:
|
||||
add_attribute(self.element, "grekey", grekey)
|
||||
self.add_type()
|
||||
# self.endpoints = get_endpoints(self.node)
|
||||
# self.l2_devices = self.get_l2_devices()
|
||||
# self.add_configs()
|
||||
|
||||
def add_type(self):
|
||||
if self.node.apitype:
|
||||
node_type = NodeTypes(self.node.apitype).name
|
||||
else:
|
||||
node_type = self.node.__class__.__name__
|
||||
add_attribute(self.element, "type", node_type)
|
||||
|
||||
def get_l2_devices(self):
|
||||
l2_devices = []
|
||||
found_l2_devices = []
|
||||
found_endpoints = []
|
||||
if nodeutils.is_node(self.node, (NodeTypes.SWITCH, NodeTypes.HUB)):
|
||||
for endpoint in self.endpoints:
|
||||
if endpoint.type and endpoint.network.objid != self.node.objid:
|
||||
downstream_l2_devices, downstream_endpoints = get_downstream_l2_devices(endpoint.network)
|
||||
found_l2_devices.extend(downstream_l2_devices)
|
||||
found_endpoints.extend(downstream_endpoints)
|
||||
|
||||
for l2_device in found_l2_devices:
|
||||
pass
|
||||
|
||||
self.endpoints.extend(found_endpoints)
|
||||
return l2_devices
|
||||
|
||||
def add_peer_to_peer_config(self):
|
||||
pass
|
||||
|
||||
def add_switch_hub_tunnel_config(self):
|
||||
pass
|
||||
|
||||
def add_configs(self):
|
||||
if nodeutils.is_node(self.node, NodeTypes.PEER_TO_PEER):
|
||||
self.add_peer_to_peer_config()
|
||||
elif nodeutils.is_node(self.node, (NodeTypes.SWITCH, NodeTypes.HUB, NodeTypes.TUNNEL)):
|
||||
self.add_switch_hub_tunnel_config()
|
||||
|
||||
|
||||
class CoreXmlWriter(object):
|
||||
def __init__(self, session):
|
||||
self.session = session
|
||||
self.scenario = etree.Element("scenario")
|
||||
self.networks = None
|
||||
self.devices = None
|
||||
self.write_session()
|
||||
|
||||
def write_session(self):
|
||||
# generate xml content
|
||||
links = self.write_nodes()
|
||||
self.write_links(links)
|
||||
self.write_mobility_configs()
|
||||
self.write_emane_configs()
|
||||
self.write_service_configs()
|
||||
self.write_session_origin()
|
||||
self.write_session_hooks()
|
||||
self.write_session_options()
|
||||
self.write_session_metadata()
|
||||
self.write_default_services()
|
||||
|
||||
def write(self, file_name):
|
||||
self.scenario.set("name", file_name)
|
||||
|
||||
# write out generated xml
|
||||
xml_tree = etree.ElementTree(self.scenario)
|
||||
xml_tree.write(file_name, xml_declaration=True, pretty_print=True, encoding="UTF-8")
|
||||
|
||||
def write_session_origin(self):
|
||||
# origin: geolocation of cartesian coordinate 0,0,0
|
||||
lat, lon, alt = self.session.location.refgeo
|
||||
origin = etree.Element("session_origin")
|
||||
add_attribute(origin, "lat", lat)
|
||||
add_attribute(origin, "lon", lon)
|
||||
add_attribute(origin, "alt", alt)
|
||||
has_origin = len(origin.items()) > 0
|
||||
|
||||
if has_origin:
|
||||
self.scenario.append(origin)
|
||||
refscale = self.session.location.refscale
|
||||
if refscale != 1.0:
|
||||
add_attribute(origin, "scale", refscale)
|
||||
if self.session.location.refxyz != (0.0, 0.0, 0.0):
|
||||
x, y, z = self.session.location.refxyz
|
||||
add_attribute(origin, "x", x)
|
||||
add_attribute(origin, "y", y)
|
||||
add_attribute(origin, "z", z)
|
||||
|
||||
def write_session_hooks(self):
|
||||
# hook scripts
|
||||
hooks = etree.Element("session_hooks")
|
||||
for state in sorted(self.session._hooks.keys()):
|
||||
for file_name, data in self.session._hooks[state]:
|
||||
hook = etree.SubElement(hooks, "hook")
|
||||
add_attribute(hook, "name", file_name)
|
||||
add_attribute(hook, "state", state)
|
||||
hook.text = data
|
||||
|
||||
if hooks.getchildren():
|
||||
self.scenario.append(hooks)
|
||||
|
||||
def write_session_options(self):
|
||||
option_elements = etree.Element("session_options")
|
||||
options_config = self.session.options.get_configs()
|
||||
if not options_config:
|
||||
return
|
||||
|
||||
for _id, default_value in self.session.options.default_values().iteritems():
|
||||
# TODO: should we just save the current config regardless, since it may change?
|
||||
value = options_config[_id]
|
||||
if value != default_value:
|
||||
add_configuration(option_elements, _id, value)
|
||||
|
||||
if option_elements.getchildren():
|
||||
self.scenario.append(option_elements)
|
||||
|
||||
def write_session_metadata(self):
|
||||
# metadata
|
||||
metadata_elements = etree.Element("session_metadata")
|
||||
config = self.session.metadata.get_configs()
|
||||
if not config:
|
||||
return
|
||||
|
||||
for _id, value in config.iteritems():
|
||||
add_configuration(metadata_elements, _id, value)
|
||||
|
||||
if metadata_elements.getchildren():
|
||||
self.scenario.append(metadata_elements)
|
||||
|
||||
def write_emane_configs(self):
|
||||
emane_configurations = etree.Element("emane_configurations")
|
||||
for node_id in self.session.emane.nodes():
|
||||
all_configs = self.session.emane.get_all_configs(node_id)
|
||||
if not all_configs:
|
||||
continue
|
||||
|
||||
for model_name, config in all_configs.iteritems():
|
||||
logger.info("writing emane config node(%s) model(%s)", node_id, model_name)
|
||||
if model_name == -1:
|
||||
emane_configuration = create_emane_config(node_id, self.session.emane.emane_config, config)
|
||||
else:
|
||||
model = self.session.emane.models[model_name]
|
||||
emane_configuration = create_emane_model_config(node_id, model, config)
|
||||
emane_configurations.append(emane_configuration)
|
||||
|
||||
if emane_configurations.getchildren():
|
||||
self.scenario.append(emane_configurations)
|
||||
|
||||
def write_mobility_configs(self):
|
||||
mobility_configurations = etree.Element("mobility_configurations")
|
||||
for node_id in self.session.mobility.nodes():
|
||||
all_configs = self.session.mobility.get_all_configs(node_id)
|
||||
if not all_configs:
|
||||
continue
|
||||
|
||||
for model_name, config in all_configs.iteritems():
|
||||
logger.info("writing mobility config node(%s) model(%s)", node_id, model_name)
|
||||
mobility_configuration = etree.SubElement(mobility_configurations, "mobility_configuration")
|
||||
add_attribute(mobility_configuration, "node", node_id)
|
||||
add_attribute(mobility_configuration, "model", model_name)
|
||||
for name, value in config.iteritems():
|
||||
add_configuration(mobility_configuration, name, value)
|
||||
|
||||
if mobility_configurations.getchildren():
|
||||
self.scenario.append(mobility_configurations)
|
||||
|
||||
def write_service_configs(self):
|
||||
service_configurations = etree.Element("service_configurations")
|
||||
service_configs = self.session.services.all_configs()
|
||||
for node_id, service in service_configs:
|
||||
service_element = ServiceElement(service)
|
||||
add_attribute(service_element.element, "node", node_id)
|
||||
service_configurations.append(service_element.element)
|
||||
|
||||
if service_configurations.getchildren():
|
||||
self.scenario.append(service_configurations)
|
||||
|
||||
def write_default_services(self):
|
||||
node_types = etree.Element("default_services")
|
||||
for node_type, services in self.session.services.default_services.iteritems():
|
||||
node_type_element = etree.SubElement(node_types, "node", type=node_type)
|
||||
for service in services:
|
||||
etree.SubElement(node_type_element, "service", name=service)
|
||||
|
||||
if node_types.getchildren():
|
||||
self.scenario.append(node_types)
|
||||
|
||||
def write_nodes(self):
|
||||
self.networks = etree.SubElement(self.scenario, "networks")
|
||||
self.devices = etree.SubElement(self.scenario, "devices")
|
||||
|
||||
links = []
|
||||
for node in self.session.objects.itervalues():
|
||||
logger.info("writer adding node(%s)", node.name)
|
||||
|
||||
# network node
|
||||
if isinstance(node, coreobj.PyCoreNet) and not nodeutils.is_node(node, NodeTypes.CONTROL_NET):
|
||||
self.write_network(node)
|
||||
# device node
|
||||
elif isinstance(node, nodes.PyCoreNode):
|
||||
self.write_device(node)
|
||||
|
||||
# add known links
|
||||
links.extend(node.all_link_data(0))
|
||||
|
||||
return links
|
||||
|
||||
def write_network(self, node):
|
||||
# ignore p2p and other nodes that are not part of the api
|
||||
if not node.apitype:
|
||||
return
|
||||
|
||||
# ignore nodes tied to a different network
|
||||
if nodeutils.is_node(node, (NodeTypes.SWITCH, NodeTypes.HUB)):
|
||||
for netif in node.netifs(sort=True):
|
||||
othernet = getattr(netif, "othernet", None)
|
||||
if othernet and othernet.objid != node.objid:
|
||||
logger.info("writer ignoring node(%s) othernet(%s)", node.name, othernet.name)
|
||||
return
|
||||
|
||||
network = NetworkElement(self.session, node)
|
||||
self.networks.append(network.element)
|
||||
|
||||
def write_links(self, links):
|
||||
link_elements = etree.Element("links")
|
||||
# add link data
|
||||
for link_data in links:
|
||||
# skip basic range links
|
||||
if link_data.interface1_id is None and link_data.interface2_id is None:
|
||||
continue
|
||||
|
||||
link_element = self.create_link_element(link_data)
|
||||
link_elements.append(link_element)
|
||||
|
||||
if link_elements.getchildren():
|
||||
self.scenario.append(link_elements)
|
||||
|
||||
def write_device(self, node):
|
||||
device = DeviceElement(self.session, node)
|
||||
self.devices.append(device.element)
|
||||
|
||||
def create_link_element(self, link_data):
|
||||
link_element = etree.Element("link")
|
||||
add_attribute(link_element, "node_one", link_data.node1_id)
|
||||
add_attribute(link_element, "node_two", link_data.node2_id)
|
||||
|
||||
# check for interface one
|
||||
if link_data.interface1_id is not None:
|
||||
interface_one = etree.Element("interface_one")
|
||||
node = self.session.get_object(link_data.node1_id)
|
||||
node_interface = node.netif(link_data.interface1_id)
|
||||
|
||||
add_attribute(interface_one, "id", link_data.interface1_id)
|
||||
add_attribute(interface_one, "name", node_interface.name)
|
||||
add_attribute(interface_one, "mac", link_data.interface1_mac)
|
||||
add_attribute(interface_one, "ip4", link_data.interface1_ip4)
|
||||
add_attribute(interface_one, "ip4_mask", link_data.interface1_ip4_mask)
|
||||
add_attribute(interface_one, "ip6", link_data.interface1_ip6)
|
||||
add_attribute(interface_one, "ip6_mask", link_data.interface1_ip6_mask)
|
||||
|
||||
# check if emane interface
|
||||
if nodeutils.is_node(node_interface.net, NodeTypes.EMANE):
|
||||
nem = node_interface.net.getnemid(node_interface)
|
||||
add_attribute(interface_one, "nem", nem)
|
||||
|
||||
link_element.append(interface_one)
|
||||
|
||||
# check for interface two
|
||||
if link_data.interface2_id is not None:
|
||||
interface_two = etree.Element("interface_two")
|
||||
node = self.session.get_object(link_data.node2_id)
|
||||
node_interface = node.netif(link_data.interface2_id)
|
||||
|
||||
add_attribute(interface_two, "id", link_data.interface2_id)
|
||||
add_attribute(interface_two, "name", node_interface.name)
|
||||
add_attribute(interface_two, "mac", link_data.interface2_mac)
|
||||
add_attribute(interface_two, "ip4", link_data.interface2_ip4)
|
||||
add_attribute(interface_two, "ip4_mask", link_data.interface2_ip4_mask)
|
||||
add_attribute(interface_two, "ip6", link_data.interface2_ip6)
|
||||
add_attribute(interface_two, "ip6_mask", link_data.interface2_ip6_mask)
|
||||
|
||||
# check if emane interface
|
||||
if nodeutils.is_node(node_interface.net, NodeTypes.EMANE):
|
||||
nem = node_interface.net.getnemid(node_interface)
|
||||
add_attribute(interface_two, "nem", nem)
|
||||
|
||||
link_element.append(interface_two)
|
||||
|
||||
# check for options
|
||||
options = etree.Element("options")
|
||||
add_attribute(options, "delay", link_data.delay)
|
||||
add_attribute(options, "bandwidth", link_data.bandwidth)
|
||||
add_attribute(options, "per", link_data.per)
|
||||
add_attribute(options, "dup", link_data.dup)
|
||||
add_attribute(options, "jitter", link_data.jitter)
|
||||
add_attribute(options, "mer", link_data.mer)
|
||||
add_attribute(options, "burst", link_data.burst)
|
||||
add_attribute(options, "mburst", link_data.mburst)
|
||||
add_attribute(options, "type", link_data.link_type)
|
||||
add_attribute(options, "gui_attributes", link_data.gui_attributes)
|
||||
add_attribute(options, "unidirectional", link_data.unidirectional)
|
||||
add_attribute(options, "emulation_id", link_data.emulation_id)
|
||||
add_attribute(options, "network_id", link_data.network_id)
|
||||
add_attribute(options, "key", link_data.key)
|
||||
add_attribute(options, "opaque", link_data.opaque)
|
||||
add_attribute(options, "session", link_data.session)
|
||||
if options.items():
|
||||
link_element.append(options)
|
||||
|
||||
return link_element
|
||||
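For reference, a minimal usage sketch of the writer (the output file name below is hypothetical): constructing the writer generates the scenario element tree from the session, and write() serializes it to disk.
# minimal usage sketch; "scenario.xml" is a hypothetical output path
writer = CoreXmlWriter(session)   # builds the scenario element tree from the session
writer.write("scenario.xml")      # sets the scenario name and writes pretty-printed UTF-8 xml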
|
||||
|
||||
class CoreXmlReader(object):
|
||||
def __init__(self, session):
|
||||
self.session = session
|
||||
self.scenario = None
|
||||
|
||||
def read(self, file_name):
|
||||
xml_tree = etree.parse(file_name)
|
||||
self.scenario = xml_tree.getroot()
|
||||
|
||||
# read xml session content
|
||||
self.read_default_services()
|
||||
self.read_session_metadata()
|
||||
self.read_session_options()
|
||||
self.read_session_hooks()
|
||||
self.read_session_origin()
|
||||
self.read_service_configs()
|
||||
self.read_mobility_configs()
|
||||
self.read_emane_configs()
|
||||
self.read_nodes()
|
||||
self.read_links()
|
||||
|
||||
def read_default_services(self):
|
||||
default_services = self.scenario.find("default_services")
|
||||
if default_services is None:
|
||||
return
|
||||
|
||||
for node in default_services.iterchildren():
|
||||
node_type = node.get("type")
|
||||
services = []
|
||||
for service in node.iterchildren():
|
||||
services.append(service.get("name"))
|
||||
logger.info("reading default services for nodes(%s): %s", node_type, services)
|
||||
self.session.services.default_services[node_type] = services
|
||||
|
||||
def read_session_metadata(self):
|
||||
session_metadata = self.scenario.find("session_metadata")
|
||||
if session_metadata is None:
|
||||
return
|
||||
|
||||
configs = {}
|
||||
for data in session_metadata.iterchildren():
|
||||
name = data.get("name")
|
||||
value = data.get("value")
|
||||
configs[name] = value
|
||||
logger.info("reading session metadata: %s", configs)
|
||||
self.session.metadata.set_configs(configs)
|
||||
|
||||
def read_session_options(self):
|
||||
session_options = self.scenario.find("session_options")
|
||||
if session_options is None:
|
||||
return
|
||||
|
||||
configs = {}
|
||||
for config in session_options.iterchildren():
|
||||
name = config.get("name")
|
||||
value = config.get("value")
|
||||
configs[name] = value
|
||||
logger.info("reading session options: %s", configs)
|
||||
self.session.options.set_configs(configs)
|
||||
|
||||
def read_session_hooks(self):
|
||||
session_hooks = self.scenario.find("session_hooks")
|
||||
if session_hooks is None:
|
||||
return
|
||||
|
||||
for hook in session_hooks.iterchildren():
|
||||
name = hook.get("name")
|
||||
state = hook.get("state")
|
||||
data = hook.text
|
||||
hook_type = "hook:%s" % state
|
||||
logger.info("reading hook: state(%s) name(%s)", state, name)
|
||||
self.session.set_hook(hook_type, file_name=name, source_name=None, data=data)
|
||||
|
||||
def read_session_origin(self):
|
||||
session_origin = self.scenario.find("session_origin")
|
||||
if session_origin is None:
|
||||
return
|
||||
|
||||
lat = get_float(session_origin, "lat")
|
||||
lon = get_float(session_origin, "lon")
|
||||
alt = get_float(session_origin, "alt")
|
||||
if all([lat, lon, alt]):
|
||||
logger.info("reading session reference geo: %s, %s, %s", lat, lon, alt)
|
||||
self.session.location.setrefgeo(lat, lon, alt)
|
||||
|
||||
scale = get_float(session_origin, "scale")
|
||||
if scale:
|
||||
logger.info("reading session reference scale: %s", scale)
|
||||
self.session.location.refscale = scale
|
||||
|
||||
x = get_float(session_origin, "x")
|
||||
y = get_float(session_origin, "y")
|
||||
z = get_float(session_origin, "z")
|
||||
if all([x, y]):
|
||||
logger.info("reading session reference xyz: %s, %s, %s", x, y, z)
|
||||
self.session.location.refxyz = (x, y, z)
|
||||
|
||||
def read_service_configs(self):
|
||||
service_configurations = self.scenario.find("service_configurations")
|
||||
if service_configurations is None:
|
||||
return
|
||||
|
||||
for service_configuration in service_configurations.iterchildren():
|
||||
node_id = get_int(service_configuration, "node")
|
||||
service_name = service_configuration.get("name")
|
||||
logger.info("reading custom service(%s) for node(%s)", service_name, node_id)
|
||||
self.session.services.set_service(node_id, service_name)
|
||||
service = self.session.services.get_service(node_id, service_name)
|
||||
|
||||
directory_elements = service_configuration.find("directories")
|
||||
if directory_elements is not None:
|
||||
service.dirs = tuple(x.text for x in directory_elements.iterchildren())
|
||||
|
||||
startup_elements = service_configuration.find("startups")
|
||||
if startup_elements is not None:
|
||||
service.startup = tuple(x.text for x in startup_elements.iterchildren())
|
||||
|
||||
validate_elements = service_configuration.find("validates")
|
||||
if validate_elements is not None:
|
||||
service.validate = tuple(x.text for x in validate_elements.iterchildren())
|
||||
|
||||
shutdown_elements = service_configuration.find("shutdowns")
|
||||
if shutdown_elements is not None:
|
||||
service.shutdown = tuple(x.text for x in shutdown_elements.iterchildren())
|
||||
|
||||
file_elements = service_configuration.find("files")
|
||||
if file_elements is not None:
|
||||
for file_element in file_elements.iterchildren():
|
||||
name = file_element.get("name")
|
||||
data = file_element.text
|
||||
service.config_data[name] = data
|
||||
|
||||
def read_emane_configs(self):
|
||||
emane_configurations = self.scenario.find("emane_configurations")
|
||||
if emane_configurations is None:
|
||||
return
|
||||
|
||||
for emane_configuration in emane_configurations.iterchildren():
|
||||
node_id = get_int(emane_configuration, "node")
|
||||
model_name = emane_configuration.get("model")
|
||||
configs = {}
|
||||
|
||||
mac_configuration = emane_configuration.find("mac")
|
||||
for config in mac_configuration.iterchildren():
|
||||
name = config.get("name")
|
||||
value = config.get("value")
|
||||
configs[name] = value
|
||||
|
||||
phy_configuration = emane_configuration.find("phy")
|
||||
for config in phy_configuration.iterchildren():
|
||||
name = config.get("name")
|
||||
value = config.get("value")
|
||||
configs[name] = value
|
||||
|
||||
external_configuration = emane_configuration.find("external")
|
||||
for config in external_configuration.iterchildren():
|
||||
name = config.get("name")
|
||||
value = config.get("value")
|
||||
configs[name] = value
|
||||
|
||||
logger.info("reading emane configuration node(%s) model(%s)", node_id, model_name)
|
||||
self.session.emane.set_model_config(node_id, model_name, configs)
|
||||
|
||||
def read_mobility_configs(self):
|
||||
mobility_configurations = self.scenario.find("mobility_configurations")
|
||||
if mobility_configurations is None:
|
||||
return
|
||||
|
||||
for mobility_configuration in mobility_configurations.iterchildren():
|
||||
node_id = get_int(mobility_configuration, "node")
|
||||
model_name = mobility_configuration.get("model")
|
||||
configs = {}
|
||||
|
||||
for config in mobility_configuration.iterchildren():
|
||||
name = config.get("name")
|
||||
value = config.get("value")
|
||||
configs[name] = value
|
||||
|
||||
logger.info("reading mobility configuration node(%s) model(%s)", node_id, model_name)
|
||||
self.session.mobility.set_model_config(node_id, model_name, configs)
|
||||
|
||||
def read_nodes(self):
|
||||
device_elements = self.scenario.find("devices")
|
||||
if device_elements is not None:
|
||||
for device_element in device_elements.iterchildren():
|
||||
self.read_device(device_element)
|
||||
|
||||
network_elements = self.scenario.find("networks")
|
||||
if network_elements is not None:
|
||||
for network_element in network_elements.iterchildren():
|
||||
self.read_network(network_element)
|
||||
|
||||
def read_device(self, device_element):
|
||||
node_id = get_int(device_element, "id")
|
||||
name = device_element.get("name")
|
||||
model = device_element.get("type")
|
||||
node_options = NodeOptions(name, model)
|
||||
|
||||
service_elements = device_element.find("services")
|
||||
if service_elements is not None:
|
||||
node_options.services = [x.get("name") for x in service_elements.iterchildren()]
|
||||
|
||||
position_element = device_element.find("position")
|
||||
if position_element is not None:
|
||||
x = get_float(position_element, "x")
|
||||
y = get_float(position_element, "y")
|
||||
if all([x, y]):
|
||||
node_options.set_position(x, y)
|
||||
|
||||
lat = get_float(position_element, "lat")
|
||||
lon = get_float(position_element, "lon")
|
||||
alt = get_float(position_element, "alt")
|
||||
if all([lat, lon, alt]):
|
||||
node_options.set_location(lat, lon, alt)
|
||||
|
||||
logger.info("reading node id(%s) model(%s) name(%s)", node_id, model, name)
|
||||
self.session.add_node(_id=node_id, node_options=node_options)
|
||||
|
||||
def read_network(self, network_element):
|
||||
node_id = get_int(network_element, "id")
|
||||
name = network_element.get("name")
|
||||
node_type = NodeTypes[network_element.get("type")]
|
||||
node_options = NodeOptions(name)
|
||||
|
||||
position_element = network_element.find("position")
|
||||
if position_element is not None:
|
||||
x = get_float(position_element, "x")
|
||||
y = get_float(position_element, "y")
|
||||
if all([x, y]):
|
||||
node_options.set_position(x, y)
|
||||
|
||||
lat = get_float(position_element, "lat")
|
||||
lon = get_float(position_element, "lon")
|
||||
alt = get_float(position_element, "alt")
|
||||
if all([lat, lon, alt]):
|
||||
node_options.set_location(lat, lon, alt)
|
||||
|
||||
logger.info("reading node id(%s) node_type(%s) name(%s)", node_id, node_type, name)
|
||||
self.session.add_node(_type=node_type, _id=node_id, node_options=node_options)
|
||||
|
||||
def read_links(self):
|
||||
link_elements = self.scenario.find("links")
|
||||
if link_elements is None:
|
||||
return
|
||||
|
||||
for link_element in link_elements.iterchildren():
|
||||
node_one = get_int(link_element, "node_one")
|
||||
node_two = get_int(link_element, "node_two")
|
||||
|
||||
interface_one_element = link_element.find("interface_one")
|
||||
interface_one = None
|
||||
if interface_one_element is not None:
|
||||
interface_one = create_interface_data(interface_one_element)
|
||||
|
||||
interface_two_element = link_element.find("interface_two")
|
||||
interface_two = None
|
||||
if interface_two_element is not None:
|
||||
interface_two = create_interface_data(interface_two_element)
|
||||
|
||||
options_element = link_element.find("options")
|
||||
link_options = LinkOptions()
|
||||
if options_element is not None:
|
||||
link_options.bandwidth = get_float(options_element, "bandwidth")
|
||||
link_options.burst = get_float(options_element, "burst")
|
||||
link_options.delay = get_float(options_element, "delay")
|
||||
link_options.dup = get_float(options_element, "dup")
|
||||
link_options.mer = get_float(options_element, "mer")
|
||||
link_options.mburst = get_float(options_element, "mburst")
|
||||
link_options.jitter = get_float(options_element, "jitter")
|
||||
link_options.key = get_float(options_element, "key")
|
||||
link_options.per = get_float(options_element, "per")
|
||||
link_options.unidirectional = get_int(options_element, "unidirectional")
|
||||
link_options.session = options_element.get("session")
|
||||
link_options.emulation_id = get_int(options_element, "emulation_id")
|
||||
link_options.network_id = get_int(options_element, "network_id")
|
||||
link_options.opaque = options_element.get("opaque")
|
||||
link_options.gui_attributes = options_element.get("gui_attributes")
|
||||
|
||||
logger.info("reading link node_one(%s) node_two(%s)", node_one, node_two)
|
||||
self.session.add_link(node_one, node_two, interface_one, interface_two, link_options)
|
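A matching sketch for the reader (hypothetical file path): read() parses the file and replays its content into the session, reading services, configs and nodes before links.
# minimal usage sketch; "scenario.xml" is a hypothetical input path
reader = CoreXmlReader(session)
reader.read("scenario.xml")   # populates session options, hooks, configs, nodes and links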
157
daemon/core/xml/corexmldeployment.py
Normal file
157
daemon/core/xml/corexmldeployment.py
Normal file
|
@ -0,0 +1,157 @@
|
|||
import os
|
||||
import socket
|
||||
|
||||
from lxml import etree
|
||||
|
||||
from core import constants
|
||||
from core import logger
|
||||
from core.coreobj import PyCoreNode
|
||||
from core.enumerations import NodeTypes
|
||||
from core.misc import utils, nodeutils, ipaddress
|
||||
|
||||
|
||||
def add_type(parent_element, name):
|
||||
type_element = etree.SubElement(parent_element, "type")
|
||||
type_element.text = name
|
||||
|
||||
|
||||
def add_address(parent_element, address_type, address, interface_name=None):
|
||||
address_element = etree.SubElement(parent_element, "address", type=address_type)
|
||||
address_element.text = address
|
||||
if interface_name is not None:
|
||||
address_element.set("iface", interface_name)
|
||||
|
||||
|
||||
def add_mapping(parent_element, maptype, mapref):
|
||||
etree.SubElement(parent_element, "mapping", type=maptype, ref=mapref)
|
||||
|
||||
|
||||
def add_emane_interface(host_element, netif, platform_name="p1", transport_name="t1"):
|
||||
nem_id = netif.net.nemidmap[netif]
|
||||
host_id = host_element.get("id")
|
||||
|
||||
# platform data
|
||||
platform_id = "%s/%s" % (host_id, platform_name)
|
||||
platform_element = etree.SubElement(host_element, "emanePlatform", id=platform_id, name=platform_name)
|
||||
|
||||
# transport data
|
||||
transport_id = "%s/%s" % (host_id, transport_name)
|
||||
etree.SubElement(platform_element, "transport", id=transport_id, name=transport_name)
|
||||
|
||||
# nem data
|
||||
nem_name = "nem%s" % nem_id
|
||||
nem_element_id = "%s/%s" % (host_id, nem_name)
|
||||
nem_element = etree.SubElement(platform_element, "nem", id=nem_element_id, name=nem_name)
|
||||
nem_id_element = etree.SubElement(nem_element, "parameter", name="nemid")
|
||||
nem_id_element.text = str(nem_id)
|
||||
|
||||
return platform_element
|
||||
|
||||
|
||||
def get_address_type(address):
|
||||
addr, slash, prefixlen = address.partition("/")
|
||||
if ipaddress.is_ipv4_address(addr):
|
||||
address_type = "IPv4"
|
||||
elif ipaddress.is_ipv6_address(addr):
|
||||
address_type = "IPv6"
|
||||
else:
|
||||
raise NotImplementedError
|
||||
return address_type
|
||||
|
||||
|
||||
def get_ipv4_addresses(hostname):
|
||||
if hostname == "localhost":
|
||||
addresses = []
|
||||
args = [constants.IP_BIN, "-o", "-f", "inet", "addr", "show"]
|
||||
output = utils.check_cmd(args)
|
||||
for line in output.split(os.linesep):
|
||||
split = line.split()
|
||||
if not split:
|
||||
continue
|
||||
interface_name = split[1]
|
||||
address = split[3]
|
||||
if not address.startswith("127."):
|
||||
addresses.append((interface_name, address))
|
||||
return addresses
|
||||
else:
|
||||
# TODO: handle other hosts
|
||||
raise NotImplementedError
|
||||
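As an illustration of the parsing above (the interface name and address are made up), each output line of "ip -o -f inet addr show" has the interface name in the second column and the CIDR address in the fourth:
# illustrative only: a typical "ip -o -f inet addr show" line and how it is split
line = "2: eth0    inet 10.0.0.1/24 brd 10.0.0.255 scope global eth0"
split = line.split()
interface_name, address = split[1], split[3]   # ("eth0", "10.0.0.1/24")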
|
||||
|
||||
class CoreXmlDeployment(object):
|
||||
def __init__(self, session, scenario):
|
||||
self.session = session
|
||||
self.scenario = scenario
|
||||
self.root = etree.SubElement(scenario, "container", id="TestBed", name="TestBed")
|
||||
self.add_deployment()
|
||||
|
||||
def find_device(self, name):
|
||||
device = self.scenario.find("devices/device[@name='%s']" % name)
|
||||
logger.info("specific found scenario device: %s", device)
|
||||
return device
|
||||
|
||||
def find_interface(self, device, name):
|
||||
interface = self.scenario.find("devices/device[@name='%s']/interfaces/interface[@name='%s']" % (
|
||||
device.name, name))
|
||||
logger.info("specific found scenario interface: %s", interface)
|
||||
return interface
|
||||
|
||||
def add_deployment(self):
|
||||
physical_host = self.add_physical_host(socket.gethostname())
|
||||
|
||||
# TODO: handle other servers
|
||||
# servers = self.session.broker.getservernames()
|
||||
# servers.remove("localhost")
|
||||
|
||||
for node in self.session.objects.itervalues():
|
||||
if isinstance(node, PyCoreNode):
|
||||
self.add_virtual_host(physical_host, node)
|
||||
|
||||
def add_physical_host(self, name):
|
||||
# add host
|
||||
host_id = "%s/%s" % (self.root.get("id"), name)
|
||||
host_element = etree.SubElement(self.root, "testHost", id=host_id, name=name)
|
||||
|
||||
# add type element
|
||||
add_type(host_element, "physical")
|
||||
|
||||
# add ipv4 addresses
|
||||
for interface_name, address in get_ipv4_addresses("localhost"):
|
||||
add_address(host_element, "IPv4", address, interface_name)
|
||||
|
||||
return host_element
|
||||
|
||||
def add_virtual_host(self, physical_host, node):
|
||||
assert isinstance(node, PyCoreNode)
|
||||
|
||||
# create virtual host element
|
||||
host_id = "%s/%s" % (physical_host.get("id"), node.name)
|
||||
host_element = etree.SubElement(physical_host, "testHost", id=host_id, name=node.name)
|
||||
|
||||
# TODO: need to inject mapping into device element?
|
||||
self.find_device(node.name)
|
||||
# device = self.find_device(self.root.base_element, obj.name)
|
||||
# if device is None:
|
||||
# logger.warn("corresponding XML device not found for %s", obj.name)
|
||||
# return
|
||||
# add_mapping(device, "testHost", host_id)
|
||||
|
||||
# add host type
|
||||
add_type(host_element, "virtual")
|
||||
|
||||
for netif in node.netifs():
|
||||
emane_element = None
|
||||
if nodeutils.is_node(netif.net, NodeTypes.EMANE):
|
||||
emane_element = add_emane_interface(host_element, netif)
|
||||
|
||||
parent_element = host_element
|
||||
if emane_element is not None:
|
||||
parent_element = emane_element
|
||||
|
||||
for address in netif.addrlist:
|
||||
address_type = get_address_type(address)
|
||||
add_address(parent_element, address_type, address, netif.name)
|
||||
|
||||
# TODO: need to inject mapping in interface?
|
||||
# interface = self.find_interface(device, netif.name)
|
||||
# add_mapping(interface, "nem", nem.getAttribute("id"))
|
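A brief usage sketch (the scenario element is assumed to be an existing lxml "scenario" element, e.g. one produced by CoreXmlWriter): constructing the deployment object appends a TestBed container describing the physical host and its virtual hosts.
# minimal sketch: scenario is an existing lxml "scenario" element for the session
deployment = CoreXmlDeployment(session, scenario)   # adds the "container" element and deployment hosts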
441
daemon/core/xml/emanexml.py
Normal file
441
daemon/core/xml/emanexml.py
Normal file
|
@ -0,0 +1,441 @@
|
|||
import os
|
||||
|
||||
from lxml import etree
|
||||
|
||||
from core import logger
|
||||
from core.misc import utils
|
||||
from core.misc.ipaddress import MacAddress
|
||||
from core.xml import corexml
|
||||
|
||||
_hwaddr_prefix = "02:02"
|
||||
|
||||
|
||||
def is_external(config):
|
||||
"""
|
||||
Checks if the configuration is for an external transport.
|
||||
|
||||
:param dict config: configuration to check
|
||||
:return: True if external, False otherwise
|
||||
:rtype: bool
|
||||
"""
|
||||
return config.get("external") == "1"
|
||||
|
||||
|
||||
def _value_to_params(value):
|
||||
"""
|
||||
Helper to convert a parameter to a parameter tuple.
|
||||
|
||||
:param str value: value string to convert to tuple
|
||||
:return: parameter tuple, None otherwise
|
||||
"""
|
||||
try:
|
||||
values = utils.make_tuple_fromstr(value, str)
|
||||
|
||||
if not hasattr(values, "__iter__"):
|
||||
return None
|
||||
|
||||
if len(values) < 2:
|
||||
return None
|
||||
|
||||
return values
|
||||
|
||||
except SyntaxError:
|
||||
logger.exception("error in value string to param list")
|
||||
return None
|
||||
|
||||
|
||||
def create_file(xml_element, doc_name, file_path):
|
||||
"""
|
||||
Create xml file.
|
||||
|
||||
:param lxml.etree.Element xml_element: root element to write to file
|
||||
:param str doc_name: name to use in the emane doctype
|
||||
:param str file_path: file path to write xml file to
|
||||
:return: nothing
|
||||
"""
|
||||
doctype = '<!DOCTYPE %(doc_name)s SYSTEM "file:///usr/share/emane/dtd/%(doc_name)s.dtd">' % {"doc_name": doc_name}
|
||||
corexml.write_xml_file(xml_element, file_path, doctype=doctype)
|
||||
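A small usage sketch (element contents and file path are hypothetical); the doc_name selects the matching EMANE dtd referenced in the doctype:
# hypothetical example: write a platform document referencing platform.dtd
platform = etree.Element("platform")
etree.SubElement(platform, "param", name="otamanagerdevice", value="ctrl0")
create_file(platform, "platform", "/tmp/platform.xml")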
|
||||
|
||||
def add_param(xml_element, name, value):
|
||||
"""
|
||||
Add emane configuration parameter to xml element.
|
||||
|
||||
:param lxml.etree.Element xml_element: element to append parameter to
|
||||
:param str name: name of parameter
|
||||
:param str value: value for parameter
|
||||
:return: nothing
|
||||
"""
|
||||
etree.SubElement(xml_element, "param", name=name, value=value)
|
||||
|
||||
|
||||
def add_configurations(xml_element, configurations, config, config_ignore):
|
||||
"""
|
||||
Add emane model configurations to xml element.
|
||||
|
||||
:param lxml.etree.Element xml_element: xml element to add emane configurations to
|
||||
:param list[core.config.Configuration] configurations: configurations to add to xml
|
||||
:param dict config: configuration values
|
||||
:param set config_ignore: configuration options to ignore
|
||||
:return: nothing
|
||||
"""
|
||||
for configuration in configurations:
|
||||
# ignore custom configurations
|
||||
name = configuration.id
|
||||
if name in config_ignore:
|
||||
continue
|
||||
|
||||
# check if value is a multi param
|
||||
value = str(config[name])
|
||||
params = _value_to_params(value)
|
||||
if params:
|
||||
params_element = etree.SubElement(xml_element, "paramlist", name=name)
|
||||
for param in params:
|
||||
etree.SubElement(params_element, "item", value=param)
|
||||
else:
|
||||
add_param(xml_element, name, value)
|
||||
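Sketch of the resulting structure for illustration (option names and values are made up): a single-valued option is emitted as a param element, while a value that parses into multiple items becomes a paramlist of item elements.
# single value      -> <param name="mode" value="0"/>
# multi-value tuple -> <paramlist name="rates"><item value="1"/><item value="2"/></paramlist>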
|
||||
|
||||
def build_node_platform_xml(emane_manager, control_net, node, nem_id, platform_xmls):
|
||||
"""
|
||||
Create platform xml for a specific node.
|
||||
|
||||
:param core.emane.emanemanager.EmaneManager emane_manager: emane manager with emane configurations
|
||||
:param core.netns.nodes.CtrlNet control_net: control net node for this emane network
|
||||
:param core.emane.nodes.EmaneNode node: node to write platform xml for
|
||||
:param int nem_id: nem id to use for interfaces for this node
|
||||
:param dict platform_xmls: stores platform xml elements to append nem entries to
|
||||
:return: the next nem id that can be used for creating platform xml files
|
||||
:rtype: int
|
||||
"""
|
||||
logger.debug("building emane platform xml for node(%s): %s", node, node.name)
|
||||
nem_entries = {}
|
||||
|
||||
if node.model is None:
|
||||
logger.warn("warning: EmaneNode %s has no associated model", node.name)
|
||||
return nem_entries
|
||||
|
||||
for netif in node.netifs():
|
||||
# build nem xml
|
||||
nem_definition = nem_file_name(node.model, netif)
|
||||
nem_element = etree.Element("nem", id=str(nem_id), name=netif.localname, definition=nem_definition)
|
||||
|
||||
# check if this is an external transport, get default config if an interface specific one does not exist
|
||||
config = emane_manager.getifcconfig(node.model.object_id, netif, node.model.name)
|
||||
|
||||
if is_external(config):
|
||||
nem_element.set("transport", "external")
|
||||
platform_endpoint = "platformendpoint"
|
||||
add_param(nem_element, platform_endpoint, config[platform_endpoint])
|
||||
transport_endpoint = "transportendpoint"
|
||||
add_param(nem_element, transport_endpoint, config[transport_endpoint])
|
||||
else:
|
||||
# build transport xml
|
||||
transport_type = netif.transport_type
|
||||
if not transport_type:
|
||||
logger.info("warning: %s interface type unsupported!", netif.name)
|
||||
transport_type = "raw"
|
||||
transport_file = transport_file_name(node.objid, transport_type)
|
||||
transport_element = etree.SubElement(nem_element, "transport", definition=transport_file)
|
||||
|
||||
# add transport parameter
|
||||
add_param(transport_element, "device", netif.name)
|
||||
|
||||
# add nem entry
|
||||
nem_entries[netif] = nem_element
|
||||
|
||||
# merging code
|
||||
key = netif.node.objid
|
||||
if netif.transport_type == "raw":
|
||||
key = "host"
|
||||
otadev = control_net.brname
|
||||
eventdev = control_net.brname
|
||||
else:
|
||||
otadev = None
|
||||
eventdev = None
|
||||
|
||||
platform_element = platform_xmls.get(key)
|
||||
if platform_element is None:
|
||||
platform_element = etree.Element("platform")
|
||||
|
||||
if otadev:
|
||||
emane_manager.set_config("otamanagerdevice", otadev)
|
||||
|
||||
if eventdev:
|
||||
emane_manager.set_config("eventservicedevice", eventdev)
|
||||
|
||||
# append all platform options (except starting id) to doc
|
||||
for configuration in emane_manager.emane_config.emulator_config:
|
||||
name = configuration.id
|
||||
if name == "platform_id_start":
|
||||
continue
|
||||
|
||||
value = emane_manager.get_config(name)
|
||||
add_param(platform_element, name, value)
|
||||
|
||||
# add platform xml
|
||||
platform_xmls[key] = platform_element
|
||||
|
||||
platform_element.append(nem_element)
|
||||
|
||||
node.setnemid(netif, nem_id)
|
||||
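# derive a deterministic interface MAC from the nem id,
# e.g. nem_id 258 (0x0102) would yield "02:02:00:00:01:02"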
macstr = _hwaddr_prefix + ":00:00:"
|
||||
macstr += "%02X:%02X" % ((nem_id >> 8) & 0xFF, nem_id & 0xFF)
|
||||
netif.sethwaddr(MacAddress.from_string(macstr))
|
||||
|
||||
# increment nem id
|
||||
nem_id += 1
|
||||
|
||||
for key in sorted(platform_xmls.keys()):
|
||||
if key == "host":
|
||||
file_name = "platform.xml"
|
||||
else:
|
||||
file_name = "platform%d.xml" % key
|
||||
|
||||
platform_element = platform_xmls[key]
|
||||
|
||||
doc_name = "platform"
|
||||
file_path = os.path.join(emane_manager.session.session_dir, file_name)
|
||||
create_file(platform_element, doc_name, file_path)
|
||||
|
||||
return nem_id
|
||||
|
||||
|
||||
def build_xml_files(emane_manager, node):
|
||||
"""
|
||||
Generate emane xml files required for node.
|
||||
|
||||
:param core.emane.emanemanager.EmaneManager emane_manager: emane manager with emane configurations
|
||||
:param core.emane.nodes.EmaneNode node: node to write platform xml for
|
||||
:return: nothing
|
||||
"""
|
||||
logger.debug("building all emane xml for node(%s): %s", node, node.name)
|
||||
if node.model is None:
|
||||
return
|
||||
|
||||
# get model configurations
|
||||
config = emane_manager.get_configs(node.model.object_id, node.model.name)
|
||||
if not config:
|
||||
return
|
||||
|
||||
# build XML for overall network (EmaneNode) configs
|
||||
node.model.build_xml_files(config)
|
||||
|
||||
# build XML for specific interface (NEM) configs
|
||||
need_virtual = False
|
||||
need_raw = False
|
||||
vtype = "virtual"
|
||||
rtype = "raw"
|
||||
|
||||
for netif in node.netifs():
|
||||
# check for interface specific emane configuration and write xml files, if needed
|
||||
config = emane_manager.getifcconfig(node.model.object_id, netif, node.model.name)
|
||||
if config:
|
||||
node.model.build_xml_files(config, netif)
|
||||
|
||||
# check transport type needed for interface
|
||||
if "virtual" in netif.transport_type:
|
||||
need_virtual = True
|
||||
vtype = netif.transport_type
|
||||
else:
|
||||
need_raw = True
|
||||
rtype = netif.transport_type
|
||||
|
||||
if need_virtual:
|
||||
build_transport_xml(emane_manager, node, vtype)
|
||||
|
||||
if need_raw:
|
||||
build_transport_xml(emane_manager, node, rtype)
|
||||
|
||||
|
||||
def build_transport_xml(emane_manager, node, transport_type):
|
||||
"""
|
||||
Build transport xml file for node and transport type.
|
||||
|
||||
:param core.emane.emanemanager.EmaneManager emane_manager: emane manager with emane configurations
|
||||
:param core.emane.nodes.EmaneNode node: node to write platform xml for
|
||||
:param str transport_type: transport type to build xml for
|
||||
:return: nothing
|
||||
"""
|
||||
transport_element = etree.Element(
|
||||
"transport",
|
||||
name="%s Transport" % transport_type.capitalize(),
|
||||
library="trans%s" % transport_type.lower()
|
||||
)
|
||||
|
||||
# add bitrate
|
||||
add_param(transport_element, "bitrate", "0")
|
||||
|
||||
# get emane model configuration
|
||||
config = emane_manager.get_configs(node.objid, node.model.name)
|
||||
flowcontrol = config.get("flowcontrolenable", "0") == "1"
|
||||
|
||||
if "virtual" in transport_type.lower():
|
||||
device_path = "/dev/net/tun_flowctl"
|
||||
if not os.path.exists(device_path):
|
||||
device_path = "/dev/net/tun"
|
||||
add_param(transport_element, "devicepath", device_path)
|
||||
|
||||
if flowcontrol:
|
||||
add_param(transport_element, "flowcontrolenable", "on")
|
||||
|
||||
doc_name = "transport"
|
||||
file_name = transport_file_name(node.objid, transport_type)
|
||||
file_path = os.path.join(emane_manager.session.session_dir, file_name)
|
||||
create_file(transport_element, doc_name, file_path)
|
||||
|
||||
|
||||
def create_phy_xml(emane_model, config, file_path):
|
||||
"""
|
||||
Create the phy xml document.
|
||||
|
||||
:param core.emane.emanemodel.EmaneModel emane_model: emane model to create phy xml for
|
||||
:param dict config: all current configuration values
|
||||
:param str file_path: path to write file to
|
||||
:return: nothing
|
||||
"""
|
||||
phy_element = etree.Element("phy", name="%s PHY" % emane_model.name)
|
||||
if emane_model.phy_library:
|
||||
phy_element.set("library", emane_model.phy_library)
|
||||
|
||||
add_configurations(phy_element, emane_model.phy_config, config, emane_model.config_ignore)
|
||||
create_file(phy_element, "phy", file_path)
|
||||
|
||||
|
||||
def create_mac_xml(emane_model, config, file_path):
|
||||
"""
|
||||
Create the mac xml document.
|
||||
|
||||
:param core.emane.emanemodel.EmaneModel emane_model: emane model to create phy xml for
|
||||
:param dict config: all current configuration values
|
||||
:param str file_path: path to write file to
|
||||
:return: nothing
|
||||
"""
|
||||
if not emane_model.mac_library:
|
||||
raise ValueError("must define emane model library")
|
||||
|
||||
mac_element = etree.Element("mac", name="%s MAC" % emane_model.name, library=emane_model.mac_library)
|
||||
add_configurations(mac_element, emane_model.mac_config, config, emane_model.config_ignore)
|
||||
create_file(mac_element, "mac", file_path)
|
||||
|
||||
|
||||
def create_nem_xml(emane_model, config, nem_file, transport_definition, mac_definition, phy_definition):
|
||||
"""
|
||||
Create the nem xml document.
|
||||
|
||||
:param core.emane.emanemodel.EmaneModel emane_model: emane model to create phy xml for
|
||||
:param dict config: all current configuration values
|
||||
:param str nem_file: nem file path to write
|
||||
:param str transport_definition: transport file definition path
|
||||
:param str mac_definition: mac file definition path
|
||||
:param str phy_definition: phy file definition path
|
||||
:return: nothing
|
||||
"""
|
||||
nem_element = etree.Element("nem", name="%s NEM" % emane_model.name)
|
||||
if is_external(config):
|
||||
nem_element.set("type", "unstructured")
|
||||
else:
|
||||
etree.SubElement(nem_element, "transport", definition=transport_definition)
|
||||
etree.SubElement(nem_element, "mac", definition=mac_definition)
|
||||
etree.SubElement(nem_element, "phy", definition=phy_definition)
|
||||
create_file(nem_element, "nem", nem_file)
|
||||
|
||||
|
||||
def create_event_service_xml(group, port, device, file_directory):
|
||||
"""
|
||||
Create a emane event service xml file.
|
||||
|
||||
:param str group: event group
|
||||
:param str port: event port
|
||||
:param str device: event device
|
||||
:param str file_directory: directory to create file in
|
||||
:return: nothing
|
||||
"""
|
||||
event_element = etree.Element("emaneeventmsgsvc")
|
||||
for name, value in (("group", group), ("port", port), ("device", device), ("mcloop", "1"), ("ttl", "32")):
|
||||
sub_element = etree.SubElement(event_element, name)
|
||||
sub_element.text = value
|
||||
file_name = "libemaneeventservice.xml"
|
||||
file_path = os.path.join(file_directory, file_name)
|
||||
create_file(event_element, "emaneeventmsgsvc", file_path)
|
||||
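A usage sketch with illustrative values (group, port, device and directory are made up); the file is always named libemaneeventservice.xml inside the given directory.
# illustrative values only
create_event_service_xml("224.1.2.8", "45703", "ctrl0", "/tmp/session-dir")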
|
||||
|
||||
def transport_file_name(node_id, transport_type):
|
||||
"""
|
||||
Create name for a transport xml file.
|
||||
|
||||
:param int node_id: node id to generate transport file name for
|
||||
:param str transport_type: transport type to generate the transport file name for
|
||||
:return: transport xml file name
:rtype: str
|
||||
"""
|
||||
return "n%strans%s.xml" % (node_id, transport_type)
|
||||
|
||||
|
||||
def _basename(emane_model, interface=None):
|
||||
"""
|
||||
Create name that is leveraged for configuration file creation.
|
||||
|
||||
:param core.emane.emanemodel.EmaneModel emane_model: emane model to create the base name for
:param interface: interface for this model
|
||||
:return: basename used for file creation
|
||||
:rtype: str
|
||||
"""
|
||||
name = "n%s" % emane_model.object_id
|
||||
|
||||
if interface:
|
||||
node_id = interface.node.objid
|
||||
if emane_model.session.emane.getifcconfig(node_id, interface, emane_model.name):
|
||||
name = interface.localname.replace(".", "_")
|
||||
|
||||
return "%s%s" % (name, emane_model.name)
|
||||
|
||||
|
||||
def nem_file_name(emane_model, interface=None):
|
||||
"""
|
||||
Return the string name for the NEM XML file, e.g. "n3rfpipenem.xml"
|
||||
|
||||
:param core.emane.emanemodel.EmaneModel emane_model: emane model to create phy xml for
|
||||
:param interface: interface for this model
|
||||
:return: nem xml filename
|
||||
:rtype: str
|
||||
"""
|
||||
basename = _basename(emane_model, interface)
|
||||
append = ""
|
||||
if interface and interface.transport_type == "raw":
|
||||
append = "_raw"
|
||||
return "%snem%s.xml" % (basename, append)
|
||||
|
||||
|
||||
def shim_file_name(emane_model, interface=None):
|
||||
"""
|
||||
Return the string name for the SHIM XML file, e.g. "commeffectshim.xml"
|
||||
|
||||
:param core.emane.emanemodel.EmaneModel emane_model: emane model to create phy xml for
|
||||
:param interface: interface for this model
|
||||
:return: shim xml filename
|
||||
:rtype: str
|
||||
"""
|
||||
return "%sshim.xml" % _basename(emane_model, interface)
|
||||
|
||||
|
||||
def mac_file_name(emane_model, interface=None):
|
||||
"""
|
||||
Return the string name for the MAC XML file, e.g. "n3rfpipemac.xml"
|
||||
|
||||
:param core.emane.emanemodel.EmaneModel emane_model: emane model to create phy xml for
|
||||
:param interface: interface for this model
|
||||
:return: mac xml filename
|
||||
:rtype: str
|
||||
"""
|
||||
return "%smac.xml" % _basename(emane_model, interface)
|
||||
|
||||
|
||||
def phy_file_name(emane_model, interface=None):
|
||||
"""
|
||||
Return the string name for the PHY XML file, e.g. "n3rfpipephy.xml"
|
||||
|
||||
:param core.emane.emanemodel.EmaneModel emane_model: emane model to create phy xml for
|
||||
:param interface: interface for this model
|
||||
:return: phy xml filename
|
||||
:rtype: str
|
||||
"""
|
||||
return "%sphy.xml" % _basename(emane_model, interface)
|
|
@ -1,9 +1,10 @@
|
|||
from xml.dom.minidom import parse
|
||||
|
||||
from core import logger
|
||||
from core.conf import ConfigShim
|
||||
from core.enumerations import NodeTypes
|
||||
from core.misc import nodeutils
|
||||
from core.service import ServiceManager
|
||||
from core.service import ServiceManager, ServiceShim
|
||||
from core.xml import xmlutils
|
||||
|
||||
|
||||
|
@ -71,7 +72,12 @@ class CoreDocumentParser0(object):
|
|||
"""
|
||||
Helper to return tuple of attributes common to nodes and nets.
|
||||
"""
|
||||
node_id = int(obj.getAttribute("id"))
|
||||
node_id = obj.getAttribute("id")
|
||||
try:
|
||||
node_id = int(node_id)
|
||||
except:
|
||||
logger.debug("parsing node without integer id: %s", node_id)
|
||||
|
||||
name = str(obj.getAttribute("name"))
|
||||
node_type = str(obj.getAttribute("type"))
|
||||
return node_id, name, node_type
|
||||
|
@ -204,7 +210,8 @@ class CoreDocumentParser0(object):
|
|||
|
||||
# TODO: assign other config managers here
|
||||
if mgr:
|
||||
mgr.setconfig_keyvalues(nodenum, name, kvs)
|
||||
for k, v in kvs:
|
||||
mgr.set_config(k, v, node_id=nodenum, config_type=name)
|
||||
|
||||
def parsenetem(self, model, obj, kvs):
|
||||
"""
|
||||
|
@ -217,7 +224,6 @@ class CoreDocumentParser0(object):
|
|||
# nodes and interfaces do not exist yet, at this point of the parsing,
|
||||
# save (key, value) pairs for later
|
||||
try:
|
||||
# kvs = map(lambda(k, v): (int(v)), kvs)
|
||||
kvs = map(self.numericvalue, kvs)
|
||||
except ValueError:
|
||||
logger.warn("error parsing link parameters for '%s' on '%s'", ifname, peer)
|
||||
|
@ -273,7 +279,7 @@ class CoreDocumentParser0(object):
|
|||
services = []
|
||||
for service in node.getElementsByTagName("Service"):
|
||||
services.append(str(service.getAttribute("name")))
|
||||
self.session.services.defaultservices[type] = services
|
||||
self.session.services.default_services[type] = services
|
||||
logger.info("default services for type %s set to %s" % (type, services))
|
||||
|
||||
def parseservices(self):
|
||||
|
@ -310,7 +316,10 @@ class CoreDocumentParser0(object):
|
|||
# associate nodes with services
|
||||
for objid in sorted(svclists.keys()):
|
||||
n = self.session.get_object(objid)
|
||||
self.session.services.addservicestonode(node=n, nodetype=n.type, services_str=svclists[objid])
|
||||
services = svclists[objid]
|
||||
if services:
|
||||
services = services.split("|")
|
||||
self.session.services.add_services(node=n, node_type=n.type, services=services)
|
||||
|
||||
def parseservice(self, service, n):
|
||||
"""
|
||||
|
@ -361,15 +370,20 @@ class CoreDocumentParser0(object):
|
|||
filename = file.getAttribute("name")
|
||||
files.append(filename)
|
||||
data = xmlutils.get_text_child(file)
|
||||
typestr = "service:%s:%s" % (name, filename)
|
||||
self.session.services.setservicefile(nodenum=n.objid, type=typestr,
|
||||
filename=filename,
|
||||
srcname=None, data=data)
|
||||
self.session.services.set_service_file(node_id=n.objid, service_name=name, file_name=filename, data=data)
|
||||
|
||||
if len(files):
|
||||
values.append("files=%s" % files)
|
||||
if not bool(service.getAttribute("custom")):
|
||||
return True
|
||||
self.session.services.setcustomservice(n.objid, svc, values)
|
||||
self.session.services.set_service(n.objid, svc)
|
||||
# set custom values for custom service
|
||||
svc = self.session.services.get_service(n.objid, None)
|
||||
if not svc:
|
||||
raise ValueError("custom service(%s) for node(%s) does not exist", svc.name, n.objid)
|
||||
values = ConfigShim.str_to_dict("|".join(values))
|
||||
for name, value in values.iteritems():
|
||||
ServiceShim.setvalue(svc, name, value)
|
||||
return True
|
||||
|
||||
def parsehooks(self, hooks):
|
||||
|
@ -392,7 +406,7 @@ class CoreDocumentParser0(object):
|
|||
v = str(param.getAttribute("value"))
|
||||
if v == '':
|
||||
v = xmlutils.get_text_child(param) # allow attribute/text for newlines
|
||||
setattr(self.session.options, k, v)
|
||||
self.session.options.set_config(k, v)
|
||||
hooks = xmlutils.get_one_element(self.meta, "Hooks")
|
||||
if hooks:
|
||||
self.parsehooks(hooks)
|
||||
|
@ -403,4 +417,4 @@ class CoreDocumentParser0(object):
|
|||
v = str(param.getAttribute("value"))
|
||||
if v == '':
|
||||
v = xmlutils.get_text_child(param)
|
||||
self.session.metadata.add_item(k, v)
|
||||
self.session.metadata.set_config(k, v)
|
||||
|
|
|
@ -4,10 +4,11 @@ from xml.dom.minidom import parse
|
|||
|
||||
from core import constants
|
||||
from core import logger
|
||||
from core.conf import ConfigShim
|
||||
from core.enumerations import NodeTypes
|
||||
from core.misc import nodeutils
|
||||
from core.misc.ipaddress import MacAddress
|
||||
from core.service import ServiceManager
|
||||
from core.service import ServiceManager, ServiceShim
|
||||
from core.xml import xmlutils
|
||||
|
||||
|
||||
|
@ -209,9 +210,11 @@ class CoreDocumentParser1(object):
|
|||
raise NotImplementedError
|
||||
logger.info("setting wireless link params: node(%s) model(%s) mobility_model(%s)",
|
||||
nodenum, model_name, mobility_model_name)
|
||||
mgr.setconfig_keyvalues(nodenum, model_name, link_params.items())
|
||||
mgr.set_model_config(node_id=nodenum, model_name=model_name, config=link_params)
|
||||
|
||||
if mobility_model_name and mobility_params:
|
||||
mgr.setconfig_keyvalues(nodenum, mobility_model_name, mobility_params.items())
|
||||
self.session.mobility.set_model_config(node_id=nodenum, model_name=mobility_model_name,
|
||||
config=mobility_params)
|
||||
|
||||
def link_layer2_devices(self, device1, ifname1, device2, ifname2):
|
||||
"""
|
||||
|
@ -615,6 +618,7 @@ class CoreDocumentParser1(object):
|
|||
values.append('cmddown=%s' % shutdown)
|
||||
if validate:
|
||||
values.append('cmdval=%s' % validate)
|
||||
|
||||
filenames = []
|
||||
files = []
|
||||
for f in xmlutils.iter_children_with_name(service, 'file'):
|
||||
|
@ -629,19 +633,25 @@ class CoreDocumentParser1(object):
|
|||
data = None
|
||||
typestr = 'service:%s:%s' % (name, filename)
|
||||
files.append((typestr, filename, data))
|
||||
|
||||
if filenames:
|
||||
values.append('files=%s' % filenames)
|
||||
|
||||
custom = service.getAttribute('custom')
|
||||
if custom and custom.lower() == 'true':
|
||||
self.session.services.setcustomservice(node.objid, session_service, values)
|
||||
self.session.services.set_service(node.objid, session_service.name)
|
||||
values = ConfigShim.str_to_dict("|".join(values))
|
||||
for key, value in values.iteritems():
|
||||
ServiceShim.setvalue(session_service, key, value)
|
||||
|
||||
# NOTE: if a custom service is used, setservicefile() must be
|
||||
# called after the custom service exists
|
||||
for typestr, filename, data in files:
|
||||
self.session.services.setservicefile(
|
||||
nodenum=node.objid,
|
||||
type=typestr,
|
||||
filename=filename,
|
||||
srcname=None,
|
||||
svcname = typestr.split(":")[1]
|
||||
self.session.services.set_service_file(
|
||||
node_id=node.objid,
|
||||
service_name=svcname,
|
||||
file_name=filename,
|
||||
data=data
|
||||
)
|
||||
return str(name)
|
||||
|
@ -670,10 +680,13 @@ class CoreDocumentParser1(object):
|
|||
services_str = None # default services will be added
|
||||
else:
|
||||
return
|
||||
self.session.services.addservicestonode(
|
||||
if services_str:
|
||||
services_str = services_str.split("|")
|
||||
|
||||
self.session.services.add_services(
|
||||
node=node,
|
||||
nodetype=node_type,
|
||||
services_str=services_str
|
||||
node_type=node_type,
|
||||
services=services_str
|
||||
)
|
||||
|
||||
def set_object_presentation(self, obj, element, node_type):
|
||||
|
@ -812,7 +825,7 @@ class CoreDocumentParser1(object):
|
|||
params = self.parse_parameter_children(options)
|
||||
for name, value in params.iteritems():
|
||||
if name and value:
|
||||
setattr(self.session.options, str(name), str(value))
|
||||
self.session.options.set_config(str(name), str(value))
|
||||
|
||||
def parse_session_hooks(self, session_config):
|
||||
"""
|
||||
|
@ -837,7 +850,7 @@ class CoreDocumentParser1(object):
|
|||
params = self.parse_parameter_children(metadata)
|
||||
for name, value in params.iteritems():
|
||||
if name and value:
|
||||
self.session.metadata.add_item(str(name), str(value))
|
||||
self.session.metadata.set_config(str(name), str(value))
|
||||
|
||||
def parse_session_config(self):
|
||||
session_config = xmlutils.get_first_child_by_tag_name(self.scenario, 'CORE:sessionconfig')
|
||||
|
@ -872,5 +885,5 @@ class CoreDocumentParser1(object):
|
|||
self.default_services[device_type] = services
|
||||
# store default services for the session
|
||||
for t, s in self.default_services.iteritems():
|
||||
self.session.services.defaultservices[t] = s
|
||||
self.session.services.default_services[t] = s
|
||||
logger.info('default services for node type \'%s\' set to: %s' % (t, s))
|
||||
|
|
|
@ -6,7 +6,7 @@ import pwd
|
|||
from core import logger
|
||||
from core.coreobj import PyCoreNet
|
||||
from core.coreobj import PyCoreNode
|
||||
from core.enumerations import RegisterTlvs
|
||||
from core.enumerations import RegisterTlvs, EventTypes
|
||||
from core.xml import xmlutils
|
||||
|
||||
|
||||
|
@ -38,7 +38,8 @@ class CoreDocumentWriter0(Document):
|
|||
self.populatefromsession()
|
||||
|
||||
def populatefromsession(self):
|
||||
self.session.emane.setup() # not during runtime?
|
||||
if self.session.state != EventTypes.RUNTIME_STATE.value:
|
||||
self.session.emane.setup() # not during runtime?
|
||||
self.addorigin()
|
||||
self.adddefaultservices()
|
||||
self.addnets()
|
||||
|
@ -83,8 +84,8 @@ class CoreDocumentWriter0(Document):
|
|||
for netif in net.netifs(sort=True):
|
||||
self.addnetem(n, netif)
|
||||
# wireless/mobility models
|
||||
modelconfigs = net.session.mobility.getmodels(net)
|
||||
modelconfigs += net.session.emane.getmodels(net)
|
||||
modelconfigs = net.session.mobility.get_models(net)
|
||||
modelconfigs += net.session.emane.get_models(net)
|
||||
self.addmodels(n, modelconfigs)
|
||||
self.addposition(net)
|
||||
|
||||
|
@ -136,14 +137,14 @@ class CoreDocumentWriter0(Document):
|
|||
for m, conf in configs:
|
||||
model = self.createElement("model")
|
||||
n.appendChild(model)
|
||||
model.setAttribute("name", m._name)
|
||||
model.setAttribute("name", m.name)
|
||||
type = "wireless"
|
||||
if m._type == RegisterTlvs.MOBILITY.value:
|
||||
if m.config_type == RegisterTlvs.MOBILITY.value:
|
||||
type = "mobility"
|
||||
model.setAttribute("type", type)
|
||||
for i, k in enumerate(m.getnames()):
|
||||
|
||||
for k, value in conf.iteritems():
|
||||
key = self.createElement(k)
|
||||
value = conf[i]
|
||||
if value is None:
|
||||
value = ""
|
||||
key.appendChild(self.createTextNode("%s" % value))
|
||||
|
@ -193,9 +194,8 @@ class CoreDocumentWriter0(Document):
|
|||
# could use ifc.params, transport_type
|
||||
self.addaddresses(i, ifc)
|
||||
# per-interface models
|
||||
if netmodel and netmodel._name[:6] == "emane_":
|
||||
cfg = self.session.emane.getifcconfig(node.objid, netmodel._name,
|
||||
None, ifc)
|
||||
if netmodel and netmodel.name[:6] == "emane_":
|
||||
cfg = self.session.emane.getifcconfig(node.objid, ifc, netmodel.name)
|
||||
if cfg:
|
||||
self.addmodels(i, ((netmodel, cfg),))
|
||||
|
||||
|
@ -276,15 +276,15 @@ class CoreDocumentWriter0(Document):
|
|||
"""
|
||||
Add default services and node types to the ServicePlan.
|
||||
"""
|
||||
for type in self.session.services.defaultservices:
|
||||
defaults = self.session.services.getdefaultservices(type)
|
||||
for type in self.session.services.default_services:
|
||||
defaults = self.session.services.get_default_services(type)
|
||||
spn = self.createElement("Node")
|
||||
spn.setAttribute("type", type)
|
||||
self.sp.appendChild(spn)
|
||||
for svc in defaults:
|
||||
s = self.createElement("Service")
|
||||
spn.appendChild(s)
|
||||
s.setAttribute("name", str(svc._name))
|
||||
s.setAttribute("name", str(svc.name))
|
||||
|
||||
def addservices(self, node):
|
||||
"""
|
||||
|
@ -292,7 +292,7 @@ class CoreDocumentWriter0(Document):
|
|||
"""
|
||||
if len(node.services) == 0:
|
||||
return
|
||||
defaults = self.session.services.getdefaultservices(node.type)
|
||||
defaults = self.session.services.get_default_services(node.type)
|
||||
if node.services == defaults:
|
||||
return
|
||||
spn = self.createElement("Node")
|
||||
|
@ -302,24 +302,21 @@ class CoreDocumentWriter0(Document):
|
|||
for svc in node.services:
|
||||
s = self.createElement("Service")
|
||||
spn.appendChild(s)
|
||||
s.setAttribute("name", str(svc._name))
|
||||
s.setAttribute("startup_idx", str(svc._startindex))
|
||||
if svc._starttime != "":
|
||||
s.setAttribute("start_time", str(svc._starttime))
|
||||
s.setAttribute("name", str(svc.name))
|
||||
# only record service names if not a customized service
|
||||
if not svc._custom:
|
||||
if not svc.custom:
|
||||
continue
|
||||
s.setAttribute("custom", str(svc._custom))
|
||||
xmlutils.add_elements_from_list(self, s, svc._dirs, "Directory", "name")
|
||||
s.setAttribute("custom", str(svc.custom))
|
||||
xmlutils.add_elements_from_list(self, s, svc.dirs, "Directory", "name")
|
||||
|
||||
for fn in svc._configs:
|
||||
for fn in svc.configs:
|
||||
if len(fn) == 0:
|
||||
continue
|
||||
f = self.createElement("File")
|
||||
f.setAttribute("name", fn)
|
||||
# all file names are added to determine when a file has been deleted
|
||||
s.appendChild(f)
|
||||
data = self.session.services.getservicefiledata(svc, fn)
|
||||
data = svc.config_data.get(fn)
|
||||
if data is None:
|
||||
# this includes only customized file contents and skips
|
||||
# the auto-generated files
|
||||
|
@ -327,9 +324,9 @@ class CoreDocumentWriter0(Document):
|
|||
txt = self.createTextNode(data)
|
||||
f.appendChild(txt)
|
||||
|
||||
xmlutils.add_text_elements_from_list(self, s, svc._startup, "Command", (("type", "start"),))
|
||||
xmlutils.add_text_elements_from_list(self, s, svc._shutdown, "Command", (("type", "stop"),))
|
||||
xmlutils.add_text_elements_from_list(self, s, svc._validate, "Command", (("type", "validate"),))
|
||||
xmlutils.add_text_elements_from_list(self, s, svc.startup, "Command", (("type", "start"),))
|
||||
xmlutils.add_text_elements_from_list(self, s, svc.shutdown, "Command", (("type", "stop"),))
|
||||
xmlutils.add_text_elements_from_list(self, s, svc.validate, "Command", (("type", "validate"),))
|
||||
|
||||
def addaddresses(self, i, netif):
|
||||
"""
|
||||
|
@ -370,18 +367,20 @@ class CoreDocumentWriter0(Document):
|
|||
"""
|
||||
# options
|
||||
options = self.createElement("SessionOptions")
|
||||
defaults = self.session.options.getdefaultvalues()
|
||||
for i, (k, v) in enumerate(self.session.options.getkeyvaluelist()):
|
||||
if str(v) != str(defaults[i]):
|
||||
xmlutils.add_text_param_to_parent(self, options, k, v)
|
||||
# addparamtoparent(self, options, k, v)
|
||||
defaults = self.session.options.default_values()
|
||||
for name, current_value in self.session.options.get_configs().iteritems():
|
||||
default_value = defaults[name]
|
||||
if current_value != default_value:
|
||||
xmlutils.add_text_param_to_parent(self, options, name, current_value)
|
||||
|
||||
if options.hasChildNodes():
|
||||
self.meta.appendChild(options)
|
||||
|
||||
# hook scripts
|
||||
self.addhooks()
|
||||
|
||||
# meta
|
||||
meta = self.createElement("MetaData")
|
||||
self.meta.appendChild(meta)
|
||||
for k, v in self.session.metadata.items():
|
||||
xmlutils.add_text_param_to_parent(self, meta, k, v)
|
||||
# addparamtoparent(self, meta, k, v)
|
||||
for name, current_value in self.session.metadata.get_configs().iteritems():
|
||||
xmlutils.add_text_param_to_parent(self, meta, name, current_value)
|
||||
|
|
|
@ -217,13 +217,7 @@ class ScenarioPlan(XmlElement):
|
|||
self.last_network_id = 0
|
||||
self.addNetworks()
|
||||
self.addDevices()
|
||||
|
||||
# XXX Do we need these?
|
||||
# self.session.emane.setup() # not during runtime?
|
||||
# self.addorigin()
|
||||
|
||||
self.addDefaultServices()
|
||||
|
||||
self.addSessionConfiguration()
|
||||
|
||||
def addNetworks(self):
|
||||
|
@ -275,15 +269,15 @@ class ScenarioPlan(XmlElement):
|
|||
Add default services and node types to the ServicePlan.
|
||||
"""
|
||||
defaultservices = self.createElement("CORE:defaultservices")
|
||||
for type in self.coreSession.services.defaultservices:
|
||||
defaults = self.coreSession.services.getdefaultservices(type)
|
||||
for type in self.coreSession.services.default_services:
|
||||
defaults = self.coreSession.services.get_default_services(type)
|
||||
spn = self.createElement("device")
|
||||
spn.setAttribute("type", type)
|
||||
defaultservices.appendChild(spn)
|
||||
for svc in defaults:
|
||||
s = self.createElement("service")
|
||||
spn.appendChild(s)
|
||||
s.setAttribute("name", str(svc._name))
|
||||
s.setAttribute("name", str(svc.name))
|
||||
if defaultservices.hasChildNodes():
|
||||
self.appendChild(defaultservices)
|
||||
|
||||
|
@ -318,10 +312,12 @@ class ScenarioPlan(XmlElement):
|
|||
|
||||
# options
|
||||
options = self.createElement("options")
|
||||
defaults = self.coreSession.options.getdefaultvalues()
|
||||
for i, (k, v) in enumerate(self.coreSession.options.getkeyvaluelist()):
|
||||
if str(v) != str(defaults[i]):
|
||||
XmlElement.add_parameter(self.document, options, k, v)
|
||||
options_config = self.coreSession.options.get_configs()
|
||||
for _id, default_value in self.coreSession.options.default_values().iteritems():
|
||||
value = options_config[_id]
|
||||
if value != default_value:
|
||||
XmlElement.add_parameter(self.document, options, _id, value)
|
||||
|
||||
if options.hasChildNodes():
|
||||
config.appendChild(options)
|
||||
|
||||
|
@ -340,7 +336,7 @@ class ScenarioPlan(XmlElement):
|
|||
|
||||
# metadata
|
||||
meta = self.createElement("metadata")
|
||||
for k, v in self.coreSession.metadata.items():
|
||||
for k, v in self.coreSession.metadata.get_configs().iteritems():
|
||||
XmlElement.add_parameter(self.document, meta, k, v)
|
||||
if meta.hasChildNodes():
|
||||
config.appendChild(meta)
|
||||
|
@ -479,9 +475,10 @@ class NetworkElement(NamedXmlElement):
|
|||
"""
|
||||
|
||||
if nodeutils.is_node(network_object, (NodeTypes.WIRELESS_LAN, NodeTypes.EMANE)):
|
||||
modelconfigs = network_object.session.mobility.getmodels(network_object)
|
||||
modelconfigs += network_object.session.emane.getmodels(network_object)
|
||||
modelconfigs = network_object.session.mobility.get_models(network_object)
|
||||
modelconfigs += network_object.session.emane.get_models(network_object)
|
||||
chan = None
|
||||
|
||||
for model, conf in modelconfigs:
|
||||
# Handle mobility parameters below
|
||||
if model.config_type == RegisterTlvs.MOBILITY.value:
|
||||
|
@ -496,10 +493,9 @@ class NetworkElement(NamedXmlElement):
|
|||
channel_domain="CORE")
|
||||
|
||||
# Add wireless model parameters
|
||||
for i, key in enumerate(model.getnames()):
|
||||
value = conf[i]
|
||||
for key, value in conf.iteritems():
|
||||
if value is not None:
|
||||
chan.addParameter(key, model.valueof(key, conf))
|
||||
chan.addParameter(key, value)
|
||||
|
||||
for model, conf in modelconfigs:
|
||||
if model.config_type == RegisterTlvs.MOBILITY.value:
|
||||
|
@ -509,8 +505,8 @@ class NetworkElement(NamedXmlElement):
|
|||
type_element = self.createElement("type")
|
||||
type_element.appendChild(self.createTextNode(model.name))
|
||||
mobility.appendChild(type_element)
|
||||
for i, key in enumerate(model.getnames()):
|
||||
value = conf[i]
|
||||
|
||||
for key, value in conf.iteritems():
|
||||
if value is not None:
|
||||
mobility.addParameter(key, value)
|
||||
|
||||
|
@ -658,8 +654,7 @@ class DeviceElement(NamedXmlElement):
|
|||
# per-interface models
|
||||
# XXX Remove???
|
||||
if netmodel and netmodel.name[:6] == "emane_":
|
||||
cfg = self.coreSession.emane.getifcconfig(device_object.objid, netmodel.name,
|
||||
None, interface_object)
|
||||
cfg = self.coreSession.emane.getifcconfig(device_object.objid, interface_object, netmodel.name)
|
||||
if cfg:
|
||||
interface_element.addModels(((netmodel, cfg),))
|
||||
|
||||
|
@ -675,7 +670,7 @@ class DeviceElement(NamedXmlElement):
|
|||
if len(device_object.services) == 0:
|
||||
return
|
||||
|
||||
defaults = self.coreSession.services.getdefaultservices(device_object.type)
|
||||
defaults = self.coreSession.services.get_default_services(device_object.type)
|
||||
if device_object.services == defaults:
|
||||
return
|
||||
spn = self.createElement("CORE:services")
|
||||
|
@ -685,24 +680,21 @@ class DeviceElement(NamedXmlElement):
|
|||
for svc in device_object.services:
|
||||
s = self.createElement("service")
|
||||
spn.appendChild(s)
|
||||
s.setAttribute("name", str(svc._name))
|
||||
s.setAttribute("startup_idx", str(svc._startindex))
|
||||
if svc._starttime != "":
|
||||
s.setAttribute("start_time", str(svc._starttime))
|
||||
s.setAttribute("name", str(svc.name))
|
||||
# only record service names if not a customized service
|
||||
if not svc._custom:
|
||||
if not svc.custom:
|
||||
continue
|
||||
s.setAttribute("custom", str(svc._custom))
|
||||
xmlutils.add_elements_from_list(self, s, svc._dirs, "directory", "name")
|
||||
s.setAttribute("custom", str(svc.custom))
|
||||
xmlutils.add_elements_from_list(self, s, svc.dirs, "directory", "name")
|
||||
|
||||
for fn in svc._configs:
|
||||
for fn in svc.configs:
|
||||
if len(fn) == 0:
|
||||
continue
|
||||
f = self.createElement("file")
|
||||
f.setAttribute("name", fn)
|
||||
# all file names are added to determine when a file has been deleted
|
||||
s.appendChild(f)
|
||||
data = self.coreSession.services.getservicefiledata(svc, fn)
|
||||
data = svc.config_data.get(fn)
|
||||
if data is None:
|
||||
# this includes only customized file contents and skips
|
||||
# the auto-generated files
|
||||
|
@ -710,12 +702,9 @@ class DeviceElement(NamedXmlElement):
|
|||
txt = self.createTextNode("\n" + data)
|
||||
f.appendChild(txt)
|
||||
|
||||
xmlutils.add_text_elements_from_list(self, s, svc._startup, "command",
|
||||
(("type", "start"),))
|
||||
xmlutils.add_text_elements_from_list(self, s, svc._shutdown, "command",
|
||||
(("type", "stop"),))
|
||||
xmlutils.add_text_elements_from_list(self, s, svc._validate, "command",
|
||||
(("type", "validate"),))
|
||||
xmlutils.add_text_elements_from_list(self, s, svc.startup, "command", (("type", "start"),))
|
||||
xmlutils.add_text_elements_from_list(self, s, svc.shutdown, "command", (("type", "stop"),))
|
||||
xmlutils.add_text_elements_from_list(self, s, svc.validate, "command", (("type", "validate"),))
|
||||
|
||||
|
||||
class ChannelElement(NamedXmlElement):
|
||||
|
|
|
@ -1,13 +0,0 @@
/var/log/core-daemon.log {
    rotate 7
    daily
    missingok
    notifempty
    compress
    sharedscripts
    postrotate
        if [ -r /var/run/core-daemon.pid ]; then
            kill -USR1 $(cat /var/run/core-daemon.pid) > /dev/null 2>&1
        fi
    endscript
}
|
|
@ -27,7 +27,7 @@ def example(options):

    # create wlan network node
    wlan = session.add_node(_type=NodeTypes.WIRELESS_LAN)
    session.set_wireless_model(wlan, BasicRangeModel)
    session.mobility.set_model(wlan, BasicRangeModel)

    # create nodes
    wireless_nodes = []
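The hunk above swaps the removed session.set_wireless_model() call for the new mobility manager API. A minimal sketch (not part of the commit) of the same call with an explicit configuration dict; the "range" and "bandwidth" keys are assumed BasicRangeModel option names:

    # attach the wireless model and configure it in one call,
    # mirroring session.mobility.set_model(wlan_node, Ns2ScriptedMobility, config) used in the tests
    session.mobility.set_model(wlan, BasicRangeModel, {
        "range": "280",           # assumed option: maximum link range
        "bandwidth": "54000000",  # assumed option: link bandwidth in bps
    })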
|
||||
|
|
|
@ -1,8 +1,7 @@

from core.emane import emanemanifest
from core.emane import emanemodel


## Custom EMANE Model
class ExampleModel(emanemodel.EmaneModel):
    ### MAC Definition

@ -47,10 +46,3 @@ class ExampleModel(emanemodel.EmaneModel):
    # Allows you to ignore options within phy/mac, used typically if you needed to add a custom option for display
    # within the gui.
    config_ignore = set()
    # Allows you to override how options are displayed with the GUI, using the GUI format of
    # "name:1-2|othername:3-4". This will be parsed into tabs, split by "|" and account for items based on the indexed
    # numbers after ":" for including values in each tab.
    config_groups_override = None
    # Allows you to override the default config matrix list. This value by default is the mac_config + phy_config, in
    # that order.
    config_matrix_override = None
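A minimal sketch (not part of this commit) of how a custom model like ExampleModel above might be registered and attached to an EMANE network with the reworked config API; the CoreEmu import path and the emane manager's models dict are assumptions based on the tests elsewhere in this changeset, and the empty config dict is a placeholder:

    from core.emulator.coreemu import CoreEmu  # assumed entry point
    from core.enumerations import NodeTypes

    coreemu = CoreEmu()
    session = coreemu.create_session()

    # make the custom model known to the EMANE manager (mirrors manager.models[...] in test_conf.py)
    session.emane.models[ExampleModel.name] = ExampleModel

    # create an EMANE network node and apply the model with a (hypothetical) config dict
    emane_net = session.add_node(_type=NodeTypes.EMANE)
    session.emane.set_model_config(node_id=emane_net.objid, model_name=ExampleModel.name, config={})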
|
||||
|
|
|
@ -2,64 +2,81 @@
Sample user-defined service.
"""

from core.misc.ipaddress import Ipv4Prefix
from core.service import CoreService
from core.service import ServiceManager
from core.service import ServiceMode


## Custom CORE Service
class MyService(CoreService):
    """
    This is a sample user-defined service.
    """
    # a unique name is required, without spaces
    _name = "MyService"
    # you can create your own group here
    _group = "Utility"
    # list of other services this service depends on
    _depends = ()
    # per-node directories
    _dirs = ()
    # generated files (without a full path this file goes in the node's dir,
    # e.g. /tmp/pycore.12345/n1.conf/)
    _configs = ('myservice.sh',)
    # this controls the starting order vs other enabled services
    _startindex = 50
    # list of startup commands, also may be generated during startup
    _startup = ('sh myservice.sh',)
    # list of shutdown commands
    _shutdown = ()
    ### Service Attributes

    # Name used as a unique ID for this service and is required, no spaces.
    name = "MyService"
    # Allows you to group services within the GUI under a common name.
    group = "Utility"
    # Executables this service depends on to function, if executable is not on the path, service will not be loaded.
    executables = ()
    # Services that this service depends on for startup, tuple of service names.
    dependencies = ()
    # Directories that this service will create within a node.
    dirs = ()
    # Files that this service will generate, without a full path this file goes in the node's directory.
    # e.g. /tmp/pycore.12345/n1.conf/myfile
    configs = ("sh myservice1.sh", "sh myservice2.sh")
    # Commands used to start this service, any non-zero exit code will cause a failure.
    startup = ("sh %s" % configs[0], "sh %s" % configs[1])
    # Commands used to validate that a service was started, any non-zero exit code will cause a failure.
    validate = ()
    # Validation mode, used to determine startup success.
    # * NON_BLOCKING - runs startup commands, and validates success with validation commands
    # * BLOCKING - runs startup commands, and validates success with the startup commands themselves
    # * TIMER - runs startup commands, and validates success by waiting for "validation_timer" alone
    validation_mode = ServiceMode.NON_BLOCKING
    # Time for a service to wait before running validation commands or determining success in TIMER mode.
    validation_timer = 0
    # Shutdown commands to stop this service.
    shutdown = ()

    ### On Load
    @classmethod
    def generateconfig(cls, node, filename, services):
        """
        Return a string that will be written to filename, or sent to the
        GUI for user customization.
        """
        cfg = "#!/bin/sh\n"
        cfg += "# auto-generated by MyService (sample.py)\n"
    def on_load(cls):
        # Provides a way to run some arbitrary logic when the service is loaded, possibly to help facilitate
        # dynamic settings for the environment.
        pass

    ### Get Configs
    @classmethod
    def get_configs(cls, node):
        # Provides a way to dynamically generate the config files from the node a service will run.
        # Defaults to the class definition and can be left out entirely if not needed.
        return cls.configs

    ### Generate Config
    @classmethod
    def generate_config(cls, node, filename):
        # Returns a string representation for a file, given the node the service is starting on the config filename
        # that this information will be used for. This must be defined, if "configs" are defined.
        cfg = "#!/bin/sh\n"

        if filename == cls.configs[0]:
            cfg += "# auto-generated by MyService (sample.py)\n"
            for ifc in node.netifs():
                cfg += 'echo "Node %s has interface %s"\n' % (node.name, ifc.name)
        elif filename == cls.configs[1]:
            cfg += "echo hello"

        for ifc in node.netifs():
            cfg += 'echo "Node %s has interface %s"\n' % (node.name, ifc.name)
            # here we do something interesting
            cfg += "\n".join(map(cls.subnetentry, ifc.addrlist))
            break
        return cfg

    @staticmethod
    def subnetentry(x):
        """
        Generate a subnet declaration block given an IPv4 prefix string
        for inclusion in the config file.
        """
        if x.find(":") >= 0:
            # this is an IPv6 address
            return ""
        else:
            net = Ipv4Prefix(x)
            return 'echo " network %s"' % net
    ### Get Startup
    @classmethod
    def get_startup(cls, node):
        # Provides a way to dynamically generate the startup commands from the node a service will run.
        # Defaults to the class definition and can be left out entirely if not needed.
        return cls.startup


# this is needed to load desired services when being integrated into core, otherwise this is not needed
def load_services():
    # this line is required to add the above class to the list of available services
    ServiceManager.add(MyService)
    ### Get Validate
    @classmethod
    def get_validate(cls, node):
        # Provides a way to dynamically generate the validate commands from the node a service will run.
        # Defaults to the class definition and can be left out entirely if not needed.
        return cls.validate
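A short sketch (not part of the commit) of how a service file like the sample above now gets picked up: the old load_services() hook is gone, and the daemon instead calls ServiceManager.add_services() on each directory listed in the custom_services_dir option of core.conf (see the core-daemon changes later in this diff). The directory path below is hypothetical:

    from core.service import ServiceManager

    # load every CoreService subclass found in the directory
    ServiceManager.add_services("/home/user/.core/myservices")

    # once loaded, a service can be assigned to a node by name and booted
    # session.services.set_service(node.objid, "MyService")
    # session.services.boot_services(node)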
|
||||
|
|
|
@ -159,8 +159,8 @@ def main():
|
|||
n.newnetif(switch, ["%s/%s" % (prefix.addr(i), prefix.prefixlen)])
|
||||
n.cmd([constants.SYSCTL_BIN, "net.ipv4.icmp_echo_ignore_broadcasts=0"])
|
||||
if options.services is not None:
|
||||
session.services.addservicestonode(n, "", options.services)
|
||||
n.boot()
|
||||
session.services.add_services(n, "", options.services.split("|"))
|
||||
session.services.boot_services(n)
|
||||
nodelist.append(n)
|
||||
if i % 25 == 0:
|
||||
print "\n%s nodes created " % i,
|
||||
|
|
|
@ -88,7 +88,7 @@ ip forwarding
|
|||
|
||||
def boot(self):
|
||||
self.config()
|
||||
self.session.services.bootnodeservices(self)
|
||||
self.session.services.boot_services(self)
|
||||
|
||||
def bootscript(self):
|
||||
return """\
|
||||
|
|
|
@ -420,8 +420,8 @@ class Experiment(object):
|
|||
tmp = self.session.add_object(cls=nodes.CoreNode, objid=i, name="n%d" % i)
|
||||
tmp.newnetif(self.net, [addr])
|
||||
self.nodes.append(tmp)
|
||||
self.session.services.addservicestonode(tmp, "router", "IPForward")
|
||||
self.session.services.bootnodeservices(tmp)
|
||||
self.session.services.add_services(tmp, "router", "IPForward")
|
||||
self.session.services.boot_services(tmp)
|
||||
self.staticroutes(i, prefix, numnodes)
|
||||
|
||||
# link each node in a chain, with the previous node
|
||||
|
@ -429,8 +429,7 @@ class Experiment(object):
|
|||
self.net.link(prev.netif(0), tmp.netif(0))
|
||||
prev = tmp
|
||||
|
||||
def createemanesession(self, numnodes, verbose=False, cls=None,
|
||||
values=None):
|
||||
def createemanesession(self, numnodes, verbose=False, cls=None, values=None):
|
||||
""" Build a topology consisting of the given number of LxcNodes
|
||||
connected to an EMANE WLAN.
|
||||
"""
|
||||
|
@ -440,7 +439,6 @@ class Experiment(object):
|
|||
self.session.master = True
|
||||
self.session.location.setrefgeo(47.57917, -122.13232, 2.00000)
|
||||
self.session.location.refscale = 150.0
|
||||
self.session.config["emane_models"] = "RfPipe, Ieee80211abg, Bypass"
|
||||
self.session.emane.loadmodels()
|
||||
self.net = self.session.add_object(cls=EmaneNode, objid=numnodes + 1, name="wlan1")
|
||||
self.net.verbose = verbose
|
||||
|
@ -453,7 +451,7 @@ class Experiment(object):
|
|||
tmp.setposition(50, 50, None)
|
||||
tmp.newnetif(self.net, [addr])
|
||||
self.nodes.append(tmp)
|
||||
self.session.services.addservicestonode(tmp, "router", "IPForward")
|
||||
self.session.services.add_services(tmp, "router", "IPForward")
|
||||
|
||||
if values is None:
|
||||
values = cls.getdefaultvalues()
|
||||
|
@ -465,7 +463,7 @@ class Experiment(object):
|
|||
|
||||
for i in xrange(1, numnodes + 1):
|
||||
tmp = self.nodes[i - 1]
|
||||
self.session.services.bootnodeservices(tmp)
|
||||
self.session.services.boot_services(tmp)
|
||||
self.staticroutes(i, prefix, numnodes)
|
||||
|
||||
def setnodes(self):
|
||||
|
|
|
@ -1,4 +1,5 @@
enum34==1.1.6
lxml==3.5.0
mock==1.3.0
pycco==0.5.1
pytest==3.0.7
|
||||
|
|
|
@ -1,357 +0,0 @@
|
|||
#!/usr/bin/env python
|
||||
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2013 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# authors: Tom Goff <thomas.goff@boeing.com>
|
||||
# Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
|
||||
"""
|
||||
core-daemon: the CORE daemon is a server process that receives CORE API
|
||||
messages and instantiates emulated nodes and networks within the kernel. Various
|
||||
message handlers are defined and some support for sending messages.
|
||||
"""
|
||||
|
||||
import ConfigParser
|
||||
import atexit
|
||||
import importlib
|
||||
import optparse
|
||||
import os
|
||||
import signal
|
||||
import socket
|
||||
import sys
|
||||
import threading
|
||||
import time
|
||||
|
||||
from pkg_resources import require
|
||||
require("core_python", "corens3_python", "core_python_netns")
|
||||
|
||||
from core import constants
|
||||
from core import corehandlers
|
||||
from core import coreserver
|
||||
from core import enumerations
|
||||
from core import logger
|
||||
from core import services
|
||||
from core.api import coreapi
|
||||
from core.corehandlers import CoreDatagramRequestHandler
|
||||
from core.enumerations import MessageFlags
|
||||
from core.enumerations import RegisterTlvs
|
||||
from core.misc import nodemaps
|
||||
from core.misc import nodeutils
|
||||
from core.misc.utils import closeonexec
|
||||
from core.misc.utils import daemonize
|
||||
|
||||
DEFAULT_MAXFD = 1024
|
||||
|
||||
|
||||
def startudp(core_server, server_address):
|
||||
"""
|
||||
Start a thread running a UDP server on the same host,port for connectionless requests.
|
||||
|
||||
:param core.coreserver.CoreServer core_server: core server instance
|
||||
:param tuple[str, int] server_address: server address
|
||||
:return: created core udp server
|
||||
:rtype: core.coreserver.CoreUdpServer
|
||||
"""
|
||||
core_server.udpserver = coreserver.CoreUdpServer(server_address, CoreDatagramRequestHandler, core_server)
|
||||
core_server.udpthread = threading.Thread(target=core_server.udpserver.start)
|
||||
core_server.udpthread.daemon = True
|
||||
core_server.udpthread.start()
|
||||
return core_server.udpserver
|
||||
|
||||
|
||||
def startaux(core_server, aux_address, aux_handler):
|
||||
"""
|
||||
Start a thread running an auxiliary TCP server on the given address.
|
||||
This server will communicate with client requests using a handler
|
||||
using the aux_handler class. The aux_handler can provide an alternative
|
||||
API to CORE.
|
||||
|
||||
:param core.coreserver.CoreServer core_server: core server instance
|
||||
:param tuple[str, int] aux_address: auxiliary server address
|
||||
:param str aux_handler: auxiliary handler string to import
|
||||
:return: auxiliary server
|
||||
"""
|
||||
handlermodname, dot, handlerclassname = aux_handler.rpartition(".")
|
||||
handlermod = importlib.import_module(handlermodname)
|
||||
handlerclass = getattr(handlermod, handlerclassname)
|
||||
core_server.auxserver = coreserver.CoreAuxServer(aux_address, handlerclass, core_server)
|
||||
core_server.auxthread = threading.Thread(target=core_server.auxserver.start)
|
||||
core_server.auxthread.daemon = True
|
||||
core_server.auxthread.start()
|
||||
return core_server.auxserver
|
||||
|
||||
|
||||
def banner():
|
||||
"""
|
||||
Output the program banner printed to the terminal or log file.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
logger.info("CORE daemon v.%s started %s\n" % (constants.COREDPY_VERSION, time.ctime()))
|
||||
|
||||
|
||||
def cored(cfg=None):
|
||||
"""
|
||||
Start the CoreServer object and enter the server loop.
|
||||
|
||||
:param dict cfg: core configuration
|
||||
:return: nothing
|
||||
"""
|
||||
host = cfg["listenaddr"]
|
||||
port = int(cfg["port"])
|
||||
if host == "" or host is None:
|
||||
host = "localhost"
|
||||
try:
|
||||
server = coreserver.CoreServer((host, port), corehandlers.CoreRequestHandler, cfg)
|
||||
except:
|
||||
logger.exception("error starting main server on: %s:%s", host, port)
|
||||
sys.exit(1)
|
||||
|
||||
closeonexec(server.fileno())
|
||||
logger.info("main server started, listening on: %s:%s\n" % (host, port))
|
||||
|
||||
udpserver = startudp(server, (host, port))
|
||||
closeonexec(udpserver.fileno())
|
||||
|
||||
auxreqhandler = cfg["aux_request_handler"]
|
||||
if auxreqhandler:
|
||||
handler, auxport = auxreqhandler.rsplit(":")
|
||||
auxserver = startaux(server, (host, int(auxport)), handler)
|
||||
closeonexec(auxserver.fileno())
|
||||
|
||||
server.serve_forever()
|
||||
|
||||
|
||||
# TODO: should sessions and the main core daemon both catch at exist to shutdown independently?
|
||||
def cleanup():
|
||||
"""
|
||||
Runs server shutdown and cleanup when catching an exit signal.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
while coreserver.CoreServer.servers:
|
||||
server = coreserver.CoreServer.servers.pop()
|
||||
server.shutdown()
|
||||
|
||||
|
||||
atexit.register(cleanup)
|
||||
|
||||
|
||||
def sighandler(signum, stackframe):
|
||||
"""
|
||||
Signal handler when different signals are sent.
|
||||
|
||||
:param int signum: singal number sent
|
||||
:param stackframe: stack frame sent
|
||||
:return: nothing
|
||||
"""
|
||||
logger.error("terminated by signal: %s", signum)
|
||||
sys.exit(signum)
|
||||
|
||||
|
||||
signal.signal(signal.SIGHUP, sighandler)
|
||||
signal.signal(signal.SIGINT, sighandler)
|
||||
signal.signal(signal.SIGTERM, sighandler)
|
||||
signal.signal(signal.SIGUSR1, sighandler)
|
||||
signal.signal(signal.SIGUSR2, sighandler)
|
||||
|
||||
|
||||
def logrotate(stdout, stderr, stdoutmode=0644, stderrmode=0644):
|
||||
"""
|
||||
Log rotation method.
|
||||
|
||||
:param stdout: stdout
|
||||
:param stderr: stderr
|
||||
:param int stdoutmode: stdout mode
|
||||
:param int stderrmode: stderr mode
|
||||
:return:
|
||||
"""
|
||||
|
||||
def reopen(fileno, filename, mode):
|
||||
err = 0
|
||||
fd = -1
|
||||
try:
|
||||
fd = os.open(filename,
|
||||
os.O_WRONLY | os.O_CREAT | os.O_APPEND, mode)
|
||||
os.dup2(fd, fileno)
|
||||
except OSError as e:
|
||||
err = e.errno
|
||||
finally:
|
||||
if fd >= 0:
|
||||
os.close(fd)
|
||||
return err
|
||||
|
||||
if stdout:
|
||||
err = reopen(1, stdout, stdoutmode)
|
||||
if stderr:
|
||||
if stderr == stdout and not err:
|
||||
try:
|
||||
os.dup2(1, 2)
|
||||
except OSError as e:
|
||||
pass
|
||||
else:
|
||||
reopen(2, stderr, stderrmode)
|
||||
|
||||
|
||||
def get_merged_config(filename):
|
||||
"""
|
||||
Return a configuration after merging config file and command-line arguments.
|
||||
|
||||
:param str filename: file name to merge configuration settings with
|
||||
:return: merged configuration
|
||||
:rtype: dict
|
||||
"""
|
||||
# these are the defaults used in the config file
|
||||
defaults = {"port": "%d" % enumerations.CORE_API_PORT,
|
||||
"listenaddr": "localhost",
|
||||
"pidfile": "%s/run/core-daemon.pid" % constants.CORE_STATE_DIR,
|
||||
"logfile": "%s/log/core-daemon.log" % constants.CORE_STATE_DIR,
|
||||
"xmlfilever": "1.0",
|
||||
"numthreads": "1",
|
||||
"verbose": "False",
|
||||
"daemonize": "False",
|
||||
"debug": "False",
|
||||
"execfile": None,
|
||||
"aux_request_handler": None,
|
||||
}
|
||||
|
||||
usagestr = "usage: %prog [-h] [options] [args]\n\n" + \
|
||||
"CORE daemon v.%s instantiates Linux network namespace " \
|
||||
"nodes." % constants.COREDPY_VERSION
|
||||
parser = optparse.OptionParser(usage=usagestr)
|
||||
parser.add_option("-f", "--configfile", dest="configfile",
|
||||
type="string",
|
||||
help="read config from specified file; default = %s" %
|
||||
filename)
|
||||
parser.add_option("-d", "--daemonize", dest="daemonize",
|
||||
action="store_true",
|
||||
help="run in background as daemon; default=%s" % \
|
||||
defaults["daemonize"])
|
||||
parser.add_option("-e", "--execute", dest="execfile", type="string",
|
||||
help="execute a Python/XML-based session")
|
||||
parser.add_option("-l", "--logfile", dest="logfile", type="string",
|
||||
help="log output to specified file; default = %s" %
|
||||
defaults["logfile"])
|
||||
parser.add_option("-p", "--port", dest="port", type=int,
|
||||
help="port number to listen on; default = %s" % \
|
||||
defaults["port"])
|
||||
parser.add_option("-i", "--pidfile", dest="pidfile",
|
||||
help="filename to write pid to; default = %s" % \
|
||||
defaults["pidfile"])
|
||||
parser.add_option("-t", "--numthreads", dest="numthreads", type=int,
|
||||
help="number of server threads; default = %s" % \
|
||||
defaults["numthreads"])
|
||||
parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
|
||||
help="enable verbose logging; default = %s" % \
|
||||
defaults["verbose"])
|
||||
parser.add_option("-g", "--debug", dest="debug", action="store_true",
|
||||
help="enable debug logging; default = %s" % \
|
||||
defaults["debug"])
|
||||
|
||||
# parse command line options
|
||||
options, args = parser.parse_args()
|
||||
|
||||
# read the config file
|
||||
if options.configfile is not None:
|
||||
filename = options.configfile
|
||||
del options.configfile
|
||||
cfg = ConfigParser.SafeConfigParser(defaults)
|
||||
cfg.read(filename)
|
||||
|
||||
section = "core-daemon"
|
||||
if not cfg.has_section(section):
|
||||
cfg.add_section(section)
|
||||
# gracefully support legacy configs (cored.py/cored now core-daemon)
|
||||
if cfg.has_section("cored.py"):
|
||||
for name, val in cfg.items("cored.py"):
|
||||
if name == "pidfile" or name == "logfile":
|
||||
bn = os.path.basename(val).replace("coredpy", "core-daemon")
|
||||
val = os.path.join(os.path.dirname(val), bn)
|
||||
cfg.set(section, name, val)
|
||||
if cfg.has_section("cored"):
|
||||
for name, val in cfg.items("cored"):
|
||||
if name == "pidfile" or name == "logfile":
|
||||
bn = os.path.basename(val).replace("cored", "core-daemon")
|
||||
val = os.path.join(os.path.dirname(val), bn)
|
||||
cfg.set(section, name, val)
|
||||
|
||||
# merge command line with config file
|
||||
for opt in options.__dict__:
|
||||
val = options.__dict__[opt]
|
||||
if val is not None:
|
||||
cfg.set(section, opt, val.__str__())
|
||||
|
||||
return dict(cfg.items(section)), args
|
||||
|
||||
|
||||
def exec_file(cfg):
|
||||
"""
|
||||
Send a Register Message to execute a new session based on XML or Python script file.
|
||||
|
||||
:param dict cfg: configuration settings
|
||||
:return: 0
|
||||
"""
|
||||
filename = cfg["execfile"]
|
||||
logger.info("Telling daemon to execute file: %s...", filename)
|
||||
tlvdata = coreapi.CoreRegisterTlv.pack(RegisterTlvs.EXECUTE_SERVER.value, filename)
|
||||
msg = coreapi.CoreRegMessage.pack(MessageFlags.ADD.value, tlvdata)
|
||||
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
|
||||
# TODO: connect address option
|
||||
sock.connect(("localhost", int(cfg["port"])))
|
||||
sock.sendall(msg)
|
||||
return 0
|
||||
|
||||
|
||||
def main():
|
||||
"""
|
||||
Main program startup.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
# get a configuration merged from config file and command-line arguments
|
||||
cfg, args = get_merged_config("%s/core.conf" % constants.CORE_CONF_DIR)
|
||||
for a in args:
|
||||
logger.error("ignoring command line argument: %s", a)
|
||||
|
||||
if cfg["daemonize"] == "True":
|
||||
daemonize(rootdir=None, umask=0, close_fds=False,
|
||||
stdin=os.devnull,
|
||||
stdout=cfg["logfile"], stderr=cfg["logfile"],
|
||||
pidfilename=cfg["pidfile"],
|
||||
defaultmaxfd=DEFAULT_MAXFD)
|
||||
signal.signal(signal.SIGUSR1, lambda signum, stackframe:
|
||||
logrotate(stdout=cfg["logfile"], stderr=cfg["logfile"]))
|
||||
|
||||
banner()
|
||||
if cfg["execfile"]:
|
||||
cfg["execfile"] = os.path.abspath(cfg["execfile"])
|
||||
sys.exit(exec_file(cfg))
|
||||
try:
|
||||
cored(cfg)
|
||||
except KeyboardInterrupt:
|
||||
logger.info("keyboard interrupt, stopping core daemon")
|
||||
|
||||
sys.exit(0)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
# configure nodes to use
|
||||
node_map = nodemaps.NODES
|
||||
if len(sys.argv) == 2 and sys.argv[1] == "ovs":
|
||||
from core.netns.openvswitch import OVS_NODES
|
||||
node_map.update(OVS_NODES)
|
||||
|
||||
# update with BSD based nodes
|
||||
if os.uname()[0] == "FreeBSD":
|
||||
from core.bsd.nodes import BSD_NODES
|
||||
node_map.update(BSD_NODES)
|
||||
|
||||
nodeutils.set_node_map(node_map)
|
||||
|
||||
# load default services
|
||||
services.load()
|
||||
|
||||
main()
|
|
@ -16,7 +16,6 @@ from core import logger
|
|||
from core.corehandlers import CoreHandler
|
||||
from core.coreserver import CoreServer
|
||||
from core.misc.utils import close_onexec
|
||||
from core.service import ServiceManager
|
||||
|
||||
|
||||
def banner():
|
||||
|
@ -116,14 +115,6 @@ def main():
|
|||
for a in args:
|
||||
logger.error("ignoring command line argument: %s", a)
|
||||
|
||||
# attempt load custom services
|
||||
service_paths = cfg.get("custom_services_dir")
|
||||
logger.debug("custom service paths: %s", service_paths)
|
||||
if service_paths:
|
||||
for service_path in service_paths.split(','):
|
||||
service_path = service_path.strip()
|
||||
ServiceManager.add_services(service_path)
|
||||
|
||||
banner()
|
||||
|
||||
# check if ovs flag was provided
|
||||
|
|
|
@ -33,16 +33,17 @@ data_files = [
|
|||
"data/core.conf",
|
||||
"data/logging.conf",
|
||||
]),
|
||||
(_MAN_DIR, glob_files("../doc/man/**.1")),
|
||||
(_MAN_DIR, glob_files("../man/**.1")),
|
||||
]
|
||||
data_files.extend(recursive_files(_EXAMPLES_DIR, "examples"))
|
||||
|
||||
setup(
|
||||
name="core",
|
||||
version="5.1",
|
||||
version="5.2",
|
||||
packages=find_packages(),
|
||||
install_requires=[
|
||||
"enum34",
|
||||
"lxml"
|
||||
],
|
||||
tests_require=[
|
||||
"pytest",
|
||||
|
|
|
@ -7,7 +7,6 @@ import os
|
|||
import pytest
|
||||
from mock.mock import MagicMock
|
||||
|
||||
from core import services
|
||||
from core.api.coreapi import CoreConfMessage
|
||||
from core.api.coreapi import CoreEventMessage
|
||||
from core.api.coreapi import CoreExecMessage
|
||||
|
@ -29,6 +28,7 @@ from core.enumerations import NodeTlvs
|
|||
from core.enumerations import NodeTypes
|
||||
from core.misc import ipaddress
|
||||
from core.misc.ipaddress import MacAddress
|
||||
from core.service import ServiceManager
|
||||
|
||||
EMANE_SERVICES = "zebra|OSPFv3MDR|IPForward"
|
||||
|
||||
|
@ -199,6 +199,7 @@ class CoreServerTest(object):
|
|||
self.request_handler.handle_message(message)
|
||||
|
||||
def shutdown(self):
|
||||
self.server.coreemu.shutdown()
|
||||
self.server.shutdown()
|
||||
self.server.server_close()
|
||||
|
||||
|
@ -214,9 +215,18 @@ def session():
|
|||
# return created session
|
||||
yield session_fixture
|
||||
|
||||
# clear session configurations
|
||||
session_fixture.location.reset()
|
||||
session_fixture.services.reset()
|
||||
session_fixture.mobility.config_reset()
|
||||
session_fixture.emane.config_reset()
|
||||
|
||||
# shutdown coreemu
|
||||
coreemu.shutdown()
|
||||
|
||||
# clear services, since they will be reloaded
|
||||
ServiceManager.services.clear()
|
||||
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def ip_prefixes():
|
||||
|
@ -225,9 +235,6 @@ def ip_prefixes():
|
|||
|
||||
@pytest.fixture()
|
||||
def cored():
|
||||
# load default services
|
||||
services.load()
|
||||
|
||||
# create and return server
|
||||
server = CoreServerTest()
|
||||
yield server
|
||||
|
@ -235,6 +242,11 @@ def cored():
|
|||
# cleanup
|
||||
server.shutdown()
|
||||
|
||||
#
|
||||
|
||||
# cleanup services
|
||||
ServiceManager.services.clear()
|
||||
|
||||
|
||||
def ping(from_node, to_node, ip_prefixes, count=3):
|
||||
address = ip_prefixes.ip4_address(to_node)
|
||||
|
|
|
@ -6,22 +6,25 @@ from core.service import CoreService


class MyService(CoreService):
    _name = "MyService"
    _group = "Utility"
    _depends = ()
    _dirs = ()
    _configs = ('myservice.sh',)
    _startindex = 50
    _startup = ('sh myservice.sh',)
    _shutdown = ()
    name = "MyService"
    group = "Utility"
    configs = ("myservice.sh",)
    startup = ("sh myservice.sh",)
    shutdown = ("sh myservice.sh",)

    @classmethod
    def generate_config(cls, node, filename):
        return "# test file"


class MyService2(CoreService):
    _name = "MyService2"
    _group = "Utility"
    _depends = ()
    _dirs = ()
    _configs = ('myservice.sh',)
    _startindex = 50
    _startup = ('sh myservice.sh',)
    _shutdown = ()
class MyService2(MyService):
    name = "MyService2"
    group = "Utility"
    configs = ("myservice2.sh",)
    startup = ("sh myservice2.sh",)
    shutdown = startup
    validate = startup

    @classmethod
    def generate_config(cls, node, filename):
        return "exit 1"
||||
|
|
183
daemon/tests/test_conf.py
Normal file
|
@ -0,0 +1,183 @@
|
|||
import pytest
|
||||
|
||||
from core.conf import ConfigurableManager
|
||||
from core.conf import ConfigurableOptions
|
||||
from core.conf import Configuration
|
||||
from core.conf import ModelManager
|
||||
from core.emane.ieee80211abg import EmaneIeee80211abgModel
|
||||
from core.enumerations import ConfigDataTypes
|
||||
from core.enumerations import NodeTypes
|
||||
from core.mobility import BasicRangeModel
|
||||
|
||||
|
||||
class TestConfigurableOptions(ConfigurableOptions):
|
||||
name_one = "value1"
|
||||
name_two = "value2"
|
||||
options = [
|
||||
Configuration(
|
||||
_id=name_one,
|
||||
_type=ConfigDataTypes.STRING,
|
||||
label=name_one
|
||||
),
|
||||
Configuration(
|
||||
_id=name_two,
|
||||
_type=ConfigDataTypes.STRING,
|
||||
label=name_two
|
||||
)
|
||||
]
|
||||
|
||||
|
||||
class TestConf:
|
||||
def test_configurable_options_default(self):
|
||||
# given
|
||||
configurable_options = TestConfigurableOptions()
|
||||
|
||||
# when
|
||||
default_values = TestConfigurableOptions.default_values()
|
||||
instance_default_values = configurable_options.default_values()
|
||||
|
||||
# then
|
||||
assert len(default_values) == 2
|
||||
assert TestConfigurableOptions.name_one in default_values
|
||||
assert TestConfigurableOptions.name_two in default_values
|
||||
assert len(instance_default_values) == 2
|
||||
assert TestConfigurableOptions.name_one in instance_default_values
|
||||
assert TestConfigurableOptions.name_two in instance_default_values
|
||||
|
||||
def test_nodes(self):
|
||||
# given
|
||||
config_manager = ConfigurableManager()
|
||||
test_config = {1: 2}
|
||||
node_id = 1
|
||||
config_manager.set_configs(test_config)
|
||||
config_manager.set_configs(test_config, node_id=node_id)
|
||||
|
||||
# when
|
||||
nodes = config_manager.nodes()
|
||||
|
||||
# then
|
||||
assert len(nodes) == 1
|
||||
assert node_id in nodes
|
||||
|
||||
def test_config_reset_all(self):
|
||||
# given
|
||||
config_manager = ConfigurableManager()
|
||||
test_config = {1: 2}
|
||||
node_id = 1
|
||||
config_manager.set_configs(test_config)
|
||||
config_manager.set_configs(test_config, node_id=node_id)
|
||||
|
||||
# when
|
||||
config_manager.config_reset()
|
||||
|
||||
# then
|
||||
assert not config_manager.node_configurations
|
||||
|
||||
def test_config_reset_node(self):
|
||||
# given
|
||||
config_manager = ConfigurableManager()
|
||||
test_config = {1: 2}
|
||||
node_id = 1
|
||||
config_manager.set_configs(test_config)
|
||||
config_manager.set_configs(test_config, node_id=node_id)
|
||||
|
||||
# when
|
||||
config_manager.config_reset(node_id)
|
||||
|
||||
# then
|
||||
assert not config_manager.get_configs(node_id=node_id)
|
||||
assert config_manager.get_configs()
|
||||
|
||||
def test_configs_setget(self):
|
||||
# given
|
||||
config_manager = ConfigurableManager()
|
||||
test_config = {1: 2}
|
||||
node_id = 1
|
||||
config_manager.set_configs(test_config)
|
||||
config_manager.set_configs(test_config, node_id=node_id)
|
||||
|
||||
# when
|
||||
default_config = config_manager.get_configs()
|
||||
node_config = config_manager.get_configs(node_id)
|
||||
|
||||
# then
|
||||
assert default_config
|
||||
assert node_config
|
||||
|
||||
def test_config_setget(self):
|
||||
# given
|
||||
config_manager = ConfigurableManager()
|
||||
name = "test"
|
||||
value = "1"
|
||||
node_id = 1
|
||||
config_manager.set_config(name, value)
|
||||
config_manager.set_config(name, value, node_id=node_id)
|
||||
|
||||
# when
|
||||
defaults_value = config_manager.get_config(name)
|
||||
node_value = config_manager.get_config(name, node_id=node_id)
|
||||
|
||||
# then
|
||||
assert defaults_value == value
|
||||
assert node_value == value
|
||||
|
||||
def test_model_setget_config(self):
|
||||
# given
|
||||
manager = ModelManager()
|
||||
manager.models[BasicRangeModel.name] = BasicRangeModel
|
||||
|
||||
# when
|
||||
manager.set_model_config(1, BasicRangeModel.name)
|
||||
|
||||
# then
|
||||
assert manager.get_model_config(1, BasicRangeModel.name)
|
||||
|
||||
def test_model_set_config_error(self):
|
||||
# given
|
||||
manager = ModelManager()
|
||||
manager.models[BasicRangeModel.name] = BasicRangeModel
|
||||
bad_name = "bad-model"
|
||||
|
||||
# when/then
|
||||
with pytest.raises(ValueError):
|
||||
manager.set_model_config(1, bad_name)
|
||||
|
||||
def test_model_get_config_error(self):
|
||||
# given
|
||||
manager = ModelManager()
|
||||
manager.models[BasicRangeModel.name] = BasicRangeModel
|
||||
bad_name = "bad-model"
|
||||
|
||||
# when/then
|
||||
with pytest.raises(ValueError):
|
||||
manager.get_model_config(1, bad_name)
|
||||
|
||||
def test_model_set(self, session):
|
||||
# given
|
||||
wlan_node = session.add_node(_type=NodeTypes.WIRELESS_LAN)
|
||||
|
||||
# when
|
||||
session.mobility.set_model(wlan_node, BasicRangeModel)
|
||||
|
||||
# then
|
||||
assert session.mobility.get_model_config(wlan_node.objid, BasicRangeModel.name)
|
||||
|
||||
def test_model_set_error(self, session):
|
||||
# given
|
||||
wlan_node = session.add_node(_type=NodeTypes.WIRELESS_LAN)
|
||||
|
||||
# when / then
|
||||
with pytest.raises(ValueError):
|
||||
session.mobility.set_model(wlan_node, EmaneIeee80211abgModel)
|
||||
|
||||
def test_get_models(self, session):
|
||||
# given
|
||||
wlan_node = session.add_node(_type=NodeTypes.WIRELESS_LAN)
|
||||
session.mobility.set_model(wlan_node, BasicRangeModel)
|
||||
|
||||
# when
|
||||
models = session.mobility.get_models(wlan_node)
|
||||
|
||||
# then
|
||||
assert models
|
||||
assert len(models) == 1
|
|
@ -5,22 +5,19 @@ Unit tests for testing basic CORE networks.
|
|||
import os
|
||||
import stat
|
||||
import threading
|
||||
from xml.etree import ElementTree
|
||||
|
||||
import pytest
|
||||
from mock import MagicMock
|
||||
|
||||
from core.data import ConfigData
|
||||
from core.emulator.emudata import NodeOptions
|
||||
from core.enumerations import MessageFlags, NodeTypes
|
||||
from core.enumerations import MessageFlags
|
||||
from core.enumerations import NodeTypes
|
||||
from core.mobility import BasicRangeModel
|
||||
from core.mobility import Ns2ScriptedMobility
|
||||
from core.netns.vnodeclient import VnodeClient
|
||||
from core.service import ServiceManager
|
||||
|
||||
_PATH = os.path.abspath(os.path.dirname(__file__))
|
||||
_SERVICES_PATH = os.path.join(_PATH, "myservices")
|
||||
_MOBILITY_FILE = os.path.join(_PATH, "mobility.scen")
|
||||
_XML_VERSIONS = ["0.0", "1.0"]
|
||||
_WIRED = [
|
||||
NodeTypes.PEER_TO_PEER,
|
||||
NodeTypes.HUB,
|
||||
|
@ -52,16 +49,6 @@ def ping(from_node, to_node, ip_prefixes):
|
|||
|
||||
|
||||
class TestCore:
|
||||
def test_import_service(self):
|
||||
"""
|
||||
Test importing a custom service.
|
||||
|
||||
:param conftest.Core core: core fixture to test with
|
||||
"""
|
||||
ServiceManager.add_services(_SERVICES_PATH)
|
||||
assert ServiceManager.get("MyService")
|
||||
assert ServiceManager.get("MyService2")
|
||||
|
||||
@pytest.mark.parametrize("net_type", _WIRED)
|
||||
def test_wired_ping(self, session, net_type, ip_prefixes):
|
||||
"""
|
||||
|
@ -91,61 +78,6 @@ class TestCore:
|
|||
status = ping(node_one, node_two, ip_prefixes)
|
||||
assert not status
|
||||
|
||||
@pytest.mark.parametrize("version", _XML_VERSIONS)
|
||||
def test_xml(self, session, tmpdir, version, ip_prefixes):
|
||||
"""
|
||||
Test xml client methods.
|
||||
|
||||
:param session: session for test
|
||||
:param tmpdir: tmpdir to create data in
|
||||
:param str version: xml version to write and parse
|
||||
:param ip_prefixes: generates ip addresses for nodes
|
||||
"""
|
||||
|
||||
# create ptp
|
||||
ptp_node = session.add_node(_type=NodeTypes.PEER_TO_PEER)
|
||||
|
||||
# create nodes
|
||||
node_one = session.add_node()
|
||||
node_two = session.add_node()
|
||||
|
||||
# link nodes to ptp net
|
||||
for node in [node_one, node_two]:
|
||||
interface = ip_prefixes.create_interface(node)
|
||||
session.add_link(node.objid, ptp_node.objid, interface_one=interface)
|
||||
|
||||
# instantiate session
|
||||
session.instantiate()
|
||||
|
||||
# get ids for nodes
|
||||
n1_id = node_one.objid
|
||||
n2_id = node_two.objid
|
||||
|
||||
# save xml
|
||||
xml_file = tmpdir.join("session.xml")
|
||||
file_path = xml_file.strpath
|
||||
session.save_xml(file_path, version)
|
||||
|
||||
# verify xml file was created and can be parsed
|
||||
assert xml_file.isfile()
|
||||
assert ElementTree.parse(file_path)
|
||||
|
||||
# stop current session, clearing data
|
||||
session.shutdown()
|
||||
|
||||
# verify nodes have been removed from session
|
||||
with pytest.raises(KeyError):
|
||||
assert not session.get_object(n1_id)
|
||||
with pytest.raises(KeyError):
|
||||
assert not session.get_object(n2_id)
|
||||
|
||||
# load saved xml
|
||||
session.open_xml(file_path, start=True)
|
||||
|
||||
# verify nodes have been recreated
|
||||
assert session.get_object(n1_id)
|
||||
assert session.get_object(n2_id)
|
||||
|
||||
def test_vnode_client(self, session, ip_prefixes):
|
||||
"""
|
||||
Test vnode client methods.
|
||||
|
@ -256,7 +188,7 @@ class TestCore:
|
|||
|
||||
# create wlan
|
||||
wlan_node = session.add_node(_type=NodeTypes.WIRELESS_LAN)
|
||||
session.set_wireless_model(wlan_node, BasicRangeModel)
|
||||
session.mobility.set_model(wlan_node, BasicRangeModel)
|
||||
|
||||
# create nodes
|
||||
node_options = NodeOptions()
|
||||
|
@ -289,7 +221,7 @@ class TestCore:
|
|||
|
||||
# create wlan
|
||||
wlan_node = session.add_node(_type=NodeTypes.WIRELESS_LAN)
|
||||
session.set_wireless_model(wlan_node, BasicRangeModel)
|
||||
session.mobility.set_model(wlan_node, BasicRangeModel)
|
||||
|
||||
# create nodes
|
||||
node_options = NodeOptions()
|
||||
|
@ -306,15 +238,17 @@ class TestCore:
|
|||
session.wireless_link_all(wlan_node, [node_one, node_two])
|
||||
|
||||
# configure mobility script for session
|
||||
config = ConfigData(
|
||||
node=wlan_node.objid,
|
||||
object="ns2script",
|
||||
type=0,
|
||||
data_types=(10, 3, 11, 10, 10, 10, 10, 10, 0),
|
||||
data_values="file=%s|refresh_ms=50|loop=1|autostart=0.0|"
|
||||
"map=|script_start=|script_pause=|script_stop=" % _MOBILITY_FILE
|
||||
)
|
||||
session.config_object(config)
|
||||
config = {
|
||||
"file": _MOBILITY_FILE,
|
||||
"refresh_ms": "50",
|
||||
"loop": "1",
|
||||
"autostart": "0.0",
|
||||
"map": "",
|
||||
"script_start": "",
|
||||
"script_pause": "",
|
||||
"script_stop": "",
|
||||
}
|
||||
session.mobility.set_model(wlan_node, Ns2ScriptedMobility, config)
|
||||
|
||||
# add handler for receiving node updates
|
||||
event = threading.Event()
|
||||
|
|
|
@ -101,7 +101,7 @@ def run_cmd(node, exec_cmd):
|
|||
|
||||
|
||||
class TestGui:
|
||||
def test_broker(self, session, cored):
|
||||
def test_broker(self, cored):
|
||||
"""
|
||||
Test session broker creation.
|
||||
|
||||
|
@ -119,6 +119,7 @@ class TestGui:
|
|||
daemon = "localhost"
|
||||
|
||||
# add server
|
||||
session = cored.server.coreemu.create_session()
|
||||
session.broker.addserver(daemon, "127.0.0.1", CORE_API_PORT)
|
||||
|
||||
# setup server
|
||||
|
|
|
@ -39,7 +39,6 @@ class TestNodes:
|
|||
assert node.alive()
|
||||
assert node.up
|
||||
assert node.check_cmd(["ip", "addr", "show", "lo"])
|
||||
node.validate()
|
||||
|
||||
def test_node_update(self, session):
|
||||
# given
|
||||
|
|
298
daemon/tests/test_services.py
Normal file
|
@ -0,0 +1,298 @@
|
|||
import os
|
||||
|
||||
import pytest
|
||||
|
||||
from core.service import CoreService
|
||||
from core.service import ServiceDependencies
|
||||
from core.service import ServiceManager
|
||||
|
||||
_PATH = os.path.abspath(os.path.dirname(__file__))
|
||||
_SERVICES_PATH = os.path.join(_PATH, "myservices")
|
||||
|
||||
SERVICE_ONE = "MyService"
|
||||
SERVICE_TWO = "MyService2"
|
||||
|
||||
|
||||
class ServiceA(CoreService):
|
||||
name = "A"
|
||||
dependencies = ("B",)
|
||||
|
||||
|
||||
class ServiceB(CoreService):
|
||||
name = "B"
|
||||
dependencies = ()
|
||||
|
||||
|
||||
class ServiceC(CoreService):
|
||||
name = "C"
|
||||
dependencies = ("B", "D")
|
||||
|
||||
|
||||
class ServiceD(CoreService):
|
||||
name = "D"
|
||||
dependencies = ()
|
||||
|
||||
|
||||
class ServiceBadDependency(CoreService):
|
||||
name = "E"
|
||||
dependencies = ("Z",)
|
||||
|
||||
|
||||
class ServiceF(CoreService):
|
||||
name = "F"
|
||||
dependencies = ()
|
||||
|
||||
|
||||
class ServiceCycleDependency(CoreService):
|
||||
name = "G"
|
||||
|
||||
|
||||
class TestServices:
|
||||
def test_service_all_files(self, session):
|
||||
# given
|
||||
ServiceManager.add_services(_SERVICES_PATH)
|
||||
file_name = "myservice.sh"
|
||||
node = session.add_node()
|
||||
|
||||
# when
|
||||
session.services.set_service_file(node.objid, SERVICE_ONE, file_name, "# test")
|
||||
|
||||
# then
|
||||
service = session.services.get_service(node.objid, SERVICE_ONE)
|
||||
all_files = session.services.all_files(service)
|
||||
assert service
|
||||
assert all_files and len(all_files) == 1
|
||||
|
||||
def test_service_all_configs(self, session):
|
||||
# given
|
||||
ServiceManager.add_services(_SERVICES_PATH)
|
||||
node = session.add_node()
|
||||
|
||||
# when
|
||||
session.services.set_service(node.objid, SERVICE_ONE)
|
||||
session.services.set_service(node.objid, SERVICE_TWO)
|
||||
|
||||
# then
|
||||
all_configs = session.services.all_configs()
|
||||
assert all_configs
|
||||
assert len(all_configs) == 2
|
||||
|
||||
def test_service_add_services(self, session):
|
||||
# given
|
||||
ServiceManager.add_services(_SERVICES_PATH)
|
||||
node = session.add_node()
|
||||
total_service = len(node.services)
|
||||
|
||||
# when
|
||||
session.services.add_services(node, node.type, [SERVICE_ONE, SERVICE_TWO])
|
||||
|
||||
# then
|
||||
assert node.services
|
||||
assert len(node.services) == total_service + 2
|
||||
|
||||
def test_service_file(self, session):
|
||||
# given
|
||||
ServiceManager.add_services(_SERVICES_PATH)
|
||||
my_service = ServiceManager.get(SERVICE_ONE)
|
||||
node = session.add_node()
|
||||
file_name = my_service.configs[0]
|
||||
file_path = node.hostfilename(file_name)
|
||||
|
||||
# when
|
||||
session.services.create_service_files(node, my_service)
|
||||
|
||||
# then
|
||||
assert os.path.exists(file_path)
|
||||
|
||||
def test_service_validate(self, session):
|
||||
# given
|
||||
ServiceManager.add_services(_SERVICES_PATH)
|
||||
my_service = ServiceManager.get(SERVICE_ONE)
|
||||
node = session.add_node()
|
||||
session.services.create_service_files(node, my_service)
|
||||
|
||||
# when
|
||||
status = session.services.validate_service(node, my_service)
|
||||
|
||||
# then
|
||||
assert not status
|
||||
|
||||
def test_service_validate_error(self, session):
|
||||
# given
|
||||
ServiceManager.add_services(_SERVICES_PATH)
|
||||
my_service = ServiceManager.get(SERVICE_TWO)
|
||||
node = session.add_node()
|
||||
session.services.create_service_files(node, my_service)
|
||||
|
||||
# when
|
||||
status = session.services.validate_service(node, my_service)
|
||||
|
||||
# then
|
||||
assert status
|
||||
|
||||
def test_service_startup(self, session):
|
||||
# given
|
||||
ServiceManager.add_services(_SERVICES_PATH)
|
||||
my_service = ServiceManager.get(SERVICE_ONE)
|
||||
node = session.add_node()
|
||||
session.services.create_service_files(node, my_service)
|
||||
|
||||
# when
|
||||
status = session.services.startup_service(node, my_service, wait=True)
|
||||
|
||||
# then
|
||||
assert not status
|
||||
|
||||
def test_service_startup_error(self, session):
|
||||
# given
|
||||
ServiceManager.add_services(_SERVICES_PATH)
|
||||
my_service = ServiceManager.get(SERVICE_TWO)
|
||||
node = session.add_node()
|
||||
session.services.create_service_files(node, my_service)
|
||||
|
||||
# when
|
||||
status = session.services.startup_service(node, my_service, wait=True)
|
||||
|
||||
# then
|
||||
assert status
|
||||
|
||||
def test_service_stop(self, session):
|
||||
# given
|
||||
ServiceManager.add_services(_SERVICES_PATH)
|
||||
my_service = ServiceManager.get(SERVICE_ONE)
|
||||
node = session.add_node()
|
||||
session.services.create_service_files(node, my_service)
|
||||
|
||||
# when
|
||||
status = session.services.stop_service(node, my_service)
|
||||
|
||||
# then
|
||||
assert not status
|
||||
|
||||
def test_service_stop_error(self, session):
|
||||
# given
|
||||
ServiceManager.add_services(_SERVICES_PATH)
|
||||
my_service = ServiceManager.get(SERVICE_TWO)
|
||||
node = session.add_node()
|
||||
session.services.create_service_files(node, my_service)
|
||||
|
||||
# when
|
||||
status = session.services.stop_service(node, my_service)
|
||||
|
||||
# then
|
||||
assert status
|
||||
|
||||
def test_service_custom_startup(self, session):
|
||||
# given
|
||||
ServiceManager.add_services(_SERVICES_PATH)
|
||||
my_service = ServiceManager.get(SERVICE_ONE)
|
||||
node = session.add_node()
|
||||
|
||||
# when
|
||||
session.services.set_service(node.objid, my_service.name)
|
||||
custom_my_service = session.services.get_service(node.objid, my_service.name)
|
||||
custom_my_service.startup = ("sh custom.sh",)
|
||||
|
||||
# then
|
||||
assert my_service.startup != custom_my_service.startup
|
||||
|
||||
def test_service_set_file(self, session):
|
||||
# given
|
||||
ServiceManager.add_services(_SERVICES_PATH)
|
||||
my_service = ServiceManager.get(SERVICE_ONE)
|
||||
node_one = session.add_node()
|
||||
node_two = session.add_node()
|
||||
file_name = my_service.configs[0]
|
||||
file_data_one = "# custom file one"
|
||||
file_data_two = "# custom file two"
|
||||
session.services.set_service_file(node_one.objid, my_service.name, file_name, file_data_one)
|
||||
session.services.set_service_file(node_two.objid, my_service.name, file_name, file_data_two)
|
||||
|
||||
# when
|
||||
custom_service_one = session.services.get_service(node_one.objid, my_service.name)
|
||||
session.services.create_service_files(node_one, custom_service_one)
|
||||
custom_service_two = session.services.get_service(node_two.objid, my_service.name)
|
||||
session.services.create_service_files(node_two, custom_service_two)
|
||||
|
||||
# then
|
||||
file_path_one = node_one.hostfilename(file_name)
|
||||
assert os.path.exists(file_path_one)
|
||||
with open(file_path_one, "r") as custom_file:
|
||||
assert custom_file.read() == file_data_one
|
||||
|
||||
file_path_two = node_two.hostfilename(file_name)
|
||||
assert os.path.exists(file_path_two)
|
||||
with open(file_path_two, "r") as custom_file:
|
||||
assert custom_file.read() == file_data_two
|
||||
|
||||
def test_service_import(self):
|
||||
"""
|
||||
Test importing a custom service.
|
||||
"""
|
||||
ServiceManager.add_services(_SERVICES_PATH)
|
||||
assert ServiceManager.get(SERVICE_ONE)
|
||||
assert ServiceManager.get(SERVICE_TWO)
|
||||
|
||||
def test_service_setget(self, session):
|
||||
# given
|
||||
ServiceManager.add_services(_SERVICES_PATH)
|
||||
my_service = ServiceManager.get(SERVICE_ONE)
|
||||
node = session.add_node()
|
||||
|
||||
# when
|
||||
no_service = session.services.get_service(node.objid, SERVICE_ONE)
|
||||
default_service = session.services.get_service(node.objid, SERVICE_ONE, default_service=True)
|
||||
session.services.set_service(node.objid, SERVICE_ONE)
|
||||
custom_service = session.services.get_service(node.objid, SERVICE_ONE, default_service=True)
|
||||
|
||||
# then
|
||||
assert no_service is None
|
||||
assert default_service == my_service
|
||||
assert custom_service and custom_service != my_service
|
||||
|
||||
def test_services_dependencies(self):
|
||||
# given
|
||||
services = [
|
||||
ServiceA,
|
||||
ServiceB,
|
||||
ServiceC,
|
||||
ServiceD,
|
||||
ServiceF
|
||||
]
|
||||
|
||||
# when
|
||||
boot_paths = ServiceDependencies(services).boot_paths()
|
||||
|
||||
# then
|
||||
assert len(boot_paths) == 2
|
||||
|
||||
def test_services_dependencies_not_present(self):
|
||||
# given
|
||||
services = [
|
||||
ServiceA,
|
||||
ServiceB,
|
||||
ServiceC,
|
||||
ServiceD,
|
||||
ServiceF,
|
||||
ServiceBadDependency
|
||||
]
|
||||
|
||||
# when, then
|
||||
with pytest.raises(ValueError):
|
||||
ServiceDependencies(services).boot_paths()
|
||||
|
||||
def test_services_dependencies_cycle(self):
|
||||
# given
|
||||
service_d = ServiceD()
|
||||
service_d.dependencies = ("C",)
|
||||
services = [
|
||||
ServiceA,
|
||||
ServiceB,
|
||||
ServiceC,
|
||||
service_d,
|
||||
ServiceF
|
||||
]
|
||||
|
||||
# when, then
|
||||
with pytest.raises(ValueError):
|
||||
ServiceDependencies(services).boot_paths()
|
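The tests above load extra definitions from a myservices directory next to the test module. That directory is not shown in this part of the diff, but a minimal service of the kind ServiceManager.add_services() picks up could look roughly like the sketch below; treat the class body as illustrative, built only from the attributes these tests exercise (name, dependencies, configs, startup).

# myservices/sample.py - hypothetical custom service module
from core.service import CoreService


class MyService(CoreService):
    # unique name used by ServiceManager.get() and the per-node service config
    name = "MyService"
    # services that must boot before this one (drives ServiceDependencies)
    dependencies = ()
    # per-node files generated at startup; content can be overridden with
    # session.services.set_service_file(), as test_service_set_file does
    configs = ("myservice.sh",)
    # commands run when the node boots
    startup = ("sh myservice.sh",)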
22
daemon/tests/test_utils.py
Normal file
@ -0,0 +1,22 @@
from core.misc import utils


class TestUtils:
    def test_make_tuple_fromstr(self):
        # given
        no_args = "()"
        one_arg = "('one',)"
        two_args = "('one', 'two')"
        unicode_args = u"('one', 'two', 'three')"

        # when
        no_args = utils.make_tuple_fromstr(no_args, str)
        one_arg = utils.make_tuple_fromstr(one_arg, str)
        two_args = utils.make_tuple_fromstr(two_args, str)
        unicode_args = utils.make_tuple_fromstr(unicode_args, str)

        # then
        assert no_args == ()
        assert len(one_arg) == 1
        assert len(two_args) == 2
        assert len(unicode_args) == 3
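As a quick illustration of the helper exercised above (not part of the change; the second argument appears to be a per-element conversion callable):

from core.misc import utils

# parse a tuple-formatted string; each element is passed through str()
values = utils.make_tuple_fromstr("('10.0.0.1', '10.0.0.2')", str)
assert isinstance(values, tuple)
assert len(values) == 2

# an empty tuple string yields an empty tuple
assert utils.make_tuple_fromstr("()", str) == ()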
305
daemon/tests/test_xml.py
Normal file
@ -0,0 +1,305 @@
from xml.etree import ElementTree
|
||||
|
||||
import pytest
|
||||
|
||||
from core.emane.ieee80211abg import EmaneIeee80211abgModel
|
||||
from core.emulator.emudata import NodeOptions
|
||||
from core.enumerations import NodeTypes
|
||||
from core.mobility import BasicRangeModel
|
||||
from core.services.utility import SshService
|
||||
|
||||
_XML_VERSIONS = [
|
||||
"0.0",
|
||||
"1.0"
|
||||
]
|
||||
|
||||
|
||||
class TestXml:
|
||||
@pytest.mark.parametrize("version", _XML_VERSIONS)
|
||||
def test_xml_hooks(self, session, tmpdir, version):
|
||||
"""
|
||||
Test save/load hooks in xml.
|
||||
|
||||
:param session: session for test
|
||||
:param tmpdir: tmpdir to create data in
|
||||
:param str version: xml version to write and parse
|
||||
"""
|
||||
# create hook
|
||||
file_name = "runtime_hook.sh"
|
||||
data = "#!/bin/sh\necho hello"
|
||||
session.set_hook("hook:4", file_name, None, data)
|
||||
|
||||
# save xml
|
||||
xml_file = tmpdir.join("session.xml")
|
||||
file_path = xml_file.strpath
|
||||
session.save_xml(file_path, version)
|
||||
|
||||
# verify xml file was created and can be parsed
|
||||
assert xml_file.isfile()
|
||||
assert ElementTree.parse(file_path)
|
||||
|
||||
# stop current session, clearing data
|
||||
session.shutdown()
|
||||
|
||||
# load saved xml
|
||||
session.open_xml(file_path, start=True)
|
||||
|
||||
# verify hooks have been restored
|
||||
runtime_hooks = session._hooks.get(4)
|
||||
assert runtime_hooks
|
||||
runtime_hook = runtime_hooks[0]
|
||||
assert file_name == runtime_hook[0]
|
||||
assert data == runtime_hook[1]
|
||||
|
||||
@pytest.mark.parametrize("version", _XML_VERSIONS)
|
||||
def test_xml_ptp(self, session, tmpdir, version, ip_prefixes):
|
||||
"""
|
||||
Test xml client methods for a ptp network.
|
||||
|
||||
:param session: session for test
|
||||
:param tmpdir: tmpdir to create data in
|
||||
:param str version: xml version to write and parse
|
||||
:param ip_prefixes: generates ip addresses for nodes
|
||||
"""
|
||||
# create ptp
|
||||
ptp_node = session.add_node(_type=NodeTypes.PEER_TO_PEER)
|
||||
|
||||
# create nodes
|
||||
node_one = session.add_node()
|
||||
node_two = session.add_node()
|
||||
|
||||
# link nodes to ptp net
|
||||
for node in [node_one, node_two]:
|
||||
interface = ip_prefixes.create_interface(node)
|
||||
session.add_link(node.objid, ptp_node.objid, interface_one=interface)
|
||||
|
||||
# instantiate session
|
||||
session.instantiate()
|
||||
|
||||
# get ids for nodes
|
||||
n1_id = node_one.objid
|
||||
n2_id = node_two.objid
|
||||
|
||||
# save xml
|
||||
xml_file = tmpdir.join("session.xml")
|
||||
file_path = xml_file.strpath
|
||||
session.save_xml(file_path, version)
|
||||
|
||||
# verify xml file was created and can be parsed
|
||||
assert xml_file.isfile()
|
||||
assert ElementTree.parse(file_path)
|
||||
|
||||
# stop current session, clearing data
|
||||
session.shutdown()
|
||||
|
||||
# verify nodes have been removed from session
|
||||
with pytest.raises(KeyError):
|
||||
assert not session.get_object(n1_id)
|
||||
with pytest.raises(KeyError):
|
||||
assert not session.get_object(n2_id)
|
||||
|
||||
# load saved xml
|
||||
session.open_xml(file_path, start=True)
|
||||
|
||||
# verify nodes have been recreated
|
||||
assert session.get_object(n1_id)
|
||||
assert session.get_object(n2_id)
|
||||
|
||||
@pytest.mark.parametrize("version", _XML_VERSIONS)
|
||||
def test_xml_ptp_services(self, session, tmpdir, version, ip_prefixes):
|
||||
"""
|
||||
Test xml client methods for a ptp network.
|
||||
|
||||
:param session: session for test
|
||||
:param tmpdir: tmpdir to create data in
|
||||
:param str version: xml version to write and parse
|
||||
:param ip_prefixes: generates ip addresses for nodes
|
||||
"""
|
||||
# create ptp
|
||||
ptp_node = session.add_node(_type=NodeTypes.PEER_TO_PEER)
|
||||
|
||||
# create nodes
|
||||
node_options = NodeOptions(model="host")
|
||||
node_one = session.add_node(node_options=node_options)
|
||||
node_two = session.add_node()
|
||||
|
||||
# link nodes to ptp net
|
||||
for node in [node_one, node_two]:
|
||||
interface = ip_prefixes.create_interface(node)
|
||||
session.add_link(node.objid, ptp_node.objid, interface_one=interface)
|
||||
|
||||
# set custom values for node service
|
||||
session.services.set_service(node_one.objid, SshService.name)
|
||||
service_file = SshService.configs[0]
|
||||
file_data = "# test"
|
||||
session.services.set_service_file(node_one.objid, SshService.name, service_file, file_data)
|
||||
|
||||
# instantiate session
|
||||
session.instantiate()
|
||||
|
||||
# get ids for nodes
|
||||
n1_id = node_one.objid
|
||||
n2_id = node_two.objid
|
||||
|
||||
# save xml
|
||||
xml_file = tmpdir.join("session.xml")
|
||||
file_path = xml_file.strpath
|
||||
session.save_xml(file_path, version)
|
||||
|
||||
# verify xml file was created and can be parsed
|
||||
assert xml_file.isfile()
|
||||
assert ElementTree.parse(file_path)
|
||||
|
||||
# stop current session, clearing data
|
||||
session.shutdown()
|
||||
|
||||
# verify nodes have been removed from session
|
||||
with pytest.raises(KeyError):
|
||||
assert not session.get_object(n1_id)
|
||||
with pytest.raises(KeyError):
|
||||
assert not session.get_object(n2_id)
|
||||
|
||||
# load saved xml
|
||||
session.open_xml(file_path, start=True)
|
||||
|
||||
# retrieve custom service
|
||||
service = session.services.get_service(node_one.objid, SshService.name)
|
||||
|
||||
# verify nodes have been recreated
|
||||
assert session.get_object(n1_id)
|
||||
assert session.get_object(n2_id)
|
||||
assert service.config_data.get(service_file) == file_data
|
||||
|
||||
@pytest.mark.parametrize("version", _XML_VERSIONS)
|
||||
def test_xml_mobility(self, session, tmpdir, version, ip_prefixes):
|
||||
"""
|
||||
Test xml client methods for mobility.
|
||||
|
||||
:param session: session for test
|
||||
:param tmpdir: tmpdir to create data in
|
||||
:param str version: xml version to write and parse
|
||||
:param ip_prefixes: generates ip addresses for nodes
|
||||
"""
|
||||
# create wlan
|
||||
wlan_node = session.add_node(_type=NodeTypes.WIRELESS_LAN)
|
||||
session.mobility.set_model(wlan_node, BasicRangeModel, {"test": "1"})
|
||||
|
||||
# create nodes
|
||||
node_options = NodeOptions()
|
||||
node_options.set_position(0, 0)
|
||||
node_one = session.create_wireless_node(node_options=node_options)
|
||||
node_two = session.create_wireless_node(node_options=node_options)
|
||||
|
||||
# link nodes
|
||||
for node in [node_one, node_two]:
|
||||
interface = ip_prefixes.create_interface(node)
|
||||
session.add_link(node.objid, wlan_node.objid, interface_one=interface)
|
||||
|
||||
# link nodes in wlan
|
||||
session.wireless_link_all(wlan_node, [node_one, node_two])
|
||||
|
||||
# instantiate session
|
||||
session.instantiate()
|
||||
|
||||
# get ids for nodes
|
||||
wlan_id = wlan_node.objid
|
||||
n1_id = node_one.objid
|
||||
n2_id = node_two.objid
|
||||
|
||||
# save xml
|
||||
xml_file = tmpdir.join("session.xml")
|
||||
file_path = xml_file.strpath
|
||||
session.save_xml(file_path, version)
|
||||
|
||||
# verify xml file was created and can be parsed
|
||||
assert xml_file.isfile()
|
||||
assert ElementTree.parse(file_path)
|
||||
|
||||
# stop current session, clearing data
|
||||
session.shutdown()
|
||||
|
||||
# verify nodes have been removed from session
|
||||
with pytest.raises(KeyError):
|
||||
assert not session.get_object(n1_id)
|
||||
with pytest.raises(KeyError):
|
||||
assert not session.get_object(n2_id)
|
||||
|
||||
# load saved xml
|
||||
session.open_xml(file_path, start=True)
|
||||
|
||||
# retrieve configuration we set originally
|
||||
value = str(session.mobility.get_config("test", wlan_id, BasicRangeModel.name))
|
||||
|
||||
# verify nodes and configuration were restored
|
||||
assert session.get_object(n1_id)
|
||||
assert session.get_object(n2_id)
|
||||
assert session.get_object(wlan_id)
|
||||
assert value == "1"
|
||||
|
||||
@pytest.mark.parametrize("version", ["1.0"])
|
||||
def test_xml_emane(self, session, tmpdir, version, ip_prefixes):
|
||||
"""
|
||||
Test xml client methods for emane.
|
||||
|
||||
:param session: session for test
|
||||
:param tmpdir: tmpdir to create data in
|
||||
:param str version: xml version to write and parse
|
||||
:param ip_prefixes: generates ip addresses for nodes
|
||||
"""
|
||||
# create emane node for networking the core nodes
|
||||
emane_network = session.create_emane_network(
|
||||
EmaneIeee80211abgModel,
|
||||
geo_reference=(47.57917, -122.13232, 2.00000),
|
||||
config={"test": "1"}
|
||||
)
|
||||
emane_network.setposition(x=80, y=50)
|
||||
|
||||
# create nodes
|
||||
node_options = NodeOptions()
|
||||
node_options.set_position(150, 150)
|
||||
node_one = session.create_wireless_node(node_options=node_options)
|
||||
node_options.set_position(300, 150)
|
||||
node_two = session.create_wireless_node(node_options=node_options)
|
||||
|
||||
for i, node in enumerate([node_one, node_two]):
|
||||
node.setposition(x=150 * (i + 1), y=150)
|
||||
interface = ip_prefixes.create_interface(node)
|
||||
session.add_link(node.objid, emane_network.objid, interface_one=interface)
|
||||
|
||||
# instantiate session
|
||||
session.instantiate()
|
||||
|
||||
# get ids for nodes
|
||||
emane_id = emane_network.objid
|
||||
n1_id = node_one.objid
|
||||
n2_id = node_two.objid
|
||||
|
||||
# save xml
|
||||
xml_file = tmpdir.join("session.xml")
|
||||
file_path = xml_file.strpath
|
||||
session.save_xml(file_path, version)
|
||||
|
||||
# verify xml file was created and can be parsed
|
||||
assert xml_file.isfile()
|
||||
assert ElementTree.parse(file_path)
|
||||
|
||||
# stop current session, clearing data
|
||||
session.shutdown()
|
||||
|
||||
# verify nodes have been removed from session
|
||||
with pytest.raises(KeyError):
|
||||
assert not session.get_object(n1_id)
|
||||
with pytest.raises(KeyError):
|
||||
assert not session.get_object(n2_id)
|
||||
|
||||
# load saved xml
|
||||
session.open_xml(file_path, start=True)
|
||||
|
||||
# retrieve configuration we set originally
|
||||
value = str(session.emane.get_config("test", emane_id, EmaneIeee80211abgModel.name))
|
||||
|
||||
# verify nodes and configuration were restored
|
||||
assert session.get_object(n1_id)
|
||||
assert session.get_object(n2_id)
|
||||
assert session.get_object(emane_id)
|
||||
assert value == "1"
|
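Outside of pytest, the same save/load round trip these tests cover can be driven from a short script. The sketch below only strings together calls that appear above; the CoreEmu entry point is an assumption, and links/addressing are omitted for brevity.

from core.emulator.coreemu import CoreEmu  # assumed entry point

coreemu = CoreEmu()
session = coreemu.create_session()

# two plain nodes; see test_xml_ptp above for links and addressing
node_one = session.add_node()
node_two = session.add_node()
session.instantiate()

# write the scenario to disk, then restore it into the same session
session.save_xml("/tmp/session.xml", "1.0")
session.shutdown()
session.open_xml("/tmp/session.xml", start=True)

assert session.get_object(node_one.objid)
assert session.get_object(node_two.objid)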
2
doc/.gitignore
vendored
@ -1,2 +0,0 @@
_build
conf.py
163
doc/Makefile.am
@ -1,163 +0,0 @@
# CORE
|
||||
# (c)2009-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
# Builds html and pdf documentation using Sphinx.
|
||||
#
|
||||
|
||||
SUBDIRS = man figures
|
||||
|
||||
|
||||
# extra cruft to remove
|
||||
DISTCLEANFILES = Makefile.in stamp-vti
|
||||
|
||||
rst_files = conf.py.in constants.txt credits.rst ctrlnet.rst devguide.rst \
|
||||
emane.rst index.rst install.rst intro.rst machine.rst \
|
||||
ns3.rst performance.rst scripting.rst usage.rst requirements.txt
|
||||
|
||||
EXTRA_DIST = $(rst_files)
|
||||
|
||||
|
||||
###### below this line was generated using sphinx-quickstart ######
|
||||
|
||||
# Makefile for Sphinx documentation
|
||||
#
|
||||
|
||||
# You can set these variables from the command line.
|
||||
SPHINXOPTS = -q
|
||||
SPHINXBUILD = sphinx-build
|
||||
PAPER =
|
||||
BUILDDIR = _build
|
||||
STATICDIR = _static
|
||||
|
||||
# Internal variables.
|
||||
PAPEROPT_a4 = -D latex_paper_size=a4
|
||||
PAPEROPT_letter = -D latex_paper_size=letter
|
||||
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
|
||||
|
||||
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest figures-icons
|
||||
|
||||
help:
|
||||
@echo "Please use \`make <target>' where <target> is one of"
|
||||
@echo " html to make standalone HTML files"
|
||||
@echo " dirhtml to make HTML files named index.html in directories"
|
||||
@echo " singlehtml to make a single large HTML file"
|
||||
@echo " pickle to make pickle files"
|
||||
@echo " json to make JSON files"
|
||||
@echo " htmlhelp to make HTML files and a HTML help project"
|
||||
@echo " qthelp to make HTML files and a qthelp project"
|
||||
@echo " devhelp to make HTML files and a Devhelp project"
|
||||
@echo " epub to make an epub"
|
||||
@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
|
||||
@echo " latexpdf to make LaTeX files and run them through pdflatex"
|
||||
@echo " text to make text files"
|
||||
@echo " man to make manual pages"
|
||||
@echo " changes to make an overview of all changed/added/deprecated items"
|
||||
@echo " linkcheck to check all external links for integrity"
|
||||
@echo " doctest to run all doctests embedded in the documentation (if enabled)"
|
||||
|
||||
clean-local:
|
||||
-rm -rf $(BUILDDIR) $(STATICDIR)
|
||||
|
||||
html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest: figures-icons $(BUILDDIR) $(STATICDIR)
|
||||
|
||||
$(BUILDDIR) $(STATICDIR):
|
||||
$(MKDIR_P) $@
|
||||
|
||||
figures-icons:
|
||||
cd figures && make icons
|
||||
|
||||
html:
|
||||
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
|
||||
@echo
|
||||
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
|
||||
|
||||
dirhtml:
|
||||
$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
|
||||
@echo
|
||||
@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
|
||||
|
||||
singlehtml:
|
||||
$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
|
||||
@echo
|
||||
@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
|
||||
|
||||
pickle:
|
||||
$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
|
||||
@echo
|
||||
@echo "Build finished; now you can process the pickle files."
|
||||
|
||||
json:
|
||||
$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
|
||||
@echo
|
||||
@echo "Build finished; now you can process the JSON files."
|
||||
|
||||
htmlhelp:
|
||||
$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
|
||||
@echo
|
||||
@echo "Build finished; now you can run HTML Help Workshop with the" \
|
||||
".hhp project file in $(BUILDDIR)/htmlhelp."
|
||||
|
||||
qthelp:
|
||||
$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
|
||||
@echo
|
||||
@echo "Build finished; now you can run "qcollectiongenerator" with the" \
|
||||
".qhcp project file in $(BUILDDIR)/qthelp, like this:"
|
||||
@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/CORE.qhcp"
|
||||
@echo "To view the help file:"
|
||||
@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/CORE.qhc"
|
||||
|
||||
devhelp:
|
||||
$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
|
||||
@echo
|
||||
@echo "Build finished."
|
||||
@echo "To view the help file:"
|
||||
@echo "# mkdir -p $$HOME/.local/share/devhelp/CORE"
|
||||
@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/CORE"
|
||||
@echo "# devhelp"
|
||||
|
||||
epub:
|
||||
$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
|
||||
@echo
|
||||
@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
|
||||
|
||||
latex:
|
||||
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
|
||||
@echo
|
||||
@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
|
||||
@echo "Run \`make' in that directory to run these through (pdf)latex" \
|
||||
"(use \`make latexpdf' here to do that automatically)."
|
||||
|
||||
latexpdf:
|
||||
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
|
||||
@echo "Running LaTeX files through pdflatex..."
|
||||
make -C $(BUILDDIR)/latex all-pdf
|
||||
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
|
||||
|
||||
text:
|
||||
$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
|
||||
@echo
|
||||
@echo "Build finished. The text files are in $(BUILDDIR)/text."
|
||||
|
||||
man:
|
||||
$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
|
||||
@echo
|
||||
@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
|
||||
|
||||
changes:
|
||||
$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
|
||||
@echo
|
||||
@echo "The overview file is in $(BUILDDIR)/changes."
|
||||
|
||||
linkcheck:
|
||||
$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
|
||||
@echo
|
||||
@echo "Link check complete; look for any errors in the above output " \
|
||||
"or in $(BUILDDIR)/linkcheck/output.txt."
|
||||
|
||||
doctest:
|
||||
$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
|
||||
@echo "Testing of doctests in the sources finished, look at the " \
|
||||
"results in $(BUILDDIR)/doctest/output.txt."
|
257
doc/conf.py.in
@ -1,257 +0,0 @@
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# CORE documentation build configuration file, created by
|
||||
# sphinx-quickstart on Wed Jun 13 10:44:22 2012.
|
||||
#
|
||||
# This file is execfile()d with the current directory set to its containing dir.
|
||||
#
|
||||
# Note that not all possible configuration values are present in this
|
||||
# autogenerated file.
|
||||
#
|
||||
# All configuration values have a default; values that are commented out
|
||||
# serve to show the default.
|
||||
|
||||
import sys, os
|
||||
import sphinx_rtd_theme
|
||||
|
||||
# If extensions (or modules to document with autodoc) are in another directory,
|
||||
# add these directories to sys.path here. If the directory is relative to the
|
||||
# documentation root, use os.path.abspath to make it absolute, like shown here.
|
||||
#sys.path.insert(0, os.path.abspath('.'))
|
||||
|
||||
# -- General configuration -----------------------------------------------------
|
||||
|
||||
# If your documentation needs a minimal Sphinx version, state it here.
|
||||
#needs_sphinx = '1.0'
|
||||
|
||||
# Add any Sphinx extension module names here, as strings. They can be extensions
|
||||
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
|
||||
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.imgmath', 'sphinx.ext.ifconfig']
|
||||
|
||||
# Add any paths that contain templates here, relative to this directory.
|
||||
templates_path = ['_templates']
|
||||
|
||||
# The suffix of source filenames.
|
||||
source_suffix = '.rst'
|
||||
|
||||
# The encoding of source files.
|
||||
#source_encoding = 'utf-8-sig'
|
||||
|
||||
# The master toctree document.
|
||||
master_doc = 'index'
|
||||
|
||||
# General information about the project.
|
||||
project = u'CORE'
|
||||
copyright = u'2005-2018, core-dev'
|
||||
|
||||
# The version info for the project you're documenting, acts as replacement for
|
||||
# |version| and |release|, also used in various other places throughout the
|
||||
# built documents.
|
||||
#
|
||||
# The short X.Y version.
|
||||
version = '@PACKAGE_VERSION@'
|
||||
# The full version, including alpha/beta/rc tags.
|
||||
release = '@PACKAGE_VERSION@'
|
||||
|
||||
# The language for content autogenerated by Sphinx. Refer to documentation
|
||||
# for a list of supported languages.
|
||||
#language = None
|
||||
|
||||
# There are two options for replacing |today|: either, you set today to some
|
||||
# non-false value, then it is used:
|
||||
#today = ''
|
||||
# Else, today_fmt is used as the format for a strftime call.
|
||||
#today_fmt = '%B %d, %Y'
|
||||
|
||||
# List of patterns, relative to source directory, that match files and
|
||||
# directories to ignore when looking for source files.
|
||||
exclude_patterns = ['_build']
|
||||
|
||||
# The reST default role (used for this markup: `text`) to use for all documents.
|
||||
#default_role = None
|
||||
|
||||
# If true, '()' will be appended to :func: etc. cross-reference text.
|
||||
#add_function_parentheses = True
|
||||
|
||||
# If true, the current module name will be prepended to all description
|
||||
# unit titles (such as .. function::).
|
||||
#add_module_names = True
|
||||
|
||||
# If true, sectionauthor and moduleauthor directives will be shown in the
|
||||
# output. They are ignored by default.
|
||||
#show_authors = False
|
||||
|
||||
# The name of the Pygments (syntax highlighting) style to use.
|
||||
pygments_style = 'sphinx'
|
||||
|
||||
# A list of ignored prefixes for module index sorting.
|
||||
#modindex_common_prefix = []
|
||||
|
||||
|
||||
# -- Options for HTML output ---------------------------------------------------
|
||||
|
||||
# The theme to use for HTML and HTML Help pages. See the documentation for
|
||||
# a list of builtin themes.
|
||||
html_theme = 'sphinx_rtd_theme'
|
||||
|
||||
# Theme options are theme-specific and customize the look and feel of a theme
|
||||
# further. For a list of options available for each theme, see the
|
||||
# documentation.
|
||||
#html_theme_options = {}
|
||||
|
||||
# Add any paths that contain custom themes here, relative to this directory.
|
||||
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
|
||||
|
||||
# The name for this set of Sphinx documents. If None, it defaults to
|
||||
# "<project> v<release> documentation".
|
||||
#html_title = None
|
||||
|
||||
# A shorter title for the navigation bar. Default is the same as html_title.
|
||||
#html_short_title = None
|
||||
|
||||
# The name of an image file (relative to this directory) to place at the top
|
||||
# of the sidebar.
|
||||
#html_logo = None
|
||||
|
||||
# The name of an image file (within the static path) to use as favicon of the
|
||||
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
|
||||
# pixels large.
|
||||
#html_favicon = None
|
||||
|
||||
# Add any paths that contain custom static files (such as style sheets) here,
|
||||
# relative to this directory. They are copied after the builtin static files,
|
||||
# so a file named "default.css" will overwrite the builtin "default.css".
|
||||
html_static_path = ['_static']
|
||||
|
||||
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
|
||||
# using the given strftime format.
|
||||
#html_last_updated_fmt = '%b %d, %Y'
|
||||
|
||||
# If true, SmartyPants will be used to convert quotes and dashes to
|
||||
# typographically correct entities.
|
||||
#html_use_smartypants = True
|
||||
|
||||
# Custom sidebar templates, maps document names to template names.
|
||||
#html_sidebars = {}
|
||||
|
||||
# Additional templates that should be rendered to pages, maps page names to
|
||||
# template names.
|
||||
#html_additional_pages = {}
|
||||
|
||||
# If false, no module index is generated.
|
||||
#html_domain_indices = True
|
||||
|
||||
# If false, no index is generated.
|
||||
#html_use_index = True
|
||||
|
||||
# If true, the index is split into individual pages for each letter.
|
||||
#html_split_index = False
|
||||
|
||||
# If true, links to the reST sources are added to the pages.
|
||||
#html_show_sourcelink = True
|
||||
|
||||
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
|
||||
#html_show_sphinx = True
|
||||
|
||||
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
|
||||
#html_show_copyright = True
|
||||
|
||||
# If true, an OpenSearch description file will be output, and all pages will
|
||||
# contain a <link> tag referring to it. The value of this option must be the
|
||||
# base URL from which the finished HTML is served.
|
||||
#html_use_opensearch = ''
|
||||
|
||||
# This is the file name suffix for HTML files (e.g. ".xhtml").
|
||||
#html_file_suffix = None
|
||||
|
||||
# Output file base name for HTML help builder.
|
||||
htmlhelp_basename = 'COREdoc'
|
||||
|
||||
|
||||
# -- Options for LaTeX output --------------------------------------------------
|
||||
|
||||
# The paper size ('letter' or 'a4').
|
||||
#latex_paper_size = 'letter'
|
||||
|
||||
# The font size ('10pt', '11pt' or '12pt').
|
||||
#latex_font_size = '10pt'
|
||||
|
||||
# Grouping the document tree into LaTeX files. List of tuples
|
||||
# (source start file, target name, title, author, documentclass [howto/manual]).
|
||||
latex_documents = [
|
||||
('index', 'CORE.tex', u'CORE Documentation',
|
||||
u'core-dev', 'manual'),
|
||||
]
|
||||
|
||||
# The name of an image file (relative to this directory) to place at the top of
|
||||
# the title page.
|
||||
#latex_logo = None
|
||||
|
||||
# For "manual" documents, if this is true, then toplevel headings are parts,
|
||||
# not chapters.
|
||||
#latex_use_parts = False
|
||||
|
||||
# If true, show page references after internal links.
|
||||
#latex_show_pagerefs = False
|
||||
|
||||
# If true, show URL addresses after external links.
|
||||
#latex_show_urls = False
|
||||
|
||||
# Additional stuff for the LaTeX preamble.
|
||||
#latex_preamble = ''
|
||||
|
||||
# Documents to append as an appendix to all manuals.
|
||||
#latex_appendices = []
|
||||
|
||||
# If false, no module index is generated.
|
||||
#latex_domain_indices = True
|
||||
|
||||
|
||||
# -- Options for manual page output --------------------------------------------
|
||||
|
||||
# One entry per manual page. List of tuples
|
||||
# (source start file, name, description, authors, manual section).
|
||||
man_pages = [
|
||||
('index', 'core', u'CORE Documentation',
|
||||
[u'core-dev'], 1)
|
||||
]
|
||||
|
||||
|
||||
# -- Options for Epub output ---------------------------------------------------
|
||||
|
||||
# Bibliographic Dublin Core info.
|
||||
epub_title = u'CORE'
|
||||
epub_author = u'core-dev'
|
||||
epub_publisher = u'core-dev'
|
||||
epub_copyright = u'2005-2018, core-dev'
|
||||
|
||||
# The language of the text. It defaults to the language option
|
||||
# or en if the language is not set.
|
||||
#epub_language = ''
|
||||
|
||||
# The scheme of the identifier. Typical schemes are ISBN or URL.
|
||||
#epub_scheme = ''
|
||||
|
||||
# The unique identifier of the text. This can be a ISBN number
|
||||
# or the project homepage.
|
||||
#epub_identifier = ''
|
||||
|
||||
# A unique identification for the text.
|
||||
#epub_uid = ''
|
||||
|
||||
# HTML files that should be inserted before the pages created by sphinx.
|
||||
# The format is a list of tuples containing the path and title.
|
||||
#epub_pre_files = []
|
||||
|
||||
# HTML files that should be inserted after the pages created by sphinx.
|
||||
# The format is a list of tuples containing the path and title.
|
||||
#epub_post_files = []
|
||||
|
||||
# A list of files that should not be packed into the epub file.
|
||||
#epub_exclude_files = []
|
||||
|
||||
# The depth of the table of contents in toc.ncx.
|
||||
#epub_tocdepth = 3
|
||||
|
||||
# Allow duplicate toc entries.
|
||||
#epub_tocdup = True
|
|
@ -1,23 +0,0 @@
.. |UBUNTUVERSION| replace:: 12.04 or 14.04

.. |FEDORAVERSION| replace:: 19 or 20

.. |CENTOSVERSION| replace:: 6.x or 7.x

.. |CORERPM| replace:: 1.fc20.x86_64.rpm
.. |CORERPM2| replace:: 1.fc20.noarch.rpm
.. |COREDEB| replace:: 0ubuntu1_precise_amd64.deb
.. |COREDEB2| replace:: 0ubuntu1_precise_all.deb

.. |QVER| replace:: quagga-0.99.21mr2.2
.. |QVERDEB| replace:: quagga-mr_0.99.21mr2.2_amd64.deb
.. |QVERRPM| replace:: quagga-0.99.21mr2.2-1.fc16.x86_64.rpm

.. |APTDEPS| replace:: bash bridge-utils ebtables iproute libev-dev python
.. |APTDEPS2| replace:: tcl8.5 tk8.5 libtk-img
.. |APTDEPS3| replace:: autoconf automake gcc libev-dev make python-dev libreadline-dev pkg-config imagemagick help2man

.. |YUMDEPS| replace:: bash bridge-utils ebtables iproute libev python procps-ng net-tools
.. |YUMDEPS2| replace:: tcl tk tkimg
.. |YUMDEPS3| replace:: autoconf automake make libev-devel python-devel ImageMagick help2man

@ -1,26 +0,0 @@
.. This file is part of the CORE Manual
   (c)2012 the Boeing Company

.. _Acknowledgements:

***************
Acknowledgments
***************

The CORE project was derived from the open source IMUNES project from the
University of Zagreb in 2004. In 2006, changes for CORE were released back to
that project, some items of which were adopted. Marko Zec <zec@fer.hr> is the
primary developer from the University of Zagreb responsible for the IMUNES
(GUI) and VirtNet (kernel) projects. Ana Kukec and Miljenko Mikuc are known
contributors.

Jeff Ahrenholz has been the primary Boeing
developer of CORE, and has written this manual. Tom Goff
designed the Python framework and has made significant
contributions. Claudiu Danilov, Rod Santiago,
Kevin Larson, Gary Pei, Phil Spagnolo, and Ian Chakeres
have contributed code to CORE. Dan Mackley helped
develop the CORE API, originally to interface with a simulator.
Jae Kim and Tom Henderson
have supervised the project and provided direction.

193
doc/ctrlnet.rst
@ -1,193 +0,0 @@
.. This file is part of the CORE Manual
   (c)2015 the Boeing Company

.. _Control_Network:

***************
Control Network
***************
.. index:: controlnet

.. index:: control network

.. index:: X11 applications

.. index:: node access to the host

.. index:: host access to a node

The CORE control network allows the virtual nodes to communicate with their host environment.
There are two types: the primary control network and auxiliary control networks. The primary
control network is used mainly for communicating with the virtual nodes from host machines
and for master-slave communications in a multi-server distributed environment. Auxiliary control networks
have been introduced for routing namespace-hosted emulation software traffic
to the test network.

.. _Activating_the_Primary_Control_Network:

Activating the Primary Control Network
======================================

Under the :ref:`Session_Menu`, the *Options...* dialog has an option to set
a *control network prefix*.

This can be set to a network prefix such as
``172.16.0.0/24``. A bridge will be created on the host machine having the last
address in the prefix range (e.g. ``172.16.0.254``), and each node will have
an extra ``ctrl0`` control interface configured with an address corresponding
to its node number (e.g. ``172.16.0.3`` for ``n3``.)

A default for the primary control network may also
be specified by setting the ``controlnet`` line in the
:file:`/etc/core/core.conf` configuration file, which new
sessions will use by default. To simultaneously run multiple sessions with control networks, the session
option should be used instead of the :file:`core.conf` default.

.. NOTE::
   If you have a large scenario with more than 253 nodes, use a control
   network prefix that allows more than the suggested ``/24``, such as ``/23``
   or greater.

.. IMPORTANT::
   Running a session with a control network can fail if a previous session has set up a control network and its bridge is still up.
   Close the previous session first or wait for it to complete. If unable to, the ``core-daemon`` may need to be restarted and the lingering
   bridge(s) removed manually:

   ::

     # Restart the CORE Daemon
     sudo /etc/init.d/core-daemon restart

     # Remove lingering control network bridges
     ctrlbridges=`brctl show | grep b.ctrl | awk '{print $1}'`
     for cb in $ctrlbridges; do
       sudo ifconfig $cb down
       sudo brctl delbr $cb
     done

.. TIP::
   If adjustments to the primary control network configuration made in :file:`/etc/core/core.conf` do not seem
   to take effect, check if there is anything set in the :ref:`Session_Menu`, the *Options...* dialog. They may
   need to be cleared. These per-session settings override the defaults in :file:`/etc/core/core.conf`.

.. _Distributed_Control_Network:

Control Network in Distributed Sessions
=======================================

.. index:: distributed control network

.. index:: control network distributed

When the primary control network is activated for a distributed session,
a control network bridge will be created on each of the slave servers, with GRE tunnels back
to the master server's bridge. The slave control bridges are not assigned an
address. From the host, any of the nodes (local or remote) can be accessed,
just like the single server case.

In some situations, remote emulated nodes need to communicate with the
host on which they are running and not the master server.
Multiple control network prefixes can be specified in either the session option
or :file:`/etc/core/core.conf`, separated by spaces and beginning with the master server.
Each entry has the form "``server:prefix``". For example, if the servers *core1*, *core2*, and *core3*
are assigned with nodes in the scenario and using :file:`/etc/core/core.conf` instead of
the session option:

::

  controlnet=core1:172.16.1.0/24 core2:172.16.2.0/24 core3:172.16.3.0/24

then, the control network bridges will be assigned as follows:
*core1* = ``172.16.1.254`` (assuming it is the master server),
*core2* = ``172.16.2.254``, and *core3* = ``172.16.3.254``.

Tunnels back to the master server will still be built, but it is up to the
user to add appropriate routes if networking between control network
prefixes is desired. The control network script may help with this.


Control Network Script
^^^^^^^^^^^^^^^^^^^^^^

.. index:: control network scripts

.. index:: controlnet_updown_script

A control network script may be specified using the ``controlnet_updown_script``
option in the :file:`/etc/core/core.conf` file. This script will be run after
the bridge has been built (and address assigned) with the first argument being
the name of the bridge, and the second argument being the keyword "``startup``".
The script will again be invoked prior to bridge removal with the second
argument being the keyword "``shutdown``".

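For example, a minimal update script might install a route toward another control network when the bridge comes up and remove it on shutdown. The following sketch is purely illustrative (the prefix is a placeholder) and happens to use Python, though any executable script will work:

::

  #!/usr/bin/env python
  # illustrative controlnet_updown_script: argv[1] is the bridge name,
  # argv[2] is either "startup" or "shutdown"
  import subprocess
  import sys

  bridge, action = sys.argv[1], sys.argv[2]
  verb = "add" if action == "startup" else "del"
  # placeholder prefix for a peer control network reachable via this bridge
  subprocess.call(["ip", "route", verb, "172.16.2.0/24", "dev", bridge])
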
Auxiliary Control Networks
==========================

.. index:: auxiliary control networks

.. index:: auxiliary_controlnet

Starting with EMANE 0.9.2, CORE will run EMANE instances within namespaces.
Since it is advisable to separate the OTA traffic from other traffic
(See :ref:`Distributed_EMANE`), we will need more than a single channel
leading out from the namespace. Up to three auxiliary control networks may be defined.
Multiple control networks are set up in the :file:`/etc/core/core.conf` file.
Lines ``controlnet1``, ``controlnet2`` and ``controlnet3`` define the auxiliary
networks. The format of the values assigned to the controlnets is the same as in
:ref:`Distributed_Control_Network`. For example, having the following
lines in :file:`/etc/core/core.conf`:

::

  controlnet = core1:172.17.1.0/24 core2:172.17.2.0/24 core3:172.17.3.0/24
  controlnet1 = core1:172.18.1.0/24 core2:172.18.2.0/24 core3:172.18.3.0/24
  controlnet2 = core1:172.19.1.0/24 core2:172.19.2.0/24 core3:172.19.3.0/24

will activate the primary and two auxiliary control networks and
add interfaces ``ctrl0``, ``ctrl1``, ``ctrl2`` to each node. One use case would
be to assign ``ctrl1`` to the OTA manager device and ``ctrl2`` to the Event Service device
in the EMANE Options dialog box and leave ``ctrl0`` for CORE control traffic.

.. NOTE::
   ``controlnet0`` may be used in place of ``controlnet`` to configure the primary control network.

Unlike the primary control network, the auxiliary control networks will not employ
tunneling since their primary purpose is for efficiently transporting multicast EMANE OTA and
event traffic. Note that there is no per-session configuration for auxiliary control networks.

To extend the auxiliary control networks across a distributed test environment,
host network interfaces need to be added to them. The following lines in
:file:`/etc/core/core.conf` will add host devices ``eth1``, ``eth2`` and
``eth3`` to ``controlnet1``, ``controlnet2``, ``controlnet3``:

::

  controlnetif1 = eth1
  controlnetif2 = eth2
  controlnetif3 = eth3

.. NOTE::
   There is no need to assign an interface to the primary control network
   because tunnels are formed between the master and the slaves using IP
   addresses that are provided in ``servers.conf``. (See :ref:`Distributed_Emulation`.)

Shown below is a representative diagram of the configuration above.


.. _example_control_network:

.. figure:: figures/controlnetwork.*
   :alt: Control Network Diagram
   :align: center
   :scale: 75%


   Example Control Network

180
doc/devguide.rst
@ -1,180 +0,0 @@
.. This file is part of the CORE Manual
|
||||
(c)2012-2013 the Boeing Company
|
||||
|
||||
.. _Developer's_Guide:
|
||||
|
||||
*****************
|
||||
Developer's Guide
|
||||
*****************
|
||||
|
||||
This section contains advanced usage information, intended for developers and
|
||||
others who are comfortable with the command line.
|
||||
|
||||
.. _Coding_Standard:
|
||||
|
||||
Coding Standard
|
||||
===============
|
||||
|
||||
The coding standard and style guide for the CORE project are maintained online.
|
||||
Please refer to the `coding standard
|
||||
<http://code.google.com/p/coreemu/wiki/Hacking>`_ posted on the CORE Wiki.
|
||||
|
||||
.. _Source_Code_Guide:
|
||||
|
||||
Source Code Guide
|
||||
=================
|
||||
|
||||
The CORE source consists of several different programming languages for
|
||||
historical reasons. Current development focuses on the Python modules and
|
||||
daemon. Here is a brief description of the source directories.
|
||||
|
||||
These are being actively developed as of CORE |version|:
|
||||
|
||||
* *gui* - Tcl/Tk GUI. This uses Tcl/Tk because of its roots with the IMUNES
|
||||
project.
|
||||
* *daemon* - Python modules are found in the :file:`daemon/core` directory, the
|
||||
daemon under :file:`daemon/scripts/core-daemon`
|
||||
* *netns* - Python extension modules for Linux Network Namespace support are in :file:`netns`.
|
||||
* *ns3* - Python ns3 script support for running CORE.
|
||||
* *doc* - Documentation for the manual lives here in reStructuredText format.
|
||||
|
||||
.. _The_CORE_API:
|
||||
|
||||
The CORE API
|
||||
============
|
||||
|
||||
.. index:: CORE; API
|
||||
|
||||
.. index:: API
|
||||
|
||||
.. index:: remote API
|
||||
|
||||
The CORE API is used between different components of CORE for communication.
|
||||
The GUI communicates with the CORE daemon using the API. One emulation server
|
||||
communicates with another using the API. The API also allows other systems to
|
||||
interact with the CORE emulation. The API allows another system to add, remove,
|
||||
or modify nodes and links, and enables executing commands on the emulated
|
||||
systems. Wireless link parameters are updated on-the-fly based on node
|
||||
positions.
|
||||
|
||||
CORE listens on a local TCP port for API messages. The other system could be
|
||||
software running locally or another machine accessible across the network.
|
||||
|
||||
The CORE API is currently specified in a separate document, available from the
|
||||
CORE website.
|
||||
|
||||
.. _Linux_network_namespace_Commands:
|
||||
|
||||
Linux network namespace Commands
|
||||
================================
|
||||
|
||||
.. index:: lxctools
|
||||
|
||||
Linux network namespace containers are often managed using the *Linux Container
|
||||
Tools* or *lxc-tools* package. The lxc-tools website is available here
|
||||
`<http://lxc.sourceforge.net/>`_ for more information. CORE does not use these
|
||||
management utilities, but includes its own set of tools for instantiating and
|
||||
configuring network namespace containers. This section describes these tools.
|
||||
|
||||
.. index:: vnoded
|
||||
|
||||
The *vnoded* daemon is the program used to create a new namespace, and
|
||||
listen on a control channel for commands that may instantiate other processes.
|
||||
This daemon runs as PID 1 in the container. It is launched automatically by
|
||||
the CORE daemon. The control channel is a UNIX domain socket usually named
|
||||
:file:`/tmp/pycore.23098/n3`, for node 3 running on CORE
|
||||
session 23098, for example. Root privileges are required for creating a new
|
||||
namespace.
|
||||
|
||||
.. index:: vcmd
|
||||
|
||||
The *vcmd* program is used to connect to the *vnoded* daemon in a Linux network
|
||||
namespace, for running commands in the namespace. The CORE daemon
|
||||
uses the same channel for setting up a node and running processes within it.
|
||||
This program has two
|
||||
required arguments, the control channel name, and the command line to be run
|
||||
within the namespace. This command does not need to run with root privileges.
|
||||
|
||||
When you double-click
|
||||
on a node in a running emulation, CORE will open a shell window for that node
|
||||
using a command such as:
|
||||
::
|
||||
|
||||
gnome-terminal -e vcmd -c /tmp/pycore.50160/n1 -- bash
|
||||
|
||||
|
||||
Similarly, the IPv4 routes Observer Widget will run a command to display the routing table using a command such as:
|
||||
::
|
||||
|
||||
vcmd -c /tmp/pycore.50160/n1 -- /sbin/ip -4 ro
|
||||
|
||||
|
||||
.. index:: core-cleanup
|
||||
|
||||
A script named *core-cleanup* is provided to clean up any running CORE
|
||||
emulations. It will attempt to kill any remaining vnoded processes, kill any
|
||||
EMANE processes, remove the :file:`/tmp/pycore.*` session directories, and
|
||||
remove any bridges or *ebtables* rules. With a *-d* option, it will also kill
|
||||
any running CORE daemon.
|
||||
|
||||
.. index:: netns
|
||||
|
||||
The *netns* command is not used by CORE directly. This utility can be used to
|
||||
run a command in a new network namespace for testing purposes. It does not open
|
||||
a control channel for receiving further commands.
|
||||
|
||||
Here are some other Linux commands that are useful for managing the Linux
|
||||
network namespace emulation.
|
||||
::
|
||||
|
||||
# view the Linux bridging setup
|
||||
brctl show
|
||||
# view the netem rules used for applying link effects
|
||||
tc qdisc show
|
||||
# view the rules that make the wireless LAN work
|
||||
ebtables -L
|
||||
|
||||
|
||||
Below is a transcript of creating two emulated nodes and connecting them together with a wired link:
|
||||
|
||||
.. index:: create nodes from command-line
|
||||
|
||||
.. index:: command-line
|
||||
|
||||
::
|
||||
|
||||
# create node 1 namespace container
|
||||
vnoded -c /tmp/n1.ctl -l /tmp/n1.log -p /tmp/n1.pid
|
||||
# create a virtual Ethernet (veth) pair, installing one end into node 1
|
||||
ip link add name n1.0.1 type veth peer name n1.0
|
||||
ip link set n1.0 netns `cat /tmp/n1.pid`
|
||||
vcmd -c /tmp/n1.ctl -- ip link set lo up
|
||||
vcmd -c /tmp/n1.ctl -- ip link set n1.0 name eth0 up
|
||||
vcmd -c /tmp/n1.ctl -- ip addr add 10.0.0.1/24 dev eth0
|
||||
|
||||
# create node 2 namespace container
|
||||
vnoded -c /tmp/n2.ctl -l /tmp/n2.log -p /tmp/n2.pid
|
||||
# create a virtual Ethernet (veth) pair, installing one end into node 2
|
||||
ip link add name n2.0.1 type veth peer name n2.0
|
||||
ip link set n2.0 netns `cat /tmp/n2.pid`
|
||||
vcmd -c /tmp/n2.ctl -- ip link set lo up
|
||||
vcmd -c /tmp/n2.ctl -- ip link set n2.0 name eth0 up
|
||||
vcmd -c /tmp/n2.ctl -- ip addr add 10.0.0.2/24 dev eth0
|
||||
|
||||
# bridge together nodes 1 and 2 using the other end of each veth pair
|
||||
brctl addbr b.1.1
|
||||
brctl setfd b.1.1 0
|
||||
brctl addif b.1.1 n1.0.1
|
||||
brctl addif b.1.1 n2.0.1
|
||||
ip link set n1.0.1 up
|
||||
ip link set n2.0.1 up
|
||||
ip link set b.1.1 up
|
||||
|
||||
# display connectivity and ping from node 1 to node 2
|
||||
brctl show
|
||||
vcmd -c /tmp/n1.ctl -- ping 10.0.0.2
|
||||
|
||||
|
||||
The above example script can be found as :file:`twonodes.sh` in the
|
||||
:file:`examples/netns` directory. Use *core-cleanup* to clean up after the
|
||||
script.
|
359
doc/emane.rst
@ -1,359 +0,0 @@
.. This file is part of the CORE Manual
   (c)2012 the Boeing Company

.. _EMANE:

*****
EMANE
*****

.. index:: EMANE

This chapter describes running CORE with the EMANE emulator.

.. _What_is_EMANE?:

What is EMANE?
==============

.. index:: EMANE; introduction to

The Extendable Mobile Ad-hoc Network Emulator (EMANE) allows heterogeneous
network emulation using a pluggable MAC and PHY layer architecture. The EMANE
framework provides an implementation architecture for modeling different radio
interface types in the form of *Network Emulation Modules* (NEMs) and
incorporating these modules into a real-time emulation running in a distributed
environment.

EMANE is developed by U.S. Naval Research Labs (NRL) Code 5522 and Adjacent
Link LLC, who maintain these websites:

* `<http://www.nrl.navy.mil/itd/ncs/products/emane>`_
* `<http://www.adjacentlink.com/>`_

Instead of building Linux Ethernet bridging networks with CORE, higher-fidelity
wireless networks can be emulated using EMANE bound to virtual devices. CORE
emulates layers 3 and above (network, session, application) with its virtual
network stacks and process space for protocols and applications, while EMANE
emulates layers 1 and 2 (physical and data link) using its pluggable PHY and
MAC models.

The interface between CORE and EMANE is a TAP device. CORE builds the virtual
node using Linux network namespaces, installs the TAP device into the
namespace and instantiates one EMANE process in the namespace.
The EMANE process binds a user space socket to the TAP device for
sending and receiving data from CORE.


.. NOTE::
   When the installed EMANE version is older than 0.9.2, EMANE runs on the host
   and binds a userspace socket to the TAP device, before it is pushed into the
   namespace, for sending and receiving data. The *Virtual Transport* was
   the EMANE component responsible for connecting with the TAP device.

An EMANE instance sends and receives OTA traffic to and from other
EMANE instances via a control port (e.g. ``ctrl0``, ``ctrl1``).
It also sends and receives Events to and from the Event Service using
the same or a different control port.
EMANE models are configured through CORE's WLAN configuration dialog. A
corresponding EmaneModel Python class is sub-classed for each supported EMANE
model, to provide configuration items and their mapping to XML files. This way
new models can be easily supported. When CORE starts the emulation, it
generates the appropriate XML files that specify the EMANE NEM configuration,
and launches the EMANE daemons.

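As a concrete example of driving such a model from the Python API, the sketch below is assembled from the calls used in the new :file:`daemon/tests/test_xml.py`; the ``CoreEmu`` entry point is an assumption and the model parameter is arbitrary:

::

  from core.emane.ieee80211abg import EmaneIeee80211abgModel
  from core.emulator.coreemu import CoreEmu  # assumed entry point

  coreemu = CoreEmu()
  session = coreemu.create_session()

  # EMANE network backed by the 802.11abg model, as in test_xml_emane
  emane_network = session.create_emane_network(
      EmaneIeee80211abgModel,
      geo_reference=(47.57917, -122.13232, 2.0),
      config={"test": "1"},  # arbitrary model parameter, as in the test
  )
  node = session.create_wireless_node()
  # interfaces and links omitted; see test_xml.py for the full sequence
  session.instantiate()
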
Some EMANE models support location information to determine when packets should
|
||||
be dropped. EMANE has an event system where location events are broadcast to
|
||||
all NEMs. CORE can generate these location events when nodes are moved on the
|
||||
canvas. The canvas size and scale dialog has controls for mapping the X,Y
|
||||
coordinate system to a latitude, longitude geographic system that EMANE uses.
|
||||
When specified in the :file:`core.conf` configuration file, CORE can also
|
||||
subscribe to EMANE location events and move the nodes on the canvas as they are
|
||||
moved in the EMANE emulation. This would occur when an Emulation Script
|
||||
Generator, for example, is running a mobility script.
|
||||
|
||||
.. index:: EMANE; Configuration
|
||||
|
||||
.. index:: EMANE; Installation
|
||||
|
||||
.. _EMANE_Configuration:
|
||||
|
||||
EMANE Configuration
|
||||
===================
|
||||
|
||||
|
||||
CORE and EMANE currently work together only on the Linux network namespaces
|
||||
platform. The normal CORE installation instructions should be followed from
|
||||
:ref:`Installation`.
|
||||
|
||||
The CORE configuration file :file:`/etc/core/core.conf` has options specific to
|
||||
EMANE. Namely, the `emane_models` line contains a comma-separated list of EMANE
|
||||
models that will be available. Each model has a corresponding Python file
|
||||
containing the *EmaneModel* subclass. A portion of the default
|
||||
:file:`core.conf` file is shown below:
|
||||
|
||||
::
|
||||
|
||||
# EMANE configuration
|
||||
emane_platform_port = 8101
|
||||
emane_transform_port = 8201
|
||||
emane_event_monitor = False
|
||||
emane_models = RfPipe, Ieee80211abg
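
The daemon reads :file:`core.conf` at startup, so changes to the
``emane_models`` list take effect after the CORE daemon is restarted. A minimal
sketch (SysV init shown; use the systemd unit on systemd-based systems):

::

    sudo /etc/init.d/core-daemon restart
    # or, with systemd:
    sudo systemctl restart core-daemon.service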

EMANE can be installed from deb or RPM packages or from source. See the
`EMANE website <http://www.nrl.navy.mil/itd/ncs/products/emane>`_ for
full details.

Here are quick instructions for installing all EMANE packages:

::

    # install dependencies
    sudo apt-get install libssl-dev libxml-libxml-perl libxml-simple-perl
    # download and install EMANE 0.8.1
    export URL=http://downloads.pf.itd.nrl.navy.mil/emane/0.8.1-r2
    wget $URL/emane-0.8.1-release-2.ubuntu-12_04.amd64.tgz
    tar xzf emane-0.8.1-release-2.ubuntu-12_04.amd64.tgz
    sudo dpkg -i emane-0.8.1-release-2/deb/ubuntu-12_04/amd64/*.deb

If you have an EMANE event generator (e.g. mobility or pathloss scripts) and
want to have CORE subscribe to EMANE location events, set the following line in
the :file:`/etc/core/core.conf` configuration file:

::

    emane_event_monitor = True

Do not set the above option to True if you want to manually drag nodes around
on the canvas to update their location in EMANE.

Another common issue is that, when installing EMANE from source, the default
configure prefix will place the DTD files in
:file:`/usr/local/share/emane/dtd` while CORE expects them in
:file:`/usr/share/emane/dtd`. A symbolic link will fix this:

::

    sudo ln -s /usr/local/share/emane /usr/share/emane


.. _Single_PC_with_EMANE:

Single PC with EMANE
====================

This section describes running CORE and EMANE on a single machine. This is the
default mode of operation when building an EMANE network with CORE. The OTA
manager and Event Service interfaces are set to use ``ctrl0`` and the virtual
nodes use the primary control channel for communicating with one another. The
primary control channel is automatically activated when a scenario involves
EMANE. Using the primary control channel prevents your emulation session from
sending multicast traffic on your local network and interfering with other
EMANE users.

.. NOTE::
   When the installed EMANE version is earlier than 0.9.2, the OTA manager and
   Event Service interfaces are set to use the loopback device.

EMANE is configured through a WLAN node, because it is all about emulating
wireless radio networks. Once a node is linked to a WLAN cloud configured with
an EMANE model, the radio interface on that node may also be configured
separately (apart from the cloud).

Double-click on a WLAN node to invoke the WLAN configuration dialog. Click the
*EMANE* tab; when EMANE has been properly installed, EMANE wireless modules
should be listed in the *EMANE Models* list. (You may need to restart the CORE
daemon if it was running prior to installing the EMANE Python bindings.)
Click on a model name to enable it.

When an EMANE model is selected in the *EMANE Models* list, clicking on
the *model options* button causes the GUI to query the CORE daemon for
configuration items. Each model will have different parameters; refer to the
EMANE documentation for an explanation of each item. The default values are
presented in the dialog. Clicking *Apply* (and *Apply* again in the WLAN
dialog) will store the EMANE model selections.

The *EMANE options* button allows specifying some global parameters for EMANE,
some of which are necessary for distributed operation; see
:ref:`Distributed_EMANE`.

.. index:: RF-PIPE model

.. index:: 802.11 model

.. index:: ieee80211abg model

.. index:: geographic location

.. index:: Universal PHY

The RF-PIPE and IEEE 802.11abg models use a Universal PHY that supports
geographic location information for determining pathloss between nodes. A
default latitude and longitude location is provided by CORE and this
location-based pathloss is enabled by default; this is the *pathloss mode*
setting for the Universal PHY. Moving a node on the canvas while the emulation
is running generates location events for EMANE. To view or change the
geographic location or scale of the canvas, use the *Canvas Size and Scale*
dialog available from the *Canvas* menu.

.. index:: UTM zones

.. index:: UTM projection

Note that conversion between geographic and Cartesian coordinate systems is
done using the UTM (Universal Transverse Mercator) projection, where different
zones of 6 degree longitude bands are defined. The location events generated by
CORE may become inaccurate near the zone boundaries for very large scenarios
that span multiple UTM zones. It is recommended that EMANE location scripts
be used to achieve geo-location accuracy in this situation.
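
To get a feel for how the UTM mapping behaves, the snippet below converts
between geographic and Cartesian coordinates using the third-party ``pyproj``
package. This is only an illustration of the projection itself, not the code
CORE uses internally, and the zone and coordinates are arbitrary:

::

    from pyproj import Proj

    # UTM zone 18 covers a 6-degree band of longitude (roughly 78W to 72W)
    utm = Proj(proj="utm", zone=18, ellps="WGS84")

    # geographic (lon, lat) -> Cartesian easting/northing in meters
    x, y = utm(-77.0, 40.0)

    # move 5 km east in Cartesian space and convert back to geographic
    lon, lat = utm(x + 5000.0, y, inverse=True)
    print(x, y, lon, lat)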

Clicking the green *Start* button launches the emulation and causes TAP
devices to be created in the virtual nodes that are linked to the EMANE WLAN.
These devices appear with interface names such as eth0, eth1, etc. The EMANE
processes should now be running in each namespace. For a four-node scenario:

::

    > ps -aef | grep emane
    root 1063 969 0 11:46 ? 00:00:00 emane -d --logl 3 -r -f /tmp/pycore.59992/emane4.log /tmp/pycore.59992/platform4.xml
    root 1117 959 0 11:46 ? 00:00:00 emane -d --logl 3 -r -f /tmp/pycore.59992/emane2.log /tmp/pycore.59992/platform2.xml
    root 1179 942 0 11:46 ? 00:00:00 emane -d --logl 3 -r -f /tmp/pycore.59992/emane1.log /tmp/pycore.59992/platform1.xml
    root 1239 979 0 11:46 ? 00:00:00 emane -d --logl 3 -r -f /tmp/pycore.59992/emane5.log /tmp/pycore.59992/platform5.xml

The example above shows the EMANE processes started by CORE. To view the
configuration generated by CORE, look in the :file:`/tmp/pycore.nnnnn/` session
directory for a :file:`platform.xml` file and other XML files. One easy way to
view this information is by double-clicking one of the virtual nodes, and
typing *cd ..* in the shell to go up to the session directory.
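
The generated files can also be listed directly from a host shell; the session
number shown here matches the example above and will differ on your system:

::

    ls /tmp/pycore.59992/*.xml
    cat /tmp/pycore.59992/platform1.xml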

.. _single_pc_emane_figure:

.. figure:: figures/single-pc-emane.*
   :alt: Single PC with EMANE
   :align: center
   :scale: 75%

   Single PC with EMANE


.. index:: Distributed_EMANE

.. _Distributed_EMANE:

Distributed EMANE
=================

Running CORE and EMANE distributed among two or more emulation servers is
similar to running on a single machine. There are a few key configuration items
that need to be set in order to be successful, and those are outlined here.

It is a good idea to maintain separate networks for data (OTA) and control. The
control network may be a shared laboratory network, for example, and you do not
want multicast traffic on the data network to interfere with other EMANE users.
Furthermore, control traffic could interfere with the OTA latency and
throughput and might affect emulation fidelity. The examples described here
will use *eth0* as a control interface and *eth1* as a data interface, although
using separate interfaces is not strictly required. Note that these interface
names refer to interfaces present on the host machine, not virtual interfaces
within a node.

.. IMPORTANT::
   If an auxiliary control network is used, an interface on the host has to be
   assigned to that network. See :ref:`Distributed_Control_Network`.

Each machine that will act as an emulation server needs to have CORE and EMANE
installed. Refer to the :ref:`Distributed_Emulation` section for configuring
CORE.

The IP addresses of the available servers are configured from the
CORE emulation servers dialog box (choose *Session* then
*Emulation servers...*) described in :ref:`Distributed_Emulation`.
This list of servers is stored in a :file:`~/.core/servers.conf` file.
The dialog shows available servers, some or all of which may be
assigned to nodes on the canvas.

Nodes need to be assigned to emulation servers as described in
:ref:`Distributed_Emulation`. Select several nodes, right-click them, and
choose *Assign to* and the name of the desired server. When a node is not
assigned to any emulation server, it will be emulated locally. The local
machine that the GUI connects with is considered the "master" machine, which in
turn connects to the other emulation server "slaves". Public key SSH should
be configured from the master to the slaves as mentioned in the
:ref:`Distributed_Emulation` section.

Under the *EMANE* tab of the EMANE WLAN, click on the *EMANE options* button.
This brings up the EMANE configuration dialog. The *enable OTA Manager channel*
option should be set to *on*. The *OTA Manager device* and *Event Service
device* should be set to a control network device. For example, if you have
a primary and auxiliary control network (i.e. controlnet and controlnet1), and
you want the OTA traffic to have its own dedicated network, set the OTA Manager
device to ``ctrl1`` and the Event Service device to ``ctrl0``.
The EMANE models can be configured as described in :ref:`Single_PC_with_EMANE`.
Click *Apply* to save these settings.


.. _distributed_emane_figure:

.. figure:: figures/distributed-emane-configuration.*
   :alt: Distributed EMANE configuration
   :align: center
   :scale: 75%

   Distributed EMANE Configuration


.. NOTE::
   When the installed EMANE version is earlier than 0.9.2, EMANE accesses the
   host machine's interfaces, and the OTA manager and Event Service devices can
   be set to physical interfaces.

.. HINT::
   Here is a quick checklist for distributed emulation with EMANE.

   1. Follow the steps outlined for normal CORE :ref:`Distributed_Emulation`.
   2. Under the *EMANE* tab of the EMANE WLAN, click on *EMANE options*.
   3. Turn on the *OTA Manager channel* and set the *OTA Manager device*.
      Also set the *Event Service device*.
   4. Select groups of nodes, right-click them, and assign them to servers
      using the *Assign to* menu.
   5. Synchronize your machines' clocks prior to starting the emulation,
      using ``ntp`` or ``ptp``; a quick check is sketched after this list.
      Some EMANE models are sensitive to timing.
   6. Press the *Start* button to launch the distributed emulation.
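
A quick way to sanity-check clock synchronization on each server before
pressing *Start* (a sketch; assumes the servers run ``ntpd`` and have the
``ntpq`` utility installed):

::

    # peer offsets should be small (a few milliseconds or less)
    ntpq -p
    date +%s.%N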

Now when the Start button is used to instantiate the emulation, the local CORE
Python daemon will connect to other emulation servers that have been assigned
to nodes. Each server will have its own session directory where the
:file:`platform.xml` file and other EMANE XML files are generated. The NEM IDs
are automatically coordinated across servers so there is no overlap. Each
server also gets its own Platform ID.

An Ethernet device is used for disseminating multicast EMANE events, as
specified in the *configure emane* dialog. EMANE's Event Service can be run
with mobility or pathloss scripts as described in :ref:`Single_PC_with_EMANE`.
If CORE is not subscribed to location events, it will generate them as nodes
are moved on the canvas.

Double-clicking on a node during runtime will cause the GUI to attempt to SSH
to the emulation server for that node and run an interactive shell. The public
key SSH configuration should be tested with all emulation servers prior to
starting the emulation.


.. _distributed_emane_network_diagram:

.. figure:: figures/distributed-emane-network.*
   :alt: Distributed EMANE network
   :align: center
   :scale: 75%

   Notional Distributed EMANE Network Diagram
24
doc/figures/.gitignore
vendored
@ -1,24 +0,0 @@
cel.jpg
document-properties.jpg
host.jpg
hub.jpg
lanswitch.jpg
link.jpg
marker.jpg
mdr.jpg
observe.jpg
oval.jpg
pc.jpg
plot.jpg
rectangle.jpg
rj45.jpg
router.jpg
router_green.jpg
run.jpg
select.jpg
start.jpg
stop.jpg
text.jpg
tunnel.jpg
twonode.jpg
wlan.jpg
@ -1,61 +0,0 @@
# CORE
# (c)2009-2012 the Boeing Company.
# See the LICENSE file included in this distribution.
#
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
#
#

# define new file extensions for handling figures and html
SUFFIXES = .jpg .gif
GIFTOJPG = convert -background white -flatten

# dia figures can be manually converted to jpg
# On Ubuntu 11.10, this is failing for some reason.
DIATOJPG = dia -t jpg -e


# these are file extension handlers for automatically converting between image
# file types; the .jpg files are built from .gif files from the GUI

# file extension handler to convert .gif to .jpg
%.jpg: %.gif
	$(GIFTOJPG) $(top_srcdir)/gui/icons/tiny/$< $@

# file extension handler so we can list .gif as dependency for .gif.jpg
%.gif:
	@echo "Using GUI file $(top_srcdir)/gui/icons/tiny/$@"


# list of base names for figures
figures = core-architecture core-workflow
# list of figures + dia suffix
figures_dia = $(figures:%=%.dia)
# list of figure + jpg suffix
figures_jpg = $(figures:%=%.jpg)

figures_png = \
	controlnetwork.png \
	distributed-controlnetwork.png \
	distributed-emane-configuration.png \
	distributed-emane-network.png \
	single-pc-emane.png

# icons from the GUI source
icons = select start router host pc mdr router_green \
	lanswitch hub wlan cel \
	link rj45 tunnel marker oval rectangle text \
	stop observe plot twonode run document-properties
# list of icons + .gif.jpg suffix
icons_jpg = $(icons:%=%.jpg)

icons: $(icons_jpg)

clean-local:
	rm -f $(icons_jpg)

EXTRA_DIST = $(figures_dia) $(figures_jpg) $(figures_png)

# extra cruft to remove
DISTCLEANFILES = Makefile.in
Binary file not shown.
Binary file not shown.
@ -1,33 +0,0 @@
.. This file is part of the CORE Manual
   (c)2012,2015 the Boeing Company

.. only:: html or latex

    CORE Manual
    ===========


.. toctree::
   :maxdepth: 2
   :numbered:

   intro
   install
   usage
   scripting
   machine
   ctrlnet
   emane
   ns3
   performance
   devguide
   credits

Indices and tables
==================

.. only:: html

   * :ref:`genindex`
   * :ref:`search`
560
doc/install.rst
|
@ -1,560 +0,0 @@
|
|||
.. This file is part of the CORE Manual
|
||||
(c)2012-2013 the Boeing Company
|
||||
|
||||
.. include:: constants.txt
|
||||
|
||||
.. _Installation:
|
||||
|
||||
************
|
||||
Installation
|
||||
************
|
||||
|
||||
This chapter describes how to set up a CORE machine. Note that the easiest
|
||||
way to install CORE is using a binary
|
||||
package on Ubuntu or Fedora (deb or rpm) using the distribution's package
|
||||
manager
|
||||
to automatically install dependencies, see :ref:`Installing_from_Packages`.
|
||||
|
||||
Ubuntu and Fedora Linux are the recommended distributions for running CORE. Ubuntu |UBUNTUVERSION| and Fedora |FEDORAVERSION| ship with kernels with support for namespaces built-in. They support the latest hardware. However,
|
||||
these distributions are not strictly required. CORE will likely work on other
|
||||
flavors of Linux, see :ref:`Installing_from_Source`.
|
||||
|
||||
The primary dependencies are Tcl/Tk (8.5 or newer) for the GUI, and Python 2.6 or 2.7 for the CORE daemon.
|
||||
|
||||
.. index:: install locations
|
||||
.. index:: paths
|
||||
.. index:: install paths
|
||||
|
||||
CORE files are installed to the following directories.
|
||||
|
||||
======================================================= =================================
|
||||
Install Path Description
|
||||
======================================================= =================================
|
||||
:file:`/usr/local/bin/core-gui` GUI startup command
|
||||
:file:`/usr/local/bin/core-daemon` Daemon startup command
|
||||
:file:`/usr/local/bin/` Misc. helper commands/scripts
|
||||
:file:`/usr/local/lib/core` GUI files
|
||||
:file:`/usr/local/lib/python2.7/dist-packages/core` Python modules for daemon/scripts
|
||||
:file:`/etc/core/` Daemon configuration files
|
||||
:file:`~/.core/` User-specific GUI preferences and scenario files
|
||||
:file:`/usr/local/share/core/` Example scripts and scenarios
|
||||
:file:`/usr/local/share/man/man1/` Command man pages
|
||||
:file:`/etc/init.d/core-daemon` SysV startup script for daemon
|
||||
:file:`/etc/systemd/system/core-daemon.service` Systemd startup script for daemon
|
||||
======================================================= =================================
|
||||
|
||||
.. _Prerequisites:
|
||||
|
||||
Prerequisites
|
||||
=============
|
||||
|
||||
.. index:: Prerequisites
|
||||
|
||||
A Linux operating system is required. The GUI uses the Tcl/Tk scripting toolkit, and the CORE daemon requires Python. Details of the individual software packages required can be found in the installation steps.
|
||||
|
||||
.. _Required_Hardware:
|
||||
|
||||
Required Hardware
|
||||
-----------------
|
||||
|
||||
.. index:: Hardware requirements
|
||||
|
||||
.. index:: System requirements
|
||||
|
||||
Any computer capable of running Linux should be able to run CORE. Since the physical machine will be hosting numerous virtual machines, as a general rule you should select a machine having as much RAM and CPU resources as possible.
|
||||
|
||||
A *general recommendation* would be:
|
||||
|
||||
* 2.0GHz or better x86 processor, the more processor cores the better
|
||||
* 2 GB or more of RAM
|
||||
* about 3 MB of free disk space (plus more for dependency packages such as Tcl/Tk)
|
||||
* X11 for the GUI, or remote X11 over SSH
|
||||
|
||||
The computer can be a laptop, desktop, or rack-mount server. A keyboard, mouse,
|
||||
and monitor are not required if a network connection is available
|
||||
for remotely accessing the machine. A 3D accelerated graphics card
|
||||
is not required.
|
||||
|
||||
.. _Required_Software:
|
||||
|
||||
Required Software
|
||||
-----------------
|
||||
|
||||
CORE requires a Linux operating system because it uses virtualization provided
by the kernel. It does not run on the Windows or Mac OS X operating systems
(unless it is running within a virtual machine guest). The virtualization
technology that CORE currently uses is Linux network namespaces;
see :ref:`How_Does_it_Work?` for virtualization details.
|
||||
|
||||
**Linux network namespaces is the recommended platform.** Development is focused here and it supports the latest features. It is the easiest to install because there is no need to patch, install, and run a special Linux kernel.
|
||||
|
||||
The CORE GUI requires the X.Org X Window system (X11), or can run over a
|
||||
remote X11 session. For specific Tcl/Tk, Python, and other libraries required
|
||||
to run CORE, refer to the :ref:`Installation` section.
|
||||
|
||||
.. NOTE::
|
||||
CORE :ref:`Services` determine what runs on each node. You may require
|
||||
other software packages depending on the services you wish to use.
|
||||
For example, the `HTTP` service will require the `apache2` package.
|
||||
|
||||
|
||||
.. _Installing_from_Packages:
|
||||
|
||||
Installing from Packages
|
||||
========================
|
||||
|
||||
.. index:: installer
|
||||
|
||||
.. index:: binary packages
|
||||
|
||||
The easiest way to install CORE is using the pre-built packages. The package
|
||||
managers on Ubuntu or Fedora will
|
||||
automatically install dependencies for you.
|
||||
You can obtain the CORE packages from the `CORE downloads <http://downloads.pf.itd.nrl.navy.mil/core/packages/>`_ page
|
||||
or `CORE GitHub <https://github.com/coreemu/core/releases>`_.
|
||||
|
||||
.. _Installing_from_Packages_on_Ubuntu:
|
||||
|
||||
Installing from Packages on Ubuntu
|
||||
----------------------------------
|
||||
|
||||
First install the Ubuntu |UBUNTUVERSION| operating system.
|
||||
|
||||
* Install Quagga for routing. If you plan on working with wireless
|
||||
networks, we recommend
|
||||
installing
|
||||
`OSPF MDR <http://www.nrl.navy.mil/itd/ncs/products/ospf-manet>`__
|
||||
(replace `amd64` below with `i386` if needed
|
||||
to match your architecture):
|
||||
|
||||
.. parsed-literal::
|
||||
|
||||
export URL=http://downloads.pf.itd.nrl.navy.mil/ospf-manet
|
||||
wget $URL/|QVER|/|QVERDEB|
|
||||
sudo dpkg -i |QVERDEB|
|
||||
|
||||
|
||||
or, for the regular Ubuntu version of Quagga:
|
||||
::
|
||||
|
||||
sudo apt-get install quagga
|
||||
|
||||
* Install the CORE deb packages for Ubuntu, using a GUI that automatically
|
||||
resolves dependencies (note that the absolute path to the deb file
|
||||
must be used with ``software-center``):
|
||||
|
||||
.. parsed-literal::
|
||||
|
||||
software-center /home/user/Downloads/core-daemon\_\ |version|-|COREDEB|
|
||||
software-center /home/user/Downloads/core-gui\_\ |version|-|COREDEB2|
|
||||
|
||||
or install from command-line:
|
||||
|
||||
.. parsed-literal::
|
||||
|
||||
sudo dpkg -i core-daemon\_\ |version|-|COREDEB|
|
||||
sudo dpkg -i core-gui\_\ |version|-|COREDEB2|
|
||||
|
||||
* Start the CORE daemon as root.
|
||||
::
|
||||
|
||||
sudo /etc/init.d/core-daemon start
|
||||
|
||||
* Run the CORE GUI as a normal user:
|
||||
::
|
||||
|
||||
core-gui
|
||||
|
||||
|
||||
After running the ``core-gui`` command, a GUI should appear with a canvas
|
||||
for drawing topologies. Messages will print out on the console about
|
||||
connecting to the CORE daemon.
|
||||
|
||||
.. _Installing_from_Packages_on_Fedora:
|
||||
|
||||
Installing from Packages on Fedora/CentOS
|
||||
-----------------------------------------
|
||||
|
||||
The commands shown here should be run as root. First install the Fedora
|FEDORAVERSION| or CentOS |CENTOSVERSION| operating system.
The `x86_64` architecture is shown in the
examples below; replace with `i686` if using a 32-bit architecture. Also,
`fc15` is shown below for Fedora 15 packages; replace with the appropriate
Fedora release number.
|
||||
|
||||
* **CentOS only:** in order to install the `libev` and `tkimg` prerequisite
|
||||
packages, you
|
||||
first need to install the `EPEL <http://fedoraproject.org/wiki/EPEL>`_ repo
|
||||
(Extra Packages for Enterprise Linux):
|
||||
|
||||
::
|
||||
|
||||
wget http://dl.fedoraproject.org/pub/epel/6/i386/epel-release-6-8.noarch.rpm
|
||||
yum localinstall epel-release-6-8.noarch.rpm
|
||||
|
||||
|
||||
* **CentOS 7.x only:** as of this writing, the `tkimg` prerequisite package
|
||||
is missing from EPEL 7.x, but the EPEL 6.x package can be manually installed
|
||||
from
|
||||
`here <http://dl.fedoraproject.org/pub/epel/6/x86_64/repoview/tkimg.html>`_
|
||||
|
||||
::
|
||||
|
||||
wget http://dl.fedoraproject.org/pub/epel/6/x86_64/tkimg-1.4-1.el6.x86_64.rpm
|
||||
yum localinstall tkimg-1.4-1.el6.x86_64.rpm
|
||||
|
||||
|
||||
* **Optional:** install the prerequisite packages (otherwise skip this
|
||||
step and have the package manager install them for you.)
|
||||
|
||||
.. parsed-literal::
|
||||
|
||||
# make sure the system is up to date; you can also use the
|
||||
# update applet instead of yum update
|
||||
yum update
|
||||
yum install |YUMDEPS| |YUMDEPS2|
|
||||
|
||||
|
||||
* **Optional (Fedora 17+):** Fedora 17 and newer have an additional
|
||||
prerequisite providing the required netem kernel modules (otherwise
|
||||
skip this step and have the package manager install it for you.)
|
||||
|
||||
::
|
||||
|
||||
yum install kernel-modules-extra
|
||||
|
||||
|
||||
* Install Quagga for routing. If you plan on working with wireless networks,
|
||||
we recommend installing
|
||||
`OSPF MDR <http://www.nrl.navy.mil/itd/ncs/products/ospf-manet>`_:
|
||||
|
||||
.. parsed-literal::
|
||||
|
||||
export URL=http://downloads.pf.itd.nrl.navy.mil/ospf-manet
|
||||
wget $URL/|QVER|/|QVERRPM|
|
||||
yum localinstall |QVERRPM|
|
||||
|
||||
or, for the regular Fedora version of Quagga:
|
||||
::
|
||||
|
||||
yum install quagga
|
||||
|
||||
|
||||
* Install the CORE RPM packages for Fedora and automatically resolve
|
||||
dependencies:
|
||||
|
||||
.. parsed-literal::
|
||||
|
||||
yum localinstall python-core_|service|-|version|-|CORERPM| --nogpgcheck
|
||||
yum localinstall core-gui-|version|-|CORERPM2| --nogpgcheck
|
||||
|
||||
or install from the command-line:
|
||||
|
||||
.. parsed-literal::
|
||||
|
||||
rpm -ivh python-core_|service|-|version|-|CORERPM|
|
||||
rpm -ivh core-gui-|version|-|CORERPM2|
|
||||
|
||||
|
||||
* Turn off SELINUX by setting ``SELINUX=disabled`` in the :file:`/etc/sysconfig/selinux` file, and adding ``selinux=0`` to the kernel line in
|
||||
your :file:`/etc/grub.conf` file; on Fedora 15 and newer, disable sandboxd using ``chkconfig sandbox off``;
|
||||
you need to reboot in order for this change to take effect
|
||||
* Turn off firewalls with ``systemctl disable firewalld``, ``systemctl disable iptables.service``, ``systemctl disable ip6tables.service`` (``chkconfig iptables off``, ``chkconfig ip6tables off``) or configure them with permissive rules for CORE virtual networks; you need to reboot after making this change, or flush the firewall using ``iptables -F``, ``ip6tables -F``.
|
||||
|
||||
* Start the CORE daemon as root. Fedora uses the ``systemd`` start-up daemon
|
||||
instead of traditional init scripts. CentOS uses the init script.
|
||||
::
|
||||
|
||||
# for Fedora using systemd:
|
||||
systemctl daemon-reload
|
||||
systemctl start core-daemon.service
|
||||
# or for CentOS:
|
||||
/etc/init.d/core-daemon start
|
||||
|
||||
* Run the CORE GUI as a normal user:
|
||||
::
|
||||
|
||||
core-gui
|
||||
|
||||
|
||||
After running the ``core-gui`` command, a GUI should appear with a canvas
|
||||
for drawing topologies. Messages will print out on the console about
|
||||
connecting to the CORE daemon.
|
||||
|
||||
.. _Installing_from_Source:
|
||||
|
||||
Installing from Source
|
||||
======================
|
||||
|
||||
This option is listed here for developers and advanced users who are comfortable patching and building source code. Please consider using the binary packages instead for a simplified install experience.
|
||||
|
||||
.. _Installing_from_Source_on_Ubuntu:
|
||||
|
||||
Installing from Source on Ubuntu
|
||||
--------------------------------
|
||||
|
||||
To build CORE from source on Ubuntu, first install these development packages.
|
||||
These packages are not required for normal binary package installs.
|
||||
|
||||
.. parsed-literal::
|
||||
|
||||
sudo apt-get install |APTDEPS| \\
|
||||
|APTDEPS2| \\
|
||||
|APTDEPS3|
|
||||
|
||||
|
||||
You can obtain the CORE source from the `CORE source <http://downloads.pf.itd.nrl.navy.mil/core/source/>`_ page. Choose either a stable release version or
|
||||
the development snapshot available in the `nightly_snapshots` directory.
|
||||
The ``-j8`` argument to ``make`` will run eight simultaneous jobs, to speed up
|
||||
builds on multi-core systems.
|
||||
|
||||
.. parsed-literal::
|
||||
|
||||
tar xzf core-|version|.tar.gz
|
||||
cd core-|version|
|
||||
./bootstrap.sh
|
||||
./configure
|
||||
make -j8
|
||||
sudo make install
|
||||
|
||||
|
||||
The CORE Manual documentation is built separately from the :file:`doc/`
|
||||
sub-directory in the source. It requires Sphinx:
|
||||
|
||||
.. parsed-literal::
|
||||
|
||||
sudo apt-get install python-sphinx
|
||||
cd core-|version|/doc
|
||||
make html
|
||||
make latexpdf
|
||||
|
||||
|
||||
.. _Installing_from_Source_on_Fedora:
|
||||
|
||||
Installing from Source on Fedora
|
||||
--------------------------------
|
||||
|
||||
To build CORE from source on Fedora, install these development packages.
|
||||
These packages are not required for normal binary package installs.
|
||||
|
||||
.. parsed-literal::
|
||||
|
||||
yum install |YUMDEPS| \\
|
||||
|YUMDEPS2| \\
|
||||
|YUMDEPS3|
|
||||
|
||||
|
||||
.. NOTE::
|
||||
For a minimal X11 installation, also try these packages::
|
||||
|
||||
yum install xauth xterm urw-fonts
|
||||
|
||||
You can obtain the CORE source from the `CORE source <http://downloads.pf.itd.nrl.navy.mil/core/source/>`_ page. Choose either a stable release version or
|
||||
the development snapshot available in the :file:`nightly_snapshots` directory.
|
||||
The ``-j8`` argument to ``make`` will run eight simultaneous jobs, to speed up
|
||||
builds on multi-core systems. Notice the ``configure`` flag to tell the build
|
||||
system that a systemd service file should be installed under Fedora.
|
||||
|
||||
.. parsed-literal::
|
||||
|
||||
tar xzf core-|version|.tar.gz
|
||||
cd core-|version|
|
||||
./bootstrap.sh
|
||||
./configure --with-startup=systemd
|
||||
make -j8
|
||||
sudo make install
|
||||
|
||||
Another note is that the Python distutils in Fedora Linux will install the CORE
|
||||
Python modules to :file:`/usr/lib/python2.7/site-packages/core`, instead of
|
||||
using the :file:`dist-packages` directory.
|
||||
|
||||
The CORE Manual documentation is built separately from the :file:`doc/`
|
||||
sub-directory in the source. It requires Sphinx:
|
||||
|
||||
.. parsed-literal::
|
||||
|
||||
sudo yum install python-sphinx
|
||||
cd core-|version|/doc
|
||||
make html
|
||||
make latexpdf
|
||||
|
||||
|
||||
.. _Installing_from_Source_on_CentOS:
|
||||
|
||||
Installing from Source on CentOS/EL6
|
||||
------------------------------------
|
||||
|
||||
To build CORE from source on CentOS/EL6, first install the `EPEL <http://fedoraproject.org/wiki/EPEL>`_ repo (Extra Packages for Enterprise Linux) in order
|
||||
to provide the `libev` package.
|
||||
|
||||
::
|
||||
|
||||
wget http://dl.fedoraproject.org/pub/epel/6/i386/epel-release-6-8.noarch.rpm
|
||||
yum localinstall epel-release-6-8.noarch.rpm
|
||||
|
||||
|
||||
Now use the same instructions shown in :ref:`Installing_from_Source_on_Fedora`.
|
||||
CentOS/EL6 does not use the systemd service file, so the `configure` option
|
||||
`--with-startup=systemd` should be omitted:
|
||||
|
||||
::
|
||||
|
||||
./configure
|
||||
|
||||
|
||||
|
||||
.. _Installing_from_Source_on_SUSE:
|
||||
|
||||
Installing from Source on SUSE
|
||||
------------------------------
|
||||
|
||||
To build CORE from source on SUSE or OpenSUSE,
|
||||
use the similar instructions shown in :ref:`Installing_from_Source_on_Fedora`,
|
||||
except that the following `configure` option should be used:
|
||||
|
||||
::
|
||||
|
||||
./configure --with-startup=suse
|
||||
|
||||
This causes a separate init script to be installed that is tailored towards SUSE systems.
|
||||
|
||||
The `zypper` command is used instead of `yum`.
|
||||
|
||||
The Quagga routing suite is recommended for routing;
see :ref:`Quagga_Routing_Software` for installation instructions.
|
||||
|
||||
.. _Quagga_Routing_Software:
|
||||
|
||||
Quagga Routing Software
|
||||
=======================
|
||||
|
||||
.. index:: Quagga
|
||||
|
||||
Virtual networks generally require some form of routing in order to work (e.g.
|
||||
to automatically populate routing tables for routing packets from one subnet
|
||||
to another.) CORE builds OSPF routing protocol
|
||||
configurations by default when the blue router
|
||||
node type is used. The OSPF protocol is available
|
||||
from the `Quagga open source routing suite <http://www.quagga.net>`_.
|
||||
Other routing protocols are available using different
node services; see :ref:`Default_Services_and_Node_Types`.
|
||||
|
||||
Quagga is not specified as a dependency for the CORE packages because
|
||||
there are two different Quagga packages that you may use:
|
||||
|
||||
* `Quagga <http://www.quagga.net>`_ - the standard version of Quagga, suitable for static wired networks, and usually available via your distribution's package manager.
|
||||
.. index:: OSPFv3 MANET
|
||||
|
||||
.. index:: OSPFv3 MDR
|
||||
|
||||
.. index:: MANET Designated Routers (MDR)
|
||||
|
||||
*
|
||||
`OSPF MANET Designated Routers <http://www.nrl.navy.mil/itd/ncs/products/ospf-manet>`_ (MDR) - the Quagga routing suite with a modified version of OSPFv3,
|
||||
optimized for use with mobile wireless networks. The *mdr* node type (and the MDR service) requires this variant of Quagga.
|
||||
|
||||
If you plan on working with wireless networks, we recommend installing OSPF MDR;
|
||||
otherwise install the standard version of Quagga using your package manager or from source.
|
||||
|
||||
.. _Installing_Quagga_from_Packages:
|
||||
|
||||
Installing Quagga from Packages
|
||||
-------------------------------
|
||||
|
||||
To install the standard version of Quagga from packages, use your package manager (Linux).
|
||||
|
||||
Ubuntu users:
|
||||
::
|
||||
|
||||
sudo apt-get install quagga
|
||||
|
||||
Fedora users:
|
||||
::
|
||||
|
||||
yum install quagga
|
||||
|
||||
To install the Quagga variant having OSPFv3 MDR, first download the
|
||||
appropriate package, and install using the package manager.
|
||||
|
||||
Ubuntu users:
|
||||
|
||||
.. parsed-literal::
|
||||
|
||||
export URL=http://downloads.pf.itd.nrl.navy.mil/ospf-manet
|
||||
wget $URL/|QVER|/|QVERDEB|
|
||||
sudo dpkg -i |QVERDEB|
|
||||
|
||||
Replace `amd64` with `i686` if using a 32-bit architecture.
|
||||
|
||||
Fedora users:
|
||||
|
||||
.. parsed-literal::
|
||||
|
||||
export URL=http://downloads.pf.itd.nrl.navy.mil/ospf-manet
|
||||
wget $URL/|QVER|/|QVERRPM|
|
||||
yum localinstall |QVERRPM|
|
||||
|
||||
Replace `x86_64` with `i686` if using a 32-bit architecture.
|
||||
|
||||
.. _Compiling_Quagga_for_CORE:
|
||||
|
||||
Compiling Quagga for CORE
|
||||
-------------------------
|
||||
|
||||
To compile Quagga to work with CORE on Linux:
|
||||
|
||||
.. parsed-literal::
|
||||
|
||||
tar xzf |QVER|.tar.gz
|
||||
cd |QVER|
|
||||
./configure --enable-user=root --enable-group=root --with-cflags=-ggdb \\
|
||||
--sysconfdir=/usr/local/etc/quagga --enable-vtysh \\
|
||||
--localstatedir=/var/run/quagga
|
||||
make
|
||||
sudo make install
|
||||
|
||||
|
||||
Note that the configuration directory :file:`/usr/local/etc/quagga` shown for
Quagga above could be :file:`/etc/quagga`, if you create a symbolic link from
:file:`/etc/quagga/Quagga.conf -> /usr/local/etc/quagga/Quagga.conf` on the
host. The :file:`quaggaboot.sh` script in a Linux network namespace will try to
do this for you if needed.
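
To create the link manually on the host, a minimal sketch (assuming the default
source-install prefix shown above):

::

    sudo mkdir -p /etc/quagga
    sudo ln -s /usr/local/etc/quagga/Quagga.conf /etc/quagga/Quagga.conf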
|
||||
|
||||
If you try to run quagga after installing from source and get an error such as:
|
||||
|
||||
.. parsed-literal::
|
||||
|
||||
error while loading shared libraries libzebra.so.0
|
||||
|
||||
this is usually a sign that you have to run `sudo ldconfig` to refresh the
|
||||
cache file.
|
||||
|
||||
VCORE
|
||||
=====
|
||||
|
||||
.. index:: virtual machines
|
||||
|
||||
.. index:: VirtualBox
|
||||
|
||||
.. index:: VMware
|
||||
|
||||
CORE is capable of running inside of a virtual machine, using
|
||||
software such as VirtualBox,
|
||||
VMware Server or QEMU. However, CORE itself is performing machine
|
||||
virtualization in order to realize multiple emulated nodes, and running CORE
|
||||
virtually adds additional contention for the physical resources. **For performance reasons, this is not recommended.** Timing inside of a VM often has
|
||||
problems. If you do run CORE from within a VM, it is recommended that you view
|
||||
the GUI with remote X11 over SSH, so the virtual machine does not need to
|
||||
emulate the video card with the X11 application.
|
||||
|
||||
.. index:: VCORE
|
||||
|
||||
A CORE virtual machine is provided for download, named VCORE.
|
||||
This is perhaps the easiest way to get CORE up and running as the machine
|
||||
is already set up for you. This may be adequate for initially evaluating the
|
||||
tool but keep in mind the performance limitations of running within VirtualBox
|
||||
or VMware. To install the virtual machine, you first need to obtain VirtualBox
|
||||
from http://www.virtualbox.org, or VMware Server or Player from
|
||||
http://www.vmware.com (this commercial software is distributed for free.)
|
||||
Once virtualization software has been installed, you can import the virtual
|
||||
machine appliance using the ``vbox`` file for VirtualBox or the ``vmx`` file for VMware. See the documentation that comes with VCORE for login information.
|
||||
|
204
doc/intro.rst
|
@ -1,204 +0,0 @@
|
|||
.. This file is part of the CORE Manual
|
||||
(c)2012-2013 the Boeing Company
|
||||
|
||||
.. _Introduction:
|
||||
|
||||
************
|
||||
Introduction
|
||||
************
|
||||
|
||||
The Common Open Research Emulator (CORE) is a tool for building virtual
|
||||
networks. As an emulator, CORE builds a representation of a real computer
|
||||
network that runs in real time, as opposed to simulation, where abstract models
|
||||
are used. The live-running emulation can be connected to physical networks and
|
||||
routers. It provides an environment for running real applications and
|
||||
protocols, taking advantage of virtualization provided by the Linux operating
|
||||
system.
|
||||
|
||||
Some of its key features are:
|
||||
|
||||
.. index::
|
||||
single: key features
|
||||
|
||||
* efficient and scalable
|
||||
* runs applications and protocols without modification
|
||||
* easy-to-use GUI
|
||||
* highly customizable
|
||||
|
||||
CORE is typically used for network and protocol research,
|
||||
demonstrations, application and platform testing, evaluating networking
|
||||
scenarios, security studies, and increasing the size of physical test networks.
|
||||
|
||||
What's New?
|
||||
===========
|
||||
For readers who are already familiar with CORE and have read this manual before, below is a list of what changed in version 5.0:
|
||||
|
||||
* :ref:`Services` - Added Ryu SD and Open vSwitch services
|
||||
* :ref:`Python_Scripting` - Updated script examples to reflect code changes
|
||||
|
||||
.. index::
|
||||
single: CORE; components of
|
||||
single: CORE; API
|
||||
single: API
|
||||
single: CORE; GUI
|
||||
|
||||
.. _Architecture:
|
||||
|
||||
Architecture
|
||||
============
|
||||
The main components of CORE are shown in :ref:`core-architecture`. A
|
||||
*CORE daemon* (backend) manages emulation sessions. It builds emulated networks
|
||||
using kernel virtualization for virtual nodes and some form of bridging and
|
||||
packet manipulation for virtual networks. The nodes and networks come together
|
||||
via interfaces installed on nodes. The daemon is controlled via the
|
||||
graphical user interface, the *CORE GUI* (frontend).
|
||||
The daemon uses Python modules
|
||||
that can be imported directly by Python scripts.
|
||||
The GUI and the daemon communicate using a custom,
|
||||
asynchronous, sockets-based API, known as the *CORE API*. The dashed line
|
||||
in the figure notionally depicts the user-space and kernel-space separation.
|
||||
The components the user interacts with are colored blue: GUI, scripts, or
|
||||
command-line tools.
|
||||
|
||||
The system is modular to allow mixing different components. The virtual
|
||||
networks component, for example, can be realized with other network
|
||||
simulators and emulators, such as ns-3 and EMANE.
|
||||
Another example is how a session can be designed and started using
|
||||
the GUI, and continue to run in "headless" operation with the GUI closed.
|
||||
The CORE API is sockets based,
|
||||
to allow the possibility of running different components on different physical
|
||||
machines.
|
||||
|
||||
.. _core-architecture:
|
||||
|
||||
.. figure:: figures/core-architecture.*
|
||||
:alt: CORE architecture diagram
|
||||
:align: center
|
||||
:scale: 75 %
|
||||
|
||||
CORE Architecture
|
||||
|
||||
The CORE GUI is a Tcl/Tk program; it is started using the command
|
||||
``core-gui``. The CORE daemon, named ``core-daemon``,
|
||||
is usually started via the init script
|
||||
(``/etc/init.d/core-daemon`` or ``core-daemon.service``,
|
||||
depending on platform.)
|
||||
The CORE daemon manages sessions of virtual
|
||||
nodes and networks, of which other scripts and utilities may be used for
|
||||
further control.
|
||||
|
||||
|
||||
.. _How_Does_It_Work?:
|
||||
|
||||
How Does it Work?
|
||||
=================
|
||||
|
||||
A CORE node is a lightweight virtual machine. The CORE framework runs on Linux.
|
||||
|
||||
.. index::
|
||||
single: Linux; virtualization
|
||||
single: Linux; containers
|
||||
single: LXC
|
||||
single: network namespaces
|
||||
|
||||
* :ref:`Linux` CORE uses Linux network namespace virtualization to build virtual nodes, and ties them together with virtual networks using Linux Ethernet bridging.
|
||||
|
||||
.. _Linux:
|
||||
|
||||
Linux
|
||||
-----
|
||||
Linux network namespaces (also known as netns, LXC, or `Linux containers
<http://lxc.sourceforge.net/>`_) are the primary virtualization
technique used by CORE. LXC has been part of the mainline Linux kernel since
2.6.24. Recent Linux distributions such as Fedora and Ubuntu have
namespaces-enabled kernels out of the box.
A namespace is created using the ``clone()`` system call. Each namespace has
its own process environment and private network stack. Network namespaces
share the same filesystem in CORE.
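
Namespaces can also be explored by hand with the ``iproute2`` tools. This is
independent of CORE, which creates its namespaces directly via ``clone()``, but
it illustrates the private network stack each namespace gets:

::

    # create a throwaway namespace and show its own (empty) interface list
    sudo ip netns add demo
    sudo ip netns exec demo ip link show
    sudo ip netns delete demo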
|
||||
|
||||
.. index::
|
||||
single: Linux; bridging
|
||||
single: Linux; networking
|
||||
single: ebtables
|
||||
|
||||
CORE combines these namespaces with Linux Ethernet bridging
to form networks. Link characteristics are applied using Linux Netem queuing
disciplines. Ebtables provides Ethernet frame filtering on Linux bridges.
Wireless networks are emulated by controlling which interfaces can send and
receive with ebtables rules.
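
As an illustration of the underlying mechanism, link effects can be applied by
hand with ``tc``. The interface name below is illustrative, and CORE normally
manages these settings for you:

::

    # add 20 ms of delay and 1% loss to an interface, then remove it
    sudo tc qdisc add dev eth0 root netem delay 20ms loss 1%
    sudo tc qdisc del dev eth0 root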
|
||||
|
||||
.. index::
|
||||
single: IMUNES
|
||||
single: VirtNet
|
||||
single: prior work
|
||||
|
||||
.. rubric:: Footnotes
|
||||
.. [#f1] http://www.nlnet.nl/project/virtnet/
|
||||
.. [#f2] http://www.imunes.net/virtnet/
|
||||
|
||||
.. _Prior_Work:
|
||||
|
||||
Prior Work
|
||||
==========
|
||||
|
||||
The Tcl/Tk CORE GUI was originally derived from the open source
|
||||
`IMUNES <http://www.tel.fer.hr/imunes/>`_
|
||||
project from the University of Zagreb
|
||||
as a custom project within Boeing Research and Technology's Network
|
||||
Technology research group in 2004. Since then they have developed the CORE
|
||||
framework to use Linux virtualization, have developed a
|
||||
Python framework, and made numerous user- and kernel-space developments, such
|
||||
as support for wireless networks, IPsec, the ability to distribute emulations,
|
||||
simulation integration, and more. The IMUNES project also consists of userspace
|
||||
and kernel components. Originally, one had to download and apply a patch for
|
||||
the FreeBSD 4.11 kernel, but the more recent
|
||||
`VirtNet <http://www.nlnet.nl/project/virtnet/>`_
|
||||
effort has brought network stack
|
||||
virtualization to the more modern FreeBSD 8.x kernel.
|
||||
|
||||
.. _Open_Source_Project_and_Resources:
|
||||
|
||||
Open Source Project and Resources
|
||||
=================================
|
||||
.. index::
|
||||
single: open source project
|
||||
single: license
|
||||
single: website
|
||||
single: supplemental website
|
||||
single: contributing
|
||||
|
||||
CORE has been released by Boeing to the open source community under the BSD
|
||||
license. If you find CORE useful for your work, please contribute back to the
|
||||
project. Contributions can be as simple as reporting a bug, dropping a line of
|
||||
encouragement or technical suggestions to the mailing lists, or can also
|
||||
include submitting patches or maintaining aspects of the tool. For contributing to
|
||||
CORE, please visit the
|
||||
`CORE GitHub <https://github.com/coreemu/core>`_.
|
||||
|
||||
Besides this manual, there are other additional resources available online:
|
||||
|
||||
* `CORE website <http://www.nrl.navy.mil/itd/ncs/products/core>`_ - main project page containing demos, downloads, and mailing list information.
|
||||
|
||||
.. index::
|
||||
single: CORE
|
||||
|
||||
Goals
|
||||
-----
|
||||
These are the Goals of the CORE project; they are similar to what we consider to be the :ref:`key features <Introduction>`.
|
||||
|
||||
#. Ease of use - In a few clicks the user should have a running network.
|
||||
#. Efficiency and scalability - A node is more lightweight than a full virtual machine. Tens of nodes should be possible on a standard laptop computer.
|
||||
#. Software re-use - Re-use real implementation code, protocols, networking stacks.
|
||||
#. Networking - CORE is focused on emulating networks and offers various ways to connect the running emulation with real or simulated networks.
|
||||
#. Hackable - The source code is available and easy to understand and modify.
|
||||
|
||||
Non-Goals
|
||||
---------
|
||||
This is a list of Non-Goals, specific things that people may be interested in but are not areas that we will pursue.
|
||||
|
||||
#. Reinventing the wheel - Where possible, CORE reuses existing open source components such as virtualization, Netgraph, netem, bridging, Quagga, etc.
|
||||
#. 1,000,000 nodes - While the goal of CORE is to provide efficient, scalable network emulation, there is no set goal of N number of nodes. There are realistic limits on what a machine can handle as its resources are divided amongst virtual nodes. We will continue to make things more efficient and let the user determine the right number of nodes based on available hardware and the activities each node is performing.
|
||||
#. Solves every problem - CORE is about emulating networking layers 3-7 using virtual network stacks in Linux operating systems.
|
||||
#. Hardware-specific - CORE itself is not an instantiation of hardware, a testbed, or a specific laboratory setup; it should run on commodity laptop and desktop PCs, in addition to high-end server hardware.
|
||||
|
||||
|
|
@ -1,71 +0,0 @@
|
|||
.. This file is part of the CORE Manual
|
||||
(c)2012-2013 the Boeing Company
|
||||
|
||||
.. _Machine_Types:
|
||||
|
||||
*************
|
||||
Machine Types
|
||||
*************
|
||||
|
||||
.. index:: machine types
|
||||
|
||||
Different node types can be configured in CORE, and each node type has a
|
||||
*machine type* that indicates how the node will be represented at run time.
|
||||
Different machine types allow for different virtualization options.
|
||||
|
||||
.. _netns:
|
||||
|
||||
netns
|
||||
=====
|
||||
|
||||
.. index:: netns machine type
|
||||
|
||||
The *netns* machine type is the default. This is for nodes that will be
|
||||
backed by Linux network namespaces. See :ref:`Linux` for a brief explanation of
|
||||
netns. This default machine type is very lightweight, providing a minimum
|
||||
amount of
|
||||
virtualization in order to emulate a network.
|
||||
Another reason this is designated as the default machine type
|
||||
is because this virtualization technology
|
||||
typically requires no changes to the kernel; it is available out-of-the-box
|
||||
from the latest mainstream Linux distributions.
|
||||
|
||||
.. index:: physical machine type
|
||||
|
||||
.. index:: emulation testbed machines
|
||||
|
||||
.. index:: real node
|
||||
|
||||
.. index:: physical node
|
||||
|
||||
.. _physical:
|
||||
|
||||
physical
|
||||
========
|
||||
|
||||
The *physical* machine type is used for nodes that represent a real
|
||||
Linux-based machine that will participate in the emulated network scenario.
|
||||
This is typically used, for example, to incorporate racks of server machines
|
||||
from an emulation testbed. A physical node is one that is running the CORE
|
||||
daemon (:file:`core-daemon`), but will not be further partitioned into virtual
|
||||
machines. Services that are run on the physical node do not run in an
|
||||
isolated or virtualized environment, but directly on the operating system.
|
||||
|
||||
Physical nodes must be assigned to servers, the same way nodes
|
||||
are assigned to emulation servers with :ref:`Distributed_Emulation`.
|
||||
The list of available physical nodes currently shares the same dialog box
|
||||
and list as the emulation servers, accessed using the *Emulation Servers...*
|
||||
entry from the *Session* menu.
|
||||
|
||||
.. index:: GRE tunnels with physical nodes
|
||||
|
||||
Support for physical nodes is under development and may be improved in future
|
||||
releases. Currently, when any node is linked to a physical node, a dashed line
|
||||
is drawn to indicate network tunneling. A GRE tunneling interface will be
|
||||
created on the physical node and used to tunnel traffic to and from the
|
||||
emulated world.
|
||||
|
||||
Double-clicking on a physical node during runtime
opens a terminal with an SSH shell to that
node. Users should configure public-key SSH login as done with emulation
servers.
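
A typical public-key setup from the master machine looks like the following
sketch; the hostname is illustrative:

::

    # on the master machine
    ssh-keygen -t rsa
    ssh-copy-id root@server1
    ssh root@server1 true   # should not prompt for a password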
|
314
doc/ns3.rst
|
@ -1,314 +0,0 @@
|
|||
.. This file is part of the CORE Manual
|
||||
(c)2012-2013 the Boeing Company
|
||||
|
||||
.. _ns-3:
|
||||
|
||||
****
|
||||
ns-3
|
||||
****
|
||||
|
||||
.. index:: ns-3
|
||||
|
||||
This chapter describes running CORE with the
|
||||
`ns-3 network simulator <http://www.nsnam.org>`_.
|
||||
|
||||
.. _What_is_ns-3?:
|
||||
|
||||
What is ns-3?
|
||||
=============
|
||||
|
||||
.. index:: ns-3 Introduction
|
||||
|
||||
ns-3 is a discrete-event network simulator for Internet systems, targeted primarily for research and educational use. [#f1]_ By default, ns-3 simulates entire networks, from applications down to channels, and it does so in simulated time, instead of real (wall-clock) time.
|
||||
|
||||
CORE can run in conjunction with ns-3 to simulate some types of networks. CORE
|
||||
network namespace virtual nodes can have virtual TAP interfaces installed using
|
||||
the simulator for communication. The simulator needs to run at wall clock time
|
||||
with the real-time scheduler. In this type of configuration, the CORE
|
||||
namespaces are used to provide packets to the ns-3 devices and channels.
|
||||
This allows, for example, wireless models developed for ns-3 to be used
|
||||
in an emulation context.
|
||||
|
||||
Users simulate networks with ns-3 by writing C++ programs or Python scripts that
|
||||
import the ns-3 library. Simulation models are objects instantiated in these
|
||||
scripts. Combining the CORE Python modules with ns-3 Python bindings allow
|
||||
a script to easily set up and manage an emulation + simulation environment.
|
||||
|
||||
.. rubric:: Footnotes
|
||||
.. [#f1] http://www.nsnam.org
|
||||
|
||||
.. _ns-3_Scripting:
|
||||
|
||||
ns-3 Scripting
|
||||
==============
|
||||
|
||||
.. index:: ns-3 scripting
|
||||
|
||||
Currently, ns-3 is supported by writing
|
||||
:ref:`Python scripts <Python_Scripting>`, but not through
|
||||
drag-and-drop actions within the GUI.
|
||||
If you have a copy of the CORE source, look under :file:`ns3/examples/` for example scripts; a CORE installation package puts these under
|
||||
:file:`/usr/share/core/examples/corens3`.
|
||||
|
||||
To run these scripts, install CORE so the CORE Python libraries are accessible,
and download and build ns-3. This has been tested using ns-3 releases starting
with 3.11 (and through 3.16 as of this writing).
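
Building ns-3 with its ``waf`` build system follows the usual pattern; a
minimal sketch, assuming the ns-allinone-3.16 archive has already been
downloaded (the archive name and version are illustrative):

::

    tar xjf ns-allinone-3.16.tar.bz2
    cd ns-allinone-3.16/ns-3.16
    ./waf configure
    ./waf build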
|
||||
|
||||
The first step is to open an ns-3 waf shell. `waf <http://code.google.com/p/waf/>`_ is the build system for ns-3. Opening a waf shell as root will merely
|
||||
set some environment variables useful for finding python modules and ns-3
|
||||
executables. The following environment variables are extended or set by
|
||||
issuing `waf shell`:
|
||||
|
||||
::
|
||||
|
||||
PATH
|
||||
PYTHONPATH
|
||||
LD_LIBRARY_PATH
|
||||
NS3_MODULE_PATH
|
||||
NS3_EXECUTABLE_PATH
|
||||
|
||||
Open a waf shell as root, so that network namespaces may be instantiated
|
||||
by the script with root permissions. For an example, run the
|
||||
:file:`ns3wifi.py`
|
||||
program, which simply instantiates 10 nodes (by default) and places them on
|
||||
an ns-3 WiFi channel. That is, the script will instantiate 10 namespace nodes,
|
||||
and create a special tap device that sends packets between the namespace
|
||||
node and a special ns-3 simulation node, where the tap device is bridged
|
||||
to an ns-3 WiFi network device, and attached to an ns-3 WiFi channel.
|
||||
|
||||
::
|
||||
|
||||
> cd ns-allinone-3.16/ns-3.16
|
||||
> sudo ./waf shell
|
||||
# # use '/usr/local' below if installed from source
|
||||
# cd /usr/share/core/examples/corens3/
|
||||
# python -i ns3wifi.py
|
||||
running ns-3 simulation for 600 seconds
|
||||
|
||||
>>> print session
|
||||
<corens3.obj.Ns3Session object at 0x1963e50>
|
||||
>>>
|
||||
|
||||
|
||||
The interactive Python shell allows some interaction with the Python objects
|
||||
for the emulation.
|
||||
|
||||
In another terminal, nodes can be accessed using *vcmd*:
|
||||
::
|
||||
|
||||
vcmd -c /tmp/pycore.10781/n1 -- bash
|
||||
root@n1:/tmp/pycore.10781/n1.conf#
|
||||
root@n1:/tmp/pycore.10781/n1.conf# ping 10.0.0.3
|
||||
PING 10.0.0.3 (10.0.0.3) 56(84) bytes of data.
|
||||
64 bytes from 10.0.0.3: icmp_req=1 ttl=64 time=7.99 ms
|
||||
64 bytes from 10.0.0.3: icmp_req=2 ttl=64 time=3.73 ms
|
||||
64 bytes from 10.0.0.3: icmp_req=3 ttl=64 time=3.60 ms
|
||||
^C
|
||||
--- 10.0.0.3 ping statistics ---
|
||||
3 packets transmitted, 3 received, 0% packet loss, time 2002ms
|
||||
rtt min/avg/max/mdev = 3.603/5.111/7.993/2.038 ms
|
||||
root@n1:/tmp/pycore.10781/n1.conf#
|
||||
|
||||
|
||||
The ping packets shown above are traversing an ns-3 ad-hoc Wifi simulated
|
||||
network.
|
||||
|
||||
To clean up the session, use the Session.shutdown() method from the Python
|
||||
terminal.
|
||||
|
||||
::
|
||||
|
||||
>>> print session
|
||||
<corens3.obj.Ns3Session object at 0x1963e50>
|
||||
>>>
|
||||
>>> session.shutdown()
|
||||
>>>
|
||||
|
||||
|
||||
A CORE/ns-3 Python script will instantiate an Ns3Session, which is a
|
||||
CORE Session
|
||||
having CoreNs3Nodes, an ns-3 MobilityHelper, and a fixed duration.
|
||||
The CoreNs3Node inherits from both the CoreNode and the ns-3 Node classes -- it
|
||||
is a network namespace having an associated simulator object. The CORE TunTap
|
||||
interface is used, represented by an ns-3 TapBridge in `CONFIGURE_LOCAL`
|
||||
mode, where ns-3 creates and configures the tap device. An event is scheduled
|
||||
to install the taps at time 0.
|
||||
|
||||
.. NOTE::
|
||||
The GUI can be used to run the :file:`ns3wifi.py`
|
||||
and :file:`ns3wifirandomwalk.py` scripts directly. First, ``core-daemon``
|
||||
must be
|
||||
stopped and run within the waf root shell. Then the GUI may be run as
|
||||
a normal user, and the *Execute Python Script...* option may be used from
|
||||
the *File* menu. Dragging nodes around in the :file:`ns3wifi.py` example
|
||||
will cause their ns-3 positions to be updated.
|
||||
|
||||
|
||||
Users may find the files :file:`ns3wimax.py` and :file:`ns3lte.py`
|
||||
in that example
|
||||
directory; those files were similarly configured, but the underlying
|
||||
ns-3 support is not present as of ns-3.16, so they will not work. Specifically,
|
||||
ns-3 itself has to be extended to support bridging the Tap device to
an LTE or a WiMAX device.
|
||||
|
||||
.. _ns-3_Integration_details:
|
||||
|
||||
Integration details
|
||||
===================
|
||||
|
||||
.. index:: ns-3 integration details
|
||||
|
||||
The previous example :file:`ns3wifi.py` used Python API from the special Python
|
||||
objects *Ns3Session* and *Ns3WifiNet*. The example program does not import
|
||||
anything directly from the ns-3 python modules; rather, only the above
|
||||
two objects are used, and the API available to configure the underlying
|
||||
ns-3 objects is constrained. For example, *Ns3WifiNet* instantiates
|
||||
a constant-rate 802.11a-based ad hoc network, using a lot of ns-3 defaults.
|
||||
|
||||
However, programs may be written with a blend of ns-3 API and CORE Python
|
||||
API calls. This section examines some of the fundamental objects in
|
||||
the CORE ns-3 support. Source code can be found in
|
||||
:file:`ns3/corens3/obj.py` and example
|
||||
code in :file:`ns3/corens3/examples/`.
|
||||
|
||||
Ns3Session
|
||||
----------
|
||||
|
||||
The *Ns3Session* class is a CORE Session that starts an ns-3 simulation
|
||||
thread. ns-3 actually runs as a separate process on the same host as
|
||||
the CORE daemon, and the control of starting and stopping this process
|
||||
is performed by the *Ns3Session* class.
|
||||
|
||||
Example:
|
||||
|
||||
::
|
||||
|
||||
session = Ns3Session(persistent=True, duration=opt.duration)
|
||||
|
||||
Note the use of the duration attribute to control how long the ns-3 simulation
|
||||
should run. By default, the duration is 600 seconds.
|
||||
|
||||
Typically, the session keeps track of the ns-3 nodes (holding a node
|
||||
container for references to the nodes). This is accomplished via the
|
||||
`addnode()` method, e.g.:
|
||||
|
||||
::
|
||||
|
||||
for i in xrange(1, opt.numnodes + 1):
    node = session.addnode(name = "n%d" % i)
|
||||
|
||||
`addnode()` creates instances of a *CoreNs3Node*, which we'll cover next.
|
||||
|
||||
CoreNs3Node
|
||||
-----------
|
||||
|
||||
A *CoreNs3Node* is both a CoreNode and an ns-3 node:
|
||||
|
||||
::
|
||||
|
||||
class CoreNs3Node(CoreNode, ns.network.Node):
    ''' The CoreNs3Node is both a CoreNode backed by a network namespace and
    an ns-3 Node simulator object. When linked to simulated networks, the TunTap
    device will be used.
    '''
|
||||
|
||||
|
||||
CoreNs3Net
|
||||
-----------
|
||||
|
||||
A *CoreNs3Net* derives from *PyCoreNet*. This network exists entirely
|
||||
in simulation, using the TunTap device to interact between the emulated
|
||||
and the simulated realm. *Ns3WifiNet* is a specialization of this.
|
||||
|
||||
As an example, this type of code would be typically used to add a WiFi
|
||||
network to a session:
|
||||
|
||||
::
|
||||
|
||||
wifi = session.addobj(cls=Ns3WifiNet, name="wlan1", rate="OfdmRate12Mbps")
|
||||
wifi.setposition(30, 30, 0)
|
||||
|
||||
The above two lines will create a wlan1 object and set its initial canvas
|
||||
position. Later in the code, the newnetif method of the CoreNs3Node can
|
||||
be used to add interfaces on particular nodes to this network; e.g.:
|
||||
|
||||
::
|
||||
|
||||
for i in xrange(1, opt.numnodes + 1):
    node = session.addnode(name = "n%d" % i)
    node.newnetif(wifi, ["%s/%s" % (prefix.addr(i), prefix.prefixlen)])
|
||||
|
||||
|
||||
.. _ns-3_Mobility:
|
||||
|
||||
Mobility
|
||||
========
|
||||
|
||||
.. index:: ns-3 mobility
|
||||
|
||||
Mobility in ns-3 is handled by an object (a MobilityModel) aggregated to
|
||||
an ns-3 node. The MobilityModel is able to report the position of the
|
||||
object in the ns-3 space. This is a slightly different model from, for
|
||||
instance, EMANE, where location is associated with an interface, and the
|
||||
CORE GUI, where mobility is configured by right-clicking on a WiFi
|
||||
cloud.
|
||||
|
||||
The CORE GUI supports the ability to render the underlying ns-3 mobility
|
||||
model, if one is configured, on the CORE canvas. For example, the
|
||||
example program :file:`ns3wifirandomwalk.py` uses five nodes (by default) in
|
||||
a random walk mobility model. This can be executed by starting the
|
||||
core daemon from an ns-3 waf shell:
|
||||
|
||||
::
|
||||
|
||||
# sudo bash
|
||||
# cd /path/to/ns-3
|
||||
# ./waf shell
|
||||
# core-daemon
|
||||
|
||||
and, in a separate window, starting the CORE GUI (not from a waf shell),
selecting the *Execute Python script...* option from the File menu, and
choosing the :file:`ns3wifirandomwalk.py` script.
|
||||
|
||||
The program invokes ns-3 mobility through the following statement:
|
||||
|
||||
::
|
||||
|
||||
session.setuprandomwalkmobility(bounds=(1000.0, 750.0, 0))
|
||||
|
||||
This can be replaced by a different mode of mobility, in which nodes
|
||||
are placed according to a constant mobility model, and a special
|
||||
API call to the CoreNs3Net object is made to use the CORE canvas
|
||||
positions.
|
||||
|
||||
::
|
||||
|
||||
- session.setuprandomwalkmobility(bounds=(1000.0, 750.0, 0))
|
||||
+ session.setupconstantmobility()
|
||||
+ wifi.usecorepositions()
|
||||
|
||||
|
||||
In this mode, dragging the nodes around on the canvas will
|
||||
cause CORE to update the position of the underlying ns-3 nodes.
|
||||
|
||||
|
||||
.. _ns-3_Under_Development:
|
||||
|
||||
Under Development
|
||||
=================
|
||||
|
||||
.. index:: limitations with ns-3
|
||||
|
||||
Support for ns-3 is fairly new and still under active development.
|
||||
Improved support may be found in the development snapshots available on the web.
|
||||
|
||||
The following limitations will be addressed in future releases:
|
||||
|
||||
* GUI configuration and control - currently ns-3 networks can only be
|
||||
instantiated from a Python script or from the GUI hooks facility.
|
||||
|
||||
* Model support - currently the WiFi model is supported. The WiMAX and 3GPP LTE
|
||||
models have been experimented with, but are not currently working with the
|
||||
TapBridge device.
|
||||
|
||||
|
|
@ -1,60 +0,0 @@
|
|||
.. This file is part of the CORE Manual
|
||||
(c)2012 the Boeing Company
|
||||
|
||||
.. _Performance:
|
||||
|
||||
.. include:: constants.txt
|
||||
|
||||
***********
|
||||
Performance
|
||||
***********
|
||||
|
||||
.. index:: performance
|
||||
|
||||
.. index:: number of nodes
|
||||
|
||||
The top question about the performance of CORE is often
|
||||
*how many nodes can it handle?* The answer depends on several factors:
|
||||
|
||||
* Hardware - the number and speed of processors in the computer, the available
|
||||
processor cache, RAM memory, and front-side bus speed may greatly affect
|
||||
overall performance.
|
||||
* Operating system version - the Linux distribution and the specific kernel version
  used will affect overall performance.
|
||||
* Active processes - all nodes share the same CPU resources, so if one or more
|
||||
nodes is performing a CPU-intensive task, overall performance will suffer.
|
||||
* Network traffic - the more packets sent around the virtual network, the
  greater the CPU usage.
|
||||
* GUI usage - widgets that run periodically, mobility scenarios, and other GUI
|
||||
interactions generally consume CPU cycles that may be needed for emulation.
|
||||
|
||||
On a typical single-CPU Xeon 3.0GHz server machine with 2GB RAM running Linux,
|
||||
we have found it reasonable to run 30-75 nodes running
|
||||
OSPFv2 and OSPFv3 routing. On this hardware CORE can instantiate 100 or more
|
||||
nodes, but at that point performance depends critically on what each node is
doing.
|
||||
|
||||
.. index:: network performance
|
||||
|
||||
Because this software is primarily a network emulator, the more appropriate
|
||||
question is *how much network traffic can it handle?* On the same 3.0GHz server
|
||||
described above, running Linux, about 300,000 packets-per-second can be
|
||||
pushed through the system. The number of hops and the size of the packets are
|
||||
less important. The limiting factor is the number of times that the operating
|
||||
system needs to handle a packet. The 300,000 pps figure represents the number
|
||||
of times the system as a whole needed to deal with a packet. As more network
|
||||
hops are added, this increases the number of context switches and decreases the
|
||||
throughput seen on the full length of the network path.
|
||||
|
||||
.. NOTE::
|
||||
The right question to be asking is *"how much traffic?"*,
|
||||
not *"how many nodes?"*.
|
||||
|
||||
For a more detailed study of performance in CORE, refer to the following publications:
|
||||
|
||||
* J\. Ahrenholz, T. Goff, and B. Adamson, Integration of the CORE and EMANE Network Emulators, Proceedings of the IEEE Military Communications Conference 2011, November 2011.
|
||||
|
||||
* J\. Ahrenholz, Comparison of CORE Network Emulation Platforms, Proceedings of the IEEE Military Communications Conference 2010, pp. 864-869, November 2010.
|
||||
|
||||
* J\. Ahrenholz, C. Danilov, T. Henderson, and J.H. Kim, CORE: A real-time network emulator, Proceedings of IEEE MILCOM Conference, 2008.
|
||||
|
|
@ -1,2 +0,0 @@
|
|||
sphinx==1.6.3
|
||||
sphinx_rtd_theme==0.2.4
|
|
@ -1,124 +0,0 @@
|
|||
.. This file is part of the CORE Manual
|
||||
(c)2012 the Boeing Company
|
||||
|
||||
.. _Python_Scripting:
|
||||
|
||||
****************
|
||||
Python Scripting
|
||||
****************
|
||||
|
||||
.. index:: Python scripting
|
||||
|
||||
CORE can be used via the :ref:`GUI <Using_the_CORE_GUI>` or Python scripting.
|
||||
Writing your own Python scripts offers a rich programming
|
||||
environment with complete control over all aspects of the emulation.
|
||||
This chapter provides a brief introduction to scripting. Most of the
|
||||
documentation is available from sample scripts,
|
||||
or online via interactive Python.
|
||||
|
||||
.. index:: sample Python scripts
|
||||
|
||||
The best starting point is the sample scripts that are
|
||||
included with CORE. If you have a CORE source tree, the example script files
|
||||
can be found under :file:`core/daemon/examples/netns/`. When CORE is installed
|
||||
from packages, the example script files will be in
|
||||
:file:`/usr/share/core/examples/netns/` (or the :file:`/usr/local/...` prefix
|
||||
when installed from source.) For the most part, the example scripts
|
||||
are self-documenting; see the comments contained within the Python code.
|
||||
|
||||
The scripts should be run with root privileges because they create new
|
||||
network namespaces. In general, a CORE Python script does not connect to the
|
||||
CORE daemon, :file:`core-daemon`; in fact, :file:`core-daemon`
|
||||
is just another Python script
|
||||
that uses the CORE Python modules and exchanges messages with the GUI.
|
||||
To connect the GUI to your scripts, see the included sample scripts that
|
||||
allow for GUI connections.
|
||||
|
||||
Here are the basic elements of a CORE Python script:
|
||||
::
|
||||
|
||||
from core.session import Session
|
||||
from core.netns import nodes
|
||||
|
||||
session = Session(1, persistent=True)
|
||||
node1 = session.add_object(cls=nodes.CoreNode, name="n1")
|
||||
node2 = session.add_object(cls=nodes.CoreNode, name="n2")
|
||||
hub1 = session.add_object(cls=nodes.HubNode, name="hub1")
|
||||
node1.newnetif(hub1, ["10.0.0.1/24"])
|
||||
node2.newnetif(hub1, ["10.0.0.2/24"])
|
||||
|
||||
node1.vnodeclient.icmd(["ping", "-c", "5", "10.0.0.2"])
|
||||
session.shutdown()
|
||||
|
||||
|
||||
The above script creates a CORE session having two nodes connected with a hub.
|
||||
The first node pings the second node with 5 ping packets; the result is
|
||||
displayed on screen.
|
||||
|
||||
A good way to learn about the CORE Python modules is via interactive Python.
|
||||
Scripts can be run using *python -i*. Cut and paste the simple script
|
||||
above and you will have two nodes connected by a hub, with one node running
|
||||
a test ping to the other.
|
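For example, assuming the script above has been saved as :file:`myscript.py`
(a hypothetical file name), it can be run interactively with:

::

    sudo python -i myscript.py

The *-i* flag leaves you at a Python prompt after the script finishes, so the
objects created by the script remain available for inspection.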
||||
|
||||
The CORE Python modules are documented with comments in the code. From an
|
||||
interactive Python shell, you can retrieve online help about the various
|
||||
classes and methods; for example *help(nodes.CoreNode)* or
|
||||
*help(Session)*.
|
||||
|
||||
.. index:: daemon versus script
|
||||
.. index:: script versus daemon
|
||||
.. index:: script with GUI support
|
||||
.. index:: connecting GUI to script
|
||||
|
||||
.. NOTE::
|
||||
The CORE daemon :file:`core-daemon` manages a list of sessions and allows
|
||||
the GUI to connect and control sessions. Your Python script uses the
|
||||
same CORE modules but runs independently of the daemon. The daemon
|
||||
does not need to be running for your script to work.
|
||||
|
||||
The session created by a Python script may be viewed in the GUI if certain
|
||||
steps are followed. The GUI has a :ref:`File_Menu`, *Execute Python script...*
|
||||
option for running a script and automatically connecting to it. Once connected,
|
||||
normal GUI interaction is possible, such as moving and double-clicking nodes,
|
||||
activating Widgets, etc.
|
||||
|
||||
The script should have a line such as the following for running it from
|
||||
the GUI.
|
||||
::
|
||||
|
||||
if __name__ == "__main__" or __name__ == "__builtin__":
|
||||
main()
|
||||
|
||||
Also, the script should add its session to the session list after creating it.
|
||||
A global ``server`` variable is exposed to the script pointing to the
|
||||
``CoreServer`` object in the :file:`core-daemon`.
|
||||
::
|
||||
|
||||
def add_to_server(session):
    ''' Add this session to the server's list if this script is executed from
    the core-daemon server.
    '''
    global server
    try:
        server.add_session(session)
        return True
    except NameError:
        return False
|
||||
|
||||
::
|
||||
|
||||
session = Session(persistent=True)
|
||||
add_to_server(session)
|
||||
|
||||
|
||||
Finally, nodes and networks need to have their coordinates set to something,
|
||||
otherwise they will be grouped at the coordinates ``<0, 0>``. First sketching
|
||||
the topology in the GUI and then using the *Export Python script* option may
|
||||
help here.
|
||||
::
|
||||
|
||||
switch.setposition(x=80,y=50)
|
||||
|
||||
|
||||
A fully-worked example script that you can launch from the GUI is available
|
||||
in the file :file:`switch.py` in the examples directory.
|
1
docs/Makefile.am
Normal file
1
docs/Makefile.am
Normal file
|
@ -0,0 +1 @@
|
|||
EXTRA_DIST = $(wildcard *)
|
39
docs/architecture.md
Normal file
39
docs/architecture.md
Normal file
|
@ -0,0 +1,39 @@
|
|||
# CORE Architecture
|
||||
|
||||
* Table of Contents
|
||||
{:toc}
|
||||
|
||||
## Main Components
|
||||
|
||||
* CORE Daemon
|
||||
* Manages emulation sessions
|
||||
* Builds the emulated networks using kernel virtualization for nodes and some form of bridging and packet manipulation for virtual networks
|
||||
* Nodes and networks come together via interfaces installed on nodes
|
||||
* Controlled via the CORE GUI
|
||||
* Written in Python and can be scripted, giving direct control of scenarios
|
||||
* CORE GUI
|
||||
* GUI and daemon communicate using a custom, asynchronous, sockets-based API, known as the CORE API
|
||||
* Drag and drop creation for nodes and network interfaces
|
||||
* Can launch terminals for emulated nodes in running scenarios
|
||||
* Can save/open scenario files to recreate previous sessions
|
||||
* TCL/TK program
|
||||
|
||||

|
||||
|
||||
## How Does it Work?
|
||||
|
||||
A CORE node is a lightweight virtual machine. The CORE framework runs on Linux. CORE uses Linux network namespace virtualization to build virtual nodes, and ties them together with virtual networks using Linux Ethernet bridging.
|
||||
|
||||
### Linux
|
||||
|
||||
Linux network namespaces (also known as netns, LXC, or [Linux containers](http://lxc.sourceforge.net/)) are the primary virtualization technique used by CORE. Namespace support has been part of the mainline Linux kernel since 2.6.24. Most recent Linux distributions have namespaces-enabled kernels out of the box. A namespace is created using the ```clone()``` system call. Each namespace has its own process environment and private network stack. Network namespaces share the same filesystem in CORE.
|
||||
|
||||
CORE combines these namespaces with Linux Ethernet bridging to form networks. Link characteristics are applied using Linux Netem queuing disciplines. Ebtables provides Ethernet frame filtering on Linux bridges. Wireless networks are emulated by controlling which interfaces can send and receive with ebtables rules.
|
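As a rough sketch, the standard Linux tools described in the developer's guide can be used to inspect what CORE has built for a running session:

```shell
# inspect the pieces CORE assembles for a running session
brctl show       # Linux bridges forming the virtual networks
tc qdisc show    # netem queuing disciplines carrying the link effects
ebtables -L      # frame-filtering rules behind the wireless LAN emulation
```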
||||
|
||||
## Prior Work
|
||||
|
||||
The Tcl/Tk CORE GUI was originally derived from the open source [IMUNES](http://imunes.net) project from the University of Zagreb as a custom project within Boeing Research and Technology's Network Technology research group in 2004. Since then they have developed the CORE framework to use Linux virtualization, have developed a Python framework, and made numerous user- and kernel-space developments, such as support for wireless networks, IPsec, the ability to distribute emulations, simulation integration, and more. The IMUNES project also consists of userspace and kernel components.
|
||||
|
||||
## Open Source Project and Resources
|
||||
|
||||
CORE has been released by Boeing to the open source community under the BSD license. If you find CORE useful for your work, please contribute back to the project. Contributions can be as simple as reporting a bug, dropping a line of encouragement or technical suggestions to the mailing lists, or can also include submitting patches or maintaining aspects of the tool. For contributing to CORE, please visit [CORE GitHub](https://github.com/coreemu/core).
|
88
docs/ctrlnet.md
Normal file
88
docs/ctrlnet.md
Normal file
|
@ -0,0 +1,88 @@
|
|||
# CORE Control Network
|
||||
|
||||
* Table of Contents
|
||||
{:toc}
|
||||
|
||||
## Overview
|
||||
|
||||
The CORE control network allows the virtual nodes to communicate with their host environment. There are two types: the primary control network and auxiliary control networks. The primary control network is used mainly for communicating with the virtual nodes from host machines and for master-slave communications in a multi-server distributed environment. Auxiliary control networks have been introduced for routing traffic from namespace-hosted emulation software to the test network.
|
||||
|
||||
## Activating the Primary Control Network
|
||||
|
||||
Under the *Session Menu*, the *Options...* dialog has an option to set a *control network prefix*.
|
||||
|
||||
This can be set to a network prefix such as *172.16.0.0/24*. A bridge will be created on the host machine having the last address in the prefix range (e.g. *172.16.0.254*), and each node will have an extra *ctrl0* control interface configured with an address corresponding to its node number (e.g. *172.16.0.3* for *n3*.)
|
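With the example prefix above, the host can then reach a node directly over the control bridge; the address below is illustrative and follows the *n3* example:

```shell
# from the host, contact node n3 via its ctrl0 address
ping 172.16.0.3
```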
||||
|
||||
A default for the primary control network may also be specified by setting the *controlnet* line in the */etc/core/core.conf* configuration file which new sessions will use by default. To simultaneously run multiple sessions with control networks, the session option should be used instead of the *core.conf* default.
|
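A minimal sketch of such a default, using the example prefix above:

```shell
# /etc/core/core.conf
controlnet = 172.16.0.0/24
```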
||||
|
||||
**NOTE: If you have a large scenario with more than 253 nodes, use a control network prefix that allows more than the suggested */24*, such as */23* or greater.**
|
||||
|
||||
**IMPORTANT: Running a session with a control network can fail if a previous session has set up a control network and its bridge is still up. Close the previous session first or wait for it to complete. If that is not possible, the *core-daemon* may need to be restarted and the lingering bridge(s) removed manually.**
|
||||
|
||||
```shell
|
||||
# Restart the CORE Daemon
|
||||
sudo /etc/init.d/core-daemon restart
|
||||
|
||||
# Remove lingering control network bridges
|
||||
ctrlbridges=`brctl show | grep b.ctrl | awk '{print $1}'`
|
||||
for cb in $ctrlbridges; do
|
||||
sudo ifconfig $cb down
|
||||
sudo brctl delbr $cb
|
||||
done
|
||||
```
|
||||
|
||||
**TIP: If adjustments to the primary control network configuration made in */etc/core/core.conf* do not seem to take effect, check whether anything is set in the *Session Menu*, *Options...* dialog; it may need to be cleared. These per-session settings override the defaults in */etc/core/core.conf*.**
|
||||
|
||||
## Control Network in Distributed Sessions
|
||||
|
||||
When the primary control network is activated for a distributed session, a control network bridge will be created on each of the slave servers, with GRE tunnels back to the master server's bridge. The slave control bridges are not assigned an address. From the host, any of the nodes (local or remote) can be accessed, just like the single server case.
|
||||
|
||||
In some situations, remote emulated nodes need to communicate with the host on which they are running and not the master server. Multiple control network prefixes can be specified in either the session option or */etc/core/core.conf*, separated by spaces and beginning with the master server. Each entry has the form *"server:prefix"*. For example, if the servers *core1*, *core2*, and *core3* are assigned nodes in the scenario, and */etc/core/core.conf* is used instead of the session option:
|
||||
|
||||
```shell
|
||||
controlnet=core1:172.16.1.0/24 core2:172.16.2.0/24 core3:172.16.3.0/24
|
||||
```
|
||||
|
||||
Then, the control network bridges will be assigned as follows:
|
||||
|
||||
* core1 = 172.16.1.254 (assuming it is the master server),
|
||||
* core2 = 172.16.2.254
|
||||
* core3 = 172.16.3.254
|
||||
|
||||
Tunnels back to the master server will still be built, but it is up to the user to add appropriate routes if networking between control network prefixes is desired. The control network script may help with this.
|
||||
|
||||
## Control Network Script
|
||||
|
||||
A control network script may be specified using the *controlnet_updown_script* option in the */etc/core/core.conf* file. This script will be run after the bridge has been built (and address assigned) with the first argument being the name of the bridge, and the second argument being the keyword *"startup"*. The script will again be invoked prior to bridge removal with the second argument being the keyword *"shutdown"*.
|
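A minimal sketch of such a script (the actions inside each case are placeholders to be filled in for your environment):

```shell
#!/bin/sh
# invoked by CORE as: <script> <bridge-name> startup|shutdown
BRIDGE="$1"
case "$2" in
    startup)
        # e.g. add routes or firewall rules for the new control bridge here
        echo "control bridge $BRIDGE is up"
        ;;
    shutdown)
        # undo whatever the startup case did
        echo "control bridge $BRIDGE is going down"
        ;;
esac
```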
||||
|
||||
## Auxiliary Control Networks
|
||||
|
||||
Starting with EMANE 0.9.2, CORE runs EMANE instances within namespaces. Since it is advisable to separate the OTA traffic from other traffic, more than a single channel leading out of the namespace is needed. Up to three auxiliary control networks may be defined, using the lines *controlnet1*, *controlnet2* and *controlnet3* in the */etc/core/core.conf* file.
|
||||
|
||||
For example, having the following */etc/core/core.conf*:
|
||||
|
||||
```shell
|
||||
controlnet = core1:172.17.1.0/24 core2:172.17.2.0/24 core3:172.17.3.0/24
|
||||
controlnet1 = core1:172.18.1.0/24 core2:172.18.2.0/24 core3:172.18.3.0/24
|
||||
controlnet2 = core1:172.19.1.0/24 core2:172.19.2.0/24 core3:172.19.3.0/24
|
||||
```
|
||||
|
||||
This will activate the primary and two auxiliary control networks and add interfaces *ctrl0*, *ctrl1*, *ctrl2* to each node. One use case would be to assign *ctrl1* to the OTA manager device and *ctrl2* to the Event Service device in the EMANE Options dialog box and leave *ctrl0* for CORE control traffic.
|
||||
|
||||
**NOTE: *controlnet0* may be used in place of *controlnet* to configure the primary control network.**
|
||||
|
||||
Unlike the primary control network, the auxiliary control networks will not employ tunneling since their primary purpose is for efficiently transporting multicast EMANE OTA and event traffic. Note that there is no per-session configuration for auxiliary control networks.
|
||||
|
||||
To extend the auxiliary control networks across a distributed test environment, host network interfaces need to be added to them. The following lines in */etc/core/core.conf* will add host devices *eth1*, *eth2* and *eth3* to *controlnet1*, *controlnet2*, *controlnet3*:
|
||||
|
||||
```shell
|
||||
controlnetif1 = eth1
|
||||
controlnetif2 = eth2
|
||||
controlnetif3 = eth3
|
||||
```
|
||||
|
||||
**NOTE: There is no need to assign an interface to the primary control network because tunnels are formed between the master and the slaves using IP addresses that are provided in *servers.conf*.**
|
||||
|
||||
Shown below is a representative diagram of the configuration above.
|
||||
|
||||

|
113
docs/devguide.md
Normal file
113
docs/devguide.md
Normal file
|
@ -0,0 +1,113 @@
|
|||
# CORE Developer's Guide
|
||||
|
||||
* Table of Contents
|
||||
{:toc}
|
||||
|
||||
## Source Code Guide
|
||||
|
||||
The CORE source is written in several different programming languages, for historical reasons. Current development focuses on the Python modules and daemon. Here is a brief description of the source directories.
|
||||
|
||||
These are being actively developed as of CORE 5.1:
|
||||
|
||||
* *gui* - Tcl/Tk GUI. This uses Tcl/Tk because of its roots with the IMUNES
|
||||
project.
|
||||
* *daemon* - Python modules are found in the *daemon/core* directory, the
  daemon under *daemon/scripts/core-daemon*.
* *netns* - Python extension modules for Linux Network Namespace support are in *netns*.
|
||||
* *doc* - Documentation for the manual lives here in reStructuredText format.
|
||||
|
||||
Not actively being developed:
|
||||
|
||||
* *ns3* - Python ns3 script support for running CORE.
|
||||
|
||||
## The CORE API
|
||||
|
||||
The CORE API is used between different components of CORE for communication. The GUI communicates with the CORE daemon using the API. One emulation server communicates with another using the API. The API also allows other systems to interact with the CORE emulation. The API allows another system to add, remove, or modify nodes and links, and enables executing commands on the emulated systems. Wireless link parameters are updated on-the-fly based on node positions.
|
||||
|
||||
CORE listens on a local TCP port for API messages. The other system could be software running locally or another machine accessible across the network.
|
||||
|
||||
The CORE API is currently specified in a separate document, available from the CORE website.
|
||||
|
||||
## Linux network namespace Commands
|
||||
|
||||
Linux network namespace containers are often managed using the *Linux Container Tools* or *lxc-tools* package; see the lxc-tools website at http://lxc.sourceforge.net/ for more information. CORE does not use these management utilities, but includes its own set of tools for instantiating and configuring network namespace containers. This section describes these tools.
|
||||
|
||||
### vnoded command
|
||||
|
||||
The *vnoded* daemon is the program used to create a new namespace, and listen on a control channel for commands that may instantiate other processes. This daemon runs as PID 1 in the container. It is launched automatically by the CORE daemon. The control channel is a UNIX domain socket usually named */tmp/pycore.23098/n3*, for node 3 running on CORE session 23098, for example. Root privileges are required for creating a new namespace.
|
||||
|
||||
### vcmd command
|
||||
|
||||
The *vcmd* program is used to connect to the *vnoded* daemon in a Linux network namespace, for running commands in the namespace. The CORE daemon uses the same channel for setting up a node and running processes within it. This program has two required arguments, the control channel name, and the command line to be run within the namespace. This command does not need to run with root privileges.
|
||||
|
||||
When you double-click on a node in a running emulation, CORE will open a shell window for that node using a command such as:
|
||||
|
||||
```shell
|
||||
gnome-terminal -e vcmd -c /tmp/pycore.50160/n1 -- bash
|
||||
```
|
||||
|
||||
Similarly, the IPv4 routes Observer Widget will run a command to display the routing table using a command such as:
|
||||
|
||||
```shell
|
||||
vcmd -c /tmp/pycore.50160/n1 -- /sbin/ip -4 ro
|
||||
```
|
||||
|
||||
### core-cleanup script
|
||||
|
||||
A script named *core-cleanup* is provided to clean up any running CORE emulations. It will attempt to kill any remaining vnoded processes, kill any EMANE processes, remove the /tmp/pycore.* session directories, and remove any bridges or *ebtables* rules. With a *-d* option, it will also kill any running CORE daemon.
|
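For example:

```shell
# kill leftover vnoded/EMANE processes, remove session directories, bridges,
# and ebtables rules; -d also kills a running core-daemon
sudo core-cleanup -d
```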
||||
|
||||
### netns command
|
||||
|
||||
The *netns* command is not used by CORE directly. This utility can be used to run a command in a new network namespace for testing purposes. It does not open a control channel for receiving further commands.
|
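A minimal usage sketch, assuming the command to run is passed directly as arguments:

```shell
# start a shell in a brand new, unconfigured network namespace (for testing only)
sudo netns /bin/bash
```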
||||
|
||||
### Other Useful Commands
|
||||
|
||||
Here are some other Linux commands that are useful for managing the Linux network namespace emulation.
|
||||
|
||||
```shell
|
||||
# view the Linux bridging setup
|
||||
brctl show
|
||||
# view the netem rules used for applying link effects
|
||||
tc qdisc show
|
||||
# view the rules that make the wireless LAN work
|
||||
ebtables -L
|
||||
```
|
||||
|
||||
### Example Command Usage
|
||||
|
||||
Below is a transcript of creating two emulated nodes and connecting them together with a wired link:
|
||||
|
||||
```shell
|
||||
# create node 1 namespace container
|
||||
vnoded -c /tmp/n1.ctl -l /tmp/n1.log -p /tmp/n1.pid
|
||||
# create a virtual Ethernet (veth) pair, installing one end into node 1
|
||||
ip link add name n1.0.1 type veth peer name n1.0
|
||||
ip link set n1.0 netns `cat /tmp/n1.pid`
|
||||
vcmd -c /tmp/n1.ctl -- ip link set lo up
|
||||
vcmd -c /tmp/n1.ctl -- ip link set n1.0 name eth0 up
|
||||
vcmd -c /tmp/n1.ctl -- ip addr add 10.0.0.1/24 dev eth0
|
||||
|
||||
# create node 2 namespace container
|
||||
vnoded -c /tmp/n2.ctl -l /tmp/n2.log -p /tmp/n2.pid
|
||||
# create a virtual Ethernet (veth) pair, installing one end into node 2
|
||||
ip link add name n2.0.1 type veth peer name n2.0
|
||||
ip link set n2.0 netns `cat /tmp/n2.pid`
|
||||
vcmd -c /tmp/n2.ctl -- ip link set lo up
|
||||
vcmd -c /tmp/n2.ctl -- ip link set n2.0 name eth0 up
|
||||
vcmd -c /tmp/n2.ctl -- ip addr add 10.0.0.2/24 dev eth0
|
||||
|
||||
# bridge together nodes 1 and 2 using the other end of each veth pair
|
||||
brctl addbr b.1.1
|
||||
brctl setfd b.1.1 0
|
||||
brctl addif b.1.1 n1.0.1
|
||||
brctl addif b.1.1 n2.0.1
|
||||
ip link set n1.0.1 up
|
||||
ip link set n2.0.1 up
|
||||
ip link set b.1.1 up
|
||||
|
||||
# display connectivity and ping from node 1 to node 2
|
||||
brctl show
|
||||
vcmd -c /tmp/n1.ctl -- ping 10.0.0.2
|
||||
```
|
||||
|
||||
The above example script can be found as *twonodes.sh* in the *examples/netns* directory. Use *core-cleanup* to clean up after the script.
|
143
docs/emane.md
Normal file
143
docs/emane.md
Normal file
|
@ -0,0 +1,143 @@
|
|||
# CORE/EMANE
|
||||
|
||||
* Table of Contents
|
||||
{:toc}
|
||||
|
||||
## What is EMANE?
|
||||
|
||||
The Extendable Mobile Ad-hoc Network Emulator (EMANE) allows heterogeneous network emulation using a pluggable MAC and PHY layer architecture. The EMANE framework provides an implementation architecture for modeling different radio interface types in the form of *Network Emulation Modules* (NEMs) and incorporating these modules into a real-time emulation running in a distributed environment.
|
||||
|
||||
EMANE is developed by the U.S. Naval Research Laboratory (NRL) Code 5522 and Adjacent Link LLC, who maintain these websites:
|
||||
|
||||
* http://www.nrl.navy.mil/itd/ncs/products/emane
|
||||
* http://www.adjacentlink.com/
|
||||
|
||||
Instead of building Linux Ethernet bridging networks with CORE, higher-fidelity wireless networks can be emulated using EMANE bound to virtual devices. CORE emulates layers 3 and above (network, session, application) with its virtual network stacks and process space for protocols and applications, while EMANE emulates layers 1 and 2 (physical and data link) using its pluggable PHY and MAC models.
|
||||
|
||||
The interface between CORE and EMANE is a TAP device. CORE builds the virtual node using Linux network namespaces, installs the TAP device into the namespace and instantiates one EMANE process in the namespace. The EMANE process binds a user space socket to the TAP device for sending and receiving data from CORE.
|
||||
|
||||
An EMANE instance sends and receives OTA traffic to and from other EMANE instances via a control port (e.g. *ctrl0*, *ctrl1*). It also sends and receives Events to and from the Event Service using the same or a different control port. EMANE models are configured through CORE's WLAN configuration dialog. A corresponding EmaneModel Python class is sub-classed for each supported EMANE model, to provide configuration items and their mapping to XML files. This way new models can be easily supported. When CORE starts the emulation, it generates the appropriate XML files that specify the EMANE NEM configuration, and launches the EMANE daemons.
|
||||
|
||||
Some EMANE models support location information to determine when packets should be dropped. EMANE has an event system where location events are broadcast to all NEMs. CORE can generate these location events when nodes are moved on the canvas. The canvas size and scale dialog has controls for mapping the X,Y coordinate system to a latitude, longitude geographic system that EMANE uses. When specified in the *core.conf* configuration file, CORE can also subscribe to EMANE location events and move the nodes on the canvas as they are moved in the EMANE emulation. This would occur when an Emulation Script Generator, for example, is running a mobility script.
|
||||
|
||||
## EMANE Configuration
|
||||
|
||||
The CORE configuration file */etc/core/core.conf* has options specific to EMANE. An example emane section from the *core.conf* file is shown below:
|
||||
|
||||
```shell
|
||||
# EMANE configuration
|
||||
emane_platform_port = 8101
|
||||
emane_transform_port = 8201
|
||||
emane_event_monitor = False
|
||||
#emane_models_dir = /home/username/.core/myemane
|
||||
# EMANE log level range [0,4] default: 2
|
||||
emane_log_level = 2
|
||||
emane_realtime = True
|
||||
```
|
||||
|
||||
EMANE can be installed from deb or RPM packages or from source. See the [EMANE GitHub](https://github.com/adjacentlink/emane) for full details.
|
||||
|
||||
Here are quick instructions for installing all EMANE packages:
|
||||
|
||||
```shell
|
||||
# install dependencies
|
||||
sudo apt-get install libssl-dev libxml-libxml-perl libxml-simple-perl
|
||||
wget https://adjacentlink.com/downloads/emane/emane-1.2.1-release-1.ubuntu-16_04.amd64.tar.gz
|
||||
tar xzf emane-1.2.1-release-1.ubuntu-16_04.amd64.tar.gz
|
||||
sudo dpkg -i emane-1.2.1-release-1/deb/ubuntu-16_04/amd64/*.deb
|
||||
```
|
||||
|
||||
If you have an EMANE event generator (e.g. mobility or pathloss scripts) and want to have CORE subscribe to EMANE location events, set the following line in the */etc/core/core.conf* configuration file:
|
||||
|
||||
```shell
|
||||
emane_event_monitor = True
|
||||
```
|
||||
|
||||
Do not set the above option to True if you want to manually drag nodes around on the canvas to update their location in EMANE.
|
||||
|
||||
Another common issue when installing EMANE from source is that the default configure prefix will place the DTD files in */usr/local/share/emane/dtd* while CORE expects them in */usr/share/emane/dtd*.
|
||||
|
||||
A symbolic link will fix this:
|
||||
|
||||
```shell
|
||||
sudo ln -s /usr/local/share/emane /usr/share/emane
|
||||
```
|
||||
|
||||
## Custom EMANE Models
|
||||
|
||||
CORE supports custom developed EMANE models by way of dynamically loading user created python files that represent the model. Custom EMANE models should be placed within the path defined by **emane_models_dir** in the CORE configuration file. This path cannot end in **/emane**.
|
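A sketch of the setup, reusing the directory from the commented *emane_models_dir* example above (the model file name is illustrative):

```shell
# place the custom model where CORE can find it
mkdir -p /home/username/.core/myemane
cp examplemodel.py /home/username/.core/myemane/
# then set in /etc/core/core.conf:
#   emane_models_dir = /home/username/.core/myemane
```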
||||
|
||||
Here is an example model with documentation describing functionality:
|
||||
[Example Model](examplemodel.html)
|
||||
|
||||
|
||||
|
||||
## Single PC with EMANE
|
||||
|
||||
This section describes running CORE and EMANE on a single machine. This is the default mode of operation when building an EMANE network with CORE. The OTA manager and Event service interface are set to use *ctrl0* and the virtual nodes use the primary control channel for communicating with one another. The primary control channel is automatically activated when a scenario involves EMANE. Using the primary control channel prevents your emulation session from sending multicast traffic on your local network and interfering with other EMANE users.
|
||||
|
||||
EMANE is configured through a WLAN node, because it is all about emulating wireless radio networks. Once a node is linked to a WLAN cloud configured with an EMANE model, the radio interface on that node may also be configured separately (apart from the cloud.)
|
||||
|
||||
Double-click on a WLAN node to invoke the WLAN configuration dialog. Click the *EMANE* tab; when EMANE has been properly installed, EMANE wireless modules should be listed in the *EMANE Models* list. (You may need to restart the CORE daemon if it was running prior to installing the EMANE Python bindings.) Click on a model name to enable it.
|
||||
|
||||
When an EMANE model is selected in the *EMANE Models* list, clicking on the *model options* button causes the GUI to query the CORE daemon for configuration items. Each model will have different parameters; refer to the EMANE documentation for an explanation of each item. The default values are presented in the dialog. Clicking *Apply* and *Apply* again will store the EMANE model selections.
|
||||
|
||||
The *EMANE options* button allows specifying some global parameters for EMANE, some of which are necessary for distributed operation.
|
||||
|
||||
The RF-PIPE and IEEE 802.11abg models use a Universal PHY that supports geographic location information for determining pathloss between nodes. A default latitude and longitude location is provided by CORE and this location-based pathloss is enabled by default; this is the *pathloss mode* setting for the Universal PHY. Moving a node on the canvas while the emulation is running generates location events for EMANE. To view or change the geographic location or scale of the canvas use the *Canvas Size and Scale* dialog available from the *Canvas* menu.
|
||||
|
||||
Note that conversion between geographic and Cartesian coordinate systems is done using UTM (Universal Transverse Mercator) projection, where different zones of 6 degree longitude bands are defined. The location events generated by CORE may become inaccurate near the zone boundaries for very large scenarios that span multiple UTM zones. It is recommended that EMANE location scripts be used to achieve geo-location accuracy in this situation.
|
||||
|
||||
Clicking the green *Start* button launches the emulation and causes TAP devices to be created in the virtual nodes that are linked to the EMANE WLAN. These devices appear with interface names such as eth0, eth1, etc. The EMANE processes should now be running in each namespace. For a four node scenario:
|
||||
|
||||
```shell
|
||||
ps -aef | grep emane
|
||||
root 1063 969 0 11:46 ? 00:00:00 emane -d --logl 3 -r -f /tmp/pycore.59992/emane4.log /tmp/pycore.59992/platform4.xml
|
||||
root 1117 959 0 11:46 ? 00:00:00 emane -d --logl 3 -r -f /tmp/pycore.59992/emane2.log /tmp/pycore.59992/platform2.xml
|
||||
root 1179 942 0 11:46 ? 00:00:00 emane -d --logl 3 -r -f /tmp/pycore.59992/emane1.log /tmp/pycore.59992/platform1.xml
|
||||
root 1239 979 0 11:46 ? 00:00:00 emane -d --logl 3 -r -f /tmp/pycore.59992/emane5.log /tmp/pycore.59992/platform5.xml
|
||||
```
|
||||
|
||||
The example above shows the EMANE processes started by CORE. To view the configuration generated by CORE, look in the */tmp/pycore.nnnnn/* session directory for a *platform.xml* file and other XML files. One easy way to view this information is by double-clicking one of the virtual nodes, and typing *cd ..* in the shell to go up to the session directory.
|
||||
|
||||

|
||||
|
||||
## Distributed EMANE
|
||||
|
||||
Running CORE and EMANE distributed among two or more emulation servers is similar to running on a single machine. There are a few key configuration items that need to be set in order to be successful, and those are outlined here.
|
||||
|
||||
It is a good idea to maintain separate networks for data (OTA) and control. The control network may be a shared laboratory network, for example, and you do not want multicast traffic on the data network to interfere with other EMANE users. Furthermore, control traffic could interfere with the OTA latency and throughput and might affect emulation fidelity. The examples described here will use *eth0* as a control interface and *eth1* as a data interface, although using separate interfaces is not strictly required. Note that these interface names refer to interfaces present on the host machine, not virtual interfaces within a node.
|
||||
|
||||
**IMPORTANT: If an auxiliary control network is used, an interface on the host has to be assigned to that network.**
|
||||
|
||||
Each machine that will act as an emulation server needs to have CORE and EMANE installed.
|
||||
|
||||
The IP addresses of the available servers are configured from the CORE emulation servers dialog box (choose *Session* then *Emulation servers...*). This list of servers is stored in a *~/.core/servers.conf* file. The dialog shows available servers, some or all of which may be assigned to nodes on the canvas.
|
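The file lists one server per line; the sketch below uses assumed names, addresses, and the default CORE API port:

```shell
# ~/.core/servers.conf
core2 192.168.0.2 4038
core3 192.168.0.3 4038
```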
||||
|
||||
Nodes need to be assigned to emulation servers. Select several nodes, right-click them, and choose *Assign to* and the name of the desired server. When a node is not assigned to any emulation server, it will be emulated locally. The local machine that the GUI connects with is considered the "master" machine, which in turn connects to the other emulation server "slaves". Public key SSH should be configured from the master to the slaves.
|
||||
|
||||
Under the *EMANE* tab of the EMANE WLAN, click on the *EMANE options* button. This brings up the emane configuration dialog. The *enable OTA Manager channel* should be set to *on*. The *OTA Manager device* and *Event Service device* should be set to a control network device. For example, if you have a primary and auxiliary control network (i.e. controlnet and controlnet1), and you want the OTA traffic to have its dedicated network, set the OTA Manager device to *ctrl1* and the Event Service device to *ctrl0*. The EMANE models can be configured. Click *Apply* to save these settings.
|
||||
|
||||

|
||||
|
||||
**HINT:**
|
||||
Here is a quick checklist for distributed emulation with EMANE.
|
||||
|
||||
1. Follow the steps outlined for normal CORE.
|
||||
2. Under the *EMANE* tab of the EMANE WLAN, click on *EMANE options*.
|
||||
3. Turn on the *OTA Manager channel* and set the *OTA Manager device*.
|
||||
Also set the *Event Service device*.
|
||||
4. Select groups of nodes, right-click them, and assign them to servers
|
||||
using the *Assign to* menu.
|
||||
5. Synchronize your machines' clocks prior to starting the emulation,
|
||||
using *ntp* or *ptp*. Some EMANE models are sensitive to timing.
|
||||
6. Press the *Start* button to launch the distributed emulation.
|
||||
|
||||
|
||||
Now when the Start button is used to instantiate the emulation, the local CORE Python daemon will connect to other emulation servers that have been assigned to nodes. Each server will have its own session directory where the *platform.xml* file and other EMANE XML files are generated. The NEM IDs are automatically coordinated across servers so there is no overlap. Each server also gets its own Platform ID.
|
||||
|
||||
An Ethernet device is used for disseminating multicast EMANE events, as specified in the *configure emane* dialog. EMANE's Event Service can be run with mobility or pathloss scripts as described in the *Single PC with EMANE* section above. If CORE is not subscribed to location events, it will generate them as nodes are moved on the canvas.
|
||||
|
||||
Double-clicking on a node during runtime will cause the GUI to attempt to SSH to the emulation server for that node and run an interactive shell. The public key SSH configuration should be tested with all emulation servers prior to starting the emulation.
|
||||
|
||||

|
239
docs/examplemodel.html
Normal file
239
docs/examplemodel.html
Normal file
|
@ -0,0 +1,239 @@
|
|||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<meta http-equiv="content-type" content="text/html;charset=utf-8">
|
||||
<title>examplemodel.py</title>
|
||||
<link rel="stylesheet" href="pycco.css">
|
||||
</head>
|
||||
<body>
|
||||
<div id='container'>
|
||||
<div id="background"></div>
|
||||
<div class='section'>
|
||||
<div class='docs'><h1>examplemodel.py</h1></div>
|
||||
</div>
|
||||
<div class='clearall'>
|
||||
<div class='section' id='section-0'>
|
||||
<div class='docs'>
|
||||
<div class='octowrap'>
|
||||
<a class='octothorpe' href='#section-0'>#</a>
|
||||
</div>
|
||||
|
||||
</div>
|
||||
<div class='code'>
|
||||
<div class="highlight"><pre><span></span><span class="kn">from</span> <span class="nn">core.emane</span> <span class="kn">import</span> <span class="n">emanemanifest</span>
|
||||
<span class="kn">from</span> <span class="nn">core.emane</span> <span class="kn">import</span> <span class="n">emanemodel</span></pre></div>
|
||||
</div>
|
||||
</div>
|
||||
<div class='clearall'></div>
|
||||
<div class='section' id='section-1'>
|
||||
<div class='docs'>
|
||||
<div class='octowrap'>
|
||||
<a class='octothorpe' href='#section-1'>#</a>
|
||||
</div>
|
||||
<h1>Custom EMANE Model</h1>
|
||||
</div>
|
||||
<div class='code'>
|
||||
<div class="highlight"><pre><span class="k">class</span> <span class="nc">ExampleModel</span><span class="p">(</span><span class="n">emanemodel</span><span class="o">.</span><span class="n">EmaneModel</span><span class="p">):</span></pre></div>
|
||||
</div>
|
||||
</div>
|
||||
<div class='clearall'></div>
|
||||
<div class='section' id='section-2'>
|
||||
<div class='docs'>
|
||||
<div class='octowrap'>
|
||||
<a class='octothorpe' href='#section-2'>#</a>
|
||||
</div>
|
||||
<h2>MAC Definition</h2>
|
||||
</div>
|
||||
<div class='code'>
|
||||
<div class="highlight"><pre></pre></div>
|
||||
</div>
|
||||
</div>
|
||||
<div class='clearall'></div>
|
||||
<div class='section' id='section-3'>
|
||||
<div class='docs'>
|
||||
<div class='octowrap'>
|
||||
<a class='octothorpe' href='#section-3'>#</a>
|
||||
</div>
|
||||
<p>Defines the emane model name that will show up in the GUI.</p>
|
||||
</div>
|
||||
<div class='code'>
|
||||
<div class="highlight"><pre> <span class="n">name</span> <span class="o">=</span> <span class="s2">"emane_example"</span></pre></div>
|
||||
</div>
|
||||
</div>
|
||||
<div class='clearall'></div>
|
||||
<div class='section' id='section-4'>
|
||||
<div class='docs'>
|
||||
<div class='octowrap'>
|
||||
<a class='octothorpe' href='#section-4'>#</a>
|
||||
</div>
|
||||
<p>Defines the mac library that the model will reference.</p>
|
||||
</div>
|
||||
<div class='code'>
|
||||
<div class="highlight"><pre> <span class="n">mac_library</span> <span class="o">=</span> <span class="s2">"rfpipemaclayer"</span></pre></div>
|
||||
</div>
|
||||
</div>
|
||||
<div class='clearall'></div>
|
||||
<div class='section' id='section-5'>
|
||||
<div class='docs'>
|
||||
<div class='octowrap'>
|
||||
<a class='octothorpe' href='#section-5'>#</a>
|
||||
</div>
|
||||
<p>Defines the mac manifest file that will be parsed to obtain configuration options, that will be displayed
|
||||
within the GUI.</p>
|
||||
</div>
|
||||
<div class='code'>
|
||||
<div class="highlight"><pre> <span class="n">mac_xml</span> <span class="o">=</span> <span class="s2">"/usr/share/emane/manifest/rfpipemaclayer.xml"</span></pre></div>
|
||||
</div>
|
||||
</div>
|
||||
<div class='clearall'></div>
|
||||
<div class='section' id='section-6'>
|
||||
<div class='docs'>
|
||||
<div class='octowrap'>
|
||||
<a class='octothorpe' href='#section-6'>#</a>
|
||||
</div>
|
||||
<p>Allows you to override options that are maintained within the manifest file above.</p>
|
||||
</div>
|
||||
<div class='code'>
|
||||
<div class="highlight"><pre> <span class="n">mac_defaults</span> <span class="o">=</span> <span class="p">{</span>
|
||||
<span class="s2">"pcrcurveuri"</span><span class="p">:</span> <span class="s2">"/usr/share/emane/xml/models/mac/rfpipe/rfpipepcr.xml"</span><span class="p">,</span>
|
||||
<span class="p">}</span></pre></div>
|
||||
</div>
|
||||
</div>
|
||||
<div class='clearall'></div>
|
||||
<div class='section' id='section-7'>
|
||||
<div class='docs'>
|
||||
<div class='octowrap'>
|
||||
<a class='octothorpe' href='#section-7'>#</a>
|
||||
</div>
|
||||
<p>Parses the manifest file and converts configurations into core supported formats.</p>
|
||||
</div>
|
||||
<div class='code'>
|
||||
<div class="highlight"><pre> <span class="n">mac_config</span> <span class="o">=</span> <span class="n">emanemanifest</span><span class="o">.</span><span class="n">parse</span><span class="p">(</span><span class="n">mac_xml</span><span class="p">,</span> <span class="n">mac_defaults</span><span class="p">)</span></pre></div>
|
||||
</div>
|
||||
</div>
|
||||
<div class='clearall'></div>
|
||||
<div class='section' id='section-8'>
|
||||
<div class='docs'>
|
||||
<div class='octowrap'>
|
||||
<a class='octothorpe' href='#section-8'>#</a>
|
||||
</div>
|
||||
<h2>PHY Definition</h2>
|
||||
<p><strong>NOTE: phy configuration will default to the universal model as seen below and the below section does not
|
||||
have to be included.</strong></p>
|
||||
</div>
|
||||
<div class='code'>
|
||||
<div class="highlight"><pre></pre></div>
|
||||
</div>
|
||||
</div>
|
||||
<div class='clearall'></div>
|
||||
<div class='section' id='section-9'>
|
||||
<div class='docs'>
|
||||
<div class='octowrap'>
|
||||
<a class='octothorpe' href='#section-9'>#</a>
|
||||
</div>
|
||||
<p>Defines the phy library that the model will reference, used if you need to provide a custom phy.</p>
|
||||
</div>
|
||||
<div class='code'>
|
||||
<div class="highlight"><pre> <span class="n">phy_library</span> <span class="o">=</span> <span class="bp">None</span></pre></div>
|
||||
</div>
|
||||
</div>
|
||||
<div class='clearall'></div>
|
||||
<div class='section' id='section-10'>
|
||||
<div class='docs'>
|
||||
<div class='octowrap'>
|
||||
<a class='octothorpe' href='#section-10'>#</a>
|
||||
</div>
|
||||
<p>Defines the phy manifest file that will be parsed to obtain configuration options, that will be displayed
|
||||
within the GUI.</p>
|
||||
</div>
|
||||
<div class='code'>
|
||||
<div class="highlight"><pre> <span class="n">phy_xml</span> <span class="o">=</span> <span class="s2">"/usr/share/emane/manifest/emanephy.xml"</span></pre></div>
|
||||
</div>
|
||||
</div>
|
||||
<div class='clearall'></div>
|
||||
<div class='section' id='section-11'>
|
||||
<div class='docs'>
|
||||
<div class='octowrap'>
|
||||
<a class='octothorpe' href='#section-11'>#</a>
|
||||
</div>
|
||||
<p>Allows you to override options that are maintained within the manifest file above or for the default universal
|
||||
model.</p>
|
||||
</div>
|
||||
<div class='code'>
|
||||
<div class="highlight"><pre> <span class="n">phy_defaults</span> <span class="o">=</span> <span class="p">{</span>
|
||||
<span class="s2">"subid"</span><span class="p">:</span> <span class="s2">"1"</span><span class="p">,</span>
|
||||
<span class="s2">"propagationmodel"</span><span class="p">:</span> <span class="s2">"2ray"</span><span class="p">,</span>
|
||||
<span class="s2">"noisemode"</span><span class="p">:</span> <span class="s2">"none"</span>
|
||||
<span class="p">}</span></pre></div>
|
||||
</div>
|
||||
</div>
|
||||
<div class='clearall'></div>
|
||||
<div class='section' id='section-12'>
|
||||
<div class='docs'>
|
||||
<div class='octowrap'>
|
||||
<a class='octothorpe' href='#section-12'>#</a>
|
||||
</div>
|
||||
<p>Parses the manifest file and converts configurations into core supported formats.</p>
|
||||
</div>
|
||||
<div class='code'>
|
||||
<div class="highlight"><pre> <span class="n">phy_config</span> <span class="o">=</span> <span class="n">emanemanifest</span><span class="o">.</span><span class="n">parse</span><span class="p">(</span><span class="n">phy_xml</span><span class="p">,</span> <span class="n">phy_defaults</span><span class="p">)</span></pre></div>
|
||||
</div>
|
||||
</div>
|
||||
<div class='clearall'></div>
|
||||
<div class='section' id='section-13'>
|
||||
<div class='docs'>
|
||||
<div class='octowrap'>
|
||||
<a class='octothorpe' href='#section-13'>#</a>
|
||||
</div>
|
||||
<h2>Custom override options</h2>
|
||||
<p><strong>NOTE: these options default to what's seen below and do not have to be included.</strong></p>
|
||||
</div>
|
||||
<div class='code'>
|
||||
<div class="highlight"><pre></pre></div>
|
||||
</div>
|
||||
</div>
|
||||
<div class='clearall'></div>
|
||||
<div class='section' id='section-14'>
|
||||
<div class='docs'>
|
||||
<div class='octowrap'>
|
||||
<a class='octothorpe' href='#section-14'>#</a>
|
||||
</div>
|
||||
<p>Allows you to ignore options within phy/mac, typically used if you need to add a custom option for display
within the GUI.</p>
|
||||
</div>
|
||||
<div class='code'>
|
||||
<div class="highlight"><pre> <span class="n">config_ignore</span> <span class="o">=</span> <span class="nb">set</span><span class="p">()</span></pre></div>
|
||||
</div>
|
||||
</div>
|
||||
<div class='clearall'></div>
|
||||
<div class='section' id='section-15'>
|
||||
<div class='docs'>
|
||||
<div class='octowrap'>
|
||||
<a class='octothorpe' href='#section-15'>#</a>
|
||||
</div>
|
||||
<p>Allows you to override how options are displayed with the GUI, using the GUI format of
|
||||
"name:1-2|othername:3-4". This will be parsed into tabs, split by "|" and account for items based on the indexed
|
||||
numbers after ":" for including values in each tab.</p>
|
||||
</div>
|
||||
<div class='code'>
|
||||
<div class="highlight"><pre> <span class="n">config_groups_override</span> <span class="o">=</span> <span class="bp">None</span></pre></div>
|
||||
</div>
|
||||
</div>
|
||||
<div class='clearall'></div>
|
||||
<div class='section' id='section-16'>
|
||||
<div class='docs'>
|
||||
<div class='octowrap'>
|
||||
<a class='octothorpe' href='#section-16'>#</a>
|
||||
</div>
|
||||
<p>Allows you to override the default config matrix list. This value by default is the mac_config + phy_config, in
|
||||
that order.</p>
|
||||
</div>
|
||||
<div class='code'>
|
||||
<div class="highlight"><pre> <span class="n">config_matrix_override</span> <span class="o">=</span> <span class="bp">None</span>
|
||||
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
<div class='clearall'></div>
|
||||
</div>
|
||||
</body>
|
330
docs/exampleservice.html
Normal file
330
docs/exampleservice.html
Normal file
|
@ -0,0 +1,330 @@
|
|||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<meta http-equiv="content-type" content="text/html;charset=utf-8">
|
||||
<title>sample.py</title>
|
||||
<link rel="stylesheet" href="pycco.css">
|
||||
</head>
|
||||
<body>
|
||||
<div id='container'>
|
||||
<div id="background"></div>
|
||||
<div class='section'>
|
||||
<div class='docs'><h1>sample.py</h1></div>
|
||||
</div>
|
||||
<div class='clearall'>
|
||||
<div class='section' id='section-0'>
|
||||
<div class='docs'>
|
||||
<div class='octowrap'>
|
||||
<a class='octothorpe' href='#section-0'>#</a>
|
||||
</div>
|
||||
<p>Sample user-defined service.</p>
|
||||
</div>
|
||||
<div class='code'>
|
||||
<div class="highlight"><pre><span></span><span class="kn">from</span> <span class="nn">core.service</span> <span class="kn">import</span> <span class="n">CoreService</span>
|
||||
<span class="kn">from</span> <span class="nn">core.service</span> <span class="kn">import</span> <span class="n">ServiceMode</span></pre></div>
|
||||
</div>
|
||||
</div>
|
||||
<div class='clearall'></div>
|
||||
<div class='section' id='section-1'>
|
||||
<div class='docs'>
|
||||
<div class='octowrap'>
|
||||
<a class='octothorpe' href='#section-1'>#</a>
|
||||
</div>
|
||||
<h1>Custom CORE Service</h1>
|
||||
</div>
|
||||
<div class='code'>
|
||||
<div class="highlight"><pre><span class="k">class</span> <span class="nc">MyService</span><span class="p">(</span><span class="n">CoreService</span><span class="p">):</span></pre></div>
|
||||
</div>
|
||||
</div>
|
||||
<div class='clearall'></div>
|
||||
<div class='section' id='section-2'>
|
||||
<div class='docs'>
|
||||
<div class='octowrap'>
|
||||
<a class='octothorpe' href='#section-2'>#</a>
|
||||
</div>
|
||||
<h2>Service Attributes</h2>
|
||||
</div>
|
||||
<div class='code'>
|
||||
<div class="highlight"><pre></pre></div>
|
||||
</div>
|
||||
</div>
|
||||
<div class='clearall'></div>
|
||||
<div class='section' id='section-3'>
|
||||
<div class='docs'>
|
||||
<div class='octowrap'>
|
||||
<a class='octothorpe' href='#section-3'>#</a>
|
||||
</div>
|
||||
<p>Name used as a unique ID for this service; it is required and must contain no spaces.</p>
|
||||
</div>
|
||||
<div class='code'>
|
||||
<div class="highlight"><pre> <span class="n">name</span> <span class="o">=</span> <span class="s2">"MyService"</span></pre></div>
|
||||
</div>
|
||||
</div>
|
||||
<div class='clearall'></div>
|
||||
<div class='section' id='section-4'>
|
||||
<div class='docs'>
|
||||
<div class='octowrap'>
|
||||
<a class='octothorpe' href='#section-4'>#</a>
|
||||
</div>
|
||||
<p>Allows you to group services within the GUI under a common name.</p>
|
||||
</div>
|
||||
<div class='code'>
|
||||
<div class="highlight"><pre> <span class="n">group</span> <span class="o">=</span> <span class="s2">"Utility"</span></pre></div>
|
||||
</div>
|
||||
</div>
|
||||
<div class='clearall'></div>
|
||||
<div class='section' id='section-5'>
|
||||
<div class='docs'>
|
||||
<div class='octowrap'>
|
||||
<a class='octothorpe' href='#section-5'>#</a>
|
||||
</div>
|
||||
<p>Executables this service depends on to function; if an executable is not found on the path, the service will not be loaded.</p>
|
||||
</div>
|
||||
<div class='code'>
|
||||
<div class="highlight"><pre> <span class="n">executables</span> <span class="o">=</span> <span class="p">()</span></pre></div>
|
||||
</div>
|
||||
</div>
|
||||
<div class='clearall'></div>
|
||||
<div class='section' id='section-6'>
|
||||
<div class='docs'>
|
||||
<div class='octowrap'>
|
||||
<a class='octothorpe' href='#section-6'>#</a>
|
||||
</div>
|
||||
<p>Services that this service depends on for startup, tuple of service names.</p>
|
||||
</div>
|
||||
<div class='code'>
|
||||
<div class="highlight"><pre> <span class="n">dependencies</span> <span class="o">=</span> <span class="p">()</span></pre></div>
|
||||
</div>
|
||||
</div>
|
||||
<div class='clearall'></div>
|
||||
<div class='section' id='section-7'>
|
||||
<div class='docs'>
|
||||
<div class='octowrap'>
|
||||
<a class='octothorpe' href='#section-7'>#</a>
|
||||
</div>
|
||||
<p>Directories that this service will create within a node.</p>
|
||||
</div>
|
||||
<div class='code'>
|
||||
<div class="highlight"><pre> <span class="n">dirs</span> <span class="o">=</span> <span class="p">()</span></pre></div>
|
||||
</div>
|
||||
</div>
|
||||
<div class='clearall'></div>
|
||||
<div class='section' id='section-8'>
|
||||
<div class='docs'>
|
||||
<div class='octowrap'>
|
||||
<a class='octothorpe' href='#section-8'>#</a>
|
||||
</div>
|
||||
<p>Files that this service will generate; without a full path, a file is placed in the node's directory,
|
||||
e.g. /tmp/pycore.12345/n1.conf/myfile</p>
|
||||
</div>
|
||||
<div class='code'>
|
||||
<div class="highlight"><pre> <span class="n">configs</span> <span class="o">=</span> <span class="p">(</span><span class="s2">"sh myservice1.sh"</span><span class="p">,</span> <span class="s2">"sh myservice2.sh"</span><span class="p">)</span></pre></div>
|
||||
</div>
|
||||
</div>
|
||||
<div class='clearall'></div>
|
||||
<div class='section' id='section-9'>
|
||||
<div class='docs'>
|
||||
<div class='octowrap'>
|
||||
<a class='octothorpe' href='#section-9'>#</a>
|
||||
</div>
|
||||
<p>Commands used to start this service; any non-zero exit code will cause a failure.</p>
|
||||
</div>
|
||||
<div class='code'>
|
||||
<div class="highlight"><pre> <span class="n">startup</span> <span class="o">=</span> <span class="p">(</span><span class="s2">"sh </span><span class="si">%s</span><span class="s2">"</span> <span class="o">%</span> <span class="n">configs</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="s2">"sh </span><span class="si">%s</span><span class="s2">"</span> <span class="o">%</span> <span class="n">configs</span><span class="p">[</span><span class="mi">1</span><span class="p">])</span></pre></div>
|
||||
</div>
|
||||
</div>
|
||||
<div class='clearall'></div>
|
||||
<div class='section' id='section-10'>
|
||||
<div class='docs'>
|
||||
<div class='octowrap'>
|
||||
<a class='octothorpe' href='#section-10'>#</a>
|
||||
</div>
|
||||
<p>Commands used to validate that a service was started; any non-zero exit code will cause a failure.</p>
|
||||
</div>
|
||||
<div class='code'>
|
||||
<div class="highlight"><pre> <span class="n">validate</span> <span class="o">=</span> <span class="p">()</span></pre></div>
|
||||
</div>
|
||||
</div>
|
||||
<div class='clearall'></div>
|
||||
<div class='section' id='section-11'>
|
||||
<div class='docs'>
|
||||
<div class='octowrap'>
|
||||
<a class='octothorpe' href='#section-11'>#</a>
|
||||
</div>
|
||||
<p>Validation mode, used to determine startup success.</p>
|
||||
<ul><li>NON_BLOCKING - runs startup commands, and validates success with validation commands</li>
|
||||
<li>BLOCKING - runs startup commands, and validates success with the startup commands themselves</li>
|
||||
<li>TIMER - runs startup commands, and validates success by waiting for "validation_timer" alone</li></ul>
|
||||
</div>
|
||||
<div class='code'>
|
||||
<div class="highlight"><pre> <span class="n">validation_mode</span> <span class="o">=</span> <span class="n">ServiceMode</span><span class="o">.</span><span class="n">NON_BLOCKING</span></pre></div>
|
||||
</div>
|
||||
</div>
|
||||
<div class='clearall'></div>
|
||||
<div class='section' id='section-12'>
|
||||
<div class='docs'>
|
||||
<div class='octowrap'>
|
||||
<a class='octothorpe' href='#section-12'>#</a>
|
||||
</div>
|
||||
<p>Time for a service to wait before running validation commands or determining success in TIMER mode.</p>
|
||||
</div>
|
||||
<div class='code'>
|
||||
<div class="highlight"><pre> <span class="n">validation_timer</span> <span class="o">=</span> <span class="mi">0</span></pre></div>
|
||||
</div>
|
||||
</div>
|
||||
<div class='clearall'></div>
|
||||
<div class='section' id='section-13'>
|
||||
<div class='docs'>
|
||||
<div class='octowrap'>
|
||||
<a class='octothorpe' href='#section-13'>#</a>
|
||||
</div>
|
||||
<p>Shutdown commands to stop this service.</p>
|
||||
</div>
|
||||
<div class='code'>
|
||||
<div class="highlight"><pre> <span class="n">shutdown</span> <span class="o">=</span> <span class="p">()</span></pre></div>
|
||||
</div>
|
||||
</div>
|
||||
<div class='clearall'></div>
|
||||
<div class='section' id='section-14'>
|
||||
<div class='docs'>
|
||||
<div class='octowrap'>
|
||||
<a class='octothorpe' href='#section-14'>#</a>
|
||||
</div>
|
||||
<h2>On Load</h2>
|
||||
</div>
|
||||
<div class='code'>
|
||||
<div class="highlight"><pre> <span class="nd">@classmethod</span>
|
||||
<span class="k">def</span> <span class="nf">on_load</span><span class="p">(</span><span class="bp">cls</span><span class="p">):</span></pre></div>
|
||||
</div>
|
||||
</div>
|
||||
<div class='clearall'></div>
|
||||
<div class='section' id='section-15'>
|
||||
<div class='docs'>
|
||||
<div class='octowrap'>
|
||||
<a class='octothorpe' href='#section-15'>#</a>
|
||||
</div>
|
||||
<p>Provides a way to run some arbitrary logic when the service is loaded, possibly to help facilitate
|
||||
dynamic settings for the environment.</p>
|
||||
</div>
|
||||
<div class='code'>
|
||||
<div class="highlight"><pre> <span class="k">pass</span></pre></div>
|
||||
</div>
|
||||
</div>
|
||||
<div class='clearall'></div>
|
||||
<div class='section' id='section-16'>
|
||||
<div class='docs'>
|
||||
<div class='octowrap'>
|
||||
<a class='octothorpe' href='#section-16'>#</a>
|
||||
</div>
|
||||
<h2>Get Configs</h2>
|
||||
</div>
|
||||
<div class='code'>
|
||||
<div class="highlight"><pre> <span class="nd">@classmethod</span>
|
||||
<span class="k">def</span> <span class="nf">get_configs</span><span class="p">(</span><span class="bp">cls</span><span class="p">,</span> <span class="n">node</span><span class="p">):</span></pre></div>
|
||||
</div>
|
||||
</div>
|
||||
<div class='clearall'></div>
|
||||
<div class='section' id='section-17'>
|
||||
<div class='docs'>
|
||||
<div class='octowrap'>
|
||||
<a class='octothorpe' href='#section-17'>#</a>
|
||||
</div>
|
||||
<p>Provides a way to dynamically generate the config files from the node a service will run on.
|
||||
Defaults to the class definition and can be left out entirely if not needed.</p>
|
||||
</div>
|
||||
<div class='code'>
|
||||
<div class="highlight"><pre> <span class="k">return</span> <span class="bp">cls</span><span class="o">.</span><span class="n">configs</span></pre></div>
|
||||
</div>
|
||||
</div>
|
||||
<div class='clearall'></div>
|
||||
<div class='section' id='section-18'>
|
||||
<div class='docs'>
|
||||
<div class='octowrap'>
|
||||
<a class='octothorpe' href='#section-18'>#</a>
|
||||
</div>
|
||||
<h2>Generate Config</h2>
|
||||
</div>
|
||||
<div class='code'>
|
||||
<div class="highlight"><pre> <span class="nd">@classmethod</span>
|
||||
<span class="k">def</span> <span class="nf">generate_config</span><span class="p">(</span><span class="bp">cls</span><span class="p">,</span> <span class="n">node</span><span class="p">,</span> <span class="n">filename</span><span class="p">):</span></pre></div>
|
||||
</div>
|
||||
</div>
|
||||
<div class='clearall'></div>
|
||||
<div class='section' id='section-19'>
|
||||
<div class='docs'>
|
||||
<div class='octowrap'>
|
||||
<a class='octothorpe' href='#section-19'>#</a>
|
||||
</div>
|
||||
<p>Returns a string representation for a file, given the node the service is starting on and the config filename
|
||||
that this information will be used for. This must be defined if "configs" are defined.</p>
|
||||
</div>
|
||||
<div class='code'>
|
||||
<div class="highlight"><pre> <span class="n">cfg</span> <span class="o">=</span> <span class="s2">"#!/bin/sh</span><span class="se">\n</span><span class="s2">"</span>
|
||||
|
||||
<span class="k">if</span> <span class="n">filename</span> <span class="o">==</span> <span class="bp">cls</span><span class="o">.</span><span class="n">configs</span><span class="p">[</span><span class="mi">0</span><span class="p">]:</span>
|
||||
<span class="n">cfg</span> <span class="o">+=</span> <span class="s2">"# auto-generated by MyService (sample.py)</span><span class="se">\n</span><span class="s2">"</span>
|
||||
<span class="k">for</span> <span class="n">ifc</span> <span class="ow">in</span> <span class="n">node</span><span class="o">.</span><span class="n">netifs</span><span class="p">():</span>
|
||||
<span class="n">cfg</span> <span class="o">+=</span> <span class="s1">'echo "Node </span><span class="si">%s</span><span class="s1"> has interface </span><span class="si">%s</span><span class="s1">"</span><span class="se">\n</span><span class="s1">'</span> <span class="o">%</span> <span class="p">(</span><span class="n">node</span><span class="o">.</span><span class="n">name</span><span class="p">,</span> <span class="n">ifc</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
|
||||
<span class="k">elif</span> <span class="n">filename</span> <span class="o">==</span> <span class="bp">cls</span><span class="o">.</span><span class="n">configs</span><span class="p">[</span><span class="mi">1</span><span class="p">]:</span>
|
||||
<span class="n">cfg</span> <span class="o">+=</span> <span class="s2">"echo hello"</span>
|
||||
|
||||
<span class="k">return</span> <span class="n">cfg</span></pre></div>
|
||||
</div>
|
||||
</div>
|
||||
<div class='clearall'></div>
|
||||
<div class='section' id='section-20'>
|
||||
<div class='docs'>
|
||||
<div class='octowrap'>
|
||||
<a class='octothorpe' href='#section-20'>#</a>
|
||||
</div>
|
||||
<h2>Get Startup</h2>
|
||||
</div>
|
||||
<div class='code'>
|
||||
<div class="highlight"><pre> <span class="nd">@classmethod</span>
|
||||
<span class="k">def</span> <span class="nf">get_startup</span><span class="p">(</span><span class="bp">cls</span><span class="p">,</span> <span class="n">node</span><span class="p">):</span></pre></div>
|
||||
</div>
|
||||
</div>
|
||||
<div class='clearall'></div>
|
||||
<div class='section' id='section-21'>
|
||||
<div class='docs'>
|
||||
<div class='octowrap'>
|
||||
<a class='octothorpe' href='#section-21'>#</a>
|
||||
</div>
|
||||
<p>Provides a way to dynamically generate the startup commands from the node a service will run on.
|
||||
Defaults to the class definition and can be left out entirely if not needed.</p>
|
||||
</div>
|
||||
<div class='code'>
|
||||
<div class="highlight"><pre> <span class="k">return</span> <span class="bp">cls</span><span class="o">.</span><span class="n">startup</span></pre></div>
|
||||
</div>
|
||||
</div>
|
||||
<div class='clearall'></div>
|
||||
<div class='section' id='section-22'>
|
||||
<div class='docs'>
|
||||
<div class='octowrap'>
|
||||
<a class='octothorpe' href='#section-22'>#</a>
|
||||
</div>
|
||||
<h2>Get Validate</h2>
|
||||
</div>
|
||||
<div class='code'>
|
||||
<div class="highlight"><pre> <span class="nd">@classmethod</span>
|
||||
<span class="k">def</span> <span class="nf">get_validate</span><span class="p">(</span><span class="bp">cls</span><span class="p">,</span> <span class="n">node</span><span class="p">):</span></pre></div>
|
||||
</div>
|
||||
</div>
|
||||
<div class='clearall'></div>
|
||||
<div class='section' id='section-23'>
|
||||
<div class='docs'>
|
||||
<div class='octowrap'>
|
||||
<a class='octothorpe' href='#section-23'>#</a>
|
||||
</div>
|
||||
<p>Provides a way to dynamically generate the validate commands from the node a service will run on.
|
||||
Defaults to the class definition and can be left out entirely if not needed.</p>
|
||||
</div>
|
||||
<div class='code'>
|
||||
<div class="highlight"><pre> <span class="k">return</span> <span class="bp">cls</span><span class="o">.</span><span class="n">validate</span>
|
||||
|
||||
</pre></div>
|
||||
</div>
|
||||
</div>
|
||||
<div class='clearall'></div>
|
||||
</div>
|
||||
</body>
|
35
docs/index.md
Normal file
35
docs/index.md
Normal file
|
@ -0,0 +1,35 @@
|
|||
# CORE Documentation
|
||||
|
||||
## Introduction
|
||||
|
||||
CORE (Common Open Research Emulator) is a tool for building virtual networks. As an emulator, CORE builds a representation of a real computer network that runs in real time, as opposed to simulation, where abstract models are used. The live-running emulation can be connected to physical networks and routers. It provides an environment for running real applications and protocols, taking advantage of virtualization provided by the Linux operating system.
|
||||
|
||||
CORE is typically used for network and protocol research, demonstrations, application and platform testing, evaluating networking scenarios, security studies, and increasing the size of physical test networks.
|
||||
|
||||
### Key Features
|
||||
* Efficient and scalable
|
||||
* Runs applications and protocols without modification
|
||||
* Drag and drop GUI
|
||||
* Highly customizable
|
||||
|
||||
## Topics
|
||||
|
||||
* [Architecture](architecture.md)
|
||||
* [Installation](install.md)
|
||||
* [Usage](usage.md)
|
||||
* [Python Scripting](scripting.md)
|
||||
* [Node Types](machine.md)
|
||||
* [CTRLNET](ctrlnet.md)
|
||||
* [Services](services.md)
|
||||
* [EMANE](emane.md)
|
||||
* [NS3](ns3.md)
|
||||
* [Performance](performance.md)
|
||||
* [Developers Guide](devguide.md)
|
||||
|
||||
## Credits
|
||||
|
||||
The CORE project was derived from the open source IMUNES project from the University of Zagreb in 2004. In 2006, changes for CORE were released back to that project, some items of which were adopted. Marko Zec <zec@fer.hr> is the primary developer from the University of Zagreb responsible for the IMUNES (GUI) and VirtNet (kernel) projects. Ana Kukec and Miljenko Mikuc are known contributors.
|
||||
|
||||
Jeff Ahrenholz has been the primary Boeing developer of CORE, and has written this manual. Tom Goff designed the Python framework and has made significant contributions. Claudiu Danilov, Rod Santiago, Kevin Larson, Gary Pei, Phil Spagnolo, and Ian Chakeres have contributed code to CORE. Dan Mackley helped develop the CORE API, originally to interface with a simulator. Jae Kim and Tom Henderson have supervised the project and provided direction.
|
||||
|
||||
Copyright (c) 2005-2018, the Boeing Company.
|
280
docs/install.md
Normal file
280
docs/install.md
Normal file
|
@ -0,0 +1,280 @@
|
|||
|
||||
# CORE Installation
|
||||
|
||||
* Table of Contents
|
||||
{:toc}
|
||||
|
||||
# Overview
|
||||
|
||||
This section describes how to set up a CORE machine. Note that the easiest way to install CORE is to use a binary package on Ubuntu or Fedora/CentOS (deb or rpm), letting the distribution's package manager install the dependencies automatically.
|
||||
|
||||
Ubuntu and Fedora/CentOS Linux are the recommended distributions for running CORE. However, these distributions are not strictly required. CORE will likely work on other flavors of Linux as well.
|
||||
|
||||
The primary dependencies are Tcl/Tk (8.5 or newer) for the GUI, and Python 2.7 for the CORE daemon.
|
||||
|
||||
CORE files are installed to the following directories when the installation prefix is */usr*.
|
||||
|
||||
Install Path | Description
|
||||
-------------|------------
|
||||
/usr/bin/core-gui|GUI startup command
|
||||
/usr/bin/core-daemon|Daemon startup command
|
||||
/usr/bin/|Misc. helper commands/scripts
|
||||
/usr/lib/core|GUI files
|
||||
/usr/lib/python2.7/dist-packages/core|Python modules for daemon/scripts
|
||||
/etc/core/|Daemon configuration files
|
||||
~/.core/|User-specific GUI preferences and scenario files
|
||||
/usr/share/core/|Example scripts and scenarios
|
||||
/usr/share/man/man1/|Command man pages
|
||||
/etc/init.d/core-daemon|SysV startup script for daemon
|
||||
/etc/systemd/system/core-daemon.service|Systemd startup script for daemon
|
||||
|
||||
## Prerequisites
|
||||
|
||||
A Linux operating system is required. The GUI uses the Tcl/Tk scripting toolkit, and the CORE daemon requires Python. Details of the individual software packages required can be found in the installation steps.
|
||||
|
||||
## Required Hardware
|
||||
|
||||
Any computer capable of running Linux should be able to run CORE. Since the physical machine will be hosting numerous virtual machines, as a general rule you should select a machine having as much RAM and CPU resources as possible.
|
||||
|
||||
## Required Software
|
||||
|
||||
CORE requires a Linux operating system because it uses virtualization provided by the kernel. It does not run on Windows or Mac OS X operating systems (unless it is running within a virtual machine guest.) The virtualization technology that CORE currently uses is Linux network namespaces.
|
||||
|
||||
The CORE GUI requires the X.Org X Window System (X11), or can run over a remote X11 session. The specific Tcl/Tk, Python, and other libraries required to run CORE are detailed in the installation steps below.
|
||||
|
||||
**NOTE: CORE *Services* determine what runs on each node. You may require other software packages depending on the services you wish to use. For example, the *HTTP* service will require the *apache2* package.**
|
||||
|
||||
## Installing from Packages
|
||||
|
||||
The easiest way to install CORE is using the pre-built packages. The package managers on Ubuntu or Fedora/CentOS will automatically install dependencies for you. You can obtain the CORE packages from [CORE GitHub](https://github.com/coreemu/core/releases).
|
||||
|
||||
### Installing from Packages on Ubuntu
|
||||
|
||||
Install Quagga for routing. If you plan on working with wireless networks, we recommend installing [OSPF MDR](http://www.nrl.navy.mil/itd/ncs/products/ospf-manet) (replace *amd64* below with *i386* if needed to match your architecture):
|
||||
|
||||
```shell
|
||||
wget https://downloads.pf.itd.nrl.navy.mil/ospf-manet/quagga-0.99.21mr2.2/quagga-mr_0.99.21mr2.2_amd64.deb
|
||||
sudo dpkg -i quagga-mr_0.99.21mr2.2_amd64.deb
|
||||
```
|
||||
|
||||
Or, for the regular Ubuntu version of Quagga:
|
||||
|
||||
```shell
|
||||
sudo apt-get install quagga
|
||||
```
|
||||
|
||||
Install the CORE deb packages for Ubuntu from command line.
|
||||
|
||||
```shell
|
||||
sudo dpkg -i python-core_*.deb
|
||||
sudo dpkg -i core-gui_*.deb
|
||||
```
|
||||
|
||||
Start the CORE daemon as root. The systemd installation will auto-start the daemon, but you can use the commands below if need be.
|
||||
|
||||
```shell
|
||||
# systemd
|
||||
sudo systemctl start core-daemon
|
||||
|
||||
# sysv
|
||||
sudo service core-daemon start
|
||||
```
|
||||
|
||||
Run the CORE GUI as a normal user:
|
||||
|
||||
```shell
|
||||
core-gui
|
||||
```
|
||||
|
||||
After running the *core-gui* command, a GUI should appear with a canvas for drawing topologies. Messages will print out on the console about connecting to the CORE daemon.
|
||||
|
||||
### Installing from Packages on Fedora/CentOS
|
||||
|
||||
The commands shown here should be run as root. The *x86_64* architecture is shown in the examples below; replace it with *i686* if using a 32-bit architecture.
|
||||
|
||||
**CentOS 7 Only: in order to install *tkimg* package you must build from source.**
|
||||
|
||||
Make sure the system is up to date.
|
||||
|
||||
```shell
|
||||
yum update
|
||||
```
|
||||
|
||||
**Optional (Fedora 17+): Fedora 17 and newer require an additional package that provides the netem kernel modules (otherwise skip this step and let the package manager install it for you).**
|
||||
|
||||
```shell
|
||||
yum install kernel-modules-extra
|
||||
```
|
||||
|
||||
Install Quagga for routing. If you plan on working with wireless networks, we recommend installing [OSPF MDR](http://www.nrl.navy.mil/itd/ncs/products/ospf-manet):
|
||||
|
||||
```shell
|
||||
wget https://downloads.pf.itd.nrl.navy.mil/ospf-manet/quagga-0.99.21mr2.2/quagga-0.99.21mr2.2-1.el6.x86_64.rpm
|
||||
sudo yum install quagga-0.99.21mr2.2-1.el6.x86_64.rpm
|
||||
```
|
||||
|
||||
Or, for the regular Fedora/CentOS version of Quagga:
|
||||
|
||||
```shell
|
||||
yum install quagga
|
||||
```
|
||||
|
||||
Install the CORE RPM packages and automatically resolve dependencies:
|
||||
|
||||
```shell
|
||||
yum install python-core_*.rpm
|
||||
yum install core-gui_*.rpm
|
||||
```
|
||||
|
||||
Turn off SELinux by setting *SELINUX=disabled* in the */etc/sysconfig/selinux* file and adding *selinux=0* to the kernel line in your */etc/grub.conf* file. On Fedora 15 and newer, disable sandboxd using ```chkconfig sandbox off```. You need to reboot in order for these changes to take effect.
|
||||
|
||||
Turn off firewalls:
|
||||
|
||||
```shell
|
||||
systemctl disable firewalld
|
||||
systemctl disable iptables.service
|
||||
systemctl disable ip6tables.service
|
||||
chkconfig iptables off
|
||||
chkconfig ip6tables off
|
||||
```
|
||||
|
||||
You need to reboot after making these changes, or flush the firewall using
|
||||
|
||||
```shell
|
||||
iptables -F
|
||||
ip6tables -F
|
||||
```
|
||||
|
||||
Start the CORE daemon as root.
|
||||
|
||||
```shell
|
||||
# systemd
|
||||
sudo systemctl daemon-reload
|
||||
sudo systemctl start core-daemon
|
||||
|
||||
# sysv
|
||||
sudo service core-daemon start
|
||||
```
|
||||
|
||||
Run the CORE GUI as a normal user:
|
||||
|
||||
```shell
|
||||
core-gui
|
||||
```
|
||||
|
||||
After running the *core-gui* command, a GUI should appear with a canvas for drawing topologies. Messages will print out on the console about connecting to the CORE daemon.
|
||||
|
||||
### Installing from Source
|
||||
|
||||
This option is listed here for developers and advanced users who are comfortable patching and building source code. Please consider using the binary packages instead for a simplified install experience.
|
||||
|
||||
To build CORE from source on Ubuntu, first install these development packages. These packages are not required for normal binary package installs.
|
||||
|
||||
#### Ubuntu 18.04 pre-reqs
|
||||
|
||||
```shell
|
||||
sudo apt install automake pkg-config gcc libev-dev bridge-utils ebtables python-dev python-sphinx python-setuptools python-lxml python-enum34 tk libtk-img
|
||||
```
|
||||
|
||||
#### Ubuntu 16.04 Requirements
|
||||
|
||||
```shell
|
||||
sudo apt-get install automake bridge-utils ebtables python-dev libev-dev python-sphinx python-setuptools python-enum34 python-lxml
|
||||
```
|
||||
|
||||
|
||||
#### CentOS 7 with Gnome Desktop Requirements
|
||||
|
||||
```shell
|
||||
sudo yum -y install automake gcc python-devel libev-devel python-sphinx tk python-lxml python-enum34
|
||||
```
|
||||
|
||||
You can obtain the CORE source from the [CORE GitHub](https://github.com/coreemu/core) page. Choose either a stable release version or the development snapshot available in the *nightly_snapshots* directory.
|
||||
|
||||
```shell
|
||||
tar xzf core-*.tar.gz
|
||||
cd core-*
|
||||
./bootstrap.sh
|
||||
./configure
|
||||
make
|
||||
sudo make install
|
||||
```
|
||||
|
||||
### Quagga Routing Software
|
||||
|
||||
Virtual networks generally require some form of routing in order to work (e.g. to automatically populate routing tables for routing packets from one subnet to another). CORE builds OSPF routing protocol configurations by default when the blue router node type is used. The OSPF protocol is available from the [Quagga open source routing suite](http://www.quagga.net).
|
||||
|
||||
Quagga is not specified as a dependency for the CORE packages because there are two different Quagga packages that you may use:
|
||||
|
||||
* [Quagga](http://www.quagga.net) - the standard version of Quagga, suitable for static wired networks, and usually available via your distribution's package manager.
|
||||
|
||||
* [OSPF MANET Designated Routers](http://www.nrl.navy.mil/itd/ncs/products/ospf-manet) (MDR) - the Quagga routing suite with a modified version of OSPFv3, optimized for use with mobile wireless networks. The *mdr* node type (and the MDR service) requires this variant of Quagga.
|
||||
|
||||
If you plan on working with wireless networks, we recommend installing OSPF MDR; otherwise install the standard version of Quagga using your package manager or from source.
|
||||
|
||||
#### Installing Quagga from Packages
|
||||
|
||||
To install the standard version of Quagga from packages, use your package manager (Linux).
|
||||
|
||||
Ubuntu users:
|
||||
|
||||
```shell
|
||||
sudo apt-get install quagga
|
||||
```
|
||||
|
||||
Fedora/CentOS users:
|
||||
|
||||
```shell
|
||||
sudo yum install quagga
|
||||
```
|
||||
|
||||
To install the Quagga variant having OSPFv3 MDR, first download the appropriate package, and install using the package manager.
|
||||
|
||||
Ubuntu users:
|
||||
```shell
|
||||
wget https://downloads.pf.itd.nrl.navy.mil/ospf-manet/quagga-0.99.21mr2.2/quagga-mr_0.99.21mr2.2_amd64.deb
|
||||
sudo dpkg -i quagga-mr_0.99.21mr2.2_amd64.deb
|
||||
```
|
||||
|
||||
Replace *amd64* with *i686* if using a 32-bit architecture.
|
||||
|
||||
Fedora/CentOS users:
|
||||
|
||||
```shell
|
||||
wget https://downloads.pf.itd.nrl.navy.mil/ospf-manet/quagga-0.99.21mr2.2/quagga-0.99.21mr2.2-1.el6.x86_64.rpm
|
||||
sudo yum install quagga-0.99.21mr2.2-1.el6.x86_64.rpm
|
||||
```
|
||||
|
||||
Replace *x86_64* with *i686* if using a 32-bit architecture.
|
||||
|
||||
#### Compiling Quagga for CORE
|
||||
|
||||
To compile Quagga to work with CORE on Linux:
|
||||
|
||||
```shell
|
||||
wget https://downloads.pf.itd.nrl.navy.mil/ospf-manet/quagga-0.99.21mr2.2/quagga-0.99.21mr2.2.tar.gz
|
||||
tar xzf quagga-0.99.21mr2.2.tar.gz
|
||||
cd quagga-0.99.21mr2.2
|
||||
./configure --enable-user=root --enable-group=root --with-cflags=-ggdb \
|
||||
    --sysconfdir=/usr/local/etc/quagga --enable-vtysh \
|
||||
    --localstatedir=/var/run/quagga
|
||||
make
|
||||
sudo make install
|
||||
```
|
||||
|
||||
Note that the configuration directory */usr/local/etc/quagga* shown for Quagga above could be */etc/quagga*, if you create a symbolic link from */etc/quagga/Quagga.conf -> /usr/local/etc/quagga/Quagga.conf* on the host. The *quaggaboot.sh* script in a Linux network namespace will try to do this for you if needed.
|
||||
|
||||
If you try to run quagga after installing from source and get an error such as:
|
||||
|
||||
```shell
|
||||
error while loading shared libraries libzebra.so.0
|
||||
```
|
||||
|
||||
this is usually a sign that you have to run ```sudo ldconfig``` to refresh the cache file.
|
||||
|
||||
### VCORE
|
||||
|
||||
CORE is capable of running inside of a virtual machine, using software such as VirtualBox, VMware Server or QEMU. However, CORE itself is performing machine virtualization in order to realize multiple emulated nodes, and running CORE virtually adds additional contention for the physical resources. **For performance reasons, this is not recommended.** Timing inside of a VM often has problems. If you do run CORE from within a VM, it is recommended that you view the GUI with remote X11 over SSH, so the virtual machine does not need to emulate the video card with the X11 application.
|
||||
|
||||
A CORE virtual machine is provided for download, named VCORE. This is perhaps the easiest way to get CORE up and running as the machine is already set up for you. This may be adequate for initially evaluating the tool but keep in mind the performance limitations of running within VirtualBox or VMware. To install the virtual machine, you first need to obtain VirtualBox from http://www.virtualbox.org, or VMware Server or Player from http://www.vmware.com (this commercial software is distributed for free.) Once virtualization software has been installed, you can import the virtual machine appliance using the *vbox* file for VirtualBox or the *vmx* file for VMware. See the documentation that comes with VCORE for login information.
|
||||
|
22
docs/machine.md
Normal file
22
docs/machine.md
Normal file
|
@ -0,0 +1,22 @@
|
|||
# CORE Node Types
|
||||
|
||||
* Table of Contents
|
||||
{:toc}
|
||||
|
||||
## Overview
|
||||
|
||||
Different node types can be configured in CORE, and each node type has a *machine type* that indicates how the node will be represented at run time. Different machine types allow for different virtualization options.
|
||||
|
||||
## netns nodes
|
||||
|
||||
The *netns* machine type is the default. This is for nodes that will be backed by Linux network namespaces (netns). This default machine type is very lightweight, providing a minimal amount of virtualization in order to emulate a network. Another reason it is designated as the default machine type is that this virtualization technology typically requires no changes to the kernel; it is available out-of-the-box from the latest mainstream Linux distributions.
|
||||
|
||||
## physical nodes
|
||||
|
||||
The *physical* machine type is used for nodes that represent a real Linux-based machine that will participate in the emulated network scenario. This is typically used, for example, to incorporate racks of server machines from an emulation testbed. A physical node is one that is running the CORE daemon (*core-daemon*), but will not be further partitioned into virtual machines. Services that are run on the physical node do not run in an isolated or virtualized environment, but directly on the operating system.
|
||||
|
||||
Physical nodes must be assigned to servers, the same way nodes are assigned to emulation servers with *Distributed Emulation*. The list of available physical nodes currently shares the same dialog box and list as the emulation servers, accessed using the *Emulation Servers...* entry from the *Session* menu.
|
||||
|
||||
Support for physical nodes is under development and may be improved in future releases. Currently, when any node is linked to a physical node, a dashed line is drawn to indicate network tunneling. A GRE tunneling interface will be created on the physical node and used to tunnel traffic to and from the emulated world.
|
||||
|
||||
Double-clicking on a physical node during runtime opens a terminal with an SSH shell to that node. Users should configure public-key SSH login as done with emulation servers.
|
171
docs/ns3.md
Normal file
171
docs/ns3.md
Normal file
|
@ -0,0 +1,171 @@
|
|||
# CORE / NS3
|
||||
|
||||
* Table of Contents
|
||||
{:toc}
|
||||
|
||||
**NOTE: Support for ns-3 is limited and not currently being developed.**
|
||||
|
||||
## What is ns-3?
|
||||
|
||||
The [ns-3 network simulator](http://www.nsnam.org) is a discrete-event network simulator for Internet systems, targeted primarily for research and educational use. By default, ns-3 simulates entire networks, from applications down to channels, and it does so in simulated time, instead of real (wall-clock) time.
|
||||
|
||||
CORE can run in conjunction with ns-3 to simulate some types of networks. CORE network namespace virtual nodes can have virtual TAP interfaces installed using the simulator for communication. The simulator needs to run at wall clock time with the real-time scheduler. In this type of configuration, the CORE namespaces are used to provide packets to the ns-3 devices and channels. This allows, for example, wireless models developed for ns-3 to be used in an emulation context.
|
||||
|
||||
Users simulate networks with ns-3 by writing C++ programs or Python scripts that import the ns-3 library. Simulation models are objects instantiated in these scripts. Combining the CORE Python modules with ns-3 Python bindings allows a script to easily set up and manage an emulation + simulation environment.
|
||||
|
||||
## ns-3 Scripting
|
||||
|
||||
Currently, ns-3 is supported by writing Python scripts, but not through drag-and-drop actions within the GUI. If you have a copy of the CORE source, look under *ns3/examples/* for example scripts; a CORE installation package puts these under */usr/share/core/examples/corens3*.
|
||||
|
||||
To run these scripts, install CORE so the CORE Python libraries are accessible, and download and build ns-3. This has been tested using ns-3 releases starting with 3.11 (and through 3.16 as of this writing).
|
||||
|
||||
The first step is to open an ns-3 waf shell. [waf](http://code.google.com/p/waf/) is the build system for ns-3. Opening a waf shell as root will merely set some environment variables useful for finding python modules and ns-3 executables. The following environment variables are extended or set by issuing *waf shell*:
|
||||
|
||||
```shell
|
||||
PATH
|
||||
PYTHONPATH
|
||||
LD_LIBRARY_PATH
|
||||
NS3_MODULE_PATH
|
||||
NS3_EXECUTABLE_PATH
|
||||
```
|
||||
|
||||
Open a waf shell as root, so that network namespaces may be instantiated by the script with root permissions. For an example, run the *ns3wifi.py* program, which simply instantiates 10 nodes (by default) and places them on an ns-3 WiFi channel. That is, the script will instantiate 10 namespace nodes, and create a special tap device that sends packets between the namespace node and a special ns-3 simulation node, where the tap device is bridged to an ns-3 WiFi network device, and attached to an ns-3 WiFi channel.
|
||||
|
||||
```shell
|
||||
cd ns-allinone-3.16/ns-3.16
|
||||
sudo ./waf shell
|
||||
# use '/usr/local' below if installed from source
|
||||
cd /usr/share/core/examples/corens3/
|
||||
```
|
||||
|
||||
```python
|
||||
python -i ns3wifi.py
|
||||
# running ns-3 simulation for 600 seconds
|
||||
print session
|
||||
<corens3.obj.Ns3Session object at 0x1963e50>
|
||||
```
|
||||
|
||||
The interactive Python shell allows some interaction with the Python objects for the emulation.
|
||||
|
||||
In another terminal, nodes can be accessed using *vcmd*:
|
||||
|
||||
```shell
|
||||
vcmd -c /tmp/pycore.10781/n1 -- bash
|
||||
root@n1:/tmp/pycore.10781/n1.conf#
|
||||
root@n1:/tmp/pycore.10781/n1.conf# ping 10.0.0.3
|
||||
PING 10.0.0.3 (10.0.0.3) 56(84) bytes of data.
|
||||
64 bytes from 10.0.0.3: icmp_req=1 ttl=64 time=7.99 ms
|
||||
64 bytes from 10.0.0.3: icmp_req=2 ttl=64 time=3.73 ms
|
||||
64 bytes from 10.0.0.3: icmp_req=3 ttl=64 time=3.60 ms
|
||||
^C
|
||||
--- 10.0.0.3 ping statistics ---
|
||||
3 packets transmitted, 3 received, 0% packet loss, time 2002ms
|
||||
rtt min/avg/max/mdev = 3.603/5.111/7.993/2.038 ms
|
||||
root@n1:/tmp/pycore.10781/n1.conf#
|
||||
```
|
||||
|
||||
The ping packets shown above are traversing an ns-3 ad-hoc Wifi simulated network.
|
||||
|
||||
To clean up the session, use the Session.shutdown() method from the Python terminal.
|
||||
|
||||
```python
|
||||
print session
|
||||
<corens3.obj.Ns3Session object at 0x1963e50>
|
||||
session.shutdown()
|
||||
```
|
||||
|
||||
A CORE/ns-3 Python script will instantiate an Ns3Session, which is a CORE Session having CoreNs3Nodes, an ns-3 MobilityHelper, and a fixed duration. The CoreNs3Node inherits from both the CoreNode and the ns-3 Node classes -- it is a network namespace having an associated simulator object. The CORE TunTap interface is used, represented by an ns-3 TapBridge in *CONFIGURE_LOCAL* mode, where ns-3 creates and configures the tap device. An event is scheduled to install the taps at time 0.
|
||||
|
||||
**NOTE: The GUI can be used to run the *ns3wifi.py* and *ns3wifirandomwalk.py* scripts directly. First, *core-daemon* must be stopped and run within the waf root shell. Then the GUI may be run as a normal user, and the *Execute Python Script...* option may be used from the *File* menu. Dragging nodes around in the *ns3wifi.py* example will cause their ns-3 positions to be updated.**
|
||||
|
||||
Users may find the files *ns3wimax.py* and *ns3lte.py* in that example directory; those files were similarly configured, but the underlying ns-3 support is not present as of ns-3.16, so they will not work. Specifically, ns-3 has to be extended to support bridging the Tap device to an LTE and a WiMax device.
|
||||
|
||||
## Integration details
|
||||
|
||||
The previous example *ns3wifi.py* used Python API from the special Python objects *Ns3Session* and *Ns3WifiNet*. The example program does not import anything directly from the ns-3 python modules; rather, only the above two objects are used, and the API available to configure the underlying ns-3 objects is constrained. For example, *Ns3WifiNet* instantiates a constant-rate 802.11a-based ad hoc network, using a lot of ns-3 defaults.
|
||||
|
||||
However, programs may be written with a blend of ns-3 API and CORE Python API calls. This section examines some of the fundamental objects in the CORE ns-3 support. Source code can be found in *ns3/corens3/obj.py* and example code in *ns3/corens3/examples/*.
|
||||
|
||||
## Ns3Session
|
||||
|
||||
The *Ns3Session* class is a CORE Session that starts an ns-3 simulation thread. ns-3 actually runs as a separate process on the same host as the CORE daemon, and the control of starting and stopping this process is performed by the *Ns3Session* class.
|
||||
|
||||
Example:
|
||||
|
||||
```python
|
||||
session = Ns3Session(persistent=True, duration=opt.duration)
|
||||
```
|
||||
|
||||
Note the use of the duration attribute to control how long the ns-3 simulation should run. By default, the duration is 600 seconds.
|
||||
|
||||
Typically, the session keeps track of the ns-3 nodes (holding a node container for references to the nodes). This is accomplished via the ```addnode()``` method, e.g.:
|
||||
|
||||
```python
|
||||
for i in xrange(1, opt.numnodes + 1):
|
||||
node = session.addnode(name = "n%d" % i)
|
||||
```
|
||||
|
||||
```addnode()``` creates instances of a *CoreNs3Node*, which we'll cover next.
|
||||
|
||||
## CoreNs3Node
|
||||
|
||||
A *CoreNs3Node* is both a CoreNode and an ns-3 node:
|
||||
|
||||
```python
|
||||
class CoreNs3Node(CoreNode, ns.network.Node):
|
||||
"""
|
||||
The CoreNs3Node is both a CoreNode backed by a network namespace and
|
||||
an ns-3 Node simulator object. When linked to simulated networks, the TunTap
|
||||
device will be used.
|
||||
"""
|
||||
```
|
||||
|
||||
## CoreNs3Net
|
||||
|
||||
A *CoreNs3Net* derives from *PyCoreNet*. This network exists entirely in simulation, using the TunTap device to interact between the emulated and the simulated realm. *Ns3WifiNet* is a specialization of this.
|
||||
|
||||
As an example, this type of code would typically be used to add a WiFi network to a session:
|
||||
|
||||
```python
|
||||
wifi = session.addobj(cls=Ns3WifiNet, name="wlan1", rate="OfdmRate12Mbps")
|
||||
wifi.setposition(30, 30, 0)
|
||||
```
|
||||
|
||||
The above two lines will create a wlan1 object and set its initial canvas position. Later in the code, the newnetif method of the CoreNs3Node can be used to add interfaces on particular nodes to this network; e.g.:
|
||||
|
||||
```python
|
||||
for i in xrange(1, opt.numnodes + 1):
|
||||
node = session.addnode(name = "n%d" % i)
|
||||
node.newnetif(wifi, ["%s/%s" % (prefix.addr(i), prefix.prefixlen)])
|
||||
```
|
||||
|
||||
## Mobility
|
||||
|
||||
Mobility in ns-3 is handled by an object (a MobilityModel) aggregated to an ns-3 node. The MobilityModel is able to report the position of the object in the ns-3 space. This is a slightly different model from, for instance, EMANE, where location is associated with an interface, and the CORE GUI, where mobility is configured by right-clicking on a WiFi cloud.
|
||||
|
||||
The CORE GUI supports the ability to render the underlying ns-3 mobility model, if one is configured, on the CORE canvas. For example, the example program *ns3wifirandomwalk.py* uses five nodes (by default) in a random walk mobility model. This can be executed by starting the core daemon from an ns-3 waf shell:
|
||||
|
||||
```shell
|
||||
sudo bash
|
||||
cd /path/to/ns-3
|
||||
./waf shell
|
||||
core-daemon
|
||||
```
|
||||
|
||||
and, in a separate window, starting the CORE GUI (not from a waf shell), selecting the *Execute Python script...* option from the File menu, and choosing the *ns3wifirandomwalk.py* script.
|
||||
|
||||
The program invokes ns-3 mobility through the following statement:
|
||||
|
||||
```python
|
||||
session.setuprandomwalkmobility(bounds=(1000.0, 750.0, 0))
|
||||
```
|
||||
|
||||
This can be replaced by a different mode of mobility, in which nodes are placed according to a constant mobility model, and a special API call to the CoreNs3Net object is made to use the CORE canvas positions.
|
||||
|
||||
```python
|
||||
# session.setuprandomwalkmobility(bounds=(1000.0, 750.0, 0))  # replaced by the two calls below
|
||||
session.setupconstantmobility()
|
||||
wifi.usecorepositions()
|
||||
```
|
||||
|
||||
In this mode, the user dragging around the nodes on the canvas will cause CORE to update the position of the underlying ns-3 nodes.
|
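To tie the pieces of this section together, the sketch below assembles a minimal blended script from the calls already shown in this chapter (Ns3Session, addnode, addobj with Ns3WifiNet, newnetif, setupconstantmobility, usecorepositions, shutdown). It is a hypothetical illustration, not a tested recipe: the *corens3.obj* import path is assumed from the object repr shown earlier, and the hard-coded addresses and node count are placeholders. Run it from a root waf shell, like the other examples.

```python
# Minimal sketch of a blended CORE/ns-3 script, assembled from calls shown
# in this chapter. Assumptions: the corens3.obj import path and the
# hard-coded addresses; run from a root waf shell like the examples above.
from corens3.obj import Ns3Session, Ns3WifiNet

session = Ns3Session(persistent=True, duration=120)

# simulated WiFi network that bridges the emulated nodes into ns-3
wifi = session.addobj(cls=Ns3WifiNet, name="wlan1", rate="OfdmRate12Mbps")
wifi.setposition(30, 30, 0)

# namespace-backed nodes attached to the simulated channel
for i in xrange(1, 4):
    node = session.addnode(name="n%d" % i)
    node.newnetif(wifi, ["10.0.0.%d/24" % i])

# take node positions from the CORE canvas instead of an ns-3 mobility model
session.setupconstantmobility()
wifi.usecorepositions()

# ... interact with the running emulation, then clean up
session.shutdown()
```

Because each CoreNs3Node is also an ns-3 Node, raw ns-3 API calls can be mixed in alongside the CORE calls where the constrained wrappers are not sufficient.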
Some files were not shown because too many files have changed in this diff.