Merge branch 'rel/5.2' into core-rest-flask

Blake J. Harnden 2018-08-03 09:49:27 -07:00
commit f053f11eb4
62 changed files with 2252 additions and 3787 deletions


@ -1,14 +1,9 @@
# CORE
# (c)2010-2012 the Boeing Company.
# See the LICENSE file included in this distribution.
#
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
#
# Top-level Makefile for CORE project.
#
if WANT_DOCS
DOCS = doc
DOCS = docs man
endif
if WANT_GUI
@ -171,26 +166,15 @@ change-files:
$(call change-files,scripts/core-daemon)
$(call change-files,daemon/core/constants.py)
CORE_DOC_HTML = core-html-$(PACKAGE_VERSION)
CORE_DOC_PDF = core-manual-$(PACKAGE_VERSION)
CORE_DOC_SRC = core-python-$(PACKAGE_VERSION)
.PHONY: doc
doc: doc-clean
$(MAKE) -C doc html
mv doc/_build/html doc/$(CORE_DOC_HTML)
tar -C doc -czf $(CORE_DOC_HTML).tgz $(CORE_DOC_HTML)
$(MAKE) -C doc latexpdf
mv doc/_build/latex/CORE.pdf $(CORE_DOC_PDF).pdf
$(MAKE) -C daemon/doc html
mv daemon/doc/_build/html daemon/doc/$(CORE_DOC_SRC)
tar -C daemon/doc -czf $(CORE_DOC_SRC).tgz $(CORE_DOC_SRC)
.PHONY: doc-clean
doc-clean:
-rm -rf doc/_build
-rm -rf doc/$(CORE_DOC_HTML)
-rm -rf daemon/doc/_build
-rm -rf daemon/doc/$(CORE_DOC_SRC)
-rm -f $(CORE_DOC_HTML).tgz
-rm -f $(CORE_DOC_SRC).tgz
-rm -f $(CORE_DOC_PDF).pdf


@ -208,10 +208,8 @@ AC_CONFIG_FILES([Makefile
gui/icons/Makefile
scripts/Makefile
scripts/perf/Makefile
doc/Makefile
doc/conf.py
doc/man/Makefile
doc/figures/Makefile
man/Makefile
docs/Makefile
daemon/Makefile
daemon/doc/Makefile
daemon/doc/conf.py
@ -229,7 +227,7 @@ Build:
Host System Type: ${host}
C Compiler and flags: ${CC} ${CFLAGS}
Prefix: ${prefix}
Exec Prefix: ${exec_prefix}
GUI:
GUI path: ${CORE_LIB_DIR}
@ -241,11 +239,11 @@ Daemon:
Python modules: ${pythondir}
Logs: ${CORE_STATE_DIR}/log
Startup: ${with_startup}
Features to build:
Build GUI: ${enable_gui}
Build Daemon: ${enable_daemon}
Documentation: ${want_docs}
------------------------------------------------------------------------"


@ -15,7 +15,7 @@ if WANT_DOCS
endif
SCRIPT_FILES := $(notdir $(wildcard scripts/*))
MAN_FILES := $(notdir $(wildcard ../doc/man/*.1))
MAN_FILES := $(notdir $(wildcard ../man/*.1))
# Python package build
noinst_SCRIPTS = build


@ -31,6 +31,92 @@ class ServiceMode(enum.Enum):
TIMER = 2
class ServiceDependencies(object):
"""
Generates boot paths for services based on their dependencies, validating that
every service will be booted and that all dependencies exist within the services provided.
"""
def __init__(self, services):
# helpers to check validity
self.dependents = {}
self.booted = set()
self.node_services = {}
for service in services:
self.node_services[service.name] = service
for dependency in service.dependencies:
dependents = self.dependents.setdefault(dependency, set())
dependents.add(service.name)
# used to find paths
self.path = []
self.visited = set()
self.visiting = set()
def boot_paths(self):
"""
Generates the boot paths for the services provided to the class.
:return: list of boot paths, each an ordered list of services to boot
:rtype: list[list[core.service.CoreService]]
"""
paths = []
for service in self.node_services.itervalues():
if service.name in self.booted:
logger.debug("skipping service that will already be booted: %s", service.name)
continue
path = self._start(service)
if path:
paths.append(path)
if self.booted != set(self.node_services.iterkeys()):
raise ValueError("failure to boot all services: %s != %s" % (self.booted, self.node_services.keys()))
return paths
def _reset(self):
self.path = []
self.visited.clear()
self.visiting.clear()
def _start(self, service):
logger.debug("starting service dependency check: %s", service.name)
self._reset()
return self._visit(service)
def _visit(self, current_service):
logger.debug("visiting service(%s): %s", current_service.name, self.path)
self.visited.add(current_service.name)
self.visiting.add(current_service.name)
# dive down
for service_name in current_service.dependencies:
if service_name not in self.node_services:
raise ValueError("required dependency was not included in node services: %s" % service_name)
if service_name in self.visiting:
raise ValueError("cyclic dependency at service(%s): %s" % (current_service.name, service_name))
if service_name not in self.visited:
service = self.node_services[service_name]
self._visit(service)
# add service when bottom is found
logger.debug("adding service to boot path: %s", current_service.name)
self.booted.add(current_service.name)
self.path.append(current_service)
self.visiting.remove(current_service.name)
# rise back up
for service_name in self.dependents.get(current_service.name, []):
if service_name not in self.visited:
service = self.node_services[service_name]
self._visit(service)
return self.path
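As a quick illustration (not part of this commit), the resolver can be exercised directly with minimal service classes, mirroring the pattern used by the test suite further down; ServiceA and ServiceB here are hypothetical stand-ins:
from core.service import CoreService, ServiceDependencies

class ServiceA(CoreService):
    name = "A"
    dependencies = ()

class ServiceB(CoreService):
    name = "B"
    dependencies = ("A",)

# a single boot path is produced, and A always precedes its dependent B
paths = ServiceDependencies([ServiceA, ServiceB]).boot_paths()
assert [service.name for service in paths[0]] == ["A", "B"]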
class ServiceShim(object):
keys = ["dirs", "files", "startidx", "cmdup", "cmddown", "cmdval", "meta", "starttime"]
@ -84,7 +170,7 @@ class ServiceShim(object):
:return: nothing
"""
if key not in cls.keys:
raise ValueError('key `%s` not in `%s`' % (key, cls.keys))
raise ValueError("key `%s` not in `%s`" % (key, cls.keys))
# this handles data conversion to int, string, and tuples
if value:
if key == "startidx":
@ -217,87 +303,6 @@ class CoreServices(object):
self.default_services.clear()
self.custom_services.clear()
def create_boot_paths(self, services):
"""
Create boot paths for starting up services based on dependencies. All services provided and their dependencies
must exist within this set of services, to be valid.
:param list[CoreService] services: service to create boot paths for
:return: list of boot paths for services
:rtype: list[list[CoreService]]
"""
# generate service map and find starting points
node_services = {service.name: service for service in services}
all_services = set()
has_dependency = set()
dependency_map = {}
for service in services:
all_services.add(service.name)
if service.dependencies:
has_dependency.add(service.name)
for dependency in service.dependencies:
dependents = dependency_map.setdefault(dependency, set())
dependents.add(service.name)
starting_points = all_services - has_dependency
# cycles means no starting points
if not starting_points:
raise ValueError("no valid service starting points")
stack = [iter(starting_points)]
# information used to traverse dependency graph
visited = set()
path = []
path_set = set()
# store startup orderings
startups = []
startup = []
logger.debug("starting points: %s", starting_points)
while stack:
for service_name in stack[-1]:
service = node_services[service_name]
logger.debug("evaluating: %s", service.name)
# check this is not a cycle
if service.name in path_set:
raise ValueError("service has a cyclic dependency: %s" % service.name)
# check that we have not already visited this node
elif service.name not in visited:
logger.debug("visiting: %s", service.name)
visited.add(service.name)
path.append(service.name)
path_set.add(service.name)
# retrieve dependent services and add to stack
dependents = iter(dependency_map.get(service.name, []))
stack.append(dependents)
startup.append(service)
break
# for loop completed without a break
else:
logger.debug("finished a visit: path(%s)", path)
if path:
path_set.remove(path.pop())
if not path and startup:
# finalize startup path
startups.append(startup)
# reset new startup path
startup = []
stack.pop()
if visited != all_services:
raise ValueError("failure to visit all services for boot path")
return startups
def get_default_services(self, node_type):
"""
Get the list of default services that should be enabled for a
@ -422,7 +427,7 @@ class CoreServices(object):
pool = ThreadPool()
results = []
boot_paths = self.create_boot_paths(node.services)
boot_paths = ServiceDependencies(node.services).boot_paths()
for boot_path in boot_paths:
result = pool.apply_async(self._start_boot_paths, (node, boot_path))
results.append(result)
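In short, each boot path is handed to its own worker so independent paths start concurrently, while services within a path boot in dependency order. A minimal sketch of that fan-out pattern, assuming a boot_paths list as above and a hypothetical boot_sequentially helper standing in for _start_boot_paths:
from multiprocessing.pool import ThreadPool

def boot_sequentially(boot_path):
    # hypothetical worker: start each service of one path in dependency order
    for service in boot_path:
        print("booting %s" % service.name)

pool = ThreadPool()
results = [pool.apply_async(boot_sequentially, (path,)) for path in boot_paths]
pool.close()
pool.join()
for result in results:
    result.get()  # re-raises any exception from a worker thread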
@ -440,7 +445,7 @@ class CoreServices(object):
:param list[CoreService] boot_path: service to start in dependent order
:return: nothing
"""
logger.debug("booting node service dependencies: %s", boot_path)
logger.info("booting node services: %s", boot_path)
for service in boot_path:
self.boot_service(node, service)


@ -734,7 +734,7 @@ class Session(object):
pool.join()
for result in results:
result.get()
logger.info("BOOT RUN TIME: %s", time.time() - start)
logger.debug("boot run time: %s", time.time() - start)
self.update_control_interface_hosts()


@ -155,7 +155,7 @@ def build_node_platform_xml(emane_manager, control_net, node, nem_id, platform_x
eventdev = None
platform_element = platform_xmls.get(key)
if not platform_element:
if platform_element is None:
platform_element = etree.Element("platform")
if otadev:
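The `platform_element is None` check avoids an lxml pitfall: an Element with no children evaluates as falsy, so the old truthiness test could treat an existing empty platform element from platform_xmls as missing. A small sketch of the difference:
from lxml import etree

platform_element = etree.Element("platform")
# an empty element is falsy, but it is not None
assert not platform_element
assert platform_element is not None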


@ -2,57 +2,81 @@
Sample user-defined service.
"""
from core.misc.ipaddress import Ipv4Prefix
from core.service import CoreService
from core.service import ServiceMode
## Custom CORE Service
class MyService(CoreService):
"""
This is a sample user-defined service.
"""
# a unique name is required, without spaces
### Service Attributes
# Name used as a unique ID for this service; it is required and must not contain spaces.
name = "MyService"
# you can create your own group here
# Allows you to group services within the GUI under a common name.
group = "Utility"
# list executables that this service requires
# Executables this service depends on to function; if an executable is not on the path, the service will not be loaded.
executables = ()
# list of other services this service depends on
# Services that this service depends on for startup, tuple of service names.
dependencies = ()
# per-node directories
# Directories that this service will create within a node.
dirs = ()
# generated files (without a full path this file goes in the node's dir,
# e.g. /tmp/pycore.12345/n1.conf/)
configs = ("myservice.sh",)
# list of startup commands, also may be generated during startup
startup = ("sh myservice.sh",)
# list of shutdown commands
# Files that this service will generate; without a full path, a file is written to the node's directory,
# e.g. /tmp/pycore.12345/n1.conf/myfile
configs = ("myservice1.sh", "myservice2.sh")
# Commands used to start this service; any non-zero exit code will cause a failure.
startup = ("sh %s" % configs[0], "sh %s" % configs[1])
# Commands used to validate that a service was started; any non-zero exit code will cause a failure.
validate = ()
# Validation mode, used to determine startup success.
# * NON_BLOCKING - runs startup commands, and validates success with validation commands
# * BLOCKING - runs startup commands, and validates success with the startup commands themselves
# * TIMER - runs startup commands, and validates success by waiting for "validation_timer" alone
validation_mode = ServiceMode.NON_BLOCKING
# Time for a service to wait before running validation commands or determining success in TIMER mode.
validation_timer = 0
# Shutdown commands to stop this service.
shutdown = ()
### On Load
@classmethod
def on_load(cls):
# Provides a way to run some arbitrary logic when the service is loaded, possibly to help facilitate
# dynamic settings for the environment.
pass
### Get Configs
@classmethod
def get_configs(cls, node):
# Provides a way to dynamically generate the config files for the node a service will run on.
# Defaults to the class definition and can be left out entirely if not needed.
return cls.configs
### Generate Config
@classmethod
def generate_config(cls, node, filename):
"""
Return a string that will be written to filename, or sent to the
GUI for user customization.
"""
# Returns a string representation for a file, given the node the service is starting on and the config
# filename that this information will be used for. This must be defined if "configs" is defined.
cfg = "#!/bin/sh\n"
cfg += "# auto-generated by MyService (sample.py)\n"
for ifc in node.netifs():
cfg += 'echo "Node %s has interface %s"\n' % (node.name, ifc.name)
# here we do something interesting
cfg += "\n".join(map(cls.subnetentry, ifc.addrlist))
break
if filename == cls.configs[0]:
cfg += "# auto-generated by MyService (sample.py)\n"
for ifc in node.netifs():
cfg += 'echo "Node %s has interface %s"\n' % (node.name, ifc.name)
elif filename == cls.configs[1]:
cfg += "echo hello"
return cfg
@staticmethod
def subnetentry(x):
"""
Generate a subnet declaration block given an IPv4 prefix string
for inclusion in the config file.
"""
if x.find(":") >= 0:
# this is an IPv6 address
return ""
else:
net = Ipv4Prefix(x)
return 'echo " network %s"' % net
### Get Startup
@classmethod
def get_startup(cls, node):
# Provides a way to dynamically generate the startup commands for the node a service will run on.
# Defaults to the class definition and can be left out entirely if not needed.
return cls.startup
### Get Validate
@classmethod
def get_validate(cls, node):
# Provides a way to dynamically generate the validate commands for the node a service will run on.
# Defaults to the class definition and can be left out entirely if not needed.
return cls.validate
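Not shown in this sample: the custom service still has to be made known to the daemon. A hedged sketch, assuming the ServiceManager API imported by the test suite below (alternatively, a custom_services_dir entry in /etc/core/core.conf can usually point at the directory holding this module):
from core.service import ServiceManager

# register the class with the daemon's service registry (assumed API)
ServiceManager.add(MyService)
assert ServiceManager.get("MyService") is MyService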


@ -33,7 +33,7 @@ data_files = [
"data/core.conf",
"data/logging.conf",
]),
(_MAN_DIR, glob_files("../doc/man/**.1")),
(_MAN_DIR, glob_files("../man/**.1")),
]
data_files.extend(recursive_files(_EXAMPLES_DIR, "examples"))


@ -3,6 +3,7 @@ import os
import pytest
from core.service import CoreService
from core.service import ServiceDependencies
from core.service import ServiceManager
_PATH = os.path.abspath(os.path.dirname(__file__))
@ -19,20 +20,20 @@ class ServiceA(CoreService):
class ServiceB(CoreService):
name = "B"
dependencies = ("C",)
dependencies = ()
class ServiceC(CoreService):
name = "C"
dependencies = ()
dependencies = ("B", "D")
class ServiceD(CoreService):
name = "D"
dependencies = ("A",)
dependencies = ()
class ServiceE(CoreService):
class ServiceBadDependency(CoreService):
name = "E"
dependencies = ("Z",)
@ -42,6 +43,10 @@ class ServiceF(CoreService):
dependencies = ()
class ServiceCycleDependency(CoreService):
name = "G"
class TestServices:
def test_service_all_files(self, session):
# given
@ -245,7 +250,23 @@ class TestServices:
assert default_service == my_service
assert custom_service and custom_service != my_service
def test_services_dependencies(self, session):
def test_services_dependencies(self):
# given
services = [
ServiceA,
ServiceB,
ServiceC,
ServiceD,
ServiceF
]
# when
boot_paths = ServiceDependencies(services).boot_paths()
# then
assert len(boot_paths) == 2
def test_services_dependencies_not_present(self):
# given
services = [
ServiceA,
@ -253,39 +274,25 @@ class TestServices:
ServiceC,
ServiceD,
ServiceF,
ServiceBadDependency
]
# when
startups = session.services.create_boot_paths(services)
# when, then
with pytest.raises(ValueError):
ServiceDependencies(services).boot_paths()
# then
assert len(startups) == 2
def test_services_dependencies_not_present(self, session):
def test_services_dependencies_cycle(self):
# given
service_d = ServiceD()
service_d.dependencies = ("C",)
services = [
ServiceA,
ServiceB,
ServiceC,
ServiceE
]
# when
with pytest.raises(ValueError):
session.services.create_boot_paths(services)
def test_services_dependencies_cycle(self, session):
# given
service_c = ServiceC()
service_c.dependencies = ("D",)
services = [
ServiceA,
ServiceB,
service_c,
ServiceD,
service_d,
ServiceF
]
# when
# when, then
with pytest.raises(ValueError):
session.services.create_boot_paths(services)
ServiceDependencies(services).boot_paths()

doc/.gitignore

@ -1,2 +0,0 @@
_build
conf.py


@ -1,163 +0,0 @@
# CORE
# (c)2009-2012 the Boeing Company.
# See the LICENSE file included in this distribution.
#
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
#
# Builds html and pdf documentation using Sphinx.
#
SUBDIRS = man figures
# extra cruft to remove
DISTCLEANFILES = Makefile.in stamp-vti
rst_files = conf.py.in constants.txt credits.rst ctrlnet.rst devguide.rst \
emane.rst index.rst install.rst intro.rst machine.rst \
ns3.rst performance.rst scripting.rst usage.rst requirements.txt
EXTRA_DIST = $(rst_files)
###### below this line was generated using sphinx-quickstart ######
# Makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS = -q
SPHINXBUILD = sphinx-build
PAPER =
BUILDDIR = _build
STATICDIR = _static
# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest figures-icons
help:
@echo "Please use \`make <target>' where <target> is one of"
@echo " html to make standalone HTML files"
@echo " dirhtml to make HTML files named index.html in directories"
@echo " singlehtml to make a single large HTML file"
@echo " pickle to make pickle files"
@echo " json to make JSON files"
@echo " htmlhelp to make HTML files and a HTML help project"
@echo " qthelp to make HTML files and a qthelp project"
@echo " devhelp to make HTML files and a Devhelp project"
@echo " epub to make an epub"
@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
@echo " latexpdf to make LaTeX files and run them through pdflatex"
@echo " text to make text files"
@echo " man to make manual pages"
@echo " changes to make an overview of all changed/added/deprecated items"
@echo " linkcheck to check all external links for integrity"
@echo " doctest to run all doctests embedded in the documentation (if enabled)"
clean-local:
-rm -rf $(BUILDDIR) $(STATICDIR)
html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest: figures-icons $(BUILDDIR) $(STATICDIR)
$(BUILDDIR) $(STATICDIR):
$(MKDIR_P) $@
figures-icons:
cd figures && make icons
html:
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
dirhtml:
$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
singlehtml:
$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
@echo
@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
pickle:
$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
@echo
@echo "Build finished; now you can process the pickle files."
json:
$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
@echo
@echo "Build finished; now you can process the JSON files."
htmlhelp:
$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
@echo
@echo "Build finished; now you can run HTML Help Workshop with the" \
".hhp project file in $(BUILDDIR)/htmlhelp."
qthelp:
$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
@echo
@echo "Build finished; now you can run "qcollectiongenerator" with the" \
".qhcp project file in $(BUILDDIR)/qthelp, like this:"
@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/CORE.qhcp"
@echo "To view the help file:"
@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/CORE.qhc"
devhelp:
$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
@echo
@echo "Build finished."
@echo "To view the help file:"
@echo "# mkdir -p $$HOME/.local/share/devhelp/CORE"
@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/CORE"
@echo "# devhelp"
epub:
$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
@echo
@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
latex:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo
@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
@echo "Run \`make' in that directory to run these through (pdf)latex" \
"(use \`make latexpdf' here to do that automatically)."
latexpdf:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo "Running LaTeX files through pdflatex..."
make -C $(BUILDDIR)/latex all-pdf
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
text:
$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
@echo
@echo "Build finished. The text files are in $(BUILDDIR)/text."
man:
$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
@echo
@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
changes:
$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
@echo
@echo "The overview file is in $(BUILDDIR)/changes."
linkcheck:
$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
@echo
@echo "Link check complete; look for any errors in the above output " \
"or in $(BUILDDIR)/linkcheck/output.txt."
doctest:
$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
@echo "Testing of doctests in the sources finished, look at the " \
"results in $(BUILDDIR)/doctest/output.txt."


@ -1,257 +0,0 @@
# -*- coding: utf-8 -*-
#
# CORE documentation build configuration file, created by
# sphinx-quickstart on Wed Jun 13 10:44:22 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.imgmath', 'sphinx.ext.ifconfig']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'CORE'
copyright = u'2005-2018, core-dev'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '@PACKAGE_VERSION@'
# The full version, including alpha/beta/rc tags.
release = '@PACKAGE_VERSION@'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'COREdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'CORE.tex', u'CORE Documentation',
u'core-dev', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'core', u'CORE Documentation',
[u'core-dev'], 1)
]
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'CORE'
epub_author = u'core-dev'
epub_publisher = u'core-dev'
epub_copyright = u'2005-2018, core-dev'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True


@ -1,23 +0,0 @@
.. |UBUNTUVERSION| replace:: 12.04 or 14.04
.. |FEDORAVERSION| replace:: 19 or 20
.. |CENTOSVERSION| replace:: 6.x or 7.x
.. |CORERPM| replace:: 1.fc20.x86_64.rpm
.. |CORERPM2| replace:: 1.fc20.noarch.rpm
.. |COREDEB| replace:: 0ubuntu1_precise_amd64.deb
.. |COREDEB2| replace:: 0ubuntu1_precise_all.deb
.. |QVER| replace:: quagga-0.99.21mr2.2
.. |QVERDEB| replace:: quagga-mr_0.99.21mr2.2_amd64.deb
.. |QVERRPM| replace:: quagga-0.99.21mr2.2-1.fc16.x86_64.rpm
.. |APTDEPS| replace:: bash bridge-utils ebtables iproute libev-dev python
.. |APTDEPS2| replace:: tcl8.5 tk8.5 libtk-img
.. |APTDEPS3| replace:: autoconf automake gcc libev-dev make python-dev libreadline-dev pkg-config imagemagick help2man
.. |YUMDEPS| replace:: bash bridge-utils ebtables iproute libev python procps-ng net-tools
.. |YUMDEPS2| replace:: tcl tk tkimg
.. |YUMDEPS3| replace:: autoconf automake make libev-devel python-devel ImageMagick help2man


@ -1,26 +0,0 @@
.. This file is part of the CORE Manual
(c)2012 the Boeing Company
.. _Acknowledgements:
***************
Acknowledgments
***************
The CORE project was derived from the open source IMUNES project from the
University of Zagreb in 2004. In 2006, changes for CORE were released back to
that project, some items of which were adopted. Marko Zec <zec@fer.hr> is the
primary developer from the University of Zagreb responsible for the IMUNES
(GUI) and VirtNet (kernel) projects. Ana Kukec and Miljenko Mikuc are known
contributors.
Jeff Ahrenholz has been the primary Boeing
developer of CORE, and has written this manual. Tom Goff
designed the Python framework and has made significant
contributions. Claudiu Danilov, Rod Santiago,
Kevin Larson, Gary Pei, Phil Spagnolo, and Ian Chakeres
have contributed code to CORE. Dan Mackley helped
develop the CORE API, originally to interface with a simulator.
Jae Kim and Tom Henderson
have supervised the project and provided direction.


@ -1,193 +0,0 @@
.. This file is part of the CORE Manual
(c)2015 the Boeing Company
.. _Control_Network:
***************
Control Network
***************
.. index:: controlnet
.. index:: control network
.. index:: X11 applications
.. index:: node access to the host
.. index:: host access to a node
The CORE control network allows the virtual nodes to communicate with their host environment.
There are two types: the primary control network and auxiliary control networks. The primary
control network is used mainly for communicating with the virtual nodes from host machines
and for master-slave communications in a multi-server distributed environment. Auxiliary control networks
have been introduced for routing namespace-hosted emulation software traffic
to the test network.
.. _Activating_the_Primary_Control_Network:
Activating the Primary Control Network
======================================
Under the :ref:`Session_Menu`, the *Options...* dialog has an option to set
a *control network prefix*.
This can be set to a network prefix such as
``172.16.0.0/24``. A bridge will be created on the host machine having the last
address in the prefix range (e.g. ``172.16.0.254``), and each node will have
an extra ``ctrl0`` control interface configured with an address corresponding
to its node number (e.g. ``172.16.0.3`` for ``n3``.)
A default for the primary control network may also
be specified by setting the ``controlnet`` line in the
:file:`/etc/core/core.conf` configuration file which new
sessions will use by default. To simultaneously run multiple sessions with control networks, the session
option should be used instead of the :file:`core.conf` default.
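For example, a single line like the following (prefix value illustrative) in :file:`/etc/core/core.conf` sets the default:
::
controlnet = 172.16.0.0/24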
.. NOTE::
If you have a large scenario with more than 253 nodes, use a control
network prefix that allows more than the suggested ``/24``, such as ``/23``
or greater.
.. IMPORTANT::
Running a session with a control network can fail if a previous session has set up a control network and its bridge is still up.
Close the previous session first or wait for it to complete. If unable to, the ``core-daemon`` may need to be restarted and the lingering
bridge(s) removed manually:
::
# Restart the CORE Daemon
sudo /etc/init.d/core-daemon restart
# Remove lingering control network bridges
ctrlbridges=`brctl show | grep b.ctrl | awk '{print $1}'`
for cb in $ctrlbridges; do
sudo ifconfig $cb down
sudo brctl delbr $cb
done
.. TIP::
If adjustments to the primary control network configuration made in :file:`/etc/core/core.conf` do not seem
to take effect, check if there is anything set in the :ref:`Session_Menu`, the *Options...* dialog. They may
need to be cleared. These per-session settings override the defaults in :file:`/etc/core/core.conf`.
.. _Distributed_Control_Network:
Control Network in Distributed Sessions
=======================================
.. index:: distributed control network
.. index:: control network distributed
When the primary control network is activated for a distributed session,
a control network bridge will be created on each of the slave servers, with GRE tunnels back
to the master server's bridge. The slave control bridges are not assigned an
address. From the host, any of the nodes (local or remote) can be accessed,
just like the single server case.
In some situations, remote emulated nodes need to communicate with the
host on which they are running and not the master server.
Multiple control network prefixes can be specified in either the session option
or :file:`/etc/core/core.conf`, separated by spaces and beginning with the master server.
Each entry has the form "``server:prefix``". For example, if the servers *core1*, *core2*, and *core3*
are assigned nodes in the scenario, and :file:`/etc/core/core.conf` is used instead of
the session option:
::
controlnet=core1:172.16.1.0/24 core2:172.16.2.0/24 core3:172.16.3.0/24
then, the control network bridges will be assigned as follows:
*core1* = ``172.16.1.254`` (assuming it is the master server),
*core2* = ``172.16.2.254``, and *core3* = ``172.16.3.254``.
Tunnels back to the master server will still be built, but it is up to the
user to add appropriate routes if networking between control network
prefixes is desired. The control network script may help with this.
Control Network Script
^^^^^^^^^^^^^^^^^^^^^^
.. index:: control network scripts
.. index:: controlnet_updown_script
A control network script may be specified using the ``controlnet_updown_script``
option in the :file:`/etc/core/core.conf` file. This script will be run after
the bridge has been built (and address assigned) with the first argument being
the name of the bridge, and the second argument being the keyword "``startup``".
The script will again be invoked prior to bridge removal with the second
argument being the keyword "``shutdown``".
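As an illustrative sketch only (CORE does not ship such a file), the script can be any executable; this minimal Python version simply records each invocation of the two arguments described above:
::
#!/usr/bin/env python
# hypothetical controlnet_updown_script: log each bridge startup/shutdown call
import sys
bridge, action = sys.argv[1], sys.argv[2]
with open("/tmp/controlnet-updown.log", "a") as log:
    log.write("%s %s\n" % (action, bridge))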
Auxiliary Control Networks
==========================
.. index:: auxiliary control networks
.. index:: auxiliary_controlnet
Starting with EMANE 0.9.2, CORE will run EMANE instances within namespaces.
Since it is advisable to separate the OTA traffic from other traffic
(See :ref:`Distributed_EMANE`), we will need more than single channel
leading out from the namespace. Up to three auxiliary control networks may be defined.
Multiple control networks are set up in :file:`/etc/core/core.conf` file.
Lines ``controlnet1``, ``controlnet2`` and ``controlnet3`` define the auxiliary
networks. The format of the values assigned to the controlnets is the same as in
:ref:`Distributed_Control_Network`. For example, having the following
lines in :file:`/etc/core/core.conf`:
::
controlnet = core1:172.17.1.0/24 core2:172.17.2.0/24 core3:172.17.3.0/24
controlnet1 = core1:172.18.1.0/24 core2:172.18.2.0/24 core3:172.18.3.0/24
controlnet2 = core1:172.19.1.0/24 core2:172.19.2.0/24 core3:172.19.3.0/24
will activate the primary and two auxiliary control networks and
add interfaces ``ctrl0``, ``ctrl1``, ``ctrl2`` to each node. One use case would
be to assign ``ctrl1`` to the OTA manager device and ``ctrl2`` to the Event Service device
in the EMANE Options dialog box and leave ``ctrl0`` for CORE control traffic.
.. NOTE::
``controlnet0`` may be used in place of ``controlnet`` to configure the primary control network.
Unlike the primary control network, the auxiliary control networks will not employ
tunneling since their primary purpose is for efficiently transporting multicast EMANE OTA and
event traffic. Note that there is no per-session configuration for auxiliary control networks.
To extend the auxiliary control networks across a distributed test environment,
host network interfaces need to be added to them. The following lines in
:file:`/etc/core/core.conf` will add host devices ``eth1``, ``eth2`` and
``eth3`` to ``controlnet1``, ``controlnet2``, ``controlnet3``:
::
controlnetif1 = eth1
controlnetif2 = eth2
controlnetif3 = eth3
.. NOTE::
There is no need to assign an interface to the primary control network
because tunnels are formed between the master and the slaves using IP
addresses that are provided in ``servers.conf``. (See :ref:`Distributed_Emulation`.)
Shown below is a representative diagram of the configuration above.
.. _example_control_network:
.. figure:: figures/controlnetwork.*
:alt: Control Network Diagram
:align: center
:scale: 75%
Example Control Network


@ -1,180 +0,0 @@
.. This file is part of the CORE Manual
(c)2012-2013 the Boeing Company
.. _Developer's_Guide:
*****************
Developer's Guide
*****************
This section contains advanced usage information, intended for developers and
others who are comfortable with the command line.
.. _Coding_Standard:
Coding Standard
===============
The coding standard and style guide for the CORE project are maintained online.
Please refer to the `coding standard
<http://code.google.com/p/coreemu/wiki/Hacking>`_ posted on the CORE Wiki.
.. _Source_Code_Guide:
Source Code Guide
=================
The CORE source consists of several different programming languages for
historical reasons. Current development focuses on the Python modules and
daemon. Here is a brief description of the source directories.
These are being actively developed as of CORE |version|:
* *gui* - Tcl/Tk GUI. This uses Tcl/Tk because of its roots with the IMUNES
project.
* *daemon* - Python modules are found in the :file:`daemon/core` directory, the
daemon under :file:`daemon/scripts/core-daemon`
* *netns* - Python extension modules for Linux Network Namespace support are in :file:`netns`.
* *ns3* - Python ns3 script support for running CORE.
* *doc* - Documentation for the manual lives here in reStructuredText format.
.. _The_CORE_API:
The CORE API
============
.. index:: CORE; API
.. index:: API
.. index:: remote API
The CORE API is used between different components of CORE for communication.
The GUI communicates with the CORE daemon using the API. One emulation server
communicates with another using the API. The API also allows other systems to
interact with the CORE emulation. The API allows another system to add, remove,
or modify nodes and links, and enables executing commands on the emulated
systems. Wireless link parameters are updated on-the-fly based on node
positions.
CORE listens on a local TCP port for API messages. The other system could be
software running locally or another machine accessible across the network.
The CORE API is currently specified in a separate document, available from the
CORE website.
.. _Linux_network_namespace_Commands:
Linux network namespace Commands
================================
.. index:: lxctools
Linux network namespace containers are often managed using the *Linux Container
Tools* or *lxc-tools* package. The lxc-tools website is available here
`<http://lxc.sourceforge.net/>`_ for more information. CORE does not use these
management utilities, but includes its own set of tools for instantiating and
configuring network namespace containers. This section describes these tools.
.. index:: vnoded
The *vnoded* daemon is the program used to create a new namespace, and
listen on a control channel for commands that may instantiate other processes.
This daemon runs as PID 1 in the container. It is launched automatically by
the CORE daemon. The control channel is a UNIX domain socket usually named
:file:`/tmp/pycore.23098/n3`, for node 3 running on CORE
session 23098, for example. Root privileges are required for creating a new
namespace.
.. index:: vcmd
The *vcmd* program is used to connect to the *vnoded* daemon in a Linux network
namespace, for running commands in the namespace. The CORE daemon
uses the same channel for setting up a node and running processes within it.
This program has two
required arguments, the control channel name, and the command line to be run
within the namespace. This command does not need to run with root privileges.
When you double-click
on a node in a running emulation, CORE will open a shell window for that node
using a command such as:
::
gnome-terminal -e vcmd -c /tmp/pycore.50160/n1 -- bash
Similarly, the IPv4 routes Observer Widget will run a command to display the routing table using a command such as:
::
vcmd -c /tmp/pycore.50160/n1 -- /sbin/ip -4 ro
.. index:: core-cleanup
A script named *core-cleanup* is provided to clean up any running CORE
emulations. It will attempt to kill any remaining vnoded processes, kill any
EMANE processes, remove the :file:`/tmp/pycore.*` session directories, and
remove any bridges or *ebtables* rules. With a *-d* option, it will also kill
any running CORE daemon.
.. index:: netns
The *netns* command is not used by CORE directly. This utility can be used to
run a command in a new network namespace for testing purposes. It does not open
a control channel for receiving further commands.
Here are some other Linux commands that are useful for managing the Linux
network namespace emulation.
::
# view the Linux bridging setup
brctl show
# view the netem rules used for applying link effects
tc qdisc show
# view the rules that make the wireless LAN work
ebtables -L
Below is a transcript of creating two emulated nodes and connecting them together with a wired link:
.. index:: create nodes from command-line
.. index:: command-line
::
# create node 1 namespace container
vnoded -c /tmp/n1.ctl -l /tmp/n1.log -p /tmp/n1.pid
# create a virtual Ethernet (veth) pair, installing one end into node 1
ip link add name n1.0.1 type veth peer name n1.0
ip link set n1.0 netns `cat /tmp/n1.pid`
vcmd -c /tmp/n1.ctl -- ip link set lo up
vcmd -c /tmp/n1.ctl -- ip link set n1.0 name eth0 up
vcmd -c /tmp/n1.ctl -- ip addr add 10.0.0.1/24 dev eth0
# create node 2 namespace container
vnoded -c /tmp/n2.ctl -l /tmp/n2.log -p /tmp/n2.pid
# create a virtual Ethernet (veth) pair, installing one end into node 2
ip link add name n2.0.1 type veth peer name n2.0
ip link set n2.0 netns `cat /tmp/n2.pid`
vcmd -c /tmp/n2.ctl -- ip link set lo up
vcmd -c /tmp/n2.ctl -- ip link set n2.0 name eth0 up
vcmd -c /tmp/n2.ctl -- ip addr add 10.0.0.2/24 dev eth0
# bridge together nodes 1 and 2 using the other end of each veth pair
brctl addbr b.1.1
brctl setfd b.1.1 0
brctl addif b.1.1 n1.0.1
brctl addif b.1.1 n2.0.1
ip link set n1.0.1 up
ip link set n2.0.1 up
ip link set b.1.1 up
# display connectivity and ping from node 1 to node 2
brctl show
vcmd -c /tmp/n1.ctl -- ping 10.0.0.2
The above example script can be found as :file:`twonodes.sh` in the
:file:`examples/netns` directory. Use *core-cleanup* to clean up after the
script.


@ -1,359 +0,0 @@
.. This file is part of the CORE Manual
(c)2012 the Boeing Company
.. _EMANE:
*****
EMANE
*****
.. index:: EMANE
This chapter describes running CORE with the EMANE emulator.
.. _What_is_EMANE?:
What is EMANE?
==============
.. index:: EMANE; introduction to
The Extendable Mobile Ad-hoc Network Emulator (EMANE) allows heterogeneous
network emulation using a pluggable MAC and PHY layer architecture. The EMANE
framework provides an implementation architecture for modeling different radio
interface types in the form of *Network Emulation Modules* (NEMs) and
incorporating these modules into a real-time emulation running in a distributed
environment.
EMANE is developed by U.S. Naval Research Labs (NRL) Code 5522 and Adjacent
Link LLC,
who maintain these websites:
* `<http://www.nrl.navy.mil/itd/ncs/products/emane>`_
* `<http://www.adjacentlink.com/>`_
Instead of building Linux Ethernet bridging networks with CORE, higher-fidelity
wireless networks can be emulated using EMANE bound to virtual devices. CORE
emulates layers 3 and above (network, session, application) with its virtual
network stacks and process space for protocols and applications, while EMANE
emulates layers 1 and 2 (physical and data link) using its pluggable PHY and
MAC models.
The interface between CORE and EMANE is a TAP device. CORE builds the virtual
node using Linux network namespaces, installs the TAP device into the
namespace and instantiates one EMANE process in the namespace.
The EMANE process binds a user space socket to the TAP device for
sending and receiving data from CORE.
.. NOTE::
When the installed EMANE version is older than 0.9.2, EMANE runs on the host
and binds a userspace socket to the TAP device, before it is pushed into the
namespace, for sending and receiving data. The *Virtual Transport* was
the EMANE component responsible for connecting with the TAP device.
An EMANE instance sends and receives OTA traffic to and from other
EMANE instances via a control port (e.g. ``ctrl0``, ``ctrl1``).
It also sends and receives Events to and from the Event Service using
the same or a different control port.
EMANE models are configured through CORE's WLAN configuration dialog. A
corresponding EmaneModel Python class is sub-classed for each supported EMANE
model, to provide configuration items and their mapping to XML files. This way
new models can be easily supported. When CORE starts the emulation, it
generates the appropriate XML files that specify the EMANE NEM configuration,
and launches the EMANE daemons.
Some EMANE models support location information to determine when packets should
be dropped. EMANE has an event system where location events are broadcast to
all NEMs. CORE can generate these location events when nodes are moved on the
canvas. The canvas size and scale dialog has controls for mapping the X,Y
coordinate system to a latitude, longitude geographic system that EMANE uses.
When specified in the :file:`core.conf` configuration file, CORE can also
subscribe to EMANE location events and move the nodes on the canvas as they are
moved in the EMANE emulation. This would occur when an Emulation Script
Generator, for example, is running a mobility script.
.. index:: EMANE; Configuration
.. index:: EMANE; Installation
.. _EMANE_Configuration:
EMANE Configuration
===================
CORE and EMANE currently work together only on the Linux network namespaces
platform. The normal CORE installation instructions should be followed from
:ref:`Installation`.
The CORE configuration file :file:`/etc/core/core.conf` has options specific to
EMANE. Namely, the `emane_models` line contains a comma-separated list of EMANE
models that will be available. Each model has a corresponding Python file
containing the *EmaneModel* subclass. A portion of the default
:file:`core.conf` file is shown below:
::
# EMANE configuration
emane_platform_port = 8101
emane_transform_port = 8201
emane_event_monitor = False
emane_models = RfPipe, Ieee80211abg
EMANE can be installed from deb or RPM packages or from source. See the
`EMANE website <http://www.nrl.navy.mil/itd/ncs/products/emane>`_ for
full details.
Here are quick instructions for installing all EMANE packages:
::
# install dependencies
sudo apt-get install libssl-dev libxml-libxml-perl libxml-simple-perl
# download and install EMANE 0.8.1
export URL=http://downloads.pf.itd.nrl.navy.mil/emane/0.8.1-r2
wget $URL/emane-0.8.1-release-2.ubuntu-12_04.amd64.tgz
tar xzf emane-0.8.1-release-2.ubuntu-12_04.amd64.tgz
sudo dpkg -i emane-0.8.1-release-2/deb/ubuntu-12_04/amd64/*.deb
If you have an EMANE event generator (e.g. mobility or pathloss scripts) and
want to have CORE subscribe to EMANE location events, set the following line in
the :file:`/etc/core/core.conf` configuration file:
::
emane_event_monitor = True
Do not set the above option to True if you want to manually drag nodes around
on the canvas to update their location in EMANE.
Another common issue is if installing EMANE from source, the default configure
prefix will place the DTD files in :file:`/usr/local/share/emane/dtd` while
CORE expects them in :file:`/usr/share/emane/dtd`. A symbolic link will fix
this:
::
sudo ln -s /usr/local/share/emane /usr/share/emane
.. _Single_PC_with_EMANE:
Single PC with EMANE
====================
This section describes running CORE and EMANE on a single machine. This is the
default mode of operation when building an EMANE network with CORE. The OTA
manager and Event service interface are set to use ``ctrl0`` and the virtual nodes
use the primary control channel for communicating with one another. The primary
control channel is automatically activated when a scenario involves EMANE.
Using the primary control channel prevents your emulation session from sending
multicast traffic on your local network and interfering with other EMANE users.
.. NOTE::
When the installed EMANE version is earlier than 0.9.2, the OTA manager and
Event service interfaces are set to use the loopback device.
EMANE is configured through a WLAN node, because it is all about emulating
wireless radio networks. Once a node is linked to a WLAN cloud configured with
an EMANE model, the radio interface on that node may also be configured
separately (apart from the cloud.)
Double-click on a WLAN node to invoke the WLAN configuration dialog. Click the
*EMANE* tab; when EMANE has
been properly installed, EMANE wireless modules should be listed in the
*EMANE Models* list. (You may need to restart the CORE daemon if
it was running prior to installing the EMANE Python bindings.)
Click on a model name to enable it.
When an EMANE model is selected in the *EMANE Models* list, clicking on
the *model options* button causes the GUI to query the CORE daemon for
configuration items. Each model will have different parameters, refer to the
EMANE documentation for an explanation of each item. The default values are
presented in the dialog. Clicking *Apply* and *Apply* again will store
the EMANE model selections.
The *EMANE options* button
allows specifying some global parameters for EMANE, some of
which are necessary for distributed operation, see :ref:`Distributed_EMANE`.
.. index:: RF-PIPE model
.. index:: 802.11 model
.. index:: ieee80211abg model
.. index:: geographic location
.. index:: Universal PHY
The RF-PIPE and IEEE 802.11abg models use a Universal PHY that supports
geographic location information for determining pathloss between nodes. A
default latitude and longitude location is provided by CORE and this
location-based pathloss is enabled by default; this is the *pathloss mode*
setting for the Universal PHY. Moving a node on the canvas while the emulation
is running generates location events for EMANE. To view or change the
geographic location or scale of the canvas use the *Canvas Size and Scale*
dialog available from the *Canvas* menu.
.. index:: UTM zones
.. index:: UTM projection
Note that conversion between geographic and Cartesian
coordinate systems is done using UTM
(Universal Transverse Mercator) projection, where
different zones of 6 degree longitude bands are defined.
The location events generated by
CORE may become inaccurate near the zone boundaries for very large scenarios
that span multiple UTM zones. It is recommended that EMANE location scripts
be used to achieve geo-location accuracy in this situation.
Clicking the green *Start* button launches the emulation and causes TAP
devices to be created in the virtual nodes that are linked to the EMANE WLAN.
These devices appear with interface names such as eth0, eth1, etc. The EMANE
processes should now be running in each namespace. For a four node scenario:
::
> ps -aef | grep emane
root 1063 969 0 11:46 ? 00:00:00 emane -d --logl 3 -r -f /tmp/pycore.59992/emane4.log /tmp/pycore.59992/platform4.xml
root 1117 959 0 11:46 ? 00:00:00 emane -d --logl 3 -r -f /tmp/pycore.59992/emane2.log /tmp/pycore.59992/platform2.xml
root 1179 942 0 11:46 ? 00:00:00 emane -d --logl 3 -r -f /tmp/pycore.59992/emane1.log /tmp/pycore.59992/platform1.xml
root 1239 979 0 11:46 ? 00:00:00 emane -d --logl 3 -r -f /tmp/pycore.59992/emane5.log /tmp/pycore.59992/platform5.xml
The example above shows the EMANE processes started by CORE. To view the configuration generated by CORE, look in the
:file:`/tmp/pycore.nnnnn/` session directory for a :file:`platform.xml` file
and other XML files. One easy way to view this information is by
double-clicking one of the virtual nodes, and typing *cd ..* in the shell to go
up to the session directory.
.. _single_pc_emane_figure:
.. figure:: figures/single-pc-emane.*
:alt: Single PC Emane
:align: center
:scale: 75%
Single PC with EMANE
.. index:: Distributed_EMANE
.. _Distributed_EMANE:
Distributed EMANE
=================
Running CORE and EMANE distributed among two or more emulation servers is
similar to running on a single machine. There are a few key configuration items
that need to be set in order to be successful, and those are outlined here.
It is a good idea to maintain separate networks for data (OTA) and control. The control
network may be a shared laboratory network, for example, and you do not want
multicast traffic on the data network to interfere with other EMANE users. Furthermore,
control traffic could interfere with the OTA latency and throughput and might affect
emulation fidelity. The examples described here will use *eth0* as a control interface
and *eth1* as a data interface, although using separate interfaces
is not strictly required. Note that these interface names refer to interfaces
present on the host machine, not virtual interfaces within a node.
.. IMPORTANT::
If an auxiliary control network is used, an interface on the host has to be assigned to that network.
See :ref:`Distributed_Control_Network`
Each machine that will act as an emulation server needs to have CORE and EMANE
installed. Refer to the :ref:`Distributed_Emulation` section for configuring
CORE.
The IP addresses of the available servers are configured from the
CORE emulation servers dialog box (choose *Session* then
*Emulation servers...*) described in :ref:`Distributed_Emulation`.
This list of servers is stored in a :file:`~/.core/servers.conf` file.
The dialog shows available servers, some or all of which may be
assigned to nodes on the canvas.
Nodes need to be assigned to emulation servers as described in
:ref:`Distributed_Emulation`. Select several nodes, right-click them, and
choose *Assign to* and the name of the desired server. When a node is not
assigned to any emulation server, it will be emulated locally. The local
machine that the GUI connects with is considered the "master" machine, which in
turn connects to the other emulation server "slaves". Public key SSH should
be configured from the master to the slaves as mentioned in the
:ref:`Distributed_Emulation` section.
Under the *EMANE* tab of the EMANE WLAN, click on the *EMANE options* button.
This brings
up the emane configuration dialog. The *enable OTA Manager channel* should
be set to *on*. The *OTA Manager device* and *Event Service device* should
be set to a control network device. For example, if you have
a primary and auxiliary control network (i.e. controlnet and controlnet1), and you want
the OTA traffic to have its dedicated network, set the OTA Manager device to ``ctrl1``
and the Event Service device to ``ctrl0``.
The EMANE models can be configured as described in :ref:`Single_PC_with_EMANE`.
Click *Apply* to save these settings.
.. _distributed_emane_figure:
.. figure:: figures/distributed-emane-configuration.*
:alt: Distributed EMANE
:align: center
:scale: 75%
Distributed EMANE Configuration
.. NOTE::
When the installed EMANE version is earlier than 0.9.2, EMANE needs access to the host machine's
interfaces, and the OTA Manager and Event Service devices can be set to physical interfaces.
.. HINT::
Here is a quick checklist for distributed emulation with EMANE.
1. Follow the steps outlined for normal CORE :ref:`Distributed_Emulation`.
2. Under the *EMANE* tab of the EMANE WLAN, click on *EMANE options*.
3. Turn on the *OTA Manager channel* and set the *OTA Manager device*.
Also set the *Event Service device*.
4. Select groups of nodes, right-click them, and assign them to servers
using the *Assign to* menu.
5. Synchronize the clocks on all emulation servers prior to starting the emulation,
   using ``ntp`` or ``ptp``; some EMANE models are sensitive to timing (see the
   example after this list).
6. Press the *Start* button to launch the distributed emulation.
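A simple way to synchronize clocks, assuming every server can reach a common NTP
server (``pool.ntp.org`` is only a placeholder):

::

    # run on each emulation server before starting the session
    sudo ntpdate -u pool.ntp.org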
Now when the Start button is used to instantiate the emulation, the local CORE Python
daemon will connect to the other emulation servers to which nodes have been assigned.
Each server will have its own session directory where the :file:`platform.xml`
file and other EMANE XML files are generated. The NEM IDs are automatically
coordinated across servers so there is no overlap. Each server also gets its
own Platform ID.
An Ethernet device is used for disseminating multicast EMANE events, as specified in the
*configure emane* dialog. EMANE's Event Service can be run with mobility or pathloss scripts
as described in :ref:`Single_PC_with_EMANE`. If CORE is not subscribed to location events, it
will generate them as nodes are moved on the canvas.
Double-clicking on a node during runtime will cause the GUI to attempt to SSH
to the emulation server for that node and run an interactive shell. The public
key SSH configuration should be tested with all emulation servers prior to
starting the emulation.
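A minimal sketch for setting up and testing key-based SSH from the master to one
slave, assuming a slave named ``server2`` (adjust the hostname and user as
appropriate for your setup):

::

    # on the master: create a key if needed, install it on the slave, then test
    ssh-keygen -t rsa
    ssh-copy-id user@server2
    ssh user@server2 true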
.. _distributed_emane_network_diagram:
.. figure:: figures/distributed-emane-network.*
:alt: Distributed EMANE
:align: center
:scale: 75%
Notional Distributed EMANE Network Diagram

View file

@ -1,24 +0,0 @@
cel.jpg
document-properties.jpg
host.jpg
hub.jpg
lanswitch.jpg
link.jpg
marker.jpg
mdr.jpg
observe.jpg
oval.jpg
pc.jpg
plot.jpg
rectangle.jpg
rj45.jpg
router.jpg
router_green.jpg
run.jpg
select.jpg
start.jpg
stop.jpg
text.jpg
tunnel.jpg
twonode.jpg
wlan.jpg

View file

@ -1,61 +0,0 @@
# CORE
# (c)2009-2012 the Boeing Company.
# See the LICENSE file included in this distribution.
#
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
#
#
# define new file extensions for handling figures and html
SUFFIXES = .jpg .gif
GIFTOJPG = convert -background white -flatten
# dia figures can be manually converted to jpg
# On Ubuntu 11.10, this is failing for some reason.
DIATOJPG = dia -t jpg -e
# these are file extension handlers for automatically converting between image
# file types; the .jpg files are built from .gif files from the GUI
# file extension handler to convert .gif to .jpg
%.jpg: %.gif
$(GIFTOJPG) $(top_srcdir)/gui/icons/tiny/$< $@
# file extension handler so we can list .gif as dependency for .gif.jpg
%.gif:
@echo "Using GUI file $(top_srcdir)/gui/icons/tiny/$@"
# list of base names for figures
figures = core-architecture core-workflow
# list of figures + dia suffix
figures_dia = $(figures:%=%.dia)
# list of figure + jpg suffix
figures_jpg = $(figures:%=%.jpg)
figures_png = \
controlnetwork.png \
distributed-controlnetwork.png \
distributed-emane-configuration.png \
distributed-emane-network.png \
single-pc-emane.png
# icons from the GUI source
icons = select start router host pc mdr router_green \
lanswitch hub wlan cel \
link rj45 tunnel marker oval rectangle text \
stop observe plot twonode run document-properties
# list of icons + .gif.jpg suffix
icons_jpg = $(icons:%=%.jpg)
icons: $(icons_jpg)
clean-local:
rm -f $(icons_jpg)
EXTRA_DIST = $(figures_dia) $(figures_jpg) $(figures_png)
# extra cruft to remove
DISTCLEANFILES = Makefile.in

Binary file not shown.

Binary file not shown.

View file

@ -1,33 +0,0 @@
.. This file is part of the CORE Manual
(c)2012,2015 the Boeing Company
.. only:: html or latex
CORE Manual
===========
.. toctree::
:maxdepth: 2
:numbered:
intro
install
usage
scripting
machine
ctrlnet
emane
ns3
performance
devguide
credits
Indices and tables
==================
.. only:: html
* :ref:`genindex`
* :ref:`search`

View file

@ -1,560 +0,0 @@
.. This file is part of the CORE Manual
(c)2012-2013 the Boeing Company
.. include:: constants.txt
.. _Installation:
************
Installation
************
This chapter describes how to set up a CORE machine. Note that the easiest
way to install CORE is using a binary
package on Ubuntu or Fedora (deb or rpm) using the distribution's package
manager
to automatically install dependencies, see :ref:`Installing_from_Packages`.
Ubuntu and Fedora Linux are the recommended distributions for running CORE. Ubuntu |UBUNTUVERSION| and Fedora |FEDORAVERSION| ship with kernels with support for namespaces built-in. They support the latest hardware. However,
these distributions are not strictly required. CORE will likely work on other
flavors of Linux, see :ref:`Installing_from_Source`.
The primary dependencies are Tcl/Tk (8.5 or newer) for the GUI, and Python 2.6 or 2.7 for the CORE daemon.
.. index:: install locations
.. index:: paths
.. index:: install paths
CORE files are installed to the following directories.
======================================================= =================================
Install Path Description
======================================================= =================================
:file:`/usr/local/bin/core-gui` GUI startup command
:file:`/usr/local/bin/core-daemon` Daemon startup command
:file:`/usr/local/bin/` Misc. helper commands/scripts
:file:`/usr/local/lib/core` GUI files
:file:`/usr/local/lib/python2.7/dist-packages/core` Python modules for daemon/scripts
:file:`/etc/core/` Daemon configuration files
:file:`~/.core/` User-specific GUI preferences and scenario files
:file:`/usr/local/share/core/` Example scripts and scenarios
:file:`/usr/local/share/man/man1/` Command man pages
:file:`/etc/init.d/core-daemon` SysV startup script for daemon
:file:`/etc/systemd/system/core-daemon.service` Systemd startup script for daemon
======================================================= =================================
.. _Prerequisites:
Prerequisites
=============
.. index:: Prerequisites
A Linux operating system is required. The GUI uses the Tcl/Tk scripting toolkit, and the CORE daemon requires Python. Details of the individual software packages required can be found in the installation steps.
.. _Required_Hardware:
Required Hardware
-----------------
.. index:: Hardware requirements
.. index:: System requirements
Any computer capable of running Linux should be able to run CORE. Since the physical machine will be hosting numerous virtual machines, as a general rule you should select a machine having as much RAM and CPU resources as possible.
A *general recommendation* would be:
* 2.0GHz or better x86 processor, the more processor cores the better
* 2 GB or more of RAM
* about 3 MB of free disk space (plus more for dependency packages such as Tcl/Tk)
* X11 for the GUI, or remote X11 over SSH
The computer can be a laptop, desktop, or rack-mount server. A keyboard, mouse,
and monitor are not required if a network connection is available
for remotely accessing the machine. A 3D accelerated graphics card
is not required.
.. _Required_Software:
Required Software
-----------------
CORE requires a Linux operating system because it uses virtualization provided by the kernel. It does not run on the Windows or Mac OS X operating systems (unless it is running within a virtual machine guest.)
The virtualization technology that CORE currently uses is
Linux network namespaces;
see :ref:`How_Does_it_Work?` for virtualization details.
**Linux network namespaces is the recommended platform.** Development is focused here and it supports the latest features. It is the easiest to install because there is no need to patch, install, and run a special Linux kernel.
The CORE GUI requires the X.Org X Window system (X11), or can run over a
remote X11 session. For specific Tcl/Tk, Python, and other libraries required
to run CORE, refer to the :ref:`Installation` section.
.. NOTE::
CORE :ref:`Services` determine what runs on each node. You may require
other software packages depending on the services you wish to use.
For example, the `HTTP` service will require the `apache2` package.
.. _Installing_from_Packages:
Installing from Packages
========================
.. index:: installer
.. index:: binary packages
The easiest way to install CORE is using the pre-built packages. The package
managers on Ubuntu or Fedora will
automatically install dependencies for you.
You can obtain the CORE packages from the `CORE downloads <http://downloads.pf.itd.nrl.navy.mil/core/packages/>`_ page
or `CORE GitHub <https://github.com/coreemu/core/releases>`_.
.. _Installing_from_Packages_on_Ubuntu:
Installing from Packages on Ubuntu
----------------------------------
First install the Ubuntu |UBUNTUVERSION| operating system.
* Install Quagga for routing. If you plan on working with wireless
networks, we recommend
installing
`OSPF MDR <http://www.nrl.navy.mil/itd/ncs/products/ospf-manet>`__
(replace `amd64` below with `i386` if needed
to match your architecture):
.. parsed-literal::
export URL=http://downloads.pf.itd.nrl.navy.mil/ospf-manet
wget $URL/|QVER|/|QVERDEB|
sudo dpkg -i |QVERDEB|
or, for the regular Ubuntu version of Quagga:
::
sudo apt-get install quagga
* Install the CORE deb packages for Ubuntu, using a GUI that automatically
resolves dependencies (note that the absolute path to the deb file
must be used with ``software-center``):
.. parsed-literal::
software-center /home/user/Downloads/core-daemon\_\ |version|-|COREDEB|
software-center /home/user/Downloads/core-gui\_\ |version|-|COREDEB2|
or install from command-line:
.. parsed-literal::
sudo dpkg -i core-daemon\_\ |version|-|COREDEB|
sudo dpkg -i core-gui\_\ |version|-|COREDEB2|
* Start the CORE daemon as root.
::
sudo /etc/init.d/core-daemon start
* Run the CORE GUI as a normal user:
::
core-gui
After running the ``core-gui`` command, a GUI should appear with a canvas
for drawing topologies. Messages will print out on the console about
connecting to the CORE daemon.
.. _Installing_from_Packages_on_Fedora:
Installing from Packages on Fedora/CentOS
-----------------------------------------
The commands shown here should be run as root. First install the Fedora
|FEDORAVERSION| or CentOS |CENTOSVERSION| operating system.
The `x86_64` architecture is shown in the
examples below; replace with `i686` if using a 32-bit architecture. Also,
`fc15` is shown below for Fedora 15 packages; replace with the appropriate
Fedora release number.
* **CentOS only:** in order to install the `libev` and `tkimg` prerequisite
packages, you
first need to install the `EPEL <http://fedoraproject.org/wiki/EPEL>`_ repo
(Extra Packages for Enterprise Linux):
::
wget http://dl.fedoraproject.org/pub/epel/6/i386/epel-release-6-8.noarch.rpm
yum localinstall epel-release-6-8.noarch.rpm
* **CentOS 7.x only:** as of this writing, the `tkimg` prerequisite package
is missing from EPEL 7.x, but the EPEL 6.x package can be manually installed
from
`here <http://dl.fedoraproject.org/pub/epel/6/x86_64/repoview/tkimg.html>`_
::
wget http://dl.fedoraproject.org/pub/epel/6/x86_64/tkimg-1.4-1.el6.x86_64.rpm
yum localinstall tkimg-1.4-1.el6.x86_64.rpm
* **Optional:** install the prerequisite packages (otherwise skip this
step and have the package manager install them for you.)
.. parsed-literal::
# make sure the system is up to date; you can also use the
# update applet instead of yum update
yum update
yum install |YUMDEPS| |YUMDEPS2|
* **Optional (Fedora 17+):** Fedora 17 and newer have an additional
prerequisite providing the required netem kernel modules (otherwise
skip this step and have the package manager install it for you.)
::
yum install kernel-modules-extra
* Install Quagga for routing. If you plan on working with wireless networks,
we recommend installing
`OSPF MDR <http://www.nrl.navy.mil/itd/ncs/products/ospf-manet>`_:
.. parsed-literal::
export URL=http://downloads.pf.itd.nrl.navy.mil/ospf-manet
wget $URL/|QVER|/|QVERRPM|
yum localinstall |QVERRPM|
or, for the regular Fedora version of Quagga:
::
yum install quagga
* Install the CORE RPM packages for Fedora and automatically resolve
dependencies:
.. parsed-literal::
yum localinstall python-core_|service|-|version|-|CORERPM| --nogpgcheck
yum localinstall core-gui-|version|-|CORERPM2| --nogpgcheck
or install from the command-line:
.. parsed-literal::
rpm -ivh python-core_|service|-|version|-|CORERPM|
rpm -ivh core-gui-|version|-|CORERPM2|
* Turn off SELINUX by setting ``SELINUX=disabled`` in the :file:`/etc/sysconfig/selinux` file, and adding ``selinux=0`` to the kernel line in
your :file:`/etc/grub.conf` file; on Fedora 15 and newer, disable sandboxd using ``chkconfig sandbox off``;
you need to reboot in order for this change to take effect
* Turn off firewalls with ``systemctl disable firewalld``, ``systemctl disable iptables.service``, ``systemctl disable ip6tables.service`` (``chkconfig iptables off``, ``chkconfig ip6tables off``) or configure them with permissive rules for CORE virtual networks; you need to reboot after making this change, or flush the firewall using ``iptables -F``, ``ip6tables -F``.
* Start the CORE daemon as root. Fedora uses the ``systemd`` start-up daemon
instead of traditional init scripts. CentOS uses the init script.
::
# for Fedora using systemd:
systemctl daemon-reload
systemctl start core-daemon.service
# or for CentOS:
/etc/init.d/core-daemon start
* Run the CORE GUI as a normal user:
::
core-gui
After running the ``core-gui`` command, a GUI should appear with a canvas
for drawing topologies. Messages will print out on the console about
connecting to the CORE daemon.
.. _Installing_from_Source:
Installing from Source
======================
This option is listed here for developers and advanced users who are comfortable patching and building source code. Please consider using the binary packages instead for a simplified install experience.
.. _Installing_from_Source_on_Ubuntu:
Installing from Source on Ubuntu
--------------------------------
To build CORE from source on Ubuntu, first install these development packages.
These packages are not required for normal binary package installs.
.. parsed-literal::
sudo apt-get install |APTDEPS| \\
|APTDEPS2| \\
|APTDEPS3|
You can obtain the CORE source from the `CORE source <http://downloads.pf.itd.nrl.navy.mil/core/source/>`_ page. Choose either a stable release version or
the development snapshot available in the `nightly_snapshots` directory.
The ``-j8`` argument to ``make`` will run eight simultaneous jobs, to speed up
builds on multi-core systems.
.. parsed-literal::
tar xzf core-|version|.tar.gz
cd core-|version|
./bootstrap.sh
./configure
make -j8
sudo make install
The CORE Manual documentation is built separately from the :file:`doc/`
sub-directory in the source. It requires Sphinx:
.. parsed-literal::
sudo apt-get install python-sphinx
cd core-|version|/doc
make html
make latexpdf
.. _Installing_from_Source_on_Fedora:
Installing from Source on Fedora
--------------------------------
To build CORE from source on Fedora, install these development packages.
These packages are not required for normal binary package installs.
.. parsed-literal::
yum install |YUMDEPS| \\
|YUMDEPS2| \\
|YUMDEPS3|
.. NOTE::
For a minimal X11 installation, also try these packages::
yum install xauth xterm urw-fonts
You can obtain the CORE source from the `CORE source <http://downloads.pf.itd.nrl.navy.mil/core/source/>`_ page. Choose either a stable release version or
the development snapshot available in the :file:`nightly_snapshots` directory.
The ``-j8`` argument to ``make`` will run eight simultaneous jobs, to speed up
builds on multi-core systems. Notice the ``configure`` flag to tell the build
system that a systemd service file should be installed under Fedora.
.. parsed-literal::
tar xzf core-|version|.tar.gz
cd core-|version|
./bootstrap.sh
./configure --with-startup=systemd
make -j8
sudo make install
Another note is that the Python distutils in Fedora Linux will install the CORE
Python modules to :file:`/usr/lib/python2.7/site-packages/core`, instead of
using the :file:`dist-packages` directory.
The CORE Manual documentation is built separately from the :file:`doc/`
sub-directory in the source. It requires Sphinx:
.. parsed-literal::
sudo yum install python-sphinx
cd core-|version|/doc
make html
make latexpdf
.. _Installing_from_Source_on_CentOS:
Installing from Source on CentOS/EL6
------------------------------------
To build CORE from source on CentOS/EL6, first install the `EPEL <http://fedoraproject.org/wiki/EPEL>`_ repo (Extra Packages for Enterprise Linux) in order
to provide the `libev` package.
::
wget http://dl.fedoraproject.org/pub/epel/6/i386/epel-release-6-8.noarch.rpm
yum localinstall epel-release-6-8.noarch.rpm
Now use the same instructions shown in :ref:`Installing_from_Source_on_Fedora`.
CentOS/EL6 does not use the systemd service file, so the `configure` option
`--with-startup=systemd` should be omitted:
::
./configure
.. _Installing_from_Source_on_SUSE:
Installing from Source on SUSE
------------------------------
To build CORE from source on SUSE or OpenSUSE,
use the similar instructions shown in :ref:`Installing_from_Source_on_Fedora`,
except that the following `configure` option should be used:
::
./configure --with-startup=suse
This causes a separate init script to be installed that is tailored towards SUSE systems.
The `zypper` command is used instead of `yum`.
The Quagga routing suite is recommended for routing; see
:ref:`Quagga_Routing_Software` for installation.
.. _Quagga_Routing_Software:
Quagga Routing Software
=======================
.. index:: Quagga
Virtual networks generally require some form of routing in order to work (e.g.
to automatically populate routing tables for routing packets from one subnet
to another.) CORE builds OSPF routing protocol
configurations by default when the blue router
node type is used. The OSPF protocol is available
from the `Quagga open source routing suite <http://www.quagga.net>`_.
Other routing protocols are available using different
node services; see :ref:`Default_Services_and_Node_Types`.
Quagga is not specified as a dependency for the CORE packages because
there are two different Quagga packages that you may use:
* `Quagga <http://www.quagga.net>`_ - the standard version of Quagga, suitable for static wired networks, and usually available via your distribution's package manager.
.. index:: OSPFv3 MANET
.. index:: OSPFv3 MDR
.. index:: MANET Designated Routers (MDR)
* `OSPF MANET Designated Routers <http://www.nrl.navy.mil/itd/ncs/products/ospf-manet>`_ (MDR) - the Quagga routing suite with a modified version of OSPFv3,
  optimized for use with mobile wireless networks. The *mdr* node type (and the MDR service) requires this variant of Quagga.
If you plan on working with wireless networks, we recommend installing OSPF MDR;
otherwise install the standard version of Quagga using your package manager or from source.
.. _Installing_Quagga_from_Packages:
Installing Quagga from Packages
-------------------------------
To install the standard version of Quagga from packages, use your package manager (Linux).
Ubuntu users:
::
sudo apt-get install quagga
Fedora users:
::
yum install quagga
To install the Quagga variant having OSPFv3 MDR, first download the
appropriate package, and install using the package manager.
Ubuntu users:
.. parsed-literal::
export URL=http://downloads.pf.itd.nrl.navy.mil/ospf-manet
wget $URL/|QVER|/|QVERDEB|
sudo dpkg -i |QVERDEB|
Replace `amd64` with `i686` if using a 32-bit architecture.
Fedora users:
.. parsed-literal::
export URL=http://downloads.pf.itd.nrl.navy.mil/ospf-manet
wget $URL/|QVER|/|QVERRPM|
yum localinstall |QVERRPM|
Replace `x86_64` with `i686` if using a 32-bit architecture.
.. _Compiling_Quagga_for_CORE:
Compiling Quagga for CORE
-------------------------
To compile Quagga to work with CORE on Linux:
.. parsed-literal::
tar xzf |QVER|.tar.gz
cd |QVER|
./configure --enable-user=root --enable-group=root --with-cflags=-ggdb \\
--sysconfdir=/usr/local/etc/quagga --enable-vtysh \\
--localstatedir=/var/run/quagga
make
sudo make install
Note that the configuration directory :file:`/usr/local/etc/quagga` shown for
Quagga above could be :file:`/etc/quagga`, if you create a symbolic link from
:file:`/etc/quagga/Quagga.conf -> /usr/local/etc/quagga/Quagga.conf` on the
host. The :file:`quaggaboot.sh` script in a Linux network namespace will try to
do this for you if needed.
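For example, the symbolic link described above can be created as follows (assuming
Quagga was installed under :file:`/usr/local` as shown):

::

    sudo mkdir -p /etc/quagga
    sudo ln -s /usr/local/etc/quagga/Quagga.conf /etc/quagga/Quagga.conf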
If you try to run quagga after installing from source and get an error such as:
.. parsed-literal::
error while loading shared libraries libzebra.so.0
this is usually a sign that you have to run `sudo ldconfig` to refresh the
cache file.
VCORE
=====
.. index:: virtual machines
.. index:: VirtualBox
.. index:: VMware
CORE is capable of running inside of a virtual machine, using
software such as VirtualBox,
VMware Server or QEMU. However, CORE itself is performing machine
virtualization in order to realize multiple emulated nodes, and running CORE
virtually adds additional contention for the physical resources. **For performance reasons, this is not recommended.** Timing inside of a VM often has
problems. If you do run CORE from within a VM, it is recommended that you view
the GUI with remote X11 over SSH, so the virtual machine does not need to
emulate the video card with the X11 application.
.. index:: VCORE
A CORE virtual machine is provided for download, named VCORE.
This is perhaps the easiest way to get CORE up and running, as the machine
is already set up for you. This may be adequate for initially evaluating the
tool but keep in mind the performance limitations of running within VirtualBox
or VMware. To install the virtual machine, you first need to obtain VirtualBox
from http://www.virtualbox.org, or VMware Server or Player from
http://www.vmware.com (this commercial software is distributed for free.)
Once virtualization software has been installed, you can import the virtual
machine appliance using the ``vbox`` file for VirtualBox or the ``vmx`` file for VMware. See the documentation that comes with VCORE for login information.

View file

@ -1,204 +0,0 @@
.. This file is part of the CORE Manual
(c)2012-2013 the Boeing Company
.. _Introduction:
************
Introduction
************
The Common Open Research Emulator (CORE) is a tool for building virtual
networks. As an emulator, CORE builds a representation of a real computer
network that runs in real time, as opposed to simulation, where abstract models
are used. The live-running emulation can be connected to physical networks and
routers. It provides an environment for running real applications and
protocols, taking advantage of virtualization provided by the Linux operating
system.
Some of its key features are:
.. index::
single: key features
* efficient and scalable
* runs applications and protocols without modification
* easy-to-use GUI
* highly customizable
CORE is typically used for network and protocol research,
demonstrations, application and platform testing, evaluating networking
scenarios, security studies, and increasing the size of physical test networks.
What's New?
===========
For readers who are already familiar with CORE and have read this manual before, below is a list of what changed in version 5.0:
* :ref:`Services` - Added Ryu SDN and Open vSwitch services
* :ref:`Python_Scripting` - Updated script examples to reflect code changes
.. index::
single: CORE; components of
single: CORE; API
single: API
single: CORE; GUI
.. _Architecture:
Architecture
============
The main components of CORE are shown in :ref:`core-architecture`. A
*CORE daemon* (backend) manages emulation sessions. It builds emulated networks
using kernel virtualization for virtual nodes and some form of bridging and
packet manipulation for virtual networks. The nodes and networks come together
via interfaces installed on nodes. The daemon is controlled via the
graphical user interface, the *CORE GUI* (frontend).
The daemon uses Python modules
that can be imported directly by Python scripts.
The GUI and the daemon communicate using a custom,
asynchronous, sockets-based API, known as the *CORE API*. The dashed line
in the figure notionally depicts the user-space and kernel-space separation.
The components the user interacts with are colored blue: GUI, scripts, or
command-line tools.
The system is modular to allow mixing different components. The virtual
networks component, for example, can be realized with other network
simulators and emulators, such as ns-3 and EMANE.
Another example is how a session can be designed and started using
the GUI, and continue to run in "headless" operation with the GUI closed.
The CORE API is sockets based,
to allow the possibility of running different components on different physical
machines.
.. _core-architecture:
.. figure:: figures/core-architecture.*
:alt: CORE architecture diagram
:align: center
:scale: 75 %
CORE Architecture
The CORE GUI is a Tcl/Tk program; it is started using the command
``core-gui``. The CORE daemon, named ``core-daemon``,
is usually started via the init script
(``/etc/init.d/core-daemon`` or ``core-daemon.service``,
depending on platform.)
The CORE daemon manages sessions of virtual
nodes and networks, of which other scripts and utilities may be used for
further control.
.. _How_Does_It_Work?:
How Does it Work?
=================
A CORE node is a lightweight virtual machine. The CORE framework runs on Linux.
.. index::
single: Linux; virtualization
single: Linux; containers
single: LXC
single: network namespaces
* :ref:`Linux` CORE uses Linux network namespace virtualization to build virtual nodes, and ties them together with virtual networks using Linux Ethernet bridging.
.. _Linux:
Linux
-----
Linux network namespaces (also known as netns, LXC, or `Linux containers
<http://lxc.sourceforge.net/>`_) is the primary virtualization
technique used by CORE. LXC has been part of the mainline Linux kernel since
2.6.24. Recent Linux distributions such as Fedora and Ubuntu have
namespaces-enabled kernels out of the box.
A namespace is created using the ``clone()`` system call. Each namespace has
its own process environment and private network stack. Network namespaces
share the same filesystem in CORE.
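CORE creates its namespaces directly with ``clone()``, but the same kind of private
network stack can be illustrated from the shell with the ``ip netns`` utility (shown
only as an illustration; this is not how CORE builds its nodes):

::

    sudo ip netns add demo
    sudo ip netns exec demo ip link show   # the namespace sees only its own loopback
    sudo ip netns delete demo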
.. index::
single: Linux; bridging
single: Linux; networking
single: ebtables
CORE combines these namespaces with Linux Ethernet bridging
to form networks. Link characteristics are applied using Linux Netem queuing
disciplines. Ebtables is Ethernet frame filtering on Linux bridges. Wireless
networks are emulated by controlling which interfaces can send and receive with
ebtables rules.
.. index::
single: IMUNES
single: VirtNet
single: prior work
.. rubric:: Footnotes
.. [#f1] http://www.nlnet.nl/project/virtnet/
.. [#f2] http://www.imunes.net/virtnet/
.. _Prior_Work:
Prior Work
==========
The Tcl/Tk CORE GUI was originally derived from the open source
`IMUNES <http://www.tel.fer.hr/imunes/>`_
project from the University of Zagreb
as a custom project within Boeing Research and Technology's Network
Technology research group in 2004. Since then they have developed the CORE
framework to use Linux virtualization, have developed a
Python framework, and made numerous user- and kernel-space developments, such
as support for wireless networks, IPsec, the ability to distribute emulations,
simulation integration, and more. The IMUNES project also consists of userspace
and kernel components. Originally, one had to download and apply a patch for
the FreeBSD 4.11 kernel, but the more recent
`VirtNet <http://www.nlnet.nl/project/virtnet/>`_
effort has brought network stack
virtualization to the more modern FreeBSD 8.x kernel.
.. _Open_Source_Project_and_Resources:
Open Source Project and Resources
=================================
.. index::
single: open source project
single: license
single: website
single: supplemental website
single: contributing
CORE has been released by Boeing to the open source community under the BSD
license. If you find CORE useful for your work, please contribute back to the
project. Contributions can be as simple as reporting a bug, dropping a line of
encouragement or technical suggestions to the mailing lists, or can also
include submitting patches or maintaining aspects of the tool. For contributing to
CORE, please visit the
`CORE GitHub <https://github.com/coreemu/core>`_.
Besides this manual, there are other additional resources available online:
* `CORE website <http://www.nrl.navy.mil/itd/ncs/products/core>`_ - main project page containing demos, downloads, and mailing list information.
.. index::
single: CORE
Goals
-----
These are the Goals of the CORE project; they are similar to what we consider to be the :ref:`key features <Introduction>`.
#. Ease of use - In a few clicks the user should have a running network.
#. Efficiency and scalability - A node is more lightweight than a full virtual machine. Tens of nodes should be possible on a standard laptop computer.
#. Software re-use - Re-use real implementation code, protocols, networking stacks.
#. Networking - CORE is focused on emulating networks and offers various ways to connect the running emulation with real or simulated networks.
#. Hackable - The source code is available and easy to understand and modify.
Non-Goals
---------
This is a list of Non-Goals, specific things that people may be interested in but are not areas that we will pursue.
#. Reinventing the wheel - Where possible, CORE reuses existing open source components such as virtualization, Netgraph, netem, bridging, Quagga, etc.
#. 1,000,000 nodes - While the goal of CORE is to provide efficient, scalable network emulation, there is no set goal of N number of nodes. There are realistic limits on what a machine can handle as its resources are divided amongst virtual nodes. We will continue to make things more efficient and let the user determine the right number of nodes based on available hardware and the activities each node is performing.
#. Solves every problem - CORE is about emulating networking layers 3-7 using virtual network stacks in Linux operating systems.
#. Hardware-specific - CORE itself is not an instantiation of hardware, a testbed, or a specific laboratory setup; it should run on commodity laptop and desktop PCs, in addition to high-end server hardware.

View file

@ -1,71 +0,0 @@
.. This file is part of the CORE Manual
(c)2012-2013 the Boeing Company
.. _Machine_Types:
*************
Machine Types
*************
.. index:: machine types
Different node types can be configured in CORE, and each node type has a
*machine type* that indicates how the node will be represented at run time.
Different machine types allow for different virtualization options.
.. _netns:
netns
=====
.. index:: netns machine type
The *netns* machine type is the default. This is for nodes that will be
backed by Linux network namespaces. See :ref:`Linux` for a brief explanation of
netns. This default machine type is very lightweight, providing a minimum
amount of
virtualization in order to emulate a network.
Another reason this is designated as the default machine type
is that this virtualization technology
typically requires no changes to the kernel; it is available out-of-the-box
from the latest mainstream Linux distributions.
.. index:: physical machine type
.. index:: emulation testbed machines
.. index:: real node
.. index:: physical node
.. _physical:
physical
========
The *physical* machine type is used for nodes that represent a real
Linux-based machine that will participate in the emulated network scenario.
This is typically used, for example, to incorporate racks of server machines
from an emulation testbed. A physical node is one that is running the CORE
daemon (:file:`core-daemon`), but will not be further partitioned into virtual
machines. Services that are run on the physical node do not run in an
isolated or virtualized environment, but directly on the operating system.
Physical nodes must be assigned to servers, the same way nodes
are assigned to emulation servers with :ref:`Distributed_Emulation`.
The list of available physical nodes currently shares the same dialog box
and list as the emulation servers, accessed using the *Emulation Servers...*
entry from the *Session* menu.
.. index:: GRE tunnels with physical nodes
Support for physical nodes is under development and may be improved in future
releases. Currently, when any node is linked to a physical node, a dashed line
is drawn to indicate network tunneling. A GRE tunneling interface will be
created on the physical node and used to tunnel traffic to and from the
emulated world.
Double-clicking on a physical node during runtime
opens a terminal with an SSH shell to that
node. Users should configure public-key SSH login as done with emulation
servers.

View file

@ -1,314 +0,0 @@
.. This file is part of the CORE Manual
(c)2012-2013 the Boeing Company
.. _ns-3:
****
ns-3
****
.. index:: ns-3
This chapter describes running CORE with the
`ns-3 network simulator <http://www.nsnam.org>`_.
.. _What_is_ns-3?:
What is ns-3?
=============
.. index:: ns-3 Introduction
ns-3 is a discrete-event network simulator for Internet systems, targeted primarily for research and educational use. [#f1]_ By default, ns-3 simulates entire networks, from applications down to channels, and it does so in simulated time, instead of real (wall-clock) time.
CORE can run in conjunction with ns-3 to simulate some types of networks. CORE
network namespace virtual nodes can have virtual TAP interfaces installed using
the simulator for communication. The simulator needs to run at wall clock time
with the real-time scheduler. In this type of configuration, the CORE
namespaces are used to provide packets to the ns-3 devices and channels.
This allows, for example, wireless models developed for ns-3 to be used
in an emulation context.
Users simulate networks with ns-3 by writing C++ programs or Python scripts that
import the ns-3 library. Simulation models are objects instantiated in these
scripts. Combining the CORE Python modules with ns-3 Python bindings allows
a script to easily set up and manage an emulation + simulation environment.
.. rubric:: Footnotes
.. [#f1] http://www.nsnam.org
.. _ns-3_Scripting:
ns-3 Scripting
==============
.. index:: ns-3 scripting
Currently, ns-3 is supported by writing
:ref:`Python scripts <Python_Scripting>`, but not through
drag-and-drop actions within the GUI.
If you have a copy of the CORE source, look under :file:`ns3/examples/` for example scripts; a CORE installation package puts these under
:file:`/usr/share/core/examples/corens3`.
To run these scripts, install CORE so the CORE Python libraries are accessible,
and download and build ns-3. This has been tested using ns-3 releases starting
with 3.11 (and through 3.16 as of this writing).
The first step is to open an ns-3 waf shell. `waf <http://code.google.com/p/waf/>`_ is the build system for ns-3. Opening a waf shell as root will merely
set some environment variables useful for finding python modules and ns-3
executables. The following environment variables are extended or set by
issuing `waf shell`:
::
PATH
PYTHONPATH
LD_LIBRARY_PATH
NS3_MODULE_PATH
NS3_EXECUTABLE_PATH
Open a waf shell as root, so that network namespaces may be instantiated
by the script with root permissions. For an example, run the
:file:`ns3wifi.py`
program, which simply instantiates 10 nodes (by default) and places them on
an ns-3 WiFi channel. That is, the script will instantiate 10 namespace nodes,
and create a special tap device that sends packets between the namespace
node and a special ns-3 simulation node, where the tap device is bridged
to an ns-3 WiFi network device, and attached to an ns-3 WiFi channel.
::
> cd ns-allinone-3.16/ns-3.16
> sudo ./waf shell
# # use '/usr/local' below if installed from source
# cd /usr/share/core/examples/corens3/
# python -i ns3wifi.py
running ns-3 simulation for 600 seconds
>>> print session
<corens3.obj.Ns3Session object at 0x1963e50>
>>>
The interactive Python shell allows some interaction with the Python objects
for the emulation.
In another terminal, nodes can be accessed using *vcmd*:
::
vcmd -c /tmp/pycore.10781/n1 -- bash
root@n1:/tmp/pycore.10781/n1.conf#
root@n1:/tmp/pycore.10781/n1.conf# ping 10.0.0.3
PING 10.0.0.3 (10.0.0.3) 56(84) bytes of data.
64 bytes from 10.0.0.3: icmp_req=1 ttl=64 time=7.99 ms
64 bytes from 10.0.0.3: icmp_req=2 ttl=64 time=3.73 ms
64 bytes from 10.0.0.3: icmp_req=3 ttl=64 time=3.60 ms
^C
--- 10.0.0.3 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2002ms
rtt min/avg/max/mdev = 3.603/5.111/7.993/2.038 ms
root@n1:/tmp/pycore.10781/n1.conf#
The ping packets shown above are traversing an ns-3 ad-hoc Wifi simulated
network.
To clean up the session, use the Session.shutdown() method from the Python
terminal.
::
>>> print session
<corens3.obj.Ns3Session object at 0x1963e50>
>>>
>>> session.shutdown()
>>>
A CORE/ns-3 Python script will instantiate an Ns3Session, which is a
CORE Session
having CoreNs3Nodes, an ns-3 MobilityHelper, and a fixed duration.
The CoreNs3Node inherits from both the CoreNode and the ns-3 Node classes -- it
is a network namespace having an associated simulator object. The CORE TunTap
interface is used, represented by a ns-3 TapBridge in `CONFIGURE_LOCAL`
mode, where ns-3 creates and configures the tap device. An event is scheduled
to install the taps at time 0.
.. NOTE::
The GUI can be used to run the :file:`ns3wifi.py`
and :file:`ns3wifirandomwalk.py` scripts directly. First, ``core-daemon``
must be
stopped and run within the waf root shell. Then the GUI may be run as
a normal user, and the *Execute Python Script...* option may be used from
the *File* menu. Dragging nodes around in the :file:`ns3wifi.py` example
will cause their ns-3 positions to be updated.
Users may find the files :file:`ns3wimax.py` and :file:`ns3lte.py`
in that example
directory; those files were similarly configured, but the underlying
ns-3 support is not present as of ns-3.16, so they will not work. Specifically,
ns-3 has to be extended to support bridging the Tap device to
an LTE or a WiMAX device.
.. _ns-3_Integration_details:
Integration details
===================
.. index:: ns-3 integration details
The previous example :file:`ns3wifi.py` used Python API from the special Python
objects *Ns3Session* and *Ns3WifiNet*. The example program does not import
anything directly from the ns-3 python modules; rather, only the above
two objects are used, and the API available to configure the underlying
ns-3 objects is constrained. For example, *Ns3WifiNet* instantiates
a constant-rate 802.11a-based ad hoc network, using a lot of ns-3 defaults.
However, programs may be written with a blend of ns-3 API and CORE Python
API calls. This section examines some of the fundamental objects in
the CORE ns-3 support. Source code can be found in
:file:`ns3/corens3/obj.py` and example
code in :file:`ns3/corens3/examples/`.
Ns3Session
----------
The *Ns3Session* class is a CORE Session that starts an ns-3 simulation
thread. ns-3 actually runs as a separate process on the same host as
the CORE daemon, and the control of starting and stopping this process
is performed by the *Ns3Session* class.
Example:
::
session = Ns3Session(persistent=True, duration=opt.duration)
Note the use of the duration attribute to control how long the ns-3 simulation
should run. By default, the duration is 600 seconds.
Typically, the session keeps track of the ns-3 nodes (holding a node
container for references to the nodes). This is accomplished via the
`addnode()` method, e.g.:
::
for i in xrange(1, opt.numnodes + 1):
node = session.addnode(name = "n%d" % i)
`addnode()` creates instances of a *CoreNs3Node*, which we'll cover next.
CoreNs3Node
-----------
A *CoreNs3Node* is both a CoreNode and an ns-3 node:
::
class CoreNs3Node(CoreNode, ns.network.Node):
''' The CoreNs3Node is both a CoreNode backed by a network namespace and
an ns-3 Node simulator object. When linked to simulated networks, the TunTap
device will be used.
CoreNs3Net
-----------
A *CoreNs3Net* derives from *PyCoreNet*. This network exists entirely
in simulation, using the TunTap device to interact between the emulated
and the simulated realm. *Ns3WifiNet* is a specialization of this.
As an example, this type of code would be typically used to add a WiFi
network to a session:
::
wifi = session.addobj(cls=Ns3WifiNet, name="wlan1", rate="OfdmRate12Mbps")
wifi.setposition(30, 30, 0)
The above two lines will create a wlan1 object and set its initial canvas
position. Later in the code, the newnetif method of the CoreNs3Node can
be used to add interfaces on particular nodes to this network; e.g.:
::
for i in xrange(1, opt.numnodes + 1):
node = session.addnode(name = "n%d" % i)
node.newnetif(wifi, ["%s/%s" % (prefix.addr(i), prefix.prefixlen)])
.. _ns-3_Mobility:
Mobility
========
.. index:: ns-3 mobility
Mobility in ns-3 is handled by an object (a MobilityModel) aggregated to
an ns-3 node. The MobilityModel is able to report the position of the
object in the ns-3 space. This is a slightly different model from, for
instance, EMANE, where location is associated with an interface, and the
CORE GUI, where mobility is configured by right-clicking on a WiFi
cloud.
The CORE GUI supports the ability to render the underlying ns-3 mobility
model, if one is configured, on the CORE canvas. For example, the
example program :file:`ns3wifirandomwalk.py` uses five nodes (by default) in
a random walk mobility model. This can be executed by starting the
core daemon from an ns-3 waf shell:
::
# sudo bash
# cd /path/to/ns-3
# ./waf shell
# core-daemon
and in a separate window, starting the CORE GUI (not from a waf shell)
and selecting the
*Execute Python script...* option from the File menu, selecting the
:file:`ns3wifirandomwalk.py` script.
The program invokes ns-3 mobility through the following statement:
::
session.setuprandomwalkmobility(bounds=(1000.0, 750.0, 0))
This can be replaced by a different mode of mobility, in which nodes
are placed according to a constant mobility model, and a special
API call to the CoreNs3Net object is made to use the CORE canvas
positions.
::
- session.setuprandomwalkmobility(bounds=(1000.0, 750.0, 0))
+ session.setupconstantmobility()
+ wifi.usecorepositions()
In this mode, the user dragging around the nodes on the canvas will
cause CORE to update the position of the underlying ns-3 nodes.
.. _ns-3_Under_Development:
Under Development
=================
.. index:: limitations with ns-3
Support for ns-3 is fairly new and still under active development.
Improved support may be found in the development snapshots available on the web.
The following limitations will be addressed in future releases:
* GUI configuration and control - currently ns-3 networks can only be
instantiated from a Python script or from the GUI hooks facility.
* Model support - currently the WiFi model is supported. The WiMAX and 3GPP LTE
models have been experimented with, but are not currently working with the
TapBridge device.

View file

@ -1,60 +0,0 @@
.. This file is part of the CORE Manual
(c)2012 the Boeing Company
.. _Performance:
.. include:: constants.txt
***********
Performance
***********
.. index:: performance
.. index:: number of nodes
The top question about the performance of CORE is often
*how many nodes can it handle?* The answer depends on several factors:
* Hardware - the number and speed of processors in the computer, the available
processor cache, RAM memory, and front-side bus speed may greatly affect
overall performance.
* Operating system version - distribution of Linux and the specific kernel versions
used will affect overall performance.
* Active processes - all nodes share the same CPU resources, so if one or more
nodes is performing a CPU-intensive task, overall performance will suffer.
* Network traffic - the more packets that are sent around the virtual network,
the higher the CPU usage.
* GUI usage - widgets that run periodically, mobility scenarios, and other GUI
interactions generally consume CPU cycles that may be needed for emulation.
On a typical single-CPU Xeon 3.0GHz server machine with 2GB RAM running Linux,
we have found it reasonable to run 30-75 nodes running
OSPFv2 and OSPFv3 routing. On this hardware CORE can instantiate 100 or more
nodes, but at that point what each node is doing becomes the critical factor.
.. index:: network performance
Because this software is primarily a network emulator, the more appropriate
question is *how much network traffic can it handle?* On the same 3.0GHz server
described above, running Linux, about 300,000 packets-per-second can be
pushed through the system. The number of hops and the size of the packets is
less important. The limiting factor is the number of times that the operating
system needs to handle a packet. The 300,000 pps figure represents the number
of times the system as a whole needed to deal with a packet. As more network
hops are added, this increases the number of context switches and decreases the
throughput seen on the full length of the network path.
.. NOTE::
The right question to be asking is *"how much traffic?"*,
not *"how many nodes?"*.
For a more detailed study of performance in CORE, refer to the following publications:
* J\. Ahrenholz, T. Goff, and B. Adamson, Integration of the CORE and EMANE Network Emulators, Proceedings of the IEEE Military Communications Conference 2011, November 2011.
* Ahrenholz, J., Comparison of CORE Network Emulation Platforms, Proceedings of the IEEE Military Communications Conference 2010, pp. 864-869, November 2010.
* J\. Ahrenholz, C. Danilov, T. Henderson, and J.H. Kim, CORE: A real-time network emulator, Proceedings of IEEE MILCOM Conference, 2008.

View file

@ -1,2 +0,0 @@
sphinx==1.6.3
sphinx_rtd_theme==0.2.4

View file

@ -1,124 +0,0 @@
.. This file is part of the CORE Manual
(c)2012 the Boeing Company
.. _Python_Scripting:
****************
Python Scripting
****************
.. index:: Python scripting
CORE can be used via the :ref:`GUI <Using_the_CORE_GUI>` or Python scripting.
Writing your own Python scripts offers a rich programming
environment with complete control over all aspects of the emulation.
This chapter provides a brief introduction to scripting. Most of the
documentation is available from sample scripts,
or online via interactive Python.
.. index:: sample Python scripts
The best starting point is the sample scripts that are
included with CORE. If you have a CORE source tree, the example script files
can be found under :file:`core/daemon/examples/netns/`. When CORE is installed
from packages, the example script files will be in
:file:`/usr/share/core/examples/netns/` (or the :file:`/usr/local/...` prefix
when installed from source.) For the most part, the example scripts
are self-documenting; see the comments contained within the Python code.
The scripts should be run with root privileges because they create new
network namespaces. In general, a CORE Python script does not connect to the
CORE daemon, :file:`core-daemon`; in fact, :file:`core-daemon`
is just another Python script
that uses the CORE Python modules and exchanges messages with the GUI.
To connect the GUI to your scripts, see the included sample scripts that
allow for GUI connections.
Here are the basic elements of a CORE Python script:
::
from core.session import Session
from core.netns import nodes
session = Session(1, persistent=True)
node1 = session.add_object(cls=nodes.CoreNode, name="n1")
node2 = session.add_object(cls=nodes.CoreNode, name="n2")
hub1 = session.add_object(cls=nodes.HubNode, name="hub1")
node1.newnetif(hub1, ["10.0.0.1/24"])
node2.newnetif(hub1, ["10.0.0.2/24"])
node1.vnodeclient.icmd(["ping", "-c", "5", "10.0.0.2"])
session.shutdown()
The above script creates a CORE session having two nodes connected with a hub.
The first node pings the second node with 5 ping packets; the result is
displayed on screen.
A good way to learn about the CORE Python modules is via interactive Python.
Scripts can be run using *python -i*. Cut and paste the simple script
above and you will have two nodes connected by a hub, with one node running
a test ping to the other.
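For example, assuming CORE was installed from packages, the sample switch topology
can be run interactively (use the :file:`/usr/local/...` path for source installs):

::

    sudo python -i /usr/share/core/examples/netns/switch.py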
The CORE Python modules are documented with comments in the code. From an
interactive Python shell, you can retrieve online help about the various
classes and methods; for example *help(nodes.CoreNode)* or
*help(Session)*.
.. index:: daemon versus script
.. index:: script versus daemon
.. index:: script with GUI support
.. index:: connecting GUI to script
.. NOTE::
The CORE daemon :file:`core-daemon` manages a list of sessions and allows
the GUI to connect and control sessions. Your Python script uses the
same CORE modules but runs independently of the daemon. The daemon
does not need to be running for your script to work.
The session created by a Python script may be viewed in the GUI if certain
steps are followed. The GUI has a :ref:`File_Menu`, *Execute Python script...*
option for running a script and automatically connecting to it. Once connected,
normal GUI interaction is possible, such as moving and double-clicking nodes,
activating Widgets, etc.
The script should have a line such as the following for running it from
the GUI.
::
if __name__ == "__main__" or __name__ == "__builtin__":
main()
Also, the script should add its session to the session list after creating it.
A global ``server`` variable is exposed to the script pointing to the
``CoreServer`` object in the :file:`core-daemon`.
::
def add_to_server(session):
''' Add this session to the server's list if this script is executed from
the core-daemon server.
'''
global server
try:
server.add_session(session)
return True
except NameError:
return False
::
session = Session(persistent=True)
add_to_server(session)
Finally, nodes and networks need to have their coordinates set to something,
otherwise they will be grouped at the coordinates ``<0, 0>``. First sketching
the topology in the GUI and then using the *Export Python script* option may
help here.
::
switch.setposition(x=80,y=50)
A fully-worked example script that you can launch from the GUI is available
in the file :file:`switch.py` in the examples directory.

1
docs/Makefile.am Normal file
View file

@ -0,0 +1 @@
EXTRA_DIST = $(wildcard *)

39
docs/architecture.md Normal file
View file

@ -0,0 +1,39 @@
# CORE Architecture
* Table of Contents
{:toc}
## Main Components
* CORE Daemon
* Manages emulation sessions
* Builds the emulated networks using kernel virtualization for nodes and some form of bridging and packet manipulation for virtual networks
* Nodes and networks come together via interfaces installed on nodes
* Controlled via the CORE GUI
* Written in Python and can be scripted, giving direct control of scenarios
* CORE GUI
* GUI and daemon communicate using a custom, asynchronous, sockets-based API, known as the CORE API
* Drag and drop creation for nodes and network interfaces
* Can launch terminals for emulated nodes in running scenarios
* Can save/open scenario files to recreate previous sessions
* Tcl/Tk program
![](static/core-architecture.jpg)
## How Does it Work?
A CORE node is a lightweight virtual machine. The CORE framework runs on Linux. CORE uses Linux network namespace virtualization to build virtual nodes, and ties them together with virtual networks using Linux Ethernet bridging.
### Linux
Linux network namespaces (also known as netns, LXC, or [Linux containers](http://lxc.sourceforge.net/)) is the primary virtualization technique used by CORE. LXC has been part of the mainline Linux kernel since 2.6.24. Most recent Linux distributions have namespaces-enabled kernels out of the box. A namespace is created using the ```clone()``` system call. Each namespace has its own process environment and private network stack. Network namespaces share the same filesystem in CORE.
CORE combines these namespaces with Linux Ethernet bridging to form networks. Link characteristics are applied using Linux Netem queuing disciplines. Ebtables is Ethernet frame filtering on Linux bridges. Wireless networks are emulated by controlling which interfaces can send and receive with ebtables rules.
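As a rough illustration of these building blocks (not the exact commands CORE runs), a namespace can be attached to a bridge through a veth pair and the link shaped with netem:

```shell
# create a namespace and a veth pair, keep one end on the host
sudo ip netns add n1
sudo ip link add veth0 type veth peer name veth0.1
sudo ip link set veth0 netns n1

# bridge the host end and apply link effects with netem
sudo brctl addbr b.1
sudo brctl addif b.1 veth0.1
sudo ip link set veth0.1 up
sudo ip link set b.1 up
sudo tc qdisc add dev veth0.1 root netem delay 20ms loss 1%
```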
## Prior Work
The Tcl/Tk CORE GUI was originally derived from the open source [IMUNES](http://imunes.net) project from the University of Zagreb as a custom project within Boeing Research and Technology's Network Technology research group in 2004. Since then they have developed the CORE framework to use Linux virtualization, have developed a Python framework, and made numerous user- and kernel-space developments, such as support for wireless networks, IPsec, the ability to distribute emulations, simulation integration, and more. The IMUNES project also consists of userspace and kernel components.
## Open Source Project and Resources
CORE has been released by Boeing to the open source community under the BSD license. If you find CORE useful for your work, please contribute back to the project. Contributions can be as simple as reporting a bug, dropping a line of encouragement or technical suggestions to the mailing lists, or can also include submitting patches or maintaining aspects of the tool. For contributing to CORE, please visit [CORE GitHub](https://github.com/coreemu/core).

88
docs/ctrlnet.md Normal file
View file

@ -0,0 +1,88 @@
# CORE Control Network
* Table of Contents
{:toc}
## Overview
The CORE control network allows the virtual nodes to communicate with their host environment. There are two types: the primary control network and auxiliary control networks. The primary control network is used mainly for communicating with the virtual nodes from host machines and for master-slave communications in a multi-server distributed environment. Auxiliary control networks have been introduced for routing namespace-hosted emulation software traffic to the test network.
## Activating the Primary Control Network
Under the *Session Menu*, the *Options...* dialog has an option to set a *control network prefix*.
This can be set to a network prefix such as *172.16.0.0/24*. A bridge will be created on the host machine having the last address in the prefix range (e.g. *172.16.0.254*), and each node will have an extra *ctrl0* control interface configured with an address corresponding to its node number (e.g. *172.16.0.3* for *n3*.)
A default for the primary control network may also be specified by setting the *controlnet* line in the */etc/core/core.conf* configuration file which new sessions will use by default. To simultaneously run multiple sessions with control networks, the session option should be used instead of the *core.conf* default.
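For example, a default prefix could be set like this in */etc/core/core.conf* (the prefix shown is only an example):

```shell
# /etc/core/core.conf
controlnet = 172.16.0.0/24
```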
**NOTE: If you have a large scenario with more than 253 nodes, use a control network prefix that allows more than the suggested */24*, such as */23* or greater.**
**IMPORTANT: Running a session with a control network can fail if a previous session has set up a control network and its bridge is still up. Close the previous session first or wait for it to complete. If unable to, the *core-daemon* may need to be restarted and the lingering bridge(s) removed manually.**
```shell
# Restart the CORE Daemon
sudo /etc/init.d/core-daemon restart
# Remove lingering control network bridges
ctrlbridges=`brctl show | grep b.ctrl | awk '{print $1}'`
for cb in $ctrlbridges; do
sudo ifconfig $cb down
sudo brctl delbr $cb
done
```
**TIP: If adjustments to the primary control network configuration made in */etc/core/core.conf* do not seem to take effect, check if there is anything set in the *Session Menu*, the *Options...* dialog. They may need to be cleared. These per-session settings override the defaults in */etc/core/core.conf*.**
## Control Network in Distributed Sessions
When the primary control network is activated for a distributed session, a control network bridge will be created on each of the slave servers, with GRE tunnels back to the master server's bridge. The slave control bridges are not assigned an address. From the host, any of the nodes (local or remote) can be accessed, just like the single server case.
In some situations, remote emulated nodes need to communicate with the host on which they are running and not the master server. Multiple control network prefixes can be specified in either the session option or */etc/core/core.conf*, separated by spaces and beginning with the master server. Each entry has the form *"server:prefix"*. For example, if the servers *core1*, *core2*, and *core3* are assigned nodes in the scenario and using */etc/core/core.conf* instead of the session option:
```shell
controlnet=core1:172.16.1.0/24 core2:172.16.2.0/24 core3:172.16.3.0/24
```
Then, the control network bridges will be assigned as follows:
* core1 = 172.16.1.254 (assuming it is the master server),
* core2 = 172.16.2.254
* core3 = 172.16.3.254
Tunnels back to the master server will still be built, but it is up to the user to add appropriate routes if networking between control network prefixes is desired. The control network script may help with this.
## Control Network Script
A control network script may be specified using the *controlnet_updown_script* option in the */etc/core/core.conf* file. This script will be run after the bridge has been built (and address assigned), with the first argument being the name of the bridge and the second argument being the keyword *"startup"*. The script will be invoked again prior to bridge removal with the second argument being the keyword *"shutdown"*.
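A minimal sketch of such a script is shown below; the logging commands are only placeholders for whatever site-specific setup is actually needed:
```shell
#!/bin/sh
# controlnet_updown_script sketch: $1 = control bridge name, $2 = "startup" or "shutdown"
BRIDGE=$1
ACTION=$2
case "$ACTION" in
    startup)
        # perform site-specific setup here, e.g. add routes or attach interfaces
        echo "$(date) control bridge $BRIDGE created" >> /tmp/controlnet.log
        ;;
    shutdown)
        # undo anything done at startup
        echo "$(date) control bridge $BRIDGE going down" >> /tmp/controlnet.log
        ;;
esac
```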
## Auxiliary Control Networks
Starting with EMANE 0.9.2, CORE will run EMANE instances within namespaces. Since it is advisable to separate the OTA traffic from other traffic, more than a single channel leading out of the namespace is needed. Up to three auxiliary control networks may be defined, using the *controlnet1*, *controlnet2* and *controlnet3* lines in the */etc/core/core.conf* file.
For example, having the following */etc/core/core.conf*:
```shell
controlnet = core1:172.17.1.0/24 core2:172.17.2.0/24 core3:172.17.3.0/24
controlnet1 = core1:172.18.1.0/24 core2:172.18.2.0/24 core3:172.18.3.0/24
controlnet2 = core1:172.19.1.0/24 core2:172.19.2.0/24 core3:172.19.3.0/24
```
This will activate the primary and two auxiliary control networks and add interfaces *ctrl0*, *ctrl1*, *ctrl2* to each node. One use case would be to assign *ctrl1* to the OTA manager device and *ctrl2* to the Event Service device in the EMANE Options dialog box and leave *ctrl0* for CORE control traffic.
**NOTE: *controlnet0* may be used in place of *controlnet* to configure the primary control network.**
Unlike the primary control network, the auxiliary control networks will not employ tunneling since their primary purpose is for efficiently transporting multicast EMANE OTA and event traffic. Note that there is no per-session configuration for auxiliary control networks.
To extend the auxiliary control networks across a distributed test environment, host network interfaces need to be added to them. The following lines in */etc/core/core.conf* will add host devices *eth1*, *eth2* and *eth3* to *controlnet1*, *controlnet2*, *controlnet3*:
```shell
controlnetif1 = eth1
controlnetif2 = eth2
controlnetif3 = eth3
```
**NOTE: There is no need to assign an interface to the primary control network because tunnels are formed between the master and the slaves using IP addresses that are provided in *servers.conf*.**
Shown below is a representative diagram of the configuration above.
![](static/controlnetwork.png)

113
docs/devguide.md Normal file
View file

@ -0,0 +1,113 @@
# CORE Developer's Guide
* Table of Contents
{:toc}
## Source Code Guide
The CORE source consists of several different programming languages for historical reasons. Current development focuses on the Python modules and daemon. Here is a brief description of the source directories.
These are being actively developed as of CORE 5.1:
* *gui* - Tcl/Tk GUI. This uses Tcl/Tk because of its roots with the IMUNES
project.
* *daemon* - Python modules are found in the *daemon/core* directory, the daemon under *daemon/scripts/core-daemon*.
* *netns* - Python extension modules for Linux network namespace support are in *netns*.
* *doc* - Documentation for the manual lives here in reStructuredText format.
Not actively being developed:
* *ns3* - Python ns3 script support for running CORE.
## The CORE API
The CORE API is used between different components of CORE for communication. The GUI communicates with the CORE daemon using the API. One emulation server communicates with another using the API. The API also allows other systems to interact with the CORE emulation. The API allows another system to add, remove, or modify nodes and links, and enables executing commands on the emulated systems. Wireless link parameters are updated on-the-fly based on node positions.
CORE listens on a local TCP port for API messages. The other system could be software running locally or another machine accessible across the network.
The CORE API is currently specified in a separate document, available from the CORE website.
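As a quick sanity check from the host, one can verify that the daemon is listening on the API TCP port (4038 is assumed here as the default daemon port):
```shell
# confirm the core-daemon API socket is listening (default port assumed)
ss -tlnp | grep 4038
```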
## Linux network namespace Commands
Linux network namespace containers are often managed using the *Linux Container Tools* or *lxc-tools* package. More information is available from the lxc-tools website at http://lxc.sourceforge.net/. CORE does not use these management utilities, but includes its own set of tools for instantiating and configuring network namespace containers. This section describes these tools.
### vnoded command
The *vnoded* daemon is the program used to create a new namespace, and listen on a control channel for commands that may instantiate other processes. This daemon runs as PID 1 in the container. It is launched automatically by the CORE daemon. The control channel is a UNIX domain socket usually named */tmp/pycore.23098/n3*, for node 3 running on CORE session 23098, for example. Root privileges are required for creating a new namespace.
### vcmd command
The *vcmd* program is used to connect to the *vnoded* daemon in a Linux network namespace, for running commands in the namespace. The CORE daemon uses the same channel for setting up a node and running processes within it. This program has two required arguments, the control channel name, and the command line to be run within the namespace. This command does not need to run with root privileges.
When you double-click on a node in a running emulation, CORE will open a shell window for that node using a command such as:
```shell
gnome-terminal -e vcmd -c /tmp/pycore.50160/n1 -- bash
```
Similarly, the IPv4 routes Observer Widget will run a command to display the routing table using a command such as:
```shell
vcmd -c /tmp/pycore.50160/n1 -- /sbin/ip -4 ro
```
### core-cleanup script
A script named *core-cleanup* is provided to clean up any running CORE emulations. It will attempt to kill any remaining vnoded processes, kill any EMANE processes, remove the /tmp/pycore.* session directories, and remove any bridges or *ebtables* rules. With a *-d* option, it will also kill any running CORE daemon.
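For example:
```shell
# kill leftover vnoded/EMANE processes, remove session directories and bridges,
# and also stop any running core-daemon (-d)
sudo core-cleanup -d
```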
### netns command
The *netns* command is not used by CORE directly. This utility can be used to run a command in a new network namespace for testing purposes. It does not open a control channel for receiving further commands.
### Other Useful Commands
Here are some other Linux commands that are useful for managing the Linux network namespace emulation.
```shell
# view the Linux bridging setup
brctl show
# view the netem rules used for applying link effects
tc qdisc show
# view the rules that make the wireless LAN work
ebtables -L
```
### Example Command Usage
Below is a transcript of creating two emulated nodes and connecting them together with a wired link:
```shell
# create node 1 namespace container
vnoded -c /tmp/n1.ctl -l /tmp/n1.log -p /tmp/n1.pid
# create a virtual Ethernet (veth) pair, installing one end into node 1
ip link add name n1.0.1 type veth peer name n1.0
ip link set n1.0 netns `cat /tmp/n1.pid`
vcmd -c /tmp/n1.ctl -- ip link set lo up
vcmd -c /tmp/n1.ctl -- ip link set n1.0 name eth0 up
vcmd -c /tmp/n1.ctl -- ip addr add 10.0.0.1/24 dev eth0
# create node 2 namespace container
vnoded -c /tmp/n2.ctl -l /tmp/n2.log -p /tmp/n2.pid
# create a virtual Ethernet (veth) pair, installing one end into node 2
ip link add name n2.0.1 type veth peer name n2.0
ip link set n2.0 netns `cat /tmp/n2.pid`
vcmd -c /tmp/n2.ctl -- ip link set lo up
vcmd -c /tmp/n2.ctl -- ip link set n2.0 name eth0 up
vcmd -c /tmp/n2.ctl -- ip addr add 10.0.0.2/24 dev eth0
# bridge together nodes 1 and 2 using the other end of each veth pair
brctl addbr b.1.1
brctl setfd b.1.1 0
brctl addif b.1.1 n1.0.1
brctl addif b.1.1 n2.0.1
ip link set n1.0.1 up
ip link set n2.0.1 up
ip link set b.1.1 up
# display connectivity and ping from node 1 to node 2
brctl show
vcmd -c /tmp/n1.ctl -- ping 10.0.0.2
```
The above example script can be found as *twonodes.sh* in the *examples/netns* directory. Use *core-cleanup* to clean up after the script.

143
docs/emane.md Normal file
View file

@ -0,0 +1,143 @@
# CORE/EMANE
* Table of Contents
{:toc}
## What is EMANE?
The Extendable Mobile Ad-hoc Network Emulator (EMANE) allows heterogeneous network emulation using a pluggable MAC and PHY layer architecture. The EMANE framework provides an implementation architecture for modeling different radio interface types in the form of *Network Emulation Modules* (NEMs) and incorporating these modules into a real-time emulation running in a distributed environment.
EMANE is developed by U.S. Naval Research Labs (NRL) Code 5522 and Adjacent Link LLC, who maintain these websites:
* http://www.nrl.navy.mil/itd/ncs/products/emane
* http://www.adjacentlink.com/
Instead of building Linux Ethernet bridging networks with CORE, higher-fidelity wireless networks can be emulated using EMANE bound to virtual devices. CORE emulates layers 3 and above (network, session, application) with its virtual network stacks and process space for protocols and applications, while EMANE emulates layers 1 and 2 (physical and data link) using its pluggable PHY and MAC models.
The interface between CORE and EMANE is a TAP device. CORE builds the virtual node using Linux network namespaces, installs the TAP device into the namespace and instantiates one EMANE process in the namespace. The EMANE process binds a user space socket to the TAP device for sending and receiving data from CORE.
An EMANE instance sends and receives OTA traffic to and from other EMANE instances via a control port (e.g. *ctrl0*, *ctrl1*). It also sends and receives Events to and from the Event Service using the same or a different control port. EMANE models are configured through CORE's WLAN configuration dialog. A corresponding EmaneModel Python class is sub-classed for each supported EMANE model, to provide configuration items and their mapping to XML files. This way new models can be easily supported. When CORE starts the emulation, it generates the appropriate XML files that specify the EMANE NEM configuration, and launches the EMANE daemons.
Some EMANE models support location information to determine when packets should be dropped. EMANE has an event system where location events are broadcast to all NEMs. CORE can generate these location events when nodes are moved on the canvas. The canvas size and scale dialog has controls for mapping the X,Y coordinate system to a latitude, longitude geographic system that EMANE uses. When specified in the *core.conf* configuration file, CORE can also subscribe to EMANE location events and move the nodes on the canvas as they are moved in the EMANE emulation. This would occur when an Emulation Script Generator, for example, is running a mobility script.
## EMANE Configuration
The CORE configuration file */etc/core/core.conf* has options specific to EMANE. An example emane section from the *core.conf* file is shown below:
```shell
# EMANE configuration
emane_platform_port = 8101
emane_transform_port = 8201
emane_event_monitor = False
#emane_models_dir = /home/username/.core/myemane
# EMANE log level range [0,4] default: 2
emane_log_level = 2
emane_realtime = True
```
EMANE can be installed from deb or RPM packages or from source. See the [EMANE GitHub](https://github.com/adjacentlink/emane) for full details.
Here are quick instructions for installing all EMANE packages:
```shell
# install dependencies
sudo apt-get install libssl-dev libxml-libxml-perl libxml-simple-perl
wget https://adjacentlink.com/downloads/emane/emane-1.2.1-release-1.ubuntu-16_04.amd64.tar.gz
tar xzf emane-1.2.1-release-1.ubuntu-16_04.amd64.tar.gz
sudo dpkg -i emane-1.2.1-release-1/deb/ubuntu-16_04/amd64/*.deb
```
If you have an EMANE event generator (e.g. mobility or pathloss scripts) and want to have CORE subscribe to EMANE location events, set the following line in the */etc/core/core.conf* configuration file:
```shell
emane_event_monitor = True
```
Do not set the above option to True if you want to manually drag nodes around on the canvas to update their location in EMANE.
Another common issue arises when installing EMANE from source: the default configure prefix will place the DTD files in */usr/local/share/emane/dtd* while CORE expects them in */usr/share/emane/dtd*.
A symbolic link will fix this:
```shell
sudo ln -s /usr/local/share/emane /usr/share/emane
```
## Custom EMANE Models
CORE supports custom developed EMANE models by way of dynamically loading user created python files that represent the model. Custom EMANE models should be placed within the path defined by **emane_models_dir** in the CORE configuration file. This path cannot end in **/emane**.
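For example, the following line in */etc/core/core.conf* (the path is only illustrative) points CORE at a directory of custom model modules:
```shell
# directory containing custom EMANE model Python files (must not end in /emane)
emane_models_dir = /home/username/.core/myemane
```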
Here is an example model with documentation describing functionality:
[Example Model](examplemodel.html)
## Single PC with EMANE
This section describes running CORE and EMANE on a single machine. This is the default mode of operation when building an EMANE network with CORE. The OTA manager and Event service interface are set to use *ctrl0* and the virtual nodes use the primary control channel for communicating with one another. The primary control channel is automatically activated when a scenario involves EMANE. Using the primary control channel prevents your emulation session from sending multicast traffic on your local network and interfering with other EMANE users.
EMANE is configured through a WLAN node, because it is all about emulating wireless radio networks. Once a node is linked to a WLAN cloud configured with an EMANE model, the radio interface on that node may also be configured separately (apart from the cloud.)
Double-click on a WLAN node to invoke the WLAN configuration dialog. Click the *EMANE* tab; when EMANE has been properly installed, EMANE wireless modules should be listed in the *EMANE Models* list. (You may need to restart the CORE daemon if it was running prior to installing the EMANE Python bindings.) Click on a model name to enable it.
When an EMANE model is selected in the *EMANE Models* list, clicking on the *model options* button causes the GUI to query the CORE daemon for configuration items. Each model will have different parameters; refer to the EMANE documentation for an explanation of each item. The default values are presented in the dialog. Clicking *Apply* and *Apply* again will store the EMANE model selections.
The *EMANE options* button allows specifying some global parameters for EMANE, some of which are necessary for distributed operation.
The RF-PIPE and IEEE 802.11abg models use a Universal PHY that supports geographic location information for determining pathloss between nodes. A default latitude and longitude location is provided by CORE and this location-based pathloss is enabled by default; this is the *pathloss mode* setting for the Universal PHY. Moving a node on the canvas while the emulation is running generates location events for EMANE. To view or change the geographic location or scale of the canvas use the *Canvas Size and Scale* dialog available from the *Canvas* menu.
Note that conversion between geographic and Cartesian coordinate systems is done using UTM (Universal Transverse Mercator) projection, where different zones of 6 degree longitude bands are defined. The location events generated by CORE may become inaccurate near the zone boundaries for very large scenarios that span multiple UTM zones. It is recommended that EMANE location scripts be used to achieve geo-location accuracy in this situation.
Clicking the green *Start* button launches the emulation and causes TAP devices to be created in the virtual nodes that are linked to the EMANE WLAN. These devices appear with interface names such as eth0, eth1, etc. The EMANE processes should now be running in each namespace. For a four node scenario:
```shell
ps -aef | grep emane
root 1063 969 0 11:46 ? 00:00:00 emane -d --logl 3 -r -f /tmp/pycore.59992/emane4.log /tmp/pycore.59992/platform4.xml
root 1117 959 0 11:46 ? 00:00:00 emane -d --logl 3 -r -f /tmp/pycore.59992/emane2.log /tmp/pycore.59992/platform2.xml
root 1179 942 0 11:46 ? 00:00:00 emane -d --logl 3 -r -f /tmp/pycore.59992/emane1.log /tmp/pycore.59992/platform1.xml
root 1239 979 0 11:46 ? 00:00:00 emane -d --logl 3 -r -f /tmp/pycore.59992/emane5.log /tmp/pycore.59992/platform5.xml
```
The example above shows the EMANE processes started by CORE. To view the configuration generated by CORE, look in the */tmp/pycore.nnnnn/* session directory for a *platform.xml* file and other XML files. One easy way to view this information is by double-clicking one of the virtual nodes, and typing *cd ..* in the shell to go up to the session directory.
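Alternatively, the generated files can be listed directly from the host (the session number will differ):
```shell
# list the EMANE platform XML files generated for running sessions
ls /tmp/pycore.*/platform*.xml
```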
![](static/single-pc-emane.png)
## Distributed EMANE
Running CORE and EMANE distributed among two or more emulation servers is similar to running on a single machine. There are a few key configuration items that need to be set in order to be successful, and those are outlined here.
It is a good idea to maintain separate networks for data (OTA) and control. The control network may be a shared laboratory network, for example, and you do not want multicast traffic on the data network to interfere with other EMANE users. Furthermore, control traffic could interfere with the OTA latency and throughput and might affect emulation fidelity. The examples described here will use *eth0* as a control interface and *eth1* as a data interface, although using separate interfaces is not strictly required. Note that these interface names refer to interfaces present on the host machine, not virtual interfaces within a node.
**IMPORTANT: If an auxiliary control network is used, an interface on the host has to be assigned to that network.**
Each machine that will act as an emulation server needs to have CORE and EMANE installed.
The IP addresses of the available servers are configured from the CORE emulation servers dialog box (choose *Session* then *Emulation servers...*). This list of servers is stored in a *~/.core/servers.conf* file. The dialog shows available servers, some or all of which may be assigned to nodes on the canvas.
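The *servers.conf* file is plain text with one server per line; a hypothetical example (the name/address/port layout shown is an assumption) could look like:
```shell
# ~/.core/servers.conf (illustrative)
core2 192.168.0.2 4038
core3 192.168.0.3 4038
```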
Nodes need to be assigned to emulation servers. Select several nodes, right-click them, and choose *Assign to* and the name of the desired server. When a node is not assigned to any emulation server, it will be emulated locally. The local machine that the GUI connects with is considered the "master" machine, which in turn connects to the other emulation server "slaves". Public key SSH should be configured from the master to the slaves.
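A typical way to set up the key-based login (hostnames and user are placeholders) is:
```shell
# on the master: generate a key if one does not exist, then copy it to each slave
ssh-keygen -t rsa
ssh-copy-id root@core2
ssh-copy-id root@core3
```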
Under the *EMANE* tab of the EMANE WLAN, click on the *EMANE options* button. This brings up the emane configuration dialog. The *enable OTA Manager channel* should be set to *on*. The *OTA Manager device* and *Event Service device* should be set to a control network device. For example, if you have a primary and auxiliary control network (i.e. controlnet and controlnet1), and you want the OTA traffic to have its dedicated network, set the OTA Manager device to *ctrl1* and the Event Service device to *ctrl0*. The EMANE models can be configured. Click *Apply* to save these settings.
![](static/distributed-emane-configuration.png)
**HINT:**
Here is a quick checklist for distributed emulation with EMANE.
1. Follow the steps outlined for normal CORE.
2. Under the *EMANE* tab of the EMANE WLAN, click on *EMANE options*.
3. Turn on the *OTA Manager channel* and set the *OTA Manager device*.
Also set the *Event Service device*.
4. Select groups of nodes, right-click them, and assign them to servers
using the *Assign to* menu.
5. Synchronize your machine's clocks prior to starting the emulation,
using *ntp* or *ptp*. Some EMANE models are sensitive to timing.
6. Press the *Start* button to launch the distributed emulation.
Now when the Start button is used to instantiate the emulation, the local CORE Python daemon will connect to other emulation servers that have been assigned to nodes. Each server will have its own session directory where the *platform.xml* file and other EMANE XML files are generated. The NEM IDs are automatically coordinated across servers so there is no overlap. Each server also gets its own Platform ID.
An Ethernet device is used for disseminating multicast EMANE events, as specified in the *configure emane* dialog. EMANE's Event Service can be run with mobility or pathloss scripts as described in *Single PC with EMANE*. If CORE is not subscribed to location events, it will generate them as nodes are moved on the canvas.
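For reference, a standalone EMANE event generation configuration is typically run outside of CORE with something like the following (the file name is a placeholder; consult the EMANE documentation for the exact invocation and options):
```shell
# run the EMANE event service with a mobility/pathloss event generator configuration
emaneeventservice eventservice.xml
```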
Double-clicking on a node during runtime will cause the GUI to attempt to SSH to the emulation server for that node and run an interactive shell. The public key SSH configuration should be tested with all emulation servers prior to starting the emulation.
![](static/distributed-emane-network.png)

239
docs/examplemodel.html Normal file
View file

@ -0,0 +1,239 @@
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<title>examplemodel.py</title>
<link rel="stylesheet" href="pycco.css">
</head>
<body>
<div id='container'>
<div id="background"></div>
<div class='section'>
<div class='docs'><h1>examplemodel.py</h1></div>
</div>
<div class='clearall'>
<div class='section' id='section-0'>
<div class='docs'>
<div class='octowrap'>
<a class='octothorpe' href='#section-0'>#</a>
</div>
</div>
<div class='code'>
<div class="highlight"><pre><span></span><span class="kn">from</span> <span class="nn">core.emane</span> <span class="kn">import</span> <span class="n">emanemanifest</span>
<span class="kn">from</span> <span class="nn">core.emane</span> <span class="kn">import</span> <span class="n">emanemodel</span></pre></div>
</div>
</div>
<div class='clearall'></div>
<div class='section' id='section-1'>
<div class='docs'>
<div class='octowrap'>
<a class='octothorpe' href='#section-1'>#</a>
</div>
<h1>Custom EMANE Model</h1>
</div>
<div class='code'>
<div class="highlight"><pre><span class="k">class</span> <span class="nc">ExampleModel</span><span class="p">(</span><span class="n">emanemodel</span><span class="o">.</span><span class="n">EmaneModel</span><span class="p">):</span></pre></div>
</div>
</div>
<div class='clearall'></div>
<div class='section' id='section-2'>
<div class='docs'>
<div class='octowrap'>
<a class='octothorpe' href='#section-2'>#</a>
</div>
<h2>MAC Definition</h2>
</div>
<div class='code'>
<div class="highlight"><pre></pre></div>
</div>
</div>
<div class='clearall'></div>
<div class='section' id='section-3'>
<div class='docs'>
<div class='octowrap'>
<a class='octothorpe' href='#section-3'>#</a>
</div>
<p>Defines the emane model name that will show up in the GUI.</p>
</div>
<div class='code'>
<div class="highlight"><pre> <span class="n">name</span> <span class="o">=</span> <span class="s2">&quot;emane_example&quot;</span></pre></div>
</div>
</div>
<div class='clearall'></div>
<div class='section' id='section-4'>
<div class='docs'>
<div class='octowrap'>
<a class='octothorpe' href='#section-4'>#</a>
</div>
<p>Defines the mac library that the model will reference.</p>
</div>
<div class='code'>
<div class="highlight"><pre> <span class="n">mac_library</span> <span class="o">=</span> <span class="s2">&quot;rfpipemaclayer&quot;</span></pre></div>
</div>
</div>
<div class='clearall'></div>
<div class='section' id='section-5'>
<div class='docs'>
<div class='octowrap'>
<a class='octothorpe' href='#section-5'>#</a>
</div>
<p>Defines the mac manifest file that will be parsed to obtain configuration options, that will be displayed
within the GUI.</p>
</div>
<div class='code'>
<div class="highlight"><pre> <span class="n">mac_xml</span> <span class="o">=</span> <span class="s2">&quot;/usr/share/emane/manifest/rfpipemaclayer.xml&quot;</span></pre></div>
</div>
</div>
<div class='clearall'></div>
<div class='section' id='section-6'>
<div class='docs'>
<div class='octowrap'>
<a class='octothorpe' href='#section-6'>#</a>
</div>
<p>Allows you to override options that are maintained within the manifest file above.</p>
</div>
<div class='code'>
<div class="highlight"><pre> <span class="n">mac_defaults</span> <span class="o">=</span> <span class="p">{</span>
<span class="s2">&quot;pcrcurveuri&quot;</span><span class="p">:</span> <span class="s2">&quot;/usr/share/emane/xml/models/mac/rfpipe/rfpipepcr.xml&quot;</span><span class="p">,</span>
<span class="p">}</span></pre></div>
</div>
</div>
<div class='clearall'></div>
<div class='section' id='section-7'>
<div class='docs'>
<div class='octowrap'>
<a class='octothorpe' href='#section-7'>#</a>
</div>
<p>Parses the manifest file and converts configurations into core supported formats.</p>
</div>
<div class='code'>
<div class="highlight"><pre> <span class="n">mac_config</span> <span class="o">=</span> <span class="n">emanemanifest</span><span class="o">.</span><span class="n">parse</span><span class="p">(</span><span class="n">mac_xml</span><span class="p">,</span> <span class="n">mac_defaults</span><span class="p">)</span></pre></div>
</div>
</div>
<div class='clearall'></div>
<div class='section' id='section-8'>
<div class='docs'>
<div class='octowrap'>
<a class='octothorpe' href='#section-8'>#</a>
</div>
<h2>PHY Definition</h2>
<p><strong>NOTE: phy configuration will default to the universal model as seen below and the below section does not
have to be included.</strong></p>
</div>
<div class='code'>
<div class="highlight"><pre></pre></div>
</div>
</div>
<div class='clearall'></div>
<div class='section' id='section-9'>
<div class='docs'>
<div class='octowrap'>
<a class='octothorpe' href='#section-9'>#</a>
</div>
<p>Defines the phy library that the model will reference, used if you need to provide a custom phy.</p>
</div>
<div class='code'>
<div class="highlight"><pre> <span class="n">phy_library</span> <span class="o">=</span> <span class="bp">None</span></pre></div>
</div>
</div>
<div class='clearall'></div>
<div class='section' id='section-10'>
<div class='docs'>
<div class='octowrap'>
<a class='octothorpe' href='#section-10'>#</a>
</div>
<p>Defines the phy manifest file that will be parsed to obtain configuration options, that will be displayed
within the GUI.</p>
</div>
<div class='code'>
<div class="highlight"><pre> <span class="n">phy_xml</span> <span class="o">=</span> <span class="s2">&quot;/usr/share/emane/manifest/emanephy.xml&quot;</span></pre></div>
</div>
</div>
<div class='clearall'></div>
<div class='section' id='section-11'>
<div class='docs'>
<div class='octowrap'>
<a class='octothorpe' href='#section-11'>#</a>
</div>
<p>Allows you to override options that are maintained within the manifest file above or for the default universal
model.</p>
</div>
<div class='code'>
<div class="highlight"><pre> <span class="n">phy_defaults</span> <span class="o">=</span> <span class="p">{</span>
<span class="s2">&quot;subid&quot;</span><span class="p">:</span> <span class="s2">&quot;1&quot;</span><span class="p">,</span>
<span class="s2">&quot;propagationmodel&quot;</span><span class="p">:</span> <span class="s2">&quot;2ray&quot;</span><span class="p">,</span>
<span class="s2">&quot;noisemode&quot;</span><span class="p">:</span> <span class="s2">&quot;none&quot;</span>
<span class="p">}</span></pre></div>
</div>
</div>
<div class='clearall'></div>
<div class='section' id='section-12'>
<div class='docs'>
<div class='octowrap'>
<a class='octothorpe' href='#section-12'>#</a>
</div>
<p>Parses the manifest file and converts configurations into core supported formats.</p>
</div>
<div class='code'>
<div class="highlight"><pre> <span class="n">phy_config</span> <span class="o">=</span> <span class="n">emanemanifest</span><span class="o">.</span><span class="n">parse</span><span class="p">(</span><span class="n">phy_xml</span><span class="p">,</span> <span class="n">phy_defaults</span><span class="p">)</span></pre></div>
</div>
</div>
<div class='clearall'></div>
<div class='section' id='section-13'>
<div class='docs'>
<div class='octowrap'>
<a class='octothorpe' href='#section-13'>#</a>
</div>
<h2>Custom override options</h2>
<p><strong>NOTE: these options default to what's seen below and do not have to be included.</strong></p>
</div>
<div class='code'>
<div class="highlight"><pre></pre></div>
</div>
</div>
<div class='clearall'></div>
<div class='section' id='section-14'>
<div class='docs'>
<div class='octowrap'>
<a class='octothorpe' href='#section-14'>#</a>
</div>
<p>Allows you to ignore options within phy/mac, used typically if you needed to add a custom option for display
within the gui.</p>
</div>
<div class='code'>
<div class="highlight"><pre> <span class="n">config_ignore</span> <span class="o">=</span> <span class="nb">set</span><span class="p">()</span></pre></div>
</div>
</div>
<div class='clearall'></div>
<div class='section' id='section-15'>
<div class='docs'>
<div class='octowrap'>
<a class='octothorpe' href='#section-15'>#</a>
</div>
<p>Allows you to override how options are displayed with the GUI, using the GUI format of
"name:1-2|othername:3-4". This will be parsed into tabs, split by "|" and account for items based on the indexed
numbers after ":" for including values in each tab.</p>
</div>
<div class='code'>
<div class="highlight"><pre> <span class="n">config_groups_override</span> <span class="o">=</span> <span class="bp">None</span></pre></div>
</div>
</div>
<div class='clearall'></div>
<div class='section' id='section-16'>
<div class='docs'>
<div class='octowrap'>
<a class='octothorpe' href='#section-16'>#</a>
</div>
<p>Allows you to override the default config matrix list. This value by default is the mac_config + phy_config, in
that order.</p>
</div>
<div class='code'>
<div class="highlight"><pre> <span class="n">config_matrix_override</span> <span class="o">=</span> <span class="bp">None</span>
</pre></div>
</div>
</div>
<div class='clearall'></div>
</div>
</body>

330
docs/exampleservice.html Normal file
View file

@ -0,0 +1,330 @@
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<title>sample.py</title>
<link rel="stylesheet" href="pycco.css">
</head>
<body>
<div id='container'>
<div id="background"></div>
<div class='section'>
<div class='docs'><h1>sample.py</h1></div>
</div>
<div class='clearall'>
<div class='section' id='section-0'>
<div class='docs'>
<div class='octowrap'>
<a class='octothorpe' href='#section-0'>#</a>
</div>
<p>Sample user-defined service.</p>
</div>
<div class='code'>
<div class="highlight"><pre><span></span><span class="kn">from</span> <span class="nn">core.service</span> <span class="kn">import</span> <span class="n">CoreService</span>
<span class="kn">from</span> <span class="nn">core.service</span> <span class="kn">import</span> <span class="n">ServiceMode</span></pre></div>
</div>
</div>
<div class='clearall'></div>
<div class='section' id='section-1'>
<div class='docs'>
<div class='octowrap'>
<a class='octothorpe' href='#section-1'>#</a>
</div>
<h1>Custom CORE Service</h1>
</div>
<div class='code'>
<div class="highlight"><pre><span class="k">class</span> <span class="nc">MyService</span><span class="p">(</span><span class="n">CoreService</span><span class="p">):</span></pre></div>
</div>
</div>
<div class='clearall'></div>
<div class='section' id='section-2'>
<div class='docs'>
<div class='octowrap'>
<a class='octothorpe' href='#section-2'>#</a>
</div>
<h2>Service Attributes</h2>
</div>
<div class='code'>
<div class="highlight"><pre></pre></div>
</div>
</div>
<div class='clearall'></div>
<div class='section' id='section-3'>
<div class='docs'>
<div class='octowrap'>
<a class='octothorpe' href='#section-3'>#</a>
</div>
<p>Name used as a unique ID for this service and is required, no spaces.</p>
</div>
<div class='code'>
<div class="highlight"><pre> <span class="n">name</span> <span class="o">=</span> <span class="s2">&quot;MyService&quot;</span></pre></div>
</div>
</div>
<div class='clearall'></div>
<div class='section' id='section-4'>
<div class='docs'>
<div class='octowrap'>
<a class='octothorpe' href='#section-4'>#</a>
</div>
<p>Allows you to group services within the GUI under a common name.</p>
</div>
<div class='code'>
<div class="highlight"><pre> <span class="n">group</span> <span class="o">=</span> <span class="s2">&quot;Utility&quot;</span></pre></div>
</div>
</div>
<div class='clearall'></div>
<div class='section' id='section-5'>
<div class='docs'>
<div class='octowrap'>
<a class='octothorpe' href='#section-5'>#</a>
</div>
<p>Executables this service depends on to function, if executable is not on the path, service will not be loaded.</p>
</div>
<div class='code'>
<div class="highlight"><pre> <span class="n">executables</span> <span class="o">=</span> <span class="p">()</span></pre></div>
</div>
</div>
<div class='clearall'></div>
<div class='section' id='section-6'>
<div class='docs'>
<div class='octowrap'>
<a class='octothorpe' href='#section-6'>#</a>
</div>
<p>Services that this service depends on for startup, tuple of service names.</p>
</div>
<div class='code'>
<div class="highlight"><pre> <span class="n">dependencies</span> <span class="o">=</span> <span class="p">()</span></pre></div>
</div>
</div>
<div class='clearall'></div>
<div class='section' id='section-7'>
<div class='docs'>
<div class='octowrap'>
<a class='octothorpe' href='#section-7'>#</a>
</div>
<p>Directories that this service will create within a node.</p>
</div>
<div class='code'>
<div class="highlight"><pre> <span class="n">dirs</span> <span class="o">=</span> <span class="p">()</span></pre></div>
</div>
</div>
<div class='clearall'></div>
<div class='section' id='section-8'>
<div class='docs'>
<div class='octowrap'>
<a class='octothorpe' href='#section-8'>#</a>
</div>
<p>Files that this service will generate; without a full path, the file goes in the node's directory.
e.g. /tmp/pycore.12345/n1.conf/myfile</p>
</div>
<div class='code'>
<div class="highlight"><pre> <span class="n">configs</span> <span class="o">=</span> <span class="p">(</span><span class="s2">&quot;sh myservice1.sh&quot;</span><span class="p">,</span> <span class="s2">&quot;sh myservice2.sh&quot;</span><span class="p">)</span></pre></div>
</div>
</div>
<div class='clearall'></div>
<div class='section' id='section-9'>
<div class='docs'>
<div class='octowrap'>
<a class='octothorpe' href='#section-9'>#</a>
</div>
<p>Commands used to start this service, any non-zero exit code will cause a failure.</p>
</div>
<div class='code'>
<div class="highlight"><pre> <span class="n">startup</span> <span class="o">=</span> <span class="p">(</span><span class="s2">&quot;sh </span><span class="si">%s</span><span class="s2">&quot;</span> <span class="o">%</span> <span class="n">configs</span><span class="p">[</span><span class="mi">0</span><span class="p">],</span> <span class="s2">&quot;sh </span><span class="si">%s</span><span class="s2">&quot;</span> <span class="o">%</span> <span class="n">configs</span><span class="p">[</span><span class="mi">1</span><span class="p">])</span></pre></div>
</div>
</div>
<div class='clearall'></div>
<div class='section' id='section-10'>
<div class='docs'>
<div class='octowrap'>
<a class='octothorpe' href='#section-10'>#</a>
</div>
<p>Commands used to validate that a service was started, any non-zero exit code will cause a failure.</p>
</div>
<div class='code'>
<div class="highlight"><pre> <span class="n">validate</span> <span class="o">=</span> <span class="p">()</span></pre></div>
</div>
</div>
<div class='clearall'></div>
<div class='section' id='section-11'>
<div class='docs'>
<div class='octowrap'>
<a class='octothorpe' href='#section-11'>#</a>
</div>
<p>Validation mode, used to determine startup success.
<em> NON_BLOCKING - runs startup commands, and validates success with validation commands
</em> BLOCKING - runs startup commands, and validates success with the startup commands themselves
* TIMER - runs startup commands, and validates success by waiting for "validation_timer" alone</p>
</div>
<div class='code'>
<div class="highlight"><pre> <span class="n">validation_mode</span> <span class="o">=</span> <span class="n">ServiceMode</span><span class="o">.</span><span class="n">NON_BLOCKING</span></pre></div>
</div>
</div>
<div class='clearall'></div>
<div class='section' id='section-12'>
<div class='docs'>
<div class='octowrap'>
<a class='octothorpe' href='#section-12'>#</a>
</div>
<p>Time for a service to wait before running validation commands or determining success in TIMER mode.</p>
</div>
<div class='code'>
<div class="highlight"><pre> <span class="n">validation_timer</span> <span class="o">=</span> <span class="mi">0</span></pre></div>
</div>
</div>
<div class='clearall'></div>
<div class='section' id='section-13'>
<div class='docs'>
<div class='octowrap'>
<a class='octothorpe' href='#section-13'>#</a>
</div>
<p>Shutdown commands to stop this service.</p>
</div>
<div class='code'>
<div class="highlight"><pre> <span class="n">shutdown</span> <span class="o">=</span> <span class="p">()</span></pre></div>
</div>
</div>
<div class='clearall'></div>
<div class='section' id='section-14'>
<div class='docs'>
<div class='octowrap'>
<a class='octothorpe' href='#section-14'>#</a>
</div>
<h2>On Load</h2>
</div>
<div class='code'>
<div class="highlight"><pre> <span class="nd">@classmethod</span>
<span class="k">def</span> <span class="nf">on_load</span><span class="p">(</span><span class="bp">cls</span><span class="p">):</span></pre></div>
</div>
</div>
<div class='clearall'></div>
<div class='section' id='section-15'>
<div class='docs'>
<div class='octowrap'>
<a class='octothorpe' href='#section-15'>#</a>
</div>
<p>Provides a way to run some arbitrary logic when the service is loaded, possibly to help facilitate
dynamic settings for the environment.</p>
</div>
<div class='code'>
<div class="highlight"><pre> <span class="k">pass</span></pre></div>
</div>
</div>
<div class='clearall'></div>
<div class='section' id='section-16'>
<div class='docs'>
<div class='octowrap'>
<a class='octothorpe' href='#section-16'>#</a>
</div>
<h2>Get Configs</h2>
</div>
<div class='code'>
<div class="highlight"><pre> <span class="nd">@classmethod</span>
<span class="k">def</span> <span class="nf">get_configs</span><span class="p">(</span><span class="bp">cls</span><span class="p">,</span> <span class="n">node</span><span class="p">):</span></pre></div>
</div>
</div>
<div class='clearall'></div>
<div class='section' id='section-17'>
<div class='docs'>
<div class='octowrap'>
<a class='octothorpe' href='#section-17'>#</a>
</div>
<p>Provides a way to dynamically generate the config files from the node a service will run.
Defaults to the class definition and can be left out entirely if not needed.</p>
</div>
<div class='code'>
<div class="highlight"><pre> <span class="k">return</span> <span class="bp">cls</span><span class="o">.</span><span class="n">configs</span></pre></div>
</div>
</div>
<div class='clearall'></div>
<div class='section' id='section-18'>
<div class='docs'>
<div class='octowrap'>
<a class='octothorpe' href='#section-18'>#</a>
</div>
<h2>Generate Config</h2>
</div>
<div class='code'>
<div class="highlight"><pre> <span class="nd">@classmethod</span>
<span class="k">def</span> <span class="nf">generate_config</span><span class="p">(</span><span class="bp">cls</span><span class="p">,</span> <span class="n">node</span><span class="p">,</span> <span class="n">filename</span><span class="p">):</span></pre></div>
</div>
</div>
<div class='clearall'></div>
<div class='section' id='section-19'>
<div class='docs'>
<div class='octowrap'>
<a class='octothorpe' href='#section-19'>#</a>
</div>
<p>Returns a string representation for a file, given the node the service is starting on the config filename
that this information will be used for. This must be defined, if "configs" are defined.</p>
</div>
<div class='code'>
<div class="highlight"><pre> <span class="n">cfg</span> <span class="o">=</span> <span class="s2">&quot;#!/bin/sh</span><span class="se">\n</span><span class="s2">&quot;</span>
<span class="k">if</span> <span class="n">filename</span> <span class="o">==</span> <span class="bp">cls</span><span class="o">.</span><span class="n">configs</span><span class="p">[</span><span class="mi">0</span><span class="p">]:</span>
<span class="n">cfg</span> <span class="o">+=</span> <span class="s2">&quot;# auto-generated by MyService (sample.py)</span><span class="se">\n</span><span class="s2">&quot;</span>
<span class="k">for</span> <span class="n">ifc</span> <span class="ow">in</span> <span class="n">node</span><span class="o">.</span><span class="n">netifs</span><span class="p">():</span>
<span class="n">cfg</span> <span class="o">+=</span> <span class="s1">&#39;echo &quot;Node </span><span class="si">%s</span><span class="s1"> has interface </span><span class="si">%s</span><span class="s1">&quot;</span><span class="se">\n</span><span class="s1">&#39;</span> <span class="o">%</span> <span class="p">(</span><span class="n">node</span><span class="o">.</span><span class="n">name</span><span class="p">,</span> <span class="n">ifc</span><span class="o">.</span><span class="n">name</span><span class="p">)</span>
<span class="k">elif</span> <span class="n">filename</span> <span class="o">==</span> <span class="bp">cls</span><span class="o">.</span><span class="n">configs</span><span class="p">[</span><span class="mi">1</span><span class="p">]:</span>
<span class="n">cfg</span> <span class="o">+=</span> <span class="s2">&quot;echo hello&quot;</span>
<span class="k">return</span> <span class="n">cfg</span></pre></div>
</div>
</div>
<div class='clearall'></div>
<div class='section' id='section-20'>
<div class='docs'>
<div class='octowrap'>
<a class='octothorpe' href='#section-20'>#</a>
</div>
<h2>Get Startup</h2>
</div>
<div class='code'>
<div class="highlight"><pre> <span class="nd">@classmethod</span>
<span class="k">def</span> <span class="nf">get_startup</span><span class="p">(</span><span class="bp">cls</span><span class="p">,</span> <span class="n">node</span><span class="p">):</span></pre></div>
</div>
</div>
<div class='clearall'></div>
<div class='section' id='section-21'>
<div class='docs'>
<div class='octowrap'>
<a class='octothorpe' href='#section-21'>#</a>
</div>
<p>Provides a way to dynamically generate the startup commands from the node a service will run.
Defaults to the class definition and can be left out entirely if not needed.</p>
</div>
<div class='code'>
<div class="highlight"><pre> <span class="k">return</span> <span class="bp">cls</span><span class="o">.</span><span class="n">startup</span></pre></div>
</div>
</div>
<div class='clearall'></div>
<div class='section' id='section-22'>
<div class='docs'>
<div class='octowrap'>
<a class='octothorpe' href='#section-22'>#</a>
</div>
<h2>Get Validate</h2>
</div>
<div class='code'>
<div class="highlight"><pre> <span class="nd">@classmethod</span>
<span class="k">def</span> <span class="nf">get_validate</span><span class="p">(</span><span class="bp">cls</span><span class="p">,</span> <span class="n">node</span><span class="p">):</span></pre></div>
</div>
</div>
<div class='clearall'></div>
<div class='section' id='section-23'>
<div class='docs'>
<div class='octowrap'>
<a class='octothorpe' href='#section-23'>#</a>
</div>
<p>Provides a way to dynamically generate the validate commands from the node a service will run.
Defaults to the class definition and can be left out entirely if not needed.</p>
</div>
<div class='code'>
<div class="highlight"><pre> <span class="k">return</span> <span class="bp">cls</span><span class="o">.</span><span class="n">validate</span>
</pre></div>
</div>
</div>
<div class='clearall'></div>
</div>
</body>

35
docs/index.md Normal file
View file

@ -0,0 +1,35 @@
# CORE Documentation
## Introduction
CORE (Common Open Research Emulator) is a tool for building virtual networks. As an emulator, CORE builds a representation of a real computer network that runs in real time, as opposed to simulation, where abstract models are used. The live-running emulation can be connected to physical networks and routers. It provides an environment for running real applications and protocols, taking advantage of virtualization provided by the Linux operating system.
CORE is typically used for network and protocol research, demonstrations, application and platform testing, evaluating networking scenarios, security studies, and increasing the size of physical test networks.
### Key Features
* Efficient and scalable
* Runs applications and protocols without modification
* Drag and drop GUI
* Highly customizable
## Topics
* [Architecture](architecture.md)
* [Installation](install.md)
* [Usage](usage.md)
* [Python Scripting](scripting.md)
* [Node Types](machine.md)
* [CTRLNET](ctrlnet.md)
* [Services](services.md)
* [EMANE](emane.md)
* [NS3](ns3.md)
* [Performance](performance.md)
* [Developers Guide](devguide.md)
## Credits
The CORE project was derived from the open source IMUNES project from the University of Zagreb in 2004. In 2006, changes for CORE were released back to that project, some items of which were adopted. Marko Zec <zec@fer.hr> is the primary developer from the University of Zagreb responsible for the IMUNES (GUI) and VirtNet (kernel) projects. Ana Kukec and Miljenko Mikuc are known contributors.
Jeff Ahrenholz has been the primary Boeing developer of CORE, and has written this manual. Tom Goff designed the Python framework and has made significant contributions. Claudiu Danilov, Rod Santiago, Kevin Larson, Gary Pei, Phil Spagnolo, and Ian Chakeres have contributed code to CORE. Dan Mackley helped develop the CORE API, originally to interface with a simulator. Jae Kim and Tom Henderson have supervised the project and provided direction.
Copyright (c) 2005-2018, the Boeing Company.

274
docs/install.md Normal file
View file

@ -0,0 +1,274 @@
# CORE Installation
* Table of Contents
{:toc}
# Overview
This section will describe how to set up a CORE machine. Note that the easiest way to install CORE is using a binary package on Ubuntu or Fedora/CentOS (deb or rpm) using the distribution's package manager to automatically install dependencies.
Ubuntu and Fedora/CentOS Linux are the recommended distributions for running CORE. However, these distributions are not strictly required. CORE will likely work on other flavors of Linux as well.
The primary dependencies are Tcl/Tk (8.5 or newer) for the GUI, and Python 2.7 for the CORE daemon.
CORE files are installed to the following directories, when the installation prefix is */usr*.
Install Path | Description
-------------|------------
/usr/bin/core-gui|GUI startup command
/usr/bin/core-daemon|Daemon startup command
/usr/bin/|Misc. helper commands/scripts
/usr/lib/core|GUI files
/usr/lib/python2.7/dist-packages/core|Python modules for daemon/scripts
/etc/core/|Daemon configuration files
~/.core/|User-specific GUI preferences and scenario files
/usr/share/core/|Example scripts and scenarios
/usr/share/man/man1/|Command man pages
/etc/init.d/core-daemon|SysV startup script for daemon
/etc/systemd/system/core-daemon.service|Systemd startup script for daemon
## Prerequisites
A Linux operating system is required. The GUI uses the Tcl/Tk scripting toolkit, and the CORE daemon requires Python. Details of the individual software packages required can be found in the installation steps.
## Required Hardware
Any computer capable of running Linux should be able to run CORE. Since the physical machine will be hosting numerous virtual machines, as a general rule you should select a machine having as much RAM and CPU resources as possible.
## Required Software
CORE requires a Linux operating system because it uses virtualization provided by the kernel. It does not run on Windows or Mac OS X operating systems (unless it is running within a virtual machine guest.) The virtualization technology that CORE currently uses is Linux network namespaces.
The CORE GUI requires the X.Org X Window system (X11), or can run over a remote X11 session. The specific Tcl/Tk, Python, and other libraries required to run CORE are listed in the installation steps below.
**NOTE: CORE *Services* determine what run on each node. You may require other software packages depending on the services you wish to use. For example, the *HTTP* service will require the *apache2* package.**
## Installing from Packages
The easiest way to install CORE is using the pre-built packages. The package managers on Ubuntu or Fedora/CentOS will automatically install dependencies for you. You can obtain the CORE packages from [CORE GitHub](https://github.com/coreemu/core/releases).
### Installing from Packages on Ubuntu
Install Quagga for routing. If you plan on working with wireless networks, we recommend installing [OSPF MDR](http://www.nrl.navy.mil/itd/ncs/products/ospf-manet) (replace *amd64* below with *i386* if needed to match your architecture):
```shell
wget https://downloads.pf.itd.nrl.navy.mil/ospf-manet/quagga-0.99.21mr2.2/quagga-mr_0.99.21mr2.2_amd64.deb
sudo dpkg -i quagga-mr_0.99.21mr2.2_amd64.deb
```
Or, for the regular Ubuntu version of Quagga:
```shell
sudo apt-get install quagga
```
Install the CORE deb packages for Ubuntu from command line.
```shell
sudo dpkg -i python-core_*.deb
sudo dpkg -i core-gui_*.deb
```
Start the CORE daemon as root. The systemd installation will auto-start the daemon, but you can use the commands below if need be.
```shell
# systemd
sudo systemctl start core-daemon
# sysv
sudo service core-daemon start
```
Run the CORE GUI as a normal user:
```shell
core-gui
```
After running the *core-gui* command, a GUI should appear with a canvas for drawing topologies. Messages will print out on the console about connecting to the CORE daemon.
### Installing from Packages on Fedora/CentOS
The commands shown here should be run as root. The *x86_64* architecture is shown in the examples below; replace it with *i686* if using a 32-bit architecture.
**CentOS 7 Only: in order to install the *tkimg* package you must build it from source.**
Make sure the system is up to date.
```shell
yum update
```
**Optional (Fedora 17+): Fedora 17 and newer have an additional prerequisite providing the required netem kernel modules (otherwise skip this step and have the package manager install it for you.)**
```shell
yum install kernel-modules-extra
```
Install Quagga for routing. If you plan on working with wireless networks, we recommend installing [OSPF MDR](http://www.nrl.navy.mil/itd/ncs/products/ospf-manet):
```shell
wget https://downloads.pf.itd.nrl.navy.mil/ospf-manet/quagga-0.99.21mr2.2/quagga-0.99.21mr2.2-1.el6.x86_64.rpm
sudo yum install quagga-0.99.21mr2.2-1.el6.x86_64.rpm
```
Or, for the regular Fedora/CentOS version of Quagga:
```shell
yum install quagga
```
Install the CORE RPM packages and automatically resolve dependencies:
```shell
yum install python-core_*.rpm
yum install core-gui_*.rpm
```
Turn off SELinux by setting *SELINUX=disabled* in the */etc/sysconfig/selinux* file and adding *selinux=0* to the kernel line in your */etc/grub.conf* file. On Fedora 15 and newer, disable sandboxd using ```chkconfig sandbox off```. You need to reboot in order for these changes to take effect.
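The SELinux edit can be made with a one-line change (the file path is the one noted above; review the file before and after):
```shell
# disable SELinux at the next boot
sudo sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/sysconfig/selinux
```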
Turn off firewalls:
```shell
systemctl disable firewalld
systemctl disable iptables.service
systemctl disable ip6tables.service
chkconfig iptables off
chkconfig ip6tables off
```
You need to reboot after making these changes, or flush the firewall using
```shell
iptables -F
ip6tables -F
```
Start the CORE daemon as root.
```shell
# systemd
sudo systemctl daemon-reload
sudo systemctl start core-daemon
# sysv
sudo service core-daemon start
```
Run the CORE GUI as a normal user:
```shell
core-gui
```
After running the *core-gui* command, a GUI should appear with a canvas for drawing topologies. Messages will print out on the console about connecting to the CORE daemon.
### Installing from Source
This option is listed here for developers and advanced users who are comfortable patching and building source code. Please consider using the binary packages instead for a simplified install experience.
To build CORE from source on Ubuntu, first install these development packages. These packages are not required for normal binary package installs.
#### Ubuntu 16.04 Requirements
```shell
sudo apt-get install automake bridge-utils ebtables python-dev libev-dev python-sphinx python-setuptools python-enum34 python-lxml
```
#### CentOS 7 with Gnome Desktop Requirements
```shell
sudo yum -y install automake gcc python-devel libev-devel python-sphinx tk python-lxml python-enum34
```
You can obtain the CORE source from the [CORE GitHub](https://github.com/coreemu/core) page. Choose either a stable release version or the development snapshot available in the *nightly_snapshots* directory.
```shell
tar xzf core-*.tar.gz
cd core-*
./bootstrap.sh
./configure
make
sudo make install
```
### Quagga Routing Software
Virtual networks generally require some form of routing in order to work (e.g. to automatically populate routing tables for routing packets from one subnet to another.) CORE builds OSPF routing protocol configurations by default when the blue router node type is used. The OSPF protocol is available from the [Quagga open source routing suite](http://www.quagga.net).
Quagga is not specified as a dependency for the CORE packages because there are two different Quagga packages that you may use:
* [Quagga](http://www.quagga.net) - the standard version of Quagga, suitable for static wired networks, and usually available via your distribution's package manager.
* [OSPF MANET Designated Routers](http://www.nrl.navy.mil/itd/ncs/products/ospf-manet) (MDR) - the Quagga routing suite with a modified version of OSPFv3, optimized for use with mobile wireless networks. The *mdr* node type (and the MDR service) requires this variant of Quagga.
If you plan on working with wireless networks, we recommend installing OSPF MDR; otherwise install the standard version of Quagga using your package manager or from source.
#### Installing Quagga from Packages
To install the standard version of Quagga from packages, use your package manager (Linux).
Ubuntu users:
```shell
sudo apt-get install quagga
```
Fedora/CentOS users:
```shell
sudo yum install quagga
```
To install the Quagga variant having OSPFv3 MDR, first download the appropriate package, and install using the package manager.
Ubuntu users:
```shell
wget https://downloads.pf.itd.nrl.navy.mil/ospf-manet/quagga-0.99.21mr2.2/quagga-mr_0.99.21mr2.2_amd64.deb
sudo dpkg -i quagga-mr_0.99.21mr2.2_amd64.deb
```
Replace *amd64* with *i686* if using a 32-bit architecture.
Fedora/CentOS users:
```shell
wget https://downloads.pf.itd.nrl.navy.mil/ospf-manet/quagga-0.99.21mr2.2/quagga-0.99.21mr2.2-1.el6.x86_64.rpm
sudo yum install quagga-0.99.21mr2.2-1.el6.x86_64.rpm
```
Replace *x86_64* with *i686* if using a 32-bit architecture.
#### Compiling Quagga for CORE
To compile Quagga to work with CORE on Linux:
```shell
wget https://downloads.pf.itd.nrl.navy.mil/ospf-manet/quagga-0.99.21mr2.2/quagga-0.99.21mr2.2.tar.gz
tar xzf quagga-0.99.21mr2.2.tar.gz
cd quagga-0.99
./configure --enable-user=root --enable-group=root --with-cflags=-ggdb \
    --sysconfdir=/usr/local/etc/quagga --enable-vtysh \
    --localstatedir=/var/run/quagga
make
sudo make install
```
Note that the configuration directory */usr/local/etc/quagga* shown for Quagga above could be */etc/quagga*, if you create a symbolic link from */etc/quagga/Quagga.conf -> /usr/local/etc/quagga/Quagga.conf* on the host. The *quaggaboot.sh* script in a Linux network namespace will try and do this for you if needed.
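To create that symbolic link manually on the host:
```shell
sudo mkdir -p /etc/quagga
sudo ln -s /usr/local/etc/quagga/Quagga.conf /etc/quagga/Quagga.conf
```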
If you try to run quagga after installing from source and get an error such as:
```shell
error while loading shared libraries libzebra.so.0
```
this is usually a sign that you have to run ```sudo ldconfig``` to refresh the cache file.
### VCORE
CORE is capable of running inside of a virtual machine, using software such as VirtualBox, VMware Server or QEMU. However, CORE itself is performing machine virtualization in order to realize multiple emulated nodes, and running CORE virtually adds additional contention for the physical resources. **For performance reasons, this is not recommended.** Timing inside of a VM often has problems. If you do run CORE from within a VM, it is recommended that you view the GUI with remote X11 over SSH, so the virtual machine does not need to emulate the video card with the X11 application.
A CORE virtual machine is provided for download, named VCORE. This is perhaps the easiest way to get CORE up and running as the machine is already set up for you. This may be adequate for initially evaluating the tool but keep in mind the performance limitations of running within VirtualBox or VMware. To install the virtual machine, you first need to obtain VirtualBox from http://www.virtualbox.org, or VMware Server or Player from http://www.vmware.com (this commercial software is distributed for free.) Once virtualization software has been installed, you can import the virtual machine appliance using the *vbox* file for VirtualBox or the *vmx* file for VMware. See the documentation that comes with VCORE for login information.

22
docs/machine.md Normal file
View file

@ -0,0 +1,22 @@
# CORE Node Types
* Table of Contents
{:toc}
## Overview
Different node types can be configured in CORE, and each node type has a *machine type* that indicates how the node will be represented at run time. Different machine types allow for different virtualization options.
## netns nodes
The *netns* machine type is the default. Nodes of this type are backed by Linux network namespaces. This default machine type is very lightweight, providing a minimum amount of virtualization in order to emulate a network. Another reason this is designated as the default machine type is that this virtualization technology typically requires no changes to the kernel; it is available out-of-the-box from the latest mainstream Linux distributions.
## physical nodes
The *physical* machine type is used for nodes that represent a real Linux-based machine that will participate in the emulated network scenario. This is typically used, for example, to incorporate racks of server machines from an emulation testbed. A physical node is one that is running the CORE daemon (*core-daemon*), but will not be further partitioned into virtual machines. Services that are run on the physical node do not run in an isolated or virtualized environment, but directly on the operating system.
Physical nodes must be assigned to servers, the same way nodes are assigned to emulation servers with *Distributed Emulation*. The list of available physical nodes currently shares the same dialog box and list as the emulation servers, accessed using the *Emulation Servers...* entry from the *Session* menu.
Support for physical nodes is under development and may be improved in future releases. Currently, when any node is linked to a physical node, a dashed line is drawn to indicate network tunneling. A GRE tunneling interface will be created on the physical node and used to tunnel traffic to and from the emulated world.
Double-clicking on a physical node during runtime opens a terminal with an SSH shell to that node. Users should configure public-key SSH login as done with emulation servers.

171
docs/ns3.md Normal file
View file

@ -0,0 +1,171 @@
# CORE / NS3
* Table of Contents
{:toc}
**NOTE: Support for ns-3 is limited and not currently being developed.**
## What is ns-3?
The [ns-3 network simulator](http://www.nsnam.org) is a discrete-event network simulator for Internet systems, targeted primarily for research and educational use. By default, ns-3 simulates entire networks, from applications down to channels, and it does so in simulated time, instead of real (wall-clock) time.
CORE can run in conjunction with ns-3 to simulate some types of networks. CORE network namespace virtual nodes can have virtual TAP interfaces installed using the simulator for communication. The simulator needs to run at wall clock time with the real-time scheduler. In this type of configuration, the CORE namespaces are used to provide packets to the ns-3 devices and channels. This allows, for example, wireless models developed for ns-3 to be used in an emulation context.
Users simulate networks with ns-3 by writing C++ programs or Python scripts that import the ns-3 library. Simulation models are objects instantiated in these scripts. Combining the CORE Python modules with the ns-3 Python bindings allows a script to easily set up and manage an emulation + simulation environment.
## ns-3 Scripting
Currently, ns-3 is supported by writing Python scripts, but not through drag-and-drop actions within the GUI. If you have a copy of the CORE source, look under *ns3/examples/* for example scripts; a CORE installation package puts these under */usr/share/core/examples/corens3*.
To run these scripts, install CORE so the CORE Python libraries are accessible, and download and build ns-3. This has been tested using ns-3 releases starting with 3.11 (and through 3.16 as of this writing).
The first step is to open an ns-3 waf shell. [waf](http://code.google.com/p/waf/) is the build system for ns-3. Opening a waf shell as root will merely set some environment variables useful for finding python modules and ns-3 executables. The following environment variables are extended or set by issuing *waf shell*:
```shell
PATH
PYTHONPATH
LD_LIBRARY_PATH
NS3_MODULE_PATH
NS3_EXECUTABLE_PATH
```
Open a waf shell as root, so that network namespaces may be instantiated by the script with root permissions. For an example, run the *ns3wifi.py* program, which simply instantiates 10 nodes (by default) and places them on an ns-3 WiFi channel. That is, the script will instantiate 10 namespace nodes, and create a special tap device that sends packets between the namespace node and a special ns-3 simulation node, where the tap device is bridged to an ns-3 WiFi network device, and attached to an ns-3 WiFi channel.
```shell
cd ns-allinone-3.16/ns-3.16
sudo ./waf shell
# use '/usr/local' below if installed from source
cd /usr/share/core/examples/corens3/
```
```python
python -i ns3wifi.py
# running ns-3 simulation for 600 seconds
print session
<corens3.obj.Ns3Session object at 0x1963e50>
```
The interactive Python shell allows some interaction with the Python objects for the emulation.
In another terminal, nodes can be accessed using *vcmd*:
```shell
vcmd -c /tmp/pycore.10781/n1 -- bash
root@n1:/tmp/pycore.10781/n1.conf#
root@n1:/tmp/pycore.10781/n1.conf# ping 10.0.0.3
PING 10.0.0.3 (10.0.0.3) 56(84) bytes of data.
64 bytes from 10.0.0.3: icmp_req=1 ttl=64 time=7.99 ms
64 bytes from 10.0.0.3: icmp_req=2 ttl=64 time=3.73 ms
64 bytes from 10.0.0.3: icmp_req=3 ttl=64 time=3.60 ms
^C
--- 10.0.0.3 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2002ms
rtt min/avg/max/mdev = 3.603/5.111/7.993/2.038 ms
root@n1:/tmp/pycore.10781/n1.conf#
```
The ping packets shown above are traversing an ns-3 ad-hoc Wifi simulated network.
To clean up the session, use the Session.shutdown() method from the Python terminal.
```python
print session
<corens3.obj.Ns3Session object at 0x1963e50>
session.shutdown()
```
A CORE/ns-3 Python script will instantiate an Ns3Session, which is a CORE Session having CoreNs3Nodes, an ns-3 MobilityHelper, and a fixed duration. The CoreNs3Node inherits from both the CoreNode and the ns-3 Node classes -- it is a network namespace having an associated simulator object. The CORE TunTap interface is used, represented by an ns-3 TapBridge in *CONFIGURE_LOCAL* mode, where ns-3 creates and configures the tap device. An event is scheduled to install the taps at time 0.
**NOTE: The GUI can be used to run the *ns3wifi.py* and *ns3wifirandomwalk.py* scripts directly. First, *core-daemon* must be stopped and run within the waf root shell. Then the GUI may be run as a normal user, and the *Execute Python Script...* option may be used from the *File* menu. Dragging nodes around in the *ns3wifi.py* example will cause their ns-3 positions to be updated.**
Users may find the files *ns3wimax.py* and *ns3lte.py* in that example directory; those files were similarly configured, but the underlying ns-3 support is not present as of ns-3.16, so they will not work. Specifically, ns-3 would have to be extended to support bridging the tap device to an LTE and a WiMAX device.
## Integration details
The previous example, *ns3wifi.py*, used the Python API of the special Python objects *Ns3Session* and *Ns3WifiNet*. The example program does not import anything directly from the ns-3 Python modules; rather, only the above two objects are used, and the API available to configure the underlying ns-3 objects is constrained. For example, *Ns3WifiNet* instantiates a constant-rate 802.11a-based ad hoc network, using many ns-3 defaults.
However, programs may be written with a blend of ns-3 API and CORE Python API calls. This section examines some of the fundamental objects in the CORE ns-3 support. Source code can be found in *ns3/corens3/obj.py* and example code in *ns3/corens3/examples/*.
## Ns3Session
The *Ns3Session* class is a CORE Session that starts an ns-3 simulation thread. ns-3 actually runs as a separate process on the same host as the CORE daemon, and the control of starting and stopping this process is performed by the *Ns3Session* class.
Example:
```python
session = Ns3Session(persistent=True, duration=opt.duration)
```
Note the use of the duration attribute to control how long the ns-3 simulation should run. By default, the duration is 600 seconds.
Typically, the session keeps track of the ns-3 nodes (holding a node container for references to the nodes). This is accomplished via the `addnode()` method, e.g.:
```python
for i in xrange(1, opt.numnodes + 1):
    node = session.addnode(name="n%d" % i)
```
`addnode()` creates instances of a *CoreNs3Node*, which we'll cover next.
## CoreNs3Node
A *CoreNs3Node* is both a CoreNode and an ns-3 node:
```python
class CoreNs3Node(CoreNode, ns.network.Node):
"""
The CoreNs3Node is both a CoreNode backed by a network namespace and
an ns-3 Node simulator object. When linked to simulated networks, the TunTap
device will be used.
"""
```
## CoreNs3Net
A *CoreNs3Net* derives from *PyCoreNet*. This network exists entirely in simulation, using the TunTap device to pass traffic between the emulated and the simulated realm. *Ns3WifiNet* is a specialization of this.
As an example, this type of code would typically be used to add a WiFi network to a session:
```python
wifi = session.addobj(cls=Ns3WifiNet, name="wlan1", rate="OfdmRate12Mbps")
wifi.setposition(30, 30, 0)
```
The above two lines will create a wlan1 object and set its initial canvas position. Later in the code, the newnetif method of the CoreNs3Node can be used to add interfaces on particular nodes to this network; e.g.:
```python
for i in xrange(1, opt.numnodes + 1):
    node = session.addnode(name="n%d" % i)
node.newnetif(wifi, ["%s/%s" % (prefix.addr(i), prefix.prefixlen)])
```
## Mobility
Mobility in ns-3 is handled by an object (a MobilityModel) aggregated to an ns-3 node. The MobilityModel is able to report the position of the object in the ns-3 space. This is a slightly different model from, for instance, EMANE, where location is associated with an interface, and the CORE GUI, where mobility is configured by right-clicking on a WiFi cloud.
The CORE GUI supports the ability to render the underlying ns-3 mobility model, if one is configured, on the CORE canvas. For example, the example program *ns3wifirandomwalk.py* uses five nodes (by default) in a random walk mobility model. This can be executed by starting the core daemon from an ns-3 waf shell:
```shell
sudo bash
cd /path/to/ns-3
./waf shell
core-daemon
```
and, in a separate window, starting the CORE GUI (not from a waf shell), selecting the *Execute Python script...* option from the *File* menu, and choosing the *ns3wifirandomwalk.py* script.
The program invokes ns-3 mobility through the following statement:
```python
session.setuprandomwalkmobility(bounds=(1000.0, 750.0, 0))
```
This can be replaced by a different mode of mobility, in which nodes are placed according to a constant mobility model, and a special API call to the CoreNs3Net object is made to use the CORE canvas positions.
```python
# replace the setuprandomwalkmobility() call above with the following,
# so that node positions are driven by the CORE canvas instead:
session.setupconstantmobility()
wifi.usecorepositions()
```
In this mode, the user dragging around the nodes on the canvas will cause CORE to update the position of the underlying ns-3 nodes.

28
docs/performance.md Normal file
View file

@ -0,0 +1,28 @@
# CORE Performance
* Table of Contents
{:toc}
## Overview
The top question about the performance of CORE is often *how many nodes can it handle?* The answer depends on several factors:
* Hardware - the number and speed of processors in the computer, the available processor cache, RAM, and front-side bus speed may greatly affect overall performance.
* Operating system version - the Linux distribution and the specific kernel version used will affect overall performance.
* Active processes - all nodes share the same CPU resources, so if one or more nodes are performing a CPU-intensive task, overall performance will suffer.
* Network traffic - the more packets that are sent around the virtual network, the higher the CPU usage.
* GUI usage - widgets that run periodically, mobility scenarios, and other GUI interactions generally consume CPU cycles that may be needed for emulation.
On a typical single-CPU Xeon 3.0GHz server machine with 2GB RAM running Linux, we have found it reasonable to run 30-75 nodes running OSPFv2 and OSPFv3 routing. On this hardware CORE can instantiate 100 or more nodes, but at that point what each node is doing becomes critical.
Because this software is primarily a network emulator, the more appropriate question is *how much network traffic can it handle?* On the same 3.0GHz server described above, running Linux, about 300,000 packets-per-second can be pushed through the system. The number of hops and the size of the packets are less important. The limiting factor is the number of times that the operating system needs to handle a packet. The 300,000 pps figure represents the number of times the system as a whole needed to deal with a packet. As more network hops are added, this increases the number of context switches and decreases the throughput seen on the full length of the network path.
**NOTE: The right question to be asking is *"how much traffic?"*, not *"how many nodes?"*.**
For a more detailed study of performance in CORE, refer to the following publications:
* J\. Ahrenholz, T. Goff, and B. Adamson, Integration of the CORE and EMANE Network Emulators, Proceedings of the IEEE Military Communications Conference 2011, November 2011.
* J\. Ahrenholz, Comparison of CORE Network Emulation Platforms, Proceedings of the IEEE Military Communications Conference 2010, pp. 864-869, November 2010.
* J\. Ahrenholz, C. Danilov, T. Henderson, and J.H. Kim, CORE: A real-time network emulator, Proceedings of IEEE MILCOM Conference, 2008.

190
docs/pycco.css Normal file
View file

@ -0,0 +1,190 @@
/*--------------------- Layout and Typography ----------------------------*/
body {
font-family: 'Palatino Linotype', 'Book Antiqua', Palatino, FreeSerif, serif;
font-size: 16px;
line-height: 24px;
color: #252519;
margin: 0; padding: 0;
background: #f5f5ff;
}
a {
color: #261a3b;
}
a:visited {
color: #261a3b;
}
p {
margin: 0 0 15px 0;
}
h1, h2, h3, h4, h5, h6 {
margin: 40px 0 15px 0;
}
h2, h3, h4, h5, h6 {
margin-top: 0;
}
#container {
background: white;
}
#container, div.section {
position: relative;
}
#background {
position: absolute;
top: 0; left: 580px; right: 0; bottom: 0;
background: #f5f5ff;
border-left: 1px solid #e5e5ee;
z-index: 0;
}
#jump_to, #jump_page {
background: white;
-webkit-box-shadow: 0 0 25px #777; -moz-box-shadow: 0 0 25px #777;
-webkit-border-bottom-left-radius: 5px; -moz-border-radius-bottomleft: 5px;
font: 10px Arial;
text-transform: uppercase;
cursor: pointer;
text-align: right;
}
#jump_to, #jump_wrapper {
position: fixed;
right: 0; top: 0;
padding: 5px 10px;
}
#jump_wrapper {
padding: 0;
display: none;
}
#jump_to:hover #jump_wrapper {
display: block;
}
#jump_page {
padding: 5px 0 3px;
margin: 0 0 25px 25px;
}
#jump_page .source {
display: block;
padding: 5px 10px;
text-decoration: none;
border-top: 1px solid #eee;
}
#jump_page .source:hover {
background: #f5f5ff;
}
#jump_page .source:first-child {
}
div.docs {
float: left;
max-width: 500px;
min-width: 500px;
min-height: 5px;
padding: 10px 25px 1px 50px;
vertical-align: top;
text-align: left;
}
.docs pre {
margin: 15px 0 15px;
padding-left: 15px;
}
.docs p tt, .docs p code {
background: #f8f8ff;
border: 1px solid #dedede;
font-size: 12px;
padding: 0 0.2em;
}
.octowrap {
position: relative;
}
.octothorpe {
font: 12px Arial;
text-decoration: none;
color: #454545;
position: absolute;
top: 3px; left: -20px;
padding: 1px 2px;
opacity: 0;
-webkit-transition: opacity 0.2s linear;
}
div.docs:hover .octothorpe {
opacity: 1;
}
div.code {
margin-left: 580px;
padding: 14px 15px 16px 50px;
vertical-align: top;
}
.code pre, .docs p code {
font-size: 12px;
}
pre, tt, code {
line-height: 18px;
font-family: Monaco, Consolas, "Lucida Console", monospace;
margin: 0; padding: 0;
}
div.clearall {
clear: both;
}
/*---------------------- Syntax Highlighting -----------------------------*/
td.linenos { background-color: #f0f0f0; padding-right: 10px; }
span.lineno { background-color: #f0f0f0; padding: 0 5px 0 5px; }
body .hll { background-color: #ffffcc }
body .c { color: #408080; font-style: italic } /* Comment */
body .err { border: 1px solid #FF0000 } /* Error */
body .k { color: #954121 } /* Keyword */
body .o { color: #666666 } /* Operator */
body .cm { color: #408080; font-style: italic } /* Comment.Multiline */
body .cp { color: #BC7A00 } /* Comment.Preproc */
body .c1 { color: #408080; font-style: italic } /* Comment.Single */
body .cs { color: #408080; font-style: italic } /* Comment.Special */
body .gd { color: #A00000 } /* Generic.Deleted */
body .ge { font-style: italic } /* Generic.Emph */
body .gr { color: #FF0000 } /* Generic.Error */
body .gh { color: #000080; font-weight: bold } /* Generic.Heading */
body .gi { color: #00A000 } /* Generic.Inserted */
body .go { color: #808080 } /* Generic.Output */
body .gp { color: #000080; font-weight: bold } /* Generic.Prompt */
body .gs { font-weight: bold } /* Generic.Strong */
body .gu { color: #800080; font-weight: bold } /* Generic.Subheading */
body .gt { color: #0040D0 } /* Generic.Traceback */
body .kc { color: #954121 } /* Keyword.Constant */
body .kd { color: #954121; font-weight: bold } /* Keyword.Declaration */
body .kn { color: #954121; font-weight: bold } /* Keyword.Namespace */
body .kp { color: #954121 } /* Keyword.Pseudo */
body .kr { color: #954121; font-weight: bold } /* Keyword.Reserved */
body .kt { color: #B00040 } /* Keyword.Type */
body .m { color: #666666 } /* Literal.Number */
body .s { color: #219161 } /* Literal.String */
body .na { color: #7D9029 } /* Name.Attribute */
body .nb { color: #954121 } /* Name.Builtin */
body .nc { color: #0000FF; font-weight: bold } /* Name.Class */
body .no { color: #880000 } /* Name.Constant */
body .nd { color: #AA22FF } /* Name.Decorator */
body .ni { color: #999999; font-weight: bold } /* Name.Entity */
body .ne { color: #D2413A; font-weight: bold } /* Name.Exception */
body .nf { color: #0000FF } /* Name.Function */
body .nl { color: #A0A000 } /* Name.Label */
body .nn { color: #0000FF; font-weight: bold } /* Name.Namespace */
body .nt { color: #954121; font-weight: bold } /* Name.Tag */
body .nv { color: #19469D } /* Name.Variable */
body .ow { color: #AA22FF; font-weight: bold } /* Operator.Word */
body .w { color: #bbbbbb } /* Text.Whitespace */
body .mf { color: #666666 } /* Literal.Number.Float */
body .mh { color: #666666 } /* Literal.Number.Hex */
body .mi { color: #666666 } /* Literal.Number.Integer */
body .mo { color: #666666 } /* Literal.Number.Oct */
body .sb { color: #219161 } /* Literal.String.Backtick */
body .sc { color: #219161 } /* Literal.String.Char */
body .sd { color: #219161; font-style: italic } /* Literal.String.Doc */
body .s2 { color: #219161 } /* Literal.String.Double */
body .se { color: #BB6622; font-weight: bold } /* Literal.String.Escape */
body .sh { color: #219161 } /* Literal.String.Heredoc */
body .si { color: #BB6688; font-weight: bold } /* Literal.String.Interpol */
body .sx { color: #954121 } /* Literal.String.Other */
body .sr { color: #BB6688 } /* Literal.String.Regex */
body .s1 { color: #219161 } /* Literal.String.Single */
body .ss { color: #19469D } /* Literal.String.Symbol */
body .bp { color: #954121 } /* Name.Builtin.Pseudo */
body .vc { color: #19469D } /* Name.Variable.Class */
body .vg { color: #19469D } /* Name.Variable.Global */
body .vi { color: #19469D } /* Name.Variable.Instance */
body .il { color: #666666 } /* Literal.Number.Integer.Long */

120
docs/scripting.md Normal file
View file

@ -0,0 +1,120 @@
# CORE Python Scripting
* Table of Contents
{:toc}
## Overview
CORE can be used via the GUI or Python scripting. Writing your own Python scripts offers a rich programming environment with complete control over all aspects of the emulation. This chapter provides a brief introduction to scripting. Most of the documentation is available from sample scripts, or online via interactive Python.
The best starting point is the sample scripts that are included with CORE. If you have a CORE source tree, the example script files can be found under *core/daemon/examples/api/*. When CORE is installed from packages, the example script files will be in */usr/share/core/examples/api/* (or */usr/local/* prefix when installed from source.) For the most part, the example scripts are self-documenting; see the comments contained within the Python code.
The scripts should be run with root privileges because they create new network namespaces. In general, a CORE Python script does not connect to the CORE daemon; in fact, the *core-daemon* is just another Python script that uses the CORE Python modules and exchanges messages with the GUI. To connect the GUI to your scripts, see the included sample scripts that allow for GUI connections.
Here are the basic elements of a CORE Python script:
```python
from core.emulator.coreemu import CoreEmu
from core.emulator.emudata import IpPrefixes
from core.enumerations import EventTypes
from core.enumerations import NodeTypes
# ip generator for example
prefixes = IpPrefixes(ip4_prefix="10.83.0.0/16")
# create emulator instance for creating sessions and utility methods
coreemu = CoreEmu()
session = coreemu.create_session()
# must be in configuration state for nodes to start, when using add_node below
session.set_state(EventTypes.CONFIGURATION_STATE)
# create switch network node
switch = session.add_node(_type=NodeTypes.SWITCH)
# create two nodes and link each one to the switch
for _ in xrange(2):
node = session.add_node()
interface = prefixes.create_interface(node)
session.add_link(node.objid, switch.objid, interface_one=interface)
# instantiate session
session.instantiate()
# shutdown session
coreemu.shutdown()
```
The above script creates a CORE session with two nodes connected to a switch.
A good way to learn about the CORE Python modules is via interactive Python. Scripts can be run using *python -i*. Cut and paste the simple script above and you will have two nodes connected by a switch, which you can then explore from the interactive prompt.
The CORE Python modules are documented with comments in the code. From an interactive Python shell, you can retrieve online help about the various classes and methods; for example *help(nodes.CoreNode)* or *help(Session)*.
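For instance, from a shell started with *python -i* on the example script above, something like the following can be used to explore the API (the *session* and *coreemu* names come from that example):
```python
# explore the CORE API interactively; session and coreemu were created by the script above
help(session.add_node)   # help on a single method
help(type(session))      # help on the Session class itself
dir(coreemu)             # list the attributes of the CoreEmu instance
```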
**NOTE: The CORE daemon *core-daemon* manages a list of sessions and allows the GUI to connect and control sessions. Your Python script uses the same CORE modules but runs independently of the daemon. The daemon does not need to be running for your script to work.**
The session created by a Python script may be viewed in the GUI if certain steps are followed. The GUI has an *Execute Python script...* option in the *File* menu for running a script and automatically connecting to it. Once connected, normal GUI interaction is possible, such as moving and double-clicking nodes, activating Widgets, etc.
The script should have a line such as the following for running it from the GUI.
```python
if __name__ in ["__main__", "__builtin__"]:
main()
```
A script can add sessions to the core-daemon. A global *coreemu* variable is exposed to the script pointing to the *CoreEmu* object.
The example below has a fallback to a new CoreEmu object, in case you would like to run the script standalone, outside of the core-daemon.
```python
coreemu = globals().get("coreemu", CoreEmu())
session = coreemu.create_session()
```
Finally, nodes and networks need to have their coordinates set to something; otherwise they will all be grouped at the coordinates *<0, 0>*. First sketching the topology in the GUI and then using the *Export Python script* option may help here.
```python
switch.setposition(x=80, y=50)
```
A fully-worked example script that you can launch from the GUI is available in the examples directory.
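Putting these pieces together, a minimal sketch of a script that can be run standalone or launched from the GUI might look like the following; the two-node topology and the node positions are only illustrative:
```python
from core.emulator.coreemu import CoreEmu
from core.emulator.emudata import IpPrefixes
from core.enumerations import EventTypes
from core.enumerations import NodeTypes


def main():
    # ip generator for the example
    prefixes = IpPrefixes(ip4_prefix="10.83.0.0/16")

    # reuse the daemon's CoreEmu when launched from the GUI, otherwise create one
    coreemu = globals().get("coreemu", CoreEmu())
    session = coreemu.create_session()

    # must be in configuration state for nodes to start
    session.set_state(EventTypes.CONFIGURATION_STATE)

    # create a switch and two nodes linked to it, with explicit canvas positions
    switch = session.add_node(_type=NodeTypes.SWITCH)
    switch.setposition(x=80, y=50)
    for i in xrange(2):
        node = session.add_node()
        node.setposition(x=150 + 100 * i, y=150)
        interface = prefixes.create_interface(node)
        session.add_link(node.objid, switch.objid, interface_one=interface)

    # instantiate session
    session.instantiate()


if __name__ in ["__main__", "__builtin__"]:
    main()
```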
## Configuring Services
Example of setting and configuring custom services for a node.
```python
# create session and node
coreemu = CoreEmu()
session = coreemu.create_session()
node = session.add_node()
# create and retrieve custom service
session.services.set_service(node.objid, "ServiceName")
custom_service = session.services.get_service(node.objid, "ServiceName")
# set custom file data
session.services.set_service_file(node.objid, "ServiceName", "FileName", "custom file data")
# set services to a node, using custom services when defined
session.services.add_services(node, node.type, ["Service1", "Service2"])
```
## Configuring EMANE Models
Example of configuring custom EMANE model settings.
```python
# create session and emane network
coreemu = CoreEmu()
session = coreemu.create_session()
emane_network = session.create_emane_network(
model=EmaneIeee80211abgModel,
geo_reference=(47.57917, -122.13232, 2.00000)
)
emane_network.setposition(x=80, y=50)
# set custom emane model config
config = {}
session.emane.set_model_config(emane_network.objid, EmaneIeee80211abgModel.name, config)
```

13
docs/services.md Normal file
View file

@ -0,0 +1,13 @@
# CORE Services
* Table of Contents
{:toc}
## Custom Services
CORE supports custom-developed services by dynamically loading user-created Python files.
Custom services should be placed within the path defined by **custom_services_dir** in the CORE
configuration file. This path cannot end in **/services**.
Here is an example service with documentation describing functionality:
[Example Service](exampleservice.html)
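As a rough illustration, a custom service file placed in that directory might look like the sketch below. The class attributes mirror those used by the bundled example service; *MyService*, *myservice.sh*, and the startup command are placeholders, and the exact *CoreService* attribute set may differ between CORE releases:
```python
from core.service import CoreService


class MyService(CoreService):
    # placeholder names; adjust for your own service
    name = "MyService"
    group = "Utility"
    configs = ("myservice.sh",)
    startup = ("sh myservice.sh",)

    @classmethod
    def generate_config(cls, node, filename):
        # return the contents of each file listed in configs, generated per node
        return "#!/bin/sh\n# generated for node %s\necho hello from MyService\n" % node.name
```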

File diff suppressed because it is too large