initial import (Boeing r1752, NRL r878)
This commit is contained in:
commit
f8f46d28be
394 changed files with 99738 additions and 0 deletions
159
doc/Makefile.am
Normal file
159
doc/Makefile.am
Normal file
|
@ -0,0 +1,159 @@
|
|||
# CORE
|
||||
# (c)2009-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
# Builds html and pdf documentation using Sphinx.
|
||||
#
|
||||
|
||||
SUBDIRS = man figures
|
||||
|
||||
|
||||
# extra cruft to remove
|
||||
DISTCLEANFILES = Makefile.in stamp-vti
|
||||
|
||||
rst_files = conf.py constants.txt credits.rst devguide.rst emane.rst \
|
||||
index.rst install.rst intro.rst machine.rst ns3.rst \
|
||||
performance.rst scripting.rst usage.rst
|
||||
|
||||
EXTRA_DIST = $(rst_files) _build _static _templates
|
||||
|
||||
# clean up dirs included by EXTRA_DIST
|
||||
dist-hook:
|
||||
rm -rf $(distdir)/_build/.svn $(distdir)/_static/.svn \
|
||||
$(distdir)/_templates/.svn
|
||||
|
||||
|
||||
###### below this line was generated using sphinx-quickstart ######
|
||||
|
||||
# Makefile for Sphinx documentation
|
||||
#
|
||||
|
||||
# You can set these variables from the command line.
|
||||
SPHINXOPTS =
|
||||
SPHINXBUILD = sphinx-build
|
||||
PAPER =
|
||||
BUILDDIR = _build
|
||||
|
||||
# Internal variables.
|
||||
PAPEROPT_a4 = -D latex_paper_size=a4
|
||||
PAPEROPT_letter = -D latex_paper_size=letter
|
||||
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
|
||||
|
||||
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest
|
||||
|
||||
help:
|
||||
@echo "Please use \`make <target>' where <target> is one of"
|
||||
@echo " html to make standalone HTML files"
|
||||
@echo " dirhtml to make HTML files named index.html in directories"
|
||||
@echo " singlehtml to make a single large HTML file"
|
||||
@echo " pickle to make pickle files"
|
||||
@echo " json to make JSON files"
|
||||
@echo " htmlhelp to make HTML files and a HTML help project"
|
||||
@echo " qthelp to make HTML files and a qthelp project"
|
||||
@echo " devhelp to make HTML files and a Devhelp project"
|
||||
@echo " epub to make an epub"
|
||||
@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
|
||||
@echo " latexpdf to make LaTeX files and run them through pdflatex"
|
||||
@echo " text to make text files"
|
||||
@echo " man to make manual pages"
|
||||
@echo " changes to make an overview of all changed/added/deprecated items"
|
||||
@echo " linkcheck to check all external links for integrity"
|
||||
@echo " doctest to run all doctests embedded in the documentation (if enabled)"
|
||||
|
||||
clean:
|
||||
-rm -rf $(BUILDDIR)/*
|
||||
|
||||
html:
|
||||
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
|
||||
@echo
|
||||
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
|
||||
|
||||
dirhtml:
|
||||
$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
|
||||
@echo
|
||||
@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
|
||||
|
||||
singlehtml:
|
||||
$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
|
||||
@echo
|
||||
@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
|
||||
|
||||
pickle:
|
||||
$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
|
||||
@echo
|
||||
@echo "Build finished; now you can process the pickle files."
|
||||
|
||||
json:
|
||||
$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
|
||||
@echo
|
||||
@echo "Build finished; now you can process the JSON files."
|
||||
|
||||
htmlhelp:
|
||||
$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
|
||||
@echo
|
||||
@echo "Build finished; now you can run HTML Help Workshop with the" \
|
||||
".hhp project file in $(BUILDDIR)/htmlhelp."
|
||||
|
||||
qthelp:
|
||||
$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
|
||||
@echo
|
||||
@echo "Build finished; now you can run "qcollectiongenerator" with the" \
|
||||
".qhcp project file in $(BUILDDIR)/qthelp, like this:"
|
||||
@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/CORE.qhcp"
|
||||
@echo "To view the help file:"
|
||||
@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/CORE.qhc"
|
||||
|
||||
devhelp:
|
||||
$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
|
||||
@echo
|
||||
@echo "Build finished."
|
||||
@echo "To view the help file:"
|
||||
@echo "# mkdir -p $$HOME/.local/share/devhelp/CORE"
|
||||
@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/CORE"
|
||||
@echo "# devhelp"
|
||||
|
||||
epub:
|
||||
$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
|
||||
@echo
|
||||
@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
|
||||
|
||||
latex:
|
||||
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
|
||||
@echo
|
||||
@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
|
||||
@echo "Run \`make' in that directory to run these through (pdf)latex" \
|
||||
"(use \`make latexpdf' here to do that automatically)."
|
||||
|
||||
latexpdf:
|
||||
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
|
||||
@echo "Running LaTeX files through pdflatex..."
|
||||
make -C $(BUILDDIR)/latex all-pdf
|
||||
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
|
||||
|
||||
text:
|
||||
$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
|
||||
@echo
|
||||
@echo "Build finished. The text files are in $(BUILDDIR)/text."
|
||||
|
||||
man:
|
||||
$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
|
||||
@echo
|
||||
@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
|
||||
|
||||
changes:
|
||||
$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
|
||||
@echo
|
||||
@echo "The overview file is in $(BUILDDIR)/changes."
|
||||
|
||||
linkcheck:
|
||||
$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
|
||||
@echo
|
||||
@echo "Link check complete; look for any errors in the above output " \
|
||||
"or in $(BUILDDIR)/linkcheck/output.txt."
|
||||
|
||||
doctest:
|
||||
$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
|
||||
@echo "Testing of doctests in the sources finished, look at the " \
|
||||
"results in $(BUILDDIR)/doctest/output.txt."
|
256
doc/conf.py.in
Normal file
256
doc/conf.py.in
Normal file
|
@ -0,0 +1,256 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# CORE documentation build configuration file, created by
|
||||
# sphinx-quickstart on Wed Jun 13 10:44:22 2012.
|
||||
#
|
||||
# This file is execfile()d with the current directory set to its containing dir.
|
||||
#
|
||||
# Note that not all possible configuration values are present in this
|
||||
# autogenerated file.
|
||||
#
|
||||
# All configuration values have a default; values that are commented out
|
||||
# serve to show the default.
|
||||
|
||||
import sys, os
|
||||
|
||||
# If extensions (or modules to document with autodoc) are in another directory,
|
||||
# add these directories to sys.path here. If the directory is relative to the
|
||||
# documentation root, use os.path.abspath to make it absolute, like shown here.
|
||||
#sys.path.insert(0, os.path.abspath('.'))
|
||||
|
||||
# -- General configuration -----------------------------------------------------
|
||||
|
||||
# If your documentation needs a minimal Sphinx version, state it here.
|
||||
#needs_sphinx = '1.0'
|
||||
|
||||
# Add any Sphinx extension module names here, as strings. They can be extensions
|
||||
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
|
||||
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.pngmath', 'sphinx.ext.ifconfig']
|
||||
|
||||
# Add any paths that contain templates here, relative to this directory.
|
||||
templates_path = ['_templates']
|
||||
|
||||
# The suffix of source filenames.
|
||||
source_suffix = '.rst'
|
||||
|
||||
# The encoding of source files.
|
||||
#source_encoding = 'utf-8-sig'
|
||||
|
||||
# The master toctree document.
|
||||
master_doc = 'index'
|
||||
|
||||
# General information about the project.
|
||||
project = u'CORE'
|
||||
copyright = u'2012, core-dev'
|
||||
|
||||
# The version info for the project you're documenting, acts as replacement for
|
||||
# |version| and |release|, also used in various other places throughout the
|
||||
# built documents.
|
||||
#
|
||||
# The short X.Y version.
|
||||
version = '@CORE_VERSION@'
|
||||
# The full version, including alpha/beta/rc tags.
|
||||
release = '@CORE_VERSION@'
|
||||
|
||||
# The language for content autogenerated by Sphinx. Refer to documentation
|
||||
# for a list of supported languages.
|
||||
#language = None
|
||||
|
||||
# There are two options for replacing |today|: either, you set today to some
|
||||
# non-false value, then it is used:
|
||||
#today = ''
|
||||
# Else, today_fmt is used as the format for a strftime call.
|
||||
#today_fmt = '%B %d, %Y'
|
||||
|
||||
# List of patterns, relative to source directory, that match files and
|
||||
# directories to ignore when looking for source files.
|
||||
exclude_patterns = ['_build']
|
||||
|
||||
# The reST default role (used for this markup: `text`) to use for all documents.
|
||||
#default_role = None
|
||||
|
||||
# If true, '()' will be appended to :func: etc. cross-reference text.
|
||||
#add_function_parentheses = True
|
||||
|
||||
# If true, the current module name will be prepended to all description
|
||||
# unit titles (such as .. function::).
|
||||
#add_module_names = True
|
||||
|
||||
# If true, sectionauthor and moduleauthor directives will be shown in the
|
||||
# output. They are ignored by default.
|
||||
#show_authors = False
|
||||
|
||||
# The name of the Pygments (syntax highlighting) style to use.
|
||||
pygments_style = 'sphinx'
|
||||
|
||||
# A list of ignored prefixes for module index sorting.
|
||||
#modindex_common_prefix = []
|
||||
|
||||
|
||||
# -- Options for HTML output ---------------------------------------------------
|
||||
|
||||
# The theme to use for HTML and HTML Help pages. See the documentation for
|
||||
# a list of builtin themes.
|
||||
html_theme = 'default'
|
||||
|
||||
# Theme options are theme-specific and customize the look and feel of a theme
|
||||
# further. For a list of options available for each theme, see the
|
||||
# documentation.
|
||||
#html_theme_options = {}
|
||||
|
||||
# Add any paths that contain custom themes here, relative to this directory.
|
||||
#html_theme_path = []
|
||||
|
||||
# The name for this set of Sphinx documents. If None, it defaults to
|
||||
# "<project> v<release> documentation".
|
||||
#html_title = None
|
||||
|
||||
# A shorter title for the navigation bar. Default is the same as html_title.
|
||||
#html_short_title = None
|
||||
|
||||
# The name of an image file (relative to this directory) to place at the top
|
||||
# of the sidebar.
|
||||
#html_logo = None
|
||||
|
||||
# The name of an image file (within the static path) to use as favicon of the
|
||||
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
|
||||
# pixels large.
|
||||
#html_favicon = None
|
||||
|
||||
# Add any paths that contain custom static files (such as style sheets) here,
|
||||
# relative to this directory. They are copied after the builtin static files,
|
||||
# so a file named "default.css" will overwrite the builtin "default.css".
|
||||
html_static_path = ['_static']
|
||||
|
||||
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
|
||||
# using the given strftime format.
|
||||
#html_last_updated_fmt = '%b %d, %Y'
|
||||
|
||||
# If true, SmartyPants will be used to convert quotes and dashes to
|
||||
# typographically correct entities.
|
||||
#html_use_smartypants = True
|
||||
|
||||
# Custom sidebar templates, maps document names to template names.
|
||||
#html_sidebars = {}
|
||||
|
||||
# Additional templates that should be rendered to pages, maps page names to
|
||||
# template names.
|
||||
#html_additional_pages = {}
|
||||
|
||||
# If false, no module index is generated.
|
||||
#html_domain_indices = True
|
||||
|
||||
# If false, no index is generated.
|
||||
#html_use_index = True
|
||||
|
||||
# If true, the index is split into individual pages for each letter.
|
||||
#html_split_index = False
|
||||
|
||||
# If true, links to the reST sources are added to the pages.
|
||||
#html_show_sourcelink = True
|
||||
|
||||
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
|
||||
#html_show_sphinx = True
|
||||
|
||||
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
|
||||
#html_show_copyright = True
|
||||
|
||||
# If true, an OpenSearch description file will be output, and all pages will
|
||||
# contain a <link> tag referring to it. The value of this option must be the
|
||||
# base URL from which the finished HTML is served.
|
||||
#html_use_opensearch = ''
|
||||
|
||||
# This is the file name suffix for HTML files (e.g. ".xhtml").
|
||||
#html_file_suffix = None
|
||||
|
||||
# Output file base name for HTML help builder.
|
||||
htmlhelp_basename = 'COREdoc'
|
||||
|
||||
|
||||
# -- Options for LaTeX output --------------------------------------------------
|
||||
|
||||
# The paper size ('letter' or 'a4').
|
||||
#latex_paper_size = 'letter'
|
||||
|
||||
# The font size ('10pt', '11pt' or '12pt').
|
||||
#latex_font_size = '10pt'
|
||||
|
||||
# Grouping the document tree into LaTeX files. List of tuples
|
||||
# (source start file, target name, title, author, documentclass [howto/manual]).
|
||||
latex_documents = [
|
||||
('index', 'CORE.tex', u'CORE Documentation',
|
||||
u'core-dev', 'manual'),
|
||||
]
|
||||
|
||||
# The name of an image file (relative to this directory) to place at the top of
|
||||
# the title page.
|
||||
#latex_logo = None
|
||||
|
||||
# For "manual" documents, if this is true, then toplevel headings are parts,
|
||||
# not chapters.
|
||||
#latex_use_parts = False
|
||||
|
||||
# If true, show page references after internal links.
|
||||
#latex_show_pagerefs = False
|
||||
|
||||
# If true, show URL addresses after external links.
|
||||
#latex_show_urls = False
|
||||
|
||||
# Additional stuff for the LaTeX preamble.
|
||||
#latex_preamble = ''
|
||||
|
||||
# Documents to append as an appendix to all manuals.
|
||||
#latex_appendices = []
|
||||
|
||||
# If false, no module index is generated.
|
||||
#latex_domain_indices = True
|
||||
|
||||
|
||||
# -- Options for manual page output --------------------------------------------
|
||||
|
||||
# One entry per manual page. List of tuples
|
||||
# (source start file, name, description, authors, manual section).
|
||||
man_pages = [
|
||||
('index', 'core', u'CORE Documentation',
|
||||
[u'core-dev'], 1)
|
||||
]
|
||||
|
||||
|
||||
# -- Options for Epub output ---------------------------------------------------
|
||||
|
||||
# Bibliographic Dublin Core info.
|
||||
epub_title = u'CORE'
|
||||
epub_author = u'core-dev'
|
||||
epub_publisher = u'core-dev'
|
||||
epub_copyright = u'2012, core-dev'
|
||||
|
||||
# The language of the text. It defaults to the language option
|
||||
# or en if the language is not set.
|
||||
#epub_language = ''
|
||||
|
||||
# The scheme of the identifier. Typical schemes are ISBN or URL.
|
||||
#epub_scheme = ''
|
||||
|
||||
# The unique identifier of the text. This can be a ISBN number
|
||||
# or the project homepage.
|
||||
#epub_identifier = ''
|
||||
|
||||
# A unique identification for the text.
|
||||
#epub_uid = ''
|
||||
|
||||
# HTML files that should be inserted before the pages created by sphinx.
|
||||
# The format is a list of tuples containing the path and title.
|
||||
#epub_pre_files = []
|
||||
|
||||
# HTML files shat should be inserted after the pages created by sphinx.
|
||||
# The format is a list of tuples containing the path and title.
|
||||
#epub_post_files = []
|
||||
|
||||
# A list of files that should not be packed into the epub file.
|
||||
#epub_exclude_files = []
|
||||
|
||||
# The depth of the table of contents in toc.ncx.
|
||||
#epub_tocdepth = 3
|
||||
|
||||
# Allow duplicate toc entries.
|
||||
#epub_tocdup = True
|
25
doc/constants.txt
Normal file
25
doc/constants.txt
Normal file
|
@ -0,0 +1,25 @@
|
|||
.. |UBUNTUVERSION| replace:: 12.04, 12.10, or 13.04
|
||||
|
||||
.. |FEDORAVERSION| replace:: 17, 18, or 19
|
||||
|
||||
.. |CENTOSVERSION| replace:: 6.x
|
||||
|
||||
.. |BSDVERSION| replace:: 9.0
|
||||
|
||||
.. |CORERPM| replace:: 1.fc19.x86_64.rpm
|
||||
.. |CORERPM2| replace:: 1.fc19.noarch.rpm
|
||||
.. |COREDEB| replace:: 0ubuntu1_precise_amd64.deb
|
||||
.. |COREDEB2| replace:: 0ubuntu1_precise_all.deb
|
||||
|
||||
.. |QVER| replace:: quagga-0.99.21mr2.2
|
||||
.. |QVERDEB| replace:: quagga-mr_0.99.21mr2.2_amd64.deb
|
||||
.. |QVERRPM| replace:: quagga-0.99.21mr2.2-1.fc16.x86_64.rpm
|
||||
|
||||
.. |APTDEPS| replace:: bash bridge-utils ebtables iproute libev-dev python
|
||||
.. |APTDEPS2| replace:: tcl8.5 tk8.5 libtk-img
|
||||
.. |APTDEPS3| replace:: autoconf automake gcc libev-dev make python-dev libreadline-dev pkg-config imagemagick help2man
|
||||
|
||||
.. |YUMDEPS| replace:: bash bridge-utils ebtables iproute libev python
|
||||
.. |YUMDEPS2| replace:: tcl tk tkimg
|
||||
.. |YUMDEPS3| replace:: autoconf automake make libev-devel python-devel ImageMagick help2man
|
||||
|
26
doc/credits.rst
Normal file
26
doc/credits.rst
Normal file
|
@ -0,0 +1,26 @@
|
|||
.. This file is part of the CORE Manual
|
||||
(c)2012 the Boeing Company
|
||||
|
||||
.. _Acknowledgements:
|
||||
|
||||
***************
|
||||
Acknowledgments
|
||||
***************
|
||||
|
||||
The CORE project was derived from the open source IMUNES project from the
|
||||
University of Zagreb in 2004. In 2006, changes for CORE were released back to
|
||||
that project, some items of which were adopted. Marko Zec <zec@fer.hr> is the
|
||||
primary developer from the University of Zagreb responsible for the IMUNES
|
||||
(GUI) and VirtNet (kernel) projects. Ana Kukec and Miljenko Mikuc are known
|
||||
contributors.
|
||||
|
||||
Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com> has been the primary Boeing
|
||||
developer of CORE, and has written this manual. Tom Goff
|
||||
<thomas.goff@boeing.com> designed the Python framework and has made significant
|
||||
contributions. Claudiu Danilov <claudiu.b.danilov@boeing.com>, Gary Pei
|
||||
<guangyu.pei@boeing.com>, Phil Spagnolo, and Ian Chakeres have contributed code
|
||||
to CORE. Dan Mackley <daniel.c.mackley@boeing.com> helped develop the CORE API,
|
||||
originally to interface with a simulator. Jae Kim <jae.h.kim@boeing.com> and
|
||||
Tom Henderson <thomas.r.henderson@boeing.com> have supervised the project and
|
||||
provided direction.
|
||||
|
331
doc/devguide.rst
Normal file
331
doc/devguide.rst
Normal file
|
@ -0,0 +1,331 @@
|
|||
.. This file is part of the CORE Manual
|
||||
(c)2012-2013 the Boeing Company
|
||||
|
||||
.. _Developer's_Guide:
|
||||
|
||||
*****************
|
||||
Developer's Guide
|
||||
*****************
|
||||
|
||||
This section contains advanced usage information, intended for developers and
|
||||
others who are comfortable with the command line.
|
||||
|
||||
.. _Coding_Standard:
|
||||
|
||||
Coding Standard
|
||||
===============
|
||||
|
||||
The coding standard and style guide for the CORE project are maintained online.
|
||||
Please refer to the `coding standard
|
||||
<http://code.google.com/p/coreemu/wiki/Hacking>`_ posted on the CORE Wiki.
|
||||
|
||||
.. _Source_Code_Guide:
|
||||
|
||||
Source Code Guide
|
||||
=================
|
||||
|
||||
The CORE source consists of several different programming languages for
|
||||
historical reasons. Current development focuses on the Python modules and
|
||||
daemon. Here is a brief description of the source directories.
|
||||
|
||||
These are being actively developed as of CORE |version|:
|
||||
|
||||
* *gui* - Tcl/Tk GUI. This uses Tcl/Tk because of its roots with the IMUNES
|
||||
project.
|
||||
* *daemon* - Python modules are found in the :file:`daemon/core` directory, the
|
||||
daemon under :file:`daemon/sbin/core-daemon`, and Python extension modules for
|
||||
Linux Network Namespace support are in :file:`daemon/src`.
|
||||
* *doc* - Documentation for the manual lives here in reStructuredText format.
|
||||
* *packaging* - Control files and script for building CORE packages are here.
|
||||
|
||||
These directories are not so actively developed:
|
||||
|
||||
* *kernel* - patches and modules mostly related to FreeBSD.
|
||||
|
||||
.. _The_CORE_API:
|
||||
|
||||
The CORE API
|
||||
============
|
||||
|
||||
.. index:: CORE; API
|
||||
|
||||
.. index:: API
|
||||
|
||||
.. index:: remote API
|
||||
|
||||
The CORE API is used between different components of CORE for communication.
|
||||
The GUI communicates with the CORE daemon using the API. One emulation server
|
||||
communicates with another using the API. The API also allows other systems to
|
||||
interact with the CORE emulation. The API allows another system to add, remove,
|
||||
or modify nodes and links, and enables executing commands on the emulated
|
||||
systems. On FreeBSD, the API is used for enhancing the wireless LAN
|
||||
calculations. Wireless link parameters are updated on-the-fly based on node
|
||||
positions.
|
||||
|
||||
CORE listens on a local TCP port for API messages. The other system could be
|
||||
software running locally or another machine accessible across the network.
|
||||
|
||||
The CORE API is currently specified in a separate document, available from the
|
||||
CORE website.
|
||||
|
||||
.. _Linux_network_namespace_Commands:
|
||||
|
||||
Linux network namespace Commands
|
||||
================================
|
||||
|
||||
.. index:: lxctools
|
||||
|
||||
Linux network namespace containers are often managed using the *Linux Container
|
||||
Tools* or *lxc-tools* package. The lxc-tools website is available here
|
||||
`<http://lxc.sourceforge.net/>`_ for more information. CORE does not use these
|
||||
management utilities, but includes its own set of tools for instantiating and
|
||||
configuring network namespace containers. This section describes these tools.
|
||||
|
||||
.. index:: vnoded
|
||||
|
||||
The *vnoded* daemon is the program used to create a new namespace, and
|
||||
listen on a control channel for commands that may instantiate other processes.
|
||||
This daemon runs as PID 1 in the container. It is launched automatically by
|
||||
the CORE daemon. The control channel is a UNIX domain socket usually named
|
||||
:file:`/tmp/pycore.23098/n3`, for node 3 running on CORE
|
||||
session 23098, for example. Root privileges are required for creating a new
|
||||
namespace.
|
||||
|
||||
.. index:: vcmd
|
||||
|
||||
The *vcmd* program is used to connect to the *vnoded* daemon in a Linux network
|
||||
namespace, for running commands in the namespace. The CORE daemon
|
||||
uses the same channel for setting up a node and running processes within it.
|
||||
This program has two
|
||||
required arguments, the control channel name, and the command line to be run
|
||||
within the namespace. This command does not need to run with root privileges.
|
||||
|
||||
When you double-click
|
||||
on a node in a running emulation, CORE will open a shell window for that node
|
||||
using a command such as:
|
||||
::
|
||||
|
||||
gnome-terminal -e vcmd -c /tmp/pycore.50160/n1 -- bash
|
||||
|
||||
|
||||
Similarly, the IPv4 routes Observer Widget will run a command to display the routing table using a command such as:
|
||||
::
|
||||
|
||||
vcmd -c /tmp/pycore.50160/n1 -- /sbin/ip -4 ro
|
||||
|
||||
|
||||
.. index:: core-cleanup
|
||||
|
||||
A script named *core-cleanup* is provided to clean up any running CORE
|
||||
emulations. It will attempt to kill any remaining vnoded processes, kill any
|
||||
EMANE processes, remove the :file:`/tmp/pycore.*` session directories, and
|
||||
remove any bridges or *ebtables* rules. With a *-d* option, it will also kill
|
||||
any running CORE daemon.
|
||||
|
||||
.. index:: netns
|
||||
|
||||
The *netns* command is not used by CORE directly. This utility can be used to
|
||||
run a command in a new network namespace for testing purposes. It does not open
|
||||
a control channel for receiving further commands.
|
||||
|
||||
Here are some other Linux commands that are useful for managing the Linux
|
||||
network namespace emulation.
|
||||
::
|
||||
|
||||
# view the Linux bridging setup
|
||||
brctl show
|
||||
# view the netem rules used for applying link effects
|
||||
tc qdisc show
|
||||
# view the rules that make the wireless LAN work
|
||||
ebtables -L
|
||||
|
||||
|
||||
Below is a transcript of creating two emulated nodes and connecting them together with a wired link:
|
||||
|
||||
.. index:: create nodes from command-line
|
||||
|
||||
.. index:: command-line
|
||||
|
||||
::
|
||||
|
||||
# create node 1 namespace container
|
||||
vnoded -c /tmp/n1.ctl -l /tmp/n1.log -p /tmp/n1.pid
|
||||
# create a virtual Ethernet (veth) pair, installing one end into node 1
|
||||
ip link add name n1.0.1 type veth peer name n1.0
|
||||
ip link set n1.0 netns `cat /tmp/n1.pid`
|
||||
vcmd -c /tmp/n1.ctl -- ip link set n1.0 name eth0
|
||||
vcmd -c /tmp/n1.ctl -- ifconfig eth0 10.0.0.1/24
|
||||
|
||||
# create node 2 namespace container
|
||||
vnoded -c /tmp/n2.ctl -l /tmp/n2.log -p /tmp/n2.pid
|
||||
# create a virtual Ethernet (veth) pair, installing one end into node 2
|
||||
ip link add name n2.0.1 type veth peer name n2.0
|
||||
ip link set n2.0 netns `cat /tmp/n2.pid`
|
||||
vcmd -c /tmp/n2.ctl -- ip link set n2.0 name eth0
|
||||
vcmd -c /tmp/n2.ctl -- ifconfig eth0 10.0.0.2/24
|
||||
|
||||
# bridge together nodes 1 and 2 using the other end of each veth pair
|
||||
brctl addbr b.1.1
|
||||
brctl setfd b.1.1 0
|
||||
brctl addif b.1.1 n1.0.1
|
||||
brctl addif b.1.1 n2.0.1
|
||||
ip link set n1.0.1 up
|
||||
ip link set n2.0.1 up
|
||||
ip link set b.1.1 up
|
||||
|
||||
# display connectivity and ping from node 1 to node 2
|
||||
brctl show
|
||||
vcmd -c /tmp/n1.ctl -- ping 10.0.0.2
|
||||
|
||||
|
||||
The above example script can be found as :file:`twonodes.sh` in the
|
||||
:file:`examples/netns` directory. Use *core-cleanup* to clean up after the
|
||||
script.
|
||||
|
||||
.. _FreeBSD_Commands:
|
||||
|
||||
FreeBSD Commands
|
||||
================
|
||||
|
||||
|
||||
.. index:: vimage
|
||||
.. index:: ngctl
|
||||
.. index:: Netgraph
|
||||
.. _FreeBSD_Kernel_Commands:
|
||||
|
||||
FreeBSD Kernel Commands
|
||||
-----------------------
|
||||
|
||||
The FreeBSD kernel emulation controlled by CORE is realized through several
|
||||
userspace commands. The CORE GUI itself could be thought of as a glorified
|
||||
script that dispatches these commands to build and manage the kernel emulation.
|
||||
|
||||
|
||||
* **vimage** - the vimage command, short for "virtual image", is used to
|
||||
create lightweight virtual machines and execute commands within the virtual
|
||||
image context. On a FreeBSD CORE machine, see the *vimage(8)* man page for
|
||||
complete details. The vimage command comes from the VirtNet project which
|
||||
virtualizes the FreeBSD network stack.
|
||||
|
||||
|
||||
* **ngctl** - the ngctl command, short for "netgraph control", creates
|
||||
Netgraph nodes and hooks, connects them together, and allows for various
|
||||
interactions with the Netgraph nodes. See the *ngctl(8)* man page for
|
||||
complete details. The ngctl command is built-in to FreeBSD because the
|
||||
Netgraph system is part of the kernel.
|
||||
|
||||
Both commands must be run as root.
|
||||
Some example usage of the *vimage* command follows below.
|
||||
::
|
||||
|
||||
vimage # displays the current virtual image
|
||||
vimage -l # lists running virtual images
|
||||
vimage e0_n0 ps aux # list the processes running on node 0
|
||||
for i in 1 2 3 4 5
|
||||
do # execute a command on all nodes
|
||||
vimage e0_n$i sysctl -w net.inet.ip.redirect=0
|
||||
done
|
||||
|
||||
|
||||
The *ngctl* command is more complex, due to the variety of Netgraph nodes
|
||||
available and each of their options.
|
||||
::
|
||||
|
||||
ngctl l # list active Netgraph nodes
|
||||
ngctl show e0_n8: # display node hook information
|
||||
ngctl msg e0_n0-n1: getstats # get pkt count statistics from a pipe node
|
||||
ngctl shutdown \\[0x0da3\\]: # shut down unnamed node using hex node ID
|
||||
|
||||
|
||||
There are many other combinations of commands not shown here. See the online
|
||||
manual (man) pages for complete details.
|
||||
|
||||
Below is a transcript of creating two emulated nodes, `router0` and `router1`,
|
||||
and connecting them together with a link:
|
||||
|
||||
.. index:: create nodes from command-line
|
||||
|
||||
.. index:: command-line
|
||||
|
||||
::
|
||||
|
||||
# create node 0
|
||||
vimage -c e0_n0
|
||||
vimage e0_n0 hostname router0
|
||||
ngctl mkpeer eiface ether ether
|
||||
vimage -i e0_n0 ngeth0 eth0
|
||||
vimage e0_n0 ifconfig eth0 link 40:00:aa:aa:00:00
|
||||
vimage e0_n0 ifconfig lo0 inet localhost
|
||||
vimage e0_n0 sysctl net.inet.ip.forwarding=1
|
||||
vimage e0_n0 sysctl net.inet6.ip6.forwarding=1
|
||||
vimage e0_n0 ifconfig eth0 mtu 1500
|
||||
|
||||
# create node 1
|
||||
vimage -c e0_n1
|
||||
vimage e0_n1 hostname router1
|
||||
ngctl mkpeer eiface ether ether
|
||||
vimage -i e0_n1 ngeth1 eth0
|
||||
vimage e0_n1 ifconfig eth0 link 40:00:aa:aa:0:1
|
||||
vimage e0_n1 ifconfig lo0 inet localhost
|
||||
vimage e0_n1 sysctl net.inet.ip.forwarding=1
|
||||
vimage e0_n1 sysctl net.inet6.ip6.forwarding=1
|
||||
vimage e0_n1 ifconfig eth0 mtu 1500
|
||||
|
||||
# create a link between n0 and n1
|
||||
ngctl mkpeer eth0@e0_n0: pipe ether upper
|
||||
ngctl name eth0@e0_n0:ether e0_n0-n1
|
||||
ngctl connect e0_n0-n1: eth0@e0_n1: lower ether
|
||||
ngctl msg e0_n0-n1: setcfg \\
|
||||
{{ bandwidth=100000000 delay=0 upstream={ BER=0 dupl
|
||||
icate=0 } downstream={ BER=0 duplicate=0 } }}
|
||||
ngctl msg e0_n0-n1: setcfg {{ downstream={ fifo=1 } }}
|
||||
ngctl msg e0_n0-n1: setcfg {{ downstream={ droptail=1 } }}
|
||||
ngctl msg e0_n0-n1: setcfg {{ downstream={ queuelen=50 } }}
|
||||
ngctl msg e0_n0-n1: setcfg {{ upstream={ fifo=1 } }}
|
||||
ngctl msg e0_n0-n1: setcfg {{ upstream={ droptail=1 } }}
|
||||
ngctl msg e0_n0-n1: setcfg {{ upstream={ queuelen=50 } }}
|
||||
|
||||
|
||||
Other FreeBSD commands that may be of interest:
|
||||
.. index:: FreeBSD commands
|
||||
|
||||
* **kldstat**, **kldload**, **kldunload** - list, load, and unload
|
||||
FreeBSD kernel modules
|
||||
* **sysctl** - display and modify various pieces of kernel state
|
||||
* **pkg_info**, **pkg_add**, **pkg_delete** - list, add, or remove
|
||||
FreeBSD software packages.
|
||||
* **vtysh** - start a Quagga CLI for router configuration
|
||||
|
||||
Netgraph Nodes
|
||||
--------------
|
||||
|
||||
.. index:: Netgraph
|
||||
|
||||
.. index:: Netgraph nodes
|
||||
|
||||
Each Netgraph node implements a protocol or processes data in some well-defined
|
||||
manner (see the `netgraph(4)` man page). The netgraph source code is located
|
||||
in `/usr/src/sys/netgraph`. There you might discover additional nodes that
|
||||
implement some desired functionality, that have not yet been included in CORE.
|
||||
Using certain kernel commands, you can likely include these types of nodes into
|
||||
your CORE emulation.
|
||||
|
||||
The following Netgraph nodes are used by CORE:
|
||||
|
||||
* **ng_bridge** - switch node performs Ethernet bridging
|
||||
|
||||
* **ng_cisco** - Cisco HDLC serial links
|
||||
|
||||
* **ng_eiface** - virtual Ethernet interface that is assigned to each virtual machine
|
||||
|
||||
* **ng_ether** - physical Ethernet devices, used by the RJ45 tool
|
||||
|
||||
* **ng_hub** - hub node
|
||||
|
||||
* **ng_pipe** - used for wired Ethernet links, imposes packet delay, bandwidth restrictions, and other link characteristics
|
||||
|
||||
* **ng_socket** - socket used by *ngctl* utility
|
||||
|
||||
* **ng_wlan** - wireless LAN node
|
||||
|
||||
|
293
doc/emane.rst
Normal file
293
doc/emane.rst
Normal file
|
@ -0,0 +1,293 @@
|
|||
.. This file is part of the CORE Manual
|
||||
(c)2012 the Boeing Company
|
||||
|
||||
.. _EMANE:
|
||||
|
||||
*****
|
||||
EMANE
|
||||
*****
|
||||
|
||||
.. index:: EMANE
|
||||
|
||||
This chapter describes running CORE with the EMANE emulator.
|
||||
|
||||
.. _What_is_EMANE?:
|
||||
|
||||
What is EMANE?
|
||||
==============
|
||||
|
||||
.. index:: EMANE; introduction to
|
||||
|
||||
The Extendable Mobile Ad-hoc Network Emulator (EMANE) allows heterogeneous
|
||||
network emulation using a pluggable MAC and PHY layer architecture. The EMANE
|
||||
framework provides an implementation architecture for modeling different radio
|
||||
interface types in the form of *Network Emulation Modules* (NEMs) and
|
||||
incorporating these modules into a real-time emulation running in a distributed
|
||||
environment.
|
||||
|
||||
EMANE is developed by U.S. Naval Research Labs (NRL) Code 5522 and Adjacent
|
||||
Link LLC,
|
||||
who maintain these websites:
|
||||
|
||||
* `<http://cs.itd.nrl.navy.mil/work/emane/index.php>`_
|
||||
* `<http://www.adjacentlink.com/>`_
|
||||
* `<http://labs.cengen.com/emane/>`_ (former EMANE project home)
|
||||
|
||||
Instead of building Linux Ethernet bridging networks with CORE, higher-fidelity
|
||||
wireless networks can be emulated using EMANE bound to virtual devices. CORE
|
||||
emulates layers 3 and above (network, session, application) with its virtual
|
||||
network stacks and process space for protocols and applications, while EMANE
|
||||
emulates layers 1 and 2 (physical and data link) using its pluggable PHY and
|
||||
MAC models.
|
||||
|
||||
The interface between CORE and EMANE is a TAP device. CORE builds the virtual
|
||||
node using Linux network namespaces, and installs the TAP device into the
|
||||
namespace. EMANE binds a userspace socket to the device, on the host before it
|
||||
is pushed into the namespace, for sending and receiving data. The *Virtual
|
||||
Transport* is the EMANE component responsible for connecting with the TAP
|
||||
device.
|
||||
|
||||
EMANE models are configured through CORE's WLAN configuration dialog. A
|
||||
corresponding EmaneModel Python class is sub-classed for each supported EMANE
|
||||
model, to provide configuration items and their mapping to XML files. This way
|
||||
new models can be easily supported. When CORE starts the emulation, it
|
||||
generates the appropriate XML files that specify the EMANE NEM configuration,
|
||||
and launches the EMANE daemons.
|
||||
|
||||
Some EMANE models support location information to determine when packets should
|
||||
be dropped. EMANE has an event system where location events are broadcast to
|
||||
all NEMs. CORE can generate these location events when nodes are moved on the
|
||||
canvas. The canvas size and scale dialog has controls for mapping the X,Y
|
||||
coordinate system to a latitude, longitude geographic system that EMANE uses.
|
||||
When specified in the :file:`core.conf` configuration file, CORE can also
|
||||
subscribe to EMANE location events and move the nodes on the canvas as they are
|
||||
moved in the EMANE emulation. This would occur when an Emulation Script
|
||||
Generator, for example, is running a mobility script.
|
||||
|
||||
.. index:: EMANE; Configuration
|
||||
|
||||
.. index:: EMANE; Installation
|
||||
|
||||
.. _EMANE_Configuration:
|
||||
|
||||
EMANE Configuration
|
||||
===================
|
||||
|
||||
|
||||
CORE and EMANE currently work together only on the Linux network namespaces
|
||||
platform. The normal CORE installation instructions should be followed from
|
||||
:ref:`Installation`.
|
||||
|
||||
The CORE configuration file :file:`/etc/core/core.conf` has options specific to
|
||||
EMANE. Namely, the `emane_models` line contains a comma-separated list of EMANE
|
||||
models that will be available. Each model has a corresponding Python file
|
||||
containing the *EmaneModel* subclass. A portion of the default
|
||||
:file:`core.conf` file is shown below:
|
||||
|
||||
::
|
||||
|
||||
# EMANE configuration
|
||||
emane_platform_port = 8101
|
||||
emane_transform_port = 8201
|
||||
emane_event_monitor = False
|
||||
emane_models = RfPipe, Ieee80211abg
|
||||
|
||||
|
||||
EMANE can be installed from deb or RPM packages or from source. See the
|
||||
`EMANE website <http://cs.itd.nrl.navy.mil/work/emane/index.php>`_ for
|
||||
full details.
|
||||
|
||||
Here are quick instructions for installing all EMANE packages:
|
||||
|
||||
::
|
||||
|
||||
# install dependencies
|
||||
      sudo apt-get install libssl-dev libxml-libxml-perl libxml-simple-perl
|
||||
# download and install EMANE 0.8.1
|
||||
export URL=http://labs.cengen.com/emane/download/deb/ubuntu-12_04
|
||||
wget $URL/0.8.1/amd64/emane-bundle-0.8.1.amd64.tgz
|
||||
mkdir emane-0.8.1
|
||||
cd emane-0.8.1
|
||||
tar xzf ../emane-bundle-0.8.1.amd64.tgz
|
||||
sudo dpkg -i *.deb
|
||||
|
||||
|
||||
If you have an EMANE event generator (e.g. mobility or pathloss scripts) and
|
||||
want to have CORE subscribe to EMANE location events, set the following line in
|
||||
the :file:`/etc/core/core.conf` configuration file:
|
||||
::
|
||||
|
||||
emane_event_monitor = True
|
||||
|
||||
Do not set the above option to True if you want to manually drag nodes around
|
||||
on the canvas to update their location in EMANE.
|
||||
|
||||
Another common issue is if installing EMANE from source, the default configure
|
||||
prefix will place the DTD files in :file:`/usr/local/share/emane/dtd` while
|
||||
CORE expects them in :file:`/usr/share/emane/dtd`. A symbolic link will fix
|
||||
this:
|
||||
::
|
||||
|
||||
sudo ln -s /usr/local/share/emane /usr/share/emane
|
||||
|
||||
|
||||
.. _Single_PC_with_EMANE:
|
||||
|
||||
Single PC with EMANE
|
||||
====================
|
||||
|
||||
This section describes running CORE and EMANE on a single machine. This is the
|
||||
default mode of operation when building an EMANE network with CORE. The OTA
|
||||
manager interface is off and the virtual nodes use the loopback device for
|
||||
communicating with one another. This prevents your emulation session from
|
||||
sending data on your local network and interfering with other EMANE users.
|
||||
|
||||
EMANE is configured through a WLAN node, because it is all about emulating
|
||||
wireless radio networks. Once a node is linked to a WLAN cloud configured with
|
||||
an EMANE model, the radio interface on that node may also be configured
|
||||
separately (apart from the cloud.)
|
||||
|
||||
Double-click on a WLAN node to invoke the WLAN configuration dialog. Click the
|
||||
*EMANE* tab; when EMANE has
|
||||
been properly installed, EMANE wireless modules should be listed in the
|
||||
*EMANE Models* list. (You may need to restart the CORE daemon if
|
||||
it was running prior to installing the EMANE Python bindings.)
|
||||
Click on a model name to enable it.
|
||||
|
||||
When an EMANE model is selected in the *EMANE Models* list, clicking on
|
||||
the *model options* button causes the GUI to query the CORE daemon for
|
||||
configuration items. Each model will have different parameters, refer to the
|
||||
EMANE documentation for an explanation of each item. The default values are
|
||||
presented in the dialog. Clicking *Apply* and *Apply* again will store
|
||||
the EMANE model selections.
|
||||
|
||||
The *EMANE options* button
|
||||
allows specifying some global parameters for EMANE, some of
|
||||
which are necessary for distributed operation, see :ref:`Distributed_EMANE`.
|
||||
|
||||
.. index:: RF-PIPE model
|
||||
|
||||
.. index:: 802.11 model
|
||||
|
||||
.. index:: ieee80211abg model
|
||||
|
||||
.. index:: geographic location
|
||||
|
||||
.. index:: Universal PHY
|
||||
|
||||
The RF-PIPE and IEEE 802.11abg models use a Universal PHY that supports
|
||||
geographic location information for determining pathloss between nodes. A
|
||||
default latitude and longitude location is provided by CORE and this
|
||||
location-based pathloss is enabled by default; this is the *pathloss mode*
|
||||
setting for the Universal PHY. Moving a node on the canvas while the emulation
|
||||
is running generates location events for EMANE. To view or change the
|
||||
geographic location or scale of the canvas use the *Canvas Size and Scale*
|
||||
dialog available from the *Canvas* menu.
|
||||
|
||||
Clicking the green *Start* button launches the emulation and causes TAP
|
||||
devices to be created in the virtual nodes that are linked to the EMANE WLAN.
|
||||
These devices appear with interface names such as eth0, eth1, etc. The EMANE
|
||||
daemons should now be running on the host:
|
||||
::
|
||||
|
||||
> ps -aef | grep emane
|
||||
root 10472 1 1 12:57 ? 00:00:00 emane --logl 0 platform.xml
|
||||
root 10526 1 1 12:57 ? 00:00:00 emanetransportd --logl 0 tr
|
||||
|
||||
The above example shows the *emane* and *emanetransportd* daemons started by
|
||||
CORE. To view the configuration generated by CORE, look in the
|
||||
:file:`/tmp/pycore.nnnnn/` session directory for a :file:`platform.xml` file
|
||||
and other XML files. One easy way to view this information is by
|
||||
double-clicking one of the virtual nodes, and typing *cd ..* in the shell to go
|
||||
up to the session directory.
|
||||
|
||||
When EMANE is used to network together CORE nodes, no Ethernet bridging device
|
||||
is used. The Virtual Transport creates a TAP device that is installed into the
|
||||
network namespace container, so no corresponding device is visible on the host.
|
||||
|
||||
.. index:: Distributed_EMANE
|
||||
.. _Distributed_EMANE:
|
||||
|
||||
Distributed EMANE
|
||||
=================
|
||||
|
||||
|
||||
Running CORE and EMANE distributed among two or more emulation servers is
|
||||
similar to running on a single machine. There are a few key configuration items
|
||||
that need to be set in order to be successful, and those are outlined here.
|
||||
|
||||
Because EMANE uses a multicast channel to disseminate data to all NEMs, it is
|
||||
a good idea to maintain separate networks for data and control. The control
|
||||
network may be a shared laboratory network, for example, but you do not want
|
||||
multicast traffic on the data network to interfere with other EMANE users.
|
||||
The examples described here will use *eth0* as a control interface
|
||||
and *eth1* as a data interface, although using separate interfaces
|
||||
is not strictly required. Note that these interface names refer to interfaces
|
||||
present on the host machine, not virtual interfaces within a node.
|
||||
|
||||
Each machine that will act as an emulation server needs to have CORE and EMANE
|
||||
installed. Refer to the :ref:`Distributed_Emulation` section for configuring
|
||||
CORE.
|
||||
|
||||
The IP addresses of the available servers are configured from the
|
||||
CORE emulation servers dialog box (choose *Session* then
|
||||
*Emulation servers...*) described in :ref:`Distributed_Emulation`.
|
||||
This list of servers is stored in a :file:`~/.core/servers.conf` file.
|
||||
The dialog shows available servers, some or all of which may be
|
||||
assigned to nodes on the canvas.
|
||||
|
||||
Nodes need to be assigned to emulation servers as described in
|
||||
:ref:`Distributed_Emulation`. Select several nodes, right-click them, and
|
||||
choose *Assign to* and the name of the desired server. When a node is not
|
||||
assigned to any emulation server, it will be emulated locally. The local
|
||||
machine that the GUI connects with is considered the "master" machine, which in
|
||||
turn connects to the other emulation server "slaves". Public key SSH should
|
||||
be configured from the master to the slaves as mentioned in the
|
||||
:ref:`Distributed_Emulation` section.
|
||||
|
||||
The EMANE models can be configured as described in :ref:`Single_PC_with_EMANE`.
|
||||
Under the *EMANE* tab of the EMANE WLAN, click on the *EMANE options* button.
|
||||
This brings
|
||||
up the emane configuration dialog. The *enable OTA Manager channel* should
|
||||
be set to *on*. The *OTA Manager device* and *Event Service device* should
|
||||
be set to something other than the loopback *lo* device. For example, if eth0
|
||||
is your control device and eth1 is for data, set the OTA Manager device to eth1
|
||||
and the Event Service device to eth0. Click *Apply* to
|
||||
save these settings.
|
||||
|
||||
.. HINT::
|
||||
Here is a quick checklist for distributed emulation with EMANE.
|
||||
|
||||
1. Follow the steps outlined for normal CORE :ref:`Distributed_Emulation`.
|
||||
2. Under the *EMANE* tab of the EMANE WLAN, click on *EMANE options*.
|
||||
3. Turn on the *OTA Manager channel* and set the *OTA Manager device*.
|
||||
Also set the *Event Service device*.
|
||||
4. Select groups of nodes, right-click them, and assign them to servers
|
||||
using the *Assign to* menu.
|
||||
  5. Synchronize your machines' clocks prior to starting the emulation,
|
||||
using ``ntp`` or ``ptp``. Some EMANE models are sensitive to timing.
|
||||
6. Press the *Start* button to launch the distributed emulation.
|
||||
|
||||
|
||||
Now when the Start button is used to instantiate the emulation,
|
||||
the local CORE Python
|
||||
daemon will connect to other emulation servers that have been assigned to nodes.
|
||||
Each server will have its own session directory where the :file:`platform.xml`
|
||||
file and other EMANE XML files are generated. The NEM IDs are automatically
|
||||
coordinated across servers so there is no overlap. Each server also gets its
|
||||
own Platform ID.
|
||||
|
||||
Instead of using the loopback device for disseminating multicast
|
||||
EMANE events, an Ethernet device is used as specified in the
|
||||
*configure emane* dialog.
|
||||
EMANE's Event Service can be run with mobility or pathloss scripts
|
||||
as described in
|
||||
:ref:`Single_PC_with_EMANE`. If CORE is not subscribed to location events, it
|
||||
will generate them as nodes are moved on the canvas.
|
||||
|
||||
Double-clicking on a node during runtime will cause the GUI to attempt to SSH
|
||||
to the emulation server for that node and run an interactive shell. The public
|
||||
key SSH configuration should be tested with all emulation servers prior to
|
||||
starting the emulation.
|
||||
|
||||
|
54
doc/figures/Makefile.am
Normal file
54
doc/figures/Makefile.am
Normal file
|
@ -0,0 +1,54 @@
|
|||
# CORE
|
||||
# (c)2009-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
#
|
||||
|
||||
# define new file extensions for handling figures and html
|
||||
SUFFIXES = .jpg .gif
|
||||
GIFTOJPG = convert -background white -flatten
|
||||
|
||||
# dia figures can be manually converted to jpg
|
||||
# On Ubuntu 11.10, this is failing for some reason.
|
||||
DIATOJPG = dia -t jpg -e
|
||||
|
||||
|
||||
# these are file extension handlers for automatically converting between image
|
||||
# file types; the .jpg files are built from .gif files from the GUI
|
||||
|
||||
# file extension handler to convert .gif to .jpg
|
||||
%.jpg: %.gif
|
||||
$(GIFTOJPG) $(top_srcdir)/gui/icons/tiny/$< $@
|
||||
|
||||
# file extension handler so we can list .gif as dependency for .gif.jpg
|
||||
%.gif:
|
||||
@echo "Using GUI file $(top_srcdir)/gui/icons/tiny/$@"
|
||||
|
||||
|
||||
# list of base names for figures
|
||||
figures = core-architecture core-workflow
|
||||
# list of figures + dia suffix
|
||||
figures_dia = $(figures:%=%.dia)
|
||||
# list of figure + jpg suffix
|
||||
figures_jpg = $(figures:%=%.jpg)
|
||||
|
||||
# icons from the GUI source
|
||||
icons = select start router host pc mdr router_green \
|
||||
lanswitch hub wlan \
|
||||
link rj45 tunnel marker oval rectangle text \
|
||||
stop observe plot twonode run document-properties
|
||||
# list of icons + .gif.jpg suffix
|
||||
icons_jpg = $(icons:%=%.jpg)
|
||||
|
||||
BUILT_SOURCES = $(figures_dia) $(figures_jpg) $(icons_jpg)
|
||||
|
||||
clean-local:
|
||||
rm -f $(icons_jpg)
|
||||
|
||||
EXTRA_DIST = $(figures_dia) $(figures_jpg)
|
||||
|
||||
# extra cruft to remove
|
||||
DISTCLEANFILES = Makefile.in $(icons_jpg)
|
||||
|
BIN
doc/figures/core-architecture.dia
Normal file
BIN
doc/figures/core-architecture.dia
Normal file
Binary file not shown.
BIN
doc/figures/core-architecture.jpg
Normal file
BIN
doc/figures/core-architecture.jpg
Normal file
Binary file not shown.
After Width: | Height: | Size: 38 KiB |
BIN
doc/figures/core-workflow.dia
Normal file
BIN
doc/figures/core-workflow.dia
Normal file
Binary file not shown.
BIN
doc/figures/core-workflow.jpg
Normal file
BIN
doc/figures/core-workflow.jpg
Normal file
Binary file not shown.
After Width: | Height: | Size: 21 KiB |
32
doc/index.rst
Normal file
32
doc/index.rst
Normal file
|
@ -0,0 +1,32 @@
|
|||
.. This file is part of the CORE Manual
|
||||
(c)2012 the Boeing Company
|
||||
|
||||
.. only:: html or latex
|
||||
|
||||
CORE Manual
|
||||
===========
|
||||
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
:numbered:
|
||||
|
||||
intro
|
||||
install
|
||||
usage
|
||||
scripting
|
||||
machine
|
||||
emane
|
||||
ns3
|
||||
performance
|
||||
devguide
|
||||
credits
|
||||
|
||||
Indices and tables
|
||||
==================
|
||||
|
||||
.. only:: html
|
||||
|
||||
* :ref:`genindex`
|
||||
* :ref:`search`
|
||||
|
756
doc/install.rst
Normal file
756
doc/install.rst
Normal file
|
@ -0,0 +1,756 @@
|
|||
.. This file is part of the CORE Manual
|
||||
(c)2012-2013 the Boeing Company
|
||||
|
||||
.. include:: constants.txt
|
||||
|
||||
.. _Installation:
|
||||
|
||||
************
|
||||
Installation
|
||||
************
|
||||
|
||||
This chapter describes how to set up a CORE machine. Note that the easiest
|
||||
way to install CORE is using a binary
|
||||
package on Ubuntu or Fedora (deb or rpm) using the distribution's package
|
||||
manager
|
||||
to automatically install dependencies, see :ref:`Installing_from_Packages`.
|
||||
|
||||
Ubuntu and Fedora Linux are the recommended distributions for running CORE. Ubuntu |UBUNTUVERSION| and Fedora |FEDORAVERSION| ship with kernels with support for namespaces built-in. They support the latest hardware. However,
|
||||
these distributions are not strictly required. CORE will likely work on other
|
||||
flavors of Linux, see :ref:`Installing_from_Source`.
|
||||
|
||||
The primary dependencies are Tcl/Tk (8.5 or newer) for the GUI, and Python 2.6 or 2.7 for the CORE daemon.
|
||||
|
||||
.. index:: install locations
|
||||
.. index:: paths
|
||||
.. index:: install paths
|
||||
|
||||
CORE files are installed to the following directories. When installing from
|
||||
source, the :file:`/usr/local` prefix is used in place of :file:`/usr` by
|
||||
default.
|
||||
|
||||
============================================= =================================
|
||||
Install Path Description
|
||||
============================================= =================================
|
||||
:file:`/usr/bin/core-gui` GUI startup command
|
||||
:file:`/usr/sbin/core-daemon` Daemon startup command
|
||||
:file:`/usr/sbin/` Misc. helper commands/scripts
|
||||
:file:`/usr/lib/core` GUI files
|
||||
:file:`/usr/lib/python2.7/dist-packages/core` Python modules for daemon/scripts
|
||||
:file:`/etc/core/` Daemon configuration files
|
||||
:file:`~/.core/` User-specific GUI preferences and scenario files
|
||||
:file:`/usr/share/core/` Example scripts and scenarios
|
||||
:file:`/usr/share/man/man1/` Command man pages
|
||||
:file:`/etc/init.d/core-daemon` System startup script for daemon
|
||||
============================================= =================================
|
||||
|
||||
|
||||
Under Fedora, :file:`/site-packages/` is used instead of :file:`/dist-packages/`
|
||||
for the Python modules, and :file:`/etc/systemd/system/core-daemon.service`
|
||||
instead of :file:`/etc/init.d/core-daemon` for the system startup script.
|
||||
|
||||
|
||||
.. _Prerequisites:
|
||||
|
||||
Prerequisites
|
||||
=============
|
||||
|
||||
.. index:: Prerequisites
|
||||
|
||||
The Linux or FreeBSD operating system is required. The GUI uses the Tcl/Tk scripting toolkit, and the CORE daemon require Python. Details of the individual software packages required can be found in the installation steps.
|
||||
|
||||
.. _Required_Hardware:
|
||||
|
||||
Required Hardware
|
||||
-----------------
|
||||
|
||||
.. index:: Hardware requirements
|
||||
|
||||
.. index:: System requirements
|
||||
|
||||
Any computer capable of running Linux or FreeBSD should be able to run CORE. Since the physical machine will be hosting numerous virtual machines, as a general rule you should select a machine having as much RAM and CPU resources as possible.
|
||||
|
||||
A *general recommendation* would be:
|
||||
|
||||
* 2.0GHz or better x86 processor, the more processor cores the better
|
||||
* 2 GB or more of RAM
|
||||
* about 3 MB of free disk space (plus more for dependency packages such as Tcl/Tk)
|
||||
* X11 for the GUI, or remote X11 over SSH
|
||||
|
||||
The computer can be a laptop, desktop, or rack-mount server. A keyboard, mouse,
|
||||
and monitor are not required if a network connection is available
|
||||
for remotely accessing the machine. A 3D accelerated graphics card
|
||||
is not required.
|
||||
|
||||
.. _Required_Software:
|
||||
|
||||
Required Software
|
||||
-----------------
|
||||
|
||||
CORE requires the Linux or FreeBSD operating systems because it uses virtualization provided by the kernel. It does not run on the Windows or Mac OS X operating systems (unless it is running within a virtual machine guest.) There are two
|
||||
different virtualization technologies that CORE can currently use:
|
||||
Linux network namespaces and FreeBSD jails,
|
||||
see :ref:`How_Does_it_Work?` for virtualization details.
|
||||
|
||||
**Linux network namespaces is the recommended platform.** Development is focused here and it supports the latest features. It is the easiest to install because there is no need to patch, install, and run a special Linux kernel.
|
||||
|
||||
FreeBSD |BSDVERSION|-RELEASE may offer the best scalability. If your
|
||||
applications run under FreeBSD and you are comfortable with that platform,
|
||||
this may be a good choice. Device and application support by BSD
|
||||
may not be as extensive as Linux.
|
||||
|
||||
The CORE GUI requires the X.Org X Window system (X11), or can run over a
|
||||
remote X11 session. For specific Tcl/Tk, Python, and other libraries required
|
||||
to run CORE, refer to the :ref:`Installation` section.
|
||||
|
||||
.. NOTE::
|
||||
CORE :ref:`Services` determine what runs on each node. You may require
|
||||
other software packages depending on the services you wish to use.
|
||||
For example, the `HTTP` service will require the `apache2` package.
|
||||
|
||||
|
||||
.. _Installing_from_Packages:
|
||||
|
||||
Installing from Packages
|
||||
========================
|
||||
|
||||
.. index:: installer
|
||||
|
||||
.. index:: binary packages
|
||||
|
||||
The easiest way to install CORE is using the pre-built packages. The package
|
||||
managers on Ubuntu or Fedora will
|
||||
automatically install dependencies for you.
|
||||
You can obtain the CORE packages from the `CORE downloads <http://downloads.pf.itd.nrl.navy.mil/core/packages/>`_ page.
|
||||
|
||||
.. _Installing_from_Packages_on_Ubuntu:
|
||||
|
||||
Installing from Packages on Ubuntu
|
||||
----------------------------------
|
||||
|
||||
First install the Ubuntu |UBUNTUVERSION| operating system.
|
||||
|
||||
.. NOTE::
|
||||
Linux package managers (e.g. `software-center`, `yum`) will take care
|
||||
of installing the dependencies for you when you use the CORE packages.
|
||||
You do not need to manually use these installation lines. You do need
|
||||
to select which Quagga package to use.
|
||||
|
||||
|
||||
* **Optional:** install the prerequisite packages (otherwise skip this
|
||||
step and have the package manager install them for you.)
|
||||
|
||||
.. parsed-literal::
|
||||
|
||||
# make sure the system is up to date; you can also use synaptic or
|
||||
# update-manager instead of apt-get update/dist-upgrade
|
||||
sudo apt-get update
|
||||
sudo apt-get dist-upgrade
|
||||
sudo apt-get install |APTDEPS| |APTDEPS2|
|
||||
|
||||
* Install Quagga for routing. If you plan on working with wireless
|
||||
networks, we recommend
|
||||
installing
|
||||
`OSPF MDR <http://cs.itd.nrl.navy.mil/work/ospf-manet/index.php>`__
|
||||
(replace `amd64` below with `i386` if needed
|
||||
to match your architecture):
|
||||
|
||||
.. parsed-literal::
|
||||
|
||||
export URL=http://downloads.pf.itd.nrl.navy.mil/ospf-manet
|
||||
wget $URL/|QVER|/|QVERDEB|
|
||||
sudo dpkg -i |QVERDEB|
|
||||
|
||||
|
||||
or, for the regular Ubuntu version of Quagga:
|
||||
::
|
||||
|
||||
sudo apt-get install quagga
|
||||
|
||||
* Install the CORE deb packages for Ubuntu, using a GUI that automatically
|
||||
resolves dependencies (note that the absolute path to the deb file
|
||||
must be used with ``software-center``):
|
||||
|
||||
.. parsed-literal::
|
||||
|
||||
software-center /home/user/Downloads/core-daemon\_\ |version|-|COREDEB|
|
||||
software-center /home/user/Downloads/core-gui\_\ |version|-|COREDEB2|
|
||||
|
||||
or install from command-line:
|
||||
|
||||
.. parsed-literal::
|
||||
|
||||
sudo dpkg -i core-daemon\_\ |version|-|COREDEB|
|
||||
sudo dpkg -i core-gui\_\ |version|-|COREDEB2|
|
||||
|
||||
* Start the CORE daemon as root.
|
||||
::
|
||||
|
||||
sudo /etc/init.d/core-daemon start
|
||||
|
||||
* Run the CORE GUI as a normal user:
|
||||
::
|
||||
|
||||
core-gui
|
||||
|
||||
|
||||
After running the ``core-gui`` command, a GUI should appear with a canvas
|
||||
for drawing topologies. Messages will print out on the console about
|
||||
connecting to the CORE daemon.
|
||||
|
||||
.. _Installing_from_Packages_on_Fedora:
|
||||
|
||||
Installing from Packages on Fedora/CentOS
|
||||
-----------------------------------------
|
||||
|
||||
The commands shown here should be run as root. First Install the Fedora
|
||||
|FEDORAVERSION| or CentOS |CENTOSVERSION| operating system.
|
||||
The `x86_64` architecture is shown in the
|
||||
examples below, replace with `i686` if using a 32-bit architecture. Also,
|
||||
`fc15` is shown below for Fedora 15 packages, replace with the appropriate
|
||||
Fedora release number.
|
||||
|
||||
* **CentOS only:** in order to install the `libev` prerequisite package, you
|
||||
first need to install the `EPEL <http://fedoraproject.org/wiki/EPEL>`_ repo
|
||||
(Extra Packages for Enterprise Linux):
|
||||
|
||||
::
|
||||
|
||||
wget http://dl.fedoraproject.org/pub/epel/6/i386/epel-release-6-8.noarch.rpm
|
||||
yum localinstall epel-release-6-8.noarch.rpm
|
||||
|
||||
* **Optional:** install the prerequisite packages (otherwise skip this
|
||||
step and have the package manager install them for you.)
|
||||
|
||||
.. parsed-literal::
|
||||
|
||||
# make sure the system is up to date; you can also use the
|
||||
# update applet instead of yum update
|
||||
yum update
|
||||
yum install |YUMDEPS| |YUMDEPS2|
|
||||
|
||||
|
||||
* **Optional (Fedora 17+):** Fedora 17 and newer have an additional
|
||||
prerequisite providing the required netem kernel modules (otherwise
|
||||
skip this step and have the package manager install it for you.)
|
||||
|
||||
::
|
||||
|
||||
yum install kernel-modules-extra
|
||||
|
||||
|
||||
* Install Quagga for routing. If you plan on working with wireless networks,
|
||||
we recommend installing
|
||||
`OSPF MDR <http://cs.itd.nrl.navy.mil/work/ospf-manet/>`_:
|
||||
|
||||
.. parsed-literal::
|
||||
|
||||
export URL=http://downloads.pf.itd.nrl.navy.mil/ospf-manet
|
||||
wget $URL/|QVER|/|QVERRPM|
|
||||
yum localinstall |QVERRPM|
|
||||
|
||||
or, for the regular Fedora version of Quagga:
|
||||
::
|
||||
|
||||
yum install quagga
|
||||
|
||||
|
||||
* Install the CORE RPM packages for Fedora and automatically resolve
|
||||
dependencies:
|
||||
|
||||
.. parsed-literal::
|
||||
|
||||
yum localinstall core-daemon-|version|-|CORERPM| --nogpgcheck
|
||||
yum localinstall core-gui-|version|-|CORERPM2| --nogpgcheck
|
||||
|
||||
or install from the command-line:
|
||||
|
||||
.. parsed-literal::
|
||||
|
||||
rpm -ivh core-daemon-|version|-|CORERPM|
|
||||
rpm -ivh core-gui-|version|-|CORERPM2|
|
||||
|
||||
|
||||
* Turn off SELINUX by setting ``SELINUX=disabled`` in the :file:`/etc/sysconfig/selinux` file, and adding ``selinux=0`` to the kernel line in
|
||||
your :file:`/etc/grub.conf` file; on Fedora 15 and newer, disable sandboxd using ``chkconfig sandbox off``;
|
||||
you need to reboot in order for this change to take effect
|
||||
* Turn off firewalls with ``systemctl disable firewalld``, ``systemctl disable iptables.service``, ``systemctl disable ip6tables.service`` (``chkconfig iptables off``, ``chkconfig ip6tables off``) or configure them with permissive rules for CORE virtual networks; you need to reboot after making this change, or flush the firewall using ``iptables -F``, ``ip6tables -F``.
|
||||
|
||||
* Start the CORE daemon as root. Fedora uses the ``systemd`` start-up daemon
|
||||
instead of traditional init scripts. CentOS uses the init script.
|
||||
::
|
||||
|
||||
# for Fedora using systemd:
|
||||
systemctl daemon-reload
|
||||
systemctl start core-daemon.service
|
||||
# or for CentOS:
|
||||
/etc/init.d/core-daemon start
|
||||
|
||||
* Run the CORE GUI as a normal user:
|
||||
::
|
||||
|
||||
core-gui
|
||||
|
||||
|
||||
After running the ``core-gui`` command, a GUI should appear with a canvas
|
||||
for drawing topologies. Messages will print out on the console about
|
||||
connecting to the CORE daemon.
|
||||
|
||||
.. _Installing_from_Source:
|
||||
|
||||
Installing from Source
|
||||
======================
|
||||
|
||||
This option is listed here for developers and advanced users who are comfortable patching and building source code. Please consider using the binary packages instead for a simplified install experience.
|
||||
|
||||
.. _Installing_from_Source_on_Ubuntu:
|
||||
|
||||
Installing from Source on Ubuntu
|
||||
--------------------------------
|
||||
|
||||
To build CORE from source on Ubuntu, first install these development packages.
|
||||
These packages are not required for normal binary package installs.
|
||||
|
||||
.. parsed-literal::
|
||||
|
||||
sudo apt-get install |APTDEPS| \\
|
||||
|APTDEPS2| \\
|
||||
|APTDEPS3|
|
||||
|
||||
|
||||
You can obtain the CORE source from the `CORE source <http://downloads.pf.itd.nrl.navy.mil/core/source/>`_ page. Choose either a stable release version or
|
||||
the development snapshot available in the `nightly_snapshots` directory.
|
||||
The ``-j8`` argument to ``make`` will run eight simultaneous jobs, to speed up
|
||||
builds on multi-core systems.
|
||||
|
||||
.. parsed-literal::
|
||||
|
||||
tar xzf core-|version|.tar.gz
|
||||
cd core-|version|
|
||||
./bootstrap.sh
|
||||
./configure
|
||||
make -j8
|
||||
sudo make install
|
||||
|
||||
|
||||
The CORE Manual documentation is built separately from the :file:`doc/`
|
||||
sub-directory in the source. It requires Sphinx:
|
||||
|
||||
.. parsed-literal::
|
||||
|
||||
sudo apt-get install python-sphinx
|
||||
cd core-|version|/doc
|
||||
make html
|
||||
make latexpdf
|
||||
|
||||
|
||||
.. _Installing_from_Source_on_Fedora:
|
||||
|
||||
Installing from Source on Fedora
|
||||
--------------------------------
|
||||
|
||||
To build CORE from source on Fedora, install these development packages.
|
||||
These packages are not required for normal binary package installs.
|
||||
|
||||
.. parsed-literal::
|
||||
|
||||
yum install |YUMDEPS| \\
|
||||
|YUMDEPS2| \\
|
||||
|YUMDEPS3|
|
||||
|
||||
|
||||
.. NOTE::
|
||||
For a minimal X11 installation, also try these packages::
|
||||
|
||||
yum install xauth xterm urw-fonts
|
||||
|
||||
You can obtain the CORE source from the `CORE source <http://downloads.pf.itd.nrl.navy.mil/core/source/>`_ page. Choose either a stable release version or
|
||||
the development snapshot available in the :file:`nightly_snapshots` directory.
|
||||
The ``-j8`` argument to ``make`` will run eight simultaneous jobs, to speed up
|
||||
builds on multi-core systems. Notice the ``configure`` flag to tell the build
|
||||
system that a systemd service file should be installed under Fedora.
|
||||
|
||||
.. parsed-literal::
|
||||
|
||||
tar xzf core-|version|.tar.gz
|
||||
cd core-|version|
|
||||
./bootstrap.sh
|
||||
./configure --with-startup=systemd
|
||||
make -j8
|
||||
sudo make install
|
||||
|
||||
|
||||
Note that the Linux RPM and Debian packages do not use the ``/usr/local``
|
||||
prefix, and files are instead installed to ``/usr/sbin``, and
|
||||
``/usr/lib``. This difference is a result of aligning with the directory
|
||||
structure of Linux packaging systems and FreeBSD ports packaging.
|
||||
|
||||
Another note is that the Python distutils in Fedora Linux will install the CORE
|
||||
Python modules to :file:`/usr/lib/python2.7/site-packages/core`, instead of
|
||||
using the :file:`dist-packages` directory.
|
||||
|
||||
The CORE Manual documentation is built separately from the :file:`doc/`
|
||||
sub-directory in the source. It requires Sphinx:
|
||||
|
||||
.. parsed-literal::
|
||||
|
||||
sudo yum install python-sphinx
|
||||
cd core-|version|/doc
|
||||
make html
|
||||
make latexpdf
|
||||
|
||||
|
||||
.. _Installing_from_Source_on_CentOS:
|
||||
|
||||
Installing from Source on CentOS/EL6
|
||||
------------------------------------
|
||||
|
||||
To build CORE from source on CentOS/EL6, first install the `EPEL <http://fedoraproject.org/wiki/EPEL>`_ repo (Extra Packages for Enterprise Linux) in order
|
||||
to provide the `libev` package.
|
||||
|
||||
::
|
||||
|
||||
wget http://dl.fedoraproject.org/pub/epel/6/i386/epel-release-6-8.noarch.rpm
|
||||
yum localinstall epel-release-6-8.noarch.rpm
|
||||
|
||||
|
||||
Now use the same instructions shown in :ref:`Installing_from_Source_on_Fedora`.
|
||||
CentOS/EL6 does not use the systemd service file, so the `configure` option
|
||||
`--with-startup=systemd` should be omitted:
|
||||
|
||||
::
|
||||
|
||||
./configure
|
||||
|
||||
|
||||
|
||||
.. _Installing_from_Source_on_SUSE:
|
||||
|
||||
Installing from Source on SUSE
|
||||
------------------------------
|
||||
|
||||
To build CORE from source on SUSE or OpenSUSE,
|
||||
use the similar instructions shown in :ref:`Installing_from_Source_on_Fedora`,
|
||||
except that the following `configure` option should be used:
|
||||
|
||||
::
|
||||
|
||||
./configure --with-startup=suse
|
||||
|
||||
This causes a separate init script to be installed that is tailored towards SUSE systems.
|
||||
|
||||
The `zypper` command is used instead of `yum`.
|
||||
|
||||
For OpenSUSE/Xen based installations, refer to the `README-Xen` file included
|
||||
in the CORE source.
|
||||
|
||||
|
||||
.. _Installing_from_Source_on_FreeBSD:
|
||||
|
||||
Installing from Source on FreeBSD
|
||||
---------------------------------
|
||||
|
||||
.. index:: kernel patch
|
||||
|
||||
**Rebuilding the FreeBSD Kernel**
|
||||
|
||||
|
||||
The FreeBSD kernel requires a small patch to allow per-node directories in the
|
||||
filesystem. Also, the `VIMAGE` build option needs to be turned on to enable
|
||||
jail-based network stack virtualization. The source code for the FreeBSD
|
||||
kernel is located in :file:`/usr/src/sys`.
|
||||
|
||||
Instructions below will use the :file:`/usr/src/sys/amd64` architecture
|
||||
directory, but the directory :file:`/usr/src/sys/i386` should be substituted
|
||||
if you are using a 32-bit architecture.
|
||||
|
||||
The kernel patch is available from the CORE source tarball under core-|version|/kernel/symlinks-8.1-RELEASE.diff. This patch applies to the
|
||||
FreeBSD 8.x or 9.x kernels.
|
||||
|
||||
.. parsed-literal::
|
||||
|
||||
cd /usr/src/sys
|
||||
# first you can check if the patch applies cleanly using the '-C' option
|
||||
patch -p1 -C < ~/core-|version|/kernel/symlinks-8.1-RELEASE.diff
|
||||
# without '-C' applies the patch
|
||||
patch -p1 < ~/core-|version|/kernel/symlinks-8.1-RELEASE.diff
|
||||
|
||||
|
||||
A kernel configuration file named :file:`CORE` can be found within the source tarball: core-|version|/kernel/freebsd8-config-CORE. The config is valid for
|
||||
FreeBSD 8.x or 9.x kernels.
|
||||
|
||||
The contents of this configuration file are shown below; you can edit it to suit your needs.
|
||||
|
||||
::
|
||||
|
||||
# this is the FreeBSD 9.x kernel configuration file for CORE
|
||||
include GENERIC
|
||||
ident CORE
|
||||
|
||||
options VIMAGE
|
||||
nooptions SCTP
|
||||
options IPSEC
|
||||
device crypto
|
||||
|
||||
options IPFIREWALL
|
||||
options IPFIREWALL_DEFAULT_TO_ACCEPT
|
||||
|
||||
|
||||
The kernel configuration file can be linked or copied to the kernel source directory. Use it to configure and build the kernel:
|
||||
|
||||
.. parsed-literal::
|
||||
|
||||
cd /usr/src/sys/amd64/conf
|
||||
cp ~/core-|version|/kernel/freebsd8-config-CORE CORE
|
||||
config CORE
|
||||
cd ../compile/CORE
|
||||
make cleandepend && make depend
|
||||
make -j8 && make install
|
||||
|
||||
|
||||
Change the number 8 above to match the number of CPU cores you have times two.
|
||||
Note that the ``make install`` step will move your existing kernel to
|
||||
``/boot/kernel.old`` and removes that directory if it already exists. Reboot to
|
||||
enable this new patched kernel.
|
||||
|
||||
**Building CORE from Source on FreeBSD**
|
||||
|
||||
Here are the prerequisite packages from the FreeBSD ports system:
|
||||
|
||||
::
|
||||
|
||||
pkg_add -r tk85
|
||||
pkg_add -r libimg
|
||||
pkg_add -r bash
|
||||
pkg_add -r libev
|
||||
pkg_add -r sudo
|
||||
pkg_add -r python
|
||||
pkg_add -r autotools
|
||||
pkg_add -r gmake
|
||||
|
||||
|
||||
Note that if you are installing to a bare FreeBSD system and want to SSH with X11 forwarding to that system, these packages will help:
|
||||
|
||||
::
|
||||
|
||||
pkg_add -r xauth
|
||||
pkg_add -r xorg-fonts
|
||||
|
||||
|
||||
The ``sudo`` package needs to be configured so a normal user can run the CORE
|
||||
GUI using the command ``core-gui`` (opening a shell window on a node uses a
|
||||
command such as ``sudo vimage n1``.)
|
||||
|
||||
On FreeBSD, the CORE source is built using autotools and gmake:
|
||||
|
||||
.. parsed-literal::
|
||||
|
||||
tar xzf core-|version|.tar.gz
|
||||
cd core-|version|
|
||||
./bootstrap.sh
|
||||
./configure
|
||||
gmake -j8
|
||||
sudo gmake install
|
||||
|
||||
|
||||
Build and install the ``vimage`` utility for controlling virtual images. The source can be obtained from `FreeBSD SVN <http://svn.freebsd.org/viewvc/base/head/tools/tools/vimage/>`_, or it is included with the CORE source for convenience:
|
||||
|
||||
.. parsed-literal::
|
||||
|
||||
cd core-|version|/kernel/vimage
|
||||
make
|
||||
make install
|
||||
|
||||
|
||||
.. index:: FreeBSD; kernel modules
|
||||
|
||||
.. index:: kernel modules
|
||||
|
||||
.. index:: ng_wlan and ng_pipe
|
||||
|
||||
On FreeBSD you should also install the CORE kernel modules for wireless emulation. Perform this step after you have recompiled and installed the FreeBSD kernel.
|
||||
|
||||
.. parsed-literal::
|
||||
|
||||
cd core-|version|/kernel/ng_pipe
|
||||
make
|
||||
sudo make install
|
||||
cd ../ng_wlan
|
||||
make
|
||||
sudo make install
|
||||
|
||||
|
||||
The :file:`ng_wlan` kernel module allows for the creation of WLAN nodes. This
|
||||
is a modified :file:`ng_hub` Netgraph module. Instead of packets being copied
|
||||
to every connected node, the WLAN maintains a hash table of connected node
|
||||
pairs. Furthermore, link parameters can be specified for node pairs, in
|
||||
addition to the on/off connectivity. The parameters are tagged to each packet
|
||||
and sent to the connected :file:`ng_pipe` module. The :file:`ng_pipe` has been
|
||||
modified to read any tagged parameters and apply them instead of its default
|
||||
link effects.
|
||||
|
||||
The :file:`ng_wlan` also supports linking together multiple WLANs across different machines using the :file:`ng_ksocket` Netgraph node, for distributed emulation.
|
||||
|
||||
The Quagga routing suite is recommended for routing,
|
||||
see :ref:`Quagga_Routing_Software` for installation instructions.
|
||||
|
||||
.. _Quagga_Routing_Software:
|
||||
|
||||
Quagga Routing Software
|
||||
=======================
|
||||
|
||||
.. index:: Quagga
|
||||
|
||||
Virtual networks generally require some form of routing in order to work (e.g.
|
||||
to automatically populate routing tables for routing packets from one subnet
|
||||
to another.) CORE builds OSPF routing protocol
|
||||
configurations by default when the blue router
|
||||
node type is used. The OSPF protocol is available
|
||||
from the `Quagga open source routing suite <http://www.quagga.net>`_.
|
||||
Other routing protocols are available using different
|
||||
node services, :ref:`Default_Services_and_Node_Types`.
|
||||
|
||||
Quagga is not specified as a dependency for the CORE packages because
|
||||
there are two different Quagga packages that you may use:
|
||||
|
||||
* `Quagga <http://www.quagga.net>`_ - the standard version of Quagga, suitable for static wired networks, and usually available via your distribution's package manager.
|
||||
.. index:: OSPFv3 MANET
|
||||
|
||||
.. index:: OSPFv3 MDR
|
||||
|
||||
.. index:: MANET Designated Routers (MDR)
|
||||
|
||||
*
|
||||
`OSPF MANET Designated Routers <http://cs.itd.nrl.navy.mil/work/ospf-manet/index.php>`_ (MDR) - the Quagga routing suite with a modified version of OSPFv3,
|
||||
optimized for use with mobile wireless networks. The *mdr* node type (and the MDR service) requires this variant of Quagga.
|
||||
|
||||
If you plan on working with wireless networks, we recommend installing OSPF MDR;
|
||||
otherwise install the standard version of Quagga using your package manager or from source.
|
||||
|
||||
.. _Installing_Quagga_from_Packages:
|
||||
|
||||
Installing Quagga from Packages
|
||||
-------------------------------
|
||||
|
||||
To install the standard version of Quagga from packages, use your package
|
||||
manager (Linux) or the ports system (FreeBSD).
|
||||
|
||||
Ubuntu users:
|
||||
::
|
||||
|
||||
sudo apt-get install quagga
|
||||
|
||||
Fedora users:
|
||||
::
|
||||
|
||||
yum install quagga
|
||||
|
||||
FreeBSD users:
|
||||
::
|
||||
|
||||
pkg_add -r quagga
|
||||
|
||||
|
||||
To install the Quagga variant having OSPFv3 MDR, first download the
|
||||
appropriate package, and install using the package manager.
|
||||
|
||||
Ubuntu users:
|
||||
|
||||
.. parsed-literal::
|
||||
|
||||
export URL=http://downloads.pf.itd.nrl.navy.mil/ospf-manet
|
||||
wget $URL/|QVER|/|QVERDEB|
|
||||
sudo dpkg -i |QVERDEB|
|
||||
|
||||
Replace `amd64` with `i686` if using a 32-bit architecture.
|
||||
|
||||
Fedora users:
|
||||
|
||||
.. parsed-literal::
|
||||
|
||||
export URL=http://downloads.pf.itd.nrl.navy.mil/ospf-manet
|
||||
wget $URL/|QVER|/|QVERRPM|
|
||||
yum localinstall |QVERRPM|
|
||||
|
||||
Replace `x86_64` with `i686` if using a 32-bit architecture.
|
||||
|
||||
.. _Compiling_Quagga_for_CORE:
|
||||
|
||||
Compiling Quagga for CORE
|
||||
-------------------------
|
||||
|
||||
To compile Quagga to work with CORE on Linux:
|
||||
|
||||
.. parsed-literal::
|
||||
|
||||
tar xzf |QVER|.tar.gz
|
||||
cd |QVER|
|
||||
./configure --enable-user=root --enable-group=root --with-cflags=-ggdb \\
|
||||
--sysconfdir=/usr/local/etc/quagga --enable-vtysh \\
|
||||
--localstatedir=/var/run/quagga
|
||||
make
|
||||
sudo make install
|
||||
|
||||
|
||||
Note that the configuration directory :file:`/usr/local/etc/quagga` shown for
|
||||
Quagga above could be :file:`/etc/quagga`, if you create a symbolic link from
|
||||
:file:`/etc/quagga/Quagga.conf -> /usr/local/etc/quagga/Quagga.conf` on the
|
||||
host. The :file:`quaggaboot.sh` script in a Linux network namespace will try to
|
||||
do this for you if needed.
|
||||
|
||||
If you try to run quagga after installing from source and get an error such as:
|
||||
|
||||
.. parsed-literal::
|
||||
|
||||
error while loading shared libraries libzebra.so.0
|
||||
|
||||
this is usually a sign that you have to run `sudo ldconfig` to refresh the
|
||||
cache file.
|
||||
|
||||
To compile Quagga to work with CORE on FreeBSD:
|
||||
|
||||
.. parsed-literal::
|
||||
|
||||
tar xzf |QVER|.tar.gz
|
||||
cd |QVER|
|
||||
./configure --enable-user=root --enable-group=wheel \\
|
||||
--sysconfdir=/usr/local/etc/quagga --enable-vtysh \\
|
||||
--localstatedir=/var/run/quagga
|
||||
gmake
|
||||
gmake install
|
||||
|
||||
|
||||
On FreeBSD |BSDVERSION| you can use ``make`` or ``gmake``.
|
||||
You probably want to compile Quagga from the ports system in
|
||||
:file:`/usr/ports/net/quagga`.
|
||||
|
||||
VCORE
|
||||
=====
|
||||
|
||||
.. index:: virtual machines
|
||||
|
||||
.. index:: VirtualBox
|
||||
|
||||
.. index:: VMware
|
||||
|
||||
CORE is capable of running inside of a virtual machine, using
|
||||
software such as VirtualBox,
|
||||
VMware Server or QEMU. However, CORE itself is performing machine
|
||||
virtualization in order to realize multiple emulated nodes, and running CORE
|
||||
virtually adds additional contention for the physical resources. **For performance reasons, this is not recommended.** Timing inside of a VM often has
|
||||
problems. If you do run CORE from within a VM, it is recommended that you view
|
||||
the GUI with remote X11 over SSH, so the virtual machine does not need to
|
||||
emulate the video card with the X11 application.
|
||||
|
||||
.. index:: VCORE
|
||||
|
||||
A CORE virtual machine is provided for download, named VCORE.
|
||||
This is perhaps the easiest way to get CORE up and running, as the machine
|
||||
is already set up for you. This may be adequate for initially evaluating the
|
||||
tool but keep in mind the performance limitations of running within VirtualBox
|
||||
or VMware. To install the virtual machine, you first need to obtain VirtualBox
|
||||
from http://www.virtualbox.org, or VMware Server or Player from
|
||||
http://www.vmware.com (this commercial software is distributed for free.)
|
||||
Once virtualization software has been installed, you can import the virtual
|
||||
machine appliance using the ``vbox`` file for VirtualBox or the ``vmx`` file for VMware. See the documentation that comes with VCORE for login information.
|
||||
|
256
doc/intro.rst
Normal file
256
doc/intro.rst
Normal file
|
@ -0,0 +1,256 @@
|
|||
.. This file is part of the CORE Manual
|
||||
(c)2012-2013 the Boeing Company
|
||||
|
||||
.. _Introduction:
|
||||
|
||||
************
|
||||
Introduction
|
||||
************
|
||||
|
||||
The Common Open Research Emulator (CORE) is a tool for building virtual
|
||||
networks. As an emulator, CORE builds a representation of a real computer
|
||||
network that runs in real time, as opposed to simulation, where abstract models
|
||||
are used. The live-running emulation can be connected to physical networks and
|
||||
routers. It provides an environment for running real applications and
|
||||
protocols, taking advantage of virtualization provided by the Linux or FreeBSD
|
||||
operating systems.
|
||||
|
||||
Some of its key features are:
|
||||
|
||||
.. index::
|
||||
single: key features
|
||||
|
||||
* efficient and scalable
|
||||
* runs applications and protocols without modification
|
||||
* easy-to-use GUI
|
||||
* highly customizable
|
||||
|
||||
CORE is typically used for network and protocol research,
|
||||
demonstrations, application and platform testing, evaluating networking
|
||||
scenarios, security studies, and increasing the size of physical test networks.
|
||||
|
||||
.. index::
|
||||
single: CORE; components of
|
||||
single: CORE; API
|
||||
single: API
|
||||
single: CORE; GUI
|
||||
|
||||
.. _Architecture:
|
||||
|
||||
Architecture
|
||||
============
|
||||
The main components of CORE are shown in :ref:`core-architecture`. A
|
||||
*CORE daemon* (backend) manages emulation sessions. It builds emulated networks
|
||||
using kernel virtualization for virtual nodes and some form of bridging and
|
||||
packet manipulation for virtual networks. The nodes and networks come together
|
||||
via interfaces installed on nodes. The daemon is controlled via the
|
||||
graphical user interface, the *CORE GUI* (frontend).
|
||||
The daemon uses Python modules
|
||||
that can be imported directly by Python scripts.
|
||||
The GUI and the daemon communicate using a custom,
|
||||
asynchronous, sockets-based API, known as the *CORE API*. The dashed line
|
||||
in the figure notionally depicts the user-space and kernel-space separation.
|
||||
The components the user interacts with are colored blue: GUI, scripts, or
|
||||
command-line tools.
|
||||
|
||||
The system is modular to allow mixing different components. The virtual
|
||||
networks component, for example, can be realized with other network
|
||||
simulators and emulators, such as ns-3 and EMANE.
|
||||
Different types of kernel virtualization are supported.
|
||||
Another example is how a session can be designed and started using
|
||||
the GUI, and continue to run in "headless" operation with the GUI closed.
|
||||
The CORE API is sockets based,
|
||||
to allow the possibility of running different components on different physical
|
||||
machines.
|
||||
|
||||
.. _core-architecture:
|
||||
|
||||
.. figure:: figures/core-architecture.*
|
||||
:alt: CORE architecture diagram
|
||||
:align: center
|
||||
|
||||
CORE Architecture
|
||||
|
||||
The CORE GUI is a Tcl/Tk program; it is started using the command
|
||||
``core-gui``. The CORE daemon, named ``core-daemon``,
|
||||
is usually started via the init script
|
||||
(``/etc/init.d/core-daemon`` or ``core-daemon.service``,
|
||||
depending on platform.)
|
||||
The CORE daemon manages sessions of virtual
|
||||
nodes and networks, of which other scripts and utilities may be used for
|
||||
further control.
|
||||
|
||||
|
||||
.. _How_Does_It_Work?:
|
||||
|
||||
How Does it Work?
|
||||
=================
|
||||
|
||||
A CORE node is a lightweight virtual machine. The CORE framework runs on Linux
|
||||
and FreeBSD systems. The primary platform used for development is Linux.
|
||||
|
||||
.. index::
|
||||
single: Linux; virtualization
|
||||
single: Linux; containers
|
||||
single: LXC
|
||||
single: network namespaces
|
||||
|
||||
* :ref:`Linux` CORE uses Linux network namespace virtualization to build virtual nodes, and ties them together with virtual networks using Linux Ethernet bridging.
|
||||
* :ref:`FreeBSD` CORE uses jails with a network stack virtualization kernel option to build virtual nodes, and ties them together with virtual networks using BSD's Netgraph system.
|
||||
|
||||
|
||||
.. _Linux:
|
||||
|
||||
Linux
|
||||
-----
|
||||
Linux network namespaces (also known as netns, LXC, or `Linux containers
|
||||
<http://lxc.sourceforge.net/>`_) is the primary virtualization
|
||||
technique used by CORE. LXC has been part of the mainline Linux kernel since
|
||||
2.6.24. Recent Linux distributions such as Fedora and Ubuntu have
|
||||
namespaces-enabled kernels out of the box, so the kernel does not need to be
|
||||
patched or recompiled.
|
||||
A namespace is created using the ``clone()`` system call. Similar
|
||||
to the BSD jails, each namespace has its own process environment and private
|
||||
network stack. Network namespaces share the same filesystem in CORE.
|
||||
|
||||
.. index::
|
||||
single: Linux; bridging
|
||||
single: Linux; networking
|
||||
single: ebtables
|
||||
|
||||
CORE combines these namespaces with Linux Ethernet bridging
|
||||
to form networks. Link characteristics are applied using Linux Netem queuing
|
||||
disciplines. Ebtables is Ethernet frame filtering on Linux bridges. Wireless
|
||||
networks are emulated by controlling which interfaces can send and receive with
|
||||
ebtables rules.
|
||||
|
||||
|
||||
.. _FreeBSD:
|
||||
|
||||
FreeBSD
|
||||
-------
|
||||
|
||||
.. index::
|
||||
single: FreeBSD; Network stack virtualization
|
||||
single: FreeBSD; jails
|
||||
single: FreeBSD; vimages
|
||||
|
||||
FreeBSD jails provide an isolated process space, a virtual environment for
|
||||
running programs. Starting with FreeBSD 8.0, a new `vimage` kernel option
|
||||
extends BSD jails so that each jail can have its own virtual network stack --
|
||||
its own networking variables such as addresses, interfaces, routes, counters,
|
||||
protocol state, socket information, etc. The existing networking algorithms and
|
||||
code paths are intact but operate on this virtualized state.
|
||||
|
||||
Each jail plus network stack forms a lightweight virtual machine. These are
|
||||
named jails or *virtual images* (or *vimages*) and are created using the
|
||||
``jail`` or ``vimage`` command. Unlike traditional virtual
|
||||
machines, vimages do not feature entire operating systems running on emulated
|
||||
hardware. All of the vimages will share the same processor, memory, clock, and
|
||||
other system resources. Because the actual hardware is not emulated and network
|
||||
packets can be passed by reference through the in-kernel Netgraph system,
|
||||
vimages are quite lightweight and a single system can accommodate numerous
|
||||
instances.
|
||||
|
||||
Virtual network stacks in FreeBSD were historically available as a patch to the
|
||||
FreeBSD 4.11 and 7.0 kernels, and the VirtNet project [#f1]_ [#f2]_
|
||||
added this functionality to the
|
||||
mainline 8.0-RELEASE and newer kernels.
|
||||
|
||||
.. index::
|
||||
single: FreeBSD; Netgraph
|
||||
|
||||
The FreeBSD Operating System kernel features a graph-based
|
||||
networking subsystem named Netgraph. The netgraph(4) manual page quoted below
|
||||
best defines this system:
|
||||
|
||||
The netgraph system provides a uniform and modular system for the
|
||||
implementation of kernel objects which perform various networking functions.
|
||||
The objects, known as nodes, can be arranged into arbitrarily complicated
|
||||
graphs. Nodes have hooks which are used to connect two nodes together,
|
||||
forming the edges in the graph. Nodes communicate along the edges to
|
||||
process data, implement protocols, etc.
|
||||
|
||||
The aim of netgraph is to supplement rather than replace the existing
|
||||
kernel networking infrastructure.
|
||||
|
||||
.. index::
|
||||
single: IMUNES
|
||||
single: VirtNet
|
||||
single: prior work
|
||||
|
||||
.. rubric:: Footnotes
|
||||
.. [#f1] http://www.nlnet.nl/project/virtnet/
|
||||
.. [#f2] http://www.imunes.net/virtnet/
|
||||
|
||||
.. _Prior_Work:
|
||||
|
||||
Prior Work
|
||||
==========
|
||||
|
||||
The Tcl/Tk CORE GUI was originally derived from the open source
|
||||
`IMUNES <http://www.tel.fer.hr/imunes/>`_
|
||||
project from the University of Zagreb
|
||||
as a custom project within Boeing Research and Technology's Network
|
||||
Technology research group in 2004. Since then they have developed the CORE
|
||||
framework to use not only FreeBSD but Linux virtualization, have developed a
|
||||
Python framework, and made numerous user- and kernel-space developments, such
|
||||
as support for wireless networks, IPsec, the ability to distribute emulations,
|
||||
simulation integration, and more. The IMUNES project also consists of userspace
|
||||
and kernel components. Originally, one had to download and apply a patch for
|
||||
the FreeBSD 4.11 kernel, but the more recent
|
||||
`VirtNet <http://www.nlnet.nl/project/virtnet/>`_
|
||||
effort has brought network stack
|
||||
virtualization to the more modern FreeBSD 8.x kernel.
|
||||
|
||||
.. _Open_Source_Project_and_Resources:
|
||||
|
||||
Open Source Project and Resources
|
||||
=================================
|
||||
.. index::
|
||||
single: open source project
|
||||
single: license
|
||||
single: website
|
||||
single: supplemental website
|
||||
single: contributing
|
||||
|
||||
CORE has been released by Boeing to the open source community under the BSD
|
||||
license. If you find CORE useful for your work, please contribute back to the
|
||||
project. Contributions can be as simple as reporting a bug, dropping a line of
|
||||
encouragement or technical suggestions to the mailing lists, or can also
|
||||
include submitting patches or maintaining aspects of the tool. For details on
|
||||
contributing to CORE, please visit the
|
||||
`wiki <http://code.google.com/p/coreemu/wiki/Home>`_.
|
||||
|
||||
Besides this manual, there are other additional resources available online:
|
||||
|
||||
* `CORE website <http://cs.itd.nrl.navy.mil/work/core/>`_ - main project page containing demos, downloads, and mailing list information.
|
||||
* `CORE supplemental website <http://code.google.com/p/coreemu/>`_ - supplemental Google Code page with a quickstart guide, wiki, bug tracker, and screenshots.
|
||||
|
||||
.. index::
|
||||
single: wiki
|
||||
single: CORE; wiki
|
||||
|
||||
The `CORE wiki <http://code.google.com/p/coreemu/wiki/Home>`_ is a good place to check for the latest documentation and tips.
|
||||
|
||||
Goals
|
||||
-----
|
||||
These are the Goals of the CORE project; they are similar to what we consider to be the :ref:`key features <Introduction>`.
|
||||
|
||||
#. Ease of use - In a few clicks the user should have a running network.
|
||||
#. Efficiency and scalability - A node is more lightweight than a full virtual machine. Tens of nodes should be possible on a standard laptop computer.
|
||||
#. Real software - Run real implementation code, protocols, networking stacks.
|
||||
#. Networking - CORE is focused on emulating networks and offers various ways to connect the running emulation with real or simulated networks.
|
||||
#. Hackable - The source code is available and easy to understand and modify.
|
||||
|
||||
Non-Goals
|
||||
---------
|
||||
This is a list of Non-Goals, specific things that people may be interested in but are not areas that we will pursue.
|
||||
|
||||
|
||||
#. Reinventing the wheel - Where possible, CORE reuses existing open source components such as virtualization, Netgraph, netem, bridging, Quagga, etc.
|
||||
#. 1,000,000 nodes - While the goal of CORE is to provide efficient, scalable network emulation, there is no set goal of N number of nodes. There are realistic limits on what a machine can handle as its resources are divided amongst virtual nodes. We will continue to make things more efficient and let the user determine the right number of nodes based on available hardware and the activities each node is performing.
|
||||
#. Solves every problem - CORE is about emulating networking layers 3-7 using virtual network stacks in the Linux or FreeBSD operating systems.
|
||||
#. Hardware-specific - CORE itself is not an instantiation of hardware, a testbed, or a specific laboratory setup; it should run on commodity laptop and desktop PCs, in addition to high-end server hardware.
|
||||
|
||||
|
91
doc/machine.rst
Normal file
91
doc/machine.rst
Normal file
|
@ -0,0 +1,91 @@
|
|||
.. This file is part of the CORE Manual
|
||||
(c)2012-2013 the Boeing Company
|
||||
|
||||
.. _Machine_Types:
|
||||
|
||||
*************
|
||||
Machine Types
|
||||
*************
|
||||
|
||||
.. index:: machine types
|
||||
|
||||
Different node types can be configured in CORE, and each node type has a
|
||||
*machine type* that indicates how the node will be represented at run time.
|
||||
Different machine types allow for different virtualization options.
|
||||
|
||||
.. _netns:
|
||||
|
||||
netns
|
||||
=====
|
||||
|
||||
.. index:: netns machine type
|
||||
|
||||
The *netns* machine type is the default. This is for nodes that will be
|
||||
backed by Linux network namespaces. See :ref:`Linux` for a brief explanation of
|
||||
netns. This default machine type is very lightweight, providing a minimum
|
||||
amount of
|
||||
virtualization in order to emulate a network.
|
||||
Another reason this is designated as the default machine type
|
||||
is because this virtualization technology
|
||||
typically requires no changes to the kernel; it is available out-of-the-box
|
||||
from the latest mainstream Linux distributions.
|
||||
|
||||
.. index:: physical machine type
|
||||
|
||||
.. index:: emulation testbed machines
|
||||
|
||||
.. index:: real node
|
||||
|
||||
.. index:: physical node
|
||||
|
||||
.. _physical:
|
||||
|
||||
physical
|
||||
========
|
||||
|
||||
The *physical* machine type is used for nodes that represent a real
|
||||
Linux-based machine that will participate in the emulated network scenario.
|
||||
This is typically used, for example, to incorporate racks of server machines
|
||||
from an emulation testbed. A physical node is one that is running the CORE
|
||||
daemon (:file:`core-daemon`), but will not be further partitioned into virtual
|
||||
machines. Services that are run on the physical node do not run in an
|
||||
isolated or virtualized environment, but directly on the operating system.
|
||||
|
||||
Physical nodes must be assigned to servers, the same way nodes
|
||||
are assigned to emulation servers with :ref:`Distributed_Emulation`.
|
||||
The list of available physical nodes currently shares the same dialog box
|
||||
and list as the emulation servers, accessed using the *Emulation Servers...*
|
||||
entry from the *Session* menu.
|
||||
|
||||
.. index:: GRE tunnels with physical nodes
|
||||
|
||||
Support for physical nodes is under development and may be improved in future
|
||||
releases. Currently, when any node is linked to a physical node, a dashed line
|
||||
is drawn to indicate network tunneling. A GRE tunneling interface will be
|
||||
created on the physical node and used to tunnel traffic to and from the
|
||||
emulated world.
|
||||
|
||||
Double-clicking on a physical node during runtime
|
||||
opens a terminal with an SSH shell to that
|
||||
node. Users should configure public-key SSH login as done with emulation
|
||||
servers.
|
||||
|
||||
.. _xen:
|
||||
|
||||
xen
|
||||
===
|
||||
|
||||
.. index:: xen machine type
|
||||
|
||||
The *xen* machine type is an experimental new type in CORE for managing
|
||||
Xen domUs from within CORE. After further development,
|
||||
it may be documented here.
|
||||
|
||||
Current limitations include only supporting ISO-based filesystems, and lack
|
||||
of integration with node services, EMANE, and possibly other features of CORE.
|
||||
|
||||
There is a :file:`README-Xen` file available in the CORE source that contains
|
||||
further instructions for setting up Xen-based nodes.
|
||||
|
||||
|
||||
|
37
doc/man/Makefile.am
Executable file
37
doc/man/Makefile.am
Executable file
|
@ -0,0 +1,37 @@
|
|||
# CORE
|
||||
# (c)2012-2013 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
# Makefile for building man pages.
|
||||
#
|
||||
|
||||
if WANT_GUI
|
||||
GUI_MANS = core-gui.1
|
||||
endif
|
||||
if WANT_DAEMON
|
||||
DAEMON_MANS = vnoded.1 vcmd.1 netns.1 core-daemon.1 coresendmsg.1 \
|
||||
core-cleanup.1 core-xen-cleanup.1
|
||||
endif
|
||||
man_MANS = $(GUI_MANS) $(DAEMON_MANS)
|
||||
|
||||
.PHONY: generate-mans
|
||||
generate-mans:
|
||||
$(HELP2MAN) --source CORE 'sh $(top_srcdir)/gui/core-gui' -o core-gui.1.new
|
||||
$(HELP2MAN) --no-info --source CORE $(top_srcdir)/daemon/src/vnoded -o vnoded.1.new
|
||||
$(HELP2MAN) --no-info --source CORE $(top_srcdir)/daemon/src/vcmd -o vcmd.1.new
|
||||
$(HELP2MAN) --no-info --source CORE $(top_srcdir)/daemon/src/netns -o netns.1.new
|
||||
$(HELP2MAN) --version-string=$(CORE_VERSION) --no-info --source CORE $(top_srcdir)/daemon/sbin/core-daemon -o core-daemon.1.new
|
||||
$(HELP2MAN) --version-string=$(CORE_VERSION) --no-info --source CORE $(top_srcdir)/daemon/sbin/coresendmsg -o coresendmsg.1.new
|
||||
$(HELP2MAN) --version-string=$(CORE_VERSION) --no-info --source CORE $(top_srcdir)/daemon/sbin/core-cleanup -o core-cleanup.1.new
|
||||
$(HELP2MAN) --version-string=$(CORE_VERSION) --no-info --source CORE $(top_srcdir)/daemon/sbin/core-xen-cleanup -o core-xen-cleanup.1.new
|
||||
|
||||
.PHONY: diff
|
||||
diff:
|
||||
for m in ${man_MANS}; do \
|
||||
colordiff -u $$m $$m.new | less -R; \
|
||||
done;
|
||||
|
||||
DISTCLEANFILES = Makefile.in
|
||||
EXTRA_DIST = $(man_MANS)
|
30
doc/man/core-cleanup.1
Normal file
30
doc/man/core-cleanup.1
Normal file
|
@ -0,0 +1,30 @@
|
|||
.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.40.4.
|
||||
.TH CORE-CLEANUP "1" "August 2013" "CORE" "User Commands"
|
||||
.SH NAME
|
||||
core-cleanup \- clean-up script for CORE
|
||||
.SH DESCRIPTION
|
||||
usage: core\-cleanup [\-d [\-l]]
|
||||
.IP
|
||||
Clean up all CORE namespaces processes, bridges, interfaces, and session
|
||||
directories. Options:
|
||||
.TP
|
||||
\fB\-h\fR
|
||||
show this help message and exit
|
||||
.TP
|
||||
\fB\-d\fR
|
||||
also kill the Python daemon
|
||||
.TP
|
||||
\fB\-l\fR
|
||||
remove the core-daemon.log file
|
||||
.SH "SEE ALSO"
|
||||
.BR core-gui(1),
|
||||
.BR core-daemon(1),
|
||||
.BR coresendmsg(1),
|
||||
.BR core-xen-cleanup(1),
|
||||
.BR vcmd(1),
|
||||
.BR vnoded(1)
|
||||
.SH BUGS
|
||||
Report bugs to
|
||||
.BI core-dev@pf.itd.nrl.navy.mil.
|
||||
|
||||
|
52
doc/man/core-daemon.1
Normal file
52
doc/man/core-daemon.1
Normal file
|
@ -0,0 +1,52 @@
|
|||
.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.40.4.
|
||||
.TH CORE "1" "August 2013" "CORE" "User Commands"
|
||||
.SH NAME
|
||||
core-daemon \- CORE daemon manages emulation sessions started from GUI or scripts
|
||||
.SH SYNOPSIS
|
||||
.B core-daemon
|
||||
[\fI-h\fR] [\fIoptions\fR] [\fIargs\fR]
|
||||
.SH DESCRIPTION
|
||||
CORE daemon instantiates Linux network namespace nodes.
|
||||
.SH OPTIONS
|
||||
.TP
|
||||
\fB\-h\fR, \fB\-\-help\fR
|
||||
show this help message and exit
|
||||
.TP
|
||||
\fB\-f\fR CONFIGFILE, \fB\-\-configfile\fR=\fICONFIGFILE\fR
|
||||
read config from specified file; default =
|
||||
/etc/core/core.conf
|
||||
.TP
|
||||
\fB\-d\fR, \fB\-\-daemonize\fR
|
||||
run in background as daemon; default=False
|
||||
.TP
|
||||
\fB\-e\fR EXECFILE, \fB\-\-execute\fR=\fIEXECFILE\fR
|
||||
execute a Python/XML\-based session
|
||||
.TP
|
||||
\fB\-l\fR LOGFILE, \fB\-\-logfile\fR=\fILOGFILE\fR
|
||||
log output to specified file; default =
|
||||
/var/log/core-daemon.log
|
||||
.TP
|
||||
\fB\-p\fR PORT, \fB\-\-port\fR=\fIPORT\fR
|
||||
port number to listen on; default = 4038
|
||||
.TP
|
||||
\fB\-i\fR PIDFILE, \fB\-\-pidfile\fR=\fIPIDFILE\fR
|
||||
filename to write pid to; default = /var/run/core-daemon.pid
|
||||
.TP
|
||||
\fB\-t\fR NUMTHREADS, \fB\-\-numthreads\fR=\fINUMTHREADS\fR
|
||||
number of server threads; default = 1
|
||||
.TP
|
||||
\fB\-v\fR, \fB\-\-verbose\fR
|
||||
enable verbose logging; default = False
|
||||
.TP
|
||||
\fB\-g\fR, \fB\-\-debug\fR
|
||||
enable debug logging; default = False
|
||||
.SH "SEE ALSO"
|
||||
.BR core-gui(1),
|
||||
.BR coresendmsg(1),
|
||||
.BR netns(1),
|
||||
.BR vcmd(1),
|
||||
.BR vnoded(1)
|
||||
.SH BUGS
|
||||
Report bugs to
|
||||
.BI core-dev@pf.itd.nrl.navy.mil.
|
||||
|
38
doc/man/core-gui.1
Normal file
38
doc/man/core-gui.1
Normal file
|
@ -0,0 +1,38 @@
|
|||
.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.40.4.
|
||||
.TH CORE "1" "August 2013" "CORE" "User Commands"
|
||||
.SH NAME
|
||||
core-gui \- Common Open Research Emulator (CORE) graphical user interface
|
||||
.SH SYNOPSIS
|
||||
.B core-gui
|
||||
[\fI-h|-v\fR] [\fI-b|-c <sessionid>\fR] [\fI-s\fR] [\fI<configfile.imn>\fR]
|
||||
.SH DESCRIPTION
|
||||
Launches the CORE Tcl/Tk X11 GUI or starts an imn\-based emulation.
|
||||
.TP
|
||||
\-(\fB\-h\fR)elp
|
||||
show help message and exit
|
||||
.TP
|
||||
\-(\fB\-v\fR)ersion
|
||||
show version number and exit
|
||||
.TP
|
||||
\-(\fB\-b\fR)atch
|
||||
batch mode (no X11 GUI)
|
||||
.TP
|
||||
\-(\fB\-c\fR)losebatch <sessionid>
|
||||
stop and clean up a batch mode session <sessionid>
|
||||
.TP
|
||||
\-(\fB\-s\fR)tart
|
||||
start in execute mode, not edit mode
|
||||
.TP
|
||||
<configfile.imn>
|
||||
(optional) load the specified imn scenario file
|
||||
.PP
|
||||
With no parameters, starts the GUI in edit mode with a blank canvas.
|
||||
.SH "SEE ALSO"
|
||||
.BR core-daemon(1),
|
||||
.BR coresendmsg(1),
|
||||
.BR vcmd(1),
|
||||
.BR vnoded(1)
|
||||
.SH BUGS
|
||||
Report bugs to
|
||||
.BI core-dev@pf.itd.nrl.navy.mil.
|
||||
|
28
doc/man/core-xen-cleanup.1
Normal file
28
doc/man/core-xen-cleanup.1
Normal file
|
@ -0,0 +1,28 @@
|
|||
.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.40.4.
|
||||
.TH CORE-XEN-CLEANUP "1" "August 2013" "CORE" "User Commands"
|
||||
.SH NAME
|
||||
core-xen-cleanup \- clean-up script for CORE Xen domUs
|
||||
.SH DESCRIPTION
|
||||
usage: core\-xen\-cleanup [\-d]
|
||||
.IP
|
||||
Clean up all CORE Xen domUs, bridges, interfaces, and session
|
||||
directories. Options:
|
||||
.TP
|
||||
\fB\-h\fR
|
||||
show this help message and exit
|
||||
.TP
|
||||
\fB\-d\fR
|
||||
also kill the Python daemon
|
||||
.SH "SEE ALSO"
|
||||
.BR core-gui(1),
|
||||
.BR core-daemon(1),
|
||||
.BR coresendmsg(1),
|
||||
.BR core-cleanup(1),
|
||||
.BR vcmd(1),
|
||||
.BR vnoded(1)
|
||||
.SH BUGS
|
||||
Warning! This script will remove logical volumes that match the name "/dev/vg*/c*-n*-" on all volume groups. Use with care.
|
||||
Report bugs to
|
||||
.BI core-dev@pf.itd.nrl.navy.mil.
|
||||
|
||||
|
85
doc/man/coresendmsg.1
Normal file
85
doc/man/coresendmsg.1
Normal file
|
@ -0,0 +1,85 @@
|
|||
.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.40.4.
|
||||
.TH CORESENDMSG "1" "August 2013" "CORE" "User Commands"
|
||||
.SH NAME
|
||||
coresendmsg \- send a CORE API message to the core-daemon daemon
|
||||
.SH SYNOPSIS
|
||||
.B coresendmsg
|
||||
[\fI-h|-H\fR] [\fIoptions\fR] [\fImessage-type\fR] [\fIflags=flags\fR] [\fImessage-TLVs\fR]
|
||||
.SH DESCRIPTION
|
||||
.SS "Supported message types:"
|
||||
.IP
|
||||
['node', 'link', 'exec', 'reg', 'conf', 'file', 'iface', 'event', 'sess', 'excp']
|
||||
.SS "Supported message flags (flags=f1,f2,...):"
|
||||
.IP
|
||||
['add', 'del', 'cri', 'loc', 'str', 'txt', 'tty']
|
||||
.SH OPTIONS
|
||||
.TP
|
||||
\fB\-h\fR, \fB\-\-help\fR
|
||||
show this help message and exit
|
||||
.TP
|
||||
\fB\-H\fR
|
||||
show example usage help message and exit
|
||||
.TP
|
||||
\fB\-p\fR PORT, \fB\-\-port\fR=\fIPORT\fR
|
||||
TCP port to connect to, default: 4038
|
||||
.TP
|
||||
\fB\-a\fR ADDRESS, \fB\-\-address\fR=\fIADDRESS\fR
|
||||
Address to connect to, default: localhost
|
||||
.TP
|
||||
\fB\-s\fR SESSION, \fB\-\-session\fR=\fISESSION\fR
|
||||
Session to join, default: None
|
||||
.TP
|
||||
\fB\-l\fR, \fB\-\-listen\fR
|
||||
Listen for a response message and print it.
|
||||
.TP
|
||||
\fB\-t\fR, \fB\-\-list\-tlvs\fR
|
||||
List TLVs for the specified message type.
|
||||
.TP
|
||||
\fB\-T\fR, \fB\-\-tcp\fR
|
||||
Use TCP instead of UDP and connect to a session,
|
||||
default: False
|
||||
.PP
|
||||
Usage: coresendmsg [\-h|\-H] [options] [message\-type] [flags=flags] [message\-TLVs]
|
||||
.SS "Supported message types:"
|
||||
.IP
|
||||
['node', 'link', 'exec', 'reg', 'conf', 'file', 'iface', 'event', 'sess', 'excp']
|
||||
.SS "Supported message flags (flags=f1,f2,...):"
|
||||
.IP
|
||||
['add', 'del', 'cri', 'loc', 'str', 'txt', 'tty']
|
||||
.TP
|
||||
\fB\-h\fR, \fB\-\-help\fR
|
||||
show this help message and exit
|
||||
.TP
|
||||
\fB\-H\fR
|
||||
show example usage help message and exit
|
||||
.TP
|
||||
\fB\-p\fR PORT, \fB\-\-port\fR=\fIPORT\fR
|
||||
TCP port to connect to, default: 4038
|
||||
.TP
|
||||
\fB\-a\fR ADDRESS, \fB\-\-address\fR=\fIADDRESS\fR
|
||||
Address to connect to, default: localhost
|
||||
.TP
|
||||
\fB\-s\fR SESSION, \fB\-\-session\fR=\fISESSION\fR
|
||||
Session to join, default: None
|
||||
.TP
|
||||
\fB\-l\fR, \fB\-\-listen\fR
|
||||
Listen for a response message and print it.
|
||||
.TP
|
||||
\fB\-t\fR, \fB\-\-list\-tlvs\fR
|
||||
List TLVs for the specified message type.
|
||||
.TP
|
||||
\fB\-T\fR, \fB\-\-tcp\fR
|
||||
Use TCP instead of UDP and connect to a session,
|
||||
default: False
|
||||
.SH "EXAMPLES"
|
||||
.TP
|
||||
A list of examples is available using the following command:
|
||||
coresendmsg \-H
|
||||
.SH "SEE ALSO"
|
||||
.BR core-gui(1),
|
||||
.BR core-daemon(1),
|
||||
.BR vcmd(1),
|
||||
.BR vnoded(1)
|
||||
.SH BUGS
|
||||
Report bugs to
|
||||
.BI core-dev@pf.itd.nrl.navy.mil.
|
30
doc/man/netns.1
Normal file
30
doc/man/netns.1
Normal file
|
@ -0,0 +1,30 @@
|
|||
.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.40.4.
|
||||
.TH NETNS "1" "August 2013" "CORE" "User Commands"
|
||||
.SH NAME
|
||||
netns \- run commands within a network namespace
|
||||
.SH SYNOPSIS
|
||||
.B netns
|
||||
[\fI-h|-V\fR] [\fI-w\fR] \fI-- command \fR[\fIargs\fR...]
|
||||
.SH DESCRIPTION
|
||||
Run the specified command in a new network namespace.
|
||||
.SH OPTIONS
|
||||
.TP
|
||||
\fB\-h\fR, \fB\-\-help\fR
|
||||
show this help message and exit
|
||||
.TP
|
||||
\fB\-V\fR, \fB\-\-version\fR
|
||||
show version number and exit
|
||||
.TP
|
||||
\fB\-w\fR
|
||||
wait for command to complete (useful for interactive commands)
|
||||
.SH "SEE ALSO"
|
||||
.BR core-gui(1),
|
||||
.BR core-daemon(1),
|
||||
.BR coresendmsg(1),
|
||||
.BR unshare(1),
|
||||
.BR vcmd(1),
|
||||
.BR vnoded(1)
|
||||
.SH BUGS
|
||||
Report bugs to
|
||||
.BI core-dev@pf.itd.nrl.navy.mil.
|
||||
|
42
doc/man/vcmd.1
Normal file
42
doc/man/vcmd.1
Normal file
|
@ -0,0 +1,42 @@
|
|||
.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.40.4.
|
||||
.TH VCMD "1" "August 2013" "CORE" "User Commands"
|
||||
.SH NAME
|
||||
vcmd \- run a command in a network namespace created by vnoded
|
||||
.SH SYNOPSIS
|
||||
.B vcmd
|
||||
[\fI-h|-V\fR] [\fI-v\fR] [\fI-q|-i|-I\fR] \fI-c <channel name> -- command args\fR...
|
||||
.SH DESCRIPTION
|
||||
Run the specified command in the Linux namespace container specified by the
|
||||
control <channel name>, with the specified arguments.
|
||||
.SH OPTIONS
|
||||
.TP
|
||||
\fB\-h\fR, \fB\-\-help\fR
|
||||
show this help message and exit
|
||||
.TP
|
||||
\fB\-V\fR, \fB\-\-version\fR
|
||||
show version number and exit
|
||||
.TP
|
||||
\fB\-v\fR
|
||||
enable verbose logging
|
||||
.TP
|
||||
\fB\-q\fR
|
||||
run the command quietly, without local input or output
|
||||
.TP
|
||||
\fB\-i\fR
|
||||
run the command interactively (use PTY)
|
||||
.TP
|
||||
\fB\-I\fR
|
||||
run the command non\-interactively (without PTY)
|
||||
.TP
|
||||
\fB\-c\fR
|
||||
control channel name (e.g. '/tmp/pycore.45647/n3')
|
||||
.SH "SEE ALSO"
|
||||
.BR core-gui(1),
|
||||
.BR core-daemon(1),
|
||||
.BR coresendmsg(1),
|
||||
.BR netns(1),
|
||||
.BR vnoded(1),
|
||||
.SH BUGS
|
||||
Report bugs to
|
||||
.BI core-dev@pf.itd.nrl.navy.mil.
|
||||
|
44
doc/man/vnoded.1
Normal file
44
doc/man/vnoded.1
Normal file
|
@ -0,0 +1,44 @@
|
|||
.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.40.4.
|
||||
.TH VNODED "1" "August 2013" "CORE" "User Commands"
|
||||
.SH NAME
|
||||
vnoded \- network namespace daemon used by CORE to create a lightweight container
|
||||
.SH SYNOPSIS
|
||||
.B vnoded
|
||||
[\fI-h|-V\fR] [\fI-v\fR] [\fI-n\fR] [\fI-C <chdir>\fR] [\fI-l <logfile>\fR] [\fI-p <pidfile>\fR] \fI-c <control channel>\fR
|
||||
.SH DESCRIPTION
|
||||
Linux namespace container server daemon runs as PID 1 in the container.
|
||||
Normally this process is launched automatically by the CORE daemon.
|
||||
.SH OPTIONS
|
||||
.TP
|
||||
\fB\-h\fR, \fB\-\-help\fR
|
||||
show this help message and exit
|
||||
.TP
|
||||
\fB\-V\fR, \fB\-\-version\fR
|
||||
show version number and exit
|
||||
.TP
|
||||
\fB\-v\fR
|
||||
enable verbose logging
|
||||
.TP
|
||||
\fB\-n\fR
|
||||
do not create and run daemon within a new network namespace (for debug)
|
||||
.TP
|
||||
\fB\-C\fR
|
||||
change to the specified <chdir> directory
|
||||
.TP
|
||||
\fB\-l\fR
|
||||
log output to the specified <logfile> file
|
||||
.TP
|
||||
\fB\-p\fR
|
||||
write process id to the specified <pidfile> file
|
||||
.TP
|
||||
\fB\-c\fR
|
||||
establish the specified <control channel> for receiving control commands
|
||||
.SH "SEE ALSO"
|
||||
.BR core-gui(1),
|
||||
.BR core-daemon(1),
|
||||
.BR coresendmsg(1),
|
||||
.BR vcmd(1),
|
||||
.SH BUGS
|
||||
Report bugs to
|
||||
.BI core-dev@pf.itd.nrl.navy.mil.
|
||||
|
314
doc/ns3.rst
Normal file
314
doc/ns3.rst
Normal file
|
@ -0,0 +1,314 @@
|
|||
.. This file is part of the CORE Manual
|
||||
(c)2012-2013 the Boeing Company
|
||||
|
||||
.. _ns-3:
|
||||
|
||||
****
|
||||
ns-3
|
||||
****
|
||||
|
||||
.. index:: ns-3
|
||||
|
||||
This chapter describes running CORE with the
|
||||
`ns-3 network simulator <http://www.nsnam.org>`_.
|
||||
|
||||
.. _What_is_ns-3?:
|
||||
|
||||
What is ns-3?
|
||||
=============
|
||||
|
||||
.. index:: ns-3 Introduction
|
||||
|
||||
ns-3 is a discrete-event network simulator for Internet systems, targeted primarily for research and educational use. [#f1]_ By default, ns-3 simulates entire networks, from applications down to channels, and it does so in simulated time, instead of real (wall-clock) time.
|
||||
|
||||
CORE can run in conjunction with ns-3 to simulate some types of networks. CORE
|
||||
network namespace virtual nodes can have virtual TAP interfaces installed using
|
||||
the simulator for communication. The simulator needs to run at wall clock time
|
||||
with the real-time scheduler. In this type of configuration, the CORE
|
||||
namespaces are used to provide packets to the ns-3 devices and channels.
|
||||
This allows, for example, wireless models developed for ns-3 to be used
|
||||
in an emulation context.
|
||||
|
||||
Users simulate networks with ns-3 by writing C++ programs or Python scripts that
|
||||
import the ns-3 library. Simulation models are objects instantiated in these
|
||||
scripts. Combining the CORE Python modules with ns-3 Python bindings allow
|
||||
a script to easily set up and manage an emulation + simulation environment.
|
||||
|
||||
.. rubric:: Footnotes
|
||||
.. [#f1] http://www.nsnam.org
|
||||
|
||||
.. _ns-3_Scripting:
|
||||
|
||||
ns-3 Scripting
|
||||
==============
|
||||
|
||||
.. index:: ns-3 scripting
|
||||
|
||||
Currently, ns-3 is supported by writing
|
||||
:ref:`Python scripts <Python_Scripting>`, but not through
|
||||
drag-and-drop actions within the GUI.
|
||||
If you have a copy of the CORE source, look under :file:`core/daemon/ns3/examples/` for example scripts; a CORE installation package puts these under
|
||||
:file:`/usr/share/core/examples/corens3`.
|
||||
|
||||
To run these scripts, install CORE so the CORE Python libraries are accessible,
|
||||
and download and build ns-3. This has been tested using ns-3 releases starting
|
||||
with 3.11 (and through 3.16 as of this writing).
|
||||
|
||||
The first step is to open an ns-3 waf shell. `waf <http://code.google.com/p/waf/>`_ is the build system for ns-3. Opening a waf shell as root will merely
|
||||
set some environment variables useful for finding python modules and ns-3
|
||||
executables. The following environment variables are extended or set by
|
||||
issuing `waf shell`:
|
||||
|
||||
::
|
||||
|
||||
PATH
|
||||
PYTHONPATH
|
||||
LD_LIBRARY_PATH
|
||||
NS3_MODULE_PATH
|
||||
NS3_EXECUTABLE_PATH
|
||||
|
||||
Open a waf shell as root, so that network namespaces may be instantiated
|
||||
by the script with root permissions. For an example, run the
|
||||
:file:`ns3wifi.py`
|
||||
program, which simply instantiates 10 nodes (by default) and places them on
|
||||
an ns-3 WiFi channel. That is, the script will instantiate 10 namespace nodes,
|
||||
and create a special tap device that sends packets between the namespace
|
||||
node and a special ns-3 simulation node, where the tap device is bridged
|
||||
to an ns-3 WiFi network device, and attached to an ns-3 WiFi channel.
|
||||
|
||||
::
|
||||
|
||||
> cd ns-allinone-3.16/ns-3.16
|
||||
> sudo ./waf shell
|
||||
# # use '/usr/local' below if installed from source
|
||||
# cd /usr/share/core/examples/corens3/
|
||||
# python -i ns3wifi.py
|
||||
running ns-3 simulation for 600 seconds
|
||||
|
||||
>>> print session
|
||||
<corens3.obj.Ns3Session object at 0x1963e50>
|
||||
>>>
|
||||
|
||||
|
||||
The interactive Python shell allows some interaction with the Python objects
|
||||
for the emulation.
|
||||
|
||||
In another terminal, nodes can be accessed using *vcmd*:
|
||||
::
|
||||
|
||||
vcmd -c /tmp/pycore.10781/n1 -- bash
|
||||
root@n1:/tmp/pycore.10781/n1.conf#
|
||||
root@n1:/tmp/pycore.10781/n1.conf# ping 10.0.0.3
|
||||
PING 10.0.0.3 (10.0.0.3) 56(84) bytes of data.
|
||||
64 bytes from 10.0.0.3: icmp_req=1 ttl=64 time=7.99 ms
|
||||
64 bytes from 10.0.0.3: icmp_req=2 ttl=64 time=3.73 ms
|
||||
64 bytes from 10.0.0.3: icmp_req=3 ttl=64 time=3.60 ms
|
||||
^C
|
||||
--- 10.0.0.3 ping statistics ---
|
||||
3 packets transmitted, 3 received, 0% packet loss, time 2002ms
|
||||
rtt min/avg/max/mdev = 3.603/5.111/7.993/2.038 ms
|
||||
root@n1:/tmp/pycore.10781/n1.conf#
|
||||
|
||||
|
||||
The ping packets shown above are traversing an ns-3 ad-hoc Wifi simulated
|
||||
network.
|
||||
|
||||
To clean up the session, use the Session.shutdown() method from the Python
|
||||
terminal.
|
||||
|
||||
::
|
||||
|
||||
>>> print session
|
||||
<corens3.obj.Ns3Session object at 0x1963e50>
|
||||
>>>
|
||||
>>> session.shutdown()
|
||||
>>>
|
||||
|
||||
|
||||
A CORE/ns-3 Python script will instantiate an Ns3Session, which is a
|
||||
CORE Session
|
||||
having CoreNs3Nodes, an ns-3 MobilityHelper, and a fixed duration.
|
||||
The CoreNs3Node inherits from both the CoreNode and the ns-3 Node classes -- it
|
||||
is a network namespace having an associated simulator object. The CORE TunTap
|
||||
interface is used, represented by a ns-3 TapBridge in `CONFIGURE_LOCAL`
|
||||
mode, where ns-3 creates and configures the tap device. An event is scheduled
|
||||
to install the taps at time 0.
|
||||
|
||||
.. NOTE::
|
||||
The GUI can be used to run the :file:`ns3wifi.py`
|
||||
and :file:`ns3wifirandomwalk.py` scripts directly. First, ``core-daemon``
|
||||
must be
|
||||
stopped and run within the waf root shell. Then the GUI may be run as
|
||||
a normal user, and the *Execute Python Script...* option may be used from
|
||||
the *File* menu. Dragging nodes around in the :file:`ns3wifi.py` example
|
||||
will cause their ns-3 positions to be updated.
|
||||
|
||||
|
||||
Users may find the files :file:`ns3wimax.py` and :file:`ns3lte.py`
|
||||
in that example
|
||||
directory; those files were similarly configured, but the underlying
|
||||
ns-3 support is not present as of ns-3.16, so they will not work. Specifically,
|
||||
the ns-3 has to be extended to support bridging the Tap device to
|
||||
an LTE and a WiMax device.
|
||||
|
||||
.. _ns-3_Integration_details:
|
||||
|
||||
Integration details
|
||||
===================
|
||||
|
||||
.. index:: ns-3 integration details
|
||||
|
||||
The previous example :file:`ns3wifi.py` used Python API from the special Python
|
||||
objects *Ns3Session* and *Ns3WifiNet*. The example program does not import
|
||||
anything directly from the ns-3 python modules; rather, only the above
|
||||
two objects are used, and the API available to configure the underlying
|
||||
ns-3 objects is constrained. For example, *Ns3WifiNet* instantiates
|
||||
a constant-rate 802.11a-based ad hoc network, using a lot of ns-3 defaults.
|
||||
|
||||
However, programs may be written with a blend of ns-3 API and CORE Python
|
||||
API calls. This section examines some of the fundamental objects in
|
||||
the CORE ns-3 support. Source code can be found in
|
||||
:file:`daemon/ns3/corens3/obj.py` and example
|
||||
code in :file:`daemon/ns3/corens3/examples/`.
|
||||
|
||||
Ns3Session
|
||||
----------
|
||||
|
||||
The *Ns3Session* class is a CORE Session that starts an ns-3 simulation
|
||||
thread. ns-3 actually runs as a separate process on the same host as
|
||||
the CORE daemon, and the control of starting and stopping this process
|
||||
is performed by the *Ns3Session* class.
|
||||
|
||||
Example:
|
||||
|
||||
::
|
||||
|
||||
session = Ns3Session(persistent=True, duration=opt.duration)
|
||||
|
||||
Note the use of the duration attribute to control how long the ns-3 simulation
|
||||
should run. By default, the duration is 600 seconds.
|
||||
|
||||
Typically, the session keeps track of the ns-3 nodes (holding a node
|
||||
container for references to the nodes). This is accomplished via the
|
||||
`addnode()` method, e.g.:
|
||||
|
||||
::
|
||||
|
||||
for i in xrange(1, opt.numnodes + 1):
|
||||
node = session.addnode(name = "n%d" % i)
|
||||
|
||||
`addnode()` creates instances of a *CoreNs3Node*, which we'll cover next.
|
||||
|
||||
CoreNs3Node
|
||||
-----------
|
||||
|
||||
A *CoreNs3Node* is both a CoreNode and an ns-3 node:
|
||||
|
||||
::
|
||||
|
||||
class CoreNs3Node(CoreNode, ns.network.Node):
|
||||
''' The CoreNs3Node is both a CoreNode backed by a network namespace and
|
||||
an ns-3 Node simulator object. When linked to simulated networks, the TunTap
|
||||
device will be used.
|
||||
|
||||
|
||||
CoreNs3Net
|
||||
-----------
|
||||
|
||||
A *CoreNs3Net* derives from *PyCoreNet*. This network exists entirely
|
||||
in simulation, using the TunTap device to interact between the emulated
|
||||
and the simulated realm. *Ns3WifiNet* is a specialization of this.
|
||||
|
||||
As an example, this type of code would be typically used to add a WiFi
|
||||
network to a session:
|
||||
|
||||
::
|
||||
|
||||
wifi = session.addobj(cls=Ns3WifiNet, name="wlan1", rate="OfdmRate12Mbps")
|
||||
wifi.setposition(30, 30, 0)
|
||||
|
||||
The above two lines will create a wlan1 object and set its initial canvas
|
||||
position. Later in the code, the newnetif method of the CoreNs3Node can
|
||||
be used to add interfaces on particular nodes to this network; e.g.:
|
||||
|
||||
::
|
||||
|
||||
for i in xrange(1, opt.numnodes + 1):
|
||||
node = session.addnode(name = "n%d" % i)
|
||||
node.newnetif(wifi, ["%s/%s" % (prefix.addr(i), prefix.prefixlen)])
|
||||
|
||||
|
||||
.. _ns-3_Mobility:
|
||||
|
||||
Mobility
|
||||
========
|
||||
|
||||
.. index:: ns-3 mobility
|
||||
|
||||
Mobility in ns-3 is handled by an object (a MobilityModel) aggregated to
|
||||
an ns-3 node. The MobilityModel is able to report the position of the
|
||||
object in the ns-3 space. This is a slightly different model from, for
|
||||
instance, EMANE, where location is associated with an interface, and the
|
||||
CORE GUI, where mobility is configured by right-clicking on a WiFi
|
||||
cloud.
|
||||
|
||||
The CORE GUI supports the ability to render the underlying ns-3 mobility
|
||||
model, if one is configured, on the CORE canvas. For example, the
|
||||
example program :file:`ns3wifirandomwalk.py` uses five nodes (by default) in
|
||||
a random walk mobility model. This can be executed by starting the
|
||||
core daemon from an ns-3 waf shell:
|
||||
|
||||
::
|
||||
|
||||
# sudo bash
|
||||
# cd /path/to/ns-3
|
||||
# ./waf shell
|
||||
# core-daemon
|
||||
|
||||
and in a separate window, starting the CORE GUI (not from a waf shell)
|
||||
and selecting the
|
||||
*Execute Python script...* option from the File menu, selecting the
|
||||
:file:`ns3wifirandomwalk.py` script.
|
||||
|
||||
The program invokes ns-3 mobility through the following statement:
|
||||
|
||||
::
|
||||
|
||||
session.setuprandomwalkmobility(bounds=(1000.0, 750.0, 0))
|
||||
|
||||
This can be replaced by a different mode of mobility, in which nodes
|
||||
are placed according to a constant mobility model, and a special
|
||||
API call to the CoreNs3Net object is made to use the CORE canvas
|
||||
positions.
|
||||
|
||||
::
|
||||
|
||||
- session.setuprandomwalkmobility(bounds=(1000.0, 750.0, 0))
|
||||
+ session.setupconstantmobility()
|
||||
+ wifi.usecorepositions()
|
||||
|
||||
|
||||
In this mode, the user dragging around the nodes on the canvas will
|
||||
cause CORE to update the position of the underlying ns-3 nodes.
|
||||
|
||||
|
||||
.. _ns-3_Under_Development:
|
||||
|
||||
Under Development
|
||||
=================
|
||||
|
||||
.. index:: limitations with ns-3
|
||||
|
||||
Support for ns-3 is fairly new and still under active development.
|
||||
Improved support may be found in the development snapshots available on the web.
|
||||
|
||||
The following limitations will be addressed in future releases:
|
||||
|
||||
* GUI configuration and control - currently ns-3 networks can only be
|
||||
instantiated from a Python script or from the GUI hooks facility.
|
||||
|
||||
* Model support - currently the WiFi model is supported. The WiMAX and 3GPP LTE
|
||||
models have been experimented with, but are not currently working with the
|
||||
TapBridge device.
|
||||
|
||||
|
60
doc/performance.rst
Normal file
60
doc/performance.rst
Normal file
|
@ -0,0 +1,60 @@
|
|||
.. This file is part of the CORE Manual
|
||||
(c)2012 the Boeing Company
|
||||
|
||||
.. _Performance:
|
||||
|
||||
.. include:: constants.txt
|
||||
|
||||
***********
|
||||
Performance
|
||||
***********
|
||||
|
||||
.. index:: performance
|
||||
|
||||
.. index:: number of nodes
|
||||
|
||||
The top question about the performance of CORE is often
|
||||
*how many nodes can it handle?* The answer depends on several factors:
|
||||
|
||||
* Hardware - the number and speed of processors in the computer, the available
|
||||
processor cache, RAM memory, and front-side bus speed may greatly affect
|
||||
overall performance.
|
||||
* Operating system version - Linux or FreeBSD, and the specific kernel versions
|
||||
used will affect overall performance.
|
||||
* Active processes - all nodes share the same CPU resources, so if one or more
|
||||
nodes is performing a CPU-intensive task, overall performance will suffer.
|
||||
* Network traffic - the more packets that are sent around the virtual network
|
||||
increases the amount of CPU usage.
|
||||
* GUI usage - widgets that run periodically, mobility scenarios, and other GUI
|
||||
interactions generally consume CPU cycles that may be needed for emulation.
|
||||
|
||||
On a typical single-CPU Xeon 3.0GHz server machine with 2GB RAM running FreeBSD
|
||||
|BSDVERSION|, we have found it reasonable to run 30-75 nodes running
|
||||
OSPFv2 and OSPFv3 routing. On this hardware CORE can instantiate 100 or more
|
||||
nodes, but at that point it becomes critical as to what each of the nodes is
|
||||
doing.
|
||||
|
||||
.. index:: network performance
|
||||
|
||||
Because this software is primarily a network emulator, the more appropriate
|
||||
question is *how much network traffic can it handle?* On the same 3.0GHz server
|
||||
described above, running FreeBSD 4.11, about 300,000 packets-per-second can be
|
||||
pushed through the system. The number of hops and the size of the packets is
|
||||
less important. The limiting factor is the number of times that the operating
|
||||
system needs to handle a packet. The 300,000 pps figure represents the number
|
||||
of times the system as a whole needed to deal with a packet. As more network
|
||||
hops are added, this increases the number of context switches and decreases the
|
||||
throughput seen on the full length of the network path.
|
||||
|
||||
.. NOTE::
|
||||
The right question to be asking is *"how much traffic?"*,
|
||||
not *"how many nodes?"*.
|
||||
|
||||
For a more detailed study of performance in CORE, refer to the following publications:
|
||||
|
||||
* J\. Ahrenholz, T. Goff, and B. Adamson, Integration of the CORE and EMANE Network Emulators, Proceedings of the IEEE Military Communications Conference 2011, November 2011.
|
||||
|
||||
* Ahrenholz, J., Comparison of CORE Network Emulation Platforms, Proceedings of the IEEE Military Communications Conference 2010, pp. 864-869, November 2010.
|
||||
|
||||
* J\. Ahrenholz, C. Danilov, T. Henderson, and J.H. Kim, CORE: A real-time network emulator, Proceedings of IEEE MILCOM Conference, 2008.
|
||||
|
137
doc/scripting.rst
Normal file
137
doc/scripting.rst
Normal file
|
@ -0,0 +1,137 @@
|
|||
.. This file is part of the CORE Manual
|
||||
(c)2012 the Boeing Company
|
||||
|
||||
.. _Python_Scripting:
|
||||
|
||||
****************
|
||||
Python Scripting
|
||||
****************
|
||||
|
||||
.. index:: Python scripting
|
||||
|
||||
CORE can be used via the :ref:`GUI <Using_the_CORE_GUI>` or Python scripting.
|
||||
Writing your own Python scripts offers a rich programming
|
||||
environment with complete control over all aspects of the emulation.
|
||||
This chapter provides a brief introduction to scripting. Most of the
|
||||
documentation is available from sample scripts,
|
||||
or online via interactive Python.
|
||||
|
||||
.. index:: sample Python scripts
|
||||
|
||||
The best starting point is the sample scripts that are
|
||||
included with CORE. If you have a CORE source tree, the example script files
|
||||
can be found under :file:`core/daemon/examples/netns/`. When CORE is installed
|
||||
from packages, the example script files will be in
|
||||
:file:`/usr/share/core/examples/netns/` (or the :file:`/usr/local/...` prefix
|
||||
when installed from source.) For the most part, the example scripts
|
||||
are self-documenting; see the comments contained within the Python code.
|
||||
|
||||
The scripts should be run with root privileges because they create new
|
||||
network namespaces. In general, a CORE Python script does not connect to the
|
||||
CORE daemon, :file:`core-daemon`; in fact, :file:`core-daemon`
|
||||
is just another Python script
|
||||
that uses the CORE Python modules and exchanges messages with the GUI.
|
||||
To connect the GUI to your scripts, see the included sample scripts that
|
||||
allow for GUI connections.
|
||||
|
||||
Here are the basic elements of a CORE Python script:
|
||||
::
|
||||
|
||||
#!/usr/bin/python
|
||||
|
||||
from core import pycore
|
||||
|
||||
session = pycore.Session(persistent=True)
|
||||
node1 = session.addobj(cls=pycore.nodes.CoreNode, name="n1")
|
||||
node2 = session.addobj(cls=pycore.nodes.CoreNode, name="n2")
|
||||
hub1 = session.addobj(cls=pycore.nodes.HubNode, name="hub1")
|
||||
node1.newnetif(hub1, ["10.0.0.1/24"])
|
||||
node2.newnetif(hub1, ["10.0.0.2/24"])
|
||||
|
||||
node1.icmd(["ping", "-c", "5", "10.0.0.2"])
|
||||
session.shutdown()
|
||||
|
||||
|
||||
The above script creates a CORE session having two nodes connected with a hub.
|
||||
The first node pings the second node with 5 ping packets; the result is
|
||||
displayed on screen.
|
||||
|
||||
A good way to learn about the CORE Python modules is via interactive Python.
|
||||
Scripts can be run using *python -i*. Cut and paste the simple script
|
||||
above and you will have two nodes connected by a hub, with one node running
|
||||
a test ping to the other.
|
||||
|
||||
The CORE Python modules are documented with comments in the code. From an
interactive Python shell, you can retrieve online help about the various
classes and methods; for example *help(pycore.nodes.CoreNode)* or
*help(pycore.Session)*.

An interactive development environment (IDE) is available for browsing
the CORE source, the
`Eric Python IDE <http://eric-ide.python-projects.org/index.html>`_.
CORE has a project file that can be opened by Eric, in the source under
:file:`core/daemon/CORE.e4p`.
This IDE
has a class browser for viewing a tree of classes and methods. It features
syntax highlighting, auto-completion, indenting, and more. One feature that
is helpful with learning the CORE Python modules is the ability to generate
class diagrams; right-click on a class, choose *Diagrams*, and
*Class Diagram*.

.. index:: daemon versus script
.. index:: script versus daemon
.. index:: script with GUI support
.. index:: connecting GUI to script

.. NOTE::
   The CORE daemon :file:`core-daemon` manages a list of sessions and allows
   the GUI to connect and control sessions. Your Python script uses the
   same CORE modules but runs independently of the daemon. The daemon
   does not need to be running for your script to work.

The session created by a Python script may be viewed in the GUI if certain
steps are followed. The GUI has a :ref:`File_Menu`, *Execute Python script...*
option for running a script and automatically connecting to it. Once connected,
normal GUI interaction is possible, such as moving and double-clicking nodes,
activating Widgets, etc.

The script should have a line such as the following for running it from
the GUI.
::

  if __name__ == "__main__" or __name__ == "__builtin__":
      main()

Also, the script should add its session to the session list after creating it.
A global ``server`` variable is exposed to the script pointing to the
``CoreServer`` object in the :file:`core-daemon`.
::

  def add_to_server(session):
      ''' Add this session to the server's list if this script is executed from
      the core-daemon server.
      '''
      global server
      try:
          server.addsession(session)
          return True
      except NameError:
          return False

::

  session = pycore.Session(persistent=True)
  add_to_server(session)

Finally, nodes and networks need to have their coordinates set to something,
otherwise they will be grouped at the coordinates ``<0, 0>``. First sketching
the topology in the GUI and then using the *Export Python script* option may
help here.
::

  switch.setposition(x=80,y=50)

A fully-worked example script that you can launch from the GUI is available
in the file :file:`switch.py` in the examples directory.
1729
doc/usage.rst
Normal file
1729
doc/usage.rst
Normal file
File diff suppressed because it is too large
Load diff
Loading…
Add table
Add a link
Reference in a new issue