initial import (Boeing r1752, NRL r878)
commit f8f46d28be
394 changed files with 99738 additions and 0 deletions
Changelog (new file, 194 lines)
@@ -0,0 +1,194 @@
2013-08-12 CORE 4.6

* NOTE: cored is now core-daemon, and core is now core-gui (for Debian acceptance)
* NOTE: /etc/init.d/core is now /etc/init.d/core-daemon (for insserv compatibility)
* EMANE:
  - don't start EMANE locally if no local NEMs
  - EMANE poststartup() to re-transmit location events during initialization
  - added debug port to EMANE options
  - added a basic EMANE 802.11 CORE Python script example
  - expose transport XML block generation to EmaneModels
  - expose NEM entry to the EmaneModel so it can be overridden by a model
  - add the control interface bridge prior to starting EMANE, as some models may depend on the controlnet functionality
  - added EMANE model to CORE converter
  - parse lat/long/alt from node messages, for moving nodes using command-line
  - fix bug #196 incorrect distance when traversing UTM zones

* GUI:
  - added Cut, Copy, and Paste options to the Edit menu
  - paste will copy selected services and take care of node and interface renumbering
  - implement Edit > Find dialog for searching nodes and links
  - when copying existing file for a service, perform string replacement of: "~", "%SESSION%", "%SESSION_DIR%", "%SESSION_USER%", "%NODE%", "%NODENAME%"
  - use CORE_DATA_DIR instead of LIBDIR
  - fix Adjacency Widget to work with OSPFv2-only networks

* BUILD:
  - build/packaging improvements for inclusion on Debian
  - fix error when running scenario with a mobility script in batch mode
  - include Linux kernel patches for 3.8
  - renamed core-cleanup.sh to core-cleanup for Debian conformance
  - don't always generate man pages from Makefile; new manpages for coresendmsg and core-daemon

* BUGFIXES:
  - don't auto-assign IPv4/IPv6 addresses when none received in Link Messages (session reconnect)
  - fixed lock view
  - fix GUI spinbox errors for Tk 8.5.8 (RHEL/CentOS 6.2)
  - fix broker node count for distributed session entering the RUNTIME state when (non-EMANE) WLANs or GreTapBridges are involved
  - fix "file exists" error message when distributed session number is re-used and servers file is written
  - fix bug #194 configuration dialog too long, make dialog scrollable/resizable
  - allow float values for loss and duplicates percent
  - fix the following bugs: 166, 172, 177, 178, 192, 194, 196, 201, 206, 210, 212, 213, 214, 221

2013-04-13 CORE 4.5

* GUI:
  - improved behavior when starting GUI without daemon, or using File New after connection with daemon is lost
  - fix various GUI issues when reconnecting to a session
  - support 3D GUI via output to SDT3D
  - added "Execute Python script..." entry to the File Menu
  - support user-defined terminal program instead of hard-coded xterm
  - added session options for "enable RJ45s", "preserve session dir"
  - added buttons to the IP Addresses dialog for removing all/selected IPv4/IPv6
  - allow sessions with multiple canvases to enter RUNTIME state
  - added "--addons" startup mode to pass control to code included from addons dir
  - added "Locked" entry to View menu to prevent moving items
  - use currently selected node type when invoking a topology generator
  - updated throughput plots with resizing, color picker, plot labels, locked scales, and save/load plot configuration with imn file
  - improved session dialog
* EMANE:
  - EMANE 0.8.1 support with backwards-compatibility for 0.7.4
  - extend CommEffect model to generate CommEffect events upon receipt of Link Messages having link effects
* Services:
  - updated FTP service with root directory for anonymous users
  - added HTTP, PCAP, BIRD, RADVD, and Babel services
  - support copying existing files instead of always generating them
  - added "Services..." entry to node right-click menu
  - added "View" button for side-by-side comparison when copying customized config files
  - updated Quagga daemons to wait for zebra.vty VTY file before starting
* General:
  - XML import and export
  - renamed "cored.py" to "cored", "coresendmsg.py" to "coresendmsg"
  - code reorganization and clean-up
  - updated XML export to write NetworkPlan, MotionPlan, and ServicePlan within a Scenario tag, added new "Save As XML..." File menu entry
  - added script_start/pause/stop options to Ns2ScriptedMobility
  - "python" source sub-directory renamed to "daemon"
  - added "cored -e" option to execute a Python script, adding its session to the active sessions list, allowing for GUI connection
  - support comma-separated list for custom_services_dir in core.conf file
  - updated kernel patches for Linux kernel 3.5
  - support RFC 6164-style IPv6 /127 addressing
* ns-3:
  - integrate ns-3 node location between CORE and ns-3 simulation
  - added ns-3 random walk mobility example
  - updated ns-3 Wifi example to allow GUI connection and moving of nodes
* fixed the following bugs: 54, 103, 111, 136, 145, 153, 157, 160, 161, 162, 164, 165, 168, 170, 171, 173, 174, 176, 184, 190, 193

2012-09-25 CORE 4.4

* GUI:
  - real-time bandwidth plotting tool
  - added Wireshark and tshark right-click menu items
  - X,Y coordinates shown in the status bar
  - updated GUI attribute option to link messages for changing color/width/dash
  - added sample IPsec and VPN scenarios, how many nodes script
  - added jitter parameter to WLANs
  - renamed Experiment menu to Session menu, added session options
  - use 'key=value' configuration for services, EMANE models, WLAN models, etc.
  - save only service values that have been customized
  - copy service parameters from one customized service to another
  - right-click menu to start/stop/restart each service
* EMANE:
  - EMANE 0.7.4 support
  - added support for EMANE CommEffect model and Comm Effect controller GUI
  - added support for EMANE Raw Transport when using RJ45 devices
* Services:
  - improved service customization; allow a service to define custom Tcl tab
  - added vtysh.conf for Quagga service to support 'write mem'
  - support scheduled events and services that start N seconds after runtime
  - added UCARP service
* Documentation:
  - converted the CORE manual to reStructuredText using Sphinx; added Python docs
* General:
  - Python code reorganization
  - improved cored.py thread locking
  - merged xen branch into trunk
  - added an event queue to a session with notion of time zero
  - added UDP support to cored.py
  - use UDP by default in coresendmsg.py; added '-H' option to print examples
  - enter a bash shell by default when running vcmd with no arguments
  - fixes to distributed emulation entering runtime state
  - write 'nodes' file upon session startup
  - make session number and other attributes available in environment
  - support /etc/core/environment and ~/.core/environment files
  - added Ns2ScriptedMobility model to Python, removed from the GUI
  - namespace nodes mount a private /sys

  - fixed the following bugs: 80, 81, 84, 99, 104, 109, 110, 122, 124, 131, 133, 134, 135, 137, 140, 143, 144, 146, 147, 151, 154, 155

2012-03-07 CORE 4.3

* EMANE 0.7.2 and 0.7.3 support
* hook scripts: customize actions at any of six different session states
* Check Emulation Light (CEL) exception feedback system
* added FTP and XORP services, and service validate commands
* services can flag when customization is required
* Python classes to support ns-3 simulation experiments
* write state, node X,Y position, and servers to pycore session dir
* removed over 9,000 lines of unused GUI code
* performance monitoring script
* batch mode improvements and --closebatch option
* export session to EmulationScript XML files
* basic range model moved from GUI to Python, supports 3D coordinates
* improved WLAN dialog with tabs
* added PhysicalNode class for joining real nodes with emulated networks
* fixed the following bugs: 50, 75, 76, 79, 82, 83, 85, 86, 89, 90, 92, 94, 96, 98, 100, 112, 113, 116, 119, 120

2011-08-19 CORE 4.2

* EMANE 0.7.1 support
  - support for Bypass model, Universal PHY, logging, realtime
* configurable MAC addresses
* control interfaces (backchannel between node and host)
* service customization dialog improved (tabbed)
* new testing scripts for MDR and EMANE performance testing
* improved upgrading of old imn files
* new coresendmsg.py utility (deprecates libcoreapi and coreapisend)
* new security services, custom service becomes UserDefined
* new services and Python scripting chapters in manual
* fixes to distributed emulation, linking tunnels/RJ45s with WLANs/hubs/switches
* fixed the following bugs: 18, 32, 34, 38, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 52, 53, 55, 57, 58, 60, 62, 64, 65, 66, 68, 71, 72, 74

2011-01-05 CORE 4.1
* new icons for toolbars and nodes
* node services introduced, node models deprecated
* customizable node types
* traffic flow editor with MGEN support
* user configs moved from /etc/core/* to ~/.core/
* allocate addresses from custom IPv4/IPv6 prefixes
* distributed emulation using GRE tunnels
* FreeBSD 8.1 now uses cored.py
* EMANE 0.6.4 support
* numerous bugfixes

2010-08-17 CORE 4.0
* Python framework with Linux network namespace (netns) support (Linux netns is now the primary supported platform)
* ability to close the GUI and later reconnect to a running session (netns only)
* EMANE integration (netns only)
* new topology generators, host file generator
* user-editable Observer Widgets
* use of /etc/core instead of /usr/local/etc/core
* various bugfixes

2009-09-15 CORE 3.5

2009-06-23 CORE 3.4

2009-03-11 CORE 3.3
LICENSE (new file, 22 lines)
@@ -0,0 +1,22 @@
Copyright (c) 2005-2013, the Boeing Company.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright notice,
   this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
   this list of conditions and the following disclaimer in the documentation
   and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
Makefile.am (new file, 71 lines)
@@ -0,0 +1,71 @@
# CORE
# (c)2010-2012 the Boeing Company.
# See the LICENSE file included in this distribution.
#
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
#
# Top-level Makefile for CORE project.
#

if WANT_DOCS
DOCS = doc
endif
if WANT_GUI
GUI = gui
endif
if WANT_DAEMON
DAEMON = scripts daemon
endif

# keep docs last due to dependencies on binaries
SUBDIRS = ${GUI} ${DAEMON} ${DOCS}

ACLOCAL_AMFLAGS = -I config

# extra files to include with distribution tarball
EXTRA_DIST = bootstrap.sh LICENSE README-Xen Changelog kernel \
	packaging/bsd \
	packaging/deb/compat \
	packaging/deb/copyright \
	packaging/deb/changelog \
	packaging/deb/core.postrm \
	packaging/deb/rules \
	packaging/deb/control \
	packaging/deb/core-daemon.install.in \
	packaging/deb/core-gui.install.in \
	packaging/rpm/core.spec.in \
	packaging/rpm/specfiles.sh

DISTCLEAN_TARGETS = aclocal.m4 config.h.in

# extra cruft to remove
DISTCLEANFILES = aclocal.m4 config.h.in configure Makefile.in

# don't include svn dirs in source tarball
dist-hook:
	rm -rf `find $(distdir)/kernel -name .svn`
	rm -rf $(distdir)/packaging/bsd/.svn

# build a source RPM using Fedora ~/rpmbuild dirs
.PHONY: rpm
rpm:
	rpmdev-setuptree
	cp -afv core-*.tar.gz ~/rpmbuild/SOURCES
	cp -afv packaging/rpm/core.spec ~/rpmbuild/SPECS
	rpmbuild -bs ~/rpmbuild/SPECS/core.spec

# build a Ubuntu deb package using CDBS
.PHONY: deb
deb:
	rm -rf debian
	mkdir -p debian
	cp -vf packaging/deb/* debian/
	@echo "First create source archive with: dpkg-source -b core-4.5"
	@echo "Then build with: pbuilder-dist precise i386 build core*.dsc"

.PHONY: core-restart
core-restart:
	/etc/init.d/core-daemon stop
	daemon/sbin/core-cleanup
	rm -f /var/log/core-daemon.log
	/etc/init.d/core-daemon start
README (new file, 61 lines)
@@ -0,0 +1,61 @@
CORE: Common Open Research Emulator
Copyright (c)2005-2013 the Boeing Company.
See the LICENSE file included in this distribution.

== ABOUT =======================================================================
CORE is a tool for emulating networks using a GUI or Python scripts. The CORE
project site (1) is a good source of introductory information, with a manual,
screenshots, and demos about this software. Also, a supplemental
Google Code page (2) hosts a wiki, blog, bug tracker, and quickstart guide.

1. http://cs.itd.nrl.navy.mil/work/core/
2. http://code.google.com/p/coreemu/


== BUILDING CORE ===============================================================

To build this software you should use:

    ./bootstrap.sh
    ./configure
    make
    sudo make install

Here is what is installed with 'make install':

    /usr/local/bin/core-gui
    /usr/local/sbin/core-daemon
    /usr/local/sbin/[vcmd, vnoded, coresendmsg, core-cleanup.sh]
    /usr/local/lib/core/*
    /usr/local/share/core/*
    /usr/local/lib/python2.6/dist-packages/core/*
    /usr/local/lib/python2.6/dist-packages/[netns,vcmd].so
    /etc/core/*
    /etc/init.d/core

See the manual for the software required for building CORE.


== RUNNING CORE ================================================================

First start the CORE services:

    sudo /etc/init.d/core-daemon start

This automatically runs the core-daemon program.
Assuming the GUI is in your PATH, run the CORE GUI by typing the following:

    core-gui

This launches the CORE GUI. You do not need to run the GUI as root.

== SUPPORT =====================================================================

If you have questions, comments, or trouble, please use the CORE mailing lists:
  - core-users for general comments and questions
    http://pf.itd.nrl.navy.mil/mailman/listinfo/core-users
  - core-dev for bugs, compile errors, and other development issues
    http://pf.itd.nrl.navy.mil/mailman/listinfo/core-dev
README-Xen (new file, 87 lines)
@@ -0,0 +1,87 @@
CORE Xen README

This file describes the xen branch of the CORE development tree which enables
machines based on Xen domUs. When you edit node types, you are given the option
of changing the machine type (netns, physical, or xen) and the profile for
each node type.

CORE will create each domU machine on the fly, having a bootable ISO image that
contains the root filesystem, and a special persistent area (/rtr/persistent)
using an LVM volume where configuration is stored. See the /etc/core/xen.conf
file for related settings.
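
(Editor's note, not part of the original README: as a rough sketch, the two
xen.conf options referenced later in this file might be set as follows; the
values are only illustrative placeholders, not defaults.)

    # LVM volume group used for domU persistent areas (see step 2 below)
    vg_name = core_vg
    # bootable ISO image providing the domU root filesystem (see step 7 below)
    iso_file = /opt/core-xen/iso-files/core-domU.iso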

INSTALLATION

1. Tested under OpenSUSE 11.3 which allows installing a Xen dom0 during the
   install process.

2. Create an LVM volume group having enough free space available for CORE to
   build logical volumes for domU nodes. The name of this group is set with the
   'vg_name=' option in /etc/core/xen.conf. (With 256M per persistent area,
   10GB would allow for 40 nodes.)

3. To get libev-devel in OpenSUSE, use:
   zypper ar http://download.opensuse.org/repositories/X11:/windowmanagers/openSUSE_11.3 WindowManagers
   zypper install libev-devel

4. In addition to the normal CORE dependencies
   (see http://code.google.com/p/coreemu/wiki/Quickstart), pyparted-3.2 is used
   when creating LVM partitions and decorator-3.3.0 is a dependency for
   pyparted. The 'python setup.py install' and 'make install' need to be
   performed on these source tarballs as no packages are available.

   tar xzf decorator-3.3.0.tar.gz
   cd decorator-3.3.0
   python setup.py build
   python setup.py install

   tar xzf pyparted-3.2.tar.gz
   cd pyparted-3.2
   ./configure
   make
   make install

5. These Xen parameters were used for dom0, by editing /boot/grub/menu.lst:
   a) Add options to the "kernel /xen.gz" line:
      gnttab_max_nr_frames=128 dom0_mem=1G dom0_max_vcpus=2 dom0_vcpus_pin
   b) Make Xen the default boot by editing the "default" line with the
      index for the Xen boot option, e.g. change "default 0" to "default 2".
   Reboot to enable the Xen kernel.

6. Run CORE's ./configure script as root to properly discover sbin binaries.

   tar xzf core-xen.tgz
   cd core-xen
   ./bootstrap.sh
   ./configure
   make
   make install

7. Put your ISO images in /opt/core-xen/iso-files and set the "iso_file="
   xen.conf option appropriately.

8. Uncomment the controlnet entry in /etc/core/core.conf:
   # establish a control backchannel for accessing nodes
   controlnet = 172.16.0.0/24

   This setting governs what IP addresses will be used for a control channel.
   Given this default setting the host dom0 will have the address 172.16.0.254
   assigned to a bridge device; domU VMs will get interfaces joined to this
   bridge, having addresses such as 172.16.0.1 for node n1, 172.16.0.2 for n2,
   etc.

   When 'controlnet =' is unspecified in the core.conf file, double-clicking on
   a node results in the 'xm console' method. A key mapping is set up so you
   can press 'F1' then 'F2' to login as root. The key ctrl+']' detaches from the
   console. Only one console is available per domU VM.

9. When 'controlnet =' is specified, double-clicking on a node results in an
   attempt to ssh to that node's control IP address.
   Put a host RSA key for use on the domUs in /opt/core-xen/ssh:

   mkdir -p /opt/core-xen/ssh
   ssh-keygen -t rsa -f /opt/core-xen/ssh/ssh_host_rsa_key
   cp ~/.ssh/id_rsa.pub /opt/core-xen/ssh/authorized_keys
   chmod 600 /opt/core-xen/ssh/authorized_keys
bootstrap.sh (new executable file, 48 lines)
@@ -0,0 +1,48 @@
#!/bin/sh
#
# (c)2010-2012 the Boeing Company
#
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
#
# Bootstrap the autoconf system.
#

if [ x$1 = x ]; then # PASS
    echo "Bootstrapping the autoconf system..."
    # echo " These autotools programs should be installed for this script to work:"
    # echo " aclocal, libtoolize, autoheader, automake, autoconf"
    echo "(Messages below about copying and installing files are normal.)"
elif [ x$1 = xclean ]; then # clean - take out the trash
    echo "Cleaning up the autoconf mess..."
    rm -rf autom4te.cache config BSDmakefile
    exit 0;
else # help text
    echo "usage: $0 [clean]"
    echo -n "  Use this script to bootstrap the autoconf build system prior to "
    echo "running the "
    echo "  ./configure script."
    exit 1;
fi

# try to keep everything nice and tidy in ./config
if ! [ -d "config" ]; then
    mkdir config
fi

# on FreeBSD, discourage use of make
UNAME=`uname`
if [ x${UNAME} = xFreeBSD ]; then
    echo "all:" > BSDmakefile
    echo '	@echo "Please use GNU make instead by typing:"' >> BSDmakefile
    echo '	@echo "  gmake"' >> BSDmakefile
    echo '	@echo ""' >> BSDmakefile
fi

# bootstrapping
echo "(1/4) Running aclocal..." && aclocal -I config \
    && echo "(2/4) Running autoheader..." && autoheader \
    && echo "(3/4) Running automake..." \
    && automake --add-missing --copy --foreign \
    && echo "(4/4) Running autoconf..." && autoconf \
    && echo "" \
    && echo "You are now ready to run \"./configure\"."
configure.ac (new file, 368 lines)
@@ -0,0 +1,368 @@
#
# Copyright (c) 2010-2013 the Boeing Company
# See the LICENSE file included in this distribution.
#
# CORE configure script
#
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
#
# -*- Autoconf -*-
# Process this file with autoconf to produce a configure script.

#
# this defines the CORE version number, must be static for AC_INIT
#
AC_INIT(core, 4.5svn5, core-dev@pf.itd.nrl.navy.mil)
VERSION=$PACKAGE_VERSION
CORE_VERSION=$PACKAGE_VERSION
CORE_VERSION_DATE=20130816
COREDPY_VERSION=$PACKAGE_VERSION

#
# autoconf and automake initialization
#
AC_CONFIG_SRCDIR([daemon/src/version.h.in])
AC_CONFIG_AUX_DIR(config)
AC_CONFIG_MACRO_DIR(config)
AC_CONFIG_HEADERS([config.h])
AM_INIT_AUTOMAKE

AC_SUBST(CORE_VERSION)
AC_SUBST(CORE_VERSION_DATE)
AC_SUBST(COREDPY_VERSION)

#
# some of the following directory variables are not expanded at configure-time,
# so we have special checks to expand them
#

# CORE GUI files in LIBDIR
# AC_PREFIX_DEFAULT is /usr/local, but not expanded yet
if test "x$prefix" = "xNONE" ; then
    prefix="/usr/local"
fi
if test "$libdir" = "\${exec_prefix}/lib" ; then
    libdir="${prefix}/lib"
fi
# this can be /usr/lib or /usr/lib64
LIB_DIR="${libdir}"
# don't let the Tcl files install to /usr/lib64/core
CORE_LIB_DIR="${prefix}/lib/core"
AC_SUBST(LIB_DIR)
AC_SUBST(CORE_LIB_DIR)
SBINDIR="${prefix}/sbin"
AC_SUBST(SBINDIR)
BINDIR="${prefix}/bin"
AC_SUBST(BINDIR)

# CORE daemon configuration file (core.conf) in CORE_CONF_DIR
if test "$sysconfdir" = "\${prefix}/etc" ; then
    sysconfdir="/etc"
    CORE_CONF_DIR="/etc/core"
else
    CORE_CONF_DIR="$sysconfdir/core"
fi
AC_SUBST(CORE_CONF_DIR)
if test "$datarootdir" = "\${prefix}/share" ; then
    datarootdir="${prefix}/share"
fi
CORE_DATA_DIR="$datarootdir/core"
AC_SUBST(CORE_DATA_DIR)

# CORE GUI configuration files and preferences in CORE_GUI_CONF_DIR
# scenario files in ~/.core/configs/
#AC_ARG_VAR(CORE_GUI_CONF_DIR, [GUI configuration directory.])
AC_ARG_WITH([guiconfdir],
    [AS_HELP_STRING([--with-guiconfdir=dir],
        [specify GUI configuration directory])],
    [CORE_GUI_CONF_DIR="$with_guiconfdir"],
    [CORE_GUI_CONF_DIR="\${HOME}/.core"])
AC_SUBST(CORE_GUI_CONF_DIR)
AC_ARG_ENABLE([gui],
    [AS_HELP_STRING([--enable-gui[=ARG]],
        [build and install the GUI (default is yes)])],
    [], [enable_gui=yes])
AC_SUBST(enable_gui)
AC_ARG_ENABLE([daemon],
    [AS_HELP_STRING([--enable-daemon[=ARG]],
        [build and install the daemon with Python modules
        (default is yes)])],
    [], [enable_daemon=yes])
AC_SUBST(enable_daemon)
if test "x$enable_daemon" = "xno"; then
    want_python=no
    want_bsd=no
    want_linux_netns=no
fi

# CORE state files
if test "$localstatedir" = "\${prefix}/var" ; then
    # use /var instead of /usr/local/var (/usr/local/var/log isn't standard)
    CORE_STATE_DIR="/var"
else
    CORE_STATE_DIR="$localstatedir"
fi
AC_SUBST(CORE_STATE_DIR)

# default compiler flags
# _GNU_SOURCE is defined to get c99 defines for lrint()
CFLAGS="$CFLAGS -O3 -Werror -Wall -D_GNU_SOURCE"
# debug flags
#CFLAGS="$CFLAGS -g -Werror -Wall -D_GNU_SOURCE"

# Checks for programs.
AC_PROG_AWK
AC_PROG_CC
AC_PROG_INSTALL
AC_PROG_MAKE_SET
AC_PROG_RANLIB

AM_PATH_PYTHON(2.6, want_python=yes, want_python=no)
SEARCHPATH="/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/sbin:/usr/local/bin"
#
# daemon dependencies
#
if test "x$enable_daemon" = "xyes" ; then
    AC_CHECK_PROG(brctl_path, brctl, $as_dir, no, $SEARCHPATH)
    AC_CHECK_PROG(sysctl_path, sysctl, $as_dir, no, $SEARCHPATH)
    AC_CHECK_PROG(ebtables_path, ebtables, $as_dir, no, $SEARCHPATH)
    AC_CHECK_PROG(ip_path, ip, $as_dir, no, $SEARCHPATH)
    AC_CHECK_PROG(tc_path, tc, $as_dir, no, $SEARCHPATH)
    AC_CHECK_PROG(ifconfig_path, ifconfig, $as_dir, no, $SEARCHPATH)
    AC_CHECK_PROG(ngctl_path, ngctl, $as_dir, no, $SEARCHPATH)
    AC_CHECK_PROG(vimage_path, vimage, $as_dir, no, $SEARCHPATH)
    AC_CHECK_PROG(mount_path, mount, $as_dir, no, $SEARCHPATH)
    AC_CHECK_PROG(umount_path, umount, $as_dir, no, $SEARCHPATH)
    AC_CHECK_PROG(convert, convert, yes, no, $SEARCHPATH)
fi

#AC_CHECK_PROG(dia, dia, yes, no)
AC_CHECK_PROG(help2man, help2man, yes, no, $SEARCHPATH)
if test "x$convert" = "xno" ; then
    AC_MSG_WARN([Could not locate ImageMagick convert.])
    #want_docs_missing="convert"
fi
#if test "x$dia" = "xno" ; then
#    AC_MSG_WARN([Could not locate dia.])
#    want_docs_missing="dia"
#fi
if test "x$help2man" = "xno" ; then
    AC_MSG_WARN([Could not locate help2man.])
    want_docs_missing="$want_docs_missing help2man"
fi
if test "x$want_docs_missing" = "x" ; then
    want_docs=yes
else
    AC_MSG_WARN([Could not find required helper utilities (${want_docs_missing}) so the CORE documentation will not be built.])
    want_docs=no
fi

#AC_PATH_PROGS(tcl_path, [tclsh tclsh8.5 tclsh8.4], no)
#if test "x$tcl_path" = "xno" ; then
#    AC_MSG_ERROR([Could not locate tclsh. Please install Tcl/Tk.])
#fi
#AC_PATH_PROGS(wish_path, [wish wish8.5 wish8.4], no)
#if test "x$wish_path" = "xno" ; then
#    AC_MSG_ERROR([Could not locate wish. Please install Tcl/Tk.])
#fi

if test "x$enable_daemon" = "xyes" ; then
    # Checks for libraries.
    AC_CHECK_LIB([netgraph], [NgMkSockNode])

    # Checks for header files.
    AC_CHECK_HEADERS([arpa/inet.h fcntl.h limits.h stdint.h stdlib.h string.h sys/ioctl.h sys/mount.h sys/socket.h sys/time.h termios.h unistd.h])

    # Checks for typedefs, structures, and compiler characteristics.
    AC_C_INLINE
    AC_TYPE_INT32_T
    AC_TYPE_PID_T
    AC_TYPE_SIZE_T
    AC_TYPE_SSIZE_T
    AC_TYPE_UINT32_T
    AC_TYPE_UINT8_T

    # Checks for library functions.
    AC_FUNC_FORK
    AC_FUNC_MALLOC
    AC_FUNC_REALLOC
    AC_CHECK_FUNCS([atexit dup2 gettimeofday memset socket strerror uname])
fi

# simple architecture detection
if test `uname -m` = "x86_64"; then
    ARCH=amd64
else
    ARCH=i386
fi
AC_MSG_RESULT([using architecture $ARCH])
AC_SUBST(ARCH)

# Host-specific detection
want_linux_netns=no
want_bsd=no
if test `uname -s` = "FreeBSD"; then
    want_bsd=yes
    AC_CHECK_PROGS(gmake)
    # FreeBSD fix for linking libev port below
    CFLAGS="$CFLAGS -L/usr/local/lib"
else
    want_linux_netns=yes
fi
if test "x$want_python" = "xno"; then
    want_bsd=no
    want_linux_netns=no
fi

if test "x$want_python" = "xyes"; then
    CFLAGS_save=$CFLAGS
    CPPFLAGS_save=$CPPFLAGS
    if test "x$PYTHON_INCLUDE_DIR" = "x"; then
        PYTHON_INCLUDE_DIR=`$PYTHON -c "import distutils.sysconfig; print distutils.sysconfig.get_python_inc()"`
    fi
    CFLAGS="-I$PYTHON_INCLUDE_DIR"
    CPPFLAGS="-I$PYTHON_INCLUDE_DIR"
    AC_CHECK_HEADERS([Python.h], [],
        AC_MSG_ERROR([Python bindings require Python development headers (try installing your 'python-devel' or 'python-dev' package)]))
    CFLAGS=$CFLAGS_save
    CPPFLAGS=$CPPFLAGS_save
    PKG_CHECK_MODULES(libev, libev,
        AC_MSG_RESULT([found libev using pkgconfig OK])
        AC_SUBST(libev_CFLAGS)
        AC_SUBST(libev_LIBS),
        AC_MSG_RESULT([did not find libev using pkgconfig...])
        AC_CHECK_LIB([ev], ev_set_allocator,
            AC_MSG_RESULT([found libev OK])
            AC_SUBST(libev_CFLAGS)
            AC_SUBST(libev_LIBS, [-lev]),
            AC_MSG_ERROR([Python bindings require libev (try installing your 'libev-devel' or 'libev-dev' package)])))
else
    # Namespace support requires Python support
    want_linux_netns=no
fi

progs_missing=""
if test "x$want_linux_netns" = "xyes"; then
    if test "x$brctl_path" = "xno" ; then
        progs_missing="${progs_missing}brctl "
        brctl_path="/usr/sbin"
        AC_MSG_ERROR([Could not locate brctl (from bridge-utils package).])
    fi
    if test "x$ebtables_path" = "xno" ; then
        progs_missing="${progs_missing}ebtables "
        ebtables_path="/sbin"
        AC_MSG_ERROR([Could not locate ebtables (from ebtables package).])
    fi
    if test "x$ip_path" = "xno" ; then
        progs_missing="${progs_missing}ip "
        ip_path="/sbin"
        AC_MSG_ERROR([Could not locate ip (from iproute package).])
    fi
    if test "x$tc_path" = "xno" ; then
        progs_missing="${progs_missing}tc "
        tc_path="/sbin"
        AC_MSG_ERROR([Could not locate tc (from iproute package).])
    fi
fi
if test "x$want_bsd" = "xyes"; then
    if test "x$ifconfig_path" = "xno" ; then
        AC_MSG_ERROR([Could not locate the 'ifconfig' utility.])
    fi
    if test "x$ngctl_path" = "xno" ; then
        AC_MSG_ERROR([Could not locate the 'ngctl' utility.])
    fi
    if test "x$vimage_path" = "xno" ; then
        AC_MSG_ERROR([Could not locate the 'vimage' utility.])
    fi
fi

AC_ARG_WITH([startup],
    [AS_HELP_STRING([--with-startup=option],
        [option=systemd,suse,none to install systemd/SUSE init scripts])],
    [with_startup=$with_startup],
    [with_startup=initd])
AC_SUBST(with_startup)
AC_MSG_RESULT([using startup option $with_startup])

# Variable substitutions
AM_CONDITIONAL(WANT_GUI, test x$enable_gui = xyes)
AM_CONDITIONAL(WANT_DAEMON, test x$enable_daemon = xyes)
AM_CONDITIONAL(WANT_BSD, test x$want_bsd = xyes)
AM_CONDITIONAL(WANT_DOCS, test x$want_docs = xyes)
AM_CONDITIONAL(WANT_PYTHON, test x$want_python = xyes)
AM_CONDITIONAL(WANT_NETNS, test x$want_linux_netns = xyes)

AM_CONDITIONAL(WANT_INITD, test x$with_startup = xinitd)
AM_CONDITIONAL(WANT_SYSTEMD, test x$with_startup = xsystemd)
AM_CONDITIONAL(WANT_SUSE, test x$with_startup = xsuse)

if test $cross_compiling = no; then
    AM_MISSING_PROG(HELP2MAN, help2man)
else
    HELP2MAN=:
fi


# Output files
AC_CONFIG_FILES([Makefile
    gui/core-gui
    gui/version.tcl
    gui/Makefile
    gui/icons/Makefile
    scripts/Makefile
    scripts/perf/Makefile
    scripts/xen/Makefile
    doc/Makefile
    doc/conf.py
    doc/man/Makefile
    doc/figures/Makefile
    daemon/Makefile
    daemon/src/Makefile
    daemon/src/version.h
    daemon/core/constants.py
    daemon/ns3/Makefile
    daemon/ns3/corens3/constants.py
    daemon/doc/Makefile
    daemon/doc/conf.py
    packaging/deb/core-daemon.install
    packaging/deb/core-gui.install
    packaging/rpm/core.spec],)
AC_OUTPUT

# Summary text
echo \
"------------------------------------------------------------------------
${PACKAGE_STRING} Configuration:

  Host System Type:           ${host}
  C Compiler and flags:       ${CC} ${CFLAGS}
  Install prefix:             ${prefix}
  Build GUI:                  ${enable_gui}
  GUI path:                   ${CORE_LIB_DIR}
  GUI config:                 ${CORE_GUI_CONF_DIR}
  Daemon path:                ${SBINDIR}
  Daemon config:              ${CORE_CONF_DIR}
  Python modules:             ${pythondir}
  Logs:                       ${CORE_STATE_DIR}/log

  Features to build:
    Python bindings:          ${want_python}
    Linux Namespaces emulation: ${want_linux_netns}
    FreeBSD Jails emulation:  ${want_bsd}
    Documentation:            ${want_docs}

------------------------------------------------------------------------"
if test "x${want_bsd}" = "xyes" ; then
    # TODO: more sophisticated checks of gmake vs make
    echo ">>> NOTE: on FreeBSD you should use 'gmake' instead of 'make'
------------------------------------------------------------------------"
fi
if test "x${want_linux_netns}" = "xyes" ; then
    echo "On this platform you should run core-gui as a normal user.
------------------------------------------------------------------------"
fi
if test "x${progs_missing}" != "x" ; then
    echo ">>> NOTE: the following programs could not be found:"
    echo "    $progs_missing
------------------------------------------------------------------------"
fi
daemon/CORE.e4p (new file, 223 lines)
@@ -0,0 +1,223 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE Project SYSTEM "Project-4.6.dtd">
<!-- eric4 project file for project CORE -->
<!-- Saved: 2013-08-06, 13:58:14 -->
<!-- Copyright (C) 2013 , -->
<Project version="4.6">
  <Language>en</Language>
  <ProgLanguage mixed="0">Python</ProgLanguage>
  <ProjectType>Console</ProjectType>
  <Description></Description>
  <Version>4.0</Version>
  <Author></Author>
  <Email></Email>
  <Sources>
    <Source>setup.py</Source>
    <Source>examples/netns/switchtest.py</Source>
    <Source>examples/netns/ospfmanetmdrtest.py</Source>
    <Source>examples/netns/switch.py</Source>
    <Source>examples/netns/wlantest.py</Source>
    <Source>examples/stopsession.py</Source>
    <Source>src/setup.py</Source>
    <Source>core/emane/__init__.py</Source>
    <Source>core/emane/emane.py</Source>
    <Source>core/emane/ieee80211abg.py</Source>
    <Source>core/emane/rfpipe.py</Source>
    <Source>core/emane/nodes.py</Source>
    <Source>core/netns/vif.py</Source>
    <Source>core/netns/vnet.py</Source>
    <Source>core/netns/__init__.py</Source>
    <Source>core/netns/vnode.py</Source>
    <Source>core/netns/vnodeclient.py</Source>
    <Source>core/netns/nodes.py</Source>
    <Source>core/service.py</Source>
    <Source>core/__init__.py</Source>
    <Source>core/addons/__init__.py</Source>
    <Source>core/broker.py</Source>
    <Source>core/services/__init__.py</Source>
    <Source>core/services/quagga.py</Source>
    <Source>core/misc/LatLongUTMconversion.py</Source>
    <Source>core/misc/__init__.py</Source>
    <Source>core/misc/ipaddr.py</Source>
    <Source>core/misc/quagga.py</Source>
    <Source>core/misc/utils.py</Source>
    <Source>core/pycore.py</Source>
    <Source>core/coreobj.py</Source>
    <Source>core/location.py</Source>
    <Source>core/session.py</Source>
    <Source>core/api/__init__.py</Source>
    <Source>core/api/data.py</Source>
    <Source>core/api/coreapi.py</Source>
    <Source>core/services/nrl.py</Source>
    <Source>core/services/utility.py</Source>
    <Source>core/bsd/netgraph.py</Source>
    <Source>core/bsd/__init__.py</Source>
    <Source>core/bsd/nodes.py</Source>
    <Source>core/bsd/vnet.py</Source>
    <Source>core/bsd/vnode.py</Source>
    <Source>core/xen/xen.py</Source>
    <Source>core/xen/xenconfig.py</Source>
    <Source>core/xen/__init__.py</Source>
    <Source>examples/myservices/sample.py</Source>
    <Source>examples/myservices/__init__.py</Source>
    <Source>core/services/security.py</Source>
    <Source>core/emane/universal.py</Source>
    <Source>examples/netns/wlanemanetests.py</Source>
    <Source>core/services/xorp.py</Source>
    <Source>core/misc/xmlutils.py</Source>
    <Source>core/mobility.py</Source>
    <Source>core/phys/pnodes.py</Source>
    <Source>core/phys/__init__.py</Source>
    <Source>ns3/setup.py</Source>
    <Source>ns3/corens3/__init__.py</Source>
    <Source>ns3/corens3/constants.py</Source>
    <Source>ns3/corens3/obj.py</Source>
    <Source>ns3/examples/ns3wifi.py</Source>
    <Source>ns3/examples/ns3lte.py</Source>
    <Source>ns3/examples/ns3wimax.py</Source>
    <Source>core/emane/commeffect.py</Source>
    <Source>core/services/ucarp.py</Source>
    <Source>core/emane/bypass.py</Source>
    <Source>core/conf.py</Source>
    <Source>core/misc/event.py</Source>
    <Source>core/sdt.py</Source>
    <Source>core/services/bird.py</Source>
    <Source>examples/netns/basicrange.py</Source>
    <Source>examples/netns/howmanynodes.py</Source>
    <Source>sbin/core-daemon</Source>
    <Source>sbin/coresendmsg</Source>
    <Source>sbin/core-cleanup</Source>
    <Source>sbin/core-xen-cleanup</Source>
    <Source>ns3/examples/ns3wifirandomwalk.py</Source>
    <Source>core/misc/utm.py</Source>
  </Sources>
  <Forms>
  </Forms>
  <Translations>
  </Translations>
  <Resources>
  </Resources>
  <Interfaces>
  </Interfaces>
  <Others>
  </Others>
  <Vcs>
    <VcsType>Subversion</VcsType>
    <VcsOptions>
      <dict>
        <key>
          <string>add</string>
        </key>
        <value>
          <list>
            <string></string>
          </list>
        </value>
        <key>
          <string>checkout</string>
        </key>
        <value>
          <list>
            <string></string>
          </list>
        </value>
        <key>
          <string>commit</string>
        </key>
        <value>
          <list>
            <string></string>
          </list>
        </value>
        <key>
          <string>diff</string>
        </key>
        <value>
          <list>
            <string></string>
          </list>
        </value>
        <key>
          <string>export</string>
        </key>
        <value>
          <list>
            <string></string>
          </list>
        </value>
        <key>
          <string>global</string>
        </key>
        <value>
          <list>
            <string></string>
          </list>
        </value>
        <key>
          <string>history</string>
        </key>
        <value>
          <list>
            <string></string>
          </list>
        </value>
        <key>
          <string>log</string>
        </key>
        <value>
          <list>
            <string></string>
          </list>
        </value>
        <key>
          <string>remove</string>
        </key>
        <value>
          <list>
            <string></string>
          </list>
        </value>
        <key>
          <string>status</string>
        </key>
        <value>
          <list>
            <string></string>
          </list>
        </value>
        <key>
          <string>tag</string>
        </key>
        <value>
          <list>
            <string></string>
          </list>
        </value>
        <key>
          <string>update</string>
        </key>
        <value>
          <list>
            <string></string>
          </list>
        </value>
      </dict>
    </VcsOptions>
    <VcsOtherData>
      <dict>
        <key>
          <string>standardLayout</string>
        </key>
        <value>
          <bool>True</bool>
        </value>
      </dict>
    </VcsOtherData>
  </Vcs>
  <FiletypeAssociations>
    <FiletypeAssociation pattern="*.pyw" type="SOURCES" />
    <FiletypeAssociation pattern="*.idl" type="INTERFACES" />
    <FiletypeAssociation pattern="*.py" type="SOURCES" />
    <FiletypeAssociation pattern="*.ptl" type="SOURCES" />
  </FiletypeAssociations>
</Project>
daemon/MANIFEST.in (new file, 4 lines)
@@ -0,0 +1,4 @@
recursive-include sbin *.sh *.py
include data/core.conf
recursive-include examples/netns *.py *.sh
recursive-exclude examples/netns *.pyc *.pyo
daemon/Makefile.am (new executable file, 51 lines)
@@ -0,0 +1,51 @@
# CORE
# (c)2010-2012 the Boeing Company.
# See the LICENSE file included in this distribution.
#
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
#
# Makefile for building netns components.
#

if WANT_NETNS
SUBDIRS = src ns3
endif

# Python package build
noinst_SCRIPTS = build
build:
	$(PYTHON) setup.py build

# Python package install
install-exec-hook:
	CORE_CONF_DIR=${DESTDIR}/${CORE_CONF_DIR} $(PYTHON) setup.py install --prefix=${DESTDIR}/${prefix} --install-purelib=${DESTDIR}/${pythondir} --install-platlib=${DESTDIR}/${pyexecdir} --no-compile

# Python package uninstall
uninstall-hook:
	rm -f ${SBINDIR}/core-daemon
	rm -f ${SBINDIR}/coresendmsg
	rm -f ${SBINDIR}/core-cleanup
	rm -f ${SBINDIR}/core-xen-cleanup
	rm -f ${pythondir}/core_python-${COREDPY_VERSION}-py${PYTHON_VERSION}.egg-info
	rm -f ${pythondir}/core_python_netns-1.0-py${PYTHON_VERSION}.egg-info
	rm -rf ${pythondir}/core
	rm -rf ${prefix}/share/core

# Python package cleanup
clean-local:
	-rm -rf build

# Python RPM package
rpm:
	$(PYTHON) setup.py bdist_rpm

# because we include entire directories with EXTRA_DIST, we need to clean up
# the source control files
dist-hook:
	rm -rf `find $(distdir)/ -name .svn` `find $(distdir)/ -name '*.pyc'`

DISTCLEANFILES = Makefile.in core/*.pyc MANIFEST doc/Makefile.in doc/Makefile \
	doc/conf.py core/addons/*.pyc

# files to include with distribution tarball
EXTRA_DIST = setup.py MANIFEST.in CORE.e4p core data examples sbin doc
daemon/core/__init__.py (new file, 23 lines)
@@ -0,0 +1,23 @@
# Copyright (c)2010-2012 the Boeing Company.
# See the LICENSE file included in this distribution.

"""core

Top-level Python package containing CORE components.

See http://cs.itd.nrl.navy.mil/work/core/ and
http://code.google.com/p/coreemu/ for more information on CORE.

Pieces can be imported individually, for example

    import core.netns.vnode

or everything listed in __all__ can be imported using

    from core import *
"""

__all__ = []

# Automatically import all add-ons listed in addons.__all__
from addons import *
daemon/core/addons/__init__.py (new file, 6 lines)
@@ -0,0 +1,6 @@
"""Optional add-ons

Add-on files can be put in this directory. Everything listed in
__all__ is automatically loaded by the main core module.
"""
__all__ = []
daemon/core/api/__init__.py (new file, 0 lines)
daemon/core/api/coreapi.py (new file, 630 lines; excerpt below)
@@ -0,0 +1,630 @@
#
# CORE
# Copyright (c)2010-2012 the Boeing Company.
# See the LICENSE file included in this distribution.
#
# authors: Tom Goff <thomas.goff@boeing.com>
#          Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
#
'''
coreapi.py: uses coreapi_data for Message and TLV types, and defines TLV data
types and objects used for parsing and building CORE API messages.
'''

import struct

from core.api.data import *
from core.misc.ipaddr import *


class CoreTlvData(object):
    datafmt = None
    datatype = None
    padlen = None

    @classmethod
    def pack(cls, value):
        "return: (tlvlen, tlvdata)"
        tmp = struct.pack(cls.datafmt, value)
        return len(tmp) - cls.padlen, tmp

    @classmethod
    def unpack(cls, data):
        return struct.unpack(cls.datafmt, data)[0]

    @classmethod
    def packstring(cls, strvalue):
        return cls.pack(cls.fromstring(strvalue))

    @classmethod
    def fromstring(cls, s):
        return cls.datatype(s)

class CoreTlvDataObj(CoreTlvData):
    @classmethod
    def pack(cls, obj):
        "return: (tlvlen, tlvdata)"
        tmp = struct.pack(cls.datafmt, cls.getvalue(obj))
        return len(tmp) - cls.padlen, tmp

    @classmethod
    def unpack(cls, data):
        return cls.newobj(struct.unpack(cls.datafmt, data)[0])

    @staticmethod
    def getvalue(obj):
        raise NotImplementedError

    @staticmethod
    def newobj(obj):
        raise NotImplementedError

class CoreTlvDataUint16(CoreTlvData):
    datafmt = "!H"
    datatype = int
    padlen = 0

class CoreTlvDataUint32(CoreTlvData):
    datafmt = "!2xI"
    datatype = int
    padlen = 2

class CoreTlvDataUint64(CoreTlvData):
    datafmt = "!2xQ"
    datatype = long
    padlen = 2

class CoreTlvDataString(CoreTlvData):
    datatype = str

    @staticmethod
    def pack(value):
        if not isinstance(value, str):
            raise ValueError, "value not a string: %s" % value
        if len(value) < 256:
            hdrsiz = CoreTlv.hdrsiz
        else:
            hdrsiz = CoreTlv.longhdrsiz
        padlen = -(hdrsiz + len(value)) % 4
        return len(value), value + '\0' * padlen

    @staticmethod
    def unpack(data):
        return data.rstrip('\0')

class CoreTlvDataUint16List(CoreTlvData):
    ''' List of unsigned 16-bit values.
    '''
    datatype = tuple

    @staticmethod
    def pack(values):
        if not isinstance(values, tuple):
            raise ValueError, "value not a tuple: %s" % values
        data = ""
        for v in values:
            data += struct.pack("!H", v)
        padlen = -(CoreTlv.hdrsiz + len(data)) % 4
        return len(data), data + '\0' * padlen

    @staticmethod
    def unpack(data):
        datafmt = "!%dH" % (len(data)/2)
        return struct.unpack(datafmt, data)

    @classmethod
    def fromstring(cls, s):
        return tuple(map(lambda(x): int(x), s.split()))

class CoreTlvDataIPv4Addr(CoreTlvDataObj):
    datafmt = "!2x4s"
    datatype = IPAddr.fromstring
    padlen = 2

    @staticmethod
    def getvalue(obj):
        return obj.addr

    @staticmethod
    def newobj(value):
        return IPAddr(af = AF_INET, addr = value)

class CoreTlvDataIPv6Addr(CoreTlvDataObj):
    datafmt = "!16s2x"
    datatype = IPAddr.fromstring
    padlen = 2

    @staticmethod
    def getvalue(obj):
        return obj.addr

    @staticmethod
    def newobj(value):
        return IPAddr(af = AF_INET6, addr = value)

class CoreTlvDataMacAddr(CoreTlvDataObj):
    datafmt = "!2x8s"
    datatype = MacAddr.fromstring
    padlen = 2

    @staticmethod
    def getvalue(obj):
        return obj.addr

    @staticmethod
    def newobj(value):
        return MacAddr(addr = value[2:]) # only use 48 bits

class CoreTlv(object):
    hdrfmt = "!BB"
    hdrsiz = struct.calcsize(hdrfmt)

    longhdrfmt = "!BBH"
    longhdrsiz = struct.calcsize(longhdrfmt)

    tlvtypemap = {}
    tlvdataclsmap = {}

    def __init__(self, tlvtype, tlvdata):
        self.tlvtype = tlvtype
        if tlvdata:
            try:
                self.value = self.tlvdataclsmap[self.tlvtype].unpack(tlvdata)
            except KeyError:
                self.value = tlvdata
        else:
            self.value = None

    @classmethod
    def unpack(cls, data):
        "parse data and return (tlv, remainingdata)"
        tlvtype, tlvlen = struct.unpack(cls.hdrfmt, data[:cls.hdrsiz])
        hdrsiz = cls.hdrsiz
        if tlvlen == 0:
            tlvtype, zero, tlvlen = struct.unpack(cls.longhdrfmt,
                                                  data[:cls.longhdrsiz])
            hdrsiz = cls.longhdrsiz
        tlvsiz = hdrsiz + tlvlen
        tlvsiz += -tlvsiz % 4 # for 32-bit alignment
        return cls(tlvtype, data[hdrsiz:tlvsiz]), data[tlvsiz:]

    @classmethod
    def pack(cls, tlvtype, value):
        try:
            tlvlen, tlvdata = cls.tlvdataclsmap[tlvtype].pack(value)
        except Exception, e:
            raise ValueError, "TLV packing error type=%s: %s" % (tlvtype, e)
        if tlvlen < 256:
            hdr = struct.pack(cls.hdrfmt, tlvtype, tlvlen)
        else:
            hdr = struct.pack(cls.longhdrfmt, tlvtype, 0, tlvlen)
        return hdr + tlvdata

    @classmethod
    def packstring(cls, tlvtype, value):
        return cls.pack(tlvtype, cls.tlvdataclsmap[tlvtype].fromstring(value))

    def typestr(self):
        try:
            return self.tlvtypemap[self.tlvtype]
        except KeyError:
            return "unknown tlv type: %s" % str(self.tlvtype)

    def __str__(self):
        return "%s <tlvtype = %s, value = %s>" % \
               (self.__class__.__name__, self.typestr(), self.value)

class CoreNodeTlv(CoreTlv):
    tlvtypemap = node_tlvs
    tlvdataclsmap = {
        CORE_TLV_NODE_NUMBER: CoreTlvDataUint32,
        CORE_TLV_NODE_TYPE: CoreTlvDataUint32,
        CORE_TLV_NODE_NAME: CoreTlvDataString,
        CORE_TLV_NODE_IPADDR: CoreTlvDataIPv4Addr,
        CORE_TLV_NODE_MACADDR: CoreTlvDataMacAddr,
        CORE_TLV_NODE_IP6ADDR: CoreTlvDataIPv6Addr,
        CORE_TLV_NODE_MODEL: CoreTlvDataString,
        CORE_TLV_NODE_EMUSRV: CoreTlvDataString,
        CORE_TLV_NODE_SESSION: CoreTlvDataString,
        CORE_TLV_NODE_XPOS: CoreTlvDataUint16,
        CORE_TLV_NODE_YPOS: CoreTlvDataUint16,
        CORE_TLV_NODE_CANVAS: CoreTlvDataUint16,
        CORE_TLV_NODE_EMUID: CoreTlvDataUint32,
        CORE_TLV_NODE_NETID: CoreTlvDataUint32,
        CORE_TLV_NODE_SERVICES: CoreTlvDataString,
        CORE_TLV_NODE_LAT: CoreTlvDataString,
        CORE_TLV_NODE_LONG: CoreTlvDataString,
        CORE_TLV_NODE_ALT: CoreTlvDataString,
        CORE_TLV_NODE_ICON: CoreTlvDataString,
        CORE_TLV_NODE_OPAQUE: CoreTlvDataString,
    }
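
# --- Illustrative usage (editor's note, not part of the original file) ---
# A minimal sketch of how the TLV classes above fit together, assuming the
# CORE_TLV_NODE_NAME constant imported from core.api.data via the wildcard
# import at the top of this module:
#
#   data = CoreNodeTlv.pack(CORE_TLV_NODE_NAME, "n1")  # 2-byte header + value
#   tlv, rest = CoreNodeTlv.unpack(data)               # header parsed back off
#   assert tlv.value == "n1" and rest == ""
#
# Values shorter than 256 bytes use the short "!BB" header; longer values fall
# back to the "!BBH" long header, and payloads are padded to 32-bit alignment.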

class CoreLinkTlv(CoreTlv):
    tlvtypemap = link_tlvs
    tlvdataclsmap = {
        CORE_TLV_LINK_N1NUMBER: CoreTlvDataUint32,
        CORE_TLV_LINK_N2NUMBER: CoreTlvDataUint32,
        CORE_TLV_LINK_DELAY: CoreTlvDataUint64,
        CORE_TLV_LINK_BW: CoreTlvDataUint64,
        CORE_TLV_LINK_PER: CoreTlvDataString,
        CORE_TLV_LINK_DUP: CoreTlvDataString,
        CORE_TLV_LINK_JITTER: CoreTlvDataUint32,
        CORE_TLV_LINK_MER: CoreTlvDataUint16,
        CORE_TLV_LINK_BURST: CoreTlvDataUint16,
        CORE_TLV_LINK_SESSION: CoreTlvDataString,
        CORE_TLV_LINK_MBURST: CoreTlvDataUint16,
        CORE_TLV_LINK_TYPE: CoreTlvDataUint32,
        CORE_TLV_LINK_GUIATTR: CoreTlvDataString,
        CORE_TLV_LINK_EMUID: CoreTlvDataUint32,
        CORE_TLV_LINK_NETID: CoreTlvDataUint32,
        CORE_TLV_LINK_KEY: CoreTlvDataUint32,
        CORE_TLV_LINK_IF1NUM: CoreTlvDataUint16,
        CORE_TLV_LINK_IF1IP4: CoreTlvDataIPv4Addr,
        CORE_TLV_LINK_IF1IP4MASK: CoreTlvDataUint16,
        CORE_TLV_LINK_IF1MAC: CoreTlvDataMacAddr,
        CORE_TLV_LINK_IF1IP6: CoreTlvDataIPv6Addr,
        CORE_TLV_LINK_IF1IP6MASK: CoreTlvDataUint16,
        CORE_TLV_LINK_IF2NUM: CoreTlvDataUint16,
        CORE_TLV_LINK_IF2IP4: CoreTlvDataIPv4Addr,
        CORE_TLV_LINK_IF2IP4MASK: CoreTlvDataUint16,
        CORE_TLV_LINK_IF2MAC: CoreTlvDataMacAddr,
        CORE_TLV_LINK_IF2IP6: CoreTlvDataIPv6Addr,
        CORE_TLV_LINK_IF2IP6MASK: CoreTlvDataUint16,
        CORE_TLV_LINK_OPAQUE: CoreTlvDataString,
    }

class CoreExecTlv(CoreTlv):
    tlvtypemap = exec_tlvs
    tlvdataclsmap = {
        CORE_TLV_EXEC_NODE: CoreTlvDataUint32,
        CORE_TLV_EXEC_NUM: CoreTlvDataUint32,
        CORE_TLV_EXEC_TIME: CoreTlvDataUint32,
        CORE_TLV_EXEC_CMD: CoreTlvDataString,
        CORE_TLV_EXEC_RESULT: CoreTlvDataString,
        CORE_TLV_EXEC_STATUS: CoreTlvDataUint32,
        CORE_TLV_EXEC_SESSION: CoreTlvDataString,
    }

class CoreRegTlv(CoreTlv):
    tlvtypemap = reg_tlvs
    tlvdataclsmap = {
        CORE_TLV_REG_WIRELESS: CoreTlvDataString,
        CORE_TLV_REG_MOBILITY: CoreTlvDataString,
        CORE_TLV_REG_UTILITY: CoreTlvDataString,
        CORE_TLV_REG_EXECSRV: CoreTlvDataString,
        CORE_TLV_REG_GUI: CoreTlvDataString,
        CORE_TLV_REG_EMULSRV: CoreTlvDataString,
        CORE_TLV_REG_SESSION: CoreTlvDataString,
    }

class CoreConfTlv(CoreTlv):
    tlvtypemap = conf_tlvs
    tlvdataclsmap = {
        CORE_TLV_CONF_NODE: CoreTlvDataUint32,
        CORE_TLV_CONF_OBJ: CoreTlvDataString,
        CORE_TLV_CONF_TYPE: CoreTlvDataUint16,
        CORE_TLV_CONF_DATA_TYPES: CoreTlvDataUint16List,
        CORE_TLV_CONF_VALUES: CoreTlvDataString,
        CORE_TLV_CONF_CAPTIONS: CoreTlvDataString,
        CORE_TLV_CONF_BITMAP: CoreTlvDataString,
        CORE_TLV_CONF_POSSIBLE_VALUES: CoreTlvDataString,
        CORE_TLV_CONF_GROUPS: CoreTlvDataString,
        CORE_TLV_CONF_SESSION: CoreTlvDataString,
        CORE_TLV_CONF_NETID: CoreTlvDataUint32,
        CORE_TLV_CONF_OPAQUE: CoreTlvDataString,
    }

class CoreFileTlv(CoreTlv):
    tlvtypemap = file_tlvs
    tlvdataclsmap = {
        CORE_TLV_FILE_NODE: CoreTlvDataUint32,
        CORE_TLV_FILE_NAME: CoreTlvDataString,
        CORE_TLV_FILE_MODE: CoreTlvDataString,
        CORE_TLV_FILE_NUM: CoreTlvDataUint16,
        CORE_TLV_FILE_TYPE: CoreTlvDataString,
        CORE_TLV_FILE_SRCNAME: CoreTlvDataString,
        CORE_TLV_FILE_SESSION: CoreTlvDataString,
        CORE_TLV_FILE_DATA: CoreTlvDataString,
        CORE_TLV_FILE_CMPDATA: CoreTlvDataString,
    }

class CoreIfaceTlv(CoreTlv):
    tlvtypemap = iface_tlvs
    tlvdataclsmap = {
        CORE_TLV_IFACE_NODE: CoreTlvDataUint32,
        CORE_TLV_IFACE_NUM: CoreTlvDataUint16,
        CORE_TLV_IFACE_NAME: CoreTlvDataString,
        CORE_TLV_IFACE_IPADDR: CoreTlvDataIPv4Addr,
        CORE_TLV_IFACE_MASK: CoreTlvDataUint16,
        CORE_TLV_IFACE_MACADDR: CoreTlvDataMacAddr,
        CORE_TLV_IFACE_IP6ADDR: CoreTlvDataIPv6Addr,
        CORE_TLV_IFACE_IP6MASK: CoreTlvDataUint16,
        CORE_TLV_IFACE_TYPE: CoreTlvDataUint16,
        CORE_TLV_IFACE_SESSION: CoreTlvDataString,
        CORE_TLV_IFACE_STATE: CoreTlvDataUint16,
        CORE_TLV_IFACE_EMUID: CoreTlvDataUint32,
        CORE_TLV_IFACE_NETID: CoreTlvDataUint32,
    }

class CoreEventTlv(CoreTlv):
    tlvtypemap = event_tlvs
    tlvdataclsmap = {
        CORE_TLV_EVENT_NODE: CoreTlvDataUint32,
        CORE_TLV_EVENT_TYPE: CoreTlvDataUint32,
        CORE_TLV_EVENT_NAME: CoreTlvDataString,
        CORE_TLV_EVENT_DATA: CoreTlvDataString,
        CORE_TLV_EVENT_TIME: CoreTlvDataString,
        CORE_TLV_EVENT_SESSION: CoreTlvDataString,
    }

class CoreSessionTlv(CoreTlv):
    tlvtypemap = session_tlvs
    tlvdataclsmap = {
        CORE_TLV_SESS_NUMBER: CoreTlvDataString,
        CORE_TLV_SESS_NAME: CoreTlvDataString,
        CORE_TLV_SESS_FILE: CoreTlvDataString,
        CORE_TLV_SESS_NODECOUNT: CoreTlvDataString,
        CORE_TLV_SESS_DATE: CoreTlvDataString,
        CORE_TLV_SESS_THUMB: CoreTlvDataString,
        CORE_TLV_SESS_USER: CoreTlvDataString,
        CORE_TLV_SESS_OPAQUE: CoreTlvDataString,
    }

class CoreExceptionTlv(CoreTlv):
    tlvtypemap = exception_tlvs
    tlvdataclsmap = {
        CORE_TLV_EXCP_NODE: CoreTlvDataUint32,
        CORE_TLV_EXCP_SESSION: CoreTlvDataString,
        CORE_TLV_EXCP_LEVEL: CoreTlvDataUint16,
        CORE_TLV_EXCP_SOURCE: CoreTlvDataString,
        CORE_TLV_EXCP_DATE: CoreTlvDataString,
        CORE_TLV_EXCP_TEXT: CoreTlvDataString,
        CORE_TLV_EXCP_OPAQUE: CoreTlvDataString,
    }


class CoreMessage(object):
    hdrfmt = "!BBH"
    hdrsiz = struct.calcsize(hdrfmt)

    msgtype = None

    flagmap = {}

    tlvcls = CoreTlv

    def __init__(self, flags, hdr, data):
        self.rawmsg = hdr + data
        self.flags = flags
        self.tlvdata = {}
        self.parsedata(data)

    @classmethod
    def unpackhdr(cls, data):
        "parse data and return (msgtype, msgflags, msglen)"
        msgtype, msgflags, msglen = struct.unpack(cls.hdrfmt, data[:cls.hdrsiz])
        return msgtype, msgflags, msglen

    @classmethod
    def pack(cls, msgflags, tlvdata):
        hdr = struct.pack(cls.hdrfmt, cls.msgtype, msgflags, len(tlvdata))
        return hdr + tlvdata

    def addtlvdata(self, k, v):
        if k in self.tlvdata:
            raise KeyError, "key already exists: %s (val=%s)" % (k, v)
        self.tlvdata[k] = v

    def gettlv(self, tlvtype):
        if tlvtype in self.tlvdata:
            return self.tlvdata[tlvtype]
        else:
            return None

    def parsedata(self, data):
        while data:
            tlv, data = self.tlvcls.unpack(data)
            self.addtlvdata(tlv.tlvtype, tlv.value)

    def packtlvdata(self):
        ''' Opposite of parsedata(). Return packed TLV data using
        self.tlvdata dict. Used by repack().
        '''
        tlvdata = ""
        keys = sorted(self.tlvdata.keys())
        for k in keys:
            v = self.tlvdata[k]
|
||||
tlvdata += self.tlvcls.pack(k, v)
|
||||
return tlvdata
|
||||
|
||||
def repack(self):
|
||||
''' Invoke after updating self.tlvdata[] to rebuild self.rawmsg.
|
||||
Useful for modifying a message that has been parsed, before
|
||||
sending the raw data again.
|
||||
'''
|
||||
tlvdata = self.packtlvdata()
|
||||
self.rawmsg = self.pack(self.flags, tlvdata)
|
||||
|
||||
def typestr(self):
|
||||
try:
|
||||
return message_types[self.msgtype]
|
||||
except KeyError:
|
||||
return "unknown message type: %s" % str(self.msgtype)
|
||||
|
||||
def flagstr(self):
|
||||
msgflags = []
|
||||
flag = 1L
|
||||
while True:
|
||||
if (self.flags & flag):
|
||||
try:
|
||||
msgflags.append(self.flagmap[flag])
|
||||
except KeyError:
|
||||
msgflags.append("0x%x" % flag)
|
||||
flag <<= 1
|
||||
if not (self.flags & ~(flag - 1)):
|
||||
break
|
||||
return "0x%x <%s>" % (self.flags, " | ".join(msgflags))
|
||||
|
||||
def __str__(self):
|
||||
tmp = "%s <msgtype = %s, flags = %s>" % \
|
||||
(self.__class__.__name__, self.typestr(), self.flagstr())
|
||||
for k, v in self.tlvdata.iteritems():
|
||||
if k in self.tlvcls.tlvtypemap:
|
||||
tlvtype = self.tlvcls.tlvtypemap[k]
|
||||
else:
|
||||
tlvtype = "tlv type %s" % k
|
||||
tmp += "\n %s: %s" % (tlvtype, v)
|
||||
return tmp
|
||||
|
||||
def nodenumbers(self):
|
||||
''' Return a list of node numbers included in this message.
|
||||
'''
|
||||
n = None
|
||||
n2 = None
|
||||
# not all messages have node numbers
|
||||
if self.msgtype == CORE_API_NODE_MSG:
|
||||
n = self.gettlv(CORE_TLV_NODE_NUMBER)
|
||||
elif self.msgtype == CORE_API_LINK_MSG:
|
||||
n = self.gettlv(CORE_TLV_LINK_N1NUMBER)
|
||||
n2 = self.gettlv(CORE_TLV_LINK_N2NUMBER)
|
||||
elif self.msgtype == CORE_API_EXEC_MSG:
|
||||
n = self.gettlv(CORE_TLV_EXEC_NODE)
|
||||
elif self.msgtype == CORE_API_CONF_MSG:
|
||||
n = self.gettlv(CORE_TLV_CONF_NODE)
|
||||
elif self.msgtype == CORE_API_FILE_MSG:
|
||||
n = self.gettlv(CORE_TLV_FILE_NODE)
|
||||
elif self.msgtype == CORE_API_IFACE_MSG:
|
||||
n = self.gettlv(CORE_TLV_IFACE_NODE)
|
||||
elif self.msgtype == CORE_API_EVENT_MSG:
|
||||
n = self.gettlv(CORE_TLV_EVENT_NODE)
|
||||
r = []
|
||||
if n is not None:
|
||||
r.append(n)
|
||||
if n2 is not None:
|
||||
r.append(n2)
|
||||
return r
|
||||
|
||||
def sessionnumbers(self):
|
||||
''' Return a list of session numbers included in this message.
|
||||
'''
|
||||
r = []
|
||||
if self.msgtype == CORE_API_SESS_MSG:
|
||||
s = self.gettlv(CORE_TLV_SESS_NUMBER)
|
||||
elif self.msgtype == CORE_API_EXCP_MSG:
|
||||
s = self.gettlv(CORE_TLV_EXCP_SESSION)
|
||||
else:
|
||||
# All other messages share TLV number 0xA for the session number(s).
|
||||
s = self.gettlv(CORE_TLV_NODE_SESSION)
|
||||
if s is not None:
|
||||
for sid in s.split('|'):
|
||||
r.append(int(sid))
|
||||
return r
|
||||
|
||||
|
||||
class CoreNodeMessage(CoreMessage):
|
||||
msgtype = CORE_API_NODE_MSG
|
||||
flagmap = message_flags
|
||||
tlvcls = CoreNodeTlv
|
||||
|
||||
class CoreLinkMessage(CoreMessage):
|
||||
msgtype = CORE_API_LINK_MSG
|
||||
flagmap = message_flags
|
||||
tlvcls = CoreLinkTlv
|
||||
|
||||
class CoreExecMessage(CoreMessage):
|
||||
msgtype = CORE_API_EXEC_MSG
|
||||
flagmap = message_flags
|
||||
tlvcls = CoreExecTlv
|
||||
|
||||
class CoreRegMessage(CoreMessage):
|
||||
msgtype = CORE_API_REG_MSG
|
||||
flagmap = message_flags
|
||||
tlvcls = CoreRegTlv
|
||||
|
||||
class CoreConfMessage(CoreMessage):
|
||||
msgtype = CORE_API_CONF_MSG
|
||||
flagmap = message_flags
|
||||
tlvcls = CoreConfTlv
|
||||
|
||||
class CoreFileMessage(CoreMessage):
|
||||
msgtype = CORE_API_FILE_MSG
|
||||
flagmap = message_flags
|
||||
tlvcls = CoreFileTlv
|
||||
|
||||
class CoreIfaceMessage(CoreMessage):
|
||||
msgtype = CORE_API_IFACE_MSG
|
||||
flagmap = message_flags
|
||||
tlvcls = CoreIfaceTlv
|
||||
|
||||
class CoreEventMessage(CoreMessage):
|
||||
msgtype = CORE_API_EVENT_MSG
|
||||
flagmap = message_flags
|
||||
tlvcls = CoreEventTlv
|
||||
|
||||
class CoreSessionMessage(CoreMessage):
|
||||
msgtype = CORE_API_SESS_MSG
|
||||
flagmap = message_flags
|
||||
tlvcls = CoreSessionTlv
|
||||
|
||||
class CoreExceptionMessage(CoreMessage):
|
||||
msgtype = CORE_API_EXCP_MSG
|
||||
flagmap = message_flags
|
||||
tlvcls = CoreExceptionTlv
|
||||
|
||||
msgclsmap = {
|
||||
CORE_API_NODE_MSG: CoreNodeMessage,
|
||||
CORE_API_LINK_MSG: CoreLinkMessage,
|
||||
CORE_API_EXEC_MSG: CoreExecMessage,
|
||||
CORE_API_REG_MSG: CoreRegMessage,
|
||||
CORE_API_CONF_MSG: CoreConfMessage,
|
||||
CORE_API_FILE_MSG: CoreFileMessage,
|
||||
CORE_API_IFACE_MSG: CoreIfaceMessage,
|
||||
CORE_API_EVENT_MSG: CoreEventMessage,
|
||||
CORE_API_SESS_MSG: CoreSessionMessage,
|
||||
CORE_API_EXCP_MSG: CoreExceptionMessage,
|
||||
}
|
||||
|
||||
def msg_class(msgtypeid):
|
||||
global msgclsmap
|
||||
return msgclsmap[msgtypeid]
|
||||
|
||||
nodeclsmap = {}
|
||||
|
||||
def add_node_class(name, nodetypeid, nodecls, change = False):
|
||||
global nodeclsmap
|
||||
if nodetypeid in nodeclsmap:
|
||||
if not change:
|
||||
raise ValueError, \
|
||||
"node class already exists for nodetypeid %s" % nodetypeid
|
||||
nodeclsmap[nodetypeid] = nodecls
|
||||
if nodetypeid not in node_types:
|
||||
node_types[nodetypeid] = name
|
||||
exec "%s = %s" % (name, nodetypeid) in globals()
|
||||
elif name != node_types[nodetypeid]:
|
||||
raise ValueError, "node type already exists for '%s'" % name
|
||||
else:
|
||||
pass
|
||||
|
||||
def change_node_class(name, nodetypeid, nodecls):
|
||||
return add_node_class(name, nodetypeid, nodecls, change = True)
|
||||
|
||||
def node_class(nodetypeid):
|
||||
global nodeclsmap
|
||||
return nodeclsmap[nodetypeid]
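# Illustrative sketch only (added for clarity, not part of the original import):
# how a concrete node implementation might be registered so that Node Messages
# carrying a type TLV can be mapped back to a class. The module and class names
# below are assumptions for the example, not references to real files.
#
#   add_node_class("CORE_NODE_DEF", 0, mynodes.CoreNode)
#   cls = node_class(0)        # -> mynodes.CoreNode
#   CORE_NODE_DEF == 0         # constant exported into this module's globals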
|
||||
|
||||
def str_to_list(s):
|
||||
''' Helper to convert a pipe-delimited string ("a|b|c") into a list ["a", "b", "c"]
|
||||
'''
|
||||
if s is None:
|
||||
return None
|
||||
return s.split("|")
|
||||
|
||||
def state_name(n):
|
||||
''' Helper to convert state number into state name using event types.
|
||||
'''
|
||||
if n in event_types:
|
||||
eventname = event_types[n]
|
||||
name = eventname.split('_')[2]
|
||||
else:
|
||||
name = "unknown"
|
||||
return name
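# Illustrative sketch only (not in the original file): a typical round trip
# through the classes above. A TLV block is packed, prefixed with the "!BBH"
# (type, flags, length) header, and parsed back into a message object. The
# constants come from data.py; the values chosen here are arbitrary examples.
#
#   tlvdata = CoreEventTlv.pack(CORE_TLV_EVENT_TYPE, CORE_EVENT_RUNTIME_STATE)
#   raw = CoreEventMessage.pack(0, tlvdata)
#   msgtype, flags, msglen = CoreMessage.unpackhdr(raw[:CoreMessage.hdrsiz])
#   msg = msg_class(msgtype)(flags, raw[:CoreMessage.hdrsiz],
#                            raw[CoreMessage.hdrsiz:])
#   msg.gettlv(CORE_TLV_EVENT_TYPE)   # -> CORE_EVENT_RUNTIME_STATE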
|
327 daemon/core/api/data.py Normal file
@ -0,0 +1,327 @@
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Tom Goff <thomas.goff@boeing.com>
|
||||
#
|
||||
'''
|
||||
data.py: constant definitions for the CORE API, enumerating the
|
||||
different message and TLV types (these constants are also found in coreapi.h)
|
||||
'''
|
||||
|
||||
def enumdict(d):
|
||||
for k, v in d.iteritems():
|
||||
exec "%s = %s" % (v, k) in globals()
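# Illustrative note (added for clarity, not in the original file): enumdict()
# turns each value name into a module-level constant equal to its key, so
# after enumdict({0x01: "CORE_API_NODE_MSG"}) the name CORE_API_NODE_MSG
# evaluates to 0x01 in this module and can be imported by other modules.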
|
||||
|
||||
# Constants
|
||||
|
||||
CORE_API_VER = "1.21"
|
||||
CORE_API_PORT = 4038
|
||||
|
||||
# Message types
|
||||
|
||||
message_types = {
|
||||
0x01: "CORE_API_NODE_MSG",
|
||||
0x02: "CORE_API_LINK_MSG",
|
||||
0x03: "CORE_API_EXEC_MSG",
|
||||
0x04: "CORE_API_REG_MSG",
|
||||
0x05: "CORE_API_CONF_MSG",
|
||||
0x06: "CORE_API_FILE_MSG",
|
||||
0x07: "CORE_API_IFACE_MSG",
|
||||
0x08: "CORE_API_EVENT_MSG",
|
||||
0x09: "CORE_API_SESS_MSG",
|
||||
0x0A: "CORE_API_EXCP_MSG",
|
||||
0x0B: "CORE_API_MSG_MAX",
|
||||
}
|
||||
|
||||
enumdict(message_types)
|
||||
|
||||
# Generic Message Flags
|
||||
|
||||
message_flags = {
|
||||
0x01: "CORE_API_ADD_FLAG",
|
||||
0x02: "CORE_API_DEL_FLAG",
|
||||
0x04: "CORE_API_CRI_FLAG",
|
||||
0x08: "CORE_API_LOC_FLAG",
|
||||
0x10: "CORE_API_STR_FLAG",
|
||||
0x20: "CORE_API_TXT_FLAG",
|
||||
0x40: "CORE_API_TTY_FLAG",
|
||||
}
|
||||
|
||||
enumdict(message_flags)
|
||||
|
||||
# Node Message TLV Types
|
||||
|
||||
node_tlvs = {
|
||||
0x01: "CORE_TLV_NODE_NUMBER",
|
||||
0x02: "CORE_TLV_NODE_TYPE",
|
||||
0x03: "CORE_TLV_NODE_NAME",
|
||||
0x04: "CORE_TLV_NODE_IPADDR",
|
||||
0x05: "CORE_TLV_NODE_MACADDR",
|
||||
0x06: "CORE_TLV_NODE_IP6ADDR",
|
||||
0x07: "CORE_TLV_NODE_MODEL",
|
||||
0x08: "CORE_TLV_NODE_EMUSRV",
|
||||
0x0A: "CORE_TLV_NODE_SESSION",
|
||||
0x20: "CORE_TLV_NODE_XPOS",
|
||||
0x21: "CORE_TLV_NODE_YPOS",
|
||||
0x22: "CORE_TLV_NODE_CANVAS",
|
||||
0x23: "CORE_TLV_NODE_EMUID",
|
||||
0x24: "CORE_TLV_NODE_NETID",
|
||||
0x25: "CORE_TLV_NODE_SERVICES",
|
||||
0x30: "CORE_TLV_NODE_LAT",
|
||||
0x31: "CORE_TLV_NODE_LONG",
|
||||
0x32: "CORE_TLV_NODE_ALT",
|
||||
0x42: "CORE_TLV_NODE_ICON",
|
||||
0x50: "CORE_TLV_NODE_OPAQUE",
|
||||
}
|
||||
|
||||
enumdict(node_tlvs)
|
||||
|
||||
node_types = dict(enumerate([
|
||||
"CORE_NODE_DEF",
|
||||
"CORE_NODE_PHYS",
|
||||
"CORE_NODE_XEN",
|
||||
"CORE_NODE_TBD",
|
||||
"CORE_NODE_SWITCH",
|
||||
"CORE_NODE_HUB",
|
||||
"CORE_NODE_WLAN",
|
||||
"CORE_NODE_RJ45",
|
||||
"CORE_NODE_TUNNEL",
|
||||
"CORE_NODE_KTUNNEL",
|
||||
"CORE_NODE_EMANE",
|
||||
]))
|
||||
|
||||
enumdict(node_types)
|
||||
|
||||
rj45_models = dict(enumerate([
|
||||
"RJ45_MODEL_LINKED",
|
||||
"RJ45_MODEL_WIRELESS",
|
||||
"RJ45_MODEL_INSTALLED",
|
||||
]))
|
||||
|
||||
enumdict(rj45_models)
|
||||
|
||||
# Link Message TLV Types
|
||||
|
||||
link_tlvs = {
|
||||
0x01: "CORE_TLV_LINK_N1NUMBER",
|
||||
0x02: "CORE_TLV_LINK_N2NUMBER",
|
||||
0x03: "CORE_TLV_LINK_DELAY",
|
||||
0x04: "CORE_TLV_LINK_BW",
|
||||
0x05: "CORE_TLV_LINK_PER",
|
||||
0x06: "CORE_TLV_LINK_DUP",
|
||||
0x07: "CORE_TLV_LINK_JITTER",
|
||||
0x08: "CORE_TLV_LINK_MER",
|
||||
0x09: "CORE_TLV_LINK_BURST",
|
||||
CORE_TLV_NODE_SESSION: "CORE_TLV_LINK_SESSION",
|
||||
0x10: "CORE_TLV_LINK_MBURST",
|
||||
0x20: "CORE_TLV_LINK_TYPE",
|
||||
0x21: "CORE_TLV_LINK_GUIATTR",
|
||||
0x23: "CORE_TLV_LINK_EMUID",
|
||||
0x24: "CORE_TLV_LINK_NETID",
|
||||
0x25: "CORE_TLV_LINK_KEY",
|
||||
0x30: "CORE_TLV_LINK_IF1NUM",
|
||||
0x31: "CORE_TLV_LINK_IF1IP4",
|
||||
0x32: "CORE_TLV_LINK_IF1IP4MASK",
|
||||
0x33: "CORE_TLV_LINK_IF1MAC",
|
||||
0x34: "CORE_TLV_LINK_IF1IP6",
|
||||
0x35: "CORE_TLV_LINK_IF1IP6MASK",
|
||||
0x36: "CORE_TLV_LINK_IF2NUM",
|
||||
0x37: "CORE_TLV_LINK_IF2IP4",
|
||||
0x38: "CORE_TLV_LINK_IF2IP4MASK",
|
||||
0x39: "CORE_TLV_LINK_IF2MAC",
|
||||
0x40: "CORE_TLV_LINK_IF2IP6",
|
||||
0x41: "CORE_TLV_LINK_IF2IP6MASK",
|
||||
0x50: "CORE_TLV_LINK_OPAQUE",
|
||||
}
|
||||
|
||||
enumdict(link_tlvs)
|
||||
|
||||
link_types = dict(enumerate([
|
||||
"CORE_LINK_WIRELESS",
|
||||
"CORE_LINK_WIRED",
|
||||
]))
|
||||
|
||||
enumdict(link_types)
|
||||
|
||||
# Execute Message TLV Types
|
||||
|
||||
exec_tlvs = {
|
||||
0x01: "CORE_TLV_EXEC_NODE",
|
||||
0x02: "CORE_TLV_EXEC_NUM",
|
||||
0x03: "CORE_TLV_EXEC_TIME",
|
||||
0x04: "CORE_TLV_EXEC_CMD",
|
||||
0x05: "CORE_TLV_EXEC_RESULT",
|
||||
0x06: "CORE_TLV_EXEC_STATUS",
|
||||
CORE_TLV_NODE_SESSION: "CORE_TLV_EXEC_SESSION",
|
||||
}
|
||||
|
||||
enumdict(exec_tlvs)
|
||||
|
||||
# Register Message TLV Types
|
||||
|
||||
reg_tlvs = {
|
||||
0x01: "CORE_TLV_REG_WIRELESS",
|
||||
0x02: "CORE_TLV_REG_MOBILITY",
|
||||
0x03: "CORE_TLV_REG_UTILITY",
|
||||
0x04: "CORE_TLV_REG_EXECSRV",
|
||||
0x05: "CORE_TLV_REG_GUI",
|
||||
0x06: "CORE_TLV_REG_EMULSRV",
|
||||
CORE_TLV_NODE_SESSION: "CORE_TLV_REG_SESSION",
|
||||
}
|
||||
|
||||
enumdict(reg_tlvs)
|
||||
|
||||
# Configuration Message TLV Types
|
||||
|
||||
conf_tlvs = {
|
||||
0x01: "CORE_TLV_CONF_NODE",
|
||||
0x02: "CORE_TLV_CONF_OBJ",
|
||||
0x03: "CORE_TLV_CONF_TYPE",
|
||||
0x04: "CORE_TLV_CONF_DATA_TYPES",
|
||||
0x05: "CORE_TLV_CONF_VALUES",
|
||||
0x06: "CORE_TLV_CONF_CAPTIONS",
|
||||
0x07: "CORE_TLV_CONF_BITMAP",
|
||||
0x08: "CORE_TLV_CONF_POSSIBLE_VALUES",
|
||||
0x09: "CORE_TLV_CONF_GROUPS",
|
||||
CORE_TLV_NODE_SESSION: "CORE_TLV_CONF_SESSION",
|
||||
CORE_TLV_NODE_NETID: "CORE_TLV_CONF_NETID",
|
||||
0x50: "CORE_TLV_CONF_OPAQUE",
|
||||
}
|
||||
|
||||
enumdict(conf_tlvs)
|
||||
|
||||
conf_flags = {
|
||||
0x00: "CONF_TYPE_FLAGS_NONE",
|
||||
0x01: "CONF_TYPE_FLAGS_REQUEST",
|
||||
0x02: "CONF_TYPE_FLAGS_UPDATE",
|
||||
0x03: "CONF_TYPE_FLAGS_RESET",
|
||||
}
|
||||
|
||||
enumdict(conf_flags)
|
||||
|
||||
conf_data_types = {
|
||||
0x01: "CONF_DATA_TYPE_UINT8",
|
||||
0x02: "CONF_DATA_TYPE_UINT16",
|
||||
0x03: "CONF_DATA_TYPE_UINT32",
|
||||
0x04: "CONF_DATA_TYPE_UINT64",
|
||||
0x05: "CONF_DATA_TYPE_INT8",
|
||||
0x06: "CONF_DATA_TYPE_INT16",
|
||||
0x07: "CONF_DATA_TYPE_INT32",
|
||||
0x08: "CONF_DATA_TYPE_INT64",
|
||||
0x09: "CONF_DATA_TYPE_FLOAT",
|
||||
0x0A: "CONF_DATA_TYPE_STRING",
|
||||
0x0B: "CONF_DATA_TYPE_BOOL",
|
||||
}
|
||||
|
||||
enumdict(conf_data_types)
|
||||
|
||||
# File Message TLV Types
|
||||
|
||||
file_tlvs = {
|
||||
0x01: "CORE_TLV_FILE_NODE",
|
||||
0x02: "CORE_TLV_FILE_NAME",
|
||||
0x03: "CORE_TLV_FILE_MODE",
|
||||
0x04: "CORE_TLV_FILE_NUM",
|
||||
0x05: "CORE_TLV_FILE_TYPE",
|
||||
0x06: "CORE_TLV_FILE_SRCNAME",
|
||||
CORE_TLV_NODE_SESSION: "CORE_TLV_FILE_SESSION",
|
||||
0x10: "CORE_TLV_FILE_DATA",
|
||||
0x11: "CORE_TLV_FILE_CMPDATA",
|
||||
}
|
||||
|
||||
enumdict(file_tlvs)
|
||||
|
||||
# Interface Message TLV Types
|
||||
|
||||
iface_tlvs = {
|
||||
0x01: "CORE_TLV_IFACE_NODE",
|
||||
0x02: "CORE_TLV_IFACE_NUM",
|
||||
0x03: "CORE_TLV_IFACE_NAME",
|
||||
0x04: "CORE_TLV_IFACE_IPADDR",
|
||||
0x05: "CORE_TLV_IFACE_MASK",
|
||||
0x06: "CORE_TLV_IFACE_MACADDR",
|
||||
0x07: "CORE_TLV_IFACE_IP6ADDR",
|
||||
0x08: "CORE_TLV_IFACE_IP6MASK",
|
||||
0x09: "CORE_TLV_IFACE_TYPE",
|
||||
CORE_TLV_NODE_SESSION: "CORE_TLV_IFACE_SESSION",
|
||||
0x0B: "CORE_TLV_IFACE_STATE",
|
||||
CORE_TLV_NODE_EMUID: "CORE_TLV_IFACE_EMUID",
|
||||
CORE_TLV_NODE_NETID: "CORE_TLV_IFACE_NETID",
|
||||
}
|
||||
|
||||
enumdict(iface_tlvs)
|
||||
|
||||
# Event Message TLV Types
|
||||
|
||||
event_tlvs = {
|
||||
0x01: "CORE_TLV_EVENT_NODE",
|
||||
0x02: "CORE_TLV_EVENT_TYPE",
|
||||
0x03: "CORE_TLV_EVENT_NAME",
|
||||
0x04: "CORE_TLV_EVENT_DATA",
|
||||
0x05: "CORE_TLV_EVENT_TIME",
|
||||
CORE_TLV_NODE_SESSION: "CORE_TLV_EVENT_SESSION",
|
||||
}
|
||||
|
||||
enumdict(event_tlvs)
|
||||
|
||||
event_types = dict(enumerate([
|
||||
"CORE_EVENT_NONE",
|
||||
"CORE_EVENT_DEFINITION_STATE",
|
||||
"CORE_EVENT_CONFIGURATION_STATE",
|
||||
"CORE_EVENT_INSTANTIATION_STATE",
|
||||
"CORE_EVENT_RUNTIME_STATE",
|
||||
"CORE_EVENT_DATACOLLECT_STATE",
|
||||
"CORE_EVENT_SHUTDOWN_STATE",
|
||||
"CORE_EVENT_START",
|
||||
"CORE_EVENT_STOP",
|
||||
"CORE_EVENT_PAUSE",
|
||||
"CORE_EVENT_RESTART",
|
||||
"CORE_EVENT_FILE_OPEN",
|
||||
"CORE_EVENT_FILE_SAVE",
|
||||
"CORE_EVENT_SCHEDULED",
|
||||
]))
|
||||
|
||||
enumdict(event_types)
|
||||
|
||||
# Session Message TLV Types
|
||||
|
||||
session_tlvs = {
|
||||
0x01: "CORE_TLV_SESS_NUMBER",
|
||||
0x02: "CORE_TLV_SESS_NAME",
|
||||
0x03: "CORE_TLV_SESS_FILE",
|
||||
0x04: "CORE_TLV_SESS_NODECOUNT",
|
||||
0x05: "CORE_TLV_SESS_DATE",
|
||||
0x06: "CORE_TLV_SESS_THUMB",
|
||||
0x07: "CORE_TLV_SESS_USER",
|
||||
0x0A: "CORE_TLV_SESS_OPAQUE",
|
||||
}
|
||||
|
||||
enumdict(session_tlvs)
|
||||
|
||||
# Exception Message TLV Types
|
||||
|
||||
exception_tlvs = {
|
||||
0x01: "CORE_TLV_EXCP_NODE",
|
||||
0x02: "CORE_TLV_EXCP_SESSION",
|
||||
0x03: "CORE_TLV_EXCP_LEVEL",
|
||||
0x04: "CORE_TLV_EXCP_SOURCE",
|
||||
0x05: "CORE_TLV_EXCP_DATE",
|
||||
0x06: "CORE_TLV_EXCP_TEXT",
|
||||
0x0A: "CORE_TLV_EXCP_OPAQUE",
|
||||
}
|
||||
|
||||
enumdict(exception_tlvs)
|
||||
|
||||
exception_levels = dict(enumerate([
|
||||
"CORE_EXCP_LEVEL_NONE",
|
||||
"CORE_EXCP_LEVEL_FATAL",
|
||||
"CORE_EXCP_LEVEL_ERROR",
|
||||
"CORE_EXCP_LEVEL_WARNING",
|
||||
"CORE_EXCP_LEVEL_NOTICE",
|
||||
]))
|
||||
|
||||
enumdict(exception_levels)
|
||||
|
||||
del enumdict
|
858 daemon/core/broker.py Normal file
@ -0,0 +1,858 @@
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2013 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
broker.py: definition of CoreBroker class that is part of the
|
||||
pycore session object. Handles distributing parts of the emulation out to
|
||||
other emulation servers. The broker is consulted during the
|
||||
CoreRequestHandler.handlemsg() loop to determine if messages should be handled
|
||||
locally or forwarded on to another emulation server.
|
||||
'''
|
||||
|
||||
import os, socket, select, threading, sys
|
||||
from core.api import coreapi
|
||||
from core.coreobj import PyCoreNode, PyCoreNet
|
||||
from core.emane.nodes import EmaneNet
|
||||
from core.phys.pnodes import PhysicalNode
|
||||
from core.misc.ipaddr import IPAddr
|
||||
from core.conf import ConfigurableManager
|
||||
if os.uname()[0] == "Linux":
|
||||
from core.netns.vif import GreTap
|
||||
from core.netns.vnet import GreTapBridge
|
||||
|
||||
|
||||
class CoreBroker(ConfigurableManager):
|
||||
''' Member of pycore session class for handling global emulation server
|
||||
data.
|
||||
'''
|
||||
_name = "broker"
|
||||
_type = coreapi.CORE_TLV_REG_UTILITY
|
||||
|
||||
def __init__(self, session, verbose = False):
|
||||
ConfigurableManager.__init__(self, session)
|
||||
self.session_id_master = None
|
||||
self.myip = None
|
||||
self.verbose = verbose
|
||||
# dict containing tuples of (host, port, sock)
|
||||
self.servers = {}
|
||||
self.servers_lock = threading.Lock()
|
||||
self.addserver("localhost", None, None)
|
||||
# dict containing node number to server name mapping
|
||||
self.nodemap = {}
|
||||
# this lock also protects self.nodecounts
|
||||
self.nodemap_lock = threading.Lock()
|
||||
# reference counts of nodes on servers
|
||||
self.nodecounts = { }
|
||||
self.bootcount = 0
|
||||
# list of node numbers that are link-layer nodes (networks)
|
||||
self.nets = []
|
||||
# list of node numbers that are PhysicalNode nodes
|
||||
self.phys = []
|
||||
# allows for other message handlers to process API messages (e.g. EMANE)
|
||||
self.handlers = ()
|
||||
# dict with tunnel key to tunnel device mapping
|
||||
self.tunnels = {}
|
||||
self.dorecvloop = False
|
||||
self.recvthread = None
|
||||
|
||||
def startup(self):
|
||||
''' Build tunnels between network-layer nodes now that all node
|
||||
and link information has been received; called when session
|
||||
enters the instantiation state.
|
||||
'''
|
||||
self.addnettunnels()
|
||||
self.writeservers()
|
||||
|
||||
def shutdown(self):
|
||||
''' Close all active sockets; called when the session enters the
|
||||
data collect state
|
||||
'''
|
||||
with self.servers_lock:
|
||||
while len(self.servers) > 0:
|
||||
(server, v) = self.servers.popitem()
|
||||
(host, port, sock) = v
|
||||
if sock is None:
|
||||
continue
|
||||
if self.verbose:
|
||||
self.session.info("closing connection with %s @ %s:%s" % \
|
||||
(server, host, port))
|
||||
sock.close()
|
||||
self.reset()
|
||||
self.dorecvloop = False
|
||||
if self.recvthread is not None:
|
||||
self.recvthread.join()
|
||||
|
||||
def reset(self):
|
||||
''' Reset to initial state.
|
||||
'''
|
||||
self.nodemap_lock.acquire()
|
||||
self.nodemap.clear()
|
||||
for server in self.nodecounts:
|
||||
if self.nodecounts[server] < 1:
|
||||
self.delserver(server)
|
||||
self.nodecounts.clear()
|
||||
self.bootcount = 0
|
||||
self.nodemap_lock.release()
|
||||
del self.nets[:]
|
||||
del self.phys[:]
|
||||
while len(self.tunnels) > 0:
|
||||
(key, gt) = self.tunnels.popitem()
|
||||
gt.shutdown()
|
||||
|
||||
def startrecvloop(self):
|
||||
''' Spawn the recvloop() thread if it hasn't been already started.
|
||||
'''
|
||||
if self.recvthread is not None:
|
||||
if self.recvthread.isAlive():
|
||||
return
|
||||
else:
|
||||
self.recvthread.join()
|
||||
# start reading data from connected sockets
|
||||
self.dorecvloop = True
|
||||
self.recvthread = threading.Thread(target = self.recvloop)
|
||||
self.recvthread.daemon = True
|
||||
self.recvthread.start()
|
||||
|
||||
def recvloop(self):
|
||||
''' Thread target that receives messages from server sockets.
|
||||
'''
|
||||
self.dorecvloop = True
|
||||
# note: this loop continues after emulation is stopped,
|
||||
# even with 0 servers
|
||||
while self.dorecvloop:
|
||||
rlist = []
|
||||
with self.servers_lock:
|
||||
# build a socket list for select call
|
||||
for name in self.servers:
|
||||
(h, p, sock) = self.servers[name]
|
||||
if sock is not None:
|
||||
rlist.append(sock.fileno())
|
||||
r, w, x = select.select(rlist, [], [], 1.0)
|
||||
for sockfd in r:
|
||||
try:
|
||||
(h, p, sock, name) = self.getserverbysock(sockfd)
|
||||
except KeyError:
|
||||
# servers may have changed; loop again
|
||||
break
|
||||
rcvlen = self.recv(sock, h)
|
||||
if rcvlen == 0:
|
||||
if self.verbose:
|
||||
self.session.info("connection with %s @ %s:%s" \
|
||||
" has closed" % (name, h, p))
|
||||
self.servers[name] = (h, p, None)
|
||||
|
||||
|
||||
def recv(self, sock, host):
|
||||
''' Receive data on an emulation server socket and broadcast it to
|
||||
all connected session handlers. Returns the length of data received
|
||||
and forwarded. Return value of zero indicates the socket has closed
|
||||
and should be removed from the self.servers dict.
|
||||
'''
|
||||
msghdr = sock.recv(coreapi.CoreMessage.hdrsiz)
|
||||
if len(msghdr) == 0:
|
||||
# server disconnected
|
||||
sock.close()
|
||||
return 0
|
||||
if len(msghdr) != coreapi.CoreMessage.hdrsiz:
|
||||
if self.verbose:
|
||||
self.session.info("warning: broker received not enough data " \
|
||||
"len=%s" % len(msghdr))
|
||||
return len(msghdr)
|
||||
|
||||
msgtype, msgflags, msglen = coreapi.CoreMessage.unpackhdr(msghdr)
|
||||
msgdata = sock.recv(msglen)
|
||||
data = msghdr + msgdata
|
||||
count = None
|
||||
# snoop exec response for remote interactive TTYs
|
||||
if msgtype == coreapi.CORE_API_EXEC_MSG and \
|
||||
msgflags & coreapi.CORE_API_TTY_FLAG:
|
||||
data = self.fixupremotetty(msghdr, msgdata, host)
|
||||
elif msgtype == coreapi.CORE_API_NODE_MSG:
|
||||
# snoop node delete response to decrement node counts
|
||||
if msgflags & coreapi.CORE_API_DEL_FLAG:
|
||||
msg = coreapi.CoreNodeMessage(msgflags, msghdr, msgdata)
|
||||
nodenum = msg.gettlv(coreapi.CORE_TLV_NODE_NUMBER)
|
||||
if nodenum is not None:
|
||||
count = self.delnodemap(sock, nodenum)
|
||||
# snoop node add response to increment booted node count
|
||||
# (only CoreNodes send these response messages)
|
||||
elif msgflags & \
|
||||
(coreapi.CORE_API_ADD_FLAG | coreapi.CORE_API_LOC_FLAG):
|
||||
self.incrbootcount()
|
||||
self.session.checkruntime()
|
||||
|
||||
self.session.broadcastraw(None, data)
|
||||
if count is not None and count < 1:
|
||||
return 0
|
||||
else:
|
||||
return len(data)
|
||||
|
||||
def addserver(self, name, host, port):
|
||||
''' Add a new server, and try to connect to it. If we're already
|
||||
connected to this (host, port), then leave it alone. When host,port
|
||||
is None, do not try to connect.
|
||||
'''
|
||||
self.servers_lock.acquire()
|
||||
if name in self.servers:
|
||||
(oldhost, oldport, sock) = self.servers[name]
|
||||
if host == oldhost and port == oldport:
|
||||
# leave this socket connected
|
||||
if sock is not None:
|
||||
self.servers_lock.release()
|
||||
return
|
||||
if self.verbose and host is not None and sock is not None:
|
||||
self.session.info("closing connection with %s @ %s:%s" % \
|
||||
(name, host, port))
|
||||
if sock is not None:
|
||||
sock.close()
|
||||
self.servers_lock.release()
|
||||
if self.verbose and host is not None:
|
||||
self.session.info("adding server %s @ %s:%s" % (name, host, port))
|
||||
if host is None:
|
||||
sock = None
|
||||
else:
|
||||
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
||||
#sock.setblocking(0)
|
||||
#error = sock.connect_ex((host, port))
|
||||
try:
|
||||
sock.connect((host, port))
|
||||
self.startrecvloop()
|
||||
except Exception, e:
|
||||
self.session.warn("error connecting to server %s:%s:\n\t%s" % \
|
||||
(host, port, e))
|
||||
sock.close()
|
||||
sock = None
|
||||
self.servers_lock.acquire()
|
||||
self.servers[name] = (host, port, sock)
|
||||
self.servers_lock.release()
|
||||
|
||||
def delserver(self, name):
|
||||
''' Remove a server and hang up any connection.
|
||||
'''
|
||||
self.servers_lock.acquire()
|
||||
if name not in self.servers:
|
||||
self.servers_lock.release()
|
||||
return
|
||||
(host, port, sock) = self.servers.pop(name)
|
||||
if sock is not None:
|
||||
if self.verbose:
|
||||
self.session.info("closing connection with %s @ %s:%s" % \
|
||||
(name, host, port))
|
||||
sock.close()
|
||||
self.servers_lock.release()
|
||||
|
||||
def getserver(self, name):
|
||||
''' Return the (host, port, sock) tuple, or raise a KeyError exception.
|
||||
'''
|
||||
if name not in self.servers:
|
||||
raise KeyError, "emulation server %s not found" % name
|
||||
return self.servers[name]
|
||||
|
||||
def getserverbysock(self, sockfd):
|
||||
''' Return a (host, port, sock, name) tuple based on socket file
|
||||
descriptor, or raise a KeyError exception.
|
||||
'''
|
||||
with self.servers_lock:
|
||||
for name in self.servers:
|
||||
(host, port, sock) = self.servers[name]
|
||||
if sock is None:
|
||||
continue
|
||||
if sock.fileno() == sockfd:
|
||||
return (host, port, sock, name)
|
||||
raise KeyError, "socket fd %s not found" % sockfd
|
||||
|
||||
def getserverlist(self):
|
||||
''' Return the list of server names (keys from self.servers).
|
||||
'''
|
||||
with self.servers_lock:
|
||||
serverlist = sorted(self.servers.keys())
|
||||
return serverlist
|
||||
|
||||
def tunnelkey(self, n1num, n2num):
|
||||
''' Compute a 32-bit key used to uniquely identify a GRE tunnel.
|
||||
The hash(n1num), hash(n2num) values are used, so node numbers may be
|
||||
None or string values (used for e.g. "ctrlnet").
|
||||
'''
|
||||
sid = self.session_id_master
|
||||
if sid is None:
|
||||
# this is the master session
|
||||
sid = self.session.sessionid
|
||||
|
||||
key = (sid << 16) | hash(n1num) | (hash(n2num) << 8)
|
||||
return key & 0xFFFFFFFF
|
||||
|
||||
def addtunnel(self, remoteip, n1num, n2num, localnum):
|
||||
''' Add a new GreTapBridge between nodes on two different machines.
|
||||
'''
|
||||
key = self.tunnelkey(n1num, n2num)
|
||||
if localnum == n2num:
|
||||
remotenum = n1num
|
||||
else:
|
||||
remotenum = n2num
|
||||
if key in self.tunnels.keys():
|
||||
self.session.warn("tunnel with key %s (%s-%s) already exists!" % \
|
||||
(key, n1num, n2num))
|
||||
else:
|
||||
objid = key & ((1<<16)-1)
|
||||
self.session.info("Adding tunnel for %s-%s to %s with key %s" % \
|
||||
(n1num, n2num, remoteip, key))
|
||||
if localnum in self.phys:
|
||||
# no bridge is needed on physical nodes; use the GreTap directly
|
||||
gt = GreTap(node=None, name=None, session=self.session,
|
||||
remoteip=remoteip, key=key)
|
||||
else:
|
||||
gt = self.session.addobj(cls = GreTapBridge, objid = objid,
|
||||
policy="ACCEPT", remoteip=remoteip, key = key)
|
||||
gt.localnum = localnum
|
||||
gt.remotenum = remotenum
|
||||
self.tunnels[key] = gt
|
||||
|
||||
def addnettunnels(self):
|
||||
''' Add GreTaps between network devices on different machines.
|
||||
The GreTapBridge is not used since that would add an extra bridge.
|
||||
'''
|
||||
for n in self.nets:
|
||||
self.addnettunnel(n)
|
||||
|
||||
def addnettunnel(self, n):
|
||||
try:
|
||||
net = self.session.obj(n)
|
||||
except KeyError:
|
||||
raise KeyError, "network node %s not found" % n
|
||||
# add other nets here that do not require tunnels
|
||||
if isinstance(net, EmaneNet):
|
||||
return None
|
||||
|
||||
servers = self.getserversbynode(n)
|
||||
if len(servers) < 2:
|
||||
return None
|
||||
hosts = []
|
||||
for server in servers:
|
||||
(host, port, sock) = self.getserver(server)
|
||||
if host is None:
|
||||
continue
|
||||
hosts.append(host)
|
||||
if len(hosts) == 0:
|
||||
# get IP address from API message sender (master)
|
||||
self.session._handlerslock.acquire()
|
||||
for h in self.session._handlers:
|
||||
if h.client_address != "":
|
||||
hosts.append(h.client_address[0])
|
||||
self.session._handlerslock.release()
|
||||
|
||||
r = []
|
||||
for host in hosts:
|
||||
if self.myip:
|
||||
# we are the remote emulation server
|
||||
myip = self.myip
|
||||
else:
|
||||
# we are the session master
|
||||
myip = host
|
||||
key = self.tunnelkey(n, IPAddr.toint(myip))
|
||||
if key in self.tunnels.keys():
|
||||
continue
|
||||
self.session.info("Adding tunnel for net %s to %s with key %s" % \
|
||||
(n, host, key))
|
||||
gt = GreTap(node=None, name=None, session=self.session,
|
||||
remoteip=host, key=key)
|
||||
self.tunnels[key] = gt
|
||||
r.append(gt)
|
||||
# attaching to net will later allow gt to be destroyed
|
||||
# during net.shutdown()
|
||||
net.attach(gt)
|
||||
return r
|
||||
|
||||
def deltunnel(self, n1num, n2num):
|
||||
''' Cleanup of the GreTapBridge.
|
||||
'''
|
||||
key = self.tunnelkey(n1num, n2num)
|
||||
try:
|
||||
gt = self.tunnels.pop(key)
|
||||
except KeyError:
|
||||
gt = None
|
||||
if gt:
|
||||
self.session.delobj(gt.objid)
|
||||
del gt
|
||||
|
||||
def gettunnel(self, n1num, n2num):
|
||||
''' Return the GreTap between two nodes if it exists.
|
||||
'''
|
||||
key = self.tunnelkey(n1num, n2num)
|
||||
if key in self.tunnels.keys():
|
||||
return self.tunnels[key]
|
||||
else:
|
||||
return None
|
||||
|
||||
def addnodemap(self, server, nodenum):
|
||||
''' Record a node number to emulation server mapping.
|
||||
'''
|
||||
self.nodemap_lock.acquire()
|
||||
if nodenum in self.nodemap:
|
||||
if server in self.nodemap[nodenum]:
|
||||
self.nodemap_lock.release()
|
||||
return
|
||||
self.nodemap[nodenum].append(server)
|
||||
else:
|
||||
self.nodemap[nodenum] = [server,]
|
||||
if server in self.nodecounts:
|
||||
self.nodecounts[server] += 1
|
||||
else:
|
||||
self.nodecounts[server] = 1
|
||||
self.nodemap_lock.release()
|
||||
|
||||
def delnodemap(self, sock, nodenum):
|
||||
''' Remove a node number to emulation server mapping.
|
||||
Return the number of nodes left on this server.
|
||||
'''
|
||||
self.nodemap_lock.acquire()
|
||||
count = None
|
||||
if nodenum not in self.nodemap:
|
||||
self.nodemap_lock.release()
|
||||
return count
|
||||
found = False
|
||||
for server in self.nodemap[nodenum]:
|
||||
(host, port, srvsock) = self.getserver(server)
|
||||
if srvsock == sock:
|
||||
found = True
|
||||
break
|
||||
if server in self.nodecounts:
|
||||
count = self.nodecounts[server]
|
||||
if found:
|
||||
self.nodemap[nodenum].remove(server)
|
||||
if server in self.nodecounts:
|
||||
count -= 1
|
||||
self.nodecounts[server] = count
|
||||
self.nodemap_lock.release()
|
||||
return count
|
||||
|
||||
def incrbootcount(self):
|
||||
''' Count a node that has booted.
|
||||
'''
|
||||
self.bootcount += 1
|
||||
return self.bootcount
|
||||
|
||||
def getbootcount(self):
|
||||
''' Return the number of booted nodes.
|
||||
'''
|
||||
return self.bootcount
|
||||
|
||||
def getserversbynode(self, nodenum):
|
||||
''' Retrieve a list of emulation servers given a node number.
|
||||
'''
|
||||
self.nodemap_lock.acquire()
|
||||
if nodenum not in self.nodemap:
|
||||
self.nodemap_lock.release()
|
||||
return []
|
||||
r = self.nodemap[nodenum]
|
||||
self.nodemap_lock.release()
|
||||
return r
|
||||
|
||||
def addnet(self, nodenum):
|
||||
''' Add a node number to the list of link-layer nodes.
|
||||
'''
|
||||
if nodenum not in self.nets:
|
||||
self.nets.append(nodenum)
|
||||
|
||||
def addphys(self, nodenum):
|
||||
''' Add a node number to the list of physical nodes.
|
||||
'''
|
||||
if nodenum not in self.phys:
|
||||
self.phys.append(nodenum)
|
||||
|
||||
def configure_reset(self, msg):
|
||||
''' Ignore reset messages, because node delete responses may still
|
||||
arrive and require the use of nodecounts.
|
||||
'''
|
||||
return None
|
||||
|
||||
def configure_values(self, msg, values):
|
||||
''' Receive configuration message with a list of server:host:port
|
||||
combinations that we'll need to connect with.
|
||||
'''
|
||||
objname = msg.gettlv(coreapi.CORE_TLV_CONF_OBJ)
|
||||
conftype = msg.gettlv(coreapi.CORE_TLV_CONF_TYPE)
|
||||
|
||||
if values is None:
|
||||
self.session.info("emulation server data missing")
|
||||
return None
|
||||
values = values.split('|')
|
||||
# string of "server:ip:port,server:ip:port,..."
|
||||
serverstrings = values[0]
|
||||
server_list = serverstrings.split(',')
|
||||
for server in server_list:
|
||||
server_items = server.split(':')
|
||||
(name, host, port) = server_items[:3]
|
||||
if host == '':
|
||||
host = None
|
||||
if port == '':
|
||||
port = None
|
||||
else:
|
||||
port = int(port)
|
||||
sid = msg.gettlv(coreapi.CORE_TLV_CONF_SESSION)
|
||||
if sid is not None:
|
||||
# receive session ID and my IP from master
|
||||
self.session_id_master = int(sid.split('|')[0])
|
||||
self.myip = host
|
||||
host = None
|
||||
port = None
|
||||
# this connects to the server immediately; maybe we should wait
|
||||
# or spin off a new "client" thread here
|
||||
self.addserver(name, host, port)
|
||||
self.setupserver(name)
|
||||
return None
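# Illustrative example only (not in the original file): the Configuration
# message value string parsed above. Server names, addresses and ports here
# are made up for the example.
#
#   "core2:10.0.0.2:4038,core3:10.0.0.3:4038"
#
# Each entry becomes an addserver()/setupserver() call; an empty host or port
# (e.g. "localhost::") marks the local server and no connection is attempted.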
|
||||
|
||||
def handlemsg(self, msg):
|
||||
''' Handle an API message. Determine whether this needs to be handled
|
||||
by the local server or forwarded on to another one.
|
||||
Returns True when message does not need to be handled locally,
|
||||
and performs forwarding if required.
|
||||
Returning False indicates this message should be handled locally.
|
||||
'''
|
||||
serverlist = []
|
||||
handle_locally = False
|
||||
# Do not forward messages when in definition state
|
||||
# (for e.g. configuring services)
|
||||
if self.session.getstate() == coreapi.CORE_EVENT_DEFINITION_STATE:
|
||||
handle_locally = True
|
||||
return not handle_locally
|
||||
# Decide whether message should be handled locally or forwarded, or both
|
||||
if msg.msgtype == coreapi.CORE_API_NODE_MSG:
|
||||
(handle_locally, serverlist) = self.handlenodemsg(msg)
|
||||
elif msg.msgtype == coreapi.CORE_API_EVENT_MSG:
|
||||
# broadcast events everywhere
|
||||
serverlist = self.getserverlist()
|
||||
elif msg.msgtype == coreapi.CORE_API_CONF_MSG:
|
||||
# broadcast location and services configuration everywhere
|
||||
confobj = msg.gettlv(coreapi.CORE_TLV_CONF_OBJ)
|
||||
if confobj == "location" or confobj == "services" or \
|
||||
confobj == "session":
|
||||
serverlist = self.getserverlist()
|
||||
elif msg.msgtype == coreapi.CORE_API_FILE_MSG:
|
||||
# broadcast hook scripts and custom service files everywhere
|
||||
filetype = msg.gettlv(coreapi.CORE_TLV_FILE_TYPE)
|
||||
if filetype is not None and \
|
||||
(filetype[:5] == "hook:" or filetype[:8] == "service:"):
|
||||
serverlist = self.getserverlist()
|
||||
|
||||
if msg.msgtype == coreapi.CORE_API_LINK_MSG:
|
||||
# prepare a serverlist from two node numbers in link message
|
||||
(handle_locally, serverlist, msg) = self.handlelinkmsg(msg)
|
||||
elif len(serverlist) == 0:
|
||||
# check for servers based on node numbers in all messages but link
|
||||
nn = msg.nodenumbers()
|
||||
if len(nn) == 0:
|
||||
return False
|
||||
serverlist = self.getserversbynode(nn[0])
|
||||
|
||||
if len(serverlist) == 0:
|
||||
handle_locally = True
|
||||
|
||||
# allow other handlers to process this message
|
||||
# (this is used by e.g. EMANE to use the link add message to keep counts
|
||||
# of interfaces on other servers)
|
||||
for handler in self.handlers:
|
||||
handler(msg)
|
||||
|
||||
# Perform any message forwarding
|
||||
handle_locally = self.forwardmsg(msg, serverlist, handle_locally)
|
||||
return not handle_locally
|
||||
|
||||
def setupserver(self, server):
|
||||
''' Send the appropriate API messages for configuring the specified
|
||||
emulation server.
|
||||
'''
|
||||
(host, port, sock) = self.getserver(server)
|
||||
if host is None or sock is None:
|
||||
return
|
||||
# communicate this session's current state to the server
|
||||
tlvdata = coreapi.CoreEventTlv.pack(coreapi.CORE_TLV_EVENT_TYPE,
|
||||
self.session.getstate())
|
||||
msg = coreapi.CoreEventMessage.pack(0, tlvdata)
|
||||
sock.send(msg)
|
||||
# send a Configuration message for the broker object and inform the
|
||||
# server of its local name
|
||||
tlvdata = ""
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_OBJ, "broker")
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_TYPE,
|
||||
coreapi.CONF_TYPE_FLAGS_UPDATE)
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_DATA_TYPES,
|
||||
(coreapi.CONF_DATA_TYPE_STRING,))
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_VALUES,
|
||||
"%s:%s:%s" % (server, host, port))
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_SESSION,
|
||||
"%s" % self.session.sessionid)
|
||||
msg = coreapi.CoreConfMessage.pack(0, tlvdata)
|
||||
sock.send(msg)
|
||||
|
||||
@staticmethod
|
||||
def fixupremotetty(msghdr, msgdata, host):
|
||||
''' When an interactive TTY request comes from the GUI, snoop the reply
|
||||
and add an SSH command to the appropriate remote server.
|
||||
'''
|
||||
msgtype, msgflags, msglen = coreapi.CoreMessage.unpackhdr(msghdr)
|
||||
msgcls = coreapi.msg_class(msgtype)
|
||||
msg = msgcls(msgflags, msghdr, msgdata)
|
||||
|
||||
nodenum = msg.gettlv(coreapi.CORE_TLV_EXEC_NODE)
|
||||
execnum = msg.gettlv(coreapi.CORE_TLV_EXEC_NUM)
|
||||
cmd = msg.gettlv(coreapi.CORE_TLV_EXEC_CMD)
|
||||
res = msg.gettlv(coreapi.CORE_TLV_EXEC_RESULT)
|
||||
|
||||
tlvdata = ""
|
||||
tlvdata += coreapi.CoreExecTlv.pack(coreapi.CORE_TLV_EXEC_NODE, nodenum)
|
||||
tlvdata += coreapi.CoreExecTlv.pack(coreapi.CORE_TLV_EXEC_NUM, execnum)
|
||||
tlvdata += coreapi.CoreExecTlv.pack(coreapi.CORE_TLV_EXEC_CMD, cmd)
|
||||
title = "\\\"CORE: n%s @ %s\\\"" % (nodenum, host)
|
||||
res = "ssh -X -f " + host + " xterm -e " + res
|
||||
tlvdata += coreapi.CoreExecTlv.pack(coreapi.CORE_TLV_EXEC_RESULT, res)
|
||||
|
||||
return coreapi.CoreExecMessage.pack(msgflags, tlvdata)
|
||||
|
||||
def handlenodemsg(self, msg):
|
||||
''' Determine and return the servers to which this node message should
|
||||
be forwarded. Also keep track of link-layer nodes and the mapping of
|
||||
nodes to servers.
|
||||
'''
|
||||
serverlist = []
|
||||
handle_locally = False
|
||||
serverfiletxt = None
|
||||
# snoop Node Message for emulation server TLV and record mapping
|
||||
n = msg.tlvdata[coreapi.CORE_TLV_NODE_NUMBER]
|
||||
# replicate link-layer nodes on all servers
|
||||
nodetype = msg.gettlv(coreapi.CORE_TLV_NODE_TYPE)
|
||||
if nodetype is not None:
|
||||
try:
|
||||
nodecls = coreapi.node_class(nodetype)
|
||||
except KeyError:
|
||||
self.session.warn("broker invalid node type %s" % nodetype)
|
||||
return (False, serverlist)
|
||||
if nodecls is None:
|
||||
self.session.warn("broker unimplemented node type %s" % nodetype)
|
||||
return (False, serverlist)
|
||||
if issubclass(nodecls, PyCoreNet) and \
|
||||
nodetype != coreapi.CORE_NODE_WLAN:
|
||||
# network node replicated on all servers; could be optimized
|
||||
# don't replicate WLANs, because ebtables rules won't work
|
||||
serverlist = self.getserverlist()
|
||||
handle_locally = True
|
||||
self.addnet(n)
|
||||
for server in serverlist:
|
||||
self.addnodemap(server, n)
|
||||
# do not record server name for networks since network
|
||||
# nodes are replicated across all servers
|
||||
return (handle_locally, serverlist)
|
||||
if issubclass(nodecls, PyCoreNet) and \
|
||||
nodetype == coreapi.CORE_NODE_WLAN:
|
||||
# special case where remote WLANs not in session._objs, and no
|
||||
# node response message received, so they are counted here
|
||||
if msg.gettlv(coreapi.CORE_TLV_NODE_EMUSRV) is not None:
|
||||
self.incrbootcount()
|
||||
elif issubclass(nodecls, PyCoreNode):
|
||||
name = msg.gettlv(coreapi.CORE_TLV_NODE_NAME)
|
||||
if name:
|
||||
serverfiletxt = "%s %s %s" % (n, name, nodecls)
|
||||
if issubclass(nodecls, PhysicalNode):
|
||||
# remember physical nodes
|
||||
self.addphys(n)
|
||||
|
||||
# emulation server TLV specifies server
|
||||
server = msg.gettlv(coreapi.CORE_TLV_NODE_EMUSRV)
|
||||
if server is not None:
|
||||
self.addnodemap(server, n)
|
||||
if server not in serverlist:
|
||||
serverlist.append(server)
|
||||
if serverfiletxt and self.session.master:
|
||||
self.writenodeserver(serverfiletxt, server)
|
||||
# hook to update coordinates of physical nodes
|
||||
if n in self.phys:
|
||||
self.session.mobility.physnodeupdateposition(msg)
|
||||
return (handle_locally, serverlist)
|
||||
|
||||
def handlelinkmsg(self, msg):
|
||||
''' Determine and return the servers to which this link message should
|
||||
be forwarded. Also build tunnels between different servers or add
|
||||
opaque data to the link message before forwarding.
|
||||
'''
|
||||
serverlist = []
|
||||
handle_locally = False
|
||||
|
||||
# determine link message destination using non-network nodes
|
||||
nn = msg.nodenumbers()
|
||||
if nn[0] in self.nets:
|
||||
if nn[1] in self.nets:
|
||||
# two network nodes linked together - prevent loops caused by
|
||||
# the automatic tunnelling
|
||||
handle_locally = True
|
||||
else:
|
||||
serverlist = self.getserversbynode(nn[1])
|
||||
elif nn[1] in self.nets:
|
||||
serverlist = self.getserversbynode(nn[0])
|
||||
else:
|
||||
serverset1 = set(self.getserversbynode(nn[0]))
|
||||
serverset2 = set(self.getserversbynode(nn[1]))
|
||||
# nodes are on two different servers, build tunnels as needed
|
||||
if serverset1 != serverset2:
|
||||
localn = None
|
||||
if len(serverset1) == 0 or len(serverset2) == 0:
|
||||
handle_locally = True
|
||||
serverlist = list(serverset1 | serverset2)
|
||||
host = None
|
||||
# get the IP of remote server and decide which node number
|
||||
# is for a local node
|
||||
for server in serverlist:
|
||||
(host, port, sock) = self.getserver(server)
|
||||
if host is None:
|
||||
# named server is local
|
||||
handle_locally = True
|
||||
if server in serverset1:
|
||||
localn = nn[0]
|
||||
else:
|
||||
localn = nn[1]
|
||||
if handle_locally and localn is None:
|
||||
# having no local node at this point indicates local node is
|
||||
# the one with the empty serverset
|
||||
if len(serverset1) == 0:
|
||||
localn = nn[0]
|
||||
elif len(serverset2) == 0:
|
||||
localn = nn[1]
|
||||
if host is None:
|
||||
host = self.getlinkendpoint(msg, localn == nn[0])
|
||||
if localn is None:
|
||||
msg = self.addlinkendpoints(msg, serverset1, serverset2)
|
||||
elif msg.flags & coreapi.CORE_API_ADD_FLAG:
|
||||
self.addtunnel(host, nn[0], nn[1], localn)
|
||||
elif msg.flags & coreapi.CORE_API_DEL_FLAG:
|
||||
self.deltunnel(nn[0], nn[1])
|
||||
handle_locally = False
|
||||
else:
|
||||
serverlist = list(serverset1 | serverset2)
|
||||
|
||||
return (handle_locally, serverlist, msg)
|
||||
|
||||
def addlinkendpoints(self, msg, serverset1, serverset2):
|
||||
''' For a link message that is not handled locally, inform the remote
|
||||
servers of the IP addresses used as tunnel endpoints by adding
|
||||
opaque data to the link message.
|
||||
'''
|
||||
ip1 = ""
|
||||
for server in serverset1:
|
||||
(host, port, sock) = self.getserver(server)
|
||||
if host is not None:
|
||||
ip1 = host
|
||||
ip2 = ""
|
||||
for server in serverset2:
|
||||
(host, port, sock) = self.getserver(server)
|
||||
if host is not None:
|
||||
ip2 = host
|
||||
tlvdata = msg.rawmsg[coreapi.CoreMessage.hdrsiz:]
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_OPAQUE,
|
||||
"%s:%s" % (ip1, ip2))
|
||||
newraw = coreapi.CoreLinkMessage.pack(msg.flags, tlvdata)
|
||||
msghdr = newraw[:coreapi.CoreMessage.hdrsiz]
|
||||
return coreapi.CoreLinkMessage(msg.flags, msghdr, tlvdata)
|
||||
|
||||
def getlinkendpoint(self, msg, first_is_local):
|
||||
''' A link message between two different servers has been received,
|
||||
and we need to determine the tunnel endpoint. First look for
|
||||
opaque data in the link message, otherwise use the IP of the message
|
||||
sender (the master server).
|
||||
'''
|
||||
host = None
|
||||
opaque = msg.gettlv(coreapi.CORE_TLV_LINK_OPAQUE)
|
||||
if opaque is not None:
|
||||
if first_is_local:
|
||||
host = opaque.split(':')[1]
|
||||
else:
|
||||
host = opaque.split(':')[0]
|
||||
if host == "":
|
||||
host = None
|
||||
if host is None:
|
||||
# get IP address from API message sender (master)
|
||||
self.session._handlerslock.acquire()
|
||||
for h in self.session._handlers:
|
||||
if h.client_address != "":
|
||||
host = h.client_address[0]
|
||||
self.session._handlerslock.release()
|
||||
return host
|
||||
|
||||
def forwardmsg(self, msg, serverlist, handle_locally):
|
||||
''' Forward API message to all servers in serverlist; if an empty
|
||||
host/port is encountered, set the handle_locally flag. Returns the
|
||||
value of the handle_locally flag, which may be unchanged.
|
||||
'''
|
||||
for server in serverlist:
|
||||
try:
|
||||
(host, port, sock) = self.getserver(server)
|
||||
except KeyError:
|
||||
# server not found, don't handle this message locally
|
||||
self.session.info("broker could not find server %s, message " \
|
||||
"with type %s dropped" % \
|
||||
(server, msg.msgtype))
|
||||
continue
|
||||
if host is None and port is None:
|
||||
# local emulation server, handle this locally
|
||||
handle_locally = True
|
||||
else:
|
||||
if sock is None:
|
||||
self.session.info("server %s @ %s:%s is disconnected" % \
|
||||
(server, host, port))
|
||||
else:
|
||||
sock.send(msg.rawmsg)
|
||||
return handle_locally
|
||||
|
||||
def writeservers(self):
|
||||
''' Write the server list to a text file in the session directory upon
|
||||
startup: /tmp/pycore.nnnnn/servers
|
||||
'''
|
||||
filename = os.path.join(self.session.sessiondir, "servers")
|
||||
try:
|
||||
f = open(filename, "w")
|
||||
master = self.session_id_master
|
||||
if master is None:
|
||||
master = self.session.sessionid
|
||||
f.write("master=%s\n" % master)
|
||||
self.servers_lock.acquire()
|
||||
for name in sorted(self.servers.keys()):
|
||||
if name == "localhost":
|
||||
continue
|
||||
(host, port, sock) = self.servers[name]
|
||||
f.write("%s %s %s\n" % (name, host, port))
|
||||
f.close()
|
||||
except Exception, e:
|
||||
self.session.warn("Error writing server list to the file: %s\n%s" \
|
||||
% (filename, e))
|
||||
finally:
|
||||
self.servers_lock.release()
|
||||
|
||||
def writenodeserver(self, nodestr, server):
|
||||
''' Creates a /tmp/pycore.nnnnn/nX.conf/server file having the node
|
||||
and server info. This may be used by scripts for accessing nodes on
|
||||
other machines, much like local nodes may be accessed via the
|
||||
VnodeClient class.
|
||||
'''
|
||||
(host, port, sock) = self.getserver(server)
|
||||
serverstr = "%s %s %s" % (server, host, port)
|
||||
name = nodestr.split()[1]
|
||||
dirname = os.path.join(self.session.sessiondir, name + ".conf")
|
||||
filename = os.path.join(dirname, "server")
|
||||
try:
|
||||
os.makedirs(dirname)
|
||||
except OSError:
|
||||
# directory may already exist from previous distributed run
|
||||
pass
|
||||
try:
|
||||
f = open(filename, "w")
|
||||
f.write("%s\n%s\n" % (serverstr, nodestr))
|
||||
f.close()
|
||||
return True
|
||||
except Exception, e:
|
||||
msg = "Error writing server file '%s'" % filename
|
||||
msg += "for node %s:\n%s" % (name, e)
|
||||
self.session.warn(msg)
|
||||
return False
|
||||
|
||||
|
0 daemon/core/bsd/__init__.py Normal file
70 daemon/core/bsd/netgraph.py Normal file
@ -0,0 +1,70 @@
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# authors: core-dev@pf.itd.nrl.navy.mil
|
||||
#
|
||||
'''
|
||||
netgraph.py: Netgraph helper functions; for now these are wrappers around
|
||||
ngctl commands.
|
||||
'''
|
||||
|
||||
import subprocess
|
||||
from core.misc.utils import *
|
||||
from core.constants import *
|
||||
|
||||
checkexec([NGCTL_BIN])
|
||||
|
||||
def createngnode(type, hookstr, name=None):
|
||||
''' Create a new Netgraph node of type and optionally assign name. The
|
||||
hook string hookstr should contain two names. This is a string so
|
||||
other commands may be inserted after the two names.
|
||||
Return the name and netgraph ID of the new node.
|
||||
'''
|
||||
hook1 = hookstr.split()[0]
|
||||
ngcmd = "mkpeer %s %s \n show .%s" % (type, hookstr, hook1)
|
||||
cmd = [NGCTL_BIN, "-f", "-"]
|
||||
cmdid = subprocess.Popen(cmd, stdin = subprocess.PIPE,
|
||||
stdout = subprocess.PIPE,
|
||||
stderr = subprocess.PIPE)
|
||||
cmdid.stdin.write(ngcmd)
|
||||
cmdid.stdin.close()
|
||||
result = cmdid.stdout.read()
|
||||
result += cmdid.stderr.read()
|
||||
cmdid.stdout.close()
|
||||
cmdid.stderr.close()
|
||||
status = cmdid.wait()
|
||||
if status > 0:
|
||||
raise Exception, "error creating Netgraph node %s (%s): %s" % \
|
||||
(type, ngcmd, result)
|
||||
results = result.split()
|
||||
ngname = results[1]
|
||||
ngid = results[5]
|
||||
if name:
|
||||
check_call([NGCTL_BIN, "name", "[0x%s]:" % ngid, name])
|
||||
return (ngname, ngid)
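# Illustrative example only (not in the original file): for
# createngnode("bridge", "link0 link0", name="b0") the script piped to
# "ngctl -f -" would be
#
#   mkpeer bridge link0 link0
#    show .link0
#
# and the new node is then renamed with "ngctl name [0x<id>]: b0".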
|
||||
|
||||
def destroyngnode(name):
|
||||
''' Shutdown a Netgraph node having the given name.
|
||||
'''
|
||||
check_call([NGCTL_BIN, "shutdown", "%s:" % name])
|
||||
|
||||
def connectngnodes(name1, name2, hook1, hook2):
|
||||
''' Connect two hooks of two Netgraph nodes given by their names.
|
||||
'''
|
||||
node1 = "%s:" % name1
|
||||
node2 = "%s:" % name2
|
||||
check_call([NGCTL_BIN, "connect", node1, node2, hook1, hook2])
|
||||
|
||||
def ngmessage(name, msg):
|
||||
''' Send a Netgraph message to the node named name.
|
||||
'''
|
||||
cmd = [NGCTL_BIN, "msg", "%s:" % name] + msg
|
||||
check_call(cmd)
|
||||
|
||||
def ngloadkernelmodule(name):
|
||||
''' Load a kernel module by invoking kldload. This is needed for the
|
||||
ng_ether module which automatically creates Netgraph nodes when loaded.
|
||||
'''
|
||||
mutecall(["kldload", name])
|
197 daemon/core/bsd/nodes.py Normal file
@ -0,0 +1,197 @@
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2013 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: core-dev@pf.itd.nrl.navy.mil
|
||||
#
|
||||
|
||||
'''
|
||||
nodes.py: definition of CoreNode classes and other node classes that inherit
|
||||
from the CoreNode, implementing specific node types.
|
||||
'''
|
||||
|
||||
from vnode import *
|
||||
from vnet import *
|
||||
from core.constants import *
|
||||
from core.misc.ipaddr import *
|
||||
from core.api import coreapi
|
||||
from core.bsd.netgraph import ngloadkernelmodule
|
||||
|
||||
checkexec([IFCONFIG_BIN])
|
||||
|
||||
class CoreNode(JailNode):
|
||||
apitype = coreapi.CORE_NODE_DEF
|
||||
|
||||
class PtpNet(NetgraphPipeNet):
|
||||
def tonodemsg(self, flags):
|
||||
''' Do not generate a Node Message for point-to-point links. They are
|
||||
built using a link message instead.
|
||||
'''
|
||||
pass
|
||||
|
||||
def tolinkmsgs(self, flags):
|
||||
''' Build CORE API TLVs for a point-to-point link. One Link message
|
||||
describes this network.
|
||||
'''
|
||||
tlvdata = ""
|
||||
if len(self._netif) != 2:
|
||||
return tlvdata
|
||||
(if1, if2) = self._netif.items()
|
||||
if1 = if1[1]
|
||||
if2 = if2[1]
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_N1NUMBER,
|
||||
if1.node.objid)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_N2NUMBER,
|
||||
if2.node.objid)
|
||||
delay = if1.getparam('delay')
|
||||
bw = if1.getparam('bw')
|
||||
loss = if1.getparam('loss')
|
||||
duplicate = if1.getparam('duplicate')
|
||||
jitter = if1.getparam('jitter')
|
||||
if delay is not None:
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_DELAY,
|
||||
delay)
|
||||
if bw is not None:
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_BW, bw)
|
||||
if loss is not None:
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_PER,
|
||||
str(loss))
|
||||
if duplicate is not None:
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_DUP,
|
||||
str(duplicate))
|
||||
if jitter is not None:
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_JITTER,
|
||||
jitter)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_TYPE,
|
||||
self.linktype)
|
||||
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_IF1NUM, \
|
||||
if1.node.getifindex(if1))
|
||||
for addr in if1.addrlist:
|
||||
(ip, sep, mask) = addr.partition('/')
|
||||
mask = int(mask)
|
||||
if isIPv4Address(ip):
|
||||
family = AF_INET
|
||||
tlvtypeip = coreapi.CORE_TLV_LINK_IF1IP4
|
||||
tlvtypemask = coreapi.CORE_TLV_LINK_IF1IP4MASK
|
||||
else:
|
||||
family = AF_INET6
|
||||
tlvtypeip = coreapi.CORE_TLV_LINK_IF1IP6
|
||||
tlvtypemask = coreapi.CORE_TLV_LINK_IF1IP6MASK
|
||||
ipl = socket.inet_pton(family, ip)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(tlvtypeip,
|
||||
IPAddr(af=family, addr=ipl))
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(tlvtypemask, mask)
|
||||
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_IF2NUM, \
|
||||
if2.node.getifindex(if2))
|
||||
for addr in if2.addrlist:
|
||||
(ip, sep, mask) = addr.partition('/')
|
||||
mask = int(mask)
|
||||
if isIPv4Address(ip):
|
||||
family = AF_INET
|
||||
tlvtypeip = coreapi.CORE_TLV_LINK_IF2IP4
|
||||
tlvtypemask = coreapi.CORE_TLV_LINK_IF2IP4MASK
|
||||
else:
|
||||
family = AF_INET6
|
||||
tlvtypeip = coreapi.CORE_TLV_LINK_IF2IP6
|
||||
tlvtypemask = coreapi.CORE_TLV_LINK_IF2IP6MASK
|
||||
ipl = socket.inet_pton(family, ip)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(tlvtypeip,
|
||||
IPAddr(af=family, addr=ipl))
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(tlvtypemask, mask)
|
||||
|
||||
msg = coreapi.CoreLinkMessage.pack(flags, tlvdata)
|
||||
return [msg,]
|
||||
|
||||
class SwitchNode(NetgraphNet):
|
||||
ngtype = "bridge"
|
||||
nghooks = "link0 link0\nmsg .link0 setpersistent"
|
||||
apitype = coreapi.CORE_NODE_SWITCH
|
||||
policy = "ACCEPT"
|
||||
|
||||
class HubNode(NetgraphNet):
|
||||
ngtype = "hub"
|
||||
nghooks = "link0 link0\nmsg .link0 setpersistent"
|
||||
apitype = coreapi.CORE_NODE_HUB
|
||||
policy = "ACCEPT"
|
||||
|
||||
class WlanNode(NetgraphNet):
|
||||
ngtype = "wlan"
|
||||
nghooks = "anchor anchor"
|
||||
apitype = coreapi.CORE_NODE_WLAN
|
||||
linktype = coreapi.CORE_LINK_WIRELESS
|
||||
policy = "DROP"
|
||||
|
||||
def __init__(self, session, objid = None, name = None, verbose = False,
|
||||
start = True, policy = None):
|
||||
NetgraphNet.__init__(self, session, objid, name, verbose, start, policy)
|
||||
# wireless model such as basic range
|
||||
self.model = None
|
||||
# mobility model such as scripted
|
||||
self.mobility = None
|
||||
|
||||
def attach(self, netif):
|
||||
NetgraphNet.attach(self, netif)
|
||||
if self.model:
|
||||
netif.poshook = self.model._positioncallback
|
||||
if netif.node is None:
|
||||
return
|
||||
(x,y,z) = netif.node.position.get()
|
||||
netif.poshook(netif, x, y, z)
|
||||
|
||||
def setmodel(self, model, config):
|
||||
''' Set a wireless or mobility model for this WLAN from the given model class and configuration values.
|
||||
'''
|
||||
if (self.verbose):
|
||||
self.info("adding model %s" % model._name)
|
||||
if model._type == coreapi.CORE_TLV_REG_WIRELESS:
|
||||
self.model = model(session=self.session, objid=self.objid,
|
||||
verbose=self.verbose, values=config)
|
||||
if self.model._positioncallback:
|
||||
for netif in self.netifs():
|
||||
netif.poshook = self.model._positioncallback
|
||||
if netif.node is not None:
|
||||
(x,y,z) = netif.node.position.get()
|
||||
netif.poshook(netif, x, y, z)
|
||||
self.model.setlinkparams()
|
||||
elif model._type == coreapi.CORE_TLV_REG_MOBILITY:
|
||||
self.mobility = model(session=self.session, objid=self.objid,
|
||||
verbose=self.verbose, values=config)
|
||||
|
||||
|
||||
class RJ45Node(NetgraphPipeNet):
|
||||
apitype = coreapi.CORE_NODE_RJ45
|
||||
policy = "ACCEPT"
|
||||
|
||||
def __init__(self, session, objid, name, verbose, start = True):
|
||||
if start:
|
||||
ngloadkernelmodule("ng_ether")
|
||||
NetgraphPipeNet.__init__(self, session, objid, name, verbose, start)
|
||||
if start:
|
||||
self.setpromisc(True)
|
||||
|
||||
def shutdown(self):
|
||||
self.setpromisc(False)
|
||||
NetgraphPipeNet.shutdown(self)
|
||||
|
||||
def setpromisc(self, promisc):
|
||||
p = "promisc"
|
||||
if not promisc:
|
||||
p = "-" + p
|
||||
check_call([IFCONFIG_BIN, self.name, "up", p])
|
||||
|
||||
def attach(self, netif):
|
||||
if len(self._netif) > 0:
|
||||
raise ValueError, \
|
||||
"RJ45 networks support at most 1 network interface"
|
||||
NetgraphPipeNet.attach(self, netif)
|
||||
connectngnodes(self.ngname, self.name, self.gethook(), "lower")
|
||||
|
||||
class TunnelNode(NetgraphNet):
|
||||
ngtype = "pipe"
|
||||
nghooks = "upper lower"
|
||||
apitype = coreapi.CORE_NODE_TUNNEL
|
||||
policy = "ACCEPT"
|
||||
|
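A hedged usage sketch of the node classes above (the session, n1 and n2 objects are assumed to exist; BasicRangeModel stands in for a wireless model class such as the basic range model referenced in the comments, assumed importable from core.mobility; this is illustrative, not part of the module):

    wlan = session.addobj(cls=WlanNode, name="wlan1", start=True)
    for node in (n1, n2):
        node.newnetif(net=wlan, addrlist=["10.0.0.%d/24" % node.objid])
    # setmodel() re-points each attached interface's poshook at the model's
    # position callback, as implemented in WlanNode.setmodel() above
    wlan.setmodel(BasicRangeModel, BasicRangeModel.getdefaultvalues())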
216
daemon/core/bsd/vnet.py
Normal file
|
@@ -0,0 +1,216 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# authors: core-dev@pf.itd.nrl.navy.mil
|
||||
#
|
||||
'''
|
||||
vnet.py: NetgraphNet and NetgraphPipeNet classes that implement virtual networks
|
||||
using the FreeBSD Netgraph subsystem.
|
||||
'''
|
||||
|
||||
import sys, threading
|
||||
|
||||
from core.misc.utils import *
|
||||
from core.constants import *
|
||||
from core.coreobj import PyCoreNet, PyCoreObj
|
||||
from core.bsd.netgraph import *
|
||||
from core.bsd.vnode import VEth
|
||||
|
||||
class NetgraphNet(PyCoreNet):
|
||||
ngtype = None
|
||||
nghooks = ()
|
||||
|
||||
def __init__(self, session, objid = None, name = None, verbose = False,
|
||||
start = True, policy = None):
|
||||
PyCoreNet.__init__(self, session, objid, name)
|
||||
if name is None:
|
||||
name = str(self.objid)
|
||||
if policy is not None:
|
||||
self.policy = policy
|
||||
self.name = name
|
||||
self.ngname = "n_%s_%s" % (str(self.objid), self.session.sessionid)
|
||||
self.ngid = None
|
||||
self.verbose = verbose
|
||||
self._netif = {}
|
||||
self._linked = {}
|
||||
self.up = False
|
||||
if start:
|
||||
self.startup()
|
||||
|
||||
def startup(self):
|
||||
tmp, self.ngid = createngnode(type=self.ngtype, hookstr=self.nghooks,
|
||||
name=self.ngname)
|
||||
self.up = True
|
||||
|
||||
def shutdown(self):
|
||||
if not self.up:
|
||||
return
|
||||
self.up = False
|
||||
while self._netif:
|
||||
k, netif = self._netif.popitem()
|
||||
if netif.pipe:
|
||||
pipe = netif.pipe
|
||||
netif.pipe = None
|
||||
pipe.shutdown()
|
||||
else:
|
||||
netif.shutdown()
|
||||
self._netif.clear()
|
||||
self._linked.clear()
|
||||
del self.session
|
||||
destroyngnode(self.ngname)
|
||||
|
||||
def attach(self, netif):
|
||||
''' Attach an interface to this netgraph node. Create a pipe between
|
||||
the interface and the hub/switch/wlan node.
|
||||
(Note that the PtpNet subclass overrides this method.)
|
||||
'''
|
||||
if self.up:
|
||||
pipe = self.session.addobj(cls = NetgraphPipeNet,
|
||||
verbose = self.verbose, start = True)
|
||||
pipe.attach(netif)
|
||||
hook = "link%d" % len(self._netif)
|
||||
pipe.attachnet(self, hook)
|
||||
PyCoreNet.attach(self, netif)
|
||||
|
||||
def detach(self, netif):
|
||||
if self.up:
|
||||
pass
|
||||
PyCoreNet.detach(self, netif)
|
||||
|
||||
def linked(self, netif1, netif2):
|
||||
# check if the network interfaces are attached to this network
|
||||
if self._netif[netif1] != netif1:
|
||||
raise ValueError, "inconsistency for netif %s" % netif1.name
|
||||
if self._netif[netif2] != netif2:
|
||||
raise ValueError, "inconsistency for netif %s" % netif2.name
|
||||
try:
|
||||
linked = self._linked[netif1][netif2]
|
||||
except KeyError:
|
||||
linked = False
|
||||
self._linked[netif1][netif2] = linked
|
||||
return linked
|
||||
|
||||
def unlink(self, netif1, netif2):
|
||||
if not self.linked(netif1, netif2):
|
||||
return
|
||||
msg = ["unlink", "{", "node1=0x%s" % netif1.pipe.ngid]
|
||||
msg += ["node2=0x%s" % netif2.pipe.ngid, "}"]
|
||||
ngmessage(self.ngname, msg)
|
||||
self._linked[netif1][netif2] = False
|
||||
|
||||
def link(self, netif1, netif2):
|
||||
if self.linked(netif1, netif2):
|
||||
return
|
||||
msg = ["link", "{", "node1=0x%s" % netif1.pipe.ngid]
|
||||
msg += ["node2=0x%s" % netif2.pipe.ngid, "}"]
|
||||
ngmessage(self.ngname, msg)
|
||||
self._linked[netif1][netif2] = True
|
||||
|
||||
def linknet(self, net):
|
||||
''' Link this bridge with another by creating a veth pair and installing
|
||||
each device into each bridge.
|
||||
'''
|
||||
raise NotImplementedError
|
||||
|
||||
def linkconfig(self, netif, bw = None, delay = None,
|
||||
loss = None, duplicate = None, jitter = None, netif2=None):
|
||||
''' Set link effects by modifying the pipe connected to an interface.
|
||||
'''
|
||||
if not netif.pipe:
|
||||
self.warn("linkconfig for %s but interface %s has no pipe" % \
|
||||
(self.name, netif.name))
|
||||
return
|
||||
return netif.pipe.linkconfig(netif, bw, delay, loss, duplicate, jitter,
|
||||
netif2)
|
||||
|
||||
class NetgraphPipeNet(NetgraphNet):
|
||||
ngtype = "pipe"
|
||||
nghooks = "upper lower"
|
||||
|
||||
def __init__(self, session, objid = None, name = None, verbose = False,
|
||||
start = True, policy = None):
|
||||
NetgraphNet.__init__(self, session, objid, name, verbose, start, policy)
|
||||
if start:
|
||||
# account for Ethernet header
|
||||
ngmessage(self.ngname, ["setcfg", "{", "header_offset=14", "}"])
|
||||
|
||||
def attach(self, netif):
|
||||
''' Attach an interface to this pipe node.
|
||||
The first interface is connected to the "upper" hook, the second
|
||||
connected to the "lower" hook.
|
||||
'''
|
||||
if len(self._netif) > 1:
|
||||
raise ValueError, \
|
||||
"Netgraph pipes support at most 2 network interfaces"
|
||||
if self.up:
|
||||
hook = self.gethook()
|
||||
connectngnodes(self.ngname, netif.localname, hook, netif.hook)
|
||||
if netif.pipe:
|
||||
raise ValueError, \
|
||||
"Interface %s already attached to pipe %s" % \
|
||||
(netif.name, netif.pipe.name)
|
||||
netif.pipe = self
|
||||
self._netif[netif] = netif
|
||||
self._linked[netif] = {}
|
||||
|
||||
def attachnet(self, net, hook):
|
||||
''' Attach another NetgraphNet to this pipe node.
|
||||
'''
|
||||
localhook = self.gethook()
|
||||
connectngnodes(self.ngname, net.ngname, localhook, hook)
|
||||
|
||||
def gethook(self):
|
||||
''' Return the first hook (e.g. "upper") when no interfaces are attached,
|
||||
(e.g. "lower") based on the number of connections.
|
||||
'''
|
||||
hooks = self.nghooks.split()
|
||||
if len(self._netif) == 0:
|
||||
return hooks[0]
|
||||
else:
|
||||
return hooks[1]
|
||||
|
||||
def linkconfig(self, netif, bw = None, delay = None,
|
||||
loss = None, duplicate = None, jitter = None, netif2 = None):
|
||||
''' Set link effects by sending a Netgraph setcfg message to the pipe.
|
||||
'''
|
||||
netif.setparam('bw', bw)
|
||||
netif.setparam('delay', delay)
|
||||
netif.setparam('loss', loss)
|
||||
netif.setparam('duplicate', duplicate)
|
||||
netif.setparam('jitter', jitter)
|
||||
if not self.up:
|
||||
return
|
||||
params = []
|
||||
upstream = []
|
||||
downstream = []
|
||||
if bw is not None:
|
||||
if str(bw)=="0":
|
||||
bw="-1"
|
||||
params += ["bandwidth=%s" % bw,]
|
||||
if delay is not None:
|
||||
if str(delay)=="0":
|
||||
delay="-1"
|
||||
params += ["delay=%s" % delay,]
|
||||
if loss is not None:
|
||||
if str(loss)=="0":
|
||||
loss="-1"
|
||||
upstream += ["BER=%s" % loss,]
|
||||
downstream += ["BER=%s" % loss,]
|
||||
if duplicate is not None:
|
||||
if str(duplicate)=="0":
|
||||
duplicate="-1"
|
||||
upstream += ["duplicate=%s" % duplicate,]
|
||||
downstream += ["duplicate=%s" % duplicate,]
|
||||
if jitter:
|
||||
self.warn("jitter parameter ignored for link %s" % self.name)
|
||||
if len(params) > 0 or len(upstream) > 0 or len(downstream) > 0:
|
||||
setcfg = ["setcfg", "{",] + params
|
||||
if len(upstream) > 0:
|
||||
setcfg += ["upstream={",] + upstream + ["}",]
|
||||
if len(downstream) > 0:
|
||||
setcfg += ["downstream={",] + downstream + ["}",]
|
||||
setcfg += ["}",]
|
||||
ngmessage(self.ngname, setcfg)
|
||||
|
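As a worked example of the setcfg message assembled by NetgraphPipeNet.linkconfig() above (the pipe name n_5_123 is hypothetical), a call with bw=0, delay=20000 and loss=1.0 would issue roughly:

    # ngctl msg n_5_123: setcfg { bandwidth=-1 delay=20000
    #                             upstream={ BER=1.0 } downstream={ BER=1.0 } }

A zero bandwidth or delay is rewritten to -1 before being sent (apparently to clear that limit in ng_pipe), the loss percentage is passed through as the pipe's BER parameter in both directions, and jitter only produces a warning.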
393
daemon/core/bsd/vnode.py
Normal file
|
@@ -0,0 +1,393 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# authors: core-dev@pf.itd.nrl.navy.mil
|
||||
#
|
||||
'''
|
||||
vnode.py: SimpleJailNode and JailNode classes that implement the FreeBSD
|
||||
jail-based virtual node.
|
||||
'''
|
||||
|
||||
import os, signal, sys, subprocess, threading, string
|
||||
import random, time
|
||||
from core.misc.utils import *
|
||||
from core.constants import *
|
||||
from core.coreobj import PyCoreObj, PyCoreNode, PyCoreNetIf, Position
|
||||
from core.emane.nodes import EmaneNode
|
||||
from core.bsd.netgraph import *
|
||||
|
||||
checkexec([IFCONFIG_BIN, VIMAGE_BIN])
|
||||
|
||||
class VEth(PyCoreNetIf):
|
||||
def __init__(self, node, name, localname, mtu = 1500, net = None,
|
||||
start = True):
|
||||
PyCoreNetIf.__init__(self, node = node, name = name, mtu = mtu)
|
||||
# name is the device name (e.g. ngeth0, ngeth1, etc.) before it is
|
||||
# installed in a node; the Netgraph name is renamed to localname
|
||||
# e.g. before install: name = ngeth0 localname = n0_0_123
|
||||
# after install: name = eth0 localname = n0_0_123
|
||||
self.localname = localname
|
||||
self.ngid = None
|
||||
self.net = None
|
||||
self.pipe = None
|
||||
self.addrlist = []
|
||||
self.hwaddr = None
|
||||
self.up = False
|
||||
self.hook = "ether"
|
||||
if start:
|
||||
self.startup()
|
||||
|
||||
def startup(self):
|
||||
hookstr = "%s %s" % (self.hook, self.hook)
|
||||
ngname, ngid = createngnode(type="eiface", hookstr=hookstr,
|
||||
name=self.localname)
|
||||
self.name = ngname
|
||||
self.ngid = ngid
|
||||
check_call([IFCONFIG_BIN, ngname, "up"])
|
||||
self.up = True
|
||||
|
||||
def shutdown(self):
|
||||
if not self.up:
|
||||
return
|
||||
destroyngnode(self.localname)
|
||||
self.up = False
|
||||
|
||||
def attachnet(self, net):
|
||||
if self.net:
|
||||
self.detachnet()
|
||||
self.net = None
|
||||
net.attach(self)
|
||||
self.net = net
|
||||
|
||||
def detachnet(self):
|
||||
if self.net is not None:
|
||||
self.net.detach(self)
|
||||
|
||||
def addaddr(self, addr):
|
||||
self.addrlist.append(addr)
|
||||
|
||||
def deladdr(self, addr):
|
||||
self.addrlist.remove(addr)
|
||||
|
||||
def sethwaddr(self, addr):
|
||||
self.hwaddr = addr
|
||||
|
||||
class TunTap(PyCoreNetIf):
|
||||
'''TUN/TAP virtual device in TAP mode'''
|
||||
def __init__(self, node, name, localname, mtu = None, net = None,
|
||||
start = True):
|
||||
raise NotImplementedError
|
||||
|
||||
class SimpleJailNode(PyCoreNode):
|
||||
def __init__(self, session, objid = None, name = None, nodedir = None,
|
||||
verbose = False):
|
||||
PyCoreNode.__init__(self, session, objid, name)
|
||||
self.nodedir = nodedir
|
||||
self.verbose = verbose
|
||||
self.pid = None
|
||||
self.up = False
|
||||
self.lock = threading.RLock()
|
||||
self._mounts = []
|
||||
|
||||
def startup(self):
|
||||
if self.up:
|
||||
raise Exception, "already up"
|
||||
vimg = [VIMAGE_BIN, "-c", self.name]
|
||||
try:
|
||||
os.spawnlp(os.P_WAIT, VIMAGE_BIN, *vimg)
|
||||
except OSError:
|
||||
raise Exception, ("vimage command not found while running: %s" % \
|
||||
vimg)
|
||||
self.info("bringing up loopback interface")
|
||||
self.cmd([IFCONFIG_BIN, "lo0", "127.0.0.1"])
|
||||
self.info("setting hostname: %s" % self.name)
|
||||
self.cmd(["hostname", self.name])
|
||||
self.cmd([SYSCTL_BIN, "vfs.morphing_symlinks=1"])
|
||||
self.up = True
|
||||
|
||||
def shutdown(self):
|
||||
if not self.up:
|
||||
return
|
||||
for netif in self.netifs():
|
||||
netif.shutdown()
|
||||
self._netif.clear()
|
||||
del self.session
|
||||
vimg = [VIMAGE_BIN, "-d", self.name]
|
||||
try:
|
||||
os.spawnlp(os.P_WAIT, VIMAGE_BIN, *vimg)
|
||||
except OSError:
|
||||
raise Exception, ("vimage command not found while running: %s" % \
|
||||
vimg)
|
||||
self.up = False
|
||||
|
||||
def cmd(self, args, wait = True):
|
||||
if wait:
|
||||
mode = os.P_WAIT
|
||||
else:
|
||||
mode = os.P_NOWAIT
|
||||
tmp = call([VIMAGE_BIN, self.name] + args, cwd=self.nodedir)
|
||||
if not wait:
|
||||
tmp = None
|
||||
if tmp:
|
||||
self.warn("cmd exited with status %s: %s" % (tmp, str(args)))
|
||||
return tmp
|
||||
|
||||
def cmdresult(self, args, wait = True):
|
||||
cmdid, cmdin, cmdout, cmderr = self.popen(args)
|
||||
result = cmdout.read()
|
||||
result += cmderr.read()
|
||||
cmdin.close()
|
||||
cmdout.close()
|
||||
cmderr.close()
|
||||
if wait:
|
||||
status = cmdid.wait()
|
||||
else:
|
||||
status = 0
|
||||
return (status, result)
|
||||
|
||||
def popen(self, args):
|
||||
cmd = [VIMAGE_BIN, self.name]
|
||||
cmd.extend(args)
|
||||
tmp = subprocess.Popen(cmd, stdin = subprocess.PIPE,
|
||||
stdout = subprocess.PIPE,
|
||||
stderr = subprocess.PIPE, cwd=self.nodedir)
|
||||
return tmp, tmp.stdin, tmp.stdout, tmp.stderr
|
||||
|
||||
def icmd(self, args):
|
||||
return os.spawnlp(os.P_WAIT, VIMAGE_BIN, VIMAGE_BIN, self.name, *args)
|
||||
|
||||
def term(self, sh = "/bin/sh"):
|
||||
return os.spawnlp(os.P_WAIT, "xterm", "xterm", "-ut",
|
||||
"-title", self.name, "-e", VIMAGE_BIN, self.name, sh)
|
||||
|
||||
def termcmdstring(self, sh = "/bin/sh"):
|
||||
''' We add 'sudo' to the command string because the GUI runs as a
|
||||
normal user.
|
||||
'''
|
||||
return "cd %s && sudo %s %s %s" % (self.nodedir, VIMAGE_BIN, self.name, sh)
|
||||
|
||||
def shcmd(self, cmdstr, sh = "/bin/sh"):
|
||||
return self.cmd([sh, "-c", cmdstr])
|
||||
|
||||
def boot(self):
|
||||
pass
|
||||
|
||||
def mount(self, source, target):
|
||||
source = os.path.abspath(source)
|
||||
self.info("mounting %s at %s" % (source, target))
|
||||
self.addsymlink(path=target, file=None)
|
||||
|
||||
def umount(self, target):
|
||||
self.info("unmounting '%s'" % target)
|
||||
|
||||
def newveth(self, ifindex = None, ifname = None, net = None):
|
||||
self.lock.acquire()
|
||||
try:
|
||||
if ifindex is None:
|
||||
ifindex = self.newifindex()
|
||||
if ifname is None:
|
||||
ifname = "eth%d" % ifindex
|
||||
sessionid = self.session.shortsessionid()
|
||||
name = "n%s_%s_%s" % (self.objid, ifindex, sessionid)
|
||||
localname = name
|
||||
ifclass = VEth
|
||||
veth = ifclass(node = self, name = name, localname = localname,
|
||||
mtu = 1500, net = net, start = self.up)
|
||||
if self.up:
|
||||
# install into jail
|
||||
check_call([IFCONFIG_BIN, veth.name, "vnet", self.name])
|
||||
# rename from "ngeth0" to "eth0"
|
||||
self.cmd([IFCONFIG_BIN, veth.name, "name", ifname])
|
||||
veth.name = ifname
|
||||
try:
|
||||
self.addnetif(veth, ifindex)
|
||||
except:
|
||||
veth.shutdown()
|
||||
del veth
|
||||
raise
|
||||
return ifindex
|
||||
finally:
|
||||
self.lock.release()
|
||||
|
||||
def sethwaddr(self, ifindex, addr):
|
||||
self._netif[ifindex].sethwaddr(addr)
|
||||
if self.up:
|
||||
self.cmd([IFCONFIG_BIN, self.ifname(ifindex), "link",
|
||||
str(addr)])
|
||||
|
||||
def addaddr(self, ifindex, addr):
|
||||
if self.up:
|
||||
if ':' in addr:
|
||||
family = "inet6"
|
||||
else:
|
||||
family = "inet"
|
||||
self.cmd([IFCONFIG_BIN, self.ifname(ifindex), family, "alias",
|
||||
str(addr)])
|
||||
self._netif[ifindex].addaddr(addr)
|
||||
|
||||
def deladdr(self, ifindex, addr):
|
||||
try:
|
||||
self._netif[ifindex].deladdr(addr)
|
||||
except ValueError:
|
||||
self.warn("trying to delete unknown address: %s" % addr)
|
||||
if self.up:
|
||||
if ':' in addr:
|
||||
family = "inet6"
|
||||
else:
|
||||
family = "inet"
|
||||
self.cmd([IFCONFIG_BIN, self.ifname(ifindex), family, "-alias",
|
||||
str(addr)])
|
||||
|
||||
valid_deladdrtype = ("inet", "inet6", "inet6link")
|
||||
def delalladdr(self, ifindex, addrtypes = valid_deladdrtype):
|
||||
addr = self.getaddr(self.ifname(ifindex), rescan = True)
|
||||
for t in addrtypes:
|
||||
if t not in self.valid_deladdrtype:
|
||||
raise ValueError, "addr type must be in: " + \
|
||||
" ".join(self.valid_deladdrtype)
|
||||
for a in addr[t]:
|
||||
self.deladdr(ifindex, a)
|
||||
# update cached information
|
||||
self.getaddr(self.ifname(ifindex), rescan = True)
|
||||
|
||||
def ifup(self, ifindex):
|
||||
if self.up:
|
||||
self.cmd([IFCONFIG_BIN, self.ifname(ifindex), "up"])
|
||||
|
||||
def newnetif(self, net = None, addrlist = [], hwaddr = None,
|
||||
ifindex = None, ifname = None):
|
||||
self.lock.acquire()
|
||||
try:
|
||||
ifindex = self.newveth(ifindex = ifindex, ifname = ifname,
|
||||
net = net)
|
||||
if net is not None:
|
||||
self.attachnet(ifindex, net)
|
||||
if hwaddr:
|
||||
self.sethwaddr(ifindex, hwaddr)
|
||||
for addr in maketuple(addrlist):
|
||||
self.addaddr(ifindex, addr)
|
||||
self.ifup(ifindex)
|
||||
return ifindex
|
||||
finally:
|
||||
self.lock.release()
|
||||
|
||||
def attachnet(self, ifindex, net):
|
||||
self._netif[ifindex].attachnet(net)
|
||||
|
||||
def detachnet(self, ifindex):
|
||||
self._netif[ifindex].detachnet()
|
||||
|
||||
def addfile(self, srcname, filename):
|
||||
shcmd = "mkdir -p $(dirname '%s') && mv '%s' '%s' && sync" % \
|
||||
(filename, srcname, filename)
|
||||
self.shcmd(shcmd)
|
||||
|
||||
def getaddr(self, ifname, rescan = False):
|
||||
return None
|
||||
#return self.vnodeclient.getaddr(ifname = ifname, rescan = rescan)
|
||||
|
||||
def addsymlink(self, path, file):
|
||||
''' Create a symbolic link from /path/name/file ->
|
||||
/tmp/pycore.nnnnn/@.conf/path.name/file
|
||||
'''
|
||||
dirname = path
|
||||
if dirname and dirname[0] == "/":
|
||||
dirname = dirname[1:]
|
||||
dirname = dirname.replace("/", ".")
|
||||
if file:
|
||||
pathname = os.path.join(path, file)
|
||||
sym = os.path.join(self.session.sessiondir, "@.conf", dirname, file)
|
||||
else:
|
||||
pathname = path
|
||||
sym = os.path.join(self.session.sessiondir, "@.conf", dirname)
|
||||
|
||||
if os.path.islink(pathname):
|
||||
if os.readlink(pathname) == sym:
|
||||
# this link already exists - silently return
|
||||
return
|
||||
os.unlink(pathname)
|
||||
else:
|
||||
if os.path.exists(pathname):
|
||||
self.warn("did not create symlink for %s since path " \
|
||||
"exists on host" % pathname)
|
||||
return
|
||||
self.info("creating symlink %s -> %s" % (pathname, sym))
|
||||
os.symlink(sym, pathname)
|
||||
|
||||
class JailNode(SimpleJailNode):
|
||||
|
||||
def __init__(self, session, objid = None, name = None,
|
||||
nodedir = None, bootsh = "boot.sh", verbose = False,
|
||||
start = True):
|
||||
super(JailNode, self).__init__(session = session, objid = objid,
|
||||
name = name, nodedir = nodedir,
|
||||
verbose = verbose)
|
||||
self.bootsh = bootsh
|
||||
if not start:
|
||||
return
|
||||
# below here is considered node startup/instantiation code
|
||||
self.makenodedir()
|
||||
self.startup()
|
||||
|
||||
def boot(self):
|
||||
self.session.services.bootnodeservices(self)
|
||||
|
||||
def validate(self):
|
||||
self.session.services.validatenodeservices(self)
|
||||
|
||||
def startup(self):
|
||||
self.lock.acquire()
|
||||
try:
|
||||
super(JailNode, self).startup()
|
||||
#self.privatedir("/var/run")
|
||||
#self.privatedir("/var/log")
|
||||
finally:
|
||||
self.lock.release()
|
||||
|
||||
def shutdown(self):
|
||||
if not self.up:
|
||||
return
|
||||
self.lock.acquire()
|
||||
# services are instead stopped when session enters datacollect state
|
||||
#self.session.services.stopnodeservices(self)
|
||||
try:
|
||||
super(JailNode, self).shutdown()
|
||||
finally:
|
||||
self.rmnodedir()
|
||||
self.lock.release()
|
||||
|
||||
def privatedir(self, path):
|
||||
if path[0] != "/":
|
||||
raise ValueError, "path not fully qualified: " + path
|
||||
hostpath = os.path.join(self.nodedir, path[1:].replace("/", "."))
|
||||
try:
|
||||
os.mkdir(hostpath)
|
||||
except OSError:
|
||||
pass
|
||||
except Exception, e:
|
||||
raise Exception, e
|
||||
self.mount(hostpath, path)
|
||||
|
||||
def opennodefile(self, filename, mode = "w"):
|
||||
dirname, basename = os.path.split(filename)
|
||||
#self.addsymlink(path=dirname, file=basename)
|
||||
if not basename:
|
||||
raise ValueError, "no basename for filename: " + filename
|
||||
if dirname and dirname[0] == "/":
|
||||
dirname = dirname[1:]
|
||||
dirname = dirname.replace("/", ".")
|
||||
dirname = os.path.join(self.nodedir, dirname)
|
||||
if not os.path.isdir(dirname):
|
||||
os.makedirs(dirname, mode = 0755)
|
||||
hostfilename = os.path.join(dirname, basename)
|
||||
return open(hostfilename, mode)
|
||||
|
||||
def nodefile(self, filename, contents, mode = 0644):
|
||||
f = self.opennodefile(filename, "w")
|
||||
f.write(contents)
|
||||
os.chmod(f.name, mode)
|
||||
f.close()
|
||||
self.info("created nodefile: '%s'; mode: 0%o" % (f.name, mode))
|
||||
|
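A brief, hypothetical lifecycle sketch for the jail-based node classes above (an existing Session instance is assumed, and addobj() is assumed to forward these keywords to the node constructor):

    n1 = session.addobj(cls=JailNode, name="n1")    # vimage -c n1, lo0 and hostname set
    ifindex = n1.newnetif(net=None, addrlist=["10.0.0.1/24"], ifname="eth0")
    n1.cmd(["ifconfig", "eth0"])                    # runs: vimage n1 ifconfig eth0
    n1.shutdown()                                   # vimage -d n1, node dir removed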
373
daemon/core/conf.py
Normal file
|
@@ -0,0 +1,373 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# authors: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
conf.py: common support for configurable objects
|
||||
'''
|
||||
import string
|
||||
from core.api import coreapi
|
||||
|
||||
class ConfigurableManager(object):
|
||||
''' A generic class for managing Configurables. This class can register
|
||||
with a session to receive Config Messages for setting some parameters
|
||||
for itself or for the Configurables that it manages.
|
||||
'''
|
||||
# name corresponds to configuration object field
|
||||
_name = ""
|
||||
# type corresponds with register message types
|
||||
_type = None
|
||||
|
||||
def __init__(self, session=None):
|
||||
self.session = session
|
||||
self.session.addconfobj(self._name, self._type, self.configure)
|
||||
# Configurable key=values, indexed by node number
|
||||
self.configs = {}
|
||||
|
||||
|
||||
def configure(self, session, msg):
|
||||
''' Handle configure messages. The configuration message sent to a
|
||||
ConfigurableManager usually is used to:
|
||||
1. Request a list of Configurables (request flag)
|
||||
2. Reset manager and clear configs (reset flag)
|
||||
3. Send values that configure the manager or one of its
|
||||
Configurables
|
||||
|
||||
Returns any reply messages.
|
||||
'''
|
||||
objname = msg.gettlv(coreapi.CORE_TLV_CONF_OBJ)
|
||||
conftype = msg.gettlv(coreapi.CORE_TLV_CONF_TYPE)
|
||||
if conftype == coreapi.CONF_TYPE_FLAGS_REQUEST:
|
||||
return self.configure_request(msg)
|
||||
elif conftype == coreapi.CONF_TYPE_FLAGS_RESET:
|
||||
if objname == "all" or objname == self._name:
|
||||
return self.configure_reset(msg)
|
||||
else:
|
||||
return self.configure_values(msg,
|
||||
msg.gettlv(coreapi.CORE_TLV_CONF_VALUES))
|
||||
|
||||
def configure_request(self, msg):
|
||||
''' Request configuration data.
|
||||
'''
|
||||
return None
|
||||
|
||||
def configure_reset(self, msg):
|
||||
''' By default, resets this manager to clear configs.
|
||||
'''
|
||||
return self.reset()
|
||||
|
||||
def configure_values(self, msg, values):
|
||||
''' Values have been sent to this manager.
|
||||
'''
|
||||
return None
|
||||
|
||||
def configure_values_keyvalues(self, msg, values, target, keys):
|
||||
''' Helper that can be used for configure_values for parsing in
|
||||
'key=value' strings from a values field. The key name must be
|
||||
in the keys list, and target.key=value is set.
|
||||
'''
|
||||
if values is None:
|
||||
return None
|
||||
kvs = values.split('|')
|
||||
for kv in kvs:
|
||||
try:
|
||||
# key=value
|
||||
(key, value) = kv.split('=', 1)
|
||||
except ValueError:
|
||||
# value only
|
||||
key = keys[kvs.index(kv)]
|
||||
value = kv
|
||||
if key not in keys:
|
||||
raise ValueError, "invalid key: %s" % key
|
||||
setattr(target, key, value)
|
||||
return None
|
||||
|
||||
def reset(self):
|
||||
return None
|
||||
|
||||
def setconfig(self, nodenum, conftype, values):
|
||||
''' add configuration values for a node to a dictionary; values are
|
||||
usually received from a Configuration Message, and may refer to a
|
||||
node for which no object exists yet
|
||||
'''
|
||||
conflist = []
|
||||
if nodenum in self.configs:
|
||||
oldlist = self.configs[nodenum]
|
||||
found = False
|
||||
for (t, v) in oldlist:
|
||||
if (t == conftype):
|
||||
# replace existing config
|
||||
found = True
|
||||
conflist.append((conftype, values))
|
||||
else:
|
||||
conflist.append((t, v))
|
||||
if not found:
|
||||
conflist.append((conftype, values))
|
||||
else:
|
||||
conflist.append((conftype, values))
|
||||
self.configs[nodenum] = conflist
|
||||
|
||||
def getconfig(self, nodenum, conftype, defaultvalues):
|
||||
''' get configuration values for a node; if the values don't exist in
|
||||
our dictionary then return the default values supplied
|
||||
'''
|
||||
if nodenum in self.configs:
|
||||
# return configured values
|
||||
conflist = self.configs[nodenum]
|
||||
for (t, v) in conflist:
|
||||
if (conftype is None) or (t == conftype):
|
||||
return (t, v)
|
||||
# return default values provided (may be None)
|
||||
return (conftype, defaultvalues)
|
||||
|
||||
def getallconfigs(self, use_clsmap=True):
|
||||
''' Return (nodenum, conftype, values) tuples for all stored configs.
|
||||
Used when reconnecting to a session.
|
||||
'''
|
||||
r = []
|
||||
for nodenum in self.configs:
|
||||
for (t, v) in self.configs[nodenum]:
|
||||
if use_clsmap:
|
||||
t = self._modelclsmap[t]
|
||||
r.append( (nodenum, t, v) )
|
||||
return r
|
||||
|
||||
def clearconfig(self, nodenum):
|
||||
''' remove configuration values for the specified node;
|
||||
when nodenum is None, remove all configuration values
|
||||
'''
|
||||
if nodenum is None:
|
||||
self.configs = {}
|
||||
return
|
||||
if nodenum in self.configs:
|
||||
self.configs.pop(nodenum)
|
||||
|
||||
def setconfig_keyvalues(self, nodenum, conftype, keyvalues):
|
||||
''' Set configuration values for a node from a list of (key, value) tuples.
|
||||
'''
|
||||
if conftype not in self._modelclsmap:
|
||||
self.warn("Unknown model type '%s'" % (conftype))
|
||||
return
|
||||
model = self._modelclsmap[conftype]
|
||||
keys = model.getnames()
|
||||
# defaults are merged with supplied values here
|
||||
values = list(model.getdefaultvalues())
|
||||
for key, value in keyvalues:
|
||||
if key not in keys:
|
||||
self.warn("Skipping unknown configuration key for %s: '%s'" % \
|
||||
(conftype, key))
|
||||
continue
|
||||
i = keys.index(key)
|
||||
values[i] = value
|
||||
self.setconfig(nodenum, conftype, values)
|
||||
|
||||
def getmodels(self, n):
|
||||
''' Return a list of model classes and values for a net if one has been
|
||||
configured. This is invoked when exporting a session to XML.
|
||||
This assumes self.configs contains an iterable of (model-names, values)
|
||||
and that a self._modelclsmap dict exists.
|
||||
'''
|
||||
r = []
|
||||
if n.objid in self.configs:
|
||||
v = self.configs[n.objid]
|
||||
for model in v:
|
||||
cls = self._modelclsmap[model[0]]
|
||||
vals = model[1]
|
||||
r.append((cls, vals))
|
||||
return r
|
||||
|
||||
|
||||
def info(self, msg):
|
||||
self.session.info(msg)
|
||||
|
||||
def warn(self, msg):
|
||||
self.session.warn(msg)
|
||||
|
||||
|
||||
class Configurable(object):
|
||||
''' A generic class for managing configuration parameters.
|
||||
Parameters are sent via Configuration Messages, which allow the GUI
|
||||
to build dynamic dialogs depending on what is being configured.
|
||||
'''
|
||||
_name = ""
|
||||
# Configuration items:
|
||||
# ('name', 'type', 'default', 'possible-value-list', 'caption')
|
||||
_confmatrix = []
|
||||
_confgroups = None
|
||||
_bitmap = None
|
||||
|
||||
def __init__(self, session=None, objid=None):
|
||||
self.session = session
|
||||
self.objid = objid
|
||||
|
||||
def reset(self):
|
||||
pass
|
||||
|
||||
def register(self):
|
||||
pass
|
||||
|
||||
@classmethod
|
||||
def getdefaultvalues(cls):
|
||||
return tuple( map(lambda x: x[2], cls._confmatrix) )
|
||||
|
||||
@classmethod
|
||||
def getnames(cls):
|
||||
return tuple( map( lambda x: x[0], cls._confmatrix) )
|
||||
|
||||
@classmethod
|
||||
def configure(cls, mgr, msg):
|
||||
''' Handle configuration messages for this object.
|
||||
'''
|
||||
reply = None
|
||||
nodenum = msg.gettlv(coreapi.CORE_TLV_CONF_NODE)
|
||||
objname = msg.gettlv(coreapi.CORE_TLV_CONF_OBJ)
|
||||
conftype = msg.gettlv(coreapi.CORE_TLV_CONF_TYPE)
|
||||
|
||||
if mgr.verbose:
|
||||
mgr.info("received configure message for %s" % cls._name)
|
||||
if conftype == coreapi.CONF_TYPE_FLAGS_REQUEST:
|
||||
if mgr.verbose:
|
||||
mgr.info("replying to configure request for %s model" %
|
||||
cls._name)
|
||||
# when object name is "all", the reply to this request may be None
|
||||
# if this node has not been configured for this model; otherwise we
|
||||
# reply with the defaults for this model
|
||||
if objname == "all":
|
||||
defaults = None
|
||||
typeflags = coreapi.CONF_TYPE_FLAGS_UPDATE
|
||||
else:
|
||||
defaults = cls.getdefaultvalues()
|
||||
typeflags = coreapi.CONF_TYPE_FLAGS_NONE
|
||||
values = mgr.getconfig(nodenum, cls._name, defaults)[1]
|
||||
if values is None:
|
||||
# node has no active config for this model (don't send defaults)
|
||||
return None
|
||||
# reply with config options
|
||||
reply = cls.toconfmsg(0, nodenum, typeflags, values)
|
||||
elif conftype == coreapi.CONF_TYPE_FLAGS_RESET:
|
||||
if objname == "all":
|
||||
mgr.clearconfig(nodenum)
|
||||
#elif conftype == coreapi.CONF_TYPE_FLAGS_UPDATE:
|
||||
else:
|
||||
# store the configuration values for later use, when the node
|
||||
# object has been created
|
||||
if objname is None:
|
||||
mgr.info("no configuration object for node %s" % nodenum)
|
||||
return None
|
||||
values_str = msg.gettlv(coreapi.CORE_TLV_CONF_VALUES)
|
||||
defaults = cls.getdefaultvalues()
|
||||
if values_str is None:
|
||||
# use default or preconfigured values
|
||||
values = mgr.getconfig(nodenum, cls._name, defaults)[1]
|
||||
else:
|
||||
# use new values supplied from the conf message
|
||||
values = values_str.split('|')
|
||||
# determine new or old style config
|
||||
new = cls.haskeyvalues(values)
|
||||
if new:
|
||||
new_values = list(defaults)
|
||||
keys = cls.getnames()
|
||||
for v in values:
|
||||
key, value = v.split('=', 1)
|
||||
try:
|
||||
new_values[keys.index(key)] = value
|
||||
except ValueError:
|
||||
mgr.info("warning: ignoring invalid key '%s'" % key)
|
||||
values = new_values
|
||||
mgr.setconfig(nodenum, objname, values)
|
||||
return reply
|
||||
|
||||
@classmethod
|
||||
def toconfmsg(cls, flags, nodenum, typeflags, values):
|
||||
''' Convert this class to a Config API message. Some TLVs are defined
|
||||
by the class, but node number, conf type flags, and values must
|
||||
be passed in.
|
||||
'''
|
||||
keys = cls.getnames()
|
||||
keyvalues = map(lambda a,b: "%s=%s" % (a,b), keys, values)
|
||||
values_str = string.join(keyvalues, '|')
|
||||
tlvdata = ""
|
||||
if nodenum is not None:
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_NODE,
|
||||
nodenum)
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_OBJ,
|
||||
cls._name)
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_TYPE,
|
||||
typeflags)
|
||||
datatypes = tuple( map(lambda x: x[1], cls._confmatrix) )
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_DATA_TYPES,
|
||||
datatypes)
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_VALUES,
|
||||
values_str)
|
||||
captions = reduce( lambda a,b: a + '|' + b, \
|
||||
map(lambda x: x[4], cls._confmatrix))
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_CAPTIONS,
|
||||
captions)
|
||||
possiblevals = reduce( lambda a,b: a + '|' + b, \
|
||||
map(lambda x: x[3], cls._confmatrix))
|
||||
tlvdata += coreapi.CoreConfTlv.pack(
|
||||
coreapi.CORE_TLV_CONF_POSSIBLE_VALUES, possiblevals)
|
||||
if cls._bitmap is not None:
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_BITMAP,
|
||||
cls._bitmap)
|
||||
if cls._confgroups is not None:
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_GROUPS,
|
||||
cls._confgroups)
|
||||
msg = coreapi.CoreConfMessage.pack(flags, tlvdata)
|
||||
return msg
|
||||
|
||||
@staticmethod
|
||||
def booltooffon(value):
|
||||
''' Convenience helper that turns a truthy value ("1", "true", or "on") into the string "on", and anything else into "off".
|
||||
'''
|
||||
if value == "1" or value == "true" or value == "on":
|
||||
return "on"
|
||||
else:
|
||||
return "off"
|
||||
|
||||
@staticmethod
|
||||
def offontobool(value):
|
||||
if type(value) == str:
|
||||
if value.lower() == "on":
|
||||
return 1
|
||||
elif value.lower() == "off":
|
||||
return 0
|
||||
return value
|
||||
|
||||
|
||||
def valueof(self, name, values):
|
||||
''' Helper to return a value by the name defined in confmatrix.
|
||||
Boolean values are converted to "on"/"off" strings.'''
|
||||
i = self.getnames().index(name)
|
||||
if self._confmatrix[i][1] == coreapi.CONF_DATA_TYPE_BOOL and \
|
||||
values[i] != "":
|
||||
return self.booltooffon( values[i] )
|
||||
else:
|
||||
return values[i]
|
||||
|
||||
@staticmethod
|
||||
def haskeyvalues(values):
|
||||
''' Helper to check for list of key=value pairs versus a plain old
|
||||
list of values. Returns True if all elements are "key=value".
|
||||
'''
|
||||
if len(values) == 0:
|
||||
return False
|
||||
for v in values:
|
||||
if "=" not in v:
|
||||
return False
|
||||
return True
|
||||
|
||||
def getkeyvaluelist(self):
|
||||
''' Helper to return a list of (key, value) tuples. Keys come from
|
||||
self._confmatrix and values are instance attributes.
|
||||
'''
|
||||
r = []
|
||||
for k in self.getnames():
|
||||
if hasattr(self, k):
|
||||
r.append((k, getattr(self, k)))
|
||||
return r
|
||||
|
||||
|
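A minimal sketch of the Configurable pattern defined above, using a hypothetical model (the EMANE models later in this import follow the same layout):

    class ExampleModel(Configurable):
        _name = "example"
        # ('name', 'type', 'default', 'possible-value-list', 'caption')
        _confmatrix = [
            ("range", coreapi.CONF_DATA_TYPE_UINT32, '275', '', 'radio range'),
            ("enabled", coreapi.CONF_DATA_TYPE_BOOL, '1', 'On,Off', 'enabled'),
        ]
        _confgroups = "Example Parameters:1-2"

    ExampleModel.getnames()          # ('range', 'enabled')
    ExampleModel.getdefaultvalues()  # ('275', '1')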
19
daemon/core/constants.py.in
Normal file
|
@@ -0,0 +1,19 @@
|
|||
# Constants created by autoconf ./configure script
|
||||
COREDPY_VERSION = "@COREDPY_VERSION@"
|
||||
CORE_STATE_DIR = "@CORE_STATE_DIR@"
|
||||
CORE_CONF_DIR = "@CORE_CONF_DIR@"
|
||||
CORE_DATA_DIR = "@CORE_DATA_DIR@"
|
||||
CORE_LIB_DIR = "@CORE_LIB_DIR@"
|
||||
CORE_SBIN_DIR = "@SBINDIR@"
|
||||
|
||||
BRCTL_BIN = "@brctl_path@/brctl"
|
||||
SYSCTL_BIN = "@sysctl_path@/sysctl"
|
||||
IP_BIN = "@ip_path@/ip"
|
||||
TC_BIN = "@tc_path@/tc"
|
||||
EBTABLES_BIN = "@ebtables_path@/ebtables"
|
||||
IFCONFIG_BIN = "@ifconfig_path@/ifconfig"
|
||||
NGCTL_BIN = "@ngctl_path@/ngctl"
|
||||
VIMAGE_BIN = "@vimage_path@/vimage"
|
||||
QUAGGA_STATE_DIR = "@CORE_STATE_DIR@/run/quagga"
|
||||
MOUNT_BIN = "@mount_path@/mount"
|
||||
UMOUNT_BIN = "@umount_path@/umount"
|
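For reference, the @...@ tokens above are substituted by the ./configure script; a generated constants.py might contain, for example (the exact paths depend on the build host):

    # NGCTL_BIN = "/usr/sbin/ngctl"
    # VIMAGE_BIN = "/usr/sbin/vimage"
    # IFCONFIG_BIN = "/sbin/ifconfig"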
445
daemon/core/coreobj.py
Normal file
|
@@ -0,0 +1,445 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2013 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# authors: Tom Goff <thomas.goff@boeing.com>
|
||||
# Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
coreobj.py: defines the basic objects for emulation: the PyCoreObj base class,
|
||||
along with PyCoreNode, PyCoreNet, and PyCoreNetIf
|
||||
'''
|
||||
import sys, threading, os, shutil
|
||||
|
||||
from core.api import coreapi
|
||||
from core.misc.ipaddr import *
|
||||
|
||||
class Position(object):
|
||||
''' Helper class for Cartesian coordinate position
|
||||
'''
|
||||
def __init__(self, x = None, y = None, z = None):
|
||||
self.x = None
|
||||
self.y = None
|
||||
self.z = None
|
||||
self.set(x, y, z)
|
||||
|
||||
def set(self, x = None, y = None, z = None):
|
||||
''' Returns True if the position has actually changed.
|
||||
'''
|
||||
if self.x == x and self.y == y and self.z == z:
|
||||
return False
|
||||
self.x = x
|
||||
self.y = y
|
||||
self.z = z
|
||||
return True
|
||||
|
||||
def get(self):
|
||||
''' Fetch the (x,y,z) position tuple.
|
||||
'''
|
||||
return (self.x, self.y, self.z)
|
||||
|
||||
class PyCoreObj(object):
|
||||
''' Base class for pycore objects (nodes and nets)
|
||||
'''
|
||||
apitype = None
|
||||
|
||||
def __init__(self, session, objid = None, name = None, verbose = False,
|
||||
start = True):
|
||||
self.session = session
|
||||
if objid is None:
|
||||
objid = session.getobjid()
|
||||
self.objid = objid
|
||||
if name is None:
|
||||
name = "o%s" % self.objid
|
||||
self.name = name
|
||||
# ifindex is key, PyCoreNetIf instance is value
|
||||
self._netif = {}
|
||||
self.ifindex = 0
|
||||
self.canvas = None
|
||||
self.icon = None
|
||||
self.opaque = None
|
||||
self.verbose = verbose
|
||||
self.position = Position()
|
||||
|
||||
def startup(self):
|
||||
''' Each object implements its own startup method.
|
||||
'''
|
||||
raise NotImplementedError
|
||||
|
||||
def shutdown(self):
|
||||
''' Each object implements its own shutdown method.
|
||||
'''
|
||||
raise NotImplementedError
|
||||
|
||||
def setposition(self, x = None, y = None, z = None):
|
||||
''' Set the (x,y,z) position of the object.
|
||||
'''
|
||||
return self.position.set(x = x, y = y, z = z)
|
||||
|
||||
def getposition(self):
|
||||
''' Return an (x,y,z) tuple representing this object's position.
|
||||
'''
|
||||
return self.position.get()
|
||||
|
||||
def ifname(self, ifindex):
|
||||
return self.netif(ifindex).name
|
||||
|
||||
def netifs(self, sort=False):
|
||||
''' Iterate over attached network interfaces.
|
||||
'''
|
||||
if sort:
|
||||
return map(lambda k: self._netif[k], sorted(self._netif.keys()))
|
||||
else:
|
||||
return self._netif.itervalues()
|
||||
|
||||
def numnetif(self):
|
||||
''' Return the attached interface count.
|
||||
'''
|
||||
return len(self._netif)
|
||||
|
||||
def getifindex(self, netif):
|
||||
for ifindex in self._netif:
|
||||
if self._netif[ifindex] is netif:
|
||||
return ifindex
|
||||
return -1
|
||||
|
||||
def newifindex(self):
|
||||
while self.ifindex in self._netif:
|
||||
self.ifindex += 1
|
||||
ifindex = self.ifindex
|
||||
self.ifindex += 1
|
||||
return ifindex
|
||||
|
||||
def tonodemsg(self, flags):
|
||||
''' Build a CORE API Node Message for this object. Both nodes and
|
||||
networks can be represented by a Node Message.
|
||||
'''
|
||||
if self.apitype is None:
|
||||
return None
|
||||
tlvdata = ""
|
||||
(x, y, z) = self.getposition()
|
||||
tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_NUMBER,
|
||||
self.objid)
|
||||
tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_TYPE,
|
||||
self.apitype)
|
||||
tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_NAME,
|
||||
self.name)
|
||||
if hasattr(self, "type") and self.type is not None:
|
||||
tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_MODEL,
|
||||
self.type)
|
||||
|
||||
if x is not None:
|
||||
tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_XPOS, x)
|
||||
if y is not None:
|
||||
tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_YPOS, y)
|
||||
if self.canvas is not None:
|
||||
tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_CANVAS,
|
||||
self.canvas)
|
||||
tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_EMUID,
|
||||
self.objid)
|
||||
if self.icon is not None:
|
||||
tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_ICON,
|
||||
self.icon)
|
||||
if self.opaque is not None:
|
||||
tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_OPAQUE,
|
||||
self.opaque)
|
||||
msg = coreapi.CoreNodeMessage.pack(flags, tlvdata)
|
||||
return msg
|
||||
|
||||
def tolinkmsgs(self, flags):
|
||||
''' Build CORE API Link Messages for this object. There is no default
|
||||
behavior beyond returning an empty list: PyCoreNodes do not implement
|
||||
this, while PyCoreNets do.
|
||||
'''
|
||||
return []
|
||||
|
||||
def info(self, msg):
|
||||
''' Utility method for printing informational messages when verbose
|
||||
is turned on.
|
||||
'''
|
||||
if self.verbose:
|
||||
print "%s: %s" % (self.name, msg)
|
||||
sys.stdout.flush()
|
||||
|
||||
def warn(self, msg):
|
||||
''' Utility method for printing warning/error messages
|
||||
'''
|
||||
print >> sys.stderr, "%s: %s" % (self.name, msg)
|
||||
sys.stderr.flush()
|
||||
|
||||
def exception(self, level, source, text):
|
||||
''' Generate an Exception Message for this session, providing this
|
||||
object number.
|
||||
'''
|
||||
if self.session:
|
||||
id = None
|
||||
if isinstance(self.objid, int):
|
||||
id = self.objid
|
||||
elif isinstance(self.objid, str) and self.objid.isdigit():
|
||||
id = int(self.objid)
|
||||
self.session.exception(level, source, id, text)
|
||||
|
||||
|
||||
class PyCoreNode(PyCoreObj):
|
||||
''' Base class for nodes
|
||||
'''
|
||||
def __init__(self, session, objid = None, name = None, verbose = False,
|
||||
start = True):
|
||||
''' Initialization for node objects.
|
||||
'''
|
||||
PyCoreObj.__init__(self, session, objid, name, verbose=verbose,
|
||||
start=start)
|
||||
self.services = []
|
||||
self.type = None
|
||||
self.nodedir = None
|
||||
|
||||
def nodeid(self):
|
||||
return self.objid
|
||||
|
||||
def addservice(self, service):
|
||||
if service is not None:
|
||||
self.services.append(service)
|
||||
|
||||
def makenodedir(self):
|
||||
if self.nodedir is None:
|
||||
self.nodedir = \
|
||||
os.path.join(self.session.sessiondir, self.name + ".conf")
|
||||
os.makedirs(self.nodedir)
|
||||
self.tmpnodedir = True
|
||||
else:
|
||||
self.tmpnodedir = False
|
||||
|
||||
def rmnodedir(self):
|
||||
if hasattr(self.session.options, 'preservedir'):
|
||||
if self.session.options.preservedir == '1':
|
||||
return
|
||||
if self.tmpnodedir:
|
||||
shutil.rmtree(self.nodedir, ignore_errors = True)
|
||||
|
||||
def addnetif(self, netif, ifindex):
|
||||
if ifindex in self._netif:
|
||||
raise ValueError, "ifindex %s already exists" % ifindex
|
||||
self._netif[ifindex] = netif
|
||||
|
||||
def delnetif(self, ifindex):
|
||||
if ifindex not in self._netif:
|
||||
raise ValueError, "ifindex %s does not exist" % ifindex
|
||||
netif = self._netif.pop(ifindex)
|
||||
netif.shutdown()
|
||||
del netif
|
||||
|
||||
def netif(self, ifindex, net = None):
|
||||
if ifindex in self._netif:
|
||||
return self._netif[ifindex]
|
||||
else:
|
||||
return None
|
||||
|
||||
def attachnet(self, ifindex, net):
|
||||
if ifindex not in self._netif:
|
||||
raise ValueError, "ifindex %s does not exist" % ifindex
|
||||
self._netif[ifindex].attachnet(net)
|
||||
|
||||
def detachnet(self, ifindex):
|
||||
if ifindex not in self._netif:
|
||||
raise ValueError, "ifindex %s does not exist" % ifindex
|
||||
self._netif[ifindex].detachnet()
|
||||
|
||||
def setposition(self, x = None, y = None, z = None):
|
||||
changed = PyCoreObj.setposition(self, x = x, y = y, z = z)
|
||||
if not changed:
|
||||
# save extra interface range calculations
|
||||
return
|
||||
for netif in self.netifs(sort=True):
|
||||
netif.setposition(x, y, z)
|
||||
|
||||
def commonnets(self, obj, want_ctrl=False):
|
||||
''' Given another node or net object, return common networks between
|
||||
this node and that object. A list of tuples is returned, with each tuple
|
||||
consisting of (network, interface1, interface2).
|
||||
'''
|
||||
r = []
|
||||
for netif1 in self.netifs():
|
||||
if not want_ctrl and hasattr(netif1, 'control'):
|
||||
continue
|
||||
for netif2 in obj.netifs():
|
||||
if netif1.net == netif2.net:
|
||||
r += (netif1.net, netif1, netif2),
|
||||
return r
|
||||
|
||||
|
||||
|
||||
class PyCoreNet(PyCoreObj):
|
||||
''' Base class for networks
|
||||
'''
|
||||
linktype = coreapi.CORE_LINK_WIRED
|
||||
|
||||
def __init__(self, session, objid, name, verbose = False, start = True):
|
||||
''' Initialization for network objects.
|
||||
'''
|
||||
PyCoreObj.__init__(self, session, objid, name, verbose=verbose,
|
||||
start=start)
|
||||
self._linked = {}
|
||||
self._linked_lock = threading.Lock()
|
||||
|
||||
def attach(self, netif):
|
||||
i = self.newifindex()
|
||||
self._netif[i] = netif
|
||||
netif.netifi = i
|
||||
with self._linked_lock:
|
||||
self._linked[netif] = {}
|
||||
|
||||
def detach(self, netif):
|
||||
del self._netif[netif.netifi]
|
||||
netif.netifi = None
|
||||
with self._linked_lock:
|
||||
del self._linked[netif]
|
||||
|
||||
def tolinkmsgs(self, flags):
|
||||
''' Build CORE API Link Messages for this network. Each link message
|
||||
describes a link between this network and a node.
|
||||
'''
|
||||
msgs = []
|
||||
# build a link message from this network node to each node having a
|
||||
# connected interface
|
||||
for netif in self.netifs(sort=True):
|
||||
if not hasattr(netif, "node"):
|
||||
continue
|
||||
otherobj = netif.node
|
||||
if otherobj is None:
|
||||
# two layer-2 switches/hubs linked together via linknet()
|
||||
if not hasattr(netif, "othernet"):
|
||||
continue
|
||||
otherobj = netif.othernet
|
||||
if otherobj.objid == self.objid:
|
||||
continue
|
||||
|
||||
tlvdata = ""
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_N1NUMBER,
|
||||
self.objid)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_N2NUMBER,
|
||||
otherobj.objid)
|
||||
delay = netif.getparam('delay')
|
||||
bw = netif.getparam('bw')
|
||||
loss = netif.getparam('loss')
|
||||
duplicate = netif.getparam('duplicate')
|
||||
jitter = netif.getparam('jitter')
|
||||
if delay is not None:
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_DELAY,
|
||||
delay)
|
||||
if bw is not None:
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_BW,
|
||||
bw)
|
||||
if loss is not None:
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_PER,
|
||||
str(loss))
|
||||
if duplicate is not None:
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_DUP,
|
||||
str(duplicate))
|
||||
if jitter is not None:
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_JITTER,
|
||||
jitter)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_TYPE,
|
||||
self.linktype)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_IF2NUM,
|
||||
otherobj.getifindex(netif))
|
||||
for addr in netif.addrlist:
|
||||
(ip, sep, mask) = addr.partition('/')
|
||||
mask = int(mask)
|
||||
if isIPv4Address(ip):
|
||||
family = AF_INET
|
||||
tlvtypeip = coreapi.CORE_TLV_LINK_IF2IP4
|
||||
tlvtypemask = coreapi.CORE_TLV_LINK_IF2IP4MASK
|
||||
else:
|
||||
family = AF_INET6
|
||||
tlvtypeip = coreapi.CORE_TLV_LINK_IF2IP6
|
||||
tlvtypemask = coreapi.CORE_TLV_LINK_IF2IP6MASK
|
||||
ipl = socket.inet_pton(family, ip)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(tlvtypeip, \
|
||||
IPAddr(af=family, addr=ipl))
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(tlvtypemask, mask)
|
||||
|
||||
msg = coreapi.CoreLinkMessage.pack(flags, tlvdata)
|
||||
msgs.append(msg)
|
||||
return msgs
|
||||
|
||||
class PyCoreNetIf(object):
|
||||
''' Base class for interfaces.
|
||||
'''
|
||||
def __init__(self, node, name, mtu):
|
||||
self.node = node
|
||||
self.name = name
|
||||
if not isinstance(mtu, (int, long)):
|
||||
raise ValueError
|
||||
self.mtu = mtu
|
||||
self.net = None
|
||||
self._params = {}
|
||||
self.addrlist = []
|
||||
self.hwaddr = None
|
||||
self.poshook = None
|
||||
# used with EMANE
|
||||
self.transport_type = None
|
||||
# interface index on the network
|
||||
self.netindex = None
|
||||
|
||||
def startup(self):
|
||||
pass
|
||||
|
||||
def shutdown(self):
|
||||
pass
|
||||
|
||||
def attachnet(self, net):
|
||||
if self.net:
|
||||
self.detachnet()
|
||||
self.net = None
|
||||
net.attach(self)
|
||||
self.net = net
|
||||
|
||||
def detachnet(self):
|
||||
if self.net is not None:
|
||||
self.net.detach(self)
|
||||
|
||||
def addaddr(self, addr):
|
||||
self.addrlist.append(addr)
|
||||
|
||||
def deladdr(self, addr):
|
||||
self.addrlist.remove(addr)
|
||||
|
||||
def sethwaddr(self, addr):
|
||||
self.hwaddr = addr
|
||||
|
||||
def getparam(self, key):
|
||||
''' Retrieve a parameter from the _params dict,
|
||||
or None if the parameter does not exist.
|
||||
'''
|
||||
if key not in self._params:
|
||||
return None
|
||||
return self._params[key]
|
||||
|
||||
def getparams(self):
|
||||
''' Return (key, value) pairs from the _params dict.
|
||||
'''
|
||||
r = []
|
||||
for k in sorted(self._params.keys()):
|
||||
r.append((k, self._params[k]))
|
||||
return r
|
||||
|
||||
def setparam(self, key, value):
|
||||
''' Set a parameter in the _params dict.
|
||||
Returns True if the parameter has changed.
|
||||
'''
|
||||
if key in self._params:
|
||||
if self._params[key] == value:
|
||||
return False
|
||||
elif self._params[key] <= 0 and value <= 0:
|
||||
# treat None and 0 as unchanged values
|
||||
return False
|
||||
self._params[key] = value
|
||||
return True
|
||||
|
||||
def setposition(self, x, y, z):
|
||||
''' Dispatch to any position hook (self.poshook) handler.
|
||||
'''
|
||||
if self.poshook is not None:
|
||||
self.poshook(self, x, y, z)
|
||||
|
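A small behavior sketch for the interface parameter tracking implemented above (the values are hypothetical):

    netif = PyCoreNetIf(node=None, name="eth0", mtu=1500)
    netif.setparam('bw', 54000000)   # True  - new value recorded
    netif.setparam('bw', 54000000)   # False - unchanged, reconfiguration can be skipped
    netif.getparam('delay')          # None  - never set
    netif.getparams()                # [('bw', 54000000)]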
0
daemon/core/emane/__init__.py
Normal file
65
daemon/core/emane/bypass.py
Normal file
|
@@ -0,0 +1,65 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2011-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
bypass.py: EMANE Bypass model for CORE
|
||||
'''
|
||||
|
||||
import sys
|
||||
import string
|
||||
from core.api import coreapi
|
||||
|
||||
from core.constants import *
|
||||
from emane import EmaneModel
|
||||
|
||||
class EmaneBypassModel(EmaneModel):
|
||||
def __init__(self, session, objid = None, verbose = False):
|
||||
EmaneModel.__init__(self, session, objid, verbose)
|
||||
|
||||
_name = "emane_bypass"
|
||||
_confmatrix = [
|
||||
("none",coreapi.CONF_DATA_TYPE_BOOL, '0',
|
||||
'True,False','There are no parameters for the bypass model.'),
|
||||
]
|
||||
|
||||
# value groupings
|
||||
_confgroups = "Bypass Parameters:1-1"
|
||||
|
||||
def buildnemxmlfiles(self, e, ifc):
|
||||
''' Build the necessary nem, mac, and phy XMLs in the given path.
|
||||
If an individual NEM has a nonstandard config, we need to build
|
||||
that file also. Otherwise the WLAN-wide nXXemane_bypassnem.xml,
|
||||
nXXemane_bypassmac.xml, nXXemane_bypassphy.xml are used.
|
||||
'''
|
||||
values = e.getifcconfig(self.objid, self._name,
|
||||
self.getdefaultvalues(), ifc)
|
||||
if values is None:
|
||||
return
|
||||
nemdoc = e.xmldoc("nem")
|
||||
nem = nemdoc.getElementsByTagName("nem").pop()
|
||||
nem.setAttribute("name", "BYPASS NEM")
|
||||
mactag = nemdoc.createElement("mac")
|
||||
mactag.setAttribute("definition", self.macxmlname(ifc))
|
||||
nem.appendChild(mactag)
|
||||
phytag = nemdoc.createElement("phy")
|
||||
phytag.setAttribute("definition", self.phyxmlname(ifc))
|
||||
nem.appendChild(phytag)
|
||||
e.xmlwrite(nemdoc, self.nemxmlname(ifc))
|
||||
|
||||
macdoc = e.xmldoc("mac")
|
||||
mac = macdoc.getElementsByTagName("mac").pop()
|
||||
mac.setAttribute("name", "BYPASS MAC")
|
||||
mac.setAttribute("library", "bypassmaclayer")
|
||||
e.xmlwrite(macdoc, self.macxmlname(ifc))
|
||||
|
||||
phydoc = e.xmldoc("phy")
|
||||
phy = phydoc.getElementsByTagName("phy").pop()
|
||||
phy.setAttribute("name", "BYPASS PHY")
|
||||
phy.setAttribute("library", "bypassphylayer")
|
||||
e.xmlwrite(phydoc, self.phyxmlname(ifc))
|
||||
|
||||
|
124
daemon/core/emane/commeffect.py
Executable file
|
@@ -0,0 +1,124 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2013 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# authors: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
# Randy Charland <rcharland@ll.mit.edu>
|
||||
#
|
||||
'''
|
||||
commeffect.py: EMANE CommEffect model for CORE
|
||||
'''
|
||||
|
||||
import sys
|
||||
import string
|
||||
from core.api import coreapi
|
||||
|
||||
from core.constants import *
|
||||
from emane import EmaneModel
|
||||
|
||||
try:
|
||||
import emaneeventservice
|
||||
import emaneeventcommeffect
|
||||
except Exception, e:
|
||||
pass
|
||||
|
||||
def z(x):
|
||||
''' Helper to use 0 for None values. '''
|
||||
if x is None:
|
||||
return 0
|
||||
else:
|
||||
return x
|
||||
|
||||
class EmaneCommEffectModel(EmaneModel):
|
||||
def __init__(self, session, objid = None, verbose = False):
|
||||
EmaneModel.__init__(self, session, objid, verbose)
|
||||
|
||||
# model name
|
||||
_name = "emane_commeffect"
|
||||
# CommEffect parameters
|
||||
_confmatrix_shim = [
|
||||
("defaultconnectivity", coreapi.CONF_DATA_TYPE_BOOL, '0',
|
||||
'On,Off', 'defaultconnectivity'),
|
||||
("filterfile", coreapi.CONF_DATA_TYPE_STRING, '',
|
||||
'', 'filter file'),
|
||||
("groupid", coreapi.CONF_DATA_TYPE_UINT32, '0',
|
||||
'', 'NEM Group ID'),
|
||||
("enablepromiscuousmode", coreapi.CONF_DATA_TYPE_BOOL, '0',
|
||||
'On,Off', 'enable promiscuous mode'),
|
||||
("enabletighttimingmode", coreapi.CONF_DATA_TYPE_BOOL, '0',
|
||||
'On,Off', 'enable tight timing mode'),
|
||||
("receivebufferperiod", coreapi.CONF_DATA_TYPE_FLOAT, '1.0',
|
||||
'', 'receivebufferperiod'),
|
||||
]
|
||||
|
||||
_confmatrix = _confmatrix_shim
|
||||
# value groupings
|
||||
_confgroups = "CommEffect SHIM Parameters:1-%d" \
|
||||
% len(_confmatrix_shim)
|
||||
|
||||
def buildnemxmlfiles(self, e, ifc):
|
||||
''' Build the necessary nem and commeffect XMLs in the given path.
|
||||
If an individual NEM has a nonstandard config, we need to build
|
||||
that file also. Otherwise the WLAN-wide
|
||||
nXXemane_commeffectnem.xml, nXXemane_commeffectshim.xml are used.
|
||||
'''
|
||||
values = e.getifcconfig(self.objid, self._name,
|
||||
self.getdefaultvalues(), ifc)
|
||||
if values is None:
|
||||
return
|
||||
shimdoc = e.xmldoc("shim")
|
||||
shim = shimdoc.getElementsByTagName("shim").pop()
|
||||
shim.setAttribute("name", "commeffect SHIM")
|
||||
shim.setAttribute("library", "commeffectshim")
|
||||
|
||||
names = self.getnames()
|
||||
shimnames = list(names[:len(self._confmatrix_shim)])
|
||||
shimnames.remove("filterfile")
|
||||
|
||||
# append all shim options (except filterfile) to shimdoc
|
||||
map( lambda n: shim.appendChild(e.xmlparam(shimdoc, n, \
|
||||
self.valueof(n, values))), shimnames)
|
||||
# empty filterfile is not allowed
|
||||
ff = self.valueof("filterfile", values)
|
||||
if ff.strip() != '':
|
||||
shim.appendChild(e.xmlparam(shimdoc, "filterfile", ff))
|
||||
e.xmlwrite(shimdoc, self.shimxmlname(ifc))
|
||||
|
||||
nemdoc = e.xmldoc("nem")
|
||||
nem = nemdoc.getElementsByTagName("nem").pop()
|
||||
nem.setAttribute("name", "commeffect NEM")
|
||||
nem.setAttribute("type", "unstructured")
|
||||
nem.appendChild(e.xmlshimdefinition(nemdoc, self.shimxmlname(ifc)))
|
||||
e.xmlwrite(nemdoc, self.nemxmlname(ifc))
|
||||
|
||||
def linkconfig(self, netif, bw = None, delay = None,
|
||||
loss = None, duplicate = None, jitter = None, netif2 = None):
|
||||
''' Generate CommEffect events when a Link Message is received having
|
||||
link parameters.
|
||||
'''
|
||||
service = self.session.emane.service
|
||||
if service is None:
|
||||
self.session.warn("%s: EMANE event service unavailable" % \
|
||||
self._name)
|
||||
return
|
||||
if netif is None or netif2 is None:
|
||||
self.session.warn("%s: missing NEM information" % self._name)
|
||||
return
|
||||
# TODO: batch these into multiple events per transmission
|
||||
event = emaneeventcommeffect.EventCommEffect(1)
|
||||
index = 0
|
||||
e = self.session.obj(self.objid)
|
||||
nemid = e.getnemid(netif)
|
||||
nemid2 = e.getnemid(netif2)
|
||||
mbw = bw
|
||||
|
||||
event.set(index, nemid, 0, z(delay), 0, z(jitter), z(loss),
|
||||
z(duplicate), long(z(bw)), long(z(mbw)))
|
||||
service.publish(emaneeventcommeffect.EVENT_ID,
|
||||
emaneeventservice.PLATFORMID_ANY,
|
||||
nemid2, emaneeventservice.COMPONENTID_ANY,
|
||||
event.export())
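# Editor's sketch (illustrative, not part of the original file): a hedged
# example of how linkconfig() above is typically driven, assuming a running
# session 's' with an EMANE CommEffect network; 'wlan_id', 'if1' and 'if2'
# are hypothetical identifiers:
#
#   wlan = s.obj(wlan_id)           # the EmaneNode forwards to its model
#   # 20 ms delay, 1% loss, 0.5% duplicates toward the NEM behind if2
#   wlan.linkconfig(netif=if1, delay=20, loss=1.0, duplicate=0.5, netif2=if2)
#
# Unspecified parameters pass through z() and are published as 0.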
|
||||
|
||||
|
||||
|
daemon/core/emane/emane.py (new file, 844 lines)
@@ -0,0 +1,844 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2013 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
emane.py: definition of an Emane class for implementing configuration
|
||||
control of an EMANE emulation.
|
||||
'''
|
||||
|
||||
import sys, os, threading, subprocess, time, string
|
||||
from xml.dom.minidom import parseString, Document
|
||||
from core.constants import *
|
||||
from core.api import coreapi
|
||||
from core.misc.ipaddr import MacAddr
|
||||
from core.conf import ConfigurableManager, Configurable
|
||||
from core.mobility import WirelessModel
|
||||
from core.emane.nodes import EmaneNode
|
||||
try:
|
||||
import emaneeventservice
|
||||
import emaneeventlocation
|
||||
except Exception, e:
|
||||
pass
|
||||
|
||||
class Emane(ConfigurableManager):
|
||||
''' EMANE controller object. Lives in a Session instance and is used for
|
||||
building EMANE config files from all of the EmaneNode objects in this
|
||||
emulation, and for controlling the EMANE daemons.
|
||||
'''
|
||||
_name = "emane"
|
||||
_type = coreapi.CORE_TLV_REG_EMULSRV
|
||||
_hwaddr_prefix = "02:02"
|
||||
|
||||
def __init__(self, session):
|
||||
ConfigurableManager.__init__(self, session)
|
||||
self.verbose = self.session.getcfgitembool('verbose', False)
|
||||
self._objs = {}
|
||||
self._objslock = threading.Lock()
|
||||
self._ifccounts = {}
|
||||
self._ifccountslock = threading.Lock()
|
||||
self._modelclsmap = {}
|
||||
# Port numbers are allocated from these counters
|
||||
self.platformport = self.session.getcfgitemint('emane_platform_port',
|
||||
8100)
|
||||
self.transformport = self.session.getcfgitemint('emane_transform_port',
|
||||
8200)
|
||||
# model for global EMANE configuration options
|
||||
self.emane_config = EmaneGlobalModel(session, None, self.verbose)
|
||||
session.broker.handlers += (self.handledistributed, )
|
||||
self.loadmodels()
|
||||
# this allows the event service Python bindings to be absent
|
||||
try:
|
||||
self.service = emaneeventservice.EventService()
|
||||
except:
|
||||
self.service = None
|
||||
self.doeventloop = False
|
||||
self.eventmonthread = None
|
||||
# EMANE 0.7.4 support -- to be removed when 0.7.4 support is deprecated
|
||||
self.emane074 = False
|
||||
try:
|
||||
tmp = emaneeventlocation.EventLocation(1)
|
||||
# check if yaw parameter is supported by Location Events
|
||||
# if so, we have EMANE 0.8.1+; if not, we have EMANE 0.7.4/earlier
|
||||
tmp.set(0, 1, 2, 2, 2, 3)
|
||||
except TypeError:
|
||||
self.emane074 = True
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
|
||||
def loadmodels(self):
|
||||
''' dynamically load EMANE models that were specified in the config file
|
||||
'''
|
||||
self._modelclsmap.clear()
|
||||
self._modelclsmap[self.emane_config._name] = self.emane_config
|
||||
emane_models = self.session.getcfgitem('emane_models')
|
||||
if emane_models is None:
|
||||
return
|
||||
emane_models = emane_models.split(',')
|
||||
for model in emane_models:
|
||||
model = model.strip()
|
||||
try:
|
||||
modelfile = "%s" % model.lower()
|
||||
clsname = "Emane%sModel" % model
|
||||
importcmd = "from %s import %s" % (modelfile, clsname)
|
||||
exec(importcmd)
|
||||
except Exception, e:
|
||||
warntxt = "unable to load the EMANE model '%s'" % modelfile
|
||||
warntxt += " specified in the config file (%s)" % e
|
||||
self.session.exception(coreapi.CORE_EXCP_LEVEL_WARNING, "emane",
|
||||
None, warntxt)
|
||||
self.warn(warntxt)
|
||||
continue
|
||||
# record the model name to class name mapping
|
||||
# this should match clsname._name
|
||||
confname = "emane_%s" % model.lower()
|
||||
self._modelclsmap[confname] = eval(clsname)
|
||||
# each EmaneModel must have ModelName.configure() defined
|
||||
confmethod = eval("%s.configure_emane" % clsname)
|
||||
self.session.addconfobj(confname, coreapi.CORE_TLV_REG_WIRELESS,
|
||||
confmethod)
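# Editor's note (illustrative, not part of the original file): for a
# configuration entry such as "emane_models = RfPipe", the loop above
# effectively runs "from rfpipe import EmaneRfPipeModel" and registers the
# class under the config name "emane_rfpipe", which must match
# EmaneRfPipeModel._name.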
|
||||
|
||||
def addobj(self, obj):
|
||||
''' add a new EmaneNode object to this Emane controller object
|
||||
'''
|
||||
self._objslock.acquire()
|
||||
if obj.objid in self._objs:
|
||||
self._objslock.release()
|
||||
raise KeyError, "non-unique EMANE object id %s for %s" % \
|
||||
(obj.objid, obj)
|
||||
self._objs[obj.objid] = obj
|
||||
self._objslock.release()
|
||||
|
||||
def getmodels(self, n):
|
||||
''' Used with XML export; see ConfigurableManager.getmodels()
|
||||
'''
|
||||
r = ConfigurableManager.getmodels(self, n)
|
||||
# EMANE global params are stored with first EMANE node (if non-default
|
||||
# values are configured)
|
||||
sorted_ids = sorted(self.configs.keys())
|
||||
if None in self.configs and len(sorted_ids) > 1 and \
|
||||
n.objid == sorted_ids[1]:
|
||||
v = self.configs[None]
|
||||
for model in v:
|
||||
cls = self._modelclsmap[model[0]]
|
||||
vals = model[1]
|
||||
r.append((cls, vals))
|
||||
return r
|
||||
|
||||
def getifcconfig(self, nodenum, conftype, defaultvalues, ifc):
|
||||
# use the network-wide config values or interface(NEM)-specific values?
|
||||
if ifc is None:
|
||||
return self.getconfig(nodenum, conftype, defaultvalues)[1]
|
||||
else:
|
||||
# don't use default values when interface config is the same as net
|
||||
# note here that using ifc.node.objid as key allows for only one type
|
||||
# of each model per node; TODO: use both node and interface as key
|
||||
return self.getconfig(ifc.node.objid, conftype, None)[1]
|
||||
|
||||
def setup(self):
|
||||
''' Populate self._objs with EmaneNodes; perform distributed setup;
|
||||
associate models with EmaneNodes from self.config.
|
||||
'''
|
||||
with self.session._objslock:
|
||||
for obj in self.session.objs():
|
||||
if isinstance(obj, EmaneNode):
|
||||
self.addobj(obj)
|
||||
if len(self._objs) == 0:
|
||||
return False
|
||||
if self.checkdistributed():
|
||||
# we are slave, but haven't received a platformid yet
|
||||
cfgval = self.getconfig(None, self.emane_config._name,
|
||||
self.emane_config.getdefaultvalues())[1]
|
||||
i = self.emane_config.getnames().index('platform_id_start')
|
||||
if cfgval[i] == self.emane_config.getdefaultvalues()[i]:
|
||||
return False
|
||||
self.setnodemodels()
|
||||
return True
|
||||
|
||||
def startup(self):
|
||||
''' after all the EmaneNode objects have been added, build XML files
|
||||
and start the daemons
|
||||
'''
|
||||
self.reset()
|
||||
if not self.setup():
|
||||
return
|
||||
with self._objslock:
|
||||
self.buildxml()
|
||||
self.starteventmonitor()
|
||||
if self.numnems() > 0:
|
||||
self.startdaemons()
|
||||
self.installnetifs()
|
||||
|
||||
def poststartup(self):
|
||||
''' Retransmit location events now that all NEMs are active.
|
||||
'''
|
||||
if self.doeventmonitor():
|
||||
return
|
||||
with self._objslock:
|
||||
for n in sorted(self._objs.keys()):
|
||||
e = self._objs[n]
|
||||
for netif in e.netifs():
|
||||
(x, y, z) = netif.node.position.get()
|
||||
e.setnemposition(netif, x, y, z)
|
||||
|
||||
def reset(self):
|
||||
''' remove all EmaneNode objects from the dictionary,
|
||||
reset port numbers and nem id counters
|
||||
'''
|
||||
with self._objslock:
|
||||
self._objs.clear()
|
||||
# don't clear self._ifccounts here; NEM counts are needed for buildxml
|
||||
self.platformport = self.session.getcfgitemint('emane_platform_port',
|
||||
8100)
|
||||
self.transformport = self.session.getcfgitemint('emane_transform_port',
|
||||
8200)
|
||||
|
||||
def shutdown(self):
|
||||
''' stop all EMANE daemons
|
||||
'''
|
||||
self._ifccountslock.acquire()
|
||||
self._ifccounts.clear()
|
||||
self._ifccountslock.release()
|
||||
self._objslock.acquire()
|
||||
if len(self._objs) == 0:
|
||||
self._objslock.release()
|
||||
return
|
||||
self.info("Stopping EMANE daemons.")
|
||||
self.deinstallnetifs()
|
||||
self.stopdaemons()
|
||||
self.stopeventmonitor()
|
||||
self._objslock.release()
|
||||
|
||||
def handledistributed(self, msg):
|
||||
''' Broker handler for processing CORE API messages as they are
|
||||
received. This is used to snoop the Link add messages to get NEM
|
||||
counts of NEMs that exist on other servers.
|
||||
'''
|
||||
if msg.msgtype == coreapi.CORE_API_LINK_MSG and \
|
||||
msg.flags & coreapi.CORE_API_ADD_FLAG:
|
||||
nn = msg.nodenumbers()
|
||||
# first node is always link layer node in Link add message
|
||||
if nn[0] in self.session.broker.nets:
|
||||
serverlist = self.session.broker.getserversbynode(nn[1])
|
||||
for server in serverlist:
|
||||
self._ifccountslock.acquire()
|
||||
if server not in self._ifccounts:
|
||||
self._ifccounts[server] = 1
|
||||
else:
|
||||
self._ifccounts[server] += 1
|
||||
self._ifccountslock.release()
|
||||
|
||||
def checkdistributed(self):
|
||||
''' Check for EMANE nodes that exist on multiple emulation servers and
|
||||
coordinate the NEM id and port number space.
|
||||
If we are the master EMANE node, return False so initialization will
|
||||
proceed as normal; otherwise slaves return True here and
|
||||
initialization is deferred.
|
||||
'''
|
||||
# check with the session whether we are the "master" Emane object
|
||||
master = False
|
||||
self._objslock.acquire()
|
||||
if len(self._objs) > 0:
|
||||
master = self.session.master
|
||||
self.info("Setup EMANE with master=%s." % master)
|
||||
self._objslock.release()
|
||||
|
||||
# we are not the master Emane object, wait for nem id and ports
|
||||
if not master:
|
||||
return True
|
||||
|
||||
cfgval = self.getconfig(None, self.emane_config._name,
|
||||
self.emane_config.getdefaultvalues())[1]
|
||||
values = list(cfgval)
|
||||
|
||||
nemcount = 0
|
||||
self._objslock.acquire()
|
||||
for n in self._objs:
|
||||
emanenode = self._objs[n]
|
||||
nemcount += emanenode.numnetif()
|
||||
nemid = int(self.emane_config.valueof("nem_id_start", values))
|
||||
nemid += nemcount
|
||||
platformid = int(self.emane_config.valueof("platform_id_start", values))
|
||||
names = list(self.emane_config.getnames())
|
||||
|
||||
# build an ordered list of servers so platform ID is deterministic
|
||||
servers = []
|
||||
for n in sorted(self._objs):
|
||||
for s in self.session.broker.getserversbynode(n):
|
||||
if s not in servers:
|
||||
servers.append(s)
|
||||
self._objslock.release()
|
||||
|
||||
for server in servers:
|
||||
if server == "localhost":
|
||||
continue
|
||||
(host, port, sock) = self.session.broker.getserver(server)
|
||||
if sock is None:
|
||||
continue
|
||||
platformid += 1
|
||||
typeflags = coreapi.CONF_TYPE_FLAGS_UPDATE
|
||||
values[names.index("platform_id_start")] = str(platformid)
|
||||
values[names.index("nem_id_start")] = str(nemid)
|
||||
msg = EmaneGlobalModel.toconfmsg(flags=0, nodenum=None,
|
||||
typeflags=typeflags, values=values)
|
||||
sock.send(msg)
|
||||
# increment nemid for next server by number of interfaces
|
||||
self._ifccountslock.acquire()
|
||||
if server in self._ifccounts:
|
||||
nemid += self._ifccounts[server]
|
||||
self._ifccountslock.release()
|
||||
|
||||
return False
|
||||
|
||||
def buildxml(self):
|
||||
''' Build all of the XML files required to run EMANE.
|
||||
'''
|
||||
# assume self._objslock is already held here
|
||||
if self.verbose:
|
||||
self.info("Emane.buildxml()")
|
||||
self.buildplatformxml()
|
||||
self.buildnemxml()
|
||||
self.buildtransportxml()
|
||||
|
||||
def xmldoc(self, doctype):
|
||||
''' Returns an xml.dom.minidom.Document with a DOCTYPE tag set to the
|
||||
provided doctype string, and an initial element having the same
|
||||
name.
|
||||
'''
|
||||
# we hack in the DOCTYPE using the parser
|
||||
docstr = """<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!DOCTYPE %s SYSTEM "file:///usr/share/emane/dtd/%s.dtd">
|
||||
<%s/>""" % (doctype, doctype, doctype)
|
||||
# normally this would be: doc = Document()
|
||||
return parseString(docstr)
|
||||
|
||||
def xmlparam(self, doc, name, value):
|
||||
''' Convenience function for building a parameter tag of the format:
|
||||
<param name="name" value="value" />
|
||||
'''
|
||||
p = doc.createElement("param")
|
||||
p.setAttribute("name", name)
|
||||
p.setAttribute("value", value)
|
||||
return p
|
||||
|
||||
def xmlshimdefinition(self, doc, name):
|
||||
''' Convenience function for building a definition tag of the format:
|
||||
<shim definition="name" />
|
||||
'''
|
||||
p = doc.createElement("shim")
|
||||
p.setAttribute("definition", name)
|
||||
return p
|
||||
|
||||
def xmlwrite(self, doc, filename):
|
||||
''' Write the given XML document to the specified filename.
|
||||
'''
|
||||
#self.info("%s" % doc.toprettyxml(indent=" "))
|
||||
pathname = os.path.join(self.session.sessiondir, filename)
|
||||
f = open(pathname, "w")
|
||||
doc.writexml(writer=f, indent="", addindent=" ", newl="\n", \
|
||||
encoding="UTF-8")
|
||||
f.close()
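# Editor's sketch (illustrative, not part of the original file): the helpers
# above can be exercised directly, since xmldoc() simply parses a string to
# smuggle a DOCTYPE into minidom. Assuming an Emane instance 'e':
#
#   doc = e.xmldoc("nem")
#   nem = doc.getElementsByTagName("nem").pop()
#   nem.appendChild(e.xmlparam(doc, "transportendpoint", "localhost:8200"))
#   nem.appendChild(e.xmlshimdefinition(doc, "n1emane_commeffectshim.xml"))
#   e.xmlwrite(doc, "example_nem.xml")   # written into the session directory
#
# yields an XML file with the EMANE DTD declaration, one <param> and one
# <shim definition=.../> child under the <nem/> root.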
|
||||
|
||||
def setnodemodels(self):
|
||||
''' Associate EmaneModel classes with EmaneNode nodes. The model
|
||||
configurations are stored in self.configs.
|
||||
'''
|
||||
for n in self._objs:
|
||||
self.setnodemodel(n)
|
||||
|
||||
def setnodemodel(self, n):
|
||||
emanenode = self._objs[n]
|
||||
for (t, v) in self.configs[n]:
|
||||
if t is None:
|
||||
continue
|
||||
if t == self.emane_config._name:
|
||||
continue
|
||||
# only use the first valid EmaneModel
|
||||
# convert model name to class (e.g. emane_rfpipe -> EmaneRfPipe)
|
||||
cls = self._modelclsmap[t]
|
||||
emanenode.setmodel(cls, v)
|
||||
return True
|
||||
# no model has been configured for this EmaneNode
|
||||
return False
|
||||
|
||||
def nemlookup(self, nemid):
|
||||
''' Look for the given numerical NEM ID and return the first matching
|
||||
EmaneNode and NEM interface.
|
||||
'''
|
||||
emanenode = None
|
||||
netif = None
|
||||
|
||||
for n in self._objs:
|
||||
emanenode = self._objs[n]
|
||||
netif = emanenode.getnemnetif(nemid)
|
||||
if netif is not None:
|
||||
break
|
||||
else:
|
||||
emanenode = None
|
||||
return (emanenode, netif)
|
||||
|
||||
def numnems(self):
|
||||
''' Return the number of NEMs emulated locally.
|
||||
'''
|
||||
count = 0
|
||||
for o in self._objs.values():
|
||||
count += len(o.netifs())
|
||||
return count
|
||||
|
||||
def buildplatformxml(self):
|
||||
''' Build a platform.xml file now that all nodes are configured.
|
||||
'''
|
||||
values = self.getconfig(None, "emane",
|
||||
self.emane_config.getdefaultvalues())[1]
|
||||
doc = self.xmldoc("platform")
|
||||
plat = doc.getElementsByTagName("platform").pop()
|
||||
platformid = self.emane_config.valueof("platform_id_start", values)
|
||||
plat.setAttribute("name", "Platform %s" % platformid)
|
||||
plat.setAttribute("id", platformid)
|
||||
|
||||
names = list(self.emane_config.getnames())
|
||||
platform_names = names[:len(self.emane_config._confmatrix_platform)]
|
||||
platform_names.remove('platform_id_start')
|
||||
|
||||
# append all platform options (except starting id) to doc
|
||||
map( lambda n: plat.appendChild(self.xmlparam(doc, n, \
|
||||
self.emane_config.valueof(n, values))), platform_names)
|
||||
|
||||
nemid = int(self.emane_config.valueof("nem_id_start", values))
|
||||
# assume self._objslock is already held here
|
||||
for n in sorted(self._objs.keys()):
|
||||
emanenode = self._objs[n]
|
||||
nems = emanenode.buildplatformxmlentry(doc)
|
||||
for netif in sorted(nems, key=lambda n: n.node.objid):
|
||||
# set ID, endpoints here
|
||||
nementry = nems[netif]
|
||||
nementry.setAttribute("id", "%d" % nemid)
|
||||
# insert nem options (except nem id) to doc
|
||||
trans_addr = self.emane_config.valueof("transportendpoint", \
|
||||
values)
|
||||
nementry.insertBefore(self.xmlparam(doc, "transportendpoint", \
|
||||
"%s:%d" % (trans_addr, self.transformport)),
|
||||
nementry.firstChild)
|
||||
platform_addr = self.emane_config.valueof("platformendpoint", \
|
||||
values)
|
||||
nementry.insertBefore(self.xmlparam(doc, "platformendpoint", \
|
||||
"%s:%d" % (platform_addr, self.platformport)),
|
||||
nementry.firstChild)
|
||||
plat.appendChild(nementry)
|
||||
emanenode.setnemid(netif, nemid)
|
||||
# NOTE: MAC address set before here is incorrect, including the one
|
||||
# sent from the GUI via link message
|
||||
# MAC address determined by NEM ID: 02:02:00:00:nn:nn
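# (editor's note, illustrative: NEM ID 300 = 0x012C yields 02:02:00:00:01:2C)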
|
||||
macstr = self._hwaddr_prefix + ":00:00:"
|
||||
macstr += "%02X:%02X" % ((nemid >> 8) & 0xFF, nemid & 0xFF)
|
||||
netif.sethwaddr(MacAddr.fromstring(macstr))
|
||||
# increment counters used to manage IDs, endpoint port numbers
|
||||
nemid += 1
|
||||
self.platformport += 1
|
||||
self.transformport += 1
|
||||
self.xmlwrite(doc, "platform.xml")
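# Editor's sketch (illustrative, not part of the original file): each NEM
# entry written into platform.xml by the loop above has roughly this shape,
# using the default endpoint addresses and the first allocated ports:
#
#   <nem name="n1.0.158" definition="nXXemane_rfpipenem.xml" id="1">
#     <param name="platformendpoint" value="localhost:8100"/>
#     <param name="transportendpoint" value="localhost:8200"/>
#     <transport definition="nXXtransvirtual.xml" group="1">
#       <param name="device" value="n1.0.158"/>
#     </transport>
#   </nem>
#
# with the id, MAC address and both port counters incremented per NEM.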
|
||||
|
||||
def buildnemxml(self):
|
||||
''' Builds the xxxnem.xml, xxxmac.xml, and xxxphy.xml files which
|
||||
are defined on a per-EmaneNode basis.
|
||||
'''
|
||||
for n in sorted(self._objs.keys()):
|
||||
emanenode = self._objs[n]
|
||||
nems = emanenode.buildnemxmlfiles(self)
|
||||
|
||||
def buildtransportxml(self):
|
||||
''' Calls emanegentransportxml using a platform.xml file to build
|
||||
the transportdaemon*.xml.
|
||||
'''
|
||||
try:
|
||||
subprocess.check_call(["emanegentransportxml", "platform.xml"], \
|
||||
cwd=self.session.sessiondir)
|
||||
except Exception, e:
|
||||
self.info("error running emanegentransportxml: %s" % e)
|
||||
|
||||
def startdaemons(self):
|
||||
''' Start the appropriate EMANE daemons. The transport daemon will
|
||||
bind to the TAP interfaces.
|
||||
'''
|
||||
if self.verbose:
|
||||
self.info("Emane.startdaemons()")
|
||||
path = self.session.sessiondir
|
||||
loglevel = "2"
|
||||
cfgloglevel = self.session.getcfgitemint("emane_log_level")
|
||||
realtime = self.session.getcfgitembool("emane_realtime", True)
|
||||
if cfgloglevel:
|
||||
self.info("setting user-defined EMANE log level: %d" % cfgloglevel)
|
||||
loglevel = str(cfgloglevel)
|
||||
emanecmd = ["emane", "-d", "--logl", loglevel, "-f", \
|
||||
os.path.join(path, "emane.log")]
|
||||
if realtime:
|
||||
emanecmd += "-r",
|
||||
try:
|
||||
cmd = emanecmd + [os.path.join(path, "platform.xml")]
|
||||
if self.verbose:
|
||||
self.info("Emane.startdaemons() running %s" % str(cmd))
|
||||
subprocess.check_call(cmd, cwd=path)
|
||||
except Exception, e:
|
||||
errmsg = "error starting emane: %s" % e
|
||||
self.session.exception(coreapi.CORE_EXCP_LEVEL_FATAL, "emane",
|
||||
None, errmsg)
|
||||
self.info(errmsg)
|
||||
|
||||
# start one transport daemon per transportdaemon*.xml file
|
||||
transcmd = ["emanetransportd", "-d", "--logl", loglevel, "-f", \
|
||||
os.path.join(path, "emanetransportd.log")]
|
||||
if realtime:
|
||||
transcmd += "-r",
|
||||
files = os.listdir(path)
|
||||
for file in files:
|
||||
if file[-3:] == "xml" and file[:15] == "transportdaemon":
|
||||
cmd = transcmd + [os.path.join(path, file)]
|
||||
try:
|
||||
if self.verbose:
|
||||
self.info("Emane.startdaemons() running %s" % str(cmd))
|
||||
subprocess.check_call(cmd, cwd=path)
|
||||
except Exception, e:
|
||||
errmsg = "error starting emanetransportd: %s" % e
|
||||
self.session.exception(coreapi.CORE_EXCP_LEVEL_FATAL, "emane",
|
||||
None, errmsg)
|
||||
self.info(errmsg)
|
||||
|
||||
def stopdaemons(self):
|
||||
''' Kill the appropriate EMANE daemons.
|
||||
'''
|
||||
# TODO: we may want to improve this if we had the PIDs from the
|
||||
# specific EMANE daemons that we've started
|
||||
subprocess.call(["killall", "-q", "emane"])
|
||||
subprocess.call(["killall", "-q", "emanetransportd"])
|
||||
|
||||
def installnetifs(self):
|
||||
''' Install TUN/TAP virtual interfaces into their proper namespaces
|
||||
now that the EMANE daemons are running.
|
||||
'''
|
||||
for n in sorted(self._objs.keys()):
|
||||
emanenode = self._objs[n]
|
||||
if self.verbose:
|
||||
self.info("Emane.installnetifs() for node %d" % n)
|
||||
emanenode.installnetifs()
|
||||
|
||||
def deinstallnetifs(self):
|
||||
''' Uninstall TUN/TAP virtual interfaces.
|
||||
'''
|
||||
for n in sorted(self._objs.keys()):
|
||||
emanenode = self._objs[n]
|
||||
emanenode.deinstallnetifs()
|
||||
|
||||
def configure(self, session, msg):
|
||||
''' Handle configuration messages for global EMANE config.
|
||||
'''
|
||||
r = self.emane_config.configure_emane(session, msg)
|
||||
|
||||
# extra logic to start slave Emane object after nemid has been
|
||||
# configured from the master
|
||||
conftype = msg.gettlv(coreapi.CORE_TLV_CONF_TYPE)
|
||||
if conftype == coreapi.CONF_TYPE_FLAGS_UPDATE and \
|
||||
self.session.master == False:
|
||||
self.startup()
|
||||
|
||||
return r
|
||||
|
||||
def doeventmonitor(self):
|
||||
''' Returns boolean whether or not EMANE events will be monitored.
|
||||
'''
|
||||
# this support must be explicitly turned on; by default, CORE will
|
||||
# generate the EMANE events when nodes are moved
|
||||
return self.session.getcfgitembool('emane_event_monitor', False)
|
||||
|
||||
def starteventmonitor(self):
|
||||
''' Start monitoring EMANE location events if configured to do so.
|
||||
'''
|
||||
if self.verbose:
|
||||
self.info("Emane.starteventmonitor()")
|
||||
if not self.doeventmonitor():
|
||||
return
|
||||
if self.service is None:
|
||||
errmsg = "Warning: EMANE events will not be generated " \
|
||||
"because the emaneeventservice\n binding was " \
|
||||
"unable to load " \
|
||||
"(install the python-emaneeventservice bindings)"
|
||||
self.session.exception(coreapi.CORE_EXCP_LEVEL_WARNING, "emane",
|
||||
None, errmsg)
|
||||
self.warn(errmsg)
|
||||
|
||||
return
|
||||
self.doeventloop = True
|
||||
self.eventmonthread = threading.Thread(target = self.eventmonitorloop)
|
||||
self.eventmonthread.daemon = True
|
||||
self.eventmonthread.start()
|
||||
|
||||
|
||||
def stopeventmonitor(self):
|
||||
''' Stop monitoring EMANE location events.
|
||||
'''
|
||||
self.doeventloop = False
|
||||
if self.service is not None:
|
||||
self.service.breakloop()
|
||||
# reset the service, otherwise nextEvent won't work
|
||||
del self.service
|
||||
self.service = emaneeventservice.EventService()
|
||||
if self.eventmonthread is not None:
|
||||
self.eventmonthread.join()
|
||||
self.eventmonthread = None
|
||||
|
||||
def eventmonitorloop(self):
|
||||
''' Thread target that monitors EMANE location events.
|
||||
'''
|
||||
if self.service is None:
|
||||
return
|
||||
self.info("subscribing to EMANE location events")
|
||||
#self.service.subscribe(emaneeventlocation.EVENT_ID,
|
||||
# self.handlelocationevent)
|
||||
#self.service.loop()
|
||||
#self.service.subscribe(emaneeventlocation.EVENT_ID, None)
|
||||
while self.doeventloop is True:
|
||||
(event, platform, nem, component, data) = self.service.nextEvent()
|
||||
if event == emaneeventlocation.EVENT_ID:
|
||||
self.handlelocationevent(event, platform, nem, component, data)
|
||||
|
||||
self.info("unsubscribing from EMANE location events")
|
||||
#self.service.unsubscribe(emaneeventlocation.EVENT_ID)
|
||||
|
||||
def handlelocationevent(self, event, platform, nem, component, data):
|
||||
''' Handle an EMANE location event.
|
||||
'''
|
||||
event = emaneeventlocation.EventLocation(data)
|
||||
entries = event.entries()
|
||||
for e in entries.values():
|
||||
# yaw,pitch,roll,azimuth,elevation,velocity are unhandled
|
||||
(nemid, lat, long, alt) = e[:4]
|
||||
# convert nemid to node number
|
||||
(emanenode, netif) = self.nemlookup(nemid)
|
||||
if netif is None:
|
||||
if self.verbose:
|
||||
self.info("location event for unknown NEM %s" % nemid)
|
||||
continue
|
||||
n = netif.node.objid
|
||||
# convert from lat/long/alt to x,y,z coordinates
|
||||
(x, y, z) = self.session.location.getxyz(lat, long, alt)
|
||||
x = int(x)
|
||||
y = int(y)
|
||||
z = int(z)
|
||||
if self.verbose:
|
||||
self.info("location event NEM %s (%s, %s, %s) -> (%s, %s, %s)" \
|
||||
% (nemid, lat, long, alt, x, y, z))
|
||||
try:
|
||||
if (x.bit_length() > 16) or (y.bit_length() > 16) or \
|
||||
(z.bit_length() > 16) or (x < 0) or (y < 0) or (z < 0):
|
||||
warntxt = "Unable to build node location message since " \
|
||||
"received lat/long/alt exceeds coordinate " \
|
||||
"space: NEM %s (%d, %d, %d)" % (nemid, x, y, z)
|
||||
self.info(warntxt)
|
||||
self.session.exception(coreapi.CORE_EXCP_LEVEL_ERROR,
|
||||
"emane", None, warntxt)
|
||||
continue
|
||||
except AttributeError:
|
||||
# int.bit_length() not present on Python 2.6
|
||||
pass
|
||||
|
||||
# generate a node message for this location update
|
||||
try:
|
||||
node = self.session.obj(n)
|
||||
except KeyError:
|
||||
self.warn("location event NEM %s has no corresponding node %s" \
|
||||
% (nemid, n))
|
||||
continue
|
||||
# don't use node.setposition(x,y,z) which generates an event
|
||||
node.position.set(x,y,z)
|
||||
msg = node.tonodemsg(flags=0)
|
||||
self.session.broadcastraw(None, msg)
|
||||
self.session.sdt.updatenodegeo(node, lat, long, alt)
|
||||
|
||||
|
||||
class EmaneModel(WirelessModel):
|
||||
''' EMANE models inherit from this parent class, which takes care of
|
||||
handling configuration messages based on the _confmatrix list of
|
||||
configurable parameters. Helper functions also live here.
|
||||
'''
|
||||
_prefix = {'y': 1e-24, # yocto
|
||||
'z': 1e-21, # zepto
|
||||
'a': 1e-18, # atto
|
||||
'f': 1e-15, # femto
|
||||
'p': 1e-12, # pico
|
||||
'n': 1e-9, # nano
|
||||
'u': 1e-6, # micro
|
||||
'm': 1e-3, # milli
|
||||
'c': 1e-2, # centi
|
||||
'd': 1e-1, # deci
|
||||
'k': 1e3, # kilo
|
||||
'M': 1e6, # mega
|
||||
'G': 1e9, # giga
|
||||
'T': 1e12, # tera
|
||||
'P': 1e15, # peta
|
||||
'E': 1e18, # exa
|
||||
'Z': 1e21, # zetta
|
||||
'Y': 1e24, # yotta
|
||||
}
|
||||
|
||||
@classmethod
|
||||
def configure_emane(cls, session, msg):
|
||||
''' Handle configuration messages for setting up a model.
|
||||
Pass the Emane object as the manager object.
|
||||
'''
|
||||
return cls.configure(session.emane, msg)
|
||||
|
||||
@classmethod
|
||||
def emane074_fixup(cls, value, div=1.0):
|
||||
''' Helper for converting 0.8.1 and newer values to EMANE 0.7.4
|
||||
compatible values.
|
||||
NOTE: This should be removed when support for 0.7.4 has been
|
||||
deprecated.
|
||||
'''
|
||||
if div == 0:
|
||||
return "0"
|
||||
if type(value) is not str:
|
||||
return str(value / div)
|
||||
if value.endswith(tuple(cls._prefix.keys())):
|
||||
suffix = value[-1]
|
||||
value = float(value[:-1]) * cls._prefix[suffix]
|
||||
return str(int(value / div))
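# Editor's note (illustrative, not part of the original file): worked examples
# of the conversion above, as used for the pre-0.8.1 unit fixups:
#   emane074_fixup('1M', 1000)     -> '1000'     (1 Mbps expressed in kbps)
#   emane074_fixup('2.347G', 1000) -> '2347000'  (2.347 GHz expressed in kHz)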
|
||||
|
||||
def buildnemxmlfiles(self, e, ifc):
|
||||
''' Build the necessary nem, mac, and phy XMLs in the given path.
|
||||
'''
|
||||
raise NotImplementedError
|
||||
|
||||
def buildplatformxmlnementry(self, doc, n, ifc):
|
||||
''' Build the NEM definition that goes into the platform.xml file.
|
||||
This returns an XML element that will be added to the <platform/> element.
|
||||
This default method supports per-interface config
|
||||
(e.g. <nem definition="n2_0_63emane_rfpipenem.xml" id="1">) or per-EmaneNode
|
||||
config (e.g. <nem definition="n1emane_rfpipenem.xml" id="1">).
|
||||
This can be overridden by a model for NEM flexibility; n is the EmaneNode.
|
||||
'''
|
||||
nem = doc.createElement("nem")
|
||||
nem.setAttribute("name", ifc.localname)
|
||||
# if this netif contains a non-standard (per-interface) config,
|
||||
# then we need to use a more specific xml file here
|
||||
nem.setAttribute("definition", self.nemxmlname(ifc))
|
||||
return nem
|
||||
|
||||
def buildplatformxmltransportentry(self, doc, n, ifc):
|
||||
''' Build the transport definition that goes into the platform.xml file.
|
||||
This returns an XML element that will be added to the nem definition.
|
||||
This default method supports raw and virtual transport types, but may be
|
||||
overridden by a model to support e.g. a pluggable virtual transport.
|
||||
n is the EmaneNode.
|
||||
'''
|
||||
type = ifc.transport_type
|
||||
if not type:
|
||||
self.session.warn("warning: %s interface type unsupported!" % ifc.name)
|
||||
type = "raw"
|
||||
trans = doc.createElement("transport")
|
||||
trans.setAttribute("definition", n.transportxmlname(type))
|
||||
trans.setAttribute("group", "1")
|
||||
param = doc.createElement("param")
|
||||
param.setAttribute("name", "device")
|
||||
if type == "raw":
|
||||
# raw RJ45 name e.g. 'eth0'
|
||||
param.setAttribute("value", ifc.name)
|
||||
else:
|
||||
# virtual TAP name e.g. 'n3.0.17'
|
||||
param.setAttribute("value", ifc.localname)
|
||||
trans.appendChild(param)
|
||||
return trans
|
||||
|
||||
def basename(self, ifc = None):
|
||||
''' Return the string that other names are based on.
|
||||
If a specific config is stored for a node's interface, a unique
|
||||
filename is needed; otherwise the name of the EmaneNode is used.
|
||||
'''
|
||||
emane = self.session.emane
|
||||
name = "n%s" % self.objid
|
||||
if ifc is not None:
|
||||
nodenum = ifc.node.objid
|
||||
if emane.getconfig(nodenum, self._name, None)[1] is not None:
|
||||
name = ifc.localname.replace('.','_')
|
||||
return "%s%s" % (name, self._name)
|
||||
|
||||
def nemxmlname(self, ifc = None):
|
||||
''' Return the string name for the NEM XML file, e.g. 'n3emane_rfpipenem.xml'
|
||||
'''
|
||||
return "%snem.xml" % self.basename(ifc)
|
||||
|
||||
def shimxmlname(self, ifc = None):
|
||||
''' Return the string name for the SHIM XML file, e.g. 'n3emane_commeffectshim.xml'
|
||||
'''
|
||||
return "%sshim.xml" % self.basename(ifc)
|
||||
|
||||
def macxmlname(self, ifc = None):
|
||||
''' Return the string name for the MAC XML file, e.g. 'n3emane_rfpipemac.xml'
|
||||
'''
|
||||
return "%smac.xml" % self.basename(ifc)
|
||||
|
||||
def phyxmlname(self, ifc = None):
|
||||
''' Return the string name for the PHY XML file, e.g. 'n3emane_rfpipephy.xml'
|
||||
'''
|
||||
return "%sphy.xml" % self.basename(ifc)
|
||||
|
||||
def update(self, moved, moved_netifs):
|
||||
''' invoked from MobilityModel when nodes are moved; this causes
|
||||
EMANE location events to be generated for the nodes in the moved
|
||||
list, making EmaneModels compatible with Ns2ScriptedMobility
|
||||
'''
|
||||
try:
|
||||
wlan = self.session.obj(self.objid)
|
||||
except KeyError:
|
||||
return
|
||||
wlan.setnempositions(moved_netifs)
|
||||
|
||||
def linkconfig(self, netif, bw = None, delay = None,
|
||||
loss = None, duplicate = None, jitter = None, netif2 = None):
|
||||
''' Invoked when a Link Message is received. Default is unimplemented.
|
||||
'''
|
||||
warntxt = "EMANE model %s does not support link " % self._name
|
||||
warntxt += "configuration, dropping Link Message"
|
||||
self.session.warn(warntxt)
|
||||
|
||||
|
||||
class EmaneGlobalModel(EmaneModel):
|
||||
''' Global EMANE configuration options.
|
||||
'''
|
||||
def __init__(self, session, objid = None, verbose = False):
|
||||
EmaneModel.__init__(self, session, objid, verbose)
|
||||
|
||||
_name = "emane"
|
||||
_confmatrix_platform = [
|
||||
("otamanagerchannelenable", coreapi.CONF_DATA_TYPE_BOOL, '0',
|
||||
'on,off', 'enable OTA Manager channel'),
|
||||
("otamanagergroup", coreapi.CONF_DATA_TYPE_STRING, '224.1.2.8:45702',
|
||||
'', 'OTA Manager group'),
|
||||
("otamanagerdevice", coreapi.CONF_DATA_TYPE_STRING, 'lo',
|
||||
'', 'OTA Manager device'),
|
||||
("eventservicegroup", coreapi.CONF_DATA_TYPE_STRING, '224.1.2.8:45703',
|
||||
'', 'Event Service group'),
|
||||
("eventservicedevice", coreapi.CONF_DATA_TYPE_STRING, 'lo',
|
||||
'', 'Event Service device'),
|
||||
("platform_id_start", coreapi.CONF_DATA_TYPE_INT32, '1',
|
||||
'', 'starting Platform ID'),
|
||||
("debugportenable", coreapi.CONF_DATA_TYPE_BOOL, '0',
|
||||
'on,off', 'enable debug port'),
|
||||
("debugport", coreapi.CONF_DATA_TYPE_UINT16, '47000',
|
||||
'', 'debug port number'),
|
||||
]
|
||||
_confmatrix_nem = [
|
||||
("transportendpoint", coreapi.CONF_DATA_TYPE_STRING, 'localhost',
|
||||
'', 'Transport endpoint address (port is automatic)'),
|
||||
("platformendpoint", coreapi.CONF_DATA_TYPE_STRING, 'localhost',
|
||||
'', 'Platform endpoint address (port is automatic)'),
|
||||
("nem_id_start", coreapi.CONF_DATA_TYPE_INT32, '1',
|
||||
'', 'starting NEM ID'),
|
||||
]
|
||||
_confmatrix = _confmatrix_platform + _confmatrix_nem
|
||||
_confgroups = "Platform Attributes:1-%d|NEM Parameters:%d-%d" % \
|
||||
(len(_confmatrix_platform), len(_confmatrix_platform) + 1,
|
||||
len(_confmatrix))
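# Editor's note (illustrative, not part of the original file): with the eight
# platform and three NEM parameters declared above, _confgroups evaluates to
# "Platform Attributes:1-8|NEM Parameters:9-11".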
|
||||
|
daemon/core/emane/ieee80211abg.py (new file, 119 lines)
@@ -0,0 +1,119 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2013 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
ieee80211abg.py: EMANE IEEE 802.11abg model for CORE
|
||||
'''
|
||||
|
||||
import sys
|
||||
import string
|
||||
from core.api import coreapi
|
||||
|
||||
from core.constants import *
|
||||
from emane import EmaneModel
|
||||
from universal import EmaneUniversalModel
|
||||
|
||||
class EmaneIeee80211abgModel(EmaneModel):
|
||||
def __init__(self, session, objid = None, verbose = False):
|
||||
EmaneModel.__init__(self, session, objid, verbose)
|
||||
|
||||
# model name
|
||||
_name = "emane_ieee80211abg"
|
||||
_80211rates = '1 1 Mbps,2 2 Mbps,3 5.5 Mbps,4 11 Mbps,5 6 Mbps,' + \
|
||||
'6 9 Mbps,7 12 Mbps,8 18 Mbps,9 24 Mbps,10 36 Mbps,11 48 Mbps,' + \
|
||||
'12 54 Mbps'
|
||||
# MAC parameters
|
||||
_confmatrix_mac = [
|
||||
("mode", coreapi.CONF_DATA_TYPE_UINT8, '0',
|
||||
'0 802.11b (DSSS only),1 802.11b (DSSS only),' + \
|
||||
'2 802.11a or g (OFDM),3 802.11b/g (DSSS and OFDM)', 'mode'),
|
||||
("enablepromiscuousmode", coreapi.CONF_DATA_TYPE_BOOL, '0',
|
||||
'On,Off', 'enable promiscuous mode'),
|
||||
("distance", coreapi.CONF_DATA_TYPE_UINT32, '1000',
|
||||
'', 'max distance (m)'),
|
||||
("unicastrate", coreapi.CONF_DATA_TYPE_UINT8, '4', _80211rates,
|
||||
'unicast rate (Mbps)'),
|
||||
("multicastrate", coreapi.CONF_DATA_TYPE_UINT8, '1', _80211rates,
|
||||
'multicast rate (Mbps)'),
|
||||
("rtsthreshold", coreapi.CONF_DATA_TYPE_UINT16, '0',
|
||||
'', 'RTS threshold (bytes)'),
|
||||
("wmmenable", coreapi.CONF_DATA_TYPE_BOOL, '0',
|
||||
'On,Off', 'WiFi Multimedia (WMM)'),
|
||||
("pcrcurveuri", coreapi.CONF_DATA_TYPE_STRING,
|
||||
'/usr/share/emane/models/ieee80211abg/xml/ieee80211pcr.xml',
|
||||
'', 'SINR/PCR curve file'),
|
||||
("flowcontrolenable", coreapi.CONF_DATA_TYPE_BOOL, '0',
|
||||
'On,Off', 'enable traffic flow control'),
|
||||
("flowcontroltokens", coreapi.CONF_DATA_TYPE_UINT16, '10',
|
||||
'', 'number of flow control tokens'),
|
||||
("queuesize", coreapi.CONF_DATA_TYPE_STRING, '0:255 1:255 2:255 3:255',
|
||||
'', 'queue size (0-4:size)'),
|
||||
("cwmin", coreapi.CONF_DATA_TYPE_STRING, '0:32 1:32 2:16 3:8',
|
||||
'', 'min contention window (0-4:minw)'),
|
||||
("cwmax", coreapi.CONF_DATA_TYPE_STRING, '0:1024 1:1024 2:64 3:16',
|
||||
'', 'max contention window (0-4:maxw)'),
|
||||
("aifs", coreapi.CONF_DATA_TYPE_STRING, '0:2 1:2 2:2 3:1',
|
||||
'', 'arbitration inter frame space (0-4:aifs)'),
|
||||
("txop", coreapi.CONF_DATA_TYPE_STRING, '0:0 1:0 2:0 3:0',
|
||||
'', 'txop (0-4:usec)'),
|
||||
("retrylimit", coreapi.CONF_DATA_TYPE_STRING, '0:3 1:3 2:3 3:3',
|
||||
'', 'retry limit (0-4:numretries)'),
|
||||
]
|
||||
# PHY parameters from Universal PHY
|
||||
_confmatrix_phy = EmaneUniversalModel._confmatrix
|
||||
|
||||
_confmatrix = _confmatrix_mac + _confmatrix_phy
|
||||
# value groupings
|
||||
_confgroups = "802.11 MAC Parameters:1-%d|Universal PHY Parameters:%d-%d" \
|
||||
% (len(_confmatrix_mac), len(_confmatrix_mac) + 1, len(_confmatrix))
|
||||
|
||||
def buildnemxmlfiles(self, e, ifc):
|
||||
''' Build the necessary nem, mac, and phy XMLs in the given path.
|
||||
If an individual NEM has a nonstandard config, we need to build
|
||||
that file also. Otherwise the WLAN-wide
|
||||
nXXemane_ieee80211abgnem.xml, nXXemane_ieee80211abgmac.xml,
|
||||
nXXemane_ieee80211abgphy.xml are used.
|
||||
'''
|
||||
# use the network-wide config values or interface(NEM)-specific values?
|
||||
if ifc is None:
|
||||
values = e.getconfig(self.objid, self._name,
|
||||
self.getdefaultvalues())[1]
|
||||
else:
|
||||
nodenum = ifc.node.objid
|
||||
values = e.getconfig(nodenum, self._name, None)[1]
|
||||
if values is None:
|
||||
# do not build specific files for this NEM when config is same
|
||||
# as the network
|
||||
return
|
||||
nemdoc = e.xmldoc("nem")
|
||||
nem = nemdoc.getElementsByTagName("nem").pop()
|
||||
nem.setAttribute("name", "ieee80211abg NEM")
|
||||
mactag = nemdoc.createElement("mac")
|
||||
mactag.setAttribute("definition", self.macxmlname(ifc))
|
||||
nem.appendChild(mactag)
|
||||
phytag = nemdoc.createElement("phy")
|
||||
phytag.setAttribute("definition", self.phyxmlname(ifc))
|
||||
nem.appendChild(phytag)
|
||||
e.xmlwrite(nemdoc, self.nemxmlname(ifc))
|
||||
|
||||
macdoc = e.xmldoc("mac")
|
||||
mac = macdoc.getElementsByTagName("mac").pop()
|
||||
mac.setAttribute("name", "ieee80211abg MAC")
|
||||
mac.setAttribute("library", "ieee80211abgmaclayer")
|
||||
|
||||
names = self.getnames()
|
||||
macnames = names[:len(self._confmatrix_mac)]
|
||||
phynames = names[len(self._confmatrix_mac):]
|
||||
|
||||
# append all MAC options to macdoc
|
||||
map( lambda n: mac.appendChild(e.xmlparam(macdoc, n, \
|
||||
self.valueof(n, values))), macnames)
|
||||
e.xmlwrite(macdoc, self.macxmlname(ifc))
|
||||
|
||||
phydoc = EmaneUniversalModel.getphydoc(e, self, values, phynames)
|
||||
e.xmlwrite(phydoc, self.phyxmlname(ifc))
|
||||
|
daemon/core/emane/nodes.py (new file, 281 lines)
@@ -0,0 +1,281 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2013 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
nodes.py: definition of an EmaneNode class for implementing configuration
|
||||
control of an EMANE emulation. An EmaneNode has several attached NEMs that
|
||||
share the same MAC+PHY model.
|
||||
'''
|
||||
|
||||
import sys
|
||||
|
||||
from core.api import coreapi
|
||||
from core.coreobj import PyCoreNet
|
||||
try:
|
||||
import emaneeventservice
|
||||
import emaneeventlocation
|
||||
except Exception, e:
|
||||
''' Don't require all CORE users to have EMANE libeventservice and its
|
||||
Python bindings installed.
|
||||
'''
|
||||
pass
|
||||
|
||||
class EmaneNet(PyCoreNet):
|
||||
''' EMANE network base class.
|
||||
'''
|
||||
apitype = coreapi.CORE_NODE_EMANE
|
||||
linktype = coreapi.CORE_LINK_WIRELESS
|
||||
type = "wlan" # icon used
|
||||
|
||||
class EmaneNode(EmaneNet):
|
||||
''' EMANE node contains NEM configuration and causes connected nodes
|
||||
to have TAP interfaces (instead of VEth). These are managed by the
|
||||
Emane controller object that exists in a session.
|
||||
'''
|
||||
def __init__(self, session, objid = None, name = None, verbose = False,
|
||||
start = True):
|
||||
PyCoreNet.__init__(self, session, objid, name, verbose, start)
|
||||
self.verbose = verbose
|
||||
self.conf = ""
|
||||
self.up = False
|
||||
self.nemidmap = {}
|
||||
self.model = None
|
||||
self.mobility = None
|
||||
|
||||
def linkconfig(self, netif, bw = None, delay = None,
|
||||
loss = None, duplicate = None, jitter = None, netif2 = None):
|
||||
''' Pass link configuration to the model; currently only the CommEffect model supports it.
|
||||
'''
|
||||
if not self.model:
|
||||
return
|
||||
return self.model.linkconfig(netif=netif, bw=bw, delay=delay, loss=loss,
|
||||
duplicate=duplicate, jitter=jitter, netif2=netif2)
|
||||
|
||||
def config(self, conf):
|
||||
#print "emane", self.name, "got config:", conf
|
||||
self.conf = conf
|
||||
|
||||
def shutdown(self):
|
||||
pass
|
||||
|
||||
def link(self, netif1, netif2):
|
||||
pass
|
||||
|
||||
def unlink(self, netif1, netif2):
|
||||
pass
|
||||
|
||||
def setmodel(self, model, config):
|
||||
''' set the EmaneModel associated with this node
|
||||
'''
|
||||
if (self.verbose):
|
||||
self.info("adding model %s" % model._name)
|
||||
if model._type == coreapi.CORE_TLV_REG_WIRELESS:
|
||||
# EmaneModel really uses values from ConfigurableManager
|
||||
# when buildnemxml() is called, not during init()
|
||||
self.model = model(session=self.session, objid=self.objid,
|
||||
verbose=self.verbose)
|
||||
elif model._type == coreapi.CORE_TLV_REG_MOBILITY:
|
||||
self.mobility = model(session=self.session, objid=self.objid,
|
||||
verbose=self.verbose, values=config)
|
||||
|
||||
def setnemid(self, netif, nemid):
|
||||
''' Record an interface to numerical ID mapping. The Emane controller
|
||||
object manages and assigns these IDs for all NEMs.
|
||||
'''
|
||||
self.nemidmap[netif] = nemid
|
||||
|
||||
def getnemid(self, netif):
|
||||
''' Given an interface, return its numerical ID.
|
||||
'''
|
||||
if netif not in self.nemidmap:
|
||||
return None
|
||||
else:
|
||||
return self.nemidmap[netif]
|
||||
|
||||
def getnemnetif(self, nemid):
|
||||
''' Given a numerical NEM ID, return its interface. This returns the
|
||||
first interface that matches the given NEM ID.
|
||||
'''
|
||||
for netif in self.nemidmap:
|
||||
if self.nemidmap[netif] == nemid:
|
||||
return netif
|
||||
return None
|
||||
|
||||
def netifs(self, sort=True):
|
||||
''' Retrieve list of linked interfaces sorted by node number.
|
||||
'''
|
||||
return sorted(self._netif.values(), key=lambda ifc: ifc.node.objid)
|
||||
|
||||
def buildplatformxmlentry(self, doc):
|
||||
''' Return a dictionary of XML elements describing the NEMs
|
||||
connected to this EmaneNode for inclusion in the platform.xml file.
|
||||
'''
|
||||
ret = {}
|
||||
if self.model is None:
|
||||
self.info("warning: EmaneNode %s has no associated model" % \
|
||||
self.name)
|
||||
return ret
|
||||
for netif in self.netifs():
|
||||
# <nem name="NODE-001" definition="rfpipenem.xml">
|
||||
nementry = self.model.buildplatformxmlnementry(doc, self, netif)
|
||||
# <transport definition="transvirtual.xml" group="1">
|
||||
# <param name="device" value="n1.0.158" />
|
||||
# </transport>
|
||||
trans = self.model.buildplatformxmltransportentry(doc, self, netif)
|
||||
nementry.appendChild(trans)
|
||||
ret[netif] = nementry
|
||||
|
||||
return ret
|
||||
|
||||
def buildnemxmlfiles(self, emane):
|
||||
''' Let the configured model build the necessary nem, mac, and phy
|
||||
XMLs.
|
||||
'''
|
||||
if self.model is None:
|
||||
return
|
||||
# build XML for overall network (EmaneNode) configs
|
||||
self.model.buildnemxmlfiles(emane, ifc=None)
|
||||
# build XML for specific interface (NEM) configs
|
||||
need_virtual = False
|
||||
need_raw = False
|
||||
vtype = "virtual"
|
||||
rtype = "raw"
|
||||
for netif in self.netifs():
|
||||
self.model.buildnemxmlfiles(emane, netif)
|
||||
if "virtual" in netif.transport_type:
|
||||
need_virtual = True
|
||||
vtype = netif.transport_type
|
||||
else:
|
||||
need_raw = True
|
||||
rtype = netif.transport_type
|
||||
# build transport XML files depending on type of interfaces involved
|
||||
if need_virtual:
|
||||
self.buildtransportxml(emane, vtype)
|
||||
if need_raw:
|
||||
self.buildtransportxml(emane, rtype)
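# Editor's note (illustrative, not part of the original file): for an
# EmaneNode nXX running emane_rfpipe with two virtual TAP interfaces and no
# per-interface overrides, the calls above produce nXXemane_rfpipenem.xml,
# nXXemane_rfpipemac.xml and nXXemane_rfpipephy.xml plus a single
# nXXtransvirtual.xml; a per-interface override additionally produces files
# keyed by the TAP name, e.g. n2_0_63emane_rfpipenem.xml.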
|
||||
|
||||
def buildtransportxml(self, emane, type):
|
||||
''' Write a transport XML file for the Virtual or Raw Transport.
|
||||
'''
|
||||
transdoc = emane.xmldoc("transport")
|
||||
trans = transdoc.getElementsByTagName("transport").pop()
|
||||
trans.setAttribute("name", "%s Transport" % type.capitalize())
|
||||
trans.setAttribute("library", "trans%s" % type.lower())
|
||||
trans.appendChild(emane.xmlparam(transdoc, "bitrate", "0"))
|
||||
if "virtual" in type.lower():
|
||||
trans.appendChild(emane.xmlparam(transdoc, "devicepath",
|
||||
"/dev/net/tun"))
|
||||
emane.xmlwrite(transdoc, self.transportxmlname(type.lower()))
|
||||
|
||||
def transportxmlname(self, type):
|
||||
''' Return the string name for the Transport XML file,
|
||||
e.g. 'n3transvirtual.xml'
|
||||
'''
|
||||
return "n%strans%s.xml" % (self.objid, type)
|
||||
|
||||
|
||||
def installnetifs(self):
|
||||
''' Install TAP devices into their namespaces. This is done after
|
||||
EMANE daemons have been started, because that is their only chance
|
||||
to bind to the TAPs.
|
||||
'''
|
||||
if not self.session.emane.doeventmonitor() and \
|
||||
self.session.emane.service is None:
|
||||
warntxt = "unable to publish EMANE events because the eventservice "
|
||||
warntxt += "Python bindings failed to load"
|
||||
self.session.exception(coreapi.CORE_EXCP_LEVEL_ERROR, self.name,
|
||||
self.objid, warntxt)
|
||||
|
||||
for netif in self.netifs():
|
||||
if "virtual" in netif.transport_type.lower():
|
||||
netif.install()
|
||||
# if we are listening for EMANE events, don't generate them
|
||||
if self.session.emane.doeventmonitor():
|
||||
netif.poshook = None
|
||||
continue
|
||||
# at this point we register location handlers for generating
|
||||
# EMANE location events
|
||||
netif.poshook = self.setnemposition
|
||||
(x,y,z) = netif.node.position.get()
|
||||
self.setnemposition(netif, x, y, z)
|
||||
|
||||
def deinstallnetifs(self):
|
||||
''' Uninstall TAP devices. This invokes their shutdown method for
|
||||
any required cleanup; the device may be actually removed when
|
||||
emanetransportd terminates.
|
||||
'''
|
||||
for netif in self.netifs():
|
||||
if "virtual" in netif.transport_type.lower():
|
||||
netif.shutdown()
|
||||
netif.poshook = None
|
||||
|
||||
def setnemposition(self, netif, x, y, z):
|
||||
''' Publish a NEM location change event using the EMANE event service.
|
||||
'''
|
||||
if self.session.emane.service is None:
|
||||
if self.verbose:
|
||||
self.info("position service not available")
|
||||
return
|
||||
nemid = self.getnemid(netif)
|
||||
ifname = netif.localname
|
||||
if nemid is None:
|
||||
self.info("nemid for %s is unknown" % ifname)
|
||||
return
|
||||
(lat, long, alt) = self.session.location.getgeo(x, y, z)
|
||||
if self.verbose:
|
||||
self.info("setnemposition %s (%s) x,y,z=(%d,%d,%s)"
|
||||
"(%.6f,%.6f,%.6f)" % \
|
||||
(ifname, nemid, x, y, z, lat, long, alt))
|
||||
event = emaneeventlocation.EventLocation(1)
|
||||
# altitude must be an integer or warning is printed
|
||||
# unused: yaw, pitch, roll, azimuth, elevation, velocity
|
||||
alt = int(round(alt))
|
||||
event.set(0, nemid, lat, long, alt)
|
||||
self.session.emane.service.publish(emaneeventlocation.EVENT_ID,
|
||||
emaneeventservice.PLATFORMID_ANY,
|
||||
emaneeventservice.NEMID_ANY,
|
||||
emaneeventservice.COMPONENTID_ANY,
|
||||
event.export())
|
||||
|
||||
def setnempositions(self, moved_netifs):
|
||||
''' Several NEMs have moved, from e.g. a WaypointMobilityModel
|
||||
calculation. Generate a single EMANE Location Event containing one
|
||||
entry for each netif that has moved.
|
||||
'''
|
||||
if len(moved_netifs) == 0:
|
||||
return
|
||||
if self.session.emane.service is None:
|
||||
if self.verbose:
|
||||
self.info("position service not available")
|
||||
return
|
||||
|
||||
event = emaneeventlocation.EventLocation(len(moved_netifs))
|
||||
i = 0
|
||||
for netif in moved_netifs:
|
||||
nemid = self.getnemid(netif)
|
||||
ifname = netif.localname
|
||||
if nemid is None:
|
||||
self.info("nemid for %s is unknown" % ifname)
|
||||
continue
|
||||
(x, y, z) = netif.node.getposition()
|
||||
(lat, long, alt) = self.session.location.getgeo(x, y, z)
|
||||
if self.verbose:
|
||||
self.info("setnempositions %d %s (%s) x,y,z=(%d,%d,%s)"
|
||||
"(%.6f,%.6f,%.6f)" % \
|
||||
(i, ifname, nemid, x, y, z, lat, long, alt))
|
||||
# altitude must be an integer or warning is printed
|
||||
alt = int(round(alt))
|
||||
event.set(i, nemid, lat, long, alt)
|
||||
i += 1
|
||||
|
||||
self.session.emane.service.publish(emaneeventlocation.EVENT_ID,
|
||||
emaneeventservice.PLATFORMID_ANY,
|
||||
emaneeventservice.NEMID_ANY,
|
||||
emaneeventservice.COMPONENTID_ANY,
|
||||
event.export())
|
||||
|
||||
|
daemon/core/emane/rfpipe.py (new file, 106 lines)
@@ -0,0 +1,106 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2013 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
# author: Harry Bullen <hbullen@i-a-i.com>
|
||||
#
|
||||
'''
|
||||
rfpipe.py: EMANE RF-PIPE model for CORE
|
||||
'''
|
||||
|
||||
import sys
|
||||
import string
|
||||
from core.api import coreapi
|
||||
|
||||
from core.constants import *
|
||||
from emane import EmaneModel
|
||||
from universal import EmaneUniversalModel
|
||||
|
||||
class EmaneRfPipeModel(EmaneModel):
|
||||
def __init__(self, session, objid = None, verbose = False):
|
||||
EmaneModel.__init__(self, session, objid, verbose)
|
||||
|
||||
# model name
|
||||
_name = "emane_rfpipe"
|
||||
|
||||
# configuration parameters are
|
||||
# ( 'name', 'type', 'default', 'possible-value-list', 'caption')
|
||||
# MAC parameters
|
||||
_confmatrix_mac = [
|
||||
("enablepromiscuousmode", coreapi.CONF_DATA_TYPE_BOOL, '0',
|
||||
'True,False', 'enable promiscuous mode'),
|
||||
("datarate", coreapi.CONF_DATA_TYPE_UINT32, '1M',
|
||||
'', 'data rate (bps)'),
|
||||
("jitter", coreapi.CONF_DATA_TYPE_FLOAT, '0.0',
|
||||
'', 'transmission jitter (usec)'),
|
||||
("delay", coreapi.CONF_DATA_TYPE_FLOAT, '0.0',
|
||||
'', 'transmission delay (usec)'),
|
||||
("flowcontrolenable", coreapi.CONF_DATA_TYPE_BOOL, '0',
|
||||
'On,Off', 'enable traffic flow control'),
|
||||
("flowcontroltokens", coreapi.CONF_DATA_TYPE_UINT16, '10',
|
||||
'', 'number of flow control tokens'),
|
||||
("enabletighttiming", coreapi.CONF_DATA_TYPE_BOOL, '0',
|
||||
'On,Off', 'enable tight timing for pkt delay'),
|
||||
("pcrcurveuri", coreapi.CONF_DATA_TYPE_STRING,
|
||||
'/usr/share/emane/models/rfpipe/xml/rfpipepcr.xml',
|
||||
'', 'SINR/PCR curve file'),
|
||||
("transmissioncontrolmap", coreapi.CONF_DATA_TYPE_STRING, '',
|
||||
'', 'tx control map (nem:rate:freq:tx_dBm)'),
|
||||
]
|
||||
|
||||
# PHY parameters from Universal PHY
|
||||
_confmatrix_phy = EmaneUniversalModel._confmatrix
|
||||
|
||||
_confmatrix = _confmatrix_mac + _confmatrix_phy
|
||||
|
||||
# value groupings
|
||||
_confgroups = "RF-PIPE MAC Parameters:1-%d|Universal PHY Parameters:%d-%d" \
|
||||
% ( len(_confmatrix_mac), len(_confmatrix_mac) + 1, len(_confmatrix))
|
||||
|
||||
def buildnemxmlfiles(self, e, ifc):
|
||||
''' Build the necessary nem, mac, and phy XMLs in the given path.
|
||||
If an individual NEM has a nonstandard config, we need to build
|
||||
that file also. Otherwise the WLAN-wide nXXemane_rfpipenem.xml,
|
||||
nXXemane_rfpipemac.xml, nXXemane_rfpipephy.xml are used.
|
||||
'''
|
||||
values = e.getifcconfig(self.objid, self._name,
|
||||
self.getdefaultvalues(), ifc)
|
||||
if values is None:
|
||||
return
|
||||
nemdoc = e.xmldoc("nem")
|
||||
nem = nemdoc.getElementsByTagName("nem").pop()
|
||||
nem.setAttribute("name", "RF-PIPE NEM")
|
||||
mactag = nemdoc.createElement("mac")
|
||||
mactag.setAttribute("definition", self.macxmlname(ifc))
|
||||
nem.appendChild(mactag)
|
||||
phytag = nemdoc.createElement("phy")
|
||||
phytag.setAttribute("definition", self.phyxmlname(ifc))
|
||||
nem.appendChild(phytag)
|
||||
e.xmlwrite(nemdoc, self.nemxmlname(ifc))
|
||||
|
||||
names = list(self.getnames())
|
||||
macnames = names[:len(self._confmatrix_mac)]
|
||||
phynames = names[len(self._confmatrix_mac):]
|
||||
|
||||
macdoc = e.xmldoc("mac")
|
||||
mac = macdoc.getElementsByTagName("mac").pop()
|
||||
mac.setAttribute("name", "RF-PIPE MAC")
|
||||
mac.setAttribute("library", "rfpipemaclayer")
|
||||
if self.valueof("transmissioncontrolmap", values) == "":
|
||||
macnames.remove("transmissioncontrolmap")
|
||||
# EMANE 0.7.4 support
|
||||
if e.emane074:
|
||||
# convert datarate from bps to kbps
|
||||
i = names.index('datarate')
|
||||
values = list(values)
|
||||
values[i] = self.emane074_fixup(values[i], 1000)
|
||||
# append MAC options to macdoc
|
||||
map( lambda n: mac.appendChild(e.xmlparam(macdoc, n, \
|
||||
self.valueof(n, values))), macnames)
|
||||
e.xmlwrite(macdoc, self.macxmlname(ifc))
|
||||
|
||||
phydoc = EmaneUniversalModel.getphydoc(e, self, values, phynames)
|
||||
e.xmlwrite(phydoc, self.phyxmlname(ifc))
|
||||
|
daemon/core/emane/universal.py (new file, 113 lines)
@@ -0,0 +1,113 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2013 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
universal.py: EMANE Universal PHY model for CORE. Enumerates configuration items
|
||||
used for the Universal PHY.
|
||||
'''
|
||||
|
||||
import sys
|
||||
import string
|
||||
from core.api import coreapi
|
||||
|
||||
from core.constants import *
|
||||
from emane import EmaneModel
|
||||
|
||||
class EmaneUniversalModel(EmaneModel):
|
||||
''' This Universal PHY model is meant to be imported by other models,
|
||||
not instantiated.
|
||||
'''
|
||||
def __init__(self, session, objid = None, verbose = False):
|
||||
raise SyntaxError
|
||||
|
||||
_name = "emane_universal"
|
||||
_xmlname = "universalphy"
|
||||
_xmllibrary = "universalphylayer"
|
||||
|
||||
# universal PHY parameters
|
||||
_confmatrix = [
|
||||
("antennagain", coreapi.CONF_DATA_TYPE_FLOAT, '0.0',
|
||||
'','antenna gain (dBi)'),
|
||||
("antennaazimuth", coreapi.CONF_DATA_TYPE_FLOAT, '0.0',
|
||||
'','antenna azimuth (deg)'),
|
||||
("antennaelevation", coreapi.CONF_DATA_TYPE_FLOAT, '0.0',
|
||||
'','antenna elevation (deg)'),
|
||||
("antennaprofileid", coreapi.CONF_DATA_TYPE_STRING, '1',
|
||||
'','antenna profile ID'),
|
||||
("antennaprofilemanifesturi", coreapi.CONF_DATA_TYPE_STRING, '',
|
||||
'','antenna profile manifest URI'),
|
||||
("antennaprofileenable", coreapi.CONF_DATA_TYPE_BOOL, '0',
|
||||
'On,Off','antenna profile mode'),
|
||||
("bandwidth", coreapi.CONF_DATA_TYPE_UINT64, '1M',
|
||||
'', 'rf bandwidth (hz)'),
|
||||
("defaultconnectivitymode", coreapi.CONF_DATA_TYPE_BOOL, '1',
|
||||
'On,Off','default connectivity'),
|
||||
("frequency", coreapi.CONF_DATA_TYPE_UINT64, '2.347G',
|
||||
'','frequency (Hz)'),
|
||||
("frequencyofinterest", coreapi.CONF_DATA_TYPE_UINT64, '2.347G',
|
||||
'','frequency of interest (Hz)'),
|
||||
("frequencyofinterestfilterenable", coreapi.CONF_DATA_TYPE_BOOL, '1',
|
||||
'On,Off','frequency of interest filter enable'),
|
||||
("noiseprocessingmode", coreapi.CONF_DATA_TYPE_BOOL, '0',
|
||||
'On,Off','enable noise processing'),
|
||||
("pathlossmode", coreapi.CONF_DATA_TYPE_STRING, '2ray',
|
||||
'pathloss,2ray,freespace','path loss mode'),
|
||||
("subid", coreapi.CONF_DATA_TYPE_UINT16, '1',
|
||||
'','subid'),
|
||||
("systemnoisefigure", coreapi.CONF_DATA_TYPE_FLOAT, '4.0',
|
||||
'','system noise figure (dB)'),
|
||||
("txpower", coreapi.CONF_DATA_TYPE_FLOAT, '0.0',
|
||||
'','transmit power (dBm)'),
|
||||
]
|
||||
|
||||
# old parameters
|
||||
_confmatrix_ver074 = [
|
||||
("antennaazimuthbeamwidth", coreapi.CONF_DATA_TYPE_FLOAT, '360.0',
|
||||
'','azimuth beam width (deg)'),
|
||||
("antennaelevationbeamwidth", coreapi.CONF_DATA_TYPE_FLOAT, '180.0',
|
||||
'','elevation beam width (deg)'),
|
||||
("antennatype", coreapi.CONF_DATA_TYPE_STRING, 'omnidirectional',
|
||||
'omnidirectional,unidirectional','antenna type'),
|
||||
]
|
||||
|
||||
# parameters that require unit conversion for 0.7.4
|
||||
_update_ver074 = ("bandwidth", "frequency", "frequencyofinterest")
|
||||
# parameters that should be removed for 0.7.4
|
||||
_remove_ver074 = ("antennaprofileenable", "antennaprofileid",
|
||||
"antennaprofilemanifesturi",
|
||||
"frequencyofinterestfilterenable")
|
||||
|
||||
|
||||
@classmethod
|
||||
def getphydoc(cls, e, mac, values, phynames):
|
||||
phydoc = e.xmldoc("phy")
|
||||
phy = phydoc.getElementsByTagName("phy").pop()
|
||||
phy.setAttribute("name", cls._xmlname)
|
||||
phy.setAttribute("library", cls._xmllibrary)
|
||||
# EMANE 0.7.4 support - to be removed when 0.7.4 support is deprecated
|
||||
if e.emane074:
|
||||
names = mac.getnames()
|
||||
values = list(values)
|
||||
phynames = list(phynames)
|
||||
# update units for some parameters
|
||||
for p in cls._update_ver074:
|
||||
i = names.index(p)
|
||||
# these all happen to be kHz, so 1000 is used
|
||||
values[i] = cls.emane074_fixup(values[i], 1000)
|
||||
# remove new incompatible options
|
||||
for p in cls._remove_ver074:
|
||||
phynames.remove(p)
|
||||
# insert old options with their default values
|
||||
for old in cls._confmatrix_ver074:
|
||||
phy.appendChild(e.xmlparam(phydoc, old[0], old[2]))
|
||||
|
||||
# append all PHY options to phydoc
|
||||
map( lambda n: phy.appendChild(e.xmlparam(phydoc, n, \
|
||||
mac.valueof(n, values))), phynames)
|
||||
return phydoc
|
||||
|
||||
|
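For orientation, a minimal sketch of how a MAC model is expected to use this class, mirroring the RF-PIPE call shown earlier in this commit; the method name buildphyxml and the surrounding EmaneModel plumbing (e, ifc, values) are assumed here for illustration only and are not part of universal.py:

# illustrative sketch, not part of the file above
def buildphyxml(self, e, ifc, values):
    # PHY parameter names follow the MAC names in this model's config matrix
    phynames = list(self.getnames())[len(self._confmatrix_mac):]
    # delegate PHY XML generation to the shared Universal PHY model
    phydoc = EmaneUniversalModel.getphydoc(e, self, values, phynames)
    # write the per-interface PHY definition next to the NEM and MAC files
    e.xmlwrite(phydoc, self.phyxmlname(ifc))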
daemon/core/location.py (new file, 246 lines)
@@ -0,0 +1,246 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2013 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
location.py: definition of CoreLocation class that is a member of the
|
||||
Session object. Provides conversions between Cartesian and geographic coordinate
|
||||
systems. Depends on utm contributed module, from
|
||||
https://pypi.python.org/pypi/utm (version 0.3.0).
|
||||
'''
|
||||
|
||||
from core.conf import ConfigurableManager
|
||||
from core.api import coreapi
|
||||
from core.misc import utm
|
||||
|
||||
class CoreLocation(ConfigurableManager):
|
||||
''' Member of session class for handling global location data. This keeps
|
||||
track of a latitude/longitude/altitude reference point and scale in
|
||||
order to convert between X,Y and geo coordinates.
|
||||
|
||||
TODO: this could be updated to use more generic
|
||||
Configurable/ConfigurableManager code like other Session objects
|
||||
'''
|
||||
_name = "location"
|
||||
_type = coreapi.CORE_TLV_REG_UTILITY
|
||||
|
||||
def __init__(self, session):
|
||||
ConfigurableManager.__init__(self, session)
|
||||
self.reset()
|
||||
self.zonemap = {}
|
||||
for n, l in utm.ZONE_LETTERS:
|
||||
self.zonemap[l] = n
|
||||
|
||||
def reset(self):
|
||||
''' Reset to initial state.
|
||||
'''
|
||||
# (x, y, z) coordinates of the point given by self.refgeo
|
||||
self.refxyz = (0.0, 0.0, 0.0)
|
||||
# decimal latitude, longitude, and altitude at the point (x, y, z)
|
||||
self.setrefgeo(0.0, 0.0, 0.0)
|
||||
# 100 pixels equals this many meters
|
||||
self.refscale = 1.0
|
||||
# cached distance to refpt in other zones
|
||||
self.zoneshifts = {}
|
||||
|
||||
def configure_values(self, msg, values):
|
||||
''' Receive configuration message for setting the reference point
|
||||
and scale.
|
||||
'''
|
||||
if values is None:
|
||||
self.session.info("location data missing")
|
||||
return None
|
||||
values = values.split('|')
|
||||
# Cartesian coordinate reference point
|
||||
refx,refy = map(lambda x: float(x), values[0:2])
|
||||
refz = 0.0
|
||||
self.refxyz = (refx, refy, refz)
|
||||
# Geographic reference point
|
||||
lat,long,alt = map(lambda x: float(x), values[2:5])
|
||||
self.setrefgeo(lat, long, alt)
|
||||
self.refscale = float(values[5])
|
||||
self.session.info("location configured: (%.2f,%.2f,%.2f) = "
|
||||
"(%.5f,%.5f,%.5f) scale=%.2f" %
|
||||
(self.refxyz[0], self.refxyz[1], self.refxyz[2], self.refgeo[0],
|
||||
self.refgeo[1], self.refgeo[2], self.refscale))
|
||||
self.session.info("location configured: UTM(%.5f,%.5f,%.5f)" %
|
||||
(self.refutm[1], self.refutm[2], self.refutm[3]))
|
||||
|
||||
def px2m(self, val):
|
||||
''' Convert the specified value in pixels to meters using the
|
||||
configured scale. The scale is given as s, where
|
||||
100 pixels = s meters.
|
||||
'''
|
||||
return (val / 100.0) * self.refscale
|
||||
|
||||
def m2px(self, val):
|
||||
''' Convert the specified value in meters to pixels using the
|
||||
configured scale. The scale is given as s, where
|
||||
100 pixels = s meters.
|
||||
'''
|
||||
if self.refscale == 0.0:
|
||||
return 0.0
|
||||
return 100.0 * (val / self.refscale)
|
||||
|
||||
def setrefgeo(self, lat, lon, alt):
|
||||
''' Record the geographical reference point decimal (lat, lon, alt)
|
||||
and convert and store its UTM equivalent for later use.
|
||||
'''
|
||||
self.refgeo = (lat, lon, alt)
|
||||
# easting, northing, zone
|
||||
(e, n, zonen, zonel) = utm.from_latlon(lat, lon)
|
||||
self.refutm = ( (zonen, zonel), e, n, alt)
|
||||
|
||||
def getgeo(self, x, y, z):
|
||||
''' Given (x, y, z) Cartesian coordinates, convert them to latitude,
|
||||
longitude, and altitude based on the configured reference point
|
||||
and scale.
|
||||
'''
|
||||
# shift (x,y,z) over to reference point (x,y,z)
|
||||
x = x - self.refxyz[0]
|
||||
y = -(y - self.refxyz[1])
|
||||
if z is None:
|
||||
z = self.refxyz[2]
|
||||
else:
|
||||
z = z - self.refxyz[2]
|
||||
# use UTM coordinates since unit is meters
|
||||
zone = self.refutm[0]
|
||||
if zone == "":
|
||||
raise ValueError, "reference point not configured"
|
||||
e = self.refutm[1] + self.px2m(x)
|
||||
n = self.refutm[2] + self.px2m(y)
|
||||
alt = self.refutm[3] + self.px2m(z)
|
||||
(e, n, zone) = self.getutmzoneshift(e, n)
|
||||
try:
|
||||
lat, lon = utm.to_latlon(e, n, zone[0], zone[1])
|
||||
except utm.OutOfRangeError:
|
||||
self.info("UTM out of range error for e=%s n=%s zone=%s" \
|
||||
"xyz=(%s,%s,%s)" % (e, n, zone, x, y, z))
|
||||
(lat, lon) = self.refgeo[:2]
|
||||
#self.info("getgeo(%s,%s,%s) e=%s n=%s zone=%s lat,lon,alt=" \
|
||||
# "%.3f,%.3f,%.3f" % (x, y, z, e, n, zone, lat, lon, alt))
|
||||
return (lat, lon, alt)
|
||||
|
||||
def getxyz(self, lat, lon, alt):
|
||||
''' Given latitude, longitude, and altitude location data, convert them
|
||||
to (x, y, z) Cartesian coordinates based on the configured
|
||||
reference point and scale. Lat/lon is converted to UTM meter
|
||||
coordinates, UTM zones are accounted for, and the scale turns
|
||||
meters to pixels.
|
||||
'''
|
||||
# convert lat/lon to UTM coordinates in meters
|
||||
(e, n, zonen, zonel) = utm.from_latlon(lat, lon)
|
||||
(rlat, rlon, ralt) = self.refgeo
|
||||
xshift = self.geteastingshift(zonen, zonel)
|
||||
if xshift is None:
|
||||
xm = e - self.refutm[1]
|
||||
else:
|
||||
xm = e + xshift
|
||||
yshift = self.getnorthingshift(zonen, zonel)
|
||||
if yshift is None:
|
||||
ym = n - self.refutm[2]
|
||||
else:
|
||||
ym = n + yshift
|
||||
zm = alt - ralt
|
||||
|
||||
# shift (x,y,z) over to reference point (x,y,z)
|
||||
x = self.m2px(xm) + self.refxyz[0]
|
||||
y = -(self.m2px(ym) + self.refxyz[1])
|
||||
z = self.m2px(zm) + self.refxyz[2]
|
||||
return (x, y, z)
|
||||
|
||||
def geteastingshift(self, zonen, zonel):
|
||||
''' If the lat, lon coordinates being converted are located in a
|
||||
different UTM zone than the canvas reference point, the UTM meters
|
||||
may need to be shifted.
|
||||
This picks a reference point in the same longitudinal band
|
||||
(UTM zone number) as the provided zone, to calculate the shift in
|
||||
meters for the x coordinate.
|
||||
'''
|
||||
rzonen = int(self.refutm[0][0])
|
||||
if zonen == rzonen:
|
||||
return None # same zone number, no x shift required
|
||||
z = (zonen, zonel)
|
||||
if z in self.zoneshifts and self.zoneshifts[z][0] is not None:
|
||||
return self.zoneshifts[z][0] # x shift already calculated, cached
|
||||
|
||||
(rlat, rlon, ralt) = self.refgeo
|
||||
lon2 = rlon + 6*(zonen - rzonen) # ea. zone is 6deg band
|
||||
(e2, n2, zonen2, zonel2) = utm.from_latlon(rlat, lon2) # ignore northing
|
||||
# NOTE: great circle distance used here, not reference ellipsoid!
|
||||
xshift = utm.haversine(rlon, rlat, lon2, rlat) - e2
|
||||
# cache the return value
|
||||
yshift = None
|
||||
if z in self.zoneshifts:
|
||||
yshift = self.zoneshifts[z][1]
|
||||
self.zoneshifts[z] = (xshift, yshift)
|
||||
return xshift
|
||||
|
||||
def getnorthingshift(self, zonen, zonel):
|
||||
''' If the lat, lon coordinates being converted are located in a
|
||||
different UTM zone than the canvas reference point, the UTM meters
|
||||
may need to be shifted.
|
||||
This picks a reference point in the same latitude band (UTM zone letter)
|
||||
as the provided zone, to calculate the shift in meters for the
|
||||
y coordinate.
|
||||
'''
|
||||
rzonel = self.refutm[0][1]
|
||||
if zonel == rzonel:
|
||||
return None # same zone letter, no y shift required
|
||||
z = (zonen, zonel)
|
||||
if z in self.zoneshifts and self.zoneshifts[z][1] is not None:
|
||||
return self.zoneshifts[z][1] # y shift already calculated, cached
|
||||
|
||||
(rlat, rlon, ralt) = self.refgeo
|
||||
# zonemap is used to calculate degrees difference between zone letters
|
||||
latshift = self.zonemap[zonel] - self.zonemap[rzonel]
|
||||
lat2 = rlat + latshift # ea. latitude band is 8deg high
|
||||
(e2, n2, zonen2, zonel2) = utm.from_latlon(lat2, rlon)
|
||||
# NOTE: great circle distance used here, not reference ellipsoid
|
||||
yshift = -(utm.haversine(rlon, rlat, rlon, lat2) + n2)
|
||||
# cache the return value
|
||||
xshift = None
|
||||
if z in self.zoneshifts:
|
||||
xshift = self.zoneshifts[z][0]
|
||||
self.zoneshifts[z] = (xshift, yshift)
|
||||
return yshift
|
||||
|
||||
def getutmzoneshift(self, e, n):
|
||||
''' Given UTM easting and northing values, check if they fall outside
|
||||
the reference point's zone boundary. Return the UTM coordinates in a
|
||||
different zone and the new zone if they do. Zone lettering is only
|
||||
changed when the reference point is in the opposite hemisphere.
|
||||
'''
|
||||
zone = self.refutm[0]
|
||||
(rlat, rlon, ralt) = self.refgeo
|
||||
if e > 834000 or e < 166000:
|
||||
num_zones = (int(e) - 166000) / (utm.R/10)
|
||||
# estimate number of zones to shift, E (positive) or W (negative)
|
||||
rlon2 = self.refgeo[1] + (num_zones * 6)
|
||||
(e2, n2, zonen2, zonel2) = utm.from_latlon(rlat, rlon2)
|
||||
xshift = utm.haversine(rlon, rlat, rlon2, rlat)
|
||||
# after >3 zones away from refpt, the above estimate won't work
|
||||
# (the above estimate could be improved)
|
||||
if not 100000 <= (e - xshift) < 1000000:
|
||||
# move one more zone away
|
||||
num_zones = (abs(num_zones)+1) * (abs(num_zones)/num_zones)
|
||||
rlon2 = self.refgeo[1] + (num_zones * 6)
|
||||
(e2, n2, zonen2, zonel2) = utm.from_latlon(rlat, rlon2)
|
||||
xshift = utm.haversine(rlon, rlat, rlon2, rlat)
|
||||
e = e - xshift
|
||||
zone = (zonen2, zonel2)
|
||||
if n < 0:
|
||||
# refpt in northern hemisphere and we crossed south of equator
|
||||
n += 10000000
|
||||
zone = (zone[0], 'M')
|
||||
elif n > 10000000:
|
||||
# refpt in southern hemisphere and we crossed north of equator
|
||||
n -= 10000000
|
||||
zone = (zone[0], 'N')
|
||||
return (e, n, zone)
|
||||
|
||||
|
||||
|
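A small hedged example of the scale convention used by px2m()/m2px() above (100 pixels = refscale meters); the CoreLocation instance loc is assumed to have been created by a Session, and the coordinates are arbitrary:

# illustrative only; obtain the CoreLocation member from the Session object
loc.refscale = 150.0                 # 100 pixels now represent 150 meters
print loc.px2m(100)                  # 150.0
print loc.m2px(75.0)                 # 50.0
loc.setrefgeo(47.57, -122.13, 2.0)   # geographic reference for canvas (0, 0, 0)
print loc.getgeo(150, 150, None)     # (lat, lon, alt) near the reference point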
daemon/core/misc/LatLongUTMconversion.py (new executable file, 216 lines)
@@ -0,0 +1,216 @@
|
|||
#!/usr/bin/env python
|
||||
# this file is from http://pygps.org/
|
||||
|
||||
# Lat Long - UTM, UTM - Lat Long conversions
|
||||
|
||||
from math import pi, sin, cos, tan, sqrt
|
||||
|
||||
#LatLong- UTM conversion..h
|
||||
#definitions for lat/long to UTM and UTM to lat/lng conversions
|
||||
#include <string.h>
|
||||
|
||||
_deg2rad = pi / 180.0
|
||||
_rad2deg = 180.0 / pi
|
||||
|
||||
_EquatorialRadius = 2
|
||||
_eccentricitySquared = 3
|
||||
|
||||
_ellipsoid = [
|
||||
# id, Ellipsoid name, Equatorial Radius, square of eccentricity
|
||||
# first once is a placeholder only, To allow array indices to match id numbers
|
||||
[ -1, "Placeholder", 0, 0],
|
||||
[ 1, "Airy", 6377563, 0.00667054],
|
||||
[ 2, "Australian National", 6378160, 0.006694542],
|
||||
[ 3, "Bessel 1841", 6377397, 0.006674372],
|
||||
[ 4, "Bessel 1841 (Nambia] ", 6377484, 0.006674372],
|
||||
[ 5, "Clarke 1866", 6378206, 0.006768658],
|
||||
[ 6, "Clarke 1880", 6378249, 0.006803511],
|
||||
[ 7, "Everest", 6377276, 0.006637847],
|
||||
[ 8, "Fischer 1960 (Mercury] ", 6378166, 0.006693422],
|
||||
[ 9, "Fischer 1968", 6378150, 0.006693422],
|
||||
[ 10, "GRS 1967", 6378160, 0.006694605],
|
||||
[ 11, "GRS 1980", 6378137, 0.00669438],
|
||||
[ 12, "Helmert 1906", 6378200, 0.006693422],
|
||||
[ 13, "Hough", 6378270, 0.00672267],
|
||||
[ 14, "International", 6378388, 0.00672267],
|
||||
[ 15, "Krassovsky", 6378245, 0.006693422],
|
||||
[ 16, "Modified Airy", 6377340, 0.00667054],
|
||||
[ 17, "Modified Everest", 6377304, 0.006637847],
|
||||
[ 18, "Modified Fischer 1960", 6378155, 0.006693422],
|
||||
[ 19, "South American 1969", 6378160, 0.006694542],
|
||||
[ 20, "WGS 60", 6378165, 0.006693422],
|
||||
[ 21, "WGS 66", 6378145, 0.006694542],
|
||||
[ 22, "WGS-72", 6378135, 0.006694318],
|
||||
[ 23, "WGS-84", 6378137, 0.00669438]
|
||||
]
|
||||
|
||||
#Reference ellipsoids derived from Peter H. Dana's website-
|
||||
#http://www.utexas.edu/depts/grg/gcraft/notes/datum/elist.html
|
||||
#Department of Geography, University of Texas at Austin
|
||||
#Internet: pdana@mail.utexas.edu
|
||||
#3/22/95
|
||||
|
||||
#Source
|
||||
#Defense Mapping Agency. 1987b. DMA Technical Report: Supplement to Department of Defense World Geodetic System
|
||||
#1984 Technical Report. Part I and II. Washington, DC: Defense Mapping Agency
|
||||
|
||||
#def LLtoUTM(int ReferenceEllipsoid, const double Lat, const double Long,
|
||||
# double &UTMNorthing, double &UTMEasting, char* UTMZone)
|
||||
|
||||
def LLtoUTM(ReferenceEllipsoid, Lat, Long, zone = None):
|
||||
"""converts lat/long to UTM coords. Equations from USGS Bulletin 1532
|
||||
East Longitudes are positive, West longitudes are negative.
|
||||
North latitudes are positive, South latitudes are negative
|
||||
Lat and Long are in decimal degrees
|
||||
Written by Chuck Gantz- chuck.gantz@globalstar.com"""
|
||||
|
||||
a = _ellipsoid[ReferenceEllipsoid][_EquatorialRadius]
|
||||
eccSquared = _ellipsoid[ReferenceEllipsoid][_eccentricitySquared]
|
||||
k0 = 0.9996
|
||||
|
||||
#Make sure the longitude is between -180.00 .. 179.9
|
||||
LongTemp = (Long+180)-int((Long+180)/360)*360-180 # -180.00 .. 179.9
|
||||
|
||||
LatRad = Lat*_deg2rad
|
||||
LongRad = LongTemp*_deg2rad
|
||||
|
||||
if zone is None:
|
||||
ZoneNumber = int((LongTemp + 180)/6) + 1
|
||||
else:
|
||||
ZoneNumber = zone
|
||||
|
||||
if Lat >= 56.0 and Lat < 64.0 and LongTemp >= 3.0 and LongTemp < 12.0:
|
||||
ZoneNumber = 32
|
||||
|
||||
# Special zones for Svalbard
|
||||
if Lat >= 72.0 and Lat < 84.0:
|
||||
if LongTemp >= 0.0 and LongTemp < 9.0:ZoneNumber = 31
|
||||
elif LongTemp >= 9.0 and LongTemp < 21.0: ZoneNumber = 33
|
||||
elif LongTemp >= 21.0 and LongTemp < 33.0: ZoneNumber = 35
|
||||
elif LongTemp >= 33.0 and LongTemp < 42.0: ZoneNumber = 37
|
||||
|
||||
LongOrigin = (ZoneNumber - 1)*6 - 180 + 3 #+3 puts origin in middle of zone
|
||||
LongOriginRad = LongOrigin * _deg2rad
|
||||
|
||||
#compute the UTM Zone from the latitude and longitude
|
||||
UTMZone = "%d%c" % (ZoneNumber, _UTMLetterDesignator(Lat))
|
||||
|
||||
eccPrimeSquared = (eccSquared)/(1-eccSquared)
|
||||
N = a/sqrt(1-eccSquared*sin(LatRad)*sin(LatRad))
|
||||
T = tan(LatRad)*tan(LatRad)
|
||||
C = eccPrimeSquared*cos(LatRad)*cos(LatRad)
|
||||
A = cos(LatRad)*(LongRad-LongOriginRad)
|
||||
|
||||
M = a*((1
|
||||
- eccSquared/4
|
||||
- 3*eccSquared*eccSquared/64
|
||||
- 5*eccSquared*eccSquared*eccSquared/256)*LatRad
|
||||
- (3*eccSquared/8
|
||||
+ 3*eccSquared*eccSquared/32
|
||||
+ 45*eccSquared*eccSquared*eccSquared/1024)*sin(2*LatRad)
|
||||
+ (15*eccSquared*eccSquared/256 + 45*eccSquared*eccSquared*eccSquared/1024)*sin(4*LatRad)
|
||||
- (35*eccSquared*eccSquared*eccSquared/3072)*sin(6*LatRad))
|
||||
|
||||
UTMEasting = (k0*N*(A+(1-T+C)*A*A*A/6
|
||||
+ (5-18*T+T*T+72*C-58*eccPrimeSquared)*A*A*A*A*A/120)
|
||||
+ 500000.0)
|
||||
|
||||
UTMNorthing = (k0*(M+N*tan(LatRad)*(A*A/2+(5-T+9*C+4*C*C)*A*A*A*A/24
|
||||
+ (61
|
||||
-58*T
|
||||
+T*T
|
||||
+600*C
|
||||
-330*eccPrimeSquared)*A*A*A*A*A*A/720)))
|
||||
|
||||
if Lat < 0:
|
||||
UTMNorthing = UTMNorthing + 10000000.0; #10000000 meter offset for southern hemisphere
|
||||
return (UTMZone, UTMEasting, UTMNorthing)
|
||||
|
||||
|
||||
def _UTMLetterDesignator(Lat):
|
||||
"""This routine determines the correct UTM letter designator for the given
|
||||
latitude returns 'Z' if latitude is outside the UTM limits of 84N to 80S
|
||||
Written by Chuck Gantz- chuck.gantz@globalstar.com"""
|
||||
|
||||
if 84 >= Lat >= 72: return 'X'
|
||||
elif 72 > Lat >= 64: return 'W'
|
||||
elif 64 > Lat >= 56: return 'V'
|
||||
elif 56 > Lat >= 48: return 'U'
|
||||
elif 48 > Lat >= 40: return 'T'
|
||||
elif 40 > Lat >= 32: return 'S'
|
||||
elif 32 > Lat >= 24: return 'R'
|
||||
elif 24 > Lat >= 16: return 'Q'
|
||||
elif 16 > Lat >= 8: return 'P'
|
||||
elif 8 > Lat >= 0: return 'N'
|
||||
elif 0 > Lat >= -8: return 'M'
|
||||
elif -8> Lat >= -16: return 'L'
|
||||
elif -16 > Lat >= -24: return 'K'
|
||||
elif -24 > Lat >= -32: return 'J'
|
||||
elif -32 > Lat >= -40: return 'H'
|
||||
elif -40 > Lat >= -48: return 'G'
|
||||
elif -48 > Lat >= -56: return 'F'
|
||||
elif -56 > Lat >= -64: return 'E'
|
||||
elif -64 > Lat >= -72: return 'D'
|
||||
elif -72 > Lat >= -80: return 'C'
|
||||
else: return 'Z' # if the Latitude is outside the UTM limits
|
||||
|
||||
#void UTMtoLL(int ReferenceEllipsoid, const double UTMNorthing, const double UTMEasting, const char* UTMZone,
|
||||
# double& Lat, double& Long )
|
||||
|
||||
def UTMtoLL(ReferenceEllipsoid, northing, easting, zone):
|
||||
"""converts UTM coords to lat/long. Equations from USGS Bulletin 1532
|
||||
East Longitudes are positive, West longitudes are negative.
|
||||
North latitudes are positive, South latitudes are negative
|
||||
Lat and Long are in decimal degrees.
|
||||
Written by Chuck Gantz- chuck.gantz@globalstar.com
|
||||
Converted to Python by Russ Nelson <nelson@crynwr.com>"""
|
||||
|
||||
k0 = 0.9996
|
||||
a = _ellipsoid[ReferenceEllipsoid][_EquatorialRadius]
|
||||
eccSquared = _ellipsoid[ReferenceEllipsoid][_eccentricitySquared]
|
||||
e1 = (1-sqrt(1-eccSquared))/(1+sqrt(1-eccSquared))
|
||||
#NorthernHemisphere; 1 for northern hemisphere, 0 for southern
|
||||
|
||||
x = easting - 500000.0 #remove 500,000 meter offset for longitude
|
||||
y = northing
|
||||
|
||||
ZoneLetter = zone[-1]
|
||||
ZoneNumber = int(zone[:-1])
|
||||
if ZoneLetter >= 'N':
|
||||
NorthernHemisphere = 1 # point is in northern hemisphere
|
||||
else:
|
||||
NorthernHemisphere = 0 # point is in southern hemisphere
|
||||
y -= 10000000.0 # remove 10,000,000 meter offset used for southern hemisphere
|
||||
|
||||
LongOrigin = (ZoneNumber - 1)*6 - 180 + 3 # +3 puts origin in middle of zone
|
||||
|
||||
eccPrimeSquared = (eccSquared)/(1-eccSquared)
|
||||
|
||||
M = y / k0
|
||||
mu = M/(a*(1-eccSquared/4-3*eccSquared*eccSquared/64-5*eccSquared*eccSquared*eccSquared/256))
|
||||
|
||||
phi1Rad = (mu + (3*e1/2-27*e1*e1*e1/32)*sin(2*mu)
|
||||
+ (21*e1*e1/16-55*e1*e1*e1*e1/32)*sin(4*mu)
|
||||
+(151*e1*e1*e1/96)*sin(6*mu))
|
||||
phi1 = phi1Rad*_rad2deg;
|
||||
|
||||
N1 = a/sqrt(1-eccSquared*sin(phi1Rad)*sin(phi1Rad))
|
||||
T1 = tan(phi1Rad)*tan(phi1Rad)
|
||||
C1 = eccPrimeSquared*cos(phi1Rad)*cos(phi1Rad)
|
||||
R1 = a*(1-eccSquared)/pow(1-eccSquared*sin(phi1Rad)*sin(phi1Rad), 1.5)
|
||||
D = x/(N1*k0)
|
||||
|
||||
Lat = phi1Rad - (N1*tan(phi1Rad)/R1)*(D*D/2-(5+3*T1+10*C1-4*C1*C1-9*eccPrimeSquared)*D*D*D*D/24
|
||||
+(61+90*T1+298*C1+45*T1*T1-252*eccPrimeSquared-3*C1*C1)*D*D*D*D*D*D/720)
|
||||
Lat = Lat * _rad2deg
|
||||
|
||||
Long = (D-(1+2*T1+C1)*D*D*D/6+(5-2*C1+28*T1-3*C1*C1+8*eccPrimeSquared+24*T1*T1)
|
||||
*D*D*D*D*D/120)/cos(phi1Rad)
|
||||
Long = LongOrigin + Long * _rad2deg
|
||||
return (Lat, Long)
|
||||
|
||||
if __name__ == '__main__':
|
||||
(z, e, n) = LLtoUTM(23, 45.00, -75.00)
|
||||
print z, e, n
|
||||
print UTMtoLL(23, n, e, z)
|
||||
|
daemon/core/misc/__init__.py (new empty file)
daemon/core/misc/event.py (new file, 160 lines)
@@ -0,0 +1,160 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# authors: Tom Goff <thomas.goff@boeing.com>
|
||||
#
|
||||
'''
|
||||
event.py: event loop implementation using a heap queue and threads.
|
||||
'''
|
||||
import time
|
||||
import threading
|
||||
import heapq
|
||||
|
||||
class EventLoop(object):
|
||||
|
||||
class Event(object):
|
||||
def __init__(self, eventnum, time, func, *args, **kwds):
|
||||
self.eventnum = eventnum
|
||||
self.time = time
|
||||
self.func = func
|
||||
self.args = args
|
||||
self.kwds = kwds
|
||||
self.canceled = False
|
||||
|
||||
def __cmp__(self, other):
|
||||
tmp = cmp(self.time, other.time)
|
||||
if tmp == 0:
|
||||
tmp = cmp(self.eventnum, other.eventnum)
|
||||
return tmp
|
||||
|
||||
def run(self):
|
||||
if self.canceled:
|
||||
return
|
||||
self.func(*self.args, **self.kwds)
|
||||
|
||||
def cancel(self):
|
||||
self.canceled = True # XXX not thread-safe
|
||||
|
||||
def __init__(self):
|
||||
self.lock = threading.RLock()
|
||||
self.queue = []
|
||||
self.eventnum = 0
|
||||
self.timer = None
|
||||
self.running = False
|
||||
self.start = None
|
||||
|
||||
def __del__(self):
|
||||
self.stop()
|
||||
|
||||
def __run_events(self):
|
||||
schedule = False
|
||||
while True:
|
||||
with self.lock:
|
||||
if not self.running or not self.queue:
|
||||
break
|
||||
now = time.time()
|
||||
if self.queue[0].time > now:
|
||||
schedule = True
|
||||
break
|
||||
event = heapq.heappop(self.queue)
|
||||
assert event.time <= now
|
||||
event.run()
|
||||
with self.lock:
|
||||
self.timer = None
|
||||
if schedule:
|
||||
self.__schedule_event()
|
||||
|
||||
def __schedule_event(self):
|
||||
with self.lock:
|
||||
assert self.running
|
||||
if not self.queue:
|
||||
return
|
||||
delay = self.queue[0].time - time.time()
|
||||
assert self.timer is None
|
||||
self.timer = threading.Timer(delay, self.__run_events)
|
||||
self.timer.daemon = True
|
||||
self.timer.start()
|
||||
|
||||
def run(self):
|
||||
with self.lock:
|
||||
if self.running:
|
||||
return
|
||||
self.running = True
|
||||
self.start = time.time()
|
||||
for event in self.queue:
|
||||
event.time += self.start
|
||||
self.__schedule_event()
|
||||
|
||||
def stop(self):
|
||||
with self.lock:
|
||||
if not self.running:
|
||||
return
|
||||
self.queue = []
|
||||
self.eventnum = 0
|
||||
if self.timer is not None:
|
||||
self.timer.cancel()
|
||||
self.timer = None
|
||||
self.running = False
|
||||
self.start = None
|
||||
|
||||
def add_event(self, delaysec, func, *args, **kwds):
|
||||
with self.lock:
|
||||
eventnum = self.eventnum
|
||||
self.eventnum += 1
|
||||
evtime = float(delaysec)
|
||||
if self.running:
|
||||
evtime += time.time()
|
||||
event = self.Event(eventnum, evtime, func, *args, **kwds)
|
||||
|
||||
if self.queue:
|
||||
prevhead = self.queue[0]
|
||||
else:
|
||||
prevhead = None
|
||||
|
||||
heapq.heappush(self.queue, event)
|
||||
head = self.queue[0]
|
||||
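# if the new event displaced the previous head of the queue, clear a
# timer that is no longer running so __schedule_event() below re-arms
# it for the earlier time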
if prevhead is not None and prevhead != head:
|
||||
if self.timer is not None and not self.timer.is_alive():
|
||||
self.timer.cancel()
|
||||
self.timer = None
|
||||
|
||||
if self.running and self.timer is None:
|
||||
self.__schedule_event()
|
||||
return event
|
||||
|
||||
def example():
|
||||
loop = EventLoop()
|
||||
|
||||
def msg(arg):
|
||||
delta = time.time() - loop.start
|
||||
print delta, 'arg:', arg
|
||||
|
||||
def repeat(interval, count):
|
||||
count -= 1
|
||||
msg('repeat: interval: %s; remaining: %s' % (interval, count))
|
||||
if count > 0:
|
||||
loop.add_event(interval, repeat, interval, count)
|
||||
|
||||
def sleep(delay):
|
||||
msg('sleep %s' % delay)
|
||||
time.sleep(delay)
|
||||
msg('sleep done')
|
||||
|
||||
def stop(arg):
|
||||
msg(arg)
|
||||
loop.stop()
|
||||
|
||||
loop.add_event(0, msg, 'start')
|
||||
loop.add_event(0, msg, 'time zero')
|
||||
|
||||
for delay in 5, 4, 10, -1, 0, 9, 3, 7, 3.14:
|
||||
loop.add_event(delay, msg, 'time %s' % delay)
|
||||
|
||||
loop.run()
|
||||
|
||||
loop.add_event(0, repeat, 1, 5)
|
||||
loop.add_event(12, sleep, 10)
|
||||
|
||||
loop.add_event(15.75, stop, 'stop time: 15.75')
|
daemon/core/misc/ipaddr.py (new file, 230 lines)
@@ -0,0 +1,230 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Tom Goff <thomas.goff@boeing.com>
|
||||
#
|
||||
'''
|
||||
ipaddr.py: helper objects for dealing with IPv4/v6 addresses.
|
||||
'''
|
||||
|
||||
import socket
|
||||
import struct
|
||||
import random
|
||||
|
||||
AF_INET = socket.AF_INET
|
||||
AF_INET6 = socket.AF_INET6
|
||||
|
||||
class MacAddr(object):
|
||||
def __init__(self, addr):
|
||||
self.addr = addr
|
||||
|
||||
def __str__(self):
|
||||
return ":".join(map(lambda x: ("%02x" % ord(x)), self.addr))
|
||||
|
||||
def tolinklocal(self):
|
||||
''' Convert the MAC address to a IPv6 link-local address, using EUI 48
|
||||
to EUI 64 conversion process per RFC 5342.
|
||||
'''
|
||||
if not self.addr:
|
||||
return IPAddr.fromstring("::")
|
||||
tmp = struct.unpack("!Q", '\x00\x00' + self.addr)[0]
|
||||
nic = long(tmp) & 0x000000FFFFFFL
|
||||
oui = long(tmp) & 0xFFFFFF000000L
|
||||
# toggle U/L bit
|
||||
oui ^= 0x020000000000L
|
||||
# insert the 0xFFFE bytes between OUI and NIC to form the EUI-64
|
||||
oui = (oui << 16) | 0xFFFE000000L
|
||||
return IPAddr(AF_INET6, struct.pack("!QQ", 0xfe80 << 48, oui | nic))
|
||||
|
||||
@classmethod
|
||||
def fromstring(cls, s):
|
||||
addr = "".join(map(lambda x: chr(int(x, 16)), s.split(":")))
|
||||
return cls(addr)
|
||||
|
||||
@classmethod
|
||||
def random(cls):
|
||||
tmp = random.randint(0, 0xFFFFFF)
|
||||
tmp |= 0x00163E << 24 # use the Xen OID 00:16:3E
|
||||
tmpbytes = struct.pack("!Q", tmp)
|
||||
return cls(tmpbytes[2:])
|
||||
|
||||
class IPAddr(object):
|
||||
def __init__(self, af, addr):
|
||||
# check if (af, addr) is valid
|
||||
if not socket.inet_ntop(af, addr):
|
||||
raise ValueError, "invalid af/addr"
|
||||
self.af = af
|
||||
self.addr = addr
|
||||
|
||||
def isIPv4(self):
|
||||
return self.af == AF_INET
|
||||
|
||||
def isIPv6(self):
|
||||
return self.af == AF_INET6
|
||||
|
||||
def __str__(self):
|
||||
return socket.inet_ntop(self.af, self.addr)
|
||||
|
||||
def __eq__(self, other):
|
||||
try:
|
||||
return other.af == self.af and other.addr == self.addr
|
||||
except:
|
||||
return False
|
||||
|
||||
def __add__(self, other):
|
||||
try:
|
||||
carry = int(other)
|
||||
except:
|
||||
return NotImplemented
|
||||
tmp = map(lambda x: ord(x), self.addr)
|
||||
for i in xrange(len(tmp) - 1, -1, -1):
|
||||
x = tmp[i] + carry
|
||||
tmp[i] = x & 0xff
|
||||
carry = x >> 8
|
||||
if carry == 0:
|
||||
break
|
||||
addr = "".join(map(lambda x: chr(x), tmp))
|
||||
return self.__class__(self.af, addr)
|
||||
|
||||
def __sub__(self, other):
|
||||
try:
|
||||
tmp = -int(other)
|
||||
except:
|
||||
return NotImplemented
|
||||
return self.__add__(tmp)
|
||||
|
||||
@classmethod
|
||||
def fromstring(cls, s):
|
||||
for af in AF_INET, AF_INET6:
|
||||
try:
|
||||
return cls(af, socket.inet_pton(af, s))
|
||||
except Exception, e:
|
||||
pass
|
||||
raise e
|
||||
|
||||
@staticmethod
|
||||
def toint(s):
|
||||
''' convert IPv4 string to 32-bit integer
|
||||
'''
|
||||
bin = socket.inet_pton(AF_INET, s)
|
||||
return(struct.unpack('!I', bin)[0])
|
||||
|
||||
class IPPrefix(object):
|
||||
def __init__(self, af, prefixstr):
|
||||
"prefixstr format: address/prefixlen"
|
||||
tmp = prefixstr.split("/")
|
||||
if len(tmp) > 2:
|
||||
raise ValueError, "invalid prefix: '%s'" % prefixstr
|
||||
self.af = af
|
||||
if self.af == AF_INET:
|
||||
self.addrlen = 32
|
||||
elif self.af == AF_INET6:
|
||||
self.addrlen = 128
|
||||
else:
|
||||
raise ValueError, "invalid address family: '%s'" % self.af
|
||||
if len(tmp) == 2:
|
||||
self.prefixlen = int(tmp[1])
|
||||
else:
|
||||
self.prefixlen = self.addrlen
|
||||
self.prefix = socket.inet_pton(self.af, tmp[0])
|
||||
if self.addrlen > self.prefixlen:
|
||||
addrbits = self.addrlen - self.prefixlen
|
||||
netmask = ((1L << self.prefixlen) - 1) << addrbits
|
||||
prefix = ""
|
||||
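# zero out any host bits of the stored prefix, working byte by byte
# from the end of the address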
for i in xrange(-1, -(addrbits >> 3) - 2, -1):
|
||||
prefix = chr(ord(self.prefix[i]) & (netmask & 0xff)) + prefix
|
||||
netmask >>= 8
|
||||
self.prefix = self.prefix[:i] + prefix
|
||||
|
||||
def __str__(self):
|
||||
return "%s/%s" % (socket.inet_ntop(self.af, self.prefix),
|
||||
self.prefixlen)
|
||||
|
||||
def __eq__(self, other):
|
||||
try:
|
||||
return other.af == self.af and \
|
||||
other.prefixlen == self.prefixlen and \
|
||||
other.prefix == self.prefix
|
||||
except:
|
||||
return False
|
||||
|
||||
def __add__(self, other):
|
||||
try:
|
||||
tmp = int(other)
|
||||
except:
|
||||
return NotImplemented
|
||||
a = IPAddr(self.af, self.prefix) + \
|
||||
(tmp << (self.addrlen - self.prefixlen))
|
||||
prefixstr = "%s/%s" % (a, self.prefixlen)
|
||||
if self.__class__ == IPPrefix:
|
||||
return self.__class__(self.af, prefixstr)
|
||||
else:
|
||||
return self.__class__(prefixstr)
|
||||
|
||||
def __sub__(self, other):
|
||||
try:
|
||||
tmp = -int(other)
|
||||
except:
|
||||
return NotImplemented
|
||||
return self.__add__(tmp)
|
||||
|
||||
def addr(self, hostid):
|
||||
tmp = int(hostid)
|
||||
if (tmp == 1 or tmp == 0 or tmp == -1) and self.addrlen == self.prefixlen:
|
||||
return IPAddr(self.af, self.prefix)
|
||||
if tmp == 0 or \
|
||||
tmp > (1 << (self.addrlen - self.prefixlen)) - 1 or \
|
||||
(self.af == AF_INET and tmp == (1 << (self.addrlen - self.prefixlen)) - 1):
|
||||
raise ValueError, "invalid hostid for prefix %s: %s" % (self, hostid)
|
||||
addr = ""
|
||||
for i in xrange(-1, -(self.addrlen >> 3) - 1, -1):
|
||||
addr = chr(ord(self.prefix[i]) | (tmp & 0xff)) + addr
|
||||
tmp >>= 8
|
||||
if not tmp:
|
||||
break
|
||||
addr = self.prefix[:i] + addr
|
||||
return IPAddr(self.af, addr)
|
||||
|
||||
def minaddr(self):
|
||||
return self.addr(1)
|
||||
|
||||
def maxaddr(self):
|
||||
if self.af == AF_INET:
|
||||
return self.addr((1 << (self.addrlen - self.prefixlen)) - 2)
|
||||
else:
|
||||
return self.addr((1 << (self.addrlen - self.prefixlen)) - 1)
|
||||
|
||||
def numaddr(self):
|
||||
return max(0, (1 << (self.addrlen - self.prefixlen)) - 2)
|
||||
|
||||
def prefixstr(self):
|
||||
return "%s" % socket.inet_ntop(self.af, self.prefix)
|
||||
|
||||
def netmaskstr(self):
|
||||
addrbits = self.addrlen - self.prefixlen
|
||||
netmask = ((1L << self.prefixlen) - 1) << addrbits
|
||||
netmaskbytes = struct.pack("!L", netmask)
|
||||
return IPAddr(af=AF_INET, addr=netmaskbytes).__str__()
|
||||
|
||||
class IPv4Prefix(IPPrefix):
|
||||
def __init__(self, prefixstr):
|
||||
IPPrefix.__init__(self, AF_INET, prefixstr)
|
||||
|
||||
class IPv6Prefix(IPPrefix):
|
||||
def __init__(self, prefixstr):
|
||||
IPPrefix.__init__(self, AF_INET6, prefixstr)
|
||||
|
||||
def isIPAddress(af, addrstr):
|
||||
try:
|
||||
tmp = socket.inet_pton(af, addrstr)
|
||||
return True
|
||||
except:
|
||||
return False
|
||||
|
||||
def isIPv4Address(addrstr):
|
||||
return isIPAddress(AF_INET, addrstr)
|
||||
|
||||
def isIPv6Address(addrstr):
|
||||
return isIPAddress(AF_INET6, addrstr)
|
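A brief hedged example of the address helpers above; the prefix and host values are arbitrary:

# illustrative only
p = IPv4Prefix("10.0.0.0/24")
print p.minaddr(), p.maxaddr(), p.numaddr()   # 10.0.0.1 10.0.0.254 254
print p.addr(5), p.netmaskstr()               # 10.0.0.5 255.255.255.0
mac = MacAddr.random()                        # random 00:16:3e:xx:xx:xx address
print mac, mac.tolinklocal()                  # MAC and its IPv6 link-local form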
daemon/core/misc/quagga.py (new file, 116 lines)
@@ -0,0 +1,116 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Tom Goff <thomas.goff@boeing.com>
|
||||
#
|
||||
'''
|
||||
quagga.py: helper class for generating Quagga configuration.
|
||||
'''
|
||||
|
||||
import os.path
|
||||
from string import Template
|
||||
|
||||
def maketuple(obj):
|
||||
if hasattr(obj, "__iter__"):
|
||||
return tuple(obj)
|
||||
else:
|
||||
return (obj,)
|
||||
|
||||
class NetIf(object):
|
||||
def __init__(self, name, addrlist = []):
|
||||
self.name = name
|
||||
self.addrlist = addrlist
|
||||
|
||||
class Conf(object):
|
||||
def __init__(self, **kwds):
|
||||
self.kwds = kwds
|
||||
|
||||
def __str__(self):
|
||||
tmp = self.template.substitute(**self.kwds)
|
||||
if tmp[-1] == '\n':
|
||||
tmp = tmp[:-1]
|
||||
return tmp
|
||||
|
||||
class QuaggaOSPF6Interface(Conf):
|
||||
AF_IPV6_ID = 0
|
||||
AF_IPV4_ID = 65
|
||||
|
||||
template = Template("""\
|
||||
interface $interface
|
||||
$addr
|
||||
ipv6 ospf6 instance-id $instanceid
|
||||
ipv6 ospf6 hello-interval 2
|
||||
ipv6 ospf6 dead-interval 11
|
||||
ipv6 ospf6 retransmit-interval 5
|
||||
ipv6 ospf6 network $network
|
||||
ipv6 ospf6 diffhellos
|
||||
ipv6 ospf6 adjacencyconnectivity uniconnected
|
||||
ipv6 ospf6 lsafullness mincostlsa
|
||||
""")
|
||||
|
||||
# ip address $ipaddr/32
|
||||
# ipv6 ospf6 simhelloLLtoULRecv :$simhelloport
|
||||
# !$ipaddr:$simhelloport
|
||||
|
||||
def __init__(self, netif, instanceid = AF_IPV4_ID,
|
||||
network = "manet-designated-router", **kwds):
|
||||
self.netif = netif
|
||||
def addrstr(x):
|
||||
if x.find(".") >= 0:
|
||||
return "ip address %s" % x
|
||||
elif x.find(":") >= 0:
|
||||
return "ipv6 address %s" % x
|
||||
else:
|
||||
raise ValueError, "invalid address: %s" % x
|
||||
addr = "\n ".join(map(addrstr, netif.addrlist))
|
||||
|
||||
self.instanceid = instanceid
|
||||
self.network = network
|
||||
Conf.__init__(self, interface = netif.name, addr = addr,
|
||||
instanceid = instanceid, network = network, **kwds)
|
||||
|
||||
def name(self):
|
||||
return self.netif.name
|
||||
|
||||
class QuaggaOSPF6(Conf):
|
||||
|
||||
template = Template("""\
|
||||
$interfaces
|
||||
!
|
||||
router ospf6
|
||||
router-id $routerid
|
||||
$ospfifs
|
||||
$redistribute
|
||||
""")
|
||||
|
||||
def __init__(self, ospf6ifs, area, routerid,
|
||||
redistribute = "! no redistribute"):
|
||||
ospf6ifs = maketuple(ospf6ifs)
|
||||
interfaces = "\n!\n".join(map(str, ospf6ifs))
|
||||
ospfifs = "\n ".join(map(lambda x: "interface %s area %s" % \
|
||||
(x.name(), area), ospf6ifs))
|
||||
Conf.__init__(self, interfaces = interfaces, routerid = routerid,
|
||||
ospfifs = ospfifs, redistribute = redistribute)
|
||||
|
||||
|
||||
class QuaggaConf(Conf):
|
||||
template = Template("""\
|
||||
log file $logfile
|
||||
$debugs
|
||||
!
|
||||
$routers
|
||||
!
|
||||
$forwarding
|
||||
""")
|
||||
|
||||
def __init__(self, routers, logfile, debugs = ()):
|
||||
routers = "\n!\n".join(map(str, maketuple(routers)))
|
||||
if debugs:
|
||||
debugs = "\n".join(maketuple(debugs))
|
||||
else:
|
||||
debugs = "! no debugs"
|
||||
forwarding = "ip forwarding\nipv6 forwarding"
|
||||
Conf.__init__(self, logfile = logfile, debugs = debugs,
|
||||
routers = routers, forwarding = forwarding)
|
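A hedged sketch of how these template classes compose into a Quagga configuration file; the interface name, addresses, and paths are made up:

# illustrative only
eth0 = NetIf("eth0", ["10.0.0.1/24", "a::1/64"])
ospf6if = QuaggaOSPF6Interface(eth0)
ospf6 = QuaggaOSPF6(ospf6if, area = "0.0.0.0", routerid = "10.0.0.1")
conf = QuaggaConf(ospf6, logfile = "/tmp/quagga.log")
print conf        # rendered Quagga.conf text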
daemon/core/misc/utils.py (new file, 228 lines)
@@ -0,0 +1,228 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# authors: Tom Goff <thomas.goff@boeing.com>
|
||||
# Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
utils.py: miscellaneous utility functions, wrappers around some subprocess
|
||||
procedures.
|
||||
'''
|
||||
|
||||
import subprocess, os, ast, resource
|
||||
|
||||
def checkexec(execlist):
|
||||
for bin in execlist:
|
||||
# note that os.access() uses real uid/gid; that should be okay
|
||||
# here
|
||||
if not os.access(bin, os.X_OK):
|
||||
raise EnvironmentError, "executable not found: %s" % bin
|
||||
|
||||
def ensurepath(pathlist):
|
||||
searchpath = os.environ["PATH"].split(":")
|
||||
for p in set(pathlist):
|
||||
if p not in searchpath:
|
||||
os.environ["PATH"] += ":" + p
|
||||
|
||||
def maketuple(obj):
|
||||
if hasattr(obj, "__iter__"):
|
||||
return tuple(obj)
|
||||
else:
|
||||
return (obj,)
|
||||
|
||||
def maketuplefromstr(s, type):
|
||||
s = s.replace('\\', '\\\\')
|
||||
return ast.literal_eval(s)
|
||||
#return tuple(type(i) for i in s[1:-1].split(','))
|
||||
#r = ()
|
||||
#for i in s.strip("()").split(','):
|
||||
# r += (i.strip("' "), )
|
||||
# chop empty last element from "('a',)" strings
|
||||
#if r[-1] == '':
|
||||
# r = r[:-1]
|
||||
#return r
|
||||
|
||||
def call(*args, **kwds):
|
||||
return subprocess.call(*args, **kwds)
|
||||
|
||||
def mutecall(*args, **kwds):
|
||||
kwds["stdout"] = open(os.devnull, "w")
|
||||
kwds["stderr"] = subprocess.STDOUT
|
||||
return call(*args, **kwds)
|
||||
|
||||
def check_call(*args, **kwds):
|
||||
return subprocess.check_call(*args, **kwds)
|
||||
|
||||
def mutecheck_call(*args, **kwds):
|
||||
kwds["stdout"] = open(os.devnull, "w")
|
||||
kwds["stderr"] = subprocess.STDOUT
|
||||
return subprocess.check_call(*args, **kwds)
|
||||
|
||||
def spawn(*args, **kwds):
|
||||
return subprocess.Popen(*args, **kwds).pid
|
||||
|
||||
def mutespawn(*args, **kwds):
|
||||
kwds["stdout"] = open(os.devnull, "w")
|
||||
kwds["stderr"] = subprocess.STDOUT
|
||||
return subprocess.Popen(*args, **kwds).pid
|
||||
|
||||
def detachinit():
|
||||
if os.fork():
|
||||
os._exit(0) # parent exits
|
||||
os.setsid()
|
||||
|
||||
def detach(*args, **kwds):
|
||||
kwds["preexec_fn"] = detachinit
|
||||
return subprocess.Popen(*args, **kwds).pid
|
||||
|
||||
def mutedetach(*args, **kwds):
|
||||
kwds["preexec_fn"] = detachinit
|
||||
kwds["stdout"] = open(os.devnull, "w")
|
||||
kwds["stderr"] = subprocess.STDOUT
|
||||
return subprocess.Popen(*args, **kwds).pid
|
||||
|
||||
def hexdump(s, bytes_per_word = 2, words_per_line = 8):
|
||||
dump = ""
|
||||
count = 0
|
||||
bytes = bytes_per_word * words_per_line
|
||||
while s:
|
||||
line = s[:bytes]
|
||||
s = s[bytes:]
|
||||
tmp = map(lambda x: ("%02x" * bytes_per_word) % x,
|
||||
zip(*[iter(map(ord, line))] * bytes_per_word))
|
||||
if len(line) % 2:
|
||||
tmp.append("%x" % ord(line[-1]))
|
||||
dump += "0x%08x: %s\n" % (count, " ".join(tmp))
|
||||
count += len(line)
|
||||
return dump[:-1]
|
||||
|
||||
def filemunge(pathname, header, text):
|
||||
''' Insert text at the end of a file, surrounded by header comments.
|
||||
'''
|
||||
filedemunge(pathname, header) # prevent duplicates
|
||||
f = open(pathname, 'a')
|
||||
f.write("# BEGIN %s\n" % header)
|
||||
f.write(text)
|
||||
f.write("# END %s\n" % header)
|
||||
f.close()
|
||||
|
||||
def filedemunge(pathname, header):
|
||||
''' Remove text that was inserted in a file surrounded by header comments.
|
||||
'''
|
||||
f = open(pathname, 'r')
|
||||
lines = f.readlines()
|
||||
f.close()
|
||||
start = None
|
||||
end = None
|
||||
for i in range(len(lines)):
|
||||
if lines[i] == "# BEGIN %s\n" % header:
|
||||
start = i
|
||||
elif lines[i] == "# END %s\n" % header:
|
||||
end = i + 1
|
||||
if start is None or end is None:
|
||||
return
|
||||
f = open(pathname, 'w')
|
||||
lines = lines[:start] + lines[end:]
|
||||
f.write("".join(lines))
|
||||
f.close()
|
||||
|
||||
def expandcorepath(pathname, session=None, node=None):
|
||||
''' Expand a file path given session information.
|
||||
'''
|
||||
if session is not None:
|
||||
pathname = pathname.replace('~', "/home/%s" % session.user)
|
||||
pathname = pathname.replace('%SESSION%', str(session.sessionid))
|
||||
pathname = pathname.replace('%SESSION_DIR%', session.sessiondir)
|
||||
pathname = pathname.replace('%SESSION_USER%', session.user)
|
||||
if node is not None:
|
||||
pathname = pathname.replace('%NODE%', str(node.objid))
|
||||
pathname = pathname.replace('%NODENAME%', node.name)
|
||||
return pathname
|
||||
|
||||
def sysctldevname(devname):
|
||||
''' Translate a device name to the name used with sysctl.
|
||||
'''
|
||||
if devname is None:
|
||||
return None
|
||||
return devname.replace(".", "/")
|
||||
|
||||
def daemonize(rootdir = "/", umask = 0, close_fds = False, dontclose = (),
|
||||
stdin = os.devnull, stdout = os.devnull, stderr = os.devnull,
|
||||
stdoutmode = 0644, stderrmode = 0644, pidfilename = None,
|
||||
defaultmaxfd = 1024):
|
||||
''' Run the background process as a daemon.
|
||||
'''
|
||||
if not hasattr(dontclose, "__contains__"):
|
||||
if not isinstance(dontclose, int):
|
||||
raise TypeError, "dontclose must be an integer"
|
||||
dontclose = (int(dontclose),)
|
||||
else:
|
||||
for fd in dontclose:
|
||||
if not isinstance(fd, int):
|
||||
raise TypeError, "dontclose must contain only integers"
|
||||
# redirect stdin
|
||||
if stdin:
|
||||
fd = os.open(stdin, os.O_RDONLY)
|
||||
os.dup2(fd, 0)
|
||||
os.close(fd)
|
||||
# redirect stdout
|
||||
if stdout:
|
||||
fd = os.open(stdout, os.O_WRONLY | os.O_CREAT | os.O_APPEND,
|
||||
stdoutmode)
|
||||
os.dup2(fd, 1)
|
||||
if (stdout == stderr):
|
||||
os.dup2(1, 2)
|
||||
os.close(fd)
|
||||
# redirect stderr
|
||||
if stderr and (stderr != stdout):
|
||||
fd = os.open(stderr, os.O_WRONLY | os.O_CREAT | os.O_APPEND,
|
||||
stderrmode)
|
||||
os.dup2(fd, 2)
|
||||
os.close(fd)
|
||||
if os.fork():
|
||||
os._exit(0) # parent exits
|
||||
os.setsid()
|
||||
pid = os.fork()
|
||||
if pid:
|
||||
if pidfilename:
|
||||
try:
|
||||
f = open(pidfilename, "w")
|
||||
f.write("%s\n" % pid)
|
||||
f.close()
|
||||
except:
|
||||
pass
|
||||
os._exit(0) # parent exits
|
||||
if rootdir:
|
||||
os.chdir(rootdir)
|
||||
os.umask(umask)
|
||||
if close_fds:
|
||||
try:
|
||||
maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
|
||||
if maxfd == resource.RLIM_INFINITY:
|
||||
raise ValueError
|
||||
except:
|
||||
maxfd = defaultmaxfd
|
||||
for fd in xrange(3, maxfd):
|
||||
if fd in dontclose:
|
||||
continue
|
||||
try:
|
||||
os.close(fd)
|
||||
except:
|
||||
pass
|
||||
|
||||
def readfileintodict(filename, d):
|
||||
''' Read key=value pairs from a file, into a dict.
|
||||
Skip comments; strip newline characters and spacing.
|
||||
'''
|
||||
with open(filename, 'r') as f:
|
||||
lines = f.readlines()
|
||||
for l in lines:
|
||||
if l[:1] == '#':
|
||||
continue
|
||||
try:
|
||||
key, value = l.split('=', 1)
|
||||
d[key] = value.strip()
|
||||
except ValueError:
|
||||
pass
|
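A short hedged example of the path expansion and file munging helpers above; the session and node objects are assumed to provide the attributes referenced in expandcorepath(), and the file path is arbitrary but must already exist:

# illustrative only
path = expandcorepath("%SESSION_DIR%/%NODENAME%.conf", session, node)
# e.g. /tmp/pycore.12345/n1.conf, depending on the session and node

# append a marked block to an existing file, then remove it again
filemunge("/tmp/hosts.example", "core custom entries", "10.0.0.1 n1\n")
filedemunge("/tmp/hosts.example", "core custom entries")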
daemon/core/misc/utm.py (new file, 259 lines)
@@ -0,0 +1,259 @@
|
|||
"""
|
||||
utm
|
||||
===
|
||||
|
||||
.. image:: https://travis-ci.org/Turbo87/utm.png
|
||||
|
||||
Bidirectional UTM-WGS84 converter for python
|
||||
|
||||
Usage
|
||||
-----
|
||||
|
||||
::
|
||||
|
||||
import utm
|
||||
|
||||
Convert a (latitude, longitude) tuple into a UTM coordinate::
|
||||
|
||||
utm.from_latlon(51.2, 7.5)
|
||||
>>> (395201.3103811303, 5673135.241182375, 32, 'U')
|
||||
|
||||
Convert a UTM coordinate into a (latitude, longitude) tuple::
|
||||
|
||||
utm.to_latlon(340000, 5710000, 32, 'U')
|
||||
>>> (51.51852098408468, 6.693872395145327)
|
||||
|
||||
Speed
|
||||
-----
|
||||
|
||||
The library has been compared to the more generic pyproj library by running the
|
||||
unit test suite through pyproj instead of utm. These are the results:
|
||||
|
||||
* with pyproj (without projection cache): 4.0 - 4.5 sec
|
||||
* with pyproj (with projection cache): 0.9 - 1.0 sec
|
||||
* with utm: 0.4 - 0.5 sec
|
||||
|
||||
Authors
|
||||
-------
|
||||
|
||||
* Tobias Bieniek <Tobias.Bieniek@gmx.de>
|
||||
|
||||
License
|
||||
-------
|
||||
|
||||
Copyright (C) 2012 Tobias Bieniek <Tobias.Bieniek@gmx.de>
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||
of the Software, and to permit persons to whom the Software is furnished to do
|
||||
so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
"""
|
||||
|
||||
import math
|
||||
|
||||
__all__ = ['to_latlon', 'from_latlon']
|
||||
|
||||
class OutOfRangeError(ValueError):
|
||||
pass
|
||||
|
||||
|
||||
K0 = 0.9996
|
||||
|
||||
E = 0.00669438
|
||||
E2 = E * E
|
||||
E3 = E2 * E
|
||||
E_P2 = E / (1.0 - E)
|
||||
|
||||
SQRT_E = math.sqrt(1 - E)
|
||||
_E = (1 - SQRT_E) / (1 + SQRT_E)
|
||||
_E3 = _E * _E * _E
|
||||
_E4 = _E3 * _E
|
||||
|
||||
M1 = (1 - E / 4 - 3 * E2 / 64 - 5 * E3 / 256)
|
||||
M2 = (3 * E / 8 + 3 * E2 / 32 + 45 * E3 / 1024)
|
||||
M3 = (15 * E2 / 256 + 45 * E3 / 1024)
|
||||
M4 = (35 * E3 / 3072)
|
||||
|
||||
P2 = (3 * _E / 2 - 27 * _E3 / 32)
|
||||
P3 = (21 * _E3 / 16 - 55 * _E4 / 32)
|
||||
P4 = (151 * _E3 / 96)
|
||||
|
||||
R = 6378137
|
||||
|
||||
ZONE_LETTERS = [
|
||||
(84, None), (72, 'X'), (64, 'W'), (56, 'V'), (48, 'U'), (40, 'T'),
|
||||
(32, 'S'), (24, 'R'), (16, 'Q'), (8, 'P'), (0, 'N'), (-8, 'M'), (-16, 'L'),
|
||||
(-24, 'K'), (-32, 'J'), (-40, 'H'), (-48, 'G'), (-56, 'F'), (-64, 'E'),
|
||||
(-72, 'D'), (-80, 'C')
|
||||
]
|
||||
|
||||
|
||||
def to_latlon(easting, northing, zone_number, zone_letter):
|
||||
zone_letter = zone_letter.upper()
|
||||
|
||||
if not 100000 <= easting < 1000000:
|
||||
raise OutOfRangeError('easting out of range (must be between 100,000 m and 999,999 m)')
|
||||
if not 0 <= northing <= 10000000:
|
||||
raise OutOfRangeError('northing out of range (must be between 0 m and 10,000,000 m)')
|
||||
if not 1 <= zone_number <= 60:
|
||||
raise OutOfRangeError('zone number out of range (must be between 1 and 60)')
|
||||
if not 'C' <= zone_letter <= 'X' or zone_letter in ['I', 'O']:
|
||||
raise OutOfRangeError('zone letter out of range (must be between C and X)')
|
||||
|
||||
x = easting - 500000
|
||||
y = northing
|
||||
|
||||
if zone_letter < 'N':
|
||||
y -= 10000000
|
||||
|
||||
m = y / K0
|
||||
mu = m / (R * M1)
|
||||
|
||||
p_rad = (mu + P2 * math.sin(2 * mu) + P3 * math.sin(4 * mu) + P4 * math.sin(6 * mu))
|
||||
|
||||
p_sin = math.sin(p_rad)
|
||||
p_sin2 = p_sin * p_sin
|
||||
|
||||
p_cos = math.cos(p_rad)
|
||||
|
||||
p_tan = p_sin / p_cos
|
||||
p_tan2 = p_tan * p_tan
|
||||
p_tan4 = p_tan2 * p_tan2
|
||||
|
||||
ep_sin = 1 - E * p_sin2
|
||||
ep_sin_sqrt = math.sqrt(1 - E * p_sin2)
|
||||
|
||||
n = R / ep_sin_sqrt
|
||||
r = (1 - E) / ep_sin
|
||||
|
||||
c = _E * p_cos**2
|
||||
c2 = c * c
|
||||
|
||||
d = x / (n * K0)
|
||||
d2 = d * d
|
||||
d3 = d2 * d
|
||||
d4 = d3 * d
|
||||
d5 = d4 * d
|
||||
d6 = d5 * d
|
||||
|
||||
latitude = (p_rad - (p_tan / r) *
|
||||
(d2 / 2 -
|
||||
d4 / 24 * (5 + 3 * p_tan2 + 10 * c - 4 * c2 - 9 * E_P2)) +
|
||||
d6 / 720 * (61 + 90 * p_tan2 + 298 * c + 45 * p_tan4 - 252 * E_P2 - 3 * c2))
|
||||
|
||||
longitude = (d -
|
||||
d3 / 6 * (1 + 2 * p_tan2 + c) +
|
||||
d5 / 120 * (5 - 2 * c + 28 * p_tan2 - 3 * c2 + 8 * E_P2 + 24 * p_tan4)) / p_cos
|
||||
|
||||
return (math.degrees(latitude),
|
||||
math.degrees(longitude) + zone_number_to_central_longitude(zone_number))
|
||||
|
||||
|
||||
def from_latlon(latitude, longitude):
|
||||
if not -80.0 <= latitude <= 84.0:
|
||||
raise OutOfRangeError('latitude out of range (must be between 80 deg S and 84 deg N)')
|
||||
if not -180.0 <= longitude <= 180.0:
|
||||
raise OutOfRangeError('longitude out of range (must be between 180 deg W and 180 deg E)')
|
||||
|
||||
lat_rad = math.radians(latitude)
|
||||
lat_sin = math.sin(lat_rad)
|
||||
lat_cos = math.cos(lat_rad)
|
||||
|
||||
lat_tan = lat_sin / lat_cos
|
||||
lat_tan2 = lat_tan * lat_tan
|
||||
lat_tan4 = lat_tan2 * lat_tan2
|
||||
|
||||
lon_rad = math.radians(longitude)
|
||||
|
||||
zone_number = latlon_to_zone_number(latitude, longitude)
|
||||
central_lon = zone_number_to_central_longitude(zone_number)
|
||||
central_lon_rad = math.radians(central_lon)
|
||||
|
||||
zone_letter = latitude_to_zone_letter(latitude)
|
||||
|
||||
n = R / math.sqrt(1 - E * lat_sin**2)
|
||||
c = E_P2 * lat_cos**2
|
||||
|
||||
a = lat_cos * (lon_rad - central_lon_rad)
|
||||
a2 = a * a
|
||||
a3 = a2 * a
|
||||
a4 = a3 * a
|
||||
a5 = a4 * a
|
||||
a6 = a5 * a
|
||||
|
||||
m = R * (M1 * lat_rad -
|
||||
M2 * math.sin(2 * lat_rad) +
|
||||
M3 * math.sin(4 * lat_rad) -
|
||||
M4 * math.sin(6 * lat_rad))
|
||||
|
||||
easting = K0 * n * (a +
|
||||
a3 / 6 * (1 - lat_tan2 + c) +
|
||||
a5 / 120 * (5 - 18 * lat_tan2 + lat_tan4 + 72 * c - 58 * E_P2)) + 500000
|
||||
|
||||
northing = K0 * (m + n * lat_tan * (a2 / 2 +
|
||||
a4 / 24 * (5 - lat_tan2 + 9 * c + 4 * c**2) +
|
||||
a6 / 720 * (61 - 58 * lat_tan2 + lat_tan4 + 600 * c - 330 * E_P2)))
|
||||
|
||||
if latitude < 0:
|
||||
northing += 10000000
|
||||
|
||||
return easting, northing, zone_number, zone_letter
|
||||
|
||||
|
||||
def latitude_to_zone_letter(latitude):
|
||||
for lat_min, zone_letter in ZONE_LETTERS:
|
||||
if latitude >= lat_min:
|
||||
return zone_letter
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def latlon_to_zone_number(latitude, longitude):
|
||||
if 56 <= latitude <= 64 and 3 <= longitude <= 12:
|
||||
return 32
|
||||
|
||||
if 72 <= latitude <= 84 and longitude >= 0:
|
||||
if longitude <= 9:
|
||||
return 31
|
||||
elif longitude <= 21:
|
||||
return 33
|
||||
elif longitude <= 33:
|
||||
return 35
|
||||
elif longitude <= 42:
|
||||
return 37
|
||||
|
||||
return int((longitude + 180) / 6) + 1
|
||||
|
||||
|
||||
def zone_number_to_central_longitude(zone_number):
|
||||
return (zone_number - 1) * 6 - 180 + 3
|
||||
|
||||
|
||||
def haversine(lon1, lat1, lon2, lat2):
|
||||
"""
|
||||
Calculate the great circle distance between two points
|
||||
on the earth (specified in decimal degrees)
|
||||
"""
|
||||
# convert decimal degrees to radians
|
||||
lon1, lat1, lon2, lat2 = map(math.radians, [lon1, lat1, lon2, lat2])
|
||||
# haversine formula
|
||||
dlon = lon2 - lon1
|
||||
dlat = lat2 - lat1
|
||||
a = math.sin(dlat/2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon/2)**2
|
||||
c = 2 * math.asin(math.sqrt(a))
|
||||
m = 6367000 * c
|
||||
return m
|
||||
|
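To complement the module docstring's examples, a hedged illustration of the haversine() helper added for CORE; the coordinates are arbitrary and the distances are approximate great-circle meters:

# illustrative only
e, n, zonen, zonel = from_latlon(47.57, -122.13)
lat, lon = to_latlon(e, n, zonen, zonel)        # round-trips to roughly (47.57, -122.13)
d = haversine(-122.13, 47.57, -122.12, 47.57)   # ~750 m for 0.01 deg of longitude at this latitude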
daemon/core/misc/xmlutils.py (new file, 776 lines)
@@ -0,0 +1,776 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2011-2013 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
Helpers for loading and saving XML files. savesessionxml(session, filename) is
|
||||
the main public interface here.
|
||||
'''
|
||||
import os, pwd
|
||||
from xml.dom.minidom import parse, Document, Node
|
||||
from core import pycore
|
||||
from core.api import coreapi
|
||||
|
||||
def addelementsfromlist(dom, parent, iterable, name, attr_name):
|
||||
''' XML helper to iterate through a list and add items to parent using tags
|
||||
of the given name and the item value as an attribute named attr_name.
|
||||
Example: addelementsfromlist(dom, parent, ('a','b','c'), "letter", "value")
|
||||
<parent>
|
||||
<letter value="a"/>
|
||||
<letter value="b"/>
|
||||
<letter value="c"/>
|
||||
</parent>
|
||||
'''
|
||||
for item in iterable:
|
||||
element = dom.createElement(name)
|
||||
element.setAttribute(attr_name, item)
|
||||
parent.appendChild(element)
|
||||
|
||||
def addtextelementsfromlist(dom, parent, iterable, name, attrs):
|
||||
''' XML helper to iterate through a list and add items to parent using tags
|
||||
of the given name, attributes specified in the attrs tuple, and having the
|
||||
text of the item within the tags.
|
||||
'''
|
||||
for item in iterable:
|
||||
element = dom.createElement(name)
|
||||
for k,v in attrs:
|
||||
element.setAttribute(k, v)
|
||||
parent.appendChild(element)
|
||||
txt = dom.createTextNode(item)
|
||||
element.appendChild(txt)
|
||||
|
||||
def gettextelementstolist(parent):
|
||||
''' XML helper to parse child text nodes from the given parent and return
|
||||
a list of (key, value) tuples.
|
||||
'''
|
||||
r = []
|
||||
for n in parent.childNodes:
|
||||
if n.nodeType != Node.ELEMENT_NODE:
|
||||
continue
|
||||
k = str(n.nodeName)
|
||||
v = '' # sometimes want None here?
|
||||
for c in n.childNodes:
|
||||
if c.nodeType != Node.TEXT_NODE:
|
||||
continue
|
||||
v = str(c.nodeValue)
|
||||
break
|
||||
r.append((k,v))
|
||||
return r
|
||||
|
||||
def addparamtoparent(dom, parent, name, value):
|
||||
''' XML helper to add a <param name="name" value="value"/> tag to the parent
|
||||
element, when value is not None.
|
||||
'''
|
||||
if value is None:
|
||||
return None
|
||||
p = dom.createElement("param")
|
||||
parent.appendChild(p)
|
||||
p.setAttribute("name", name)
|
||||
p.setAttribute("value", "%s" % value)
|
||||
return p
|
||||
|
||||
def addtextparamtoparent(dom, parent, name, value):
|
||||
''' XML helper to add a <param name="name">value</param> tag to the parent
|
||||
element, when value is not None.
|
||||
'''
|
||||
if value is None:
|
||||
return None
|
||||
p = dom.createElement("param")
|
||||
parent.appendChild(p)
|
||||
p.setAttribute("name", name)
|
||||
txt = dom.createTextNode(value)
|
||||
p.appendChild(txt)
|
||||
return p
|
||||
|
||||
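A hedged illustration of the two param helpers above; the element names and values are arbitrary, and the expected output is shown as comments:

# illustrative only
from xml.dom.minidom import Document
dom = Document()
parent = dom.createElement("service")
dom.appendChild(parent)
addparamtoparent(dom, parent, "zebra", "/usr/local/etc/quagga")
# -> <param name="zebra" value="/usr/local/etc/quagga"/>
addtextparamtoparent(dom, parent, "cmd", "zebra -d")
# -> <param name="cmd">zebra -d</param>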
def getoneelement(dom, name):
|
||||
e = dom.getElementsByTagName(name)
|
||||
if len(e) == 0:
|
||||
return None
|
||||
return e[0]
|
||||
|
||||
def gettextchild(dom):
|
||||
# this could be improved to skip XML comments
|
||||
child = dom.firstChild
|
||||
if child is not None and child.nodeType == Node.TEXT_NODE:
|
||||
return str(child.nodeValue)
|
||||
return None
|
||||
|
||||
def getparamssetattrs(dom, param_names, target):
|
||||
''' XML helper to get <param name="name" value="value"/> tags and set
|
||||
the attribute in the target object. String type is used. Target object
|
||||
attribute is unchanged if the XML attribute is not present.
|
||||
'''
|
||||
params = dom.getElementsByTagName("param")
|
||||
for param in params:
|
||||
param_name = param.getAttribute("name")
|
||||
value = param.getAttribute("value")
|
||||
if value is None:
|
||||
continue # never reached?
|
||||
if param_name in param_names:
|
||||
setattr(target, param_name, str(value))
|
||||
|
||||
def xmltypetonodeclass(session, type):
|
||||
''' Helper to convert from a type string to a class name in pycore.nodes.*.
|
||||
'''
|
||||
if hasattr(pycore.nodes, type):
|
||||
return eval("pycore.nodes.%s" % type)
|
||||
else:
|
||||
return None
|
||||
|
||||
class CoreDocumentParser(object):
|
||||
def __init__(self, session, filename):
|
||||
self.session = session
|
||||
self.verbose = self.session.getcfgitembool('verbose', False)
|
||||
self.filename = filename
|
||||
self.dom = parse(filename)
|
||||
|
||||
#self.scenario = getoneelement(self.dom, "Scenario")
|
||||
self.np = getoneelement(self.dom, "NetworkPlan")
|
||||
if self.np is None:
|
||||
raise ValueError, "missing NetworkPlan!"
|
||||
self.mp = getoneelement(self.dom, "MotionPlan")
|
||||
self.sp = getoneelement(self.dom, "ServicePlan")
|
||||
self.meta = getoneelement(self.dom, "CoreMetaData")
|
||||
|
||||
self.coords = self.getmotiondict(self.mp)
|
||||
# link parameters parsed in parsenets(), applied in parsenodes()
|
||||
self.linkparams = {}
|
||||
|
||||
self.parsenets()
|
||||
self.parsenodes()
|
||||
self.parseservices()
|
||||
self.parsemeta()
|
||||
|
||||
|
||||
def warn(self, msg):
|
||||
if self.session:
|
||||
warnstr = "XML parsing '%s':" % (self.filename)
|
||||
self.session.warn("%s %s" % (warnstr, msg))
|
||||
|
||||
def getmotiondict(self, mp):
|
||||
''' Parse a MotionPlan into a dict with node names for keys and coordinates
|
||||
for values.
|
||||
'''
|
||||
if mp is None:
|
||||
return {}
|
||||
coords = {}
|
||||
for node in mp.getElementsByTagName("Node"):
|
||||
nodename = str(node.getAttribute("name"))
|
||||
if nodename == '':
|
||||
continue
|
||||
for m in node.getElementsByTagName("motion"):
|
||||
if m.getAttribute("type") != "stationary":
|
||||
continue
|
||||
point = m.getElementsByTagName("point")
|
||||
if len(point) == 0:
|
||||
continue
|
||||
txt = point[0].firstChild
|
||||
if txt is None:
|
||||
continue
|
||||
xyz = map(int, txt.nodeValue.split(','))
|
||||
z = None
|
||||
x, y = xyz[0:2]
|
||||
if (len(xyz) == 3):
|
||||
z = xyz[2]
|
||||
coords[nodename] = (x, y, z)
|
||||
return coords
|
||||
|
||||
@staticmethod
|
||||
def getcommonattributes(obj):
|
||||
''' Helper to return tuple of attributes common to nodes and nets.
|
||||
'''
|
||||
id = int(obj.getAttribute("id"))
|
||||
name = str(obj.getAttribute("name"))
|
||||
type = str(obj.getAttribute("type"))
|
||||
return(id, name, type)
|
||||
|
||||
def parsenets(self):
|
||||
linkednets = []
|
||||
for net in self.np.getElementsByTagName("NetworkDefinition"):
|
||||
id, name, type = self.getcommonattributes(net)
|
||||
nodecls = xmltypetonodeclass(self.session, type)
|
||||
if not nodecls:
|
||||
self.warn("skipping unknown network node '%s' type '%s'" % \
|
||||
(name, type))
|
||||
continue
|
||||
n = self.session.addobj(cls = nodecls, objid = id, name = name,
|
||||
start = False)
|
||||
if name in self.coords:
|
||||
x, y, z = self.coords[name]
|
||||
n.setposition(x, y, z)
|
||||
getparamssetattrs(net, ("icon", "canvas", "opaque"), n)
|
||||
if hasattr(n, "canvas") and n.canvas is not None:
|
||||
n.canvas = int(n.canvas)
|
||||
# links between two nets (e.g. switch-switch)
|
||||
for ifc in net.getElementsByTagName("interface"):
|
||||
netid = str(ifc.getAttribute("net"))
|
||||
linkednets.append((n, netid))
|
||||
self.parsemodels(net, n)
|
||||
# link networks together now that they all have been parsed
|
||||
for (n, netid) in linkednets:
|
||||
try:
|
||||
n2 = n.session.objbyname(netid)
|
||||
except KeyError:
|
||||
n.warn("skipping net %s interface: unknown net %s" % \
|
||||
(n.name, netid))
|
||||
continue
|
||||
n.linknet(n2)
|
||||
|
||||
def parsenodes(self):
|
||||
for node in self.np.getElementsByTagName("Node"):
|
||||
id, name, type = self.getcommonattributes(node)
|
||||
if type == "rj45":
|
||||
nodecls = pycore.nodes.RJ45Node
|
||||
else:
|
||||
nodecls = pycore.nodes.CoreNode
|
||||
n = self.session.addobj(cls = nodecls, objid = id, name = name,
|
||||
start = False)
|
||||
if name in self.coords:
|
||||
x, y, z = self.coords[name]
|
||||
n.setposition(x, y, z)
|
||||
n.type = type
|
||||
getparamssetattrs(node, ("icon", "canvas", "opaque"), n)
|
||||
if hasattr(n, "canvas") and n.canvas is not None:
|
||||
n.canvas = int(n.canvas)
|
||||
for ifc in node.getElementsByTagName("interface"):
|
||||
self.parseinterface(n, ifc)
|
||||
|
||||
def parseinterface(self, n, ifc):
|
||||
''' Parse an interface block such as:
|
||||
<interface name="eth0" net="37278">
|
||||
<address type="mac">00:00:00:aa:00:01</address>
|
||||
<address>10.0.0.2/24</address>
|
||||
<address>2001::2/64</address>
|
||||
</interface>
|
||||
'''
|
||||
name = str(ifc.getAttribute("name"))
|
||||
netid = str(ifc.getAttribute("net"))
|
||||
hwaddr = None
|
||||
addrlist = []
|
||||
try:
|
||||
net = n.session.objbyname(netid)
|
||||
except KeyError:
|
||||
n.warn("skipping node %s interface %s: unknown net %s" % \
|
||||
(n.name, name, netid))
|
||||
return
|
||||
for addr in ifc.getElementsByTagName("address"):
|
||||
addrstr = gettextchild(addr)
|
||||
if addrstr is None:
|
||||
continue
|
||||
if addr.getAttribute("type") == "mac":
|
||||
hwaddr = addrstr
|
||||
else:
|
||||
addrlist.append(addrstr)
|
||||
i = n.newnetif(net, addrlist = addrlist, hwaddr = hwaddr,
|
||||
ifindex = None, ifname = name)
|
||||
for model in ifc.getElementsByTagName("model"):
|
||||
self.parsemodel(model, n, n.objid)
|
||||
key = (n.name, name)
|
||||
if key in self.linkparams:
|
||||
netif = n.netif(i)
|
||||
for (k, v) in self.linkparams[key]:
|
||||
netif.setparam(k, v)
|
||||
|
||||
def parsemodels(self, dom, obj):
|
||||
''' Mobility/wireless model config is stored in a ConfigurableManager's
|
||||
config dict.
|
||||
'''
|
||||
nodenum = int(dom.getAttribute("id"))
|
||||
for model in dom.getElementsByTagName("model"):
|
||||
self.parsemodel(model, obj, nodenum)
|
||||
|
||||
def parsemodel(self, model, obj, nodenum):
|
||||
''' Mobility/wireless model config is stored in a ConfigurableManager's
|
||||
config dict.
|
||||
'''
|
||||
name = model.getAttribute("name")
|
||||
if name == '':
|
||||
return
|
||||
type = model.getAttribute("type")
|
||||
# convert child text nodes into key=value pairs
|
||||
kvs = gettextelementstolist(model)
|
||||
|
||||
mgr = self.session.mobility
|
||||
# TODO: the session.confobj() mechanism could be more generic;
|
||||
# it only allows registering Conf Message callbacks, but here
|
||||
# we want access to the ConfigurableManager, not the callback
|
||||
if name[:5] == "emane":
|
||||
mgr = self.session.emane
|
||||
elif name[:5] == "netem":
|
||||
mgr = None
|
||||
self.parsenetem(model, obj, kvs)
|
||||
|
||||
elif name[:3] == "xen":
|
||||
mgr = self.session.xen
|
||||
# TODO: assign other config managers here
|
||||
if mgr:
|
||||
mgr.setconfig_keyvalues(nodenum, name, kvs)
|
||||
|
||||
def parsenetem(self, model, obj, kvs):
|
||||
''' Determine interface and invoke setparam() using the parsed
|
||||
(key, value) pairs.
|
||||
'''
|
||||
ifname = model.getAttribute("netif")
|
||||
peer = model.getAttribute("peer")
|
||||
key = (peer, ifname)
|
||||
# nodes and interfaces do not exist yet at this point of the parsing,
|
||||
# save (key, value) pairs for later
|
||||
try:
|
||||
#kvs = map(lambda(k, v): (int(v)), kvs)
|
||||
kvs = map(self.numericvalue, kvs)
|
||||
except ValueError:
|
||||
self.warn("error parsing link parameters for '%s' on '%s'" % \
|
||||
(ifname, peer))
|
||||
self.linkparams[key] = kvs
|
||||
|
||||
@staticmethod
|
||||
def numericvalue(keyvalue):
|
||||
(key, value) = keyvalue
|
||||
if '.' in str(value):
|
||||
value = float(value)
|
||||
else:
|
||||
value = int(value)
|
||||
return (key, value)
|
||||
|
||||
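# Example (sketch): numericvalue(("delay", "5000.0")) returns ("delay", 5000.0)
# while numericvalue(("bw", "54000")) returns ("bw", 54000); parsenetem() above
# uses this to convert netem link parameter strings into numbers.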
def parseservices(self):
|
||||
''' After node objects exist, parse service customizations and add them
|
||||
to the nodes.
|
||||
'''
|
||||
svclists = {}
|
||||
# parse services and store configs into session.services.configs
|
||||
for node in self.sp.getElementsByTagName("Node"):
|
||||
name = node.getAttribute("name")
|
||||
n = self.session.objbyname(name)
|
||||
if n is None:
|
||||
self.warn("skipping service config for unknown node '%s'" % \
|
||||
name)
|
||||
continue
|
||||
for service in node.getElementsByTagName("Service"):
|
||||
svcname = service.getAttribute("name")
|
||||
if self.parseservice(service, n):
|
||||
if n.objid in svclists:
|
||||
svclists[n.objid] += "|" + svcname
|
||||
else:
|
||||
svclists[n.objid] = svcname
|
||||
# associate nodes with services
|
||||
for objid in sorted(svclists.keys()):
|
||||
n = self.session.obj(objid)
|
||||
self.session.services.addservicestonode(node=n, nodetype=n.type,
|
||||
services_str=svclists[objid],
|
||||
verbose=self.verbose)
|
||||
|
||||
def parseservice(self, service, n):
|
||||
''' Use session.services manager to store service customizations before
|
||||
they are added to a node.
|
||||
'''
|
||||
name = service.getAttribute("name")
|
||||
svc = self.session.services.getservicebyname(name)
|
||||
if svc is None:
|
||||
return False
|
||||
values = []
|
||||
startup_idx = service.getAttribute("startup_idx")
|
||||
if startup_idx: # getAttribute() returns '' when the attribute is missing, never None
|
||||
values.append("startidx=%s" % startup_idx)
|
||||
startup_time = service.getAttribute("start_time")
|
||||
if startup_time:
|
||||
values.append("starttime=%s" % startup_time)
|
||||
dirs = []
|
||||
for dir in service.getElementsByTagName("Directory"):
|
||||
dirname = dir.getAttribute("name")
|
||||
dirs.append(dirname)
|
||||
if len(dirs):
|
||||
values.append("dirs=%s" % dirs)
|
||||
|
||||
startup = []
|
||||
shutdown = []
|
||||
validate = []
|
||||
for cmd in service.getElementsByTagName("Command"):
|
||||
type = cmd.getAttribute("type")
|
||||
cmdstr = gettextchild(cmd)
|
||||
if cmdstr is None:
|
||||
continue
|
||||
if type == "start":
|
||||
startup.append(cmdstr)
|
||||
elif type == "stop":
|
||||
shutdown.append(cmdstr)
|
||||
elif type == "validate":
|
||||
validate.append(cmdstr)
|
||||
if len(startup):
|
||||
values.append("cmdup=%s" % startup)
|
||||
if len(shutdown):
|
||||
values.append("cmddown=%s" % shutdown)
|
||||
if len(validate):
|
||||
values.append("cmdval=%s" % validate)
|
||||
|
||||
files = []
|
||||
for file in service.getElementsByTagName("File"):
|
||||
filename = file.getAttribute("name")
|
||||
files.append(filename)
|
||||
data = gettextchild(file)
|
||||
typestr = "service:%s:%s" % (name, filename)
|
||||
self.session.services.setservicefile(nodenum=n.objid, type=typestr,
|
||||
filename=filename,
|
||||
srcname=None, data=data)
|
||||
if len(files):
|
||||
values.append("files=%s" % files)
|
||||
if not bool(service.getAttribute("custom")):
|
||||
return True
|
||||
self.session.services.setcustomservice(n.objid, svc, values)
|
||||
return True
|
||||
|
||||
def parsehooks(self, hooks):
|
||||
''' Parse hook scripts from XML into session._hooks.
|
||||
'''
|
||||
for hook in hooks.getElementsByTagName("Hook"):
|
||||
filename = hook.getAttribute("name")
|
||||
state = hook.getAttribute("state")
|
||||
data = gettextchild(hook)
|
||||
if data is None:
|
||||
data = "" # allow for empty file
|
||||
type = "hook:%s" % state
|
||||
self.session.sethook(type, filename=filename,
|
||||
srcname=None, data=data)
|
||||
|
||||
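# Example (sketch) of the Hook XML consumed above; the file name, numeric
# state value, and script body are made up for illustration:
#
#   <Hooks>
#     <Hook name="myhook.sh" state="4">#!/bin/sh
#   touch /tmp/hook-ran
#     </Hook>
#   </Hooks>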
def parsemeta(self):
|
||||
opt = getoneelement(self.meta, "SessionOptions")
|
||||
if opt:
|
||||
for param in opt.getElementsByTagName("param"):
|
||||
k = str(param.getAttribute("name"))
|
||||
v = str(param.getAttribute("value"))
|
||||
if v == '':
|
||||
v = gettextchild(param) # allow attribute/text for newlines
|
||||
setattr(self.session.options, k, v)
|
||||
hooks = getoneelement(self.meta, "Hooks")
|
||||
if hooks:
|
||||
self.parsehooks(hooks)
|
||||
meta = getoneelement(self.meta, "MetaData")
|
||||
if meta:
|
||||
for param in meta.getElementsByTagName("param"):
|
||||
k = str(param.getAttribute("name"))
|
||||
v = str(param.getAttribute("value"))
|
||||
if v == '':
|
||||
v = gettextchild(param)
|
||||
self.session.metadata.additem(k, v)
|
||||
|
||||
|
||||
class CoreDocumentWriter(Document):
|
||||
''' Utility class for writing a CoreSession to XML. The init method builds
|
||||
an xml.dom.minidom.Document, and the writexml() method saves the XML file.
|
||||
'''
|
||||
def __init__(self, session):
|
||||
''' Create an empty Scenario XML Document, then populate it with
|
||||
objects from the given session.
|
||||
'''
|
||||
Document.__init__(self)
|
||||
self.session = session
|
||||
self.scenario = self.createElement("Scenario")
|
||||
self.np = self.createElement("NetworkPlan")
|
||||
self.mp = self.createElement("MotionPlan")
|
||||
self.sp = self.createElement("ServicePlan")
|
||||
self.meta = self.createElement("CoreMetaData")
|
||||
|
||||
self.appendChild(self.scenario)
|
||||
self.scenario.appendChild(self.np)
|
||||
self.scenario.appendChild(self.mp)
|
||||
self.scenario.appendChild(self.sp)
|
||||
self.scenario.appendChild(self.meta)
|
||||
|
||||
self.populatefromsession()
|
||||
|
||||
def populatefromsession(self):
|
||||
self.session.emane.setup() # not during runtime?
|
||||
self.addnets()
|
||||
self.addnodes()
|
||||
self.addmetadata()
|
||||
|
||||
def writexml(self, filename):
|
||||
self.session.info("saving session XML file %s" % filename)
|
||||
f = open(filename, "w")
|
||||
Document.writexml(self, writer=f, indent="", addindent=" ", newl="\n", \
|
||||
encoding="UTF-8")
|
||||
f.close()
|
||||
if self.session.user is not None:
|
||||
uid = pwd.getpwnam(self.session.user).pw_uid
|
||||
gid = os.stat(self.session.sessiondir).st_gid
|
||||
os.chown(filename, uid, gid)
|
||||
|
||||
def addnets(self):
|
||||
''' Add PyCoreNet objects as NetworkDefinition XML elements.
|
||||
'''
|
||||
with self.session._objslock:
|
||||
for net in self.session.objs():
|
||||
if not isinstance(net, pycore.nodes.PyCoreNet):
|
||||
continue
|
||||
self.addnet(net)
|
||||
|
||||
def addnet(self, net):
|
||||
''' Add one PyCoreNet object as a NetworkDefinition XML element.
|
||||
'''
|
||||
n = self.createElement("NetworkDefinition")
|
||||
self.np.appendChild(n)
|
||||
n.setAttribute("name", net.name)
|
||||
# could use net.brname
|
||||
n.setAttribute("id", "%s" % net.objid)
|
||||
n.setAttribute("type", "%s" % net.__class__.__name__)
|
||||
self.addnetinterfaces(n, net)
|
||||
# key used with tunnel node
|
||||
if hasattr(net, 'grekey') and net.grekey is not None:
|
||||
n.setAttribute("key", "%s" % net.grekey)
|
||||
# link parameters
|
||||
for netif in net.netifs(sort=True):
|
||||
self.addnetem(n, netif)
|
||||
# wireless/mobility models
|
||||
modelconfigs = net.session.mobility.getmodels(net)
|
||||
modelconfigs += net.session.emane.getmodels(net)
|
||||
self.addmodels(n, modelconfigs)
|
||||
self.addposition(net)
|
||||
|
||||
def addnetem(self, n, netif):
|
||||
''' Similar to addmodels(); used for writing netem link effects
|
||||
parameters. TODO: Interface parameters should be moved to the model
|
||||
construct, then this separate method shouldn't be required.
|
||||
'''
|
||||
if not hasattr(netif, "node") or netif.node is None:
|
||||
return
|
||||
params = netif.getparams()
|
||||
if len(params) == 0:
|
||||
return
|
||||
model = self.createElement("model")
|
||||
model.setAttribute("name", "netem")
|
||||
model.setAttribute("netif", netif.name)
|
||||
model.setAttribute("peer", netif.node.name)
|
||||
has_params = False
|
||||
for k, v in params:
|
||||
# default netem parameters are 0 or None
|
||||
if v is None or v == 0:
|
||||
continue
|
||||
if k == "has_netem" or k == "has_tbf":
|
||||
continue
|
||||
key = self.createElement(k)
|
||||
key.appendChild(self.createTextNode("%s" % v))
|
||||
model.appendChild(key)
|
||||
has_params = True
|
||||
if has_params:
|
||||
n.appendChild(model)
|
||||
|
||||
def addmodels(self, n, configs):
|
||||
''' Add models from a list of model-class, config values tuples.
|
||||
'''
|
||||
for (m, conf) in configs:
|
||||
model = self.createElement("model")
|
||||
n.appendChild(model)
|
||||
model.setAttribute("name", m._name)
|
||||
type = "wireless"
|
||||
if m._type == coreapi.CORE_TLV_REG_MOBILITY:
|
||||
type = "mobility"
|
||||
model.setAttribute("type", type)
|
||||
for i, k in enumerate(m.getnames()):
|
||||
key = self.createElement(k)
|
||||
value = conf[i]
|
||||
if value is None:
|
||||
value = ""
|
||||
key.appendChild(self.createTextNode("%s" % value))
|
||||
model.appendChild(key)
|
||||
|
||||
def addnodes(self):
|
||||
''' Add PyCoreNode objects as node XML elements.
|
||||
'''
|
||||
with self.session._objslock:
|
||||
for node in self.session.objs():
|
||||
if not isinstance(node, pycore.nodes.PyCoreNode):
|
||||
continue
|
||||
self.addnode(node)
|
||||
|
||||
def addnode(self, node):
|
||||
''' Add a PyCoreNode object as a Node XML element.
|
||||
'''
|
||||
n = self.createElement("Node")
|
||||
self.np.appendChild(n)
|
||||
n.setAttribute("name", node.name)
|
||||
n.setAttribute("id", "%s" % node.nodeid())
|
||||
if node.type:
|
||||
n.setAttribute("type", node.type)
|
||||
self.addinterfaces(n, node)
|
||||
self.addposition(node)
|
||||
addparamtoparent(self, n, "icon", node.icon)
|
||||
addparamtoparent(self, n, "canvas", node.canvas)
|
||||
self.addservices(node)
|
||||
|
||||
def addinterfaces(self, n, node):
|
||||
''' Add PyCoreNetIfs to node XML elements.
|
||||
'''
|
||||
for ifc in node.netifs(sort=True):
|
||||
i = self.createElement("interface")
|
||||
n.appendChild(i)
|
||||
i.setAttribute("name", ifc.name)
|
||||
netmodel = None
|
||||
if ifc.net:
|
||||
i.setAttribute("net", ifc.net.name)
|
||||
if hasattr(ifc.net, "model"):
|
||||
netmodel = ifc.net.model
|
||||
if ifc.mtu and ifc.mtu != 1500:
|
||||
i.setAttribute("mtu", "%s" % ifc.mtu)
|
||||
# could use ifc.params, transport_type
|
||||
self.addaddresses(i, ifc)
|
||||
# per-interface models
|
||||
if netmodel and netmodel._name[:6] == "emane_":
|
||||
cfg = self.session.emane.getifcconfig(node.objid, netmodel._name,
|
||||
None, ifc)
|
||||
if cfg:
|
||||
self.addmodels(i, ((netmodel, cfg),) )
|
||||
|
||||
|
||||
def addnetinterfaces(self, n, net):
|
||||
''' Similar to addinterfaces(), but only adds interface elements to the
|
||||
supplied XML node that would not otherwise appear in the Node elements.
|
||||
These are any interfaces that link two switches/hubs together.
|
||||
'''
|
||||
for ifc in net.netifs(sort=True):
|
||||
if not hasattr(ifc, "othernet") or not ifc.othernet:
|
||||
continue
|
||||
if net.objid == ifc.net.objid:
|
||||
continue
|
||||
i = self.createElement("interface")
|
||||
n.appendChild(i)
|
||||
i.setAttribute("name", ifc.name)
|
||||
if ifc.net:
|
||||
i.setAttribute("net", ifc.net.name)
|
||||
|
||||
def addposition(self, node):
|
||||
''' Add object coordinates as location XML element.
|
||||
'''
|
||||
(x,y,z) = node.position.get()
|
||||
if x is None or y is None:
|
||||
return
|
||||
# <Node name="n1">
|
||||
mpn = self.createElement("Node")
|
||||
mpn.setAttribute("name", node.name)
|
||||
self.mp.appendChild(mpn)
|
||||
|
||||
# <motion type="stationary">
|
||||
motion = self.createElement("motion")
|
||||
motion.setAttribute("type", "stationary")
|
||||
mpn.appendChild(motion)
|
||||
|
||||
# <point>$X$,$Y$,$Z$</point>
|
||||
pt = self.createElement("point")
|
||||
motion.appendChild(pt)
|
||||
coordstxt = "%s,%s" % (x,y)
|
||||
if z:
|
||||
coordstxt += ",%s" % z
|
||||
coords = self.createTextNode(coordstxt)
|
||||
pt.appendChild(coords)
|
||||
|
||||
def addservices(self, node):
|
||||
''' Add services and their customizations to the ServicePlan.
|
||||
'''
|
||||
if len(node.services) == 0:
|
||||
return
|
||||
defaults = self.session.services.getdefaultservices(node.type)
|
||||
if node.services == defaults:
|
||||
return
|
||||
spn = self.createElement("Node")
|
||||
spn.setAttribute("name", node.name)
|
||||
self.sp.appendChild(spn)
|
||||
|
||||
for svc in node.services:
|
||||
s = self.createElement("Service")
|
||||
spn.appendChild(s)
|
||||
s.setAttribute("name", str(svc._name))
|
||||
s.setAttribute("startup_idx", str(svc._startindex))
|
||||
if svc._starttime != "":
|
||||
s.setAttribute("start_time", str(svc._starttime))
|
||||
# only record service names if not a customized service
|
||||
if not svc._custom:
|
||||
continue
|
||||
s.setAttribute("custom", str(svc._custom))
|
||||
addelementsfromlist(self, s, svc._dirs, "Directory", "name")
|
||||
|
||||
for fn in svc._configs:
|
||||
if len(fn) == 0:
|
||||
continue
|
||||
f = self.createElement("File")
|
||||
f.setAttribute("name", fn)
|
||||
# all file names are added to determine when a file has been deleted
|
||||
s.appendChild(f)
|
||||
data = self.session.services.getservicefiledata(svc, fn)
|
||||
if data is None:
|
||||
# this includes only customized file contents and skips
|
||||
# the auto-generated files
|
||||
continue
|
||||
txt = self.createTextNode(data)
|
||||
f.appendChild(txt)
|
||||
|
||||
addtextelementsfromlist(self, s, svc._startup, "Command",
|
||||
(("type","start"),))
|
||||
addtextelementsfromlist(self, s, svc._shutdown, "Command",
|
||||
(("type","stop"),))
|
||||
addtextelementsfromlist(self, s, svc._validate, "Command",
|
||||
(("type","validate"),))
|
||||
|
||||
def addaddresses(self, i, netif):
|
||||
''' Add MAC and IP addresses to interface XML elements.
|
||||
'''
|
||||
if netif.hwaddr:
|
||||
h = self.createElement("address")
|
||||
i.appendChild(h)
|
||||
h.setAttribute("type", "mac")
|
||||
htxt = self.createTextNode("%s" % netif.hwaddr)
|
||||
h.appendChild(htxt)
|
||||
for addr in netif.addrlist:
|
||||
a = self.createElement("address")
|
||||
i.appendChild(a)
|
||||
# a.setAttribute("type", )
|
||||
atxt = self.createTextNode("%s" % addr)
|
||||
a.appendChild(atxt)
|
||||
|
||||
def addhooks(self):
|
||||
''' Add hook script XML elements to the metadata tag.
|
||||
'''
|
||||
hooks = self.createElement("Hooks")
|
||||
for state in sorted(self.session._hooks.keys()):
|
||||
for (filename, data) in self.session._hooks[state]:
|
||||
hook = self.createElement("Hook")
|
||||
hook.setAttribute("name", filename)
|
||||
hook.setAttribute("state", str(state))
|
||||
txt = self.createTextNode(data)
|
||||
hook.appendChild(txt)
|
||||
hooks.appendChild(hook)
|
||||
if hooks.hasChildNodes():
|
||||
self.meta.appendChild(hooks)
|
||||
|
||||
def addmetadata(self):
|
||||
''' Add CORE-specific session meta-data XML elements.
|
||||
'''
|
||||
# options
|
||||
options = self.createElement("SessionOptions")
|
||||
defaults = self.session.options.getdefaultvalues()
|
||||
for i, (k, v) in enumerate(self.session.options.getkeyvaluelist()):
|
||||
if str(v) != str(defaults[i]):
|
||||
addtextparamtoparent(self, options, k, v)
|
||||
#addparamtoparent(self, options, k, v)
|
||||
if options.hasChildNodes():
|
||||
self.meta.appendChild(options)
|
||||
# hook scripts
|
||||
self.addhooks()
|
||||
# meta
|
||||
meta = self.createElement("MetaData")
|
||||
self.meta.appendChild(meta)
|
||||
for (k, v) in self.session.metadata.items():
|
||||
addtextparamtoparent(self, meta, k, v)
|
||||
#addparamtoparent(self, meta, k, v)
|
||||
|
||||
def opensessionxml(session, filename):
|
||||
''' Import a session from the EmulationScript XML format.
|
||||
'''
|
||||
doc = CoreDocumentParser(session, filename)
|
||||
|
||||
def savesessionxml(session, filename):
|
||||
''' Export a session to the EmulationScript XML format.
|
||||
'''
|
||||
doc = CoreDocumentWriter(session)
|
||||
doc.writexml(filename)
|
||||
|
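# Illustrative usage sketch (assumes an existing pycore Session object named
# 'session'; the path is made up):
#
#   savesessionxml(session, "/tmp/scenario.xml")  # export to EmulationScript XML
#   opensessionxml(session, "/tmp/scenario.xml")  # parse it back into a session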
929
daemon/core/mobility.py
Normal file
|
@ -0,0 +1,929 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2011-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
mobility.py: mobility helpers for moving nodes and calculating wireless range.
|
||||
'''
|
||||
import sys, os, time, string, math, threading
|
||||
import heapq
|
||||
from core.api import coreapi
|
||||
from core.conf import ConfigurableManager, Configurable
|
||||
from core.coreobj import PyCoreNode
|
||||
from core.misc.utils import check_call
|
||||
from core.misc.ipaddr import IPAddr
|
||||
|
||||
class MobilityManager(ConfigurableManager):
|
||||
''' Member of session class for handling configuration data for mobility and
|
||||
range models.
|
||||
'''
|
||||
_name = "MobilityManager"
|
||||
_type = coreapi.CORE_TLV_REG_WIRELESS
|
||||
|
||||
def __init__(self, session):
|
||||
ConfigurableManager.__init__(self, session)
|
||||
self.verbose = self.session.getcfgitembool('verbose', False)
|
||||
# configurations for basic range, indexed by WLAN node number, are
|
||||
# stored in self.configs
|
||||
# mapping from model names to their classes
|
||||
self._modelclsmap = {}
|
||||
# dummy node objects for tracking position of nodes on other servers
|
||||
self.phys = {}
|
||||
self.physnets = {}
|
||||
self.session.broker.handlers += (self.physnodehandlelink, )
|
||||
self.register()
|
||||
|
||||
def startup(self):
|
||||
''' Session is transitioning from instantiation to runtime state.
|
||||
Instantiate any mobility models that have been configured for a WLAN.
|
||||
'''
|
||||
for nodenum in self.configs:
|
||||
v = self.configs[nodenum]
|
||||
try:
|
||||
n = self.session.obj(nodenum)
|
||||
except KeyError:
|
||||
self.session.warn("Skipping mobility configuration for unknown"
|
||||
"node %d." % nodenum)
|
||||
continue
|
||||
for model in v:
|
||||
try:
|
||||
cls = self._modelclsmap[model[0]]
|
||||
except KeyError:
|
||||
self.session.warn("Skipping mobility configuration for "
|
||||
"unknown model '%s'" % model[0])
|
||||
continue
|
||||
n.setmodel(cls, model[1])
|
||||
if self.session.master:
|
||||
self.installphysnodes(n)
|
||||
if n.mobility:
|
||||
self.session.evq.add_event(0.0, n.mobility.startup)
|
||||
|
||||
|
||||
def reset(self):
|
||||
''' Reset all configs.
|
||||
'''
|
||||
self.clearconfig(nodenum=None)
|
||||
|
||||
def register(self):
|
||||
''' Register models as configurable object(s) with the Session object.
|
||||
'''
|
||||
models = [BasicRangeModel, Ns2ScriptedMobility]
|
||||
for m in models:
|
||||
self.session.addconfobj(m._name, m._type, m.configure_mob)
|
||||
self._modelclsmap[m._name] = m
|
||||
|
||||
def handleevent(self, msg):
|
||||
''' Handle an Event Message used to start, stop, or pause
|
||||
mobility scripts for a given WlanNode.
|
||||
'''
|
||||
eventtype = msg.gettlv(coreapi.CORE_TLV_EVENT_TYPE)
|
||||
nodenum = msg.gettlv(coreapi.CORE_TLV_EVENT_NODE)
|
||||
name = msg.gettlv(coreapi.CORE_TLV_EVENT_NAME)
|
||||
try:
|
||||
node = self.session.obj(nodenum)
|
||||
except KeyError:
|
||||
self.session.warn("Ignoring event for model '%s', unknown node " \
|
||||
"'%s'" % (name, nodenum))
|
||||
return
|
||||
|
||||
# name is e.g. "mobility:ns2script"
|
||||
models = name[9:].split(',')
|
||||
for m in models:
|
||||
try:
|
||||
cls = self._modelclsmap[m]
|
||||
except KeyError:
|
||||
self.session.warn("Ignoring event for unknown model '%s'" % m)
|
||||
continue
|
||||
_name = "waypoint"
|
||||
if cls._type == coreapi.CORE_TLV_REG_WIRELESS:
|
||||
model = node.mobility
|
||||
elif cls._type == coreapi.CORE_TLV_REG_MOBILITY:
|
||||
model = node.mobility
|
||||
else:
|
||||
continue
|
||||
if model is None:
|
||||
self.session.warn("Ignoring event, %s has no model" % node.name)
|
||||
continue
|
||||
if cls._name != model._name:
|
||||
self.session.warn("Ignoring event for %s wrong model %s,%s" % \
|
||||
(node.name, cls._name, model._name))
|
||||
continue
|
||||
|
||||
if eventtype == coreapi.CORE_EVENT_STOP or \
|
||||
eventtype == coreapi.CORE_EVENT_RESTART:
|
||||
model.stop(move_initial=True)
|
||||
if eventtype == coreapi.CORE_EVENT_START or \
|
||||
eventtype == coreapi.CORE_EVENT_RESTART:
|
||||
model.start()
|
||||
if eventtype == coreapi.CORE_EVENT_PAUSE:
|
||||
model.pause()
|
||||
|
||||
def sendevent(self, model):
|
||||
''' Send an event message on behalf of a mobility model.
|
||||
This communicates the current and end (max) times to the GUI.
|
||||
'''
|
||||
if model.state == model.STATE_STOPPED:
|
||||
eventtype = coreapi.CORE_EVENT_STOP
|
||||
elif model.state == model.STATE_RUNNING:
|
||||
eventtype = coreapi.CORE_EVENT_START
|
||||
elif model.state == model.STATE_PAUSED:
|
||||
eventtype = coreapi.CORE_EVENT_PAUSE
|
||||
data = "start=%d" % int(model.lasttime - model.timezero)
|
||||
data += " end=%d" % int(model.endtime)
|
||||
tlvdata = ""
|
||||
tlvdata += coreapi.CoreEventTlv.pack(coreapi.CORE_TLV_EVENT_NODE,
|
||||
model.objid)
|
||||
tlvdata += coreapi.CoreEventTlv.pack(coreapi.CORE_TLV_EVENT_TYPE,
|
||||
eventtype)
|
||||
tlvdata += coreapi.CoreEventTlv.pack(coreapi.CORE_TLV_EVENT_NAME,
|
||||
"mobility:%s" % model._name)
|
||||
tlvdata += coreapi.CoreEventTlv.pack(coreapi.CORE_TLV_EVENT_DATA,
|
||||
data)
|
||||
tlvdata += coreapi.CoreEventTlv.pack(coreapi.CORE_TLV_EVENT_TIME,
|
||||
"%s" % time.time())
|
||||
msg = coreapi.CoreEventMessage.pack(0, tlvdata)
|
||||
try:
|
||||
self.session.broadcastraw(None, msg)
|
||||
except Exception, e:
|
||||
self.warn("Error sending Event Message: %s" % e)
|
||||
|
||||
def updatewlans(self, moved, moved_netifs):
|
||||
''' A mobility script has caused nodes in the 'moved' list to move.
|
||||
Update every WlanNode. This saves range calculations if the model
|
||||
were to recalculate for each individual node movement.
|
||||
'''
|
||||
for nodenum in self.configs:
|
||||
try:
|
||||
n = self.session.obj(nodenum)
|
||||
except KeyError:
|
||||
continue
|
||||
if n.model:
|
||||
n.model.update(moved, moved_netifs)
|
||||
|
||||
def addphys(self, netnum, node):
|
||||
''' Keep track of PhysicalNodes and which network they belong to.
|
||||
'''
|
||||
nodenum = node.objid
|
||||
self.phys[nodenum] = node
|
||||
if netnum not in self.physnets:
|
||||
self.physnets[netnum] = [nodenum,]
|
||||
else:
|
||||
self.physnets[netnum].append(nodenum)
|
||||
|
||||
def physnodehandlelink(self, msg):
|
||||
''' Broker handler. Snoop Link add messages to get
|
||||
node numbers of PhysicalNodes and their nets.
|
||||
Physical nodes exist only on other servers, but a shadow object is
|
||||
created here for tracking node position.
|
||||
'''
|
||||
if msg.msgtype == coreapi.CORE_API_LINK_MSG and \
|
||||
msg.flags & coreapi.CORE_API_ADD_FLAG:
|
||||
nn = msg.nodenumbers()
|
||||
# first node is always link layer node in Link add message
|
||||
if nn[0] not in self.session.broker.nets:
|
||||
return
|
||||
if nn[1] in self.session.broker.phys:
|
||||
# record the fact that this PhysicalNode is linked to a net
|
||||
dummy = PyCoreNode(session=self.session, objid=nn[1],
|
||||
name="n%d" % nn[1], start=False)
|
||||
self.addphys(nn[0], dummy)
|
||||
|
||||
def physnodeupdateposition(self, msg):
|
||||
''' Snoop node messages belonging to physical nodes. The dummy object
|
||||
in self.phys[] records the node position.
|
||||
'''
|
||||
nodenum = msg.nodenumbers()[0]
|
||||
try:
|
||||
dummy = self.phys[nodenum]
|
||||
nodexpos = msg.gettlv(coreapi.CORE_TLV_NODE_XPOS)
|
||||
nodeypos = msg.gettlv(coreapi.CORE_TLV_NODE_YPOS)
|
||||
dummy.setposition(nodexpos, nodeypos, None)
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
def installphysnodes(self, net):
|
||||
''' After installing a mobility model on a net, include any physical
|
||||
nodes that we have recorded. Use the GreTap tunnel to the physical node
|
||||
as the node's interface.
|
||||
'''
|
||||
try:
|
||||
nodenums = self.physnets[net.objid]
|
||||
except KeyError:
|
||||
return
|
||||
for nodenum in nodenums:
|
||||
node = self.phys[nodenum]
|
||||
servers = self.session.broker.getserversbynode(nodenum)
|
||||
(host, port, sock) = self.session.broker.getserver(servers[0])
|
||||
netif = self.session.broker.gettunnel(net.objid, IPAddr.toint(host))
|
||||
node.addnetif(netif, 0)
|
||||
netif.node = node
|
||||
(x,y,z) = netif.node.position.get()
|
||||
netif.poshook(netif, x, y, z)
|
||||
|
||||
|
||||
class WirelessModel(Configurable):
|
||||
''' Base class used by EMANE models and the basic range model.
|
||||
Used for managing arbitrary configuration parameters.
|
||||
'''
|
||||
_type = coreapi.CORE_TLV_REG_WIRELESS
|
||||
_bitmap = None
|
||||
_positioncallback = None
|
||||
|
||||
def __init__(self, session, objid, verbose = False, values = None):
|
||||
Configurable.__init__(self, session, objid)
|
||||
self.verbose = verbose
|
||||
# 'values' can be retrieved from a ConfigurableManager, or used here
|
||||
# during initialization, depending on the model.
|
||||
|
||||
def tolinkmsgs(self, flags):
|
||||
''' May be used if the model can populate the GUI with wireless (green)
|
||||
link lines.
|
||||
'''
|
||||
return []
|
||||
|
||||
def update(self, moved, moved_netifs):
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
class BasicRangeModel(WirelessModel):
|
||||
''' Basic Range wireless model, calculates range between nodes and links
|
||||
and unlinks nodes based on this distance. This was formerly done from
|
||||
the GUI.
|
||||
'''
|
||||
_name = "basic_range"
|
||||
|
||||
# configuration parameters are
|
||||
# ( 'name', 'type', 'default', 'possible-value-list', 'caption')
|
||||
_confmatrix = [
|
||||
("range", coreapi.CONF_DATA_TYPE_UINT32, '275',
|
||||
'', 'wireless range (pixels)'),
|
||||
("bandwidth", coreapi.CONF_DATA_TYPE_UINT32, '54000',
|
||||
'', 'bandwidth (bps)'),
|
||||
("jitter", coreapi.CONF_DATA_TYPE_FLOAT, '0.0',
|
||||
'', 'transmission jitter (usec)'),
|
||||
("delay", coreapi.CONF_DATA_TYPE_FLOAT, '5000.0',
|
||||
'', 'transmission delay (usec)'),
|
||||
("error", coreapi.CONF_DATA_TYPE_FLOAT, '0.0',
|
||||
'', 'error rate (%)'),
|
||||
]
|
||||
|
||||
# value groupings
|
||||
_confgroups = "Basic Range Parameters:1-%d" % len(_confmatrix)
|
||||
|
||||
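# Example (sketch): the first _confmatrix row above declares a "range"
# parameter of type UINT32 with default '275'; __init__() below reads it back
# with self.valueof("range", values) and converts it to a float.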
def __init__(self, session, objid, verbose = False, values=None):
|
||||
''' Range model is only instantiated during runtime.
|
||||
'''
|
||||
super(BasicRangeModel, self).__init__(session = session, objid = objid,
|
||||
verbose = verbose)
|
||||
self.wlan = session.obj(objid)
|
||||
self._netifs = {}
|
||||
self._netifslock = threading.Lock()
|
||||
if values is None:
|
||||
values = session.mobility.getconfig(objid, self._name,
|
||||
self.getdefaultvalues())[1]
|
||||
self.range = float(self.valueof("range", values))
|
||||
if self.verbose:
|
||||
self.session.info("Basic range model configured for WLAN %d using" \
|
||||
" range %d" % (objid, self.range))
|
||||
self.bw = int(self.valueof("bandwidth", values))
|
||||
if self.bw == 0.0:
|
||||
self.bw = None
|
||||
self.delay = float(self.valueof("delay", values))
|
||||
if self.delay == 0.0:
|
||||
self.delay = None
|
||||
self.loss = float(self.valueof("error", values))
|
||||
if self.loss == 0.0:
|
||||
self.loss = None
|
||||
self.jitter = float(self.valueof("jitter", values))
|
||||
if self.jitter == 0.0:
|
||||
self.jitter = None
|
||||
|
||||
@classmethod
|
||||
def configure_mob(cls, session, msg):
|
||||
''' Handle configuration messages for setting up a model.
|
||||
Pass the MobilityManager object as the manager object.
|
||||
'''
|
||||
return cls.configure(session.mobility, msg)
|
||||
|
||||
def setlinkparams(self):
|
||||
''' Apply link parameters to all interfaces. This is invoked from
|
||||
WlanNode.setmodel() after the position callback has been set.
|
||||
'''
|
||||
with self._netifslock:
|
||||
for netif in self._netifs:
|
||||
self.wlan.linkconfig(netif, bw=self.bw, delay=self.delay,
|
||||
loss=self.loss, duplicate=None,
|
||||
jitter=self.jitter)
|
||||
|
||||
def get_position(self, netif):
|
||||
with self._netifslock:
|
||||
return self._netifs[netif]
|
||||
|
||||
def set_position(self, netif, x = None, y = None, z = None):
|
||||
''' A node has moved; given an interface, a new (x,y,z) position has
|
||||
been set; calculate the new distance between other nodes and link or
|
||||
unlink node pairs based on the configured range.
|
||||
'''
|
||||
#print "set_position(%s, x=%s, y=%s, z=%s)" % (netif.localname, x, y, z)
|
||||
self._netifslock.acquire()
|
||||
self._netifs[netif] = (x, y, z)
|
||||
if x is None or y is None:
|
||||
self._netifslock.release()
|
||||
return
|
||||
for netif2 in self._netifs:
|
||||
self.calclink(netif, netif2)
|
||||
self._netifslock.release()
|
||||
|
||||
_positioncallback = set_position
|
||||
|
||||
def update(self, moved, moved_netifs):
|
||||
''' Node positions have changed without recalc. Update positions from
|
||||
node.position, then re-calculate links for those that have moved.
|
||||
Assumes bidirectional links, with one calculation per node pair, where
|
||||
one of the nodes has moved.
|
||||
'''
|
||||
with self._netifslock:
|
||||
while len(moved_netifs):
|
||||
netif = moved_netifs.pop()
|
||||
(nx, ny, nz) = netif.node.getposition()
|
||||
if netif in self._netifs:
|
||||
self._netifs[netif] = (nx, ny, nz)
|
||||
for netif2 in self._netifs:
|
||||
if netif2 in moved_netifs:
|
||||
continue
|
||||
self.calclink(netif, netif2)
|
||||
|
||||
def calclink(self, netif, netif2):
|
||||
''' Helper used by set_position() and update() to
|
||||
calculate distance between two interfaces and perform
|
||||
linking/unlinking. Sends link/unlink messages and updates the
|
||||
WlanNode's linked dict.
|
||||
'''
|
||||
if netif == netif2:
|
||||
return
|
||||
(x, y, z) = self._netifs[netif]
|
||||
(x2, y2, z2) = self._netifs[netif2]
|
||||
if x2 is None or y2 is None:
|
||||
return
|
||||
|
||||
d = self.calcdistance( (x,y,z), (x2,y2,z2) )
|
||||
# ordering is important, to keep the wlan._linked dict organized
|
||||
a = min(netif, netif2)
|
||||
b = max(netif, netif2)
|
||||
try:
|
||||
self.wlan._linked_lock.acquire()
|
||||
linked = self.wlan.linked(a, b)
|
||||
except KeyError:
|
||||
return
|
||||
finally:
|
||||
self.wlan._linked_lock.release()
|
||||
if d > self.range:
|
||||
if linked:
|
||||
self.wlan.unlink(a, b)
|
||||
self.sendlinkmsg(a, b, unlink=True)
|
||||
else:
|
||||
if not linked:
|
||||
self.wlan.link(a, b)
|
||||
self.sendlinkmsg(a, b)
|
||||
|
||||
|
||||
def calcdistance(self, p1, p2):
|
||||
''' Calculate the distance between two three-dimensional points.
|
||||
'''
|
||||
a = p1[0] - p2[0]
|
||||
b = p1[1] - p2[1]
|
||||
c = 0
|
||||
if p1[2] is not None and p2[2] is not None:
|
||||
c = p1[2] - p2[2]
|
||||
return math.hypot(math.hypot(a, b), c)
|
||||
|
||||
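# Example (sketch): calcdistance((0, 0, 0), (3, 4, None)) == 5.0 because a
# missing z coordinate contributes zero, while calcdistance((0, 0, 0), (3, 4, 12))
# returns 13.0.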
def linkmsg(self, netif, netif2, flags):
|
||||
''' Create a wireless link/unlink API message.
|
||||
'''
|
||||
n1 = netif.localname.split('.')[0]
|
||||
n2 = netif2.localname.split('.')[0]
|
||||
tlvdata = coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_N1NUMBER,
|
||||
netif.node.objid)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_N2NUMBER,
|
||||
netif2.node.objid)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_NETID,
|
||||
self.wlan.objid)
|
||||
#tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_IF1NUM,
|
||||
# netif.index)
|
||||
#tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_IF2NUM,
|
||||
# netif2.index)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_TYPE,
|
||||
coreapi.CORE_LINK_WIRELESS)
|
||||
return coreapi.CoreLinkMessage.pack(flags, tlvdata)
|
||||
|
||||
def sendlinkmsg(self, netif, netif2, unlink=False):
|
||||
''' Send a wireless link/unlink API message to the GUI.
|
||||
'''
|
||||
if unlink:
|
||||
flags = coreapi.CORE_API_DEL_FLAG
|
||||
else:
|
||||
flags = coreapi.CORE_API_ADD_FLAG
|
||||
msg = self.linkmsg(netif, netif2, flags)
|
||||
self.session.broadcastraw(src=None, data=msg)
|
||||
self.session.sdt.updatelink(netif.node.objid, netif2.node.objid, flags,
|
||||
wireless=True)
|
||||
|
||||
def tolinkmsgs(self, flags):
|
||||
''' Return a list of wireless link messages for when the GUI reconnects.
|
||||
'''
|
||||
r = []
|
||||
with self.wlan._linked_lock:
|
||||
for a in self.wlan._linked:
|
||||
for b in self.wlan._linked[a]:
|
||||
if self.wlan._linked[a][b]:
|
||||
r.append(self.linkmsg(a, b, flags))
|
||||
return r
|
||||
|
||||
class WayPointMobility(WirelessModel):
|
||||
''' Abstract class for mobility models that set node waypoints.
|
||||
'''
|
||||
_name = "waypoint"
|
||||
_type = coreapi.CORE_TLV_REG_MOBILITY
|
||||
|
||||
STATE_STOPPED = 0
|
||||
STATE_RUNNING = 1
|
||||
STATE_PAUSED = 2
|
||||
|
||||
class WayPoint(object):
|
||||
def __init__(self, time, nodenum, coords, speed):
|
||||
self.time = time
|
||||
self.nodenum = nodenum
|
||||
self.coords = coords
|
||||
self.speed = speed
|
||||
|
||||
def __cmp__(self, other):
|
||||
tmp = cmp(self.time, other.time)
|
||||
if tmp == 0:
|
||||
tmp = cmp(self.nodenum, other.nodenum)
|
||||
return tmp
|
||||
|
||||
def __init__(self, session, objid, verbose = False, values = None):
|
||||
super(WayPointMobility, self).__init__(session = session, objid = objid,
|
||||
verbose = verbose, values = values)
|
||||
self.state = self.STATE_STOPPED
|
||||
self.queue = []
|
||||
self.queue_copy = []
|
||||
self.points = {}
|
||||
self.initial = {}
|
||||
self.lasttime = None
|
||||
self.endtime = None
|
||||
self.wlan = session.obj(objid)
|
||||
# these are really set in child class via confmatrix
|
||||
self.loop = False
|
||||
self.refresh_ms = 50
|
||||
# flag whether to stop scheduling when queue is empty
|
||||
# (ns-3 sets this to False as new waypoints may be added from trace)
|
||||
self.empty_queue_stop = True
|
||||
|
||||
def runround(self):
|
||||
''' Advance script time and move nodes.
|
||||
'''
|
||||
if self.state != self.STATE_RUNNING:
|
||||
return
|
||||
t = self.lasttime
|
||||
self.lasttime = time.time()
|
||||
now = self.lasttime - self.timezero
|
||||
dt = self.lasttime - t
|
||||
#print "runround(now=%.2f, dt=%.2f)" % (now, dt)
|
||||
|
||||
# keep current waypoints up-to-date
|
||||
self.updatepoints(now)
|
||||
|
||||
if not len(self.points):
|
||||
if len(self.queue):
|
||||
# more future waypoints, allow time for self.lasttime update
|
||||
nexttime = self.queue[0].time - now
|
||||
if nexttime > (0.001 * self.refresh_ms):
|
||||
nexttime -= (0.001 * self.refresh_ms)
|
||||
self.session.evq.add_event(nexttime, self.runround)
|
||||
return
|
||||
else:
|
||||
# no more waypoints or queued items, loop?
|
||||
if not self.empty_queue_stop:
|
||||
# keep running every refresh_ms, even with empty queue
|
||||
self.session.evq.add_event(0.001 * self.refresh_ms, self.runround)
|
||||
return
|
||||
if not self.loopwaypoints():
|
||||
return self.stop(move_initial=False)
|
||||
if not len(self.queue):
|
||||
# prevent busy loop
|
||||
return
|
||||
return self.run()
|
||||
|
||||
# only move netifs attached to self.wlan, or all nodenum in script?
|
||||
moved = []
|
||||
moved_netifs = []
|
||||
for netif in self.wlan.netifs():
|
||||
node = netif.node
|
||||
if self.movenode(node, dt):
|
||||
moved.append(node)
|
||||
moved_netifs.append(netif)
|
||||
|
||||
# calculate all ranges after moving nodes; this saves calculations
|
||||
#self.wlan.model.update(moved)
|
||||
self.session.mobility.updatewlans(moved, moved_netifs)
|
||||
|
||||
# TODO: check session state
|
||||
self.session.evq.add_event(0.001 * self.refresh_ms, self.runround)
|
||||
|
||||
def run(self):
|
||||
self.timezero = time.time()
|
||||
self.lasttime = self.timezero - (0.001 * self.refresh_ms)
|
||||
self.movenodesinitial()
|
||||
self.runround()
|
||||
self.session.mobility.sendevent(self)
|
||||
|
||||
def movenode(self, node, dt):
|
||||
''' Calculate next node location and update its coordinates.
|
||||
Returns True if the node's position has changed.
|
||||
'''
|
||||
if node.objid not in self.points:
|
||||
return False
|
||||
x1, y1, z1 = node.getposition()
|
||||
x2, y2, z2 = self.points[node.objid].coords
|
||||
speed = self.points[node.objid].speed
|
||||
# instantaneous move (prevents dx/dy == 0.0 below)
|
||||
if speed == 0:
|
||||
self.setnodeposition(node, x2, y2, z2)
|
||||
del self.points[node.objid]
|
||||
return True
|
||||
# speed can be a velocity vector (ns3 mobility) or speed value
|
||||
if isinstance(speed, (float, int)):
|
||||
# linear speed value
|
||||
alpha = math.atan2(y2 - y1, x2 - x1)
|
||||
sx = speed * math.cos(alpha)
|
||||
sy = speed * math.sin(alpha)
|
||||
else:
|
||||
# velocity vector
|
||||
sx = speed[0]
|
||||
sy = speed[1]
|
||||
|
||||
# calculate dt * speed = distance moved
|
||||
dx = sx * dt
|
||||
dy = sy * dt
|
||||
# prevent overshoot
|
||||
if abs(dx) > abs(x2 - x1):
|
||||
dx = x2 - x1
|
||||
if abs(dy) > abs(y2 - y1):
|
||||
dy = y2 - y1
|
||||
if dx == 0.0 and dy == 0.0:
|
||||
if self.endtime < (self.lasttime - self.timezero):
|
||||
# the last node to reach the last waypoint determines this
|
||||
# script's endtime
|
||||
self.endtime = self.lasttime - self.timezero
|
||||
del self.points[node.objid]
|
||||
return False
|
||||
#print "node %s dx,dy= <%s, %d>" % (node.name, dx, dy)
|
||||
if (x1 + dx) < 0.0:
|
||||
dx = 0.0 - x1
|
||||
if (y1 + dy) < 0.0:
|
||||
dy = 0.0 - y1
|
||||
self.setnodeposition(node, x1 + dx, y1 + dy, z1)
|
||||
return True
|
||||
|
||||
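# Example (sketch): a node at (0, 0) heading to a waypoint at (100, 0) with
# speed 25 moves dx = 25 * dt per round; with refresh_ms = 50 that is roughly
# 1.25 units every 50 ms, and dx/dy are clamped above so the node never
# overshoots the waypoint.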
def movenodesinitial(self):
|
||||
''' Move nodes to their initial positions. Then calculate the ranges.
|
||||
'''
|
||||
moved = []
|
||||
moved_netifs = []
|
||||
for netif in self.wlan.netifs():
|
||||
node = netif.node
|
||||
if node.objid not in self.initial:
|
||||
continue
|
||||
(x, y, z) = self.initial[node.objid].coords
|
||||
self.setnodeposition(node, x, y, z)
|
||||
moved.append(node)
|
||||
moved_netifs.append(netif)
|
||||
#self.wlan.model.update(moved)
|
||||
self.session.mobility.updatewlans(moved, moved_netifs)
|
||||
|
||||
def addwaypoint(self, time, nodenum, x, y, z, speed):
|
||||
''' Waypoints are pushed to a heapq, sorted by time.
|
||||
'''
|
||||
#print "addwaypoint: %s %s %s,%s,%s %s" % (time, nodenum, x, y, z, speed)
|
||||
wp = self.WayPoint(time, nodenum, coords=(x,y,z), speed=speed)
|
||||
heapq.heappush(self.queue, wp)
|
||||
|
||||
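# Example (sketch, made-up values): addwaypoint(1.0, 6, 500.0, 178.0, None, 25.0)
# queues a move for node 6 toward (500, 178) at speed 25, released by
# updatepoints() once script time reaches 1.0; WayPoint.__cmp__ keeps the heap
# ordered by (time, nodenum).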
def addinitial(self, nodenum, x, y, z):
|
||||
''' Record initial position in a dict.
|
||||
'''
|
||||
wp = self.WayPoint(0, nodenum, coords=(x,y,z), speed=0)
|
||||
self.initial[nodenum] = wp
|
||||
|
||||
def updatepoints(self, now):
|
||||
''' Move items from self.queue to self.points when their time has come.
|
||||
'''
|
||||
while len(self.queue):
|
||||
if self.queue[0].time > now:
|
||||
break
|
||||
wp = heapq.heappop(self.queue)
|
||||
self.points[wp.nodenum] = wp
|
||||
|
||||
def copywaypoints(self):
|
||||
''' Store backup copy of waypoints for looping and stopping.
|
||||
'''
|
||||
self.queue_copy = list(self.queue)
|
||||
|
||||
def loopwaypoints(self):
|
||||
''' Restore backup copy of waypoints when looping.
|
||||
'''
|
||||
self.queue = list(self.queue_copy)
|
||||
return self.loop
|
||||
|
||||
def setnodeposition(self, node, x, y, z):
|
||||
''' Helper to move a node and notify any GUI (connected session handlers)
|
||||
without invoking the interface poshook callback that may perform
|
||||
range calculation.
|
||||
'''
|
||||
# this would cause PyCoreNetIf.poshook() callback (range calculation)
|
||||
#node.setposition(x, y, z)
|
||||
node.position.set(x, y, z)
|
||||
msg = node.tonodemsg(flags=0)
|
||||
self.session.broadcastraw(None, msg)
|
||||
self.session.sdt.updatenode(node, flags=0, x=x, y=y, z=z)
|
||||
|
||||
def setendtime(self):
|
||||
''' Set self.endtime to the time of the last waypoint in the queue of
|
||||
waypoints. This is just an estimate. The endtime will later be
|
||||
adjusted, after one round of the script has run, to be the time
|
||||
that the last moving node has reached its final waypoint.
|
||||
'''
|
||||
try:
|
||||
self.endtime = self.queue[-1].time
|
||||
except IndexError:
|
||||
self.endtime = 0
|
||||
|
||||
def start(self):
|
||||
''' Run the script from the beginning or unpause from where it
|
||||
was before.
|
||||
'''
|
||||
laststate = self.state
|
||||
self.state = self.STATE_RUNNING
|
||||
if laststate == self.STATE_STOPPED or laststate == self.STATE_RUNNING:
|
||||
self.loopwaypoints()
|
||||
self.timezero = 0
|
||||
self.lasttime = 0
|
||||
self.run()
|
||||
elif laststate == self.STATE_PAUSED:
|
||||
now = time.time()
|
||||
self.timezero += now - self.lasttime
|
||||
self.lasttime = now - (0.001 * self.refresh_ms)
|
||||
self.runround()
|
||||
|
||||
def stop(self, move_initial=True):
|
||||
''' Stop the script and move nodes to initial positions.
|
||||
'''
|
||||
self.state = self.STATE_STOPPED
|
||||
self.loopwaypoints()
|
||||
self.timezero = 0
|
||||
self.lasttime = 0
|
||||
if move_initial:
|
||||
self.movenodesinitial()
|
||||
self.session.mobility.sendevent(self)
|
||||
|
||||
def pause(self):
|
||||
''' Pause the script; pause time is stored to self.lasttime.
|
||||
'''
|
||||
self.state = self.STATE_PAUSED
|
||||
self.lasttime = time.time()
|
||||
|
||||
|
||||
class Ns2ScriptedMobility(WayPointMobility):
|
||||
''' Handles the ns-2 script format, generated by scengen/setdest or
|
||||
BonnMotion.
|
||||
'''
|
||||
_name = "ns2script"
|
||||
|
||||
_confmatrix = [
|
||||
("file", coreapi.CONF_DATA_TYPE_STRING, '',
|
||||
'', 'mobility script file'),
|
||||
("refresh_ms", coreapi.CONF_DATA_TYPE_UINT32, '50',
|
||||
'', 'refresh time (ms)'),
|
||||
("loop", coreapi.CONF_DATA_TYPE_BOOL, '1',
|
||||
'On,Off', 'loop'),
|
||||
("autostart", coreapi.CONF_DATA_TYPE_STRING, '',
|
||||
'', 'auto-start seconds (0.0 for runtime)'),
|
||||
("map", coreapi.CONF_DATA_TYPE_STRING, '',
|
||||
'', 'node mapping (optional, e.g. 0:1,1:2,2:3)'),
|
||||
("script_start", coreapi.CONF_DATA_TYPE_STRING, '',
|
||||
'', 'script file to run upon start'),
|
||||
("script_pause", coreapi.CONF_DATA_TYPE_STRING, '',
|
||||
'', 'script file to run upon pause'),
|
||||
("script_stop", coreapi.CONF_DATA_TYPE_STRING, '',
|
||||
'', 'script file to run upon stop'),
|
||||
]
|
||||
_confgroups = "ns-2 Mobility Script Parameters:1-%d" % len(_confmatrix)
|
||||
|
||||
def __init__(self, session, objid, verbose = False, values = None):
|
||||
'''
|
||||
'''
|
||||
super(Ns2ScriptedMobility, self).__init__(session = session, objid = objid,
|
||||
verbose = verbose, values = values)
|
||||
self._netifs = {}
|
||||
self._netifslock = threading.Lock()
|
||||
if values is None:
|
||||
values = session.mobility.getconfig(objid, self._name,
|
||||
self.getdefaultvalues())[1]
|
||||
self.file = self.valueof("file", values)
|
||||
self.refresh_ms = int(self.valueof("refresh_ms", values))
|
||||
self.loop = (self.valueof("loop", values).lower() == "on")
|
||||
self.autostart = self.valueof("autostart", values)
|
||||
self.parsemap(self.valueof("map", values))
|
||||
self.script_start = self.valueof("script_start", values)
|
||||
self.script_pause = self.valueof("script_pause", values)
|
||||
self.script_stop = self.valueof("script_stop", values)
|
||||
if self.verbose:
|
||||
self.session.info("ns-2 scripted mobility configured for WLAN %d" \
|
||||
" using file: %s" % (objid, self.file))
|
||||
self.readscriptfile()
|
||||
self.copywaypoints()
|
||||
self.setendtime()
|
||||
|
||||
@classmethod
|
||||
def configure_mob(cls, session, msg):
|
||||
''' Handle configuration messages for setting up a model.
|
||||
Pass the MobilityManager object as the manager object.
|
||||
'''
|
||||
return cls.configure(session.mobility, msg)
|
||||
|
||||
def readscriptfile(self):
|
||||
''' Read in mobility script from a file. This adds waypoints to a
|
||||
priority queue, sorted by waypoint time. Initial waypoints are
|
||||
stored in a separate dict.
|
||||
'''
|
||||
filename = self.findfile(self.file)
|
||||
try:
|
||||
f = open(filename, 'r')
|
||||
except IOError, e:
|
||||
self.session.warn("ns-2 scripted mobility failed to load file " \
|
||||
" '%s' (%s)" % (self.file, e))
|
||||
return
|
||||
if self.verbose:
|
||||
self.session.info("reading ns-2 script file: %s" % filename)
|
||||
ln = 0
|
||||
ix = iy = iz = None
|
||||
inodenum = None
|
||||
for line in f:
|
||||
ln += 1
|
||||
if line[:2] != '$n':
|
||||
continue
|
||||
try:
|
||||
if line[:8] == "$ns_ at ":
|
||||
if ix is not None and iy is not None:
|
||||
self.addinitial(self.map(inodenum), ix, iy, iz)
|
||||
ix = iy = iz = None
|
||||
# waypoints:
|
||||
# $ns_ at 1.00 "$node_(6) setdest 500.0 178.0 25.0"
|
||||
parts = line.split()
|
||||
time = float(parts[2])
|
||||
nodenum = parts[3][1+parts[3].index('('):parts[3].index(')')]
|
||||
x = float(parts[5])
|
||||
y = float(parts[6])
|
||||
z = None
|
||||
speed = float(parts[7].strip('"'))
|
||||
self.addwaypoint(time, self.map(nodenum), x, y, z, speed)
|
||||
elif line[:7] == "$node_(":
|
||||
# initial position (time=0, speed=0):
|
||||
# $node_(6) set X_ 780.0
|
||||
parts = line.split()
|
||||
time = 0.0
|
||||
nodenum = parts[0][1+parts[0].index('('):parts[0].index(')')]
|
||||
if parts[2] == 'X_':
|
||||
if ix is not None and iy is not None:
|
||||
self.addinitial(self.map(inodenum), ix, iy, iz)
|
||||
ix = iy = iz = None
|
||||
ix = float(parts[3])
|
||||
elif parts[2] == 'Y_':
|
||||
iy = float(parts[3])
|
||||
elif parts[2] == 'Z_':
|
||||
iz = float(parts[3])
|
||||
self.addinitial(self.map(nodenum), ix, iy, iz)
|
||||
ix = iy = iz = None
|
||||
inodenum = nodenum
|
||||
else:
|
||||
raise ValueError
|
||||
except ValueError, e:
|
||||
self.session.warn("skipping line %d of file %s '%s' (%s)" % \
|
||||
(ln, self.file, line, e))
|
||||
continue
|
||||
if ix is not None and iy is not None:
|
||||
self.addinitial(self.map(inodenum), ix, iy, iz)
|
||||
|
||||
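# Example (sketch) of the ns-2 script lines parsed above, as produced by tools
# such as BonnMotion or setdest (node number and coordinates are made up):
#
#   $node_(0) set X_ 100.0
#   $node_(0) set Y_ 200.0
#   $node_(0) set Z_ 0.0
#   $ns_ at 5.0 "$node_(0) setdest 250.0 300.0 10.0"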
def findfile(self, fn):
|
||||
''' Locate a script file. If the specified file doesn't exist, look in the
|
||||
same directory as the scenario file (session.filename), or in the default
|
||||
configs directory (~/.core/configs). This allows for sample files without
|
||||
absolute pathnames.
|
||||
'''
|
||||
if os.path.exists(fn):
|
||||
return fn
|
||||
if self.session.filename is not None:
|
||||
d = os.path.dirname(self.session.filename)
|
||||
sessfn = os.path.join(d, fn)
|
||||
if (os.path.exists(sessfn)):
|
||||
return sessfn
|
||||
if self.session.user is not None:
|
||||
userfn = os.path.join('/home', self.session.user, '.core', 'configs', fn)
|
||||
if (os.path.exists(userfn)):
|
||||
return userfn
|
||||
return fn
|
||||
|
||||
def parsemap(self, mapstr):
|
||||
''' Parse a node mapping string, given as a configuration parameter.
|
||||
'''
|
||||
self.nodemap = {}
|
||||
if mapstr.strip() == '':
|
||||
return
|
||||
for pair in mapstr.split(','):
|
||||
parts = pair.split(':')
|
||||
try:
|
||||
if len(parts) != 2:
|
||||
raise ValueError
|
||||
self.nodemap[int(parts[0])] = int(parts[1])
|
||||
except ValueError:
|
||||
self.session.warn("ns-2 mobility node map error")
|
||||
return
|
||||
|
||||
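# Example (sketch): parsemap("0:1,1:2") maps script node 0 to CORE node number
# 1 and script node 1 to node number 2; map() below falls back to the original
# number for any script node not listed.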
def map(self, nodenum):
|
||||
''' Map one node number (from a script file) to another.
|
||||
'''
|
||||
nodenum = int(nodenum)
|
||||
try:
|
||||
return self.nodemap[nodenum]
|
||||
except KeyError:
|
||||
return nodenum
|
||||
|
||||
def startup(self):
|
||||
''' Start running the script if autostart is enabled.
|
||||
Move nodes to their initial positions when any autostart time is specified.
|
||||
Ignore the script if autostart is an empty string (can still be
|
||||
started via GUI controls).
|
||||
'''
|
||||
if self.autostart == '':
|
||||
if self.verbose:
|
||||
self.session.info("not auto-starting ns-2 script for %s" % \
|
||||
self.wlan.name)
|
||||
return
|
||||
try:
|
||||
t = float(self.autostart)
|
||||
except ValueError:
|
||||
self.session.warn("Invalid auto-start seconds specified '%s' for " \
|
||||
"%s" % (self.autostart, self.wlan.name))
|
||||
return
|
||||
self.movenodesinitial()
|
||||
if self.verbose:
|
||||
self.session.info("scheduling ns-2 script for %s autostart at %s" \
|
||||
% (self.wlan.name, t))
|
||||
self.state = self.STATE_RUNNING
|
||||
self.session.evq.add_event(t, self.run)
|
||||
|
||||
def start(self):
|
||||
''' Handle the case when un-paused.
|
||||
'''
|
||||
laststate = self.state
|
||||
super(Ns2ScriptedMobility, self).start()
|
||||
if laststate == self.STATE_PAUSED:
|
||||
self.statescript("unpause")
|
||||
|
||||
def run(self):
|
||||
''' Start is pressed or autostart is triggered.
|
||||
'''
|
||||
super(Ns2ScriptedMobility, self).run()
|
||||
self.statescript("run")
|
||||
|
||||
def pause(self):
|
||||
super(Ns2ScriptedMobility, self).pause()
|
||||
self.statescript("pause")
|
||||
|
||||
def stop(self, move_initial=True):
|
||||
super(Ns2ScriptedMobility, self).stop(move_initial=move_initial)
|
||||
self.statescript("stop")
|
||||
|
||||
def statescript(self, typestr):
|
||||
filename = None
|
||||
if typestr == "run" or typestr == "unpause":
|
||||
filename = self.script_start
|
||||
elif typestr == "pause":
|
||||
filename = self.script_pause
|
||||
elif typestr == "stop":
|
||||
filename = self.script_stop
|
||||
if filename is None or filename == '':
|
||||
return
|
||||
filename = self.findfile(filename)
|
||||
try:
|
||||
check_call(["/bin/sh", filename, typestr],
|
||||
cwd=self.session.sessiondir,
|
||||
env=self.session.getenviron())
|
||||
except Exception, e:
|
||||
self.session.warn("Error running script '%s' for WLAN state %s: " \
|
||||
"%s" % (filename, typestr, e))
|
||||
|
||||
|
0
daemon/core/netns/__init__.py
Normal file
401
daemon/core/netns/nodes.py
Normal file
|
@@ -0,0 +1,401 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2013 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# authors: Tom Goff <thomas.goff@boeing.com>
|
||||
# Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
nodes.py: definition of the LxcNode and CoreNode classes, and other node classes
|
||||
that inherit from the CoreNode, implementing specific node types.
|
||||
'''
|
||||
|
||||
from vnode import *
|
||||
from vnet import *
|
||||
from core.misc.ipaddr import *
|
||||
from core.api import coreapi
|
||||
from core.coreobj import PyCoreNode
|
||||
|
||||
class CtrlNet(LxBrNet):
|
||||
policy = "ACCEPT"
|
||||
CTRLIF_IDX_BASE = 99 # base control interface index
|
||||
|
||||
def __init__(self, session, objid = "ctrlnet", name = None,
|
||||
verbose = False, netid = 1, prefix = None,
|
||||
hostid = None, start = True, assign_address = True,
|
||||
updown_script = None):
|
||||
if not prefix:
|
||||
prefix = "172.16.%d.0/24" % netid
|
||||
self.prefix = IPv4Prefix(prefix)
|
||||
self.hostid = hostid
|
||||
self.assign_address = assign_address
|
||||
self.updown_script = updown_script
|
||||
LxBrNet.__init__(self, session, objid = objid, name = name,
|
||||
verbose = verbose, start = start)
|
||||
|
||||
def startup(self):
|
||||
LxBrNet.startup(self)
|
||||
if self.hostid:
|
||||
addr = self.prefix.addr(self.hostid)
|
||||
else:
|
||||
addr = self.prefix.maxaddr()
|
||||
addrlist = ["%s/%s" % (addr, self.prefix.prefixlen)]
|
||||
if self.assign_address:
|
||||
self.addrconfig(addrlist = addrlist)
|
||||
if self.updown_script is not None:
|
||||
self.info("interface %s updown script '%s startup' called" % \
|
||||
(self.brname, self.updown_script))
|
||||
check_call([self.updown_script, self.brname, "startup"])
|
||||
|
||||
def shutdown(self):
|
||||
if self.updown_script is not None:
|
||||
self.info("interface %s updown script '%s shutdown' called" % \
|
||||
(self.brname, self.updown_script))
|
||||
check_call([self.updown_script, self.brname, "shutdown"])
|
||||
LxBrNet.shutdown(self)
|
||||
|
||||
def tolinkmsgs(self, flags):
|
||||
''' Do not include CtrlNet in link messages describing this session.
|
||||
'''
|
||||
return []
|
||||
|
||||
class CoreNode(LxcNode):
|
||||
apitype = coreapi.CORE_NODE_DEF
|
||||
|
||||
class PtpNet(LxBrNet):
|
||||
policy = "ACCEPT"
|
||||
|
||||
def attach(self, netif):
|
||||
if len(self._netif) > 1:
|
||||
raise ValueError, \
|
||||
"Point-to-point links support at most 2 network interfaces"
|
||||
LxBrNet.attach(self, netif)
|
||||
|
||||
def tonodemsg(self, flags):
|
||||
''' Do not generate a Node Message for point-to-point links. They are
|
||||
built using a link message instead.
|
||||
'''
|
||||
pass
|
||||
|
||||
def tolinkmsgs(self, flags):
|
||||
''' Build CORE API TLVs for a point-to-point link. One Link message
|
||||
describes this network.
|
||||
'''
|
||||
tlvdata = ""
|
||||
if len(self._netif) != 2:
|
||||
return tlvdata
|
||||
(if1, if2) = self._netif.items()
|
||||
if1 = if1[1]
|
||||
if2 = if2[1]
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_N1NUMBER,
|
||||
if1.node.objid)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_N2NUMBER,
|
||||
if2.node.objid)
|
||||
delay = if1.getparam('delay')
|
||||
bw = if1.getparam('bw')
|
||||
loss = if1.getparam('loss')
|
||||
duplicate = if1.getparam('duplicate')
|
||||
jitter = if1.getparam('jitter')
|
||||
if delay is not None:
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_DELAY,
|
||||
delay)
|
||||
if bw is not None:
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_BW, bw)
|
||||
if loss is not None:
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_PER,
|
||||
str(loss))
|
||||
if duplicate is not None:
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_DUP,
|
||||
str(duplicate))
|
||||
if jitter is not None:
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_JITTER,
|
||||
jitter)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_TYPE,
|
||||
self.linktype)
|
||||
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_IF1NUM, \
|
||||
if1.node.getifindex(if1))
|
||||
for addr in if1.addrlist:
|
||||
(ip, sep, mask) = addr.partition('/')
|
||||
mask = int(mask)
|
||||
if isIPv4Address(ip):
|
||||
family = AF_INET
|
||||
tlvtypeip = coreapi.CORE_TLV_LINK_IF1IP4
|
||||
tlvtypemask = coreapi.CORE_TLV_LINK_IF1IP4MASK
|
||||
else:
|
||||
family = AF_INET6
|
||||
tlvtypeip = coreapi.CORE_TLV_LINK_IF1IP6
|
||||
tlvtypemask = coreapi.CORE_TLV_LINK_IF1IP6MASK
|
||||
ipl = socket.inet_pton(family, ip)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(tlvtypeip,
|
||||
IPAddr(af=family, addr=ipl))
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(tlvtypemask, mask)
|
||||
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_IF2NUM, \
|
||||
if2.node.getifindex(if2))
|
||||
for addr in if2.addrlist:
|
||||
(ip, sep, mask) = addr.partition('/')
|
||||
mask = int(mask)
|
||||
if isIPv4Address(ip):
|
||||
family = AF_INET
|
||||
tlvtypeip = coreapi.CORE_TLV_LINK_IF2IP4
|
||||
tlvtypemask = coreapi.CORE_TLV_LINK_IF2IP4MASK
|
||||
else:
|
||||
family = AF_INET6
|
||||
tlvtypeip = coreapi.CORE_TLV_LINK_IF2IP6
|
||||
tlvtypemask = coreapi.CORE_TLV_LINK_IF2IP6MASK
|
||||
ipl = socket.inet_pton(family, ip)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(tlvtypeip,
|
||||
IPAddr(af=family, addr=ipl))
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(tlvtypemask, mask)
|
||||
msg = coreapi.CoreLinkMessage.pack(flags, tlvdata)
|
||||
return [msg,]
|
||||
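# Illustrative note (not in the original source): interface addresses are
# stored as "address/prefixlen" strings, so the TLV packing above splits them
# with str.partition, e.g. "10.0.0.1/24".partition('/') returns
# ('10.0.0.1', '/', '24'); the address family then selects the IPv4 or IPv6
# interface TLV types.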
|
||||
class SwitchNode(LxBrNet):
|
||||
apitype = coreapi.CORE_NODE_SWITCH
|
||||
policy = "ACCEPT"
|
||||
type = "lanswitch"
|
||||
|
||||
class HubNode(LxBrNet):
|
||||
apitype = coreapi.CORE_NODE_HUB
|
||||
policy = "ACCEPT"
|
||||
type = "hub"
|
||||
|
||||
def __init__(self, session, objid = None, name = None, verbose = False,
|
||||
start = True):
|
||||
''' the Hub node forwards packets to all bridge ports by turning off
|
||||
MAC address learning
|
||||
'''
|
||||
LxBrNet.__init__(self, session, objid, name, verbose, start)
|
||||
if start:
|
||||
check_call([BRCTL_BIN, "setageing", self.brname, "0"])
|
||||
|
||||
|
||||
class WlanNode(LxBrNet):
|
||||
apitype = coreapi.CORE_NODE_WLAN
|
||||
linktype = coreapi.CORE_LINK_WIRELESS
|
||||
policy = "DROP"
|
||||
type = "wlan"
|
||||
|
||||
def __init__(self, session, objid = None, name = None, verbose = False,
|
||||
start = True, policy = None):
|
||||
LxBrNet.__init__(self, session, objid, name, verbose, start, policy)
|
||||
# wireless model such as basic range
|
||||
self.model = None
|
||||
# mobility model such as scripted
|
||||
self.mobility = None
|
||||
|
||||
def attach(self, netif):
|
||||
LxBrNet.attach(self, netif)
|
||||
if self.model:
|
||||
netif.poshook = self.model._positioncallback
|
||||
if netif.node is None:
|
||||
return
|
||||
(x,y,z) = netif.node.position.get()
|
||||
# invokes any netif.poshook
|
||||
netif.setposition(x, y, z)
|
||||
#self.model.setlinkparams()
|
||||
|
||||
def setmodel(self, model, config):
|
||||
''' Mobility and wireless model.
|
||||
'''
|
||||
if (self.verbose):
|
||||
self.info("adding model %s" % model._name)
|
||||
if model._type == coreapi.CORE_TLV_REG_WIRELESS:
|
||||
self.model = model(session=self.session, objid=self.objid,
|
||||
verbose=self.verbose, values=config)
|
||||
if self.model._positioncallback:
|
||||
for netif in self.netifs():
|
||||
netif.poshook = self.model._positioncallback
|
||||
if netif.node is not None:
|
||||
(x,y,z) = netif.node.position.get()
|
||||
netif.poshook(netif, x, y, z)
|
||||
self.model.setlinkparams()
|
||||
elif model._type == coreapi.CORE_TLV_REG_MOBILITY:
|
||||
self.mobility = model(session=self.session, objid=self.objid,
|
||||
verbose=self.verbose, values=config)
|
||||
|
||||
def tolinkmsgs(self, flags):
|
||||
msgs = LxBrNet.tolinkmsgs(self, flags)
|
||||
if self.model:
|
||||
msgs += self.model.tolinkmsgs(flags)
|
||||
return msgs
|
||||
|
||||
|
||||
class RJ45Node(PyCoreNode, PyCoreNetIf):
|
||||
''' RJ45Node is a physical interface on the host linked to the emulated
|
||||
network.
|
||||
'''
|
||||
apitype = coreapi.CORE_NODE_RJ45
|
||||
|
||||
def __init__(self, session, objid = None, name = None, mtu = 1500,
|
||||
verbose = False, start = True):
|
||||
PyCoreNode.__init__(self, session, objid, name, verbose=verbose,
|
||||
start=start)
|
||||
# this initializes net, params, poshook
|
||||
PyCoreNetIf.__init__(self, node=self, name=name, mtu = mtu)
|
||||
self.up = False
|
||||
self.lock = threading.RLock()
|
||||
self.ifindex = None
|
||||
# the following are PyCoreNetIf attributes
|
||||
self.transport_type = "raw"
|
||||
self.localname = name
|
||||
self.type = "rj45"
|
||||
if start:
|
||||
self.startup()
|
||||
|
||||
def startup(self):
|
||||
''' Set the interface in the up state.
|
||||
'''
|
||||
# interface will also be marked up during net.attach()
|
||||
self.savestate()
|
||||
try:
|
||||
check_call([IP_BIN, "link", "set", self.localname, "up"])
|
||||
except:
|
||||
self.warn("Failed to run command: %s link set %s up" % \
|
||||
(IP_BIN, self.localname))
|
||||
return
|
||||
self.up = True
|
||||
|
||||
def shutdown(self):
|
||||
''' Bring the interface down. Remove any addresses and queuing
|
||||
disciplines.
|
||||
'''
|
||||
if not self.up:
|
||||
return
|
||||
check_call([IP_BIN, "link", "set", self.localname, "down"])
|
||||
check_call([IP_BIN, "addr", "flush", "dev", self.localname])
|
||||
mutecall([TC_BIN, "qdisc", "del", "dev", self.localname, "root"])
|
||||
self.up = False
|
||||
self.restorestate()
|
||||
|
||||
def attachnet(self, net):
|
||||
PyCoreNetIf.attachnet(self, net)
|
||||
|
||||
def detachnet(self):
|
||||
PyCoreNetIf.detachnet(self)
|
||||
|
||||
def newnetif(self, net = None, addrlist = [], hwaddr = None,
|
||||
ifindex = None, ifname = None):
|
||||
''' This is called when linking with another node. Since this node
|
||||
represents an interface, we do not create another object here,
|
||||
but attach ourselves to the given network.
|
||||
'''
|
||||
self.lock.acquire()
|
||||
try:
|
||||
if ifindex is None:
|
||||
ifindex = 0
|
||||
if self.net is not None:
|
||||
raise ValueError, \
|
||||
"RJ45 nodes support at most 1 network interface"
|
||||
self._netif[ifindex] = self
|
||||
self.node = self # PyCoreNetIf.node is self
|
||||
self.ifindex = ifindex
|
||||
if net is not None:
|
||||
self.attachnet(net)
|
||||
for addr in maketuple(addrlist):
|
||||
self.addaddr(addr)
|
||||
return ifindex
|
||||
finally:
|
||||
self.lock.release()
|
||||
|
||||
def delnetif(self, ifindex):
|
||||
if ifindex is None:
|
||||
ifindex = 0
|
||||
if ifindex not in self._netif:
|
||||
raise ValueError, "ifindex %s does not exist" % ifindex
|
||||
self._netif.pop(ifindex)
|
||||
if ifindex == self.ifindex:
|
||||
self.shutdown()
|
||||
else:
|
||||
raise ValueError, "ifindex %s does not exist" % ifindex
|
||||
|
||||
def netif(self, ifindex, net=None):
|
||||
''' This object is considered the network interface, so we only
|
||||
return self here. This keeps the RJ45Node compatible with
|
||||
real nodes.
|
||||
'''
|
||||
if net is not None and net == self.net:
|
||||
return self
|
||||
if ifindex is None:
|
||||
ifindex = 0
|
||||
if ifindex == self.ifindex:
|
||||
return self
|
||||
return None
|
||||
|
||||
def getifindex(self, netif):
|
||||
if netif != self:
|
||||
return None
|
||||
return self.ifindex
|
||||
|
||||
def addaddr(self, addr):
|
||||
if self.up:
|
||||
check_call([IP_BIN, "addr", "add", str(addr), "dev", self.name])
|
||||
PyCoreNetIf.addaddr(self, addr)
|
||||
|
||||
def deladdr(self, addr):
|
||||
if self.up:
|
||||
check_call([IP_BIN, "addr", "del", str(addr), "dev", self.name])
|
||||
PyCoreNetIf.deladdr(self, addr)
|
||||
|
||||
def savestate(self):
|
||||
''' Save the addresses and other interface state before using the
|
||||
interface for emulation purposes. TODO: save/restore the PROMISC flag
|
||||
'''
|
||||
self.old_up = False
|
||||
self.old_addrs = []
|
||||
cmd = [IP_BIN, "addr", "show", "dev", self.localname]
|
||||
try:
|
||||
tmp = subprocess.Popen(cmd, stdout = subprocess.PIPE)
|
||||
except OSError:
|
||||
self.warn("Failed to run %s command: %s" % (IP_BIN, cmd))
|
||||
if tmp.wait():
|
||||
self.warn("Command failed: %s" % cmd)
|
||||
return
|
||||
lines = tmp.stdout.read()
|
||||
tmp.stdout.close()
|
||||
for l in lines.split('\n'):
|
||||
items = l.split()
|
||||
if len(items) < 2:
|
||||
continue
|
||||
if items[1] == "%s:" % self.localname:
|
||||
flags = items[2][1:-1].split(',')
|
||||
if "UP" in flags:
|
||||
self.old_up = True
|
||||
elif items[0] == "inet":
|
||||
self.old_addrs.append((items[1], items[3]))
|
||||
elif items[0] == "inet6":
|
||||
if items[1][:4] == "fe80":
|
||||
continue
|
||||
self.old_addrs.append((items[1], None))
|
||||
|
||||
def restorestate(self):
|
||||
''' Restore the addresses and other interface state after using it.
|
||||
'''
|
||||
for addr in self.old_addrs:
|
||||
if addr[1] is None:
|
||||
check_call([IP_BIN, "addr", "add", addr[0], "dev",
|
||||
self.localname])
|
||||
else:
|
||||
check_call([IP_BIN, "addr", "add", addr[0], "brd", addr[1],
|
||||
"dev", self.localname])
|
||||
if self.old_up:
|
||||
check_call([IP_BIN, "link", "set", self.localname, "up"])
|
||||
|
||||
def setposition(self, x=None, y=None, z=None):
|
||||
''' Use setposition() from both parent classes.
|
||||
'''
|
||||
PyCoreObj.setposition(self, x, y, z)
|
||||
# invoke any poshook
|
||||
PyCoreNetIf.setposition(self, x, y, z)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
class TunnelNode(GreTapBridge):
|
||||
apitype = coreapi.CORE_NODE_TUNNEL
|
||||
policy = "ACCEPT"
|
||||
type = "tunnel"
|
||||
|
168
daemon/core/netns/vif.py
Normal file
|
@@ -0,0 +1,168 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2011-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# authors: Tom Goff <thomas.goff@boeing.com>
|
||||
# Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
vif.py: PyCoreNetIf classes that implement the interfaces available
|
||||
under Linux.
|
||||
'''
|
||||
|
||||
import os, signal, shutil, sys, subprocess, vnodeclient, threading, string
|
||||
import random, time
|
||||
from core.api import coreapi
|
||||
from core.misc.utils import *
|
||||
from core.constants import *
|
||||
from core.coreobj import PyCoreObj, PyCoreNode, PyCoreNetIf, Position
|
||||
from core.emane.nodes import EmaneNode
|
||||
|
||||
checkexec([IP_BIN])
|
||||
|
||||
class VEth(PyCoreNetIf):
|
||||
def __init__(self, node, name, localname, mtu = 1500, net = None,
|
||||
start = True):
|
||||
# note that net arg is ignored
|
||||
PyCoreNetIf.__init__(self, node = node, name = name, mtu = mtu)
|
||||
self.localname = localname
|
||||
self.up = False
|
||||
if start:
|
||||
self.startup()
|
||||
|
||||
def startup(self):
|
||||
check_call([IP_BIN, "link", "add", "name", self.localname,
|
||||
"type", "veth", "peer", "name", self.name])
|
||||
check_call([IP_BIN, "link", "set", self.localname, "up"])
|
||||
self.up = True
|
||||
|
||||
def shutdown(self):
|
||||
if not self.up:
|
||||
return
|
||||
if self.node:
|
||||
self.node.cmd([IP_BIN, "-6", "addr", "flush", "dev", self.name])
|
||||
if self.localname:
|
||||
mutedetach([IP_BIN, "link", "delete", self.localname])
|
||||
self.up = False
|
||||
|
||||
|
||||
class TunTap(PyCoreNetIf):
|
||||
''' TUN/TAP virtual device in TAP mode
|
||||
'''
|
||||
def __init__(self, node, name, localname, mtu = 1500, net = None,
|
||||
start = True):
|
||||
PyCoreNetIf.__init__(self, node = node, name = name, mtu = mtu)
|
||||
self.localname = localname
|
||||
self.up = False
|
||||
self.transport_type = "virtual"
|
||||
if start:
|
||||
self.startup()
|
||||
|
||||
def startup(self):
|
||||
# TODO: more sophisticated TAP creation here
|
||||
# Debian does not support -p (tap) option, RedHat does.
|
||||
# For now, this is disabled to allow the TAP to be created by another
|
||||
# system (e.g. EMANE's emanetransportd)
|
||||
#check_call(["tunctl", "-t", self.name])
|
||||
# self.install()
|
||||
self.up = True
|
||||
|
||||
def shutdown(self):
|
||||
if not self.up:
|
||||
return
|
||||
self.node.cmd([IP_BIN, "-6", "addr", "flush", "dev", self.name])
|
||||
#if self.name:
|
||||
# mutedetach(["tunctl", "-d", self.localname])
|
||||
self.up = False
|
||||
|
||||
def install(self):
|
||||
''' Install this TAP into its namespace. This is not done from the
|
||||
startup() method but called at a later time when a userspace
|
||||
program (running on the host) has had a chance to open the socket
|
||||
end of the TAP.
|
||||
'''
|
||||
netns = str(self.node.pid)
|
||||
# check for presence of device - tap device may not appear right away
|
||||
# waits ~= stime * ( 2 ** attempts) seconds
|
||||
attempts = 9
|
||||
stime = 0.01
|
||||
while attempts > 0:
|
||||
try:
|
||||
mutecheck_call([IP_BIN, "link", "show", self.localname])
|
||||
break
|
||||
except Exception, e:
|
||||
msg = "ip link show %s error (%d): %s" % \
|
||||
(self.localname, attempts, e)
|
||||
if attempts > 1:
|
||||
msg += ", retrying..."
|
||||
self.node.info(msg)
|
||||
time.sleep(stime)
|
||||
stime *= 2
|
||||
attempts -= 1
|
||||
# install tap device into namespace
|
||||
try:
|
||||
check_call([IP_BIN, "link", "set", self.localname, "netns", netns])
|
||||
except Exception, e:
|
||||
msg = "error installing TAP interface %s, command:" % self.localname
|
||||
msg += "ip link set %s netns %s" % (self.localname, netns)
|
||||
self.node.exception(coreapi.CORE_EXCP_LEVEL_ERROR, self.localname, msg)
|
||||
self.node.warn(msg)
|
||||
return
|
||||
self.node.cmd([IP_BIN, "link", "set", self.localname,
|
||||
"name", self.name])
|
||||
for addr in self.addrlist:
|
||||
self.node.cmd([IP_BIN, "addr", "add", str(addr),
|
||||
"dev", self.name])
|
||||
self.node.cmd([IP_BIN, "link", "set", self.name, "up"])
|
||||
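# A minimal standalone sketch (not part of the original source) of the same
# "poll with exponential backoff, then give up" pattern used by install()
# above; the function name is hypothetical.
def _example_wait_for_device(devname, attempts=9, stime=0.01):
    ''' Return True once "ip link show <devname>" succeeds, retrying with
    exponentially increasing sleeps, or False if the device never appears.
    '''
    while attempts > 0:
        try:
            mutecheck_call([IP_BIN, "link", "show", devname])
            return True
        except Exception:
            time.sleep(stime)
            stime *= 2
            attempts -= 1
    return False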
|
||||
class GreTap(PyCoreNetIf):
|
||||
''' GRE TAP device for tunneling between emulation servers.
|
||||
Uses the "gretap" tunnel device type from Linux which is a GRE device
|
||||
having a MAC address. The MAC address is required for bridging.
|
||||
'''
|
||||
def __init__(self, node = None, name = None, session = None, mtu = 1458,
|
||||
remoteip = None, objid = None, localip = None, ttl = 255,
|
||||
key = None, start = True):
|
||||
PyCoreNetIf.__init__(self, node = node, name = name, mtu = mtu)
|
||||
self.session = session
|
||||
if objid is None:
|
||||
# from PyCoreObj
|
||||
objid = (((id(self) >> 16) ^ (id(self) & 0xffff)) & 0xffff)
|
||||
self.objid = objid
|
||||
sessionid = self.session.shortsessionid()
|
||||
# interface name on the local host machine
|
||||
self.localname = "gt.%s.%s" % (self.objid, sessionid)
|
||||
self.transport_type = "raw"
|
||||
if not start:
|
||||
self.up = False
|
||||
return
|
||||
|
||||
if remoteip is None:
|
||||
raise ValueError, "missing remote IP required for GRE TAP device"
|
||||
cmd = ("ip", "link", "add", self.localname, "type", "gretap",
|
||||
"remote", str(remoteip))
|
||||
if localip:
|
||||
cmd += ("local", str(localip))
|
||||
if ttl:
|
||||
cmd += ("ttl", str(ttl))
|
||||
if key:
|
||||
cmd += ("key", str(key))
|
||||
check_call(cmd)
|
||||
cmd = ("ip", "link", "set", self.localname, "up")
|
||||
check_call(cmd)
|
||||
self.up = True
|
||||
|
||||
def shutdown(self):
|
||||
if self.localname:
|
||||
cmd = ("ip", "link", "set", self.localname, "down")
|
||||
check_call(cmd)
|
||||
cmd = ("ip", "link", "del", self.localname)
|
||||
check_call(cmd)
|
||||
self.localname = None
|
||||
|
||||
def tonodemsg(self, flags):
|
||||
return None
|
||||
|
||||
def tolinkmsgs(self, flags):
|
||||
return []
|
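# Illustrative usage sketch (not part of the original source); the IP
# addresses and key are hypothetical. A GreTap needs a session (its short
# session id becomes part of the device name) and the remote endpoint:
#
#   gt = GreTap(session=session, remoteip="192.168.1.2",
#               localip="192.168.1.1", key=1234)
#   # ... attach gt to a bridge, e.g. via GreTapBridge in vnet.py ...
#   gt.shutdown()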
496
daemon/core/netns/vnet.py
Normal file
|
@@ -0,0 +1,496 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# authors: Tom Goff <thomas.goff@boeing.com>
|
||||
# Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
vnet.py: PyCoreNet and LxBrNet classes that implement virtual networks using
|
||||
Linux Ethernet bridging and ebtables rules.
|
||||
'''
|
||||
|
||||
import os, sys, threading, time, subprocess
|
||||
|
||||
from core.api import coreapi
|
||||
from core.misc.utils import *
|
||||
from core.constants import *
|
||||
from core.coreobj import PyCoreNet, PyCoreObj
|
||||
from core.netns.vif import VEth, GreTap
|
||||
|
||||
checkexec([BRCTL_BIN, IP_BIN, EBTABLES_BIN, TC_BIN])
|
||||
|
||||
ebtables_lock = threading.Lock()
|
||||
|
||||
class EbtablesQueue(object):
|
||||
''' Helper class for queuing up ebtables commands into rate-limited
|
||||
atomic commits. This improves performance and reliability when there are
|
||||
many WLAN link updates.
|
||||
'''
|
||||
# update rate is every 300ms
|
||||
rate = 0.3
|
||||
# ebtables
|
||||
atomic_file = "/tmp/pycore.ebtables.atomic"
|
||||
|
||||
def __init__(self):
|
||||
''' Initialize the helper class, but don't start the update thread
|
||||
until a WLAN is instantiated.
|
||||
'''
|
||||
self.doupdateloop = False
|
||||
self.updatethread = None
|
||||
# this lock protects cmds and updates lists
|
||||
self.updatelock = threading.Lock()
|
||||
# list of pending ebtables commands
|
||||
self.cmds = []
|
||||
# list of WLANs requiring update
|
||||
self.updates = []
|
||||
# timestamps of last WLAN update; this keeps track of WLANs that are
|
||||
# using this queue
|
||||
self.last_update_time = {}
|
||||
|
||||
def startupdateloop(self, wlan):
|
||||
''' Kick off the update loop; only needs to be invoked once.
|
||||
'''
|
||||
self.updatelock.acquire()
|
||||
self.last_update_time[wlan] = time.time()
|
||||
self.updatelock.release()
|
||||
if self.doupdateloop:
|
||||
return
|
||||
self.doupdateloop = True
|
||||
self.updatethread = threading.Thread(target = self.updateloop)
|
||||
self.updatethread.daemon = True
|
||||
self.updatethread.start()
|
||||
|
||||
def stopupdateloop(self, wlan):
|
||||
''' Kill the update loop thread if there are no more WLANs using it.
|
||||
'''
|
||||
self.updatelock.acquire()
|
||||
try:
|
||||
del self.last_update_time[wlan]
|
||||
except KeyError:
|
||||
pass
|
||||
self.updatelock.release()
|
||||
if len(self.last_update_time) > 0:
|
||||
return
|
||||
self.doupdateloop = False
|
||||
if self.updatethread:
|
||||
self.updatethread.join()
|
||||
self.updatethread = None
|
||||
|
||||
def ebatomiccmd(self, cmd):
|
||||
''' Helper for building ebtables atomic file command list.
|
||||
'''
|
||||
r = [EBTABLES_BIN, "--atomic-file", self.atomic_file]
|
||||
if cmd:
|
||||
r.extend(cmd)
|
||||
return r
|
||||
|
||||
def lastupdate(self, wlan):
|
||||
''' Return the time elapsed since this WLAN was last updated.
|
||||
'''
|
||||
try:
|
||||
elapsed = time.time() - self.last_update_time[wlan]
|
||||
except KeyError:
|
||||
self.last_update_time[wlan] = time.time()
|
||||
elapsed = 0.0
|
||||
return elapsed
|
||||
|
||||
def updated(self, wlan):
|
||||
''' Keep track of when this WLAN was last updated.
|
||||
'''
|
||||
self.last_update_time[wlan] = time.time()
|
||||
self.updates.remove(wlan)
|
||||
|
||||
def updateloop(self):
|
||||
''' Thread target that looks for WLANs needing update, and
|
||||
rate limits the amount of ebtables activity. Only one userspace program
|
||||
should use ebtables at any given time, or results can be unpredictable.
|
||||
'''
|
||||
while self.doupdateloop:
|
||||
self.updatelock.acquire()
|
||||
for wlan in self.updates:
|
||||
if self.lastupdate(wlan) > self.rate:
|
||||
self.buildcmds(wlan)
|
||||
#print "ebtables commit %d rules" % len(self.cmds)
|
||||
self.ebcommit(wlan)
|
||||
self.updated(wlan)
|
||||
self.updatelock.release()
|
||||
time.sleep(self.rate)
|
||||
|
||||
def ebcommit(self, wlan):
|
||||
''' Perform ebtables atomic commit using commands built in the
|
||||
self.cmds list.
|
||||
'''
|
||||
# save kernel ebtables snapshot to a file
|
||||
cmd = self.ebatomiccmd(["--atomic-save",])
|
||||
try:
|
||||
check_call(cmd)
|
||||
except Exception, e:
|
||||
self.eberror(wlan, "atomic-save (%s)" % cmd, e)
|
||||
# no atomic file, exit
|
||||
return
|
||||
# modify the table file using queued ebtables commands
|
||||
for c in self.cmds:
|
||||
cmd = self.ebatomiccmd(c)
|
||||
try:
|
||||
check_call(cmd)
|
||||
except Exception, e:
|
||||
self.eberror(wlan, "cmd=%s" % cmd, e)
|
||||
pass
|
||||
self.cmds = []
|
||||
# commit the table file to the kernel
|
||||
cmd = self.ebatomiccmd(["--atomic-commit",])
|
||||
try:
|
||||
check_call(cmd)
|
||||
os.unlink(self.atomic_file)
|
||||
except Exception, e:
|
||||
self.eberror(wlan, "atomic-commit (%s)" % cmd, e)
|
||||
|
||||
def ebchange(self, wlan):
|
||||
''' Flag a change to the given WLAN's _linked dict, so the ebtables
|
||||
chain will be rebuilt at the next interval.
|
||||
'''
|
||||
self.updatelock.acquire()
|
||||
if wlan not in self.updates:
|
||||
self.updates.append(wlan)
|
||||
self.updatelock.release()
|
||||
|
||||
def buildcmds(self, wlan):
|
||||
''' Inspect a _linked dict from a wlan, and rebuild the ebtables chain
|
||||
for that WLAN.
|
||||
'''
|
||||
wlan._linked_lock.acquire()
|
||||
# flush the chain
|
||||
self.cmds.extend([["-F", wlan.brname],])
|
||||
# rebuild the chain
|
||||
for (netif1, v) in wlan._linked.items():
|
||||
for (netif2, linked) in v.items():
|
||||
if wlan.policy == "DROP" and linked:
|
||||
self.cmds.extend([["-A", wlan.brname, "-i", netif1.localname,
|
||||
"-o", netif2.localname, "-j", "ACCEPT"],
|
||||
["-A", wlan.brname, "-o", netif1.localname,
|
||||
"-i", netif2.localname, "-j", "ACCEPT"]])
|
||||
elif wlan.policy == "ACCEPT" and not linked:
|
||||
self.cmds.extend([["-A", wlan.brname, "-i", netif1.localname,
|
||||
"-o", netif2.localname, "-j", "DROP"],
|
||||
["-A", wlan.brname, "-o", netif1.localname,
|
||||
"-i", netif2.localname, "-j", "DROP"]])
|
||||
wlan._linked_lock.release()
|
||||
|
||||
def eberror(self, wlan, source, error):
|
||||
''' Log an ebtables command error and send an exception.
|
||||
'''
|
||||
if not wlan:
|
||||
return
|
||||
wlan.exception(coreapi.CORE_EXCP_LEVEL_ERROR, wlan.brname,
|
||||
"ebtables command error: %s\n%s\n" % (source, error))
|
||||
|
||||
|
||||
# a global object because all WLANs share the same queue
|
||||
# cannot have multiple threads invoking the ebtables command
|
||||
ebq = EbtablesQueue()
|
||||
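# Illustrative note (not in the original source): bridges do not invoke
# ebtables directly for per-link filtering; they register with this shared
# queue and flag changes, and the update thread batches the rules into one
# atomic commit per interval. Typical call pattern (as used by LxBrNet below):
#   ebq.startupdateloop(wlan)   # on bridge startup
#   ebq.ebchange(wlan)          # whenever wlan._linked changes
#   ebq.stopupdateloop(wlan)    # on bridge shutdown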
|
||||
def ebtablescmds(call, cmds):
|
||||
ebtables_lock.acquire()
|
||||
try:
|
||||
for cmd in cmds:
|
||||
call(cmd)
|
||||
finally:
|
||||
ebtables_lock.release()
|
||||
|
||||
class LxBrNet(PyCoreNet):
|
||||
|
||||
policy = "DROP"
|
||||
|
||||
def __init__(self, session, objid = None, name = None, verbose = False,
|
||||
start = True, policy = None):
|
||||
PyCoreNet.__init__(self, session, objid, name, verbose, start)
|
||||
if name is None:
|
||||
name = str(self.objid)
|
||||
if policy is not None:
|
||||
self.policy = policy
|
||||
self.name = name
|
||||
self.brname = "b.%s.%s" % (str(self.objid), self.session.sessionid)
|
||||
self.up = False
|
||||
if start:
|
||||
self.startup()
|
||||
ebq.startupdateloop(self)
|
||||
|
||||
def startup(self):
|
||||
try:
|
||||
check_call([BRCTL_BIN, "addbr", self.brname])
|
||||
except Exception, e:
|
||||
self.exception(coreapi.CORE_EXCP_LEVEL_FATAL, self.brname,
|
||||
"Error adding bridge: %s" % e)
|
||||
try:
|
||||
# turn off spanning tree protocol and forwarding delay
|
||||
check_call([BRCTL_BIN, "stp", self.brname, "off"])
|
||||
check_call([BRCTL_BIN, "setfd", self.brname, "0"])
|
||||
check_call([IP_BIN, "link", "set", self.brname, "up"])
|
||||
# create a new ebtables chain for this bridge
|
||||
ebtablescmds(check_call, [
|
||||
[EBTABLES_BIN, "-N", self.brname, "-P", self.policy],
|
||||
[EBTABLES_BIN, "-A", "FORWARD",
|
||||
"--logical-in", self.brname, "-j", self.brname]])
|
||||
# turn off multicast snooping so mcast forwarding occurs w/o IGMP joins
|
||||
snoop = "/sys/devices/virtual/net/%s/bridge/multicast_snooping" % \
|
||||
self.brname
|
||||
if os.path.exists(snoop):
|
||||
open(snoop, "w").write('0')
|
||||
except Exception, e:
|
||||
self.exception(coreapi.CORE_EXCP_LEVEL_WARNING, self.brname,
|
||||
"Error setting bridge parameters: %s" % e)
|
||||
|
||||
self.up = True
|
||||
|
||||
def shutdown(self):
|
||||
if not self.up:
|
||||
return
|
||||
ebq.stopupdateloop(self)
|
||||
mutecall([IP_BIN, "link", "set", self.brname, "down"])
|
||||
mutecall([BRCTL_BIN, "delbr", self.brname])
|
||||
ebtablescmds(mutecall, [
|
||||
[EBTABLES_BIN, "-D", "FORWARD",
|
||||
"--logical-in", self.brname, "-j", self.brname],
|
||||
[EBTABLES_BIN, "-X", self.brname]])
|
||||
for netif in self.netifs():
|
||||
# removes veth pairs used for bridge-to-bridge connections
|
||||
netif.shutdown()
|
||||
self._netif.clear()
|
||||
self._linked.clear()
|
||||
del self.session
|
||||
self.up = False
|
||||
|
||||
def attach(self, netif):
|
||||
if self.up:
|
||||
try:
|
||||
check_call([BRCTL_BIN, "addif", self.brname, netif.localname])
|
||||
check_call([IP_BIN, "link", "set", netif.localname, "up"])
|
||||
except Exception, e:
|
||||
self.exception(coreapi.CORE_EXCP_LEVEL_ERROR, self.brname,
|
||||
"Error joining interface %s to bridge %s: %s" % \
|
||||
(netif.localname, self.brname, e))
|
||||
return
|
||||
PyCoreNet.attach(self, netif)
|
||||
|
||||
def detach(self, netif):
|
||||
if self.up:
|
||||
try:
|
||||
check_call([BRCTL_BIN, "delif", self.brname, netif.localname])
|
||||
except Exception, e:
|
||||
self.exception(coreapi.CORE_EXCP_LEVEL_ERROR, self.brname,
|
||||
"Error removing interface %s from bridge %s: %s" % \
|
||||
(netif.localname, self.brname, e))
|
||||
return
|
||||
PyCoreNet.detach(self, netif)
|
||||
|
||||
def linked(self, netif1, netif2):
|
||||
# check if the network interfaces are attached to this network
|
||||
if self._netif[netif1.netifi] != netif1:
|
||||
raise ValueError, "inconsistency for netif %s" % netif1.name
|
||||
if self._netif[netif2.netifi] != netif2:
|
||||
raise ValueError, "inconsistency for netif %s" % netif2.name
|
||||
try:
|
||||
linked = self._linked[netif1][netif2]
|
||||
except KeyError:
|
||||
if self.policy == "ACCEPT":
|
||||
linked = True
|
||||
elif self.policy == "DROP":
|
||||
linked = False
|
||||
else:
|
||||
raise Exception, "unknown policy: %s" % self.policy
|
||||
self._linked[netif1][netif2] = linked
|
||||
return linked
|
||||
|
||||
def unlink(self, netif1, netif2):
|
||||
''' Unlink two PyCoreNetIfs, resulting in adding or removing ebtables
|
||||
filtering rules.
|
||||
'''
|
||||
self._linked_lock.acquire()
|
||||
if not self.linked(netif1, netif2):
|
||||
self._linked_lock.release()
|
||||
return
|
||||
self._linked[netif1][netif2] = False
|
||||
self._linked_lock.release()
|
||||
ebq.ebchange(self)
|
||||
|
||||
def link(self, netif1, netif2):
|
||||
''' Link two PyCoreNetIfs together, resulting in adding or removing
|
||||
ebtables filtering rules.
|
||||
'''
|
||||
self._linked_lock.acquire()
|
||||
if self.linked(netif1, netif2):
|
||||
self._linked_lock.release()
|
||||
return
|
||||
self._linked[netif1][netif2] = True
|
||||
self._linked_lock.release()
|
||||
ebq.ebchange(self)
|
||||
|
||||
def linkconfig(self, netif, bw = None, delay = None,
|
||||
loss = None, duplicate = None, jitter = None, netif2 = None):
|
||||
''' Configure link parameters by applying tc queuing disciplines on the
|
||||
interface.
|
||||
'''
|
||||
tc = [TC_BIN, "qdisc", "replace", "dev", netif.localname]
|
||||
parent = ["root"]
|
||||
changed = False
|
||||
if netif.setparam('bw', bw):
|
||||
# from tc-tbf(8): minimum value for burst is rate / kernel_hz
|
||||
if bw is not None:
|
||||
burst = max(2 * netif.mtu, bw / 1000)
|
||||
limit = 0xffff # max IP payload
|
||||
tbf = ["tbf", "rate", str(bw),
|
||||
"burst", str(burst), "limit", str(limit)]
|
||||
if bw > 0:
|
||||
if self.up:
|
||||
check_call(tc + parent + ["handle", "1:"] + tbf)
|
||||
netif.setparam('has_tbf', True)
|
||||
changed = True
|
||||
elif netif.getparam('has_tbf') and bw <= 0:
|
||||
tcd = [] + tc
|
||||
tcd[2] = "delete"
|
||||
if self.up:
|
||||
check_call(tcd + parent)
|
||||
netif.setparam('has_tbf', False)
|
||||
# removing the parent removes the child
|
||||
netif.setparam('has_netem', False)
|
||||
changed = True
|
||||
if netif.getparam('has_tbf'):
|
||||
parent = ["parent", "1:1"]
|
||||
netem = ["netem"]
|
||||
changed = max(changed, netif.setparam('delay', delay))
|
||||
if loss is not None:
|
||||
loss = float(loss)
|
||||
changed = max(changed, netif.setparam('loss', loss))
|
||||
if duplicate is not None:
|
||||
duplicate = float(duplicate)
|
||||
changed = max(changed, netif.setparam('duplicate', duplicate))
|
||||
changed = max(changed, netif.setparam('jitter', jitter))
|
||||
if not changed:
|
||||
return
|
||||
# jitter and delay use the same delay statement
|
||||
if delay is not None:
|
||||
netem += ["delay", "%sus" % delay]
|
||||
if jitter is not None:
|
||||
if delay is None:
|
||||
netem += ["delay", "0us", "%sus" % jitter, "25%"]
|
||||
else:
|
||||
netem += ["%sus" % jitter, "25%"]
|
||||
|
||||
if loss is not None:
|
||||
netem += ["loss", "%s%%" % min(loss, 100)]
|
||||
if duplicate is not None:
|
||||
netem += ["duplicate", "%s%%" % min(duplicate, 100)]
|
||||
if delay <= 0 and loss <= 0 and duplicate <= 0:
|
||||
# possibly remove netem if it exists and parent queue wasn't removed
|
||||
if not netif.getparam('has_netem'):
|
||||
return
|
||||
tc[2] = "delete"
|
||||
if self.up:
|
||||
check_call(tc + parent + ["handle", "10:"])
|
||||
netif.setparam('has_netem', False)
|
||||
elif len(netem) > 1:
|
||||
if self.up:
|
||||
check_call(tc + parent + ["handle", "10:"] + netem)
|
||||
netif.setparam('has_netem', True)
|
||||
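# Illustrative note (not in the original source): with hypothetical values
# bw = 1000000, delay = 10000 (microseconds, per the "us" suffix above) and
# loss = 1 on a veth named n1.0.x, linkconfig() above effectively runs:
#   tc qdisc replace dev n1.0.x root handle 1: tbf rate 1000000 burst 3000 limit 65535
#   tc qdisc replace dev n1.0.x parent 1:1 handle 10: netem delay 10000us loss 1.0%
# where burst = max(2 * mtu, bw / 1000) and limit = 0xffff.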
|
||||
def linknet(self, net):
|
||||
''' Link this bridge with another by creating a veth pair and installing
|
||||
each device into each bridge.
|
||||
'''
|
||||
sessionid = self.session.sessionid
|
||||
localname = "n%s.%s.%s" % (self.objid, net.objid, sessionid)
|
||||
name = "n%s.%s.%s" % (net.objid, self.objid, sessionid)
|
||||
netif = VEth(node = None, name = name, localname = localname,
|
||||
mtu = 1500, net = self, start = self.up)
|
||||
self.attach(netif)
|
||||
if net.up:
|
||||
# this is similar to net.attach() but uses netif.name instead
|
||||
# of localname
|
||||
check_call([BRCTL_BIN, "addif", net.brname, netif.name])
|
||||
check_call([IP_BIN, "link", "set", netif.name, "up"])
|
||||
i = net.newifindex()
|
||||
net._netif[i] = netif
|
||||
with net._linked_lock:
|
||||
net._linked[netif] = {}
|
||||
netif.net = self
|
||||
netif.othernet = net
|
||||
|
||||
def addrconfig(self, addrlist):
|
||||
''' Set addresses on the bridge.
|
||||
'''
|
||||
if not self.up:
|
||||
return
|
||||
for addr in addrlist:
|
||||
try:
|
||||
check_call([IP_BIN, "addr", "add", str(addr), "dev", self.brname])
|
||||
except Exception, e:
|
||||
self.exception(coreapi.CORE_EXCP_LEVEL_ERROR, self.brname,
|
||||
"Error adding IP address: %s" % e)
|
||||
|
||||
class GreTapBridge(LxBrNet):
|
||||
''' A network consisting of a bridge with a gretap device for tunneling to
|
||||
another system.
|
||||
'''
|
||||
def __init__(self, session, remoteip = None, objid = None, name = None,
|
||||
policy = "ACCEPT", localip = None, ttl = 255, key = None,
|
||||
verbose = False, start = True):
|
||||
LxBrNet.__init__(self, session = session, objid = objid,
|
||||
name = name, verbose = verbose, policy = policy,
|
||||
start = False)
|
||||
self.grekey = key
|
||||
if self.grekey is None:
|
||||
self.grekey = self.session.sessionid ^ self.objid
|
||||
self.localnum = None
|
||||
self.remotenum = None
|
||||
self.remoteip = remoteip
|
||||
self.localip = localip
|
||||
self.ttl = ttl
|
||||
if remoteip is None:
|
||||
self.gretap = None
|
||||
else:
|
||||
self.gretap = GreTap(node = self, name = None, session = session,
|
||||
remoteip = remoteip, objid = None, localip = localip, ttl = ttl,
|
||||
key = self.grekey)
|
||||
if start:
|
||||
self.startup()
|
||||
|
||||
def startup(self):
|
||||
''' Creates a bridge and adds the gretap device to it.
|
||||
'''
|
||||
LxBrNet.startup(self)
|
||||
if self.gretap:
|
||||
self.attach(self.gretap)
|
||||
|
||||
def shutdown(self):
|
||||
''' Detach the gretap device and remove the bridge.
|
||||
'''
|
||||
if self.gretap:
|
||||
self.detach(self.gretap)
|
||||
self.gretap.shutdown()
|
||||
self.gretap = None
|
||||
LxBrNet.shutdown(self)
|
||||
|
||||
def addrconfig(self, addrlist):
|
||||
''' Set the remote tunnel endpoint. This is a one-time method for
|
||||
creating the GreTap device, which requires the remoteip at startup.
|
||||
The 1st address in the provided list is remoteip, 2nd optionally
|
||||
specifies localip.
|
||||
'''
|
||||
if self.gretap:
|
||||
raise ValueError, "gretap already exists for %s" % self.name
|
||||
remoteip = addrlist[0].split('/')[0]
|
||||
localip = None
|
||||
if len(addrlist) > 1:
|
||||
localip = addrlist[1].split('/')[0]
|
||||
self.gretap = GreTap(session = self.session, remoteip = remoteip,
|
||||
objid = None, name = None,
|
||||
localip = localip, ttl = self.ttl, key = self.grekey)
|
||||
self.attach(self.gretap)
|
||||
|
||||
def setkey(self, key):
|
||||
''' Set the GRE key used for the GreTap device. This needs to be set
|
||||
prior to instantiating the GreTap device (before addrconfig).
|
||||
'''
|
||||
self.grekey = key
|
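# Illustrative usage sketch (not part of the original source); addresses and
# key are hypothetical. When the remote endpoint is not known at construction
# time, set the key first, then let addrconfig() create the GreTap device:
#
#   gtb = GreTapBridge(session, objid=1000, name="tunnel1")
#   gtb.setkey(1234)
#   gtb.addrconfig(["192.168.1.2/24", "192.168.1.1/24"])   # remote, then local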
402
daemon/core/netns/vnode.py
Normal file
|
@@ -0,0 +1,402 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# authors: Tom Goff <thomas.goff@boeing.com>
|
||||
# Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
vnode.py: PyCoreNode and LxcNode classes that implement the network namespace
|
||||
virtual node.
|
||||
'''
|
||||
|
||||
import os, signal, sys, subprocess, vnodeclient, threading, string, shutil
|
||||
import random, time
|
||||
from core.api import coreapi
|
||||
from core.misc.utils import *
|
||||
from core.constants import *
|
||||
from core.coreobj import PyCoreObj, PyCoreNode, PyCoreNetIf, Position
|
||||
from core.netns.vif import VEth, TunTap
|
||||
from core.emane.nodes import EmaneNode
|
||||
|
||||
checkexec([IP_BIN])
|
||||
|
||||
class SimpleLxcNode(PyCoreNode):
|
||||
def __init__(self, session, objid = None, name = None, nodedir = None,
|
||||
verbose = False, start = True):
|
||||
PyCoreNode.__init__(self, session, objid, name, verbose=verbose,
|
||||
start=start)
|
||||
self.nodedir = nodedir
|
||||
self.ctrlchnlname = \
|
||||
os.path.abspath(os.path.join(self.session.sessiondir, self.name))
|
||||
self.vnodeclient = None
|
||||
self.pid = None
|
||||
self.up = False
|
||||
self.lock = threading.RLock()
|
||||
self._mounts = []
|
||||
|
||||
def alive(self):
|
||||
try:
|
||||
os.kill(self.pid, 0)
|
||||
except OSError:
|
||||
return False
|
||||
return True
|
||||
|
||||
def startup(self):
|
||||
''' Start a new namespace node by invoking the vnoded process that
|
||||
allocates a new namespace. Bring up the loopback device and set
|
||||
the hostname.
|
||||
'''
|
||||
if self.up:
|
||||
raise Exception, "already up"
|
||||
vnoded = ["%s/vnoded" % CORE_SBIN_DIR, "-v", "-c", self.ctrlchnlname,
|
||||
"-l", self.ctrlchnlname + ".log",
|
||||
"-p", self.ctrlchnlname + ".pid"]
|
||||
if self.nodedir:
|
||||
vnoded += ["-C", self.nodedir]
|
||||
try:
|
||||
tmp = subprocess.Popen(vnoded, stdout = subprocess.PIPE,
|
||||
env = self.session.getenviron(state=False))
|
||||
except OSError, e:
|
||||
msg = "error running vnoded command: %s (%s)" % (vnoded, e)
|
||||
self.exception(coreapi.CORE_EXCP_LEVEL_FATAL,
|
||||
"SimpleLxcNode.startup()", msg)
|
||||
raise Exception, msg
|
||||
try:
|
||||
self.pid = int(tmp.stdout.read())
|
||||
tmp.stdout.close()
|
||||
except Exception:
|
||||
msg = "vnoded failed to create a namespace; "
|
||||
msg += "check kernel support and user priveleges"
|
||||
self.exception(coreapi.CORE_EXCP_LEVEL_FATAL,
|
||||
"SimpleLxcNode.startup()", msg)
|
||||
if tmp.wait():
|
||||
raise Exception, ("command failed: %s" % vnoded)
|
||||
self.vnodeclient = vnodeclient.VnodeClient(self.name,
|
||||
self.ctrlchnlname)
|
||||
self.info("bringing up loopback interface")
|
||||
self.cmd([IP_BIN, "link", "set", "lo", "up"])
|
||||
self.info("setting hostname: %s" % self.name)
|
||||
self.cmd(["hostname", self.name])
|
||||
self.up = True
|
||||
|
||||
def shutdown(self):
|
||||
if not self.up:
|
||||
return
|
||||
while self._mounts:
|
||||
source, target = self._mounts.pop(-1)
|
||||
self.umount(target)
|
||||
#print "XXX del vnodeclient:", self.vnodeclient
|
||||
# XXX XXX XXX this causes a serious crash
|
||||
#del self.vnodeclient
|
||||
for netif in self.netifs():
|
||||
netif.shutdown()
|
||||
try:
|
||||
os.kill(self.pid, signal.SIGTERM)
|
||||
os.waitpid(self.pid, 0)
|
||||
except OSError:
|
||||
pass
|
||||
try:
|
||||
os.unlink(self.ctrlchnlname)
|
||||
except OSError:
|
||||
pass
|
||||
self._netif.clear()
|
||||
#del self.session
|
||||
# print "XXX del vnodeclient:", self.vnodeclient
|
||||
del self.vnodeclient
|
||||
self.up = False
|
||||
|
||||
def cmd(self, args, wait = True):
|
||||
return self.vnodeclient.cmd(args, wait)
|
||||
|
||||
def cmdresult(self, args):
|
||||
return self.vnodeclient.cmdresult(args)
|
||||
|
||||
def popen(self, args):
|
||||
return self.vnodeclient.popen(args)
|
||||
|
||||
def icmd(self, args):
|
||||
return self.vnodeclient.icmd(args)
|
||||
|
||||
def redircmd(self, infd, outfd, errfd, args, wait = True):
|
||||
return self.vnodeclient.redircmd(infd, outfd, errfd, args, wait)
|
||||
|
||||
def term(self, sh = "/bin/sh"):
|
||||
return self.vnodeclient.term(sh = sh)
|
||||
|
||||
def termcmdstring(self, sh = "/bin/sh"):
|
||||
return self.vnodeclient.termcmdstring(sh = sh)
|
||||
|
||||
def shcmd(self, cmdstr, sh = "/bin/sh"):
|
||||
return self.vnodeclient.shcmd(cmdstr, sh = sh)
|
||||
|
||||
def boot(self):
|
||||
pass
|
||||
|
||||
def mount(self, source, target):
|
||||
source = os.path.abspath(source)
|
||||
self.info("mounting %s at %s" % (source, target))
|
||||
try:
|
||||
shcmd = "mkdir -p '%s' && %s -n --bind '%s' '%s'" % \
|
||||
(target, MOUNT_BIN, source, target)
|
||||
self.shcmd(shcmd)
|
||||
self._mounts.append((source, target))
|
||||
except:
|
||||
self.warn("mounting failed for %s at %s" % (source, target))
|
||||
|
||||
def umount(self, target):
|
||||
self.info("unmounting '%s'" % target)
|
||||
try:
|
||||
self.cmd([UMOUNT_BIN, "-n", "-l", target])
|
||||
except:
|
||||
self.warn("unmounting failed for %s" % target)
|
||||
|
||||
def newifindex(self):
|
||||
with self.lock:
|
||||
return PyCoreNode.newifindex(self)
|
||||
|
||||
def newveth(self, ifindex = None, ifname = None, net = None):
|
||||
self.lock.acquire()
|
||||
try:
|
||||
if ifindex is None:
|
||||
ifindex = self.newifindex()
|
||||
if ifname is None:
|
||||
ifname = "eth%d" % ifindex
|
||||
sessionid = self.session.shortsessionid()
|
||||
name = "n%s.%s.%s" % (self.objid, ifindex, sessionid)
|
||||
localname = "n%s.%s.%s" % (self.objid, ifname, sessionid)
|
||||
ifclass = VEth
|
||||
veth = ifclass(node = self, name = name, localname = localname,
|
||||
mtu = 1500, net = net, start = self.up)
|
||||
if self.up:
|
||||
check_call([IP_BIN, "link", "set", veth.name,
|
||||
"netns", str(self.pid)])
|
||||
self.cmd([IP_BIN, "link", "set", veth.name, "name", ifname])
|
||||
veth.name = ifname
|
||||
try:
|
||||
self.addnetif(veth, ifindex)
|
||||
except:
|
||||
veth.shutdown()
|
||||
del veth
|
||||
raise
|
||||
return ifindex
|
||||
finally:
|
||||
self.lock.release()
|
||||
|
||||
def newtuntap(self, ifindex = None, ifname = None, net = None):
|
||||
self.lock.acquire()
|
||||
try:
|
||||
if ifindex is None:
|
||||
ifindex = self.newifindex()
|
||||
if ifname is None:
|
||||
ifname = "eth%d" % ifindex
|
||||
sessionid = self.session.shortsessionid()
|
||||
localname = "n%s.%s.%s" % (self.objid, ifindex, sessionid)
|
||||
name = ifname
|
||||
ifclass = TunTap
|
||||
tuntap = ifclass(node = self, name = name, localname = localname,
|
||||
mtu = 1500, net = net, start = self.up)
|
||||
try:
|
||||
self.addnetif(tuntap, ifindex)
|
||||
except:
|
||||
tuntap.shutdown()
|
||||
del tuntap
|
||||
raise
|
||||
return ifindex
|
||||
finally:
|
||||
self.lock.release()
|
||||
|
||||
def sethwaddr(self, ifindex, addr):
|
||||
self._netif[ifindex].sethwaddr(addr)
|
||||
if self.up:
|
||||
(status, result) = self.cmdresult([IP_BIN, "link", "set", "dev",
|
||||
self.ifname(ifindex), "address", str(addr)])
|
||||
if status:
|
||||
self.exception(coreapi.CORE_EXCP_LEVEL_ERROR,
|
||||
"SimpleLxcNode.sethwaddr()",
|
||||
"error setting MAC address %s" % str(addr))
|
||||
def addaddr(self, ifindex, addr):
|
||||
if self.up:
|
||||
self.cmd([IP_BIN, "addr", "add", str(addr),
|
||||
"dev", self.ifname(ifindex)])
|
||||
self._netif[ifindex].addaddr(addr)
|
||||
|
||||
def deladdr(self, ifindex, addr):
|
||||
try:
|
||||
self._netif[ifindex].deladdr(addr)
|
||||
except ValueError:
|
||||
self.warn("trying to delete unknown address: %s" % addr)
|
||||
if self.up:
|
||||
self.cmd([IP_BIN, "addr", "del", str(addr),
|
||||
"dev", self.ifname(ifindex)])
|
||||
|
||||
valid_deladdrtype = ("inet", "inet6", "inet6link")
|
||||
def delalladdr(self, ifindex, addrtypes = valid_deladdrtype):
|
||||
addr = self.getaddr(self.ifname(ifindex), rescan = True)
|
||||
for t in addrtypes:
|
||||
if t not in self.valid_deladdrtype:
|
||||
raise ValueError, "addr type must be in: " + \
|
||||
" ".join(self.valid_deladdrtype)
|
||||
for a in addr[t]:
|
||||
self.deladdr(ifindex, a)
|
||||
# update cached information
|
||||
self.getaddr(self.ifname(ifindex), rescan = True)
|
||||
|
||||
def ifup(self, ifindex):
|
||||
if self.up:
|
||||
self.cmd([IP_BIN, "link", "set", self.ifname(ifindex), "up"])
|
||||
|
||||
def newnetif(self, net = None, addrlist = [], hwaddr = None,
|
||||
ifindex = None, ifname = None):
|
||||
self.lock.acquire()
|
||||
try:
|
||||
if isinstance(net, EmaneNode):
|
||||
ifindex = self.newtuntap(ifindex = ifindex, ifname = ifname,
|
||||
net = net)
|
||||
# TUN/TAP is not ready for addressing yet; the device may
|
||||
# take some time to appear, and installing it into a
|
||||
# namespace after it has been bound removes addressing;
|
||||
# save addresses with the interface now
|
||||
self.attachnet(ifindex, net)
|
||||
netif = self.netif(ifindex)
|
||||
netif.sethwaddr(hwaddr)
|
||||
for addr in maketuple(addrlist):
|
||||
netif.addaddr(addr)
|
||||
return ifindex
|
||||
else:
|
||||
ifindex = self.newveth(ifindex = ifindex, ifname = ifname,
|
||||
net = net)
|
||||
if net is not None:
|
||||
self.attachnet(ifindex, net)
|
||||
if hwaddr:
|
||||
self.sethwaddr(ifindex, hwaddr)
|
||||
for addr in maketuple(addrlist):
|
||||
self.addaddr(ifindex, addr)
|
||||
self.ifup(ifindex)
|
||||
return ifindex
|
||||
finally:
|
||||
self.lock.release()
|
||||
|
||||
def connectnode(self, ifname, othernode, otherifname):
|
||||
tmplen = 8
|
||||
tmp1 = "tmp." + "".join([random.choice(string.ascii_lowercase)
|
||||
for x in xrange(tmplen)])
|
||||
tmp2 = "tmp." + "".join([random.choice(string.ascii_lowercase)
|
||||
for x in xrange(tmplen)])
|
||||
check_call([IP_BIN, "link", "add", "name", tmp1,
|
||||
"type", "veth", "peer", "name", tmp2])
|
||||
|
||||
check_call([IP_BIN, "link", "set", tmp1, "netns", str(self.pid)])
|
||||
self.cmd([IP_BIN, "link", "set", tmp1, "name", ifname])
|
||||
self.addnetif(PyCoreNetIf(self, ifname), self.newifindex())
|
||||
|
||||
check_call([IP_BIN, "link", "set", tmp2, "netns", str(othernode.pid)])
|
||||
othernode.cmd([IP_BIN, "link", "set", tmp2, "name", otherifname])
|
||||
othernode.addnetif(PyCoreNetIf(othernode, otherifname),
|
||||
othernode.newifindex())
|
||||
|
||||
def addfile(self, srcname, filename):
|
||||
shcmd = "mkdir -p $(dirname '%s') && mv '%s' '%s' && sync" % \
|
||||
(filename, srcname, filename)
|
||||
self.shcmd(shcmd)
|
||||
|
||||
def getaddr(self, ifname, rescan = False):
|
||||
return self.vnodeclient.getaddr(ifname = ifname, rescan = rescan)
|
||||
|
||||
def netifstats(self, ifname = None):
|
||||
return self.vnodeclient.netifstats(ifname = ifname)
|
||||
|
||||
|
||||
class LxcNode(SimpleLxcNode):
|
||||
def __init__(self, session, objid = None, name = None,
|
||||
nodedir = None, bootsh = "boot.sh", verbose = False,
|
||||
start = True):
|
||||
super(LxcNode, self).__init__(session = session, objid = objid,
|
||||
name = name, nodedir = nodedir,
|
||||
verbose = verbose, start = start)
|
||||
self.bootsh = bootsh
|
||||
if start:
|
||||
self.startup()
|
||||
|
||||
def boot(self):
|
||||
self.session.services.bootnodeservices(self)
|
||||
|
||||
def validate(self):
|
||||
self.session.services.validatenodeservices(self)
|
||||
|
||||
def startup(self):
|
||||
self.lock.acquire()
|
||||
try:
|
||||
self.makenodedir()
|
||||
super(LxcNode, self).startup()
|
||||
self.privatedir("/var/run")
|
||||
self.privatedir("/var/log")
|
||||
except OSError, e:
|
||||
self.warn("Error with LxcNode.startup(): %s" % e)
|
||||
self.exception(coreapi.CORE_EXCP_LEVEL_ERROR,
|
||||
"LxcNode.startup()", "%s" % e)
|
||||
finally:
|
||||
self.lock.release()
|
||||
|
||||
def shutdown(self):
|
||||
if not self.up:
|
||||
return
|
||||
self.lock.acquire()
|
||||
# services are instead stopped when session enters datacollect state
|
||||
#self.session.services.stopnodeservices(self)
|
||||
try:
|
||||
super(LxcNode, self).shutdown()
|
||||
finally:
|
||||
self.rmnodedir()
|
||||
self.lock.release()
|
||||
|
||||
def privatedir(self, path):
|
||||
if path[0] != "/":
|
||||
raise ValueError, "path not fully qualified: " + path
|
||||
hostpath = os.path.join(self.nodedir, path[1:].replace("/", "."))
|
||||
try:
|
||||
os.mkdir(hostpath)
|
||||
except OSError:
|
||||
pass
|
||||
except Exception, e:
|
||||
raise Exception, e
|
||||
self.mount(hostpath, path)
|
||||
|
||||
def hostfilename(self, filename):
|
||||
''' Return the name of a node's file on the host filesystem.
|
||||
'''
|
||||
dirname, basename = os.path.split(filename)
|
||||
if not basename:
|
||||
raise ValueError, "no basename for filename: " + filename
|
||||
if dirname and dirname[0] == "/":
|
||||
dirname = dirname[1:]
|
||||
dirname = dirname.replace("/", ".")
|
||||
dirname = os.path.join(self.nodedir, dirname)
|
||||
return os.path.join(dirname, basename)
|
||||
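# Illustrative examples (not in the original source) of the mapping performed
# by hostfilename() above, assuming nodedir = "/tmp/pycore.1/n1.conf":
#   "/etc/hosts"                -> /tmp/pycore.1/n1.conf/etc/hosts
#   "/usr/local/etc/some.conf"  -> /tmp/pycore.1/n1.conf/usr.local.etc/some.conf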
|
||||
def opennodefile(self, filename, mode = "w"):
|
||||
hostfilename = self.hostfilename(filename)
|
||||
dirname, basename = os.path.split(hostfilename)
|
||||
if not os.path.isdir(dirname):
|
||||
os.makedirs(dirname, mode = 0755)
|
||||
return open(hostfilename, mode)
|
||||
|
||||
def nodefile(self, filename, contents, mode = 0644):
|
||||
f = self.opennodefile(filename, "w")
|
||||
f.write(contents)
|
||||
os.chmod(f.name, mode)
|
||||
f.close()
|
||||
self.info("created nodefile: '%s'; mode: 0%o" % (f.name, mode))
|
||||
|
||||
def nodefilecopy(self, filename, srcfilename, mode = None):
|
||||
''' Copy a file to a node, following symlinks and preserving metadata.
|
||||
Change file mode if specified.
|
||||
'''
|
||||
hostfilename = self.hostfilename(filename)
|
||||
shutil.copy2(srcfilename, hostfilename)
|
||||
if mode is not None:
|
||||
os.chmod(hostfilename, mode)
|
||||
self.info("copied nodefile: '%s'; mode: %s" % (hostfilename, mode))
|
||||
|
||||
|
221
daemon/core/netns/vnodeclient.py
Normal file
|
@@ -0,0 +1,221 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Tom Goff <thomas.goff@boeing.com>
|
||||
#
|
||||
'''
|
||||
vnodeclient.py: implementation of the VnodeClient class for issuing commands
|
||||
over a control channel to the vnoded process running in a network namespace.
|
||||
The control channel can be accessed via calls to the vcmd Python module or
|
||||
by invoking the vcmd shell command.
|
||||
'''
|
||||
|
||||
import os, stat, sys
|
||||
from core.constants import *
|
||||
|
||||
USE_VCMD_MODULE = True
|
||||
|
||||
if USE_VCMD_MODULE:
|
||||
import vcmd
|
||||
else:
|
||||
import subprocess
|
||||
|
||||
VCMD = os.path.join(CORE_SBIN_DIR, "vcmd")
|
||||
|
||||
class VnodeClient(object):
|
||||
def __init__(self, name, ctrlchnlname):
|
||||
self.name = name
|
||||
self.ctrlchnlname = ctrlchnlname
|
||||
if USE_VCMD_MODULE:
|
||||
self.cmdchnl = vcmd.VCmd(self.ctrlchnlname)
|
||||
else:
|
||||
self.cmdchnl = None
|
||||
self._addr = {}
|
||||
|
||||
def warn(self, msg):
|
||||
print >> sys.stderr, "%s: %s" % (self.name, msg)
|
||||
|
||||
def connected(self):
|
||||
if USE_VCMD_MODULE:
|
||||
return self.cmdchnl.connected()
|
||||
else:
|
||||
return True
|
||||
|
||||
def cmd(self, args, wait = True):
|
||||
''' Execute a command on a node and return the status (return code).
|
||||
'''
|
||||
if USE_VCMD_MODULE:
|
||||
if not self.cmdchnl.connected():
|
||||
raise ValueError, "self.cmdchnl not connected"
|
||||
tmp = self.cmdchnl.qcmd(args)
|
||||
if not wait:
|
||||
return tmp
|
||||
tmp = tmp.wait()
|
||||
else:
|
||||
if wait:
|
||||
mode = os.P_WAIT
|
||||
else:
|
||||
mode = os.P_NOWAIT
|
||||
tmp = os.spawnlp(mode, VCMD, VCMD, "-c",
|
||||
self.ctrlchnlname, "-q", "--", *args)
|
||||
if not wait:
|
||||
return tmp
|
||||
if tmp:
|
||||
self.warn("cmd exited with status %s: %s" % (tmp, str(args)))
|
||||
return tmp
|
||||
|
||||
def cmdresult(self, args):
|
||||
''' Execute a command on a node and return a tuple containing the
|
||||
exit status and result string. stderr output
|
||||
is folded into the stdout result string.
|
||||
'''
|
||||
cmdid, cmdin, cmdout, cmderr = self.popen(args)
|
||||
result = cmdout.read()
|
||||
result += cmderr.read()
|
||||
cmdin.close()
|
||||
cmdout.close()
|
||||
cmderr.close()
|
||||
status = cmdid.wait()
|
||||
return (status, result)
|
||||
|
||||
def popen(self, args):
|
||||
if USE_VCMD_MODULE:
|
||||
if not self.cmdchnl.connected():
|
||||
raise ValueError, "self.cmdchnl not connected"
|
||||
return self.cmdchnl.popen(args)
|
||||
else:
|
||||
cmd = [VCMD, "-c", self.ctrlchnlname, "--"]
|
||||
cmd.extend(args)
|
||||
tmp = subprocess.Popen(cmd, stdin = subprocess.PIPE,
|
||||
stdout = subprocess.PIPE,
|
||||
stderr = subprocess.PIPE)
|
||||
return tmp, tmp.stdin, tmp.stdout, tmp.stderr
|
||||
|
||||
def icmd(self, args):
|
||||
return os.spawnlp(os.P_WAIT, VCMD, VCMD, "-c", self.ctrlchnlname,
|
||||
"--", *args)
|
||||
|
||||
def redircmd(self, infd, outfd, errfd, args, wait = True):
|
||||
'''
|
||||
Execute a command on a node with standard input, output, and
|
||||
error redirected according to the given file descriptors.
|
||||
'''
|
||||
if not USE_VCMD_MODULE:
|
||||
raise NotImplementedError
|
||||
if not self.cmdchnl.connected():
|
||||
raise ValueError, "self.cmdchnl not connected"
|
||||
tmp = self.cmdchnl.redircmd(infd, outfd, errfd, args)
|
||||
if not wait:
|
||||
return tmp
|
||||
tmp = tmp.wait()
|
||||
if tmp:
|
||||
self.warn("cmd exited with status %s: %s" % (tmp, str(args)))
|
||||
return tmp
|
||||
|
||||
def term(self, sh = "/bin/sh"):
|
||||
return os.spawnlp(os.P_NOWAIT, "xterm", "xterm", "-ut",
|
||||
"-title", self.name, "-e",
|
||||
VCMD, "-c", self.ctrlchnlname, "--", sh)
|
||||
|
||||
def termcmdstring(self, sh = "/bin/sh"):
|
||||
return "%s -c %s -- %s" % (VCMD, self.ctrlchnlname, sh)
|
||||
|
||||
def shcmd(self, cmdstr, sh = "/bin/sh"):
|
||||
return self.cmd([sh, "-c", cmdstr])
|
||||
|
||||
def getaddr(self, ifname, rescan = False):
|
||||
if ifname in self._addr and not rescan:
|
||||
return self._addr[ifname]
|
||||
tmp = {"ether": [], "inet": [], "inet6": [], "inet6link": []}
|
||||
cmd = [IP_BIN, "addr", "show", "dev", ifname]
|
||||
cmdid, cmdin, cmdout, cmderr = self.popen(cmd)
|
||||
cmdin.close()
|
||||
for line in cmdout:
|
||||
line = line.strip().split()
|
||||
if line[0] == "link/ether":
|
||||
tmp["ether"].append(line[1])
|
||||
elif line[0] == "inet":
|
||||
tmp["inet"].append(line[1])
|
||||
elif line[0] == "inet6":
|
||||
if line[3] == "global":
|
||||
tmp["inet6"].append(line[1])
|
||||
elif line[3] == "link":
|
||||
tmp["inet6link"].append(line[1])
|
||||
else:
|
||||
self.warn("unknown scope: %s" % line[3])
|
||||
else:
|
||||
pass
|
||||
err = cmderr.read()
|
||||
cmdout.close()
|
||||
cmderr.close()
|
||||
status = cmdid.wait()
|
||||
if status:
|
||||
self.warn("nonzero exist status (%s) for cmd: %s" % (status, cmd))
|
||||
if err:
|
||||
self.warn("error output: %s" % err)
|
||||
self._addr[ifname] = tmp
|
||||
return tmp
|
||||
|
||||
def netifstats(self, ifname = None):
|
||||
stats = {}
|
||||
cmd = ["cat", "/proc/net/dev"]
|
||||
cmdid, cmdin, cmdout, cmderr = self.popen(cmd)
|
||||
cmdin.close()
|
||||
# ignore first line
|
||||
cmdout.readline()
|
||||
# second line has count names
|
||||
tmp = cmdout.readline().strip().split("|")
|
||||
rxkeys = tmp[1].split()
|
||||
txkeys = tmp[2].split()
|
||||
for line in cmdout:
|
||||
line = line.strip().split()
|
||||
devname, tmp = line[0].split(":")
|
||||
if tmp:
|
||||
line.insert(1, tmp)
|
||||
stats[devname] = {"rx": {}, "tx": {}}
|
||||
field = 1
|
||||
for count in rxkeys:
|
||||
stats[devname]["rx"][count] = int(line[field])
|
||||
field += 1
|
||||
for count in txkeys:
|
||||
stats[devname]["tx"][count] = int(line[field])
|
||||
field += 1
|
||||
err = cmderr.read()
|
||||
cmdout.close()
|
||||
cmderr.close()
|
||||
status = cmdid.wait()
|
||||
if status:
|
||||
self.warn("nonzero exist status (%s) for cmd: %s" % (status, cmd))
|
||||
if err:
|
||||
self.warn("error output: %s" % err)
|
||||
if ifname is not None:
|
||||
return stats[ifname]
|
||||
else:
|
||||
return stats
|
||||
|
||||
def createclients(sessiondir, clientcls = VnodeClient,
|
||||
cmdchnlfilterfunc = None):
|
||||
direntries = map(lambda x: os.path.join(sessiondir, x),
|
||||
os.listdir(sessiondir))
|
||||
cmdchnls = filter(lambda x: stat.S_ISSOCK(os.stat(x).st_mode), direntries)
|
||||
if cmdchnlfilterfunc:
|
||||
cmdchnls = filter(cmdchnlfilterfunc, cmdchnls)
|
||||
cmdchnls.sort()
|
||||
return map(lambda x: clientcls(os.path.basename(x), x), cmdchnls)
|
||||
|
||||
def createremoteclients(sessiondir, clientcls = VnodeClient,
|
||||
filterfunc = None):
|
||||
''' Creates remote VnodeClients, for nodes emulated on other machines. The
|
||||
session.Broker writes a n1.conf/server file having the server's info.
|
||||
'''
|
||||
direntries = map(lambda x: os.path.join(sessiondir, x),
|
||||
os.listdir(sessiondir))
|
||||
nodedirs = filter(lambda x: stat.S_ISDIR(os.stat(x).st_mode), direntries)
|
||||
nodedirs = filter(lambda x: os.path.exists(os.path.join(x, "server")),
|
||||
nodedirs)
|
||||
if filterfunc:
|
||||
nodedirs = filter(filterfunc, nodedirs)
|
||||
nodedirs.sort()
|
||||
return map(lambda x: clientcls(x), nodedirs)
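Illustrative usage sketch, not part of this file: assuming this module is importable as core.netns.vnodeclient and that a running session directory such as /tmp/pycore.12345 exists (both are example assumptions), the helpers above can be driven directly:

from core.netns.vnodeclient import createclients

clients = createclients("/tmp/pycore.12345")
for c in clients:
    if not c.connected():
        continue
    # run a command on each node and collect its combined stdout/stderr
    (status, output) = c.cmdresult(["ip", "addr", "show"])
    print "%s exited %s:\n%s" % (c.name, status, output)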
|
0
daemon/core/phys/__init__.py
Normal file
268
daemon/core/phys/pnodes.py
Normal file
@@ -0,0 +1,268 @@
#
|
||||
# CORE
|
||||
# Copyright (c)2011-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
''' PhysicalNode class for including real systems in the emulated network.
|
||||
'''
|
||||
import os, threading, subprocess
|
||||
|
||||
from core.misc.ipaddr import *
|
||||
from core.misc.utils import *
|
||||
from core.constants import *
|
||||
from core.api import coreapi
|
||||
from core.coreobj import PyCoreNode, PyCoreNetIf
|
||||
from core.emane.nodes import EmaneNode
|
||||
if os.uname()[0] == "Linux":
|
||||
from core.netns.vnet import LxBrNet
|
||||
from core.netns.vif import GreTap
|
||||
elif os.uname()[0] == "FreeBSD":
|
||||
from core.bsd.vnet import NetgraphNet
|
||||
|
||||
|
||||
class PhysicalNode(PyCoreNode):
|
||||
def __init__(self, session, objid = None, name = None,
|
||||
nodedir = None, verbose = False, start = True):
|
||||
PyCoreNode.__init__(self, session, objid, name, verbose=verbose,
|
||||
start=start)
|
||||
self.nodedir = nodedir
|
||||
self.up = start
|
||||
self.lock = threading.RLock()
|
||||
self._mounts = []
|
||||
if start:
|
||||
self.startup()
|
||||
|
||||
def boot(self):
|
||||
self.session.services.bootnodeservices(self)
|
||||
|
||||
def validate(self):
|
||||
self.session.services.validatenodeservices(self)
|
||||
|
||||
def startup(self):
|
||||
self.lock.acquire()
|
||||
try:
|
||||
self.makenodedir()
|
||||
#self.privatedir("/var/run")
|
||||
#self.privatedir("/var/log")
|
||||
except OSError, e:
|
||||
self.exception(coreapi.CORE_EXCP_LEVEL_ERROR,
|
||||
"PhysicalNode.startup()", e)
|
||||
finally:
|
||||
self.lock.release()
|
||||
|
||||
def shutdown(self):
|
||||
if not self.up:
|
||||
return
|
||||
self.lock.acquire()
|
||||
while self._mounts:
|
||||
source, target = self._mounts.pop(-1)
|
||||
self.umount(target)
|
||||
for netif in self.netifs():
|
||||
netif.shutdown()
|
||||
self.rmnodedir()
|
||||
self.lock.release()
|
||||
|
||||
|
||||
def termcmdstring(self, sh = "/bin/sh"):
|
||||
''' The broker will add the appropriate SSH command to open a terminal
|
||||
on this physical node.
|
||||
'''
|
||||
return sh
|
||||
|
||||
def cmd(self, args, wait = True):
|
||||
''' run a command on the physical node
|
||||
'''
|
||||
os.chdir(self.nodedir)
status = 0
try:
if wait:
# check_call() raises CalledProcessError when the command
# exits with a nonzero status
status = subprocess.check_call(args)
else:
subprocess.Popen(args)
except subprocess.CalledProcessError, e:
status = e.returncode
self.warn("cmd exited with status %s: %s" % (status, str(args)))
return status
|
||||
|
||||
def cmdresult(self, args):
|
||||
''' run a command on the physical node and get the result
|
||||
'''
|
||||
os.chdir(self.nodedir)
|
||||
# in Python 2.7 we can use subprocess.check_output() here
|
||||
tmp = subprocess.Popen(args, stdin = subprocess.PIPE,
|
||||
stdout = subprocess.PIPE,
|
||||
stderr = subprocess.PIPE)
|
||||
result = tmp.stdout.read()
|
||||
result += tmp.stderr.read()
|
||||
tmp.stdin.close()
|
||||
tmp.stdout.close()
|
||||
tmp.stderr.close()
|
||||
status = tmp.wait()
|
||||
return (status, result)
|
||||
|
||||
def shcmd(self, cmdstr, sh = "/bin/sh"):
|
||||
return self.cmd([sh, "-c", cmdstr])
|
||||
|
||||
def sethwaddr(self, ifindex, addr):
|
||||
''' same as SimpleLxcNode.sethwaddr()
|
||||
'''
|
||||
self._netif[ifindex].sethwaddr(addr)
|
||||
ifname = self.ifname(ifindex)
|
||||
if self.up:
|
||||
(status, result) = self.cmdresult([IP_BIN, "link", "set", "dev",
|
||||
ifname, "address", str(addr)])
|
||||
if status:
|
||||
self.exception(coreapi.CORE_EXCP_LEVEL_ERROR,
|
||||
"PhysicalNode.sethwaddr()",
|
||||
"error setting MAC address %s" % str(addr))
|
||||
|
||||
def addaddr(self, ifindex, addr):
|
||||
''' same as SimpleLxcNode.addaddr()
|
||||
'''
|
||||
if self.up:
|
||||
self.cmd([IP_BIN, "addr", "add", str(addr),
|
||||
"dev", self.ifname(ifindex)])
|
||||
self._netif[ifindex].addaddr(addr)
|
||||
|
||||
def deladdr(self, ifindex, addr):
|
||||
''' same as SimpleLxcNode.deladdr()
|
||||
'''
|
||||
try:
|
||||
self._netif[ifindex].deladdr(addr)
|
||||
except ValueError:
|
||||
self.warn("trying to delete unknown address: %s" % addr)
|
||||
if self.up:
|
||||
self.cmd([IP_BIN, "addr", "del", str(addr),
|
||||
"dev", self.ifname(ifindex)])
|
||||
|
||||
def adoptnetif(self, netif, ifindex, hwaddr, addrlist):
|
||||
''' The broker builds a GreTap tunnel device to this physical node.
|
||||
When a link message is received linking this node to another part of
|
||||
the emulation, no new interface is created; instead, adopt the
|
||||
GreTap netif as the node interface.
|
||||
'''
|
||||
netif.name = "gt%d" % ifindex
|
||||
netif.node = self
|
||||
self.addnetif(netif, ifindex)
|
||||
# use a more reasonable name, e.g. "gt0" instead of "gt.56286.150"
|
||||
if self.up:
|
||||
self.cmd([IP_BIN, "link", "set", "dev", netif.localname, "down"])
|
||||
self.cmd([IP_BIN, "link", "set", netif.localname, "name", netif.name])
|
||||
netif.localname = netif.name
|
||||
if hwaddr:
|
||||
self.sethwaddr(ifindex, hwaddr)
|
||||
for addr in maketuple(addrlist):
|
||||
self.addaddr(ifindex, addr)
|
||||
if self.up:
|
||||
self.cmd([IP_BIN, "link", "set", "dev", netif.localname, "up"])
|
||||
|
||||
def linkconfig(self, netif, bw = None, delay = None,
|
||||
loss = None, duplicate = None, jitter = None, netif2 = None):
|
||||
''' Apply tc queing disciplines using LxBrNet.linkconfig()
|
||||
'''
|
||||
if os.uname()[0] == "Linux":
|
||||
netcls = LxBrNet
|
||||
elif os.uname()[0] == "FreeBSD":
|
||||
netcls = NetgraphNet
|
||||
else:
|
||||
raise NotImplementedError, "unsupported platform"
|
||||
# borrow the tc qdisc commands from LxBrNet.linkconfig()
|
||||
tmp = netcls(session=self.session, start=False)
|
||||
tmp.up = True
|
||||
tmp.linkconfig(netif, bw=bw, delay=delay, loss=loss,
|
||||
duplicate=duplicate, jitter=jitter, netif2=netif2)
|
||||
del tmp
|
||||
|
||||
def newifindex(self):
|
||||
self.lock.acquire()
|
||||
try:
|
||||
while self.ifindex in self._netif:
|
||||
self.ifindex += 1
|
||||
ifindex = self.ifindex
|
||||
self.ifindex += 1
|
||||
return ifindex
|
||||
finally:
|
||||
self.lock.release()
|
||||
|
||||
def newnetif(self, net = None, addrlist = [], hwaddr = None,
|
||||
ifindex = None, ifname = None):
|
||||
if self.up and net is None:
|
||||
raise NotImplementedError
|
||||
if ifindex is None:
|
||||
ifindex = self.newifindex()
|
||||
|
||||
if self.up:
|
||||
# this is reached when this node is linked to a network node
|
||||
# tunnel to net not built yet, so build it now and adopt it
|
||||
gt = self.session.broker.addnettunnel(net.objid)
|
||||
if gt is None or len(gt) != 1:
|
||||
self.session.warn("Error building tunnel from PhysicalNode."
|
||||
"newnetif()")
|
||||
gt = gt[0]
|
||||
net.detach(gt)
|
||||
self.adoptnetif(gt, ifindex, hwaddr, addrlist)
|
||||
return ifindex
|
||||
|
||||
# this is reached when configuring services (self.up=False)
|
||||
if ifname is None:
|
||||
ifname = "gt%d" % ifindex
|
||||
netif = GreTap(node = self, name = ifname, session = self.session,
|
||||
start = False)
|
||||
self.adoptnetif(netif, ifindex, hwaddr, addrlist)
|
||||
return ifindex
|
||||
|
||||
|
||||
def privatedir(self, path):
|
||||
if path[0] != "/":
|
||||
raise ValueError, "path not fully qualified: " + path
|
||||
hostpath = os.path.join(self.nodedir, path[1:].replace("/", "."))
|
||||
try:
|
||||
os.mkdir(hostpath)
|
||||
except OSError:
|
||||
pass
|
||||
except Exception:
raise
|
||||
self.mount(hostpath, path)
|
||||
|
||||
def mount(self, source, target):
|
||||
source = os.path.abspath(source)
|
||||
self.info("mounting %s at %s" % (source, target))
|
||||
try:
|
||||
os.makedirs(target)
|
||||
except OSError:
|
||||
pass
|
||||
try:
|
||||
self.cmd([MOUNT_BIN, "--bind", source, target])
|
||||
self._mounts.append((source, target))
|
||||
except:
|
||||
self.warn("mounting failed for %s at %s" % (source, target))
|
||||
|
||||
def umount(self, target):
|
||||
self.info("unmounting '%s'" % target)
|
||||
try:
|
||||
self.cmd([UMOUNT_BIN, "-l", target])
|
||||
except:
|
||||
self.warn("unmounting failed for %s" % target)
|
||||
|
||||
def opennodefile(self, filename, mode = "w"):
|
||||
dirname, basename = os.path.split(filename)
|
||||
if not basename:
|
||||
raise ValueError, "no basename for filename: " + filename
|
||||
if dirname and dirname[0] == "/":
|
||||
dirname = dirname[1:]
|
||||
dirname = dirname.replace("/", ".")
|
||||
dirname = os.path.join(self.nodedir, dirname)
|
||||
if not os.path.isdir(dirname):
|
||||
os.makedirs(dirname, mode = 0755)
|
||||
hostfilename = os.path.join(dirname, basename)
|
||||
return open(hostfilename, mode)
|
||||
|
||||
def nodefile(self, filename, contents, mode = 0644):
|
||||
f = self.opennodefile(filename, "w")
|
||||
f.write(contents)
|
||||
os.chmod(f.name, mode)
|
||||
f.close()
|
||||
self.info("created nodefile: '%s'; mode: 0%o" % (f.name, mode))
|
||||
|
||||
|
27
daemon/core/pycore.py
Normal file
@@ -0,0 +1,27 @@
# Copyright (c)2010-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
|
||||
"""
|
||||
This is a convenience module that imports a set of platform-dependent
|
||||
defaults.
|
||||
"""
|
||||
|
||||
from misc.utils import ensurepath
|
||||
ensurepath(["/sbin", "/bin", "/usr/sbin", "/usr/bin"])
|
||||
del ensurepath
|
||||
|
||||
from session import Session
|
||||
|
||||
import os
|
||||
|
||||
if os.uname()[0] == "Linux":
|
||||
from netns import nodes
|
||||
try:
|
||||
from xen import xen
|
||||
except ImportError:
|
||||
#print "Xen support disabled."
|
||||
pass
|
||||
elif os.uname()[0] == "FreeBSD":
|
||||
from bsd import nodes
|
||||
from phys import pnodes
|
||||
del os
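A minimal scripting sketch built on this convenience module; it mirrors the CORE sample scripts, so the node classes, Session arguments, and addresses are assumptions taken from those examples rather than from this file:

from core import pycore

session = pycore.Session(persistent=True)
hub = session.addobj(cls=pycore.nodes.HubNode, name="hub1")
n1 = session.addobj(cls=pycore.nodes.CoreNode, name="n1")
n1.newnetif(hub, ["10.0.0.1/24"])
n2 = session.addobj(cls=pycore.nodes.CoreNode, name="n2")
n2.newnetif(hub, ["10.0.0.2/24"])
# interactive command; output goes to the terminal
n1.icmd(["ping", "-c", "3", "10.0.0.2"])
session.shutdown()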
|
202
daemon/core/sdt.py
Normal file
@@ -0,0 +1,202 @@
#
|
||||
# CORE
|
||||
# Copyright (c)2012-2013 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
sdt.py: Scripted Display Tool (SDT3D) helper
|
||||
'''
|
||||
|
||||
from core.constants import *
|
||||
from core.api import coreapi
|
||||
from coreobj import PyCoreNet, PyCoreObj
|
||||
from core.netns import nodes
|
||||
import socket
|
||||
|
||||
class Sdt(object):
|
||||
''' Helper class for exporting session objects to NRL's SDT3D.
|
||||
The connect() method initializes the display, and can be invoked
|
||||
when a node position or link has changed.
|
||||
'''
|
||||
DEFAULT_SDT_PORT = 5000
|
||||
# default altitude (in meters) for flyto view
|
||||
DEFAULT_ALT = 2500
|
||||
# TODO: read in user's nodes.conf here; below are default node types
|
||||
# from the GUI
|
||||
DEFAULT_SPRITES = [('router', 'router.gif'), ('host', 'host.gif'),
|
||||
('PC', 'pc.gif'), ('mdr', 'mdr.gif'),
|
||||
('prouter', 'router_green.gif'), ('xen', 'xen.gif'),
|
||||
('hub', 'hub.gif'), ('lanswitch','lanswitch.gif'),
|
||||
('wlan', 'wlan.gif'), ('rj45','rj45.gif'),
|
||||
('tunnel','tunnel.gif'),
|
||||
]
|
||||
|
||||
def __init__(self, session):
|
||||
self.session = session
|
||||
self.sock = None
|
||||
self.connected = False
|
||||
self.showerror = True
|
||||
self.verbose = self.session.getcfgitembool('verbose', False)
|
||||
self.address = ("127.0.0.1", self.DEFAULT_SDT_PORT)
|
||||
|
||||
def is_enabled(self):
|
||||
if not hasattr(self.session.options, 'enablesdt'):
|
||||
return False
|
||||
if self.session.options.enablesdt == '1':
|
||||
return True
|
||||
return False
|
||||
|
||||
def connect(self, flags=0):
|
||||
if not self.is_enabled():
|
||||
return False
|
||||
if self.connected:
|
||||
return True
|
||||
if self.showerror:
|
||||
self.session.info("connecting to SDT at %s:%s" % self.address)
|
||||
if self.sock is None:
|
||||
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
|
||||
try:
|
||||
self.sock.connect(self.address)
|
||||
except Exception, e:
|
||||
self.session.warn("SDT socket connect error: %s" % e)
|
||||
return False
|
||||
if not self.initialize():
|
||||
return False
|
||||
self.connected = True
|
||||
# refresh all objects in SDT3D when connecting after session start
|
||||
if not flags & coreapi.CORE_API_ADD_FLAG:
|
||||
if not self.sendobjs():
|
||||
return False
|
||||
return True
|
||||
|
||||
def initialize(self):
|
||||
''' Load icon sprites, and fly to the reference point location on
|
||||
the virtual globe.
|
||||
'''
|
||||
if not self.cmd('path "%s/icons/normal"' % CORE_DATA_DIR):
|
||||
return False
|
||||
# send node type to icon mappings
|
||||
for (type, icon) in self.DEFAULT_SPRITES:
|
||||
if not self.cmd('sprite %s image %s' % (type, icon)):
|
||||
return False
|
||||
(lat, long) = self.session.location.refgeo[:2]
|
||||
return self.cmd('flyto %.6f,%.6f,%d' % (long, lat, self.DEFAULT_ALT))
|
||||
|
||||
def disconnect(self):
|
||||
try:
|
||||
self.sock.close()
|
||||
except:
|
||||
pass
|
||||
self.sock = None
|
||||
self.connected = False
|
||||
|
||||
def shutdown(self):
|
||||
''' Invoked from Session.shutdown() and Session.checkshutdown().
|
||||
'''
|
||||
# TODO: clear SDT display here?
|
||||
self.disconnect()
|
||||
self.showerror = True
|
||||
|
||||
def cmd(self, cmdstr):
|
||||
''' Send an SDT command over a UDP socket. socket.sendall() is used
|
||||
as opposed to socket.sendto() because an exception is raised when there
|
||||
is no socket listener.
|
||||
'''
|
||||
if self.sock is None:
|
||||
return False
|
||||
try:
|
||||
if self.verbose:
|
||||
self.session.info("sdt: %s" % cmdstr)
|
||||
self.sock.sendall("%s\n" % cmdstr)
|
||||
return True
|
||||
except Exception, e:
|
||||
if self.showerror:
|
||||
self.session.warn("SDT connection error: %s" % e)
|
||||
self.showerror = False
|
||||
self.connected = False
|
||||
return False
|
||||
|
||||
def updatenode(self, node, flags, x, y, z):
|
||||
''' Node is updated from a Node Message or mobility script.
|
||||
'''
|
||||
if node is None:
|
||||
return
|
||||
if not self.connect():
|
||||
return
|
||||
if flags & coreapi.CORE_API_DEL_FLAG:
|
||||
self.cmd('delete node,%d' % node.objid)
|
||||
return
|
||||
(lat, long, alt) = self.session.location.getgeo(x, y, z)
|
||||
pos = "pos %.6f,%.6f,%.6f" % (long, lat, alt)
|
||||
if flags & coreapi.CORE_API_ADD_FLAG:
|
||||
type = node.type
|
||||
if node.icon is not None:
|
||||
type = node.name
|
||||
self.cmd('sprite %s image %s' % (type, node.icon))
|
||||
self.cmd('node %d type %s label on,"%s" %s' % \
|
||||
(node.objid, type, node.name, pos))
|
||||
else:
|
||||
self.cmd('node %d %s' % (node.objid, pos))
|
||||
|
||||
def updatenodegeo(self, node, lat, long, alt):
|
||||
''' Node is updated upon receiving an EMANE Location Event.
|
||||
'''
|
||||
if node is None:
|
||||
return
|
||||
if not self.connect():
|
||||
return
|
||||
pos = "pos %.6f,%.6f,%.6f" % (long, lat, alt)
|
||||
self.cmd('node %d %s' % (node.objid, pos))
|
||||
|
||||
def updatelink(self, node1num, node2num, flags, wireless=False):
|
||||
''' Link is updated from a Link Message or by a wireless model.
|
||||
'''
|
||||
if node1num is None or node2num is None:
|
||||
return
|
||||
if not self.connect():
|
||||
return
|
||||
if flags & coreapi.CORE_API_DEL_FLAG:
|
||||
self.cmd('delete link,%s,%s' % (node1num, node2num))
|
||||
elif flags & coreapi.CORE_API_ADD_FLAG:
|
||||
attr = ""
|
||||
if wireless:
|
||||
attr = " line green"
|
||||
self.cmd('link %s,%s%s' % (node1num, node2num, attr))
|
||||
|
||||
def sendobjs(self):
|
||||
''' Session has already started, and the SDT3D GUI later connects.
|
||||
Send all node and link objects for display. Otherwise, nodes and links
|
||||
will only be drawn when they have been updated.
|
||||
'''
|
||||
nets = []
|
||||
with self.session._objslock:
|
||||
for obj in self.session.objs():
|
||||
if isinstance(obj, PyCoreNet):
|
||||
nets.append(obj)
|
||||
if not isinstance(obj, PyCoreObj):
|
||||
continue
|
||||
(x, y, z) = obj.getposition()
|
||||
if x is None or y is None:
|
||||
continue
|
||||
self.updatenode(obj, coreapi.CORE_API_ADD_FLAG, x, y, z)
|
||||
for net in nets:
|
||||
# use tolinkmsgs() to handle various types of links
|
||||
msgs = net.tolinkmsgs(flags = coreapi.CORE_API_ADD_FLAG)
|
||||
for msg in msgs:
|
||||
msghdr = msg[:coreapi.CoreMessage.hdrsiz]
|
||||
flags = coreapi.CoreMessage.unpackhdr(msghdr)[1]
|
||||
m = coreapi.CoreLinkMessage(flags, msghdr,
|
||||
msg[coreapi.CoreMessage.hdrsiz:])
|
||||
n1num = m.gettlv(coreapi.CORE_TLV_LINK_N1NUMBER)
|
||||
n2num = m.gettlv(coreapi.CORE_TLV_LINK_N2NUMBER)
|
||||
link_msg_type = m.gettlv(coreapi.CORE_TLV_LINK_TYPE)
|
||||
if isinstance(net, nodes.WlanNode) or \
|
||||
isinstance(net, nodes.EmaneNode):
|
||||
if (n1num == net.objid):
|
||||
continue
|
||||
wl = (link_msg_type == coreapi.CORE_LINK_WIRELESS)
|
||||
self.updatelink(n1num, n2num, coreapi.CORE_API_ADD_FLAG, wl)
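Illustrative sketch, not part of this module: Sdt.cmd() sends plain text lines over UDP, so the same command forms built by initialize(), updatenode(), and updatelink() can be emitted by hand against an SDT3D listener for testing. The port, node numbers, and coordinates below are example values:

import socket

sdt = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sdt.connect(("127.0.0.1", 5000))
for line in ('sprite router image router.gif',
             'node 1 type router label on,"n1" pos -77.000000,39.000000,100.0',
             'node 2 type router label on,"n2" pos -77.100000,39.000000,100.0',
             'link 1,2'):
    # one command per line, newline-terminated, as in Sdt.cmd()
    sdt.sendall("%s\n" % line)
sdt.close()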
|
||||
|
||||
|
760
daemon/core/service.py
Normal file
@@ -0,0 +1,760 @@
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
service.py: definition of CoreService class that is subclassed to define
|
||||
startup services and routing for nodes. A service is typically a daemon
|
||||
program launched when a node starts that provides some sort of
|
||||
service. The CoreServices class handles configuration messages for sending
|
||||
a list of available services to the GUI and for configuring individual
|
||||
services.
|
||||
'''
|
||||
|
||||
import sys, os, shlex
|
||||
|
||||
from itertools import repeat
|
||||
from core.api import coreapi
|
||||
from core.conf import ConfigurableManager, Configurable
|
||||
from core.misc.utils import maketuplefromstr, expandcorepath
|
||||
|
||||
servicelist = []
|
||||
|
||||
def addservice(service):
|
||||
global servicelist
|
||||
i = 0
|
||||
found = -1
|
||||
for s in servicelist:
|
||||
if s._group == service._group:
|
||||
found = i
|
||||
elif (found >= 0):
|
||||
# insert service into list next to existing group
|
||||
i = found + 1
|
||||
break
|
||||
i += 1
|
||||
servicelist.insert(i, service)
|
||||
|
||||
class CoreServices(ConfigurableManager):
|
||||
''' Class for interacting with a list of available startup services for
|
||||
nodes. Mostly used to convert a CoreService into a Config API
|
||||
message. This class lives in the Session object and remembers
|
||||
the default services configured for each node type, and any
|
||||
custom service configuration. A CoreService is not a Configurable.
|
||||
'''
|
||||
_name = "services"
|
||||
_type = coreapi.CORE_TLV_REG_UTILITY
|
||||
|
||||
def __init__(self, session):
|
||||
ConfigurableManager.__init__(self, session)
|
||||
# dict of default services tuples, key is node type
|
||||
self.defaultservices = {}
|
||||
# dict of tuple of service objects, key is node number
|
||||
self.customservices = {}
|
||||
importcmd = "from core.services import *"
|
||||
exec(importcmd)
|
||||
paths = self.session.getcfgitem('custom_services_dir')
|
||||
if paths:
|
||||
for path in paths.split(','):
|
||||
path = path.strip()
|
||||
self.importcustom(path)
|
||||
|
||||
def importcustom(self, path):
|
||||
''' Import services from a myservices directory.
|
||||
'''
|
||||
if not path or len(path) == 0:
|
||||
return
|
||||
if not os.path.isdir(path):
|
||||
self.session.warn("invalid custom service directory specified" \
|
||||
": %s" % path)
|
||||
return
|
||||
try:
|
||||
parentdir, childdir = os.path.split(path)
|
||||
if childdir == "services":
|
||||
raise ValueError, "use a unique custom services dir name, " \
|
||||
"not 'services'"
|
||||
sys.path.append(parentdir)
|
||||
exec("from %s import *" % childdir)
|
||||
except Exception, e:
|
||||
self.session.warn("error importing custom services from " \
|
||||
"%s:\n%s" % (path, e))
|
||||
|
||||
def reset(self):
|
||||
''' Called when config message with reset flag is received
|
||||
'''
|
||||
self.defaultservices.clear()
|
||||
self.customservices.clear()
|
||||
|
||||
def get(self):
|
||||
''' Get the list of available services.
|
||||
'''
|
||||
global servicelist
|
||||
return servicelist
|
||||
|
||||
def getservicebyname(self, name):
|
||||
''' Get a service class from the global servicelist given its name.
|
||||
Returns None when the name is not found.
|
||||
'''
|
||||
global servicelist
|
||||
for s in servicelist:
|
||||
if s._name == name:
|
||||
return s
|
||||
return None
|
||||
|
||||
def getdefaultservices(self, type):
|
||||
''' Get the list of default services that should be enabled for a
|
||||
node for the given node type.
|
||||
'''
|
||||
r = []
|
||||
if type in self.defaultservices:
|
||||
defaults = self.defaultservices[type]
|
||||
for name in defaults:
|
||||
s = self.getservicebyname(name)
|
||||
if s is None:
|
||||
self.session.warn("default service %s is unknown" % name)
|
||||
else:
|
||||
r.append(s)
|
||||
return r
|
||||
|
||||
def getcustomservice(self, objid, service):
|
||||
''' Get any custom service configured for the given node that
|
||||
matches the specified service name. If no custom service
|
||||
is found, return the specified service.
|
||||
'''
|
||||
if objid in self.customservices:
|
||||
for s in self.customservices[objid]:
|
||||
if s._name == service._name:
|
||||
return s
|
||||
return service
|
||||
|
||||
def setcustomservice(self, objid, service, values):
|
||||
''' Store service customizations in an instantiated service object
|
||||
using a list of values that came from a config message.
|
||||
'''
|
||||
if service._custom:
|
||||
s = service
|
||||
else:
|
||||
# instantiate the class, for storing config customization
|
||||
s = service()
|
||||
# values are new key=value format; not all keys need to be present
|
||||
# a missing key means go with the default
|
||||
if Configurable.haskeyvalues(values):
|
||||
for v in values:
|
||||
key, value = v.split('=', 1)
|
||||
s.setvalue(key, value)
|
||||
# old-style config, list of values
|
||||
else:
|
||||
s.fromvaluelist(values)
|
||||
|
||||
# assume custom service already in dict
|
||||
if service._custom:
|
||||
return
|
||||
# add the custom service to dict
|
||||
if objid in self.customservices:
|
||||
self.customservices[objid] += (s, )
|
||||
else:
|
||||
self.customservices[objid] = (s, )
|
||||
|
||||
def addservicestonode(self, node, nodetype, services_str, verbose):
|
||||
''' Populate the node.service list using (1) the list of services
|
||||
requested from the services TLV, (2) using any custom service
|
||||
configuration, or (3) using the default services for this node type.
|
||||
'''
|
||||
if services_str is not None:
|
||||
services = services_str.split('|')
|
||||
for name in services:
|
||||
s = self.getservicebyname(name)
|
||||
if s is None:
|
||||
self.session.warn("configured service %s for node %s is " \
|
||||
"unknown" % (name, node.name))
|
||||
continue
|
||||
if verbose:
|
||||
self.session.info("adding configured service %s to " \
|
||||
"node %s" % (s._name, node.name))
|
||||
s = self.getcustomservice(node.objid, s)
|
||||
node.addservice(s)
|
||||
else:
|
||||
services = self.getdefaultservices(nodetype)
|
||||
for s in services:
|
||||
if verbose:
|
||||
self.session.info("adding default service %s to node %s" % \
|
||||
(s._name, node.name))
|
||||
s = self.getcustomservice(node.objid, s)
|
||||
node.addservice(s)
|
||||
|
||||
def getallconfigs(self):
|
||||
''' Return (nodenum, service) tuples for all stored configs.
|
||||
Used when reconnecting to a session or opening XML.
|
||||
'''
|
||||
r = []
|
||||
for nodenum in self.customservices:
|
||||
for s in self.customservices[nodenum]:
|
||||
r.append( (nodenum, s) )
|
||||
return r
|
||||
|
||||
def getallfiles(self, service):
|
||||
''' Return all customized files stored with a service.
|
||||
Used when reconnecting to a session or opening XML.
|
||||
'''
|
||||
r = []
|
||||
if not service._custom:
|
||||
return r
|
||||
for filename in service._configs:
|
||||
data = self.getservicefiledata(service, filename)
|
||||
if data is None:
|
||||
continue
|
||||
r.append( (filename, data) )
|
||||
return r
|
||||
|
||||
def bootnodeservices(self, node):
|
||||
''' Start all services on a node.
|
||||
'''
|
||||
services = sorted(node.services,
|
||||
key=lambda service: service._startindex)
|
||||
for s in services:
|
||||
try:
|
||||
t = float(s._starttime)
|
||||
if t > 0.0:
|
||||
fn = self.bootnodeservice
|
||||
self.session.evq.add_event(t, fn, node, s, services)
|
||||
continue
|
||||
except ValueError:
|
||||
pass
|
||||
self.bootnodeservice(node, s, services)
|
||||
|
||||
def bootnodeservice(self, node, s, services):
|
||||
''' Start a service on a node. Create private dirs, generate config
|
||||
files, and execute startup commands.
|
||||
'''
|
||||
if s._custom:
|
||||
self.bootnodecustomservice(node, s, services)
|
||||
return
|
||||
if node.verbose:
|
||||
node.info("starting service %s (%s)" % (s._name, s._startindex))
|
||||
for d in s._dirs:
|
||||
try:
|
||||
node.privatedir(d)
|
||||
except Exception, e:
|
||||
node.warn("Error making node %s dir %s: %s" % \
|
||||
(node.name, d, e))
|
||||
for filename in s.getconfigfilenames(node.objid, services):
|
||||
cfg = s.generateconfig(node, filename, services)
|
||||
node.nodefile(filename, cfg)
|
||||
for cmd in s.getstartup(node, services):
|
||||
try:
|
||||
# NOTE: this wait=False can be problematic!
|
||||
node.cmd(shlex.split(cmd), wait = False)
|
||||
except:
|
||||
node.warn("error starting command %s" % cmd)
|
||||
|
||||
def bootnodecustomservice(self, node, s, services):
|
||||
''' Start a custom service on a node. Create private dirs, use supplied
|
||||
config files, and execute supplied startup commands.
|
||||
'''
|
||||
if node.verbose:
|
||||
node.info("starting service %s (%s)(custom)" % (s._name, s._startindex))
|
||||
for d in s._dirs:
|
||||
try:
|
||||
node.privatedir(d)
|
||||
except Exception, e:
|
||||
node.warn("Error making node %s dir %s: %s" % \
|
||||
(node.name, d, e))
|
||||
for i, filename in enumerate(s._configs):
|
||||
if len(filename) == 0:
|
||||
continue
|
||||
cfg = self.getservicefiledata(s, filename)
|
||||
if cfg is None:
|
||||
cfg = s.generateconfig(node, filename, services)
|
||||
# cfg may have a file:/// url for copying from a file
|
||||
try:
|
||||
if self.copyservicefile(node, filename, cfg):
|
||||
continue
|
||||
except IOError, e:
|
||||
node.warn("Error copying service file %s" % filename)
|
||||
node.exception(coreapi.CORE_EXCP_LEVEL_ERROR,
|
||||
"service:%s" % s._name,
|
||||
"error copying service file '%s': %s" % (filename, e))
|
||||
continue
|
||||
node.nodefile(filename, cfg)
|
||||
|
||||
for cmd in s._startup:
|
||||
try:
|
||||
# NOTE: this wait=False can be problematic!
|
||||
node.cmd(shlex.split(cmd), wait = False)
|
||||
except:
|
||||
node.warn("error starting command %s" % cmd)
|
||||
|
||||
def copyservicefile(self, node, filename, cfg):
|
||||
''' Given a configured service filename and config, determine if the
|
||||
config references an existing file that should be copied.
|
||||
Returns True for local files, False for generated.
|
||||
'''
|
||||
if cfg[:7] == 'file://':
|
||||
src = cfg[7:]
|
||||
src = src.split('\n')[0]
|
||||
src = expandcorepath(src, node.session, node)
|
||||
# TODO: glob here
|
||||
node.nodefilecopy(filename, src, mode = 0644)
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def validatenodeservices(self, node):
|
||||
''' Run validation commands for all services on a node.
|
||||
'''
|
||||
services = sorted(node.services,
|
||||
key=lambda service: service._startindex)
|
||||
for s in services:
|
||||
self.validatenodeservice(node, s, services)
|
||||
|
||||
def validatenodeservice(self, node, s, services):
|
||||
''' Run the validation command(s) for a service.
|
||||
'''
|
||||
if node.verbose:
|
||||
node.info("validating service %s (%s)" % (s._name, s._startindex))
|
||||
if s._custom:
|
||||
validate_cmds = s._validate
|
||||
else:
|
||||
validate_cmds = s.getvalidate(node, services)
|
||||
for cmd in validate_cmds:
|
||||
if node.verbose:
|
||||
node.info("validating service %s using: %s" % (s._name, cmd))
|
||||
try:
|
||||
(status, result) = node.cmdresult(shlex.split(cmd))
|
||||
if status != 0:
|
||||
raise ValueError, "non-zero exit status"
|
||||
except:
|
||||
node.warn("validation command '%s' failed" % cmd)
|
||||
node.exception(coreapi.CORE_EXCP_LEVEL_ERROR,
|
||||
"service:%s" % s._name,
|
||||
"validate command failed: %s" % cmd)
|
||||
|
||||
def stopnodeservices(self, node):
|
||||
''' Stop all services on a node.
|
||||
'''
|
||||
services = sorted(node.services,
|
||||
key=lambda service: service._startindex)
|
||||
for s in services:
|
||||
self.stopnodeservice(node, s)
|
||||
|
||||
def stopnodeservice(self, node, s):
|
||||
''' Stop a service on a node.
|
||||
'''
|
||||
for cmd in s._shutdown:
|
||||
try:
|
||||
# NOTE: this wait=False can be problematic!
|
||||
node.cmd(shlex.split(cmd), wait = False)
|
||||
except:
|
||||
node.warn("error running stop command %s" % cmd)
|
||||
|
||||
|
||||
def configure_request(self, msg):
|
||||
''' Receive configuration message for configuring services.
|
||||
With a request flag set, a list of services has been requested.
|
||||
When the opaque field is present, a specific service is being
|
||||
configured or requested.
|
||||
'''
|
||||
objname = msg.gettlv(coreapi.CORE_TLV_CONF_OBJ)
|
||||
conftype = msg.gettlv(coreapi.CORE_TLV_CONF_TYPE)
|
||||
nodenum = msg.gettlv(coreapi.CORE_TLV_CONF_NODE)
|
||||
sessionnum = msg.gettlv(coreapi.CORE_TLV_CONF_SESSION)
|
||||
opaque = msg.gettlv(coreapi.CORE_TLV_CONF_OPAQUE)
|
||||
|
||||
# send back a list of available services
|
||||
if opaque is None:
|
||||
global servicelist
|
||||
tf = coreapi.CONF_TYPE_FLAGS_NONE
|
||||
datatypes = tuple(repeat(coreapi.CONF_DATA_TYPE_BOOL,
|
||||
len(servicelist)))
|
||||
vals = "|".join(repeat('0', len(servicelist)))
|
||||
names = map(lambda x: x._name, servicelist)
|
||||
captions = "|".join(names)
|
||||
possiblevals = ""
|
||||
for s in servicelist:
|
||||
if s._custom_needed:
|
||||
possiblevals += '1'
|
||||
possiblevals += '|'
|
||||
groups = self.buildgroups(servicelist)
|
||||
# send back the properties for this service
|
||||
else:
|
||||
if nodenum is None:
|
||||
return None
|
||||
n = self.session.obj(nodenum)
|
||||
if n is None:
|
||||
self.session.warn("Request to configure service %s for " \
|
||||
"unknown node %s" % (svc._name, nodenum))
|
||||
return None
|
||||
servicesstring = opaque.split(':')
|
||||
services = self.servicesfromopaque(opaque, n.objid)
|
||||
if len(services) < 1:
|
||||
return None
|
||||
if len(servicesstring) == 3:
|
||||
# a file request: e.g. "service:zebra:quagga.conf"
|
||||
return self.getservicefile(services, n, servicesstring[2])
|
||||
|
||||
# the first service in the list is the one being configured
|
||||
svc = services[0]
|
||||
# send back:
|
||||
# dirs, configs, startindex, startup, shutdown, metadata, config
|
||||
tf = coreapi.CONF_TYPE_FLAGS_UPDATE
|
||||
datatypes = tuple(repeat(coreapi.CONF_DATA_TYPE_STRING,
|
||||
len(svc.keys)))
|
||||
vals = svc.tovaluelist(n, services)
|
||||
captions = None
|
||||
possiblevals = None
|
||||
groups = None
|
||||
|
||||
tlvdata = ""
|
||||
if nodenum is not None:
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_NODE,
|
||||
nodenum)
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_OBJ,
|
||||
self._name)
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_TYPE, tf)
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_DATA_TYPES,
|
||||
datatypes)
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_VALUES,
|
||||
vals)
|
||||
if captions:
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_CAPTIONS,
|
||||
captions)
|
||||
if possiblevals:
|
||||
tlvdata += coreapi.CoreConfTlv.pack(
|
||||
coreapi.CORE_TLV_CONF_POSSIBLE_VALUES, possiblevals)
|
||||
if groups:
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_GROUPS,
|
||||
groups)
|
||||
if sessionnum is not None:
|
||||
tlvdata += coreapi.CoreConfTlv.pack(
|
||||
coreapi.CORE_TLV_CONF_SESSION, sessionnum)
|
||||
if opaque:
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_OPAQUE,
|
||||
opaque)
|
||||
return coreapi.CoreConfMessage.pack(0, tlvdata)
|
||||
|
||||
|
||||
def configure_values(self, msg, values):
|
||||
''' Receive configuration message for configuring services.
|
||||
With a request flag set, a list of services has been requested.
|
||||
When the opaque field is present, a specific service is being
|
||||
configured or requested.
|
||||
'''
|
||||
nodenum = msg.gettlv(coreapi.CORE_TLV_CONF_NODE)
|
||||
opaque = msg.gettlv(coreapi.CORE_TLV_CONF_OPAQUE)
|
||||
|
||||
errmsg = "services config message that I don't know how to handle"
|
||||
if values is None:
|
||||
self.session.info(errmsg)
|
||||
return None
|
||||
else:
|
||||
values = values.split('|')
|
||||
|
||||
if opaque is None:
|
||||
# store default services for a node type in self.defaultservices[]
|
||||
data_types = msg.gettlv(coreapi.CORE_TLV_CONF_DATA_TYPES)
|
||||
if values is None or data_types is None or \
|
||||
data_types[0] != coreapi.CONF_DATA_TYPE_STRING:
|
||||
self.session.info(errmsg)
|
||||
return None
|
||||
key = values.pop(0)
|
||||
self.defaultservices[key] = values
|
||||
self.session.info("default services for type %s set to %s" % \
|
||||
(key, values))
|
||||
else:
|
||||
# store service customized config in self.customservices[]
|
||||
if nodenum is None:
|
||||
return None
|
||||
services = self.servicesfromopaque(opaque, nodenum)
|
||||
if len(services) < 1:
|
||||
return None
|
||||
svc = services[0]
|
||||
self.setcustomservice(nodenum, svc, values)
|
||||
return None
|
||||
|
||||
def servicesfromopaque(self, opaque, objid):
|
||||
''' Build a list of services from an opaque data string.
|
||||
'''
|
||||
services = []
|
||||
servicesstring = opaque.split(':')
|
||||
if servicesstring[0] != "service":
|
||||
return []
|
||||
servicenames = servicesstring[1].split(',')
|
||||
for name in servicenames:
|
||||
s = self.getservicebyname(name)
|
||||
s = self.getcustomservice(objid, s)
|
||||
if s is None:
|
||||
self.session.warn("Request for unknown service '%s'" % name)
|
||||
return []
|
||||
services.append(s)
|
||||
return services
|
||||
|
||||
def buildgroups(self, servicelist):
|
||||
''' Build a string of groups for use in a configuration message given
|
||||
a list of services. The group list string has the format
|
||||
"title1:1-5|title2:6-9|10-12", where title is an optional group title
|
||||
and i-j is a numeric range of value indices; groups are
|
||||
separated by "|" characters.
|
||||
'''
|
||||
i = 0
|
||||
r = ""
|
||||
lastgroup = "<undefined>"
|
||||
for service in servicelist:
|
||||
i += 1
|
||||
group = service._group
|
||||
if group != lastgroup:
|
||||
lastgroup = group
|
||||
# finish previous group
|
||||
if i > 1:
|
||||
r += "-%d|" % (i -1)
|
||||
# optionally include group title
|
||||
if group == "":
|
||||
r += "%d" % i
|
||||
else:
|
||||
r += "%s:%d" % (group, i)
|
||||
# finish the last group list
|
||||
if i > 0:
|
||||
r += "-%d" % i
|
||||
return r
|
||||
|
||||
def getservicefile(self, services, node, filename):
|
||||
''' Send a File Message when the GUI has requested a service file.
|
||||
The file data is either auto-generated or comes from an existing config.
|
||||
'''
|
||||
svc = services[0]
|
||||
# get the filename and determine the config file index
|
||||
if svc._custom:
|
||||
cfgfiles = svc._configs
|
||||
else:
|
||||
cfgfiles = svc.getconfigfilenames(node.objid, services)
|
||||
if filename not in cfgfiles:
|
||||
self.session.warn("Request for unknown file '%s' for service '%s'" \
|
||||
% (filename, services[0]))
|
||||
return None
|
||||
|
||||
# get the file data
|
||||
data = self.getservicefiledata(svc, filename)
|
||||
if data is None:
|
||||
data = "%s" % (svc.generateconfig(node, filename, services))
|
||||
else:
|
||||
data = "%s" % data
|
||||
filetypestr = "service:%s" % svc._name
|
||||
|
||||
# send a file message
|
||||
flags = coreapi.CORE_API_ADD_FLAG
|
||||
tlvdata = coreapi.CoreFileTlv.pack(coreapi.CORE_TLV_FILE_NODE, node.objid)
|
||||
tlvdata += coreapi.CoreFileTlv.pack(coreapi.CORE_TLV_FILE_NAME, filename)
|
||||
tlvdata += coreapi.CoreFileTlv.pack(coreapi.CORE_TLV_FILE_TYPE, filetypestr)
|
||||
tlvdata += coreapi.CoreFileTlv.pack(coreapi.CORE_TLV_FILE_DATA, data)
|
||||
reply = coreapi.CoreFileMessage.pack(flags, tlvdata)
|
||||
return reply
|
||||
|
||||
def getservicefiledata(self, service, filename):
|
||||
''' Get the customized file data associated with a service. Return None
|
||||
for invalid filenames or missing file data.
|
||||
'''
|
||||
try:
|
||||
i = service._configs.index(filename)
|
||||
except ValueError:
|
||||
return None
|
||||
if i >= len(service._configtxt) or service._configtxt[i] is None:
|
||||
return None
|
||||
return service._configtxt[i]
|
||||
|
||||
def setservicefile(self, nodenum, type, filename, srcname, data):
|
||||
''' Receive a File Message from the GUI and store the customized file
|
||||
in the service config. The filename must match one from the list of
|
||||
config files in the service.
|
||||
'''
|
||||
if len(type.split(':')) < 2:
|
||||
self.session.warn("Received file type did not contain service info.")
|
||||
return
|
||||
if srcname is not None:
|
||||
raise NotImplementedError
|
||||
(svcid, svcname) = type.split(':')[:2]
|
||||
svc = self.getservicebyname(svcname)
|
||||
svc = self.getcustomservice(nodenum, svc)
|
||||
if svc is None:
|
||||
self.session.warn("Received filename for unknown service '%s'" % \
|
||||
svcname)
|
||||
return
|
||||
cfgfiles = svc._configs
|
||||
if filename not in cfgfiles:
|
||||
self.session.warn("Received unknown file '%s' for service '%s'" \
|
||||
% (filename, svcname))
|
||||
return
|
||||
i = cfgfiles.index(filename)
|
||||
configtxtlist = list(svc._configtxt)
|
||||
numitems = len(configtxtlist)
|
||||
if numitems < i+1:
|
||||
# add empty elements to list to support index assignment
|
||||
for j in range(1, (i + 2) - numitems):
|
||||
configtxtlist += None,
|
||||
configtxtlist[i] = data
|
||||
svc._configtxt = configtxtlist
|
||||
|
||||
def handleevent(self, msg):
|
||||
''' Handle an Event Message used to start, stop, restart, or validate
|
||||
a service on a given node.
|
||||
'''
|
||||
eventtype = msg.gettlv(coreapi.CORE_TLV_EVENT_TYPE)
|
||||
nodenum = msg.gettlv(coreapi.CORE_TLV_EVENT_NODE)
|
||||
name = msg.gettlv(coreapi.CORE_TLV_EVENT_NAME)
|
||||
try:
|
||||
node = self.session.obj(nodenum)
|
||||
except KeyError:
|
||||
self.session.warn("Ignoring event for service '%s', unknown node " \
|
||||
"'%s'" % (name, nodenum))
|
||||
return
|
||||
|
||||
services = self.servicesfromopaque(name, nodenum)
|
||||
for s in services:
|
||||
if eventtype == coreapi.CORE_EVENT_STOP or \
|
||||
eventtype == coreapi.CORE_EVENT_RESTART:
|
||||
self.stopnodeservice(node, s)
|
||||
if eventtype == coreapi.CORE_EVENT_START or \
|
||||
eventtype == coreapi.CORE_EVENT_RESTART:
|
||||
if s._custom:
|
||||
cmds = s._startup
|
||||
else:
|
||||
cmds = s.getstartup(node, services)
|
||||
for cmd in cmds:
|
||||
try:
|
||||
node.cmd(shlex.split(cmd), wait = False)
|
||||
except:
|
||||
node.warn("error starting command %s" % cmd)
|
||||
if eventtype == coreapi.CORE_EVENT_PAUSE:
|
||||
self.validatenodeservice(node, s, services)
|
||||
|
||||
|
||||
class CoreService(object):
|
||||
''' Parent class used for defining services.
|
||||
'''
|
||||
# service name should not include spaces
|
||||
_name = ""
|
||||
# group string allows grouping services together
|
||||
_group = ""
|
||||
# list name(s) of services that this service depends upon
|
||||
_depends = ()
|
||||
keys = ["dirs","files","startidx","cmdup","cmddown","cmdval","meta","starttime"]
|
||||
# private, per-node directories required by this service
|
||||
_dirs = ()
|
||||
# config files written by this service
|
||||
_configs = ()
|
||||
# index used to determine start order with other services
|
||||
_startindex = 0
|
||||
# time in seconds after runtime to run startup commands
|
||||
_starttime = ""
|
||||
# list of startup commands
|
||||
_startup = ()
|
||||
# list of shutdown commands
|
||||
_shutdown = ()
|
||||
# list of validate commands
|
||||
_validate = ()
|
||||
# metadata associated with this service
|
||||
_meta = ""
|
||||
# custom configuration text
|
||||
_configtxt = ()
|
||||
_custom = False
|
||||
_custom_needed = False
|
||||
|
||||
def __init__(self):
|
||||
''' Services are not necessarily instantiated. Classmethods may be used
|
||||
against their config. Services are instantiated when a custom
|
||||
configuration is used to override their default parameters.
|
||||
'''
|
||||
self._custom = True
|
||||
|
||||
@classmethod
|
||||
def getconfigfilenames(cls, nodenum, services):
|
||||
''' Return the tuple of configuration file filenames. This default method
|
||||
returns the cls._configs tuple, but this method may be overridden to
|
||||
provide node-specific filenames that may be based on other services.
|
||||
'''
|
||||
return cls._configs
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Generate configuration file given a node object. The filename is
|
||||
provided to allow for multiple config files. The other services are
|
||||
provided to allow interdependencies (e.g. zebra and OSPF).
|
||||
Return the configuration string to be written to a file or sent
|
||||
to the GUI for customization.
|
||||
'''
|
||||
raise NotImplementedError
|
||||
|
||||
@classmethod
|
||||
def getstartup(cls, node, services):
|
||||
''' Return the tuple of startup commands. This default method
|
||||
returns the cls._startup tuple, but this method may be
|
||||
overridden to provide node-specific commands that may be
|
||||
based on other services.
|
||||
'''
|
||||
return cls._startup
|
||||
|
||||
@classmethod
|
||||
def getvalidate(cls, node, services):
|
||||
''' Return the tuple of validate commands. This default method
|
||||
returns the cls._validate tuple, but this method may be
|
||||
overridden to provide node-specific commands that may be
|
||||
based on other services.
|
||||
'''
|
||||
return cls._validate
|
||||
|
||||
@classmethod
|
||||
def tovaluelist(cls, node, services):
|
||||
''' Convert service properties into a string list of key=value pairs,
|
||||
separated by "|".
|
||||
'''
|
||||
valmap = [cls._dirs, cls._configs, cls._startindex, cls._startup,
|
||||
cls._shutdown, cls._validate, cls._meta, cls._starttime]
|
||||
if not cls._custom:
|
||||
# this is always reached due to classmethod
|
||||
valmap[valmap.index(cls._configs)] = \
|
||||
cls.getconfigfilenames(node.objid, services)
|
||||
valmap[valmap.index(cls._startup)] = \
|
||||
cls.getstartup(node, services)
|
||||
vals = map( lambda a,b: "%s=%s" % (a, str(b)), cls.keys, valmap)
|
||||
return "|".join(vals)
|
||||
|
||||
def fromvaluelist(self, values):
|
||||
''' Convert list of values into properties for this instantiated
|
||||
(customized) service.
|
||||
'''
|
||||
# TODO: support empty value? e.g. override default meta with ''
|
||||
for key in self.keys:
|
||||
try:
|
||||
self.setvalue(key, values[self.keys.index(key)])
|
||||
except IndexError:
|
||||
# old config does not need to have new keys
|
||||
pass
|
||||
|
||||
def setvalue(self, key, value):
|
||||
if key not in self.keys:
|
||||
raise ValueError
|
||||
# this handles data conversion to int, string, and tuples
|
||||
if value:
|
||||
if key == "startidx":
|
||||
value = int(value)
|
||||
elif key == "meta":
|
||||
value = str(value)
|
||||
else:
|
||||
value = maketuplefromstr(value, str)
|
||||
|
||||
if key == "dirs":
|
||||
self._dirs = value
|
||||
elif key == "files":
|
||||
self._configs = value
|
||||
elif key == "startidx":
|
||||
self._startindex = value
|
||||
elif key == "cmdup":
|
||||
self._startup = value
|
||||
elif key == "cmddown":
|
||||
self._shutdown = value
|
||||
elif key == "cmdval":
|
||||
self._validate = value
|
||||
elif key == "meta":
|
||||
self._meta = value
|
||||
elif key == "starttime":
|
||||
self._starttime = value
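A minimal sketch of the subclassing pattern this module expects; the module and service names are invented for illustration, and in practice such a file would live in a directory named by the custom_services_dir option read in CoreServices.__init__() above:

from core.service import CoreService, addservice

class MyDaemon(CoreService):
    ''' Example user-defined service following the CoreService conventions. '''
    _name = "MyDaemon"
    _group = "Utility"
    _dirs = ("/var/run/mydaemon", )
    _configs = ("mydaemon.conf", )
    _startindex = 50
    _startup = ("mydaemon -f mydaemon.conf", )
    _shutdown = ("killall mydaemon", )
    _validate = ("pidof mydaemon", )

    @classmethod
    def generateconfig(cls, node, filename, services):
        # contents written to mydaemon.conf in the node's directory at boot
        return "# configuration generated for %s\n" % node.name

# make the service visible to the GUI and to addservicestonode()
addservice(MyDaemon)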
|
6
daemon/core/services/__init__.py
Normal file
@@ -0,0 +1,6 @@
"""Services
|
||||
|
||||
Services available to nodes can be put in this directory. Everything listed in
|
||||
__all__ is automatically loaded by the main core module.
|
||||
"""
|
||||
__all__ = ["quagga", "nrl", "xorp", "bird", "utility", "security", "ucarp"]
|
249
daemon/core/services/bird.py
Normal file
@@ -0,0 +1,249 @@
#
|
||||
# CORE
|
||||
# Copyright (c)2012 Jean-Tiare Le Bigot.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# authors: Jean-Tiare Le Bigot <admin@jtlebi.fr>
|
||||
# Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
bird.py: defines routing services provided by the BIRD Internet Routing Daemon.
|
||||
'''
|
||||
|
||||
import os
|
||||
|
||||
from core.service import CoreService, addservice
|
||||
from core.misc.ipaddr import IPv4Prefix
|
||||
from core.constants import *
|
||||
|
||||
class Bird(CoreService):
|
||||
''' Bird router support
|
||||
'''
|
||||
_name = "bird"
|
||||
_group = "BIRD"
|
||||
_depends = ()
|
||||
_dirs = ("/etc/bird",)
|
||||
_configs = ("/etc/bird/bird.conf",)
|
||||
_startindex = 35
|
||||
_startup = ("bird -c %s" % (_configs[0]),)
|
||||
_shutdown = ("killall bird", )
|
||||
_validate = ("pidof bird", )
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Return the bird.conf file contents.
|
||||
'''
|
||||
if filename == cls._configs[0]:
|
||||
return cls.generateBirdConf(node, services)
|
||||
else:
|
||||
raise ValueError
|
||||
|
||||
@staticmethod
|
||||
def routerid(node):
|
||||
''' Helper to return the first IPv4 address of a node as its router ID.
|
||||
'''
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
continue
|
||||
for a in ifc.addrlist:
|
||||
if a.find(".") >= 0:
|
||||
return a.split('/')[0]
|
||||
#raise ValueError, "no IPv4 address found for router ID"
|
||||
return "0.0.0.0"
|
||||
|
||||
@classmethod
|
||||
def generateBirdConf(cls, node, services):
|
||||
''' Returns configuration file text. Other services that depend on bird
|
||||
will have generatebirdifcconfig() and generatebirdconfig()
|
||||
hooks that are invoked here.
|
||||
'''
|
||||
cfg = """\
|
||||
/* Main configuration file for BIRD. This is only a template,
|
||||
* you will *need* to customize it according to your needs
|
||||
* Beware that only double quotes \'"\' are valid. No singles. */
|
||||
|
||||
|
||||
log "/var/log/%s.log" all;
|
||||
#debug protocols all;
|
||||
#debug commands 2;
|
||||
|
||||
router id %s; # Mandatory for IPv6, may be automatic for IPv4
|
||||
|
||||
protocol kernel {
|
||||
persist; # Don\'t remove routes on BIRD shutdown
|
||||
scan time 200; # Scan kernel routing table every 200 seconds
|
||||
export all;
|
||||
import all;
|
||||
}
|
||||
|
||||
protocol device {
|
||||
scan time 10; # Scan interfaces every 10 seconds
|
||||
}
|
||||
|
||||
""" % (cls._name, cls.routerid(node))
|
||||
|
||||
# Generate protocol specific configurations
|
||||
for s in services:
|
||||
if cls._name not in s._depends:
|
||||
continue
|
||||
cfg += s.generatebirdconfig(node)
|
||||
|
||||
return cfg
|
||||
|
||||
class BirdService(CoreService):
|
||||
''' Parent class for Bird services. Defines properties and methods
|
||||
common to Bird's routing daemons.
|
||||
'''
|
||||
|
||||
_name = "BirdDaemon"
|
||||
_group = "BIRD"
|
||||
_depends = ("bird", )
|
||||
_dirs = ()
|
||||
_configs = ()
|
||||
_startindex = 40
|
||||
_startup = ()
|
||||
_shutdown = ()
|
||||
_meta = "The config file for this service can be found in the bird service."
|
||||
|
||||
@classmethod
|
||||
def generatebirdconfig(cls, node):
|
||||
return ""
|
||||
|
||||
@classmethod
|
||||
def generatebirdifcconfig(cls, node):
|
||||
''' Use only bare interfaces descriptions in generated protocol
|
||||
configurations. This has the slight advantage of being the same
|
||||
everywhere.
|
||||
'''
|
||||
cfg = ""
|
||||
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True: continue
|
||||
cfg += ' interface "%s";\n'% ifc.name
|
||||
|
||||
return cfg
|
||||
|
||||
|
||||
class BirdBgp(BirdService):
|
||||
'''BGP BIRD Service (configuration generation)'''
|
||||
|
||||
_name = "BIRD_BGP"
|
||||
_custom_needed = True
|
||||
|
||||
@classmethod
|
||||
def generatebirdconfig(cls, node):
|
||||
return """
|
||||
/* This is a sample config that should be customized with appropriate AS numbers
|
||||
* and peers; add one section like this for each neighbor */
|
||||
|
||||
protocol bgp {
|
||||
local as 65000; # Customize your AS number
|
||||
neighbor 198.51.100.130 as 64496; # Customize neighbor AS number && IP
|
||||
export filter { # We use non-trivial export rules
|
||||
# This is an example. You should advertise only *your routes*
|
||||
if (source = RTS_DEVICE) || (source = RTS_OSPF) then {
|
||||
# bgp_community.add((65000,64501)); # Assign our community
|
||||
accept;
|
||||
}
|
||||
reject;
|
||||
};
|
||||
import all;
|
||||
}
|
||||
|
||||
"""
|
||||
|
||||
class BirdOspf(BirdService):
|
||||
'''OSPF BIRD Service (configuration generation)'''
|
||||
|
||||
_name = "BIRD_OSPFv2"
|
||||
|
||||
@classmethod
|
||||
def generatebirdconfig(cls, node):
|
||||
cfg = 'protocol ospf {\n'
|
||||
cfg += ' export filter {\n'
|
||||
cfg += ' if source = RTS_BGP then {\n'
|
||||
cfg += ' ospf_metric1 = 100;\n'
|
||||
cfg += ' accept;\n'
|
||||
cfg += ' }\n'
|
||||
cfg += ' accept;\n'
|
||||
cfg += ' };\n'
|
||||
cfg += ' area 0.0.0.0 {\n'
|
||||
cfg += cls.generatebirdifcconfig(node)
|
||||
cfg += ' };\n'
|
||||
cfg += '}\n\n'
|
||||
|
||||
return cfg
|
||||
|
||||
|
||||
class BirdRadv(BirdService):
|
||||
'''RADV BIRD Service (configuration generation)'''
|
||||
|
||||
_name = "BIRD_RADV"
|
||||
|
||||
@classmethod
|
||||
def generatebirdconfig(cls, node):
|
||||
cfg = '/* This is a sample config that must be customized */\n'
|
||||
|
||||
cfg += 'protocol radv {\n'
|
||||
cfg += ' # auto configuration on all interfaces\n'
|
||||
cfg += cls.generatebirdifcconfig(node)
|
||||
cfg += ' # Advertise DNS\n'
|
||||
cfg += ' rdnss {\n'
|
||||
cfg += '# lifetime mult 10;\n'
|
||||
cfg += '# lifetime mult 10;\n'
|
||||
cfg += '# ns 2001:0DB8:1234::11;\n'
|
||||
cfg += '# ns 2001:0DB8:1234::11;\n'
|
||||
cfg += '# ns 2001:0DB8:1234::12;\n'
|
||||
cfg += '# ns 2001:0DB8:1234::12;\n'
|
||||
cfg += ' };\n'
|
||||
cfg += '}\n\n'
|
||||
|
||||
return cfg
|
||||
|
||||
|
||||
class BirdRip(BirdService):
|
||||
'''RIP BIRD Service (configuration generation)'''
|
||||
|
||||
_name = "BIRD_RIP"
|
||||
|
||||
@classmethod
|
||||
def generatebirdconfig(cls, node):
|
||||
cfg = 'protocol rip {\n'
|
||||
cfg += ' period 10;\n'
|
||||
cfg += ' garbage time 60;\n'
|
||||
cfg += cls.generatebirdifcconfig(node)
|
||||
cfg += ' honor neighbor;\n'
|
||||
cfg += ' authentication none;\n'
|
||||
cfg += ' import all;\n'
|
||||
cfg += ' export all;\n'
|
||||
cfg += '}\n\n'
|
||||
|
||||
return cfg
|
||||
|
||||
|
||||
class BirdStatic(BirdService):
|
||||
'''Static Bird Service (configuration generation)'''
|
||||
|
||||
_name = "BIRD_static"
|
||||
_custom_needed = True
|
||||
|
||||
@classmethod
|
||||
def generatebirdconfig(cls, node):
|
||||
cfg = '/* This is a sample config that must be customized */\n'
|
||||
|
||||
cfg += 'protocol static {\n'
|
||||
cfg += '# route 0.0.0.0/0 via 198.51.100.130; # Default route. Do NOT advertise on BGP !\n'
|
||||
cfg += '# route 203.0.113.0/24 reject; # Sink route\n'
|
||||
cfg += '# route 10.2.0.0/24 via "arc0"; # Secondary network\n'
|
||||
cfg += '}\n\n'
|
||||
|
||||
return cfg
|
||||
|
||||
|
||||
# Register all protocols
|
||||
addservice(Bird)
|
||||
addservice(BirdOspf)
|
||||
addservice(BirdBgp)
|
||||
#addservice(BirdRadv) # untested
|
||||
addservice(BirdRip)
|
||||
addservice(BirdStatic)
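Rough usage sketch, not part of this file: once registered via addservice() above, these classes are selected per node by name, exactly as a services TLV of "bird|BIRD_OSPFv2" would be. The Session and node setup below is assumed from the CORE sample scripts:

from core import pycore

session = pycore.Session(persistent=True)
n1 = session.addobj(cls=pycore.nodes.CoreNode, name="n1")
# resolve the registered service classes by name and attach them to the node
session.services.addservicestonode(n1, "router", "bird|BIRD_OSPFv2", verbose=False)
# generate /etc/bird/bird.conf and run the startup commands defined above
session.services.bootnodeservices(n1)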
|
191
daemon/core/services/nrl.py
Normal file
@@ -0,0 +1,191 @@
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
nrl.py: defines services provided by NRL protolib tools hosted here:
|
||||
http://cs.itd.nrl.navy.mil/products/
|
||||
'''
|
||||
|
||||
from core.service import CoreService, addservice
|
||||
from core.misc.ipaddr import IPv4Prefix
|
||||
|
||||
class NrlService(CoreService):
|
||||
''' Parent class for NRL services. Defines properties and methods
|
||||
common to NRL's routing daemons.
|
||||
'''
|
||||
_name = "NRLDaemon"
|
||||
_group = "Routing"
|
||||
_depends = ()
|
||||
_dirs = ()
|
||||
_configs = ()
|
||||
_startindex = 45
|
||||
_startup = ()
|
||||
_shutdown = ()
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
return ""
|
||||
|
||||
@staticmethod
|
||||
def firstipv4prefix(node, prefixlen=24):
|
||||
''' Similar to QuaggaService.routerid(). Helper to return the first IPv4
|
||||
prefix of a node, using the supplied prefix length. This ignores the
|
||||
interface's prefix length, so e.g. '/32' can turn into '/24'.
|
||||
'''
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
continue
|
||||
for a in ifc.addrlist:
|
||||
if a.find(".") >= 0:
|
||||
addr = a.split('/')[0]
|
||||
pre = IPv4Prefix("%s/%s" % (addr, prefixlen))
|
||||
return str(pre)
|
||||
#raise ValueError, "no IPv4 address found"
|
||||
return "0.0.0.0/%s" % prefixlen
|
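# Illustrative note (not part of the original source): for a node whose first
# non-control interface holds 10.0.0.1/32, firstipv4prefix(node, 24) would
# typically return the covering /24 prefix (e.g. "10.0.0.0/24", assuming
# IPv4Prefix normalizes to the network address); with no IPv4 address at all
# it falls back to "0.0.0.0/24".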
||||
|
||||
class NrlNhdp(NrlService):
|
||||
''' NeighborHood Discovery Protocol for MANET networks.
|
||||
'''
|
||||
_name = "NHDP"
|
||||
_startup = ("nrlnhdp", )
|
||||
_shutdown = ("killall nrlnhdp", )
|
||||
_validate = ("pidof nrlnhdp", )
|
||||
|
||||
@classmethod
|
||||
def getstartup(cls, node, services):
|
||||
''' Generate the appropriate command-line based on node interfaces.
|
||||
'''
|
||||
cmd = cls._startup[0]
|
||||
cmd += " -l /var/log/nrlnhdp.log"
|
||||
cmd += " -rpipe %s_nhdp" % node.name
|
||||
|
||||
servicenames = map(lambda x: x._name, services)
|
||||
if "SMF" in servicenames:
|
||||
cmd += " -flooding ecds"
|
||||
cmd += " -smfClient %s_smf" % node.name
|
||||
|
||||
netifs = filter(lambda x: not getattr(x, 'control', False), \
|
||||
node.netifs())
|
||||
if len(netifs) > 0:
|
||||
interfacenames = map(lambda x: x.name, netifs)
|
||||
cmd += " -i "
|
||||
cmd += " -i ".join(interfacenames)
|
||||
|
||||
return (cmd, )
|
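# Illustrative example of the generated command line (hypothetical node "n1"
# with a single interface eth0 and no SMF service configured):
#   nrlnhdp -l /var/log/nrlnhdp.log -rpipe n1_nhdp -i eth0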
||||
|
||||
addservice(NrlNhdp)
|
||||
|
||||
class NrlSmf(NrlService):
|
||||
''' Simplified Multicast Forwarding for MANET networks.
|
||||
'''
|
||||
_name = "SMF"
|
||||
_startup = ("nrlsmf", )
|
||||
_shutdown = ("killall nrlsmf", )
|
||||
_validate = ("pidof nrlsmf", )
|
||||
|
||||
@classmethod
|
||||
def getstartup(cls, node, services):
|
||||
''' Generate the appropriate command-line based on node interfaces.
|
||||
'''
|
||||
cmd = cls._startup[0]
|
||||
cmd += " instance %s_smf" % node.name
|
||||
|
||||
servicenames = map(lambda x: x._name, services)
|
||||
netifs = filter(lambda x: not getattr(x, 'control', False), \
|
||||
node.netifs())
|
||||
if len(netifs) == 0:
|
||||
return ()
|
||||
|
||||
if "arouted" in servicenames:
|
||||
cmd += " tap %s_tap" % (node.name,)
|
||||
cmd += " unicast %s" % cls.firstipv4prefix(node, 24)
|
||||
cmd += " push lo,%s resequence on" % netifs[0].name
|
||||
if len(netifs) > 0:
|
||||
if "NHDP" in servicenames:
|
||||
cmd += " ecds "
|
||||
elif "OLSR" in servicenames:
|
||||
cmd += " smpr "
|
||||
else:
|
||||
cmd += " cf "
|
||||
interfacenames = map(lambda x: x.name, netifs)
|
||||
cmd += ",".join(interfacenames)
|
||||
|
||||
cmd += " hash MD5"
|
||||
cmd += " log /var/log/nrlsmf.log"
|
||||
return (cmd, )
|
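# Illustrative example (hypothetical node "n1" with one interface eth0,
# running alongside the NHDP service but without arouted):
#   nrlsmf instance n1_smf ecds eth0 hash MD5 log /var/log/nrlsmf.log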
||||
|
||||
addservice(NrlSmf)
|
||||
|
||||
class NrlOlsr(NrlService):
|
||||
''' Optimized Link State Routing protocol for MANET networks.
|
||||
'''
|
||||
_name = "OLSR"
|
||||
_startup = ("nrlolsrd", )
|
||||
_shutdown = ("killall nrlolsrd", )
|
||||
_validate = ("pidof nrlolsrd", )
|
||||
|
||||
@classmethod
|
||||
def getstartup(cls, node, services):
|
||||
''' Generate the appropriate command-line based on node interfaces.
|
||||
'''
|
||||
cmd = cls._startup[0]
|
||||
# are multiple interfaces supported? No.
|
||||
netifs = list(node.netifs())
|
||||
if len(netifs) > 0:
|
||||
ifc = netifs[0]
|
||||
cmd += " -i %s" % ifc.name
|
||||
cmd += " -l /var/log/nrlolsrd.log"
|
||||
cmd += " -rpipe %s_olsr" % node.name
|
||||
|
||||
servicenames = map(lambda x: x._name, services)
|
||||
if "SMF" in servicenames and not "NHDP" in servicenames:
|
||||
cmd += " -flooding s-mpr"
|
||||
cmd += " -smfClient %s_smf" % node.name
|
||||
if "zebra" in servicenames:
|
||||
cmd += " -z"
|
||||
|
||||
return (cmd, )
|
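# Illustrative example (hypothetical node "n1" with interface eth0 and no SMF
# or zebra services):
#   nrlolsrd -i eth0 -l /var/log/nrlolsrd.log -rpipe n1_olsr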
||||
|
||||
addservice(NrlOlsr)
|
||||
|
||||
class Arouted(NrlService):
|
||||
''' Adaptive Routing
|
||||
'''
|
||||
_name = "arouted"
|
||||
_configs = ("startarouted.sh", )
|
||||
_startindex = NrlService._startindex + 10
|
||||
_startup = ("sh startarouted.sh", )
|
||||
_shutdown = ("pkill arouted", )
|
||||
_validate = ("pidof arouted", )
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Return the startarouted.sh script contents.
|
||||
'''
|
||||
cfg = """
|
||||
#!/bin/sh
|
||||
for f in "/tmp/%s_smf"; do
|
||||
count=1
|
||||
until [ -e "$f" ]; do
|
||||
if [ $count -eq 10 ]; then
|
||||
echo "ERROR: nrlmsf pipe not found: $f" >&2
|
||||
exit 1
|
||||
fi
|
||||
sleep 0.1
|
||||
count=$(($count + 1))
|
||||
done
|
||||
done
|
||||
|
||||
""" % (node.name)
|
||||
cfg += "ip route add %s dev lo\n" % cls.firstipv4prefix(node, 24)
|
||||
cfg += "arouted instance %s_smf tap %s_tap" % (node.name, node.name)
|
||||
cfg += " stability 10" # seconds to consider a new route valid
|
||||
cfg += " 2>&1 > /var/log/arouted.log &\n\n"
|
||||
return cfg
|
||||
|
||||
# experimental
|
||||
#addservice(Arouted)
|
589
daemon/core/services/quagga.py
Normal file
|
@@ -0,0 +1,589 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
quagga.py: defines routing services provided by Quagga.
|
||||
'''
|
||||
|
||||
import os
|
||||
|
||||
if os.uname()[0] == "Linux":
|
||||
from core.netns import nodes
|
||||
elif os.uname()[0] == "FreeBSD":
|
||||
from core.bsd import nodes
|
||||
from core.service import CoreService, addservice
|
||||
from core.misc.ipaddr import IPv4Prefix, isIPv4Address, isIPv6Address
|
||||
from core.api import coreapi
|
||||
from core.constants import *
|
||||
|
||||
QUAGGA_USER="root"
|
||||
QUAGGA_GROUP="root"
|
||||
if os.uname()[0] == "FreeBSD":
|
||||
QUAGGA_GROUP="wheel"
|
||||
|
||||
class Zebra(CoreService):
|
||||
''' The zebra routing manager service; it manages kernel routes and provides the unified Quagga.conf used by the other Quagga routing daemons.
|
||||
'''
|
||||
_name = "zebra"
|
||||
_group = "Quagga"
|
||||
_depends = ("vtysh", )
|
||||
_dirs = ("/usr/local/etc/quagga", "/var/run/quagga")
|
||||
_configs = ("/usr/local/etc/quagga/Quagga.conf",
|
||||
"quaggaboot.sh","/usr/local/etc/quagga/vtysh.conf")
|
||||
_startindex = 35
|
||||
_startup = ("sh quaggaboot.sh zebra",)
|
||||
_shutdown = ("killall zebra", )
|
||||
_validate = ("pidof zebra", )
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Return the Quagga.conf or quaggaboot.sh file contents.
|
||||
'''
|
||||
if filename == cls._configs[0]:
|
||||
return cls.generateQuaggaConf(node, services)
|
||||
elif filename == cls._configs[1]:
|
||||
return cls.generateQuaggaBoot(node, services)
|
||||
elif filename == cls._configs[2]:
|
||||
return cls.generateVtyshConf(node, services)
|
||||
else:
|
||||
raise ValueError
|
||||
|
||||
@classmethod
|
||||
def generateVtyshConf(cls, node, services):
|
||||
''' Returns configuration file text.
|
||||
'''
|
||||
return "service integrated-vtysh-config"
|
||||
|
||||
@classmethod
|
||||
def generateQuaggaConf(cls, node, services):
|
||||
''' Returns configuration file text. Other services that depend on zebra
|
||||
will have generatequaggaifcconfig() and generatequaggaconfig()
|
||||
hooks that are invoked here.
|
||||
'''
|
||||
# we could verify here that filename == Quagga.conf
|
||||
cfg = ""
|
||||
for ifc in node.netifs():
|
||||
cfg += "interface %s\n" % ifc.name
|
||||
# include control interfaces in addressing but not routing daemons
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
cfg += " "
|
||||
cfg += "\n ".join(map(cls.addrstr, ifc.addrlist))
|
||||
cfg += "\n"
|
||||
continue
|
||||
cfgv4 = ""
|
||||
cfgv6 = ""
|
||||
want_ipv4 = False
|
||||
want_ipv6 = False
|
||||
for s in services:
|
||||
if cls._name not in s._depends:
|
||||
continue
|
||||
ifccfg = s.generatequaggaifcconfig(node, ifc)
|
||||
if s._ipv4_routing:
|
||||
want_ipv4 = True
|
||||
if s._ipv6_routing:
|
||||
want_ipv6 = True
|
||||
cfgv6 += ifccfg
|
||||
else:
|
||||
cfgv4 += ifccfg
|
||||
|
||||
if want_ipv4:
|
||||
ipv4list = filter(lambda x: isIPv4Address(x.split('/')[0]),
|
||||
ifc.addrlist)
|
||||
cfg += " "
|
||||
cfg += "\n ".join(map(cls.addrstr, ipv4list))
|
||||
cfg += "\n"
|
||||
cfg += cfgv4
|
||||
if want_ipv6:
|
||||
ipv6list = filter(lambda x: isIPv6Address(x.split('/')[0]),
|
||||
ifc.addrlist)
|
||||
cfg += " "
|
||||
cfg += "\n ".join(map(cls.addrstr, ipv6list))
|
||||
cfg += "\n"
|
||||
cfg += cfgv6
|
||||
cfg += "!\n"
|
||||
|
||||
for s in services:
|
||||
if cls._name not in s._depends:
|
||||
continue
|
||||
cfg += s.generatequaggaconfig(node)
|
||||
return cfg
|
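# Illustrative sketch (not from the original source) of the per-interface
# fragment this produces for a hypothetical interface eth0 addressed
# 10.0.0.1/24 with an IPv4 routing service enabled:
#
#   interface eth0
#     ip address 10.0.0.1/24
#   !
#
# followed by each dependent service's generatequaggaconfig() output.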
||||
|
||||
@staticmethod
|
||||
def addrstr(x):
|
||||
''' helper for mapping IP addresses to zebra config statements
|
||||
'''
|
||||
if x.find(".") >= 0:
|
||||
return "ip address %s" % x
|
||||
elif x.find(":") >= 0:
|
||||
return "ipv6 address %s" % x
|
||||
else:
|
||||
raise Value, "invalid address: %s", x
|
||||
|
||||
@classmethod
|
||||
def generateQuaggaBoot(cls, node, services):
|
||||
''' Generate a shell script used to boot the Quagga daemons.
|
||||
'''
|
||||
try:
|
||||
quagga_bin_search = node.session.cfg['quagga_bin_search']
|
||||
quagga_sbin_search = node.session.cfg['quagga_sbin_search']
|
||||
except KeyError:
|
||||
quagga_bin_search = '"/usr/local/bin /usr/bin /usr/lib/quagga"'
|
||||
quagga_sbin_search = '"/usr/local/sbin /usr/sbin /usr/lib/quagga"'
|
||||
return """\
|
||||
#!/bin/sh
|
||||
# auto-generated by zebra service (quagga.py)
|
||||
QUAGGA_CONF=%s
|
||||
QUAGGA_SBIN_SEARCH=%s
|
||||
QUAGGA_BIN_SEARCH=%s
|
||||
QUAGGA_STATE_DIR=%s
|
||||
QUAGGA_USER=%s
|
||||
QUAGGA_GROUP=%s
|
||||
|
||||
searchforprog()
|
||||
{
|
||||
prog=$1
|
||||
searchpath=$@
|
||||
ret=
|
||||
for p in $searchpath; do
|
||||
if [ -x $p/$prog ]; then
|
||||
ret=$p
|
||||
break
|
||||
fi
|
||||
done
|
||||
echo $ret
|
||||
}
|
||||
|
||||
confcheck()
|
||||
{
|
||||
CONF_DIR=`dirname $QUAGGA_CONF`
|
||||
# if /etc/quagga exists, point /etc/quagga/Quagga.conf -> CONF_DIR
|
||||
if [ "$CONF_DIR" != "/etc/quagga" ] && [ -d /etc/quagga ] && [ ! -e /etc/quagga/Quagga.conf ]; then
|
||||
ln -s $CONF_DIR/Quagga.conf /etc/quagga/Quagga.conf
|
||||
fi
|
||||
# if /etc/quagga exists, point /etc/quagga/vtysh.conf -> CONF_DIR
|
||||
if [ "$CONF_DIR" != "/etc/quagga" ] && [ -d /etc/quagga ] && [ ! -e /etc/quagga/vtysh.conf ]; then
|
||||
ln -s $CONF_DIR/vtysh.conf /etc/quagga/vtysh.conf
|
||||
fi
|
||||
}
|
||||
|
||||
waitforvtyfiles()
|
||||
{
|
||||
for f in "$@"; do
|
||||
count=1
|
||||
until [ -e $QUAGGA_STATE_DIR/$f ]; do
|
||||
if [ $count -eq 10 ]; then
|
||||
echo "ERROR: vty file not found: $QUAGGA_STATE_DIR/$f" >&2
|
||||
return 1
|
||||
fi
|
||||
sleep 0.1
|
||||
count=$(($count + 1))
|
||||
done
|
||||
done
|
||||
}
|
||||
|
||||
bootdaemon()
|
||||
{
|
||||
QUAGGA_SBIN_DIR=$(searchforprog $1 $QUAGGA_SBIN_SEARCH)
|
||||
if [ "z$QUAGGA_SBIN_DIR" = "z" ]; then
|
||||
echo "ERROR: Quagga's '$1' daemon not found in search path:"
|
||||
echo " $QUAGGA_SBIN_SEARCH"
|
||||
return 1
|
||||
fi
|
||||
|
||||
if [ "$1" != "zebra" ]; then
|
||||
waitforvtyfiles zebra.vty
|
||||
fi
|
||||
|
||||
$QUAGGA_SBIN_DIR/$1 -u $QUAGGA_USER -g $QUAGGA_GROUP -d
|
||||
}
|
||||
|
||||
bootvtysh()
|
||||
{
|
||||
QUAGGA_BIN_DIR=$(searchforprog $1 $QUAGGA_BIN_SEARCH)
|
||||
if [ "z$QUAGGA_BIN_DIR" = "z" ]; then
|
||||
echo "ERROR: Quagga's '$1' daemon not found in search path:"
|
||||
echo " $QUAGGA_SBIN_SEARCH"
|
||||
return 1
|
||||
fi
|
||||
|
||||
vtyfiles="zebra.vty"
|
||||
for r in rip ripng ospf6 ospf bgp babel; do
|
||||
if grep -q "^router \<${r}\>" $QUAGGA_CONF; then
|
||||
vtyfiles="$vtyfiles ${r}d.vty"
|
||||
fi
|
||||
done
|
||||
|
||||
# wait for Quagga daemon vty files to appear before invoking vtysh
|
||||
waitforvtyfiles $vtyfiles
|
||||
|
||||
$QUAGGA_BIN_DIR/vtysh -b
|
||||
}
|
||||
|
||||
confcheck
|
||||
if [ "x$1" = "x" ]; then
|
||||
echo "ERROR: missing the name of the Quagga daemon to boot"
|
||||
exit 1
|
||||
elif [ "$1" = "vtysh" ]; then
|
||||
bootvtysh $1
|
||||
else
|
||||
bootdaemon $1
|
||||
fi
|
||||
""" % (cls._configs[0], quagga_sbin_search, quagga_bin_search, \
|
||||
QUAGGA_STATE_DIR, QUAGGA_USER, QUAGGA_GROUP)
|
||||
|
||||
addservice(Zebra)
|
||||
|
||||
class QuaggaService(CoreService):
|
||||
''' Parent class for Quagga services. Defines properties and methods
|
||||
common to Quagga's routing daemons.
|
||||
'''
|
||||
_name = "QuaggaDaemon"
|
||||
_group = "Quagga"
|
||||
_depends = ("zebra", )
|
||||
_dirs = ()
|
||||
_configs = ()
|
||||
_startindex = 40
|
||||
_startup = ()
|
||||
_shutdown = ()
|
||||
_meta = "The config file for this service can be found in the Zebra service."
|
||||
|
||||
_ipv4_routing = False
|
||||
_ipv6_routing = False
|
||||
|
||||
@staticmethod
|
||||
def routerid(node):
|
||||
''' Helper to return the first IPv4 address of a node as its router ID.
|
||||
'''
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
continue
|
||||
for a in ifc.addrlist:
|
||||
if a.find(".") >= 0:
|
||||
return a.split('/')[0]
|
||||
#raise ValueError, "no IPv4 address found for router ID"
|
||||
return "0.0.0.0"
|
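# Illustrative note: for a node whose first non-control interface is addressed
# 10.0.0.1/24, routerid() returns "10.0.0.1"; with no IPv4 address it returns
# "0.0.0.0".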
||||
|
||||
@staticmethod
|
||||
def rj45check(ifc):
|
||||
''' Helper to detect whether an interface is connected to an external RJ45
|
||||
link.
|
||||
'''
|
||||
if ifc.net:
|
||||
for peerifc in ifc.net.netifs():
|
||||
if peerifc == ifc:
|
||||
continue
|
||||
if isinstance(peerifc, nodes.RJ45Node):
|
||||
return True
|
||||
return False
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
return ""
|
||||
|
||||
@classmethod
|
||||
def generatequaggaifcconfig(cls, node, ifc):
|
||||
return ""
|
||||
|
||||
@classmethod
|
||||
def generatequaggaconfig(cls, node):
|
||||
return ""
|
||||
|
||||
|
||||
|
||||
class Ospfv2(QuaggaService):
|
||||
''' The OSPFv2 service provides IPv4 routing for wired networks. It does
|
||||
not build its own configuration file but has hooks for adding to the
|
||||
unified Quagga.conf file.
|
||||
'''
|
||||
_name = "OSPFv2"
|
||||
_startup = ("sh quaggaboot.sh ospfd",)
|
||||
_shutdown = ("killall ospfd", )
|
||||
_validate = ("pidof ospfd", )
|
||||
_ipv4_routing = True
|
||||
|
||||
@staticmethod
|
||||
def mtucheck(ifc):
|
||||
''' Helper to detect MTU mismatch and add the appropriate OSPF
|
||||
mtu-ignore command. This is needed when e.g. a node is linked via a
|
||||
GreTap device.
|
||||
'''
|
||||
if ifc.mtu != 1500:
|
||||
# a workaround for PhysicalNode GreTap, which has no knowledge of
|
||||
# the other nodes/nets
|
||||
return " ip ospf mtu-ignore\n"
|
||||
if not ifc.net:
|
||||
return ""
|
||||
for i in ifc.net.netifs():
|
||||
if i.mtu != ifc.mtu:
|
||||
return " ip ospf mtu-ignore\n"
|
||||
return ""
|
||||
|
||||
@staticmethod
|
||||
def ptpcheck(ifc):
|
||||
''' Helper to detect whether interface is connected to a notional
|
||||
point-to-point link.
|
||||
'''
|
||||
if isinstance(ifc.net, nodes.PtpNet):
|
||||
return " ip ospf network point-to-point\n"
|
||||
return ""
|
||||
|
||||
@classmethod
|
||||
def generatequaggaconfig(cls, node):
|
||||
cfg = "router ospf\n"
|
||||
rtrid = cls.routerid(node)
|
||||
cfg += " router-id %s\n" % rtrid
|
||||
# network 10.0.0.0/24 area 0
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
continue
|
||||
for a in ifc.addrlist:
|
||||
if a.find(".") < 0:
|
||||
continue
|
||||
net = IPv4Prefix(a)
|
||||
cfg += " network %s area 0\n" % net
|
||||
cfg += "!\n"
|
||||
return cfg
|
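# Illustrative output (assuming IPv4Prefix renders the covering network
# prefix) for a hypothetical router whose only interface is 10.0.0.1/24:
#
#   router ospf
#     router-id 10.0.0.1
#     network 10.0.0.0/24 area 0
#   !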
||||
|
||||
@classmethod
|
||||
def generatequaggaifcconfig(cls, node, ifc):
|
||||
return cls.mtucheck(ifc)
|
||||
#cfg = cls.mtucheck(ifc)
|
||||
# external RJ45 connections will use default OSPF timers
|
||||
#if cls.rj45check(ifc):
|
||||
# return cfg
|
||||
#cfg += cls.ptpcheck(ifc)
|
||||
|
||||
#return cfg + """\
|
||||
# ip ospf hello-interval 2
|
||||
# ip ospf dead-interval 6
|
||||
# ip ospf retransmit-interval 5
|
||||
#"""
|
||||
|
||||
addservice(Ospfv2)
|
||||
|
||||
class Ospfv3(QuaggaService):
|
||||
''' The OSPFv3 service provides IPv6 routing for wired networks. It does
|
||||
not build its own configuration file but has hooks for adding to the
|
||||
unified Quagga.conf file.
|
||||
'''
|
||||
_name = "OSPFv3"
|
||||
_startup = ("sh quaggaboot.sh ospf6d",)
|
||||
_shutdown = ("killall ospf6d", )
|
||||
_validate = ("pidof ospf6d", )
|
||||
_ipv4_routing = True
|
||||
_ipv6_routing = True
|
||||
|
||||
@staticmethod
|
||||
def minmtu(ifc):
|
||||
''' Helper to discover the minimum MTU of interfaces linked with the
|
||||
given interface.
|
||||
'''
|
||||
mtu = ifc.mtu
|
||||
if not ifc.net:
|
||||
return mtu
|
||||
for i in ifc.net.netifs():
|
||||
if i.mtu < mtu:
|
||||
mtu = i.mtu
|
||||
return mtu
|
||||
|
||||
@classmethod
|
||||
def mtucheck(cls, ifc):
|
||||
''' Helper to detect MTU mismatch and add the appropriate OSPFv3
|
||||
ifmtu command. This is needed when e.g. a node is linked via a
|
||||
GreTap device.
|
||||
'''
|
||||
minmtu = cls.minmtu(ifc)
|
||||
if minmtu < ifc.mtu:
|
||||
return " ipv6 ospf6 ifmtu %d\n" % minmtu
|
||||
else:
|
||||
return ""
|
||||
|
||||
@staticmethod
|
||||
def ptpcheck(ifc):
|
||||
''' Helper to detect whether interface is connected to a notional
|
||||
point-to-point link.
|
||||
'''
|
||||
if isinstance(ifc.net, nodes.PtpNet):
|
||||
return " ipv6 ospf6 network point-to-point\n"
|
||||
return ""
|
||||
|
||||
@classmethod
|
||||
def generatequaggaconfig(cls, node):
|
||||
cfg = "router ospf6\n"
|
||||
rtrid = cls.routerid(node)
|
||||
cfg += " router-id %s\n" % rtrid
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
continue
|
||||
cfg += " interface %s area 0.0.0.0\n" % ifc.name
|
||||
cfg += "!\n"
|
||||
return cfg
|
||||
|
||||
@classmethod
|
||||
def generatequaggaifcconfig(cls, node, ifc):
|
||||
return cls.mtucheck(ifc)
|
||||
#cfg = cls.mtucheck(ifc)
|
||||
# external RJ45 connections will use default OSPF timers
|
||||
#if cls.rj45check(ifc):
|
||||
# return cfg
|
||||
#cfg += cls.ptpcheck(ifc)
|
||||
|
||||
#return cfg + """\
|
||||
# ipv6 ospf6 hello-interval 2
|
||||
# ipv6 ospf6 dead-interval 6
|
||||
# ipv6 ospf6 retransmit-interval 5
|
||||
#"""
|
||||
|
||||
addservice(Ospfv3)
|
||||
|
||||
class Ospfv3mdr(Ospfv3):
|
||||
''' The OSPFv3 MANET Designated Router (MDR) service provides IPv6
|
||||
routing for wireless networks. It does not build its own
|
||||
configuration file but has hooks for adding to the
|
||||
unified Quagga.conf file.
|
||||
'''
|
||||
_name = "OSPFv3MDR"
|
||||
_ipv4_routing = True
|
||||
|
||||
@classmethod
|
||||
def generatequaggaifcconfig(cls, node, ifc):
|
||||
cfg = cls.mtucheck(ifc)
|
||||
|
||||
return cfg + """\
|
||||
ipv6 ospf6 instance-id 65
|
||||
ipv6 ospf6 hello-interval 2
|
||||
ipv6 ospf6 dead-interval 6
|
||||
ipv6 ospf6 retransmit-interval 5
|
||||
ipv6 ospf6 network manet-designated-router
|
||||
ipv6 ospf6 diffhellos
|
||||
ipv6 ospf6 adjacencyconnectivity uniconnected
|
||||
ipv6 ospf6 lsafullness mincostlsa
|
||||
"""
|
||||
|
||||
addservice(Ospfv3mdr)
|
||||
|
||||
class Bgp(QuaggaService):
|
||||
''' The BGP service provides interdomain routing.
|
||||
Peers must be manually configured, with a full mesh for those
|
||||
having the same AS number.
|
||||
'''
|
||||
_name = "BGP"
|
||||
_startup = ("sh quaggaboot.sh bgpd",)
|
||||
_shutdown = ("killall bgpd", )
|
||||
_validate = ("pidof bgpd", )
|
||||
_custom_needed = True
|
||||
_ipv4_routing = True
|
||||
_ipv6_routing = True
|
||||
|
||||
@classmethod
|
||||
def generatequaggaconfig(cls, node):
|
||||
cfg = "!\n! BGP configuration\n!\n"
|
||||
cfg += "! You should configure the AS number below,\n"
|
||||
cfg += "! along with this router's peers.\n!\n"
|
||||
cfg += "router bgp %s\n" % node.objid
|
||||
rtrid = cls.routerid(node)
|
||||
cfg += " bgp router-id %s\n" % rtrid
|
||||
cfg += " redistribute connected\n"
|
||||
cfg += "! neighbor 1.2.3.4 remote-as 555\n!\n"
|
||||
return cfg
|
||||
|
||||
addservice(Bgp)
|
||||
|
||||
class Rip(QuaggaService):
|
||||
''' The RIP service provides IPv4 routing for wired networks.
|
||||
'''
|
||||
_name = "RIP"
|
||||
_startup = ("sh quaggaboot.sh ripd",)
|
||||
_shutdown = ("killall ripd", )
|
||||
_validate = ("pidof ripd", )
|
||||
_ipv4_routing = True
|
||||
|
||||
@classmethod
|
||||
def generatequaggaconfig(cls, node):
|
||||
cfg = """\
|
||||
router rip
|
||||
redistribute static
|
||||
redistribute connected
|
||||
redistribute ospf
|
||||
network 0.0.0.0/0
|
||||
!
|
||||
"""
|
||||
return cfg
|
||||
|
||||
addservice(Rip)
|
||||
|
||||
class Ripng(QuaggaService):
|
||||
''' The RIP NG service provides IPv6 routing for wired networks.
|
||||
'''
|
||||
_name = "RIPNG"
|
||||
_startup = ("sh quaggaboot.sh ripngd",)
|
||||
_shutdown = ("killall ripngd", )
|
||||
_validate = ("pidof ripngd", )
|
||||
_ipv6_routing = True
|
||||
|
||||
@classmethod
|
||||
def generatequaggaconfig(cls, node):
|
||||
cfg = """\
|
||||
router ripng
|
||||
redistribute static
|
||||
redistribute connected
|
||||
redistribute ospf6
|
||||
network ::/0
|
||||
!
|
||||
"""
|
||||
return cfg
|
||||
|
||||
addservice(Ripng)
|
||||
|
||||
class Babel(QuaggaService):
|
||||
''' The Babel service provides a loop-avoiding distance-vector routing
|
||||
protocol for IPv6 and IPv4 with fast convergence properties.
|
||||
'''
|
||||
_name = "Babel"
|
||||
_startup = ("sh quaggaboot.sh babeld",)
|
||||
_shutdown = ("killall babeld", )
|
||||
_validate = ("pidof babeld", )
|
||||
_ipv6_routing = True
|
||||
|
||||
@classmethod
|
||||
def generatequaggaconfig(cls, node):
|
||||
cfg = "router babel\n"
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
continue
|
||||
cfg += " network %s\n" % ifc.name
|
||||
cfg += " redistribute static\n redistribute connected\n"
|
||||
return cfg
|
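# Illustrative output for a hypothetical router with interfaces eth0 and eth1:
#
#   router babel
#     network eth0
#     network eth1
#     redistribute static
#     redistribute connected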
||||
|
||||
@classmethod
|
||||
def generatequaggaifcconfig(cls, node, ifc):
|
||||
if ifc.net and ifc.net.linktype == coreapi.CORE_LINK_WIRELESS:
|
||||
return " babel wireless\n no babel split-horizon\n"
|
||||
else:
|
||||
return " babel wired\n babel split-horizon\n"
|
||||
|
||||
addservice(Babel)
|
||||
|
||||
|
||||
class Vtysh(CoreService):
|
||||
''' Simple service to run vtysh -b (boot) after all Quagga daemons have
|
||||
started.
|
||||
'''
|
||||
_name = "vtysh"
|
||||
_group = "Quagga"
|
||||
_startindex = 45
|
||||
_startup = ("sh quaggaboot.sh vtysh",)
|
||||
_shutdown = ()
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
return ""
|
||||
|
||||
addservice(Vtysh)
|
||||
|
||||
|
129
daemon/core/services/security.py
Normal file
|
@@ -0,0 +1,129 @@
|
|||
#
|
||||
# CORE - define security services : vpnclient, vpnserver, ipsec and firewall
|
||||
#
|
||||
# Copyright (c)2011-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
'''
|
||||
security.py: defines security services (vpnclient, vpnserver, ipsec and
|
||||
firewall)
|
||||
'''
|
||||
|
||||
import os
|
||||
|
||||
from core.service import CoreService, addservice
|
||||
from core.constants import *
|
||||
|
||||
class VPNClient(CoreService):
|
||||
''' OpenVPN client service, started from a user-customized vpnclient.sh script.
|
||||
'''
|
||||
_name = "VPNClient"
|
||||
_group = "Security"
|
||||
_configs = ('vpnclient.sh', )
|
||||
_startindex = 60
|
||||
_startup = ('sh vpnclient.sh',)
|
||||
_shutdown = ("killall openvpn",)
|
||||
_validate = ("pidof openvpn", )
|
||||
_custom_needed = True
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Return the client.conf and vpnclient.sh file contents to the GUI for user customization.
|
||||
'''
|
||||
cfg = "#!/bin/sh\n"
|
||||
cfg += "# custom VPN Client configuration for service (security.py)\n"
|
||||
fname = "%s/examples/services/sampleVPNClient" % CORE_DATA_DIR
|
||||
try:
|
||||
cfg += open(fname, "rb").read()
|
||||
except Exception, e:
|
||||
print "Error opening VPN client configuration template (%s): %s" % \
|
||||
(fname, e)
|
||||
return cfg
|
||||
|
||||
# this line is required to add the above class to the list of available services
|
||||
addservice(VPNClient)
|
||||
|
||||
class VPNServer(CoreService):
|
||||
''' OpenVPN server service, started from a user-customized vpnserver.sh script.
|
||||
'''
|
||||
_name = "VPNServer"
|
||||
_group = "Security"
|
||||
_configs = ('vpnserver.sh', )
|
||||
_startindex = 50
|
||||
_startup = ('sh vpnserver.sh',)
|
||||
_shutdown = ("killall openvpn",)
|
||||
_validate = ("pidof openvpn", )
|
||||
_custom_needed = True
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Return the sample server.conf and vpnserver.sh file contents to
|
||||
the GUI for user customization.
|
||||
'''
|
||||
cfg = "#!/bin/sh\n"
|
||||
cfg += "# custom VPN Server Configuration for service (security.py)\n"
|
||||
fname = "%s/examples/services/sampleVPNServer" % CORE_DATA_DIR
|
||||
try:
|
||||
cfg += open(fname, "rb").read()
|
||||
except Exception, e:
|
||||
print "Error opening VPN server configuration template (%s): %s" % \
|
||||
(fname, e)
|
||||
return cfg
|
||||
|
||||
addservice(VPNServer)
|
||||
|
||||
class IPsec(CoreService):
|
||||
''' IPsec service that sets up static tunnel-mode security associations using racoon.
|
||||
'''
|
||||
_name = "IPsec"
|
||||
_group = "Security"
|
||||
_configs = ('ipsec.sh', )
|
||||
_startindex = 60
|
||||
_startup = ('sh ipsec.sh',)
|
||||
_shutdown = ("killall racoon",)
|
||||
_custom_needed = True
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Return the ipsec.conf and racoon.conf file contents to
|
||||
the GUI for user customization.
|
||||
'''
|
||||
cfg = "#!/bin/sh\n"
|
||||
cfg += "# set up static tunnel mode security assocation for service "
|
||||
cfg += "(security.py)\n"
|
||||
fname = "%s/examples/services/sampleIPsec" % CORE_DATA_DIR
|
||||
try:
|
||||
cfg += open(fname, "rb").read()
|
||||
except Exception, e:
|
||||
print "Error opening IPsec configuration template (%s): %s" % \
|
||||
(fname, e)
|
||||
return cfg
|
||||
|
||||
addservice(IPsec)
|
||||
|
||||
class Firewall(CoreService):
|
||||
''' Firewall service providing a user-customizable script of firewall rules.
|
||||
'''
|
||||
_name = "Firewall"
|
||||
_group = "Security"
|
||||
_configs = ('firewall.sh', )
|
||||
_startindex = 20
|
||||
_startup = ('sh firewall.sh',)
|
||||
_custom_needed = True
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Return the firewall rule examples to GUI for user customization.
|
||||
'''
|
||||
cfg = "#!/bin/sh\n"
|
||||
cfg += "# custom node firewall rules for service (security.py)\n"
|
||||
fname = "%s/examples/services/sampleFirewall" % CORE_DATA_DIR
|
||||
try:
|
||||
cfg += open(fname, "rb").read()
|
||||
except Exception, e:
|
||||
print "Error opening Firewall configuration template (%s): %s" % \
|
||||
(fname, e)
|
||||
return cfg
|
||||
|
||||
addservice(Firewall)
|
||||
|
189
daemon/core/services/ucarp.py
Executable file
|
@@ -0,0 +1,189 @@
|
|||
#
|
||||
# CORE configuration for UCARP
|
||||
# Copyright (c) 2012 Jonathan deBoer
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
#
|
||||
# author: Jonathan deBoer <jdccdevel@gmail.com>
|
||||
#
|
||||
'''
|
||||
ucarp.py: defines high-availability IP address controlled by ucarp
|
||||
'''
|
||||
|
||||
import os
|
||||
|
||||
from core.service import CoreService, addservice
|
||||
from core.misc.ipaddr import IPv4Prefix
|
||||
from core.constants import *
|
||||
|
||||
|
||||
UCARP_ETC="/usr/local/etc/ucarp"
|
||||
|
||||
class Ucarp(CoreService):
|
||||
''' UCARP service for managing a high-availability virtual IP address.
|
||||
'''
|
||||
_name = "ucarp"
|
||||
_group = "Utility"
|
||||
_depends = ( )
|
||||
_dirs = (UCARP_ETC, )
|
||||
_configs = (UCARP_ETC + "/default.sh", UCARP_ETC + "/default-up.sh", UCARP_ETC + "/default-down.sh", "ucarpboot.sh",)
|
||||
_startindex = 65
|
||||
_startup = ("sh ucarpboot.sh",)
|
||||
_shutdown = ("killall ucarp", )
|
||||
_validate = ("pidof ucarp", )
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Return the default file contents
|
||||
'''
|
||||
if filename == cls._configs[0]:
|
||||
return cls.generateUcarpConf(node, services)
|
||||
elif filename == cls._configs[1]:
|
||||
return cls.generateVipUp(node, services)
|
||||
elif filename == cls._configs[2]:
|
||||
return cls.generateVipDown(node, services)
|
||||
elif filename == cls._configs[3]:
|
||||
return cls.generateUcarpBoot(node, services)
|
||||
else:
|
||||
raise ValueError
|
||||
|
||||
@classmethod
|
||||
def generateUcarpConf(cls, node, services):
|
||||
''' Returns configuration file text.
|
||||
'''
|
||||
try:
|
||||
ucarp_bin = node.session.cfg['ucarp_bin']
|
||||
except KeyError:
|
||||
ucarp_bin = "/usr/sbin/ucarp"
|
||||
return """\
|
||||
#!/bin/sh
|
||||
# Location of UCARP executable
|
||||
UCARP_EXEC=%s
|
||||
|
||||
# Location of the UCARP config directory
|
||||
UCARP_CFGDIR=%s
|
||||
|
||||
# Logging Facility
|
||||
FACILITY=daemon
|
||||
|
||||
# Instance ID
|
||||
# Any number from 1 to 255
|
||||
INSTANCE_ID=1
|
||||
|
||||
# Password
|
||||
# Master and Backup(s) need to be the same
|
||||
PASSWORD="changeme"
|
||||
|
||||
# The failover application address
|
||||
VIRTUAL_ADDRESS=127.0.0.254
|
||||
VIRTUAL_NET=8
|
||||
|
||||
# Interface for IP Address
|
||||
INTERFACE=lo
|
||||
|
||||
# Maintenance address of the local machine
|
||||
SOURCE_ADDRESS=127.0.0.1
|
||||
|
||||
# The ratio number to be considered before marking the node as dead
|
||||
DEAD_RATIO=3
|
||||
|
||||
# UCARP base, lower number will be preferred master
|
||||
# set to same to have master stay as long as possible
|
||||
UCARP_BASE=1
|
||||
SKEW=0
|
||||
|
||||
# UCARP options
|
||||
# -z run shutdown script on exit
|
||||
# -P force preferred master
|
||||
# -n don't run down script at start up when we are backup
|
||||
# -M use broadcast instead of multicast
|
||||
# -S ignore interface state
|
||||
OPTIONS="-z -n -M"
|
||||
|
||||
# Send extra parameter to down and up scripts
|
||||
#XPARAM="-x <enter param here>"
|
||||
XPARAM="-x ${VIRTUAL_NET}"
|
||||
|
||||
# The start and stop scripts
|
||||
START_SCRIPT=${UCARP_CFGDIR}/default-up.sh
|
||||
STOP_SCRIPT=${UCARP_CFGDIR}/default-down.sh
|
||||
|
||||
# These lines should not need to be touched
|
||||
UCARP_OPTS="$OPTIONS -b $UCARP_BASE -k $SKEW -i $INTERFACE -v $INSTANCE_ID -p $PASSWORD -u $START_SCRIPT -d $STOP_SCRIPT -a $VIRTUAL_ADDRESS -s $SOURCE_ADDRESS -f $FACILITY $XPARAM"
|
||||
|
||||
${UCARP_EXEC} -B ${UCARP_OPTS}
|
||||
""" % (ucarp_bin, UCARP_ETC)
|
||||
|
||||
@classmethod
|
||||
def generateUcarpBoot(cls, node, services):
|
||||
''' Generate a shell script used to boot the Ucarp daemons.
|
||||
'''
|
||||
try:
|
||||
ucarp_bin = node.session.cfg['ucarp_bin']
|
||||
except KeyError:
|
||||
ucarp_bin = "/usr/sbin/ucarp"
|
||||
return """\
|
||||
#!/bin/sh
|
||||
# Location of the UCARP config directory
|
||||
UCARP_CFGDIR=%s
|
||||
|
||||
chmod a+x ${UCARP_CFGDIR}/*.sh
|
||||
|
||||
# Start the default ucarp daemon configuration
|
||||
${UCARP_CFGDIR}/default.sh
|
||||
|
||||
""" % (UCARP_ETC)
|
||||
|
||||
@classmethod
|
||||
def generateVipUp(cls, node, services):
|
||||
''' Generate a shell script used to start the virtual ip
|
||||
'''
|
||||
try:
|
||||
ucarp_bin = node.session.cfg['ucarp_bin']
|
||||
except KeyError:
|
||||
ucarp_bin = "/usr/sbin/ucarp"
|
||||
return """\
|
||||
#!/bin/bash
|
||||
|
||||
# Should be invoked as "default-up.sh <dev> <ip>"
|
||||
exec 2> /dev/null
|
||||
|
||||
IP="${2}"
|
||||
NET="${3}"
|
||||
if [ -z "$NET" ]; then
|
||||
NET="24"
|
||||
fi
|
||||
|
||||
/sbin/ip addr add ${IP}/${NET} dev "$1"
|
||||
|
||||
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
def generateVipDown(cls, node, services):
|
||||
''' Generate a shell script used to stop the virtual ip
|
||||
'''
|
||||
try:
|
||||
ucarp_bin = node.session.cfg['ucarp_bin']
|
||||
except KeyError:
|
||||
ucarp_bin = "/usr/sbin/ucarp"
|
||||
return """\
|
||||
#!/bin/bash
|
||||
|
||||
# Should be invoked as "default-down.sh <dev> <ip>"
|
||||
exec 2> /dev/null
|
||||
|
||||
IP="${2}"
|
||||
NET="${3}"
|
||||
if [ -z "$NET" ]; then
|
||||
NET="24"
|
||||
fi
|
||||
|
||||
/sbin/ip addr del ${IP}/${NET} dev "$1"
|
||||
|
||||
|
||||
"""
|
||||
|
||||
|
||||
addservice(Ucarp)
|
||||
|
676
daemon/core/services/utility.py
Normal file
|
@@ -0,0 +1,676 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
utility.py: defines miscellaneous utility services.
|
||||
'''
|
||||
|
||||
import os
|
||||
|
||||
from core.service import CoreService, addservice
|
||||
from core.misc.ipaddr import IPv4Prefix, IPv6Prefix
|
||||
from core.misc.utils import *
|
||||
from core.constants import *
|
||||
|
||||
class UtilService(CoreService):
|
||||
''' Parent class for utility services.
|
||||
'''
|
||||
_name = "UtilityProcess"
|
||||
_group = "Utility"
|
||||
_depends = ()
|
||||
_dirs = ()
|
||||
_configs = ()
|
||||
_startindex = 80
|
||||
_startup = ()
|
||||
_shutdown = ()
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
return ""
|
||||
|
||||
class IPForwardService(UtilService):
|
||||
_name = "IPForward"
|
||||
_configs = ("ipforward.sh", )
|
||||
_startindex = 5
|
||||
_startup = ("sh ipforward.sh", )
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
if os.uname()[0] == "Linux":
|
||||
return cls.generateconfiglinux(node, filename, services)
|
||||
elif os.uname()[0] == "FreeBSD":
|
||||
return cls.generateconfigbsd(node, filename, services)
|
||||
else:
|
||||
raise Exception, "unknown platform"
|
||||
|
||||
@classmethod
|
||||
def generateconfiglinux(cls, node, filename, services):
|
||||
cfg = """\
|
||||
#!/bin/sh
|
||||
# auto-generated by IPForward service (utility.py)
|
||||
%s -w net.ipv4.conf.all.forwarding=1
|
||||
%s -w net.ipv6.conf.all.forwarding=1
|
||||
%s -w net.ipv4.conf.all.send_redirects=0
|
||||
%s -w net.ipv4.conf.all.rp_filter=0
|
||||
%s -w net.ipv4.conf.default.rp_filter=0
|
||||
""" % (SYSCTL_BIN, SYSCTL_BIN, SYSCTL_BIN, SYSCTL_BIN, SYSCTL_BIN)
|
||||
for ifc in node.netifs():
|
||||
name = sysctldevname(ifc.name)
|
||||
cfg += "%s -w net.ipv4.conf.%s.forwarding=1\n" % (SYSCTL_BIN, name)
|
||||
cfg += "%s -w net.ipv4.conf.%s.send_redirects=0\n" % \
|
||||
(SYSCTL_BIN, name)
|
||||
cfg += "%s -w net.ipv4.conf.%s.rp_filter=0\n" % (SYSCTL_BIN, name)
|
||||
return cfg
|
||||
|
||||
@classmethod
|
||||
def generateconfigbsd(cls, node, filename, services):
|
||||
return """\
|
||||
#!/bin/sh
|
||||
# auto-generated by IPForward service (utility.py)
|
||||
%s -w net.inet.ip.forwarding=1
|
||||
%s -w net.inet6.ip6.forwarding=1
|
||||
%s -w net.inet.icmp.bmcastecho=1
|
||||
%s -w net.inet.icmp.icmplim=0
|
||||
""" % (SYSCTL_BIN, SYSCTL_BIN, SYSCTL_BIN, SYSCTL_BIN)
|
||||
|
||||
addservice(IPForwardService)
|
||||
|
||||
class DefaultRouteService(UtilService):
|
||||
_name = "DefaultRoute"
|
||||
_configs = ("defaultroute.sh",)
|
||||
_startup = ("sh defaultroute.sh",)
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
cfg = "#!/bin/sh\n"
|
||||
cfg += "# auto-generated by DefaultRoute service (utility.py)\n"
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
continue
|
||||
cfg += "\n".join(map(cls.addrstr, ifc.addrlist))
|
||||
cfg += "\n"
|
||||
return cfg
|
||||
|
||||
@staticmethod
|
||||
def addrstr(x):
|
||||
if x.find(":") >= 0:
|
||||
net = IPv6Prefix(x)
|
||||
fam = "inet6 ::"
|
||||
else:
|
||||
net = IPv4Prefix(x)
|
||||
fam = "inet 0.0.0.0"
|
||||
if net.maxaddr() == net.minaddr():
|
||||
return ""
|
||||
else:
|
||||
if os.uname()[0] == "Linux":
|
||||
rtcmd = "ip route add default via"
|
||||
elif os.uname()[0] == "FreeBSD":
|
||||
rtcmd = "route add -%s" % fam
|
||||
else:
|
||||
raise Exception, "unknown platform"
|
||||
return "%s %s" % (rtcmd, net.minaddr())
|
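# Illustrative note (assuming IPv4Prefix.minaddr() yields the lowest host
# address in the prefix): on Linux, an interface address of "10.0.1.2/24"
# produces "ip route add default via 10.0.1.1", while a /32 address yields
# no route at all because the prefix holds only one address.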
||||
|
||||
addservice(DefaultRouteService)
|
||||
|
||||
class DefaultMulticastRouteService(UtilService):
|
||||
_name = "DefaultMulticastRoute"
|
||||
_configs = ("defaultmroute.sh",)
|
||||
_startup = ("sh defaultmroute.sh",)
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
cfg = "#!/bin/sh\n"
|
||||
cfg += "# auto-generated by DefaultMulticastRoute service (utility.py)\n"
|
||||
cfg += "# the first interface is chosen below; please change it "
|
||||
cfg += "as needed\n"
|
||||
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
continue
|
||||
if os.uname()[0] == "Linux":
|
||||
rtcmd = "ip route add 224.0.0.0/4 dev"
|
||||
elif os.uname()[0] == "FreeBSD":
|
||||
rtcmd = "route add 224.0.0.0/4 -iface"
|
||||
else:
|
||||
raise Exception, "unknown platform"
|
||||
cfg += "%s %s\n" % (rtcmd, ifc.name)
|
||||
cfg += "\n"
|
||||
break
|
||||
return cfg
|
||||
|
||||
addservice(DefaultMulticastRouteService)
|
||||
|
||||
class StaticRouteService(UtilService):
|
||||
_name = "StaticRoute"
|
||||
_configs = ("staticroute.sh",)
|
||||
_startup = ("sh staticroute.sh",)
|
||||
_custom_needed = True
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
cfg = "#!/bin/sh\n"
|
||||
cfg += "# auto-generated by StaticRoute service (utility.py)\n#\n"
|
||||
cfg += "# NOTE: this service must be customized to be of any use\n"
|
||||
cfg += "# Below are samples that you can uncomment and edit.\n#\n"
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
continue
|
||||
cfg += "\n".join(map(cls.routestr, ifc.addrlist))
|
||||
cfg += "\n"
|
||||
return cfg
|
||||
|
||||
@staticmethod
|
||||
def routestr(x):
|
||||
if x.find(":") >= 0:
|
||||
net = IPv6Prefix(x)
|
||||
fam = "inet6"
|
||||
dst = "3ffe:4::/64"
|
||||
else:
|
||||
net = IPv4Prefix(x)
|
||||
fam = "inet"
|
||||
dst = "10.9.8.0/24"
|
||||
if net.maxaddr() == net.minaddr():
|
||||
return ""
|
||||
else:
|
||||
if os.uname()[0] == "Linux":
|
||||
rtcmd = "#/sbin/ip route add %s via" % dst
|
||||
elif os.uname()[0] == "FreeBSD":
|
||||
rtcmd = "#/sbin/route add -%s %s" % (fam, dst)
|
||||
else:
|
||||
raise Exception, "unknown platform"
|
||||
return "%s %s" % (rtcmd, net.minaddr())
|
||||
|
||||
addservice(StaticRouteService)
|
||||
|
||||
class SshService(UtilService):
|
||||
_name = "SSH"
|
||||
if os.uname()[0] == "FreeBSD":
|
||||
_configs = ("startsshd.sh", "sshd_config",)
|
||||
_dirs = ()
|
||||
else:
|
||||
_configs = ("startsshd.sh", "/etc/ssh/sshd_config",)
|
||||
_dirs = ("/etc/ssh", "/var/run/sshd",)
|
||||
_startup = ("sh startsshd.sh",)
|
||||
_shutdown = ("killall sshd",)
|
||||
_validate = ()
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Use a startup script for launching sshd in order to wait for host
|
||||
key generation.
|
||||
'''
|
||||
if os.uname()[0] == "FreeBSD":
|
||||
sshcfgdir = node.nodedir
|
||||
sshstatedir = node.nodedir
|
||||
sshlibdir = "/usr/libexec"
|
||||
else:
|
||||
sshcfgdir = cls._dirs[0]
|
||||
sshstatedir = cls._dirs[1]
|
||||
sshlibdir = "/usr/lib/openssh"
|
||||
if filename == "startsshd.sh":
|
||||
return """\
|
||||
#!/bin/sh
|
||||
# auto-generated by SSH service (utility.py)
|
||||
ssh-keygen -q -t rsa -N "" -f %s/ssh_host_rsa_key
|
||||
chmod 655 %s
|
||||
# wait until RSA host key has been generated to launch sshd
|
||||
/usr/sbin/sshd -f %s/sshd_config
|
||||
""" % (sshcfgdir, sshstatedir, sshcfgdir)
|
||||
else:
|
||||
return """\
|
||||
# auto-generated by SSH service (utility.py)
|
||||
Port 22
|
||||
Protocol 2
|
||||
HostKey %s/ssh_host_rsa_key
|
||||
UsePrivilegeSeparation yes
|
||||
PidFile %s/sshd.pid
|
||||
|
||||
KeyRegenerationInterval 3600
|
||||
ServerKeyBits 768
|
||||
|
||||
SyslogFacility AUTH
|
||||
LogLevel INFO
|
||||
|
||||
LoginGraceTime 120
|
||||
PermitRootLogin yes
|
||||
StrictModes yes
|
||||
|
||||
RSAAuthentication yes
|
||||
PubkeyAuthentication yes
|
||||
|
||||
IgnoreRhosts yes
|
||||
RhostsRSAAuthentication no
|
||||
HostbasedAuthentication no
|
||||
|
||||
PermitEmptyPasswords no
|
||||
ChallengeResponseAuthentication no
|
||||
|
||||
X11Forwarding yes
|
||||
X11DisplayOffset 10
|
||||
PrintMotd no
|
||||
PrintLastLog yes
|
||||
TCPKeepAlive yes
|
||||
|
||||
AcceptEnv LANG LC_*
|
||||
Subsystem sftp %s/sftp-server
|
||||
UsePAM yes
|
||||
UseDNS no
|
||||
""" % (sshcfgdir, sshstatedir, sshlibdir)
|
||||
|
||||
addservice(SshService)
|
||||
|
||||
class DhcpService(UtilService):
|
||||
_name = "DHCP"
|
||||
_configs = ("/etc/dhcp/dhcpd.conf",)
|
||||
_dirs = ("/etc/dhcp",)
|
||||
_startup = ("dhcpd",)
|
||||
_shutdown = ("killall dhcpd",)
|
||||
_validate = ("pidof dhcpd",)
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Generate a dhcpd config file using the network address of
|
||||
each interface.
|
||||
'''
|
||||
cfg = """\
|
||||
# auto-generated by DHCP service (utility.py)
|
||||
# NOTE: move these option lines into the desired pool { } block(s) below
|
||||
#option domain-name "test.com";
|
||||
#option domain-name-servers 10.0.0.1;
|
||||
#option routers 10.0.0.1;
|
||||
|
||||
log-facility local6;
|
||||
|
||||
default-lease-time 600;
|
||||
max-lease-time 7200;
|
||||
|
||||
ddns-update-style none;
|
||||
"""
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
continue
|
||||
cfg += "\n".join(map(cls.subnetentry, ifc.addrlist))
|
||||
cfg += "\n"
|
||||
return cfg
|
||||
|
||||
@staticmethod
|
||||
def subnetentry(x):
|
||||
''' Generate a subnet declaration block given an IPv4 prefix string
|
||||
for inclusion in the dhcpd3 config file.
|
||||
'''
|
||||
if x.find(":") >= 0:
|
||||
return ""
|
||||
else:
|
||||
addr = x.split("/")[0]
|
||||
net = IPv4Prefix(x)
|
||||
# divide the address space in half
|
||||
rangelow = net.addr(net.numaddr() / 2)
|
||||
rangehigh = net.maxaddr()
|
||||
return """
|
||||
subnet %s netmask %s {
|
||||
pool {
|
||||
range %s %s;
|
||||
default-lease-time 600;
|
||||
option routers %s;
|
||||
}
|
||||
}
|
||||
""" % (net.prefixstr(), net.netmaskstr(), rangelow, rangehigh, addr)
|
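# Illustrative note (assuming the IPv4Prefix helpers behave as their names
# suggest): an interface address of "10.0.0.1/24" would yield a
# "subnet 10.0.0.0 netmask 255.255.255.0" block whose pool covers roughly the
# upper half of the subnet, with "option routers 10.0.0.1"; IPv6 addresses
# are skipped entirely.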
||||
|
||||
addservice(DhcpService)
|
||||
|
||||
class DhcpClientService(UtilService):
|
||||
''' Use a DHCP client for all interfaces for addressing.
|
||||
'''
|
||||
_name = "DHCPClient"
|
||||
_configs = ("startdhcpclient.sh",)
|
||||
_startup = ("sh startdhcpclient.sh",)
|
||||
_shutdown = ("killall dhclient",)
|
||||
_validate = ("pidof dhclient",)
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Generate a script to invoke dhclient on all interfaces.
|
||||
'''
|
||||
cfg = "#!/bin/sh\n"
|
||||
cfg += "# auto-generated by DHCPClient service (utility.py)\n"
|
||||
cfg += "# uncomment this mkdir line and symlink line to enable client-"
|
||||
cfg += "side DNS\n# resolution based on the DHCP server response.\n"
|
||||
cfg += "#mkdir -p /var/run/resolvconf/interface\n"
|
||||
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
continue
|
||||
cfg += "#ln -s /var/run/resolvconf/interface/%s.dhclient" % ifc.name
|
||||
cfg += " /var/run/resolvconf/resolv.conf\n"
|
||||
cfg += "/sbin/dhclient -nw -pf /var/run/dhclient-%s.pid" % ifc.name
|
||||
cfg += " -lf /var/run/dhclient-%s.lease %s\n" % (ifc.name, ifc.name)
|
||||
return cfg
|
||||
|
||||
addservice(DhcpClientService)
|
||||
|
||||
class FtpService(UtilService):
|
||||
''' Start a vsftpd server.
|
||||
'''
|
||||
_name = "FTP"
|
||||
_configs = ("vsftpd.conf",)
|
||||
_dirs = ("/var/run/vsftpd/empty", "/var/ftp",)
|
||||
_startup = ("vsftpd ./vsftpd.conf",)
|
||||
_shutdown = ("killall vsftpd",)
|
||||
_validate = ("pidof vsftpd",)
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Generate a vsftpd.conf configuration file.
|
||||
'''
|
||||
return """\
|
||||
# vsftpd.conf auto-generated by FTP service (utility.py)
|
||||
listen=YES
|
||||
anonymous_enable=YES
|
||||
local_enable=YES
|
||||
dirmessage_enable=YES
|
||||
use_localtime=YES
|
||||
xferlog_enable=YES
|
||||
connect_from_port_20=YES
|
||||
xferlog_file=/var/log/vsftpd.log
|
||||
ftpd_banner=Welcome to the CORE FTP service
|
||||
secure_chroot_dir=/var/run/vsftpd/empty
|
||||
anon_root=/var/ftp
|
||||
"""
|
||||
|
||||
addservice(FtpService)
|
||||
|
||||
class HttpService(UtilService):
|
||||
''' Start an apache server.
|
||||
'''
|
||||
_name = "HTTP"
|
||||
_configs = ("/etc/apache2/apache2.conf", "/etc/apache2/envvars",
|
||||
"/var/www/index.html",)
|
||||
_dirs = ("/etc/apache2", "/var/run/apache2", "/var/log/apache2",
|
||||
"/var/lock/apache2", "/var/www", )
|
||||
_startup = ("apache2ctl start",)
|
||||
_shutdown = ("apache2ctl stop",)
|
||||
_validate = ("pidof apache2",)
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Generate an apache2.conf configuration file.
|
||||
'''
|
||||
if filename == cls._configs[0]:
|
||||
return cls.generateapache2conf(node, filename, services)
|
||||
elif filename == cls._configs[1]:
|
||||
return cls.generateenvvars(node, filename, services)
|
||||
elif filename == cls._configs[2]:
|
||||
return cls.generatehtml(node, filename, services)
|
||||
else:
|
||||
return ""
|
||||
|
||||
@classmethod
|
||||
def generateapache2conf(cls, node, filename, services):
|
||||
return """\
|
||||
# apache2.conf generated by utility.py:HttpService
|
||||
LockFile ${APACHE_LOCK_DIR}/accept.lock
|
||||
PidFile ${APACHE_PID_FILE}
|
||||
Timeout 300
|
||||
KeepAlive On
|
||||
MaxKeepAliveRequests 100
|
||||
KeepAliveTimeout 5
|
||||
|
||||
<IfModule mpm_prefork_module>
|
||||
StartServers 5
|
||||
MinSpareServers 5
|
||||
MaxSpareServers 10
|
||||
MaxClients 150
|
||||
MaxRequestsPerChild 0
|
||||
</IfModule>
|
||||
|
||||
<IfModule mpm_worker_module>
|
||||
StartServers 2
|
||||
MinSpareThreads 25
|
||||
MaxSpareThreads 75
|
||||
ThreadLimit 64
|
||||
ThreadsPerChild 25
|
||||
MaxClients 150
|
||||
MaxRequestsPerChild 0
|
||||
</IfModule>
|
||||
|
||||
<IfModule mpm_event_module>
|
||||
StartServers 2
|
||||
MinSpareThreads 25
|
||||
MaxSpareThreads 75
|
||||
ThreadLimit 64
|
||||
ThreadsPerChild 25
|
||||
MaxClients 150
|
||||
MaxRequestsPerChild 0
|
||||
</IfModule>
|
||||
|
||||
User ${APACHE_RUN_USER}
|
||||
Group ${APACHE_RUN_GROUP}
|
||||
|
||||
AccessFileName .htaccess
|
||||
|
||||
<Files ~ "^\.ht">
|
||||
Order allow,deny
|
||||
Deny from all
|
||||
Satisfy all
|
||||
</Files>
|
||||
|
||||
DefaultType None
|
||||
|
||||
HostnameLookups Off
|
||||
|
||||
ErrorLog ${APACHE_LOG_DIR}/error.log
|
||||
LogLevel warn
|
||||
|
||||
#Include mods-enabled/*.load
|
||||
#Include mods-enabled/*.conf
|
||||
LoadModule alias_module /usr/lib/apache2/modules/mod_alias.so
|
||||
LoadModule auth_basic_module /usr/lib/apache2/modules/mod_auth_basic.so
|
||||
LoadModule authz_default_module /usr/lib/apache2/modules/mod_authz_default.so
|
||||
LoadModule authz_host_module /usr/lib/apache2/modules/mod_authz_host.so
|
||||
LoadModule authz_user_module /usr/lib/apache2/modules/mod_authz_user.so
|
||||
LoadModule autoindex_module /usr/lib/apache2/modules/mod_autoindex.so
|
||||
LoadModule dir_module /usr/lib/apache2/modules/mod_dir.so
|
||||
LoadModule env_module /usr/lib/apache2/modules/mod_env.so
|
||||
|
||||
NameVirtualHost *:80
|
||||
Listen 80
|
||||
|
||||
<IfModule mod_ssl.c>
|
||||
Listen 443
|
||||
</IfModule>
|
||||
<IfModule mod_gnutls.c>
|
||||
Listen 443
|
||||
</IfModule>
|
||||
|
||||
LogFormat "%v:%p %h %l %u %t \\"%r\\" %>s %O \\"%{Referer}i\\" \\"%{User-Agent}i\\"" vhost_combined
|
||||
LogFormat "%h %l %u %t \\"%r\\" %>s %O \\"%{Referer}i\\" \\"%{User-Agent}i\\"" combined
|
||||
LogFormat "%h %l %u %t \\"%r\\" %>s %O" common
|
||||
LogFormat "%{Referer}i -> %U" referer
|
||||
LogFormat "%{User-agent}i" agent
|
||||
|
||||
ServerTokens OS
|
||||
ServerSignature On
|
||||
TraceEnable Off
|
||||
|
||||
<VirtualHost *:80>
|
||||
ServerAdmin webmaster@localhost
|
||||
DocumentRoot /var/www
|
||||
<Directory />
|
||||
Options FollowSymLinks
|
||||
AllowOverride None
|
||||
</Directory>
|
||||
<Directory /var/www/>
|
||||
Options Indexes FollowSymLinks MultiViews
|
||||
AllowOverride None
|
||||
Order allow,deny
|
||||
allow from all
|
||||
</Directory>
|
||||
ErrorLog ${APACHE_LOG_DIR}/error.log
|
||||
LogLevel warn
|
||||
CustomLog ${APACHE_LOG_DIR}/access.log combined
|
||||
</VirtualHost>
|
||||
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
def generateenvvars(cls, node, filename, services):
|
||||
return """\
|
||||
# this file is used by apache2ctl - generated by utility.py:HttpService
|
||||
# these settings come from a default Ubuntu apache2 installation
|
||||
export APACHE_RUN_USER=www-data
|
||||
export APACHE_RUN_GROUP=www-data
|
||||
export APACHE_PID_FILE=/var/run/apache2.pid
|
||||
export APACHE_RUN_DIR=/var/run/apache2
|
||||
export APACHE_LOCK_DIR=/var/lock/apache2
|
||||
export APACHE_LOG_DIR=/var/log/apache2
|
||||
export LANG=C
|
||||
export LANG
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
def generatehtml(cls, node, filename, services):
|
||||
body = """\
|
||||
<!-- generated by utility.py:HttpService -->
|
||||
<h1>%s web server</h1>
|
||||
<p>This is the default web page for this server.</p>
|
||||
<p>The web server software is running but no content has been added, yet.</p>
|
||||
""" % node.name
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
continue
|
||||
body += "<li>%s - %s</li>\n" % (ifc.name, ifc.addrlist)
|
||||
return "<html><body>%s</body></html>" % body
|
||||
|
||||
addservice(HttpService)
|
||||
|
||||
class PcapService(UtilService):
|
||||
''' Pcap service for logging packets.
|
||||
'''
|
||||
_name = "pcap"
|
||||
_configs = ("pcap.sh", )
|
||||
_dirs = ()
|
||||
_startindex = 1
|
||||
_startup = ("sh pcap.sh start",)
|
||||
_shutdown = ("sh pcap.sh stop",)
|
||||
_validate = ("pidof tcpdump",)
|
||||
_meta = "logs network traffic to pcap packet capture files"
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Generate a pcap.sh traffic logging script.
|
||||
'''
|
||||
cfg = """
|
||||
#!/bin/sh
|
||||
# set tcpdump options here (see 'man tcpdump' for help)
|
||||
# (-s snap length, -C limit pcap file length, -n disable name resolution)
|
||||
DUMPOPTS="-s 12288 -C 10 -n"
|
||||
|
||||
if [ "x$1" = "xstart" ]; then
|
||||
|
||||
"""
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
cfg += '# '
|
||||
redir = "< /dev/null"
|
||||
cfg += "tcpdump ${DUMPOPTS} -w %s.%s.pcap -i %s %s &\n" % \
|
||||
(node.name, ifc.name, ifc.name, redir)
|
||||
cfg += """
|
||||
|
||||
elif [ "x$1" = "xstop" ]; then
|
||||
mkdir -p ${SESSION_DIR}/pcap
|
||||
mv *.pcap ${SESSION_DIR}/pcap
|
||||
fi;
|
||||
"""
|
||||
return cfg
|
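# Illustrative example of one generated tcpdump line for a hypothetical node
# "n1" with interface eth0 (control interfaces are emitted commented out):
#   tcpdump ${DUMPOPTS} -w n1.eth0.pcap -i eth0 < /dev/null &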
||||
|
||||
addservice(PcapService)
|
||||
|
||||
class RadvdService(UtilService):
|
||||
_name = "radvd"
|
||||
_configs = ("/etc/radvd/radvd.conf",)
|
||||
_dirs = ("/etc/radvd",)
|
||||
_startup = ("radvd -C /etc/radvd/radvd.conf -m logfile -l /var/log/radvd.log",)
|
||||
_shutdown = ("pkill radvd",)
|
||||
_validate = ("pidof radvd",)
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Generate a RADVD router advertisement daemon config file
|
||||
using the network address of each interface.
|
||||
'''
|
||||
cfg = "# auto-generated by RADVD service (utility.py)\n"
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
continue
|
||||
prefixes = map(cls.subnetentry, ifc.addrlist)
|
||||
if len(prefixes) < 1:
|
||||
continue
|
||||
cfg += """\
|
||||
interface %s
|
||||
{
|
||||
AdvSendAdvert on;
|
||||
MinRtrAdvInterval 3;
|
||||
MaxRtrAdvInterval 10;
|
||||
AdvDefaultPreference low;
|
||||
AdvHomeAgentFlag off;
|
||||
""" % ifc.name
|
||||
for prefix in prefixes:
|
||||
if prefix == "":
|
||||
continue
|
||||
cfg += """\
|
||||
prefix %s
|
||||
{
|
||||
AdvOnLink on;
|
||||
AdvAutonomous on;
|
||||
AdvRouterAddr on;
|
||||
};
|
||||
""" % prefix
|
||||
cfg += "};\n"
|
||||
return cfg
|
||||
|
||||
@staticmethod
|
||||
def subnetentry(x):
|
||||
''' Generate a subnet declaration block given an IPv6 prefix string
|
||||
for inclusion in the RADVD config file.
|
||||
'''
|
||||
if x.find(":") >= 0:
|
||||
net = IPv6Prefix(x)
|
||||
return str(net)
|
||||
else:
|
||||
return ""
|
||||
|
||||
addservice(RadvdService)
|
||||
|
||||
class AtdService(UtilService):
|
||||
''' Atd service for scheduling at jobs
|
||||
'''
|
||||
_name = "atd"
|
||||
_configs = ("startatd.sh",)
|
||||
_dirs = ("/var/spool/cron/atjobs", "/var/spool/cron/atspool")
|
||||
_startup = ("sh startatd.sh", )
|
||||
_shutdown = ("pkill atd", )
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
return """
|
||||
#!/bin/sh
|
||||
echo 00001 > /var/spool/cron/atjobs/.SEQ
|
||||
chown -R daemon /var/spool/cron/*
|
||||
chmod -R 700 /var/spool/cron/*
|
||||
atd
|
||||
"""
|
||||
|
||||
addservice(AtdService)
|
||||
|
||||
class UserDefinedService(UtilService):
|
||||
''' Dummy service allowing customization of anything.
|
||||
'''
|
||||
_name = "UserDefined"
|
||||
_startindex = 50
|
||||
_meta = "Customize this service to do anything upon startup."
|
||||
|
||||
addservice(UserDefinedService)
|
472
daemon/core/services/xorp.py
Normal file
|
@@ -0,0 +1,472 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2011-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
xorp.py: defines routing services provided by the XORP routing suite.
|
||||
'''
|
||||
|
||||
import os
|
||||
|
||||
from core.service import CoreService, addservice
|
||||
from core.misc.ipaddr import IPv4Prefix
|
||||
from core.constants import *
|
||||
|
||||
class XorpRtrmgr(CoreService):
|
||||
''' XORP router manager service builds a config.boot file based on other
|
||||
enabled XORP services, and launches necessary daemons upon startup.
|
||||
'''
|
||||
_name = "xorp_rtrmgr"
|
||||
_group = "XORP"
|
||||
_depends = ()
|
||||
_dirs = ("/etc/xorp",)
|
||||
_configs = ("/etc/xorp/config.boot",)
|
||||
_startindex = 35
|
||||
_startup = ("xorp_rtrmgr -d -b %s -l /var/log/%s.log -P /var/run/%s.pid" % (_configs[0], _name, _name),)
|
||||
_shutdown = ("killall xorp_rtrmgr", )
|
||||
_validate = ("pidof xorp_rtrmgr", )
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Returns config.boot configuration file text. Other services that
|
||||
depend on this will have generatexorpconfig() hooks that are
|
||||
invoked here. Filename currently ignored.
|
||||
'''
|
||||
cfg = "interfaces {\n"
|
||||
for ifc in node.netifs():
|
||||
cfg += " interface %s {\n" % ifc.name
|
||||
cfg += "\tvif %s {\n" % ifc.name
|
||||
cfg += "".join(map(cls.addrstr, ifc.addrlist))
|
||||
cfg += cls.lladdrstr(ifc)
|
||||
cfg += "\t}\n"
|
||||
cfg += " }\n"
|
||||
cfg += "}\n\n"
|
||||
|
||||
for s in services:
|
||||
try:
|
||||
s._depends.index(cls._name)
|
||||
cfg += s.generatexorpconfig(node)
|
||||
except ValueError:
|
||||
pass
|
||||
return cfg
|
||||
|
||||
@staticmethod
|
||||
def addrstr(x):
|
||||
''' helper for mapping IP addresses to XORP config statements
|
||||
'''
|
||||
try:
|
||||
(addr, plen) = x.split("/")
|
||||
except Exception:
|
||||
raise ValueError, "invalid address"
|
||||
cfg = "\t address %s {\n" % addr
|
||||
cfg += "\t\tprefix-length: %s\n" % plen
|
||||
cfg +="\t }\n"
|
||||
return cfg
|
||||
|
||||
@staticmethod
|
||||
def lladdrstr(ifc):
|
||||
''' helper for adding link-local address entries (required by OSPFv3)
|
||||
'''
|
||||
cfg = "\t address %s {\n" % ifc.hwaddr.tolinklocal()
|
||||
cfg += "\t\tprefix-length: 64\n"
|
||||
cfg += "\t }\n"
|
||||
return cfg
|
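# Illustrative sketch of the vif address entries these helpers emit for an
# interface addressed 10.0.0.1/24 (the link-local value depends on the MAC):
#
#        address 10.0.0.1 {
#               prefix-length: 24
#        }
#        address fe80::... {
#               prefix-length: 64
#        }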
||||
|
||||
addservice(XorpRtrmgr)
|
||||
|
||||
class XorpService(CoreService):
|
||||
''' Parent class for XORP services. Defines properties and methods
|
||||
common to XORP's routing daemons.
|
||||
'''
|
||||
_name = "XorpDaemon"
|
||||
_group = "XORP"
|
||||
_depends = ("xorp_rtrmgr", )
|
||||
_dirs = ()
|
||||
_configs = ()
|
||||
_startindex = 40
|
||||
_startup = ()
|
||||
_shutdown = ()
|
||||
_meta = "The config file for this service can be found in the xorp_rtrmgr service."
|
||||
|
||||
@staticmethod
|
||||
def fea(forwarding):
|
||||
''' Helper to add a forwarding engine entry to the config file.
|
||||
'''
|
||||
cfg = "fea {\n"
|
||||
cfg += " %s {\n" % forwarding
|
||||
cfg += "\tdisable:false\n"
|
||||
cfg += " }\n"
|
||||
cfg += "}\n"
|
||||
return cfg
|
||||
|
||||
@staticmethod
|
||||
def mfea(forwarding, ifcs):
|
||||
''' Helper to add a multicast forwarding engine entry to the config file.
|
||||
'''
|
||||
names = []
|
||||
for ifc in ifcs:
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
continue
|
||||
names.append(ifc.name)
|
||||
names.append("register_vif")
|
||||
|
||||
cfg = "plumbing {\n"
|
||||
cfg += " %s {\n" % forwarding
|
||||
for name in names:
|
||||
cfg += "\tinterface %s {\n" % name
|
||||
cfg += "\t vif %s {\n" % name
|
||||
cfg += "\t\tdisable: false\n"
|
||||
cfg += "\t }\n"
|
||||
cfg += "\t}\n"
|
||||
cfg += " }\n"
|
||||
cfg += "}\n"
|
||||
return cfg
|
||||
|
||||
|
||||
@staticmethod
|
||||
def policyexportconnected():
|
||||
''' Helper to add a policy statement for exporting connected routes.
|
||||
'''
|
||||
cfg = "policy {\n"
|
||||
cfg += " policy-statement export-connected {\n"
|
||||
cfg += "\tterm 100 {\n"
|
||||
cfg += "\t from {\n"
|
||||
cfg += "\t\tprotocol: \"connected\"\n"
|
||||
cfg += "\t }\n"
|
||||
cfg += "\t}\n"
|
||||
cfg += " }\n"
|
||||
cfg += "}\n"
|
||||
return cfg
|
||||
|
||||
@staticmethod
|
||||
def routerid(node):
|
||||
''' Helper to return the first IPv4 address of a node as its router ID.
|
||||
'''
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
continue
|
||||
for a in ifc.addrlist:
|
||||
if a.find(".") >= 0:
|
||||
return a.split('/')[0]
|
||||
#raise ValueError, "no IPv4 address found for router ID"
|
||||
return "0.0.0.0"
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
return ""
|
||||
|
||||
@classmethod
|
||||
def generatexorpconfig(cls, node):
|
||||
return ""
|
||||
|
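# Illustrative sketch (not part of this import): a new XORP service plugs into
# the unified config.boot by subclassing XorpService, which already lists
# "xorp_rtrmgr" in _depends, and overriding generatexorpconfig(). The class
# name, next hop, and static-route syntax below are hypothetical.
class XorpStaticDemo(XorpService):
    ''' Hypothetical example service contributing a static default route. '''
    _name = "XORP_STATIC_DEMO"

    @classmethod
    def generatexorpconfig(cls, node):
        cfg = cls.fea("unicast-forwarding4")
        cfg += "\nprotocols {\n"
        cfg += "    static {\n"
        cfg += "\troute 0.0.0.0/0 {\n"
        cfg += "\t    next-hop: 10.0.0.1\n"  # placeholder next hop
        cfg += "\t}\n"
        cfg += "    }\n"
        cfg += "}\n"
        return cfg

addservice(XorpStaticDemo)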
||||
class XorpOspfv2(XorpService):
|
||||
''' The OSPFv2 service provides IPv4 routing for wired networks. It does
|
||||
not build its own configuration file but has hooks for adding to the
|
||||
unified XORP configuration file.
|
||||
'''
|
||||
_name = "XORP_OSPFv2"
|
||||
|
||||
@classmethod
|
||||
def generatexorpconfig(cls, node):
|
||||
cfg = cls.fea("unicast-forwarding4")
|
||||
rtrid = cls.routerid(node)
|
||||
cfg += "\nprotocols {\n"
|
||||
cfg += " ospf4 {\n"
|
||||
cfg += "\trouter-id: %s\n" % rtrid
|
||||
cfg += "\tarea 0.0.0.0 {\n"
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
continue
|
||||
cfg += "\t interface %s {\n" % ifc.name
|
||||
cfg += "\t\tvif %s {\n" % ifc.name
|
||||
for a in ifc.addrlist:
|
||||
if a.find(".") < 0:
|
||||
continue
|
||||
addr = a.split("/")[0]
|
||||
cfg += "\t\t address %s {\n" % addr
|
||||
cfg += "\t\t }\n"
|
||||
cfg += "\t\t}\n"
|
||||
cfg += "\t }\n"
|
||||
cfg += "\t}\n"
|
||||
cfg += " }\n"
|
||||
cfg += "}\n"
|
||||
return cfg
|
||||
|
||||
addservice(XorpOspfv2)
|
||||
|
||||
class XorpOspfv3(XorpService):
|
||||
''' The OSPFv3 service provides IPv6 routing. It does
|
||||
not build its own configuration file but has hooks for adding to the
|
||||
unified XORP configuration file.
|
||||
'''
|
||||
_name = "XORP_OSPFv3"
|
||||
|
||||
@classmethod
|
||||
def generatexorpconfig(cls, node):
|
||||
cfg = cls.fea("unicast-forwarding6")
|
||||
rtrid = cls.routerid(node)
|
||||
cfg += "\nprotocols {\n"
|
||||
cfg += " ospf6 0 { /* Instance ID 0 */\n"
|
||||
cfg += "\trouter-id: %s\n" % rtrid
|
||||
cfg += "\tarea 0.0.0.0 {\n"
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
continue
|
||||
cfg += "\t interface %s {\n" % ifc.name
|
||||
cfg += "\t\tvif %s {\n" % ifc.name
|
||||
cfg += "\t\t}\n"
|
||||
cfg += "\t }\n"
|
||||
cfg += "\t}\n"
|
||||
cfg += " }\n"
|
||||
cfg += "}\n"
|
||||
return cfg
|
||||
|
||||
addservice(XorpOspfv3)
|
||||
|
||||
class XorpBgp(XorpService):
|
||||
''' IPv4 inter-domain routing. AS numbers and peers must be customized.
|
||||
'''
|
||||
_name = "XORP_BGP"
|
||||
_custom_needed = True
|
||||
|
||||
@classmethod
|
||||
def generatexorpconfig(cls, node):
|
||||
cfg = "/* This is a sample config that should be customized with\n"
|
||||
cfg += " appropriate AS numbers and peers */\n"
|
||||
cfg += cls.fea("unicast-forwarding4")
|
||||
cfg += cls.policyexportconnected()
|
||||
rtrid = cls.routerid(node)
|
||||
cfg += "\nprotocols {\n"
|
||||
cfg += " bgp {\n"
|
||||
cfg += "\tbgp-id: %s\n" % rtrid
|
||||
cfg += "\tlocal-as: 65001 /* change this */\n"
|
||||
cfg += "\texport: \"export-connected\"\n"
|
||||
cfg += "\tpeer 10.0.1.1 { /* change this */\n"
|
||||
cfg += "\t local-ip: 10.0.1.1\n"
|
||||
cfg += "\t as: 65002\n"
|
||||
cfg += "\t next-hop: 10.0.0.2\n"
|
||||
cfg += "\t}\n"
|
||||
cfg += " }\n"
|
||||
cfg += "}\n"
|
||||
return cfg
|
||||
|
||||
addservice(XorpBgp)
|
||||
|
||||
class XorpRip(XorpService):
|
||||
''' RIP IPv4 unicast routing.
|
||||
'''
|
||||
_name = "XORP_RIP"
|
||||
|
||||
@classmethod
|
||||
def generatexorpconfig(cls, node):
|
||||
cfg = cls.fea("unicast-forwarding4")
|
||||
cfg += cls.policyexportconnected()
|
||||
cfg += "\nprotocols {\n"
|
||||
cfg += " rip {\n"
|
||||
cfg += "\texport: \"export-connected\"\n"
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
continue
|
||||
cfg += "\tinterface %s {\n" % ifc.name
|
||||
cfg += "\t vif %s {\n" % ifc.name
|
||||
for a in ifc.addrlist:
|
||||
if a.find(".") < 0:
|
||||
continue
|
||||
addr = a.split("/")[0]
|
||||
cfg += "\t\taddress %s {\n" % addr
|
||||
cfg += "\t\t disable: false\n"
|
||||
cfg += "\t\t}\n"
|
||||
cfg += "\t }\n"
|
||||
cfg += "\t}\n"
|
||||
cfg += " }\n"
|
||||
cfg += "}\n"
|
||||
return cfg
|
||||
|
||||
addservice(XorpRip)
|
||||
|
||||
class XorpRipng(XorpService):
|
||||
''' RIPng IPv6 unicast routing.
|
||||
'''
|
||||
_name = "XORP_RIPNG"
|
||||
|
||||
@classmethod
|
||||
def generatexorpconfig(cls, node):
|
||||
cfg = cls.fea("unicast-forwarding6")
|
||||
cfg += cls.policyexportconnected()
|
||||
cfg += "\nprotocols {\n"
|
||||
cfg += " ripng {\n"
|
||||
cfg += "\texport: \"export-connected\"\n"
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
continue
|
||||
cfg += "\tinterface %s {\n" % ifc.name
|
||||
cfg += "\t vif %s {\n" % ifc.name
|
||||
# for a in ifc.addrlist:
|
||||
# if a.find(":") < 0:
|
||||
# continue
|
||||
# addr = a.split("/")[0]
|
||||
# cfg += "\t\taddress %s {\n" % addr
|
||||
# cfg += "\t\t disable: false\n"
|
||||
# cfg += "\t\t}\n"
|
||||
cfg += "\t\taddress %s {\n" % ifc.hwaddr.tolinklocal()
|
||||
cfg += "\t\t disable: false\n"
|
||||
cfg += "\t\t}\n"
|
||||
cfg += "\t }\n"
|
||||
cfg += "\t}\n"
|
||||
cfg += " }\n"
|
||||
cfg += "}\n"
|
||||
return cfg
|
||||
|
||||
addservice(XorpRipng)
|
||||
|
||||
class XorpPimSm4(XorpService):
|
||||
''' PIM Sparse Mode IPv4 multicast routing.
|
||||
'''
|
||||
_name = "XORP_PIMSM4"
|
||||
|
||||
@classmethod
|
||||
def generatexorpconfig(cls, node):
|
||||
cfg = cls.mfea("mfea4", node.netifs())
|
||||
|
||||
cfg += "\nprotocols {\n"
|
||||
cfg += " igmp {\n"
|
||||
names = []
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
continue
|
||||
names.append(ifc.name)
|
||||
cfg += "\tinterface %s {\n" % ifc.name
|
||||
cfg += "\t vif %s {\n" % ifc.name
|
||||
cfg += "\t\tdisable: false\n"
|
||||
cfg += "\t }\n"
|
||||
cfg += "\t}\n"
|
||||
cfg += " }\n"
|
||||
cfg += "}\n"
|
||||
|
||||
cfg += "\nprotocols {\n"
|
||||
cfg += " pimsm4 {\n"
|
||||
|
||||
names.append("register_vif")
|
||||
for name in names:
|
||||
cfg += "\tinterface %s {\n" % name
|
||||
cfg += "\t vif %s {\n" % name
|
||||
cfg += "\t\tdr-priority: 1\n"
|
||||
cfg += "\t }\n"
|
||||
cfg += "\t}\n"
|
||||
cfg += "\tbootstrap {\n"
|
||||
cfg += "\t cand-bsr {\n"
|
||||
cfg += "\t\tscope-zone 224.0.0.0/4 {\n"
|
||||
cfg += "\t\t cand-bsr-by-vif-name: \"%s\"\n" % names[0]
|
||||
cfg += "\t\t}\n"
|
||||
cfg += "\t }\n"
|
||||
cfg += "\t cand-rp {\n"
|
||||
cfg += "\t\tgroup-prefix 224.0.0.0/4 {\n"
|
||||
cfg += "\t\t cand-rp-by-vif-name: \"%s\"\n" % names[0]
|
||||
cfg += "\t\t}\n"
|
||||
cfg += "\t }\n"
|
||||
cfg += "\t}\n"
|
||||
|
||||
cfg += " }\n"
|
||||
cfg += "}\n"
|
||||
|
||||
cfg += "\nprotocols {\n"
|
||||
cfg += " fib2mrib {\n"
|
||||
cfg += "\tdisable: false\n"
|
||||
cfg += " }\n"
|
||||
cfg += "}\n"
|
||||
return cfg
|
||||
|
||||
addservice(XorpPimSm4)
|
||||
|
||||
class XorpPimSm6(XorpService):
|
||||
''' PIM Sparse Mode IPv6 multicast routing.
|
||||
'''
|
||||
_name = "XORP_PIMSM6"
|
||||
|
||||
@classmethod
|
||||
def generatexorpconfig(cls, node):
|
||||
cfg = cls.mfea("mfea6", node.netifs())
|
||||
|
||||
cfg += "\nprotocols {\n"
|
||||
cfg += " mld {\n"
|
||||
names = []
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
continue
|
||||
names.append(ifc.name)
|
||||
cfg += "\tinterface %s {\n" % ifc.name
|
||||
cfg += "\t vif %s {\n" % ifc.name
|
||||
cfg += "\t\tdisable: false\n"
|
||||
cfg += "\t }\n"
|
||||
cfg += "\t}\n"
|
||||
cfg += " }\n"
|
||||
cfg += "}\n"
|
||||
|
||||
cfg += "\nprotocols {\n"
|
||||
cfg += " pimsm6 {\n"
|
||||
|
||||
names.append("register_vif")
|
||||
for name in names:
|
||||
cfg += "\tinterface %s {\n" % name
|
||||
cfg += "\t vif %s {\n" % name
|
||||
cfg += "\t\tdr-priority: 1\n"
|
||||
cfg += "\t }\n"
|
||||
cfg += "\t}\n"
|
||||
cfg += "\tbootstrap {\n"
|
||||
cfg += "\t cand-bsr {\n"
|
||||
cfg += "\t\tscope-zone ff00::/8 {\n"
|
||||
cfg += "\t\t cand-bsr-by-vif-name: \"%s\"\n" % names[0]
|
||||
cfg += "\t\t}\n"
|
||||
cfg += "\t }\n"
|
||||
cfg += "\t cand-rp {\n"
|
||||
cfg += "\t\tgroup-prefix ff00::/8 {\n"
|
||||
cfg += "\t\t cand-rp-by-vif-name: \"%s\"\n" % names[0]
|
||||
cfg += "\t\t}\n"
|
||||
cfg += "\t }\n"
|
||||
cfg += "\t}\n"
|
||||
|
||||
cfg += " }\n"
|
||||
cfg += "}\n"
|
||||
|
||||
cfg += "\nprotocols {\n"
|
||||
cfg += " fib2mrib {\n"
|
||||
cfg += "\tdisable: false\n"
|
||||
cfg += " }\n"
|
||||
cfg += "}\n"
|
||||
return cfg
|
||||
|
||||
addservice(XorpPimSm6)
|
||||
|
||||
class XorpOlsr(XorpService):
|
||||
''' OLSR IPv4 unicast MANET routing.
|
||||
'''
|
||||
_name = "XORP_OLSR"
|
||||
|
||||
@classmethod
|
||||
def generatexorpconfig(cls, node):
|
||||
cfg = cls.fea("unicast-forwarding4")
|
||||
rtrid = cls.routerid(node)
|
||||
cfg += "\nprotocols {\n"
|
||||
cfg += " olsr4 {\n"
|
||||
cfg += "\tmain-address: %s\n" % rtrid
|
||||
for ifc in node.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
continue
|
||||
cfg += "\tinterface %s {\n" % ifc.name
|
||||
cfg += "\t vif %s {\n" % ifc.name
|
||||
for a in ifc.addrlist:
|
||||
if a.find(".") < 0:
|
||||
continue
|
||||
addr = a.split("/")[0]
|
||||
cfg += "\t\taddress %s {\n" % addr
|
||||
cfg += "\t\t}\n"
|
||||
cfg += "\t }\n"
|
||||
cfg += "\t}\n"
|
||||
cfg += " }\n"
|
||||
cfg += "}\n"
|
||||
return cfg
|
||||
|
||||
addservice(XorpOlsr)
|
1029
daemon/core/session.py
Normal file
File diff suppressed because it is too large
0
daemon/core/xen/__init__.py
Normal file
818
daemon/core/xen/xen.py
Normal file
|
@@ -0,0 +1,818 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2011-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
'''
|
||||
xen.py: implementation of the XenNode and XenVEth classes that support
|
||||
generating Xen domUs based on an ISO image and persistent configuration area
|
||||
'''
|
||||
|
||||
from core.netns.vnet import *
|
||||
from core.netns.vnode import LxcNode
|
||||
from core.coreobj import PyCoreObj, PyCoreNode, PyCoreNetIf
|
||||
from core.misc.ipaddr import *
|
||||
from core.misc.utils import *
|
||||
from core.constants import *
|
||||
from core.api import coreapi
|
||||
from core.netns.vif import TunTap
|
||||
from core.emane.nodes import EmaneNode
|
||||
|
||||
try:
|
||||
import parted
|
||||
except ImportError, e:
|
||||
#print "Failed to load parted Python module required by Xen support."
|
||||
#print "Error was:", e
|
||||
raise ImportError
|
||||
|
||||
import base64
|
||||
import crypt
|
||||
import subprocess
|
||||
try:
|
||||
import fsimage
|
||||
except ImportError, e:
|
||||
# fix for fsimage under Ubuntu
|
||||
sys.path.append("/usr/lib/xen-default/lib/python")
|
||||
try:
|
||||
import fsimage
|
||||
except ImportError, e:
|
||||
#print "Failed to load fsimage Python module required by Xen support."
|
||||
#print "Error was:", e
|
||||
raise ImportError
|
||||
|
||||
|
||||
|
||||
import os
|
||||
import time
|
||||
import shutil
|
||||
import string
|
||||
|
||||
# XXX move these out to config file
|
||||
AWK_PATH = "/bin/awk"
|
||||
KPARTX_PATH = "/sbin/kpartx"
|
||||
LVCREATE_PATH = "/sbin/lvcreate"
|
||||
LVREMOVE_PATH = "/sbin/lvremove"
|
||||
LVCHANGE_PATH = "/sbin/lvchange"
|
||||
MKFSEXT4_PATH = "/sbin/mkfs.ext4"
|
||||
MKSWAP_PATH = "/sbin/mkswap"
|
||||
TAR_PATH = "/bin/tar"
|
||||
SED_PATH = "/bin/sed"
|
||||
XM_PATH = "/usr/sbin/xm"
|
||||
UDEVADM_PATH = "/sbin/udevadm"
|
||||
|
||||
class XenVEth(PyCoreNetIf):
|
||||
def __init__(self, node, name, localname, mtu = 1500, net = None,
|
||||
start = True, hwaddr = None):
|
||||
# note that net arg is ignored
|
||||
PyCoreNetIf.__init__(self, node = node, name = name, mtu = mtu)
|
||||
self.localname = localname
|
||||
self.up = False
|
||||
self.hwaddr = hwaddr
|
||||
if start:
|
||||
self.startup()
|
||||
|
||||
def startup(self):
|
||||
cmd = [XM_PATH, 'network-attach', self.node.vmname,
|
||||
'vifname=%s' % self.localname, 'script=vif-core']
|
||||
if self.hwaddr is not None:
|
||||
cmd.append('mac=%s' % self.hwaddr)
|
||||
check_call(cmd)
|
||||
check_call([IP_BIN, "link", "set", self.localname, "up"])
|
||||
self.up = True
|
||||
|
||||
def shutdown(self):
|
||||
if not self.up:
|
||||
return
|
||||
if self.localname:
|
||||
if self.hwaddr is not None:
|
||||
pass
|
||||
# this should be doable, but some argument isn't a string
|
||||
#check_call([XM_PATH, 'network-detach', self.node.vmname,
|
||||
# self.hwaddr])
|
||||
self.up = False
|
||||
|
||||
|
||||
class XenNode(PyCoreNode):
|
||||
apitype = coreapi.CORE_NODE_XEN
|
||||
|
||||
FilesToIgnore = frozenset([
|
||||
#'ipforward.sh',
|
||||
'quaggaboot.sh',
|
||||
])
|
||||
|
||||
FilesRedirection = {
|
||||
'ipforward.sh' : '/core-tmp/ipforward.sh',
|
||||
}
|
||||
|
||||
CmdsToIgnore = frozenset([
|
||||
#'sh ipforward.sh',
|
||||
#'sh quaggaboot.sh zebra',
|
||||
#'sh quaggaboot.sh ospfd',
|
||||
#'sh quaggaboot.sh ospf6d',
|
||||
'sh quaggaboot.sh vtysh',
|
||||
'killall zebra',
|
||||
'killall ospfd',
|
||||
'killall ospf6d',
|
||||
'pidof zebra', 'pidof ospfd', 'pidof ospf6d',
|
||||
])
|
||||
|
||||
def RedirCmd_ipforward(self):
|
||||
sysctlFile = open(os.path.join(self.mountdir, self.etcdir,
|
||||
'sysctl.conf'), 'a')
|
||||
p1 = subprocess.Popen([AWK_PATH,
|
||||
'/^\/sbin\/sysctl -w/ {print $NF}',
|
||||
os.path.join(self.nodedir,
|
||||
'core-tmp/ipforward.sh') ],
|
||||
stdout=sysctlFile)
|
||||
p1.wait()
|
||||
sysctlFile.close()
|
||||
|
||||
def RedirCmd_zebra(self):
|
||||
check_call([SED_PATH, '-i', '-e', 's/^zebra=no/zebra=yes/',
|
||||
os.path.join(self.mountdir, self.etcdir, 'quagga/daemons')])
|
||||
def RedirCmd_ospfd(self):
|
||||
check_call([SED_PATH, '-i', '-e', 's/^ospfd=no/ospfd=yes/',
|
||||
os.path.join(self.mountdir, self.etcdir, 'quagga/daemons')])
|
||||
def RedirCmd_ospf6d(self):
|
||||
check_call([SED_PATH, '-i', '-e',
|
||||
's/^ospf6d=no/ospf6d=yes/',
|
||||
os.path.join(self.mountdir, self.etcdir, 'quagga/daemons')])
|
||||
|
||||
CmdsRedirection = {
|
||||
'sh ipforward.sh' : RedirCmd_ipforward,
|
||||
'sh quaggaboot.sh zebra' : RedirCmd_zebra,
|
||||
'sh quaggaboot.sh ospfd' : RedirCmd_ospfd,
|
||||
'sh quaggaboot.sh ospf6d' : RedirCmd_ospf6d,
|
||||
}
|
||||
|
||||
# CoreNode: no __init__, take from LxcNode & SimpleLxcNode
|
||||
def __init__(self, session, objid = None, name = None,
|
||||
nodedir = None, bootsh = "boot.sh", verbose = False,
|
||||
start = True, model = None,
|
||||
vgname = None, ramsize = None, disksize = None,
|
||||
isofile = None):
|
||||
# SimpleLxcNode initialization
|
||||
PyCoreNode.__init__(self, session = session, objid = objid, name = name,
|
||||
verbose = verbose)
|
||||
self.nodedir = nodedir
|
||||
self.model = model
|
||||
# indicates startup() has been invoked and disk has been initialized
|
||||
self.up = False
|
||||
# indicates boot() has been invoked and domU is running
|
||||
self.booted = False
|
||||
self.ifindex = 0
|
||||
self.lock = threading.RLock()
|
||||
self._netif = {}
|
||||
# domU name
|
||||
self.vmname = "c" + str(session.sessionid) + "-" + name
|
||||
# LVM volume group name
|
||||
self.vgname = self.getconfigitem('vg_name', vgname)
|
||||
# LVM logical volume name
|
||||
self.lvname = self.vmname + '-'
|
||||
# LVM logical volume device path name
|
||||
self.lvpath = os.path.join('/dev', self.vgname, self.lvname)
|
||||
self.disksize = self.getconfigitem('disk_size', disksize)
|
||||
self.ramsize = int(self.getconfigitem('ram_size', ramsize))
|
||||
self.isofile = self.getconfigitem('iso_file', isofile)
|
||||
# temporary mount point for paused VM persistent filesystem
|
||||
self.mountdir = None
|
||||
self.etcdir = self.getconfigitem('etc_path')
|
||||
|
||||
# TODO: remove this temporary hack
|
||||
self.FilesRedirection['/usr/local/etc/quagga/Quagga.conf'] = \
|
||||
os.path.join(self.getconfigitem('mount_path'), self.etcdir,
|
||||
'quagga/Quagga.conf')
|
||||
|
||||
# LxcNode initialization
|
||||
# self.makenodedir()
|
||||
if self.nodedir is None:
|
||||
self.nodedir = \
|
||||
os.path.join(session.sessiondir, self.name + ".conf")
|
||||
self.mountdir = self.nodedir + self.getconfigitem('mount_path')
|
||||
if not os.path.isdir(self.mountdir):
|
||||
os.makedirs(self.mountdir)
|
||||
self.tmpnodedir = True
|
||||
else:
|
||||
raise Exception, "Xen PVM node requires a temporary nodedir"
|
||||
self.tmpnodedir = False
|
||||
self.bootsh = bootsh
|
||||
if start:
|
||||
self.startup()
|
||||
|
||||
def getconfigitem(self, name, default=None):
|
||||
''' Configuration items come from the xen.conf file and/or input from
|
||||
the GUI, and are stored in the session using the XenConfigManager
|
||||
object. self.model is used to identify particular profiles
|
||||
associated with a node type in the GUI.
|
||||
'''
|
||||
return self.session.xen.getconfigitem(name=name, model=self.model,
|
||||
node=self, value=default)
|
||||
|
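# Resolution example (values illustrative, taken from the sample xen.conf in
# this import): self.getconfigitem('ram_size') returns '256' unless the GUI or
# a node profile supplied an override; an explicit argument always wins, e.g.
# self.getconfigitem('ram_size', 512) returns 512.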
||||
# from class LxcNode (also SimpleLxcNode)
|
||||
def startup(self):
|
||||
self.warn("XEN PVM startup() called: preparing disk for %s" % self.name)
|
||||
self.lock.acquire()
|
||||
try:
|
||||
if self.up:
|
||||
raise Exception, "already up"
|
||||
self.createlogicalvolume()
|
||||
self.createpartitions()
|
||||
persistdev = self.createfilesystems()
|
||||
check_call([MOUNT_BIN, '-t', 'ext4', persistdev, self.mountdir])
|
||||
self.untarpersistent(tarname=self.getconfigitem('persist_tar_iso'),
|
||||
iso=True)
|
||||
self.setrootpassword(pw = self.getconfigitem('root_password'))
|
||||
self.sethostname(old='UBASE', new=self.name)
|
||||
self.setupssh(keypath=self.getconfigitem('ssh_key_path'))
|
||||
self.createvm()
|
||||
self.up = True
|
||||
finally:
|
||||
self.lock.release()
|
||||
|
||||
# from class LxcNode (also SimpleLxcNode)
|
||||
def boot(self):
|
||||
self.warn("XEN PVM boot() called")
|
||||
|
||||
self.lock.acquire()
|
||||
if not self.up:
|
||||
raise Exception, "Can't boot VM without initialized disk"
|
||||
|
||||
if self.booted:
|
||||
self.lock.release()
|
||||
return
|
||||
|
||||
self.session.services.bootnodeservices(self)
|
||||
tarname = self.getconfigitem('persist_tar')
|
||||
if tarname:
|
||||
self.untarpersistent(tarname=tarname, iso=False)
|
||||
|
||||
try:
|
||||
check_call([UMOUNT_BIN, self.mountdir])
|
||||
self.unmount_all(self.mountdir)
|
||||
check_call([UDEVADM_PATH, 'settle'])
|
||||
check_call([KPARTX_PATH, '-d', self.lvpath])
|
||||
|
||||
#time.sleep(5)
|
||||
#time.sleep(1)
|
||||
|
||||
# unpause VM
|
||||
if self.verbose:
|
||||
self.warn("XEN PVM boot() unpause domU %s" % self.vmname)
|
||||
mutecheck_call([XM_PATH, 'unpause', self.vmname])
|
||||
|
||||
self.booted = True
|
||||
finally:
|
||||
self.lock.release()
|
||||
|
||||
def validate(self):
|
||||
self.session.services.validatenodeservices(self)
|
||||
|
||||
# from class LxcNode (also SimpleLxcNode)
|
||||
def shutdown(self):
|
||||
self.warn("XEN PVM shutdown() called")
|
||||
if not self.up:
|
||||
return
|
||||
self.lock.acquire()
|
||||
try:
|
||||
if self.up:
|
||||
# sketch from SimpleLxcNode
|
||||
for netif in self.netifs():
|
||||
netif.shutdown()
|
||||
|
||||
try:
|
||||
# RJE XXX what to do here
|
||||
if self.booted:
|
||||
mutecheck_call([XM_PATH, 'destroy', self.vmname])
|
||||
self.booted = False
|
||||
except OSError:
|
||||
pass
|
||||
except subprocess.CalledProcessError:
|
||||
# ignore this error too, the VM may have exited already
|
||||
pass
|
||||
|
||||
# discard LVM volume
|
||||
lvmRemoveCount = 0
|
||||
while os.path.exists(self.lvpath):
|
||||
try:
|
||||
check_call([UDEVADM_PATH, 'settle'])
|
||||
mutecall([LVCHANGE_PATH, '-an', self.lvpath])
|
||||
lvmRemoveCount += 1
|
||||
mutecall([LVREMOVE_PATH, '-f', self.lvpath])
|
||||
except OSError:
|
||||
pass
|
||||
if (lvmRemoveCount > 1):
|
||||
self.warn("XEN PVM shutdown() required %d lvremove " \
|
||||
"executions." % lvmRemoveCount)
|
||||
|
||||
self._netif.clear()
|
||||
del self.session
|
||||
|
||||
self.up = False
|
||||
|
||||
finally:
|
||||
self.rmnodedir()
|
||||
self.lock.release()
|
||||
|
||||
def createlogicalvolume(self):
|
||||
''' Create a logical volume for this Xen domU. Called from startup().
|
||||
'''
|
||||
if os.path.exists(self.lvpath):
|
||||
raise Exception, "LVM volume already exists"
|
||||
mutecheck_call([LVCREATE_PATH, '--size', self.disksize,
|
||||
'--name', self.lvname, self.vgname])
|
||||
|
||||
def createpartitions(self):
|
||||
''' Partition the LVM volume into persistent and swap partitions
|
||||
using the parted module.
|
||||
'''
|
||||
dev = parted.Device(path=self.lvpath)
|
||||
dev.removeFromCache()
|
||||
disk = parted.freshDisk(dev, 'msdos')
|
||||
constraint = parted.Constraint(device=dev)
|
||||
persist_size = int(0.75 * constraint.maxSize)
|
||||
self.createpartition(device=dev, disk=disk, start=1,
|
||||
end=(persist_size - 1) , type="ext4")
|
||||
self.createpartition(device=dev, disk=disk, start=persist_size,
|
||||
end=(constraint.maxSize - 1) , type="linux-swap(v1)")
|
||||
disk.commit()
|
||||
|
||||
def createpartition(self, device, disk, start, end, type):
|
||||
''' Create a single partition of the specified type and size and add
|
||||
it to the disk object, using the parted module.
|
||||
'''
|
||||
geo = parted.Geometry(device=device, start=start, end=end)
|
||||
fs = parted.FileSystem(type=type, geometry=geo)
|
||||
part = parted.Partition(disk=disk, fs=fs, type=parted.PARTITION_NORMAL,
|
||||
geometry=geo)
|
||||
constraint = parted.Constraint(exactGeom=geo)
|
||||
disk.addPartition(partition=part, constraint=constraint)
|
||||
|
||||
def createfilesystems(self):
|
||||
''' Make an ext4 filesystem and swap space. Return the device name for
|
||||
the persistent partition so we can mount it.
|
||||
'''
|
||||
output = subprocess.Popen([KPARTX_PATH, '-l', self.lvpath],
|
||||
stdout=subprocess.PIPE).communicate()[0]
|
||||
lines = output.splitlines()
|
||||
persistdev = '/dev/mapper/' + lines[0].strip().split(' ')[0].strip()
|
||||
swapdev = '/dev/mapper/' + lines[1].strip().split(' ')[0].strip()
|
||||
check_call([KPARTX_PATH, '-a', self.lvpath])
|
||||
mutecheck_call([MKFSEXT4_PATH, '-L', 'persist', persistdev])
|
||||
mutecheck_call([MKSWAP_PATH, '-f', '-L', 'swap', swapdev])
|
||||
return persistdev
|
||||
|
||||
def untarpersistent(self, tarname, iso):
|
||||
''' Unpack a persistent template tar file to the mounted mount dir.
|
||||
Uses fsimage library to read from an ISO file.
|
||||
'''
|
||||
tarname = tarname.replace('%h', self.name) # filename may use hostname
|
||||
if iso:
|
||||
try:
|
||||
fs = fsimage.open(self.isofile, 0)
|
||||
except IOError, e:
|
||||
self.warn("Failed to open ISO file: %s (%s)" % (self.isofile,e))
|
||||
return
|
||||
try:
|
||||
tardata = fs.open_file(tarname).read()
|
||||
except IOError, e:
|
||||
self.warn("Failed to open tar file: %s (%s)" % (tarname, e))
|
||||
return
|
||||
finally:
|
||||
del fs
|
||||
else:
|
||||
try:
|
||||
f = open(tarname)
|
||||
tardata = f.read()
|
||||
f.close()
|
||||
except IOError, e:
|
||||
self.warn("Failed to open tar file: %s (%s)" % (tarname, e))
|
||||
return
|
||||
p = subprocess.Popen([TAR_PATH, '-C', self.mountdir, '--numeric-owner',
|
||||
'-xf', '-'], stdin=subprocess.PIPE)
|
||||
p.communicate(input=tardata)
|
||||
p.wait()
|
||||
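# Roughly equivalent (paths illustrative) to the shell pipeline
#   cat /opt/core-xen/rtr-configs/custom-n3.tar | \
#       tar -C /tmp/pycore.12345/n3.conf/rtr/persist --numeric-owner -xf -
# except that for the ISO case the tar bytes are read via fsimage rather than
# from a file in dom0.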
|
||||
def setrootpassword(self, pw):
|
||||
''' Set the root password by updating the shadow password file that
|
||||
is on the filesystem mounted in the temporary area.
|
||||
'''
|
||||
saltedpw = crypt.crypt(pw, '$6$'+base64.b64encode(os.urandom(12)))
|
||||
check_call([SED_PATH, '-i', '-e',
|
||||
'/^root:/s_^root:\([^:]*\):_root:' + saltedpw + ':_',
|
||||
os.path.join(self.mountdir, self.etcdir, 'shadow')])
|
||||
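# For reference: crypt.crypt('password', '$6$' + salt) yields a SHA-512 shadow
# hash of the form '$6$<salt>$<digest>', which the sed expression above
# splices into the root: entry of etc/shadow on the mounted image.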
|
||||
def sethostname(self, old, new):
|
||||
''' Set the hostname by updating the hostname and hosts files that
|
||||
reside on the filesystem mounted in the temporary area.
|
||||
'''
|
||||
check_call([SED_PATH, '-i', '-e', 's/%s/%s/' % (old, new),
|
||||
os.path.join(self.mountdir, self.etcdir, 'hostname')])
|
||||
check_call([SED_PATH, '-i', '-e', 's/%s/%s/' % (old, new),
|
||||
os.path.join(self.mountdir, self.etcdir, 'hosts')])
|
||||
|
||||
def setupssh(self, keypath):
|
||||
''' Configure SSH access by installing host keys and a system-wide
|
||||
authorized_keys file.
|
||||
'''
|
||||
sshdcfg = os.path.join(self.mountdir, self.etcdir, 'ssh/sshd_config')
|
||||
check_call([SED_PATH, '-i', '-e',
|
||||
's/PermitRootLogin no/PermitRootLogin yes/', sshdcfg])
|
||||
sshdir = os.path.join(self.getconfigitem('mount_path'), self.etcdir,
|
||||
'ssh')
|
||||
sshdir = sshdir.replace('/','\\/') # backslash slashes for use in sed
|
||||
check_call([SED_PATH, '-i', '-e',
|
||||
's/#AuthorizedKeysFile %h\/.ssh\/authorized_keys/' + \
|
||||
'AuthorizedKeysFile ' + sshdir + '\/authorized_keys/',
|
||||
sshdcfg])
|
||||
for f in ('ssh_host_rsa_key','ssh_host_rsa_key.pub','authorized_keys'):
|
||||
src = os.path.join(keypath, f)
|
||||
dst = os.path.join(self.mountdir, self.etcdir, 'ssh', f)
|
||||
shutil.copy(src, dst)
|
||||
if f[-3:] != "pub":
|
||||
os.chmod(dst, 0600)
|
||||
|
||||
def createvm(self):
|
||||
''' Instantiate a *paused* domU VM
|
||||
Instantiate it now, so we can add network interfaces,
|
||||
pause it so we can have the filesystem open for configuration.
|
||||
'''
|
||||
args = [XM_PATH, 'create', os.devnull, '--paused']
|
||||
args.extend(['name=' + self.vmname, 'memory=' + str(self.ramsize)])
|
||||
args.append('disk=tap:aio:' + self.isofile + ',hda,r')
|
||||
args.append('disk=phy:' + self.lvpath + ',hdb,w')
|
||||
args.append('bootloader=pygrub')
|
||||
bootargs = '--kernel=/isolinux/vmlinuz --ramdisk=/isolinux/initrd'
|
||||
args.append('bootargs=' + bootargs)
|
||||
for action in ('poweroff', 'reboot', 'suspend', 'crash', 'halt'):
|
||||
args.append('on_%s=destroy' % action)
|
||||
args.append('extra=' + self.getconfigitem('xm_create_extra'))
|
||||
mutecheck_call(args)
|
||||
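# The assembled command is roughly (values illustrative, using the sample
# xen.conf defaults and a session 1, node n3):
#   xm create /dev/null --paused name=c1-n3 memory=256 \
#       disk=tap:aio:/opt/core-xen/iso-files/rtr.iso,hda,r \
#       disk=phy:/dev/domU/c1-n3-,hdb,w bootloader=pygrub \
#       bootargs='--kernel=/isolinux/vmlinuz --ramdisk=/isolinux/initrd' \
#       on_poweroff=destroy on_reboot=destroy ... extra='console=hvc0 ...'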
|
||||
# from class LxcNode
|
||||
def privatedir(self, path):
|
||||
#self.warn("XEN PVM privatedir() called")
|
||||
# Do nothing, Xen PVM nodes are fully private
|
||||
pass
|
||||
|
||||
# from class LxcNode
|
||||
def opennodefile(self, filename, mode = "w"):
|
||||
self.warn("XEN PVM opennodefile() called")
|
||||
raise Exception, "Can't open VM file with opennodefile()"
|
||||
|
||||
# from class LxcNode
|
||||
# open a file on a paused Xen node
|
||||
def openpausednodefile(self, filename, mode = "w"):
|
||||
dirname, basename = os.path.split(filename)
|
||||
if not basename:
|
||||
raise ValueError, "no basename for filename: " + filename
|
||||
if dirname and dirname[0] == "/":
|
||||
dirname = dirname[1:]
|
||||
#dirname = dirname.replace("/", ".")
|
||||
dirname = os.path.join(self.nodedir, dirname)
|
||||
if not os.path.isdir(dirname):
|
||||
os.makedirs(dirname, mode = 0755)
|
||||
hostfilename = os.path.join(dirname, basename)
|
||||
return open(hostfilename, mode)
|
||||
|
||||
# from class LxcNode
|
||||
def nodefile(self, filename, contents, mode = 0644):
|
||||
if filename in self.FilesToIgnore:
|
||||
#self.warn("XEN PVM nodefile(filename=%s) ignored" % [filename])
|
||||
return
|
||||
|
||||
if filename in self.FilesRedirection:
|
||||
redirFilename = self.FilesRedirection[filename]
|
||||
self.warn("XEN PVM nodefile(filename=%s) redirected to %s" % (filename, redirFilename))
|
||||
filename = redirFilename
|
||||
|
||||
self.warn("XEN PVM nodefile(filename=%s) called" % [filename])
|
||||
self.lock.acquire()
|
||||
if not self.up:
|
||||
self.lock.release()
|
||||
raise Exception, "Can't access VM file as VM disk isn't ready"
|
||||
return
|
||||
|
||||
if self.booted:
|
||||
self.lock.release()
|
||||
raise Exception, "Can't access VM file as VM is already running"
|
||||
return
|
||||
|
||||
try:
|
||||
f = self.openpausednodefile(filename, "w")
|
||||
f.write(contents)
|
||||
os.chmod(f.name, mode)
|
||||
f.close()
|
||||
self.info("created nodefile: '%s'; mode: 0%o" % (f.name, mode))
|
||||
finally:
|
||||
self.lock.release()
|
||||
|
||||
# from class SimpleLxcNode
|
||||
def alive(self):
|
||||
# is VM running?
|
||||
return False # XXX
|
||||
|
||||
def cmd(self, args, wait = True):
|
||||
cmdAsString = string.join(args, ' ')
|
||||
if cmdAsString in self.CmdsToIgnore:
|
||||
#self.warn("XEN PVM cmd(args=[%s]) called and ignored" % cmdAsString)
|
||||
return 0
|
||||
if cmdAsString in self.CmdsRedirection:
|
||||
self.CmdsRedirection[cmdAsString](self)
|
||||
return 0
|
||||
|
||||
self.warn("XEN PVM cmd(args=[%s]) called, but not yet implemented" % cmdAsString)
|
||||
return 0
|
||||
|
||||
def cmdresult(self, args):
|
||||
cmdAsString = string.join(args, ' ')
|
||||
if cmdAsString in self.CmdsToIgnore:
|
||||
#self.warn("XEN PVM cmd(args=[%s]) called and ignored" % cmdAsString)
|
||||
return (0, "")
|
||||
self.warn("XEN PVM cmdresult(args=[%s]) called, but not yet implemented" % cmdAsString)
|
||||
return (0, "")
|
||||
|
||||
def popen(self, args):
|
||||
cmdAsString = string.join(args, ' ')
|
||||
self.warn("XEN PVM popen(args=[%s]) called, but not yet implemented" % cmdAsString)
|
||||
return
|
||||
|
||||
def icmd(self, args):
|
||||
cmdAsString = string.join(args, ' ')
|
||||
self.warn("XEN PVM icmd(args=[%s]) called, but not yet implemented" % cmdAsString)
|
||||
return
|
||||
|
||||
def term(self, sh = "/bin/sh"):
|
||||
self.warn("XEN PVM term() called, but not yet implemented")
|
||||
return
|
||||
|
||||
def termcmdstring(self, sh = "/bin/sh"):
|
||||
''' We may add 'sudo' to the command string because the GUI runs as a
|
||||
normal user. Use SSH if control interface is available, otherwise
|
||||
use Xen console with a keymapping for easy login.
|
||||
'''
|
||||
controlifc = None
|
||||
for ifc in self.netifs():
|
||||
if hasattr(ifc, 'control') and ifc.control == True:
|
||||
controlifc = ifc
|
||||
break
|
||||
cmd = "xterm "
|
||||
# use SSH if control interface is available
|
||||
if controlifc:
|
||||
controlip = controlifc.addrlist[0].split('/')[0]
|
||||
cmd += "-e ssh root@%s" % controlip
|
||||
return cmd
|
||||
# otherwise use 'xm console'
|
||||
#pw = self.getconfigitem('root_password')
|
||||
#cmd += "-xrm 'XTerm*VT100.translations: #override <Key>F1: "
|
||||
#cmd += "string(\"root\\n\") \\n <Key>F2: string(\"%s\\n\")' " % pw
|
||||
cmd += "-e sudo %s console %s" % (XM_PATH, self.vmname)
|
||||
return cmd
|
||||
|
||||
def shcmd(self, cmdstr, sh = "/bin/sh"):
|
||||
self.warn("XEN PVM shcmd(args=[%s]) called, but not yet implemented" % cmdstr)
|
||||
return
|
||||
|
||||
# from class SimpleLxcNode
|
||||
def info(self, msg):
|
||||
if self.verbose:
|
||||
print "%s: %s" % (self.name, msg)
|
||||
sys.stdout.flush()
|
||||
|
||||
# from class SimpleLxcNode
|
||||
def warn(self, msg):
|
||||
print >> sys.stderr, "%s: %s" % (self.name, msg)
|
||||
sys.stderr.flush()
|
||||
|
||||
def mount(self, source, target):
|
||||
self.warn("XEN PVM Nodes can't bind-mount filesystems")
|
||||
|
||||
def umount(self, target):
|
||||
self.warn("XEN PVM Nodes can't bind-mount filesystems")
|
||||
|
||||
def newifindex(self):
|
||||
self.lock.acquire()
|
||||
try:
|
||||
while self.ifindex in self._netif:
|
||||
self.ifindex += 1
|
||||
ifindex = self.ifindex
|
||||
self.ifindex += 1
|
||||
return ifindex
|
||||
finally:
|
||||
self.lock.release()
|
||||
|
||||
def getifindex(self, netif):
|
||||
for ifindex in self._netif:
|
||||
if self._netif[ifindex] is netif:
|
||||
return ifindex
|
||||
return -1
|
||||
|
||||
def addnetif(self, netif, ifindex):
|
||||
self.warn("XEN PVM addnetif() called")
|
||||
PyCoreNode.addnetif(self, netif, ifindex)
|
||||
|
||||
def delnetif(self, ifindex):
|
||||
self.warn("XEN PVM delnetif() called")
|
||||
PyCoreNode.delnetif(self, ifindex)
|
||||
|
||||
def newveth(self, ifindex = None, ifname = None, net = None, hwaddr = None):
|
||||
self.warn("XEN PVM newveth(ifindex=%s, ifname=%s) called" %
|
||||
(ifindex, ifname))
|
||||
|
||||
self.lock.acquire()
|
||||
try:
|
||||
if ifindex is None:
|
||||
ifindex = self.newifindex()
|
||||
if ifname is None:
|
||||
ifname = "eth%d" % ifindex
|
||||
sessionid = self.session.shortsessionid()
|
||||
name = "n%s.%s.%s" % (self.objid, ifindex, sessionid)
|
||||
localname = "n%s.%s.%s" % (self.objid, ifname, sessionid)
|
||||
ifclass = XenVEth
|
||||
veth = ifclass(node = self, name = name, localname = localname,
|
||||
mtu = 1500, net = net, hwaddr = hwaddr)
|
||||
|
||||
veth.name = ifname
|
||||
try:
|
||||
self.addnetif(veth, ifindex)
|
||||
except:
|
||||
veth.shutdown()
|
||||
del veth
|
||||
raise
|
||||
return ifindex
|
||||
finally:
|
||||
self.lock.release()
|
||||
|
||||
def newtuntap(self, ifindex = None, ifname = None, net = None):
|
||||
self.warn("XEN PVM newtuntap() called but not implemented")
|
||||
|
||||
def sethwaddr(self, ifindex, addr):
|
||||
self._netif[ifindex].sethwaddr(addr)
|
||||
if self.up:
|
||||
pass
|
||||
#self.cmd([IP_BIN, "link", "set", "dev", self.ifname(ifindex),
|
||||
# "address", str(addr)])
|
||||
|
||||
def addaddr(self, ifindex, addr):
|
||||
if self.up:
|
||||
pass
|
||||
# self.cmd([IP_BIN, "addr", "add", str(addr),
|
||||
# "dev", self.ifname(ifindex)])
|
||||
self._netif[ifindex].addaddr(addr)
|
||||
|
||||
def deladdr(self, ifindex, addr):
|
||||
try:
|
||||
self._netif[ifindex].deladdr(addr)
|
||||
except ValueError:
|
||||
self.warn("trying to delete unknown address: %s" % addr)
|
||||
if self.up:
|
||||
pass
|
||||
# self.cmd([IP_BIN, "addr", "del", str(addr),
|
||||
# "dev", self.ifname(ifindex)])
|
||||
|
||||
valid_deladdrtype = ("inet", "inet6", "inet6link")
|
||||
def delalladdr(self, ifindex, addrtypes = valid_deladdrtype):
|
||||
addr = self.getaddr(self.ifname(ifindex), rescan = True)
|
||||
for t in addrtypes:
|
||||
if t not in self.valid_deladdrtype:
|
||||
raise ValueError, "addr type must be in: " + \
|
||||
" ".join(self.valid_deladdrtype)
|
||||
for a in addr[t]:
|
||||
self.deladdr(ifindex, a)
|
||||
# update cached information
|
||||
self.getaddr(self.ifname(ifindex), rescan = True)
|
||||
|
||||
# Xen PVM relies on boot process to bring up links
|
||||
#def ifup(self, ifindex):
|
||||
# if self.up:
|
||||
# self.cmd([IP_BIN, "link", "set", self.ifname(ifindex), "up"])
|
||||
|
||||
def newnetif(self, net = None, addrlist = [], hwaddr = None,
|
||||
ifindex = None, ifname = None):
|
||||
self.warn("XEN PVM newnetif(ifindex=%s, ifname=%s) called" %
|
||||
(ifindex, ifname))
|
||||
|
||||
self.lock.acquire()
|
||||
|
||||
if not self.up:
|
||||
self.lock.release()
|
||||
raise Exception, "Can't access add veth as VM disk isn't ready"
|
||||
return
|
||||
|
||||
if self.booted:
|
||||
self.lock.release()
|
||||
raise Exception, "Can't access add veth as VM is already running"
|
||||
return
|
||||
|
||||
try:
|
||||
if isinstance(net, EmaneNode):
|
||||
raise Exception, "Xen PVM doesn't yet support Emane nets"
|
||||
|
||||
# ifindex = self.newtuntap(ifindex = ifindex, ifname = ifname,
|
||||
# net = net)
|
||||
# # TUN/TAP is not ready for addressing yet; the device may
|
||||
# # take some time to appear, and installing it into a
|
||||
# # namespace after it has been bound removes addressing;
|
||||
# # save addresses with the interface now
|
||||
# self.attachnet(ifindex, net)
|
||||
# netif = self.netif(ifindex)
|
||||
# netif.sethwaddr(hwaddr)
|
||||
# for addr in maketuple(addrlist):
|
||||
# netif.addaddr(addr)
|
||||
# return ifindex
|
||||
else:
|
||||
ifindex = self.newveth(ifindex = ifindex, ifname = ifname,
|
||||
net = net, hwaddr = hwaddr)
|
||||
if net is not None:
|
||||
self.attachnet(ifindex, net)
|
||||
|
||||
rulefile = os.path.join(self.getconfigitem('mount_path'),
|
||||
self.etcdir,
|
||||
'udev/rules.d/70-persistent-net.rules')
|
||||
f = self.openpausednodefile(rulefile, "a")
|
||||
f.write('\n# Xen PVM virtual interface #%s %s with MAC address %s\n' % (ifindex, self.ifname(ifindex), hwaddr))
|
||||
# Using MAC address as we're now loading PVM net driver "early"
|
||||
# OLD: Would like to use MAC address, but udev isn't working with paravirtualized NICs. Perhaps the "set hw address" isn't triggering a rescan.
|
||||
f.write('SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{address}=="%s", KERNEL=="eth*", NAME="%s"\n' % (hwaddr, self.ifname(ifindex)))
|
||||
#f.write('SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", DEVPATH=="/devices/vif-%s/?*", KERNEL=="eth*", NAME="%s"\n' % (ifindex, self.ifname(ifindex)))
|
||||
f.close()
|
||||
|
||||
if hwaddr:
|
||||
self.sethwaddr(ifindex, hwaddr)
|
||||
for addr in maketuple(addrlist):
|
||||
self.addaddr(ifindex, addr)
|
||||
#self.ifup(ifindex)
|
||||
return ifindex
|
||||
finally:
|
||||
self.lock.release()
|
||||
|
||||
def connectnode(self, ifname, othernode, otherifname):
|
||||
self.warn("XEN PVM connectnode() called")
|
||||
|
||||
# tmplen = 8
|
||||
# tmp1 = "tmp." + "".join([random.choice(string.ascii_lowercase)
|
||||
# for x in xrange(tmplen)])
|
||||
# tmp2 = "tmp." + "".join([random.choice(string.ascii_lowercase)
|
||||
# for x in xrange(tmplen)])
|
||||
# check_call([IP_BIN, "link", "add", "name", tmp1,
|
||||
# "type", "veth", "peer", "name", tmp2])
|
||||
#
|
||||
# check_call([IP_BIN, "link", "set", tmp1, "netns", str(self.pid)])
|
||||
# self.cmd([IP_BIN, "link", "set", tmp1, "name", ifname])
|
||||
# self.addnetif(PyCoreNetIf(self, ifname), self.newifindex())
|
||||
#
|
||||
# check_call([IP_BIN, "link", "set", tmp2, "netns", str(othernode.pid)])
|
||||
# othernode.cmd([IP_BIN, "link", "set", tmp2, "name", otherifname])
|
||||
# othernode.addnetif(PyCoreNetIf(othernode, otherifname),
|
||||
# othernode.newifindex())
|
||||
|
||||
def addfile(self, srcname, filename):
|
||||
self.lock.acquire()
|
||||
if not self.up:
|
||||
self.lock.release()
|
||||
raise Exception, "Can't access VM file as VM disk isn't ready"
|
||||
return
|
||||
|
||||
if self.booted:
|
||||
self.lock.release()
|
||||
raise Exception, "Can't access VM file as VM is already running"
|
||||
return
|
||||
|
||||
if filename in self.FilesToIgnore:
|
||||
#self.warn("XEN PVM addfile(filename=%s) ignored" % [filename])
|
||||
return
|
||||
|
||||
if filename in self.FilesRedirection:
|
||||
redirFilename = self.FilesRedirection[filename]
|
||||
self.warn("XEN PVM addfile(filename=%s) redirected to %s" % (filename, redirFilename))
|
||||
filename = redirFilename
|
||||
|
||||
try:
|
||||
fin = open(srcname, "r")
|
||||
contents = fin.read()
|
||||
fin.close()
|
||||
|
||||
fout = self.openpausednodefile(filename, "w")
|
||||
fout.write(contents)
|
||||
mode = 0644  # assumed default; addfile() does not receive a mode argument
os.chmod(fout.name, mode)
|
||||
fout.close()
|
||||
self.info("created nodefile: '%s'; mode: 0%o" % (fout.name, mode))
|
||||
finally:
|
||||
self.lock.release()
|
||||
|
||||
self.warn("XEN PVM addfile(filename=%s) called" % [filename])
|
||||
|
||||
#shcmd = "mkdir -p $(dirname '%s') && mv '%s' '%s' && sync" % \
|
||||
# (filename, srcname, filename)
|
||||
#self.shcmd(shcmd)
|
||||
|
||||
def unmount_all(self, path):
|
||||
''' Namespaces inherit the host mounts, so we need to ensure that all
|
||||
namespaces have unmounted our temporary mount area so that the
|
||||
kpartx command will succeed.
|
||||
'''
|
||||
# Session.bootnodes() already has self.session._objslock
|
||||
for o in self.session.objs():
|
||||
if not isinstance(o, LxcNode):
|
||||
continue
|
||||
o.umount(path)
|
||||
|
265
daemon/core/xen/xenconfig.py
Normal file
|
@@ -0,0 +1,265 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2011-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
xenconfig.py: Implementation of the XenConfigManager class for managing
|
||||
configurable items for XenNodes.
|
||||
|
||||
Configuration for a XenNode is available at these three levels:
|
||||
Global config: XenConfigManager.configs[0] = (type='xen', values)
|
||||
Nodes of this machine type have this config. These are the default values.
|
||||
XenConfigManager.default_config comes from defaults + xen.conf
|
||||
Node type config: XenConfigManager.configs[0] = (type='mytype', values)
|
||||
All nodes of this type have this config.
|
||||
Node-specific config: XenConfigManager.configs[nodenumber] = (type, values)
|
||||
The node having this specific number has this config.
|
||||
'''
|
||||
|
||||
import sys, os, threading, subprocess, time, string
|
||||
import ConfigParser
|
||||
from xml.dom.minidom import parseString, Document
|
||||
from core.constants import *
|
||||
from core.api import coreapi
|
||||
from core.conf import ConfigurableManager, Configurable
|
||||
|
||||
|
||||
class XenConfigManager(ConfigurableManager):
|
||||
''' Xen controller object. Lives in a Session instance and is used for
|
||||
building Xen profiles.
|
||||
'''
|
||||
_name = "xen"
|
||||
_type = coreapi.CORE_TLV_REG_EMULSRV
|
||||
|
||||
def __init__(self, session):
|
||||
ConfigurableManager.__init__(self, session)
|
||||
self.verbose = self.session.getcfgitembool('verbose', False)
|
||||
self.default_config = XenDefaultConfig(session, objid=None)
|
||||
self.loadconfigfile()
|
||||
|
||||
def setconfig(self, nodenum, conftype, values):
|
||||
''' add configuration values for a node to a dictionary; values are
|
||||
usually received from a Configuration Message, and may refer to a
|
||||
node for which no object exists yet
|
||||
'''
|
||||
if nodenum is None:
|
||||
nodenum = 0 # used for storing the global default config
|
||||
return ConfigurableManager.setconfig(self, nodenum, conftype, values)
|
||||
|
||||
def getconfig(self, nodenum, conftype, defaultvalues):
|
||||
''' get configuration values for a node; if the values don't exist in
|
||||
our dictionary then return the default values supplied; if conftype
|
||||
is None then we return a match on any conftype.
|
||||
'''
|
||||
if nodenum is None:
|
||||
nodenum = 0 # used for storing the global default config
|
||||
return ConfigurableManager.getconfig(self, nodenum, conftype,
|
||||
defaultvalues)
|
||||
|
||||
def clearconfig(self, nodenum):
|
||||
''' remove configuration values for a node
|
||||
'''
|
||||
ConfigurableManager.clearconfig(self, nodenum)
|
||||
if 0 in self.configs:
|
||||
self.configs.pop(0)
|
||||
|
||||
def configure(self, session, msg):
|
||||
''' Handle configuration messages for global Xen config.
|
||||
'''
|
||||
return self.default_config.configure(self, msg)
|
||||
|
||||
def loadconfigfile(self, filename=None):
|
||||
''' Load defaults from the /etc/core/xen.conf file into dict object.
|
||||
'''
|
||||
if filename is None:
|
||||
filename = os.path.join(CORE_CONF_DIR, 'xen.conf')
|
||||
cfg = ConfigParser.SafeConfigParser()
|
||||
if filename not in cfg.read(filename):
|
||||
self.session.warn("unable to read Xen config file: %s" % filename)
|
||||
return
|
||||
section = "xen"
|
||||
if not cfg.has_section(section):
|
||||
self.session.warn("%s is missing a xen section!" % filename)
|
||||
return
|
||||
self.configfile = dict(cfg.items(section))
|
||||
# populate default config items from config file entries
|
||||
vals = list(self.default_config.getdefaultvalues())
|
||||
names = self.default_config.getnames()
|
||||
for i in range(len(names)):
|
||||
if names[i] in self.configfile:
|
||||
vals[i] = self.configfile[names[i]]
|
||||
# this sets XenConfigManager.configs[0] = (type='xen', vals)
|
||||
self.setconfig(None, self.default_config._name, vals)
|
||||
|
||||
def getconfigitem(self, name, model=None, node=None, value=None):
|
||||
''' Get a config item of the given name, first looking for node-specific
|
||||
configuration, then model specific, and finally global defaults.
|
||||
If a value is supplied, it will override any stored config.
|
||||
'''
|
||||
if value is not None:
|
||||
return value
|
||||
n = None
|
||||
if node:
|
||||
n = node.objid
|
||||
(t, v) = self.getconfig(nodenum=n, conftype=model, defaultvalues=None)
|
||||
if n is not None and v is None:
|
||||
# get item from default config for the node type
|
||||
(t, v) = self.getconfig(nodenum=None, conftype=model,
|
||||
defaultvalues=None)
|
||||
if v is None:
|
||||
# get item from default config for the machine type
|
||||
(t, v) = self.getconfig(nodenum=None,
|
||||
conftype=self.default_config._name,
|
||||
defaultvalues=None)
|
||||
|
||||
confignames = self.default_config.getnames()
|
||||
if v and name in confignames:
|
||||
i = confignames.index(name)
|
||||
return v[i]
|
||||
else:
|
||||
# name may only exist in config file
|
||||
if name in self.configfile:
|
||||
return self.configfile[name]
|
||||
else:
|
||||
#self.warn("missing config item '%s'" % name)
|
||||
return None
|
||||
|
||||
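# Lookup order used by getconfigitem() above, sketched with hypothetical
# node/model names:
#   xen = session.xen                         # XenConfigManager instance
#   xen.getconfigitem('ram_size', model='router', node=n3)
#     # 1. node-specific:   configs[n3.objid] stored under conftype 'router'
#     # 2. node-type:       configs[0] stored under conftype 'router'
#     # 3. machine default: configs[0] under conftype 'xen' (xen.conf values)
#     # 4. raw xen.conf entry by name, otherwise None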
|
||||
class XenConfig(Configurable):
|
||||
''' Manage Xen configuration profiles.
|
||||
'''
|
||||
|
||||
@classmethod
|
||||
def configure(cls, xen, msg):
|
||||
''' Handle configuration messages for setting up a model.
|
||||
Similar to Configurable.configure(), but considers opaque data
|
||||
for indicating node types.
|
||||
'''
|
||||
reply = None
|
||||
nodenum = msg.gettlv(coreapi.CORE_TLV_CONF_NODE)
|
||||
objname = msg.gettlv(coreapi.CORE_TLV_CONF_OBJ)
|
||||
conftype = msg.gettlv(coreapi.CORE_TLV_CONF_TYPE)
|
||||
opaque = msg.gettlv(coreapi.CORE_TLV_CONF_OPAQUE)
|
||||
|
||||
nodetype = objname
|
||||
if opaque is not None:
|
||||
opaque_items = opaque.split(':')
|
||||
if len(opaque_items) != 2:
|
||||
xen.warn("xen config: invalid opaque data in conf message")
|
||||
return None
|
||||
nodetype = opaque_items[1]
|
||||
|
||||
if xen.verbose:
|
||||
xen.info("received configure message for %s" % nodetype)
|
||||
if conftype == coreapi.CONF_TYPE_FLAGS_REQUEST:
|
||||
if xen.verbose:
|
||||
xen.info("replying to configure request for %s " % nodetype)
|
||||
# when object name is "all", the reply to this request may be None
|
||||
# if this node has not been configured for this model; otherwise we
|
||||
# reply with the defaults for this model
|
||||
if objname == "all":
|
||||
typeflags = coreapi.CONF_TYPE_FLAGS_UPDATE
|
||||
else:
|
||||
typeflags = coreapi.CONF_TYPE_FLAGS_NONE
|
||||
values = xen.getconfig(nodenum, nodetype, defaultvalues=None)[1]
|
||||
if values is None:
|
||||
# get defaults from default "xen" config which includes
|
||||
# settings from both cls._confdefaultvalues and xen.conf
|
||||
defaults = cls.getdefaultvalues()
|
||||
values = xen.getconfig(nodenum, cls._name, defaults)[1]
|
||||
if values is None:
|
||||
return None
|
||||
# reply with config options
|
||||
if nodenum is None:
|
||||
nodenum = 0
|
||||
reply = cls.toconfmsg(0, nodenum, typeflags, nodetype, values)
|
||||
elif conftype == coreapi.CONF_TYPE_FLAGS_RESET:
|
||||
if objname == "all":
|
||||
xen.clearconfig(nodenum)
|
||||
#elif conftype == coreapi.CONF_TYPE_FLAGS_UPDATE:
|
||||
else:
|
||||
# store the configuration values for later use, when the XenNode
|
||||
# object has been created
|
||||
if objname is None:
|
||||
xen.info("no configuration object for node %s" % nodenum)
|
||||
return None
|
||||
values_str = msg.gettlv(coreapi.CORE_TLV_CONF_VALUES)
|
||||
if values_str is None:
|
||||
# use default or preconfigured values
|
||||
defaults = cls.getdefaultvalues()
|
||||
values = xen.getconfig(nodenum, cls._name, defaults)[1]
|
||||
else:
|
||||
# use new values supplied from the conf message
|
||||
values = values_str.split('|')
|
||||
xen.setconfig(nodenum, nodetype, values)
|
||||
return reply
|
||||
|
||||
@classmethod
|
||||
def toconfmsg(cls, flags, nodenum, typeflags, nodetype, values):
|
||||
''' Convert this class to a Config API message. Some TLVs are defined
|
||||
by the class, but node number, conf type flags, and values must
|
||||
be passed in.
|
||||
'''
|
||||
values_str = string.join(values, '|')
|
||||
tlvdata = ""
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_NODE, nodenum)
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_OBJ,
|
||||
cls._name)
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_TYPE,
|
||||
typeflags)
|
||||
datatypes = tuple( map(lambda x: x[1], cls._confmatrix) )
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_DATA_TYPES,
|
||||
datatypes)
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_VALUES,
|
||||
values_str)
|
||||
captions = reduce( lambda a,b: a + '|' + b, \
|
||||
map(lambda x: x[4], cls._confmatrix))
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_CAPTIONS,
|
||||
captions)
|
||||
possiblevals = reduce( lambda a,b: a + '|' + b, \
|
||||
map(lambda x: x[3], cls._confmatrix))
|
||||
tlvdata += coreapi.CoreConfTlv.pack(
|
||||
coreapi.CORE_TLV_CONF_POSSIBLE_VALUES, possiblevals)
|
||||
if cls._bitmap is not None:
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_BITMAP,
|
||||
cls._bitmap)
|
||||
if cls._confgroups is not None:
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_GROUPS,
|
||||
cls._confgroups)
|
||||
opaque = "%s:%s" % (cls._name, nodetype)
|
||||
tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_OPAQUE,
|
||||
opaque)
|
||||
msg = coreapi.CoreConfMessage.pack(flags, tlvdata)
|
||||
return msg
|
||||
|
||||
|
||||
class XenDefaultConfig(XenConfig):
|
||||
''' Global default Xen configuration options.
|
||||
'''
|
||||
_name = "xen"
|
||||
# Configuration items:
|
||||
# ('name', 'type', 'default', 'possible-value-list', 'caption')
|
||||
_confmatrix = [
|
||||
('ram_size', coreapi.CONF_DATA_TYPE_STRING, '256', '',
|
||||
'ram size (MB)'),
|
||||
('disk_size', coreapi.CONF_DATA_TYPE_STRING, '256M', '',
|
||||
'disk size (use K/M/G suffix)'),
|
||||
('iso_file', coreapi.CONF_DATA_TYPE_STRING, '', '',
|
||||
'iso file'),
|
||||
('mount_path', coreapi.CONF_DATA_TYPE_STRING, '', '',
|
||||
'mount path'),
|
||||
('etc_path', coreapi.CONF_DATA_TYPE_STRING, '', '',
|
||||
'etc path'),
|
||||
('persist_tar_iso', coreapi.CONF_DATA_TYPE_STRING, '', '',
|
||||
'iso persist tar file'),
|
||||
('persist_tar', coreapi.CONF_DATA_TYPE_STRING, '', '',
|
||||
'persist tar file'),
|
||||
('root_password', coreapi.CONF_DATA_TYPE_STRING, 'password', '',
|
||||
'root password'),
|
||||
]
|
||||
|
||||
_confgroups = "domU properties:1-%d" % len(_confmatrix)
|
||||
|
40
daemon/data/core.conf
Normal file
|
@@ -0,0 +1,40 @@
|
|||
# Configuration file for CORE (core-gui, core-daemon)
|
||||
#
|
||||
|
||||
|
||||
### GUI configuration options ###
|
||||
[core-gui]
|
||||
# no options are presently defined; see the ~/.core preferences file
|
||||
|
||||
### core-daemon configuration options ###
|
||||
[core-daemon]
|
||||
pidfile = /var/run/core-daemon.pid
|
||||
logfile = /var/log/core-daemon.log
|
||||
# you may want to change the listenaddr below to 0.0.0.0
|
||||
listenaddr = localhost
|
||||
port = 4038
|
||||
numthreads = 1
|
||||
verbose = False
|
||||
quagga_bin_search = "/usr/local/bin /usr/bin /usr/lib/quagga"
|
||||
quagga_sbin_search = "/usr/local/sbin /usr/sbin /usr/lib/quagga"
|
||||
# uncomment the following line to load custom services from the specified dir
|
||||
# this may be a comma-separated list, and directory names should be unique
|
||||
# and not named 'services'
|
||||
#custom_services_dir = /home/username/.core/myservices
|
||||
# establish a control backchannel for accessing nodes (overridden by the session
|
||||
# option of the same name)
|
||||
#controlnet = 172.16.0.0/24
|
||||
# optional controlnet configuration script, uncomment to activate, and likely edit the script
|
||||
# controlnet_updown_script = /usr/local/share/core/examples/controlnet_updown
|
||||
# publish nodes' control IP addresses to /etc/hosts
|
||||
#update_etc_hosts = True
|
||||
|
||||
# EMANE configuration
|
||||
emane_platform_port = 8101
|
||||
emane_transform_port = 8201
|
||||
emane_event_monitor = False
|
||||
emane_models = RfPipe, Ieee80211abg, CommEffect, Bypass
|
||||
# EMANE log level range [0,4] default: 2
|
||||
#emane_log_level = 2
|
||||
emane_realtime = True
|
||||
|
35
daemon/data/xen.conf
Normal file
|
@@ -0,0 +1,35 @@
|
|||
# Configuration file for CORE Xen support
|
||||
### Xen configuration options ###
|
||||
[xen]
|
||||
|
||||
### The following three configuration options *must* be specified in this
|
||||
### system-wide configuration file.
|
||||
# LVM volume group name for creating new volumes
|
||||
vg_name = domU
|
||||
# directory containing an RSA SSH host key and authorized_keys file to use
|
||||
# within the VM
|
||||
ssh_key_path = /opt/core-xen/ssh
|
||||
# extra arguments to pass via 'extra=' option to 'xm create'
|
||||
xm_create_extra = console=hvc0 rtr_boot=/dev/xvda rtr_boot_fstype=iso9660 rtr_root=/boot/root.img rtr_persist=LABEL=persist rtr_swap=LABEL=swap rtr_overlay_limit=500
|
||||
|
||||
### The remaining configuration options *may* be specified here.
|
||||
### If not specified here, they *must* be specified in the user (or scenario's)
|
||||
### nodes.conf file as profile-specific configuration options.
|
||||
# domU RAM memory size in MB
|
||||
ram_size = 256
|
||||
# domU disk size in MB
|
||||
disk_size = 256M
|
||||
# ISO filesystem to mount as read-only
|
||||
iso_file = /opt/core-xen/iso-files/rtr.iso
|
||||
# directory used temporarily as mount point for persistent area, under
|
||||
# /tmp/pycore.nnnnn/nX.conf/
|
||||
mount_path = /rtr/persist
|
||||
# subdirectory of mount_path where configuration files are located on the VM
|
||||
etc_path = config/etc
|
||||
# name of tar file within the iso_file to unpack to mount_path
|
||||
persist_tar_iso = persist-template.tar
|
||||
# name of tar file in dom0 that will be unpacked to mount_path prior to boot
|
||||
# the string '%h' will be replaced with the hostname (e.g. 'n3' for node 3);
# the '%' is doubled below to escape ConfigParser interpolation
|
||||
persist_tar = /opt/core-xen/rtr-configs/custom-%%h.tar
|
||||
# root password to set
|
||||
root_password = password
|
151
daemon/doc/Makefile.am
Normal file
|
@@ -0,0 +1,151 @@
|
|||
# CORE
|
||||
# (c)2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
# Builds html and pdf documentation using Sphinx.
|
||||
#
|
||||
|
||||
# extra cruft to remove
|
||||
DISTCLEANFILES = conf.py Makefile.in stamp-vti *.rst
|
||||
|
||||
all: index.rst
|
||||
|
||||
# auto-generated Python documentation using Sphinx
|
||||
index.rst:
|
||||
sphinx-apidoc -o . ../core
|
||||
mv modules.rst index.rst
|
||||
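# Typical use once configure has generated the real Makefile from this
# Makefile.am (targets are listed by 'make help' below):
#   make html        # HTML output lands in _build/html/index.html
#   make latexpdf    # PDF via pdflatex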
|
||||
###### below this line was generated using sphinx-quickstart ######
|
||||
|
||||
# Makefile for Sphinx documentation
|
||||
#
|
||||
|
||||
# You can set these variables from the command line.
|
||||
SPHINXOPTS =
|
||||
SPHINXBUILD = sphinx-build
|
||||
PAPER =
|
||||
BUILDDIR = _build
|
||||
|
||||
# Internal variables.
|
||||
PAPEROPT_a4 = -D latex_paper_size=a4
|
||||
PAPEROPT_letter = -D latex_paper_size=letter
|
||||
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
|
||||
|
||||
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest
|
||||
|
||||
help:
|
||||
@echo "Please use \`make <target>' where <target> is one of"
|
||||
@echo " html to make standalone HTML files"
|
||||
@echo " dirhtml to make HTML files named index.html in directories"
|
||||
@echo " singlehtml to make a single large HTML file"
|
||||
@echo " pickle to make pickle files"
|
||||
@echo " json to make JSON files"
|
||||
@echo " htmlhelp to make HTML files and a HTML help project"
|
||||
@echo " qthelp to make HTML files and a qthelp project"
|
||||
@echo " devhelp to make HTML files and a Devhelp project"
|
||||
@echo " epub to make an epub"
|
||||
@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
|
||||
@echo " latexpdf to make LaTeX files and run them through pdflatex"
|
||||
@echo " text to make text files"
|
||||
@echo " man to make manual pages"
|
||||
@echo " changes to make an overview of all changed/added/deprecated items"
|
||||
@echo " linkcheck to check all external links for integrity"
|
||||
@echo " doctest to run all doctests embedded in the documentation (if enabled)"
|
||||
|
||||
clean:
|
||||
-rm -rf $(BUILDDIR)/*
|
||||
|
||||
html:
|
||||
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
|
||||
@echo
|
||||
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
|
||||
|
||||
dirhtml:
|
||||
$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
|
||||
@echo
|
||||
@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
|
||||
|
||||
singlehtml:
|
||||
$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
|
||||
@echo
|
||||
@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
|
||||
|
||||
pickle:
|
||||
$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
|
||||
@echo
|
||||
@echo "Build finished; now you can process the pickle files."
|
||||
|
||||
json:
|
||||
$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
|
||||
@echo
|
||||
@echo "Build finished; now you can process the JSON files."
|
||||
|
||||
htmlhelp:
|
||||
$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
|
||||
@echo
|
||||
@echo "Build finished; now you can run HTML Help Workshop with the" \
|
||||
".hhp project file in $(BUILDDIR)/htmlhelp."
|
||||
|
||||
qthelp:
|
||||
$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
|
||||
@echo
|
||||
@echo "Build finished; now you can run "qcollectiongenerator" with the" \
|
||||
".qhcp project file in $(BUILDDIR)/qthelp, like this:"
|
||||
@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/CORE.qhcp"
|
||||
@echo "To view the help file:"
|
||||
@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/CORE.qhc"
|
||||
|
||||
devhelp:
|
||||
$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
|
||||
@echo
|
||||
@echo "Build finished."
|
||||
@echo "To view the help file:"
|
||||
@echo "# mkdir -p $$HOME/.local/share/devhelp/CORE"
|
||||
@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/CORE"
|
||||
@echo "# devhelp"
|
||||
|
||||
epub:
|
||||
$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
|
||||
@echo
|
||||
@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
|
||||
|
||||
latex:
|
||||
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
|
||||
@echo
|
||||
@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
|
||||
@echo "Run \`make' in that directory to run these through (pdf)latex" \
|
||||
"(use \`make latexpdf' here to do that automatically)."
|
||||
|
||||
latexpdf:
|
||||
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
|
||||
@echo "Running LaTeX files through pdflatex..."
|
||||
make -C $(BUILDDIR)/latex all-pdf
|
||||
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
|
||||
|
||||
text:
|
||||
$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
|
||||
@echo
|
||||
@echo "Build finished. The text files are in $(BUILDDIR)/text."
|
||||
|
||||
man:
|
||||
$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
|
||||
@echo
|
||||
@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
|
||||
|
||||
changes:
|
||||
$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
|
||||
@echo
|
||||
@echo "The overview file is in $(BUILDDIR)/changes."
|
||||
|
||||
linkcheck:
|
||||
$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
|
||||
@echo
|
||||
@echo "Link check complete; look for any errors in the above output " \
|
||||
"or in $(BUILDDIR)/linkcheck/output.txt."
|
||||
|
||||
doctest:
|
||||
$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
|
||||
@echo "Testing of doctests in the sources finished, look at the " \
|
||||
"results in $(BUILDDIR)/doctest/output.txt."
|
256 daemon/doc/conf.py.in Normal file
@ -0,0 +1,256 @@
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# CORE Python documentation build configuration file, created by
|
||||
# sphinx-quickstart on Wed Jun 13 10:44:22 2012.
|
||||
#
|
||||
# This file is execfile()d with the current directory set to its containing dir.
|
||||
#
|
||||
# Note that not all possible configuration values are present in this
|
||||
# autogenerated file.
|
||||
#
|
||||
# All configuration values have a default; values that are commented out
|
||||
# serve to show the default.
|
||||
|
||||
import sys, os
|
||||
|
||||
# If extensions (or modules to document with autodoc) are in another directory,
|
||||
# add these directories to sys.path here. If the directory is relative to the
|
||||
# documentation root, use os.path.abspath to make it absolute, like shown here.
|
||||
#sys.path.insert(0, os.path.abspath('.'))
|
||||
|
||||
# -- General configuration -----------------------------------------------------
|
||||
|
||||
# If your documentation needs a minimal Sphinx version, state it here.
|
||||
#needs_sphinx = '1.0'
|
||||
|
||||
# Add any Sphinx extension module names here, as strings. They can be extensions
|
||||
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
|
||||
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.pngmath', 'sphinx.ext.ifconfig']
|
||||
|
||||
# Add any paths that contain templates here, relative to this directory.
|
||||
templates_path = ['_templates']
|
||||
|
||||
# The suffix of source filenames.
|
||||
source_suffix = '.rst'
|
||||
|
||||
# The encoding of source files.
|
||||
#source_encoding = 'utf-8-sig'
|
||||
|
||||
# The master toctree document.
|
||||
master_doc = 'index'
|
||||
|
||||
# General information about the project.
|
||||
project = u'CORE Python modules'
|
||||
copyright = u'2012, core-dev'
|
||||
|
||||
# The version info for the project you're documenting, acts as replacement for
|
||||
# |version| and |release|, also used in various other places throughout the
|
||||
# built documents.
|
||||
#
|
||||
# The short X.Y version.
|
||||
version = '@CORE_VERSION@'
|
||||
# The full version, including alpha/beta/rc tags.
|
||||
release = '@CORE_VERSION@'
|
||||
|
||||
# The language for content autogenerated by Sphinx. Refer to documentation
|
||||
# for a list of supported languages.
|
||||
#language = None
|
||||
|
||||
# There are two options for replacing |today|: either, you set today to some
|
||||
# non-false value, then it is used:
|
||||
#today = ''
|
||||
# Else, today_fmt is used as the format for a strftime call.
|
||||
#today_fmt = '%B %d, %Y'
|
||||
|
||||
# List of patterns, relative to source directory, that match files and
|
||||
# directories to ignore when looking for source files.
|
||||
exclude_patterns = ['_build']
|
||||
|
||||
# The reST default role (used for this markup: `text`) to use for all documents.
|
||||
#default_role = None
|
||||
|
||||
# If true, '()' will be appended to :func: etc. cross-reference text.
|
||||
#add_function_parentheses = True
|
||||
|
||||
# If true, the current module name will be prepended to all description
|
||||
# unit titles (such as .. function::).
|
||||
#add_module_names = True
|
||||
|
||||
# If true, sectionauthor and moduleauthor directives will be shown in the
|
||||
# output. They are ignored by default.
|
||||
#show_authors = False
|
||||
|
||||
# The name of the Pygments (syntax highlighting) style to use.
|
||||
pygments_style = 'sphinx'
|
||||
|
||||
# A list of ignored prefixes for module index sorting.
|
||||
#modindex_common_prefix = []
|
||||
|
||||
|
||||
# -- Options for HTML output ---------------------------------------------------
|
||||
|
||||
# The theme to use for HTML and HTML Help pages. See the documentation for
|
||||
# a list of builtin themes.
|
||||
html_theme = 'default'
|
||||
|
||||
# Theme options are theme-specific and customize the look and feel of a theme
|
||||
# further. For a list of options available for each theme, see the
|
||||
# documentation.
|
||||
#html_theme_options = {}
|
||||
|
||||
# Add any paths that contain custom themes here, relative to this directory.
|
||||
#html_theme_path = []
|
||||
|
||||
# The name for this set of Sphinx documents. If None, it defaults to
|
||||
# "<project> v<release> documentation".
|
||||
#html_title = None
|
||||
|
||||
# A shorter title for the navigation bar. Default is the same as html_title.
|
||||
#html_short_title = None
|
||||
|
||||
# The name of an image file (relative to this directory) to place at the top
|
||||
# of the sidebar.
|
||||
#html_logo = None
|
||||
|
||||
# The name of an image file (within the static path) to use as favicon of the
|
||||
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
|
||||
# pixels large.
|
||||
#html_favicon = None
|
||||
|
||||
# Add any paths that contain custom static files (such as style sheets) here,
|
||||
# relative to this directory. They are copied after the builtin static files,
|
||||
# so a file named "default.css" will overwrite the builtin "default.css".
|
||||
html_static_path = ['_static']
|
||||
|
||||
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
|
||||
# using the given strftime format.
|
||||
#html_last_updated_fmt = '%b %d, %Y'
|
||||
|
||||
# If true, SmartyPants will be used to convert quotes and dashes to
|
||||
# typographically correct entities.
|
||||
#html_use_smartypants = True
|
||||
|
||||
# Custom sidebar templates, maps document names to template names.
|
||||
#html_sidebars = {}
|
||||
|
||||
# Additional templates that should be rendered to pages, maps page names to
|
||||
# template names.
|
||||
#html_additional_pages = {}
|
||||
|
||||
# If false, no module index is generated.
|
||||
#html_domain_indices = True
|
||||
|
||||
# If false, no index is generated.
|
||||
#html_use_index = True
|
||||
|
||||
# If true, the index is split into individual pages for each letter.
|
||||
#html_split_index = False
|
||||
|
||||
# If true, links to the reST sources are added to the pages.
|
||||
#html_show_sourcelink = True
|
||||
|
||||
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
|
||||
#html_show_sphinx = True
|
||||
|
||||
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
|
||||
#html_show_copyright = True
|
||||
|
||||
# If true, an OpenSearch description file will be output, and all pages will
|
||||
# contain a <link> tag referring to it. The value of this option must be the
|
||||
# base URL from which the finished HTML is served.
|
||||
#html_use_opensearch = ''
|
||||
|
||||
# This is the file name suffix for HTML files (e.g. ".xhtml").
|
||||
#html_file_suffix = None
|
||||
|
||||
# Output file base name for HTML help builder.
|
||||
htmlhelp_basename = 'COREpythondoc'
|
||||
|
||||
|
||||
# -- Options for LaTeX output --------------------------------------------------
|
||||
|
||||
# The paper size ('letter' or 'a4').
|
||||
#latex_paper_size = 'letter'
|
||||
|
||||
# The font size ('10pt', '11pt' or '12pt').
|
||||
#latex_font_size = '10pt'
|
||||
|
||||
# Grouping the document tree into LaTeX files. List of tuples
|
||||
# (source start file, target name, title, author, documentclass [howto/manual]).
|
||||
latex_documents = [
|
||||
('index', 'index.tex', u'CORE Python Documentation',
|
||||
u'core-dev', 'manual'),
|
||||
]
|
||||
|
||||
# The name of an image file (relative to this directory) to place at the top of
|
||||
# the title page.
|
||||
#latex_logo = None
|
||||
|
||||
# For "manual" documents, if this is true, then toplevel headings are parts,
|
||||
# not chapters.
|
||||
#latex_use_parts = False
|
||||
|
||||
# If true, show page references after internal links.
|
||||
#latex_show_pagerefs = False
|
||||
|
||||
# If true, show URL addresses after external links.
|
||||
#latex_show_urls = False
|
||||
|
||||
# Additional stuff for the LaTeX preamble.
|
||||
#latex_preamble = ''
|
||||
|
||||
# Documents to append as an appendix to all manuals.
|
||||
#latex_appendices = []
|
||||
|
||||
# If false, no module index is generated.
|
||||
#latex_domain_indices = True
|
||||
|
||||
|
||||
# -- Options for manual page output --------------------------------------------
|
||||
|
||||
# One entry per manual page. List of tuples
|
||||
# (source start file, name, description, authors, manual section).
|
||||
man_pages = [
|
||||
('index', 'core-python', u'CORE Python Documentation',
|
||||
[u'core-dev'], 1)
|
||||
]
|
||||
|
||||
|
||||
# -- Options for Epub output ---------------------------------------------------
|
||||
|
||||
# Bibliographic Dublin Core info.
|
||||
epub_title = u'CORE Python'
|
||||
epub_author = u'core-dev'
|
||||
epub_publisher = u'core-dev'
|
||||
epub_copyright = u'2012, core-dev'
|
||||
|
||||
# The language of the text. It defaults to the language option
|
||||
# or en if the language is not set.
|
||||
#epub_language = ''
|
||||
|
||||
# The scheme of the identifier. Typical schemes are ISBN or URL.
|
||||
#epub_scheme = ''
|
||||
|
||||
# The unique identifier of the text. This can be a ISBN number
|
||||
# or the project homepage.
|
||||
#epub_identifier = ''
|
||||
|
||||
# A unique identification for the text.
|
||||
#epub_uid = ''
|
||||
|
||||
# HTML files that should be inserted before the pages created by sphinx.
|
||||
# The format is a list of tuples containing the path and title.
|
||||
#epub_pre_files = []
|
||||
|
||||
# HTML files that should be inserted after the pages created by sphinx.
|
||||
# The format is a list of tuples containing the path and title.
|
||||
#epub_post_files = []
|
||||
|
||||
# A list of files that should not be packed into the epub file.
|
||||
#epub_exclude_files = []
|
||||
|
||||
# The depth of the table of contents in toc.ncx.
|
||||
#epub_tocdepth = 3
|
||||
|
||||
# Allow duplicate toc entries.
|
||||
#epub_tocdup = True
|
53 daemon/examples/controlnet_updown Normal file
@ -0,0 +1,53 @@
#!/bin/bash
|
||||
# Sample controlnet up/down script that will be executed when the control
|
||||
# network is brought up or down. This script either adds an interface to the
|
||||
# controlnet bridge or adds a permissive iptables firewall rule.
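# The script receives the controlnet interface/bridge name as $1 and the action
# ('startup' or 'shutdown') as $2; a manual test run might look like this
# (the bridge name is hypothetical):
#   ./controlnet_updown ctrlnet.0 startup
#   ./controlnet_updown ctrlnet.0 shutdown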
|
||||
|
||||
controlnet_intf=$1
|
||||
action=$2
|
||||
|
||||
config_type=iptables # iptables or brctl
|
||||
|
||||
iptables_address=10.205.15.132
|
||||
brctl_intf=eth2
|
||||
|
||||
BRCTL=/sbin/brctl
|
||||
IPTABLES=/usr/sbin/iptables
|
||||
|
||||
case "$action" in
|
||||
startup)
|
||||
case "$config_type" in
|
||||
iptables)
|
||||
$IPTABLES -I FORWARD -i $controlnet_intf -d $iptables_address -j ACCEPT
|
||||
$IPTABLES -I FORWARD -o $controlnet_intf -s $iptables_address -j ACCEPT
|
||||
;;
|
||||
brctl)
|
||||
$BRCTL addif $controlnet_intf $brctl_intf
|
||||
;;
|
||||
*)
|
||||
echo "Invalid config_type $config_type"
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
|
||||
shutdown)
|
||||
case "$config_type" in
|
||||
iptables)
|
||||
$IPTABLES -D FORWARD -i $controlnet_intf -d $iptables_address -j ACCEPT
|
||||
$IPTABLES -D FORWARD -o $controlnet_intf -s $iptables_address -j ACCEPT
|
||||
;;
|
||||
brctl)
|
||||
$BRCTL delif $controlnet_intf $brctl_intf
|
||||
;;
|
||||
*)
|
||||
echo "Invalid config_type $config_type"
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
|
||||
*)
|
||||
echo "Invalid action $action"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
exit 0
|
187 daemon/examples/emanemodel2core.py Executable file
@ -0,0 +1,187 @@
#!/usr/bin/env python
|
||||
#
|
||||
# CORE
|
||||
# Copyright (c) 2013 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
emanemodel2core.py: scans an EMANE model source file
|
||||
(e.g. emane/models/rfpipe/maclayer/rfpipemaclayer.cc) and outputs Python
|
||||
bindings that allow the model to be used in CORE.
|
||||
|
||||
When using this conversion utility, you should replace XYZ, Xyz, and xyz with
|
||||
the actual model name. Note the capitalization convention.
|
||||
'''
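# Example invocations (paths are illustrative; the -i/-o options are defined in
# main() below):
#   python emanemodel2core.py -i rfpipemaclayer.cc -o xyz.py
#   python emanemodel2core.py -i rfpipemaclayer.cc          # write to stdout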
|
||||
|
||||
import os, sys, optparse
|
||||
|
||||
MODEL_TEMPLATE_PART1 = """
|
||||
#
|
||||
# CORE
|
||||
# Copyright (c)2013 Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Name <email@company.com>
|
||||
#
|
||||
'''
|
||||
xyz.py: EMANE XYZ model bindings for CORE
|
||||
'''
|
||||
|
||||
from core.api import coreapi
|
||||
from emane import EmaneModel
|
||||
from universal import EmaneUniversalModel
|
||||
|
||||
class EmaneXyzModel(EmaneModel):
|
||||
def __init__(self, session, objid = None, verbose = False):
|
||||
EmaneModel.__init__(self, session, objid, verbose)
|
||||
|
||||
# model name
|
||||
_name = "emane_xyz"
|
||||
# MAC parameters
|
||||
_confmatrix_mac = [
|
||||
"""
|
||||
|
||||
MODEL_TEMPLATE_PART2 = """
|
||||
]
|
||||
|
||||
# PHY parameters from Universal PHY
|
||||
_confmatrix_phy = EmaneUniversalModel._confmatrix
|
||||
|
||||
_confmatrix = _confmatrix_mac + _confmatrix_phy
|
||||
|
||||
# value groupings
|
||||
_confgroups = "XYZ MAC Parameters:1-%d|Universal PHY Parameters:%d-%d" \
|
||||
% ( len(_confmatrix_mac), len(_confmatrix_mac) + 1, len(_confmatrix))
|
||||
|
||||
def buildnemxmlfiles(self, e, ifc):
|
||||
''' Build the necessary nem, mac, and phy XMLs in the given path.
|
||||
If an individual NEM has a nonstandard config, we need to build
|
||||
that file also. Otherwise the WLAN-wide nXXemane_xyznem.xml,
|
||||
nXXemane_xyzmac.xml, nXXemane_xyzphy.xml are used.
|
||||
'''
|
||||
values = e.getifcconfig(self.objid, self._name,
|
||||
self.getdefaultvalues(), ifc)
|
||||
if values is None:
|
||||
return
|
||||
nemdoc = e.xmldoc("nem")
|
||||
nem = nemdoc.getElementsByTagName("nem").pop()
|
||||
nem.setAttribute("name", "XYZ NEM")
|
||||
mactag = nemdoc.createElement("mac")
|
||||
mactag.setAttribute("definition", self.macxmlname(ifc))
|
||||
nem.appendChild(mactag)
|
||||
phytag = nemdoc.createElement("phy")
|
||||
phytag.setAttribute("definition", self.phyxmlname(ifc))
|
||||
nem.appendChild(phytag)
|
||||
e.xmlwrite(nemdoc, self.nemxmlname(ifc))
|
||||
|
||||
names = list(self.getnames())
|
||||
macnames = names[:len(self._confmatrix_mac)]
|
||||
phynames = names[len(self._confmatrix_mac):]
|
||||
# make any changes to the mac/phy names here to e.g. exclude them from
|
||||
# the XML output
|
||||
|
||||
macdoc = e.xmldoc("mac")
|
||||
mac = macdoc.getElementsByTagName("mac").pop()
|
||||
mac.setAttribute("name", "XYZ MAC")
|
||||
mac.setAttribute("library", "xyzmaclayer")
|
||||
# append MAC options to macdoc
|
||||
map( lambda n: mac.appendChild(e.xmlparam(macdoc, n, \
|
||||
self.valueof(n, values))), macnames)
|
||||
e.xmlwrite(macdoc, self.macxmlname(ifc))
|
||||
|
||||
phydoc = EmaneUniversalModel.getphydoc(e, self, values, phynames)
|
||||
e.xmlwrite(phydoc, self.phyxmlname(ifc))
|
||||
|
||||
"""
|
||||
|
||||
def emane_model_source_to_core(infile, outfile):
|
||||
do_parse_line = False
|
||||
output = MODEL_TEMPLATE_PART1
|
||||
|
||||
with open(infile, 'r') as f:
|
||||
for line in f:
|
||||
# begin marker
|
||||
if "EMANE::ConfigurationDefinition" in line:
|
||||
do_parse_line = True
|
||||
# end marker -- all done
|
||||
if "{0, 0, 0, 0, 0, 0" in line:
|
||||
break
|
||||
if do_parse_line:
|
||||
outstr = convert_line(line)
|
||||
if outstr is not None:
|
||||
output += outstr
|
||||
continue
|
||||
output += MODEL_TEMPLATE_PART2
|
||||
|
||||
if outfile == sys.stdout:
|
||||
sys.stdout.write(output)
|
||||
else:
|
||||
with open(outfile, 'w') as f:
|
||||
f.write(output)
|
||||
|
||||
def convert_line(line):
|
||||
line = line.strip()
|
||||
# skip comments
|
||||
if line.startswith(('/*', '//')):
|
||||
return None
|
||||
items = line.strip('{},').split(',')
|
||||
if len(items) != 7:
|
||||
#print "continuning on line=", len(items), items
|
||||
return None
|
||||
return convert_items_to_line(items)
|
||||
|
||||
def convert_items_to_line(items):
|
||||
fields = ('required', 'default', 'count', 'name', 'value', 'type',
|
||||
'description')
|
||||
getfield = lambda(x): items[fields.index(x)].strip()
|
||||
|
||||
output = " ("
|
||||
output += "%s, " % getfield('name')
|
||||
value = getfield('value')
|
||||
if value == '"off"':
|
||||
type = "coreapi.CONF_DATA_TYPE_BOOL"
|
||||
value = "0"
|
||||
defaults = '"On,Off"'
|
||||
elif value == '"on"':
|
||||
type = "coreapi.CONF_DATA_TYPE_BOOL"
|
||||
value = '"1"'
|
||||
defaults = '"On,Off"'
|
||||
else:
|
||||
type = "coreapi.CONF_DATA_TYPE_STRING"
|
||||
defaults = '""'
|
||||
output += "%s, %s, %s, " % (type, value, defaults)
|
||||
output += getfield('description')
|
||||
output += "),\n"
|
||||
return output
|
||||
|
||||
|
||||
def main():
|
||||
usagestr = "usage: %prog [-h] [options] -- <command> ..."
|
||||
parser = optparse.OptionParser(usage = usagestr)
|
||||
parser.set_defaults(infile = None, outfile = sys.stdout)
|
||||
|
||||
parser.add_option("-i", "--infile", dest = "infile",
|
||||
help = "file to read (usually '*mac.cc')")
|
||||
parser.add_option("-o", "--outfile", dest = "outfile",
|
||||
help = "file to write (stdout is default)")
|
||||
|
||||
def usage(msg = None, err = 0):
|
||||
sys.stdout.write("\n")
|
||||
if msg:
|
||||
sys.stdout.write(msg + "\n\n")
|
||||
parser.print_help()
|
||||
sys.exit(err)
|
||||
|
||||
# parse command line options
|
||||
(options, args) = parser.parse_args()
|
||||
|
||||
if options.infile is None:
|
||||
usage("please specify input file with the '-i' option", err=1)
|
||||
|
||||
emane_model_source_to_core(options.infile, options.outfile)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
78 daemon/examples/findcore.py Executable file
@ -0,0 +1,78 @@
#!/usr/bin/env python
|
||||
#
|
||||
# Search for installed CORE library files and Python bindings.
|
||||
#
|
||||
|
||||
import os, glob
|
||||
|
||||
pythondirs = [
|
||||
"/usr/lib/python2.7/site-packages",
|
||||
"/usr/lib/python2.7/dist-packages",
|
||||
"/usr/lib64/python2.7/site-packages",
|
||||
"/usr/lib64/python2.7/dist-packages",
|
||||
"/usr/local/lib/python2.7/site-packages",
|
||||
"/usr/local/lib/python2.7/dist-packages",
|
||||
"/usr/local/lib64/python2.7/site-packages",
|
||||
"/usr/local/lib64/python2.7/dist-packages",
|
||||
"/usr/lib/python2.6/site-packages",
|
||||
"/usr/lib/python2.6/dist-packages",
|
||||
"/usr/lib64/python2.6/site-packages",
|
||||
"/usr/lib64/python2.6/dist-packages",
|
||||
"/usr/local/lib/python2.6/site-packages",
|
||||
"/usr/local/lib/python2.6/dist-packages",
|
||||
"/usr/local/lib64/python2.6/site-packages",
|
||||
"/usr/local/lib64/python2.6/dist-packages",
|
||||
]
|
||||
|
||||
tcldirs = [
|
||||
"/usr/lib/core",
|
||||
"/usr/local/lib/core",
|
||||
]
|
||||
|
||||
def find_in_file(fn, search, column=None):
|
||||
''' Find a line starting with 'search' in the file given by the filename
|
||||
'fn'. Return True if found, False if not found, or the column text if
|
||||
column is specified.
|
||||
'''
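# e.g. find_in_file("/usr/lib/core/version.tcl", "set CORE_VERSION", 2) returns
# the third whitespace-separated field of the matching line (the version string),
# True if no column is given, or False when no such line or file exists.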
|
||||
r = False
|
||||
if not os.path.exists(fn):
|
||||
return r
|
||||
f = open(fn, "r")
|
||||
for line in f:
|
||||
if line[:len(search)] != search:
|
||||
continue
|
||||
r = True
|
||||
if column is not None:
|
||||
r = line.split()[column]
|
||||
break
|
||||
f.close()
|
||||
return r
|
||||
|
||||
def main():
|
||||
versions = []
|
||||
for d in pythondirs:
|
||||
fn = "%s/core/constants.py" % d
|
||||
ver = find_in_file(fn, 'COREDPY_VERSION', 2)
|
||||
if ver:
|
||||
ver = ver.strip('"')
|
||||
versions.append((d, ver))
|
||||
for e in glob.iglob("%s/core_python*egg-info" % d):
|
||||
ver = find_in_file(e, 'Version:', 1)
|
||||
if ver:
|
||||
versions.append((e, ver))
|
||||
for e in glob.iglob("%s/netns*egg-info" % d):
|
||||
ver = find_in_file(e, 'Version:', 1)
|
||||
if ver:
|
||||
versions.append((e, ver))
|
||||
for d in tcldirs:
|
||||
fn = "%s/version.tcl" % d
|
||||
ver = find_in_file(fn, 'set CORE_VERSION', 2)
|
||||
if ver:
|
||||
versions.append((d, ver))
|
||||
|
||||
for (d, ver) in versions:
|
||||
print "%8s %s" % (ver, d)
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
26 daemon/examples/myservices/README.txt Normal file
@ -0,0 +1,26 @@
This directory contains a sample custom service that you can use as a template
|
||||
for creating your own services.
|
||||
|
||||
Follow these steps to add your own services:
|
||||
|
||||
1. Modify the sample service MyService to do what you want. It could generate
|
||||
config/script files, mount per-node directories, start processes/scripts,
|
||||
etc. sample.py is a Python file that defines one or more classes to be
|
||||
imported. You can create multiple Python files that will be imported.
|
||||
Add any new filenames to the __init__.py file.
|
||||
|
||||
2. Put these files in a directory such as /home/username/.core/myservices
|
||||
Note that the last component of this directory name 'myservices' should not
|
||||
be named something like 'services' which conflicts with an existing Python
|
||||
name (the syntax 'from myservices import *' is used).
|
||||
|
||||
3. Add a 'custom_services_dir = /home/username/.core/myservices' entry to the
|
||||
/etc/core/core.conf file.
|
||||
|
||||
4. Restart the CORE daemon (core-daemon). Any import errors (Python syntax)
|
||||
should be displayed in the /var/log/core-daemon.log log file (or on screen).
|
||||
|
||||
5. Start using your custom service on your nodes. You can create a new node
|
||||
type that uses your service, or change the default services for an existing
|
||||
node type, or change individual nodes.
|
||||
|
7 daemon/examples/myservices/__init__.py Normal file
@ -0,0 +1,7 @@
"""myservices
|
||||
|
||||
Custom services that you define can be put in this directory. Everything
|
||||
listed in __all__ is automatically loaded when you add this directory to the
|
||||
custom_services_dir = '/full/path/to/here' core.conf file option.
|
||||
"""
|
||||
__all__ = ["sample"]
|
64 daemon/examples/myservices/sample.py Normal file
@ -0,0 +1,64 @@
#
|
||||
# CORE
|
||||
# Copyright (c)2010-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
''' Sample user-defined service.
|
||||
'''
|
||||
|
||||
import os
|
||||
|
||||
from core.service import CoreService, addservice
|
||||
from core.misc.ipaddr import IPv4Prefix, IPv6Prefix
|
||||
|
||||
class MyService(CoreService):
|
||||
''' This is a sample user-defined service.
|
||||
'''
|
||||
# a unique name is required, without spaces
|
||||
_name = "MyService"
|
||||
# you can create your own group here
|
||||
_group = "Utility"
|
||||
# list of other services this service depends on
|
||||
_depends = ()
|
||||
# per-node directories
|
||||
_dirs = ()
|
||||
# generated files (without a full path this file goes in the node's dir,
|
||||
# e.g. /tmp/pycore.12345/n1.conf/)
|
||||
_configs = ('myservice.sh', )
|
||||
# this controls the starting order vs other enabled services
|
||||
_startindex = 50
|
||||
# list of startup commands, also may be generated during startup
|
||||
_startup = ('sh myservice.sh',)
|
||||
# list of shutdown commands
|
||||
_shutdown = ()
|
||||
|
||||
@classmethod
|
||||
def generateconfig(cls, node, filename, services):
|
||||
''' Return a string that will be written to filename, or sent to the
|
||||
GUI for user customization.
|
||||
'''
|
||||
cfg = "#!/bin/sh\n"
|
||||
cfg += "# auto-generated by MyService (sample.py)\n"
|
||||
|
||||
for ifc in node.netifs():
|
||||
cfg += 'echo "Node %s has interface %s"\n' % (node.name, ifc.name)
|
||||
# here we do something interesting
|
||||
cfg += "\n".join(map(cls.subnetentry, ifc.addrlist))
|
||||
break
|
||||
return cfg
|
||||
|
||||
@staticmethod
|
||||
def subnetentry(x):
|
||||
''' Generate a subnet declaration block given an IPv4 prefix string
|
||||
for inclusion in the config file.
|
||||
'''
|
||||
if x.find(":") >= 0:
|
||||
# this is an IPv6 address
|
||||
return ""
|
||||
else:
|
||||
net = IPv4Prefix(x)
|
||||
return 'echo " network %s"' % (net)
|
||||
|
||||
# this line is required to add the above class to the list of available services
|
||||
addservice(MyService)
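# For reference, generateconfig() above produces a myservice.sh along these lines
# for a node n1 with one interface on 10.0.0.1/24 (names and addresses are
# illustrative; the exact prefix text comes from IPv4Prefix):
#   #!/bin/sh
#   # auto-generated by MyService (sample.py)
#   echo "Node n1 has interface eth0"
#   echo " network 10.0.0.0/24"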
|
||||
|
74 daemon/examples/netns/basicrange.py Executable file
@ -0,0 +1,74 @@
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c)2011-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# Test 3D range calculation of the BasicRangeModel by adding n nodes to a WLAN
|
||||
# stacked 100 units above each other (using z-axis).
|
||||
#
|
||||
|
||||
|
||||
import optparse, sys, os, datetime, time
|
||||
|
||||
from core import pycore
|
||||
from core.misc import ipaddr
|
||||
from core.misc.utils import mutecall
|
||||
from core.mobility import BasicRangeModel
|
||||
from core.netns.vnet import EbtablesQueue
|
||||
|
||||
def test(numnodes):
|
||||
# node list
|
||||
n = []
|
||||
prefix = ipaddr.IPv4Prefix("10.83.0.0/16")
|
||||
session = pycore.Session(persistent = True)
|
||||
wlanid = numnodes + 1
|
||||
net = session.addobj(cls = pycore.nodes.WlanNode, name = "wlan%d" % wlanid,
|
||||
objid = wlanid, verbose = True)
|
||||
net.setmodel(BasicRangeModel, BasicRangeModel.getdefaultvalues())
|
||||
for i in xrange(1, numnodes + 1):
|
||||
tmp = session.addobj(cls = pycore.nodes.LxcNode, name = "n%d" % i,
|
||||
objid = i)
|
||||
tmp.newnetif(net, ["%s/%s" % (prefix.addr(i), prefix.prefixlen)])
|
||||
# set increasing Z coordinates
|
||||
tmp.setposition(10, 10, 100*i)
|
||||
n.append(tmp)
|
||||
|
||||
n[0].term("bash")
|
||||
# wait for rate seconds to allow ebtables commands to commit
|
||||
time.sleep(EbtablesQueue.rate)
|
||||
#session.shutdown()
|
||||
|
||||
def main():
|
||||
usagestr = "usage: %prog [-h] [options] [args]"
|
||||
parser = optparse.OptionParser(usage = usagestr)
|
||||
|
||||
parser.set_defaults(numnodes = 2)
|
||||
parser.add_option("-n", "--numnodes", dest = "numnodes", type = int,
|
||||
help = "number of nodes to test; default = %s" %
|
||||
parser.defaults["numnodes"])
|
||||
|
||||
def usage(msg = None, err = 0):
|
||||
sys.stdout.write("\n")
|
||||
if msg:
|
||||
sys.stdout.write(msg + "\n\n")
|
||||
parser.print_help()
|
||||
sys.exit(err)
|
||||
|
||||
# parse command line options
|
||||
(options, args) = parser.parse_args()
|
||||
|
||||
if options.numnodes < 2:
|
||||
usage("invalid number of nodes: %s" % options.numnodes)
|
||||
|
||||
for a in args:
|
||||
sys.stderr.write("ignoring command line argument: '%s'\n" % a)
|
||||
|
||||
start = datetime.datetime.now()
|
||||
|
||||
test(options.numnodes)
|
||||
|
||||
print >> sys.stderr, \
|
||||
"elapsed time: %s" % (datetime.datetime.now() - start)
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
99 daemon/examples/netns/emane80211.py Executable file
@ -0,0 +1,99 @@
#!/usr/bin/python -i
|
||||
|
||||
# Copyright (c)2010-2013 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
|
||||
# Example CORE Python script that attaches N nodes to an EMANE 802.11abg
|
||||
# network. One of the parameters is changed, the pathloss mode.
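# This script may be run directly (typically as root; the -i interpreter flag above
# leaves an interactive prompt) or through the core-daemon, in which case
# add_to_server() below registers the session with the running server.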
|
||||
|
||||
import sys, datetime, optparse
|
||||
|
||||
from core import pycore
|
||||
from core.misc import ipaddr
|
||||
from core.constants import *
|
||||
from core.emane.ieee80211abg import EmaneIeee80211abgModel
|
||||
|
||||
# node list (count from 1)
|
||||
n = [None]
|
||||
|
||||
def add_to_server(session):
|
||||
''' Add this session to the server's list if this script is executed from
|
||||
the core-daemon server.
|
||||
'''
|
||||
global server
|
||||
try:
|
||||
server.addsession(session)
|
||||
return True
|
||||
except NameError:
|
||||
return False
|
||||
|
||||
def main():
|
||||
usagestr = "usage: %prog [-h] [options] [args]"
|
||||
parser = optparse.OptionParser(usage = usagestr)
|
||||
parser.set_defaults(numnodes = 5)
|
||||
|
||||
parser.add_option("-n", "--numnodes", dest = "numnodes", type = int,
|
||||
help = "number of nodes")
|
||||
|
||||
def usage(msg = None, err = 0):
|
||||
sys.stdout.write("\n")
|
||||
if msg:
|
||||
sys.stdout.write(msg + "\n\n")
|
||||
parser.print_help()
|
||||
sys.exit(err)
|
||||
|
||||
# parse command line options
|
||||
(options, args) = parser.parse_args()
|
||||
|
||||
if options.numnodes < 1:
|
||||
usage("invalid number of nodes: %s" % options.numnodes)
|
||||
|
||||
for a in args:
|
||||
sys.stderr.write("ignoring command line argument: '%s'\n" % a)
|
||||
|
||||
start = datetime.datetime.now()
|
||||
|
||||
# IP subnet
|
||||
prefix = ipaddr.IPv4Prefix("10.83.0.0/16")
|
||||
# session with some EMANE initialization
|
||||
session = pycore.Session(persistent=True)
|
||||
session.master = True
|
||||
session.location.setrefgeo(47.57917,-122.13232,2.00000)
|
||||
session.location.refscale = 150.0
|
||||
session.cfg['emane_models'] = "RfPipe, Ieee80211abg, Bypass, AtdlOmni"
|
||||
session.emane.loadmodels()
|
||||
add_to_server(session)
|
||||
|
||||
# EMANE WLAN
|
||||
print "creating EMANE WLAN wlan1"
|
||||
wlan = session.addobj(cls = pycore.nodes.EmaneNode, name = "wlan1")
|
||||
wlan.setposition(x=80,y=50)
|
||||
names = EmaneIeee80211abgModel.getnames()
|
||||
values = list(EmaneIeee80211abgModel.getdefaultvalues())
|
||||
# TODO: change any of the EMANE 802.11 parameter values here
|
||||
values[ names.index('pathlossmode') ] = 'pathloss'
|
||||
session.emane.setconfig(wlan.objid, EmaneIeee80211abgModel._name, values)
|
||||
services_str = "zebra|OSPFv3MDR|vtysh|IPForward"
|
||||
|
||||
print "creating %d nodes with addresses from %s" % \
|
||||
(options.numnodes, prefix)
|
||||
for i in xrange(1, options.numnodes + 1):
|
||||
tmp = session.addobj(cls = pycore.nodes.CoreNode, name = "n%d" % i,
|
||||
objid=i)
|
||||
tmp.newnetif(wlan, ["%s/%s" % (prefix.addr(i), prefix.prefixlen)])
|
||||
tmp.cmd([SYSCTL_BIN, "net.ipv4.icmp_echo_ignore_broadcasts=0"])
|
||||
tmp.setposition(x=150*i,y=150)
|
||||
session.services.addservicestonode(tmp, "", services_str, verbose=False)
|
||||
n.append(tmp)
|
||||
|
||||
# this starts EMANE, etc.
|
||||
session.instantiate()
|
||||
|
||||
# start a shell on node 1
|
||||
n[1].term("bash")
|
||||
|
||||
print "elapsed time: %s" % (datetime.datetime.now() - start)
|
||||
|
||||
if __name__ == "__main__" or __name__ == "__builtin__":
|
||||
main()
|
||||
|
209 daemon/examples/netns/howmanynodes.py Executable file
@ -0,0 +1,209 @@
#!/usr/bin/python
|
||||
|
||||
# Copyright (c)2010-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
howmanynodes.py - This is a CORE script that creates network namespace nodes
|
||||
having one virtual Ethernet interface connected to a bridge. It continues to
|
||||
add nodes until an exception occurs. The number of nodes per bridge can be
|
||||
specified.
|
||||
'''
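# Example run (illustrative values; typically executed as root): create at most
# 500 nodes, 32 per bridge, logging memory statistics to a CSV file:
#   python howmanynodes.py -n 500 -b 32 -l /tmp/howmanynodes.csv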
|
||||
|
||||
import optparse, sys, os, datetime, time, shutil
|
||||
try:
|
||||
from core import pycore
|
||||
except ImportError:
|
||||
# hack for Fedora autoconf that uses the following pythondir:
|
||||
if "/usr/lib/python2.6/site-packages" in sys.path:
|
||||
sys.path.append("/usr/local/lib/python2.6/site-packages")
|
||||
if "/usr/lib64/python2.6/site-packages" in sys.path:
|
||||
sys.path.append("/usr/local/lib64/python2.6/site-packages")
|
||||
if "/usr/lib/python2.7/site-packages" in sys.path:
|
||||
sys.path.append("/usr/local/lib/python2.7/site-packages")
|
||||
if "/usr/lib64/python2.7/site-packages" in sys.path:
|
||||
sys.path.append("/usr/local/lib64/python2.7/site-packages")
|
||||
from core import pycore
|
||||
from core.misc import ipaddr
|
||||
from core.constants import *
|
||||
|
||||
GBD = 1024.0 * 1024.0
|
||||
|
||||
def linuxversion():
|
||||
''' Return a string having the Linux kernel version.
|
||||
'''
|
||||
f = open('/proc/version', 'r')
|
||||
v = f.readline().split()
|
||||
version_str = ' '.join(v[:3])
|
||||
f.close()
|
||||
return version_str
|
||||
|
||||
MEMKEYS = ('total', 'free', 'buff', 'cached', 'stotal', 'sfree')
|
||||
def memfree():
|
||||
''' Returns kilobytes memory [total, free, buff, cached, stotal, sfree].
|
||||
useful stats are:
|
||||
free memory = free + buff + cached
|
||||
swap used = stotal - sfree
|
||||
'''
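# e.g. the returned dict maps each MEMKEYS entry to a kilobyte count, so free
# memory can be computed as kbs['free'] + kbs['buff'] + kbs['cached'] and swap
# in use as kbs['stotal'] - kbs['sfree'].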
|
||||
f = open('/proc/meminfo', 'r')
|
||||
lines = f.readlines()
|
||||
f.close()
|
||||
kbs = {}
|
||||
for k in MEMKEYS:
|
||||
kbs[k] = 0
|
||||
for l in lines:
|
||||
if l[:9] == "MemTotal:":
|
||||
kbs['total'] = int(l.split()[1])
|
||||
elif l[:8] == "MemFree:":
|
||||
kbs['free'] = int(l.split()[1])
|
||||
elif l[:8] == "Buffers:":
|
||||
kbs['buff'] = int(l.split()[1])
|
||||
elif l[:8] == "Cached:":
|
||||
kbs['cached'] = int(l.split()[1])
|
||||
elif l[:10] == "SwapTotal:":
|
||||
kbs['stotal'] = int(l.split()[1])
|
||||
elif l[:9] == "SwapFree:":
|
||||
kbs['sfree'] = int(l.split()[1])
|
||||
break
|
||||
return kbs
|
||||
|
||||
# node list (count from 1)
|
||||
nodelist = [None]
|
||||
switchlist = []
|
||||
|
||||
|
||||
def main():
|
||||
usagestr = "usage: %prog [-h] [options] [args]"
|
||||
parser = optparse.OptionParser(usage = usagestr)
|
||||
parser.set_defaults(waittime = 0.2, numnodes = 0, bridges = 0, retries = 0,
|
||||
logfile = None, services = None)
|
||||
|
||||
parser.add_option("-w", "--waittime", dest = "waittime", type = float,
|
||||
help = "number of seconds to wait between node creation" \
|
||||
" (default = %s)" % parser.defaults["waittime"])
|
||||
parser.add_option("-n", "--numnodes", dest = "numnodes", type = int,
|
||||
help = "number of nodes (default = unlimited)")
|
||||
parser.add_option("-b", "--bridges", dest = "bridges", type = int,
|
||||
help = "number of nodes per bridge; 0 = one bridge " \
|
||||
"(def. = %s)" % parser.defaults["bridges"])
|
||||
parser.add_option("-r", "--retry", dest = "retries", type = int,
|
||||
help = "number of retries on error (default = %s)" % \
|
||||
parser.defaults["retries"])
|
||||
parser.add_option("-l", "--log", dest = "logfile", type = str,
|
||||
help = "log memory usage to this file (default = %s)" % \
|
||||
parser.defaults["logfile"])
|
||||
parser.add_option("-s", "--services", dest = "services", type = str,
|
||||
help = "pipe-delimited list of services added to each " \
|
||||
"node (default = %s)\n(Example: 'zebra|OSPFv2|OSPFv3|" \
|
||||
"vtysh|IPForward')" % parser.defaults["services"])
|
||||
|
||||
def usage(msg = None, err = 0):
|
||||
sys.stdout.write("\n")
|
||||
if msg:
|
||||
sys.stdout.write(msg + "\n\n")
|
||||
parser.print_help()
|
||||
sys.exit(err)
|
||||
|
||||
(options, args) = parser.parse_args()
|
||||
|
||||
for a in args:
|
||||
sys.stderr.write("ignoring command line argument: '%s'\n" % a)
|
||||
|
||||
start = datetime.datetime.now()
|
||||
prefix = ipaddr.IPv4Prefix("10.83.0.0/16")
|
||||
|
||||
print "Testing how many network namespace nodes this machine can create."
|
||||
print " - %s" % linuxversion()
|
||||
mem = memfree()
|
||||
print " - %.02f GB total memory (%.02f GB swap)" % \
|
||||
(mem['total']/GBD, mem['stotal']/GBD)
|
||||
print " - using IPv4 network prefix %s" % prefix
|
||||
print " - using wait time of %s" % options.waittime
|
||||
print " - using %d nodes per bridge" % options.bridges
|
||||
print " - will retry %d times on failure" % options.retries
|
||||
print " - adding these services to each node: %s" % options.services
|
||||
print " "
|
||||
|
||||
lfp = None
|
||||
if options.logfile is not None:
|
||||
# initialize a csv log file header
|
||||
lfp = open(options.logfile, "a")
|
||||
lfp.write("# log from howmanynodes.py %s\n" % time.ctime())
|
||||
lfp.write("# options = %s\n#\n" % options)
|
||||
lfp.write("# numnodes,%s\n" % ','.join(MEMKEYS))
|
||||
lfp.flush()
|
||||
|
||||
session = pycore.Session(persistent=True)
|
||||
switch = session.addobj(cls = pycore.nodes.SwitchNode)
|
||||
switchlist.append(switch)
|
||||
print "Added bridge %s (%d)." % (switch.brname, len(switchlist))
|
||||
|
||||
i = 0
|
||||
retry_count = options.retries
|
||||
while True:
|
||||
i += 1
|
||||
# optionally add a bridge (options.bridges nodes per bridge)
|
||||
try:
|
||||
if options.bridges > 0 and switch.numnetif() >= options.bridges:
|
||||
switch = session.addobj(cls = pycore.nodes.SwitchNode)
|
||||
switchlist.append(switch)
|
||||
print "\nAdded bridge %s (%d) for node %d." % \
|
||||
(switch.brname, len(switchlist), i)
|
||||
except Exception, e:
|
||||
print "At %d bridges (%d nodes) caught exception:\n%s\n" % \
|
||||
(len(switchlist), i-1, e)
|
||||
break
|
||||
# create a node
|
||||
try:
|
||||
n = session.addobj(cls = pycore.nodes.LxcNode, name = "n%d" % i)
|
||||
n.newnetif(switch, ["%s/%s" % (prefix.addr(i), prefix.prefixlen)])
|
||||
n.cmd([SYSCTL_BIN, "net.ipv4.icmp_echo_ignore_broadcasts=0"])
|
||||
if options.services is not None:
|
||||
session.services.addservicestonode(n, "", options.services,
|
||||
verbose=False)
|
||||
n.boot()
|
||||
nodelist.append(n)
|
||||
if i % 25 == 0:
|
||||
print "\n%s nodes created " % i,
|
||||
mem = memfree()
|
||||
free = mem['free'] + mem['buff'] + mem['cached']
|
||||
swap = mem['stotal'] - mem['sfree']
|
||||
print "(%.02f/%.02f GB free/swap)" % (free/GBD , swap/GBD),
|
||||
if lfp:
|
||||
lfp.write("%d," % i)
|
||||
lfp.write("%s\n" % ','.join(str(mem[x]) for x in MEMKEYS))
|
||||
lfp.flush()
|
||||
else:
|
||||
sys.stdout.write(".")
|
||||
sys.stdout.flush()
|
||||
time.sleep(options.waittime)
|
||||
except Exception, e:
|
||||
print "At %d nodes caught exception:\n" % i, e
|
||||
if retry_count > 0:
|
||||
print "\nWill retry creating node %d." % i
|
||||
shutil.rmtree(n.nodedir, ignore_errors = True)
|
||||
retry_count -= 1
|
||||
i -= 1
|
||||
time.sleep(options.waittime)
|
||||
continue
|
||||
else:
|
||||
print "Stopping at %d nodes!" % i
|
||||
break
|
||||
|
||||
if i == options.numnodes:
|
||||
print "Stopping at %d nodes due to numnodes option." % i
|
||||
break
|
||||
# node creation was successful at this point
|
||||
retry_count = options.retries
|
||||
|
||||
if lfp:
|
||||
lfp.flush()
|
||||
lfp.close()
|
||||
|
||||
print "elapsed time: %s" % (datetime.datetime.now() - start)
|
||||
print "Use the core-cleanup script to remove nodes and bridges."
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
109 daemon/examples/netns/iperf-performance-chain.py Executable file
@ -0,0 +1,109 @@
#!/usr/bin/python
|
||||
|
||||
# Copyright (c)2013 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
|
||||
# This script creates a CORE session, that will connect n nodes together
|
||||
# in a chain, with static routes between nodes
|
||||
# number of nodes / number of hops
|
||||
# 2 0
|
||||
# 3 1
|
||||
# 4 2
|
||||
# n n - 2
|
||||
#
|
||||
# Use core-cleanup to clean up after this script as the session is left running.
|
||||
#
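# Example (hypothetical run): 'python iperf-performance-chain.py -n 5' builds a
# 5-node chain (3 hops); iperf-performance.sh invokes this script the same way
# from its chaintest() function.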
|
||||
|
||||
import sys, datetime, optparse
|
||||
|
||||
from core import pycore
|
||||
from core.misc import ipaddr
|
||||
from core.constants import *
|
||||
|
||||
# node list (count from 1)
|
||||
n = [None]
|
||||
|
||||
def add_to_server(session):
|
||||
''' Add this session to the server's list if this script is executed from
|
||||
the core-daemon server.
|
||||
'''
|
||||
global server
|
||||
try:
|
||||
server.addsession(session)
|
||||
return True
|
||||
except NameError:
|
||||
return False
|
||||
|
||||
def main():
|
||||
usagestr = "usage: %prog [-h] [options] [args]"
|
||||
parser = optparse.OptionParser(usage = usagestr)
|
||||
parser.set_defaults(numnodes = 5)
|
||||
|
||||
parser.add_option("-n", "--numnodes", dest = "numnodes", type = int,
|
||||
help = "number of nodes")
|
||||
|
||||
def usage(msg = None, err = 0):
|
||||
sys.stdout.write("\n")
|
||||
if msg:
|
||||
sys.stdout.write(msg + "\n\n")
|
||||
parser.print_help()
|
||||
sys.exit(err)
|
||||
|
||||
# parse command line options
|
||||
(options, args) = parser.parse_args()
|
||||
|
||||
if options.numnodes < 1:
|
||||
usage("invalid number of nodes: %s" % options.numnodes)
|
||||
|
||||
if options.numnodes >= 255:
|
||||
usage("invalid number of nodes: %s" % options.numnodes)
|
||||
|
||||
for a in args:
|
||||
sys.stderr.write("ignoring command line argument: '%s'\n" % a)
|
||||
|
||||
start = datetime.datetime.now()
|
||||
|
||||
session = pycore.Session(persistent=True)
|
||||
add_to_server(session)
|
||||
print "creating %d nodes" % options.numnodes
|
||||
left = None
|
||||
prefix = None
|
||||
for i in xrange(1, options.numnodes + 1):
|
||||
tmp = session.addobj(cls = pycore.nodes.CoreNode, name = "n%d" % i,
|
||||
objid=i)
|
||||
if left:
|
||||
tmp.newnetif(left, ["%s/%s" % (prefix.addr(2), prefix.prefixlen)])
|
||||
|
||||
prefix = ipaddr.IPv4Prefix("10.83.%d.0/24" % i) # limit: i < 255
|
||||
right = session.addobj(cls = pycore.nodes.PtpNet)
|
||||
tmp.newnetif(right, ["%s/%s" % (prefix.addr(1), prefix.prefixlen)])
|
||||
tmp.cmd([SYSCTL_BIN, "net.ipv4.icmp_echo_ignore_broadcasts=0"])
|
||||
tmp.cmd([SYSCTL_BIN, "net.ipv4.conf.all.forwarding=1"])
|
||||
tmp.cmd([SYSCTL_BIN, "net.ipv4.conf.default.rp_filter=0"])
|
||||
tmp.setposition(x=100*i,y=150)
|
||||
n.append(tmp)
|
||||
left = right
|
||||
|
||||
prefixes = map(lambda(x): ipaddr.IPv4Prefix("10.83.%d.0/24" % x),
|
||||
xrange(1, options.numnodes + 1))
|
||||
|
||||
# set up static routing in the chain
|
||||
for i in xrange(1, options.numnodes + 1):
|
||||
for j in xrange(1, options.numnodes + 1):
|
||||
if j < i - 1:
|
||||
gw = prefixes[i-2].addr(1)
|
||||
elif j > i:
|
||||
if i > len(prefixes) - 1:
|
||||
continue
|
||||
gw = prefixes[i-1].addr(2)
|
||||
else:
|
||||
continue
|
||||
net = prefixes[j-1]
|
||||
n[i].cmd([IP_BIN, "route", "add", str(net), "via", str(gw)])
|
||||
|
||||
|
||||
print "elapsed time: %s" % (datetime.datetime.now() - start)
|
||||
|
||||
if __name__ == "__main__" or __name__ == "__builtin__":
|
||||
main()
|
||||
|
284 daemon/examples/netns/iperf-performance.sh Executable file
@ -0,0 +1,284 @@
#!/bin/sh
|
||||
#
|
||||
# iperf-performance.sh
|
||||
#
|
||||
# (c)2013 the Boeing Company
|
||||
# authors: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
# Utility script to automate several iperf runs.
|
||||
#
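# Usage: run as root with no arguments (see the checks below); per-run CSV results
# are appended to ${LOG} and moved to ${PWD}/${0}-${STAMP}.log when all tests finish.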
|
||||
|
||||
# number of iperf runs per test
|
||||
NUMRUNS=10
|
||||
# number of seconds per run (10s is iperf default)
|
||||
RUNTIME=10
|
||||
# logging
|
||||
LOG=/tmp/${0}.log
|
||||
STAMP=`date +%Y%m%d%H%M%S`
|
||||
|
||||
#
|
||||
# client---(loopback)---server
|
||||
#
|
||||
loopbacktest () {
|
||||
killall iperf 2> /dev/null
|
||||
|
||||
echo ">> loopback iperf test"
|
||||
echo "loopback" > ${LOG}
|
||||
|
||||
# start an iperf server in the background
|
||||
# -s = server
|
||||
# -y c = CSV output
|
||||
echo "starting local iperf server"
|
||||
iperf -s -y c >> ${LOG} &
|
||||
|
||||
# run an iperf client NUMRUNS times
|
||||
i=1
|
||||
while [ $i -le $NUMRUNS ]; do
|
||||
echo "run $i/$NUMRUNS:"
|
||||
iperf -t ${RUNTIME} -c localhost
|
||||
sleep 0.3
|
||||
i=$((i+1))
|
||||
done
|
||||
|
||||
sleep 1
|
||||
echo "stopping local iperf server"
|
||||
killall -v iperf
|
||||
|
||||
}
|
||||
|
||||
#
|
||||
# lxc1( client )---veth-pair---lxc2( server )
|
||||
#
|
||||
lxcvethtest () {
|
||||
SERVERIP=10.0.0.1
|
||||
CLIENTIP=10.0.0.2
|
||||
SERVER=/tmp/${0}-server
|
||||
CLIENT=/tmp/${0}-client
|
||||
|
||||
echo ">> lxc veth iperf test"
|
||||
echo "lxcveth" >> ${LOG}
|
||||
|
||||
echo "starting lxc iperf server"
|
||||
vnoded -l $SERVER.log -p $SERVER.pid -c $SERVER
|
||||
ip link add name veth0.1 type veth peer name veth0
|
||||
ip link set veth0 netns `cat $SERVER.pid`
|
||||
vcmd -c $SERVER -- ifconfig veth0 $SERVERIP/24
|
||||
vcmd -c $SERVER -- iperf -s -y c >> ${LOG} &
|
||||
|
||||
echo "starting lxc iperf client"
|
||||
vnoded -l $CLIENT.log -p $CLIENT.pid -c $CLIENT
|
||||
ip link set veth0.1 netns `cat $CLIENT.pid`
|
||||
vcmd -c $CLIENT -- ifconfig veth0.1 $CLIENTIP/24
|
||||
|
||||
i=1
|
||||
while [ $i -le $NUMRUNS ]; do
|
||||
echo "run $i/$NUMRUNS:"
|
||||
vcmd -c $CLIENT -- iperf -t ${RUNTIME} -c ${SERVERIP}
|
||||
sleep 0.3
|
||||
i=$((i+1))
|
||||
done
|
||||
|
||||
sleep 1
|
||||
echo "stopping lxc iperf server"
|
||||
vcmd -c $SERVER -- killall -v iperf
|
||||
echo "stopping containers"
|
||||
kill -9 `cat $SERVER.pid`
|
||||
kill -9 `cat $CLIENT.pid`
|
||||
|
||||
echo "cleaning up"
|
||||
rm -f ${SERVER}*
|
||||
rm -f ${CLIENT}*
|
||||
}
|
||||
|
||||
#
|
||||
# lxc1( client veth:):veth---bridge---veth:(:veth server )lxc2
|
||||
#
|
||||
lxcbrtest () {
|
||||
SERVERIP=10.0.0.1
|
||||
CLIENTIP=10.0.0.2
|
||||
SERVER=/tmp/${0}-server
|
||||
CLIENT=/tmp/${0}-client
|
||||
BRIDGE="lxcbrtest"
|
||||
|
||||
echo ">> lxc bridge iperf test"
|
||||
echo "lxcbr" >> ${LOG}
|
||||
|
||||
echo "building bridge"
|
||||
brctl addbr $BRIDGE
|
||||
brctl stp $BRIDGE off # disable spanning tree protocol
|
||||
brctl setfd $BRIDGE 0 # disable forwarding delay
|
||||
ip link set $BRIDGE up
|
||||
|
||||
echo "starting lxc iperf server"
|
||||
vnoded -l $SERVER.log -p $SERVER.pid -c $SERVER
|
||||
ip link add name veth0.1 type veth peer name veth0
|
||||
ip link set veth0 netns `cat $SERVER.pid`
|
||||
vcmd -c $SERVER -- ifconfig veth0 $SERVERIP/24
|
||||
brctl addif $BRIDGE veth0.1
|
||||
ip link set veth0.1 up
|
||||
vcmd -c $SERVER -- iperf -s -y c >> ${LOG} &
|
||||
|
||||
echo "starting lxc iperf client"
|
||||
vnoded -l $CLIENT.log -p $CLIENT.pid -c $CLIENT
|
||||
ip link add name veth1.1 type veth peer name veth1
|
||||
ip link set veth1 netns `cat $CLIENT.pid`
|
||||
vcmd -c $CLIENT -- ifconfig veth1 $CLIENTIP/24
|
||||
brctl addif $BRIDGE veth1.1
|
||||
ip link set veth1.1 up
|
||||
|
||||
i=1
|
||||
while [ $i -le $NUMRUNS ]; do
|
||||
echo "run $i/$NUMRUNS:"
|
||||
vcmd -c $CLIENT -- iperf -t ${RUNTIME} -c ${SERVERIP}
|
||||
sleep 0.3
|
||||
i=$((i+1))
|
||||
done
|
||||
|
||||
sleep 1
|
||||
echo "stopping lxc iperf server"
|
||||
vcmd -c $SERVER -- killall -v iperf
|
||||
echo "stopping containers"
|
||||
kill -9 `cat $SERVER.pid`
|
||||
kill -9 `cat $CLIENT.pid`
|
||||
|
||||
echo "cleaning up"
|
||||
ip link set $BRIDGE down
|
||||
brctl delbr $BRIDGE
|
||||
rm -f ${SERVER}*
|
||||
rm -f ${CLIENT}*
|
||||
}
|
||||
|
||||
#
|
||||
# n1---n2---n3--- ... ---nN
|
||||
# N nodes (N-2 hops) in chain with static routing
|
||||
#
|
||||
chaintest () {
|
||||
NUMNODES=$1
|
||||
SERVERIP=10.83.$NUMNODES.1
|
||||
|
||||
if [ -d /tmp/pycore.* ]; then
|
||||
echo "/tmp/pycore.* already exists, skipping chaintest $NUMNODES"
|
||||
return
|
||||
fi
|
||||
|
||||
echo ">> n=$NUMNODES node chain iperf test"
|
||||
echo "chain$NUMNODES" >> ${LOG}
|
||||
|
||||
echo "running external chain CORE script with '-n $NUMNODES'"
|
||||
python iperf-performance-chain.py -n $NUMNODES
|
||||
|
||||
echo "starting lxc iperf server on node $NUMNODES"
|
||||
vcmd -c /tmp/pycore.*/n$NUMNODES -- iperf -s -y c >> ${LOG} &
|
||||
|
||||
echo "starting lxc iperf client"
|
||||
i=1
|
||||
while [ $i -le $NUMRUNS ]; do
|
||||
echo "run $i/$NUMRUNS:"
|
||||
vcmd -c /tmp/pycore.*/n1 -- iperf -t ${RUNTIME} -c ${SERVERIP}
|
||||
sleep 0.3
|
||||
i=$((i+1))
|
||||
done
|
||||
|
||||
sleep 1
|
||||
echo "stopping lxc iperf server"
|
||||
vcmd -c /tmp/pycore.*/n$NUMNODES -- killall -v iperf
|
||||
echo "cleaning up"
|
||||
core-cleanup
|
||||
}
|
||||
if [ "z$1" != "z" ]; then
|
||||
echo "This script takes no parameters and must be run as root."
|
||||
exit 1
|
||||
fi
|
||||
if [ `id -u` != 0 ]; then
|
||||
echo "This script must be run as root."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
|
||||
#
|
||||
# N lxc clients >---bridge---veth:(:veth server )
|
||||
#
|
||||
clientstest () {
|
||||
NUMCLIENTS=$1
|
||||
SERVERIP=10.0.0.1
|
||||
SERVER=/tmp/${0}-server
|
||||
BRIDGE="lxcbrtest"
|
||||
|
||||
echo ">> n=$NUMCLIENTS clients iperf test"
|
||||
echo "clients$NUMCLIENTS" >> ${LOG}
|
||||
|
||||
echo "building bridge"
|
||||
brctl addbr $BRIDGE
|
||||
brctl stp $BRIDGE off # disable spanning tree protocol
|
||||
brctl setfd $BRIDGE 0 # disable forwarding delay
|
||||
ip link set $BRIDGE up
|
||||
|
||||
echo "starting lxc iperf server"
|
||||
vnoded -l $SERVER.log -p $SERVER.pid -c $SERVER
|
||||
ip link add name veth0.1 type veth peer name veth0
|
||||
ip link set veth0 netns `cat $SERVER.pid`
|
||||
vcmd -c $SERVER -- ifconfig veth0 $SERVERIP/24
|
||||
brctl addif $BRIDGE veth0.1
|
||||
ip link set veth0.1 up
|
||||
vcmd -c $SERVER -- iperf -s -y c >> ${LOG} &
|
||||
|
||||
i=1
|
||||
CLIENTS=""
|
||||
while [ $i -le $NUMCLIENTS ]; do
|
||||
echo "starting lxc iperf client $i/$NUMCLIENTS"
|
||||
CLIENT=/tmp/${0}-client$i
|
||||
CLIENTIP=10.0.0.1$i
|
||||
vnoded -l $CLIENT.log -p $CLIENT.pid -c $CLIENT
|
||||
ip link add name veth1.$i type veth peer name veth1
|
||||
ip link set veth1 netns `cat $CLIENT.pid`
|
||||
vcmd -c $CLIENT -- ifconfig veth1 $CLIENTIP/24
|
||||
brctl addif $BRIDGE veth1.$i
|
||||
ip link set veth1.$i up
|
||||
i=$((i+1))
|
||||
CLIENTS="$CLIENTS $CLIENT"
|
||||
done
|
||||
|
||||
j=1
|
||||
while [ $j -le $NUMRUNS ]; do
|
||||
echo "run $j/$NUMRUNS iperf:"
|
||||
for CLIENT in $CLIENTS; do
|
||||
vcmd -c $CLIENT -- iperf -t ${RUNTIME} -c ${SERVERIP} &
|
||||
done
|
||||
sleep ${RUNTIME} 1
|
||||
j=$((j+1))
|
||||
done
|
||||
|
||||
sleep 1
|
||||
echo "stopping lxc iperf server"
|
||||
vcmd -c $SERVER -- killall -v iperf
|
||||
echo "stopping containers"
|
||||
kill -9 `cat $SERVER.pid`
|
||||
for CLIENT in $CLIENTS; do
|
||||
kill -9 `cat $CLIENT.pid`
|
||||
done
|
||||
# time needed for processes/containers to shut down
|
||||
sleep 2
|
||||
|
||||
echo "cleaning up"
|
||||
ip link set $BRIDGE down
|
||||
brctl delbr $BRIDGE
|
||||
rm -f ${SERVER}*
|
||||
rm -f /tmp/${0}-client*
|
||||
# time needed for bridge clean-up
|
||||
sleep 1
|
||||
}
|
||||
|
||||
#
|
||||
# run all tests
|
||||
#
|
||||
loopbacktest
|
||||
lxcvethtest
|
||||
lxcbrtest
|
||||
chaintest 5
|
||||
chaintest 10
|
||||
clientstest 5
|
||||
clientstest 10
|
||||
clientstest 15
|
||||
|
||||
mv ${LOG} ${PWD}/${0}-${STAMP}.log
|
||||
echo "===> results in ${PWD}/${0}-${STAMP}.log"
|
572 daemon/examples/netns/ospfmanetmdrtest.py Executable file
@ -0,0 +1,572 @@
#!/usr/bin/python
|
||||
|
||||
# Copyright (c)2011-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
|
||||
# create a random topology running OSPFv3 MDR, wait and then check
|
||||
# that all neighbor states are either full or two-way, and check the routes
|
||||
# in zebra vs those installed in the kernel.
|
||||
|
||||
import os, sys, random, time, optparse, datetime
|
||||
from string import Template
|
||||
try:
|
||||
from core import pycore
|
||||
except ImportError:
|
||||
# hack for Fedora autoconf that uses the following pythondir:
|
||||
if "/usr/lib/python2.6/site-packages" in sys.path:
|
||||
sys.path.append("/usr/local/lib/python2.6/site-packages")
|
||||
if "/usr/lib64/python2.6/site-packages" in sys.path:
|
||||
sys.path.append("/usr/local/lib64/python2.6/site-packages")
|
||||
if "/usr/lib/python2.7/site-packages" in sys.path:
|
||||
sys.path.append("/usr/local/lib/python2.7/site-packages")
|
||||
if "/usr/lib64/python2.7/site-packages" in sys.path:
|
||||
sys.path.append("/usr/local/lib64/python2.7/site-packages")
|
||||
from core import pycore
|
||||
from core.misc import ipaddr
|
||||
from core.misc.utils import mutecall
|
||||
from core.constants import QUAGGA_STATE_DIR
|
||||
|
||||
# sanity check that zebra is installed
|
||||
try:
|
||||
mutecall(["zebra", "-u", "root", "-g", "root", "-v"])
|
||||
except OSError:
|
||||
sys.stderr.write("ERROR: running zebra failed\n")
|
||||
sys.exit(1)
|
||||
|
||||
class ManetNode(pycore.nodes.LxcNode):
|
||||
""" An Lxc namespace node configured for Quagga OSPFv3 MANET MDR
|
||||
"""
|
||||
conftemp = Template("""\
|
||||
interface eth0
|
||||
ip address $ipaddr
|
||||
ipv6 ospf6 instance-id 65
|
||||
ipv6 ospf6 hello-interval 2
|
||||
ipv6 ospf6 dead-interval 6
|
||||
ipv6 ospf6 retransmit-interval 5
|
||||
ipv6 ospf6 network manet-designated-router
|
||||
ipv6 ospf6 diffhellos
|
||||
ipv6 ospf6 adjacencyconnectivity biconnected
|
||||
ipv6 ospf6 lsafullness mincostlsa
|
||||
!
|
||||
router ospf6
|
||||
router-id $routerid
|
||||
interface eth0 area 0.0.0.0
|
||||
!
|
||||
ip forwarding
|
||||
""")
|
||||
|
||||
def __init__(self, core, ipaddr, routerid = None,
|
||||
objid = None, name = None, nodedir = None):
|
||||
if routerid is None:
|
||||
routerid = ipaddr.split("/")[0]
|
||||
self.ipaddr = ipaddr
|
||||
self.routerid = routerid
|
||||
pycore.nodes.LxcNode.__init__(self, core, objid, name, nodedir)
|
||||
self.privatedir(self.confdir)
|
||||
self.privatedir(QUAGGA_STATE_DIR)
|
||||
|
||||
def qconf(self):
|
||||
return self.conftemp.substitute(ipaddr = self.ipaddr,
|
||||
routerid = self.routerid)
|
||||
|
||||
def config(self):
|
||||
filename = os.path.join(self.confdir, "Quagga.conf")
|
||||
f = self.opennodefile(filename, "w")
|
||||
f.write(self.qconf())
|
||||
f.close()
|
||||
pycore.nodes.LxcNode.config(self)
|
||||
|
||||
def bootscript(self):
|
||||
return """\
|
||||
#!/bin/sh -e
|
||||
|
||||
STATEDIR=%s
|
||||
|
||||
waitfile()
|
||||
{
|
||||
fname=$1
|
||||
|
||||
i=0
|
||||
until [ -e $fname ]; do
|
||||
i=$(($i + 1))
|
||||
if [ $i -eq 10 ]; then
|
||||
echo "file not found: $fname" >&2
|
||||
exit 1
|
||||
fi
|
||||
sleep 0.1
|
||||
done
|
||||
}
|
||||
|
||||
mkdir -p $STATEDIR
|
||||
|
||||
zebra -d -u root -g root
|
||||
waitfile $STATEDIR/zebra.vty
|
||||
|
||||
ospf6d -d -u root -g root
|
||||
waitfile $STATEDIR/ospf6d.vty
|
||||
|
||||
vtysh -b
|
||||
""" % QUAGGA_STATE_DIR
|
||||
|
||||
class Route(object):
|
||||
""" Helper class for organzing routing table entries. """
|
||||
def __init__(self, prefix = None, gw = None, metric = None):
|
||||
try:
|
||||
self.prefix = ipaddr.IPv4Prefix(prefix)
|
||||
except Exception, e:
|
||||
raise ValueError, "Invalid prefix given to Route object: %s\n%s" % \
|
||||
(prefix, e)
|
||||
self.gw = gw
|
||||
self.metric = metric
|
||||
|
||||
def __eq__(self, other):
|
||||
try:
|
||||
return self.prefix == other.prefix and self.gw == other.gw and \
|
||||
self.metric == other.metric
|
||||
except:
|
||||
return False
|
||||
|
||||
def __str__(self):
|
||||
return "(%s,%s,%s)" % (self.prefix, self.gw, self.metric)
|
||||
|
||||
@staticmethod
|
||||
def key(r):
|
||||
if not r.prefix:
|
||||
return 0
|
||||
return r.prefix.prefix
|
||||
|
||||
|
||||
class ManetExperiment(object):
|
||||
""" A class for building an MDR network and checking and logging its state.
|
||||
"""
|
||||
def __init__(self, options, start):
|
||||
""" Initialize with options and start time. """
|
||||
self.session = None
|
||||
# node list
|
||||
self.nodes = []
|
||||
# WLAN network
|
||||
self.net = None
|
||||
self.verbose = options.verbose
|
||||
# dict from OptionParser
|
||||
self.options = options
|
||||
self.start = start
|
||||
self.logbegin()
|
||||
|
||||
def info(self, msg):
|
||||
''' Utility method for writing output to stdout. '''
|
||||
print msg
|
||||
sys.stdout.flush()
|
||||
self.log(msg)
|
||||
|
||||
def warn(self, msg):
|
||||
''' Utility method for writing output to stderr. '''
|
||||
print >> sys.stderr, msg
|
||||
sys.stderr.flush()
|
||||
self.log(msg)
|
||||
|
||||
def logbegin(self):
|
||||
""" Start logging. """
|
||||
self.logfp = None
|
||||
if not self.options.logfile:
|
||||
return
|
||||
self.logfp = open(self.options.logfile, "w")
|
||||
self.log("ospfmanetmdrtest begin: %s\n" % self.start.ctime())
|
||||
|
||||
def logend(self):
|
||||
""" End logging. """
|
||||
if not self.logfp:
|
||||
return
|
||||
end = datetime.datetime.now()
|
||||
self.log("ospfmanetmdrtest end: %s (%s)\n" % \
|
||||
(end.ctime(), end - self.start))
|
||||
self.logfp.flush()
|
||||
self.logfp.close()
|
||||
self.logfp = None
|
||||
|
||||
def log(self, msg):
|
||||
""" Write to the log file, if any. """
|
||||
if not self.logfp:
|
||||
return
|
||||
print >> self.logfp, msg
|
||||
|
||||
def logdata(self, nbrs, mdrs, lsdbs, krs, zrs):
|
||||
""" Dump experiment parameters and data to the log file. """
|
||||
self.log("ospfmantetmdrtest data:")
|
||||
self.log("----- parameters -----")
|
||||
self.log("%s" % self.options)
|
||||
self.log("----- neighbors -----")
|
||||
for rtrid in sorted(nbrs.keys()):
|
||||
self.log("%s: %s" % (rtrid, nbrs[rtrid]))
|
||||
self.log("----- mdr levels -----")
|
||||
self.log(mdrs)
|
||||
self.log("----- link state databases -----")
|
||||
for rtrid in sorted(lsdbs.keys()):
|
||||
self.log("%s lsdb:" % rtrid)
|
||||
for line in lsdbs[rtrid].split("\n"):
|
||||
self.log(line)
|
||||
self.log("----- kernel routes -----")
|
||||
for rtrid in sorted(krs.keys()):
|
||||
msg = rtrid + ": "
|
||||
for rt in krs[rtrid]:
|
||||
msg += "%s" % rt
|
||||
self.log(msg)
|
||||
self.log("----- zebra routes -----")
|
||||
for rtrid in sorted(zrs.keys()):
|
||||
msg = rtrid + ": "
|
||||
for rt in zrs[rtrid]:
|
||||
msg += "%s" % rt
|
||||
self.log(msg)
|
||||
|
||||
def topology(self, numnodes, linkprob, verbose = False):
|
||||
""" Build a topology consisting of the given number of ManetNodes
|
||||
connected to a WLAN, with the given probability of links between them, and set
|
||||
the session, WLAN, and node list objects.
|
||||
"""
|
||||
# IP subnet
|
||||
prefix = ipaddr.IPv4Prefix("10.14.0.0/16")
|
||||
self.session = pycore.Session()
|
||||
# emulated network
|
||||
self.net = self.session.addobj(cls = pycore.nodes.WlanNode)
|
||||
for i in xrange(1, numnodes + 1):
|
||||
addr = "%s/%s" % (prefix.addr(i), 32)
|
||||
tmp = self.session.addobj(cls = ManetNode, ipaddr = addr, name = "n%d" % i)
|
||||
tmp.newnetif(self.net, [addr])
|
||||
self.nodes.append(tmp)
|
||||
# connect nodes with probability linkprob
|
||||
for i in xrange(numnodes):
|
||||
for j in xrange(i + 1, numnodes):
|
||||
r = random.random()
|
||||
if r < linkprob:
|
||||
if self.verbose:
|
||||
self.info("linking (%d,%d)" % (i, j))
|
||||
self.net.link(self.nodes[i].netif(0), self.nodes[j].netif(0))
|
||||
# force one link to avoid partitions (should check if this is needed)
|
||||
j = i
|
||||
while j == i:
|
||||
j = random.randint(0, numnodes - 1)
|
||||
if self.verbose:
|
||||
self.info("linking (%d,%d)" % (i, j))
|
||||
self.net.link(self.nodes[i].netif(0), self.nodes[j].netif(0))
|
||||
self.nodes[i].boot()
|
||||
# run the boot.sh script on all nodes to start Quagga
|
||||
for i in xrange(numnodes):
|
||||
self.nodes[i].cmd(["./%s" % self.nodes[i].bootsh])
|
||||
|
||||
def compareroutes(self, node, kr, zr):
|
||||
""" Compare two lists of Route objects.
|
||||
"""
|
||||
kr.sort(key=Route.key)
|
||||
zr.sort(key=Route.key)
|
||||
if kr != zr:
|
||||
self.warn("kernel and zebra routes differ")
|
||||
if self.verbose:
|
||||
msg = "kernel: "
|
||||
for r in kr:
|
||||
msg += "%s " % r
|
||||
msg += "\nzebra: "
|
||||
for r in zr:
|
||||
msg += "%s " % r
|
||||
self.warn(msg)
|
||||
else:
|
||||
self.info(" kernel and zebra routes match")
|
||||
|
||||
def comparemdrlevels(self, nbrs, mdrs):
|
||||
""" Check that all routers form a connected dominating set, i.e. all
|
||||
routers are either MDR, BMDR, or adjacent to one.
|
||||
"""
|
||||
msg = "All routers form a CDS"
|
||||
for n in self.nodes:
|
||||
if mdrs[n.routerid] != "OTHER":
|
||||
continue
|
||||
connected = False
|
||||
for nbr in nbrs[n.routerid]:
|
||||
if mdrs[nbr] == "MDR" or mdrs[nbr] == "BMDR":
|
||||
connected = True
|
||||
break
|
||||
if not connected:
|
||||
msg = "All routers do not form a CDS"
|
||||
self.warn("XXX %s: not in CDS; neighbors: %s" % \
|
||||
(n.routerid, nbrs[n.routerid]))
|
||||
if self.verbose:
|
||||
self.info(msg)
|
||||
|
||||
def comparelsdbs(self, lsdbs):
|
||||
""" Check LSDBs for consistency.
|
||||
"""
|
||||
msg = "LSDBs of all routers are consistent"
|
||||
prev = self.nodes[0]
|
||||
for n in self.nodes:
|
||||
db = lsdbs[n.routerid]
|
||||
if lsdbs[prev.routerid] != db:
|
||||
msg = "LSDBs of all routers are not consistent"
|
||||
self.warn("XXX LSDBs inconsistent for %s and %s" % \
|
||||
(n.routerid, prev.routerid))
|
||||
i = 0
|
||||
for entry in lsdbs[n.routerid].split("\n"):
|
||||
preventries = lsdbs[prev.routerid].split("\n")
|
||||
try:
|
||||
preventry = preventries[i]
|
||||
except IndexError:
|
||||
preventry = None
|
||||
if entry != preventry:
|
||||
self.warn("%s: %s" % (n.routerid, entry))
|
||||
self.warn("%s: %s" % (prev.routerid, preventry))
|
||||
i += 1
|
||||
prev = n
|
||||
if self.verbose:
|
||||
self.info(msg)
|
||||
|
||||
def checknodes(self):
|
||||
""" Check the neighbor state and routing tables of all nodes. """
|
||||
nbrs = {}
|
||||
mdrs = {}
|
||||
lsdbs = {}
|
||||
krs = {}
|
||||
zrs = {}
|
||||
v = self.verbose
|
||||
for n in self.nodes:
|
||||
self.info("checking %s" % n.name)
|
||||
nbrs[n.routerid] = Ospf6NeighState(n, verbose=v).run()
|
||||
krs[n.routerid] = KernelRoutes(n, verbose=v).run()
|
||||
zrs[n.routerid] = ZebraRoutes(n, verbose=v).run()
|
||||
self.compareroutes(n, krs[n.routerid], zrs[n.routerid])
|
||||
mdrs[n.routerid] = Ospf6MdrLevel(n, verbose=v).run()
|
||||
lsdbs[n.routerid] = Ospf6Database(n, verbose=v).run()
|
||||
self.comparemdrlevels(nbrs, mdrs)
|
||||
self.comparelsdbs(lsdbs)
|
||||
self.logdata(nbrs, mdrs, lsdbs, krs, zrs)
|
||||
|
||||
class Cmd:
|
||||
""" Helper class for running a command on a node and parsing the result. """
|
||||
args = ""
|
||||
def __init__(self, node, verbose=False):
|
||||
""" Initialize with a CoreNode (LxcNode) """
|
||||
self.id = None
|
||||
self.stdin = None
|
||||
self.out = None
|
||||
self.node = node
|
||||
self.verbose = verbose
|
||||
|
||||
def info(self, msg):
|
||||
''' Utility method for writing output to stdout.'''
|
||||
print msg
|
||||
sys.stdout.flush()
|
||||
|
||||
def warn(self, msg):
|
||||
''' Utility method for writing output to stderr. '''
|
||||
print >> sys.stderr, "XXX %s:" % self.node.routerid, msg
|
||||
sys.stderr.flush()
|
||||
|
||||
def run(self):
|
||||
""" This is the primary method used for running this command. """
|
||||
self.open()
|
||||
r = self.parse()
|
||||
self.cleanup()
|
||||
return r
|
||||
|
||||
def open(self):
|
||||
""" Exceute call to node.popen(). """
|
||||
self.id, self.stdin, self.out, self.err = \
|
||||
self.node.popen((self.args))
|
||||
|
||||
def parse(self):
|
||||
""" This method is overloaded by child classes and should return some
|
||||
result.
|
||||
"""
|
||||
return None
|
||||
|
||||
def cleanup(self):
|
||||
""" Close the Popen channels."""
|
||||
self.stdin.close()
|
||||
self.out.close()
|
||||
self.err.close()
|
||||
tmp = self.id.wait()
|
||||
if tmp:
|
||||
self.warn("nonzero exit status:", tmp)
|
||||
|
||||
class VtyshCmd(Cmd):
|
||||
""" Runs a vtysh command. """
|
||||
def open(self):
|
||||
args = ("vtysh", "-c", self.args)
|
||||
self.id, self.stdin, self.out, self.err = self.node.popen((args))
|
||||
|
||||
class Ospf6NeighState(VtyshCmd):
|
||||
""" Check a node for OSPFv3 neighbors in the full/two-way states. """
|
||||
args = "show ipv6 ospf6 neighbor"
|
||||
|
||||
def parse(self):
|
||||
self.out.readline() # skip first line
|
||||
nbrlist = []
|
||||
for line in self.out:
|
||||
field = line.split()
|
||||
nbr = field[0]
|
||||
state = field[3].split("/")[0]
|
||||
if not state.lower() in ("full", "twoway"):
|
||||
self.warn("neighbor %s state: %s" % (nbr, state))
|
||||
nbrlist.append(nbr)
|
||||
|
||||
if len(nbrlist) == 0:
|
||||
self.warn("no neighbors")
|
||||
if self.verbose:
|
||||
self.info(" %s has %d neighbors" % (self.node.routerid, len(nbrlist)))
|
||||
return nbrlist
|
||||
|
||||
class Ospf6MdrLevel(VtyshCmd):
|
||||
""" Retrieve the OSPFv3 MDR level for a node. """
|
||||
args = "show ipv6 ospf6 mdrlevel"
|
||||
|
||||
def parse(self):
|
||||
line = self.out.readline()
|
||||
# TODO: handle multiple interfaces
|
||||
field = line.split()
|
||||
mdrlevel = field[4]
|
||||
if not mdrlevel in ("MDR", "BMDR", "OTHER"):
|
||||
self.warn("mdrlevel: %s" % mdrlevel)
|
||||
if self.verbose:
|
||||
self.info(" %s is %s" % (self.node.routerid, mdrlevel))
|
||||
return mdrlevel
|
||||
|
||||
class Ospf6Database(VtyshCmd):
|
||||
""" Retrieve the OSPFv3 LSDB summary for a node. """
|
||||
args = "show ipv6 ospf6 database"
|
||||
|
||||
def parse(self):
|
||||
db = ""
|
||||
for line in self.out:
|
||||
field = line.split()
|
||||
if len(field) < 8:
|
||||
continue
|
||||
# filter out Age and Duration columns
|
||||
filtered = field[:3] + field[4:7]
|
||||
db += " ".join(filtered) + "\n"
|
||||
return db
|
||||
|
||||
class ZebraRoutes(VtyshCmd):
|
||||
""" Return a list of Route objects for a node based on its zebra
|
||||
routing table.
|
||||
"""
|
||||
args = "show ip route"
|
||||
|
||||
def parse(self):
|
||||
for i in xrange(0,3):
|
||||
self.out.readline() # skip first three lines
|
||||
r = []
|
||||
prefix = None
|
||||
for line in self.out:
|
||||
field = line.split()
|
||||
# only use OSPFv3 selected FIB routes
|
||||
if field[0][:2] == "o>":
|
||||
prefix = field[1]
|
||||
metric = field[2].split("/")[1][:-1]
|
||||
if field[0][2:] != "*":
|
||||
continue
|
||||
if field[3] == "via":
|
||||
gw = field[4][:-1]
|
||||
else:
|
||||
gw = field[6][:-1]
|
||||
r.append(Route(prefix, gw, metric))
|
||||
prefix = None
|
||||
elif prefix and field[0] == "*":
|
||||
# already have prefix and metric from previous line
|
||||
gw = field[2][:-1]
|
||||
r.append(Route(prefix, gw, metric))
|
||||
prefix = None
|
||||
|
||||
if len(r) == 0:
|
||||
self.warn("no zebra routes")
|
||||
if self.verbose:
|
||||
self.info(" %s has %d zebra routes" % (self.node.routerid, len(r)))
|
||||
return r
|
||||
|
||||
class KernelRoutes(Cmd):
|
||||
""" Return a list of Route objects for a node based on its kernel
|
||||
routing table.
|
||||
"""
|
||||
args = ("/sbin/ip", "route", "show")
|
||||
|
||||
def parse(self):
|
||||
r = []
|
||||
prefix = None
|
||||
for line in self.out:
|
||||
field = line.split()
|
||||
if field[0] == "nexthop":
|
||||
if not prefix:
|
||||
# this saves only the first nexthop entry if multiple exist
|
||||
continue
|
||||
else:
|
||||
prefix = field[0]
|
||||
metric = field[-1]
|
||||
tmp = prefix.split("/")
|
||||
if len(tmp) < 2:
|
||||
prefix += "/32"
|
||||
if field[1] == "proto":
|
||||
# nexthop entry is on the next line
|
||||
continue
|
||||
gw = field[2] # nexthop IP or interface
|
||||
r.append(Route(prefix, gw, metric))
|
||||
prefix = None
|
||||
|
||||
if len(r) == 0:
|
||||
self.warn("no kernel routes")
|
||||
if self.verbose:
|
||||
self.info(" %s has %d kernel routes" % (self.node.routerid, len(r)))
|
||||
return r
|
||||
|
||||
def main():
|
||||
usagestr = "usage: %prog [-h] [options] [args]"
|
||||
parser = optparse.OptionParser(usage = usagestr)
|
||||
parser.set_defaults(numnodes = 10, linkprob = 0.35, delay = 20, seed = None)
|
||||
|
||||
parser.add_option("-n", "--numnodes", dest = "numnodes", type = int,
|
||||
help = "number of nodes")
|
||||
parser.add_option("-p", "--linkprob", dest = "linkprob", type = float,
|
||||
help = "link probabilty")
|
||||
parser.add_option("-d", "--delay", dest = "delay", type = float,
|
||||
help = "wait time before checking")
|
||||
parser.add_option("-s", "--seed", dest = "seed", type = int,
|
||||
help = "specify integer to use for random seed")
|
||||
parser.add_option("-v", "--verbose", dest = "verbose",
|
||||
action = "store_true", help = "be more verbose")
|
||||
parser.add_option("-l", "--logfile", dest = "logfile", type = str,
|
||||
help = "log detailed output to the specified file")
|
||||
|
||||
def usage(msg = None, err = 0):
|
||||
sys.stdout.write("\n")
|
||||
if msg:
|
||||
sys.stdout.write(msg + "\n\n")
|
||||
parser.print_help()
|
||||
sys.exit(err)
|
||||
|
||||
# parse command line options
|
||||
(options, args) = parser.parse_args()
|
||||
|
||||
if options.numnodes < 2:
|
||||
usage("invalid numnodes: %s" % options.numnodes)
|
||||
if options.linkprob <= 0.0 or options.linkprob > 1.0:
|
||||
usage("invalid linkprob: %s" % options.linkprob)
|
||||
if options.delay < 0.0:
|
||||
usage("invalid delay: %s" % options.delay)
|
||||
|
||||
for a in args:
|
||||
sys.stderr.write("ignoring command line argument: '%s'\n" % a)
|
||||
|
||||
if options.seed:
|
||||
random.seed(options.seed)
|
||||
|
||||
me = ManetExperiment(options = options, start=datetime.datetime.now())
|
||||
me.info("creating topology: numnodes = %s; linkprob = %s" % \
|
||||
(options.numnodes, options.linkprob))
|
||||
me.topology(options.numnodes, options.linkprob)
|
||||
|
||||
me.info("waiting %s sec" % options.delay)
|
||||
time.sleep(options.delay)
|
||||
me.info("checking neighbor state and routes")
|
||||
me.checknodes()
|
||||
me.info("done")
|
||||
me.info("elapsed time: %s" % (datetime.datetime.now() - me.start))
|
||||
me.logend()
|
||||
|
||||
return me
|
||||
|
||||
if __name__ == "__main__":
|
||||
me = main()
|
78
daemon/examples/netns/switch.py
Executable file
|
@@ -0,0 +1,78 @@
|
|||
#!/usr/bin/python -i
|
||||
|
||||
# Copyright (c)2010-2013 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
|
||||
# connect n nodes to a virtual switch/hub
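# A hypothetical run (the -i in the shebang leaves an interactive prompt where
# the global node list 'n' can be inspected afterwards):
#   sudo ./switch.py -n 5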
|
||||
|
||||
import sys, datetime, optparse
|
||||
|
||||
from core import pycore
|
||||
from core.misc import ipaddr
|
||||
from core.constants import *
|
||||
|
||||
# node list (count from 1)
|
||||
n = [None]
|
||||
|
||||
def add_to_server(session):
|
||||
''' Add this session to the server's list if this script is executed from
|
||||
the core-daemon server.
|
||||
'''
|
||||
global server
|
||||
try:
|
||||
server.addsession(session)
|
||||
return True
|
||||
except NameError:
|
||||
return False
|
||||
|
||||
def main():
|
||||
usagestr = "usage: %prog [-h] [options] [args]"
|
||||
parser = optparse.OptionParser(usage = usagestr)
|
||||
parser.set_defaults(numnodes = 5)
|
||||
|
||||
parser.add_option("-n", "--numnodes", dest = "numnodes", type = int,
|
||||
help = "number of nodes")
|
||||
|
||||
def usage(msg = None, err = 0):
|
||||
sys.stdout.write("\n")
|
||||
if msg:
|
||||
sys.stdout.write(msg + "\n\n")
|
||||
parser.print_help()
|
||||
sys.exit(err)
|
||||
|
||||
# parse command line options
|
||||
(options, args) = parser.parse_args()
|
||||
|
||||
if options.numnodes < 1:
|
||||
usage("invalid number of nodes: %s" % options.numnodes)
|
||||
|
||||
for a in args:
|
||||
sys.stderr.write("ignoring command line argument: '%s'\n" % a)
|
||||
|
||||
start = datetime.datetime.now()
|
||||
|
||||
# IP subnet
|
||||
prefix = ipaddr.IPv4Prefix("10.83.0.0/16")
|
||||
session = pycore.Session(persistent=True)
|
||||
add_to_server(session)
|
||||
# emulated Ethernet switch
|
||||
switch = session.addobj(cls = pycore.nodes.SwitchNode, name = "switch")
|
||||
switch.setposition(x=80,y=50)
|
||||
print "creating %d nodes with addresses from %s" % \
|
||||
(options.numnodes, prefix)
|
||||
for i in xrange(1, options.numnodes + 1):
|
||||
tmp = session.addobj(cls = pycore.nodes.CoreNode, name = "n%d" % i,
|
||||
objid=i)
|
||||
tmp.newnetif(switch, ["%s/%s" % (prefix.addr(i), prefix.prefixlen)])
|
||||
tmp.cmd([SYSCTL_BIN, "net.ipv4.icmp_echo_ignore_broadcasts=0"])
|
||||
tmp.setposition(x=150*i,y=150)
|
||||
n.append(tmp)
|
||||
|
||||
# start a shell on node 1
|
||||
n[1].term("bash")
|
||||
|
||||
print "elapsed time: %s" % (datetime.datetime.now() - start)
|
||||
|
||||
if __name__ == "__main__" or __name__ == "__builtin__":
|
||||
main()
|
||||
|
97
daemon/examples/netns/switchtest.py
Executable file
|
@@ -0,0 +1,97 @@
|
|||
#!/usr/bin/python
|
||||
|
||||
# Copyright (c)2010-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
|
||||
# run iperf to measure the effective throughput between two nodes when
|
||||
# n nodes are connected to a virtual hub/switch; run test for testsec
|
||||
# and repeat for minnodes <= n <= maxnodes with a step size of
|
||||
# nodestep
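# A hypothetical sweep from 2 to 10 nodes in steps of 2, 10 seconds per test:
#   sudo ./switchtest.py -m 2 -n 10 -s 2 -t 10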
|
||||
|
||||
import optparse, sys, os, datetime
|
||||
|
||||
from core import pycore
|
||||
from core.misc import ipaddr
|
||||
from core.misc.utils import mutecall
|
||||
|
||||
try:
|
||||
mutecall(["iperf", "-v"])
|
||||
except OSError:
|
||||
sys.stderr.write("ERROR: running iperf failed\n")
|
||||
sys.exit(1)
|
||||
|
||||
def test(numnodes, testsec):
|
||||
# node list
|
||||
n = []
|
||||
# IP subnet
|
||||
prefix = ipaddr.IPv4Prefix("10.83.0.0/16")
|
||||
session = pycore.Session()
|
||||
# emulated network
|
||||
net = session.addobj(cls = pycore.nodes.SwitchNode)
|
||||
for i in xrange(1, numnodes + 1):
|
||||
tmp = session.addobj(cls = pycore.nodes.LxcNode, name = "n%d" % i)
|
||||
tmp.newnetif(net, ["%s/%s" % (prefix.addr(i), prefix.prefixlen)])
|
||||
n.append(tmp)
|
||||
n[0].cmd(["iperf", "-s", "-D"])
|
||||
n[-1].icmd(["iperf", "-t", str(int(testsec)), "-c", str(prefix.addr(1))])
|
||||
n[0].cmd(["killall", "-9", "iperf"])
|
||||
session.shutdown()
|
||||
|
||||
def main():
|
||||
usagestr = "usage: %prog [-h] [options] [args]"
|
||||
parser = optparse.OptionParser(usage = usagestr)
|
||||
|
||||
parser.set_defaults(minnodes = 2)
|
||||
parser.add_option("-m", "--minnodes", dest = "minnodes", type = int,
|
||||
help = "min number of nodes to test; default = %s" %
|
||||
parser.defaults["minnodes"])
|
||||
|
||||
parser.set_defaults(maxnodes = 2)
|
||||
parser.add_option("-n", "--maxnodes", dest = "maxnodes", type = int,
|
||||
help = "max number of nodes to test; default = %s" %
|
||||
parser.defaults["maxnodes"])
|
||||
|
||||
parser.set_defaults(testsec = 10)
|
||||
parser.add_option("-t", "--testsec", dest = "testsec", type = int,
|
||||
help = "test time in seconds; default = %s" %
|
||||
parser.defaults["testsec"])
|
||||
|
||||
parser.set_defaults(nodestep = 1)
|
||||
parser.add_option("-s", "--nodestep", dest = "nodestep", type = int,
|
||||
help = "number of nodes step size; default = %s" %
|
||||
parser.defaults["nodestep"])
|
||||
|
||||
def usage(msg = None, err = 0):
|
||||
sys.stdout.write("\n")
|
||||
if msg:
|
||||
sys.stdout.write(msg + "\n\n")
|
||||
parser.print_help()
|
||||
sys.exit(err)
|
||||
|
||||
# parse command line options
|
||||
(options, args) = parser.parse_args()
|
||||
|
||||
if options.minnodes < 2:
|
||||
usage("invalid min number of nodes: %s" % options.minnodes)
|
||||
if options.maxnodes < options.minnodes:
|
||||
usage("invalid max number of nodes: %s" % options.maxnodes)
|
||||
if options.testsec < 1:
|
||||
usage("invalid test time: %s" % options.testsec)
|
||||
if options.nodestep < 1:
|
||||
usage("invalid node step: %s" % options.nodestep)
|
||||
|
||||
for a in args:
|
||||
sys.stderr.write("ignoring command line argument: '%s'\n" % a)
|
||||
|
||||
start = datetime.datetime.now()
|
||||
|
||||
for i in xrange(options.minnodes, options.maxnodes + 1, options.nodestep):
|
||||
print >> sys.stderr, "%s node test:" % i
|
||||
test(i, options.testsec)
|
||||
print >> sys.stderr, ""
|
||||
|
||||
print >> sys.stderr, \
|
||||
"elapsed time: %s" % (datetime.datetime.now() - start)
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
33
daemon/examples/netns/twonodes.sh
Executable file
|
@@ -0,0 +1,33 @@
|
|||
#!/bin/sh
|
||||
# Below is a transcript of creating two emulated nodes and connecting them
|
||||
# together with a wired link. You can run the core-cleanup script to clean
|
||||
# up after this script.
|
||||
|
||||
# create node 1 namespace container
|
||||
vnoded -c /tmp/n1.ctl -l /tmp/n1.log -p /tmp/n1.pid
|
||||
# create a virtual Ethernet (veth) pair, installing one end into node 1
|
||||
ip link add name n1.0.1 type veth peer name n1.0
|
||||
ip link set n1.0 netns `cat /tmp/n1.pid`
|
||||
vcmd -c /tmp/n1.ctl -- ip link set n1.0 name eth0
|
||||
vcmd -c /tmp/n1.ctl -- ifconfig eth0 10.0.0.1/24
|
||||
|
||||
# create node 2 namespace container
|
||||
vnoded -c /tmp/n2.ctl -l /tmp/n2.log -p /tmp/n2.pid
|
||||
# create a virtual Ethernet (veth) pair, installing one end into node 2
|
||||
ip link add name n2.0.1 type veth peer name n2.0
|
||||
ip link set n2.0 netns `cat /tmp/n2.pid`
|
||||
vcmd -c /tmp/n2.ctl -- ip link set n2.0 name eth0
|
||||
vcmd -c /tmp/n2.ctl -- ifconfig eth0 10.0.0.2/24
|
||||
|
||||
# bridge together nodes 1 and 2 using the other end of each veth pair
|
||||
brctl addbr b.1.1
|
||||
brctl setfd b.1.1 0
|
||||
brctl addif b.1.1 n1.0.1
|
||||
brctl addif b.1.1 n2.0.1
|
||||
ip link set n1.0.1 up
|
||||
ip link set n2.0.1 up
|
||||
ip link set b.1.1 up
|
||||
|
||||
# display connectivity and ping from node 1 to node 2
|
||||
brctl show
|
||||
vcmd -c /tmp/n1.ctl -- ping 10.0.0.2
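# If core-cleanup is not available, a rough manual teardown (untested sketch)
# would be:
#   ip link set b.1.1 down && brctl delbr b.1.1
#   kill `cat /tmp/n1.pid` `cat /tmp/n2.pid`
#   rm -f /tmp/n1.* /tmp/n2.*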
|
772
daemon/examples/netns/wlanemanetests.py
Executable file
|
@@ -0,0 +1,772 @@
|
|||
#!/usr/bin/python
|
||||
|
||||
# Copyright (c)2011-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
wlanemanetests.py - This script tests the performance of the WLAN device in
|
||||
CORE by measuring various metrics:
|
||||
- delay experienced when pinging end-to-end
|
||||
- maximum TCP throughput achieved using iperf end-to-end
|
||||
- the CPU used and loss experienced when running an MGEN flow of UDP traffic
|
||||
|
||||
All MANET nodes are arranged in a row, so that any given node can only
|
||||
communicate with the node to its right or to its left. Performance is measured
|
||||
using traffic that travels across each hop in the network. Static /32 routing
|
||||
is used instead of any dynamic routing protocol.
|
||||
|
||||
Various underlying network types are tested:
|
||||
- bridged (the CORE default, uses ebtables)
|
||||
- bridged with netem (add link effects to the bridge using tc queues)
|
||||
- EMANE bypass - the bypass model just forwards traffic
|
||||
- EMANE RF-PIPE - the bandwidth (bitrate) is set very high / no restrictions
|
||||
- EMANE RF-PIPE - bandwidth is set similar to netem case
|
||||
- EMANE RF-PIPE - default connectivity is off and pathloss events are
|
||||
generated to connect the nodes in a line
|
||||
|
||||
Results are printed/logged in CSV format.
|
||||
|
||||
'''
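# A hypothetical invocation (EMANE and MGEN are assumed to be installed for
# their respective tests; the CSV summary is printed and, if -l is given, logged):
#   sudo ./wlanemanetests.py -n 10 -t 10 -r 512 -l wlanemanetests.csv -v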
|
||||
|
||||
import os, sys, time, optparse, datetime, math
|
||||
from string import Template
|
||||
try:
|
||||
from core import pycore
|
||||
except ImportError:
|
||||
# hack for Fedora autoconf that uses the following pythondir:
|
||||
if "/usr/lib/python2.6/site-packages" in sys.path:
|
||||
sys.path.append("/usr/local/lib/python2.6/site-packages")
|
||||
if "/usr/lib64/python2.6/site-packages" in sys.path:
|
||||
sys.path.append("/usr/local/lib64/python2.6/site-packages")
|
||||
if "/usr/lib/python2.7/site-packages" in sys.path:
|
||||
sys.path.append("/usr/local/lib/python2.7/site-packages")
|
||||
if "/usr/lib64/python2.7/site-packages" in sys.path:
|
||||
sys.path.append("/usr/local/lib64/python2.7/site-packages")
|
||||
from core import pycore
|
||||
from core.misc import ipaddr
|
||||
from core.misc.utils import mutecall
|
||||
from core.constants import QUAGGA_STATE_DIR
|
||||
from core.emane.bypass import EmaneBypassModel
|
||||
from core.emane.rfpipe import EmaneRfPipeModel
|
||||
import emaneeventservice
|
||||
import emaneeventpathloss
|
||||
|
||||
|
||||
# move these to core.misc.utils
|
||||
def readstat():
|
||||
f = open("/proc/stat", "r")
|
||||
lines = f.readlines()
|
||||
f.close()
|
||||
return lines
|
||||
|
||||
def numcpus():
|
||||
lines = readstat()
|
||||
n = 0
|
||||
for l in lines[1:]:
|
||||
if l[:3] != "cpu":
|
||||
break
|
||||
n += 1
|
||||
return n
|
||||
|
||||
def getcputimes(line):
|
||||
# return (user, nice, sys, idle) from a /proc/stat cpu line
|
||||
# assume columns are:
|
||||
# cpu# user nice sys idle iowait irq softirq steal guest (man 5 proc)
|
||||
items = line.split()
|
||||
(user, nice, sys, idle) = map(lambda(x): int(x), items[1:5])
|
||||
return [user, nice, sys, idle]
|
||||
|
||||
def calculatecpu(timesa, timesb):
|
||||
for i in range(len(timesa)):
|
||||
timesb[i] -= timesa[i]
|
||||
total = sum(timesb)
|
||||
if total == 0:
|
||||
return 0.0
|
||||
else:
|
||||
# subtract % time spent in idle time
|
||||
return 100 - ((100.0 * timesb[-1]) / total)
|
||||
|
||||
# end move these to core.misc.utils
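# A minimal sketch of using the helpers above on their own to sample overall
# CPU utilization over a 5-second window (readstat()[0] is the aggregate
# "cpu" row of /proc/stat):
#   before = getcputimes(readstat()[0])
#   time.sleep(5)
#   after = getcputimes(readstat()[0])
#   print calculatecpu(before, after)   # percent non-idle time over the window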
|
||||
|
||||
class Cmd(object):
|
||||
''' Helper class for running a command on a node and parsing the result. '''
|
||||
args = ""
|
||||
def __init__(self, node, verbose=False):
|
||||
''' Initialize with a CoreNode (LxcNode) '''
|
||||
self.id = None
|
||||
self.stdin = None
|
||||
self.out = None
|
||||
self.node = node
|
||||
self.verbose = verbose
|
||||
|
||||
def info(self, msg):
|
||||
''' Utility method for writing output to stdout.'''
|
||||
print msg
|
||||
sys.stdout.flush()
|
||||
|
||||
def warn(self, msg):
|
||||
''' Utility method for writing output to stderr. '''
|
||||
print >> sys.stderr, "XXX %s:" % self.node.name, msg
|
||||
sys.stderr.flush()
|
||||
|
||||
def run(self):
|
||||
''' This is the primary method used for running this command. '''
|
||||
self.open()
|
||||
status = self.id.wait()
|
||||
r = self.parse()
|
||||
self.cleanup()
|
||||
return r
|
||||
|
||||
def open(self):
|
||||
''' Execute call to node.popen(). '''
|
||||
self.id, self.stdin, self.out, self.err = \
|
||||
self.node.popen((self.args))
|
||||
|
||||
def parse(self):
|
||||
''' This method is overloaded by child classes and should return some
|
||||
result.
|
||||
'''
|
||||
return None
|
||||
|
||||
def cleanup(self):
|
||||
''' Close the Popen channels.'''
|
||||
self.stdin.close()
|
||||
self.out.close()
|
||||
self.err.close()
|
||||
tmp = self.id.wait()
|
||||
if tmp:
|
||||
self.warn("nonzero exit status:", tmp)
|
||||
|
||||
|
||||
class ClientServerCmd(Cmd):
|
||||
''' Helper class for running a command on a node and parsing the result. '''
|
||||
args = ""
|
||||
client_args = ""
|
||||
def __init__(self, node, client_node, verbose=False):
|
||||
''' Initialize with two CoreNodes, node is the server '''
|
||||
Cmd.__init__(self, node, verbose)
|
||||
self.client_node = client_node
|
||||
|
||||
def run(self):
|
||||
''' Run the server command, then the client command, then
|
||||
kill the server '''
|
||||
self.open() # server
|
||||
self.client_open() # client
|
||||
status = self.client_id.wait()
|
||||
self.node.cmdresult(['killall', self.args[0]]) # stop the server
|
||||
r = self.parse()
|
||||
self.cleanup()
|
||||
return r
|
||||
|
||||
def client_open(self):
|
||||
''' Execute call to client_node.popen(). '''
|
||||
self.client_id, self.client_stdin, self.client_out, self.client_err = \
|
||||
self.client_node.popen((self.client_args))
|
||||
|
||||
def parse(self):
|
||||
''' This method is overloaded by child classes and should return some
|
||||
result.
|
||||
'''
|
||||
return None
|
||||
|
||||
def cleanup(self):
|
||||
''' Close the Popen channels.'''
|
||||
self.stdin.close()
|
||||
self.out.close()
|
||||
self.err.close()
|
||||
tmp = self.id.wait()
|
||||
if tmp:
|
||||
self.warn("nonzero exit status: %s" % tmp)
|
||||
self.warn("command was: %s" % ((self.args, )))
|
||||
|
||||
|
||||
class PingCmd(Cmd):
|
||||
''' Test latency using ping.
|
||||
'''
|
||||
def __init__(self, node, verbose=False, addr=None, count=50, interval=0.1, ):
|
||||
Cmd.__init__(self, node, verbose)
|
||||
self.addr = addr
|
||||
self.count = count
|
||||
self.interval = interval
|
||||
self.args = ['ping', '-q', '-c', '%s' % count, '-i', '%s' % interval,
|
||||
addr]
|
||||
|
||||
def run(self):
|
||||
if self.verbose:
|
||||
self.info("%s initial test ping (max 1 second)..." % self.node.name)
|
||||
(status, result) = self.node.cmdresult(["ping", "-q", "-c", "1", "-w",
|
||||
"1", self.addr])
|
||||
if status != 0:
|
||||
self.warn("initial ping from %s to %s failed! result:\n%s" % \
|
||||
(self.node.name, self.addr, result))
|
||||
return (0.0, 0.0)
|
||||
if self.verbose:
|
||||
self.info("%s pinging %s (%d seconds)..." % \
|
||||
(self.node.name, self.addr, self.count * self.interval))
|
||||
return Cmd.run(self)
|
||||
|
||||
def parse(self):
|
||||
lines = self.out.readlines()
|
||||
avg_latency = 0
|
||||
mdev = 0
|
||||
try:
|
||||
stats_str = lines[-1].split('=')[1]
|
||||
stats = stats_str.split('/')
|
||||
avg_latency = float(stats[1])
|
||||
mdev = float(stats[3].split(' ')[0])
|
||||
except Exception, e:
|
||||
self.warn("ping parsing exception: %s" % e)
|
||||
return (avg_latency, mdev)
|
||||
|
||||
class IperfCmd(ClientServerCmd):
|
||||
''' Test throughput using iperf.
|
||||
'''
|
||||
def __init__(self, node, client_node, verbose=False, addr=None, time=10):
|
||||
# node is the server
|
||||
ClientServerCmd.__init__(self, node, client_node, verbose)
|
||||
self.addr = addr
|
||||
self.time = time
|
||||
# -s server, -y c CSV report output
|
||||
self.args = ["iperf", "-s", "-y", "c"]
|
||||
self.client_args = ["iperf", "-c", self.addr, "-t", "%s" % self.time]
|
||||
|
||||
def run(self):
|
||||
if self.verbose:
|
||||
self.info("Launching the iperf server on %s..." % self.node.name)
|
||||
self.info("Running the iperf client on %s (%s seconds)..." % \
|
||||
(self.client_node.name, self.time))
|
||||
return ClientServerCmd.run(self)
|
||||
|
||||
def parse(self):
|
||||
lines = self.out.readlines()
|
||||
try:
|
||||
bps = int(lines[-1].split(',')[-1].strip('\n'))
|
||||
except Exception, e:
|
||||
self.warn("iperf parsing exception: %s" % e)
|
||||
bps = 0
|
||||
return bps
|
||||
|
||||
class MgenCmd(ClientServerCmd):
|
||||
''' Run a test traffic flow using an MGEN sender and receiver.
|
||||
'''
|
||||
def __init__(self, node, client_node, verbose=False, addr=None, time=10,
|
||||
rate=512):
|
||||
ClientServerCmd.__init__(self, node, client_node, verbose)
|
||||
self.addr = addr
|
||||
self.time = time
|
||||
self.args = ['mgen', 'event', 'listen udp 5000', 'output',
|
||||
'/var/log/mgen.log']
|
||||
self.rate = rate
|
||||
sendevent = "ON 1 UDP DST %s/5000 PERIODIC [%s]" % \
|
||||
(addr, self.mgenrate(self.rate))
|
||||
stopevent = "%s OFF 1" % time
|
||||
self.client_args = ['mgen', 'event', sendevent, 'event', stopevent,
|
||||
'output', '/var/log/mgen.log']
|
||||
|
||||
@staticmethod
|
||||
def mgenrate(kbps):
|
||||
''' Return a MGEN periodic rate string for the given kilobits-per-sec.
|
||||
Assume 1500 byte MTU, 20-byte IP + 8-byte UDP headers, leaving
|
||||
1472 bytes for data.
|
||||
'''
|
||||
bps = (kbps / 8) * 1000.0
|
||||
maxdata = 1472
|
||||
pps = math.ceil(bps / maxdata)
|
||||
return "%s %s" % (pps, maxdata)
|
||||
|
||||
def run(self):
|
||||
if self.verbose:
|
||||
self.info("Launching the MGEN receiver on %s..." % self.node.name)
|
||||
self.info("Running the MGEN sender on %s (%s seconds)..." % \
|
||||
(self.client_node.name, self.time))
|
||||
return ClientServerCmd.run(self)
|
||||
|
||||
def cleanup(self):
|
||||
''' Close the Popen channels.'''
|
||||
self.stdin.close()
|
||||
self.out.close()
|
||||
self.err.close()
|
||||
tmp = self.id.wait() # non-zero mgen exit status OK
|
||||
|
||||
def parse(self):
|
||||
''' Check MGEN receiver's log file for packet sequence numbers, and
|
||||
return the percentage of lost packets.
|
||||
'''
|
||||
logfile = os.path.join(self.node.nodedir, 'var.log/mgen.log')
|
||||
f = open(logfile, 'r')
|
||||
numlost = 0
|
||||
lastseq = 0
|
||||
for line in f.readlines():
|
||||
fields = line.split()
|
||||
if fields[1] != 'RECV':
|
||||
continue
|
||||
try:
|
||||
seq = int(fields[4].split('>')[1])
|
||||
except:
|
||||
self.info("Unexpected MGEN line:\n%s" % fields)
|
||||
if seq > (lastseq + 1):
|
||||
numlost += seq - (lastseq + 1)
|
||||
lastseq = seq
|
||||
f.close()
|
||||
if lastseq > 0:
|
||||
loss = 100.0 * numlost / lastseq
|
||||
else:
|
||||
loss = 0
|
||||
if self.verbose:
|
||||
self.info("Receiver log shows %d of %d packets lost" % \
|
||||
(numlost, lastseq))
|
||||
return loss
|
||||
|
||||
|
||||
class Experiment(object):
|
||||
''' Experiment object to organize tests.
|
||||
'''
|
||||
def __init__(self, opt, start):
|
||||
''' Initialize with opt and start time. '''
|
||||
self.session = None
|
||||
# node list
|
||||
self.nodes = []
|
||||
# WLAN network
|
||||
self.net = None
|
||||
self.verbose = opt.verbose
|
||||
# dict from OptionParser
|
||||
self.opt = opt
|
||||
self.start = start
|
||||
self.numping = opt.numping
|
||||
self.numiperf = opt.numiperf
|
||||
self.nummgen = opt.nummgen
|
||||
self.logbegin()
|
||||
|
||||
def info(self, msg):
|
||||
''' Utility method for writing output to stdout. '''
|
||||
print msg
|
||||
sys.stdout.flush()
|
||||
self.log(msg)
|
||||
|
||||
def warn(self, msg):
|
||||
''' Utility method for writing output to stderr. '''
|
||||
print >> sys.stderr, msg
|
||||
sys.stderr.flush()
|
||||
self.log(msg)
|
||||
|
||||
def logbegin(self):
|
||||
''' Start logging. '''
|
||||
self.logfp = None
|
||||
if not self.opt.logfile:
|
||||
return
|
||||
self.logfp = open(self.opt.logfile, "w")
|
||||
self.log("%s begin: %s\n" % (sys.argv[0], self.start.ctime()))
|
||||
self.log("%s args: %s\n" % (sys.argv[0], sys.argv[1:]))
|
||||
(sysname, rel, ver, machine, nodename) = os.uname()
|
||||
self.log("%s %s %s %s on %s" % (sysname, rel, ver, machine, nodename))
|
||||
|
||||
def logend(self):
|
||||
''' End logging. '''
|
||||
if not self.logfp:
|
||||
return
|
||||
end = datetime.datetime.now()
|
||||
self.log("%s end: %s (%s)\n" % \
|
||||
(sys.argv[0], end.ctime(), end - self.start))
|
||||
self.logfp.flush()
|
||||
self.logfp.close()
|
||||
self.logfp = None
|
||||
|
||||
def log(self, msg):
|
||||
''' Write to the log file, if any. '''
|
||||
if not self.logfp:
|
||||
return
|
||||
print >> self.logfp, msg
|
||||
|
||||
def reset(self):
|
||||
''' Prepare for another experiment run.
|
||||
'''
|
||||
if self.session:
|
||||
self.session.shutdown()
|
||||
del self.session
|
||||
self.session = None
|
||||
self.nodes = []
|
||||
self.net = None
|
||||
|
||||
def createbridgedsession(self, numnodes, verbose = False):
|
||||
''' Build a topology consisting of the given number of LxcNodes
|
||||
connected to a WLAN.
|
||||
'''
|
||||
# IP subnet
|
||||
prefix = ipaddr.IPv4Prefix("10.0.0.0/16")
|
||||
self.session = pycore.Session()
|
||||
# emulated network
|
||||
self.net = self.session.addobj(cls = pycore.nodes.WlanNode,
|
||||
name = "wlan1")
|
||||
prev = None
|
||||
for i in xrange(1, numnodes + 1):
|
||||
addr = "%s/%s" % (prefix.addr(i), 32)
|
||||
tmp = self.session.addobj(cls = pycore.nodes.CoreNode, objid = i,
|
||||
name = "n%d" % i)
|
||||
tmp.newnetif(self.net, [addr])
|
||||
self.nodes.append(tmp)
|
||||
self.session.services.addservicestonode(tmp, "router",
|
||||
"IPForward", self.verbose)
|
||||
self.session.services.bootnodeservices(tmp)
|
||||
self.staticroutes(i, prefix, numnodes)
|
||||
|
||||
# link each node in a chain, with the previous node
|
||||
if prev:
|
||||
self.net.link(prev.netif(0), tmp.netif(0))
|
||||
prev = tmp
|
||||
|
||||
def createemanesession(self, numnodes, verbose = False, cls = None,
|
||||
values = None):
|
||||
''' Build a topology consisting of the given number of LxcNodes
|
||||
connected to an EMANE WLAN.
|
||||
'''
|
||||
prefix = ipaddr.IPv4Prefix("10.0.0.0/16")
|
||||
self.session = pycore.Session()
|
||||
self.session.master = True
|
||||
self.session.location.setrefgeo(47.57917,-122.13232,2.00000)
|
||||
self.session.location.refscale = 150.0
|
||||
self.session.cfg['emane_models'] = "RfPipe, Ieee80211abg, Bypass"
|
||||
self.session.emane.loadmodels()
|
||||
self.net = self.session.addobj(cls = pycore.nodes.EmaneNode,
|
||||
objid = numnodes + 1, name = "wlan1")
|
||||
self.net.verbose = verbose
|
||||
#self.session.emane.addobj(self.net)
|
||||
for i in xrange(1, numnodes + 1):
|
||||
addr = "%s/%s" % (prefix.addr(i), 32)
|
||||
tmp = self.session.addobj(cls = pycore.nodes.CoreNode, objid = i,
|
||||
name = "n%d" % i)
|
||||
#tmp.setposition(i * 20, 50, None)
|
||||
tmp.setposition(50, 50, None)
|
||||
tmp.newnetif(self.net, [addr])
|
||||
self.nodes.append(tmp)
|
||||
self.session.services.addservicestonode(tmp, "router",
|
||||
"IPForward", self.verbose)
|
||||
|
||||
if values is None:
|
||||
values = cls.getdefaultvalues()
|
||||
self.session.emane.setconfig(self.net.objid, cls._name, values)
|
||||
self.session.emane.startup()
|
||||
|
||||
self.info("waiting %s sec (TAP bring-up)" % 2)
|
||||
time.sleep(2)
|
||||
|
||||
for i in xrange(1, numnodes + 1):
|
||||
tmp = self.nodes[i-1]
|
||||
self.session.services.bootnodeservices(tmp)
|
||||
self.staticroutes(i, prefix, numnodes)
|
||||
|
||||
|
||||
def setnodes(self):
|
||||
''' Set the sender and receiver nodes for use in this experiment,
|
||||
along with the address of the receiver to be used.
|
||||
'''
|
||||
self.firstnode = self.nodes[0]
|
||||
self.lastnode = self.nodes[-1]
|
||||
self.lastaddr = self.lastnode.netif(0).addrlist[0].split('/')[0]
|
||||
|
||||
|
||||
def staticroutes(self, i, prefix, numnodes):
|
||||
''' Add static routes on node number i to the other nodes in the chain.
|
||||
'''
|
||||
routecmd = ["/sbin/ip", "route", "add"]
|
||||
node = self.nodes[i-1]
|
||||
neigh_left = ""
|
||||
neigh_right = ""
|
||||
# add direct interface routes first
|
||||
if i > 1:
|
||||
neigh_left = "%s" % prefix.addr(i - 1)
|
||||
cmd = routecmd + [neigh_left, "dev", node.netif(0).name]
|
||||
(status, result) = node.cmdresult(cmd)
|
||||
if status != 0:
|
||||
self.warn("failed to add interface route: %s" % cmd)
|
||||
if i < numnodes:
|
||||
neigh_right = "%s" % prefix.addr(i + 1)
|
||||
cmd = routecmd + [neigh_right, "dev", node.netif(0).name]
|
||||
(status, result) = node.cmdresult(cmd)
|
||||
if status != 0:
|
||||
self.warn("failed to add interface route: %s" % cmd)
|
||||
|
||||
# add static routes to all other nodes via left/right neighbors
|
||||
for j in xrange(1, numnodes + 1):
|
||||
if abs(j - i) < 2:
|
||||
continue
|
||||
addr = "%s" % prefix.addr(j)
|
||||
if j < i:
|
||||
gw = neigh_left
|
||||
else:
|
||||
gw = neigh_right
|
||||
cmd = routecmd + [addr, "via", gw]
|
||||
(status, result) = node.cmdresult(cmd)
|
||||
if status != 0:
|
||||
self.warn("failed to add route: %s" % cmd)
|
||||
|
||||
def setpathloss(self, numnodes):
|
||||
''' Send EMANE pathloss events to connect all NEMs in a chain.
|
||||
'''
|
||||
service = emaneeventservice.EventService()
|
||||
e = emaneeventpathloss.EventPathloss(1)
|
||||
for i in xrange(1, numnodes + 1):
|
||||
rxnem = i
|
||||
# inform rxnem that it can hear node to the left with 10dB noise
|
||||
txnem = rxnem - 1
|
||||
e.set(0, txnem, 10.0, 10.0)
|
||||
if txnem > 0:
|
||||
service.publish(emaneeventpathloss.EVENT_ID,
|
||||
emaneeventservice.PLATFORMID_ANY, rxnem,
|
||||
emaneeventservice.COMPONENTID_ANY, e.export())
|
||||
# inform rxnem that it can hear node to the right with 10dB noise
|
||||
txnem = rxnem + 1
|
||||
e.set(0, txnem, 10.0, 10.0)
|
||||
if txnem <= numnodes:
|
||||
service.publish(emaneeventpathloss.EVENT_ID,
|
||||
emaneeventservice.PLATFORMID_ANY, rxnem,
|
||||
emaneeventservice.COMPONENTID_ANY, e.export())
|
||||
|
||||
def setneteffects(self, bw = None, delay = None):
|
||||
''' Set link effects for all interfaces attached to the network node.
|
||||
'''
|
||||
if not self.net:
|
||||
self.warn("failed to set effects: no network node")
|
||||
return
|
||||
for netif in self.net.netifs():
|
||||
self.net.linkconfig(netif, bw = bw, delay = delay)
|
||||
|
||||
def runalltests(self, title=""):
|
||||
''' Convenience helper to run all defined experiment tests.
|
||||
If tests are run multiple times, this returns the average of
|
||||
those runs.
|
||||
'''
|
||||
duration = self.opt.duration
|
||||
rate = self.opt.rate
|
||||
if len(title) > 0:
|
||||
self.info("----- running %s tests (duration=%s, rate=%s) -----" % \
|
||||
(title, duration, rate))
|
||||
(latency, mdev, throughput, cpu, loss) = (0,0,0,0,0)
|
||||
|
||||
self.info("number of runs: ping=%d, iperf=%d, mgen=%d" % \
|
||||
(self.numping, self.numiperf, self.nummgen))
|
||||
|
||||
if self.numping > 0:
|
||||
(latency, mdev) = self.pingtest(count=self.numping)
|
||||
|
||||
if self.numiperf > 0:
|
||||
throughputs = []
|
||||
for i in range(1, self.numiperf + 1):
|
||||
throughput = self.iperftest(time=duration)
|
||||
if self.numiperf > 1:
|
||||
throughputs += throughput,  # trailing comma: append the int to the list
|
||||
time.sleep(1) # iperf is very CPU intensive
|
||||
if self.numiperf > 1:
|
||||
throughput = sum(throughputs) / len(throughputs)
|
||||
self.info("throughputs=%s" % ["%.2f" % v for v in throughputs])
|
||||
|
||||
if self.nummgen > 0:
|
||||
cpus = []
|
||||
losses = []
|
||||
for i in range(1, self.nummgen + 1):
|
||||
(cpu, loss) = self.cputest(time=duration, rate=rate)
|
||||
if self.nummgen > 1:
|
||||
cpus += cpu,
|
||||
losses += loss,
|
||||
if self.nummgen > 1:
|
||||
cpu = sum(cpus) / len(cpus)
|
||||
loss = sum(losses) / len(losses)
|
||||
self.info("cpus=%s" % ["%.2f" % v for v in cpus])
|
||||
self.info("losses=%s" % ["%.2f" % v for v in losses])
|
||||
|
||||
return (latency, mdev, throughput, cpu, loss)
|
||||
|
||||
def pingtest(self, count=50):
|
||||
''' Ping through a chain of nodes and report the average latency.
|
||||
'''
|
||||
p = PingCmd(node=self.firstnode, verbose=self.verbose,
|
||||
addr = self.lastaddr, count=count, interval=0.1).run()
|
||||
(latency, mdev) = p
|
||||
self.info("latency (ms): %.03f, %.03f" % (latency, mdev))
|
||||
return p
|
||||
|
||||
def iperftest(self, time=10):
|
||||
''' Run iperf through a chain of nodes and report the maximum
|
||||
throughput.
|
||||
'''
|
||||
bps = IperfCmd(node=self.lastnode, client_node=self.firstnode,
|
||||
verbose=False, addr=self.lastaddr, time=time).run()
|
||||
self.info("throughput (bps): %s" % bps)
|
||||
return bps
|
||||
|
||||
def cputest(self, time=10, rate=512):
|
||||
''' Run MGEN through a chain of nodes and report the CPU usage and
|
||||
percent of lost packets. Rate is in kbps.
|
||||
'''
|
||||
if self.verbose:
|
||||
self.info("%s initial test ping (max 1 second)..." % \
|
||||
self.firstnode.name)
|
||||
(status, result) = self.firstnode.cmdresult(["ping", "-q", "-c", "1",
|
||||
"-w", "1", self.lastaddr])
|
||||
if status != 0:
|
||||
self.warn("initial ping from %s to %s failed! result:\n%s" % \
|
||||
(self.firstnode.name, self.lastaddr, result))
|
||||
return (0.0, 0.0)
|
||||
lines = readstat()
|
||||
cpustart = getcputimes(lines[0])
|
||||
loss = MgenCmd(node=self.lastnode, client_node=self.firstnode,
|
||||
verbose=False, addr=self.lastaddr,
|
||||
time=time, rate=rate).run()
|
||||
lines = readstat()
|
||||
cpuend = getcputimes(lines[0])
|
||||
percent = calculatecpu(cpustart, cpuend)
|
||||
self.info("CPU usage (%%): %.02f, %.02f loss" % (percent, loss))
|
||||
return percent, loss
|
||||
|
||||
def main():
|
||||
''' Main routine when running from command-line.
|
||||
'''
|
||||
usagestr = "usage: %prog [-h] [options] [args]"
|
||||
parser = optparse.OptionParser(usage = usagestr)
|
||||
parser.set_defaults(numnodes = 10, delay = 3, duration = 10, rate = 512,
|
||||
verbose = False,
|
||||
numping = 50, numiperf = 1, nummgen = 1)
|
||||
|
||||
parser.add_option("-d", "--delay", dest = "delay", type = float,
|
||||
help = "wait time before testing")
|
||||
parser.add_option("-l", "--logfile", dest = "logfile", type = str,
|
||||
help = "log detailed output to the specified file")
|
||||
parser.add_option("-n", "--numnodes", dest = "numnodes", type = int,
|
||||
help = "number of nodes")
|
||||
parser.add_option("-r", "--rate", dest = "rate", type = float,
|
||||
help = "kbps rate to use for MGEN CPU tests")
|
||||
parser.add_option("--numping", dest = "numping", type = int,
|
||||
help = "number of ping latency test runs")
|
||||
parser.add_option("--numiperf", dest = "numiperf", type = int,
|
||||
help = "number of iperf throughput test runs")
|
||||
parser.add_option("--nummgen", dest = "nummgen", type = int,
|
||||
help = "number of MGEN CPU tests runs")
|
||||
parser.add_option("-t", "--time", dest = "duration", type = int,
|
||||
help = "duration in seconds of throughput and CPU tests")
|
||||
parser.add_option("-v", "--verbose", dest = "verbose",
|
||||
action = "store_true", help = "be more verbose")
|
||||
|
||||
def usage(msg = None, err = 0):
|
||||
sys.stdout.write("\n")
|
||||
if msg:
|
||||
sys.stdout.write(msg + "\n\n")
|
||||
parser.print_help()
|
||||
sys.exit(err)
|
||||
|
||||
# parse command line opt
|
||||
(opt, args) = parser.parse_args()
|
||||
|
||||
if opt.numnodes < 2:
|
||||
usage("invalid numnodes: %s" % opt.numnodes)
|
||||
if opt.delay < 0.0:
|
||||
usage("invalid delay: %s" % opt.delay)
|
||||
if opt.rate < 0.0:
|
||||
usage("invalid rate: %s" % opt.rate)
|
||||
|
||||
for a in args:
|
||||
sys.stderr.write("ignoring command line argument: '%s'\n" % a)
|
||||
|
||||
results = {}
|
||||
exp = Experiment(opt = opt, start=datetime.datetime.now())
|
||||
|
||||
# bridged
|
||||
exp.info("setting up bridged tests 1/2 no link effects")
|
||||
exp.info("creating topology: numnodes = %s" % \
|
||||
(opt.numnodes, ))
|
||||
exp.createbridgedsession(numnodes=opt.numnodes, verbose=opt.verbose)
|
||||
exp.setnodes()
|
||||
exp.info("waiting %s sec (node/route bring-up)" % opt.delay)
|
||||
time.sleep(opt.delay)
|
||||
results['0 bridged'] = exp.runalltests("bridged")
|
||||
exp.info("done; elapsed time: %s" % (datetime.datetime.now() - exp.start))
|
||||
|
||||
# bridged with netem
|
||||
exp.info("setting up bridged tests 2/2 with netem")
|
||||
exp.setneteffects(bw=54000000, delay=0)
|
||||
exp.info("waiting %s sec (queue bring-up)" % opt.delay)
|
||||
results['1 netem'] = exp.runalltests("netem")
|
||||
exp.info("shutting down bridged session")
|
||||
exp.reset()
|
||||
|
||||
# EMANE bypass model
|
||||
exp.info("setting up EMANE tests 1/2 with bypass model")
|
||||
exp.createemanesession(numnodes=opt.numnodes, verbose=opt.verbose,
|
||||
cls=EmaneBypassModel, values=None)
|
||||
exp.setnodes()
|
||||
exp.info("waiting %s sec (node/route bring-up)" % opt.delay)
|
||||
time.sleep(opt.delay)
|
||||
results['2 bypass'] = exp.runalltests("bypass")
|
||||
exp.info("shutting down bypass session")
|
||||
exp.reset()
|
||||
|
||||
exp.info("waiting %s sec (between EMANE tests)" % opt.delay)
|
||||
time.sleep(opt.delay)
|
||||
|
||||
# EMANE RF-PIPE model: no restrictions (max datarate)
|
||||
exp.info("setting up EMANE tests 2/4 with RF-PIPE model")
|
||||
rfpipevals = list(EmaneRfPipeModel.getdefaultvalues())
|
||||
rfpnames = EmaneRfPipeModel.getnames()
|
||||
rfpipevals[ rfpnames.index('datarate') ] = '4294967295' # max value
|
||||
rfpipevals[ rfpnames.index('pathlossmode') ] = '2ray'
|
||||
rfpipevals[ rfpnames.index('defaultconnectivitymode') ] = '1'
|
||||
exp.createemanesession(numnodes=opt.numnodes, verbose=opt.verbose,
|
||||
cls=EmaneRfPipeModel, values=rfpipevals)
|
||||
exp.setnodes()
|
||||
exp.info("waiting %s sec (node/route bring-up)" % opt.delay)
|
||||
time.sleep(opt.delay)
|
||||
results['3 rfpipe'] = exp.runalltests("rfpipe")
|
||||
exp.info("shutting down RF-PIPE session")
|
||||
exp.reset()
|
||||
|
||||
# EMANE RF-PIPE model: 54M datarate
|
||||
exp.info("setting up EMANE tests 3/4 with RF-PIPE model 54M")
|
||||
rfpipevals = list(EmaneRfPipeModel.getdefaultvalues())
|
||||
rfpnames = EmaneRfPipeModel.getnames()
|
||||
rfpipevals[ rfpnames.index('datarate') ] = '54000'
|
||||
# TX delay != propagation delay
|
||||
#rfpipevals[ rfpnames.index('delay') ] = '5000'
|
||||
rfpipevals[ rfpnames.index('pathlossmode') ] = '2ray'
|
||||
rfpipevals[ rfpnames.index('defaultconnectivitymode') ] = '1'
|
||||
exp.createemanesession(numnodes=opt.numnodes, verbose=opt.verbose,
|
||||
cls=EmaneRfPipeModel, values=rfpipevals)
|
||||
exp.setnodes()
|
||||
exp.info("waiting %s sec (node/route bring-up)" % opt.delay)
|
||||
time.sleep(opt.delay)
|
||||
results['4 rfpipe54m'] = exp.runalltests("rfpipe54m")
|
||||
exp.info("shutting down RF-PIPE session")
|
||||
exp.reset()
|
||||
|
||||
# EMANE RF-PIPE model
|
||||
exp.info("setting up EMANE tests 4/4 with RF-PIPE model pathloss")
|
||||
rfpipevals = list(EmaneRfPipeModel.getdefaultvalues())
|
||||
rfpnames = EmaneRfPipeModel.getnames()
|
||||
rfpipevals[ rfpnames.index('datarate') ] = '54000'
|
||||
rfpipevals[ rfpnames.index('pathlossmode') ] = 'pathloss'
|
||||
rfpipevals[ rfpnames.index('defaultconnectivitymode') ] = '0'
|
||||
exp.createemanesession(numnodes=opt.numnodes, verbose=opt.verbose,
|
||||
cls=EmaneRfPipeModel, values=rfpipevals)
|
||||
exp.setnodes()
|
||||
exp.info("waiting %s sec (node/route bring-up)" % opt.delay)
|
||||
time.sleep(opt.delay)
|
||||
exp.info("sending pathloss events to govern connectivity")
|
||||
exp.setpathloss(opt.numnodes)
|
||||
results['5 pathloss'] = exp.runalltests("pathloss")
|
||||
exp.info("shutting down RF-PIPE session")
|
||||
exp.reset()
|
||||
|
||||
# summary of results in CSV format
|
||||
exp.info("----- summary of results (%s nodes, rate=%s, duration=%s) -----" \
|
||||
% (opt.numnodes, opt.rate, opt.duration))
|
||||
exp.info("netname:latency,mdev,throughput,cpu,loss")
|
||||
|
||||
for test in sorted(results.keys()):
|
||||
(latency, mdev, throughput, cpu, loss) = results[test]
|
||||
exp.info("%s:%.03f,%.03f,%d,%.02f,%.02f" % \
|
||||
(test, latency, mdev, throughput, cpu, loss))
|
||||
|
||||
exp.logend()
|
||||
return exp
|
||||
|
||||
if __name__ == "__main__":
|
||||
exp = main()
|
98
daemon/examples/netns/wlantest.py
Executable file
|
@@ -0,0 +1,98 @@
|
|||
#!/usr/bin/python
|
||||
|
||||
# Copyright (c)2010-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
|
||||
# run iperf to measure the effective throughput between two nodes when
|
||||
# n nodes are connected to a virtual wlan; run test for testsec
|
||||
# and repeat for minnodes <= n <= maxnodes with a step size of
|
||||
# nodestep
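# A hypothetical run, analogous to switchtest.py but over an emulated WLAN:
#   sudo ./wlantest.py -m 2 -n 6 -s 2 -t 10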
|
||||
|
||||
import optparse, sys, os, datetime
|
||||
|
||||
from core import pycore
|
||||
from core.misc import ipaddr
|
||||
from core.misc.utils import mutecall
|
||||
|
||||
try:
|
||||
mutecall(["iperf", "-v"])
|
||||
except OSError:
|
||||
sys.stderr.write("ERROR: running iperf failed\n")
|
||||
sys.exit(1)
|
||||
|
||||
def test(numnodes, testsec):
|
||||
# node list
|
||||
n = []
|
||||
# IP subnet
|
||||
prefix = ipaddr.IPv4Prefix("10.83.0.0/16")
|
||||
session = pycore.Session()
|
||||
# emulated network
|
||||
net = session.addobj(cls = pycore.nodes.WlanNode)
|
||||
for i in xrange(1, numnodes + 1):
|
||||
tmp = session.addobj(cls = pycore.nodes.LxcNode, name = "n%d" % i)
|
||||
tmp.newnetif(net, ["%s/%s" % (prefix.addr(i), prefix.prefixlen)])
|
||||
n.append(tmp)
|
||||
net.link(n[0].netif(0), n[-1].netif(0))
|
||||
n[0].cmd(["iperf", "-s", "-D"])
|
||||
n[-1].icmd(["iperf", "-t", str(int(testsec)), "-c", str(prefix.addr(1))])
|
||||
n[0].cmd(["killall", "-9", "iperf"])
|
||||
session.shutdown()
|
||||
|
||||
def main():
|
||||
usagestr = "usage: %prog [-h] [options] [args]"
|
||||
parser = optparse.OptionParser(usage = usagestr)
|
||||
|
||||
parser.set_defaults(minnodes = 2)
|
||||
parser.add_option("-m", "--minnodes", dest = "minnodes", type = int,
|
||||
help = "min number of nodes to test; default = %s" %
|
||||
parser.defaults["minnodes"])
|
||||
|
||||
parser.set_defaults(maxnodes = 2)
|
||||
parser.add_option("-n", "--maxnodes", dest = "maxnodes", type = int,
|
||||
help = "max number of nodes to test; default = %s" %
|
||||
parser.defaults["maxnodes"])
|
||||
|
||||
parser.set_defaults(testsec = 10)
|
||||
parser.add_option("-t", "--testsec", dest = "testsec", type = int,
|
||||
help = "test time in seconds; default = %s" %
|
||||
parser.defaults["testsec"])
|
||||
|
||||
parser.set_defaults(nodestep = 1)
|
||||
parser.add_option("-s", "--nodestep", dest = "nodestep", type = int,
|
||||
help = "number of nodes step size; default = %s" %
|
||||
parser.defaults["nodestep"])
|
||||
|
||||
def usage(msg = None, err = 0):
|
||||
sys.stdout.write("\n")
|
||||
if msg:
|
||||
sys.stdout.write(msg + "\n\n")
|
||||
parser.print_help()
|
||||
sys.exit(err)
|
||||
|
||||
# parse command line options
|
||||
(options, args) = parser.parse_args()
|
||||
|
||||
if options.minnodes < 2:
|
||||
usage("invalid min number of nodes: %s" % options.minnodes)
|
||||
if options.maxnodes < options.minnodes:
|
||||
usage("invalid max number of nodes: %s" % options.maxnodes)
|
||||
if options.testsec < 1:
|
||||
usage("invalid test time: %s" % options.testsec)
|
||||
if options.nodestep < 1:
|
||||
usage("invalid node step: %s" % options.nodestep)
|
||||
|
||||
for a in args:
|
||||
sys.stderr.write("ignoring command line argument: '%s'\n" % a)
|
||||
|
||||
start = datetime.datetime.now()
|
||||
|
||||
for i in xrange(options.minnodes, options.maxnodes + 1, options.nodestep):
|
||||
print >> sys.stderr, "%s node test:" % i
|
||||
test(i, options.testsec)
|
||||
print >> sys.stderr, ""
|
||||
|
||||
print >> sys.stderr, \
|
||||
"elapsed time: %s" % (datetime.datetime.now() - start)
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
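wlantest.py runs the iperf client with icmd(), so the measured rate is only shown on the terminal. If the client output were captured instead, the reported bandwidth could be extracted with a small parser; a hedged sketch, assuming the classic iperf 2 summary line (the sample text is illustrative):

import re

def parse_iperf_bps(output):
    # pull "94.2 Mbits/sec" style figures out of iperf 2 client output
    match = re.search(r"([\d.]+)\s+([KMG]?)bits/sec", output)
    if match is None:
        return None
    scale = {"": 1.0, "K": 1e3, "M": 1e6, "G": 1e9}[match.group(2)]
    return float(match.group(1)) * scale

sample = "[  3]  0.0-10.0 sec   112 MBytes  94.2 Mbits/sec"
print parse_iperf_bps(sample)   # 94200000.0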
30
daemon/examples/services/sampleFirewall
Normal file
|
@@ -0,0 +1,30 @@
|
|||
# -------- CUSTOMIZATION REQUIRED --------
|
||||
#
|
||||
# Below are sample iptables firewall rules that you can uncomment and edit.
|
||||
# You can also use ip6tables rules for IPv6.
|
||||
#
|
||||
|
||||
# start by flushing all firewall rules (so this script may be re-run)
|
||||
#iptables -F
|
||||
|
||||
# allow traffic related to established connections
|
||||
#iptables -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
|
||||
|
||||
# allow TCP packets from any source destined for 192.168.1.1
|
||||
#iptables -A INPUT -s 0/0 -i eth0 -d 192.168.1.1 -p TCP -j ACCEPT
|
||||
|
||||
# allow OpenVPN server traffic from eth0
|
||||
#iptables -A INPUT -p udp --dport 1194 -j ACCEPT
|
||||
#iptables -A INPUT -i eth0 -j DROP
|
||||
#iptables -A OUTPUT -p udp --sport 1194 -j ACCEPT
|
||||
#iptables -A OUTPUT -o eth0 -j DROP
|
||||
|
||||
# allow ICMP ping traffic
|
||||
#iptables -A OUTPUT -p icmp --icmp-type echo-request -j ACCEPT
|
||||
#iptables -A INPUT -p icmp --icmp-type echo-reply -j ACCEPT
|
||||
|
||||
# allow SSH traffic
|
||||
#iptables -A INPUT -p tcp -m state --state NEW -m tcp --dport 22 -j ACCEPT
|
||||
|
||||
# drop all other traffic coming in eth0
|
||||
#iptables -A INPUT -i eth0 -j DROP
|
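The rules above are ordinary iptables commands that the service copies onto a node and leaves commented out. Purely as an illustration of extending the sample, a sketch that generates a similar allow-list from Python (the port list and output filename are made up for the example):

ALLOWED_TCP_PORTS = [22, 80, 1194]   # illustrative only

def write_firewall(path="firewall.sh"):
    # emit a flush, a stateful accept, per-port accepts, then a default drop
    lines = ["#!/bin/sh",
             "iptables -F",
             "iptables -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT"]
    for port in ALLOWED_TCP_PORTS:
        lines.append("iptables -A INPUT -p tcp -m tcp --dport %d -j ACCEPT" % port)
    lines.append("iptables -A INPUT -i eth0 -j DROP")
    with open(path, "w") as f:
        f.write("\n".join(lines) + "\n")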
119
daemon/examples/services/sampleIPsec
Normal file
|
@@ -0,0 +1,119 @@
|
|||
# -------- CUSTOMIZATION REQUIRED --------
|
||||
#
|
||||
# The IPsec service builds ESP tunnels between the specified peers using the
|
||||
# racoon IKE keying daemon. You need to provide keys and the addresses of
|
||||
# peers, along with subnets to tunnel.
|
||||
|
||||
# directory containing the certificate and key described below
|
||||
keydir=/etc/core/keys
|
||||
|
||||
# the name used for the "$certname.pem" x509 certificate and
|
||||
# "$certname.key" RSA private key, which can be generated using openssl
|
||||
certname=ipsec1
|
||||
|
||||
# list the public-facing IP addresses, starting with the localhost and followed
|
||||
# by each tunnel peer, separated with a single space
|
||||
tunnelhosts="172.16.0.1AND172.16.0.2 172.16.0.1AND172.16.2.1"
|
||||
|
||||
# Define T<i> where i is the index for each tunnel peer host from
|
||||
# the tunnelhosts list above (0 is localhost).
|
||||
# T<i> is a list of IPsec tunnels with peer i, with a local subnet address
|
||||
# followed by the remote subnet address:
|
||||
# T<i>="<local>AND<remote> <local>AND<remote>"
|
||||
# For example, 172.16.0.0/24 is a local network (behind this node) to be
|
||||
# tunneled and 172.16.2.0/24 is a remote network (behind peer 1)
|
||||
T1="172.16.3.0/24AND172.16.5.0/24"
|
||||
T2="172.16.4.0/24AND172.16.5.0/24 172.16.4.0/24AND172.16.6.0/24"
|
||||
|
||||
# -------- END CUSTOMIZATION --------
|
||||
|
||||
echo "building config $PWD/ipsec.conf..."
|
||||
echo "building config $PWD/ipsec.conf..." > $PWD/ipsec.log
|
||||
|
||||
checkip=0
|
||||
if [ "$(dpkg -l | grep " sipcalc ")" = "" ]; then
|
||||
echo "WARNING: ip validation disabled because package sipcalc not installed
|
||||
" >> $PWD/ipsec.log
|
||||
checkip=1
|
||||
fi
|
||||
|
||||
echo "#!/usr/sbin/setkey -f
|
||||
# Flush the SAD and SPD
|
||||
flush;
|
||||
spdflush;
|
||||
|
||||
# Security policies \
|
||||
" > $PWD/ipsec.conf
|
||||
i=0
|
||||
for hostpair in $tunnelhosts; do
|
||||
i=`expr $i + 1`
|
||||
# parse tunnel host IP
|
||||
thishost=${hostpair%%AND*}
|
||||
peerhost=${hostpair##*AND}
|
||||
if [ $checkip = "0" ] &&
|
||||
[ "$(sipcalc "$thishost" "$peerhost" | grep ERR)" != "" ]; then
|
||||
echo "ERROR: invalid host address $thishost or $peerhost \
|
||||
" >> $PWD/ipsec.log
|
||||
fi
|
||||
# parse the addresses for each tunnel
|
||||
tunnel_list_var_name=T$i
|
||||
eval tunnels="$"$tunnel_list_var_name""
|
||||
for ttunnel in $tunnels; do
|
||||
lclnet=${ttunnel%%AND*}
|
||||
rmtnet=${ttunnel##*AND}
|
||||
if [ $checkip = "0" ] &&
|
||||
[ "$(sipcalc "$lclnet" "$rmtnet"| grep ERR)" != "" ]; then
|
||||
echo "ERROR: invalid tunnel address $lclnet and $rmtnet \
|
||||
" >> $PWD/ipsec.log
|
||||
fi
|
||||
# add tunnel policies
|
||||
echo "
|
||||
spdadd $lclnet $rmtnet any -P out ipsec
|
||||
esp/tunnel/$thishost-$peerhost/require;
|
||||
spdadd $rmtnet $lclnet any -P in ipsec
|
||||
esp/tunnel/$peerhost-$thishost/require; \
|
||||
" >> $PWD/ipsec.conf
|
||||
done
|
||||
done
|
||||
|
||||
echo "building config $PWD/racoon.conf..."
|
||||
if [ ! -e $keydir\/$certname.key ] || [ ! -e $keydir\/$certname.pem ]; then
|
||||
echo "ERROR: missing certification files under $keydir \
|
||||
$certname.key or $certname.pem " >> $PWD/ipsec.log
|
||||
fi
|
||||
echo "
|
||||
path certificate \"$keydir\";
|
||||
listen {
|
||||
adminsock disabled;
|
||||
}
|
||||
remote anonymous
|
||||
{
|
||||
exchange_mode main;
|
||||
certificate_type x509 \"$certname.pem\" \"$certname.key\";
|
||||
ca_type x509 \"ca-cert.pem\";
|
||||
my_identifier asn1dn;
|
||||
peers_identifier asn1dn;
|
||||
|
||||
proposal {
|
||||
encryption_algorithm 3des ;
|
||||
hash_algorithm sha1;
|
||||
authentication_method rsasig ;
|
||||
dh_group modp768;
|
||||
}
|
||||
}
|
||||
sainfo anonymous
|
||||
{
|
||||
pfs_group modp768;
|
||||
lifetime time 1 hour ;
|
||||
encryption_algorithm 3des, blowfish 448, rijndael ;
|
||||
authentication_algorithm hmac_sha1, hmac_md5 ;
|
||||
compression_algorithm deflate ;
|
||||
}
|
||||
" > $PWD/racoon.conf
|
||||
|
||||
# the setkey program is required from the ipsec-tools package
|
||||
echo "running setkey -f $PWD/ipsec.conf..."
|
||||
setkey -f $PWD/ipsec.conf
|
||||
|
||||
echo "running racoon -d -f $PWD/racoon.conf..."
|
||||
racoon -d -f $PWD/racoon.conf -l racoon.log
|
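sampleIPsec packs each address pair into one token with the literal string AND (for example 172.16.0.1AND172.16.0.2) and splits it using ${var%%AND*} and ${var##*AND}. The same split in Python, shown only as a reading aid for the shell above:

def split_pair(token):
    # "172.16.0.1AND172.16.0.2" -> ("172.16.0.1", "172.16.0.2")
    local, _, remote = token.partition("AND")
    return local, remote

tunnelhosts = "172.16.0.1AND172.16.0.2 172.16.0.1AND172.16.2.1"
pairs = [split_pair(t) for t in tunnelhosts.split()]
# pairs == [("172.16.0.1", "172.16.0.2"), ("172.16.0.1", "172.16.2.1")]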
63
daemon/examples/services/sampleVPNClient
Normal file
|
@@ -0,0 +1,63 @@
|
|||
# -------- CUSTOMIZATION REQUIRED --------
|
||||
#
|
||||
# The VPNClient service builds a VPN tunnel to the specified VPN server using
|
||||
# OpenVPN software and a virtual TUN/TAP device.
|
||||
|
||||
# directory containing the certificate and key described below
|
||||
keydir=/etc/core/keys
|
||||
|
||||
# the name used for a "$keyname.crt" certificate and "$keyname.key" private key.
|
||||
keyname=client1
|
||||
|
||||
# the public IP address of the VPN server this client should connect with
|
||||
vpnserver="10.0.2.10"
|
||||
|
||||
# optional next hop for adding a static route to reach the VPN server
|
||||
nexthop="10.0.1.1"
|
||||
|
||||
# --------- END CUSTOMIZATION --------
|
||||
|
||||
# validate addresses
|
||||
if [ "$(dpkg -l | grep " sipcalc ")" = "" ]; then
|
||||
echo "WARNING: ip validation disabled because package sipcalc not installed
|
||||
" > $PWD/vpnclient.log
|
||||
else
|
||||
if [ "$(sipcalc "$vpnserver" "$nexthop" | grep ERR)" != "" ]; then
|
||||
echo "ERROR: invalide address $vpnserver or $nexthop \
|
||||
" > $PWD/vpnclient.log
|
||||
fi
|
||||
fi
|
||||
|
||||
# validate key and certificate files
|
||||
if [ ! -e $keydir\/$keyname.key ] || [ ! -e $keydir\/$keyname.crt ] \
|
||||
|| [ ! -e $keydir\/ca.crt ] || [ ! -e $keydir\/dh1024.pem ]; then
|
||||
echo "ERROR: missing certification or key files under $keydir \
|
||||
$keyname.key or $keyname.crt or ca.crt or dh1024.pem" >> $PWD/vpnclient.log
|
||||
fi
|
||||
|
||||
# if necessary, add a static route for reaching the VPN server IP via the IF
|
||||
vpnservernet=${vpnserver%.*}.0/24
|
||||
if [ "$nexthop" != "" ]; then
|
||||
/sbin/ip route add $vpnservernet via $nexthop
|
||||
fi
|
||||
|
||||
# create openvpn client.conf
|
||||
(
|
||||
cat << EOF
|
||||
client
|
||||
dev tun
|
||||
proto udp
|
||||
remote $vpnserver 1194
|
||||
nobind
|
||||
ca $keydir/ca.crt
|
||||
cert $keydir/$keyname.crt
|
||||
key $keydir/$keyname.key
|
||||
dh $keydir/dh1024.pem
|
||||
cipher AES-256-CBC
|
||||
log $PWD/openvpn-client.log
|
||||
verb 4
|
||||
daemon
|
||||
EOF
|
||||
) > client.conf
|
||||
|
||||
openvpn --config client.conf
|
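The client script derives its /24 route target with the shell expansion ${vpnserver%.*}.0/24. An equivalent Python one-liner, included only to make that expansion explicit:

vpnserver = "10.0.2.10"
# drop the last octet and append ".0/24", like ${vpnserver%.*}.0/24
vpnservernet = vpnserver.rsplit(".", 1)[0] + ".0/24"
assert vpnservernet == "10.0.2.0/24"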
147
daemon/examples/services/sampleVPNServer
Normal file
|
@@ -0,0 +1,147 @@
|
|||
# -------- CUSTOMIZATION REQUIRED --------
|
||||
#
|
||||
# The VPNServer service sets up the OpenVPN server for building VPN tunnels
|
||||
# that allow access via TUN/TAP device to private networks.
|
||||
#
|
||||
# note that the IPForward and DefaultRoute services should be enabled
|
||||
|
||||
# directory containing the certificate and key described below, in addition to
|
||||
# a CA certificate and DH key
|
||||
keydir=/etc/core/keys
|
||||
|
||||
# the name used for a "$keyname.crt" certificate and "$keyname.key" private key.
|
||||
keyname=server2
|
||||
|
||||
# the VPN subnet address from which the client VPN IP (for the TUN/TAP)
|
||||
# will be allocated
|
||||
vpnsubnet=10.0.200.0
|
||||
|
||||
# public IP address of this vpn server (same as VPNClient vpnserver= setting)
|
||||
vpnserver=10.0.2.10
|
||||
|
||||
# optional list of private subnets reachable behind this VPN server
|
||||
# each subnet and next hop is separated by a space
|
||||
# "<subnet1>,<nexthop1> <subnet2>,<nexthop2> ..."
|
||||
privatenets="10.0.11.0,10.0.10.1 10.0.12.0,10.0.10.1"
|
||||
|
||||
# optional list of VPN clients, for statically assigning IP addresses to
|
||||
# clients; also, an optional client subnet can be specified for adding static
|
||||
# routes via the client
|
||||
# Note: VPN addresses x.x.x.0-3 are reserved
|
||||
# "<keyname>,<vpnIP>,<subnetIP> <keyname>,<vpnIP>,<subnetIP> ..."
|
||||
vpnclients="client1KeyFilename,10.0.200.5,10.0.0.0 client2KeyFilename,,"
|
||||
|
||||
# NOTE: you may need to enable the StaticRoutes service on nodes within the
|
||||
# private subnet, in order to have routes back to the client.
|
||||
# /sbin/ip ro add <vpnsubnet>/24 via <vpnServerRemoteInterface>
|
||||
# /sbin/ip ro add <vpnClientSubnet>/24 via <vpnServerRemoteInterface>
|
||||
|
||||
# -------- END CUSTOMIZATION --------
|
||||
|
||||
echo > $PWD/vpnserver.log
|
||||
rm -f -r $PWD/ccd
|
||||
|
||||
# validate key and certificate files
|
||||
if [ ! -e $keydir\/$keyname.key ] || [ ! -e $keydir\/$keyname.crt ] \
|
||||
|| [ ! -e $keydir\/ca.crt ] || [ ! -e $keydir\/dh1024.pem ]; then
|
||||
echo "ERROR: missing certification or key files under $keydir \
|
||||
$keyname.key or $keyname.crt or ca.crt or dh1024.pem" >> $PWD/vpnserver.log
|
||||
fi
|
||||
|
||||
# validate configuration IP addresses
|
||||
checkip=0
|
||||
if [ "$(dpkg -l | grep " sipcalc ")" = "" ]; then
|
||||
echo "WARNING: ip validation disabled because package sipcalc not installed\
|
||||
" >> $PWD/vpnserver.log
|
||||
checkip=1
|
||||
else
|
||||
if [ "$(sipcalc "$vpnsubnet" "$vpnserver" | grep ERR)" != "" ]; then
|
||||
echo "ERROR: invalid vpn subnet or server address \
|
||||
$vpnsubnet or $vpnserver " >> $PWD/vpnserver.log
|
||||
fi
|
||||
fi
|
||||
|
||||
# create client vpn ip pool file
|
||||
(
|
||||
cat << EOF
|
||||
EOF
|
||||
)> $PWD/ippool.txt
|
||||
|
||||
# create server.conf file
|
||||
(
|
||||
cat << EOF
|
||||
# openvpn server config
|
||||
local $vpnserver
|
||||
server $vpnsubnet 255.255.255.0
|
||||
push redirect-gateway def1
|
||||
EOF
|
||||
)> $PWD/server.conf
|
||||
|
||||
# add routes to VPN server private subnets, and push these routes to clients
|
||||
for privatenet in $privatenets; do
|
||||
if [ "$privatenet" != "" ]; then
|
||||
net=${privatenet%%,*}
|
||||
nexthop=${privatenet##*,}
|
||||
if [ $checkip = "0" ] &&
|
||||
[ "$(sipcalc "$net" "$nexthop" | grep ERR)" != "" ]; then
|
||||
echo "ERROR: invalid vpn server private net address \
|
||||
$net or $nexthop " >> $PWD/vpnserver.log
|
||||
fi
|
||||
echo push route $net 255.255.255.0 >> $PWD/server.conf
|
||||
/sbin/ip ro add $net/24 via $nexthop
|
||||
/sbin/ip ro add $vpnsubnet/24 via $nexthop
|
||||
fi
|
||||
done
|
||||
|
||||
# allow subnet through this VPN, one route for each client subnet
|
||||
for client in $vpnclients; do
|
||||
if [ "$client" != "" ]; then
|
||||
cSubnetIP=${client##*,}
|
||||
cVpnIP=${client#*,}
|
||||
cVpnIP=${cVpnIP%%,*}
|
||||
cKeyFilename=${client%%,*}
|
||||
if [ "$cSubnetIP" != "" ]; then
|
||||
if [ $checkip = "0" ] &&
|
||||
[ "$(sipcalc "$cSubnetIP" "$cVpnIP" | grep ERR)" != "" ]; then
|
||||
echo "ERROR: invalid vpn client and subnet address \
|
||||
$cSubnetIP or $cVpnIP " >> $PWD/vpnserver.log
|
||||
fi
|
||||
echo route $cSubnetIP 255.255.255.0 >> $PWD/server.conf
|
||||
if ! test -d $PWD/ccd; then
|
||||
mkdir -p $PWD/ccd
|
||||
echo client-config-dir $PWD/ccd >> $PWD/server.conf
|
||||
fi
|
||||
if test -e $PWD/ccd/$cKeyFilename; then
|
||||
echo iroute $cSubnetIP 255.255.255.0 >> $PWD/ccd/$cKeyFilename
|
||||
else
|
||||
echo iroute $cSubnetIP 255.255.255.0 > $PWD/ccd/$cKeyFilename
|
||||
fi
|
||||
fi
|
||||
if [ "$cVpnIP" != "" ]; then
|
||||
echo $cKeyFilename,$cVpnIP >> $PWD/ippool.txt
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
(
|
||||
cat << EOF
|
||||
keepalive 10 120
|
||||
ca $keydir/ca.crt
|
||||
cert $keydir/$keyname.crt
|
||||
key $keydir/$keyname.key
|
||||
dh $keydir/dh1024.pem
|
||||
cipher AES-256-CBC
|
||||
status /var/log/openvpn-status.log
|
||||
log /var/log/openvpn-server.log
|
||||
ifconfig-pool-linear
|
||||
ifconfig-pool-persist $PWD/ippool.txt
|
||||
port 1194
|
||||
proto udp
|
||||
dev tun
|
||||
verb 4
|
||||
daemon
|
||||
EOF
|
||||
)>> $PWD/server.conf
|
||||
|
||||
# start vpn server
|
||||
openvpn --config server.conf
|
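The server script parses each vpnclients entry of the form <keyname>,<vpnIP>,<subnetIP> with nested ${var%%,*} and ${var##*,} expansions, where the last two fields may be empty. The same split in Python, kept only as a reading aid for the shell above:

def parse_client(entry):
    # "<keyname>,<vpnIP>,<subnetIP>"; vpnIP and subnetIP may be empty
    keyname, vpn_ip, subnet_ip = (entry.split(",") + ["", ""])[:3]
    return keyname, vpn_ip, subnet_ip

vpnclients = "client1KeyFilename,10.0.200.5,10.0.0.0 client2KeyFilename,,"
for entry in vpnclients.split():
    print parse_client(entry)
# ('client1KeyFilename', '10.0.200.5', '10.0.0.0')
# ('client2KeyFilename', '', '')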
45
daemon/examples/stopsession.py
Executable file
|
@@ -0,0 +1,45 @@
|
|||
#!/usr/bin/env python
|
||||
# (c)2010-2012 the Boeing Company
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
# List and stop CORE sessions from the command line.
|
||||
#
|
||||
|
||||
import socket, optparse
|
||||
from core.constants import *
|
||||
from core.api import coreapi
|
||||
|
||||
def main():
|
||||
parser = optparse.OptionParser(usage = "usage: %prog [-l] <sessionid>")
|
||||
parser.add_option("-l", "--list", dest = "list", action = "store_true",
|
||||
help = "list running sessions")
|
||||
(options, args) = parser.parse_args()
|
||||
|
||||
if options.list is True:
|
||||
num = '0'
|
||||
flags = coreapi.CORE_API_STR_FLAG
|
||||
else:
|
||||
num = args[0]
|
||||
flags = coreapi.CORE_API_DEL_FLAG
|
||||
tlvdata = coreapi.CoreSessionTlv.pack(coreapi.CORE_TLV_SESS_NUMBER, num)
|
||||
msg = coreapi.CoreSessionMessage.pack(flags, tlvdata)
|
||||
|
||||
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
||||
sock.connect(('localhost', coreapi.CORE_API_PORT))
|
||||
sock.send(msg)
|
||||
|
||||
# receive and print a session list
|
||||
if options.list is True:
|
||||
hdr = sock.recv(coreapi.CoreMessage.hdrsiz)
|
||||
msgtype, msgflags, msglen = coreapi.CoreMessage.unpackhdr(hdr)
|
||||
data = ""
|
||||
if msglen:
|
||||
data = sock.recv(msglen)
|
||||
msg = coreapi.CoreMessage(msgflags, hdr, data)
|
||||
sessions = msg.gettlv(coreapi.CORE_TLV_SESS_NUMBER)
|
||||
print "sessions:", sessions
|
||||
|
||||
sock.close()
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
339
daemon/ns3/LICENSE
Normal file
|
@@ -0,0 +1,339 @@
|
|||
GNU GENERAL PUBLIC LICENSE
|
||||
Version 2, June 1991
|
||||
|
||||
Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
|
||||
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
Everyone is permitted to copy and distribute verbatim copies
|
||||
of this license document, but changing it is not allowed.
|
||||
|
||||
Preamble
|
||||
|
||||
The licenses for most software are designed to take away your
|
||||
freedom to share and change it. By contrast, the GNU General Public
|
||||
License is intended to guarantee your freedom to share and change free
|
||||
software--to make sure the software is free for all its users. This
|
||||
General Public License applies to most of the Free Software
|
||||
Foundation's software and to any other program whose authors commit to
|
||||
using it. (Some other Free Software Foundation software is covered by
|
||||
the GNU Lesser General Public License instead.) You can apply it to
|
||||
your programs, too.
|
||||
|
||||
When we speak of free software, we are referring to freedom, not
|
||||
price. Our General Public Licenses are designed to make sure that you
|
||||
have the freedom to distribute copies of free software (and charge for
|
||||
this service if you wish), that you receive source code or can get it
|
||||
if you want it, that you can change the software or use pieces of it
|
||||
in new free programs; and that you know you can do these things.
|
||||
|
||||
To protect your rights, we need to make restrictions that forbid
|
||||
anyone to deny you these rights or to ask you to surrender the rights.
|
||||
These restrictions translate to certain responsibilities for you if you
|
||||
distribute copies of the software, or if you modify it.
|
||||
|
||||
For example, if you distribute copies of such a program, whether
|
||||
gratis or for a fee, you must give the recipients all the rights that
|
||||
you have. You must make sure that they, too, receive or can get the
|
||||
source code. And you must show them these terms so they know their
|
||||
rights.
|
||||
|
||||
We protect your rights with two steps: (1) copyright the software, and
|
||||
(2) offer you this license which gives you legal permission to copy,
|
||||
distribute and/or modify the software.
|
||||
|
||||
Also, for each author's protection and ours, we want to make certain
|
||||
that everyone understands that there is no warranty for this free
|
||||
software. If the software is modified by someone else and passed on, we
|
||||
want its recipients to know that what they have is not the original, so
|
||||
that any problems introduced by others will not reflect on the original
|
||||
authors' reputations.
|
||||
|
||||
Finally, any free program is threatened constantly by software
|
||||
patents. We wish to avoid the danger that redistributors of a free
|
||||
program will individually obtain patent licenses, in effect making the
|
||||
program proprietary. To prevent this, we have made it clear that any
|
||||
patent must be licensed for everyone's free use or not licensed at all.
|
||||
|
||||
The precise terms and conditions for copying, distribution and
|
||||
modification follow.
|
||||
|
||||
GNU GENERAL PUBLIC LICENSE
|
||||
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
|
||||
|
||||
0. This License applies to any program or other work which contains
|
||||
a notice placed by the copyright holder saying it may be distributed
|
||||
under the terms of this General Public License. The "Program", below,
|
||||
refers to any such program or work, and a "work based on the Program"
|
||||
means either the Program or any derivative work under copyright law:
|
||||
that is to say, a work containing the Program or a portion of it,
|
||||
either verbatim or with modifications and/or translated into another
|
||||
language. (Hereinafter, translation is included without limitation in
|
||||
the term "modification".) Each licensee is addressed as "you".
|
||||
|
||||
Activities other than copying, distribution and modification are not
|
||||
covered by this License; they are outside its scope. The act of
|
||||
running the Program is not restricted, and the output from the Program
|
||||
is covered only if its contents constitute a work based on the
|
||||
Program (independent of having been made by running the Program).
|
||||
Whether that is true depends on what the Program does.
|
||||
|
||||
1. You may copy and distribute verbatim copies of the Program's
|
||||
source code as you receive it, in any medium, provided that you
|
||||
conspicuously and appropriately publish on each copy an appropriate
|
||||
copyright notice and disclaimer of warranty; keep intact all the
|
||||
notices that refer to this License and to the absence of any warranty;
|
||||
and give any other recipients of the Program a copy of this License
|
||||
along with the Program.
|
||||
|
||||
You may charge a fee for the physical act of transferring a copy, and
|
||||
you may at your option offer warranty protection in exchange for a fee.
|
||||
|
||||
2. You may modify your copy or copies of the Program or any portion
|
||||
of it, thus forming a work based on the Program, and copy and
|
||||
distribute such modifications or work under the terms of Section 1
|
||||
above, provided that you also meet all of these conditions:
|
||||
|
||||
a) You must cause the modified files to carry prominent notices
|
||||
stating that you changed the files and the date of any change.
|
||||
|
||||
b) You must cause any work that you distribute or publish, that in
|
||||
whole or in part contains or is derived from the Program or any
|
||||
part thereof, to be licensed as a whole at no charge to all third
|
||||
parties under the terms of this License.
|
||||
|
||||
c) If the modified program normally reads commands interactively
|
||||
when run, you must cause it, when started running for such
|
||||
interactive use in the most ordinary way, to print or display an
|
||||
announcement including an appropriate copyright notice and a
|
||||
notice that there is no warranty (or else, saying that you provide
|
||||
a warranty) and that users may redistribute the program under
|
||||
these conditions, and telling the user how to view a copy of this
|
||||
License. (Exception: if the Program itself is interactive but
|
||||
does not normally print such an announcement, your work based on
|
||||
the Program is not required to print an announcement.)
|
||||
|
||||
These requirements apply to the modified work as a whole. If
|
||||
identifiable sections of that work are not derived from the Program,
|
||||
and can be reasonably considered independent and separate works in
|
||||
themselves, then this License, and its terms, do not apply to those
|
||||
sections when you distribute them as separate works. But when you
|
||||
distribute the same sections as part of a whole which is a work based
|
||||
on the Program, the distribution of the whole must be on the terms of
|
||||
this License, whose permissions for other licensees extend to the
|
||||
entire whole, and thus to each and every part regardless of who wrote it.
|
||||
|
||||
Thus, it is not the intent of this section to claim rights or contest
|
||||
your rights to work written entirely by you; rather, the intent is to
|
||||
exercise the right to control the distribution of derivative or
|
||||
collective works based on the Program.
|
||||
|
||||
In addition, mere aggregation of another work not based on the Program
|
||||
with the Program (or with a work based on the Program) on a volume of
|
||||
a storage or distribution medium does not bring the other work under
|
||||
the scope of this License.
|
||||
|
||||
3. You may copy and distribute the Program (or a work based on it,
|
||||
under Section 2) in object code or executable form under the terms of
|
||||
Sections 1 and 2 above provided that you also do one of the following:
|
||||
|
||||
a) Accompany it with the complete corresponding machine-readable
|
||||
source code, which must be distributed under the terms of Sections
|
||||
1 and 2 above on a medium customarily used for software interchange; or,
|
||||
|
||||
b) Accompany it with a written offer, valid for at least three
|
||||
years, to give any third party, for a charge no more than your
|
||||
cost of physically performing source distribution, a complete
|
||||
machine-readable copy of the corresponding source code, to be
|
||||
distributed under the terms of Sections 1 and 2 above on a medium
|
||||
customarily used for software interchange; or,
|
||||
|
||||
c) Accompany it with the information you received as to the offer
|
||||
to distribute corresponding source code. (This alternative is
|
||||
allowed only for noncommercial distribution and only if you
|
||||
received the program in object code or executable form with such
|
||||
an offer, in accord with Subsection b above.)
|
||||
|
||||
The source code for a work means the preferred form of the work for
|
||||
making modifications to it. For an executable work, complete source
|
||||
code means all the source code for all modules it contains, plus any
|
||||
associated interface definition files, plus the scripts used to
|
||||
control compilation and installation of the executable. However, as a
|
||||
special exception, the source code distributed need not include
|
||||
anything that is normally distributed (in either source or binary
|
||||
form) with the major components (compiler, kernel, and so on) of the
|
||||
operating system on which the executable runs, unless that component
|
||||
itself accompanies the executable.
|
||||
|
||||
If distribution of executable or object code is made by offering
|
||||
access to copy from a designated place, then offering equivalent
|
||||
access to copy the source code from the same place counts as
|
||||
distribution of the source code, even though third parties are not
|
||||
compelled to copy the source along with the object code.
|
||||
|
||||
4. You may not copy, modify, sublicense, or distribute the Program
|
||||
except as expressly provided under this License. Any attempt
|
||||
otherwise to copy, modify, sublicense or distribute the Program is
|
||||
void, and will automatically terminate your rights under this License.
|
||||
However, parties who have received copies, or rights, from you under
|
||||
this License will not have their licenses terminated so long as such
|
||||
parties remain in full compliance.
|
||||
|
||||
5. You are not required to accept this License, since you have not
|
||||
signed it. However, nothing else grants you permission to modify or
|
||||
distribute the Program or its derivative works. These actions are
|
||||
prohibited by law if you do not accept this License. Therefore, by
|
||||
modifying or distributing the Program (or any work based on the
|
||||
Program), you indicate your acceptance of this License to do so, and
|
||||
all its terms and conditions for copying, distributing or modifying
|
||||
the Program or works based on it.
|
||||
|
||||
6. Each time you redistribute the Program (or any work based on the
|
||||
Program), the recipient automatically receives a license from the
|
||||
original licensor to copy, distribute or modify the Program subject to
|
||||
these terms and conditions. You may not impose any further
|
||||
restrictions on the recipients' exercise of the rights granted herein.
|
||||
You are not responsible for enforcing compliance by third parties to
|
||||
this License.
|
||||
|
||||
7. If, as a consequence of a court judgment or allegation of patent
|
||||
infringement or for any other reason (not limited to patent issues),
|
||||
conditions are imposed on you (whether by court order, agreement or
|
||||
otherwise) that contradict the conditions of this License, they do not
|
||||
excuse you from the conditions of this License. If you cannot
|
||||
distribute so as to satisfy simultaneously your obligations under this
|
||||
License and any other pertinent obligations, then as a consequence you
|
||||
may not distribute the Program at all. For example, if a patent
|
||||
license would not permit royalty-free redistribution of the Program by
|
||||
all those who receive copies directly or indirectly through you, then
|
||||
the only way you could satisfy both it and this License would be to
|
||||
refrain entirely from distribution of the Program.
|
||||
|
||||
If any portion of this section is held invalid or unenforceable under
|
||||
any particular circumstance, the balance of the section is intended to
|
||||
apply and the section as a whole is intended to apply in other
|
||||
circumstances.
|
||||
|
||||
It is not the purpose of this section to induce you to infringe any
|
||||
patents or other property right claims or to contest validity of any
|
||||
such claims; this section has the sole purpose of protecting the
|
||||
integrity of the free software distribution system, which is
|
||||
implemented by public license practices. Many people have made
|
||||
generous contributions to the wide range of software distributed
|
||||
through that system in reliance on consistent application of that
|
||||
system; it is up to the author/donor to decide if he or she is willing
|
||||
to distribute software through any other system and a licensee cannot
|
||||
impose that choice.
|
||||
|
||||
This section is intended to make thoroughly clear what is believed to
|
||||
be a consequence of the rest of this License.
|
||||
|
||||
8. If the distribution and/or use of the Program is restricted in
|
||||
certain countries either by patents or by copyrighted interfaces, the
|
||||
original copyright holder who places the Program under this License
|
||||
may add an explicit geographical distribution limitation excluding
|
||||
those countries, so that distribution is permitted only in or among
|
||||
countries not thus excluded. In such case, this License incorporates
|
||||
the limitation as if written in the body of this License.
|
||||
|
||||
9. The Free Software Foundation may publish revised and/or new versions
|
||||
of the General Public License from time to time. Such new versions will
|
||||
be similar in spirit to the present version, but may differ in detail to
|
||||
address new problems or concerns.
|
||||
|
||||
Each version is given a distinguishing version number. If the Program
|
||||
specifies a version number of this License which applies to it and "any
|
||||
later version", you have the option of following the terms and conditions
|
||||
either of that version or of any later version published by the Free
|
||||
Software Foundation. If the Program does not specify a version number of
|
||||
this License, you may choose any version ever published by the Free Software
|
||||
Foundation.
|
||||
|
||||
10. If you wish to incorporate parts of the Program into other free
|
||||
programs whose distribution conditions are different, write to the author
|
||||
to ask for permission. For software which is copyrighted by the Free
|
||||
Software Foundation, write to the Free Software Foundation; we sometimes
|
||||
make exceptions for this. Our decision will be guided by the two goals
|
||||
of preserving the free status of all derivatives of our free software and
|
||||
of promoting the sharing and reuse of software generally.
|
||||
|
||||
NO WARRANTY
|
||||
|
||||
11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
|
||||
FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
|
||||
OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
|
||||
PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
|
||||
OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
|
||||
TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
|
||||
PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
|
||||
REPAIR OR CORRECTION.
|
||||
|
||||
12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
||||
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
|
||||
REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
|
||||
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
|
||||
OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
|
||||
TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
|
||||
YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
|
||||
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
|
||||
POSSIBILITY OF SUCH DAMAGES.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
How to Apply These Terms to Your New Programs
|
||||
|
||||
If you develop a new program, and you want it to be of the greatest
|
||||
possible use to the public, the best way to achieve this is to make it
|
||||
free software which everyone can redistribute and change under these terms.
|
||||
|
||||
To do so, attach the following notices to the program. It is safest
|
||||
to attach them to the start of each source file to most effectively
|
||||
convey the exclusion of warranty; and each file should have at least
|
||||
the "copyright" line and a pointer to where the full notice is found.
|
||||
|
||||
<one line to give the program's name and a brief idea of what it does.>
|
||||
Copyright (C) <year> <name of author>
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License along
|
||||
with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
Also add information on how to contact you by electronic and paper mail.
|
||||
|
||||
If the program is interactive, make it output a short notice like this
|
||||
when it starts in an interactive mode:
|
||||
|
||||
Gnomovision version 69, Copyright (C) year name of author
|
||||
Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
|
||||
This is free software, and you are welcome to redistribute it
|
||||
under certain conditions; type `show c' for details.
|
||||
|
||||
The hypothetical commands `show w' and `show c' should show the appropriate
|
||||
parts of the General Public License. Of course, the commands you use may
|
||||
be called something other than `show w' and `show c'; they could even be
|
||||
mouse-clicks or menu items--whatever suits your program.
|
||||
|
||||
You should also get your employer (if you work as a programmer) or your
|
||||
school, if any, to sign a "copyright disclaimer" for the program, if
|
||||
necessary. Here is a sample; alter the names:
|
||||
|
||||
Yoyodyne, Inc., hereby disclaims all copyright interest in the program
|
||||
`Gnomovision' (which makes passes at compilers) written by James Hacker.
|
||||
|
||||
<signature of Ty Coon>, 1 April 1989
|
||||
Ty Coon, President of Vice
|
||||
|
||||
This General Public License does not permit incorporating your program into
|
||||
proprietary programs. If your program is a subroutine library, you may
|
||||
consider it more useful to permit linking proprietary applications with the
|
||||
library. If this is what you want to do, use the GNU Lesser General
|
||||
Public License instead of this License.
|
40
daemon/ns3/Makefile.am
Executable file
|
@@ -0,0 +1,40 @@
|
|||
# CORE
|
||||
# (c)2012 the Boeing Company.
|
||||
# See the LICENSE file included in this directory.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
# Makefile for building corens3 components.
|
||||
#
|
||||
|
||||
# Python package build
|
||||
noinst_SCRIPTS = build
|
||||
build:
|
||||
$(PYTHON) setup.py build
|
||||
|
||||
# Python package install
|
||||
install-exec-hook:
|
||||
CORE_CONF_DIR=${DESTDIR}/${CORE_CONF_DIR} $(PYTHON) setup.py install --prefix=${DESTDIR}/${prefix} --install-purelib=${DESTDIR}/${pythondir} --install-platlib=${DESTDIR}/${pyexecdir} --no-compile
|
||||
|
||||
# Python package uninstall
|
||||
uninstall-hook:
|
||||
rm -f ${pythondir}/corens3_python-${COREDPY_VERSION}-py${PYTHON_VERSION}.egg-info
|
||||
rm -rf ${pythondir}/corens3
|
||||
|
||||
# Python package cleanup
|
||||
clean-local:
|
||||
-rm -rf build
|
||||
|
||||
# Python RPM package
|
||||
rpm:
|
||||
$(PYTHON) setup.py bdist_rpm
|
||||
|
||||
# because we include entire directories with EXTRA_DIST, we need to clean up
|
||||
# the source control files
|
||||
dist-hook:
|
||||
rm -rf `find $(distdir)/ -name .svn` `find $(distdir)/ -name '*.pyc'`
|
||||
|
||||
DISTCLEANFILES = Makefile.in *.pyc corens3/*.pyc MANIFEST
|
||||
|
||||
# files to include with distribution tarball
|
||||
EXTRA_DIST = LICENSE setup.py corens3 examples
|
22
daemon/ns3/corens3/__init__.py
Normal file
|
@@ -0,0 +1,22 @@
|
|||
# Copyright (c)2011-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this directory.
|
||||
|
||||
"""corens3
|
||||
|
||||
Python package containing CORE components for use
|
||||
with the ns-3 simulator.
|
||||
|
||||
See http://cs.itd.nrl.navy.mil/work/core/ and
|
||||
http://code.google.com/p/coreemu/ for more information on CORE.
|
||||
|
||||
Pieces can be imported individually, for example
|
||||
|
||||
import corens3
|
||||
|
||||
or everything listed in __all__ can be imported using
|
||||
|
||||
from corens3 import *
|
||||
"""
|
||||
|
||||
__all__ = []
|
||||
|
18
daemon/ns3/corens3/constants.py.in
Normal file
|
@@ -0,0 +1,18 @@
|
|||
# Constants created by autoconf ./configure script
|
||||
COREDPY_VERSION = "@COREDPY_VERSION@"
|
||||
CORE_STATE_DIR = "@CORE_STATE_DIR@"
|
||||
CORE_CONF_DIR = "@CORE_CONF_DIR@"
|
||||
CORE_DATA_DIR = "@CORE_DATA_DIR@"
|
||||
CORE_LIB_DIR = "@CORE_LIB_DIR@"
|
||||
CORE_SBIN_DIR = "@SBINDIR@"
|
||||
|
||||
BRCTL_BIN = "@brctl_path@/brctl"
|
||||
IP_BIN = "@ip_path@/ip"
|
||||
TC_BIN = "@tc_path@/tc"
|
||||
EBTABLES_BIN = "@ebtables_path@/ebtables"
|
||||
IFCONFIG_BIN = "@ifconfig_path@/ifconfig"
|
||||
NGCTL_BIN = "@ngctl_path@/ngctl"
|
||||
VIMAGE_BIN = "@vimage_path@/vimage"
|
||||
QUAGGA_STATE_DIR = "@CORE_STATE_DIR@/run/quagga"
|
||||
MOUNT_BIN = "@mount_path@/mount"
|
||||
UMOUNT_BIN = "@umount_path@/umount"
|
503
daemon/ns3/corens3/obj.py
Normal file
|
@@ -0,0 +1,503 @@
|
|||
#
|
||||
# CORE
|
||||
# Copyright (c)2011-2013 the Boeing Company.
|
||||
# See the LICENSE file included in this directory.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
'''
|
||||
ns3.py: defines classes for running emulations with ns-3 simulated networks.
|
||||
'''
|
||||
|
||||
import sys, os, threading, time
|
||||
|
||||
from core.netns.nodes import CoreNode
|
||||
from core.coreobj import PyCoreNet
|
||||
from core.session import Session
|
||||
from core.misc import ipaddr
|
||||
from core.constants import *
|
||||
from core.misc.utils import maketuple, check_call
|
||||
from core.api import coreapi
|
||||
from core.mobility import WayPointMobility
|
||||
|
||||
try:
|
||||
import ns.core
|
||||
except Exception, e:
|
||||
print "Could not locate the ns-3 Python bindings!"
|
||||
print "Try running again from within the ns-3 './waf shell'\n"
|
||||
raise Exception, e
|
||||
import ns.lte
|
||||
import ns.mobility
|
||||
import ns.network
|
||||
import ns.internet
|
||||
import ns.tap_bridge
|
||||
import ns.wifi
|
||||
import ns.wimax
|
||||
|
||||
|
||||
ns.core.GlobalValue.Bind("SimulatorImplementationType",
|
||||
ns.core.StringValue("ns3::RealtimeSimulatorImpl"))
|
||||
ns.core.GlobalValue.Bind("ChecksumEnabled", ns.core.BooleanValue("true"))
|
||||
|
||||
class CoreNs3Node(CoreNode, ns.network.Node):
|
||||
''' The CoreNs3Node is both a CoreNode backed by a network namespace and
|
||||
an ns-3 Node simulator object. When linked to simulated networks, the TunTap
|
||||
device will be used.
|
||||
'''
|
||||
def __init__(self, *args, **kwds):
|
||||
ns.network.Node.__init__(self)
|
||||
objid = self.GetId() + 1 # ns-3 ID starts at 0, CORE uses 1
|
||||
if 'objid' not in kwds:
|
||||
kwds['objid'] = objid
|
||||
CoreNode.__init__(self, *args, **kwds)
|
||||
|
||||
def newnetif(self, net = None, addrlist = [], hwaddr = None,
|
||||
ifindex = None, ifname = None):
|
||||
''' Add a network interface. If we are attaching to a CoreNs3Net, this
|
||||
will be a TunTap. Otherwise dispatch to CoreNode.newnetif().
|
||||
'''
|
||||
if not isinstance(net, CoreNs3Net):
|
||||
return CoreNode.newnetif(self, net, addrlist, hwaddr, ifindex,
|
||||
ifname)
|
||||
ifindex = self.newtuntap(ifindex = ifindex, ifname = ifname, net = net)
|
||||
self.attachnet(ifindex, net)
|
||||
netif = self.netif(ifindex)
|
||||
netif.sethwaddr(hwaddr)
|
||||
for addr in maketuple(addrlist):
|
||||
netif.addaddr(addr)
|
||||
|
||||
addrstr = netif.addrlist[0]
|
||||
(addr, mask) = addrstr.split('/')
|
||||
tap = net._tapdevs[netif]
|
||||
tap.SetAttribute("IpAddress",
|
||||
ns.network.Ipv4AddressValue(ns.network.Ipv4Address(addr)))
|
||||
tap.SetAttribute("Netmask",
|
||||
ns.network.Ipv4MaskValue(ns.network.Ipv4Mask("/" + mask)))
|
||||
ns.core.Simulator.Schedule(ns.core.Time('0'), netif.install)
|
||||
return ifindex
|
||||
|
||||
def getns3position(self):
|
||||
''' Return the ns-3 (x, y, z) position of a node.
|
||||
'''
|
||||
try:
|
||||
mm = self.GetObject(ns.mobility.MobilityModel.GetTypeId())
|
||||
pos = mm.GetPosition()
|
||||
return (pos.x, pos.y, pos.z)
|
||||
except AttributeError:
|
||||
self.warn("ns-3 mobility model not found")
|
||||
return (0,0,0)
|
||||
|
||||
def setns3position(self, x, y, z):
|
||||
''' Set the ns-3 (x, y, z) position of a node.
|
||||
'''
|
||||
try:
|
||||
mm = self.GetObject(ns.mobility.MobilityModel.GetTypeId())
|
||||
if z is None:
|
||||
z = 0.0
|
||||
pos = mm.SetPosition(ns.core.Vector(x, y, z))
|
||||
except AttributeError:
|
||||
self.warn("ns-3 mobility model not found, not setting position")
|
||||
|
||||
class CoreNs3Net(PyCoreNet):
|
||||
''' The CoreNs3Net is a helper PyCoreNet object. Networks are represented
|
||||
entirely in simulation with the TunTap device bridging the emulated and
|
||||
simulated worlds.
|
||||
'''
|
||||
apitype = coreapi.CORE_NODE_WLAN
|
||||
linktype = coreapi.CORE_LINK_WIRELESS
|
||||
type = "wlan" # icon used
|
||||
|
||||
def __init__(self, session, objid = None, name = None, verbose = False,
|
||||
start = True, policy = None):
|
||||
PyCoreNet.__init__(self, session, objid, name)
|
||||
self.tapbridge = ns.tap_bridge.TapBridgeHelper()
|
||||
self._ns3devs = {}
|
||||
self._tapdevs = {}
|
||||
|
||||
def attach(self, netif):
|
||||
''' Invoked from netif.attach(). Create a TAP device using the TapBridge
|
||||
object. Call getns3dev() to get model-specific device.
|
||||
'''
|
||||
self._netif[netif] = netif
|
||||
self._linked[netif] = {}
|
||||
ns3dev = self.getns3dev(netif.node)
|
||||
tap = self.tapbridge.Install(netif.node, ns3dev)
|
||||
tap.SetMode(ns.tap_bridge.TapBridge.CONFIGURE_LOCAL)
|
||||
tap.SetAttribute("DeviceName", ns.core.StringValue(netif.localname))
|
||||
self._ns3devs[netif] = ns3dev
|
||||
self._tapdevs[netif] = tap
|
||||
|
||||
def getns3dev(self, node):
|
||||
''' Implement depending on network helper. Install this network onto
|
||||
the given node and return the device. Register the ns3 device into
|
||||
self._ns3devs
|
||||
'''
|
||||
raise NotImplementedError
|
||||
|
||||
def findns3dev(self, node):
|
||||
''' Given a node, return the interface and ns3 device associated with
|
||||
this network.
|
||||
'''
|
||||
for netif in node.netifs():
|
||||
if netif in self._ns3devs:
|
||||
return netif, self._ns3devs[netif]
|
||||
return None, None
|
||||
|
||||
def shutdown(self):
|
||||
''' Session.shutdown() will invoke this.
|
||||
'''
|
||||
pass
|
||||
|
||||
def usecorepositions(self):
|
||||
''' Set position callbacks for interfaces on this net so the CORE GUI
|
||||
can update the ns-3 node position when moved with the mouse.
|
||||
'''
|
||||
for netif in self.netifs():
|
||||
netif.poshook = self.setns3position
|
||||
|
||||
def setns3position(self, netif, x, y, z):
|
||||
#print "setns3position: %s (%s, %s, %s)" % (netif.node.name, x, y, z)
|
||||
netif.node.setns3position(x, y, z)
|
||||
|
||||
|
||||
class Ns3LteNet(CoreNs3Net):
|
||||
def __init__(self, *args, **kwds):
|
||||
''' Uses a LteHelper to create an ns-3 based LTE network.
|
||||
'''
|
||||
CoreNs3Net.__init__(self, *args, **kwds)
|
||||
self.lte = ns.lte.LteHelper()
|
||||
# enhanced NodeB node list
|
||||
self.enbnodes = []
|
||||
self.dlsubchannels = None
|
||||
self.ulsubchannels = None
|
||||
|
||||
def setsubchannels(self, downlink, uplink):
|
||||
''' Set the downlink/uplink subchannels, which are a list of ints.
|
||||
These should be set prior to using CoreNs3Node.newnetif().
|
||||
'''
|
||||
self.dlsubchannels = downlink
|
||||
self.ulsubchannels = uplink
|
||||
|
||||
def setnodeb(self, node):
|
||||
''' Mark the given node as a nodeb (base transceiver station)
|
||||
'''
|
||||
self.enbnodes.append(node)
|
||||
|
||||
def linknodeb(self, node, nodeb, mob, mobb):
|
||||
''' Register user equipment with a nodeb.
|
||||
Optionally install mobility model while we have the ns-3 devs handy.
|
||||
'''
|
||||
(tmp, nodebdev) = self.findns3dev(nodeb)
|
||||
(tmp, dev) = self.findns3dev(node)
|
||||
if nodebdev is None or dev is None:
|
||||
raise KeyError, "ns-3 device for node not found"
|
||||
self.lte.RegisterUeToTheEnb(dev, nodebdev)
|
||||
if mob:
|
||||
self.lte.AddMobility(dev.GetPhy(), mob)
|
||||
if mobb:
|
||||
self.lte.AddDownlinkChannelRealization(mobb, mob, dev.GetPhy())
|
||||
|
||||
def getns3dev(self, node):
|
||||
''' Get the ns3 NetDevice using the LteHelper.
|
||||
'''
|
||||
if node in self.enbnodes:
|
||||
devtype = ns.lte.LteHelper.DEVICE_TYPE_ENODEB
|
||||
else:
|
||||
devtype = ns.lte.LteHelper.DEVICE_TYPE_USER_EQUIPMENT
|
||||
nodes = ns.network.NodeContainer(node)
|
||||
devs = self.lte.Install(nodes, devtype)
|
||||
devs.Get(0).GetPhy().SetDownlinkSubChannels(self.dlsubchannels)
|
||||
devs.Get(0).GetPhy().SetUplinkSubChannels(self.ulsubchannels)
|
||||
return devs.Get(0)
|
||||
|
||||
def attach(self, netif):
|
||||
''' Invoked from netif.attach(). Create a TAP device using the TapBridge
|
||||
object. Call getns3dev() to get model-specific device.
|
||||
'''
|
||||
self._netif[netif] = netif
|
||||
self._linked[netif] = {}
|
||||
ns3dev = self.getns3dev(netif.node)
|
||||
self.tapbridge.SetAttribute("Mode", ns.core.StringValue("UseLocal"))
|
||||
#self.tapbridge.SetAttribute("Mode",
|
||||
# ns.core.IntegerValue(ns.tap_bridge.TapBridge.USE_LOCAL))
|
||||
tap = self.tapbridge.Install(netif.node, ns3dev)
|
||||
#tap.SetMode(ns.tap_bridge.TapBridge.USE_LOCAL)
|
||||
print "using TAP device %s for %s/%s" % \
|
||||
(netif.localname, netif.node.name, netif.name)
|
||||
check_call(['tunctl', '-t', netif.localname, '-n'])
|
||||
#check_call([IP_BIN, 'link', 'set', 'dev', netif.localname, \
|
||||
# 'address', '%s' % netif.hwaddr])
|
||||
check_call([IP_BIN, 'link', 'set', netif.localname, 'up'])
|
||||
tap.SetAttribute("DeviceName", ns.core.StringValue(netif.localname))
|
||||
self._ns3devs[netif] = ns3dev
|
||||
self._tapdevs[netif] = tap
|
||||
|
||||
class Ns3WifiNet(CoreNs3Net):
|
||||
def __init__(self, *args, **kwds):
|
||||
''' Uses a WifiHelper to create an ns-3 based Wifi network.
|
||||
'''
|
||||
rate = kwds.pop('rate', 'OfdmRate54Mbps')
|
||||
CoreNs3Net.__init__(self, *args, **kwds)
|
||||
self.wifi = ns.wifi.WifiHelper().Default()
|
||||
self.wifi.SetStandard(ns.wifi.WIFI_PHY_STANDARD_80211a)
|
||||
self.wifi.SetRemoteStationManager("ns3::ConstantRateWifiManager",
|
||||
"DataMode",
|
||||
ns.core.StringValue(rate),
|
||||
"NonUnicastMode",
|
||||
ns.core.StringValue(rate))
|
||||
self.mac = ns.wifi.NqosWifiMacHelper.Default()
|
||||
self.mac.SetType("ns3::AdhocWifiMac")
|
||||
|
||||
channel = ns.wifi.YansWifiChannelHelper.Default()
|
||||
self.phy = ns.wifi.YansWifiPhyHelper.Default()
|
||||
self.phy.SetChannel(channel.Create())
|
||||
|
||||
def getns3dev(self, node):
|
||||
''' Get the ns3 NetDevice using the WifiHelper.
|
||||
'''
|
||||
devs = self.wifi.Install(self.phy, self.mac, node)
|
||||
return devs.Get(0)
|
||||
|
||||
|
||||
class Ns3WimaxNet(CoreNs3Net):
|
||||
def __init__(self, *args, **kwds):
|
||||
CoreNs3Net.__init__(self, *args, **kwds)
|
||||
self.wimax = ns.wimax.WimaxHelper()
|
||||
self.scheduler = ns.wimax.WimaxHelper.SCHED_TYPE_SIMPLE
|
||||
self.phy = ns.wimax.WimaxHelper.SIMPLE_PHY_TYPE_OFDM
|
||||
# base station node list
|
||||
self.bsnodes = []
|
||||
|
||||
def setbasestation(self, node):
|
||||
self.bsnodes.append(node)
|
||||
|
||||
def getns3dev(self, node):
|
||||
if node in self.bsnodes:
|
||||
devtype = ns.wimax.WimaxHelper.DEVICE_TYPE_BASE_STATION
|
||||
else:
|
||||
devtype = ns.wimax.WimaxHelper.DEVICE_TYPE_SUBSCRIBER_STATION
|
||||
nodes = ns.network.NodeContainer(node)
|
||||
devs = self.wimax.Install(nodes, devtype, self.phy, self.scheduler)
|
||||
if node not in self.bsnodes:
|
||||
devs.Get(0).SetModulationType(ns.wimax.WimaxPhy.MODULATION_TYPE_QAM16_12)
|
||||
# debug
|
||||
self.wimax.EnableAscii("wimax-device-%s" % node.name, devs)
|
||||
return devs.Get(0)
|
||||
|
||||
@staticmethod
|
||||
def ipv4netifaddr(netif):
|
||||
for addr in netif.addrlist:
|
||||
if ':' in addr:
|
||||
continue # skip ipv6
|
||||
ip = ns.network.Ipv4Address(addr.split('/')[0])
|
||||
mask = ns.network.Ipv4Mask('/' + addr.split('/')[1])
|
||||
return (ip, mask)
|
||||
return (None, None)
|
||||
|
||||
|
||||
def addflow(self, node1, node2, upclass, downclass):
|
||||
''' Add a Wimax service flow between two nodes.
|
||||
'''
|
||||
(netif1, ns3dev1) = self.findns3dev(node1)
|
||||
(netif2, ns3dev2) = self.findns3dev(node2)
|
||||
if not netif1 or not netif2:
|
||||
raise ValueError, "interface not found"
|
||||
(addr1, mask1) = self.ipv4netifaddr(netif1)
|
||||
(addr2, mask2) = self.ipv4netifaddr(netif2)
|
||||
clargs1 = (addr1, mask1, addr2, mask2) + downclass
|
||||
clargs2 = (addr2, mask2, addr1, mask1) + upclass
|
||||
clrec1 = ns.wimax.IpcsClassifierRecord(*clargs1)
|
||||
clrec2 = ns.wimax.IpcsClassifierRecord(*clargs2)
|
||||
ns3dev1.AddServiceFlow( \
|
||||
self.wimax.CreateServiceFlow(ns.wimax.ServiceFlow.SF_DIRECTION_DOWN,
|
||||
ns.wimax.ServiceFlow.SF_TYPE_RTPS, clrec1))
|
||||
ns3dev1.AddServiceFlow( \
|
||||
self.wimax.CreateServiceFlow(ns.wimax.ServiceFlow.SF_DIRECTION_UP,
|
||||
ns.wimax.ServiceFlow.SF_TYPE_RTPS, clrec2))
|
||||
ns3dev2.AddServiceFlow( \
|
||||
self.wimax.CreateServiceFlow(ns.wimax.ServiceFlow.SF_DIRECTION_DOWN,
|
||||
ns.wimax.ServiceFlow.SF_TYPE_RTPS, clrec2))
|
||||
ns3dev2.AddServiceFlow( \
|
||||
self.wimax.CreateServiceFlow(ns.wimax.ServiceFlow.SF_DIRECTION_UP,
|
||||
ns.wimax.ServiceFlow.SF_TYPE_RTPS, clrec1))
|
||||
|
||||
|
||||
class Ns3Session(Session):
|
||||
''' A Session that starts an ns-3 simulation thread.
|
||||
'''
|
||||
def __init__(self, persistent = False, duration=600):
|
||||
self.duration = duration
|
||||
self.nodes = ns.network.NodeContainer()
|
||||
self.mobhelper = ns.mobility.MobilityHelper()
|
||||
Session.__init__(self, persistent = persistent)
|
||||
|
||||
def run(self, vis=False):
|
||||
''' Run the ns-3 simulation and return the simulator thread.
|
||||
'''
|
||||
def runthread():
|
||||
ns.core.Simulator.Stop(ns.core.Seconds(self.duration))
|
||||
print "running ns-3 simulation for %d seconds" % self.duration
|
||||
if vis:
|
||||
try:
|
||||
import visualizer
|
||||
except ImportError:
|
||||
print "visualizer is not available"
|
||||
ns.core.Simulator.Run()
|
||||
else:
|
||||
visualizer.start()
|
||||
else:
|
||||
ns.core.Simulator.Run()
|
||||
#self.evq.run() # event queue may have WayPointMobility events
|
||||
self.setstate(coreapi.CORE_EVENT_RUNTIME_STATE, info=True,
|
||||
sendevent=True)
|
||||
t = threading.Thread(target = runthread)
|
||||
t.daemon = True
|
||||
t.start()
|
||||
return t
|
||||
|
||||
def shutdown(self):
|
||||
# TODO: the following line tends to segfault ns-3 (and therefore
|
||||
# core-daemon)
|
||||
ns.core.Simulator.Destroy()
|
||||
Session.shutdown(self)
|
||||
|
||||
def addnode(self, name):
|
||||
''' A convenience helper for Session.addobj(), for adding CoreNs3Nodes
|
||||
to this session. Keeps a NodeContainer for later use.
|
||||
'''
|
||||
n = self.addobj(cls = CoreNs3Node, name=name)
|
||||
self.nodes.Add(n)
|
||||
return n
|
||||
|
||||
def setupconstantmobility(self):
|
||||
''' Install a ConstantPositionMobilityModel.
|
||||
'''
|
||||
palloc = ns.mobility.ListPositionAllocator()
|
||||
for i in xrange(self.nodes.GetN()):
|
||||
(x, y, z) = ((100.0 * i) + 50, 200.0, 0.0)
|
||||
palloc.Add(ns.core.Vector(x, y, z))
|
||||
node = self.nodes.Get(i)
|
||||
node.position.set(x, y, z)
|
||||
self.mobhelper.SetPositionAllocator(palloc)
|
||||
self.mobhelper.SetMobilityModel("ns3::ConstantPositionMobilityModel")
|
||||
self.mobhelper.Install(self.nodes)
|
||||
|
||||
def setuprandomwalkmobility(self, bounds, time=10, speed=25.0):
|
||||
''' Set up the random walk mobility model within a bounding box.
|
||||
- bounds is the max (x, y, z) boundary
|
||||
- time is the number of seconds to maintain the current speed
|
||||
and direction
|
||||
- speed is the maximum speed, with node speed randomly chosen
|
||||
from [0, speed]
|
||||
'''
|
||||
(x, y, z) = map(float, bounds)
|
||||
self.mobhelper.SetPositionAllocator("ns3::RandomBoxPositionAllocator",
|
||||
"X",
|
||||
ns.core.StringValue("ns3::UniformRandomVariable[Min=0|Max=%s]" % x),
|
||||
"Y",
|
||||
ns.core.StringValue("ns3::UniformRandomVariable[Min=0|Max=%s]" % y),
|
||||
"Z",
|
||||
ns.core.StringValue("ns3::UniformRandomVariable[Min=0|Max=%s]" % z))
|
||||
self.mobhelper.SetMobilityModel("ns3::RandomWalk2dMobilityModel",
|
||||
"Mode", ns.core.StringValue("Time"),
|
||||
"Time", ns.core.StringValue("%ss" % time),
|
||||
"Speed",
|
||||
ns.core.StringValue("ns3::UniformRandomVariable[Min=0|Max=%s]" \
|
||||
% speed),
|
||||
"Bounds", ns.core.StringValue("0|%s|0|%s" % (x, y)))
|
||||
self.mobhelper.Install(self.nodes)
|
||||
|
||||
def startns3mobility(self, refresh_ms=300):
|
||||
''' Start a thread that updates CORE nodes based on their ns-3
|
||||
positions.
|
||||
'''
|
||||
self.setstate(coreapi.CORE_EVENT_INSTANTIATION_STATE)
|
||||
self.mobilitythread = threading.Thread(
|
||||
target=self.ns3mobilitythread,
|
||||
args=(refresh_ms,))
|
||||
self.mobilitythread.daemon = True
|
||||
self.mobilitythread.start()
|
||||
|
||||
    def ns3mobilitythread(self, refresh_ms):
        ''' Thread target that updates CORE nodes every refresh_ms based on
        their ns-3 positions.
        '''
        valid_states = (coreapi.CORE_EVENT_RUNTIME_STATE,
                        coreapi.CORE_EVENT_INSTANTIATION_STATE)
        while self.getstate() in valid_states:
            for i in xrange(self.nodes.GetN()):
                node = self.nodes.Get(i)
                (x, y, z) = node.getns3position()
                if (x, y, z) == node.position.get():
                    continue
                # from WayPointMobility.setnodeposition(node, x, y, z)
                node.position.set(x, y, z)
                msg = node.tonodemsg(flags=0)
                self.broadcastraw(None, msg)
                self.sdt.updatenode(node, flags=0, x=x, y=y, z=z)
            time.sleep(0.001 * refresh_ms)

    def setupmobilitytracing(self, net, filename, nodes, verbose=False):
        ''' Start a tracing thread using the ASCII output from the ns-3
        mobility helper.
        '''
        net.mobility = WayPointMobility(session=self, objid=net.objid,
                                        verbose=verbose, values=None)
        net.mobility.setendtime()
        net.mobility.refresh_ms = 300
        net.mobility.empty_queue_stop = False
        of = ns.network.OutputStreamWrapper(filename, filemode=777)
        self.mobhelper.EnableAsciiAll(of)
        self.mobilitytracethread = threading.Thread(target=self.mobilitytrace,
            args=(net, filename, nodes, verbose))
        self.mobilitytracethread.daemon = True
        self.mobilitytracethread.start()

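    # setupmobilitytracing() wires the MobilityHelper ASCII output to a file
    # and hands it to mobilitytrace() below, which runs as a daemon thread and
    # replays the trace as CORE waypoints.
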
    def mobilitytrace(self, net, filename, nodes, verbose):
        ''' Thread target that tails the mobility trace file and feeds each
        position sample to net.mobility as a waypoint.
        '''
        nodemap = {}
        # move nodes to initial positions
        for node in nodes:
            (x, y, z) = node.getns3position()
            net.mobility.setnodeposition(node, x, y, z)
            nodemap[node.GetId()] = node

        if verbose:
            self.info("mobilitytrace opening '%s'" % filename)
        try:
            f = open(filename)
            f.seek(0, 2)
        except Exception, e:
            self.warn("mobilitytrace error opening '%s': %s" % (filename, e))
            return  # cannot trace without the file
        sleep = 0.001
        kickstart = True
        while True:
            if self.getstate() != coreapi.CORE_EVENT_RUNTIME_STATE:
                break
            line = f.readline()
            if not line:
                time.sleep(sleep)
                if sleep < 1.0:
                    sleep += 0.001
                continue
            sleep = 0.001
            items = dict(map(lambda x: x.split('='), line.split()))
            if verbose:
                self.info("trace: %s %s %s" % \
                          (items['node'], items['pos'], items['vel']))
            (x, y, z) = map(float, items['pos'].split(':'))
            vel = map(float, items['vel'].split(':'))
            node = nodemap[int(items['node'])]
            net.mobility.addwaypoint(time=0, nodenum=node.objid,
                                     x=x, y=y, z=z, speed=vel)
            if kickstart:
                kickstart = False
                self.evq.add_event(0, net.mobility.start)
                self.evq.run()
            else:
                if net.mobility.state != net.mobility.STATE_RUNNING:
                    net.mobility.state = net.mobility.STATE_RUNNING
                self.evq.add_event(0, net.mobility.runround)

        f.close()

110
daemon/ns3/examples/ns3lte.py
Executable file
@@ -0,0 +1,110 @@
#!/usr/bin/python

# Copyright (c)2011-2012 the Boeing Company.
# See the LICENSE file included in this distribution.
#
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
#
'''
ns3lte.py - This script demonstrates using CORE with the ns-3 LTE model.
*** Note that this script is not currently functional, see notes below. ***
- issues connecting TapBridge with LteNetDevice
'''

import os, sys, time, optparse, datetime, math
try:
    from core import pycore
except ImportError:
    # hack for Fedora autoconf that uses the following pythondir:
    if "/usr/lib/python2.6/site-packages" in sys.path:
        sys.path.append("/usr/local/lib/python2.6/site-packages")
    if "/usr/lib64/python2.6/site-packages" in sys.path:
        sys.path.append("/usr/local/lib64/python2.6/site-packages")
    if "/usr/lib/python2.7/site-packages" in sys.path:
        sys.path.append("/usr/local/lib/python2.7/site-packages")
    if "/usr/lib64/python2.7/site-packages" in sys.path:
        sys.path.append("/usr/local/lib64/python2.7/site-packages")
    from core import pycore

from core.misc import ipaddr
from corens3.obj import Ns3Session, Ns3LteNet
import ns.core
import ns.mobility
import ns.network   # needed for AsciiTraceHelper when running with --verbose

def ltesession(opt):
    ''' Run a test LTE session.
    '''
    session = Ns3Session(persistent=True, duration=opt.duration)
    lte = session.addobj(cls=Ns3LteNet, name="wlan1")
    lte.setsubchannels(range(25), range(50, 100))
    if opt.verbose:
        ascii = ns.network.AsciiTraceHelper()
        stream = ascii.CreateFileStream('/tmp/ns3lte.tr')
        lte.lte.EnableAsciiAll(stream)
        #ns.core.LogComponentEnable("EnbNetDevice", ns.core.LOG_LEVEL_INFO)
        #ns.core.LogComponentEnable("UeNetDevice", ns.core.LOG_LEVEL_INFO)
        #lte.lte.EnableLogComponents()

    prefix = ipaddr.IPv4Prefix("10.0.0.0/16")
    mobb = None
    nodes = []
    for i in xrange(1, opt.numnodes + 1):
        node = session.addnode(name = "n%d" % i)
        mob = ns.mobility.ConstantPositionMobilityModel()
        mob.SetPosition( ns.core.Vector3D(10.0 * i, 0.0, 0.0) )
        if i == 1:
            lte.setnodeb(node) # first node is nodeb
            mobb = mob
        node.newnetif(lte, ["%s/%s" % (prefix.addr(i), prefix.prefixlen)])
        nodes.append(node)
        if i == 1:
            (tmp, ns3dev) = lte.findns3dev(node)
            lte.lte.AddMobility(ns3dev.GetPhy(), mob)
        if i > 1:
            lte.linknodeb(node, nodes[0], mob, mobb)

    session.thread = session.run(vis=opt.visualize)
    return session

def main():
    ''' Main routine when running from command-line.
    '''
    usagestr = "usage: %prog [-h] [options] [args]"
    parser = optparse.OptionParser(usage = usagestr)
    parser.set_defaults(numnodes = 4, duration = 600, verbose = False,
                        visualize = False)

    parser.add_option("-d", "--duration", dest = "duration", type = int,
                      help = "number of seconds to run the simulation")
    parser.add_option("-n", "--numnodes", dest = "numnodes", type = int,
                      help = "number of nodes")
    parser.add_option("-z", "--visualize", dest = "visualize",
                      action = "store_true", help = "enable visualizer")
    parser.add_option("-v", "--verbose", dest = "verbose",
                      action = "store_true", help = "be more verbose")

    def usage(msg = None, err = 0):
        sys.stdout.write("\n")
        if msg:
            sys.stdout.write(msg + "\n\n")
        parser.print_help()
        sys.exit(err)

    (opt, args) = parser.parse_args()

    if opt.numnodes < 2:
        usage("invalid numnodes: %s" % opt.numnodes)

    for a in args:
        sys.stderr.write("ignoring command line argument: '%s'\n" % a)

    return ltesession(opt)

def cleanup():
    print "shutting down session"
    session.shutdown()
    print "joining simulator thread (please kill it)"
    session.thread.join()

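# Note: cleanup() above is not registered or invoked automatically; it appears
# to be intended for manual use (e.g. from an interactive interpreter) to shut
# the session down, since the simulator thread does not exit on its own.
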
if __name__ == "__main__":
    session = main()
122
daemon/ns3/examples/ns3wifi.py
Executable file
@@ -0,0 +1,122 @@
#!/usr/bin/python -i

# Copyright (c)2011-2013 the Boeing Company.
# See the LICENSE file included in this distribution.
#
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
#
'''
ns3wifi.py - This script demonstrates using CORE with the ns-3 Wifi model.

How to run this:

    pushd ~/ns-allinone-3.16/ns-3.16
    sudo ./waf shell
    popd
    python -i ns3wifi.py

To run with the CORE GUI:

    pushd ~/ns-allinone-3.16/ns-3.16
    sudo ./waf shell
    core-daemon

    # in another terminal
    core-daemon -e ./ns3wifi.py
    # in a third terminal
    core
    # now select the running session

'''

import os, sys, time, optparse, datetime, math
try:
    from core import pycore
except ImportError:
    # hack for Fedora autoconf that uses the following pythondir:
    if "/usr/lib/python2.6/site-packages" in sys.path:
        sys.path.append("/usr/local/lib/python2.6/site-packages")
    if "/usr/lib64/python2.6/site-packages" in sys.path:
        sys.path.append("/usr/local/lib64/python2.6/site-packages")
    if "/usr/lib/python2.7/site-packages" in sys.path:
        sys.path.append("/usr/local/lib/python2.7/site-packages")
    if "/usr/lib64/python2.7/site-packages" in sys.path:
        sys.path.append("/usr/local/lib64/python2.7/site-packages")
    from core import pycore

import ns.core
from core.misc import ipaddr
from corens3.obj import Ns3Session, Ns3WifiNet

def add_to_server(session):
    ''' Add this session to the server's list if this script is executed from
    the core-daemon server.
    '''
    global server
    try:
        server.addsession(session)
        return True
    except NameError:
        return False

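# Note: the "server" global used above is only defined when this script is
# executed by the core-daemon (see the docstring); when run standalone, the
# NameError branch simply skips session registration.
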
def wifisession(opt):
    ''' Run a test wifi session.
    '''
    session = Ns3Session(persistent=True, duration=opt.duration)
    session.name = "ns3wifi"
    session.filename = session.name + ".py"
    session.node_count = str(opt.numnodes + 1)
    add_to_server(session)

    wifi = session.addobj(cls=Ns3WifiNet, name="wlan1")
    wifi.setposition(30, 30, 0)
    wifi.phy.Set("RxGain", ns.core.DoubleValue(18.0))

    prefix = ipaddr.IPv4Prefix("10.0.0.0/16")
    nodes = []
    for i in xrange(1, opt.numnodes + 1):
        node = session.addnode(name = "n%d" % i)
        node.newnetif(wifi, ["%s/%s" % (prefix.addr(i), prefix.prefixlen)])
        nodes.append(node)
    session.setupconstantmobility()
    wifi.usecorepositions()
    # PHY tracing
    #wifi.phy.EnableAsciiAll("ns3wifi")
    session.thread = session.run(vis=False)
    return session

def main():
    ''' Main routine when running from command-line.
    '''
    usagestr = "usage: %prog [-h] [options] [args]"
    parser = optparse.OptionParser(usage = usagestr)
    parser.set_defaults(numnodes = 10, duration = 600, verbose = False)

    parser.add_option("-d", "--duration", dest = "duration", type = int,
                      help = "number of seconds to run the simulation")
    parser.add_option("-n", "--numnodes", dest = "numnodes", type = int,
                      help = "number of nodes")
    parser.add_option("-v", "--verbose", dest = "verbose",
                      action = "store_true", help = "be more verbose")

    def usage(msg = None, err = 0):
        sys.stdout.write("\n")
        if msg:
            sys.stdout.write(msg + "\n\n")
        parser.print_help()
        sys.exit(err)

    (opt, args) = parser.parse_args()

    if opt.numnodes < 2:
        usage("invalid numnodes: %s" % opt.numnodes)

    for a in args:
        sys.stderr.write("ignoring command line argument: '%s'\n" % a)

    return wifisession(opt)

if __name__ == "__main__" or __name__ == "__builtin__":
    session = main()
    print "\nsession =", session
131
daemon/ns3/examples/ns3wifirandomwalk.py
Executable file
@@ -0,0 +1,131 @@
#!/usr/bin/python -i

# Copyright (c)2011-2013 the Boeing Company.
# See the LICENSE file included in this distribution.
#
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
#
'''
ns3wifirandomwalk.py - This script demonstrates using CORE with the ns-3 Wifi
model and random walk mobility.
Patterned after the ns-3 example 'main-random-walk.cc'.

How to run this:

    pushd ~/ns-allinone-3.16/ns-3.16
    sudo ./waf shell
    popd
    python -i ns3wifirandomwalk.py

'''

import os, sys, time, optparse, datetime, math, threading
try:
    from core import pycore
except ImportError:
    # hack for Fedora autoconf that uses the following pythondir:
    if "/usr/lib/python2.6/site-packages" in sys.path:
        sys.path.append("/usr/local/lib/python2.6/site-packages")
    if "/usr/lib64/python2.6/site-packages" in sys.path:
        sys.path.append("/usr/local/lib64/python2.6/site-packages")
    if "/usr/lib/python2.7/site-packages" in sys.path:
        sys.path.append("/usr/local/lib/python2.7/site-packages")
    if "/usr/lib64/python2.7/site-packages" in sys.path:
        sys.path.append("/usr/local/lib64/python2.7/site-packages")
    from core import pycore

import ns.core
import ns.network
from core.api import coreapi
from core.misc import ipaddr
from corens3.obj import Ns3Session, Ns3WifiNet

def add_to_server(session):
    ''' Add this session to the server's list if this script is executed from
    the core-daemon server.
    '''
    global server
    try:
        server.addsession(session)
        return True
    except NameError:
        return False

def wifisession(opt):
    ''' Run a random walk wifi session.
    '''
    session = Ns3Session(persistent=True, duration=opt.duration)
    session.name = "ns3wifirandomwalk"
    session.filename = session.name + ".py"
    session.node_count = str(opt.numnodes + 1)
    add_to_server(session)
    wifi = session.addobj(cls=Ns3WifiNet, name="wlan1", rate="OfdmRate12Mbps")
    wifi.setposition(30, 30, 0)
    # for improved connectivity
    wifi.phy.Set("RxGain", ns.core.DoubleValue(18.0))

    prefix = ipaddr.IPv4Prefix("10.0.0.0/16")
    services_str = "zebra|OSPFv3MDR|vtysh|IPForward"
    nodes = []
    for i in xrange(1, opt.numnodes + 1):
        node = session.addnode(name = "n%d" % i)
        node.newnetif(wifi, ["%s/%s" % (prefix.addr(i), prefix.prefixlen)])
        nodes.append(node)
        session.services.addservicestonode(node, "router", services_str,
                                           opt.verbose)
        session.services.bootnodeservices(node)
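    # each node boots the routing services listed in services_str above
    # (zebra, OSPFv3 MDR, vtysh, IPForward) before mobility is installed below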
    session.setuprandomwalkmobility(bounds=(1000.0, 750.0, 0))

    # PHY tracing
    #wifi.phy.EnableAsciiAll("ns3wifirandomwalk")

    # mobility tracing
    #session.setupmobilitytracing(wifi, "ns3wifirandomwalk.mob.tr",
    #                             nodes, verbose=True)
    session.startns3mobility(refresh_ms=150)

    # start simulation
    # session.instantiate() ?
    session.thread = session.run(vis=opt.viz)
    return session

def main():
    ''' Main routine when running from command-line.
    '''
    usagestr = "usage: %prog [-h] [options] [args]"
    parser = optparse.OptionParser(usage = usagestr)
    parser.set_defaults(numnodes = 5, duration = 600, verbose = False,
                        viz = False)

    parser.add_option("-d", "--duration", dest = "duration", type = int,
                      help = "number of seconds to run the simulation")
    parser.add_option("-n", "--numnodes", dest = "numnodes", type = int,
                      help = "number of nodes")
    parser.add_option("-v", "--verbose", dest = "verbose",
                      action = "store_true", help = "be more verbose")
    parser.add_option("-V", "--visualize", dest = "viz",
                      action = "store_true", help = "enable PyViz ns-3 visualizer")

    def usage(msg = None, err = 0):
        sys.stdout.write("\n")
        if msg:
            sys.stdout.write(msg + "\n\n")
        parser.print_help()
        sys.exit(err)

    (opt, args) = parser.parse_args()

    if opt.numnodes < 2:
        usage("invalid numnodes: %s" % opt.numnodes)

    for a in args:
        sys.stderr.write("ignoring command line argument: '%s'\n" % a)

    return wifisession(opt)

if __name__ == "__main__" or __name__ == "__builtin__":
    session = main()
    print "\nsession =", session
95
daemon/ns3/examples/ns3wimax.py
Executable file
@@ -0,0 +1,95 @@
#!/usr/bin/python -i

# Copyright (c)2011-2012 the Boeing Company.
# See the LICENSE file included in this distribution.
#
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
#
'''
ns3wimax.py - This script demonstrates using CORE with the ns-3 Wimax model.
*** Note that this script is not currently functional, see notes below. ***
Current issues:
- large amount of base station chatter; huge trace files, 70% CPU usage
- PCAP files unreadable
- base station causes segfault if it sends a packet, due to missing service
  flows (but AddFlow() is not available for bs devices)
- no packets are sent between nodes - no connection?
'''

import os, sys, time, optparse, datetime, math
try:
    from core import pycore
except ImportError:
    # hack for Fedora autoconf that uses the following pythondir:
    if "/usr/lib/python2.6/site-packages" in sys.path:
        sys.path.append("/usr/local/lib/python2.6/site-packages")
    if "/usr/lib64/python2.6/site-packages" in sys.path:
        sys.path.append("/usr/local/lib64/python2.6/site-packages")
    if "/usr/lib/python2.7/site-packages" in sys.path:
        sys.path.append("/usr/local/lib/python2.7/site-packages")
    if "/usr/lib64/python2.7/site-packages" in sys.path:
        sys.path.append("/usr/local/lib64/python2.7/site-packages")
    from core import pycore

from core.misc import ipaddr
from corens3.obj import Ns3Session, Ns3WimaxNet

def wimaxsession(opt):
    ''' Run a test wimax session.
    '''
    session = Ns3Session(persistent=True, duration=opt.duration)
    wimax = session.addobj(cls=Ns3WimaxNet, name="wlan1")
    #wimax.wimax.EnableLogComponents()

    prefix = ipaddr.IPv4Prefix("10.0.0.0/16")
    # service flow classifier fields:
    #   src port low/high, dst port low/high, protocol, priority
    # an ICMP (protocol 1) classifier is left commented out; the active
    # classifier below matches protocol 17 (UDP)
    #classifier = (0, 65000, 0, 65000, 1, 1)
    classifier = (0, 65000, 0, 65000, 17, 1)
    nodes = []
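    # In the loop below, the first node becomes the base station; nodes after
    # the second get a UDP service flow to the previous node, while the base
    # station itself gets none (see the AddFlow() note in the docstring above).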
    for i in xrange(1, opt.numnodes + 1):
        node = session.addnode(name = "n%d" % i)
        if i == 1:
            wimax.setbasestation(node)
        node.newnetif(wimax, ["%s/%s" % (prefix.addr(i), prefix.prefixlen)])
        if i > 2:
            wimax.addflow(nodes[-1], node, classifier, classifier)
        nodes.append(node)
    session.setupconstantmobility()
    session.thread = session.run(vis=False)
    return session

def main():
    ''' Main routine when running from command-line.
    '''
    usagestr = "usage: %prog [-h] [options] [args]"
    parser = optparse.OptionParser(usage = usagestr)
    parser.set_defaults(numnodes = 3, duration = 600, verbose = False)

    parser.add_option("-d", "--duration", dest = "duration", type = int,
                      help = "number of seconds to run the simulation")
    parser.add_option("-n", "--numnodes", dest = "numnodes", type = int,
                      help = "number of nodes")
    parser.add_option("-v", "--verbose", dest = "verbose",
                      action = "store_true", help = "be more verbose")

    def usage(msg = None, err = 0):
        sys.stdout.write("\n")
        if msg:
            sys.stdout.write(msg + "\n\n")
        parser.print_help()
        sys.exit(err)

    (opt, args) = parser.parse_args()

    if opt.numnodes < 2:
        usage("invalid numnodes: %s" % opt.numnodes)

    for a in args:
        sys.stderr.write("ignoring command line argument: '%s'\n" % a)

    return wimaxsession(opt)

if __name__ == "__main__":
    session = main()
Some files were not shown because too many files have changed in this diff