From 7bbc2aef0ec3862ad0951ed13e29e73e7d35a4f2 Mon Sep 17 00:00:00 2001 From: Tom Goff Date: Thu, 20 Oct 2016 18:06:23 -0400 Subject: [PATCH 01/22] daemon: Remove the SIGPIPE signal handler. Python installs a default signal handler for SIGPIPE that raises an exception. Let core-daemon handle the exception instead of exiting. --- daemon/sbin/core-daemon | 1 - 1 file changed, 1 deletion(-) diff --git a/daemon/sbin/core-daemon b/daemon/sbin/core-daemon index f9126b16..2229c866 100755 --- a/daemon/sbin/core-daemon +++ b/daemon/sbin/core-daemon @@ -156,7 +156,6 @@ def sighandler(signum, stackframe): signal.signal(signal.SIGHUP, sighandler) signal.signal(signal.SIGINT, sighandler) -signal.signal(signal.SIGPIPE, sighandler) signal.signal(signal.SIGTERM, sighandler) signal.signal(signal.SIGUSR1, sighandler) signal.signal(signal.SIGUSR2, sighandler) From 591de2cc9f42702c87b3262de050728800100306 Mon Sep 17 00:00:00 2001 From: Andrew Ahlers Date: Sat, 22 Oct 2016 21:14:16 -0400 Subject: [PATCH 02/22] Converted README to rst --- README => README.rst | 61 +++++++++++++++++++++++++++----------------- 1 file changed, 38 insertions(+), 23 deletions(-) rename README => README.rst (50%) diff --git a/README b/README.rst similarity index 50% rename from README rename to README.rst index c3004eb0..e119377d 100644 --- a/README +++ b/README.rst @@ -1,9 +1,16 @@ +==== +CORE +==== CORE: Common Open Research Emulator + Copyright (c)2005-2013 the Boeing Company. + See the LICENSE file included in this distribution. -== ABOUT ======================================================================= +About +===== + CORE is a tool for emulating networks using a GUI or Python scripts. The CORE project site (1) is a good source of introductory information, with a manual, screenshots, and demos about this software. Also a supplemental @@ -13,49 +20,57 @@ Google Code page (2) hosts a wiki, blog, bug tracker, and quickstart guide. 2. http://code.google.com/p/coreemu/ -== BUILDING CORE =============================================================== +Building CORE +============= To build this software you should use: - ./bootstrap.sh - ./configure - make - sudo make install + ./bootstrap.sh + ./configure + make + sudo make install Here is what is installed with 'make install': - /usr/local/bin/core-gui - /usr/local/sbin/core-daemon - /usr/local/sbin/[vcmd, vnoded, coresendmsg, core-cleanup.sh] - /usr/local/lib/core/* - /usr/local/share/core/* - /usr/local/lib/python2.6/dist-packages/core/* - /usr/local/lib/python2.6/dist-packages/[netns,vcmd].so - /etc/core/* - /etc/init.d/core + /usr/local/bin/core-gui + /usr/local/sbin/core-daemon + /usr/local/sbin/[vcmd, vnoded, coresendmsg, core-cleanup.sh] + /usr/local/lib/core/* + /usr/local/share/core/* + /usr/local/lib/python2.6/dist-packages/core/* + /usr/local/lib/python2.6/dist-packages/[netns,vcmd].so + /etc/core/* + /etc/init.d/core See the manual for the software required for building CORE. -== RUNNING CORE ================================================================ +Running CORE +============ First start the CORE services: - sudo /etc/init.d/core-daemon start + sudo /etc/init.d/core-daemon start This automatically runs the core-daemon program. Assuming the GUI is in your PATH, run the CORE GUI by typing the following: - core-gui + core-gui This launches the CORE GUI. You do not need to run the GUI as root. 
-== SUPPORT ===================================================================== + +Support +======= If you have questions, comments, or trouble, please use the CORE mailing lists: -- core-users for general comments and questions - http://pf.itd.nrl.navy.mil/mailman/listinfo/core-users -- core-dev for bugs, compile errors, and other development issues - http://pf.itd.nrl.navy.mil/mailman/listinfo/core-dev + +- `core-users`_ for general comments and questions + +- `core-dev`_ for bugs, compile errors, and other development issues + + +.. _core-users: http://pf.itd.nrl.navy.mil/mailman/listinfo/core-users +.. _core-dev: http://pf.itd.nrl.navy.mil/mailman/listinfo/core-dev From f80304384ab72a6dace1a0ffa42500ff5c9ccb16 Mon Sep 17 00:00:00 2001 From: Andrew Ahlers Date: Sat, 22 Oct 2016 21:18:17 -0400 Subject: [PATCH 03/22] Added direct link to manual --- README.rst | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/README.rst b/README.rst index e119377d..113fba6b 100644 --- a/README.rst +++ b/README.rst @@ -16,8 +16,13 @@ project site (1) is a good source of introductory information, with a manual, screenshots, and demos about this software. Also a supplemental Google Code page (2) hosts a wiki, blog, bug tracker, and quickstart guide. - 1. http://www.nrl.navy.mil/itd/ncs/products/core - 2. http://code.google.com/p/coreemu/ +1. http://www.nrl.navy.mil/itd/ncs/products/core + +2. http://code.google.com/p/coreemu/ + +3. `Official Documentation`_ + +.. _Official Documentation: http://downloads.pf.itd.nrl.navy.mil/docs/core/core-html/index.html Building CORE From cec880e6e28dd61dbf62b61a68c725384c42ee61 Mon Sep 17 00:00:00 2001 From: Tom Goff Date: Thu, 22 Dec 2016 10:53:23 -0500 Subject: [PATCH 04/22] daemon: Add the EMANE TDMA model to the list of an available models. 
--- daemon/data/core.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/daemon/data/core.conf b/daemon/data/core.conf index 2479eda7..95750f43 100644 --- a/daemon/data/core.conf +++ b/daemon/data/core.conf @@ -57,7 +57,7 @@ emane_platform_port = 8101 emane_transform_port = 8201 emane_event_generate = True emane_event_monitor = False -emane_models = RfPipe, Ieee80211abg, CommEffect, Bypass +emane_models = RfPipe, Ieee80211abg, CommEffect, Bypass, Tdma # EMANE log level range [0,4] default: 2 #emane_log_level = 2 emane_realtime = True From 49c91a2a31bcf92889636b8030f1773448fac2a5 Mon Sep 17 00:00:00 2001 From: Kyle Jackson Date: Wed, 11 Jan 2017 09:24:41 -0500 Subject: [PATCH 05/22] [MRG] DOC typo in EMANE install instructions --- doc/emane.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/emane.rst b/doc/emane.rst index 18cf2243..b4da0c69 100644 --- a/doc/emane.rst +++ b/doc/emane.rst @@ -111,7 +111,7 @@ Here are quick instructions for installing all EMANE packages: :: # install dependencies - sudo apt-get install libssl-dev libxml-lixbml-perl libxml-simple-perl + sudo apt-get install libssl-dev libxml-libxml-perl libxml-simple-perl # download and install EMANE 0.8.1 export URL=http://downloads.pf.itd.nrl.navy.mil/emane/0.8.1-r2 wget $URL/emane-0.8.1-release-2.ubuntu-12_04.amd64.tgz From da33defa411e19cafdb7a36ff798b891afd56093 Mon Sep 17 00:00:00 2001 From: Gabriel Somlo Date: Thu, 12 Jan 2017 14:14:41 -0500 Subject: [PATCH 06/22] systemd: remove limit on fork() branching factor On distributions using systemd, a default limit on the number of tasks allowed to be created from a given unit may be set by default (e.g., on F24, "DefaultTasksAccounting=yes" and "DefaultTasksMax=512" are set in /etc/systemd/system.conf. A large CORE simulation may well exceed that limit, at least during startup when many (sub)shell processes are generated using fork(). To avoid running into this limit, set "TasksMax=infinity" in the core-daemon.service unit file template. Signed-off-by: Gabriel Somlo --- scripts/core-daemon.service.in | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/core-daemon.service.in b/scripts/core-daemon.service.in index 657c160f..b0c4a7f2 100644 --- a/scripts/core-daemon.service.in +++ b/scripts/core-daemon.service.in @@ -6,6 +6,7 @@ After=network.target Type=forking PIDFile=/var/run/core-daemon.pid ExecStart=@PYTHON@ @SBINDIR@/core-daemon -d +TasksMax=infinity [Install] WantedBy=multi-user.target From 6609d2c394d3d64fab939fd489baa6153e0e4d8d Mon Sep 17 00:00:00 2001 From: Gabriel Somlo Date: Thu, 12 Jan 2017 16:40:45 -0500 Subject: [PATCH 07/22] daemon: streamline Quagga startup Currently, all Quagga daemons are started concurrently by their respective CORE services, using "quaggaboot.sh" generated by the 'zebra' service. However, all routing services depend on 'zebra' already running, and 'vtysh' depends on ALL other Quagga services before it can push configuration from the common "Quagga.conf" to all running daemons (see "waitforvtyfiles()" in "quaggaboot.sh"). The spinwait+timeout based implementation of "waitforvtyfiles()" may, depending on load, give up too early and fail to configure all Quagga daemons. This patch streamlines the way Quagga daemons are started, by launching them all from the 'zebra' service. The correct sequence is to first launch the 'zebra' daemon itself, then proceed with all routing daemons, and finish with a call to "vtysh -b" which configures all running daemons. 
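For orientation, the ordering described above can be sketched in a few lines of Python. This is illustrative only -- the actual change implements the logic in the generated quaggaboot.sh shell script shown in the diff below -- and it assumes the daemon binaries are on PATH and that Quagga.conf sits at the usual install path:

    import re
    import subprocess

    QUAGGA_CONF = "/usr/local/etc/quagga/Quagga.conf"  # assumed install path

    def boot_quagga(conf=QUAGGA_CONF):
        # 1. zebra must be running before any routing daemon starts.
        subprocess.check_call(["zebra", "-d"])
        # 2. start only the routing daemons that Quagga.conf actually configures.
        with open(conf) as f:
            text = f.read()
        for proto in ("rip", "ripng", "ospf6", "ospf", "bgp", "babel"):
            if re.search(r"^router %s\b" % proto, text, re.MULTILINE):
                subprocess.check_call(["%sd" % proto, "-d"])  # ripd, ospf6d, bgpd, ...
        # 3. only after every daemon is up, push Quagga.conf to all of them.
        subprocess.check_call(["vtysh", "-b"])
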
The list of all applicable daemons to launch is obtained using 'grep' from Quagga.conf, in the same way "waitforvtyfiles()" used to discover which *.vty files to look for in /var/run/quagga/. The startup command for all services other than 'zebra' becomes empty, and "quaggaboot.sh" issues a warning on attempts to have it launch any other daemon. Signed-off-by: Gabriel Somlo --- daemon/core/services/quagga.py | 67 ++++++++++++---------------------- 1 file changed, 23 insertions(+), 44 deletions(-) diff --git a/daemon/core/services/quagga.py b/daemon/core/services/quagga.py index d045a502..e2892cdc 100644 --- a/daemon/core/services/quagga.py +++ b/daemon/core/services/quagga.py @@ -170,21 +170,6 @@ confcheck() fi } -waitforvtyfiles() -{ - for f in "$@"; do - count=1 - until [ -e $QUAGGA_STATE_DIR/$f ]; do - if [ $count -eq 10 ]; then - echo "ERROR: vty file not found: $QUAGGA_STATE_DIR/$f" >&2 - return 1 - fi - sleep 0.1 - count=$(($count + 1)) - done - done -} - bootdaemon() { QUAGGA_SBIN_DIR=$(searchforprog $1 $QUAGGA_SBIN_SEARCH) @@ -196,53 +181,47 @@ bootdaemon() flags="" - if [ "$1" != "zebra" ]; then - waitforvtyfiles zebra.vty - fi - if [ "$1" = "xpimd" ] && \\ grep -E -q '^[[:space:]]*router[[:space:]]+pim6[[:space:]]*$' $QUAGGA_CONF; then flags="$flags -6" fi $QUAGGA_SBIN_DIR/$1 $flags -u $QUAGGA_USER -g $QUAGGA_GROUP -d + if [ "$?" != "0" ]; then + echo "ERROR: Quagga's '$1' daemon failed to start!:" + return 1 + fi } -bootvtysh() +bootquagga() { - QUAGGA_BIN_DIR=$(searchforprog $1 $QUAGGA_BIN_SEARCH) + QUAGGA_BIN_DIR=$(searchforprog 'vtysh' $QUAGGA_BIN_SEARCH) if [ "z$QUAGGA_BIN_DIR" = "z" ]; then - echo "ERROR: Quagga's '$1' daemon not found in search path:" - echo " $QUAGGA_SBIN_SEARCH" + echo "ERROR: Quagga's 'vtysh' program not found in search path:" + echo " $QUAGGA_BIN_SEARCH" return 1 fi - vtyfiles="zebra.vty" + bootdaemon "zebra" for r in rip ripng ospf6 ospf bgp babel; do if grep -q "^router \<${r}\>" $QUAGGA_CONF; then - vtyfiles="$vtyfiles ${r}d.vty" + bootdaemon "${r}d" fi done if grep -E -q '^[[:space:]]*router[[:space:]]+pim6?[[:space:]]*$' $QUAGGA_CONF; then - vtyfiles="$vtyfiles xpimd.vty" + bootdaemon "xpimd" fi - # wait for Quagga daemon vty files to appear before invoking vtysh - waitforvtyfiles $vtyfiles - $QUAGGA_BIN_DIR/vtysh -b } -confcheck -if [ "x$1" = "x" ]; then - echo "ERROR: missing the name of the Quagga daemon to boot" +if [ "$1" != "zebra" ]; then + echo "WARNING: '$1': all Quagga daemons are launched by the 'zebra' service!" exit 1 -elif [ "$1" = "vtysh" ]; then - bootvtysh $1 -else - bootdaemon $1 fi +confcheck +bootquagga """ % (cls._configs[0], quagga_sbin_search, quagga_bin_search, \ QUAGGA_STATE_DIR, QUAGGA_USER, QUAGGA_GROUP) @@ -311,7 +290,7 @@ class Ospfv2(QuaggaService): unified Quagga.conf file. ''' _name = "OSPFv2" - _startup = ("sh quaggaboot.sh ospfd",) + _startup = () _shutdown = ("killall ospfd", ) _validate = ("pidof ospfd", ) _ipv4_routing = True @@ -382,7 +361,7 @@ class Ospfv3(QuaggaService): unified Quagga.conf file. ''' _name = "OSPFv3" - _startup = ("sh quaggaboot.sh ospf6d",) + _startup = () _shutdown = ("killall ospf6d", ) _validate = ("pidof ospf6d", ) _ipv4_routing = True @@ -486,7 +465,7 @@ class Bgp(QuaggaService): having the same AS number. ''' _name = "BGP" - _startup = ("sh quaggaboot.sh bgpd",) + _startup = () _shutdown = ("killall bgpd", ) _validate = ("pidof bgpd", ) _custom_needed = True @@ -511,7 +490,7 @@ class Rip(QuaggaService): ''' The RIP service provides IPv4 routing for wired networks. 
''' _name = "RIP" - _startup = ("sh quaggaboot.sh ripd",) + _startup = () _shutdown = ("killall ripd", ) _validate = ("pidof ripd", ) _ipv4_routing = True @@ -534,7 +513,7 @@ class Ripng(QuaggaService): ''' The RIP NG service provides IPv6 routing for wired networks. ''' _name = "RIPNG" - _startup = ("sh quaggaboot.sh ripngd",) + _startup = () _shutdown = ("killall ripngd", ) _validate = ("pidof ripngd", ) _ipv6_routing = True @@ -558,7 +537,7 @@ class Babel(QuaggaService): protocol for IPv6 and IPv4 with fast convergence properties. ''' _name = "Babel" - _startup = ("sh quaggaboot.sh babeld",) + _startup = () _shutdown = ("killall babeld", ) _validate = ("pidof babeld", ) _ipv6_routing = True @@ -588,7 +567,7 @@ class Xpimd(QuaggaService): PIM multicast routing based on XORP. ''' _name = 'Xpimd' - _startup = ('sh quaggaboot.sh xpimd',) + _startup = () _shutdown = ('killall xpimd', ) _validate = ('pidof xpimd', ) _ipv4_routing = True @@ -622,7 +601,7 @@ class Vtysh(CoreService): _name = "vtysh" _group = "Quagga" _startindex = 45 - _startup = ("sh quaggaboot.sh vtysh",) + _startup = () _shutdown = () @classmethod From 3a34467e38cc276806fbcb3589f2c344b00869a2 Mon Sep 17 00:00:00 2001 From: Jeff Ahrenholz Date: Thu, 26 Jan 2017 09:13:47 -0800 Subject: [PATCH 08/22] update NRL links from http to https; freshen content; resolves #105 --- README.rst | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/README.rst b/README.rst index 113fba6b..116671c9 100644 --- a/README.rst +++ b/README.rst @@ -13,16 +13,19 @@ About CORE is a tool for emulating networks using a GUI or Python scripts. The CORE project site (1) is a good source of introductory information, with a manual, -screenshots, and demos about this software. Also a supplemental -Google Code page (2) hosts a wiki, blog, bug tracker, and quickstart guide. +screenshots, and demos about this software. The GitHub project (2) hosts the +source repos, wiki, and bug tracker. There is a deprecated +Google Code page (3) with the old wiki, blog, bug tracker, and quickstart guide. 1. http://www.nrl.navy.mil/itd/ncs/products/core -2. http://code.google.com/p/coreemu/ +2. https://github.com/coreemu/core -3. `Official Documentation`_ +3. http://code.google.com/p/coreemu/ -.. _Official Documentation: http://downloads.pf.itd.nrl.navy.mil/docs/core/core-html/index.html +4. `Official Documentation`_ + +.. _Official Documentation: https://downloads.pf.itd.nrl.navy.mil/docs/core/core-html/index.html Building CORE @@ -75,7 +78,7 @@ If you have questions, comments, or trouble, please use the CORE mailing lists: - `core-dev`_ for bugs, compile errors, and other development issues -.. _core-users: http://pf.itd.nrl.navy.mil/mailman/listinfo/core-users -.. _core-dev: http://pf.itd.nrl.navy.mil/mailman/listinfo/core-dev +.. _core-users: https://pf.itd.nrl.navy.mil/mailman/listinfo/core-users +.. _core-dev: https://pf.itd.nrl.navy.mil/mailman/listinfo/core-dev From 33906aae9f518193a46fc459710eee6ecc16fdcb Mon Sep 17 00:00:00 2001 From: Gabriel Somlo Date: Fri, 13 Jan 2017 09:33:48 -0500 Subject: [PATCH 09/22] daemon: remove Quagga 'vtysh' service Since all Quagga daemons are configured from a consolidated location (the 'zebra' service), there is nothing left to do for a dedicated service such as 'vtysh'. This patch removes the service, along with all references to it from the rest of the source tree (sample *.imn files, examples, etc.) 
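To see why nothing is lost by dropping it: a CORE service is essentially a class that declares what to start, stop, and generate, so a service whose _startup tuple is empty and whose generateconfig() returns nothing is a no-op. A minimal sketch, mirroring the class attributes used throughout quagga.py and assuming the usual CoreService/addservice imports from daemon/core/service.py (the class name here is purely illustrative):

    from core.service import CoreService, addservice

    class NoopQuaggaHelper(CoreService):
        # illustrative only; not part of this patch series
        _name = "NoopQuaggaHelper"
        _group = "Quagga"
        _startindex = 45
        _startup = ()      # nothing to launch ...
        _shutdown = ()     # ... nothing to stop ...
        _validate = ()

        @classmethod
        def generateconfig(cls, node, filename, services):
            return ""      # ... and nothing to write

    addservice(NoopQuaggaHelper)

This is essentially the shape the old 'vtysh' service had already been reduced to by the previous patch, which is why it can be removed outright.
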
Signed-off-by: Gabriel Somlo --- daemon/core/misc/xmlparser1.py | 8 ++++---- daemon/core/services/quagga.py | 19 ------------------- daemon/core/xen/xen.py | 1 - daemon/examples/netns/emane80211.py | 2 +- daemon/examples/netns/howmanynodes.py | 2 +- daemon/ns3/examples/ns3wifirandomwalk.py | 2 +- doc/usage.rst | 4 ++-- gui/configs/sample1.imn | 2 +- gui/configs/sample10-kitchen-sink.imn | 4 ++-- gui/configs/sample3-bgp.imn | 16 ++++++++-------- gui/configs/sample4-nrlsmf.imn | 20 ++++++++++---------- gui/configs/sample5-mgen.imn | 4 ++-- gui/configs/sample8-ipsec-service.imn | 6 +++--- gui/nodes.tcl | 8 ++++---- gui/util.tcl | 2 +- 15 files changed, 40 insertions(+), 60 deletions(-) diff --git a/daemon/core/misc/xmlparser1.py b/daemon/core/misc/xmlparser1.py index 8b0a22c1..88782917 100644 --- a/daemon/core/misc/xmlparser1.py +++ b/daemon/core/misc/xmlparser1.py @@ -911,12 +911,12 @@ class CoreDocumentParser1(object): def parse_default_services(self): # defaults from the CORE GUI self.default_services = { - 'router': ['zebra', 'OSPFv2', 'OSPFv3', 'vtysh', 'IPForward'], + 'router': ['zebra', 'OSPFv2', 'OSPFv3', 'IPForward'], 'host': ['DefaultRoute', 'SSH'], 'PC': ['DefaultRoute',], - 'mdr': ['zebra', 'OSPFv3MDR', 'vtysh', 'IPForward'], - # 'prouter': ['zebra', 'OSPFv2', 'OSPFv3', 'vtysh', 'IPForward'], - # 'xen': ['zebra', 'OSPFv2', 'OSPFv3', 'vtysh', 'IPForward'], + 'mdr': ['zebra', 'OSPFv3MDR', 'IPForward'], + # 'prouter': ['zebra', 'OSPFv2', 'OSPFv3', 'IPForward'], + # 'xen': ['zebra', 'OSPFv2', 'OSPFv3', 'IPForward'], } default_services = \ getFirstChildByTagName(self.scenario, 'CORE:defaultservices') diff --git a/daemon/core/services/quagga.py b/daemon/core/services/quagga.py index e2892cdc..381f4a56 100644 --- a/daemon/core/services/quagga.py +++ b/daemon/core/services/quagga.py @@ -30,7 +30,6 @@ class Zebra(CoreService): ''' _name = "zebra" _group = "Quagga" - _depends = ("vtysh", ) _dirs = ("/usr/local/etc/quagga", "/var/run/quagga") _configs = ("/usr/local/etc/quagga/Quagga.conf", "quaggaboot.sh","/usr/local/etc/quagga/vtysh.conf") @@ -593,21 +592,3 @@ class Xpimd(QuaggaService): return ' ip mfea\n ip igmp\n ip pim\n' addservice(Xpimd) - -class Vtysh(CoreService): - ''' Simple service to run vtysh -b (boot) after all Quagga daemons have - started. 
- ''' - _name = "vtysh" - _group = "Quagga" - _startindex = 45 - _startup = () - _shutdown = () - - @classmethod - def generateconfig(cls, node, filename, services): - return "" - -addservice(Vtysh) - - diff --git a/daemon/core/xen/xen.py b/daemon/core/xen/xen.py index 68b5a64f..eae3f770 100644 --- a/daemon/core/xen/xen.py +++ b/daemon/core/xen/xen.py @@ -109,7 +109,6 @@ class XenNode(PyCoreNode): #'sh quaggaboot.sh zebra', #'sh quaggaboot.sh ospfd', #'sh quaggaboot.sh ospf6d', - 'sh quaggaboot.sh vtysh', 'killall zebra', 'killall ospfd', 'killall ospf6d', diff --git a/daemon/examples/netns/emane80211.py b/daemon/examples/netns/emane80211.py index 6868d49c..1a494721 100755 --- a/daemon/examples/netns/emane80211.py +++ b/daemon/examples/netns/emane80211.py @@ -70,7 +70,7 @@ def main(): values[ names.index('propagationmodel') ] = '2ray' session.emane.setconfig(wlan.objid, EmaneIeee80211abgModel._name, values) - services_str = "zebra|OSPFv3MDR|vtysh|IPForward" + services_str = "zebra|OSPFv3MDR|IPForward" print "creating %d nodes with addresses from %s" % \ (options.numnodes, prefix) diff --git a/daemon/examples/netns/howmanynodes.py b/daemon/examples/netns/howmanynodes.py index efafb107..ab3f94cc 100755 --- a/daemon/examples/netns/howmanynodes.py +++ b/daemon/examples/netns/howmanynodes.py @@ -97,7 +97,7 @@ def main(): parser.add_option("-s", "--services", dest = "services", type = str, help = "pipe-delimited list of services added to each " \ "node (default = %s)\n(Example: 'zebra|OSPFv2|OSPFv3|" \ - "vtysh|IPForward')" % parser.defaults["services"]) + "IPForward')" % parser.defaults["services"]) def usage(msg = None, err = 0): sys.stdout.write("\n") diff --git a/daemon/ns3/examples/ns3wifirandomwalk.py b/daemon/ns3/examples/ns3wifirandomwalk.py index a02eb69a..205b1157 100755 --- a/daemon/ns3/examples/ns3wifirandomwalk.py +++ b/daemon/ns3/examples/ns3wifirandomwalk.py @@ -66,7 +66,7 @@ def wifisession(opt): wifi.phy.Set("RxGain", ns.core.DoubleValue(18.0)) prefix = ipaddr.IPv4Prefix("10.0.0.0/16") - services_str = "zebra|OSPFv3MDR|vtysh|IPForward" + services_str = "zebra|OSPFv3MDR|IPForward" nodes = [] for i in xrange(1, opt.numnodes + 1): node = session.addnode(name = "n%d" % i) diff --git a/doc/usage.rst b/doc/usage.rst index d0dc35cd..ec6e6028 100644 --- a/doc/usage.rst +++ b/doc/usage.rst @@ -1448,13 +1448,13 @@ Here are the default node types and their services: .. index:: Xen .. index:: physical nodes -* *router* - zebra, OSFPv2, OSPFv3, vtysh, and IPForward services for IGP +* *router* - zebra, OSFPv2, OSPFv3, and IPForward services for IGP link-state routing. * *host* - DefaultRoute and SSH services, representing an SSH server having a default route when connected directly to a router. * *PC* - DefaultRoute service for having a default route when connected directly to a router. -* *mdr* - zebra, OSPFv3MDR, vtysh, and IPForward services for +* *mdr* - zebra, OSPFv3MDR, and IPForward services for wireless-optimized MANET Designated Router routing. 
* *prouter* - a physical router, having the same default services as the *router* node type; for incorporating Linux testbed machines into an diff --git a/gui/configs/sample1.imn b/gui/configs/sample1.imn index c394bbe6..912f1e71 100644 --- a/gui/configs/sample1.imn +++ b/gui/configs/sample1.imn @@ -105,7 +105,7 @@ node n5 { labelcoords {540.0 376.0} interface-peer {eth0 n10} interface-peer {eth1 n15} - services {zebra OSPFv2 OSPFv3MDR vtysh IPForward} + services {zebra OSPFv2 OSPFv3MDR IPForward} custom-config { custom-config-id service:zebra custom-command zebra diff --git a/gui/configs/sample10-kitchen-sink.imn b/gui/configs/sample10-kitchen-sink.imn index a9e21a4a..d5463952 100644 --- a/gui/configs/sample10-kitchen-sink.imn +++ b/gui/configs/sample10-kitchen-sink.imn @@ -283,7 +283,7 @@ node n11 { } } - services {zebra OSPFv2 OSPFv3MDR vtysh IPForward} + services {zebra OSPFv2 OSPFv3MDR IPForward} } node n12 { @@ -517,7 +517,7 @@ node n20 { } } - services {zebra OSPFv2 OSPFv3MDR vtysh IPForward} + services {zebra OSPFv2 OSPFv3MDR IPForward} } node n21 { diff --git a/gui/configs/sample3-bgp.imn b/gui/configs/sample3-bgp.imn index f782585e..d4a396ae 100644 --- a/gui/configs/sample3-bgp.imn +++ b/gui/configs/sample3-bgp.imn @@ -20,7 +20,7 @@ node n1 { interface-peer {eth1 n2} interface-peer {eth2 n3} canvas c1 - services {zebra BGP vtysh IPForward} + services {zebra BGP IPForward} custom-config { custom-config-id service:zebra:/usr/local/etc/quagga/Quagga.conf custom-command /usr/local/etc/quagga/Quagga.conf @@ -82,7 +82,7 @@ node n2 { interface-peer {eth1 n16} interface-peer {eth2 n6} canvas c1 - services {zebra BGP vtysh IPForward} + services {zebra BGP IPForward} custom-config { custom-config-id service:zebra:/usr/local/etc/quagga/Quagga.conf custom-command /usr/local/etc/quagga/Quagga.conf @@ -140,7 +140,7 @@ node n3 { interface-peer {eth0 n4} interface-peer {eth1 n1} canvas c1 - services {zebra BGP vtysh IPForward} + services {zebra BGP IPForward} custom-config { custom-config-id service:zebra:/usr/local/etc/quagga/Quagga.conf custom-command /usr/local/etc/quagga/Quagga.conf @@ -197,7 +197,7 @@ node n4 { interface-peer {eth0 n3} interface-peer {eth1 n7} canvas c1 - services {zebra BGP vtysh IPForward} + services {zebra BGP IPForward} custom-config { custom-config-id service:zebra:/usr/local/etc/quagga/Quagga.conf custom-command /usr/local/etc/quagga/Quagga.conf @@ -258,7 +258,7 @@ node n5 { interface-peer {eth0 n7} interface-peer {eth1 n6} canvas c1 - services {zebra BGP vtysh IPForward} + services {zebra BGP IPForward} custom-config { custom-config-id service:zebra:/usr/local/etc/quagga/Quagga.conf custom-command /usr/local/etc/quagga/Quagga.conf @@ -323,7 +323,7 @@ node n6 { interface-peer {eth0 n5} interface-peer {eth1 n2} canvas c1 - services {zebra BGP vtysh IPForward} + services {zebra BGP IPForward} custom-config { custom-config-id service:zebra:/usr/local/etc/quagga/Quagga.conf custom-command /usr/local/etc/quagga/Quagga.conf @@ -376,7 +376,7 @@ node n7 { interface-peer {eth0 n5} interface-peer {eth1 n4} canvas c1 - services {zebra BGP vtysh IPForward} + services {zebra BGP IPForward} custom-config { custom-config-id service:zebra:/usr/local/etc/quagga/Quagga.conf custom-command /usr/local/etc/quagga/Quagga.conf @@ -555,7 +555,7 @@ node n16 { interface-peer {eth0 n1} interface-peer {eth1 n2} canvas c1 - services {zebra BGP vtysh IPForward} + services {zebra BGP IPForward} custom-config { custom-config-id service:zebra:/usr/local/etc/quagga/Quagga.conf custom-command 
/usr/local/etc/quagga/Quagga.conf diff --git a/gui/configs/sample4-nrlsmf.imn b/gui/configs/sample4-nrlsmf.imn index a1b08b53..165c424f 100644 --- a/gui/configs/sample4-nrlsmf.imn +++ b/gui/configs/sample4-nrlsmf.imn @@ -65,7 +65,7 @@ node n1 { canvas c1 interface-peer {eth0 n11} custom-image $CORE_DATA_DIR/icons/normal/router_green.gif - services {zebra OSPFv3MDR vtysh SMF IPForward UserDefined} + services {zebra OSPFv3MDR SMF IPForward UserDefined} custom-config { custom-config-id service:UserDefined:custom-post-config-commands.sh custom-command custom-post-config-commands.sh @@ -101,7 +101,7 @@ node n2 { canvas c1 interface-peer {eth0 n11} custom-image $CORE_DATA_DIR/icons/normal/router_green.gif - services {zebra OSPFv3MDR vtysh SMF IPForward UserDefined} + services {zebra OSPFv3MDR SMF IPForward UserDefined} custom-config { custom-config-id service:UserDefined:custom-post-config-commands.sh custom-command custom-post-config-commands.sh @@ -137,7 +137,7 @@ node n3 { canvas c1 interface-peer {eth0 n11} custom-image $CORE_DATA_DIR/icons/normal/router_green.gif - services {zebra OSPFv3MDR vtysh SMF IPForward UserDefined} + services {zebra OSPFv3MDR SMF IPForward UserDefined} custom-config { custom-config-id service:UserDefined:custom-post-config-commands.sh custom-command custom-post-config-commands.sh @@ -173,7 +173,7 @@ node n4 { canvas c1 interface-peer {eth0 n11} custom-image $CORE_DATA_DIR/icons/normal/router_green.gif - services {zebra OSPFv3MDR vtysh SMF IPForward UserDefined} + services {zebra OSPFv3MDR SMF IPForward UserDefined} custom-config { custom-config-id service:UserDefined:custom-post-config-commands.sh custom-command custom-post-config-commands.sh @@ -209,7 +209,7 @@ node n5 { canvas c1 interface-peer {eth0 n11} custom-image $CORE_DATA_DIR/icons/normal/router_green.gif - services {zebra OSPFv3MDR vtysh SMF IPForward UserDefined} + services {zebra OSPFv3MDR SMF IPForward UserDefined} custom-config { custom-config-id service:UserDefined:custom-post-config-commands.sh custom-command custom-post-config-commands.sh @@ -245,7 +245,7 @@ node n6 { canvas c1 interface-peer {eth0 n11} custom-image $CORE_DATA_DIR/icons/normal/router_red.gif - services {zebra OSPFv3MDR vtysh SMF IPForward UserDefined} + services {zebra OSPFv3MDR SMF IPForward UserDefined} custom-config { custom-config-id service:UserDefined:custom-post-config-commands.sh custom-command custom-post-config-commands.sh @@ -281,7 +281,7 @@ node n7 { canvas c1 interface-peer {eth0 n11} custom-image $CORE_DATA_DIR/icons/normal/router_red.gif - services {zebra OSPFv3MDR vtysh SMF IPForward UserDefined} + services {zebra OSPFv3MDR SMF IPForward UserDefined} custom-config { custom-config-id service:UserDefined:custom-post-config-commands.sh custom-command custom-post-config-commands.sh @@ -317,7 +317,7 @@ node n8 { canvas c1 interface-peer {eth0 n11} custom-image $CORE_DATA_DIR/icons/normal/router_red.gif - services {zebra OSPFv3MDR vtysh SMF IPForward UserDefined} + services {zebra OSPFv3MDR SMF IPForward UserDefined} custom-config { custom-config-id service:UserDefined:custom-post-config-commands.sh custom-command custom-post-config-commands.sh @@ -353,7 +353,7 @@ node n9 { canvas c1 interface-peer {eth0 n11} custom-image $CORE_DATA_DIR/icons/normal/router_red.gif - services {zebra OSPFv3MDR vtysh SMF IPForward UserDefined} + services {zebra OSPFv3MDR SMF IPForward UserDefined} custom-config { custom-config-id service:UserDefined:custom-post-config-commands.sh custom-command custom-post-config-commands.sh @@ -389,7 +389,7 
@@ node n10 { canvas c1 interface-peer {eth0 n11} custom-image $CORE_DATA_DIR/icons/normal/router_red.gif - services {zebra OSPFv3MDR vtysh SMF IPForward UserDefined} + services {zebra OSPFv3MDR SMF IPForward UserDefined} custom-config { custom-config-id service:UserDefined:custom-post-config-commands.sh custom-command custom-post-config-commands.sh diff --git a/gui/configs/sample5-mgen.imn b/gui/configs/sample5-mgen.imn index 925266c4..e27b55a7 100644 --- a/gui/configs/sample5-mgen.imn +++ b/gui/configs/sample5-mgen.imn @@ -51,7 +51,7 @@ node n1 { cmdup=('sh mgen.sh', ) } } - services {zebra OSPFv2 OSPFv3 vtysh IPForward UserDefined} + services {zebra OSPFv2 OSPFv3 IPForward UserDefined} } node n2 { @@ -101,7 +101,7 @@ node n2 { mgen input send_$HN.mgn output $LOGDIR/mgen_$HN.log > /dev/null 2> /dev/null < /dev/null & } } - services {zebra OSPFv2 OSPFv3 vtysh IPForward UserDefined} + services {zebra OSPFv2 OSPFv3 IPForward UserDefined} } link l1 { diff --git a/gui/configs/sample8-ipsec-service.imn b/gui/configs/sample8-ipsec-service.imn index d1274c6f..ba409185 100644 --- a/gui/configs/sample8-ipsec-service.imn +++ b/gui/configs/sample8-ipsec-service.imn @@ -354,7 +354,7 @@ node n1 { } } - services {zebra OSPFv2 OSPFv3 vtysh IPForward IPsec} + services {zebra OSPFv2 OSPFv3 IPForward IPsec} custom-image $CORE_DATA_DIR/icons/normal/router_red.gif } @@ -528,7 +528,7 @@ node n2 { } } - services {zebra OSPFv2 OSPFv3 vtysh IPForward IPsec} + services {zebra OSPFv2 OSPFv3 IPForward IPsec} custom-image $CORE_DATA_DIR/icons/normal/router_red.gif } @@ -697,7 +697,7 @@ node n3 { } } - services {zebra OSPFv2 OSPFv3 vtysh IPForward IPsec} + services {zebra OSPFv2 OSPFv3 IPForward IPsec} custom-image $CORE_DATA_DIR/icons/normal/router_red.gif } diff --git a/gui/nodes.tcl b/gui/nodes.tcl index a86ae345..7fd65e57 100644 --- a/gui/nodes.tcl +++ b/gui/nodes.tcl @@ -15,18 +15,18 @@ if { $execMode == "interactive" } { # these are the default node types when nodes.conf does not exist # index {name normal-icon tiny-icon services type metadata} array set g_node_types_default { - 1 {router router.gif router.gif {zebra OSPFv2 OSPFv3 vtysh IPForward} \ + 1 {router router.gif router.gif {zebra OSPFv2 OSPFv3 IPForward} \ netns {built-in type for routing}} 2 {host host.gif host.gif {DefaultRoute SSH} \ netns {built-in type for servers}} 3 {PC pc.gif pc.gif {DefaultRoute} \ netns {built-in type for end hosts}} - 4 {mdr mdr.gif mdr.gif {zebra OSPFv3MDR vtysh IPForward} \ + 4 {mdr mdr.gif mdr.gif {zebra OSPFv3MDR IPForward} \ netns {built-in type for wireless routers}} 5 {prouter router_green.gif router_green.gif \ - {zebra OSPFv2 OSPFv3 vtysh IPForward} \ + {zebra OSPFv2 OSPFv3 IPForward} \ physical {built-in type for physical nodes}} - 6 {xen xen.gif xen.gif {zebra OSPFv2 OSPFv3 vtysh IPForward} \ + 6 {xen xen.gif xen.gif {zebra OSPFv2 OSPFv3 IPForward} \ xen {built-in type for Xen PVM domU router}} } diff --git a/gui/util.tcl b/gui/util.tcl index 88c7cb3b..93cabd4d 100644 --- a/gui/util.tcl +++ b/gui/util.tcl @@ -160,7 +160,7 @@ proc upgradeNetworkConfigToServices { } { set bgp [netconfFetchSection $node "router bgp"] if { $ospfv2 != "" || $ospfv3 != "" || $rip != "" || $ripng != "" } { set cfg "" - set services "zebra vtysh IPForward" + set services "zebra IPForward" foreach ifc [ifcList $node] { lappend cfg "interface $ifc" set ifccfg [netconfFetchSection $node "interface $ifc"] From b1ea0574e58868437fe412b9b7378a5e1616fa01 Mon Sep 17 00:00:00 2001 From: Gabriel Somlo Date: Fri, 13 Jan 2017 09:38:23 -0500 Subject: 
[PATCH 10/22] daemon: start Quagga with default user/group names Allow native distribution packages to run with their default user and group names by no longer forcing "-u root -g root" on any of the Quagga daemons' command lines. This should continue to allow hand-compiled and/or "side-loaded" Quagga software to run as root, if configured to do so at build time. Signed-off-by: Gabriel Somlo --- daemon/core/services/quagga.py | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/daemon/core/services/quagga.py b/daemon/core/services/quagga.py index 381f4a56..914b9070 100644 --- a/daemon/core/services/quagga.py +++ b/daemon/core/services/quagga.py @@ -20,11 +20,6 @@ from core.misc.ipaddr import IPv4Prefix, isIPv4Address, isIPv6Address from core.api import coreapi from core.constants import * -QUAGGA_USER="root" -QUAGGA_GROUP="root" -if os.uname()[0] == "FreeBSD": - QUAGGA_GROUP="wheel" - class Zebra(CoreService): ''' ''' @@ -139,8 +134,6 @@ QUAGGA_CONF=%s QUAGGA_SBIN_SEARCH=%s QUAGGA_BIN_SEARCH=%s QUAGGA_STATE_DIR=%s -QUAGGA_USER=%s -QUAGGA_GROUP=%s searchforprog() { @@ -185,7 +178,7 @@ bootdaemon() flags="$flags -6" fi - $QUAGGA_SBIN_DIR/$1 $flags -u $QUAGGA_USER -g $QUAGGA_GROUP -d + $QUAGGA_SBIN_DIR/$1 $flags -d if [ "$?" != "0" ]; then echo "ERROR: Quagga's '$1' daemon failed to start!:" return 1 @@ -222,7 +215,7 @@ fi confcheck bootquagga """ % (cls._configs[0], quagga_sbin_search, quagga_bin_search, \ - QUAGGA_STATE_DIR, QUAGGA_USER, QUAGGA_GROUP) + QUAGGA_STATE_DIR) addservice(Zebra) From 798793ed876756da22806eb85e57009386579abc Mon Sep 17 00:00:00 2001 From: Jeff Ahrenholz Date: Wed, 8 Feb 2017 14:14:55 -0800 Subject: [PATCH 11/22] fix #24 Throughput Widget bug, now that hex interface numbers are used (cherry picked from commit e8d4d5397c86d9edd454870f71c6ee72ea728df2) --- gui/widget.tcl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/gui/widget.tcl b/gui/widget.tcl index ed84ccf9..077ad95e 100644 --- a/gui/widget.tcl +++ b/gui/widget.tcl @@ -924,7 +924,8 @@ proc getstats_link_ifname { link } { # TODO: need to determine session number used by daemon # instead this uses a '*' character for a regexp match against # the interfaces in /proc/net/dev - set ifname "veth$node_num\\.[string range $ifname 3 end]\\.*" + set hex [format "%x" $node_num] + set ifname "veth$hex\\.[string range $ifname 3 end]\\.*" return $ifname } From a70da2eb6122d787db5960c4d5725f886544e041 Mon Sep 17 00:00:00 2001 From: Gabriel Somlo Date: Fri, 10 Feb 2017 11:31:44 -0500 Subject: [PATCH 12/22] rpm: include iproute-tc [Build]Requires for fedora >= 25 As of F25, the 'tc' command was moved to a separate sub-package of iproute, which must be included both at build- and run-time. 
Signed-off-by: Gabriel Somlo --- packaging/rpm/core.spec.in | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/packaging/rpm/core.spec.in b/packaging/rpm/core.spec.in index 1a2cc9e3..7cf28a7b 100644 --- a/packaging/rpm/core.spec.in +++ b/packaging/rpm/core.spec.in @@ -32,12 +32,18 @@ Requires: procps-ng %if %{with_kernel_modules_extra} Requires: kernel-modules-extra %endif +%if 0%{?fedora} >= 25 +Requires: iproute-tc +%endif BuildRequires: make automake autoconf libev-devel python-devel bridge-utils ebtables iproute net-tools ImageMagick help2man %if 0%{?el6} BuildRequires: procps %else BuildRequires: procps-ng %endif +%if 0%{?fedora} >= 25 +BuildRequires: iproute-tc +%endif Provides: core-daemon # python-sphinx %description daemon From c147d5b2f5faaa9d3210d4d481a5fc4905b76e95 Mon Sep 17 00:00:00 2001 From: adamson <> Date: Sat, 11 Feb 2017 15:52:03 +0000 Subject: [PATCH 13/22] added option Throughput widge to observe transmissions/receptions separately if desired --- gui/widget.tcl | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/gui/widget.tcl b/gui/widget.tcl index 077ad95e..19fe6d0b 100644 --- a/gui/widget.tcl +++ b/gui/widget.tcl @@ -572,7 +572,7 @@ proc exec_observer_callback { node execnum cmd result status } { ##### ##### ################################################################################ -array set thruConfig { show 1 avg 1 thresh 250.0 width 10 color #FF0000 } +array set thruConfig { show 1 up 1 down 1 avg 1 thresh 250.0 width 10 color #FF0000 } # netgraph names of pipe nodes array set throughput_cache { } @@ -597,7 +597,12 @@ proc widget_thru_config {} { checkbutton $wi.tlab.avg \ -text "Use exponentially weighted moving average" \ -variable thruConfig(avg) - pack $wi.tlab.show_thru $wi.tlab.avg -side top -anchor w -padx 4 + checkbutton $wi.tlab.down \ + -text "Include transmissions" -variable thruConfig(down) + checkbutton $wi.tlab.up \ + -text "Include receptions" -variable thruConfig(up) + pack $wi.tlab.show_thru $wi.tlab.avg $wi.tlab.down \ + $wi.tlab.up -side top -anchor w -padx 4 pack $wi.tlab -side top frame $wi.msg -borderwidth 4 @@ -807,7 +812,14 @@ proc widget_thru_periodic { now } { set div [expr { (1000.0 / 8.0) * $dt }] set kbps_down [expr { ([lindex $bytes 0]-[lindex $bytes2 0]) / $div }] set kbps_up [expr { ([lindex $bytes 1]-[lindex $bytes2 1]) / $div }] - set kbps [expr {$kbps_down + $kbps_up}] + set kbps 0.0 + if { $thruConfig(up) } { + set kbps [expr {$kbps + $kbps_up}] + } + if { $thruConfig(down) } { + set kbps [expr {$kbps + $kbps_down}] + } + #set kbps [expr {$kbps_down + $kbps_up}] if { $thruConfig(avg) } { if { ![info exists link_thru_avg_stats($key)] } { @@ -920,6 +932,7 @@ proc getstats_link_ifname { link } { set ifname [ifcByPeer $lnode2 $lnode1] } if { $node_num < 0 } { return "" } + set node_num [format %x $node_num] # TODO: need to determine session number used by daemon # instead this uses a '*' character for a regexp match against From 56896bddd14f0a3d415a9782f564452efa925167 Mon Sep 17 00:00:00 2001 From: adamson <> Date: Sun, 19 Feb 2017 17:55:31 +0000 Subject: [PATCH 14/22] fixed issue with indexing EMANE RJ45 objects --- daemon/core/emane/emane.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/daemon/core/emane/emane.py b/daemon/core/emane/emane.py index 8765a916..671abfea 100644 --- a/daemon/core/emane/emane.py +++ b/daemon/core/emane/emane.py @@ -239,7 +239,9 @@ class Emane(ConfigurableManager): # Adamson change: first check for iface config keyed by 
"node:ifc.name" # (so that nodes w/ multiple interfaces of same conftype can have # different configs for each separate interface) - key = 1000*ifc.node.objid + ifc.netindex + key = 1000*ifc.node.objid + if ifc.netindex is not None: + key += ifc.netindex values = self.getconfig(key, conftype, None)[1] if not values: values = self.getconfig(ifc.node.objid, conftype, None)[1] From 67117a3af342ada69779cadecf4f6678491af505 Mon Sep 17 00:00:00 2001 From: Tom Goff Date: Fri, 24 Feb 2017 00:20:57 +0000 Subject: [PATCH 15/22] daemon: Improve importing custom services. This should help avoid python module name conflicts. --- daemon/core/service.py | 31 +++++++++++++++++++++---------- 1 file changed, 21 insertions(+), 10 deletions(-) diff --git a/daemon/core/service.py b/daemon/core/service.py index ddca4fc4..23d37699 100644 --- a/daemon/core/service.py +++ b/daemon/core/service.py @@ -15,6 +15,7 @@ services. ''' import sys, os, shlex +import imp from itertools import repeat from core.api import coreapi @@ -47,9 +48,8 @@ class CoreServices(ConfigurableManager): _name = "services" _type = coreapi.CORE_TLV_REG_UTILITY - _invalid_custom_names = ('core', 'addons', 'api', 'bsd', 'emane', 'misc', - 'netns', 'phys', 'services', 'xen') - + service_path = set() + def __init__(self, session): ConfigurableManager.__init__(self, session) # dict of default services tuples, key is node type @@ -65,23 +65,34 @@ class CoreServices(ConfigurableManager): self.importcustom(path) self.isStartupService = startup.Startup.isStartupService + @classmethod + def add_service_path(cls, path): + cls.service_path.add(path) + def importcustom(self, path): ''' Import services from a myservices directory. ''' - if not path or len(path) == 0: + if not path or path in self.service_path: return if not os.path.isdir(path): self.session.warn("invalid custom service directory specified" \ ": %s" % path) return + self.add_service_path(path) try: parentdir, childdir = os.path.split(path) - if childdir in self._invalid_custom_names: - raise ValueError, "use a unique custom services dir name, " \ - "not '%s'" % childdir - if not parentdir in sys.path: - sys.path.append(parentdir) - exec("from %s import *" % childdir) + f, pathname, description = imp.find_module(childdir, [parentdir]) + name = 'core.services.custom.' + childdir + if name in sys.modules: + i = 1 + while name + str(i) in sys.modules: + i += 1 + name += str(i) + m = imp.load_module(name, f, pathname, description) + if hasattr(m, '__all__'): + for x in m.__all__: + f, pathname, description = imp.find_module(x, [path]) + imp.load_module(name + '.' 
+ x, f, pathname, description) except Exception, e: self.session.warn("error importing custom services from " \ "%s:\n%s" % (path, e)) From e665a122b647f753481b3764073a3268037344d1 Mon Sep 17 00:00:00 2001 From: lbaumgaertner Date: Tue, 28 Mar 2017 12:00:24 +0200 Subject: [PATCH 16/22] fix to add correct broadcast address for ipv4 interfaces with netns backend --- daemon/core/netns/vnode.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/daemon/core/netns/vnode.py b/daemon/core/netns/vnode.py index 44041cdc..9ec605af 100644 --- a/daemon/core/netns/vnode.py +++ b/daemon/core/netns/vnode.py @@ -225,8 +225,12 @@ class SimpleLxcNode(PyCoreNode): "error setting MAC address %s" % str(addr)) def addaddr(self, ifindex, addr): if self.up: - self.cmd([IP_BIN, "addr", "add", str(addr), - "dev", self.ifname(ifindex)]) + if ":" in str(addr): # check if addr is ipv6 + self.cmd([IP_BIN, "addr", "add", str(addr), + "dev", self.ifname(ifindex)]) + else: + self.cmd([IP_BIN, "addr", "add", str(addr), "broadcast", "+", + "dev", self.ifname(ifindex)]) self._netif[ifindex].addaddr(addr) def deladdr(self, ifindex, addr): From 724534e3acb691221b9bc0b89327533731497a60 Mon Sep 17 00:00:00 2001 From: Jeff Ahrenholz Date: Fri, 31 Mar 2017 09:55:26 -0700 Subject: [PATCH 17/22] fix broken Throughput Widget and Wireshark for node numbers above 10 this also fixes the Throughput Widget results when multiple sessions are running --- gui/editor.tcl | 9 +++++---- gui/widget.tcl | 10 ++++------ 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/gui/editor.tcl b/gui/editor.tcl index c451c1e7..a69ec4f8 100644 --- a/gui/editor.tcl +++ b/gui/editor.tcl @@ -1475,7 +1475,7 @@ proc addInterfaceCommand { node parentmenu txt cmd state isnodecmd } { $parentmenu add cascade -label $txt -menu $childmenu -state $state if { ! 
$isnodecmd } { if { $g_current_session == 0 } { set state disabled } - set ssid [shortSessionID $g_current_session] + set ssid [shortSessionID $g_current_session] } foreach ifc [ifcList $node] { set addr [lindex [getIfcIPv4addr $node $ifc] 0] @@ -1483,10 +1483,11 @@ proc addInterfaceCommand { node parentmenu txt cmd state isnodecmd } { if { $isnodecmd } { ;# run command in a node set icmd "spawnShell $node \"$cmd $ifc\"" } else { ;# exec a command directly - set nodenum [string range $node 1 end] + set node_num [string range $node 1 end] + set hex [format "%x" $node_num] set ifnum [string range $ifc 3 end] - set localifc veth$nodenum.$ifnum.$ssid - set icmd "exec $cmd $localifc &" + set ifname "veth$hex\\.$ifnum\\.$ssid" + set icmd "exec $cmd $ifname &" } $childmenu add command -label "$ifc$addr" -state $state -command $icmd } diff --git a/gui/widget.tcl b/gui/widget.tcl index 19fe6d0b..12c40499 100644 --- a/gui/widget.tcl +++ b/gui/widget.tcl @@ -919,6 +919,7 @@ proc getstats_bytes_netgraph { raw_input } { } proc getstats_link_ifname { link } { + global g_current_session set lnode1 [lindex [linkPeers $link] 0] set lnode2 [lindex [linkPeers $link] 1] @@ -932,13 +933,10 @@ proc getstats_link_ifname { link } { set ifname [ifcByPeer $lnode2 $lnode1] } if { $node_num < 0 } { return "" } - set node_num [format %x $node_num] - - # TODO: need to determine session number used by daemon - # instead this uses a '*' character for a regexp match against - # the interfaces in /proc/net/dev + set ssid [shortSessionID $g_current_session] set hex [format "%x" $node_num] - set ifname "veth$hex\\.[string range $ifname 3 end]\\.*" + set ifnum [string range $ifname 3 end] + set ifname "veth$hex.$ifnum.$ssid" return $ifname } From 0b0557e0088f92856a1d42bd9d9aca5ff72ca60b Mon Sep 17 00:00:00 2001 From: Jeff Ahrenholz Date: Fri, 31 Mar 2017 10:59:58 -0700 Subject: [PATCH 18/22] fix /var/run/quagga permissions for Ubuntu 16.04 --- daemon/core/services/quagga.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/daemon/core/services/quagga.py b/daemon/core/services/quagga.py index 914b9070..7d57ae92 100644 --- a/daemon/core/services/quagga.py +++ b/daemon/core/services/quagga.py @@ -194,6 +194,12 @@ bootquagga() return 1 fi + # fix /var/run/quagga permissions + id -u quagga 2>/dev/null >/dev/null + if [ "$?" = "0" ]; then + chown quagga $QUAGGA_STATE_DIR + fi + bootdaemon "zebra" for r in rip ripng ospf6 ospf bgp babel; do if grep -q "^router \<${r}\>" $QUAGGA_CONF; then From b4ce4e93702c9bb32b0db7a3d7123548f70700af Mon Sep 17 00:00:00 2001 From: Tom Goff Date: Thu, 18 May 2017 21:36:45 +0000 Subject: [PATCH 19/22] gui: Fix using xfce4-terminal as the terminal program. 
--- gui/util.tcl | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/gui/util.tcl b/gui/util.tcl index 93cabd4d..204694b0 100644 --- a/gui/util.tcl +++ b/gui/util.tcl @@ -1113,7 +1113,7 @@ proc get_text_editor { want_default } { # variable, then find the first in the list of terminals that exists on the # system set TERMS "{gnome-terminal -x} {lxterminal -e} {konsole -e} {xterm -e}" -set TERMS "$TERMS {aterm -e} {eterm -e} {rxvt -e} {xfce4-terminal -e}" +set TERMS "$TERMS {aterm -e} {eterm -e} {rxvt -e} {xfce4-terminal -x}" proc get_term_prog { want_default } { global g_prefs env TERMS @@ -1130,8 +1130,13 @@ proc get_term_prog { want_default } { } if { $term != "" } { set arg "-e" - # gnome-terminal has problem w/subsequent arguments after -e, needs -x - if { [file tail $term] == "gnome-terminal" } { set arg "-x" } + # gnome-terminal and xfce4-terminal have problems w/subsequent + # arguments after -e, needs -x + set basename [file tail $term] + if {[lsearch -exact \ + {"gnome-terminal" "xfce4-terminal"} $basename] >= 0} { + set arg "-x" + } set term "$term $arg" } From 17e4fc09336bdd0896c944a08236599a91fd2ba4 Mon Sep 17 00:00:00 2001 From: Dustin Spicuzza Date: Thu, 18 May 2017 18:39:19 -0400 Subject: [PATCH 20/22] Emit more descriptive error message if config file is incorrect --- daemon/core/service.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/daemon/core/service.py b/daemon/core/service.py index 23d37699..e6d5ee44 100644 --- a/daemon/core/service.py +++ b/daemon/core/service.py @@ -831,7 +831,7 @@ class CoreService(object): def setvalue(self, key, value): if key not in self.keys: - raise ValueError + raise ValueError('key `%s` not in `%s`' % (key, self.keys)) # this handles data conversion to int, string, and tuples if value: if key == "startidx": From 0a91fe7a3e4c5d3bcb44a9c3a213c8af740ba278 Mon Sep 17 00:00:00 2001 From: Rod A Santiago Date: Mon, 19 Jun 2017 18:03:39 -0700 Subject: [PATCH 21/22] merged cleanup branch with master --- .editorconfig | 13 + daemon/core/api/dataconversion.py | 45 + daemon/core/corehandlers.py | 1790 +++++++++++++++++++++++++++++ daemon/core/data.py | 120 ++ daemon/core/emane/emanemanager.py | 1231 ++++++++++++++++++++ daemon/core/emane/emanemodel.py | 204 ++++ daemon/core/enumerations.py | 318 +++++ daemon/core/misc/ipaddress.py | 449 ++++++++ daemon/core/misc/log.py | 35 + daemon/core/misc/nodemaps.py | 50 + daemon/core/misc/nodeutils.py | 68 ++ daemon/core/misc/structutils.py | 48 + daemon/core/netns/openvswitch.py | 741 ++++++++++++ daemon/core/xml/__init__.py | 0 daemon/core/xml/xmldeployment.py | 207 ++++ daemon/core/xml/xmlparser.py | 46 + daemon/core/xml/xmlparser0.py | 410 +++++++ daemon/core/xml/xmlparser1.py | 876 ++++++++++++++ daemon/core/xml/xmlsession.py | 36 + daemon/core/xml/xmlutils.py | 338 ++++++ daemon/core/xml/xmlwriter.py | 12 + daemon/core/xml/xmlwriter0.py | 389 +++++++ daemon/core/xml/xmlwriter1.py | 1018 ++++++++++++++++ daemon/requirements.txt | 8 + daemon/setup.cfg | 2 + daemon/tests/conftest.py | 133 +++ daemon/tests/test_core.py | 326 ++++++ daemon/tests/test_gui.py | 120 ++ 28 files changed, 9033 insertions(+) create mode 100644 .editorconfig create mode 100644 daemon/core/api/dataconversion.py create mode 100644 daemon/core/corehandlers.py create mode 100644 daemon/core/data.py create mode 100644 daemon/core/emane/emanemanager.py create mode 100644 daemon/core/emane/emanemodel.py create mode 100644 daemon/core/enumerations.py create mode 100644 daemon/core/misc/ipaddress.py 
create mode 100644 daemon/core/misc/log.py create mode 100644 daemon/core/misc/nodemaps.py create mode 100644 daemon/core/misc/nodeutils.py create mode 100644 daemon/core/misc/structutils.py create mode 100644 daemon/core/netns/openvswitch.py create mode 100644 daemon/core/xml/__init__.py create mode 100644 daemon/core/xml/xmldeployment.py create mode 100644 daemon/core/xml/xmlparser.py create mode 100644 daemon/core/xml/xmlparser0.py create mode 100644 daemon/core/xml/xmlparser1.py create mode 100644 daemon/core/xml/xmlsession.py create mode 100644 daemon/core/xml/xmlutils.py create mode 100644 daemon/core/xml/xmlwriter.py create mode 100644 daemon/core/xml/xmlwriter0.py create mode 100644 daemon/core/xml/xmlwriter1.py create mode 100644 daemon/requirements.txt create mode 100644 daemon/setup.cfg create mode 100644 daemon/tests/conftest.py create mode 100644 daemon/tests/test_core.py create mode 100644 daemon/tests/test_gui.py diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 00000000..67ce9503 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,13 @@ +# This file is for unifying the coding style for different editors and IDEs +# editorconfig.org +root = true + +[*] +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true + +[*.py] +indent_style = space +indent_size = 4 diff --git a/daemon/core/api/dataconversion.py b/daemon/core/api/dataconversion.py new file mode 100644 index 00000000..29a7369e --- /dev/null +++ b/daemon/core/api/dataconversion.py @@ -0,0 +1,45 @@ +""" +Converts CORE data objects into legacy API messages. +""" + +from core.api import coreapi +from core.enumerations import NodeTlvs +from core.misc import log +from core.misc import structutils + +logger = log.get_logger(__name__) + + +def convert_node(node_data): + """ + Callback to handle an node broadcast out from a session. + + :param core.data.NodeData node_data: node data to handle + :return: packed node message + """ + logger.debug("converting node data to message: %s", node_data) + + tlv_data = structutils.pack_values(coreapi.CoreNodeTlv, [ + (NodeTlvs.NUMBER, node_data.id), + (NodeTlvs.TYPE, node_data.node_type), + (NodeTlvs.NAME, node_data.name), + (NodeTlvs.IP_ADDRESS, node_data.ip_address), + (NodeTlvs.MAC_ADDRESS, node_data.mac_address), + (NodeTlvs.IP6_ADDRESS, node_data.ip6_address), + (NodeTlvs.MODEL, node_data.model), + (NodeTlvs.EMULATION_ID, node_data.emulation_id), + (NodeTlvs.EMULATION_SERVER, node_data.emulation_server), + (NodeTlvs.SESSION, node_data.session), + (NodeTlvs.X_POSITION, node_data.x_position), + (NodeTlvs.Y_POSITION, node_data.y_position), + (NodeTlvs.CANVAS, node_data.canvas), + (NodeTlvs.NETWORK_ID, node_data.network_id), + (NodeTlvs.SERVICES, node_data.services), + (NodeTlvs.LATITUDE, node_data.latitude), + (NodeTlvs.LONGITUDE, node_data.longitude), + (NodeTlvs.ALTITUDE, node_data.altitude), + (NodeTlvs.ICON, node_data.icon), + (NodeTlvs.OPAQUE, node_data.opaque) + ]) + + return coreapi.CoreNodeMessage.pack(node_data.message_type, tlv_data) diff --git a/daemon/core/corehandlers.py b/daemon/core/corehandlers.py new file mode 100644 index 00000000..8947ce65 --- /dev/null +++ b/daemon/core/corehandlers.py @@ -0,0 +1,1790 @@ +""" +socket server request handlers leveraged by core servers. 
+""" + +import Queue +import SocketServer +import os +import shlex +import shutil +import sys +import threading +import time + +from core import coreobj +from core.api import coreapi +from core.coreserver import CoreServer +from core.data import ConfigData +from core.data import EventData +from core.data import NodeData +from core.enumerations import ConfigTlvs +from core.enumerations import EventTlvs +from core.enumerations import EventTypes +from core.enumerations import ExceptionTlvs +from core.enumerations import ExecuteTlvs +from core.enumerations import FileTlvs +from core.enumerations import LinkTlvs +from core.enumerations import LinkTypes +from core.enumerations import MessageFlags +from core.enumerations import MessageTypes +from core.enumerations import NodeTlvs +from core.enumerations import NodeTypes +from core.enumerations import RegisterTlvs +from core.enumerations import SessionTlvs +from core.misc import log +from core.misc import nodeutils +from core.misc import structutils +from core.misc import utils +from core.netns import nodes +from core.xml.xmlsession import open_session_xml +from core.xml.xmlsession import save_session_xml + +logger = log.get_logger(__name__) + + +class CoreRequestHandler(SocketServer.BaseRequestHandler): + """ + The SocketServer class uses the RequestHandler class for servicing + requests, mainly through the handle() method. The CoreRequestHandler + has the following basic flow: + 1. Client connects and request comes in via handle(). + 2. handle() calls recvmsg() in a loop. + 3. recvmsg() does a recv() call on the socket performs basic + checks that this we received a CoreMessage, returning it. + 4. The message data is queued using queuemsg(). + 5. The handlerthread() thread pops messages from the queue and uses + handlemsg() to invoke the appropriate handler for that message type. + """ + + def __init__(self, request, client_address, server): + """ + Create a CoreRequestHandler instance. + + :param request: request object + :param str client_address: client address + :param CoreServer server: core server instance + :return: + """ + self.done = False + self.message_handlers = { + MessageTypes.NODE.value: self.handle_node_message, + MessageTypes.LINK.value: self.handle_link_message, + MessageTypes.EXECUTE.value: self.handle_execute_message, + MessageTypes.REGISTER.value: self.handle_register_message, + MessageTypes.CONFIG.value: self.handle_config_message, + MessageTypes.FILE.value: self.handle_file_message, + MessageTypes.INTERFACE.value: self.handle_interface_message, + MessageTypes.EVENT.value: self.handle_event_message, + MessageTypes.SESSION.value: self.handle_session_message, + } + self.message_queue = Queue.Queue() + self.node_status_request = {} + self._shutdown_lock = threading.Lock() + + self.handler_threads = [] + num_threads = int(server.config["numthreads"]) + if num_threads < 1: + raise ValueError("invalid number of threads: %s" % num_threads) + + logger.info("launching core server handler threads: %s", num_threads) + for _ in xrange(num_threads): + thread = threading.Thread(target=self.handler_thread) + self.handler_threads.append(thread) + thread.start() + + self.master = False + self.session = None + + utils.closeonexec(request.fileno()) + SocketServer.BaseRequestHandler.__init__(self, request, client_address, server) + + def setup(self): + """ + Client has connected, set up a new connection. 
+ + :return: nothing + """ + logger.info("new TCP connection: %s", self.client_address) + # self.register() + + def finish(self): + """ + Client has disconnected, end this request handler and disconnect + from the session. Shutdown sessions that are not running. + + :return: nothing + """ + logger.info("finishing request handler") + self.done = True + + logger.info("remaining message queue size: %s", self.message_queue.qsize()) + # seconds + timeout = 10.0 + logger.info("client disconnected: notifying threads") + for thread in self.handler_threads: + logger.info("waiting for thread: %s", thread.getName()) + thread.join(timeout) + if thread.isAlive(): + logger.warn("joining %s failed: still alive after %s sec", thread.getName(), timeout) + + logger.info("connection closed: %s", self.client_address) + if self.session: + self.session.event_handlers.remove(self.handle_broadcast_event) + self.session.exception_handlers.remove(self.handle_broadcast_exception) + self.session.node_handlers.remove(self.handle_broadcast_node) + self.session.link_handlers.remove(self.handle_broadcast_link) + self.session.shutdown() + + return SocketServer.BaseRequestHandler.finish(self) + + def handle_broadcast_event(self, event_data): + """ + Callback to handle an event broadcast out from a session. + + :param core.data.EventData event_data: event data to handle + :return: nothing + """ + logger.info("handling broadcast event: %s", event_data) + + tlv_data = structutils.pack_values(coreapi.CoreEventTlv, [ + (EventTlvs.NODE, event_data.node), + (EventTlvs.TYPE, event_data.event_type), + (EventTlvs.NAME, event_data.name), + (EventTlvs.DATA, event_data.data), + (EventTlvs.TIME, event_data.time), + (EventTlvs.TIME, event_data.session) + ]) + message = coreapi.CoreEventMessage.pack(0, tlv_data) + + try: + self.sendall(message) + except IOError: + logger.exception("error sending event message") + + def handle_broadcast_file(self, file_data): + """ + Callback to handle a file broadcast out from a session. + + :param core.data.FileData file_data: file data to handle + :return: nothing + """ + logger.info("handling broadcast file: %s", file_data) + + tlv_data = structutils.pack_values(coreapi.CoreFileTlv, [ + (FileTlvs.NODE, file_data.node), + (FileTlvs.NAME, file_data.name), + (FileTlvs.MODE, file_data.mode), + (FileTlvs.NUMBER, file_data.number), + (FileTlvs.TYPE, file_data.type), + (FileTlvs.SOURCE_NAME, file_data.source), + (FileTlvs.SESSION, file_data.session), + (FileTlvs.DATA, file_data.data), + (FileTlvs.COMPRESSED_DATA, file_data.compressed_data), + ]) + message = coreapi.CoreFileMessage.pack(file_data.message_type, tlv_data) + + try: + self.sendall(message) + except IOError: + logger.exception("error sending file message") + + def handle_broadcast_config(self, config_data): + """ + Callback to handle a config broadcast out from a session. 
+ + :param core.data.ConfigData config_data: config data to handle + :return: nothing + """ + logger.info("handling broadcast config: %s", config_data) + + tlv_data = structutils.pack_values(coreapi.CoreConfigTlv, [ + (ConfigTlvs.NODE, config_data.node), + (ConfigTlvs.OBJECT, config_data.object), + (ConfigTlvs.TYPE, config_data.type), + (ConfigTlvs.DATA_TYPES, config_data.data_types), + (ConfigTlvs.VALUES, config_data.data_values), + (ConfigTlvs.CAPTIONS, config_data.captions), + (ConfigTlvs.BITMAP, config_data.bitmap), + (ConfigTlvs.POSSIBLE_VALUES, config_data.possible_values), + (ConfigTlvs.GROUPS, config_data.groups), + (ConfigTlvs.SESSION, config_data.session), + (ConfigTlvs.INTERFACE_NUMBER, config_data.interface_number), + (ConfigTlvs.NETWORK_ID, config_data.network_id), + (ConfigTlvs.OPAQUE, config_data.opaque), + ]) + message = coreapi.CoreConfMessage.pack(config_data.message_type, tlv_data) + + try: + self.sendall(message) + except IOError: + logger.exception("error sending config message") + + def handle_broadcast_exception(self, exception_data): + """ + Callback to handle an exception broadcast out from a session. + + :param core.data.ExceptionData exception_data: exception data to handle + :return: nothing + """ + logger.info("handling broadcast exception: %s", exception_data) + tlv_data = structutils.pack_values(coreapi.CoreExceptionTlv, [ + (ExceptionTlvs.NODE, exception_data.node), + (ExceptionTlvs.SESSION, exception_data.session), + (ExceptionTlvs.LEVEL, exception_data.level), + (ExceptionTlvs.SOURCE, exception_data.source), + (ExceptionTlvs.DATE, exception_data.date), + (ExceptionTlvs.TEXT, exception_data.text) + ]) + message = coreapi.CoreExceptionMessage.pack(0, tlv_data) + + try: + self.sendall(message) + except IOError: + logger.exception("error sending exception message") + + def handle_broadcast_node(self, node_data): + """ + Callback to handle an node broadcast out from a session. + + :param core.data.NodeData node_data: node data to handle + :return: nothing + """ + logger.info("handling broadcast node: %s", node_data) + + tlv_data = structutils.pack_values(coreapi.CoreNodeTlv, [ + (NodeTlvs.NUMBER, node_data.id), + (NodeTlvs.TYPE, node_data.node_type), + (NodeTlvs.NAME, node_data.name), + (NodeTlvs.IP_ADDRESS, node_data.ip_address), + (NodeTlvs.MAC_ADDRESS, node_data.mac_address), + (NodeTlvs.IP6_ADDRESS, node_data.ip6_address), + (NodeTlvs.MODEL, node_data.model), + (NodeTlvs.EMULATION_ID, node_data.emulation_id), + (NodeTlvs.EMULATION_SERVER, node_data.emulation_server), + (NodeTlvs.SESSION, node_data.session), + (NodeTlvs.X_POSITION, node_data.x_position), + (NodeTlvs.Y_POSITION, node_data.y_position), + (NodeTlvs.CANVAS, node_data.canvas), + (NodeTlvs.NETWORK_ID, node_data.network_id), + (NodeTlvs.SERVICES, node_data.services), + (NodeTlvs.LATITUDE, node_data.latitude), + (NodeTlvs.LONGITUDE, node_data.longitude), + (NodeTlvs.ALTITUDE, node_data.altitude), + (NodeTlvs.ICON, node_data.icon), + (NodeTlvs.OPAQUE, node_data.opaque) + ]) + message = coreapi.CoreNodeMessage.pack(node_data.message_type, tlv_data) + + try: + self.sendall(message) + except IOError: + logger.exception("error sending node message") + + def handle_broadcast_link(self, link_data): + """ + Callback to handle an link broadcast out from a session. 
+
+        :param core.data.LinkData link_data: link data to handle
+        :return: nothing
+        """
+        logger.info("handling broadcast link: %s", link_data)
+
+        tlv_data = structutils.pack_values(coreapi.CoreLinkTlv, [
+            (LinkTlvs.N1_NUMBER, link_data.node1_id),
+            (LinkTlvs.N2_NUMBER, link_data.node2_id),
+            (LinkTlvs.DELAY, link_data.delay),
+            (LinkTlvs.BANDWIDTH, link_data.bandwidth),
+            (LinkTlvs.PER, link_data.per),
+            (LinkTlvs.DUP, link_data.dup),
+            (LinkTlvs.JITTER, link_data.jitter),
+            (LinkTlvs.MER, link_data.mer),
+            (LinkTlvs.BURST, link_data.burst),
+            (LinkTlvs.SESSION, link_data.session),
+            (LinkTlvs.MBURST, link_data.mburst),
+            (LinkTlvs.TYPE, link_data.link_type),
+            (LinkTlvs.GUI_ATTRIBUTES, link_data.gui_attributes),
+            (LinkTlvs.UNIDIRECTIONAL, link_data.unidirectional),
+            (LinkTlvs.EMULATION_ID, link_data.emulation_id),
+            (LinkTlvs.NETWORK_ID, link_data.network_id),
+            (LinkTlvs.KEY, link_data.key),
+            (LinkTlvs.INTERFACE1_NUMBER, link_data.interface1_id),
+            (LinkTlvs.INTERFACE1_NAME, link_data.interface1_name),
+            (LinkTlvs.INTERFACE1_IP4, link_data.interface1_ip4),
+            (LinkTlvs.INTERFACE1_IP4_MASK, link_data.interface1_ip4_mask),
+            (LinkTlvs.INTERFACE1_MAC, link_data.interface1_mac),
+            (LinkTlvs.INTERFACE1_IP6, link_data.interface1_ip6),
+            (LinkTlvs.INTERFACE1_IP6_MASK, link_data.interface1_ip6_mask),
+            (LinkTlvs.INTERFACE2_NUMBER, link_data.interface2_id),
+            (LinkTlvs.INTERFACE2_NAME, link_data.interface2_name),
+            (LinkTlvs.INTERFACE2_IP4, link_data.interface2_ip4),
+            (LinkTlvs.INTERFACE2_IP4_MASK, link_data.interface2_ip4_mask),
+            (LinkTlvs.INTERFACE2_MAC, link_data.interface2_mac),
+            (LinkTlvs.INTERFACE2_IP6, link_data.interface2_ip6),
+            (LinkTlvs.INTERFACE2_IP6_MASK, link_data.interface2_ip6_mask),
+            (LinkTlvs.OPAQUE, link_data.opaque)
+        ])
+
+        message = coreapi.CoreLinkMessage.pack(link_data.message_type, tlv_data)
+
+        try:
+            self.sendall(message)
+        except IOError:
+            logger.exception("error sending link message")
+
+    def register(self):
+        """
+        Return a register message.
+
+        :return: register message data
+        """
+        logger.info("GUI has connected to session %d at %s", self.session.session_id, time.ctime())
+
+        tlv_data = ""
+        tlv_data += coreapi.CoreRegisterTlv.pack(RegisterTlvs.EXECUTE_SERVER.value, "core-daemon")
+        tlv_data += coreapi.CoreRegisterTlv.pack(RegisterTlvs.EMULATION_SERVER.value, "core-daemon")
+
+        # get config objects for session
+        for name in self.session.config_objects:
+            config_type, callback = self.session.config_objects[name]
+            # type must be in coreapi.reg_tlvs
+            tlv_data += coreapi.CoreRegisterTlv.pack(config_type, name)
+
+        return coreapi.CoreRegMessage.pack(MessageFlags.ADD.value, tlv_data)
+
+    def sendall(self, data):
+        """
+        Send raw data to the other end of this TCP connection
+        using the socket's sendall().
+
+        :param data: data to send over request socket
+        :return: data sent
+        """
+        return self.request.sendall(data)
+
+    def receive_message(self):
+        """
+        Receive data and return a CORE API message object.
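+
+        The data arrives as a header of coreapi.CoreMessage.header_len bytes
+        that unpacks to (type, flags, length), followed by exactly length
+        bytes of TLV payload; both parts are read from the request socket
+        before the message object is constructed.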
+ + :return: received message + :rtype: coreapi.CoreMessage + """ + try: + header = self.request.recv(coreapi.CoreMessage.header_len) + if len(header) > 0: + logger.debug("received message header: %s", utils.hexdump(header)) + except IOError as e: + raise IOError("error receiving header (%s)" % e) + + if len(header) != coreapi.CoreMessage.header_len: + if len(header) == 0: + raise EOFError("client disconnected") + else: + raise IOError("invalid message header size") + + message_type, message_flags, message_len = coreapi.CoreMessage.unpack_header(header) + if message_len == 0: + logger.warn("received message with no data") + + data = "" + while len(data) < message_len: + data += self.request.recv(message_len - len(data)) + logger.debug("received message data: %s" % utils.hexdump(data)) + if len(data) > message_len: + error_message = "received message length does not match received data (%s != %s)" % ( + len(data), message_len) + logger.error(error_message) + raise IOError(error_message) + + try: + message_class = coreapi.CLASS_MAP[message_type] + message = message_class(message_flags, header, data) + except KeyError: + message = coreapi.CoreMessage(message_flags, header, data) + message.message_type = message_type + logger.exception("unimplemented core message type: %s", message.type_str()) + + return message + + def queue_message(self, message): + """ + Queue an API message for later processing. + + :param message: message to queue + :return: nothing + """ + logger.info("queueing msg (queuedtimes = %s): type %s", + message.queuedtimes, MessageTypes(message.message_type)) + self.message_queue.put(message) + + def handler_thread(self): + """ + CORE API message handling loop that is spawned for each server + thread; get CORE API messages from the incoming message queue, + and call handlemsg() for processing. + + :return: nothing + """ + while not self.done: + try: + message = self.message_queue.get(timeout=5) + self.handle_message(message) + except Queue.Empty: + logger.debug("timeout getting message") + + def handle_message(self, message): + """ + Handle an incoming message; dispatch based on message type, + optionally sending replies. + + :return: nothing + """ + if self.session and self.session.broker.handle_message(message): + logger.info("%s forwarding message:\n%s", threading.currentThread().getName(), message) + return + + logger.info("%s handling message:\n%s", threading.currentThread().getName(), message) + + if message.message_type not in self.message_handlers: + logger.warn("no handler for message type: %s", message.type_str()) + return + + message_handler = self.message_handlers[message.message_type] + + try: + replies = message_handler(message) + self.dispatch_replies(replies, message) + except: + logger.exception("%s: exception while handling message: %s", + threading.currentThread().getName(), message) + + # Added to allow the auxiliary handlers to define a different behavior when replying + # to messages from clients + def dispatch_replies(self, replies, message): + """ + Dispatch replies by CORE to message msg previously received from the client. 
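+
+        Each reply is an already-packed API message (a raw string); it is
+        unpacked here only for logging and is then written back to the client
+        verbatim via sendall().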
+ + :param replies: reply messages to dispatch + :param message: message for replies + :return: nothing + """ + logger.info("replies to dispatch: %s", replies) + for reply in replies: + message_type, message_flags, message_length = coreapi.CoreMessage.unpack_header(reply) + try: + reply_message = coreapi.CLASS_MAP[message_type]( + message_flags, + reply[:coreapi.CoreMessage.header_len], + reply[coreapi.CoreMessage.header_len:] + ) + except KeyError: + # multiple TLVs of same type cause KeyError exception + reply_message = "CoreMessage (type %d flags %d length %d)" % ( + message_type, message_flags, message_length) + + logger.info("%s: reply msg: \n%s", threading.currentThread().getName(), reply_message) + + try: + self.sendall(reply) + except IOError: + logger.exception("Error sending reply data") + + def handle(self): + """ + Handle a new connection request from a client. Dispatch to the + recvmsg() method for receiving data into CORE API messages, and + add them to an incoming message queue. + + :return: nothing + """ + # use port as session id + port = self.request.getpeername()[1] + + logger.info("creating new session for client: %s", port) + self.session = self.server.create_session(session_id=port) + + # TODO: hack to associate this handler with this sessions broker for broadcasting + # TODO: broker needs to be pulled out of session to the server/handler level + self.session.broker.session_handler = self + # self.session.connect(self) + + # add handlers for various data + logger.info("adding session broadcast handlers") + self.session.event_handlers.append(self.handle_broadcast_event) + self.session.exception_handlers.append(self.handle_broadcast_exception) + self.session.node_handlers.append(self.handle_broadcast_node) + self.session.link_handlers.append(self.handle_broadcast_link) + self.session.file_handlers.append(self.handle_broadcast_file) + + # set initial session state + self.session.set_state(state=EventTypes.DEFINITION_STATE.value) + + while True: + try: + message = self.receive_message() + except (IOError, EOFError): + logger.exception("error receiving message") + break + + message.queuedtimes = 0 + self.queue_message(message) + + if message.message_type == MessageTypes.SESSION.value: + # delay is required for brief connections, allow session joining + time.sleep(0.125) + + # TODO: do we really want to broadcast node and link messages from a client to other clients? 
+ # self.session.broadcast(self, message) + + def handle_node_message(self, message): + """ + Node Message handler + + :param coreapi.CoreNodeMessage message: node message + :return: replies to node message + """ + replies = [] + if message.flags & MessageFlags.ADD.value and message.flags & MessageFlags.DELETE.value: + logger.warn("ignoring invalid message: add and delete flag both set") + return () + + node_id = message.tlv_data[NodeTlvs.NUMBER.value] + x_position = message.get_tlv(NodeTlvs.X_POSITION.value) + y_position = message.get_tlv(NodeTlvs.Y_POSITION.value) + canvas = message.get_tlv(NodeTlvs.CANVAS.value) + icon = message.get_tlv(NodeTlvs.ICON.value) + lat = message.get_tlv(NodeTlvs.LATITUDE.value) + lng = message.get_tlv(NodeTlvs.LONGITUDE.value) + alt = message.get_tlv(NodeTlvs.ALTITUDE.value) + + if x_position is None and y_position is None and \ + lat is not None and lng is not None and alt is not None: + x, y, z = self.session.location.getxyz(float(lat), float(lng), float(alt)) + x_position = int(x) + y_position = int(y) + + # GUI can"t handle lat/long, so generate another X/Y position message + node_data = NodeData( + id=node_id, + x_position=x_position, + y_position=y_position + ) + + self.session.broadcast_node(node_data) + + if message.flags & MessageFlags.ADD.value: + node_type = message.tlv_data[NodeTlvs.TYPE.value] + try: + node_class = nodeutils.get_node_class(NodeTypes(node_type)) + except KeyError: + try: + node_type_str = " (%s)" % NodeTypes(node_type).name + except KeyError: + node_type_str = "" + + logger.warn("warning: unimplemented node type: %s%s" % (node_type, node_type_str)) + return () + + start = False + if self.session.state > EventTypes.DEFINITION_STATE.value: + start = True + + node_name = message.tlv_data[NodeTlvs.NAME.value] + model = message.get_tlv(NodeTlvs.MODEL.value) + class_args = {"start": start} + + if node_type == NodeTypes.XEN.value: + class_args["model"] = model + + if node_type == NodeTypes.RJ45.value and hasattr( + self.session.options, "enablerj45") and self.session.options.enablerj45 == "0": + class_args["start"] = False + + # this instantiates an object of class nodecls, + # creating the node or network + node = self.session.add_object(cls=node_class, objid=node_id, name=node_name, **class_args) + if x_position is not None and y_position is not None: + node.setposition(x_position, y_position, None) + if canvas is not None: + node.canvas = canvas + if icon is not None: + node.icon = icon + opaque = message.get_tlv(NodeTlvs.OPAQUE.value) + if opaque is not None: + node.opaque = opaque + + # add services to a node, either from its services TLV or + # through the configured defaults for this node type + if node_type in [NodeTypes.DEFAULT.value, NodeTypes.PHYSICAL.value, NodeTypes.XEN.value]: + if model is None: + # TODO: default model from conf file? 
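+                    # Until such a default exists, fall back to the "router"
+                    # model below so the node still receives a set of services.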
+ model = "router" + node.type = model + services_str = message.get_tlv(NodeTlvs.SERVICES.value) + self.session.services.addservicestonode(node, model, services_str) + + # boot nodes if they are added after runtime (like + # session.bootnodes()) + if self.session.state == EventTypes.RUNTIME_STATE.value: + if isinstance(node, nodes.PyCoreNode) and not nodeutils.is_node(node, NodeTypes.RJ45): + self.session.write_objects() + self.session.add_remove_control_interface(node=node, remove=False) + node.boot() + + if message.flags & MessageFlags.STRING.value: + self.node_status_request[node_id] = True + self.send_node_emulation_id(node_id) + elif message.flags & MessageFlags.STRING.value: + self.node_status_request[node_id] = True + + elif message.flags & MessageFlags.DELETE.value: + with self._shutdown_lock: + self.session.delete_object(node_id) + + if message.flags & MessageFlags.STRING.value: + tlvdata = "" + tlvdata += coreapi.CoreNodeTlv.pack(NodeTlvs.NUMBER.value, node_id) + flags = MessageFlags.DELETE.value | MessageFlags.LOCAL.value + replies.append(coreapi.CoreNodeMessage.pack(flags, tlvdata)) + + self.session.check_shutdown() + # Node modify message (no add/del flag) + else: + try: + node = self.session.get_object(node_id) + + if x_position is not None and y_position is not None: + node.setposition(x_position, y_position, None) + + if canvas is not None: + node.canvas = canvas + + if icon is not None: + node.icon = icon + except KeyError: + logger.exception("ignoring node message: unknown node number %s", node_id) + + return replies + + def handle_link_message(self, message): + """ + Link Message handler + + :param coreapi.CoreLinkMessage message: link message to handle + :return: link message replies + """ + # get node classes + ptp_class = nodeutils.get_node_class(NodeTypes.PEER_TO_PEER) + + node_num1 = message.get_tlv(LinkTlvs.N1_NUMBER.value) + interface_index1 = message.get_tlv(LinkTlvs.INTERFACE1_NUMBER.value) + ipv41 = message.get_tlv(LinkTlvs.INTERFACE1_IP4.value) + ipv4_mask1 = message.get_tlv(LinkTlvs.INTERFACE1_IP4_MASK.value) + mac1 = message.get_tlv(LinkTlvs.INTERFACE1_MAC.value) + ipv61 = message.get_tlv(LinkTlvs.INTERFACE1_IP6.value) + ipv6_mask1 = message.get_tlv(LinkTlvs.INTERFACE1_IP6_MASK.value) + interface_name1 = message.get_tlv(LinkTlvs.INTERFACE1_NAME.value) + + node_num2 = message.get_tlv(LinkTlvs.N2_NUMBER.value) + interface_index2 = message.get_tlv(LinkTlvs.INTERFACE2_NUMBER.value) + ipv42 = message.get_tlv(LinkTlvs.INTERFACE2_IP4.value) + ipv4_mask2 = message.get_tlv(LinkTlvs.INTERFACE2_IP4_MASK.value) + mac2 = message.get_tlv(LinkTlvs.INTERFACE2_MAC.value) + ipv62 = message.get_tlv(LinkTlvs.INTERFACE2_IP6.value) + ipv6_mask2 = message.get_tlv(LinkTlvs.INTERFACE2_IP6_MASK.value) + interface_name2 = message.get_tlv(LinkTlvs.INTERFACE2_NAME.value) + + node1 = None + node2 = None + net = None + net2 = None + + unidirectional_value = message.get_tlv(LinkTlvs.UNIDIRECTIONAL.value) + if unidirectional_value == 1: + unidirectional = True + else: + unidirectional = False + + # one of the nodes may exist on a remote server + if node_num1 is not None and node_num2 is not None: + tunnel = self.session.broker.gettunnel(node_num1, node_num2) + if isinstance(tunnel, coreobj.PyCoreNet): + net = tunnel + if tunnel.remotenum == node_num1: + node_num1 = None + else: + node_num2 = None + # PhysicalNode connected via GreTap tunnel; uses adoptnetif() below + elif tunnel is not None: + if tunnel.remotenum == node_num1: + node_num1 = None + else: + node_num2 = None + + if node_num1 
is not None: + try: + n = self.session.get_object(node_num1) + except KeyError: + # XXX wait and queue this message to try again later + # XXX maybe this should be done differently + time.sleep(0.125) + self.queue_message(message) + return () + if isinstance(n, nodes.PyCoreNode): + node1 = n + elif isinstance(n, coreobj.PyCoreNet): + if net is None: + net = n + else: + net2 = n + else: + raise ValueError("unexpected object class: %s" % n) + + if node_num2 is not None: + try: + n = self.session.get_object(node_num2) + except KeyError: + # XXX wait and queue this message to try again later + # XXX maybe this should be done differently + time.sleep(0.125) + self.queue_message(message) + return () + if isinstance(n, nodes.PyCoreNode): + node2 = n + elif isinstance(n, coreobj.PyCoreNet): + if net is None: + net = n + else: + net2 = n + else: + raise ValueError("unexpected object class: %s" % n) + + link_msg_type = message.get_tlv(LinkTlvs.TYPE.value) + + if node1: + node1.lock.acquire() + if node2: + node2.lock.acquire() + + try: + if link_msg_type == LinkTypes.WIRELESS.value: + """ + Wireless link/unlink event + """ + numwlan = 0 + objs = [node1, node2, net, net2] + objs = filter(lambda (x): x is not None, objs) + if len(objs) < 2: + raise ValueError("wireless link/unlink message between unknown objects") + + nets = objs[0].commonnets(objs[1]) + for netcommon, netif1, netif2 in nets: + if not nodeutils.is_node(netcommon, (NodeTypes.WIRELESS_LAN, NodeTypes.EMANE)): + continue + if message.flags & MessageFlags.ADD.value: + netcommon.link(netif1, netif2) + elif message.flags & MessageFlags.DELETE.value: + netcommon.unlink(netif1, netif2) + else: + raise ValueError("invalid flags for wireless link/unlink message") + numwlan += 1 + if numwlan == 0: + raise ValueError("no common network found for wireless link/unlink") + + elif message.flags & MessageFlags.ADD.value: + """ + Add a new link. 
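+                When both endpoints are nodes and no network is involved, a
+                point-to-point network object is created first; each node side
+                then gets an interface via newnetif() using the address TLVs,
+                and link effects (bandwidth, delay, loss, duplicate, jitter)
+                are applied through net.linkconfig().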
+ """ + start = False + if self.session.state > EventTypes.DEFINITION_STATE.value: + start = True + + if node1 and node2 and not net: + # a new wired link + net = self.session.add_object(cls=ptp_class, start=start) + + bw = message.get_tlv(LinkTlvs.BANDWIDTH.value) + delay = message.get_tlv(LinkTlvs.DELAY.value) + loss = message.get_tlv(LinkTlvs.PER.value) + duplicate = message.get_tlv(LinkTlvs.DUP.value) + jitter = message.get_tlv(LinkTlvs.JITTER.value) + key = message.get_tlv(LinkTlvs.KEY.value) + + netaddrlist = [] + # print " n1=%s n2=%s net=%s net2=%s" % (node1, node2, net, net2) + if node1 and net: + addrlist = [] + if ipv41 is not None and ipv4_mask1 is not None: + addrlist.append("%s/%s" % (ipv41, ipv4_mask1)) + if ipv61 is not None and ipv6_mask1 is not None: + addrlist.append("%s/%s" % (ipv61, ipv6_mask1)) + if ipv42 is not None and ipv4_mask2 is not None: + netaddrlist.append("%s/%s" % (ipv42, ipv4_mask2)) + if ipv62 is not None and ipv6_mask2 is not None: + netaddrlist.append("%s/%s" % (ipv62, ipv6_mask2)) + interface_index1 = node1.newnetif( + net, addrlist=addrlist, + hwaddr=mac1, ifindex=interface_index1, ifname=interface_name1 + ) + net.linkconfig( + node1.netif(interface_index1, net), bw=bw, + delay=delay, loss=loss, + duplicate=duplicate, jitter=jitter + ) + if node1 is None and net: + if ipv41 is not None and ipv4_mask1 is not None: + netaddrlist.append("%s/%s" % (ipv41, ipv4_mask1)) + # don"t add this address again if node2 and net + ipv41 = None + if ipv61 is not None and ipv6_mask1 is not None: + netaddrlist.append("%s/%s" % (ipv61, ipv6_mask1)) + # don"t add this address again if node2 and net + ipv61 = None + if node2 and net: + addrlist = [] + if ipv42 is not None and ipv4_mask2 is not None: + addrlist.append("%s/%s" % (ipv42, ipv4_mask2)) + if ipv62 is not None and ipv6_mask2 is not None: + addrlist.append("%s/%s" % (ipv62, ipv6_mask2)) + if ipv41 is not None and ipv4_mask1 is not None: + netaddrlist.append("%s/%s" % (ipv41, ipv4_mask1)) + if ipv61 is not None and ipv6_mask1 is not None: + netaddrlist.append("%s/%s" % (ipv61, ipv6_mask1)) + interface_index2 = node2.newnetif( + net, addrlist=addrlist, + hwaddr=mac2, ifindex=interface_index2, ifname=interface_name2 + ) + if not unidirectional: + net.linkconfig( + node2.netif(interface_index2, net), bw=bw, + delay=delay, loss=loss, + duplicate=duplicate, jitter=jitter + ) + if node2 is None and net2: + if ipv42 is not None and ipv4_mask2 is not None: + netaddrlist.append("%s/%s" % (ipv42, ipv4_mask2)) + if ipv62 is not None and ipv6_mask2 is not None: + netaddrlist.append("%s/%s" % (ipv62, ipv6_mask2)) + + # tunnel node finalized with this link message + if key and nodeutils.is_node(net, NodeTypes.TUNNEL): + net.setkey(key) + if len(netaddrlist) > 0: + net.addrconfig(netaddrlist) + if key and nodeutils.is_node(net2, NodeTypes.TUNNEL): + net2.setkey(key) + if len(netaddrlist) > 0: + net2.addrconfig(netaddrlist) + + if net and net2: + # two layer-2 networks linked together + if nodeutils.is_node(net2, NodeTypes.RJ45): + # RJ45 nodes have different linknet() + netif = net2.linknet(net) + else: + netif = net.linknet(net2) + net.linkconfig(netif, bw=bw, delay=delay, loss=loss, + duplicate=duplicate, jitter=jitter) + if not unidirectional: + netif.swapparams("_params_up") + net2.linkconfig(netif, bw=bw, delay=delay, loss=loss, + duplicate=duplicate, jitter=jitter, + devname=netif.name) + netif.swapparams("_params_up") + elif net is None and net2 is None and (node1 is None or node2 is None): + # apply address/parameters to 
PhysicalNodes + fx = (bw, delay, loss, duplicate, jitter) + addrlist = [] + if node1 and nodeutils.is_node(node1, NodeTypes.PHYSICAL): + if ipv41 is not None and ipv4_mask1 is not None: + addrlist.append("%s/%s" % (ipv41, ipv4_mask1)) + if ipv61 is not None and ipv6_mask1 is not None: + addrlist.append("%s/%s" % (ipv61, ipv6_mask1)) + node1.adoptnetif(tunnel, interface_index1, mac1, addrlist) + node1.linkconfig(tunnel, bw, delay, loss, duplicate, jitter) + elif node2 and nodeutils.is_node(node2, NodeTypes.PHYSICAL): + if ipv42 is not None and ipv4_mask2 is not None: + addrlist.append("%s/%s" % (ipv42, ipv4_mask2)) + if ipv62 is not None and ipv6_mask2 is not None: + addrlist.append("%s/%s" % (ipv62, ipv6_mask2)) + node2.adoptnetif(tunnel, interface_index2, mac2, addrlist) + node2.linkconfig(tunnel, bw, delay, loss, duplicate, jitter) + # delete a link + elif message.flags & MessageFlags.DELETE.value: + """ + Remove a link. + """ + if node1 and node2: + # TODO: fix this for the case where ifindex[1,2] are + # not specified + # a wired unlink event, delete the connecting bridge + netif1 = node1.netif(interface_index1) + netif2 = node2.netif(interface_index2) + if netif1 is None and netif2 is None: + nets = node1.commonnets(node2) + for netcommon, tmp1, tmp2 in nets: + if (net and netcommon == net) or net is None: + netif1 = tmp1 + netif2 = tmp2 + break + if netif1 is None or netif2 is None: + pass + elif netif1.net or netif2.net: + if netif1.net != netif2.net: + if not netif1.up or not netif2.up: + pass + else: + raise ValueError("no common network found") + net = netif1.net + netif1.detachnet() + netif2.detachnet() + if net.numnetif() == 0: + self.session.delete_object(net.objid) + node1.delnetif(interface_index1) + node2.delnetif(interface_index2) + else: + """ + Modify a link. + """ + bw = message.get_tlv(LinkTlvs.BANDWIDTH.value) + delay = message.get_tlv(LinkTlvs.DELAY.value) + loss = message.get_tlv(LinkTlvs.PER.value) + duplicate = message.get_tlv(LinkTlvs.DUP.value) + jitter = message.get_tlv(LinkTlvs.JITTER.value) + numnet = 0 + # TODO: clean up all this logic. Having the add flag or not + # should use the same code block. 
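+                # The modify path depends on which endpoints were resolved:
+                # two networks reconfigure their shared interface (swapping
+                # the upstream parameter set when needed), a single node plus
+                # a network reconfigures that node's interface on the network,
+                # and two nodes walk their common networks.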
+ if node1 is None and node2 is None: + if net and net2: + # modify link between nets + netif = net.getlinknetif(net2) + upstream = False + if netif is None: + upstream = True + netif = net2.getlinknetif(net) + if netif is None: + raise ValueError, "modify unknown link between nets" + if upstream: + netif.swapparams("_params_up") + net.linkconfig(netif, bw=bw, delay=delay, + loss=loss, duplicate=duplicate, + jitter=jitter, devname=netif.name) + netif.swapparams("_params_up") + else: + net.linkconfig(netif, bw=bw, delay=delay, + loss=loss, duplicate=duplicate, + jitter=jitter) + if not unidirectional: + if upstream: + net2.linkconfig(netif, bw=bw, delay=delay, + loss=loss, + duplicate=duplicate, + jitter=jitter) + else: + netif.swapparams("_params_up") + net2.linkconfig(netif, bw=bw, delay=delay, + loss=loss, + duplicate=duplicate, + jitter=jitter, + devname=netif.name) + netif.swapparams("_params_up") + else: + raise ValueError("modify link for unknown nodes") + elif node1 is None: + # node1 = layer 2node, node2 = layer3 node + net.linkconfig(node2.netif(interface_index2, net), bw=bw, + delay=delay, loss=loss, + duplicate=duplicate, jitter=jitter) + elif node2 is None: + # node2 = layer 2node, node1 = layer3 node + net.linkconfig(node1.netif(interface_index1, net), bw=bw, + delay=delay, loss=loss, + duplicate=duplicate, jitter=jitter) + else: + nets = node1.commonnets(node2) + for net, netif1, netif2 in nets: + if interface_index1 is not None and interface_index1 != node1.getifindex(netif1): + continue + net.linkconfig(netif1, bw=bw, delay=delay, + loss=loss, duplicate=duplicate, + jitter=jitter, netif2=netif2) + if not unidirectional: + net.linkconfig(netif2, bw=bw, delay=delay, + loss=loss, duplicate=duplicate, + jitter=jitter, netif2=netif1) + numnet += 1 + if numnet == 0: + raise ValueError("no common network found") + finally: + if node1: + node1.lock.release() + if node2: + node2.lock.release() + + return () + + def handle_execute_message(self, message): + """ + Execute Message handler + + :param coreapi.CoreExecMessage message: execute message to handle + :return: reply messages + """ + node_num = message.get_tlv(ExecuteTlvs.NODE.value) + execute_num = message.get_tlv(ExecuteTlvs.NUMBER.value) + execute_time = message.get_tlv(ExecuteTlvs.TIME.value) + command = message.get_tlv(ExecuteTlvs.COMMAND.value) + + # local flag indicates command executed locally, not on a node + if node_num is None and not message.flags & MessageFlags.LOCAL.value: + raise ValueError("Execute Message is missing node number.") + + if execute_num is None: + raise ValueError("Execute Message is missing execution number.") + + if execute_time is not None: + self.session.add_event(execute_time, node=node_num, name=None, data=command) + return () + + try: + node = self.session.get_object(node_num) + + # build common TLV items for reply + tlv_data = "" + if node_num is not None: + tlv_data += coreapi.CoreExecuteTlv.pack(ExecuteTlvs.NODE.value, node_num) + tlv_data += coreapi.CoreExecuteTlv.pack(ExecuteTlvs.NUMBER.value, execute_num) + tlv_data += coreapi.CoreExecuteTlv.pack(ExecuteTlvs.COMMAND.value, command) + + if message.flags & MessageFlags.TTY.value: + if node_num is None: + raise NotImplementedError + # echo back exec message with cmd for spawning interactive terminal + if command == "bash": + command = "/bin/bash" + res = node.termcmdstring(command) + tlv_data += coreapi.CoreExecuteTlv.pack(ExecuteTlvs.RESULT.value, res) + reply = coreapi.CoreExecMessage.pack(MessageFlags.TTY.value, tlv_data) + return reply, 
+ else: + logger.info("execute message with cmd=%s", command) + # execute command and send a response + if message.flags & MessageFlags.STRING.value or message.flags & MessageFlags.TEXT.value: + # shlex.split() handles quotes within the string + if message.flags & MessageFlags.LOCAL.value: + status, res = utils.cmdresult(shlex.split(command)) + else: + status, res = node.cmdresult(shlex.split(command)) + logger.info("done exec cmd=%s with status=%d res=(%d bytes)", command, status, len(res)) + if message.flags & MessageFlags.TEXT.value: + tlv_data += coreapi.CoreExecuteTlv.pack(ExecuteTlvs.RESULT.value, res) + if message.flags & MessageFlags.STRING.value: + tlv_data += coreapi.CoreExecuteTlv.pack(ExecuteTlvs.STATUS.value, status) + reply = coreapi.CoreExecMessage.pack(0, tlv_data) + return reply, + # execute the command with no response + else: + if message.flags & MessageFlags.LOCAL.value: + utils.mutedetach(shlex.split(command)) + else: + node.cmd(shlex.split(command), wait=False) + except KeyError: + logger.exception("error getting object: %s", node_num) + # XXX wait and queue this message to try again later + # XXX maybe this should be done differently + if not message.flags & MessageFlags.LOCAL.value: + time.sleep(0.125) + self.queue_message(message) + + return () + + def handle_register_message(self, message): + """ + Register Message Handler + + :param coreapi.CoreRegMessage message: register message to handle + :return: reply messages + """ + replies = [] + + # execute a Python script or XML file + execute_server = message.get_tlv(RegisterTlvs.EXECUTE_SERVER.value) + if execute_server: + try: + logger.info("executing: %s", execute_server) + if not isinstance(self.server, CoreServer): # CoreUdpServer): + server = self.server.mainserver + # elif isinstance(self.server, CoreAuxServer): + # server = self.server.mainserver + else: + server = self.server + if message.flags & MessageFlags.STRING.value: + old_session_ids = set(server.get_session_ids()) + sys.argv = shlex.split(execute_server) + file_name = sys.argv[0] + if os.path.splitext(file_name)[1].lower() == ".xml": + session = server.create_session() + try: + open_session_xml(session, file_name, start=True) + except: + session.shutdown() + server.remove_session(session) + raise + else: + thread = threading.Thread( + target=execfile, + args=(file_name, {"__file__": file_name, "server": server}) + ) + thread.daemon = True + thread.start() + # allow time for session creation + time.sleep(0.25) + if message.flags & MessageFlags.STRING.value: + new_session_ids = set(server.get_session_ids()) + new_sid = new_session_ids.difference(old_session_ids) + try: + sid = new_sid.pop() + logger.info("executed: %s as session %d", execute_server, sid) + except KeyError: + logger.info("executed %s with unknown session ID", execute_server) + return replies + logger.info("checking session %d for RUNTIME state" % sid) + session = self.server.get_session(session_id=sid) + retries = 10 + # wait for session to enter RUNTIME state, to prevent GUI from + # connecting while nodes are still being instantiated + while session.state != EventTypes.RUNTIME_STATE.value: + logger.info("waiting for session %d to enter RUNTIME state" % sid) + time.sleep(1) + retries -= 1 + if retries <= 0: + logger.info("session %d did not enter RUNTIME state" % sid) + return replies + tlv_data = coreapi.CoreRegisterTlv.pack(RegisterTlvs.EXECUTE_SERVER.value, execute_server) + tlv_data += coreapi.CoreRegisterTlv.pack(RegisterTlvs.SESSION.value, "%s" % sid) + message = 
coreapi.CoreRegMessage.pack(0, tlv_data) + replies.append(message) + except Exception as e: + logger.exception("error executing: %s", execute_server) + tlv_data = coreapi.CoreExceptionTlv.pack(ExceptionTlvs.LEVEL.value, 2) + tlv_data += coreapi.CoreExceptionTlv.pack(ExceptionTlvs.TEXT.value, str(e)) + message = coreapi.CoreExceptionMessage.pack(0, tlv_data) + replies.append(message) + + return replies + + gui = message.get_tlv(RegisterTlvs.GUI.value) + if gui is None: + logger.info("ignoring Register message") + else: + # register capabilities with the GUI + self.master = True + + # TODO: need to replicate functionality? + # self.server.set_session_master(self) + replies.append(self.register()) + replies.append(self.server.to_session_message()) + + return replies + + def handle_config_message(self, message): + """ + Configuration Message handler + + :param coreapi.CoreConfMessage message: configuration message to handle + :return: reply messages + """ + # convert config message to standard config data object + config_data = ConfigData( + node=message.get_tlv(ConfigTlvs.NODE.value), + object=message.get_tlv(ConfigTlvs.OBJECT.value), + type=message.get_tlv(ConfigTlvs.TYPE.value), + data_types=message.get_tlv(ConfigTlvs.DATA_TYPES.value), + data_values=message.get_tlv(ConfigTlvs.VALUES.value), + captions=message.get_tlv(ConfigTlvs.CAPTIONS.value), + bitmap=message.get_tlv(ConfigTlvs.BITMAP.value), + possible_values=message.get_tlv(ConfigTlvs.POSSIBLE_VALUES.value), + groups=message.get_tlv(ConfigTlvs.GROUPS.value), + session=message.get_tlv(ConfigTlvs.SESSION.value), + interface_number=message.get_tlv(ConfigTlvs.INTERFACE_NUMBER.value), + network_id=message.get_tlv(ConfigTlvs.NETWORK_ID.value), + opaque=message.get_tlv(ConfigTlvs.OPAQUE.value) + ) + logger.info("Configuration message for %s node %s", config_data.object, config_data.node) + + # dispatch to any registered callback for this object type + replies = self.session.config_object(config_data) + + for reply in replies: + self.handle_broadcast_config(reply) + + return [] + + def handle_file_message(self, message): + """ + File Message handler + + :param coreapi.CoreFileMessage message: file message to handle + :return: reply messages + """ + if message.flags & MessageFlags.ADD.value: + node_num = message.get_tlv(NodeTlvs.NUMBER.value) + file_name = message.get_tlv(FileTlvs.NAME.value) + file_type = message.get_tlv(FileTlvs.TYPE.value) + source_name = message.get_tlv(FileTlvs.SOURCE_NAME.value) + data = message.get_tlv(FileTlvs.DATA.value) + compressed_data = message.get_tlv(FileTlvs.COMPRESSED_DATA.value) + + if compressed_data: + logger.warn("Compressed file data not implemented for File message.") + return () + + if source_name and data: + logger.warn("ignoring invalid File message: source and data TLVs are both present") + return () + + # some File Messages store custom files in services, + # prior to node creation + if file_type is not None: + if file_type[:8] == "service:": + self.session.services.setservicefile(node_num, file_type, file_name, source_name, data) + return () + elif file_type[:5] == "hook:": + self.session.set_hook(file_type, file_name, source_name, data) + return () + + # writing a file to the host + if node_num is None: + if source_name is not None: + shutil.copy2(source_name, file_name) + else: + with open(file_name, "w") as open_file: + open_file.write(data) + return () + + try: + node = self.session.get_object(node_num) + if source_name is not None: + node.addfile(source_name, file_name) + elif data is not None: 
+ node.nodefile(file_name, data) + except KeyError: + # XXX wait and queue this message to try again later + # XXX maybe this should be done differently + logger.warn("File message for %s for node number %s queued." % (file_name, node_num)) + time.sleep(0.125) + self.queue_message(message) + return () + else: + raise NotImplementedError + + return () + + def handle_interface_message(self, message): + """ + Interface Message handler. + + :param message: interface message to handle + :return: reply messages + """ + logger.info("ignoring Interface message") + return () + + def handle_event_message(self, message): + """ + Event Message handler + + :param coreapi.CoreEventMessage message: event message to handle + :return: reply messages + """ + event_data = EventData( + node=message.get_tlv(EventTlvs.NODE.value), + event_type=message.get_tlv(EventTlvs.TYPE.value), + name=message.get_tlv(EventTlvs.NAME.value), + data=message.get_tlv(EventTlvs.DATA.value), + time=message.get_tlv(EventTlvs.TIME.value), + session=message.get_tlv(EventTlvs.SESSION.value) + ) + + event_type = event_data.event_type + if event_type is None: + raise NotImplementedError("Event message missing event type") + node_id = event_data.node + + logger.info("EVENT %d: %s at %s", event_type, EventTypes(event_type).name, time.ctime()) + if event_type <= EventTypes.SHUTDOWN_STATE.value: + if node_id is not None: + try: + node = self.session.get_object(node_id) + except KeyError: + raise KeyError("Event message for unknown node %d" % node_id) + + # configure mobility models for WLAN added during runtime + if event_type == EventTypes.INSTANTIATION_STATE.value and nodeutils.is_node(node, + NodeTypes.WIRELESS_LAN): + self.session.mobility.startup(node_ids=(node.objid,)) + return () + + logger.warn("dropping unhandled Event message with node number") + return () + self.session.set_state(state=event_type) + + if event_type == EventTypes.DEFINITION_STATE.value: + # clear all session objects in order to receive new definitions + self.session.delete_objects() + self.session.del_hooks() + self.session.broker.reset() + elif event_type == EventTypes.CONFIGURATION_STATE.value: + pass + elif event_type == EventTypes.INSTANTIATION_STATE.value: + if len(self.handler_threads) > 1: + # TODO: sync handler threads here before continuing + time.sleep(2.0) # XXX + # done receiving node/link configuration, ready to instantiate + self.session.instantiate() + + # after booting nodes attempt to send emulation id for nodes waiting on status + for obj in self.session.objects.itervalues(): + self.send_node_emulation_id(obj.objid) + elif event_type == EventTypes.RUNTIME_STATE.value: + if self.session.master: + logger.warn("Unexpected event message: RUNTIME state received at session master") + else: + # master event queue is started in session.checkruntime() + self.session.event_loop.run() + elif event_type == EventTypes.DATACOLLECT_STATE.value: + self.session.data_collect() + elif event_type == EventTypes.SHUTDOWN_STATE.value: + if self.session.master: + logger.warn("Unexpected event message: SHUTDOWN state received at session master") + elif event_type in (EventTypes.START.value, EventTypes.STOP.value, + EventTypes.RESTART.value, + EventTypes.PAUSE.value, + EventTypes.RECONFIGURE.value): + handled = False + name = event_data.name + if name: + # TODO: register system for event message handlers, + # like confobjs + if name.startswith("service:"): + self.session.services.handleevent(event_data) + handled = True + elif name.startswith("mobility:"): + 
self.session.mobility.handleevent(event_data) + handled = True + if not handled: + logger.warn("Unhandled event message: event type %s (%s)", + event_type, coreapi.state_name(event_type)) + elif event_type == EventTypes.FILE_OPEN.value: + self.session.delete_objects() + self.session.del_hooks() + self.session.broker.reset() + filename = event_data.name + open_session_xml(self.session, filename) + + # trigger session to send out messages out itself + self.session.send_objects() + return () + elif event_type == EventTypes.FILE_SAVE.value: + filename = event_data.name + save_session_xml(self.session, filename, self.session.config["xmlfilever"]) + elif event_type == EventTypes.SCHEDULED.value: + etime = event_data.time + node = event_data.node + name = event_data.name + data = event_data.data + if etime is None: + logger.warn("Event message scheduled event missing start time") + return () + if message.flags & MessageFlags.ADD.value: + self.session.add_event(float(etime), node=node, name=name, data=data) + else: + raise NotImplementedError + else: + logger.warn("Unhandled event message: event type %d", event_type) + + return () + + def handle_session_message(self, message): + """ + Session Message handler + + :param coreapi.CoreSessionMessage message: session message to handle + :return: reply messages + """ + session_id_str = message.get_tlv(SessionTlvs.NUMBER.value) + name_str = message.get_tlv(SessionTlvs.NAME.value) + file_str = message.get_tlv(SessionTlvs.FILE.value) + node_count_str = message.get_tlv(SessionTlvs.NODE_COUNT.value) + thumb = message.get_tlv(SessionTlvs.THUMB.value) + user = message.get_tlv(SessionTlvs.USER.value) + session_ids = coreapi.str_to_list(session_id_str) + names = coreapi.str_to_list(name_str) + files = coreapi.str_to_list(file_str) + node_counts = coreapi.str_to_list(node_count_str) + logger.info("SESSION message flags=0x%x sessions=%s" % (message.flags, session_id_str)) + + if message.flags == 0: + # modify a session + i = 0 + for session_id in session_ids: + session_id = int(session_id) + if session_id == 0: + session = self.session + else: + session = self.server.get_session(session_id=session_id) + + if session is None: + logger.info("session %s not found", session_id) + i += 1 + continue + + logger.info("request to modify to session %s", session.session_id) + if names is not None: + session.name = names[i] + if files is not None: + session.file_name = files[i] + if node_counts is not None: + pass + # session.node_count = ncs[i] + if thumb is not None: + session.set_thumbnail(thumb) + if user is not None: + session.set_user(user) + i += 1 + else: + if message.flags & MessageFlags.STRING.value and not message.flags & MessageFlags.ADD.value: + # status request flag: send list of sessions + return self.server.to_session_message(), + + # handle ADD or DEL flags + for session_id in session_ids: + session_id = int(session_id) + session = self.server.get_session(session_id=session_id) + + if session is None: + logger.info("session %s not found (flags=0x%x)", session_id, message.flags) + continue + + if session.server is None: + # this needs to be set when executing a Python script + session.server = self.server + + if message.flags & MessageFlags.ADD.value: + # connect to the first session that exists + logger.info("request to connect to session %s" % session_id) + # this may shutdown the session if no handlers exist + # TODO: determine what we want to do here + self.session.disconnect(self) + self.session = session + self.session.connect(self) + + if user is not 
None: + self.session.set_user(user) + + if message.flags & MessageFlags.STRING.value: + self.session.send_objects() + elif message.flags & MessageFlags.DELETE.value: + # shut down the specified session(s) + logger.info("request to terminate session %s" % session_id) + session.set_state(state=EventTypes.DATACOLLECT_STATE.value, send_event=True) + session.set_state(state=EventTypes.SHUTDOWN_STATE.value, send_event=True) + session.shutdown() + else: + logger.warn("unhandled session flags for session %s", session_id) + + return () + + def send_node_emulation_id(self, node_id): + """ + Node emulation id to send. + + :param int node_id: node id to send + :return: nothing + """ + if node_id in self.node_status_request: + tlv_data = "" + tlv_data += coreapi.CoreNodeTlv.pack(NodeTlvs.NUMBER.value, node_id) + tlv_data += coreapi.CoreNodeTlv.pack(NodeTlvs.EMULATION_ID.value, node_id) + reply = coreapi.CoreNodeMessage.pack(MessageFlags.ADD.value | MessageFlags.LOCAL.value, tlv_data) + + try: + self.sendall(reply) + except IOError: + logger.exception("error sending node emulation id message: %s", node_id) + + del self.node_status_request[node_id] + + +class CoreDatagramRequestHandler(CoreRequestHandler): + """ + A child of the CoreRequestHandler class for handling connectionless + UDP messages. No new session is created; messages are handled immediately or + sometimes queued on existing session handlers. + """ + + def __init__(self, request, client_address, server): + """ + Create a CoreDatagramRequestHandler instance. + + :param request: request object + :param str client_address: client address + :param CoreServer server: core server instance + """ + # TODO: decide which messages cannot be handled with connectionless UDP + self.message_handlers = { + MessageTypes.NODE.value: self.handle_node_message, + MessageTypes.LINK.value: self.handle_link_message, + MessageTypes.EXECUTE.value: self.handle_execute_message, + MessageTypes.REGISTER.value: self.handle_register_message, + MessageTypes.CONFIG.value: self.handle_config_message, + MessageTypes.FILE.value: self.handle_file_message, + MessageTypes.INTERFACE.value: self.handle_interface_message, + MessageTypes.EVENT.value: self.handle_event_message, + MessageTypes.SESSION.value: self.handle_session_message, + } + self.node_status_request = {} + self.master = False + self.session = None + SocketServer.BaseRequestHandler.__init__(self, request, client_address, server) + + def setup(self): + """ + Client has connected, set up a new connection. + + :return: nothing + """ + logger.info("new UDP connection: %s:%s" % self.client_address) + + def handle(self): + """ + Receive a message. + + :return: nothing + """ + self.receive_message() + + def finish(self): + """ + Handle the finish state of a client. + + :return: nothing + """ + return SocketServer.BaseRequestHandler.finish(self) + + def receive_message(self): + """ + Receive data, parse a CoreMessage and queue it onto an existing + session handler"s queue, if available. 
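+
+        For the UDP server, self.request is a (data, socket) pair rather than
+        a connected stream socket, so the whole datagram is already available
+        and is parsed in a single pass.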
+ + :return: nothing + """ + data = self.request[0] + sock = self.request[1] + + header = data[:coreapi.CoreMessage.header_len] + if len(header) < coreapi.CoreMessage.header_len: + raise IOError("error receiving header (received %d bytes)" % len(header)) + message_type, message_flags, message_len = coreapi.CoreMessage.unpack_header(header) + if message_len == 0: + logger.warn("received message with no data") + return + + if len(data) != coreapi.CoreMessage.header_len + message_len: + logger.warn("received message length does not match received data (%s != %s)", + len(data), coreapi.CoreMessage.header_len + message_len) + raise IOError + else: + logger.info("UDP socket received message type=%d len=%d", message_type, message_len) + + try: + message_class = coreapi.CLASS_MAP[message_type] + message = message_class(message_flags, header, data[coreapi.CoreMessage.header_len:]) + except KeyError: + message = coreapi.CoreMessage(message_flags, header, data[coreapi.CoreMessage.header_len:]) + message.message_type = message_type + logger.warn("unimplemented core message type: %s" % message.type_str()) + return + + session_ids = message.session_numbers() + message.queuedtimes = 0 + # logger.info("UDP message has session numbers: %s" % sids) + + if len(session_ids) > 0: + for session_id in session_ids: + session = self.server.mainserver.get_session(session_id=session_id) + if session: + self.session = session + session.broadcast(self, message) + self.handle_message(message) + else: + logger.warn("Session %d in %s message not found." % (session_id, message.type_str())) + else: + # no session specified, find an existing one + session = self.server.mainserver.get_session() + if session or message.message_type == MessageTypes.REGISTER.value: + self.session = session + if session: + session.broadcast(self, message) + self.handle_message(message) + else: + logger.warn("No active session, dropping %s message.", message.type_str()) + + def queue_message(self, message): + """ + UDP handlers are short-lived and do not have message queues. + + :return: nothing + """ + raise Exception("Unable to queue %s message for later processing using UDP!" % message.type_str()) + + def sendall(self, data): + """ + Use sendto() on the connectionless UDP socket. + + :return: nothing + """ + self.request[1].sendto(data, self.client_address) + + +class BaseAuxRequestHandler(CoreRequestHandler): + """ + This is the superclass for auxiliary handlers in CORE. A concrete auxiliary handler class + must, at a minimum, define the recvmsg(), sendall(), and dispatchreplies() methods. + See SockerServer.BaseRequestHandler for parameter details. + """ + + def __init__(self, request, client_address, server): + """ + Create a BaseAuxRequestHandler instance. 
+
+        :param request: request client
+        :param str client_address: client address
+        :param CoreServer server: core server instance
+        """
+        self.message_handlers = {
+            MessageTypes.NODE.value: self.handle_node_message,
+            MessageTypes.LINK.value: self.handle_link_message,
+            MessageTypes.EXECUTE.value: self.handle_execute_message,
+            MessageTypes.REGISTER.value: self.handle_register_message,
+            MessageTypes.CONFIG.value: self.handle_config_message,
+            MessageTypes.FILE.value: self.handle_file_message,
+            MessageTypes.INTERFACE.value: self.handle_interface_message,
+            MessageTypes.EVENT.value: self.handle_event_message,
+            MessageTypes.SESSION.value: self.handle_session_message,
+        }
+        self.handler_threads = []
+        self.node_status_request = {}
+        self.master = False
+        self.session = None
+        SocketServer.BaseRequestHandler.__init__(self, request, client_address, server)
+
+    def setup(self):
+        """
+        New client has connected to the auxiliary server.
+
+        :return: nothing
+        """
+        logger.info("new auxiliary server client: %s:%s" % self.client_address)
+
+    def handle(self):
+        """
+        The handler main loop.
+
+        :return: nothing
+        """
+        port = self.request.getpeername()[1]
+        self.session = self.server.mainserver.create_session(session_id=port)
+        self.session.connect(self)
+
+        while True:
+            try:
+                messages = self.receive_message()
+                if messages:
+                    for message in messages:
+                        # TODO: do we really want to broadcast node and link messages from a client to other clients?
+                        # self.session.broadcast(self, message)
+                        self.handle_message(message)
+            except EOFError:
+                break
+            except IOError:
+                logger.exception("IOError in CoreAuxRequestHandler")
+                break
+
+    def finish(self):
+        """
+        Disconnect the client.
+
+        :return: nothing
+        """
+        if self.session:
+            self.session.event_handlers.remove(self.handle_broadcast_event)
+            self.session.exception_handlers.remove(self.handle_broadcast_exception)
+            self.session.node_handlers.remove(self.handle_broadcast_node)
+            self.session.link_handlers.remove(self.handle_broadcast_link)
+            self.session.shutdown()
+        return SocketServer.BaseRequestHandler.finish(self)
+
+    """
+    =======================================================================
+    Concrete AuxRequestHandler classes must redefine the following methods
+    =======================================================================
+    """
+
+    def receive_message(self):
+        """
+        Receive data from the client in the supported format. Parse, transform to CORE API format and
+        return transformed messages.
+
+        EXAMPLE:
+        return self.handler.request.recv(siz)
+
+        :return: nothing
+        """
+        raise NotImplementedError
+
+    def dispatch_replies(self, replies, message):
+        """
+        Dispatch CORE "replies" to a previously received message "msg" from a client.
+        Replies passed to this method follow the CORE API. This method allows transformation to
+        the form supported by the auxiliary handler and within the context of "msg".
+        Add transformation and transmission code here.
+
+        EXAMPLE:
+        transformed_replies = stateful_transform(replies, msg)  # stateful_transform method needs to be defined
+        if transformed_replies:
+            for reply in transformed_replies:
+                try:
+                    self.request.sendall(reply)
+                except Exception, e:
+                    if self.debug:
+                        logger.info("-"*60)
+                        traceback.print_exc(file=sys.stdout)
+                        logger.info("-"*60)
+                    raise e
+
+        :return: nothing
+        """
+        raise NotImplementedError
+
+    def sendall(self, data):
+        """
+        CORE calls this method when data needs to be asynchronously sent to a client. The data is
+        in CORE API format. 
This method allows transformation to the required format supported by this + handler prior to transmission. + + EXAMPLE: + msgs = self.transform(data) # transform method needs to be defined + if msgs: + for msg in msgs: + try: + self.request.sendall(reply) + except Exception, e: + if self.debug: + logger.info("-"*60) + traceback.print_exc(file=sys.stdout) + logger.info("-"*60) + raise e + + :return: nothing + """ + raise NotImplemented diff --git a/daemon/core/data.py b/daemon/core/data.py new file mode 100644 index 00000000..715eb5d5 --- /dev/null +++ b/daemon/core/data.py @@ -0,0 +1,120 @@ +""" +CORE data objects. +""" + +import collections + +ConfigData = collections.namedtuple("ConfigData", [ + "message_type", + "node", + "object", + "type", + "data_types", + "data_values", + "captions", + "bitmap", + "possible_values", + "groups", + "session", + "interface_number", + "network_id", + "opaque" +]) +ConfigData.__new__.__defaults__ = (None,) * len(ConfigData._fields) + +EventData = collections.namedtuple("EventData", [ + "node", + "event_type", + "name", + "data", + "time", + "session" +]) +EventData.__new__.__defaults__ = (None,) * len(EventData._fields) + +ExceptionData = collections.namedtuple("ExceptionData", [ + "node", + "session", + "level", + "source", + "date", + "text", + "opaque" +]) +ExceptionData.__new__.__defaults__ = (None,) * len(ExceptionData._fields) + +FileData = collections.namedtuple("FileData", [ + "message_type", + "node", + "name", + "mode", + "number", + "type", + "source", + "session", + "data", + "compressed_data" +]) +FileData.__new__.__defaults__ = (None,) * len(FileData._fields) + +NodeData = collections.namedtuple("NodeData", [ + "message_type", + "id", + "node_type", + "name", + "ip_address", + "mac_address", + "ip6_address", + "model", + "emulation_id", + "emulation_server", + "session", + "x_position", + "y_position", + "canvas", + "network_id", + "services", + "latitude", + "longitude", + "altitude", + "icon", + "opaque" +]) +NodeData.__new__.__defaults__ = (None,) * len(NodeData._fields) + +LinkData = collections.namedtuple("LinkData", [ + "message_type", + "node1_id", + "node2_id", + "delay", + "bandwidth", + "per", + "dup", + "jitter", + "mer", + "burst", + "session", + "mburst", + "link_type", + "gui_attributes", + "unidirectional", + "emulation_id", + "network_id", + "key", + "interface1_id", + "interface1_name", + "interface1_ip4", + "interface1_ip4_mask", + "interface1_mac", + "interface1_ip6", + "interface1_ip6_mask", + "interface2_id", + "interface2_name", + "interface2_ip4", + "interface2_ip4_mask", + "interface2_mac", + "interface2_ip6", + "interface2_ip6_mask", + "opaque" +]) +LinkData.__new__.__defaults__ = (None,) * len(LinkData._fields) diff --git a/daemon/core/emane/emanemanager.py b/daemon/core/emane/emanemanager.py new file mode 100644 index 00000000..c3c87b24 --- /dev/null +++ b/daemon/core/emane/emanemanager.py @@ -0,0 +1,1231 @@ +""" +emane.py: definition of an Emane class for implementing configuration control of an EMANE emulation. +Copyright (c)2010-2014, 2017 the Boeing Company. 
+""" + +import os +import subprocess +import threading +from xml.dom.minidom import parseString + +from core import constants +from core import emane +from core.api import coreapi +from core.conf import ConfigurableManager +from core.emane.bypass import EmaneBypassModel +from core.emane.commeffect import EmaneCommEffectModel +from core.emane.emanemodel import EmaneModel +from core.emane.ieee80211abg import EmaneIeee80211abgModel +from core.emane.rfpipe import EmaneRfPipeModel +from core.enumerations import ConfigDataTypes, NodeTypes +from core.enumerations import ConfigFlags +from core.enumerations import ConfigTlvs +from core.enumerations import MessageFlags +from core.enumerations import MessageTypes +from core.enumerations import RegisterTlvs +from core.misc import log +from core.misc import nodeutils +from core.misc import utils +from core.misc.ipaddress import MacAddress +from core.xml import xmlutils + +logger = log.get_logger(__name__) + +# EMANE 0.7.4/0.8.1 +try: + import emaneeventservice + import emaneeventlocation +except ImportError: + logger.error("error importing emaneeventservice and emaneeventlocation") + +# EMANE 0.9.1+ +try: + from emanesh.events import EventService + from emanesh.events import LocationEvent +except ImportError: + logger.error("error importing emanesh") + +EMANE_MODELS = [ + EmaneRfPipeModel, + EmaneIeee80211abgModel, + EmaneCommEffectModel, + EmaneBypassModel +] + + +class EmaneManager(ConfigurableManager): + """ + EMANE controller object. Lives in a Session instance and is used for + building EMANE config files from all of the EmaneNode objects in this + emulation, and for controlling the EMANE daemons. + """ + name = "emane" + version = None + versionstr = None + config_type = RegisterTlvs.EMULATION_SERVER.value + _hwaddr_prefix = "02:02" + (SUCCESS, NOT_NEEDED, NOT_READY) = (0, 1, 2) + EVENTCFGVAR = 'LIBEMANEEVENTSERVICECONFIG' + DEFAULT_LOG_LEVEL = 3 + + def __init__(self, session): + """ + Creates a Emane instance. + + :param core.session.Session session: session this manager is tied to + :return: nothing + """ + ConfigurableManager.__init__(self) + self.session = session + self._objs = {} + self._objslock = threading.Lock() + self._ifccounts = {} + self._ifccountslock = threading.Lock() + # Port numbers are allocated from these counters + self.platformport = self.session.get_config_item_int('emane_platform_port', 8100) + self.transformport = self.session.get_config_item_int('emane_transform_port', 8200) + self.doeventloop = False + self.eventmonthread = None + self.logversion() + # model for global EMANE configuration options + self.emane_config = EmaneGlobalModel(session, None) + session.broker.handlers.add(self.handledistributed) + self.service = None + self._modelclsmap = { + self.emane_config.name: self.emane_config + } + self.loadmodels() + + def logversion(self): + """ + Log the installed EMANE version. + """ + logger.info("using EMANE version: %s", emane.VERSIONSTR) + + def deleteeventservice(self): + if hasattr(self, 'service'): + if self.service: + for fd in self.service._readFd, self.service._writeFd: + if fd >= 0: + os.close(fd) + for f in self.service._socket, self.service._socketOTA: + if f: + f.close() + del self.service + + def initeventservice(self, filename=None, shutdown=False): + """ + (Re-)initialize the EMANE Event service. + The multicast group and/or port may be configured. + - For versions < 0.9.1 this can be changed via XML config file + and an environment variable pointing to that file. 
+ - For version >= 0.9.1 this is passed into the EventService + constructor. + """ + self.deleteeventservice() + self.service = None + + # EMANE 0.9.1+ does not require event service XML config + if EmaneManager.version >= emane.EMANE091: + if shutdown: + return + # Get the control network to be used for events + values = self.getconfig(None, "emane", + self.emane_config.getdefaultvalues())[1] + group, port = self.emane_config.valueof('eventservicegroup', values).split(':') + eventdev = self.emane_config.valueof('eventservicedevice', values) + eventnetidx = self.session.get_control_net_index(eventdev) + if EmaneManager.version > emane.EMANE091: + if eventnetidx < 0: + msg = "Invalid Event Service device provided: %s" % eventdev + logger.error(msg) + return False + + # Make sure the event control network is in place + eventnet = self.session.add_remove_control_net(net_index=eventnetidx, + remove=False, + conf_required=False) + if eventnet is not None: + # direct EMANE events towards control net bridge + eventdev = eventnet.brname + eventchannel = (group, int(port), eventdev) + + # disabled otachannel for event service + # only needed for e.g. antennaprofile events xmit by models + logger.info("Using %s for event service traffic" % eventdev) + try: + self.service = EventService(eventchannel=eventchannel, otachannel=None) + except: + logger.exception("error instantiating EventService") + + return True + + tmp = None + if filename is not None: + tmp = os.getenv(EmaneManager.EVENTCFGVAR) + os.environ.update({EmaneManager.EVENTCFGVAR: filename}) + + rc = True + try: + self.service = emaneeventservice.EventService() + except: + self.service = None + rc = False + + if self.service: + for f in self.service._readFd, self.service._writeFd, self.service._socket, self.service._socketOTA: + if f: + utils.closeonexec(f) + + if filename is not None: + os.environ.pop(EmaneManager.EVENTCFGVAR) + if tmp is not None: + os.environ.update({EmaneManager.EVENTCFGVAR: tmp}) + + return rc + + def loadmodels(self): + """ + load EMANE models and make them available. + """ + for emane_model in EMANE_MODELS: + logger.info("loading emane model: (%s) %s - %s", + emane_model, emane_model.name, RegisterTlvs(emane_model.config_type)) + self._modelclsmap[emane_model.name] = emane_model + self.session.add_config_object(emane_model.name, emane_model.config_type, + emane_model.configure_emane) + + def addobj(self, obj): + """ + add a new EmaneNode object to this Emane controller object + """ + self._objslock.acquire() + if obj.objid in self._objs: + self._objslock.release() + raise KeyError("non-unique EMANE object id %s for %s" % (obj.objid, obj)) + self._objs[obj.objid] = obj + self._objslock.release() + + def getnodes(self): + """ + Return a set of CoreNodes that are linked to an EmaneNode, + e.g. containers having one or more radio interfaces. 
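+
+        Illustrative result, assuming two containers n1 and n2 that each have
+        a radio interface attached to a single EmaneNode::
+
+            nodes = self.getnodes()  # -> set([<CoreNode n1>, <CoreNode n2>])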
+ """ + # assumes self._objslock already held + r = set() + for e in self._objs.values(): + for netif in e.netifs(): + r.add(netif.node) + return r + + def getmodels(self, n): + """ + Used with XML export; see ConfigurableManager.getmodels() + """ + r = ConfigurableManager.getmodels(self, n) + # EMANE global params are stored with first EMANE node (if non-default + # values are configured) + sorted_ids = sorted(self.configs.keys()) + if None in self.configs and len(sorted_ids) > 1 and \ + n.objid == sorted_ids[1]: + v = self.configs[None] + for model in v: + cls = self._modelclsmap[model[0]] + vals = model[1] + r.append((cls, vals)) + return r + + def getifcconfig(self, nodenum, conftype, defaultvalues, ifc): + # use the network-wide config values or interface(NEM)-specific values? + if ifc is None: + return self.getconfig(nodenum, conftype, defaultvalues)[1] + else: + # don't use default values when interface config is the same as net + # note here that using ifc.node.objid as key allows for only one type + # of each model per node; TODO: use both node and interface as key + + # Adamson change: first check for iface config keyed by "node:ifc.name" + # (so that nodes w/ multiple interfaces of same conftype can have + # different configs for each separate interface) + key = 1000*ifc.node.objid + if ifc.netindex is not None: + key += ifc.netindex + values = self.getconfig(key, conftype, None)[1] + if not values: + values = self.getconfig(ifc.node.objid, conftype, None)[1] + if not values and emane.VERSION > emane.EMANE091: + # with EMANE 0.9.2+, we need an extra NEM XML from + # model.buildnemxmlfiles(), so defaults are returned here + if ifc.transport_type == "raw": + values = self.getconfig(nodenum, conftype, defaultvalues)[1] + return values + + def setup(self): + """ + Populate self._objs with EmaneNodes; perform distributed setup; + associate models with EmaneNodes from self.config. Returns + Emane.(SUCCESS, NOT_NEEDED, NOT_READY) in order to delay session + instantiation. + """ + with self.session._objects_lock: + for obj in self.session.objects.itervalues(): + if nodeutils.is_node(obj, NodeTypes.EMANE): + self.addobj(obj) + if len(self._objs) == 0: + return EmaneManager.NOT_NEEDED + if emane.VERSION == emane.EMANEUNK: + raise ValueError, 'EMANE version not properly detected' + # control network bridge required for EMANE 0.9.2 + # - needs to be configured before checkdistributed() for distributed + # - needs to exist when eventservice binds to it (initeventservice) + if emane.VERSION > emane.EMANE091 and self.session.master: + values = self.getconfig(None, "emane", + self.emane_config.getdefaultvalues())[1] + otadev = self.emane_config.valueof('otamanagerdevice', values) + netidx = self.session.get_control_net_index(otadev) + if netidx < 0: + msg = "EMANE cannot be started. " \ + "Invalid OTA device provided: %s. Check core.conf." % otadev + logger.error(msg) + return EmaneManager.NOT_READY + + ctrlnet = self.session.add_remove_control_net(net_index=netidx, remove=False, conf_required=False) + self.distributedctrlnet(ctrlnet) + eventdev = self.emane_config.valueof('eventservicedevice', values) + if eventdev != otadev: + netidx = self.session.get_control_net_index(eventdev) + if netidx < 0: + msg = "EMANE cannot be started." \ + "Invalid Event Service device provided: %s. Check core.conf." 
% eventdev + logger.error(msg) + return EmaneManager.NOT_READY + + ctrlnet = self.session.add_remove_control_net(net_index=netidx, remove=False, conf_required=False) + self.distributedctrlnet(ctrlnet) + + if self.checkdistributed(): + # we are slave, but haven't received a platformid yet + cfgval = self.getconfig(None, self.emane_config.name, + self.emane_config.getdefaultvalues())[1] + i = self.emane_config.getnames().index('platform_id_start') + if cfgval[i] == self.emane_config.getdefaultvalues()[i]: + return EmaneManager.NOT_READY + self.setnodemodels() + return EmaneManager.SUCCESS + + def startup(self): + """ + After all the EmaneNode objects have been added, build XML files + and start the daemons. Returns Emane.(SUCCESS, NOT_NEEDED, or + NOT_READY) which is used to delay session instantiation. + """ + self.reset() + r = self.setup() + if r != EmaneManager.SUCCESS: + return r # NOT_NEEDED or NOT_READY + if emane.VERSIONSTR == "": + raise ValueError("EMANE version not properly detected") + nems = [] + with self._objslock: + if emane.VERSION < emane.EMANE092: + self.buildxml() + self.initeventservice() + self.starteventmonitor() + if self.numnems() > 0: + # TODO: check and return failure for these methods + self.startdaemons() + self.installnetifs() + else: + self.buildxml2() + self.initeventservice() + self.starteventmonitor() + if self.numnems() > 0: + self.startdaemons2() + self.installnetifs(do_netns=False) + for e in self._objs.itervalues(): + for netif in e.netifs(): + nems.append((netif.node.name, netif.name, + e.getnemid(netif))) + if nems: + emane_nems_filename = os.path.join(self.session.session_dir, + 'emane_nems') + try: + with open(emane_nems_filename, 'w') as f: + for nodename, ifname, nemid in nems: + f.write('%s %s %s\n' % (nodename, ifname, nemid)) + except IOError: + logger.exception('Error writing EMANE NEMs file: %s') + + return EmaneManager.SUCCESS + + def poststartup(self): + """ + Retransmit location events now that all NEMs are active. + """ + if not self.genlocationevents(): + return + with self._objslock: + for n in sorted(self._objs.keys()): + e = self._objs[n] + for netif in e.netifs(): + (x, y, z) = netif.node.position.get() + e.setnemposition(netif, x, y, z) + + def reset(self): + """ + remove all EmaneNode objects from the dictionary, + reset port numbers and nem id counters + """ + with self._objslock: + self._objs.clear() + # don't clear self._ifccounts here; NEM counts are needed for buildxml + self.platformport = self.session.get_config_item_int('emane_platform_port', 8100) + self.transformport = self.session.get_config_item_int('emane_transform_port', 8200) + + def shutdown(self): + """ + stop all EMANE daemons + """ + self._ifccountslock.acquire() + self._ifccounts.clear() + self._ifccountslock.release() + self._objslock.acquire() + if len(self._objs) == 0: + self._objslock.release() + return + logger.info("Stopping EMANE daemons.") + self.deinstallnetifs() + self.stopdaemons() + self.stopeventmonitor() + self._objslock.release() + + def handledistributed(self, message): + """ + Broker handler for processing CORE API messages as they are + received. This is used to snoop the Link add messages to get NEM + counts of NEMs that exist on other servers. 
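+
+        Assumed flow, for illustration: a Link add message whose first node
+        number refers to an EMANE network and whose second node lives on
+        another server increments that server's entry in self._ifccounts;
+        checkdistributed() later uses these counts when handing out NEM IDs.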
+ """ + if message.message_type == MessageTypes.LINK.value and message.flags & MessageFlags.ADD.value: + nn = message.node_numbers() + # first node is always link layer node in Link add message + if nn[0] in self.session.broker.network_nodes: + serverlist = self.session.broker.getserversbynode(nn[1]) + for server in serverlist: + self._ifccountslock.acquire() + if server not in self._ifccounts: + self._ifccounts[server] = 1 + else: + self._ifccounts[server] += 1 + self._ifccountslock.release() + + def checkdistributed(self): + """ + Check for EMANE nodes that exist on multiple emulation servers and + coordinate the NEM id and port number space. + If we are the master EMANE node, return False so initialization will + proceed as normal; otherwise slaves return True here and + initialization is deferred. + """ + # check with the session if we are the "master" Emane object? + master = False + self._objslock.acquire() + if len(self._objs) > 0: + master = self.session.master + logger.info("Setup EMANE with master=%s." % master) + self._objslock.release() + + # we are not the master Emane object, wait for nem id and ports + if not master: + return True + + cfgval = self.getconfig(None, self.emane_config.name, + self.emane_config.getdefaultvalues())[1] + values = list(cfgval) + + nemcount = 0 + self._objslock.acquire() + for n in self._objs: + emanenode = self._objs[n] + nemcount += emanenode.numnetif() + nemid = int(self.emane_config.valueof("nem_id_start", values)) + nemid += nemcount + platformid = int(self.emane_config.valueof("platform_id_start", values)) + names = list(self.emane_config.getnames()) + + # build an ordered list of servers so platform ID is deterministic + servers = [] + for n in sorted(self._objs): + for s in self.session.broker.getserversbynode(n): + if s not in servers: + servers.append(s) + self._objslock.release() + + servers.sort(key=lambda x: x.name) + for server in servers: + if server.name == "localhost": + continue + + if server.sock is None: + continue + + platformid += 1 + typeflags = ConfigFlags.UPDATE.value + values[names.index("platform_id_start")] = str(platformid) + values[names.index("nem_id_start")] = str(nemid) + msg = EmaneGlobalModel.config_data(flags=0, node_id=None, type_flags=typeflags, values=values) + server.sock.send(msg) + # increment nemid for next server by number of interfaces + with self._ifccountslock: + if server in self._ifccounts: + nemid += self._ifccounts[server] + + return False + + def buildxml(self): + """ + Build all of the XML files required to run EMANE on the host. + NEMs run in a single host emane process, with TAP devices pushed + into namespaces. + """ + # assume self._objslock is already held here + logger.info("Emane.buildxml()") + self.buildplatformxml() + self.buildnemxml() + self.buildtransportxml() + self.buildeventservicexml() + + def buildxml2(self): + """ + Build XML files required to run EMANE on each node. + NEMs run inside containers using the control network for passing + events and data. + """ + # assume self._objslock is already held here + logger.info("Emane.buildxml2()") + # on master, control network bridge added earlier in startup() + ctrlnet = self.session.add_remove_control_net(net_index=0, remove=False, conf_required=False) + self.buildplatformxml2(ctrlnet) + self.buildnemxml() + self.buildeventservicexml() + + def distributedctrlnet(self, ctrlnet): + """ + Distributed EMANE requires multiple control network prefixes to + be configured. 
This generates configuration for slave control nets + using the default list of prefixes. + """ + session = self.session + # slave server + if not session.master: + return + servers = session.broker.getservernames() + # not distributed + if len(servers) < 2: + return + prefix = session.config.get('controlnet') + prefix = getattr(session.options, 'controlnet', prefix) + prefixes = prefix.split() + # normal Config messaging will distribute controlnets + if len(prefixes) >= len(servers): + return + # this generates a config message having controlnet prefix assignments + logger.info("Setting up default controlnet prefixes for distributed (%d configured)" % len(prefixes)) + prefixes = ctrlnet.DEFAULT_PREFIX_LIST[0] + vals = "controlnet='%s'" % prefixes + tlvdata = "" + tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.OBJECT.value, "session") + tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.TYPE.value, 0) + tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.VALUES.value, vals) + rawmsg = coreapi.CoreConfMessage.pack(0, tlvdata) + msghdr = rawmsg[:coreapi.CoreMessage.header_len] + msg = coreapi.CoreConfMessage(flags=0, hdr=msghdr, data=rawmsg[coreapi.CoreMessage.header_len:]) + self.session.broker.handle_message(msg) + + def xmldoc(self, doctype): + """ + Returns an XML xml.minidom.Document with a DOCTYPE tag set to the + provided doctype string, and an initial element having the same + name. + """ + # we hack in the DOCTYPE using the parser + docstr = """ + + <%s/>""" % (doctype, doctype, doctype) + # normally this would be: doc = Document() + return parseString(docstr) + + def xmlparam(self, doc, name, value): + """ + Convenience function for building a parameter tag of the format: + + """ + p = doc.createElement("param") + p.setAttribute("name", name) + p.setAttribute("value", value) + return p + + def xmlshimdefinition(self, doc, name): + """ + Convenience function for building a definition tag of the format: + + """ + p = doc.createElement("shim") + p.setAttribute("definition", name) + return p + + def xmlwrite(self, doc, filename): + """ + Write the given XML document to the specified filename. + """ + pathname = os.path.join(self.session.session_dir, filename) + f = open(pathname, "w") + doc.writexml(writer=f, indent="", addindent=" ", newl="\n", encoding="UTF-8") + f.close() + + def setnodemodels(self): + """ + Associate EmaneModel classes with EmaneNode nodes. The model + configurations are stored in self.configs. + """ + for n in self._objs: + self.setnodemodel(n) + + def setnodemodel(self, n): + emanenode = self._objs[n] + if n not in self.configs: + return False + for t, v in self.configs[n]: + if t is None: + continue + if t == self.emane_config.name: + continue + # only use the first valid EmaneModel + # convert model name to class (e.g. emane_rfpipe -> EmaneRfPipe) + cls = self._modelclsmap[t] + emanenode.setmodel(cls, v) + return True + # no model has been configured for this EmaneNode + return False + + def nemlookup(self, nemid): + """ + Look for the given numerical NEM ID and return the first matching + EmaneNode and NEM interface. + """ + emanenode = None + netif = None + + for n in self._objs: + emanenode = self._objs[n] + netif = emanenode.getnemnetif(nemid) + if netif is not None: + break + else: + emanenode = None + return emanenode, netif + + def numnems(self): + """ + Return the number of NEMs emulated locally. 
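+
+        Illustrative example: with two local EmaneNode objects having three
+        and two attached interfaces respectively, this returns 5.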
+ """ + count = 0 + for o in self._objs.values(): + count += len(o.netifs()) + return count + + def buildplatformxml(self): + """ + Build a platform.xml file now that all nodes are configured. + """ + values = self.getconfig(None, "emane", self.emane_config.getdefaultvalues())[1] + doc = self.xmldoc("platform") + plat = doc.getElementsByTagName("platform").pop() + if emane.VERSION < emane.EMANE091: + platformid = self.emane_config.valueof("platform_id_start", values) + plat.setAttribute("name", "Platform %s" % platformid) + plat.setAttribute("id", platformid) + + names = list(self.emane_config.getnames()) + platform_names = names[:len(self.emane_config._confmatrix_platform)] + platform_names.remove('platform_id_start') + + # append all platform options (except starting id) to doc + map(lambda n: plat.appendChild(self.xmlparam(doc, n, self.emane_config.valueof(n, values))), platform_names) + + nemid = int(self.emane_config.valueof("nem_id_start", values)) + # assume self._objslock is already held here + for n in sorted(self._objs.keys()): + emanenode = self._objs[n] + nems = emanenode.buildplatformxmlentry(doc) + for netif in sorted(nems, key=lambda n: n.node.objid): + # set ID, endpoints here + nementry = nems[netif] + nementry.setAttribute("id", "%d" % nemid) + if emane.VERSION < emane.EMANE092: + # insert nem options (except nem id) to doc + trans_addr = self.emane_config.valueof("transportendpoint", values) + nementry.insertBefore( + self.xmlparam(doc, "transportendpoint", "%s:%d" % (trans_addr, self.transformport)), + nementry.firstChild + ) + platform_addr = self.emane_config.valueof("platformendpoint", values) + nementry.insertBefore( + self.xmlparam(doc, "platformendpoint", "%s:%d" % (platform_addr, self.platformport)), + nementry.firstChild + ) + plat.appendChild(nementry) + emanenode.setnemid(netif, nemid) + # NOTE: MAC address set before here is incorrect, including the one + # sent from the GUI via link message + # MAC address determined by NEM ID: 02:02:00:00:nn:nn" + macstr = self._hwaddr_prefix + ":00:00:" + macstr += "%02X:%02X" % ((nemid >> 8) & 0xFF, nemid & 0xFF) + netif.sethwaddr(MacAddress.from_string(macstr)) + # increment counters used to manage IDs, endpoint port numbers + nemid += 1 + self.platformport += 1 + self.transformport += 1 + self.xmlwrite(doc, "platform.xml") + + def newplatformxmldoc(self, values, otadev=None, eventdev=None): + """ + Start a new platform XML file. Use global EMANE config values + as keys. Override OTA manager and event service devices if + specified (in order to support Raw Transport). + """ + doc = self.xmldoc("platform") + plat = doc.getElementsByTagName("platform").pop() + names = list(self.emane_config.getnames()) + platform_names = names[:len(self.emane_config._confmatrix_platform)] + platform_names.remove('platform_id_start') + platform_values = list(values) + if otadev: + i = platform_names.index('otamanagerdevice') + platform_values[i] = otadev + if eventdev: + i = platform_names.index('eventservicedevice') + platform_values[i] = eventdev + # append all platform options (except starting id) to doc + map(lambda n: plat.appendChild(self.xmlparam(doc, n, self.emane_config.valueof(n, platform_values))), + platform_names) + return doc + + def buildplatformxml2(self, ctrlnet): + """ + Build a platform.xml file now that all nodes are configured. 
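+
+        The per-node output (platform<N>.xml, or platform.xml for raw-transport
+        NEMs handled on the host) roughly takes the shape below; parameter
+        names and values are illustrative only::
+
+            <platform>
+                <param name="otamanagerdevice" value="ctrl0"/>
+                <param name="eventservicedevice" value="ctrl0"/>
+                <nem id="1" ... />
+            </platform>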
+ """ + values = self.getconfig(None, "emane", self.emane_config.getdefaultvalues())[1] + nemid = int(self.emane_config.valueof("nem_id_start", values)) + platformxmls = {} + + # assume self._objslock is already held here + for n in sorted(self._objs.keys()): + emanenode = self._objs[n] + nems = emanenode.buildplatformxmlentry(self.xmldoc("platform")) + for netif in sorted(nems, key=lambda n: n.node.objid): + nementry = nems[netif] + nementry.setAttribute("id", "%d" % nemid) + k = netif.node.objid + if netif.transport_type == "raw": + k = 'host' + otadev = ctrlnet.brname + eventdev = ctrlnet.brname + else: + otadev = None + eventdev = None + if k not in platformxmls: + platformxmls[k] = self.newplatformxmldoc(values, otadev, + eventdev) + doc = platformxmls[k] + plat = doc.getElementsByTagName("platform").pop() + plat.appendChild(nementry) + emanenode.setnemid(netif, nemid) + macstr = self._hwaddr_prefix + ":00:00:" + macstr += "%02X:%02X" % ((nemid >> 8) & 0xFF, nemid & 0xFF) + netif.sethwaddr(MacAddress.from_string(macstr)) + nemid += 1 + for k in sorted(platformxmls.keys()): + if k == 'host': + self.xmlwrite(platformxmls['host'], "platform.xml") + continue + self.xmlwrite(platformxmls[k], "platform%d.xml" % k) + + def buildnemxml(self): + """ + Builds the xxxnem.xml, xxxmac.xml, and xxxphy.xml files which + are defined on a per-EmaneNode basis. + """ + for n in sorted(self._objs.keys()): + emanenode = self._objs[n] + emanenode.buildnemxmlfiles(self) + + def appendtransporttonem(self, doc, nem, nodenum, ifc=None): + """ + Given a nem XML node and EMANE WLAN node number, append + a tag to the NEM definition, required for using + EMANE's internal transport. + """ + if emane.VERSION < emane.EMANE092: + return + emanenode = self._objs[nodenum] + transtag = doc.createElement("transport") + transtypestr = "virtual" + if ifc and ifc.transport_type == "raw": + transtypestr = "raw" + transtag.setAttribute("definition", emanenode.transportxmlname(transtypestr)) + nem.appendChild(transtag) + + def buildtransportxml(self): + """ + Calls emanegentransportxml using a platform.xml file to build + the transportdaemon*.xml. + """ + try: + subprocess.check_call(["emanegentransportxml", "platform.xml"], cwd=self.session.session_dir) + except subprocess.CalledProcessError: + logger.exception("error running emanegentransportxml") + + def buildeventservicexml(self): + """ + Build the libemaneeventservice.xml file if event service options + were changed in the global config. 
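+
+        When written, the file holds an emaneeventmsgsvc element with
+        group/port/device/mcloop/ttl children, roughly as follows (values
+        are illustrative)::
+
+            <emaneeventmsgsvc>
+                <group>224.1.2.8</group>
+                <port>45703</port>
+                <device>ctrl0</device>
+                <mcloop>1</mcloop>
+                <ttl>32</ttl>
+            </emaneeventmsgsvc>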
+ """ + defaults = self.emane_config.getdefaultvalues() + values = self.getconfig(None, "emane", self.emane_config.getdefaultvalues())[1] + need_xml = False + keys = ('eventservicegroup', 'eventservicedevice') + for k in keys: + a = self.emane_config.valueof(k, defaults) + b = self.emane_config.valueof(k, values) + if a != b: + need_xml = True + + if not need_xml: + # reset to using default config + self.initeventservice() + return + + try: + group, port = self.emane_config.valueof('eventservicegroup', values).split(':') + except ValueError: + logger.exception("invalid eventservicegroup in EMANE config") + return + dev = self.emane_config.valueof('eventservicedevice', values) + + doc = self.xmldoc("emaneeventmsgsvc") + es = doc.getElementsByTagName("emaneeventmsgsvc").pop() + kvs = (('group', group), ('port', port), ('device', dev), ('mcloop', '1'), ('ttl', '32')) + xmlutils.add_text_elements_from_tuples(doc, es, kvs) + filename = 'libemaneeventservice.xml' + self.xmlwrite(doc, filename) + pathname = os.path.join(self.session.session_dir, filename) + self.initeventservice(filename=pathname) + + def startdaemons(self): + """ + Start the appropriate EMANE daemons. The transport daemon will + bind to the TAP interfaces. + """ + logger.info("Emane.startdaemons()") + path = self.session.session_dir + loglevel = str(EmaneManager.DEFAULT_LOG_LEVEL) + cfgloglevel = self.session.get_config_item_int("emane_log_level") + realtime = self.session.get_config_item_bool("emane_realtime", True) + if cfgloglevel: + logger.info("setting user-defined EMANE log level: %d" % cfgloglevel) + loglevel = str(cfgloglevel) + emanecmd = ["emane", "-d", "--logl", loglevel, "-f", os.path.join(path, "emane.log")] + if realtime: + emanecmd += "-r", + try: + cmd = emanecmd + [os.path.join(path, "platform.xml")] + logger.info("Emane.startdaemons() running %s" % str(cmd)) + subprocess.check_call(cmd, cwd=path) + except subprocess.CalledProcessError: + logger.exception("error starting emane") + + # start one transport daemon per transportdaemon*.xml file + transcmd = ["emanetransportd", "-d", "--logl", loglevel, "-f", os.path.join(path, "emanetransportd.log")] + if realtime: + transcmd += "-r", + files = os.listdir(path) + for file in files: + if file[-3:] == "xml" and file[:15] == "transportdaemon": + cmd = transcmd + [os.path.join(path, file)] + try: + logger.info("Emane.startdaemons() running %s" % str(cmd)) + subprocess.check_call(cmd, cwd=path) + except subprocess.CalledProcessError: + logger.exception("error starting emanetransportd") + + def startdaemons2(self): + """ + Start one EMANE daemon per node having a radio. + Add a control network even if the user has not configured one. 
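+
+        Per node this results in a command of roughly the following form,
+        run inside the node's namespace (log level, node id and paths are
+        illustrative)::
+
+            emane -d --logl 3 -r -f emane5.log platform5.xml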
+ """ + logger.info("Emane.startdaemons()") + loglevel = str(EmaneManager.DEFAULT_LOG_LEVEL) + cfgloglevel = self.session.get_config_item_int("emane_log_level") + realtime = self.session.get_config_item_bool("emane_realtime", True) + if cfgloglevel: + logger.info("setting user-defined EMANE log level: %d" % cfgloglevel) + loglevel = str(cfgloglevel) + emanecmd = ["emane", "-d", "--logl", loglevel] + if realtime: + emanecmd += "-r", + + values = self.getconfig(None, "emane", self.emane_config.getdefaultvalues())[1] + otagroup, otaport = self.emane_config.valueof('otamanagergroup', values).split(':') + otadev = self.emane_config.valueof('otamanagerdevice', values) + otanetidx = self.session.get_control_net_index(otadev) + + eventgroup, eventport = self.emane_config.valueof('eventservicegroup', values).split(':') + eventdev = self.emane_config.valueof('eventservicedevice', values) + eventservicenetidx = self.session.get_control_net_index(eventdev) + + run_emane_on_host = False + for node in self.getnodes(): + if hasattr(node, 'transport_type') and node.transport_type == "raw": + run_emane_on_host = True + continue + path = self.session.session_dir + n = node.objid + + # control network not yet started here + self.session.add_remove_control_interface(node, 0, remove=False, conf_required=False) + + if otanetidx > 0: + logger.info("adding ota device ctrl%d" % otanetidx) + self.session.add_remove_control_interface(node, otanetidx, remove=False, conf_required=False) + + if eventservicenetidx >= 0: + logger.info("adding event service device ctrl%d" % eventservicenetidx) + self.session.add_remove_control_interface(node, eventservicenetidx, remove=False, conf_required=False) + + # multicast route is needed for OTA data + cmd = [constants.IP_BIN, "route", "add", otagroup, "dev", otadev] + # rc = node.cmd(cmd, wait=True) + node.cmd(cmd, wait=True) + # multicast route is also needed for event data if on control network + if eventservicenetidx >= 0 and eventgroup != otagroup: + cmd = [constants.IP_BIN, "route", "add", eventgroup, "dev", eventdev] + node.cmd(cmd, wait=True) + + try: + cmd = emanecmd + ["-f", os.path.join(path, "emane%d.log" % n), os.path.join(path, "platform%d.xml" % n)] + logger.info("Emane.startdaemons2() running %s" % str(cmd)) + status = node.cmd(cmd, wait=True) + logger.info("Emane.startdaemons2() return code %d" % status) + except subprocess.CalledProcessError: + logger.exception("error starting emane") + + if not run_emane_on_host: + return + + path = self.session.session_dir + try: + emanecmd += ["-f", os.path.join(path, "emane.log")] + cmd = emanecmd + [os.path.join(path, "platform.xml")] + logger.info("Emane.startdaemons2() running %s" % str(cmd)) + subprocess.check_call(cmd, cwd=path) + except subprocess.CalledProcessError: + logger.exception("error starting emane") + + def stopdaemons(self): + """ + Kill the appropriate EMANE daemons. 
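+
+        In practice this issues "killall -q emane" (inside each node for
+        EMANE 0.9.2+, otherwise on the host) and, when EMANE was also run
+        on the host, "killall -q emanetransportd".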
+ """ + # TODO: we may want to improve this if we had the PIDs from the + # specific EMANE daemons that we've started + cmd = ["killall", "-q", "emane"] + stop_emane_on_host = False + if emane.VERSION > emane.EMANE091: + for node in self.getnodes(): + if hasattr(node, 'transport_type') and \ + node.transport_type == "raw": + stop_emane_on_host = True + continue + if node.up: + node.cmd(cmd, wait=False) + # TODO: RJ45 node + else: + stop_emane_on_host = True + if stop_emane_on_host: + subprocess.call(cmd) + subprocess.call(["killall", "-q", "emanetransportd"]) + + def installnetifs(self, do_netns=True): + """ + Install TUN/TAP virtual interfaces into their proper namespaces + now that the EMANE daemons are running. + """ + for n in sorted(self._objs.keys()): + emanenode = self._objs[n] + logger.info("Emane.installnetifs() for node %d" % n) + emanenode.installnetifs(do_netns) + + def deinstallnetifs(self): + """ + Uninstall TUN/TAP virtual interfaces. + """ + for n in sorted(self._objs.keys()): + emanenode = self._objs[n] + emanenode.deinstallnetifs() + + def configure(self, session, config_data): + """ + Handle configuration messages for global EMANE config. + + :param core.conf.ConfigData config_data: configuration data for carrying out a configuration + """ + r = self.emane_config.configure_emane(session, config_data) + + # extra logic to start slave Emane object after nemid has been + # configured from the master + config_type = config_data.type + if config_type == ConfigFlags.UPDATE.value and self.session.master is False: + # instantiation was previously delayed by self.setup() + # returning Emane.NOT_READY + self.session.instantiate() + + return r + + def doeventmonitor(self): + """ + Returns boolean whether or not EMANE events will be monitored. + """ + # this support must be explicitly turned on; by default, CORE will + # generate the EMANE events when nodes are moved + return self.session.get_config_item_bool('emane_event_monitor', False) + + def genlocationevents(self): + """ + Returns boolean whether or not EMANE events will be generated. + """ + # By default, CORE generates EMANE location events when nodes + # are moved; this can be explicitly disabled in core.conf + tmp = self.session.get_config_item_bool('emane_event_generate') + if tmp is None: + tmp = not self.doeventmonitor() + return tmp + + def starteventmonitor(self): + """ + Start monitoring EMANE location events if configured to do so. + """ + logger.info("Emane.starteventmonitor()") + if not self.doeventmonitor(): + return + if self.service is None: + errmsg = "Warning: EMANE events will not be generated " \ + "because the emaneeventservice\n binding was " \ + "unable to load " \ + "(install the python-emaneeventservice bindings)" + logger.error(errmsg) + return + self.doeventloop = True + self.eventmonthread = threading.Thread(target=self.eventmonitorloop) + self.eventmonthread.daemon = True + self.eventmonthread.start() + + def stopeventmonitor(self): + """ + Stop monitoring EMANE location events. + """ + self.doeventloop = False + if self.service is not None: + self.service.breakloop() + # reset the service, otherwise nextEvent won't work + self.initeventservice(shutdown=True) + if self.eventmonthread is not None: + if emane.VERSION >= emane.EMANE091: + self.eventmonthread._Thread__stop() + self.eventmonthread.join() + self.eventmonthread = None + + def eventmonitorloop(self): + """ + Thread target that monitors EMANE location events. 
+ """ + if self.service is None: + return + logger.info("Subscribing to EMANE location events (not generating them). " \ + "(%s) " % threading.currentThread().getName()) + while self.doeventloop is True: + if emane.VERSION >= emane.EMANE091: + uuid, seq, events = self.service.nextEvent() + if not self.doeventloop: + break # this occurs with 0.9.1 event service + for event in events: + (nem, eid, data) = event + if eid == LocationEvent.IDENTIFIER: + self.handlelocationevent2(nem, eid, data) + else: + (event, platform, nem, cmp, data) = self.service.nextEvent() + if event == emaneeventlocation.EVENT_ID: + self.handlelocationevent(event, platform, nem, cmp, data) + logger.info("Unsubscribing from EMANE location events. (%s) " % threading.currentThread().getName()) + + def handlelocationevent(self, event, platform, nem, component, data): + """ + Handle an EMANE location event (EMANE 0.8.1 and earlier). + """ + event = emaneeventlocation.EventLocation(data) + entries = event.entries() + for e in entries.values(): + # yaw,pitch,roll,azimuth,elevation,velocity are unhandled + (nemid, lat, long, alt) = e[:4] + self.handlelocationeventtoxyz(nemid, lat, long, alt) + + def handlelocationevent2(self, rxnemid, eid, data): + """ + Handle an EMANE location event (EMANE 0.9.1+). + """ + events = LocationEvent() + events.restore(data) + for event in events: + (txnemid, attrs) = event + if 'latitude' not in attrs or 'longitude' not in attrs or \ + 'altitude' not in attrs: + logger.warn("dropped invalid location event") + continue + # yaw,pitch,roll,azimuth,elevation,velocity are unhandled + lat = attrs['latitude'] + long = attrs['longitude'] + alt = attrs['altitude'] + self.handlelocationeventtoxyz(txnemid, lat, long, alt) + + def handlelocationeventtoxyz(self, nemid, lat, long, alt): + """ + Convert the (NEM ID, lat, long, alt) from a received location event + into a node and x,y,z coordinate values, sending a Node Message. + Returns True if successfully parsed and a Node Message was sent. + """ + # convert nemid to node number + (emanenode, netif) = self.nemlookup(nemid) + if netif is None: + logger.info("location event for unknown NEM %s" % nemid) + return False + n = netif.node.objid + # convert from lat/long/alt to x,y,z coordinates + x, y, z = self.session.location.getxyz(lat, long, alt) + x = int(x) + y = int(y) + z = int(z) + logger.info("location event NEM %s (%s, %s, %s) -> (%s, %s, %s)", + nemid, lat, long, alt, x, y, z) + try: + if (x.bit_length() > 16) or (y.bit_length() > 16) or \ + (z.bit_length() > 16) or (x < 0) or (y < 0) or (z < 0): + warntxt = "Unable to build node location message since " \ + "received lat/long/alt exceeds coordinate " \ + "space: NEM %s (%d, %d, %d)" % (nemid, x, y, z) + logger.error(warntxt) + return False + except AttributeError: + # int.bit_length() not present on Python 2.6 + logger.exception("error wusing bit_length") + + # generate a node message for this location update + try: + node = self.session.get_object(n) + except KeyError: + logger.exception("location event NEM %s has no corresponding node %s" % (nemid, n)) + return False + # don't use node.setposition(x,y,z) which generates an event + node.position.set(x, y, z) + + node_data = node.data(message_type=0) + self.session.broadcast_node(node_data) + + # TODO: determinehow to add SDT handlers + # self.session.sdt.updatenodegeo(node.objid, lat, long, alt) + + return True + + def emanerunning(self, node): + """ + Return True if an EMANE process associated with the given node + is running, False otherwise. 
+ """ + status = -1 + cmd = ['pkill', '-0', '-x', 'emane'] + + try: + if self.version < emane.EMANE092: + status = subprocess.call(cmd) + else: + status = node.cmd(cmd, wait=True) + except IOError: + logger.exception("error checking if emane is running") + + return status == 0 + + +class EmaneGlobalModel(EmaneModel): + """ + Global EMANE configuration options. + """ + + def __init__(self, session, object_id=None): + EmaneModel.__init__(self, session, object_id) + + # Over-The-Air channel required for EMANE 0.9.2 + _DEFAULT_OTA = '0' + _DEFAULT_DEV = 'lo' + if emane.VERSION >= emane.EMANE092: + _DEFAULT_OTA = '1' + _DEFAULT_DEV = 'ctrl0' + + name = "emane" + _confmatrix_platform_base = [ + ("otamanagerchannelenable", ConfigDataTypes.BOOL.value, _DEFAULT_OTA, + 'on,off', 'enable OTA Manager channel'), + ("otamanagergroup", ConfigDataTypes.STRING.value, '224.1.2.8:45702', + '', 'OTA Manager group'), + ("otamanagerdevice", ConfigDataTypes.STRING.value, _DEFAULT_DEV, + '', 'OTA Manager device'), + ("eventservicegroup", ConfigDataTypes.STRING.value, '224.1.2.8:45703', + '', 'Event Service group'), + ("eventservicedevice", ConfigDataTypes.STRING.value, _DEFAULT_DEV, + '', 'Event Service device'), + ("platform_id_start", ConfigDataTypes.INT32.value, '1', + '', 'starting Platform ID'), + ] + _confmatrix_platform_081 = [ + ("debugportenable", ConfigDataTypes.BOOL.value, '0', + 'on,off', 'enable debug port'), + ("debugport", ConfigDataTypes.UINT16.value, '47000', + '', 'debug port number'), + ] + _confmatrix_platform_091 = [ + ("controlportendpoint", ConfigDataTypes.STRING.value, '0.0.0.0:47000', + '', 'Control port address'), + ("antennaprofilemanifesturi", ConfigDataTypes.STRING.value, '', + '', 'antenna profile manifest URI'), + ] + _confmatrix_nem = [ + ("transportendpoint", ConfigDataTypes.STRING.value, 'localhost', + '', 'Transport endpoint address (port is automatic)'), + ("platformendpoint", ConfigDataTypes.STRING.value, 'localhost', + '', 'Platform endpoint address (port is automatic)'), + ("nem_id_start", ConfigDataTypes.INT32.value, '1', + '', 'starting NEM ID'), + ] + _confmatrix_nem_092 = [ + ("nem_id_start", ConfigDataTypes.INT32.value, '1', + '', 'starting NEM ID'), + ] + + if emane.VERSION >= emane.EMANE091: + _confmatrix_platform = _confmatrix_platform_base + \ + _confmatrix_platform_091 + if emane.VERSION >= emane.EMANE092: + _confmatrix_nem = _confmatrix_nem_092 + else: + _confmatrix_platform = _confmatrix_platform_base + \ + _confmatrix_platform_081 + config_matrix = _confmatrix_platform + _confmatrix_nem + config_groups = "Platform Attributes:1-%d|NEM Parameters:%d-%d" % \ + (len(_confmatrix_platform), len(_confmatrix_platform) + 1, + len(config_matrix)) diff --git a/daemon/core/emane/emanemodel.py b/daemon/core/emane/emanemodel.py new file mode 100644 index 00000000..6d78a65a --- /dev/null +++ b/daemon/core/emane/emanemodel.py @@ -0,0 +1,204 @@ +""" +Defines Emane Models used within CORE. +Copyright (c)2010-2014, 2017 the Boeing Company. +""" + +from core import emane +from core.misc import log +from core.misc import utils +from core.mobility import WirelessModel +from core.xml import xmlutils + +logger = log.get_logger(__name__) + + +class EmaneModel(WirelessModel): + """ + EMANE models inherit from this parent class, which takes care of + handling configuration messages based on the _confmatrix list of + configurable parameters. Helper functions also live here. 
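+
+    A minimal (hypothetical) subclass only needs a unique name and an
+    implementation of buildnemxmlfiles(); real models such as
+    EmaneRfPipeModel also define their configuration matrix entries::
+
+        class ExampleModel(EmaneModel):
+            name = "emane_example"  # illustrative name only
+
+            def buildnemxmlfiles(self, e, ifc):
+                # write this model's nem/mac/phy XML files
+                pass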
+ """ + _prefix = {'y': 1e-24, # yocto + 'z': 1e-21, # zepto + 'a': 1e-18, # atto + 'f': 1e-15, # femto + 'p': 1e-12, # pico + 'n': 1e-9, # nano + 'u': 1e-6, # micro + 'm': 1e-3, # mili + 'c': 1e-2, # centi + 'd': 1e-1, # deci + 'k': 1e3, # kilo + 'M': 1e6, # mega + 'G': 1e9, # giga + 'T': 1e12, # tera + 'P': 1e15, # peta + 'E': 1e18, # exa + 'Z': 1e21, # zetta + 'Y': 1e24, # yotta + } + + @classmethod + def configure_emane(cls, session, config_data): + """ + Handle configuration messages for setting up a model. + Pass the Emane object as the manager object. + + :param core.session.Session session: session to configure emane + :param core.conf.ConfigData config_data: configuration data for carrying out a configuration + """ + return cls.configure(session.emane, config_data) + + @classmethod + def emane074_fixup(cls, value, div=1.0): + """ + Helper for converting 0.8.1 and newer values to EMANE 0.7.4 + compatible values. + NOTE: This should be removed when support for 0.7.4 has been + deprecated. + """ + if div == 0: + return "0" + if type(value) is not str: + return str(value / div) + if value.endswith(tuple(cls._prefix.keys())): + suffix = value[-1] + value = float(value[:-1]) * cls._prefix[suffix] + return str(int(value / div)) + + def buildnemxmlfiles(self, e, ifc): + """ + Build the necessary nem, mac, and phy XMLs in the given path. + """ + raise NotImplementedError + + def buildplatformxmlnementry(self, doc, n, ifc): + """ + Build the NEM definition that goes into the platform.xml file. + This returns an XML element that will be added to the element. + This default method supports per-interface config + (e.g. or per-EmaneNode + config (e.g. . + This can be overriden by a model for NEM flexibility; n is the EmaneNode. + """ + nem = doc.createElement("nem") + nem.setAttribute("name", ifc.localname) + # if this netif contains a non-standard (per-interface) config, + # then we need to use a more specific xml file here + nem.setAttribute("definition", self.nemxmlname(ifc)) + return nem + + def buildplatformxmltransportentry(self, doc, n, ifc): + """ + Build the transport definition that goes into the platform.xml file. + This returns an XML element that will added to the nem definition. + This default method supports raw and virtual transport types, but may be + overriden by a model to support the e.g. pluggable virtual transport. + n is the EmaneNode. + """ + ttype = ifc.transport_type + if not ttype: + logger.info("warning: %s interface type unsupported!" % ifc.name) + ttype = "raw" + trans = doc.createElement("transport") + trans.setAttribute("definition", n.transportxmlname(ttype)) + if emane.VERSION < emane.EMANE092: + trans.setAttribute("group", "1") + param = doc.createElement("param") + param.setAttribute("name", "device") + if ttype == "raw": + # raw RJ45 name e.g. 'eth0' + param.setAttribute("value", ifc.name) + else: + # virtual TAP name e.g. 'n3.0.17' + param.setAttribute("value", ifc.localname) + if emane.VERSION > emane.EMANE091: + param.setAttribute("value", ifc.name) + + trans.appendChild(param) + return trans + + def basename(self, interface=None): + """ + Return the string that other names are based on. + If a specific config is stored for a node's interface, a unique + filename is needed; otherwise the name of the EmaneNode is used. 
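+
+        For example (names illustrative): an EmaneNode with object id 3 and
+        no per-interface config yields a name of the form "n3<model>", while
+        a per-interface config on interface "n3.0.17" yields "n3_0_17<model>".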
+ """ + emane = self.session.emane + name = "n%s" % self.object_id + if interface is not None: + nodenum = interface.node.objid + # Adamson change - use getifcconfig() to get proper result + # if emane.getconfig(nodenum, self._name, None)[1] is not None: + if emane.getifcconfig(nodenum, self.name, None, interface) is not None: + name = interface.localname.replace('.', '_') + return "%s%s" % (name, self.name) + + def nemxmlname(self, interface=None): + """ + Return the string name for the NEM XML file, e.g. 'n3rfpipenem.xml' + """ + append = "" + if emane.VERSION > emane.EMANE091: + if interface and interface.transport_type == "raw": + append = "_raw" + return "%snem%s.xml" % (self.basename(interface), append) + + def shimxmlname(self, ifc=None): + """ + Return the string name for the SHIM XML file, e.g. 'commeffectshim.xml' + """ + return "%sshim.xml" % self.basename(ifc) + + def macxmlname(self, ifc=None): + """ + Return the string name for the MAC XML file, e.g. 'n3rfpipemac.xml' + """ + return "%smac.xml" % self.basename(ifc) + + def phyxmlname(self, ifc=None): + """ + Return the string name for the PHY XML file, e.g. 'n3rfpipephy.xml' + """ + return "%sphy.xml" % self.basename(ifc) + + def update(self, moved, moved_netifs): + """ + invoked from MobilityModel when nodes are moved; this causes + EMANE location events to be generated for the nodes in the moved + list, making EmaneModels compatible with Ns2ScriptedMobility + """ + try: + wlan = self.session.get_object(self.object_id) + wlan.setnempositions(moved_netifs) + except KeyError: + logger.exception("error during update") + + def linkconfig(self, netif, bw=None, delay=None, loss=None, duplicate=None, jitter=None, netif2=None): + """ + Invoked when a Link Message is received. Default is unimplemented. + """ + warntxt = "EMANE model %s does not support link " % self.name + warntxt += "configuration, dropping Link Message" + logger.warn(warntxt) + + @staticmethod + def valuestrtoparamlist(dom, name, value): + """ + Helper to convert a parameter to a paramlist. + Returns a an XML paramlist, or None if the value does not expand to + multiple values. + """ + try: + values = utils.maketuplefromstr(value, str) + except SyntaxError: + logger.exception("error in value string to param list") + return None + + if not hasattr(values, '__iter__'): + return None + + if len(values) < 2: + return None + + return xmlutils.add_param_list_to_parent(dom, parent=None, name=name, values=values) diff --git a/daemon/core/enumerations.py b/daemon/core/enumerations.py new file mode 100644 index 00000000..b81ca09f --- /dev/null +++ b/daemon/core/enumerations.py @@ -0,0 +1,318 @@ +""" +Contains all legacy enumerations for interacting with legacy CORE code. +""" + +from enum import Enum + +CORE_API_VERSION = "1.23" +CORE_API_PORT = 4038 + + +class MessageTypes(Enum): + """ + CORE message types. + """ + NODE = 0x01 + LINK = 0x02 + EXECUTE = 0x03 + REGISTER = 0x04 + CONFIG = 0x05 + FILE = 0x06 + INTERFACE = 0x07 + EVENT = 0x08 + SESSION = 0x09 + EXCEPTION = 0x0A + + +class MessageFlags(Enum): + """ + CORE message flags. + """ + ADD = 0x01 + DELETE = 0x02 + CRI = 0x04 + LOCAL = 0x08 + STRING = 0x10 + TEXT = 0x20 + TTY = 0x40 + + +class NodeTlvs(Enum): + """ + Node type, length, value enumerations. 
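+
+    Values are the TLV type codes used on the legacy CORE API wire format,
+    e.g. NodeTlvs.NAME.value == 0x03.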
+ """ + NUMBER = 0x01 + TYPE = 0x02 + NAME = 0x03 + IP_ADDRESS = 0x04 + MAC_ADDRESS = 0x05 + IP6_ADDRESS = 0x06 + MODEL = 0x07 + EMULATION_SERVER = 0x08 + SESSION = 0x0A + X_POSITION = 0x20 + Y_POSITION = 0x21 + CANVAS = 0x22 + EMULATION_ID = 0x23 + NETWORK_ID = 0x24 + SERVICES = 0x25 + LATITUDE = 0x30 + LONGITUDE = 0x31 + ALTITUDE = 0x32 + ICON = 0x42 + OPAQUE = 0x50 + + +class NodeTypes(Enum): + """ + Node types. + """ + DEFAULT = 0 + PHYSICAL = 1 + XEN = 2 + TBD = 3 + SWITCH = 4 + HUB = 5 + WIRELESS_LAN = 6 + RJ45 = 7 + TUNNEL = 8 + KTUNNEL = 9 + EMANE = 10 + TAP_BRIDGE = 11 + PEER_TO_PEER = 12 + CONTROL_NET = 13 + EMANE_NET = 14 + + +class Rj45Models(Enum): + """ + RJ45 model types. + """ + LINKED = 0 + WIRELESS = 1 + INSTALLED = 2 + + +# Link Message TLV Types +class LinkTlvs(Enum): + """ + Link type, length, value enumerations. + """ + N1_NUMBER = 0x01 + N2_NUMBER = 0x02 + DELAY = 0x03 + BANDWIDTH = 0x04 + PER = 0x05 + DUP = 0x06 + JITTER = 0x07 + MER = 0x08 + BURST = 0x09 + SESSION = 0x0A + MBURST = 0x10 + TYPE = 0x20 + GUI_ATTRIBUTES = 0x21 + UNIDIRECTIONAL = 0x22 + EMULATION_ID = 0x23 + NETWORK_ID = 0x24 + KEY = 0x25 + INTERFACE1_NUMBER = 0x30 + INTERFACE1_IP4 = 0x31 + INTERFACE1_IP4_MASK = 0x32 + INTERFACE1_MAC = 0x33 + INTERFACE1_IP6 = 0x34 + INTERFACE1_IP6_MASK = 0x35 + INTERFACE2_NUMBER = 0x36 + INTERFACE2_IP4 = 0x37 + INTERFACE2_IP4_MASK = 0x38 + INTERFACE2_MAC = 0x39 + INTERFACE2_IP6 = 0x40 + INTERFACE2_IP6_MASK = 0x41 + INTERFACE1_NAME = 0x42 + INTERFACE2_NAME = 0x43 + OPAQUE = 0x50 + + +class LinkTypes(Enum): + """ + Link types. + """ + WIRELESS = 0 + WIRED = 1 + + +class ExecuteTlvs(Enum): + """ + Execute type, length, value enumerations. + """ + NODE = 0x01 + NUMBER = 0x02 + TIME = 0x03 + COMMAND = 0x04 + RESULT = 0x05 + STATUS = 0x06 + SESSION = 0x0A + + +class RegisterTlvs(Enum): + """ + Register type, length, value enumerations. + """ + WIRELESS = 0x01 + MOBILITY = 0x02 + UTILITY = 0x03 + EXECUTE_SERVER = 0x04 + GUI = 0x05 + EMULATION_SERVER = 0x06 + SESSION = 0x0A + + +class ConfigTlvs(Enum): + """ + Configuration type, length, value enumerations. + """ + NODE = 0x01 + OBJECT = 0x02 + TYPE = 0x03 + DATA_TYPES = 0x04 + VALUES = 0x05 + CAPTIONS = 0x06 + BITMAP = 0x07 + POSSIBLE_VALUES = 0x08 + GROUPS = 0x09 + SESSION = 0x0A + INTERFACE_NUMBER = 0x0B + NETWORK_ID = 0x24 + OPAQUE = 0x50 + + +class ConfigFlags(Enum): + """ + Configuration flags. + """ + NONE = 0x00 + REQUEST = 0x01 + UPDATE = 0x02 + RESET = 0x03 + + +class ConfigDataTypes(Enum): + """ + Configuration data types. + """ + UINT8 = 0x01 + UINT16 = 0x02 + UINT32 = 0x03 + UINT64 = 0x04 + INT8 = 0x05 + INT16 = 0x06 + INT32 = 0x07 + INT64 = 0x08 + FLOAT = 0x09 + STRING = 0x0A + BOOL = 0x0B + + +class FileTlvs(Enum): + """ + File type, length, value enumerations. + """ + NODE = 0x01 + NAME = 0x02 + MODE = 0x03 + NUMBER = 0x04 + TYPE = 0x05 + SOURCE_NAME = 0x06 + SESSION = 0x0A + DATA = 0x10 + COMPRESSED_DATA = 0x11 + + +class InterfaceTlvs(Enum): + """ + Interface type, length, value enumerations. + """ + NODE = 0x01 + NUMBER = 0x02 + NAME = 0x03 + IP_ADDRESS = 0x04 + MASK = 0x05 + MAC_ADDRESS = 0x06 + IP6_ADDRESS = 0x07 + IP6_MASK = 0x08 + TYPE = 0x09 + SESSION = 0x0A + STATE = 0x0B + EMULATION_ID = 0x23 + NETWORK_ID = 0x24 + + +class EventTlvs(Enum): + """ + Event type, length, value enumerations. + """ + NODE = 0x01 + TYPE = 0x02 + NAME = 0x03 + DATA = 0x04 + TIME = 0x05 + SESSION = 0x0A + + +class EventTypes(Enum): + """ + Event types. 
+ """ + NONE = 0 + DEFINITION_STATE = 1 + CONFIGURATION_STATE = 2 + INSTANTIATION_STATE = 3 + RUNTIME_STATE = 4 + DATACOLLECT_STATE = 5 + SHUTDOWN_STATE = 6 + START = 7 + STOP = 8 + PAUSE = 9 + RESTART = 10 + FILE_OPEN = 11 + FILE_SAVE = 12 + SCHEDULED = 13 + RECONFIGURE = 14 + INSTANTIATION_COMPLETE = 15 + + +class SessionTlvs(Enum): + """ + Session type, length, value enumerations. + """ + NUMBER = 0x01 + NAME = 0x02 + FILE = 0x03 + NODE_COUNT = 0x04 + DATE = 0x05 + THUMB = 0x06 + USER = 0x07 + OPAQUE = 0x0A + + +class ExceptionTlvs(Enum): + """ + Exception type, length, value enumerations. + """ + NODE = 0x01 + SESSION = 0x02 + LEVEL = 0x03 + SOURCE = 0x04 + DATE = 0x05 + TEXT = 0x06 + OPAQUE = 0x0A + + +class ExceptionLevels(Enum): + """ + Exception levels. + """ + NONE = 0 + FATAL = 1 + ERROR = 2 + WARNING = 3 + NOTICE = 4 diff --git a/daemon/core/misc/ipaddress.py b/daemon/core/misc/ipaddress.py new file mode 100644 index 00000000..f5abd6ba --- /dev/null +++ b/daemon/core/misc/ipaddress.py @@ -0,0 +1,449 @@ +""" +Helper objects for dealing with IPv4/v6 addresses. +""" + +import random +import socket +import struct +from socket import AF_INET +from socket import AF_INET6 + +from core.misc import log + +logger = log.get_logger(__name__) + + +class MacAddress(object): + """ + Provides mac address utilities for use within core. + """ + + def __init__(self, address): + """ + Creates a MacAddress instance. + + :param str address: mac address + """ + self.addr = address + + def __str__(self): + """ + Create a string representation of a MacAddress. + + :return: string representation + :rtype: str + """ + return ":".join(map(lambda x: "%02x" % ord(x), self.addr)) + + def to_link_local(self): + """ + Convert the MAC address to a IPv6 link-local address, using EUI 48 + to EUI 64 conversion process per RFC 5342. + + :return: ip address object + :rtype: IpAddress + """ + if not self.addr: + return IpAddress.from_string("::") + tmp = struct.unpack("!Q", '\x00\x00' + self.addr)[0] + nic = long(tmp) & 0x000000FFFFFFL + oui = long(tmp) & 0xFFFFFF000000L + # toggle U/L bit + oui ^= 0x020000000000L + # append EUI-48 octets + oui = (oui << 16) | 0xFFFE000000L + return IpAddress(AF_INET6, struct.pack("!QQ", 0xfe80 << 48, oui | nic)) + + @classmethod + def from_string(cls, s): + """ + Create a mac address object from a string. + + :param s: string representation of a mac address + :return: mac address class + :rtype: MacAddress + """ + addr = "".join(map(lambda x: chr(int(x, 16)), s.split(":"))) + return cls(addr) + + @classmethod + def random(cls): + """ + Create a random mac address. + + :return: random mac address + :rtype: MacAddress + """ + tmp = random.randint(0, 0xFFFFFF) + # use the Xen OID 00:16:3E + tmp |= 0x00163E << 24 + tmpbytes = struct.pack("!Q", tmp) + return cls(tmpbytes[2:]) + + +class IpAddress(object): + """ + Provides ip utilities and functionality for use within core. + """ + + def __init__(self, af, address): + """ + Create a IpAddress instance. + + :param int af: address family + :param str address: ip address + :return: + """ + # check if (af, addr) is valid + if not socket.inet_ntop(af, address): + raise ValueError("invalid af/addr") + self.af = af + self.addr = address + + def is_ipv4(self): + """ + Checks if this is an ipv4 address. + + :return: True if ipv4 address, False otherwise + :rtype: bool + """ + return self.af == AF_INET + + def is_ipv6(self): + """ + Checks if this is an ipv6 address. 
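+
+        Example (illustrative)::
+
+            IpAddress(AF_INET6, socket.inet_pton(AF_INET6, "::1")).is_ipv6()  # True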
+ + :return: True if ipv6 address, False otherwise + :rtype: bool + """ + return self.af == AF_INET6 + + def __str__(self): + """ + Create a string representation of this address. + + :return: string representation of address + :rtype: str + """ + return socket.inet_ntop(self.af, self.addr) + + def __eq__(self, other): + """ + Checks for equality with another ip address. + + :param IpAddress other: other ip address to check equality with + :return: True is the other IpAddress is equal, False otherwise + :rtype: bool + """ + if not isinstance(other, IpAddress): + return False + elif self is other: + return True + else: + return other.af == self.af and other.addr == self.addr + + def __add__(self, other): + """ + Add value to ip addresses. + + :param int other: value to add to ip address + :return: added together ip address instance + :rtype: IpAddress + """ + try: + carry = int(other) + except ValueError: + logger.exception("error during addition") + return NotImplemented + + tmp = map(lambda x: ord(x), self.addr) + for i in xrange(len(tmp) - 1, -1, -1): + x = tmp[i] + carry + tmp[i] = x & 0xff + carry = x >> 8 + if carry == 0: + break + addr = "".join(map(lambda x: chr(x), tmp)) + return self.__class__(self.af, addr) + + def __sub__(self, other): + """ + Subtract value from ip address. + + :param int other: value to subtract from ip address + :return: + """ + try: + tmp = -int(other) + except ValueError: + logger.exception("error during subtraction") + return NotImplemented + + return self.__add__(tmp) + + @classmethod + def from_string(cls, s): + """ + Create a ip address from a string representation. + + :param s: string representation to create ip address from + :return: ip address instance + :rtype: IpAddress + """ + for af in AF_INET, AF_INET6: + return cls(af, socket.inet_pton(af, s)) + + @staticmethod + def to_int(s): + """ + Convert IPv4 string to integer + + :param s: string to convert to 32-bit integer + :return: integer value + :rtype: int + """ + bin = socket.inet_pton(AF_INET, s) + return struct.unpack('!I', bin)[0] + + +class IpPrefix(object): + """ + Provides ip address generation and prefix utilities. + """ + + def __init__(self, af, prefixstr): + """ + Create a IpPrefix instance. + + :param int af: address family for ip prefix + :param prefixstr: ip prefix string + """ + # prefixstr format: address/prefixlen + tmp = prefixstr.split("/") + if len(tmp) > 2: + raise ValueError("invalid prefix: '%s'" % prefixstr) + self.af = af + if self.af == AF_INET: + self.addrlen = 32 + elif self.af == AF_INET6: + self.addrlen = 128 + else: + raise ValueError("invalid address family: '%s'" % self.af) + if len(tmp) == 2: + self.prefixlen = int(tmp[1]) + else: + self.prefixlen = self.addrlen + self.prefix = socket.inet_pton(self.af, tmp[0]) + if self.addrlen > self.prefixlen: + addrbits = self.addrlen - self.prefixlen + netmask = ((1L << self.prefixlen) - 1) << addrbits + prefix = "" + for i in xrange(-1, -(addrbits >> 3) - 2, -1): + prefix = chr(ord(self.prefix[i]) & (netmask & 0xff)) + prefix + netmask >>= 8 + self.prefix = self.prefix[:i] + prefix + + def __str__(self): + """ + String representation of an ip prefix. + + :return: string representation + :rtype: str + """ + return "%s/%s" % (socket.inet_ntop(self.af, self.prefix), self.prefixlen) + + def __eq__(self, other): + """ + Compare equality with another ip prefix. 
+ + :param IpPrefix other: other ip prefix to compare with + :return: True is equal, False otherwise + :rtype: bool + """ + if not isinstance(other, IpPrefix): + return False + elif self is other: + return True + else: + return other.af == self.af and other.prefixlen == self.prefixlen and other.prefix == self.prefix + + def __add__(self, other): + """ + Add a value to this ip prefix. + + :param int other: value to add + :return: added ip prefix instance + :rtype: IpPrefix + """ + try: + tmp = int(other) + except ValueError: + logger.exception("error during addition") + return NotImplemented + + a = IpAddress(self.af, self.prefix) + (tmp << (self.addrlen - self.prefixlen)) + prefixstr = "%s/%s" % (a, self.prefixlen) + if self.__class__ == IpPrefix: + return self.__class__(self.af, prefixstr) + else: + return self.__class__(prefixstr) + + def __sub__(self, other): + """ + Subtract value from this ip prefix. + + :param int other: value to subtract + :return: subtracted ip prefix instance + :rtype: IpPrefix + """ + try: + tmp = -int(other) + except ValueError: + logger.exception("error during subtraction") + return NotImplemented + + return self.__add__(tmp) + + def addr(self, hostid): + """ + Create an ip address for a given host id. + + :param hostid: host id for an ip address + :return: ip address + :rtype: IpAddress + """ + tmp = int(hostid) + if tmp in [-1, 0, 1] and self.addrlen == self.prefixlen: + return IpAddress(self.af, self.prefix) + + if tmp == 0 or tmp > (1 << (self.addrlen - self.prefixlen)) - 1 or ( + self.af == AF_INET and tmp == (1 << (self.addrlen - self.prefixlen)) - 1): + raise ValueError("invalid hostid for prefix %s: %s" % (self, hostid)) + + addr = "" + prefix_endpoint = -1 + for i in xrange(-1, -(self.addrlen >> 3) - 1, -1): + prefix_endpoint = i + addr = chr(ord(self.prefix[i]) | (tmp & 0xff)) + addr + tmp >>= 8 + if not tmp: + break + addr = self.prefix[:prefix_endpoint] + addr + return IpAddress(self.af, addr) + + def min_addr(self): + """ + Return the minimum ip address for this prefix. + + :return: minimum ip address + :rtype: IpAddress + """ + return self.addr(1) + + def max_addr(self): + """ + Return the maximum ip address for this prefix. + + :return: maximum ip address + :rtype: IpAddress + """ + if self.af == AF_INET: + return self.addr((1 << (self.addrlen - self.prefixlen)) - 2) + else: + return self.addr((1 << (self.addrlen - self.prefixlen)) - 1) + + def num_addr(self): + """ + Retrieve the number of ip addresses for this prefix. + + :return: maximum number of ip addresses + :rtype: int + """ + return max(0, (1 << (self.addrlen - self.prefixlen)) - 2) + + def prefix_str(self): + """ + Retrieve the prefix string for this ip address. + + :return: prefix string + :rtype: str + """ + return "%s" % socket.inet_ntop(self.af, self.prefix) + + def netmask_str(self): + """ + Retrieve the netmask string for this ip address. + + :return: netmask string + :rtype: str + """ + addrbits = self.addrlen - self.prefixlen + netmask = ((1L << self.prefixlen) - 1) << addrbits + netmaskbytes = struct.pack("!L", netmask) + return IpAddress(af=AF_INET, address=netmaskbytes).__str__() + + +class Ipv4Prefix(IpPrefix): + """ + Provides an ipv4 specific class for ip prefixes. + """ + + def __init__(self, prefixstr): + """ + Create a Ipv4Prefix instance. + + :param str prefixstr: ip prefix + """ + IpPrefix.__init__(self, AF_INET, prefixstr) + + +class Ipv6Prefix(IpPrefix): + """ + Provides an ipv6 specific class for ip prefixes. 
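+
+    Example usage (illustrative, using the 2001:db8::/64 documentation
+    prefix)::
+
+        prefix = Ipv6Prefix("2001:db8::/64")
+        str(prefix.min_addr())  # first usable address in the prefix
+        prefix.num_addr()       # number of addresses available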
+ """ + + def __init__(self, prefixstr): + """ + Create a Ipv6Prefix instance. + + :param str prefixstr: ip prefix + """ + IpPrefix.__init__(self, AF_INET6, prefixstr) + + +def is_ip_address(af, addrstr): + """ + Check if ip address string is a valid ip address. + + :param int af: address family + :param str addrstr: ip address string + :return: True if a valid ip address, False otherwise + :rtype: bool + """ + try: + socket.inet_pton(af, addrstr) + return True + except IOError: + return False + + +def is_ipv4_address(addrstr): + """ + Check if ipv4 address string is a valid ipv4 address. + + :param str addrstr: ipv4 address string + :return: True if a valid ipv4 address, False otherwise + :rtype: bool + """ + return is_ip_address(AF_INET, addrstr) + + +def is_ipv6_address(addrstr): + """ + Check if ipv6 address string is a valid ipv6 address. + + :param str addrstr: ipv6 address string + :return: True if a valid ipv6 address, False otherwise + :rtype: bool + """ + return is_ip_address(AF_INET6, addrstr) diff --git a/daemon/core/misc/log.py b/daemon/core/misc/log.py new file mode 100644 index 00000000..a12b183e --- /dev/null +++ b/daemon/core/misc/log.py @@ -0,0 +1,35 @@ +""" +Convenience methods to setup logging. +""" + +import logging + +_LOG_LEVEL = logging.INFO +_LOG_FORMAT = "%(levelname)-7s %(asctime)s %(name)-15s %(funcName)-15s %(lineno)-4d: %(message)s" +_INITIAL = True + + +def setup(level=_LOG_LEVEL, log_format=_LOG_FORMAT): + """ + Configure a logging with a basic configuration, output to console. + + :param logging.LEVEL level: level for logger, defaults to module defined format + :param int log_format: format for logger, default to DEBUG + :return: nothing + """ + logging.basicConfig(level=level, format=log_format) + + +def get_logger(name): + """ + Retrieve a logger for logging. + + :param str name: name for logger to retrieve + :return: logging.logger + """ + global _INITIAL + if _INITIAL: + setup() + _INITIAL = False + + return logging.getLogger(name) diff --git a/daemon/core/misc/nodemaps.py b/daemon/core/misc/nodemaps.py new file mode 100644 index 00000000..72249aee --- /dev/null +++ b/daemon/core/misc/nodemaps.py @@ -0,0 +1,50 @@ +""" +Provides default node maps that can be used to run core with. 
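+
+Illustrative usage (sketch based on the nodeutils module added in this patch):
+a map is installed globally with nodeutils.set_node_map(CLASSIC_NODES) or
+nodeutils.set_node_map(OVS_NODES), after which
+nodeutils.get_node_class(NodeTypes.SWITCH) returns the class configured by the
+chosen map.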
+""" + +from core.emane.nodes import EmaneNet +from core.emane.nodes import EmaneNode +from core.enumerations import NodeTypes +from core.netns import nodes +from core.netns import openvswitch +from core.netns.vnet import GreTapBridge +from core.phys import pnodes +from core.xen import xen + +# legacy core nodes, that leverage linux bridges +CLASSIC_NODES = { + NodeTypes.DEFAULT: nodes.CoreNode, + NodeTypes.PHYSICAL: pnodes.PhysicalNode, + NodeTypes.XEN: xen.XenNode, + NodeTypes.TBD: None, + NodeTypes.SWITCH: nodes.SwitchNode, + NodeTypes.HUB: nodes.HubNode, + NodeTypes.WIRELESS_LAN: nodes.WlanNode, + NodeTypes.RJ45: nodes.RJ45Node, + NodeTypes.TUNNEL: nodes.TunnelNode, + NodeTypes.KTUNNEL: None, + NodeTypes.EMANE: EmaneNode, + NodeTypes.EMANE_NET: EmaneNet, + NodeTypes.TAP_BRIDGE: GreTapBridge, + NodeTypes.PEER_TO_PEER: nodes.PtpNet, + NodeTypes.CONTROL_NET: nodes.CtrlNet +} + +# ovs nodes, that depend on ovs to leverage ovs based bridges +OVS_NODES = { + NodeTypes.DEFAULT: nodes.CoreNode, + NodeTypes.PHYSICAL: pnodes.PhysicalNode, + NodeTypes.XEN: xen.XenNode, + NodeTypes.TBD: None, + NodeTypes.SWITCH: openvswitch.OvsSwitchNode, + NodeTypes.HUB: openvswitch.OvsHubNode, + NodeTypes.WIRELESS_LAN: openvswitch.OvsWlanNode, + NodeTypes.RJ45: nodes.RJ45Node, + NodeTypes.TUNNEL: openvswitch.OvsTunnelNode, + NodeTypes.KTUNNEL: None, + NodeTypes.EMANE: EmaneNode, + NodeTypes.EMANE_NET: EmaneNet, + NodeTypes.TAP_BRIDGE: openvswitch.OvsGreTapBridge, + NodeTypes.PEER_TO_PEER: openvswitch.OvsPtpNet, + NodeTypes.CONTROL_NET: openvswitch.OvsCtrlNet +} diff --git a/daemon/core/misc/nodeutils.py b/daemon/core/misc/nodeutils.py new file mode 100644 index 00000000..27a41892 --- /dev/null +++ b/daemon/core/misc/nodeutils.py @@ -0,0 +1,68 @@ +""" +Serves as a global point for storing and retrieving node types needed during simulation. +""" + +import pprint + +from core.misc import log + +logger = log.get_logger(__name__) + +_NODE_MAP = None + + +def _convert_map(x, y): + """ + Convenience method to create a human readable version of the node map to log. + + :param dict x: dictionary to reduce node items into + :param tuple y: current node item + :return: + """ + x[y[0].name] = y[1] + return x + + +def set_node_map(node_map): + """ + Set the global node map that proides a consistent way to retrieve differently configured nodes. + + :param dict node_map: node map to set to + :return: nothing + """ + global _NODE_MAP + print_map = reduce(lambda x, y: _convert_map(x, y), node_map.items(), {}) + logger.info("setting node class map: \n%s", pprint.pformat(print_map, indent=4)) + _NODE_MAP = node_map + + +def get_node_class(node_type): + """ + Retrieve the node class for a given node type. + + :param int node_type: node type to retrieve class for + :return: node class + """ + global _NODE_MAP + return _NODE_MAP[node_type] + + +def is_node(obj, node_types): + """ + Validates if an object is one of the provided node types. 
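+
+    Examples (illustrative):
+        is_node(net, NodeTypes.WIRELESS_LAN)
+        is_node(net, (NodeTypes.SWITCH, NodeTypes.HUB))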
+ + :param obj: object to check type for + :param int|tuple|list node_types: node type(s) to check against + :return: True if the object is one of the node types, False otherwise + :rtype: bool + """ + type_classes = [] + if isinstance(node_types, (tuple, list)): + for node_type in node_types: + type_class = get_node_class(node_type) + type_classes.append(type_class) + else: + type_class = get_node_class(node_types) + type_classes.append(type_class) + + return isinstance(obj, tuple(type_classes)) diff --git a/daemon/core/misc/structutils.py b/daemon/core/misc/structutils.py new file mode 100644 index 00000000..c58c6f56 --- /dev/null +++ b/daemon/core/misc/structutils.py @@ -0,0 +1,48 @@ +""" +Utilities for working with python struct data. +""" + +from core.misc import log + +logger = log.get_logger(__name__) + + +def pack_values(clazz, packers): + """ + Pack values for a given legacy class. + + :param class clazz: class that will provide a pack method + :param list packers: a list of tuples that are used to pack values and transform them + :return: packed data string of all values + """ + + # iterate through tuples of values to pack + data = "" + for packer in packers: + # check if a transformer was provided for valid values + transformer = None + if len(packer) == 2: + tlv_type, value = packer + elif len(packer) == 3: + tlv_type, value, transformer = packer + else: + raise RuntimeError("packer had more than 3 arguments") + + # convert unicode to normal str for packing + if isinstance(value, unicode): + value = str(value) + + # only pack actual values and avoid packing empty strings + # protobuf defaults to empty strings and does no imply a value to set + if value is None or (isinstance(value, str) and not value): + continue + + # transform values as needed + if transformer: + value = transformer(value) + + # pack and add to existing data + logger.info("packing: %s - %s", tlv_type, value) + data += clazz.pack(tlv_type.value, value) + + return data diff --git a/daemon/core/netns/openvswitch.py b/daemon/core/netns/openvswitch.py new file mode 100644 index 00000000..e3fe058d --- /dev/null +++ b/daemon/core/netns/openvswitch.py @@ -0,0 +1,741 @@ +""" +TODO: probably goes away, or implement the usage of "unshare", or docker formal. +""" + +import socket +import subprocess +import threading +from socket import AF_INET +from socket import AF_INET6 + +from core import constants +from core.coreobj import PyCoreNet +from core.data import LinkData +from core.enumerations import LinkTypes +from core.enumerations import NodeTypes +from core.enumerations import RegisterTlvs +from core.misc import ipaddress +from core.misc import log +from core.misc import utils +from core.netns.vif import GreTap +from core.netns.vif import VEth +from core.netns.vnet import EbtablesQueue +from core.netns.vnet import GreTapBridge + +logger = log.get_logger(__name__) + +# a global object because all WLANs share the same queue +# cannot have multiple threads invoking the ebtables commnd +ebtables_queue = EbtablesQueue() + +ebtables_lock = threading.Lock() + +utils.check_executables([ + constants.IP_BIN, + constants.EBTABLES_BIN, + constants.TC_BIN +]) + + +def ebtables_commands(call, commands): + ebtables_lock.acquire() + try: + for command in commands: + call(command) + finally: + ebtables_lock.release() + + +class OvsNet(PyCoreNet): + """ + Used to be LxBrNet. + + Base class for providing Openvswitch functionality to objects that create bridges. 
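+
+    Bridges are named "b.<objid>.<session id>" (see __init__), and an ebtables
+    chain with the same name enforces the configured link policy.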
+ """ + + policy = "DROP" + + def __init__(self, session, objid=None, name=None, start=True, policy=None): + """ + Creates an OvsNet instance. + + :param core.session.Session session: session this object is a part of + :param objid: + :param name: + :param start: + :param policy: + :return: + """ + + PyCoreNet.__init__(self, session, objid, name, start) + + if policy: + self.policy = policy + else: + self.policy = self.__class__.policy + + session_id = self.session.short_session_id() + self.bridge_name = "b.%s.%s" % (str(self.objid), session_id) + self.up = False + + if start: + self.startup() + ebtables_queue.startupdateloop(self) + + def startup(self): + try: + subprocess.check_call([constants.OVS_BIN, "add-br", self.bridge_name]) + except subprocess.CalledProcessError: + logger.exception("error adding bridge") + + try: + # turn off spanning tree protocol and forwarding delay + # TODO: appears stp and rstp are off by default, make sure this always holds true + # TODO: apears ovs only supports rstp forward delay and again it's off by default + subprocess.check_call([constants.IP_BIN, "link", "set", self.bridge_name, "up"]) + + # create a new ebtables chain for this bridge + ebtables_commands(subprocess.check_call, [ + [constants.EBTABLES_BIN, "-N", self.bridge_name, "-P", self.policy], + [constants.EBTABLES_BIN, "-A", "FORWARD", "--logical-in", self.bridge_name, "-j", self.bridge_name] + ]) + except subprocess.CalledProcessError: + logger.exception("Error setting bridge parameters") + + self.up = True + + def shutdown(self): + if not self.up: + logger.info("exiting shutdown, object is not up") + return + + ebtables_queue.stopupdateloop(self) + + utils.mutecall([constants.IP_BIN, "link", "set", self.bridge_name, "down"]) + utils.mutecall([constants.OVS_BIN, "del-br", self.bridge_name]) + + ebtables_commands(utils.mutecall, [ + [constants.EBTABLES_BIN, "-D", "FORWARD", "--logical-in", self.bridge_name, "-j", self.bridge_name], + [constants.EBTABLES_BIN, "-X", self.bridge_name] + ]) + + for interface in self.netifs(): + # removes veth pairs used for bridge-to-bridge connections + interface.shutdown() + + self._netif.clear() + self._linked.clear() + del self.session + self.up = False + + def attach(self, interface): + if self.up: + try: + subprocess.check_call([constants.OVS_BIN, "add-port", self.bridge_name, interface.localname]) + subprocess.check_call([constants.IP_BIN, "link", "set", interface.localname, "up"]) + except subprocess.CalledProcessError: + logger.exception("error joining interface %s to bridge %s", interface.localname, self.bridge_name) + return + + PyCoreNet.attach(self, interface) + + def detach(self, interface): + if self.up: + try: + subprocess.check_call([constants.OVS_BIN, "del-port", self.bridge_name, interface.localname]) + except subprocess.CalledProcessError: + logger.exception("error removing interface %s from bridge %s", interface.localname, self.bridge_name) + return + + PyCoreNet.detach(self, interface) + + def linked(self, interface_one, interface_two): + # check if the network interfaces are attached to this network + if self._netif[interface_one.netifi] != interface_one: + raise ValueError("inconsistency for interface %s" % interface_one.name) + + if self._netif[interface_two.netifi] != interface_two: + raise ValueError("inconsistency for interface %s" % interface_two.name) + + try: + linked = self._linked[interface_one][interface_two] + except KeyError: + if self.policy == "ACCEPT": + linked = True + elif self.policy == "DROP": + linked = False + else: + 
raise ValueError("unknown policy: %s" % self.policy) + + self._linked[interface_one][interface_two] = linked + + return linked + + def unlink(self, interface_one, interface_two): + """ + Unlink two PyCoreNetIfs, resulting in adding or removing ebtables + filtering rules. + """ + with self._linked_lock: + if not self.linked(interface_one, interface_two): + return + + self._linked[interface_one][interface_two] = False + + ebtables_queue.ebchange(self) + + def link(self, interface_one, interface_two): + """ + Link two PyCoreNetIfs together, resulting in adding or removing + ebtables filtering rules. + """ + with self._linked_lock: + if self.linked(interface_one, interface_two): + return + + self._linked[interface_one][interface_two] = True + + ebtables_queue.ebchange(self) + + def linkconfig(self, interface, bw=None, delay=None, loss=None, duplicate=None, + jitter=None, netif2=None, devname=None): + """ + Configure link parameters by applying tc queuing disciplines on the + interface. + """ + if not devname: + devname = interface.localname + + tc = [constants.TC_BIN, "qdisc", "replace", "dev", devname] + parent = ["root"] + + # attempt to set bandwidth and update as needed if value changed + bandwidth_changed = interface.setparam("bw", bw) + if bandwidth_changed: + # from tc-tbf(8): minimum value for burst is rate / kernel_hz + if bw > 0: + if self.up: + burst = max(2 * interface.mtu, bw / 1000) + limit = 0xffff # max IP payload + tbf = ["tbf", "rate", str(bw), "burst", str(burst), "limit", str(limit)] + logger.info("linkconfig: %s" % [tc + parent + ["handle", "1:"] + tbf]) + subprocess.check_call(tc + parent + ["handle", "1:"] + tbf) + interface.setparam("has_tbf", True) + elif interface.getparam("has_tbf") and bw <= 0: + tcd = [] + tc + tcd[2] = "delete" + + if self.up: + subprocess.check_call(tcd + parent) + + interface.setparam("has_tbf", False) + # removing the parent removes the child + interface.setparam("has_netem", False) + + if interface.getparam("has_tbf"): + parent = ["parent", "1:1"] + + netem = ["netem"] + delay_changed = interface.setparam("delay", delay) + + if loss is not None: + loss = float(loss) + loss_changed = interface.setparam("loss", loss) + + if duplicate is not None: + duplicate = float(duplicate) + duplicate_changed = interface.setparam("duplicate", duplicate) + jitter_changed = interface.setparam("jitter", jitter) + + # if nothing changed return + if not any([bandwidth_changed, delay_changed, loss_changed, duplicate_changed, jitter_changed]): + return + + # jitter and delay use the same delay statement + if delay is not None: + netem += ["delay", "%sus" % delay] + else: + netem += ["delay", "0us"] + + if jitter is not None: + netem += ["%sus" % jitter, "25%"] + + if loss is not None: + netem += ["loss", "%s%%" % min(loss, 100)] + + if duplicate is not None: + netem += ["duplicate", "%s%%" % min(duplicate, 100)] + + if delay <= 0 and jitter <= 0 and loss <= 0 and duplicate <= 0: + # possibly remove netem if it exists and parent queue wasn"t removed + if not interface.getparam("has_netem"): + return + + tc[2] = "delete" + + if self.up: + logger.info("linkconfig: %s" % ([tc + parent + ["handle", "10:"]],)) + subprocess.check_call(tc + parent + ["handle", "10:"]) + interface.setparam("has_netem", False) + elif len(netem) > 1: + if self.up: + logger.info("linkconfig: %s" % ([tc + parent + ["handle", "10:"] + netem],)) + subprocess.check_call(tc + parent + ["handle", "10:"] + netem) + interface.setparam("has_netem", True) + + def linknet(self, network): + """ + Link 
this bridge with another by creating a veth pair and installing + each device into each bridge. + """ + session_id = self.session.short_session_id() + + try: + self_objid = "%x" % self.objid + except TypeError: + self_objid = "%s" % self.objid + + try: + net_objid = "%x" % network.objid + except TypeError: + net_objid = "%s" % network.objid + + localname = "veth%s.%s.%s" % (self_objid, net_objid, session_id) + + if len(localname) >= 16: + raise ValueError("interface local name %s too long" % localname) + + name = "veth%s.%s.%s" % (net_objid, self_objid, session_id) + if len(name) >= 16: + raise ValueError("interface name %s too long" % name) + + interface = VEth(node=None, name=name, localname=localname, mtu=1500, net=self, start=self.up) + self.attach(interface) + if network.up: + # this is similar to net.attach() but uses netif.name instead + # of localname + subprocess.check_call([constants.OVS_BIN, "add-port", network.brname, interface.name]) + subprocess.check_call([constants.IP_BIN, "link", "set", interface.name, "up"]) + + # TODO: is there a native method for this? see if this causes issues + # i = network.newifindex() + # network._netif[i] = interface + # with network._linked_lock: + # network._linked[interface] = {} + # this method call is equal to the above, with a interface.netifi = call + network.attach(interface) + + interface.net = self + interface.othernet = network + return interface + + def getlinknetif(self, network): + """ + Return the interface of that links this net with another net + (that were linked using linknet()). + """ + for interface in self.netifs(): + if hasattr(interface, "othernet") and interface.othernet == network: + return interface + + return None + + def addrconfig(self, addresses): + """ + Set addresses on the bridge. 
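+
+        Example (illustrative values): addrconfig(["172.16.0.1/24"]) runs
+        "ip addr add 172.16.0.1/24 dev <bridge>" when the bridge is up.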
+ """ + if not self.up: + return + + for address in addresses: + try: + subprocess.check_call([constants.IP_BIN, "addr", "add", str(address), "dev", self.bridge_name]) + except subprocess.CalledProcessError: + logger.exception("error adding IP address") + + +class OvsCtrlNet(OvsNet): + policy = "ACCEPT" + CTRLIF_IDX_BASE = 99 # base control interface index + DEFAULT_PREFIX_LIST = [ + "172.16.0.0/24 172.16.1.0/24 172.16.2.0/24 172.16.3.0/24 172.16.4.0/24", + "172.17.0.0/24 172.17.1.0/24 172.17.2.0/24 172.17.3.0/24 172.17.4.0/24", + "172.18.0.0/24 172.18.1.0/24 172.18.2.0/24 172.18.3.0/24 172.18.4.0/24", + "172.19.0.0/24 172.19.1.0/24 172.19.2.0/24 172.19.3.0/24 172.19.4.0/24" + ] + + def __init__(self, session, objid="ctrlnet", name=None, prefix=None, hostid=None, + start=True, assign_address=True, updown_script=None, serverintf=None): + OvsNet.__init__(self, session, objid=objid, name=name, start=start) + self.prefix = ipaddress.Ipv4Prefix(prefix) + self.hostid = hostid + self.assign_address = assign_address + self.updown_script = updown_script + self.serverintf = serverintf + + def startup(self): + if self.detectoldbridge(): + return + + OvsNet.startup(self) + if self.hostid: + addr = self.prefix.addr(self.hostid) + else: + addr = self.prefix.max_addr() + + message = "Added control network bridge: %s %s" % (self.bridge_name, self.prefix) + addresses = ["%s/%s" % (addr, self.prefix.prefixlen)] + if self.assign_address: + self.addrconfig(addresses=addresses) + message += " address %s" % addr + logger.info(message) + + if self.updown_script: + logger.info("interface %s updown script %s startup called" % (self.bridge_name, self.updown_script)) + subprocess.check_call([self.updown_script, self.bridge_name, "startup"]) + + if self.serverintf: + try: + subprocess.check_call([constants.OVS_BIN, "add-port", self.bridge_name, self.serverintf]) + subprocess.check_call([constants.IP_BIN, "link", "set", self.serverintf, "up"]) + except subprocess.CalledProcessError: + logger.exception("error joining server interface %s to controlnet bridge %s", + self.serverintf, self.bridge_name) + + def detectoldbridge(self): + """ + Occassionally, control net bridges from previously closed sessions are not cleaned up. + Check if there are old control net bridges and delete them + """ + + status, output = utils.cmdresult([constants.OVS_BIN, "list-br"]) + output = output.strip() + if output: + for line in output.split("\n"): + bride_name = line.split(".") + if bride_name[0] == "b" and bride_name[1] == self.objid: + logger.error("older session may still be running with conflicting id for bridge: %s", line) + return True + + return False + + def shutdown(self): + if self.serverintf: + try: + subprocess.check_call([constants.OVS_BIN, "del-port", self.bridge_name, self.serverintf]) + except subprocess.CalledProcessError: + logger.exception("Error deleting server interface %s to controlnet bridge %s", + self.serverintf, self.bridge_name) + + if self.updown_script: + logger.info("interface %s updown script '%s shutdown' called", self.bridge_name, self.updown_script) + subprocess.check_call([self.updown_script, self.bridge_name, "shutdown"]) + + OvsNet.shutdown(self) + + def all_link_data(self, flags): + """ + Do not include CtrlNet in link messages describing this session. 
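+        Always returns an empty list.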
+ """ + return [] + + +class OvsPtpNet(OvsNet): + policy = "ACCEPT" + + def attach(self, interface): + if len(self._netif) >= 2: + raise ValueError("point-to-point links support at most 2 network interfaces") + OvsNet.attach(self, interface) + + def data(self, message_type): + """ + Do not generate a Node Message for point-to-point links. They are + built using a link message instead. + """ + pass + + def all_link_data(self, flags): + """ + Build CORE API TLVs for a point-to-point link. One Link message describes this network. + """ + + all_links = [] + + if len(self._netif) != 2: + return all_links + + if1, if2 = self._netif.items() + if1 = if1[1] + if2 = if2[1] + + unidirectional = 0 + if if1.getparams() != if2.getparams(): + unidirectional = 1 + + interface1_ip4 = None + interface1_ip4_mask = None + interface1_ip6 = None + interface1_ip6_mask = None + for address in if1.addrlist: + ip, sep, mask = address.partition('/') + mask = int(mask) + if ipaddress.is_ipv4_address(ip): + family = AF_INET + ipl = socket.inet_pton(family, ip) + interface1_ip4 = ipaddress.IpAddress(af=family, address=ipl) + interface1_ip4_mask = mask + else: + family = AF_INET6 + ipl = socket.inet_pton(family, ip) + interface1_ip6 = ipaddress.IpAddress(af=family, address=ipl) + interface1_ip6_mask = mask + + interface2_ip4 = None + interface2_ip4_mask = None + interface2_ip6 = None + interface2_ip6_mask = None + for address in if2.addrlist: + ip, sep, mask = address.partition('/') + mask = int(mask) + if ipaddress.is_ipv4_address(ip): + family = AF_INET + ipl = socket.inet_pton(family, ip) + interface2_ip4 = ipaddress.IpAddress(af=family, address=ipl) + interface2_ip4_mask = mask + else: + family = AF_INET6 + ipl = socket.inet_pton(family, ip) + interface2_ip6 = ipaddress.IpAddress(af=family, address=ipl) + interface2_ip6_mask = mask + + # TODO: not currently used + # loss=netif.getparam('loss') + link_data = LinkData( + message_type=flags, + node1_id=if1.node.objid, + node2_id=if2.node.objid, + link_type=self.linktype, + unidirectional=unidirectional, + delay=if1.getparam("delay"), + bandwidth=if1.getparam("bw"), + dup=if1.getparam("duplicate"), + jitter=if1.getparam("jitter"), + interface1_id=if1.node.getifindex(if1), + interface1_mac=if1.hwaddr, + interface1_ip4=interface1_ip4, + interface1_ip4_mask=interface1_ip4_mask, + interface1_ip6=interface1_ip6, + interface1_ip6_mask=interface1_ip6_mask, + interface2_id=if2.node.getifindex(if2), + interface2_mac=if2.hwaddr, + interface2_ip4=interface2_ip4, + interface2_ip4_mask=interface2_ip4_mask, + interface2_ip6=interface2_ip6, + interface2_ip6_mask=interface2_ip6_mask, + ) + + all_links.append(link_data) + + # build a 2nd link message for the upstream link parameters + # (swap if1 and if2) + if unidirectional: + link_data = LinkData( + message_type=0, + node1_id=if2.node.objid, + node2_id=if1.node.objid, + delay=if1.getparam("delay"), + bandwidth=if1.getparam("bw"), + dup=if1.getparam("duplicate"), + jitter=if1.getparam("jitter"), + unidirectional=1, + interface1_id=if2.node.getifindex(if2), + interface2_id=if1.node.getifindex(if1) + ) + all_links.append(link_data) + + return all_links + + +class OvsSwitchNode(OvsNet): + apitype = NodeTypes.SWITCH.value + policy = "ACCEPT" + type = "lanswitch" + + +class OvsHubNode(OvsNet): + apitype = NodeTypes.HUB.value + policy = "ACCEPT" + type = "hub" + + def __init__(self, session, objid=None, name=None, start=True): + """ + the Hub node forwards packets to all bridge ports by turning off + the MAC address learning + """ + 
OvsNet.__init__(self, session, objid, name, start) + + if start: + # TODO: verify that the below flow accomplishes what is desired for a "HUB" + # TODO: replace "brctl setageing 0" + subprocess.check_call([constants.OVS_FLOW_BIN, "add-flow", self.bridge_name, "action=flood"]) + + +class OvsWlanNode(OvsNet): + apitype = NodeTypes.WIRELESS_LAN.value + linktype = LinkTypes.WIRELESS.value + policy = "DROP" + type = "wlan" + + def __init__(self, session, objid=None, name=None, start=True, policy=None): + OvsNet.__init__(self, session, objid, name, start, policy) + + # wireless model such as basic range + self.model = None + # mobility model such as scripted + self.mobility = None + + def attach(self, interface): + OvsNet.attach(self, interface) + + if self.model: + interface.poshook = self.model.position_callback + + if interface.node is None: + return + + x, y, z = interface.node.position.get() + # invokes any netif.poshook + interface.setposition(x, y, z) + # self.model.setlinkparams() + + def setmodel(self, model, config): + """ + Mobility and wireless model. + """ + logger.info("adding model %s", model.name) + + if model.type == RegisterTlvs.WIRELESS.value: + self.model = model(session=self.session, object_id=self.objid, values=config) + if self.model.position_callback: + for interface in self.netifs(): + interface.poshook = self.model.position_callback + if interface.node is not None: + x, y, z = interface.node.position.get() + interface.poshook(interface, x, y, z) + self.model.setlinkparams() + elif model.type == RegisterTlvs.MOBILITY.value: + self.mobility = model(session=self.session, object_id=self.objid, values=config) + + def updatemodel(self, model_name, values): + """ + Allow for model updates during runtime (similar to setmodel().) + """ + logger.info("updating model %s", model_name) + if self.model is None or self.model.name != model_name: + logger.info( + "failure to update model, model doesn't exist or invalid name: model(%s) - name(%s)", + self.model, model_name + ) + return + + model = self.model + if model.type == RegisterTlvs.WIRELESS.value: + if not model.updateconfig(values): + return + if self.model.position_callback: + for interface in self.netifs(): + interface.poshook = self.model.position_callback + if interface.node is not None: + x, y, z = interface.node.position.get() + interface.poshook(interface, x, y, z) + self.model.setlinkparams() + + def all_link_data(self, flags): + all_links = OvsNet.all_link_data(self, flags) + + if self.model: + all_links.extend(self.model.all_link_data(flags)) + + return all_links + + +class OvsTunnelNode(GreTapBridge): + apitype = NodeTypes.TUNNEL.value + policy = "ACCEPT" + type = "tunnel" + + +class OvsGreTapBridge(OvsNet): + """ + A network consisting of a bridge with a gretap device for tunneling to + another system. 
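+
+    When no key is supplied, the GRE key defaults to session_id XOR objid
+    (see __init__ below).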
+ """ + + def __init__(self, session, remoteip=None, objid=None, name=None, policy="ACCEPT", + localip=None, ttl=255, key=None, start=True): + OvsNet.__init__(self, session=session, objid=objid, name=name, policy=policy, start=False) + self.grekey = key + if self.grekey is None: + self.grekey = self.session.session_id ^ self.objid + + self.localnum = None + self.remotenum = None + self.remoteip = remoteip + self.localip = localip + self.ttl = ttl + + if remoteip is None: + self.gretap = None + else: + self.gretap = GreTap(node=self, name=None, session=session, remoteip=remoteip, + objid=None, localip=localip, ttl=ttl, key=self.grekey) + if start: + self.startup() + + def startup(self): + """ + Creates a bridge and adds the gretap device to it. + """ + OvsNet.startup(self) + + if self.gretap: + self.attach(self.gretap) + + def shutdown(self): + """ + Detach the gretap device and remove the bridge. + """ + if self.gretap: + self.detach(self.gretap) + self.gretap.shutdown() + self.gretap = None + + OvsNet.shutdown(self) + + def addrconfig(self, addresses): + """ + Set the remote tunnel endpoint. This is a one-time method for + creating the GreTap device, which requires the remoteip at startup. + The 1st address in the provided list is remoteip, 2nd optionally + specifies localip. + """ + if self.gretap: + raise ValueError("gretap already exists for %s" % self.name) + + remoteip = addresses[0].split('/')[0] + localip = None + + if len(addresses) > 1: + localip = addresses[1].split('/')[0] + + self.gretap = GreTap(session=self.session, remoteip=remoteip, objid=None, name=None, + localip=localip, ttl=self.ttl, key=self.grekey) + self.attach(self.gretap) + + def setkey(self, key): + """ + Set the GRE key used for the GreTap device. This needs to be set + prior to instantiating the GreTap device (before addrconfig). + """ + self.grekey = key diff --git a/daemon/core/xml/__init__.py b/daemon/core/xml/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/daemon/core/xml/xmldeployment.py b/daemon/core/xml/xmldeployment.py new file mode 100644 index 00000000..d94bfce6 --- /dev/null +++ b/daemon/core/xml/xmldeployment.py @@ -0,0 +1,207 @@ +import os +import socket +import subprocess + +from core import constants +from core import emane +from core.enumerations import NodeTypes +from core.misc import ipaddress +from core.misc import log +from core.misc import nodeutils +from core.netns import nodes +from core.xml import xmlutils + +logger = log.get_logger(__name__) + + +class CoreDeploymentWriter(object): + def __init__(self, dom, root, session): + self.dom = dom + self.root = root + self.session = session + self.hostname = socket.gethostname() + if emane.VERSION < emane.EMANE092: + self.transport = None + self.platform = None + + @staticmethod + def get_ipv4_addresses(hostname): + if hostname == 'localhost': + addr_list = [] + cmd = (constants.IP_BIN, '-o', '-f', 'inet', 'addr', 'show') + output = subprocess.check_output(cmd) + for line in output.split(os.linesep): + split = line.split() + if not split: + continue + addr = split[3] + if not addr.startswith('127.'): + addr_list.append(addr) + return addr_list + else: + # TODO: handle other hosts + raise NotImplementedError + + @staticmethod + def get_interface_names(hostname): + """ + Uses same methodology of get_ipv4_addresses() to get + parallel list of interface names to go with ... 
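+
+        Example (illustrative, hypothetical host): a host with a single
+        non-loopback interface might return ['eth0'], parallel to the
+        ['192.0.2.10/24']-style list produced by get_ipv4_addresses().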
+ """ + if hostname == 'localhost': + iface_list = [] + cmd = (constants.IP_BIN, '-o', '-f', 'inet', 'addr', 'show') + output = subprocess.check_output(cmd) + for line in output.split(os.linesep): + split = line.split() + if not split: + continue + interface_name = split[1] + addr = split[3] + if not addr.startswith('127.'): + iface_list.append(interface_name) + return iface_list + else: + # TODO: handle other hosts + raise NotImplementedError + + @staticmethod + def find_device(scenario, name): + tag_name = ('device', 'host', 'router') + for d in xmlutils.iter_descendants_with_attribute(scenario, tag_name, 'name', name): + return d + return None + + @staticmethod + def find_interface(device, name): + for i in xmlutils.iter_descendants_with_attribute(device, 'interface', 'name', name): + return i + return None + + def add_deployment(self): + testbed = self.dom.createElement('container') + testbed.setAttribute('name', 'TestBed') + testbed.setAttribute('id', 'TestBed') + self.root.base_element.appendChild(testbed) + nodelist = [] + for obj in self.session.objects.itervalues(): + if isinstance(obj, nodes.PyCoreNode): + nodelist.append(obj) + name = self.hostname + ipv4_addresses = self.get_ipv4_addresses('localhost') + iface_names = self.get_interface_names('localhost') + testhost = self.add_physical_host(testbed, name, ipv4_addresses, iface_names) + for n in nodelist: + self.add_virtual_host(testhost, n) + # TODO: handle other servers + # servers = self.session.broker.getservernames() + # servers.remove('localhost') + + def add_child_element(self, parent, tag_name): + el = self.dom.createElement(tag_name) + parent.appendChild(el) + return el + + def add_child_element_with_nameattr(self, parent, tag_name, name, setid=True): + el = self.add_child_element(parent, tag_name) + el.setAttribute('name', name) + if setid: + el.setAttribute('id', '%s/%s' % (parent.getAttribute('id'), name)) + return el + + def add_address(self, parent, address_type, address_str, address_iface=None): + el = self.add_child_element(parent, 'address') + el.setAttribute('type', address_type) + if address_iface is not None: + el.setAttribute('iface', address_iface) + el.appendChild(self.dom.createTextNode(address_str)) + return el + + def add_type(self, parent, type_str): + el = self.add_child_element(parent, 'type') + el.appendChild(self.dom.createTextNode(type_str)) + return el + + def add_platform(self, parent, name): + el = self.add_child_element_with_nameattr(parent, 'emanePlatform', name) + return el + + def add_transport(self, parent, name): + el = self.add_child_element_with_nameattr(parent, 'transport', name) + return el + + def add_nem(self, parent, name): + el = self.add_child_element_with_nameattr(parent, 'nem', name) + return el + + def add_parameter(self, parent, name, val): + el = self.add_child_element_with_nameattr(parent, 'parameter', name, False) + el.appendChild(self.dom.createTextNode(val)) + return el + + def add_mapping(self, parent, maptype, mapref): + el = self.add_child_element(parent, 'mapping') + el.setAttribute('type', maptype) + el.setAttribute('ref', mapref) + return el + + def add_host(self, parent, name): + el = self.add_child_element_with_nameattr(parent, 'testHost', name) + return el + + def add_physical_host(self, parent, name, ipv4_addresses, iface_names): + el = self.add_host(parent, name) + self.add_type(el, 'physical') + for i in range(0, len(ipv4_addresses)): + addr = ipv4_addresses[i] + if iface_names: + interface_name = iface_names[i] + else: + interface_name = None + 
self.add_address(el, 'IPv4', addr, interface_name) + return el + + def add_virtual_host(self, parent, obj): + assert isinstance(obj, nodes.PyCoreNode) + el = self.add_host(parent, obj.name) + device = self.find_device(self.root.base_element, obj.name) + if device is None: + logger.warn('corresponding XML device not found for %s' % obj.name) + return + self.add_mapping(device, 'testHost', el.getAttribute('id')) + self.add_type(el, 'virtual') + for netif in obj.netifs(): + for address in netif.addrlist: + addr, slash, prefixlen = address.partition('/') + if ipaddress.is_ipv4_address(addr): + addr_type = 'IPv4' + elif ipaddress.is_ipv6_address(addr): + addr_type = 'IPv6' + else: + raise NotImplementedError + self.add_address(el, addr_type, address, netif.name) + if nodeutils.is_node(netif.net, NodeTypes.EMANE): + nem = self.add_emane_interface(parent, el, netif) + interface = self.find_interface(device, netif.name) + self.add_mapping(interface, 'nem', nem.getAttribute('id')) + return el + + def add_emane_interface(self, physical_host, virtual_host, netif, platform_name='p1', transport_name='t1'): + nemid = netif.net.nemidmap[netif] + if emane.VERSION < emane.EMANE092: + if self.platform is None: + self.platform = \ + self.add_platform(physical_host, name=platform_name) + platform = self.platform + if self.transport is None: + self.transport = \ + self.add_transport(physical_host, name=transport_name) + transport = self.transport + else: + platform = self.add_platform(virtual_host, name=platform_name) + transport = self.add_transport(virtual_host, name=transport_name) + nem_name = 'nem%s' % nemid + nem = self.add_nem(platform, nem_name) + self.add_parameter(nem, 'nemid', str(nemid)) + self.add_mapping(transport, 'nem', nem.getAttribute('id')) + return nem diff --git a/daemon/core/xml/xmlparser.py b/daemon/core/xml/xmlparser.py new file mode 100644 index 00000000..fbbceffc --- /dev/null +++ b/daemon/core/xml/xmlparser.py @@ -0,0 +1,46 @@ +from xml.dom.minidom import parse + +from core.xml.xmlparser0 import CoreDocumentParser0 +from core.xml.xmlparser1 import CoreDocumentParser1 +from core.xml.xmlutils import get_first_child_by_tag_name + + +class CoreVersionParser(object): + """ + Helper class to check the version of Network Plan document. This + simply looks for a "Scenario" element; when present, this + indicates a 0.0 version document. The dom member is set in order + to prevent parsing a file twice (it can be passed to the + appropriate CoreDocumentParser class.) 
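+
+    In short: a lowercase <scenario> element yields the version from its
+    version attribute (defaulting to 1.0), an uppercase <Scenario> element
+    yields 0.0, and anything else is reported as 'unknown'.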
+ """ + + DEFAULT_SCENARIO_VERSION = '1.0' + + def __init__(self, filename, options): + if 'dom' in options: + self.dom = options['dom'] + else: + self.dom = parse(filename) + scenario = get_first_child_by_tag_name(self.dom, 'scenario') + if scenario: + version = scenario.getAttribute('version') + if not version: + version = self.DEFAULT_SCENARIO_VERSION + self.version = version + elif get_first_child_by_tag_name(self.dom, 'Scenario'): + self.version = '0.0' + else: + self.version = 'unknown' + + +def core_document_parser(session, filename, options): + vp = CoreVersionParser(filename, options) + if 'dom' not in options: + options['dom'] = vp.dom + if vp.version == '0.0': + doc = CoreDocumentParser0(session, filename, options) + elif vp.version == '1.0': + doc = CoreDocumentParser1(session, filename, options) + else: + raise ValueError, 'unsupported document version: %s' % vp.version + return doc diff --git a/daemon/core/xml/xmlparser0.py b/daemon/core/xml/xmlparser0.py new file mode 100644 index 00000000..e701879a --- /dev/null +++ b/daemon/core/xml/xmlparser0.py @@ -0,0 +1,410 @@ +from xml.dom.minidom import parse + +from core.enumerations import NodeTypes +from core.misc import log +from core.misc import nodeutils +from core.service import ServiceManager +from core.xml import xmlutils + +logger = log.get_logger(__name__) + + +class CoreDocumentParser0(object): + def __init__(self, session, filename, options): + self.session = session + self.filename = filename + if 'dom' in options: + # this prevents parsing twice when detecting file versions + self.dom = options['dom'] + else: + self.dom = parse(filename) + self.start = options['start'] + self.nodecls = options['nodecls'] + + self.np = xmlutils.get_one_element(self.dom, "NetworkPlan") + if self.np is None: + raise ValueError, "missing NetworkPlan!" + self.mp = xmlutils.get_one_element(self.dom, "MotionPlan") + self.sp = xmlutils.get_one_element(self.dom, "ServicePlan") + self.meta = xmlutils.get_one_element(self.dom, "CoreMetaData") + + self.coords = self.getmotiondict(self.mp) + # link parameters parsed in parsenets(), applied in parsenodes() + self.linkparams = {} + + self.parsedefaultservices() + self.parseorigin() + self.parsenets() + self.parsenodes() + self.parseservices() + self.parsemeta() + + def getmotiondict(self, mp): + """ + Parse a MotionPlan into a dict with node names for keys and coordinates + for values. + """ + if mp is None: + return {} + coords = {} + for node in mp.getElementsByTagName("Node"): + nodename = str(node.getAttribute("name")) + if nodename == '': + continue + for m in node.getElementsByTagName("motion"): + if m.getAttribute("type") != "stationary": + continue + point = m.getElementsByTagName("point") + if len(point) == 0: + continue + txt = point[0].firstChild + if txt is None: + continue + xyz = map(int, txt.nodeValue.split(',')) + z = None + x, y = xyz[0:2] + if len(xyz) == 3: + z = xyz[2] + coords[nodename] = (x, y, z) + return coords + + @staticmethod + def getcommonattributes(obj): + """ + Helper to return tuple of attributes common to nodes and nets. 
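+
+        Example (illustrative, hypothetical element):
+            <Node id="5" name="n5" type="router"> yields (5, 'n5', 'router').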
+ """ + id = int(obj.getAttribute("id")) + name = str(obj.getAttribute("name")) + type = str(obj.getAttribute("type")) + return id, name, type + + def parsenets(self): + linkednets = [] + for net in self.np.getElementsByTagName("NetworkDefinition"): + id, name, type = self.getcommonattributes(net) + nodecls = xmlutils.xml_type_to_node_class(self.session, type) + if not nodecls: + logger.warn("skipping unknown network node '%s' type '%s'", name, type) + continue + n = self.session.add_object(cls=nodecls, objid=id, name=name, start=self.start) + if name in self.coords: + x, y, z = self.coords[name] + n.setposition(x, y, z) + xmlutils.get_params_set_attrs(net, ("icon", "canvas", "opaque"), n) + if hasattr(n, "canvas") and n.canvas is not None: + n.canvas = int(n.canvas) + # links between two nets (e.g. switch-switch) + for ifc in net.getElementsByTagName("interface"): + netid = str(ifc.getAttribute("net")) + ifcname = str(ifc.getAttribute("name")) + linkednets.append((n, netid, ifcname)) + self.parsemodels(net, n) + # link networks together now that they all have been parsed + for n, netid, ifcname in linkednets: + try: + n2 = n.session.get_object_by_name(netid) + except KeyError: + logger.warn("skipping net %s interface: unknown net %s", n.name, netid) + continue + upstream = False + netif = n.getlinknetif(n2) + if netif is None: + netif = n2.linknet(n) + else: + netif.swapparams('_params_up') + upstream = True + key = (n2.name, ifcname) + if key in self.linkparams: + for k, v in self.linkparams[key]: + netif.setparam(k, v) + if upstream: + netif.swapparams('_params_up') + + def parsenodes(self): + for node in self.np.getElementsByTagName("Node"): + id, name, type = self.getcommonattributes(node) + if type == "rj45": + nodecls = nodeutils.get_node_class(NodeTypes.RJ45) + else: + nodecls = self.nodecls + n = self.session.add_object(cls=nodecls, objid=id, name=name, start=self.start) + if name in self.coords: + x, y, z = self.coords[name] + n.setposition(x, y, z) + n.type = type + xmlutils.get_params_set_attrs(node, ("icon", "canvas", "opaque"), n) + if hasattr(n, "canvas") and n.canvas is not None: + n.canvas = int(n.canvas) + for ifc in node.getElementsByTagName("interface"): + self.parseinterface(n, ifc) + + def parseinterface(self, n, ifc): + """ + Parse a interface block such as: + +
+        <interface name="eth0" net="37">
+            <address type="mac">00:00:00:aa:00:01</address>
+            <address>10.0.0.2/24</address>
+            <address>2001::2/64</address>
+        </interface>
+ """ + name = str(ifc.getAttribute("name")) + netid = str(ifc.getAttribute("net")) + hwaddr = None + addrlist = [] + try: + net = n.session.get_object_by_name(netid) + except KeyError: + logger.warn("skipping node %s interface %s: unknown net %s", n.name, name, netid) + return + for addr in ifc.getElementsByTagName("address"): + addrstr = xmlutils.get_text_child(addr) + if addrstr is None: + continue + if addr.getAttribute("type") == "mac": + hwaddr = addrstr + else: + addrlist.append(addrstr) + i = n.newnetif(net, addrlist=addrlist, hwaddr=hwaddr, ifindex=None, ifname=name) + for model in ifc.getElementsByTagName("model"): + self.parsemodel(model, n, n.objid) + key = (n.name, name) + if key in self.linkparams: + netif = n.netif(i) + for k, v in self.linkparams[key]: + netif.setparam(k, v) + + def parsemodels(self, dom, obj): + """ + Mobility/wireless model config is stored in a ConfigurableManager's + config dict. + """ + nodenum = int(dom.getAttribute("id")) + for model in dom.getElementsByTagName("model"): + self.parsemodel(model, obj, nodenum) + + def parsemodel(self, model, obj, nodenum): + """ + Mobility/wireless model config is stored in a ConfigurableManager's + config dict. + """ + name = model.getAttribute("name") + if name == '': + return + type = model.getAttribute("type") + # convert child text nodes into key=value pairs + kvs = xmlutils.get_text_elements_to_list(model) + + mgr = self.session.mobility + # TODO: the session.confobj() mechanism could be more generic; + # it only allows registering Conf Message callbacks, but here + # we want access to the ConfigurableManager, not the callback + if name[:5] == "emane": + mgr = self.session.emane + elif name[:5] == "netem": + mgr = None + self.parsenetem(model, obj, kvs) + + elif name[:3] == "xen": + mgr = self.session.xen + # TODO: assign other config managers here + if mgr: + mgr.setconfig_keyvalues(nodenum, name, kvs) + + def parsenetem(self, model, obj, kvs): + """ + Determine interface and invoke setparam() using the parsed + (key, value) pairs. + """ + ifname = model.getAttribute("netif") + peer = model.getAttribute("peer") + key = (peer, ifname) + # nodes and interfaces do not exist yet, at this point of the parsing, + # save (key, value) pairs for later + try: + # kvs = map(lambda(k, v): (int(v)), kvs) + kvs = map(self.numericvalue, kvs) + except ValueError: + logger.warn("error parsing link parameters for '%s' on '%s'", ifname, peer) + self.linkparams[key] = kvs + + @staticmethod + def numericvalue(keyvalue): + (key, value) = keyvalue + if '.' in str(value): + value = float(value) + else: + value = int(value) + return key, value + + def parseorigin(self): + """ + Parse any origin tag from the Mobility Plan and set the CoreLocation + reference point appropriately. 
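+
+        Example (illustrative, hypothetical values): an element such as
+        <origin lat="47.5" lon="-122.1" alt="3.0" scale100="100.0"> sets the
+        reference geographic point and scale; an optional <point> child sets
+        the reference xyz coordinates.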
+ """ + origin = xmlutils.get_one_element(self.mp, "origin") + if not origin: + return + location = self.session.location + geo = [] + attrs = ("lat", "lon", "alt") + for i in xrange(3): + a = origin.getAttribute(attrs[i]) + if a is not None: + a = float(a) + geo.append(a) + location.setrefgeo(geo[0], geo[1], geo[2]) + scale = origin.getAttribute("scale100") + if scale is not None: + location.refscale = float(scale) + point = xmlutils.get_one_element(origin, "point") + if point is not None and point.firstChild is not None: + xyz = point.firstChild.nodeValue.split(',') + if len(xyz) == 2: + xyz.append('0.0') + if len(xyz) == 3: + xyz = map(lambda (x): float(x), xyz) + location.refxyz = (xyz[0], xyz[1], xyz[2]) + + def parsedefaultservices(self): + """ + Prior to parsing nodes, use session.services manager to store + default services for node types + """ + for node in self.sp.getElementsByTagName("Node"): + type = node.getAttribute("type") + if type == '': + continue # node-specific service config + services = [] + for service in node.getElementsByTagName("Service"): + services.append(str(service.getAttribute("name"))) + self.session.services.defaultservices[type] = services + logger.info("default services for type %s set to %s" % (type, services)) + + def parseservices(self): + """ + After node objects exist, parse service customizations and add them + to the nodes. + """ + svclists = {} + # parse services and store configs into session.services.configs + for node in self.sp.getElementsByTagName("Node"): + name = node.getAttribute("name") + if name == '': + continue # node type without name + n = self.session.get_object_by_name(name) + if n is None: + logger.warn("skipping service config for unknown node '%s'" % name) + continue + for service in node.getElementsByTagName("Service"): + svcname = service.getAttribute("name") + if self.parseservice(service, n): + if n.objid in svclists: + svclists[n.objid] += "|" + svcname + else: + svclists[n.objid] = svcname + # nodes in NetworkPlan but not in ServicePlan use the + # default services for their type + for node in self.np.getElementsByTagName("Node"): + id, name, type = self.getcommonattributes(node) + if id in svclists: + continue # custom config exists + else: + svclists[int(id)] = None # use defaults + + # associate nodes with services + for objid in sorted(svclists.keys()): + n = self.session.get_object(objid) + self.session.services.addservicestonode(node=n, nodetype=n.type, services_str=svclists[objid]) + + def parseservice(self, service, n): + """ + Use session.services manager to store service customizations before + they are added to a node. 
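+
+        Returns False when the named service is not known to the
+        ServiceManager, True otherwise.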
+ """ + name = service.getAttribute("name") + svc = ServiceManager.get(name) + if svc is None: + return False + values = [] + startup_idx = service.getAttribute("startup_idx") + if startup_idx is not None: + values.append("startidx=%s" % startup_idx) + startup_time = service.getAttribute("start_time") + if startup_time is not None: + values.append("starttime=%s" % startup_time) + dirs = [] + for dir in service.getElementsByTagName("Directory"): + dirname = dir.getAttribute("name") + dirs.append(dirname) + if len(dirs): + values.append("dirs=%s" % dirs) + + startup = [] + shutdown = [] + validate = [] + for cmd in service.getElementsByTagName("Command"): + type = cmd.getAttribute("type") + cmdstr = xmlutils.get_text_child(cmd) + if cmdstr is None: + continue + if type == "start": + startup.append(cmdstr) + elif type == "stop": + shutdown.append(cmdstr) + elif type == "validate": + validate.append(cmdstr) + if len(startup): + values.append("cmdup=%s" % startup) + if len(shutdown): + values.append("cmddown=%s" % shutdown) + if len(validate): + values.append("cmdval=%s" % validate) + + files = [] + for file in service.getElementsByTagName("File"): + filename = file.getAttribute("name") + files.append(filename) + data = xmlutils.get_text_child(file) + typestr = "service:%s:%s" % (name, filename) + self.session.services.setservicefile(nodenum=n.objid, type=typestr, + filename=filename, + srcname=None, data=data) + if len(files): + values.append("files=%s" % files) + if not bool(service.getAttribute("custom")): + return True + self.session.services.setcustomservice(n.objid, svc, values) + return True + + def parsehooks(self, hooks): + ''' Parse hook scripts from XML into session._hooks. + ''' + for hook in hooks.getElementsByTagName("Hook"): + filename = hook.getAttribute("name") + state = hook.getAttribute("state") + data = xmlutils.get_text_child(hook) + if data is None: + data = "" # allow for empty file + type = "hook:%s" % state + self.session.set_hook(type, file_name=filename, source_name=None, data=data) + + def parsemeta(self): + opt = xmlutils.get_one_element(self.meta, "SessionOptions") + if opt: + for param in opt.getElementsByTagName("param"): + k = str(param.getAttribute("name")) + v = str(param.getAttribute("value")) + if v == '': + v = xmlutils.get_text_child(param) # allow attribute/text for newlines + setattr(self.session.options, k, v) + hooks = xmlutils.get_one_element(self.meta, "Hooks") + if hooks: + self.parsehooks(hooks) + meta = xmlutils.get_one_element(self.meta, "MetaData") + if meta: + for param in meta.getElementsByTagName("param"): + k = str(param.getAttribute("name")) + v = str(param.getAttribute("value")) + if v == '': + v = xmlutils.get_text_child(param) + self.session.metadata.add_item(k, v) diff --git a/daemon/core/xml/xmlparser1.py b/daemon/core/xml/xmlparser1.py new file mode 100644 index 00000000..c1861526 --- /dev/null +++ b/daemon/core/xml/xmlparser1.py @@ -0,0 +1,876 @@ +import random +from xml.dom.minidom import Node +from xml.dom.minidom import parse + +from core import constants +from core.enumerations import NodeTypes +from core.misc import log +from core.misc import nodeutils +from core.misc.ipaddress import MacAddress +from core.service import ServiceManager +from core.xml import xmlutils + +logger = log.get_logger(__name__) + + +class CoreDocumentParser1(object): + layer2_device_types = 'hub', 'switch' + layer3_device_types = 'host', 'router' + device_types = layer2_device_types + layer3_device_types + + # TODO: support CORE interface classes: + # 
RJ45Node + # TunnelNode + + def __init__(self, session, filename, options): + """ + + :param core.session.Session session: + :param filename: + :param options: + :return: + """ + self.session = session + self.filename = filename + if 'dom' in options: + # this prevents parsing twice when detecting file versions + self.dom = options['dom'] + else: + self.dom = parse(filename) + self.start = options['start'] + self.nodecls = options['nodecls'] + self.scenario = self.get_scenario(self.dom) + self.location_refgeo_set = False + self.location_refxyz_set = False + # saved link parameters saved when parsing networks and applied later + self.link_params = {} + # map from id-string to objid, for files having node names but + # not node numbers + self.objidmap = {} + self.objids = set() + self.default_services = {} + if self.scenario: + self.parse_scenario() + + @staticmethod + def get_scenario(dom): + scenario = xmlutils.get_first_child_by_tag_name(dom, 'scenario') + if not scenario: + raise ValueError, 'no scenario element found' + version = scenario.getAttribute('version') + if version and version != '1.0': + raise ValueError, \ + 'unsupported scenario version found: \'%s\'' % version + return scenario + + def parse_scenario(self): + self.parse_default_services() + self.parse_session_config() + self.parse_network_plan() + + def assign_id(self, idstr, idval): + if idstr in self.objidmap: + assert self.objidmap[idstr] == idval and idval in self.objids + return + self.objidmap[idstr] = idval + self.objids.add(idval) + + def rand_id(self): + while True: + x = random.randint(0, 0xffff) + if x not in self.objids: + return x + + def get_id(self, idstr): + """ + Get a, possibly new, object id (node number) corresponding to + the given XML string id. + """ + if not idstr: + idn = self.rand_id() + self.objids.add(idn) + return idn + elif idstr in self.objidmap: + return self.objidmap[idstr] + else: + try: + idn = int(idstr) + except ValueError: + idn = self.rand_id() + self.assign_id(idstr, idn) + return idn + + def get_common_attributes(self, node): + """ + Return id, name attributes for the given XML element. These + attributes are common to nodes and networks. + """ + idstr = node.getAttribute('id') + # use an explicit set COREID if it exists + coreid = self.find_core_id(node) + + if coreid: + idn = int(coreid) + if idstr: + self.assign_id(idstr, idn) + else: + idn = self.get_id(idstr) + + # TODO: consider supporting unicode; for now convert to an + # ascii string + namestr = str(node.getAttribute('name')) + return idn, namestr + + def iter_network_member_devices(self, element): + # element can be a network or a channel + for interface in xmlutils.iter_children_with_attribute(element, 'member', 'type', 'interface'): + if_id = xmlutils.get_child_text_trim(interface) + assert if_id # XXX for testing + if not if_id: + continue + device, if_name = self.find_device_with_interface(if_id) + assert device, 'no device for if_id: %s' % if_id # XXX for testing + if device: + yield device, if_name + + def network_class(self, network, network_type): + """ + Return the corresponding CORE network class for the given + network/network_type. 
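+
+        Illustrative mapping: 'ethernet' and 'satcom' map to the PEER_TO_PEER
+        class; a 'wireless' network maps to WIRELESS_LAN unless its channel
+        declares a CORE type starting with 'emane', which maps to EMANE.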
+ """ + if network_type in ['ethernet', 'satcom']: + return nodeutils.get_node_class(NodeTypes.PEER_TO_PEER) + elif network_type == 'wireless': + channel = xmlutils.get_first_child_by_tag_name(network, 'channel') + if channel: + # use an explicit CORE type if it exists + coretype = xmlutils.get_first_child_text_trim_with_attribute(channel, 'type', 'domain', 'CORE') + if coretype: + if coretype == 'basic_range': + return nodeutils.get_node_class(NodeTypes.WIRELESS_LAN) + elif coretype.startswith('emane'): + return nodeutils.get_node_class(NodeTypes.EMANE) + else: + logger.warn('unknown network type: \'%s\'', coretype) + return xmlutils.xml_type_to_node_class(self.session, coretype) + return nodeutils.get_node_class(NodeTypes.WIRELESS_LAN) + logger.warn('unknown network type: \'%s\'', network_type) + return None + + def create_core_object(self, objcls, objid, objname, element, node_type): + obj = self.session.add_object(cls=objcls, objid=objid, name=objname, start=self.start) + logger.info('added object objid=%s name=%s cls=%s' % (objid, objname, objcls)) + self.set_object_position(obj, element) + self.set_object_presentation(obj, element, node_type) + return obj + + def get_core_object(self, idstr): + if idstr and idstr in self.objidmap: + objid = self.objidmap[idstr] + return self.session.get_object(objid) + return None + + def parse_network_plan(self): + # parse the scenario in the following order: + # 1. layer-2 devices + # 2. other networks (ptp/wlan) + # 3. layer-3 devices + self.parse_layer2_devices() + self.parse_networks() + self.parse_layer3_devices() + + def set_ethernet_link_parameters(self, channel, link_params, mobility_model_name, mobility_params): + # save link parameters for later use, indexed by the tuple + # (device_id, interface_name) + for dev, if_name in self.iter_network_member_devices(channel): + if self.device_type(dev) in self.device_types: + dev_id = dev.getAttribute('id') + key = (dev_id, if_name) + self.link_params[key] = link_params + if mobility_model_name or mobility_params: + raise NotImplementedError + + def set_wireless_link_parameters(self, channel, link_params, mobility_model_name, mobility_params): + network = self.find_channel_network(channel) + network_id = network.getAttribute('id') + if network_id in self.objidmap: + nodenum = self.objidmap[network_id] + else: + logger.warn('unknown network: %s', network.toxml('utf-8')) + assert False # XXX for testing + model_name = xmlutils.get_first_child_text_trim_with_attribute(channel, 'type', 'domain', 'CORE') + if not model_name: + model_name = 'basic_range' + if model_name == 'basic_range': + mgr = self.session.mobility + elif model_name.startswith('emane'): + mgr = self.session.emane + elif model_name.startswith('xen'): + mgr = self.session.xen + else: + # TODO: any other config managers? + raise NotImplementedError + mgr.setconfig_keyvalues(nodenum, model_name, link_params.items()) + if mobility_model_name and mobility_params: + mgr.setconfig_keyvalues(nodenum, mobility_model_name, mobility_params.items()) + + def link_layer2_devices(self, device1, ifname1, device2, ifname2): + """ + Link two layer-2 devices together. 
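+
+        The second device's linknet() is used for RJ45 nodes; otherwise the
+        first device's linknet() creates the connecting interface.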
+ """ + devid1 = device1.getAttribute('id') + dev1 = self.get_core_object(devid1) + devid2 = device2.getAttribute('id') + dev2 = self.get_core_object(devid2) + assert dev1 and dev2 # XXX for testing + if dev1 and dev2: + # TODO: review this + if nodeutils.is_node(dev2, NodeTypes.RJ45): + # RJ45 nodes have different linknet() + netif = dev2.linknet(dev1) + else: + netif = dev1.linknet(dev2) + self.set_wired_link_parameters(dev1, netif, devid1, ifname1) + + @classmethod + def parse_xml_value(cls, valtext): + if not valtext: + return None + try: + if not valtext.translate(None, '0123456789'): + val = int(valtext) + else: + val = float(valtext) + except ValueError: + val = str(valtext) + return val + + @classmethod + def parse_parameter_children(cls, parent): + params = {} + for parameter in xmlutils.iter_children_with_name(parent, 'parameter'): + param_name = parameter.getAttribute('name') + assert param_name # XXX for testing + if not param_name: + continue + # TODO: consider supporting unicode; for now convert + # to an ascii string + param_name = str(param_name) + param_val = cls.parse_xml_value(xmlutils.get_child_text_trim(parameter)) + # TODO: check if the name already exists? + if param_name and param_val: + params[param_name] = param_val + return params + + def parse_network_channel(self, channel): + element = self.search_for_element(channel, 'type', lambda x: not x.hasAttributes()) + channel_type = xmlutils.get_child_text_trim(element) + link_params = self.parse_parameter_children(channel) + + mobility = xmlutils.get_first_child_by_tag_name(channel, 'CORE:mobility') + if mobility: + mobility_model_name = xmlutils.get_first_child_text_trim_by_tag_name(mobility, 'type') + mobility_params = self.parse_parameter_children(mobility) + else: + mobility_model_name = None + mobility_params = None + if channel_type == 'wireless': + self.set_wireless_link_parameters(channel, link_params, mobility_model_name, mobility_params) + elif channel_type == 'ethernet': + # TODO: maybe this can be done in the loop below to avoid + # iterating through channel members multiple times + self.set_ethernet_link_parameters(channel, link_params, mobility_model_name, mobility_params) + else: + raise NotImplementedError + layer2_device = [] + for dev, if_name in self.iter_network_member_devices(channel): + if self.device_type(dev) in self.layer2_device_types: + layer2_device.append((dev, if_name)) + assert len(layer2_device) <= 2 + if len(layer2_device) == 2: + self.link_layer2_devices(layer2_device[0][0], layer2_device[0][1], + layer2_device[1][0], layer2_device[1][1]) + + def parse_network(self, network): + """ + Each network element should have an 'id' and 'name' attribute + and include the following child elements: + + type (one) + member (zero or more with type="interface" or type="channel") + channel (zero or more) + """ + layer2_members = set() + layer3_members = 0 + for dev, if_name in self.iter_network_member_devices(network): + if not dev: + continue + devtype = self.device_type(dev) + if devtype in self.layer2_device_types: + layer2_members.add(dev) + elif devtype in self.layer3_device_types: + layer3_members += 1 + else: + raise NotImplementedError + + if len(layer2_members) == 0: + net_type = xmlutils.get_first_child_text_trim_by_tag_name(network, 'type') + if not net_type: + logger.warn('no network type found for network: \'%s\'', network.toxml('utf-8')) + assert False # XXX for testing + net_cls = self.network_class(network, net_type) + objid, net_name = self.get_common_attributes(network) + 
logger.info('parsing network: name=%s id=%s' % (net_name, objid)) + if objid in self.session.objects: + return + n = self.create_core_object(net_cls, objid, net_name, network, None) + + # handle channel parameters + for channel in xmlutils.iter_children_with_name(network, 'channel'): + self.parse_network_channel(channel) + + def parse_networks(self): + """ + Parse all 'network' elements. + """ + for network in xmlutils.iter_descendants_with_name(self.scenario, 'network'): + self.parse_network(network) + + def parse_addresses(self, interface): + mac = [] + ipv4 = [] + ipv6 = [] + hostname = [] + for address in xmlutils.iter_children_with_name(interface, 'address'): + addr_type = address.getAttribute('type') + if not addr_type: + msg = 'no type attribute found for address ' \ + 'in interface: \'%s\'' % interface.toxml('utf-8') + logger.warn(msg) + assert False # XXX for testing + addr_text = xmlutils.get_child_text_trim(address) + if not addr_text: + msg = 'no text found for address ' \ + 'in interface: \'%s\'' % interface.toxml('utf-8') + logger.warn(msg) + assert False # XXX for testing + if addr_type == 'mac': + mac.append(addr_text) + elif addr_type == 'IPv4': + ipv4.append(addr_text) + elif addr_type == 'IPv6': + ipv6.append(addr_text) + elif addr_type == 'hostname': + hostname.append(addr_text) + else: + msg = 'skipping unknown address type \'%s\' in ' \ + 'interface: \'%s\'' % (addr_type, interface.toxml('utf-8')) + logger.warn(msg) + assert False # XXX for testing + return mac, ipv4, ipv6, hostname + + def parse_interface(self, node, device_id, interface): + """ + Each interface can have multiple 'address' elements. + """ + if_name = interface.getAttribute('name') + network = self.find_interface_network_object(interface) + if not network: + msg = 'skipping node \'%s\' interface \'%s\': ' \ + 'unknown network' % (node.name, if_name) + logger.warn(msg) + assert False # XXX for testing + mac, ipv4, ipv6, hostname = self.parse_addresses(interface) + if mac: + hwaddr = MacAddress.from_string(mac[0]) + else: + hwaddr = None + ifindex = node.newnetif(network, addrlist=ipv4 + ipv6, hwaddr=hwaddr, ifindex=None, ifname=if_name) + # TODO: 'hostname' addresses are unused + msg = 'node \'%s\' interface \'%s\' connected ' \ + 'to network \'%s\'' % (node.name, if_name, network.name) + logger.info(msg) + # set link parameters for wired links + if nodeutils.is_node(network, (NodeTypes.HUB, NodeTypes.PEER_TO_PEER, NodeTypes.SWITCH)): + netif = node.netif(ifindex) + self.set_wired_link_parameters(network, netif, device_id) + + def set_wired_link_parameters(self, network, netif, device_id, netif_name=None): + if netif_name is None: + netif_name = netif.name + key = (device_id, netif_name) + if key in self.link_params: + link_params = self.link_params[key] + if self.start: + bw = link_params.get('bw') + delay = link_params.get('delay') + loss = link_params.get('loss') + duplicate = link_params.get('duplicate') + jitter = link_params.get('jitter') + network.linkconfig(netif, bw=bw, delay=delay, loss=loss, duplicate=duplicate, jitter=jitter) + else: + for k, v in link_params.iteritems(): + netif.setparam(k, v) + + @staticmethod + def search_for_element(node, tag_name, match=None): + """ + Search the given node and all ancestors for an element named + tagName that satisfies the given matching function. 
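+
+        Examples, mirroring find_point() and find_core_id() below:
+
+            point = self.search_for_element(node, 'point')
+            alias = self.search_for_element(
+                node, 'alias', lambda x: x.getAttribute('domain') == 'COREID')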
+ """ + while True: + for child in xmlutils.iter_children(node, Node.ELEMENT_NODE): + if child.tagName == tag_name and (match is None or match(child)): + return child + node = node.parentNode + if not node: + break + return None + + @classmethod + def find_core_id(cls, node): + def match(x): + domain = x.getAttribute('domain') + return domain == 'COREID' + + alias = cls.search_for_element(node, 'alias', match) + if alias: + return xmlutils.get_child_text_trim(alias) + return None + + @classmethod + def find_point(cls, node): + return cls.search_for_element(node, 'point') + + @staticmethod + def find_channel_network(channel): + p = channel.parentNode + if p and p.tagName == 'network': + return p + return None + + def find_interface_network_object(self, interface): + network_id = xmlutils.get_first_child_text_trim_with_attribute(interface, 'member', 'type', 'network') + if not network_id: + # support legacy notation: + + + + + """ + for item in iterable: + element = dom.createElement(name) + element.setAttribute(attr_name, item) + parent.appendChild(element) + + +def add_text_elements_from_list(dom, parent, iterable, name, attrs): + """ + XML helper to iterate through a list and add items to parent using tags + of the given name, attributes specified in the attrs tuple, and having the + text of the item within the tags. + Example: addtextelementsfromlist(dom, parent, ('a','b','c'), "letter", + (('show','True'),)) + + a + b + c + + """ + for item in iterable: + element = dom.createElement(name) + for k, v in attrs: + element.setAttribute(k, v) + parent.appendChild(element) + txt = dom.createTextNode(item) + element.appendChild(txt) + + +def add_text_elements_from_tuples(dom, parent, iterable, attrs=()): + """ + XML helper to iterate through a list of tuples and add items to + parent using tags named for the first tuple element, + attributes specified in the attrs tuple, and having the + text of second tuple element. + Example: addtextelementsfromtuples(dom, parent, + (('first','a'),('second','b'),('third','c')), + (('show','True'),)) + + a + b + c + + """ + for name, value in iterable: + element = dom.createElement(name) + for k, v in attrs: + element.setAttribute(k, v) + parent.appendChild(element) + txt = dom.createTextNode(value) + element.appendChild(txt) + + +def get_text_elements_to_list(parent): + """ + XML helper to parse child text nodes from the given parent and return + a list of (key, value) tuples. + """ + r = [] + for n in parent.childNodes: + if n.nodeType != Node.ELEMENT_NODE: + continue + k = str(n.nodeName) + v = '' # sometimes want None here? + for c in n.childNodes: + if c.nodeType != Node.TEXT_NODE: + continue + v = str(c.nodeValue) + break + r.append((k, v)) + return r + + +def add_param_to_parent(dom, parent, name, value): + """ + XML helper to add a tag to the parent + element, when value is not None. + """ + if value is None: + return None + p = dom.createElement("param") + parent.appendChild(p) + p.setAttribute("name", name) + p.setAttribute("value", "%s" % value) + return p + + +def add_text_param_to_parent(dom, parent, name, value): + """ + XML helper to add a value tag to the parent + element, when value is not None. 
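+
+    Illustrative call (placeholder name/value):
+
+        add_text_param_to_parent(dom, parent, "author", "core")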
+ """ + if value is None: + return None + p = dom.createElement("param") + parent.appendChild(p) + p.setAttribute("name", name) + txt = dom.createTextNode(value) + p.appendChild(txt) + return p + + +def add_param_list_to_parent(dom, parent, name, values): + """ + XML helper to return a parameter list and optionally add it to the + parent element: + + + + + """ + if values is None: + return None + p = dom.createElement("paramlist") + if parent: + parent.appendChild(p) + p.setAttribute("name", name) + for v in values: + item = dom.createElement("item") + item.setAttribute("value", str(v)) + p.appendChild(item) + return p + + +def get_one_element(dom, name): + e = dom.getElementsByTagName(name) + if len(e) == 0: + return None + return e[0] + + +def iter_descendants(dom, max_depth=0): + """ + Iterate over all descendant element nodes in breadth first order. + Only consider nodes up to max_depth deep when max_depth is greater + than zero. + """ + nodes = [dom] + depth = 0 + current_depth_nodes = 1 + next_depth_nodes = 0 + while nodes: + n = nodes.pop(0) + for child in n.childNodes: + if child.nodeType == Node.ELEMENT_NODE: + yield child + nodes.append(child) + next_depth_nodes += 1 + current_depth_nodes -= 1 + if current_depth_nodes == 0: + depth += 1 + if max_depth > 0 and depth == max_depth: + return + current_depth_nodes = next_depth_nodes + next_depth_nodes = 0 + + +def iter_matching_descendants(dom, match_function, max_depth=0): + """ + Iterate over descendant elements where matchFunction(descendant) + returns true. Only consider nodes up to max_depth deep when + max_depth is greater than zero. + """ + for d in iter_descendants(dom, max_depth): + if match_function(d): + yield d + + +def iter_descendants_with_name(dom, tag_name, max_depth=0): + """ + Iterate over descendant elements whose name is contained in + tagName (or is named tagName if tagName is a string). Only + consider nodes up to max_depth deep when max_depth is greater than + zero. + """ + if isinstance(tag_name, basestring): + tag_name = (tag_name,) + + def match(d): + return d.tagName in tag_name + + return iter_matching_descendants(dom, match, max_depth) + + +def iter_descendants_with_attribute(dom, tag_name, attr_name, attr_value, max_depth=0): + """ + Iterate over descendant elements whose name is contained in + tagName (or is named tagName if tagName is a string) and have an + attribute named attrName with value attrValue. Only consider + nodes up to max_depth deep when max_depth is greater than zero. + """ + if isinstance(tag_name, basestring): + tag_name = (tag_name,) + + def match(d): + return d.tagName in tag_name and \ + d.getAttribute(attr_name) == attr_value + + return iter_matching_descendants(dom, match, max_depth) + + +def iter_children(dom, node_type): + """ + Iterate over all child elements of the given type. + """ + for child in dom.childNodes: + if child.nodeType == node_type: + yield child + + +def get_text_child(dom): + """ + Return the text node of the given element. + """ + for child in iter_children(dom, Node.TEXT_NODE): + return str(child.nodeValue) + return None + + +def get_child_text_trim(dom): + text = get_text_child(dom) + if text: + text = text.strip() + return text + + +def get_params_set_attrs(dom, param_names, target): + """ + XML helper to get tags and set + the attribute in the target object. String type is used. Target object + attribute is unchanged if the XML attribute is not present. 
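+
+    Illustrative call (placeholder parameter names; 'target' is any object
+    with matching attributes):
+
+        get_params_set_attrs(dom, ("icon", "canvas"), target)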
+ """ + params = dom.getElementsByTagName("param") + for param in params: + param_name = param.getAttribute("name") + value = param.getAttribute("value") + if value is None: + continue # never reached? + if param_name in param_names: + setattr(target, param_name, str(value)) + + +def xml_type_to_node_class(session, type): + """ + Helper to convert from a type string to a class name in nodes.*. + """ + if hasattr(nodes, type): + # TODO: remove and use a mapping to known nodes + logger.error("using eval to retrieve node type: %s", type) + return eval("nodes.%s" % type) + else: + return None + + +def iter_children_with_name(dom, tag_name): + return iter_descendants_with_name(dom, tag_name, 1) + + +def iter_children_with_attribute(dom, tag_name, attr_name, attr_value): + return iter_descendants_with_attribute(dom, tag_name, attr_name, attr_value, 1) + + +def get_first_child_by_tag_name(dom, tag_name): + """ + Return the first child element whose name is contained in tagName + (or is named tagName if tagName is a string). + """ + for child in iter_children_with_name(dom, tag_name): + return child + return None + + +def get_first_child_text_by_tag_name(dom, tag_name): + """ + Return the corresponding text of the first child element whose + name is contained in tagName (or is named tagName if tagName is a + string). + """ + child = get_first_child_by_tag_name(dom, tag_name) + if child: + return get_text_child(child) + return None + + +def get_first_child_text_trim_by_tag_name(dom, tag_name): + text = get_first_child_text_by_tag_name(dom, tag_name) + if text: + text = text.strip() + return text + + +def get_first_child_with_attribute(dom, tag_name, attr_name, attr_value): + """ + Return the first child element whose name is contained in tagName + (or is named tagName if tagName is a string) that has an attribute + named attrName with value attrValue. + """ + for child in \ + iter_children_with_attribute(dom, tag_name, attr_name, attr_value): + return child + return None + + +def get_first_child_text_with_attribute(dom, tag_name, attr_name, attr_value): + """ + Return the corresponding text of the first child element whose + name is contained in tagName (or is named tagName if tagName is a + string) that has an attribute named attrName with value attrValue. 
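+
+    Example, mirroring how the scenario parser reads a CORE-specific
+    channel type (the *_trim variant below also strips whitespace):
+
+        get_first_child_text_with_attribute(channel, 'type', 'domain', 'CORE')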
+ """ + child = get_first_child_with_attribute(dom, tag_name, attr_name, attr_value) + if child: + return get_text_child(child) + return None + + +def get_first_child_text_trim_with_attribute(dom, tag_name, attr_name, attr_value): + text = get_first_child_text_with_attribute(dom, tag_name, attr_name, attr_value) + if text: + text = text.strip() + return text diff --git a/daemon/core/xml/xmlwriter.py b/daemon/core/xml/xmlwriter.py new file mode 100644 index 00000000..14b49458 --- /dev/null +++ b/daemon/core/xml/xmlwriter.py @@ -0,0 +1,12 @@ +from core.xml.xmlwriter0 import CoreDocumentWriter0 +from core.xml.xmlwriter1 import CoreDocumentWriter1 + + +def core_document_writer(session, version): + if version == '0.0': + doc = CoreDocumentWriter0(session) + elif version == '1.0': + doc = CoreDocumentWriter1(session) + else: + raise ValueError('unsupported document version: %s' % version) + return doc diff --git a/daemon/core/xml/xmlwriter0.py b/daemon/core/xml/xmlwriter0.py new file mode 100644 index 00000000..af9e2bd4 --- /dev/null +++ b/daemon/core/xml/xmlwriter0.py @@ -0,0 +1,389 @@ +import os +from xml.dom.minidom import Document + +import pwd + +from core.coreobj import PyCoreNet +from core.coreobj import PyCoreNode +from core.enumerations import RegisterTlvs +from core.misc import log +from core.xml import xmlutils + +logger = log.get_logger(__name__) + + +class CoreDocumentWriter0(Document): + """ + Utility class for writing a CoreSession to XML. The init method builds + an xml.dom.minidom.Document, and the writexml() method saves the XML file. + """ + + def __init__(self, session): + """ + Create an empty Scenario XML Document, then populate it with + objects from the given session. + """ + Document.__init__(self) + self.session = session + self.scenario = self.createElement("Scenario") + self.np = self.createElement("NetworkPlan") + self.mp = self.createElement("MotionPlan") + self.sp = self.createElement("ServicePlan") + self.meta = self.createElement("CoreMetaData") + + self.appendChild(self.scenario) + self.scenario.appendChild(self.np) + self.scenario.appendChild(self.mp) + self.scenario.appendChild(self.sp) + self.scenario.appendChild(self.meta) + + self.populatefromsession() + + def populatefromsession(self): + self.session.emane.setup() # not during runtime? + self.addorigin() + self.adddefaultservices() + self.addnets() + self.addnodes() + self.addmetadata() + + def writexml(self, filename): + logger.info("saving session XML file %s", filename) + f = open(filename, "w") + Document.writexml(self, writer=f, indent="", addindent=" ", newl="\n", encoding="UTF-8") + f.close() + if self.session.user is not None: + uid = pwd.getpwnam(self.session.user).pw_uid + gid = os.stat(self.session.sessiondir).st_gid + os.chown(filename, uid, gid) + + def addnets(self): + """ + Add PyCoreNet objects as NetworkDefinition XML elements. + """ + with self.session._objects_lock: + for net in self.session.objects.itervalues(): + if not isinstance(net, PyCoreNet): + continue + self.addnet(net) + + def addnet(self, net): + """ + Add one PyCoreNet object as a NetworkDefinition XML element. 
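+
+        Besides the name/id/type attributes, this records per-interface
+        netem link parameters, wireless/mobility model configuration and,
+        for tunnel nodes, the GRE key (see the code below).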
+ """ + n = self.createElement("NetworkDefinition") + self.np.appendChild(n) + n.setAttribute("name", net.name) + # could use net.brname + n.setAttribute("id", "%s" % net.objid) + n.setAttribute("type", "%s" % net.__class__.__name__) + self.addnetinterfaces(n, net) + # key used with tunnel node + if hasattr(net, 'grekey') and net.grekey is not None: + n.setAttribute("key", "%s" % net.grekey) + # link parameters + for netif in net.netifs(sort=True): + self.addnetem(n, netif) + # wireless/mobility models + modelconfigs = net.session.mobility.getmodels(net) + modelconfigs += net.session.emane.getmodels(net) + self.addmodels(n, modelconfigs) + self.addposition(net) + + def addnetem(self, n, netif): + """ + Similar to addmodels(); used for writing netem link effects + parameters. TODO: Interface parameters should be moved to the model + construct, then this separate method shouldn't be required. + """ + params = netif.getparams() + if len(params) == 0: + return + model = self.createElement("model") + model.setAttribute("name", "netem") + model.setAttribute("netif", netif.name) + if hasattr(netif, "node") and netif.node is not None: + model.setAttribute("peer", netif.node.name) + # link between switches uses one veth interface + elif hasattr(netif, "othernet") and netif.othernet is not None: + if netif.othernet.name == n.getAttribute("name"): + model.setAttribute("peer", netif.net.name) + else: + model.setAttribute("peer", netif.othernet.name) + model.setAttribute("netif", netif.localname) + # hack used for upstream parameters for link between switches + # (see LxBrNet.linknet()) + if netif.othernet.objid == int(n.getAttribute("id")): + netif.swapparams('_params_up') + params = netif.getparams() + netif.swapparams('_params_up') + has_params = False + for k, v in params: + # default netem parameters are 0 or None + if v is None or v == 0: + continue + if k == "has_netem" or k == "has_tbf": + continue + key = self.createElement(k) + key.appendChild(self.createTextNode("%s" % v)) + model.appendChild(key) + has_params = True + if has_params: + n.appendChild(model) + + def addmodels(self, n, configs): + """ + Add models from a list of model-class, config values tuples. + """ + for m, conf in configs: + model = self.createElement("model") + n.appendChild(model) + model.setAttribute("name", m._name) + type = "wireless" + if m._type == RegisterTlvs.MOBILITY.value: + type = "mobility" + model.setAttribute("type", type) + for i, k in enumerate(m.getnames()): + key = self.createElement(k) + value = conf[i] + if value is None: + value = "" + key.appendChild(self.createTextNode("%s" % value)) + model.appendChild(key) + + def addnodes(self): + """ + Add PyCoreNode objects as node XML elements. + """ + with self.session._objects_lock: + for node in self.session.objects.itervalues(): + if not isinstance(node, PyCoreNode): + continue + self.addnode(node) + + def addnode(self, node): + """ + Add a PyCoreNode object as node XML elements. + """ + n = self.createElement("Node") + self.np.appendChild(n) + n.setAttribute("name", node.name) + n.setAttribute("id", "%s" % node.nodeid()) + if node.type: + n.setAttribute("type", node.type) + self.addinterfaces(n, node) + self.addposition(node) + xmlutils.add_param_to_parent(self, n, "icon", node.icon) + xmlutils.add_param_to_parent(self, n, "canvas", node.canvas) + self.addservices(node) + + def addinterfaces(self, n, node): + """ + Add PyCoreNetIfs to node XML elements. 
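+
+        For each interface this records the attached network, any
+        non-default MTU, MAC/IP addresses and, for EMANE networks, the
+        per-interface model configuration (see below).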
+ """ + for ifc in node.netifs(sort=True): + i = self.createElement("interface") + n.appendChild(i) + i.setAttribute("name", ifc.name) + netmodel = None + if ifc.net: + i.setAttribute("net", ifc.net.name) + if hasattr(ifc.net, "model"): + netmodel = ifc.net.model + if ifc.mtu and ifc.mtu != 1500: + i.setAttribute("mtu", "%s" % ifc.mtu) + # could use ifc.params, transport_type + self.addaddresses(i, ifc) + # per-interface models + if netmodel and netmodel._name[:6] == "emane_": + cfg = self.session.emane.getifcconfig(node.objid, netmodel._name, + None, ifc) + if cfg: + self.addmodels(i, ((netmodel, cfg),)) + + def addnetinterfaces(self, n, net): + """ + Similar to addinterfaces(), but only adds interface elements to the + supplied XML node that would not otherwise appear in the Node elements. + These are any interfaces that link two switches/hubs together. + """ + for ifc in net.netifs(sort=True): + if not hasattr(ifc, "othernet") or not ifc.othernet: + continue + i = self.createElement("interface") + n.appendChild(i) + if net.objid == ifc.net.objid: + i.setAttribute("name", ifc.localname) + i.setAttribute("net", ifc.othernet.name) + else: + i.setAttribute("name", ifc.name) + i.setAttribute("net", ifc.net.name) + + def addposition(self, node): + """ + Add object coordinates as location XML element. + """ + (x, y, z) = node.position.get() + if x is None or y is None: + return + # + mpn = self.createElement("Node") + mpn.setAttribute("name", node.name) + self.mp.appendChild(mpn) + + # + motion = self.createElement("motion") + motion.setAttribute("type", "stationary") + mpn.appendChild(motion) + + # $X$,$Y$,$Z$ + pt = self.createElement("point") + motion.appendChild(pt) + coordstxt = "%s,%s" % (x, y) + if z: + coordstxt += ",%s" % z + coords = self.createTextNode(coordstxt) + pt.appendChild(coords) + + def addorigin(self): + """ + Add origin to Motion Plan using canvas reference point. + The CoreLocation class maintains this reference point. + """ + refgeo = self.session.location.refgeo + origin = self.createElement("origin") + attrs = ("lat", "lon", "alt") + have_origin = False + for i in xrange(3): + if refgeo[i] is not None: + origin.setAttribute(attrs[i], str(refgeo[i])) + have_origin = True + if not have_origin: + return + if self.session.location.refscale != 1.0: # 100 pixels = refscale m + origin.setAttribute("scale100", str(self.session.location.refscale)) + if self.session.location.refxyz != (0.0, 0.0, 0.0): + pt = self.createElement("point") + origin.appendChild(pt) + x, y, z = self.session.location.refxyz + coordstxt = "%s,%s" % (x, y) + if z: + coordstxt += ",%s" % z + coords = self.createTextNode(coordstxt) + pt.appendChild(coords) + + self.mp.appendChild(origin) + + def adddefaultservices(self): + """ + Add default services and node types to the ServicePlan. + """ + for type in self.session.services.defaultservices: + defaults = self.session.services.getdefaultservices(type) + spn = self.createElement("Node") + spn.setAttribute("type", type) + self.sp.appendChild(spn) + for svc in defaults: + s = self.createElement("Service") + spn.appendChild(s) + s.setAttribute("name", str(svc._name)) + + def addservices(self, node): + """ + Add services and their customizations to the ServicePlan. 
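+
+        Only nodes whose service set differs from the per-type defaults
+        are written, and directories, files and commands are included
+        only for customized services (see the checks below).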
+ """ + if len(node.services) == 0: + return + defaults = self.session.services.getdefaultservices(node.type) + if node.services == defaults: + return + spn = self.createElement("Node") + spn.setAttribute("name", node.name) + self.sp.appendChild(spn) + + for svc in node.services: + s = self.createElement("Service") + spn.appendChild(s) + s.setAttribute("name", str(svc._name)) + s.setAttribute("startup_idx", str(svc._startindex)) + if svc._starttime != "": + s.setAttribute("start_time", str(svc._starttime)) + # only record service names if not a customized service + if not svc._custom: + continue + s.setAttribute("custom", str(svc._custom)) + xmlutils.add_elements_from_list(self, s, svc._dirs, "Directory", "name") + + for fn in svc._configs: + if len(fn) == 0: + continue + f = self.createElement("File") + f.setAttribute("name", fn) + # all file names are added to determine when a file has been deleted + s.appendChild(f) + data = self.session.services.getservicefiledata(svc, fn) + if data is None: + # this includes only customized file contents and skips + # the auto-generated files + continue + txt = self.createTextNode(data) + f.appendChild(txt) + + xmlutils.add_text_elements_from_list(self, s, svc._startup, "Command", (("type", "start"),)) + xmlutils.add_text_elements_from_list(self, s, svc._shutdown, "Command", (("type", "stop"),)) + xmlutils.add_text_elements_from_list(self, s, svc._validate, "Command", (("type", "validate"),)) + + def addaddresses(self, i, netif): + """ + Add MAC and IP addresses to interface XML elements. + """ + if netif.hwaddr: + h = self.createElement("address") + i.appendChild(h) + h.setAttribute("type", "mac") + htxt = self.createTextNode("%s" % netif.hwaddr) + h.appendChild(htxt) + for addr in netif.addrlist: + a = self.createElement("address") + i.appendChild(a) + # a.setAttribute("type", ) + atxt = self.createTextNode("%s" % addr) + a.appendChild(atxt) + + def addhooks(self): + """ + Add hook script XML elements to the metadata tag. + """ + hooks = self.createElement("Hooks") + for state in sorted(self.session._hooks.keys()): + for filename, data in self.session._hooks[state]: + hook = self.createElement("Hook") + hook.setAttribute("name", filename) + hook.setAttribute("state", str(state)) + txt = self.createTextNode(data) + hook.appendChild(txt) + hooks.appendChild(hook) + if hooks.hasChildNodes(): + self.meta.appendChild(hooks) + + def addmetadata(self): + """ + Add CORE-specific session meta-data XML elements. 
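+
+        This covers session options that differ from their defaults, hook
+        scripts, and free-form metadata key/value pairs (see below).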
+ """ + # options + options = self.createElement("SessionOptions") + defaults = self.session.options.getdefaultvalues() + for i, (k, v) in enumerate(self.session.options.getkeyvaluelist()): + if str(v) != str(defaults[i]): + xmlutils.add_text_param_to_parent(self, options, k, v) + # addparamtoparent(self, options, k, v) + if options.hasChildNodes(): + self.meta.appendChild(options) + # hook scripts + self.addhooks() + # meta + meta = self.createElement("MetaData") + self.meta.appendChild(meta) + for k, v in self.session.metadata.items(): + xmlutils.add_text_param_to_parent(self, meta, k, v) + # addparamtoparent(self, meta, k, v) diff --git a/daemon/core/xml/xmlwriter1.py b/daemon/core/xml/xmlwriter1.py new file mode 100644 index 00000000..d4ae6f19 --- /dev/null +++ b/daemon/core/xml/xmlwriter1.py @@ -0,0 +1,1018 @@ +import collections +import os +from xml.dom.minidom import Document + +import pwd + +from core import coreobj +from core.enumerations import EventTypes +from core.enumerations import NodeTypes +from core.enumerations import RegisterTlvs +from core.misc import ipaddress +from core.misc import log +from core.misc import nodeutils +from core.netns import nodes +from core.xml import xmlutils +from core.xml.xmldeployment import CoreDeploymentWriter + +logger = log.get_logger(__name__) + + +class Alias: + ID = "COREID" + + +class MembType: + INTERFACE = "interface" + CHANNEL = "channel" + SWITCH = "switch" + HUB = "hub" + TUNNEL = "tunnel" + NETWORK = "network" + + +class NodeType: + ROUTER = "router" + HOST = "host" + MDR = "mdr" + PC = "PC" + RJ45 = "rj45" + SWITCH = "lanswitch" + HUB = "hub" + + +class DevType: + HOST = "host" + ROUTER = "router" + SWITCH = "switch" + HUB = "hub" + + +class NetType: + WIRELESS = "wireless" + ETHERNET = "ethernet" + PTP_WIRED = "point-to-point-wired" + PTP_WIRELESS = "point-to-point-wireless" + + +""" +A link endpoint in CORE +net: the network that the endpoint belongs to +netif: the network interface at this end +id: the identifier for the endpoint +l2devport: if the other end is a layer 2 device, this is the assigned port in that device +params: link/interface parameters +""" +Endpoint = collections.namedtuple('Endpoint', + ['net', 'netif', 'type', 'id', 'l2devport', 'params']) + + +class CoreDocumentWriter1(Document): + """ + Utility class for writing a CoreSession to XML in the NMF scenPlan schema. The init + method builds an xml.dom.minidom.Document, and the writexml() method saves the XML file. + """ + + def __init__(self, session): + """ + Create an empty Scenario XML Document, then populate it with + objects from the given session. + """ + Document.__init__(self) + logger.info('Exporting to NMF XML version 1.0') + with session._objects_lock: + self.scenarioPlan = ScenarioPlan(self, session) + if session.state == EventTypes.RUNTIME_STATE.value: + deployment = CoreDeploymentWriter(self, self.scenarioPlan, session) + deployment.add_deployment() + self.scenarioPlan.setAttribute('deployed', 'true') + + def writexml(self, filename): + """ + Commit to file + """ + logger.info("saving session XML file %s", filename) + f = open(filename, "w") + Document.writexml(self, writer=f, indent="", addindent=" ", newl="\n", encoding="UTF-8") + f.close() + if self.scenarioPlan.coreSession.user is not None: + uid = pwd.getpwnam(self.scenarioPlan.coreSession.user).pw_uid + gid = os.stat(self.scenarioPlan.coreSession.session_dir).st_gid + os.chown(filename, uid, gid) + + +class XmlElement(object): + """ + The base class for all XML elements in the scenario plan. 
Includes + convenience functions. + """ + + def __init__(self, document, parent, element_type): + self.document = document + self.parent = parent + self.base_element = document.createElement("%s" % element_type) + if self.parent is not None: + self.parent.appendChild(self.base_element) + + def createElement(self, element_tag): + return self.document.createElement(element_tag) + + def getTagName(self): + return self.base_element.tagName + + def createTextNode(self, node_tag): + return self.document.createTextNode(node_tag) + + def appendChild(self, child): + if isinstance(child, XmlElement): + self.base_element.appendChild(child.base_element) + else: + self.base_element.appendChild(child) + + @staticmethod + def add_parameter(doc, parent, key, value): + if key and value: + parm = doc.createElement("parameter") + parm.setAttribute("name", str(key)) + parm.appendChild(doc.createTextNode(str(value))) + parent.appendChild(parm) + + def addParameter(self, key, value): + """ + Add a parameter to the xml element + """ + self.add_parameter(self.document, self, key, value) + + def setAttribute(self, name, val): + self.base_element.setAttribute(name, val) + + def getAttribute(self, name): + return self.base_element.getAttribute(name) + + +class NamedXmlElement(XmlElement): + """ + The base class for all "named" xml elements. Named elements are + xml elements in the scenario plan that have an id and a name attribute. + """ + + def __init__(self, scen_plan, parent, element_type, element_name): + XmlElement.__init__(self, scen_plan.document, parent, element_type) + + self.scenPlan = scen_plan + self.coreSession = scen_plan.coreSession + + element_path = '' + self.id = None + if self.parent is not None and isinstance(self.parent, XmlElement) and self.parent.getTagName() != "scenario": + element_path = "%s/" % self.parent.getAttribute("id") + + self.id = "%s%s" % (element_path, element_name) + self.setAttribute("name", element_name) + self.setAttribute("id", self.id) + + def addPoint(self, core_object): + """ + Add position to an object + """ + (x, y, z) = core_object.position.get() + if x is None or y is None: + return + lat, lon, alt = self.coreSession.location.getgeo(x, y, z) + + pt = self.createElement("point") + pt.setAttribute("type", "gps") + pt.setAttribute("lat", "%s" % lat) + pt.setAttribute("lon", "%s" % lon) + if z: + pt.setAttribute("z", "%s" % alt) + self.appendChild(pt) + + def createAlias(self, domain, value_str): + """ + Create an alias element for CORE specific information + """ + a = self.createElement("alias") + a.setAttribute("domain", "%s" % domain) + a.appendChild(self.createTextNode(value_str)) + return a + + +class ScenarioPlan(XmlElement): + """ + Container class for ScenarioPlan. + """ + + def __init__(self, document, session): + XmlElement.__init__(self, document, parent=document, element_type='scenario') + + self.coreSession = session + + self.setAttribute('version', '1.0') + self.setAttribute("name", "%s" % session.name) + + self.setAttribute('xmlns', 'nmfPlan') + self.setAttribute('xmlns:CORE', 'coreSpecific') + self.setAttribute('compiled', 'true') + + self.all_channel_members = dict() + self.last_network_id = 0 + self.addNetworks() + self.addDevices() + + # XXX Do we need these? + # self.session.emane.setup() # not during runtime? + # self.addorigin() + + self.addDefaultServices() + + self.addSessionConfiguration() + + def addNetworks(self): + """ + Add networks in the session to the scenPlan. 
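+
+        Control networks are skipped, as are switches and hubs that CORE
+        considers part of another network (see in_other_network()).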
+ """ + for net in self.coreSession.objects.itervalues(): + if not isinstance(net, coreobj.PyCoreNet): + continue + + if nodeutils.is_node(net, NodeTypes.CONTROL_NET): + continue + + # Do not add switches and hubs that belong to another network + if nodeutils.is_node(net, (NodeTypes.SWITCH, NodeTypes.HUB)): + if in_other_network(net): + continue + + try: + NetworkElement(self, self, net) + except: + logger.exception("error adding node") + if hasattr(net, "name") and net.name: + logger.warn('Unsupported net name: %s, class: %s, type: %s', + net.name, net.__class__.__name__, net.type) + else: + logger.warn('Unsupported net class: %s', net.__class__.__name__) + + def addDevices(self): + """ + Add device elements to the scenario plan. + """ + for node in self.coreSession.objects.itervalues(): + if not isinstance(node, nodes.PyCoreNode): + continue + + try: + DeviceElement(self, self, node) + except: + logger.exception("error adding device") + if hasattr(node, "name") and node.name: + logger.warn('Unsupported device name: %s, class: %s, type: %s', + node.name, node.__class__.__name__, node.type) + else: + logger.warn('Unsupported device: %s', node.__class__.__name__) + + def addDefaultServices(self): + """ + Add default services and node types to the ServicePlan. + """ + defaultservices = self.createElement("CORE:defaultservices") + for type in self.coreSession.services.defaultservices: + defaults = self.coreSession.services.getdefaultservices(type) + spn = self.createElement("device") + spn.setAttribute("type", type) + defaultservices.appendChild(spn) + for svc in defaults: + s = self.createElement("service") + spn.appendChild(s) + s.setAttribute("name", str(svc._name)) + if defaultservices.hasChildNodes(): + self.appendChild(defaultservices) + + def addSessionConfiguration(self): + """ + Add CORE-specific session configuration XML elements. 
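+
+        Includes the geographic origin of the canvas reference point,
+        non-default session options, hook scripts and session metadata
+        (see below).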
+ """ + config = self.createElement("CORE:sessionconfig") + + # origin: geolocation of cartesian coordinate 0,0,0 + refgeo = self.coreSession.location.refgeo + origin = self.createElement("origin") + attrs = ("lat", "lon", "alt") + have_origin = False + for i in xrange(3): + if refgeo[i] is not None: + origin.setAttribute(attrs[i], str(refgeo[i])) + have_origin = True + if have_origin: + if self.coreSession.location.refscale != 1.0: # 100 pixels = refscale m + origin.setAttribute("scale100", str(self.coreSession.location.refscale)) + if self.coreSession.location.refxyz != (0.0, 0.0, 0.0): + pt = self.createElement("point") + origin.appendChild(pt) + x, y, z = self.coreSession.location.refxyz + coordstxt = "%s,%s" % (x, y) + if z: + coordstxt += ",%s" % z + coords = self.createTextNode(coordstxt) + pt.appendChild(coords) + config.appendChild(origin) + + # options + options = self.createElement("options") + defaults = self.coreSession.options.getdefaultvalues() + for i, (k, v) in enumerate(self.coreSession.options.getkeyvaluelist()): + if str(v) != str(defaults[i]): + XmlElement.add_parameter(self.document, options, k, v) + if options.hasChildNodes(): + config.appendChild(options) + + # hook scripts + hooks = self.createElement("hooks") + for state in sorted(self.coreSession._hooks.keys()): + for filename, data in self.coreSession._hooks[state]: + hook = self.createElement("hook") + hook.setAttribute("name", filename) + hook.setAttribute("state", str(state)) + txt = self.createTextNode(data) + hook.appendChild(txt) + hooks.appendChild(hook) + if hooks.hasChildNodes(): + config.appendChild(hooks) + + # metadata + meta = self.createElement("metadata") + for k, v in self.coreSession.metadata.items(): + XmlElement.add_parameter(self.document, meta, k, v) + if meta.hasChildNodes(): + config.appendChild(meta) + + if config.hasChildNodes(): + self.appendChild(config) + + +class NetworkElement(NamedXmlElement): + def __init__(self, scen_plan, parent, network_object): + """ + Add one PyCoreNet object as one network XML element. + """ + element_name = self.getNetworkName(scen_plan, network_object) + NamedXmlElement.__init__(self, scen_plan, parent, "network", element_name) + + self.scenPlan = scen_plan + + self.addPoint(network_object) + + network_type = None + if nodeutils.is_node(network_object, (NodeTypes.WIRELESS_LAN, NodeTypes.EMANE)): + network_type = NetType.WIRELESS + elif nodeutils.is_node(network_object, (NodeTypes.SWITCH, NodeTypes.HUB, + NodeTypes.PEER_TO_PEER, NodeTypes.TUNNEL)): + network_type = NetType.ETHERNET + else: + network_type = "%s" % network_object.__class__.__name__ + + type_element = self.createElement("type") + type_element.appendChild(self.createTextNode(network_type)) + self.appendChild(type_element) + + # Gather all endpoints belonging to this network + self.endpoints = get_endpoints(network_object) + + # Special case for a network of switches and hubs + create_alias = True + self.l2devices = [] + if nodeutils.is_node(network_object, (NodeTypes.SWITCH, NodeTypes.HUB)): + create_alias = False + self.appendChild(type_element) + self.addL2Devices(network_object) + + if create_alias: + a = self.createAlias(Alias.ID, "%d" % int(network_object.objid)) + self.appendChild(a) + + # XXXX TODO: Move this to channel? 
+ # key used with tunnel node + if hasattr(network_object, 'grekey') and network_object.grekey is not None: + a = self.createAlias("COREGREKEY", "%s" % network_object.grekey) + self.appendChild(a) + + self.addNetMembers(network_object) + self.addChannels(network_object) + + presentation_element = self.createElement("CORE:presentation") + add_presentation_element = False + if network_object.icon and not network_object.icon.isspace(): + presentation_element.setAttribute("icon", network_object.icon) + add_presentation_element = True + if network_object.canvas: + presentation_element.setAttribute("canvas", str(network_object.canvas)) + add_presentation_element = True + if add_presentation_element: + self.appendChild(presentation_element) + + def getNetworkName(self, scenario_plan, network_object): + """ + Determine the name to use for this network element + + :param ScenarioPlan scenario_plan: + :param network_object: + :return: + """ + if nodeutils.is_node(network_object, (NodeTypes.PEER_TO_PEER, NodeTypes.TUNNEL)): + name = "net%s" % scenario_plan.last_network_id + scenario_plan.last_network_id += 1 + elif network_object.name: + name = str(network_object.name) # could use net.brname for bridges? + elif nodeutils.is_node(network_object, (NodeTypes.SWITCH, NodeTypes.HUB)): + name = "lan%s" % network_object.objid + else: + name = '' + return name + + def addL2Devices(self, network_object): + """ + Add switches and hubs + """ + + # Add the netObj as a device + self.l2devices.append(DeviceElement(self.scenPlan, self, network_object)) + + # Add downstream switches/hubs + l2devs = [] + neweps = [] + for ep in self.endpoints: + if ep.type and ep.net.objid != network_object.objid: + l2s, eps = get_dowmstream_l2_devices(ep.net) + l2devs.extend(l2s) + neweps.extend(eps) + + for l2dev in l2devs: + self.l2devices.append(DeviceElement(self.scenPlan, self, l2dev)) + + self.endpoints.extend(neweps) + + # XXX: Optimize later + def addNetMembers(self, network_object): + """ + Add members to a network XML element. 
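+
+        A member is added for each typed endpoint; endpoints reached
+        through a layer-2 device port also get an interface member, and
+        tunnel networks get an implied tunnel member (see below).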
+ """ + + for ep in self.endpoints: + if ep.type: + MemberElement(self.scenPlan, self, referenced_type=ep.type, referenced_id=ep.id) + + if ep.l2devport: + MemberElement(self.scenPlan, + self, + referenced_type=MembType.INTERFACE, + referenced_id="%s/%s" % (self.id, ep.l2devport)) + + # XXX Revisit this + # Create implied members given the network type + if nodeutils.is_node(network_object, NodeTypes.TUNNEL): + MemberElement(self.scenPlan, self, referenced_type=MembType.TUNNEL, + referenced_id="%s/%s" % (network_object.name, network_object.name)) + + # XXX: Optimize later + def addChannels(self, network_object): + """ + Add channels to a network XML element + """ + + if nodeutils.is_node(network_object, (NodeTypes.WIRELESS_LAN, NodeTypes.EMANE)): + modelconfigs = network_object.session.mobility.getmodels(network_object) + modelconfigs += network_object.session.emane.getmodels(network_object) + chan = None + for model, conf in modelconfigs: + # Handle mobility parameters below + if model.config_type == RegisterTlvs.MOBILITY.value: + continue + + # Create the channel + if chan is None: + name = "wireless" + chan = ChannelElement(self.scenPlan, self, network_object, + channel_type=model.name, + channel_name=name, + channel_domain="CORE") + + # Add wireless model parameters + for i, key in enumerate(model.getnames()): + value = conf[i] + if value is not None: + chan.addParameter(key, model.valueof(key, conf)) + + for model, conf in modelconfigs: + if model.config_type == RegisterTlvs.MOBILITY.value: + # Add wireless mobility parameters + mobility = XmlElement(self.scenPlan, chan, "CORE:mobility") + # Add a type child + type_element = self.createElement("type") + type_element.appendChild(self.createTextNode(model.name)) + mobility.appendChild(type_element) + for i, key in enumerate(model.getnames()): + value = conf[i] + if value is not None: + mobility.addParameter(key, value) + + # Add members to the channel + if chan is not None: + chan.addChannelMembers(self.endpoints) + self.appendChild(chan.base_element) + elif nodeutils.is_node(network_object, NodeTypes.PEER_TO_PEER): + if len(self.endpoints) < 2: + if len(self.endpoints) == 1: + logger.warn('Pt2Pt network with only 1 endpoint: %s', self.endpoints[0].id) + else: + logger.warn('Pt2Pt network with no endpoints encountered in %s', network_object.name) + return + name = "chan%d" % (0) + chan = ChannelElement(self.scenPlan, self, network_object, + channel_type=NetType.ETHERNET, + channel_name=name) + + # Add interface parameters + if self.endpoints[0].params != self.endpoints[1].params: + logger.warn('Pt2Pt Endpoint parameters do not match in %s', network_object.name) + for key, value in self.endpoints[0].params: + # XXX lifted from original addnetem function. revisit this. + # default netem parameters are 0 or None + if value is None or value == 0: + continue + if key == "has_netem" or key == "has_tbf": + continue + chan.addParameter(key, value) + + # Add members to the channel + chan.addChannelMembers(self.endpoints) + self.appendChild(chan) + + elif nodeutils.is_node(network_object, (NodeTypes.SWITCH, NodeTypes.HUB, NodeTypes.TUNNEL)): + cidx = 0 + channels = [] + for ep in self.endpoints: + # Create one channel member per ep + if ep.type: + name = "chan%d" % cidx + chan = ChannelElement(self.scenPlan, self, network_object, channel_type=NetType.ETHERNET, + channel_name=name) + + # Add interface parameters + for key, value in ep.params: + # XXX lifted from original addnetem function. revisit this. 
+ # default netem parameters are 0 or None + if value is None or value == 0: + continue + if key == "has_netem" or key == "has_tbf": + continue + chan.addParameter(key, value) + + # Add members to the channel + chan.addChannelMembers(ep) + channels.append(chan) + cidx += 1 + + for chan in channels: + self.appendChild(chan) + + +class DeviceElement(NamedXmlElement): + """ + A device element in the scenario plan. + """ + + def __init__(self, scen_plan, parent, device_object): + """ + Add a PyCoreNode object as a device element. + """ + + device_type = None + core_device_type = None + if hasattr(device_object, "type") and device_object.type: + core_device_type = device_object.type + if device_object.type == NodeType.ROUTER: + device_type = DevType.ROUTER + elif device_object.type == NodeType.MDR: + device_type = DevType.ROUTER + elif device_object.type == NodeType.HOST: + device_type = DevType.HOST + elif device_object.type == NodeType.PC: + device_type = DevType.HOST + elif device_object.type == NodeType.RJ45: + device_type = DevType.HOST + node_id = "EMULATOR-HOST" + elif device_object.type == NodeType.HUB: + device_type = DevType.HUB + elif device_object.type == NodeType.SWITCH: + device_type = DevType.SWITCH + else: + # Default custom types (defined in ~/.core/nodes.conf) to HOST + device_type = DevType.HOST + + if device_type is None: + raise Exception + + NamedXmlElement.__init__(self, scen_plan, parent, device_type, device_object.name) + + if core_device_type is not None: + type_element = self.createElement("type") + type_element.setAttribute("domain", "CORE") + type_element.appendChild(self.createTextNode("%s" % core_device_type)) + self.appendChild(type_element) + + self.interfaces = [] + self.addInterfaces(device_object) + alias = self.createAlias(Alias.ID, "%s" % device_object.objid) + self.appendChild(alias) + self.addPoint(device_object) + self.addServices(device_object) + + presentation_element = self.createElement("CORE:presentation") + add_presentation_element = False + if device_object.icon and not device_object.icon.isspace(): + presentation_element.setAttribute("icon", device_object.icon) + add_presentation_element = True + if device_object.canvas: + presentation_element.setAttribute("canvas", str(device_object.canvas)) + add_presentation_element = True + if add_presentation_element: + self.appendChild(presentation_element) + + def addInterfaces(self, device_object): + """ + Add interfaces to a device element. + """ + idx = 0 + for interface_object in device_object.netifs(sort=True): + if interface_object.net and nodeutils.is_node(interface_object.net, NodeTypes.CONTROL_NET): + continue + if isinstance(device_object, nodes.PyCoreNode): + interface_element = InterfaceElement(self.scenPlan, self, device_object, interface_object) + else: # isinstance(node, (nodes.HubNode nodes.SwitchNode)): + interface_element = InterfaceElement(self.scenPlan, self, device_object, interface_object, idx) + idx += 1 + + netmodel = None + if interface_object.net: + if hasattr(interface_object.net, "model"): + netmodel = interface_object.net.model + if interface_object.mtu and interface_object.mtu != 1500: + interface_element.setAttribute("mtu", "%s" % interface_object.mtu) + + # The interfaces returned for Switches and Hubs are the interfaces of the nodes connected to them. + # The addresses are for those interfaces. Don't include them here. 
+ if isinstance(device_object, nodes.PyCoreNode): + # could use ifcObj.params, transport_type + interface_element.addAddresses(interface_object) + # per-interface models + # XXX Remove??? + if netmodel and netmodel.name[:6] == "emane_": + cfg = self.coreSession.emane.getifcconfig(device_object.objid, netmodel.name, + None, interface_object) + if cfg: + interface_element.addModels(((netmodel, cfg),)) + + self.interfaces.append(interface_element) + + def addServices(self, device_object): + """ + Add services and their customizations to the ServicePlan. + """ + if not hasattr(device_object, "services"): + return + + if len(device_object.services) == 0: + return + + defaults = self.coreSession.services.getdefaultservices(device_object.type) + if device_object.services == defaults: + return + spn = self.createElement("CORE:services") + spn.setAttribute("name", device_object.name) + self.appendChild(spn) + + for svc in device_object.services: + s = self.createElement("service") + spn.appendChild(s) + s.setAttribute("name", str(svc._name)) + s.setAttribute("startup_idx", str(svc._startindex)) + if svc._starttime != "": + s.setAttribute("start_time", str(svc._starttime)) + # only record service names if not a customized service + if not svc._custom: + continue + s.setAttribute("custom", str(svc._custom)) + xmlutils.add_elements_from_list(self, s, svc._dirs, "directory", "name") + + for fn in svc._configs: + if len(fn) == 0: + continue + f = self.createElement("file") + f.setAttribute("name", fn) + # all file names are added to determine when a file has been deleted + s.appendChild(f) + data = self.coreSession.services.getservicefiledata(svc, fn) + if data is None: + # this includes only customized file contents and skips + # the auto-generated files + continue + txt = self.createTextNode("\n" + data) + f.appendChild(txt) + + xmlutils.add_text_elements_from_list(self, s, svc._startup, "command", + (("type", "start"),)) + xmlutils.add_text_elements_from_list(self, s, svc._shutdown, "command", + (("type", "stop"),)) + xmlutils.add_text_elements_from_list(self, s, svc._validate, "command", + (("type", "validate"),)) + + +class ChannelElement(NamedXmlElement): + """ + A channel element in the scenario plan + """ + + def __init__(self, scen_plan, parent, network_object, channel_type, channel_name, channel_domain=None): + NamedXmlElement.__init__(self, scen_plan, parent, "channel", channel_name) + ''' + Create a channel element and append a member child referencing this channel element + in the parent element. + ''' + # Create a member element for this channel in the parent + MemberElement(self.scenPlan, parent, referenced_type=MembType.CHANNEL, referenced_id=self.id) + + # Add a type child + type_element = self.createElement("type") + if channel_domain is not None: + type_element.setAttribute("domain", "%s" % channel_domain) + type_element.appendChild(self.createTextNode(channel_type)) + self.appendChild(type_element) + + def addChannelMembers(self, endpoints): + """ + Add network channel members referencing interfaces in the channel + """ + if isinstance(endpoints, list): + # A list of endpoints is given. Create one channel member per endpoint + idx = 0 + for ep in endpoints: + self.addChannelMember(ep.type, ep.id, idx) + idx += 1 + else: + # A single endpoint is given. 
Create one channel member for the endpoint, + # and if the endpoint is associated with a Layer 2 device port, add the + # port as a second member + ep = endpoints + self.addChannelMember(ep.type, ep.id, 0) + if ep.l2devport is not None: + member_id = "%s/%s" % (self.parent.getAttribute("id"), ep.l2devport) + self.addChannelMember(ep.type, member_id, 1) + + def addChannelMember(self, member_interface_type, member_interface_id, member_index): + """ + add a member to a given channel + """ + + m = MemberElement(self.scenPlan, + self, + referenced_type=member_interface_type, + referenced_id=member_interface_id, + index=member_index) + self.scenPlan.all_channel_members[member_interface_id] = m + + +class InterfaceElement(NamedXmlElement): + """ + A network interface element + """ + + def __init__(self, scen_plan, parent, device_object, interface_object, interface_index=None): + """ + Create a network interface element with references to channel that this + interface is used. + """ + element_name = None + if interface_index is not None: + element_name = "e%d" % interface_index + else: + element_name = interface_object.name + NamedXmlElement.__init__(self, scen_plan, parent, "interface", element_name) + self.ifcObj = interface_object + self.addChannelReference() + + def addChannelReference(self): + """ + Add a reference to the channel that uses this interface + """ + try: + cm = self.scenPlan.all_channel_members[self.id] + if cm is not None: + ch = cm.base_element.parentNode + if ch is not None: + net = ch.parentNode + if net is not None: + MemberElement(self.scenPlan, + self, + referenced_type=MembType.CHANNEL, + referenced_id=ch.getAttribute("id"), + index=int(cm.getAttribute("index"))) + MemberElement(self.scenPlan, + self, + referenced_type=MembType.NETWORK, + referenced_id=net.getAttribute("id")) + except KeyError: + # Not an error. This occurs when an interface belongs to a switch + # or a hub within a network and the channel is yet to be defined + logger.exception("noted as not an error, add channel reference error") + + def addAddresses(self, interface_object): + """ + Add MAC and IP addresses to interface XML elements. + """ + if interface_object.hwaddr: + h = self.createElement("address") + self.appendChild(h) + h.setAttribute("type", "mac") + htxt = self.createTextNode("%s" % interface_object.hwaddr) + h.appendChild(htxt) + for addr in interface_object.addrlist: + a = self.createElement("address") + self.appendChild(a) + (ip, sep, mask) = addr.partition('/') + # mask = int(mask) XXX? + if ipaddress.is_ipv4_address(ip): + a.setAttribute("type", "IPv4") + else: + a.setAttribute("type", "IPv6") + + # a.setAttribute("type", ) + atxt = self.createTextNode("%s" % addr) + a.appendChild(atxt) + + # XXX Remove? + def addModels(self, configs): + """ + Add models from a list of model-class, config values tuples. + """ + for m, conf in configs: + node_element = self.createElement("model") + node_element.setAttribute("name", m.name) + type_string = "wireless" + if m.config_type == RegisterTlvs.MOBILITY.value: + type_string = "mobility" + node_element.setAttribute("type", type_string) + for i, k in enumerate(m.getnames()): + key = self.createElement(k) + value = conf[i] + if value is None: + value = "" + key.appendChild(self.createTextNode("%s" % value)) + node_element.appendChild(key) + self.appendChild(node_element) + + +class MemberElement(XmlElement): + """ + Member elements are references to other elements in the network plan elements of the scenario. 
+ They are used in networks to reference channels, in channels to reference interfaces, + and in interfaces to reference networks/channels. Member elements provided allow bi-directional + traversal of network plan components. + """ + + def __init__(self, scene_plan, parent, referenced_type, referenced_id, index=None): + """ + Create a member element + """ + XmlElement.__init__(self, scene_plan.document, parent, "member") + self.setAttribute("type", "%s" % referenced_type) + # See'Understanding the Network Modeling Framework document' + if index is not None: + self.setAttribute("index", "%d" % index) + self.appendChild(self.createTextNode("%s" % referenced_id)) + + +# +# ======================================================================================= +# Helpers +# ======================================================================================= + +def get_endpoint(network_object, interface_object): + """ + Create an Endpoint object given the network and the interface of interest + """ + ep = None + l2devport = None + + # if ifcObj references an interface of a node and is part of this network + if interface_object.net.objid == network_object.objid and hasattr(interface_object, + 'node') and interface_object.node: + params = interface_object.getparams() + if nodeutils.is_node(interface_object.net, (NodeTypes.HUB, NodeTypes.SWITCH)): + l2devport = "%s/e%d" % (interface_object.net.name, interface_object.net.getifindex(interface_object)) + ep = Endpoint(network_object, + interface_object, + type=MembType.INTERFACE, + id="%s/%s" % (interface_object.node.name, interface_object.name), + l2devport=l2devport, + params=params) + + # else if ifcObj references another node and is connected to this network + elif hasattr(interface_object, "othernet"): + if interface_object.othernet.objid == network_object.objid: + # #hack used for upstream parameters for link between switches + # #(see LxBrNet.linknet()) + interface_object.swapparams('_params_up') + params = interface_object.getparams() + interface_object.swapparams('_params_up') + owner = interface_object.net + l2devport = "%s/e%d" % ( + interface_object.othernet.name, interface_object.othernet.getifindex(interface_object)) + + # Create the endpoint. + # XXX the interface index might not match what is shown in the gui. For switches and hubs, + # The gui assigns its index but doesn't pass it to the daemon and vice versa. + # The gui stores it's index in the IMN file, which it reads and writes without daemon intervention. + # Fix this! + ep = Endpoint(owner, + interface_object, + type=MembType.INTERFACE, + id="%s/%s/e%d" % (network_object.name, owner.name, owner.getifindex(interface_object)), + l2devport=l2devport, + params=params) + # else this node has an interface that belongs to another network + # i.e. a switch/hub interface connected to another switch/hub and CORE has the other switch/hub + # as the containing network + else: + ep = Endpoint(network_object, interface_object, type=None, id=None, l2devport=None, params=None) + + return ep + + +def get_endpoints(network_object): + """ + Gather all endpoints of the given network + """ + # Get all endpoints + endpoints = [] + + # XXX TODO: How to represent physical interfaces. 
+ # + # NOTE: The following code works except it would be missing physical (rj45) interfaces from Pt2pt links + # TODO: Fix data in net.netifs to include Pt2Pt physical interfaces + # + # Iterate through all the nodes in the scenario, then iterate through all the interface for each node, + # and check if the interface is connected to this network. + + for interface_object in network_object.netifs(sort=True): + try: + ep = get_endpoint(network_object, interface_object) + if ep is not None: + endpoints.append(ep) + except: + logger.exception("error geting enpoints") + + return endpoints + + +def get_dowmstream_l2_devices(network_object): + """ + Helper function for getting a list of all downstream layer 2 devices from the given netObj + """ + l2_device_objects = [network_object] + allendpoints = [] + myendpoints = get_endpoints(network_object) + allendpoints.extend(myendpoints) + for ep in myendpoints: + if ep.type and ep.net.objid != network_object.objid: + l2s, eps = get_dowmstream_l2_devices(ep.net) + l2_device_objects.extend(l2s) + allendpoints.extend(eps) + + return l2_device_objects, allendpoints + + +def get_all_network_interfaces(session): + """ + Gather all network interfacecs in the session + """ + netifs = [] + for node in session.objects.itervalues(): + for netif in node.netifs(sort=True): + if netif not in netifs: + netifs.append(netif) + return netifs + + +def in_other_network(network_object): + """ + Determine if CORE considers a given network object to be part of another network. + Note: CORE considers layer 2 devices to be their own networks. However, if a l2 device + is connected to another device, it is possible that one of its ports belong to the other + l2 device's network (thus, "othernet"). + """ + for netif in network_object.netifs(sort=True): + if hasattr(netif, "othernet"): + if netif.othernet.objid != network_object.objid: + return True + return False diff --git a/daemon/requirements.txt b/daemon/requirements.txt new file mode 100644 index 00000000..897df186 --- /dev/null +++ b/daemon/requirements.txt @@ -0,0 +1,8 @@ +enum34==1.1.6 +grpcio==1.0.0 +grpcio-tools==1.0.0 +pycco==0.5.1 +sphinx==1.4.8 +sphinx_rtd_theme==0.1.9 +pytest==3.0.7 +pytest-runner==2.11.1 diff --git a/daemon/setup.cfg b/daemon/setup.cfg new file mode 100644 index 00000000..9af7e6f1 --- /dev/null +++ b/daemon/setup.cfg @@ -0,0 +1,2 @@ +[aliases] +test=pytest \ No newline at end of file diff --git a/daemon/tests/conftest.py b/daemon/tests/conftest.py new file mode 100644 index 00000000..3e196c4b --- /dev/null +++ b/daemon/tests/conftest.py @@ -0,0 +1,133 @@ +""" +Unit test fixture module. +""" +import os +import pytest + +from core.session import Session +from core.misc import ipaddress +from core.misc import nodemaps +from core.misc import nodeutils +from core.netns import nodes + + +class Core(object): + def __init__(self, session, ip_prefix): + self.session = session + self.ip_prefix = ip_prefix + self.current_ip = 1 + self.nodes = {} + self.node_ips = {} + + def create_node(self, name): + node = self.session.add_object(cls=nodes.CoreNode, name=name) + self.nodes[name] = node + + def add_interface(self, network, name): + node_ip = self.ip_prefix.addr(self.current_ip) + self.current_ip += 1 + self.node_ips[name] = node_ip + node = self.nodes[name] + interface_id = node.newnetif(network, ["%s/%s" % (node_ip, self.ip_prefix.prefixlen)]) + return node.netif(interface_id) + + def get_node(self, name): + """ + Retrieve node from current session. 
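The new daemon/requirements.txt and setup.cfg above wire the test suite into pytest (the test=pytest alias means python setup.py test and a plain pytest run against daemon/tests should both work). A minimal sketch of a test written against the core fixture from this conftest.py, mirroring the switch test added later in this patch; the node names are illustrative, and the suite generally needs root privileges since it creates network namespaces:

    from core.netns import nodes


    def test_switch_ping(core):
        # build a small topology: two namespace nodes attached to a switch
        switch = core.session.add_object(cls=nodes.SwitchNode)
        core.create_node("n1")
        core.create_node("n2")
        core.add_interface(switch, "n1")
        core.add_interface(switch, "n2")

        # start the emulation; node directories are created at this point
        core.session.instantiate()

        # ping() returns the exit status of ping, so 0 (falsy) means success
        assert not core.ping("n1", "n2")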
+ + :param str name: name of node to retrieve + :return: core node + :rtype: core.netns.nodes.CoreNode + """ + return self.nodes[name] + + def get_ip(self, name): + return self.node_ips[name] + + def link(self, network, from_interface, to_interface): + network.link(from_interface, to_interface) + + def configure_link(self, network, interface_one, interface_two, values, unidirectional=False): + network.linkconfig(netif=interface_one, netif2=interface_two, **values) + + if not unidirectional: + network.linkconfig(netif=interface_two, netif2=interface_one, **values) + + def ping(self, from_name, to_name): + from_node = self.nodes[from_name] + to_ip = str(self.get_ip(to_name)) + return from_node.cmd(["ping", "-c", "3", to_ip]) + + def ping_output(self, from_name, to_name): + from_node = self.nodes[from_name] + to_ip = str(self.get_ip(to_name)) + vcmd, stdin, stdout, stderr = from_node.popen(["ping", "-i", "0.05", "-c", "3", to_ip]) + return stdout.read().strip() + + def iping(self, from_name, to_name): + from_node = self.nodes[from_name] + to_ip = str(self.get_ip(to_name)) + from_node.icmd(["ping", "-i", "0.01", "-c", "10", to_ip]) + + def iperf(self, from_name, to_name): + from_node = self.nodes[from_name] + to_node = self.nodes[to_name] + to_ip = str(self.get_ip(to_name)) + + # run iperf server, run client, kill iperf server + vcmd, stdin, stdout, stderr = to_node.popen(["iperf", "-s", "-u", "-y", "C"]) + from_node.cmd(["iperf", "-u", "-t", "5", "-c", to_ip]) + to_node.cmd(["killall", "-9", "iperf"]) + + return stdout.read().strip() + + def assert_nodes(self): + for node in self.nodes.itervalues(): + assert os.path.exists(node.nodedir) + + def create_link_network(self): + # create switch + ptp_node = self.session.add_object(cls=nodes.PtpNet) + + # create nodes + self.create_node("n1") + self.create_node("n2") + + # add interfaces + interface_one = self.add_interface(ptp_node, "n1") + interface_two = self.add_interface(ptp_node, "n2") + + # instantiate session + self.session.instantiate() + + # assert node directories created + self.assert_nodes() + + return ptp_node, interface_one, interface_two + + +@pytest.fixture() +def session(): + # configure default nodes + node_map = nodemaps.CLASSIC_NODES + nodeutils.set_node_map(node_map) + + # create and return session + session_fixture = Session(1, persistent=True) + assert os.path.exists(session_fixture.session_dir) + yield session_fixture + + # cleanup + print "shutting down session" + session_fixture.shutdown() + assert not os.path.exists(session_fixture.session_dir) + + +@pytest.fixture(scope="module") +def ip_prefix(): + return ipaddress.Ipv4Prefix("10.83.0.0/16") + + +@pytest.fixture() +def core(session, ip_prefix): + return Core(session, ip_prefix) diff --git a/daemon/tests/test_core.py b/daemon/tests/test_core.py new file mode 100644 index 00000000..ab26042b --- /dev/null +++ b/daemon/tests/test_core.py @@ -0,0 +1,326 @@ +""" +Unit tests for testing with a CORE switch. +""" + +import time + +from core.mobility import BasicRangeModel +from core.netns import nodes +from core.phys.pnodes import PhysicalNode + + +class TestCore: + def test_physical(self, core): + """ + Test physical node network. 
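The session fixture above creates a persistent Session, yields it to the test, and on teardown shuts it down and asserts the session directory is gone, so every test implicitly exercises cleanup as well. A hedged sketch of using that fixture directly, without the Core wrapper (the node name is illustrative):

    import os

    from core.netns import nodes


    def test_node_directory(session):
        # add a single namespace node and start the session
        node = session.add_object(cls=nodes.CoreNode, name="n1")
        session.instantiate()

        # instantiation should have created the node's private directory
        assert os.path.exists(node.nodedir)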
+ + :param conftest.Core core: core fixture to test with + """ + + # create switch node + switch_node = core.session.add_object(cls=nodes.SwitchNode) + + # create a physical node + physical_node = core.session.add_object(cls=PhysicalNode, name="p1") + core.nodes[physical_node.name] = physical_node + + # create regular node + core.create_node("n1") + + # add interface + core.add_interface(switch_node, "n1") + core.add_interface(switch_node, "p1") + + # instantiate session + core.session.instantiate() + + # assert node directories created + core.assert_nodes() + + # ping n2 from n1 and assert success + status = core.ping("n1", "p1") + assert not status + + def test_ptp(self, core): + """ + Test ptp node network. + + :param conftest.Core core: core fixture to test with + """ + + # create ptp + ptp_node = core.session.add_object(cls=nodes.PtpNet) + + # create nodes + core.create_node("n1") + core.create_node("n2") + + # add interfaces + core.add_interface(ptp_node, "n1") + core.add_interface(ptp_node, "n2") + + # instantiate session + core.session.instantiate() + + # assert node directories created + core.assert_nodes() + + # ping n2 from n1 and assert success + status = core.ping("n1", "n2") + assert not status + + def test_hub(self, core): + """ + Test basic hub network. + + :param conftest.Core core: core fixture to test with + """ + + # create hub + hub_node = core.session.add_object(cls=nodes.HubNode) + + # create nodes + core.create_node("n1") + core.create_node("n2") + + # add interfaces + core.add_interface(hub_node, "n1") + core.add_interface(hub_node, "n2") + + # instantiate session + core.session.instantiate() + + # assert node directories created + core.assert_nodes() + + # ping n2 from n1 and assert success + status = core.ping("n1", "n2") + assert not status + + def test_switch(self, core): + """ + Test basic switch network. + + :param conftest.Core core: core fixture to test with + """ + + # create switch + switch_node = core.session.add_object(cls=nodes.SwitchNode) + + # create nodes + core.create_node("n1") + core.create_node("n2") + + # add interfaces + core.add_interface(switch_node, "n1") + core.add_interface(switch_node, "n2") + + # instantiate session + core.session.instantiate() + + # assert node directories created + core.assert_nodes() + + # ping n2 from n1 and assert success + status = core.ping("n1", "n2") + assert not status + + def test_wlan_basic_range_good(self, core): + """ + Test basic wlan network. + + :param conftest.Core core: core fixture to test with + """ + + # create wlan + wlan_node = core.session.add_object(cls=nodes.WlanNode) + values = BasicRangeModel.getdefaultvalues() + wlan_node.setmodel(BasicRangeModel, values) + + # create nodes + core.create_node("n1") + core.create_node("n2") + + # add interfaces + interface_one = core.add_interface(wlan_node, "n1") + interface_two = core.add_interface(wlan_node, "n2") + + # link nodes in wlan + core.link(wlan_node, interface_one, interface_two) + + # mark node position as together + core.get_node("n1").setposition(0, 0) + core.get_node("n2").setposition(0, 0) + + # instantiate session + core.session.instantiate() + + # assert node directories created + core.assert_nodes() + + # ping n2 from n1 and assert success + status = core.ping("n1", "n2") + assert not status + + def test_wlan_basic_range_bad(self, core): + """ + Test basic wlan network with leveraging basic range model. 
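For the out-of-range case below, the geometry is what drives the expected failure: the test relies on the default BasicRangeModel range of 275 distance units, and the two node positions it uses are roughly 707 units apart, so the ping should not succeed. A quick check of the numbers:

    import math

    # separation between the positions used in the out-of-range test,
    # (0, 0) and (500, 500); well beyond the default range of 275 units
    distance = math.hypot(500 - 0, 500 - 0)
    assert distance > 275   # distance is ~707.1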
+ + :param conftest.Core core: core fixture to test with + """ + + # create wlan + wlan_node = core.session.add_object(cls=nodes.WlanNode) + values = BasicRangeModel.getdefaultvalues() + wlan_node.setmodel(BasicRangeModel, values) + + # create nodes + core.create_node("n1") + core.create_node("n2") + + # add interfaces + interface_one = core.add_interface(wlan_node, "n1") + interface_two = core.add_interface(wlan_node, "n2") + + # link nodes in wlan + core.link(wlan_node, interface_one, interface_two) + + # move nodes out of range, default range check is 275 + core.get_node("n1").setposition(0, 0) + core.get_node("n2").setposition(500, 500) + + # instantiate session + core.session.instantiate() + + # assert node directories created + core.assert_nodes() + + # ping n2 from n1 and assert failure + time.sleep(1) + status = core.ping("n1", "n2") + assert status + + def test_link_bandwidth(self, core): + """ + Test ptp node network with modifying link bandwidth. + + :param conftest.Core core: core fixture to test with + """ + + # create link network + ptp_node, interface_one, interface_two = core.create_link_network() + + # output csv index + bandwidth_index = 8 + + # run iperf, validate normal bandwidth + stdout = core.iperf("n1", "n2") + assert stdout + value = int(stdout.split(',')[bandwidth_index]) + assert 900000 <= value <= 1100000 + + # change bandwidth in bits per second + bandwidth = 500000 + core.configure_link(ptp_node, interface_one, interface_two, { + "bw": bandwidth + }) + + # run iperf again + stdout = core.iperf("n1", "n2") + assert stdout + value = int(stdout.split(',')[bandwidth_index]) + assert 400000 <= value <= 600000 + + def test_link_loss(self, core): + """ + Test ptp node network with modifying link packet loss. + + :param conftest.Core core: core fixture to test with + """ + + # create link network + ptp_node, interface_one, interface_two = core.create_link_network() + + # output csv index + loss_index = -2 + + # run iperf, validate normal bandwidth + stdout = core.iperf("n1", "n2") + assert stdout + value = float(stdout.split(',')[loss_index]) + assert 0 <= value <= 0.5 + + # change bandwidth in bits per second + loss = 50 + core.configure_link(ptp_node, interface_one, interface_two, { + "loss": loss + }) + + # run iperf again + stdout = core.iperf("n1", "n2") + assert stdout + value = float(stdout.split(',')[loss_index]) + assert 45 <= value <= 55 + + def test_link_delay(self, core): + """ + Test ptp node network with modifying link packet delay. + + :param conftest.Core core: core fixture to test with + """ + + # create link network + ptp_node, interface_one, interface_two = core.create_link_network() + + # run ping for delay information + stdout = core.ping_output("n1", "n2") + assert stdout + rtt_line = stdout.split("\n")[-1] + rtt_values = rtt_line.split("=")[1].split("ms")[0].strip() + rtt_avg = float(rtt_values.split("/")[2]) + assert 0 <= rtt_avg <= 0.1 + + # change delay in microseconds + delay = 1000000 + core.configure_link(ptp_node, interface_one, interface_two, { + "delay": delay + }) + + # run ping for delay information again + stdout = core.ping_output("n1", "n2") + assert stdout + rtt_line = stdout.split("\n")[-1] + rtt_values = rtt_line.split("=")[1].split("ms")[0].strip() + rtt_avg = float(rtt_values.split("/")[2]) + assert 1800 <= rtt_avg <= 2200 + + def test_link_jitter(self, core): + """ + Test ptp node network with modifying link packet jitter. 
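The bandwidth, loss, and delay tests above all funnel through configure_link(), which simply forwards keyword arguments to the network's linkconfig() in each direction. A sketch of what that amounts to for a ptp link, using the same parameter names and units the tests use (bandwidth in bits per second, delay and jitter in microseconds, loss in percent); combining all of them in a single call is an assumption here, the tests set them one at a time:

    # ptp_node, interface_one and interface_two as returned by
    # create_link_network() in conftest.py
    # shape the link in one direction ...
    ptp_node.linkconfig(netif=interface_one, netif2=interface_two,
                        bw=500000, delay=1000000, loss=50, jitter=1000000)
    # ... and mirror it for the reverse direction, as configure_link()
    # does when unidirectional is False
    ptp_node.linkconfig(netif=interface_two, netif2=interface_one,
                        bw=500000, delay=1000000, loss=50, jitter=1000000)

A one-way delay of 1000000 microseconds is one second, which is why the delay test accepts an average RTT around 2000 ms after the change.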
+ + :param conftest.Core core: core fixture to test with + """ + + # create link network + ptp_node, interface_one, interface_two = core.create_link_network() + + # output csv index + jitter_index = 9 + + # run iperf + stdout = core.iperf("n1", "n2") + assert stdout + value = float(stdout.split(",")[jitter_index]) + assert -0.5 <= value <= 0.05 + + # change jitter in microseconds + jitter = 1000000 + core.configure_link(ptp_node, interface_one, interface_two, { + "jitter": jitter + }) + + # run iperf again + stdout = core.iperf("n1", "n2") + assert stdout + value = float(stdout.split(",")[jitter_index]) + assert 200 <= value <= 500 diff --git a/daemon/tests/test_gui.py b/daemon/tests/test_gui.py new file mode 100644 index 00000000..e1dad455 --- /dev/null +++ b/daemon/tests/test_gui.py @@ -0,0 +1,120 @@ +""" +Unit tests for testing with a CORE switch. +""" +from core.api import coreapi, dataconversion +from core.api.coreapi import CoreExecuteTlv +from core.enumerations import CORE_API_PORT, EventTypes, EventTlvs, MessageFlags, LinkTlvs, LinkTypes, ExecuteTlvs, \ + MessageTypes +from core.misc import ipaddress +from core.netns.nodes import SwitchNode, CoreNode + + +def cmd(node, exec_cmd): + """ + Convenience method for sending commands to a node using the legacy API. + + :param node: The node the command should be issued too + :param exec_cmd: A string with the command to be run + :return: Returns the result of the command + """ + # Set up the command api message + tlv_data = CoreExecuteTlv.pack(ExecuteTlvs.NODE.value, node.objid) + tlv_data += CoreExecuteTlv.pack(ExecuteTlvs.NUMBER.value, 1) + tlv_data += CoreExecuteTlv.pack(ExecuteTlvs.COMMAND.value, exec_cmd) + message = coreapi.CoreExecMessage.pack(MessageFlags.STRING.value | MessageFlags.TEXT.value, tlv_data) + node.session.broker.handlerawmsg(message) + + # Now wait for the response + server = node.session.broker.servers["localhost"] + server.sock.settimeout(50.0) + + # receive messages until we get our execute response + result = None + while True: + message_header = server.sock.recv(coreapi.CoreMessage.header_len) + message_type, message_flags, message_length = coreapi.CoreMessage.unpack_header(message_header) + message_data = server.sock.recv(message_length) + + # If we get the right response return the results + print "received response message: %s" % MessageTypes(message_type) + if message_type == MessageTypes.EXECUTE.value: + message = coreapi.CoreExecMessage(message_flags, message_header, message_data) + result = message.get_tlv(ExecuteTlvs.RESULT.value) + break + + return result + + +class TestGui: + def test_broker(self, core): + """ + Test session broker creation. 
+ + :param conftest.Core core: core fixture to test with + """ + + prefix = ipaddress.Ipv4Prefix("10.83.0.0/16") + daemon = "localhost" + + # add server + core.session.broker.addserver(daemon, "127.0.0.1", CORE_API_PORT) + + # setup server + core.session.broker.setupserver(daemon) + + # do not want the recvloop running as we will deal ourselves + core.session.broker.dorecvloop = False + + # have broker handle a configuration state change + core.session.set_state(EventTypes.CONFIGURATION_STATE.value) + tlv_data = coreapi.CoreEventTlv.pack(EventTlvs.TYPE.value, EventTypes.CONFIGURATION_STATE.value) + raw_event_message = coreapi.CoreEventMessage.pack(0, tlv_data) + core.session.broker.handlerawmsg(raw_event_message) + + # create a switch node + switch = core.session.add_object(cls=SwitchNode, name="switch", start=False) + switch.setposition(x=80, y=50) + switch.server = daemon + + # retrieve switch data representation, create a switch message for broker to handle + switch_data = switch.data(MessageFlags.ADD.value) + switch_message = dataconversion.convert_node(switch_data) + core.session.broker.handlerawmsg(switch_message) + + # create node one + core.create_node("n1") + node_one = core.get_node("n1") + node_one.server = daemon + + # create node two + core.create_node("n2") + node_two = core.get_node("n2") + node_two.server = daemon + + # create node messages for the broker to handle + for node in [node_one, node_two]: + node_data = node.data(MessageFlags.ADD.value) + node_message = dataconversion.convert_node(node_data) + core.session.broker.handlerawmsg(node_message) + + # create links to switch from nodes for broker to handle + for index, node in enumerate([node_one, node_two], start=1): + tlv_data = coreapi.CoreLinkTlv.pack(LinkTlvs.N1_NUMBER.value, switch.objid) + tlv_data += coreapi.CoreLinkTlv.pack(LinkTlvs.N2_NUMBER.value, node.objid) + tlv_data += coreapi.CoreLinkTlv.pack(LinkTlvs.TYPE.value, LinkTypes.WIRED.value) + tlv_data += coreapi.CoreLinkTlv.pack(LinkTlvs.INTERFACE2_NUMBER.value, 0) + ip4_address = prefix.addr(index) + tlv_data += coreapi.CoreLinkTlv.pack(LinkTlvs.INTERFACE2_IP4.value, ip4_address) + tlv_data += coreapi.CoreLinkTlv.pack(LinkTlvs.INTERFACE2_IP4_MASK.value, prefix.prefixlen) + raw_link_message = coreapi.CoreLinkMessage.pack(MessageFlags.ADD.value, tlv_data) + core.session.broker.handlerawmsg(raw_link_message) + + # change session to instantiation state + tlv_data = coreapi.CoreEventTlv.pack(EventTlvs.TYPE.value, EventTypes.INSTANTIATION_STATE.value) + raw_event_message = coreapi.CoreEventMessage.pack(0, tlv_data) + core.session.broker.handlerawmsg(raw_event_message) + + # Get the ip or last node and ping it from the first + print "pinging from the first to the last node" + pingip = cmd(node_one, "ip -4 -o addr show dev eth0").split()[3].split("/")[0] + print cmd(node_two, "ping -c 5 " + pingip) From 55a6e2dcefddafbad955a6b48f61ab2a7c56fcfd Mon Sep 17 00:00:00 2001 From: Rod A Santiago Date: Mon, 19 Jun 2017 18:09:28 -0700 Subject: [PATCH 22/22] merged cleanup branch with master --- .gitignore | 4 + README.rst | 3 + daemon/core/__init__.py | 14 +- daemon/core/addons/__init__.py | 8 +- daemon/core/api/__init__.py | 3 + daemon/core/api/coreapi.py | 1295 ++++++---- daemon/core/api/data.py | 333 --- daemon/core/broker.py | 952 ++++--- daemon/core/bsd/netgraph.py | 74 +- daemon/core/bsd/nodes.py | 167 +- daemon/core/bsd/vnet.py | 132 +- daemon/core/bsd/vnode.py | 195 +- daemon/core/conf.py | 527 ++-- daemon/core/coreobj.py | 763 ++++-- daemon/core/coreserver.py | 1964 
+++----------- daemon/core/emane/__init__.py | 54 + daemon/core/emane/bypass.py | 50 +- daemon/core/emane/commeffect.py | 107 +- daemon/core/emane/emane.py | 1437 ----------- daemon/core/emane/ieee80211abg.py | 133 +- daemon/core/emane/nodes.py | 266 +- daemon/core/emane/rfpipe.py | 98 +- daemon/core/emane/tdma.py | 94 +- daemon/core/emane/universal.py | 158 +- daemon/core/location.py | 265 +- daemon/core/misc/LatLongUTMconversion.py | 262 +- daemon/core/misc/event.py | 238 +- daemon/core/misc/ipaddr.py | 230 -- daemon/core/misc/quagga.py | 147 +- daemon/core/misc/utils.py | 409 ++- daemon/core/misc/utm.py | 28 +- daemon/core/misc/xmldeployment.py | 205 -- daemon/core/misc/xmlparser.py | 46 - daemon/core/misc/xmlparser0.py | 420 --- daemon/core/misc/xmlparser1.py | 942 ------- daemon/core/misc/xmlsession.py | 34 - daemon/core/misc/xmlutils.py | 303 --- daemon/core/misc/xmlwriter.py | 15 - daemon/core/misc/xmlwriter0.py | 377 --- daemon/core/misc/xmlwriter1.py | 989 -------- daemon/core/mobility.py | 1229 +++++---- daemon/core/netns/nodes.py | 713 ++++-- daemon/core/netns/vif.py | 284 ++- daemon/core/netns/vnet.py | 562 ++-- daemon/core/netns/vnode.py | 604 +++-- daemon/core/netns/vnodeclient.py | 223 +- daemon/core/phys/pnodes.py | 230 +- daemon/core/pycore.py | 27 - daemon/core/sdt.py | 462 ++-- daemon/core/service.py | 1080 ++++---- daemon/core/services/__init__.py | 4 +- daemon/core/services/bird.py | 123 +- daemon/core/services/dockersvc.py | 103 +- daemon/core/services/nrl.py | 284 ++- daemon/core/services/quagga.py | 329 +-- daemon/core/services/security.py | 112 +- daemon/core/services/startup.py | 28 +- daemon/core/services/ucarp.py | 374 ++- daemon/core/services/utility.py | 290 ++- daemon/core/services/xorp.py | 258 +- daemon/core/session.py | 2252 ++++++++++------- daemon/core/xen/xen.py | 542 ++-- daemon/core/xen/xenconfig.py | 291 +-- daemon/data/core.conf | 26 +- daemon/examples/netns/basicrange.py | 88 +- daemon/examples/netns/daemonnodes.py | 170 +- daemon/examples/netns/distributed.py | 105 +- daemon/examples/netns/emane80211.py | 62 +- daemon/examples/netns/howmanynodes.py | 150 +- .../examples/netns/iperf-performance-chain.py | 68 +- daemon/examples/netns/ospfmanetmdrtest.py | 216 +- daemon/examples/netns/switch.py | 52 +- daemon/examples/netns/switchtest.py | 65 +- daemon/examples/netns/wlanemanetests.py | 696 ++--- daemon/examples/netns/wlantest.py | 62 +- daemon/examples/stopsession.py | 36 +- daemon/sbin/core-daemon | 422 +-- daemon/sbin/core-manage | 116 +- daemon/setup.py | 55 +- daemon/src/setup.py | 62 +- python-prefix.py | 21 +- 81 files changed, 11596 insertions(+), 15021 deletions(-) delete mode 100644 daemon/core/api/data.py delete mode 100644 daemon/core/emane/emane.py delete mode 100644 daemon/core/misc/ipaddr.py delete mode 100644 daemon/core/misc/xmldeployment.py delete mode 100644 daemon/core/misc/xmlparser.py delete mode 100644 daemon/core/misc/xmlparser0.py delete mode 100644 daemon/core/misc/xmlparser1.py delete mode 100644 daemon/core/misc/xmlsession.py delete mode 100644 daemon/core/misc/xmlutils.py delete mode 100644 daemon/core/misc/xmlwriter.py delete mode 100644 daemon/core/misc/xmlwriter0.py delete mode 100644 daemon/core/misc/xmlwriter1.py delete mode 100644 daemon/core/pycore.py diff --git a/.gitignore b/.gitignore index 3c6a8ea2..bd5c7af1 100644 --- a/.gitignore +++ b/.gitignore @@ -16,3 +16,7 @@ configure core-*.tar.gz debian stamp-h1 + +# intelli +*.iml +.idea diff --git a/README.rst b/README.rst index 116671c9..5fb323ac 100644 --- 
a/README.rst +++ b/README.rst @@ -38,6 +38,9 @@ To build this software you should use: make sudo make install +Note: You may need to pass the proxy settings to sudo make install: + sudo make install HTTP_PROXY= + Here is what is installed with 'make install': /usr/local/bin/core-gui diff --git a/daemon/core/__init__.py b/daemon/core/__init__.py index fc9a2033..50d263c3 100644 --- a/daemon/core/__init__.py +++ b/daemon/core/__init__.py @@ -1,23 +1,17 @@ # Copyright (c)2010-2012 the Boeing Company. # See the LICENSE file included in this distribution. -"""core +""" +core Top-level Python package containing CORE components. -See http://www.nrl.navy.mil/itd/ncs/products/core and -http://code.google.com/p/coreemu/ for more information on CORE. +See http://www.nrl.navy.mil/itd/ncs/products/core for more information on CORE. Pieces can be imported individually, for example - import core.netns.vnode - -or everything listed in __all__ can be imported using - - from core import * + from core.netns import vnode """ -__all__ = [] - # Automatically import all add-ons listed in addons.__all__ from addons import * diff --git a/daemon/core/addons/__init__.py b/daemon/core/addons/__init__.py index 1250143e..c7cbb64e 100644 --- a/daemon/core/addons/__init__.py +++ b/daemon/core/addons/__init__.py @@ -1,6 +1,6 @@ -"""Optional add-ons - -Add on files can be put in this directory. Everything listed in -__all__ is automatically loaded by the main core module. """ +Optional add ons can be put in this directory. Everything listed in __all__ is automatically +loaded by the main core module. +""" + __all__ = [] diff --git a/daemon/core/api/__init__.py b/daemon/core/api/__init__.py index e69de29b..c2e3c613 100644 --- a/daemon/core/api/__init__.py +++ b/daemon/core/api/__init__.py @@ -0,0 +1,3 @@ +""" +Contains code specific to the legacy TCP API for interacting with the TCL based GUI. +""" diff --git a/daemon/core/api/coreapi.py b/daemon/core/api/coreapi.py index 2872d414..97013a22 100644 --- a/daemon/core/api/coreapi.py +++ b/daemon/core/api/coreapi.py @@ -1,634 +1,999 @@ -# -# CORE -# Copyright (c)2010-2013 the Boeing Company. -# See the LICENSE file included in this distribution. -# -# authors: Tom Goff -# Jeff Ahrenholz -# -''' -coreapi.py: uses coreapi_data for Message and TLV types, and defines TLV data +""" +Uses coreapi_data for message and TLV types, and defines TLV data types and objects used for parsing and building CORE API messages. -''' +CORE API messaging is leveraged for communication with the GUI. +""" + +import socket import struct -from core.api.data import * -from core.misc.ipaddr import * +from enum import Enum + +from core.enumerations import ConfigTlvs +from core.enumerations import EventTlvs +from core.enumerations import EventTypes +from core.enumerations import ExceptionTlvs +from core.enumerations import ExecuteTlvs +from core.enumerations import FileTlvs +from core.enumerations import InterfaceTlvs +from core.enumerations import LinkTlvs +from core.enumerations import MessageFlags +from core.enumerations import MessageTypes +from core.enumerations import NodeTlvs +from core.enumerations import RegisterTlvs +from core.enumerations import SessionTlvs +from core.misc import log +from core.misc.ipaddress import IpAddress +from core.misc.ipaddress import MacAddress + +logger = log.get_logger(__name__) class CoreTlvData(object): - datafmt = None - datatype = None - padlen = None + """ + Helper base class used for packing and unpacking values using struct. 
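The data_format/pad_len pair in the TLV data helpers that follow is easiest to see with a concrete struct call: the uint32 helper defined a bit further down packs with "!2xI", two pad bytes followed by a network-order 32-bit integer, and subtracts the pad bytes when reporting the TLV length. A small illustration (the value is arbitrary):

    import struct

    # "!2xI": network byte order, two pad bytes, then an unsigned 32-bit int
    data = struct.pack("!2xI", 1000)
    assert len(data) == 6                        # six bytes on the wire
    assert struct.unpack("!2xI", data) == (1000,)
    # the length reported by CoreTlvDataUint32.pack() is 6 - pad_len = 4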
+ """ + + # format string for packing data + data_format = None + # python data type for the data + data_type = None + # pad length for data after packing + pad_len = None @classmethod def pack(cls, value): - "return: (tlvlen, tlvdata)" - tmp = struct.pack(cls.datafmt, value) - return len(tmp) - cls.padlen, tmp + """ + Convenience method for packing data using the struct module. + + :param value: value to pack + :return: length of data and the packed data itself + :rtype: tuple + """ + data = struct.pack(cls.data_format, value) + length = len(data) - cls.pad_len + return length, data @classmethod def unpack(cls, data): - return struct.unpack(cls.datafmt, data)[0] - + """ + Convenience method for unpacking data using the struct module. + + :param data: data to unpack + :return: the value of the unpacked data + """ + return struct.unpack(cls.data_format, data)[0] + @classmethod - def packstring(cls, strvalue): - return cls.pack(cls.fromstring(strvalue)) - + def pack_string(cls, value): + """ + Convenience method for packing data from a string representation. + + :param str value: value to pack + :return: length of data and the packed data itself + :rtype: tuple + """ + return cls.pack(cls.from_string(value)) + @classmethod - def fromstring(cls, s): - return cls.datatype(s) + def from_string(cls, value): + """ + Retrieve the value type from a string representation. + + :param str value: value to get a data type from + :return: value parse from string representation + """ + return cls.data_type(value) + class CoreTlvDataObj(CoreTlvData): + """ + Helper class for packing custom object data. + """ + @classmethod def pack(cls, obj): - "return: (tlvlen, tlvdata)" - tmp = struct.pack(cls.datafmt, cls.getvalue(obj)) - return len(tmp) - cls.padlen, tmp + """ + Convenience method for packing custom object data. + + :param obj: custom object to pack + :return: length of data and the packed data itself + :rtype: tuple + """ + value = cls.get_value(obj) + return super(CoreTlvDataObj, cls).pack(value) @classmethod def unpack(cls, data): - return cls.newobj(struct.unpack(cls.datafmt, data)[0]) + """ + Convenience method for unpacking custom object data. + + :param data: data to unpack custom object from + :return: unpacked custom object + """ + data = super(CoreTlvDataObj, cls).unpack(data) + return cls.new_obj(data) @staticmethod - def getvalue(obj): + def get_value(obj): + """ + Method that will be used to retrieve the data to pack from a custom object. + + :param obj: custom object to get data to pack + :return: data value to pack + """ raise NotImplementedError @staticmethod - def newobj(obj): + def new_obj(obj): + """ + Method for retrieving data to unpack from an object. + + :param obj: object to get unpack data from + :return: value of unpacked data + """ raise NotImplementedError + class CoreTlvDataUint16(CoreTlvData): - datafmt = "!H" - datatype = int - padlen = 0 + """ + Helper class for packing uint16 data. + """ + data_format = "!H" + data_type = int + pad_len = 0 + class CoreTlvDataUint32(CoreTlvData): - datafmt = "!2xI" - datatype = int - padlen = 2 + """ + Helper class for packing uint32 data. + """ + data_format = "!2xI" + data_type = int + pad_len = 2 + class CoreTlvDataUint64(CoreTlvData): - datafmt = "!2xQ" - datatype = long - padlen = 2 + """ + Helper class for packing uint64 data. + """ + data_format = "!2xQ" + data_type = long + pad_len = 2 + class CoreTlvDataString(CoreTlvData): - datatype = str + """ + Helper class for packing string data. 
+ """ + data_type = str - @staticmethod - def pack(value): + @classmethod + def pack(cls, value): + """ + Convenience method for packing string data. + + :param str value: string to pack + :return: length of data packed and the packed data + :rtype: tuple + """ if not isinstance(value, str): - raise ValueError, "value not a string: %s" % value - if len(value) < 256: - hdrsiz = CoreTlv.hdrsiz - else: - hdrsiz = CoreTlv.longhdrsiz - padlen = -(hdrsiz + len(value)) % 4 - return len(value), value + '\0' * padlen + raise ValueError("value not a string: %s" % value) + + if len(value) < 256: + header_len = CoreTlv.header_len + else: + header_len = CoreTlv.long_header_len + + pad_len = -(header_len + len(value)) % 4 + return len(value), value + "\0" * pad_len + + @classmethod + def unpack(cls, data): + """ + Convenience method for unpacking string data. + + :param str data: unpack string data + :return: unpacked string data + """ + return data.rstrip("\0") - @staticmethod - def unpack(data): - return data.rstrip('\0') class CoreTlvDataUint16List(CoreTlvData): - ''' List of unsigned 16-bit values. - ''' - datatype = tuple + """ + List of unsigned 16-bit values. + """ + data_type = tuple + data_format = "!H" - @staticmethod - def pack(values): - if not isinstance(values, tuple): - raise ValueError, "value not a tuple: %s" % values - data = "" - for v in values: - data += struct.pack("!H", v) - padlen = -(CoreTlv.hdrsiz + len(data)) % 4 - return len(data), data + '\0' * padlen - - @staticmethod - def unpack(data): - datafmt = "!%dH" % (len(data)/2) - return struct.unpack(datafmt, data) - @classmethod - def fromstring(cls, s): - return tuple(map(lambda(x): int(x), s.split())) + def pack(cls, values): + """ + Convenience method for packing a uint 16 list. -class CoreTlvDataIPv4Addr(CoreTlvDataObj): - datafmt = "!2x4s" - datatype = IPAddr.fromstring - padlen = 2 + :param list values: unint 16 list to pack + :return: length of data packed and the packed data + :rtype: tuple + """ + if not isinstance(values, tuple): + raise ValueError("value not a tuple: %s" % values) + + data = "" + for value in values: + data += struct.pack(cls.data_format, value) + + pad_len = -(CoreTlv.header_len + len(data)) % 4 + return len(data), data + "\0" * pad_len + + @classmethod + def unpack(cls, data): + """ + Convenience method for unpacking a uint 16 list. + + :param data: data to unpack + :return: unpacked data + """ + data_format = "!%dH" % (len(data) / 2) + return struct.unpack(data_format, data) + + @classmethod + def from_string(cls, value): + """ + Retrieves a unint 16 list from a string + + :param str value: string representation of a uint 16 list + :return: unint 16 list + :rtype: list + """ + return tuple(map(lambda (x): int(x), value.split())) + + +class CoreTlvDataIpv4Addr(CoreTlvDataObj): + """ + Utility class for packing/unpacking Ipv4 addresses. + """ + data_type = IpAddress.from_string + data_format = "!2x4s" + pad_len = 2 @staticmethod - def getvalue(obj): + def get_value(obj): + """ + Retrieve Ipv4 address value from object. + + :param core.misc.ipaddress.IpAddress obj: ip address to get value from + :return: + """ return obj.addr @staticmethod - def newobj(value): - return IPAddr(af = AF_INET, addr = value) + def new_obj(value): + """ + Retrieve Ipv4 address from a string representation. 
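String TLVs are padded differently: the value itself gains NUL bytes so that the TLV header plus the value lands on a 32-bit boundary, while the reported length stays the unpadded string length. For example, assuming the 2-byte short header, a 3-character value picks up 3 pad bytes so the framed TLV comes to 8 bytes:

    from core.api.coreapi import CoreTlvDataString

    length, data = CoreTlvDataString.pack("abc")
    assert length == 3
    assert data == "abc\0\0\0"   # header (2) + value (3) + padding (3) = 8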
+ + :param str value: value to get Ipv4 address from + :return: Ipv4 address + :rtype: core.misc.ipaddress.IpAddress + """ + logger.info("getting new ipv4 address for: %s", value) + return IpAddress(af=socket.AF_INET, address=value) + class CoreTlvDataIPv6Addr(CoreTlvDataObj): - datafmt = "!16s2x" - datatype = IPAddr.fromstring - padlen = 2 + """ + Utility class for packing/unpacking Ipv6 addresses. + """ + data_format = "!16s2x" + data_type = IpAddress.from_string + pad_len = 2 @staticmethod - def getvalue(obj): + def get_value(obj): + """ + Retrieve Ipv6 address value from object. + + :param core.misc.ipaddress.IpAddress obj: ip address to get value from + :return: + """ return obj.addr @staticmethod - def newobj(value): - return IPAddr(af = AF_INET6, addr = value) + def new_obj(value): + """ + Retrieve Ipv6 address from a string representation. + + :param str value: value to get Ipv4 address from + :return: Ipv4 address + :rtype: core.misc.ipaddress.IpAddress + """ + return IpAddress(af=socket.AF_INET6, address=value) + class CoreTlvDataMacAddr(CoreTlvDataObj): - datafmt = "!2x8s" - datatype = MacAddr.fromstring - padlen = 2 + """ + Utility class for packing/unpacking mac addresses. + """ + data_format = "!2x8s" + data_type = MacAddress.from_string + pad_len = 2 @staticmethod - def getvalue(obj): - return '\0\0' + obj.addr # extend to 64 bits + def get_value(obj): + """ + Retrieve Ipv6 address value from object. + + :param core.misc.ipaddress.MacAddress obj: mac address to get value from + :return: + """ + # extend to 64 bits + return "\0\0" + obj.addr @staticmethod - def newobj(value): - return MacAddr(addr = value[2:]) # only use 48 bits + def new_obj(value): + """ + Retrieve mac address from a string representation. + + :param str value: value to get Ipv4 address from + :return: Ipv4 address + :rtype: core.misc.ipaddress.MacAddress + """ + # only use 48 bits + return MacAddress(address=value[2:]) + class CoreTlv(object): - hdrfmt = "!BB" - hdrsiz = struct.calcsize(hdrfmt) + """ + Base class for representing CORE TLVs. + """ + header_format = "!BB" + header_len = struct.calcsize(header_format) - longhdrfmt = "!BBH" - longhdrsiz = struct.calcsize(longhdrfmt) + long_header_format = "!BBH" + long_header_len = struct.calcsize(long_header_format) - tlvtypemap = {} - tlvdataclsmap = {} + tlv_type_map = Enum + tlv_data_class_map = {} - def __init__(self, tlvtype, tlvdata): - self.tlvtype = tlvtype - if tlvdata: + def __init__(self, tlv_type, tlv_data): + """ + Create a CoreTlv instance. + + :param int tlv_type: tlv type + :param tlv_data: data to unpack + :return: unpacked data + """ + self.tlv_type = tlv_type + if tlv_data: try: - self.value = self.tlvdataclsmap[self.tlvtype].unpack(tlvdata) + self.value = self.tlv_data_class_map[self.tlv_type].unpack(tlv_data) except KeyError: - self.value = tlvdata + self.value = tlv_data else: self.value = None @classmethod def unpack(cls, data): - "parse data and return (tlv, remainingdata)" - tlvtype, tlvlen = struct.unpack(cls.hdrfmt, data[:cls.hdrsiz]) - hdrsiz = cls.hdrsiz - if tlvlen == 0: - tlvtype, zero, tlvlen = struct.unpack(cls.longhdrfmt, - data[:cls.longhdrsiz]) - hdrsiz = cls.longhdrsiz - tlvsiz = hdrsiz + tlvlen - tlvsiz += -tlvsiz % 4 # for 32-bit alignment - return cls(tlvtype, data[hdrsiz:tlvsiz]), data[tlvsiz:] + """ + Parse data and return unpacked class. 
+ + :param data: data to unpack + :return: unpacked data class + """ + tlv_type, tlv_len = struct.unpack(cls.header_format, data[:cls.header_len]) + header_len = cls.header_len + if tlv_len == 0: + tlv_type, zero, tlv_len = struct.unpack(cls.long_header_format, data[:cls.long_header_len]) + header_len = cls.long_header_len + tlv_size = header_len + tlv_len + # for 32-bit alignment + tlv_size += -tlv_size % 4 + return cls(tlv_type, data[header_len:tlv_size]), data[tlv_size:] @classmethod - def pack(cls, tlvtype, value): - try: - tlvlen, tlvdata = cls.tlvdataclsmap[tlvtype].pack(value) - except Exception, e: - raise ValueError, "TLV packing error type=%s: %s" % (tlvtype, e) - if tlvlen < 256: - hdr = struct.pack(cls.hdrfmt, tlvtype, tlvlen) + def pack(cls, tlv_type, value): + """ + Pack a TLV value, based on type. + + :param int tlv_type: type of data to pack + :param value: data to pack + :return: header and packed data + """ + tlv_len, tlv_data = cls.tlv_data_class_map[tlv_type].pack(value) + + if tlv_len < 256: + hdr = struct.pack(cls.header_format, tlv_type, tlv_len) else: - hdr = struct.pack(cls.longhdrfmt, tlvtype, 0, tlvlen) - return hdr + tlvdata - - @classmethod - def packstring(cls, tlvtype, value): - return cls.pack(tlvtype, cls.tlvdataclsmap[tlvtype].fromstring(value)) + hdr = struct.pack(cls.long_header_format, tlv_type, 0, tlv_len) - def typestr(self): + return hdr + tlv_data + + @classmethod + def pack_string(cls, tlv_type, value): + """ + Pack data type from a string representation + + :param int tlv_type: type of data to pack + :param str value: string representation of data + :return: header and packed data + """ + return cls.pack(tlv_type, cls.tlv_data_class_map[tlv_type].from_string(value)) + + def type_str(self): + """ + Retrieve type string for this data type. + + :return: data type name + :rtype: str + """ try: - return self.tlvtypemap[self.tlvtype] - except KeyError: - return "unknown tlv type: %s" % str(self.tlvtype) + return self.tlv_type_map(self.tlv_type).name + except ValueError: + return "unknown tlv type: %s" % str(self.tlv_type) def __str__(self): - return "%s " % \ - (self.__class__.__name__, self.typestr(), self.value) + """ + String representation of this data type. + + :return: string representation + :rtype: str + """ + return "%s " % (self.__class__.__name__, self.type_str(), self.value) + class CoreNodeTlv(CoreTlv): - tlvtypemap = node_tlvs - tlvdataclsmap = { - CORE_TLV_NODE_NUMBER: CoreTlvDataUint32, - CORE_TLV_NODE_TYPE: CoreTlvDataUint32, - CORE_TLV_NODE_NAME: CoreTlvDataString, - CORE_TLV_NODE_IPADDR: CoreTlvDataIPv4Addr, - CORE_TLV_NODE_MACADDR: CoreTlvDataMacAddr, - CORE_TLV_NODE_IP6ADDR: CoreTlvDataIPv6Addr, - CORE_TLV_NODE_MODEL: CoreTlvDataString, - CORE_TLV_NODE_EMUSRV: CoreTlvDataString, - CORE_TLV_NODE_SESSION: CoreTlvDataString, - CORE_TLV_NODE_XPOS: CoreTlvDataUint16, - CORE_TLV_NODE_YPOS: CoreTlvDataUint16, - CORE_TLV_NODE_CANVAS: CoreTlvDataUint16, - CORE_TLV_NODE_EMUID: CoreTlvDataUint32, - CORE_TLV_NODE_NETID: CoreTlvDataUint32, - CORE_TLV_NODE_SERVICES: CoreTlvDataString, - CORE_TLV_NODE_LAT: CoreTlvDataString, - CORE_TLV_NODE_LONG: CoreTlvDataString, - CORE_TLV_NODE_ALT: CoreTlvDataString, - CORE_TLV_NODE_ICON: CoreTlvDataString, - CORE_TLV_NODE_OPAQUE: CoreTlvDataString, + """ + Class for representing CORE Node TLVs. 
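With the generic pack()/unpack() above plus a concrete TLV map (the node TLV class whose map follows immediately below), a single TLV round trip looks like this; the node number is arbitrary:

    from core.api.coreapi import CoreNodeTlv
    from core.enumerations import NodeTlvs

    # pack a node-number TLV: 2-byte header plus 6 bytes of "!2xI" payload,
    # already 32-bit aligned
    data = CoreNodeTlv.pack(NodeTlvs.NUMBER.value, 7)

    # unpack returns the parsed TLV object plus any remaining bytes
    tlv, remaining = CoreNodeTlv.unpack(data)
    assert tlv.value == 7
    assert remaining == ""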
+ """ + + tlv_type_map = NodeTlvs + tlv_data_class_map = { + NodeTlvs.NUMBER.value: CoreTlvDataUint32, + NodeTlvs.TYPE.value: CoreTlvDataUint32, + NodeTlvs.NAME.value: CoreTlvDataString, + NodeTlvs.IP_ADDRESS.value: CoreTlvDataIpv4Addr, + NodeTlvs.MAC_ADDRESS.value: CoreTlvDataMacAddr, + NodeTlvs.IP6_ADDRESS.value: CoreTlvDataIPv6Addr, + NodeTlvs.MODEL.value: CoreTlvDataString, + NodeTlvs.EMULATION_SERVER.value: CoreTlvDataString, + NodeTlvs.SESSION.value: CoreTlvDataString, + NodeTlvs.X_POSITION.value: CoreTlvDataUint16, + NodeTlvs.Y_POSITION.value: CoreTlvDataUint16, + NodeTlvs.CANVAS.value: CoreTlvDataUint16, + NodeTlvs.EMULATION_ID.value: CoreTlvDataUint32, + NodeTlvs.NETWORK_ID.value: CoreTlvDataUint32, + NodeTlvs.SERVICES.value: CoreTlvDataString, + NodeTlvs.LATITUDE.value: CoreTlvDataString, + NodeTlvs.LONGITUDE.value: CoreTlvDataString, + NodeTlvs.ALTITUDE.value: CoreTlvDataString, + NodeTlvs.ICON.value: CoreTlvDataString, + NodeTlvs.OPAQUE.value: CoreTlvDataString, } + class CoreLinkTlv(CoreTlv): - tlvtypemap = link_tlvs - tlvdataclsmap = { - CORE_TLV_LINK_N1NUMBER: CoreTlvDataUint32, - CORE_TLV_LINK_N2NUMBER: CoreTlvDataUint32, - CORE_TLV_LINK_DELAY: CoreTlvDataUint64, - CORE_TLV_LINK_BW: CoreTlvDataUint64, - CORE_TLV_LINK_PER: CoreTlvDataString, - CORE_TLV_LINK_DUP: CoreTlvDataString, - CORE_TLV_LINK_JITTER: CoreTlvDataUint64, - CORE_TLV_LINK_MER: CoreTlvDataUint16, - CORE_TLV_LINK_BURST: CoreTlvDataUint16, - CORE_TLV_LINK_SESSION: CoreTlvDataString, - CORE_TLV_LINK_MBURST: CoreTlvDataUint16, - CORE_TLV_LINK_TYPE: CoreTlvDataUint32, - CORE_TLV_LINK_GUIATTR: CoreTlvDataString, - CORE_TLV_LINK_UNI: CoreTlvDataUint16, - CORE_TLV_LINK_EMUID: CoreTlvDataUint32, - CORE_TLV_LINK_NETID: CoreTlvDataUint32, - CORE_TLV_LINK_KEY: CoreTlvDataUint32, - CORE_TLV_LINK_IF1NUM: CoreTlvDataUint16, - CORE_TLV_LINK_IF1IP4: CoreTlvDataIPv4Addr, - CORE_TLV_LINK_IF1IP4MASK: CoreTlvDataUint16, - CORE_TLV_LINK_IF1MAC: CoreTlvDataMacAddr, - CORE_TLV_LINK_IF1IP6: CoreTlvDataIPv6Addr, - CORE_TLV_LINK_IF1IP6MASK: CoreTlvDataUint16, - CORE_TLV_LINK_IF2NUM: CoreTlvDataUint16, - CORE_TLV_LINK_IF2IP4: CoreTlvDataIPv4Addr, - CORE_TLV_LINK_IF2IP4MASK: CoreTlvDataUint16, - CORE_TLV_LINK_IF2MAC: CoreTlvDataMacAddr, - CORE_TLV_LINK_IF2IP6: CoreTlvDataIPv6Addr, - CORE_TLV_LINK_IF2IP6MASK: CoreTlvDataUint16, - CORE_TLV_LINK_IF1NAME: CoreTlvDataString, - CORE_TLV_LINK_IF2NAME: CoreTlvDataString, - CORE_TLV_LINK_OPAQUE: CoreTlvDataString, + """ + Class for representing CORE link TLVs. 
+ """ + + tlv_type_map = LinkTlvs + tlv_data_class_map = { + LinkTlvs.N1_NUMBER.value: CoreTlvDataUint32, + LinkTlvs.N2_NUMBER.value: CoreTlvDataUint32, + LinkTlvs.DELAY.value: CoreTlvDataUint64, + LinkTlvs.BANDWIDTH.value: CoreTlvDataUint64, + LinkTlvs.PER.value: CoreTlvDataString, + LinkTlvs.DUP.value: CoreTlvDataString, + LinkTlvs.JITTER.value: CoreTlvDataUint64, + LinkTlvs.MER.value: CoreTlvDataUint16, + LinkTlvs.BURST.value: CoreTlvDataUint16, + LinkTlvs.SESSION.value: CoreTlvDataString, + LinkTlvs.MBURST.value: CoreTlvDataUint16, + LinkTlvs.TYPE.value: CoreTlvDataUint32, + LinkTlvs.GUI_ATTRIBUTES.value: CoreTlvDataString, + LinkTlvs.UNIDIRECTIONAL.value: CoreTlvDataUint16, + LinkTlvs.EMULATION_ID.value: CoreTlvDataUint32, + LinkTlvs.NETWORK_ID.value: CoreTlvDataUint32, + LinkTlvs.KEY.value: CoreTlvDataUint32, + LinkTlvs.INTERFACE1_NUMBER.value: CoreTlvDataUint16, + LinkTlvs.INTERFACE1_IP4.value: CoreTlvDataIpv4Addr, + LinkTlvs.INTERFACE1_IP4_MASK.value: CoreTlvDataUint16, + LinkTlvs.INTERFACE1_MAC.value: CoreTlvDataMacAddr, + LinkTlvs.INTERFACE1_IP6.value: CoreTlvDataIPv6Addr, + LinkTlvs.INTERFACE1_IP6_MASK.value: CoreTlvDataUint16, + LinkTlvs.INTERFACE2_NUMBER.value: CoreTlvDataUint16, + LinkTlvs.INTERFACE2_IP4.value: CoreTlvDataIpv4Addr, + LinkTlvs.INTERFACE2_IP4_MASK.value: CoreTlvDataUint16, + LinkTlvs.INTERFACE2_MAC.value: CoreTlvDataMacAddr, + LinkTlvs.INTERFACE2_IP6.value: CoreTlvDataIPv6Addr, + LinkTlvs.INTERFACE2_IP6_MASK.value: CoreTlvDataUint16, + LinkTlvs.INTERFACE1_NAME.value: CoreTlvDataString, + LinkTlvs.INTERFACE2_NAME.value: CoreTlvDataString, + LinkTlvs.OPAQUE.value: CoreTlvDataString, } -class CoreExecTlv(CoreTlv): - tlvtypemap = exec_tlvs - tlvdataclsmap = { - CORE_TLV_EXEC_NODE: CoreTlvDataUint32, - CORE_TLV_EXEC_NUM: CoreTlvDataUint32, - CORE_TLV_EXEC_TIME: CoreTlvDataUint32, - CORE_TLV_EXEC_CMD: CoreTlvDataString, - CORE_TLV_EXEC_RESULT: CoreTlvDataString, - CORE_TLV_EXEC_STATUS: CoreTlvDataUint32, - CORE_TLV_EXEC_SESSION: CoreTlvDataString, + +class CoreExecuteTlv(CoreTlv): + """ + Class for representing CORE execute TLVs. + """ + + tlv_type_map = ExecuteTlvs + tlv_data_class_map = { + ExecuteTlvs.NODE.value: CoreTlvDataUint32, + ExecuteTlvs.NUMBER.value: CoreTlvDataUint32, + ExecuteTlvs.TIME.value: CoreTlvDataUint32, + ExecuteTlvs.COMMAND.value: CoreTlvDataString, + ExecuteTlvs.RESULT.value: CoreTlvDataString, + ExecuteTlvs.STATUS.value: CoreTlvDataUint32, + ExecuteTlvs.SESSION.value: CoreTlvDataString, } -class CoreRegTlv(CoreTlv): - tlvtypemap = reg_tlvs - tlvdataclsmap = { - CORE_TLV_REG_WIRELESS: CoreTlvDataString, - CORE_TLV_REG_MOBILITY: CoreTlvDataString, - CORE_TLV_REG_UTILITY: CoreTlvDataString, - CORE_TLV_REG_EXECSRV: CoreTlvDataString, - CORE_TLV_REG_GUI: CoreTlvDataString, - CORE_TLV_REG_EMULSRV: CoreTlvDataString, - CORE_TLV_REG_SESSION: CoreTlvDataString, + +class CoreRegisterTlv(CoreTlv): + """ + Class for representing CORE register TLVs. 
+ """ + + tlv_type_map = RegisterTlvs + tlv_data_class_map = { + RegisterTlvs.WIRELESS.value: CoreTlvDataString, + RegisterTlvs.MOBILITY.value: CoreTlvDataString, + RegisterTlvs.UTILITY.value: CoreTlvDataString, + RegisterTlvs.EXECUTE_SERVER.value: CoreTlvDataString, + RegisterTlvs.GUI.value: CoreTlvDataString, + RegisterTlvs.EMULATION_SERVER.value: CoreTlvDataString, + RegisterTlvs.SESSION.value: CoreTlvDataString, } -class CoreConfTlv(CoreTlv): - tlvtypemap = conf_tlvs - tlvdataclsmap = { - CORE_TLV_CONF_NODE: CoreTlvDataUint32, - CORE_TLV_CONF_OBJ: CoreTlvDataString, - CORE_TLV_CONF_TYPE: CoreTlvDataUint16, - CORE_TLV_CONF_DATA_TYPES: CoreTlvDataUint16List, - CORE_TLV_CONF_VALUES: CoreTlvDataString, - CORE_TLV_CONF_CAPTIONS: CoreTlvDataString, - CORE_TLV_CONF_BITMAP: CoreTlvDataString, - CORE_TLV_CONF_POSSIBLE_VALUES: CoreTlvDataString, - CORE_TLV_CONF_GROUPS: CoreTlvDataString, - CORE_TLV_CONF_SESSION: CoreTlvDataString, - CORE_TLV_CONF_IFNUM: CoreTlvDataUint16, - CORE_TLV_CONF_NETID: CoreTlvDataUint32, - CORE_TLV_CONF_OPAQUE: CoreTlvDataString, + +class CoreConfigTlv(CoreTlv): + """ + Class for representing CORE configuration TLVs. + """ + + tlv_type_map = ConfigTlvs + tlv_data_class_map = { + ConfigTlvs.NODE.value: CoreTlvDataUint32, + ConfigTlvs.OBJECT.value: CoreTlvDataString, + ConfigTlvs.TYPE.value: CoreTlvDataUint16, + ConfigTlvs.DATA_TYPES.value: CoreTlvDataUint16List, + ConfigTlvs.VALUES.value: CoreTlvDataString, + ConfigTlvs.CAPTIONS.value: CoreTlvDataString, + ConfigTlvs.BITMAP.value: CoreTlvDataString, + ConfigTlvs.POSSIBLE_VALUES.value: CoreTlvDataString, + ConfigTlvs.GROUPS.value: CoreTlvDataString, + ConfigTlvs.SESSION.value: CoreTlvDataString, + ConfigTlvs.INTERFACE_NUMBER.value: CoreTlvDataUint16, + ConfigTlvs.NETWORK_ID.value: CoreTlvDataUint32, + ConfigTlvs.OPAQUE.value: CoreTlvDataString, } + class CoreFileTlv(CoreTlv): - tlvtypemap = file_tlvs - tlvdataclsmap = { - CORE_TLV_FILE_NODE: CoreTlvDataUint32, - CORE_TLV_FILE_NAME: CoreTlvDataString, - CORE_TLV_FILE_MODE: CoreTlvDataString, - CORE_TLV_FILE_NUM: CoreTlvDataUint16, - CORE_TLV_FILE_TYPE: CoreTlvDataString, - CORE_TLV_FILE_SRCNAME: CoreTlvDataString, - CORE_TLV_FILE_SESSION: CoreTlvDataString, - CORE_TLV_FILE_DATA: CoreTlvDataString, - CORE_TLV_FILE_CMPDATA: CoreTlvDataString, + """ + Class for representing CORE file TLVs. + """ + + tlv_type_map = FileTlvs + tlv_data_class_map = { + FileTlvs.NODE.value: CoreTlvDataUint32, + FileTlvs.NAME.value: CoreTlvDataString, + FileTlvs.MODE.value: CoreTlvDataString, + FileTlvs.NUMBER.value: CoreTlvDataUint16, + FileTlvs.TYPE.value: CoreTlvDataString, + FileTlvs.SOURCE_NAME.value: CoreTlvDataString, + FileTlvs.SESSION.value: CoreTlvDataString, + FileTlvs.DATA.value: CoreTlvDataString, + FileTlvs.COMPRESSED_DATA.value: CoreTlvDataString, } -class CoreIfaceTlv(CoreTlv): - tlvtypemap = iface_tlvs - tlvdataclsmap = { - CORE_TLV_IFACE_NODE: CoreTlvDataUint32, - CORE_TLV_IFACE_NUM: CoreTlvDataUint16, - CORE_TLV_IFACE_NAME: CoreTlvDataString, - CORE_TLV_IFACE_IPADDR: CoreTlvDataIPv4Addr, - CORE_TLV_IFACE_MASK: CoreTlvDataUint16, - CORE_TLV_IFACE_MACADDR: CoreTlvDataMacAddr, - CORE_TLV_IFACE_IP6ADDR: CoreTlvDataIPv6Addr, - CORE_TLV_IFACE_IP6MASK: CoreTlvDataUint16, - CORE_TLV_IFACE_TYPE: CoreTlvDataUint16, - CORE_TLV_IFACE_SESSION: CoreTlvDataString, - CORE_TLV_IFACE_STATE: CoreTlvDataUint16, - CORE_TLV_IFACE_EMUID: CoreTlvDataUint32, - CORE_TLV_IFACE_NETID: CoreTlvDataUint32, + +class CoreInterfaceTlv(CoreTlv): + """ + Class for representing CORE interface TLVs. 
+ """ + + tlv_type_map = InterfaceTlvs + tlv_data_class_map = { + InterfaceTlvs.NODE.value: CoreTlvDataUint32, + InterfaceTlvs.NUMBER.value: CoreTlvDataUint16, + InterfaceTlvs.NAME.value: CoreTlvDataString, + InterfaceTlvs.IP_ADDRESS.value: CoreTlvDataIpv4Addr, + InterfaceTlvs.MASK.value: CoreTlvDataUint16, + InterfaceTlvs.MAC_ADDRESS.value: CoreTlvDataMacAddr, + InterfaceTlvs.IP6_ADDRESS.value: CoreTlvDataIPv6Addr, + InterfaceTlvs.IP6_MASK.value: CoreTlvDataUint16, + InterfaceTlvs.TYPE.value: CoreTlvDataUint16, + InterfaceTlvs.SESSION.value: CoreTlvDataString, + InterfaceTlvs.STATE.value: CoreTlvDataUint16, + InterfaceTlvs.EMULATION_ID.value: CoreTlvDataUint32, + InterfaceTlvs.NETWORK_ID.value: CoreTlvDataUint32, } + class CoreEventTlv(CoreTlv): - tlvtypemap = event_tlvs - tlvdataclsmap = { - CORE_TLV_EVENT_NODE: CoreTlvDataUint32, - CORE_TLV_EVENT_TYPE: CoreTlvDataUint32, - CORE_TLV_EVENT_NAME: CoreTlvDataString, - CORE_TLV_EVENT_DATA: CoreTlvDataString, - CORE_TLV_EVENT_TIME: CoreTlvDataString, - CORE_TLV_EVENT_SESSION: CoreTlvDataString, + """ + Class for representing CORE event TLVs. + """ + + tlv_type_map = EventTlvs + tlv_data_class_map = { + EventTlvs.NODE.value: CoreTlvDataUint32, + EventTlvs.TYPE.value: CoreTlvDataUint32, + EventTlvs.NAME.value: CoreTlvDataString, + EventTlvs.DATA.value: CoreTlvDataString, + EventTlvs.TIME.value: CoreTlvDataString, + EventTlvs.SESSION.value: CoreTlvDataString, } + class CoreSessionTlv(CoreTlv): - tlvtypemap = session_tlvs - tlvdataclsmap = { - CORE_TLV_SESS_NUMBER: CoreTlvDataString, - CORE_TLV_SESS_NAME: CoreTlvDataString, - CORE_TLV_SESS_FILE: CoreTlvDataString, - CORE_TLV_SESS_NODECOUNT: CoreTlvDataString, - CORE_TLV_SESS_DATE: CoreTlvDataString, - CORE_TLV_SESS_THUMB: CoreTlvDataString, - CORE_TLV_SESS_USER: CoreTlvDataString, - CORE_TLV_SESS_OPAQUE: CoreTlvDataString, + """ + Class for representing CORE session TLVs. + """ + + tlv_type_map = SessionTlvs + tlv_data_class_map = { + SessionTlvs.NUMBER.value: CoreTlvDataString, + SessionTlvs.NAME.value: CoreTlvDataString, + SessionTlvs.FILE.value: CoreTlvDataString, + SessionTlvs.NODE_COUNT.value: CoreTlvDataString, + SessionTlvs.DATE.value: CoreTlvDataString, + SessionTlvs.THUMB.value: CoreTlvDataString, + SessionTlvs.USER.value: CoreTlvDataString, + SessionTlvs.OPAQUE.value: CoreTlvDataString, } + class CoreExceptionTlv(CoreTlv): - tlvtypemap = exception_tlvs - tlvdataclsmap = { - CORE_TLV_EXCP_NODE: CoreTlvDataUint32, - CORE_TLV_EXCP_SESSION: CoreTlvDataString, - CORE_TLV_EXCP_LEVEL: CoreTlvDataUint16, - CORE_TLV_EXCP_SOURCE: CoreTlvDataString, - CORE_TLV_EXCP_DATE: CoreTlvDataString, - CORE_TLV_EXCP_TEXT: CoreTlvDataString, - CORE_TLV_EXCP_OPAQUE: CoreTlvDataString, + """ + Class for representing CORE exception TLVs. + """ + + tlv_type_map = ExceptionTlvs + tlv_data_class_map = { + ExceptionTlvs.NODE.value: CoreTlvDataUint32, + ExceptionTlvs.SESSION.value: CoreTlvDataString, + ExceptionTlvs.LEVEL.value: CoreTlvDataUint16, + ExceptionTlvs.SOURCE.value: CoreTlvDataString, + ExceptionTlvs.DATE.value: CoreTlvDataString, + ExceptionTlvs.TEXT.value: CoreTlvDataString, + ExceptionTlvs.OPAQUE.value: CoreTlvDataString, } class CoreMessage(object): - hdrfmt = "!BBH" - hdrsiz = struct.calcsize(hdrfmt) + """ + Base class for representing CORE messages. 
+ """ - msgtype = None - - flagmap = {} - - tlvcls = CoreTlv + header_format = "!BBH" + header_len = struct.calcsize(header_format) + message_type = None + flag_map = MessageFlags + tlv_class = CoreTlv def __init__(self, flags, hdr, data): - self.rawmsg = hdr + data + self.raw_message = hdr + data self.flags = flags - self.tlvdata = {} - self.parsedata(data) + self.tlv_data = {} + self.parse_data(data) @classmethod - def unpackhdr(cls, data): - "parse data and return (msgtype, msgflags, msglen)" - msgtype, msgflags, msglen = struct.unpack(cls.hdrfmt, data[:cls.hdrsiz]) - return msgtype, msgflags, msglen + def unpack_header(cls, data): + """ + parse data and return (message_type, message_flags, message_len). + + :param str data: data to parse + :return: unpacked tuple + :rtype: tuple + """ + message_type, message_flags, message_len = struct.unpack(cls.header_format, data[:cls.header_len]) + return message_type, message_flags, message_len @classmethod - def pack(cls, msgflags, tlvdata): - hdr = struct.pack(cls.hdrfmt, cls.msgtype, msgflags, len(tlvdata)) - return hdr + tlvdata + def pack(cls, message_flags, tlv_data): + """ + Pack CORE message data. - def addtlvdata(self, k, v): - if k in self.tlvdata: - raise KeyError, "key already exists: %s (val=%s)" % (k, v) - self.tlvdata[k] = v + :param message_flags: message flags to pack with data + :param tlv_data: data to get length from for packing + :return: combined header and tlv data + """ + header = struct.pack(cls.header_format, cls.message_type, message_flags, len(tlv_data)) + return header + tlv_data - def gettlv(self, tlvtype): - if tlvtype in self.tlvdata: - return self.tlvdata[tlvtype] - else: - return None + def add_tlv_data(self, key, value): + """ + Add TLV data into the data map. - def parsedata(self, data): + :param int key: key to store TLV data + :param value: data to associate with key + :return: nothing + """ + if key in self.tlv_data: + raise KeyError("key already exists: %s (val=%s)" % (key, value)) + + self.tlv_data[key] = value + + def get_tlv(self, tlv_type): + """ + Retrieve TLV data from data map. + + :param int tlv_type: type of data to retrieve + :return: TLV type data + """ + return self.tlv_data.get(tlv_type) + + def parse_data(self, data): + """ + Parse data while possible and adding TLV data to the data map. + + :param data: data to parse for TLV data + :return: nothing + """ while data: - tlv, data = self.tlvcls.unpack(data) - self.addtlvdata(tlv.tlvtype, tlv.value) - - def packtlvdata(self): - ''' Opposite of parsedata(). Return packed TLV data using - self.tlvdata dict. Used by repack(). - ''' - tlvdata = "" - keys = sorted(self.tlvdata.keys()) - for k in keys: - v = self.tlvdata[k] - tlvdata += self.tlvcls.pack(k, v) - return tlvdata - + tlv, data = self.tlv_class.unpack(data) + self.add_tlv_data(tlv.tlv_type, tlv.value) + + def pack_tlv_data(self): + """ + Opposite of parse_data(). Return packed TLV data using self.tlv_data dict. Used by repack(). + + :return: packed data + :rtype: str + """ + tlv_data = "" + keys = sorted(self.tlv_data.keys()) + + for key in keys: + value = self.tlv_data[key] + tlv_data += self.tlv_class.pack(key, value) + + return tlv_data + def repack(self): - ''' Invoke after updating self.tlvdata[] to rebuild self.rawmsg. + """ + Invoke after updating self.tlv_data[] to rebuild self.raw_message. Useful for modifying a message that has been parsed, before sending the raw data again. 
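Putting the message and TLV layers together, building an outgoing message is a matter of concatenating packed TLVs and framing them with the 4-byte message header; a sketch using the node message subclass defined further below (the flags, number, and name are illustrative):

    from core.api.coreapi import CoreNodeMessage, CoreNodeTlv
    from core.enumerations import MessageFlags, NodeTlvs

    # pack individual TLVs and concatenate them into the message payload
    tlv_data = CoreNodeTlv.pack(NodeTlvs.NUMBER.value, 7)
    tlv_data += CoreNodeTlv.pack(NodeTlvs.NAME.value, "n7")

    # frame the payload with the "!BBH" header: type, flags, payload length
    raw_message = CoreNodeMessage.pack(MessageFlags.ADD.value, tlv_data)

This is the same pattern the broker test earlier in this series uses before handing raw switch, node, link, and event messages to handlerawmsg().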
- ''' - tlvdata = self.packtlvdata() - self.rawmsg = self.pack(self.flags, tlvdata) - def typestr(self): + :return: nothing + """ + tlv_data = self.pack_tlv_data() + self.raw_message = self.pack(self.flags, tlv_data) + + def type_str(self): + """ + Retrieve data of the message type. + + :return: name of message type + :rtype: str + """ try: - return message_types[self.msgtype] - except KeyError: - return "unknown message type: %s" % str(self.msgtype) + return MessageTypes(self.message_type).name + except ValueError: + return "unknown message type: %s" % str(self.message_type) - def flagstr(self): - msgflags = [] + def flag_str(self): + """ + Retrieve message flag string. + + :return: message flag string + :rtype: str + """ + message_flags = [] flag = 1L + while True: - if (self.flags & flag): + if self.flags & flag: try: - msgflags.append(self.flagmap[flag]) - except KeyError: - msgflags.append("0x%x" % flag) + message_flags.append(self.flag_map(flag).name) + except ValueError: + message_flags.append("0x%x" % flag) flag <<= 1 if not (self.flags & ~(flag - 1)): break - return "0x%x <%s>" % (self.flags, " | ".join(msgflags)) + + return "0x%x <%s>" % (self.flags, " | ".join(message_flags)) def __str__(self): - tmp = "%s " % \ - (self.__class__.__name__, self.typestr(), self.flagstr()) - for k, v in self.tlvdata.iteritems(): - if k in self.tlvcls.tlvtypemap: - tlvtype = self.tlvcls.tlvtypemap[k] - else: - tlvtype = "tlv type %s" % k - tmp += "\n %s: %s" % (tlvtype, v) - return tmp + """ + Retrieve string representation of the message. + + :return: string representation + :rtype: str + """ + result = "%s " % (self.__class__.__name__, self.type_str(), self.flag_str()) + + for key, value in self.tlv_data.iteritems(): + try: + tlv_type = self.tlv_class.tlv_type_map(key).name + except ValueError: + tlv_type = "tlv type %s" % key + + result += "\n %s: %s" % (tlv_type, value) + + return result + + def node_numbers(self): + """ + Return a list of node numbers included in this message. + """ + number1 = None + number2 = None - def nodenumbers(self): - ''' Return a list of node numbers included in this message. - ''' - n = None - n2 = None # not all messages have node numbers - if self.msgtype == CORE_API_NODE_MSG: - n = self.gettlv(CORE_TLV_NODE_NUMBER) - elif self.msgtype == CORE_API_LINK_MSG: - n = self.gettlv(CORE_TLV_LINK_N1NUMBER) - n2 = self.gettlv(CORE_TLV_LINK_N2NUMBER) - elif self.msgtype == CORE_API_EXEC_MSG: - n = self.gettlv(CORE_TLV_EXEC_NODE) - elif self.msgtype == CORE_API_CONF_MSG: - n = self.gettlv(CORE_TLV_CONF_NODE) - elif self.msgtype == CORE_API_FILE_MSG: - n = self.gettlv(CORE_TLV_FILE_NODE) - elif self.msgtype == CORE_API_IFACE_MSG: - n = self.gettlv(CORE_TLV_IFACE_NODE) - elif self.msgtype == CORE_API_EVENT_MSG: - n = self.gettlv(CORE_TLV_EVENT_NODE) - r = [] - if n is not None: - r.append(n) - if n2 is not None: - r.append(n2) - return r - - def sessionnumbers(self): - ''' Return a list of session numbers included in this message. 
- ''' - r = [] - if self.msgtype == CORE_API_SESS_MSG: - s = self.gettlv(CORE_TLV_SESS_NUMBER) - elif self.msgtype == CORE_API_EXCP_MSG: - s = self.gettlv(CORE_TLV_EXCP_SESSION) + if self.message_type == MessageTypes.NODE.value: + number1 = self.get_tlv(NodeTlvs.NUMBER.value) + elif self.message_type == MessageTypes.LINK.value: + number1 = self.get_tlv(LinkTlvs.N1_NUMBER.value) + number2 = self.get_tlv(LinkTlvs.N2_NUMBER.value) + elif self.message_type == MessageTypes.EXECUTE.value: + number1 = self.get_tlv(ExecuteTlvs.NODE.value) + elif self.message_type == MessageTypes.CONFIG.value: + number1 = self.get_tlv(ConfigTlvs.NODE.value) + elif self.message_type == MessageTypes.FILE.value: + number1 = self.get_tlv(FileTlvs.NODE.value) + elif self.message_type == MessageTypes.INTERFACE.value: + number1 = self.get_tlv(InterfaceTlvs.NODE.value) + elif self.message_type == MessageTypes.EVENT.value: + number1 = self.get_tlv(EventTlvs.NODE) + + result = [] + + if number1: + result.append(number1) + + if number2: + result.append(number2) + + return result + + def session_numbers(self): + """ + Return a list of session numbers included in this message. + """ + result = [] + + if self.message_type == MessageTypes.SESSION.value: + sessions = self.get_tlv(SessionTlvs.NUMBER.value) + elif self.message_type == MessageTypes.EXCEPTION.value: + sessions = self.get_tlv(ExceptionTlvs.SESSION.value) else: # All other messages share TLV number 0xA for the session number(s). - s = self.gettlv(CORE_TLV_NODE_SESSION) - if s is not None: - for sid in s.split('|'): - r.append(int(sid)) - return r + sessions = self.get_tlv(NodeTlvs.SESSION.value) + + if sessions: + for session_id in sessions.split("|"): + result.append(int(session_id)) + + return result class CoreNodeMessage(CoreMessage): - msgtype = CORE_API_NODE_MSG - flagmap = message_flags - tlvcls = CoreNodeTlv + """ + CORE node message class. + """ + message_type = MessageTypes.NODE.value + tlv_class = CoreNodeTlv + class CoreLinkMessage(CoreMessage): - msgtype = CORE_API_LINK_MSG - flagmap = message_flags - tlvcls = CoreLinkTlv + """ + CORE link message class. + """ + message_type = MessageTypes.LINK.value + tlv_class = CoreLinkTlv + class CoreExecMessage(CoreMessage): - msgtype = CORE_API_EXEC_MSG - flagmap = message_flags - tlvcls = CoreExecTlv + """ + CORE execute message class. + """ + message_type = MessageTypes.EXECUTE.value + tlv_class = CoreExecuteTlv + class CoreRegMessage(CoreMessage): - msgtype = CORE_API_REG_MSG - flagmap = message_flags - tlvcls = CoreRegTlv + """ + CORE register message class. + """ + message_type = MessageTypes.REGISTER.value + tlv_class = CoreRegisterTlv + class CoreConfMessage(CoreMessage): - msgtype = CORE_API_CONF_MSG - flagmap = message_flags - tlvcls = CoreConfTlv + """ + CORE configuration message class. + """ + message_type = MessageTypes.CONFIG.value + tlv_class = CoreConfigTlv + class CoreFileMessage(CoreMessage): - msgtype = CORE_API_FILE_MSG - flagmap = message_flags - tlvcls = CoreFileTlv + """ + CORE file message class. + """ + message_type = MessageTypes.FILE.value + tlv_class = CoreFileTlv + class CoreIfaceMessage(CoreMessage): - msgtype = CORE_API_IFACE_MSG - flagmap = message_flags - tlvcls = CoreIfaceTlv + """ + CORE interface message class. + """ + message_type = MessageTypes.INTERFACE.value + tlv_class = CoreInterfaceTlv + class CoreEventMessage(CoreMessage): - msgtype = CORE_API_EVENT_MSG - flagmap = message_flags - tlvcls = CoreEventTlv + """ + CORE event message class. 
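
Session numbers ride in a single pipe-delimited TLV, so one message can address several sessions at once. A small sketch of that convention with made-up session ids:

    def parse_session_numbers(value):
        # TLV value "10|11|12" -> [10, 11, 12]
        if not value:
            return []
        return [int(session_id) for session_id in value.split("|")]

    assert parse_session_numbers("10|11|12") == [10, 11, 12]
    assert parse_session_numbers(None) == []
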
+ """ + message_type = MessageTypes.EVENT.value + tlv_class = CoreEventTlv + class CoreSessionMessage(CoreMessage): - msgtype = CORE_API_SESS_MSG - flagmap = message_flags - tlvcls = CoreSessionTlv + """ + CORE session message class. + """ + message_type = MessageTypes.SESSION.value + tlv_class = CoreSessionTlv + class CoreExceptionMessage(CoreMessage): - msgtype = CORE_API_EXCP_MSG - flagmap = message_flags - tlvcls = CoreExceptionTlv + """ + CORE exception message class. + """ + message_type = MessageTypes.EXCEPTION.value + tlv_class = CoreExceptionTlv -msgclsmap = { - CORE_API_NODE_MSG: CoreNodeMessage, - CORE_API_LINK_MSG: CoreLinkMessage, - CORE_API_EXEC_MSG: CoreExecMessage, - CORE_API_REG_MSG: CoreRegMessage, - CORE_API_CONF_MSG: CoreConfMessage, - CORE_API_FILE_MSG: CoreFileMessage, - CORE_API_IFACE_MSG: CoreIfaceMessage, - CORE_API_EVENT_MSG: CoreEventMessage, - CORE_API_SESS_MSG: CoreSessionMessage, - CORE_API_EXCP_MSG: CoreExceptionMessage, + +# map used to translate enumerated message type values to message class objects +CLASS_MAP = { + MessageTypes.NODE.value: CoreNodeMessage, + MessageTypes.LINK.value: CoreLinkMessage, + MessageTypes.EXECUTE.value: CoreExecMessage, + MessageTypes.REGISTER.value: CoreRegMessage, + MessageTypes.CONFIG.value: CoreConfMessage, + MessageTypes.FILE.value: CoreFileMessage, + MessageTypes.INTERFACE.value: CoreIfaceMessage, + MessageTypes.EVENT.value: CoreEventMessage, + MessageTypes.SESSION.value: CoreSessionMessage, + MessageTypes.EXCEPTION.value: CoreExceptionMessage, } -def msg_class(msgtypeid): - global msgclsmap - return msgclsmap[msgtypeid] -nodeclsmap = {} +def str_to_list(value): + """ + Helper to convert pipe-delimited string ("a|b|c") into a list (a, b, c). -def add_node_class(name, nodetypeid, nodecls, change = False): - global nodeclsmap - if nodetypeid in nodeclsmap: - if not change: - raise ValueError, \ - "node class already exists for nodetypeid %s" % nodetypeid - nodeclsmap[nodetypeid] = nodecls - if nodetypeid not in node_types: - node_types[nodetypeid] = name - exec "%s = %s" % (name, nodetypeid) in globals() - elif name != node_types[nodetypeid]: - raise ValueError, "node type already exists for '%s'" % name - else: - pass + :param str value: string to convert + :return: converted list + :rtype: list + """ -def change_node_class(name, nodetypeid, nodecls): - return add_node_class(name, nodetypeid, nodecls, change = True) - -def node_class(nodetypeid): - global nodeclsmap - return nodeclsmap[nodetypeid] - -def str_to_list(s): - ''' Helper to convert pipe-delimited string ("a|b|c") into a list (a, b, c) - ''' - if s is None: + if value is None: return None - return s.split("|") -def state_name(n): - ''' Helper to convert state number into state name using event types. - ''' - if n in event_types: - eventname = event_types[n] - name = eventname.split('_')[2] - else: - name = "unknown" - return name + return value.split("|") + + +def state_name(value): + """ + Helper to convert state number into state name using event types. + + :param int value: state value to derive name from + :return: state name + :rtype: str + """ + + try: + value = EventTypes(value).name + except ValueError: + value = "unknown" + + return value diff --git a/daemon/core/api/data.py b/daemon/core/api/data.py deleted file mode 100644 index 45f838da..00000000 --- a/daemon/core/api/data.py +++ /dev/null @@ -1,333 +0,0 @@ -# -# CORE -# Copyright (c)2010-2013 the Boeing Company. -# See the LICENSE file included in this distribution. 
-# -# author: Tom Goff -# -''' -data.py: constant definitions for the CORE API, enumerating the -different message and TLV types (these constants are also found in coreapi.h) -''' - -def enumdict(d): - for k, v in d.iteritems(): - exec "%s = %s" % (v, k) in globals() - -# Constants - -CORE_API_VER = "1.23" -CORE_API_PORT = 4038 - -# Message types - -message_types = { - 0x01: "CORE_API_NODE_MSG", - 0x02: "CORE_API_LINK_MSG", - 0x03: "CORE_API_EXEC_MSG", - 0x04: "CORE_API_REG_MSG", - 0x05: "CORE_API_CONF_MSG", - 0x06: "CORE_API_FILE_MSG", - 0x07: "CORE_API_IFACE_MSG", - 0x08: "CORE_API_EVENT_MSG", - 0x09: "CORE_API_SESS_MSG", - 0x0A: "CORE_API_EXCP_MSG", - 0x0B: "CORE_API_MSG_MAX", -} - -enumdict(message_types) - -# Generic Message Flags - -message_flags = { - 0x01: "CORE_API_ADD_FLAG", - 0x02: "CORE_API_DEL_FLAG", - 0x04: "CORE_API_CRI_FLAG", - 0x08: "CORE_API_LOC_FLAG", - 0x10: "CORE_API_STR_FLAG", - 0x20: "CORE_API_TXT_FLAG", - 0x40: "CORE_API_TTY_FLAG", -} - -enumdict(message_flags) - -# Node Message TLV Types - -node_tlvs = { - 0x01: "CORE_TLV_NODE_NUMBER", - 0x02: "CORE_TLV_NODE_TYPE", - 0x03: "CORE_TLV_NODE_NAME", - 0x04: "CORE_TLV_NODE_IPADDR", - 0x05: "CORE_TLV_NODE_MACADDR", - 0x06: "CORE_TLV_NODE_IP6ADDR", - 0x07: "CORE_TLV_NODE_MODEL", - 0x08: "CORE_TLV_NODE_EMUSRV", - 0x0A: "CORE_TLV_NODE_SESSION", - 0x20: "CORE_TLV_NODE_XPOS", - 0x21: "CORE_TLV_NODE_YPOS", - 0x22: "CORE_TLV_NODE_CANVAS", - 0x23: "CORE_TLV_NODE_EMUID", - 0x24: "CORE_TLV_NODE_NETID", - 0x25: "CORE_TLV_NODE_SERVICES", - 0x30: "CORE_TLV_NODE_LAT", - 0x31: "CORE_TLV_NODE_LONG", - 0x32: "CORE_TLV_NODE_ALT", - 0x42: "CORE_TLV_NODE_ICON", - 0x50: "CORE_TLV_NODE_OPAQUE", -} - -enumdict(node_tlvs) - -node_types = dict(enumerate([ - "CORE_NODE_DEF", - "CORE_NODE_PHYS", - "CORE_NODE_XEN", - "CORE_NODE_TBD", - "CORE_NODE_SWITCH", - "CORE_NODE_HUB", - "CORE_NODE_WLAN", - "CORE_NODE_RJ45", - "CORE_NODE_TUNNEL", - "CORE_NODE_KTUNNEL", - "CORE_NODE_EMANE", -])) - -enumdict(node_types) - -rj45_models = dict(enumerate([ - "RJ45_MODEL_LINKED", - "RJ45_MODEL_WIRELESS", - "RJ45_MODEL_INSTALLED", -])) - -enumdict(rj45_models) - -# Link Message TLV Types - -link_tlvs = { - 0x01: "CORE_TLV_LINK_N1NUMBER", - 0x02: "CORE_TLV_LINK_N2NUMBER", - 0x03: "CORE_TLV_LINK_DELAY", - 0x04: "CORE_TLV_LINK_BW", - 0x05: "CORE_TLV_LINK_PER", - 0x06: "CORE_TLV_LINK_DUP", - 0x07: "CORE_TLV_LINK_JITTER", - 0x08: "CORE_TLV_LINK_MER", - 0x09: "CORE_TLV_LINK_BURST", - CORE_TLV_NODE_SESSION: "CORE_TLV_LINK_SESSION", - 0x10: "CORE_TLV_LINK_MBURST", - 0x20: "CORE_TLV_LINK_TYPE", - 0x21: "CORE_TLV_LINK_GUIATTR", - 0x22: "CORE_TLV_LINK_UNI", - 0x23: "CORE_TLV_LINK_EMUID", - 0x24: "CORE_TLV_LINK_NETID", - 0x25: "CORE_TLV_LINK_KEY", - 0x30: "CORE_TLV_LINK_IF1NUM", - 0x31: "CORE_TLV_LINK_IF1IP4", - 0x32: "CORE_TLV_LINK_IF1IP4MASK", - 0x33: "CORE_TLV_LINK_IF1MAC", - 0x34: "CORE_TLV_LINK_IF1IP6", - 0x35: "CORE_TLV_LINK_IF1IP6MASK", - 0x36: "CORE_TLV_LINK_IF2NUM", - 0x37: "CORE_TLV_LINK_IF2IP4", - 0x38: "CORE_TLV_LINK_IF2IP4MASK", - 0x39: "CORE_TLV_LINK_IF2MAC", - 0x40: "CORE_TLV_LINK_IF2IP6", - 0x41: "CORE_TLV_LINK_IF2IP6MASK", - 0x42: "CORE_TLV_LINK_IF1NAME", - 0x43: "CORE_TLV_LINK_IF2NAME", - 0x50: "CORE_TLV_LINK_OPAQUE", -} - -enumdict(link_tlvs) - -link_types = dict(enumerate([ - "CORE_LINK_WIRELESS", - "CORE_LINK_WIRED", -])) - -enumdict(link_types) - -# Execute Message TLV Types - -exec_tlvs = { - 0x01: "CORE_TLV_EXEC_NODE", - 0x02: "CORE_TLV_EXEC_NUM", - 0x03: "CORE_TLV_EXEC_TIME", - 0x04: "CORE_TLV_EXEC_CMD", - 0x05: "CORE_TLV_EXEC_RESULT", - 0x06: 
"CORE_TLV_EXEC_STATUS", - CORE_TLV_NODE_SESSION: "CORE_TLV_EXEC_SESSION", -} - -enumdict(exec_tlvs) - -# Register Message TLV Types - -reg_tlvs = { - 0x01: "CORE_TLV_REG_WIRELESS", - 0x02: "CORE_TLV_REG_MOBILITY", - 0x03: "CORE_TLV_REG_UTILITY", - 0x04: "CORE_TLV_REG_EXECSRV", - 0x05: "CORE_TLV_REG_GUI", - 0x06: "CORE_TLV_REG_EMULSRV", - CORE_TLV_NODE_SESSION: "CORE_TLV_REG_SESSION", -} - -enumdict(reg_tlvs) - -# Configuration Message TLV Types - -conf_tlvs = { - 0x01: "CORE_TLV_CONF_NODE", - 0x02: "CORE_TLV_CONF_OBJ", - 0x03: "CORE_TLV_CONF_TYPE", - 0x04: "CORE_TLV_CONF_DATA_TYPES", - 0x05: "CORE_TLV_CONF_VALUES", - 0x06: "CORE_TLV_CONF_CAPTIONS", - 0x07: "CORE_TLV_CONF_BITMAP", - 0x08: "CORE_TLV_CONF_POSSIBLE_VALUES", - 0x09: "CORE_TLV_CONF_GROUPS", - CORE_TLV_NODE_SESSION: "CORE_TLV_CONF_SESSION", - 0x0B: "CORE_TLV_CONF_IFNUM", - CORE_TLV_NODE_NETID: "CORE_TLV_CONF_NETID", - 0x50: "CORE_TLV_CONF_OPAQUE", -} - -enumdict(conf_tlvs) - -conf_flags = { - 0x00: "CONF_TYPE_FLAGS_NONE", - 0x01: "CONF_TYPE_FLAGS_REQUEST", - 0x02: "CONF_TYPE_FLAGS_UPDATE", - 0x03: "CONF_TYPE_FLAGS_RESET", -} - -enumdict(conf_flags) - -conf_data_types = { - 0x01: "CONF_DATA_TYPE_UINT8", - 0x02: "CONF_DATA_TYPE_UINT16", - 0x03: "CONF_DATA_TYPE_UINT32", - 0x04: "CONF_DATA_TYPE_UINT64", - 0x05: "CONF_DATA_TYPE_INT8", - 0x06: "CONF_DATA_TYPE_INT16", - 0x07: "CONF_DATA_TYPE_INT32", - 0x08: "CONF_DATA_TYPE_INT64", - 0x09: "CONF_DATA_TYPE_FLOAT", - 0x0A: "CONF_DATA_TYPE_STRING", - 0x0B: "CONF_DATA_TYPE_BOOL", -} - -enumdict(conf_data_types) - -# File Message TLV Types - -file_tlvs = { - 0x01: "CORE_TLV_FILE_NODE", - 0x02: "CORE_TLV_FILE_NAME", - 0x03: "CORE_TLV_FILE_MODE", - 0x04: "CORE_TLV_FILE_NUM", - 0x05: "CORE_TLV_FILE_TYPE", - 0x06: "CORE_TLV_FILE_SRCNAME", - CORE_TLV_NODE_SESSION: "CORE_TLV_FILE_SESSION", - 0x10: "CORE_TLV_FILE_DATA", - 0x11: "CORE_TLV_FILE_CMPDATA", -} - -enumdict(file_tlvs) - -# Interface Message TLV Types - -iface_tlvs = { - 0x01: "CORE_TLV_IFACE_NODE", - 0x02: "CORE_TLV_IFACE_NUM", - 0x03: "CORE_TLV_IFACE_NAME", - 0x04: "CORE_TLV_IFACE_IPADDR", - 0x05: "CORE_TLV_IFACE_MASK", - 0x06: "CORE_TLV_IFACE_MACADDR", - 0x07: "CORE_TLV_IFACE_IP6ADDR", - 0x08: "CORE_TLV_IFACE_IP6MASK", - 0x09: "CORE_TLV_IFACE_TYPE", - CORE_TLV_NODE_SESSION: "CORE_TLV_IFACE_SESSION", - 0x0B: "CORE_TLV_IFACE_STATE", - CORE_TLV_NODE_EMUID: "CORE_TLV_IFACE_EMUID", - CORE_TLV_NODE_NETID: "CORE_TLV_IFACE_NETID", -} - -enumdict(iface_tlvs) - -# Event Message TLV Types - -event_tlvs = { - 0x01: "CORE_TLV_EVENT_NODE", - 0x02: "CORE_TLV_EVENT_TYPE", - 0x03: "CORE_TLV_EVENT_NAME", - 0x04: "CORE_TLV_EVENT_DATA", - 0x05: "CORE_TLV_EVENT_TIME", - CORE_TLV_NODE_SESSION: "CORE_TLV_EVENT_SESSION", -} - -enumdict(event_tlvs) - -event_types = dict(enumerate([ - "CORE_EVENT_NONE", - "CORE_EVENT_DEFINITION_STATE", - "CORE_EVENT_CONFIGURATION_STATE", - "CORE_EVENT_INSTANTIATION_STATE", - "CORE_EVENT_RUNTIME_STATE", - "CORE_EVENT_DATACOLLECT_STATE", - "CORE_EVENT_SHUTDOWN_STATE", - "CORE_EVENT_START", - "CORE_EVENT_STOP", - "CORE_EVENT_PAUSE", - "CORE_EVENT_RESTART", - "CORE_EVENT_FILE_OPEN", - "CORE_EVENT_FILE_SAVE", - "CORE_EVENT_SCHEDULED", - "CORE_EVENT_RECONFIGURE", - "CORE_EVENT_INSTANTIATION_COMPLETE", -])) - -enumdict(event_types) - -# Session Message TLV Types - -session_tlvs = { - 0x01: "CORE_TLV_SESS_NUMBER", - 0x02: "CORE_TLV_SESS_NAME", - 0x03: "CORE_TLV_SESS_FILE", - 0x04: "CORE_TLV_SESS_NODECOUNT", - 0x05: "CORE_TLV_SESS_DATE", - 0x06: "CORE_TLV_SESS_THUMB", - 0x07: "CORE_TLV_SESS_USER", - 0x0A: "CORE_TLV_SESS_OPAQUE", -} - 
-enumdict(session_tlvs) - -# Exception Message TLV Types - -exception_tlvs = { - 0x01: "CORE_TLV_EXCP_NODE", - 0x02: "CORE_TLV_EXCP_SESSION", - 0x03: "CORE_TLV_EXCP_LEVEL", - 0x04: "CORE_TLV_EXCP_SOURCE", - 0x05: "CORE_TLV_EXCP_DATE", - 0x06: "CORE_TLV_EXCP_TEXT", - 0x0A: "CORE_TLV_EXCP_OPAQUE", -} - -enumdict(exception_tlvs) - -exception_levels = dict(enumerate([ - "CORE_EXCP_LEVEL_NONE", - "CORE_EXCP_LEVEL_FATAL", - "CORE_EXCP_LEVEL_ERROR", - "CORE_EXCP_LEVEL_WARNING", - "CORE_EXCP_LEVEL_NOTICE", -])) - -enumdict(exception_levels) - -del enumdict diff --git a/daemon/core/broker.py b/daemon/core/broker.py index 96b6e086..5da3ca2a 100644 --- a/daemon/core/broker.py +++ b/daemon/core/broker.py @@ -1,32 +1,54 @@ -# -# CORE -# Copyright (c)2010-2013 the Boeing Company. -# See the LICENSE file included in this distribution. -# -# author: Jeff Ahrenholz -# -''' -broker.py: definition of CoreBroker class that is part of the -pycore session object. Handles distributing parts of the emulation out to -other emulation servers. The broker is consulted during the -CoreRequestHandler.handlemsg() loop to determine if messages should be handled -locally or forwarded on to another emulation server. -''' +""" +Broker class that is part of the session object. Handles distributing parts of the emulation out to +other emulation servers. The broker is consulted when handling messages to determine if messages +should be handled locally or forwarded on to another emulation server. +""" + +import os +import select +import socket +import threading -import os, socket, select, threading, sys from core.api import coreapi -from core.coreobj import PyCoreNode, PyCoreNet -from core.emane.nodes import EmaneNet -from core.netns.nodes import CtrlNet -from core.phys.pnodes import PhysicalNode -from core.misc.ipaddr import IPAddr from core.conf import ConfigurableManager -if os.uname()[0] == "Linux": - from core.netns.vif import GreTap - from core.netns.vnet import GreTapBridge +from core.coreobj import PyCoreNet +from core.coreobj import PyCoreNode +from core.enumerations import ConfigDataTypes +from core.enumerations import ConfigFlags +from core.enumerations import ConfigTlvs +from core.enumerations import EventTlvs +from core.enumerations import EventTypes +from core.enumerations import ExecuteTlvs +from core.enumerations import FileTlvs +from core.enumerations import LinkTlvs +from core.enumerations import MessageFlags +from core.enumerations import MessageTypes +from core.enumerations import NodeTlvs +from core.enumerations import NodeTypes +from core.enumerations import RegisterTlvs +from core.misc import log +from core.misc import nodeutils +from core.misc.ipaddress import IpAddress +from core.netns.vif import GreTap +from core.netns.vnet import GreTapBridge +from core.phys.pnodes import PhysicalNode +logger = log.get_logger(__name__) + + +# TODO: name conflict with main core server, probably should rename class CoreServer(object): + """ + Reptesents CORE daemon servers for communication. + """ def __init__(self, name, host, port): + """ + Creates a CoreServer instance. + + :param str name: name of the CORE server + :param str host: server address + :param int port: server port + """ self.name = name self.host = host self.port = port @@ -34,33 +56,57 @@ class CoreServer(object): self.instantiation_complete = False def connect(self): + """ + Connect to CORE server and save connection. 
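
Each CoreServer instance wraps a single TCP connection to a peer core-daemon. A rough usage sketch, assuming the refactored core.broker module is importable and a daemon is reachable at the illustrative address below on the default API port 4038:

    from core.broker import CoreServer

    server = CoreServer("core2", "10.0.0.2", 4038)
    try:
        server.connect()      # raises IOError if the peer cannot be reached
        print("connected to %s @ %s:%s" % (server.name, server.host, server.port))
    finally:
        server.close()        # safe to call even if connect() failed
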
+ + :return: nothing + """ assert self.sock is None sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - #sock.setblocking(0) + try: sock.connect((self.host, self.port)) - except: + except IOError as e: sock.close() - raise + raise e + self.sock = sock def close(self): + """ + Close connection with CORE server. + + :return: nothing + """ if self.sock is not None: self.sock.close() self.sock = None + class CoreBroker(ConfigurableManager): - ''' Member of pycore session class for handling global emulation server - data. - ''' - _name = "broker" - _type = coreapi.CORE_TLV_REG_UTILITY - - def __init__(self, session, verbose = False): - ConfigurableManager.__init__(self, session) + """ + Helps with brokering messages between CORE daemon servers. + """ + + # configurable manager name + name = "broker" + + # configurable manager type + config_type = RegisterTlvs.UTILITY.value + + def __init__(self, session): + """ + Creates a CoreBroker instance. + + :param core.session.Session session: session this manager is tied to + :return: nothing + """ + + ConfigurableManager.__init__(self) + self.session = session + self.session_handler = None self.session_id_master = None self.myip = None - self.verbose = verbose # dict containing tuples of (host, port, sock) self.servers = {} self.servers_lock = threading.Lock() @@ -72,35 +118,37 @@ class CoreBroker(ConfigurableManager): # reference counts of nodes on servers self.nodecounts = {} # set of node numbers that are link-layer nodes (networks) - self.nets = set() + self.network_nodes = set() # set of node numbers that are PhysicalNode nodes - self.phys = set() + self.physical_nodes = set() # allows for other message handlers to process API messages (e.g. EMANE) self.handlers = set() # dict with tunnel key to tunnel device mapping self.tunnels = {} self.dorecvloop = False self.recvthread = None + self.bootcount = 0 def startup(self): - ''' Build tunnels between network-layer nodes now that all node - and link information has been received; called when session - enters the instantation state. - ''' + """ + Build tunnels between network-layer nodes now that all node + and link information has been received; called when session + enters the instantation state. + """ self.addnettunnels() self.writeservers() def shutdown(self): - ''' Close all active sockets; called when the session enters the - data collect state - ''' + """ + Close all active sockets; called when the session enters the + data collect state + """ with self.servers_lock: while len(self.servers) > 0: name, server = self.servers.popitem() if server.sock is not None: - if self.verbose: - self.session.info("closing connection with %s @ %s:%s" % \ - (name, server.host, server.port)) + logger.info("closing connection with %s @ %s:%s" % + (name, server.host, server.port)) server.close() self.reset() self.dorecvloop = False @@ -108,24 +156,27 @@ class CoreBroker(ConfigurableManager): self.recvthread.join() def reset(self): - ''' Reset to initial state. - ''' + """ + Reset to initial state. 
+ """ self.nodemap_lock.acquire() self.nodemap.clear() for server, count in self.nodecounts.iteritems(): if count < 1: self.delserver(server) self.nodecounts.clear() + self.bootcount = 0 self.nodemap_lock.release() - self.nets.clear() - self.phys.clear() + self.network_nodes.clear() + self.physical_nodes.clear() while len(self.tunnels) > 0: - (key, gt) = self.tunnels.popitem() + key, gt = self.tunnels.popitem() gt.shutdown() def startrecvloop(self): - ''' Spawn the recvloop() thread if it hasn't been already started. - ''' + """ + Spawn the receive loop for receiving messages. + """ if self.recvthread is not None: if self.recvthread.isAlive(): return @@ -133,15 +184,16 @@ class CoreBroker(ConfigurableManager): self.recvthread.join() # start reading data from connected sockets self.dorecvloop = True - self.recvthread = threading.Thread(target = self.recvloop) + self.recvthread = threading.Thread(target=self.recvloop) self.recvthread.daemon = True self.recvthread.start() def recvloop(self): - ''' Thread target that receives messages from server sockets. - ''' + """ + Receive loop for receiving messages from server sockets. + """ self.dorecvloop = True - # note: this loop continues after emulation is stopped, + # note: this loop continues after emulation is stopped, # even with 0 servers while self.dorecvloop: rlist = [] @@ -158,137 +210,138 @@ class CoreBroker(ConfigurableManager): continue rcvlen = self.recv(server) if rcvlen == 0: - if self.verbose: - msg = 'connection with %s @ %s:%s has closed' % \ - (server.name, server.host, server.port) - self.session.info(msg) + logger.info("connection with %s @ %s:%s has closed" % ( + server.name, server.host, server.port)) def recv(self, server): - ''' Receive data on an emulation server socket and broadcast it to - all connected session handlers. Returns the length of data recevied - and forwarded. Return value of zero indicates the socket has closed - and should be removed from the self.servers dict. - ''' - msghdr = server.sock.recv(coreapi.CoreMessage.hdrsiz) + """ + Receive data on an emulation server socket and broadcast it to + all connected session handlers. Returns the length of data recevied + and forwarded. Return value of zero indicates the socket has closed + and should be removed from the self.servers dict. 
+ + :param CoreServer server: server to receive from + :return: message length + :rtype: int + """ + msghdr = server.sock.recv(coreapi.CoreMessage.header_len) if len(msghdr) == 0: # server disconnected server.close() return 0 - if len(msghdr) != coreapi.CoreMessage.hdrsiz: - if self.verbose: - self.session.info("warning: broker received not enough data " \ - "len=%s" % len(msghdr)) + + if len(msghdr) != coreapi.CoreMessage.header_len: + logger.info("warning: broker received not enough data len=%s" % len(msghdr)) return len(msghdr) - msgtype, msgflags, msglen = coreapi.CoreMessage.unpackhdr(msghdr) + msgtype, msgflags, msglen = coreapi.CoreMessage.unpack_header(msghdr) msgdata = server.sock.recv(msglen) data = msghdr + msgdata count = None # snoop exec response for remote interactive TTYs - if msgtype == coreapi.CORE_API_EXEC_MSG and \ - msgflags & coreapi.CORE_API_TTY_FLAG: + if msgtype == MessageTypes.EXECUTE.value and msgflags & MessageFlags.TTY.value: data = self.fixupremotetty(msghdr, msgdata, server.host) - elif msgtype == coreapi.CORE_API_NODE_MSG: + elif msgtype == MessageTypes.NODE.value: # snoop node delete response to decrement node counts - if msgflags & coreapi.CORE_API_DEL_FLAG: + if msgflags & MessageFlags.DELETE.value: msg = coreapi.CoreNodeMessage(msgflags, msghdr, msgdata) - nodenum = msg.gettlv(coreapi.CORE_TLV_NODE_NUMBER) + nodenum = msg.get_tlv(NodeTlvs.NUMBER.value) if nodenum is not None: count = self.delnodemap(server, nodenum) - elif msgtype == coreapi.CORE_API_LINK_MSG: + elif msgtype == MessageTypes.LINK.value: # this allows green link lines for remote WLANs msg = coreapi.CoreLinkMessage(msgflags, msghdr, msgdata) - self.session.sdt.handledistributed(msg) - elif msgtype == coreapi.CORE_API_EVENT_MSG: + self.session.sdt.handle_distributed(msg) + elif msgtype == MessageTypes.EVENT.value: msg = coreapi.CoreEventMessage(msgflags, msghdr, msgdata) - eventtype = msg.gettlv(coreapi.CORE_TLV_EVENT_TYPE) - if eventtype == coreapi.CORE_EVENT_INSTANTIATION_COMPLETE: + eventtype = msg.get_tlv(EventTlvs.TYPE.value) + if eventtype == EventTypes.INSTANTIATION_COMPLETE.value: server.instantiation_complete = True if self.instantiation_complete(): - self.session.checkruntime() + self.session.check_runtime() + else: + logger.error("unknown message type received: %s", msgtype) + + try: + self.session_handler.sendall(data) + except IOError: + logger.exception("error sending message") - self.session.broadcastraw(None, data) if count is not None and count < 1: return 0 else: return len(data) - def local_instantiation_complete(self): - '''\ - Set the local server's instantiation-complete status to True. - ''' - with self.servers_lock: - server = self.servers.get('localhost') - if server is not None: - server.instantiation_complete = True - - def instantiation_complete(self): - '''\ - Return True if all servers have completed instantiation, False - otherwise. - ''' - with self.servers_lock: - for server in self.servers.itervalues(): - if not server.instantiation_complete: - return False - return True - def addserver(self, name, host, port): - ''' Add a new server, and try to connect to it. If we're already - connected to this (host, port), then leave it alone. When host,port - is None, do not try to connect. - ''' + """ + Add a new server, and try to connect to it. If we"re already connected to this + (host, port), then leave it alone. When host,port is None, do not try to connect. 
+ + :param str name: name of server + :param str host: server address + :param int port: server port + :return: nothing + """ with self.servers_lock: server = self.servers.get(name) if server is not None: if host == server.host and port == server.port and \ - server.sock is not None: + server.sock is not None: # leave this socket connected return - if self.verbose: - self.session.info('closing connection with %s @ %s:%s' % \ - (name, server.host, server.port)) + + logger.info("closing connection with %s @ %s:%s" % (name, server.host, server.port)) server.close() del self.servers[name] - if self.verbose: - self.session.info('adding server %s @ %s:%s' % \ - (name, host, port)) + + logger.info("adding server %s @ %s:%s" % (name, host, port)) server = CoreServer(name, host, port) if host is not None and port is not None: try: server.connect() - except Exception as e: - self.session.warn('error connecting to server %s:%s:\n\t%s' % \ - (host, port, e)) + except IOError: + logger.exception("error connecting to server %s:%s" % (host, port)) if server.sock is not None: self.startrecvloop() self.servers[name] = server def delserver(self, server): - ''' Remove a server and hang up any connection. - ''' + """ + Remove a server and hang up any connection. + + :param CoreServer server: server to delete + :return: nothing + """ with self.servers_lock: try: s = self.servers.pop(server.name) assert s == server except KeyError: - pass + logger.exception("error deleting server") + if server.sock is not None: - if self.verbose: - self.session.info("closing connection with %s @ %s:%s" % \ - (server.name, server.host, server.port)) + logger.info("closing connection with %s @ %s:%s" % (server.name, server.host, server.port)) server.close() def getserverbyname(self, name): - ''' Return the server object having the given name, or None. - ''' + """ + Return the server object having the given name, or None. + + :param str name: name of server to retrieve + :return: server for given name + :rtype: CoreServer + """ with self.servers_lock: return self.servers.get(name) def getserverbysock(self, sock): - ''' Return the server object corresponding to the given socket, - or None. - ''' + """ + Return the server object corresponding to the given socket, or None. + + :param sock: socket associated with a server + :return: core server associated wit the socket + :rtype: CoreServer + """ with self.servers_lock: for server in self.servers.itervalues(): if server.sock == sock: @@ -296,90 +349,120 @@ class CoreBroker(ConfigurableManager): return None def getservers(self): - '''Return a list of servers sorted by name.''' + """ + Return a list of servers sorted by name. + + :return: sorted server list + :rtype: list + """ with self.servers_lock: - return sorted(self.servers.values(), key = lambda x: x.name) + return sorted(self.servers.values(), key=lambda x: x.name) def getservernames(self): - ''' Return a sorted list of server names (keys from self.servers). - ''' + """ + Return a sorted list of server names (keys from self.servers). + + :return: sorted server names + :rtype: list + """ with self.servers_lock: return sorted(self.servers.keys()) def tunnelkey(self, n1num, n2num): - ''' Compute a 32-bit key used to uniquely identify a GRE tunnel. + """ + Compute a 32-bit key used to uniquely identify a GRE tunnel. The hash(n1num), hash(n2num) values are used, so node numbers may be None or string values (used for e.g. "ctrlnet"). 
- ''' + + :param int n1num: node one id + :param int n2num: node two id + :return: tunnel key for the node pair + :rtype: int + """ sid = self.session_id_master if sid is None: # this is the master session - sid = self.session.sessionid - - key = (sid << 16) ^ hash(n1num) ^ (hash(n2num) << 8) + sid = self.session.session_id + + key = (sid << 16) ^ hash(n1num) ^ (hash(n2num) << 8) return key & 0xFFFFFFFF - + def addtunnel(self, remoteip, n1num, n2num, localnum): - ''' Add a new GreTapBridge between nodes on two different machines. - ''' + """ + Adds a new GreTapBridge between nodes on two different machines. + + :param str remoteip: remote address for tunnel + :param int n1num: node one id + :param int n2num: node two id + :param int localnum: local id + :return: nothing + """ key = self.tunnelkey(n1num, n2num) if localnum == n2num: remotenum = n1num else: remotenum = n2num if key in self.tunnels.keys(): - self.session.warn("tunnel with key %s (%s-%s) already exists!" % \ - (key, n1num, n2num)) + logger.warn("tunnel with key %s (%s-%s) already exists!" % (key, n1num, n2num)) else: - objid = key & ((1<<16)-1) - self.session.info("Adding tunnel for %s-%s to %s with key %s" % \ - (n1num, n2num, remoteip, key)) - if localnum in self.phys: + objid = key & ((1 << 16) - 1) + logger.info("Adding tunnel for %s-%s to %s with key %s", n1num, n2num, remoteip, key) + if localnum in self.physical_nodes: # no bridge is needed on physical nodes; use the GreTap directly gt = GreTap(node=None, name=None, session=self.session, - remoteip=remoteip, key=key) + remoteip=remoteip, key=key) else: - gt = self.session.addobj(cls = GreTapBridge, objid = objid, - policy="ACCEPT", remoteip=remoteip, key = key) + gt = self.session.add_object(cls=GreTapBridge, objid=objid, + policy="ACCEPT", remoteip=remoteip, key=key) gt.localnum = localnum gt.remotenum = remotenum self.tunnels[key] = gt - + def addnettunnels(self): - ''' Add GreTaps between network devices on different machines. + """ + Add GreTaps between network devices on different machines. The GreTapBridge is not used since that would add an extra bridge. - ''' - for n in self.nets: + """ + for n in self.network_nodes: self.addnettunnel(n) - def addnettunnel(self, n): - try: - net = self.session.obj(n) - except KeyError: - raise KeyError, "network node %s not found" % n - # add other nets here that do not require tunnels - if isinstance(net, EmaneNet): - return None - if isinstance(net, CtrlNet): - if hasattr(net, 'serverintf'): - if net.serverintf is not None: - return None + def addnettunnel(self, node_id): + """ + Add network tunnel between node and broker. 
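
The GRE tunnel key folds the master session id and both node numbers into 32 bits, so every server derives the same key for the same node pair. A self-contained sketch of the computation; integer node numbers hash to themselves, while string "node numbers" such as "ctrlnet" relied on Python 2's deterministic string hashing:

    def tunnel_key(session_id, n1, n2):
        key = (session_id << 16) ^ hash(n1) ^ (hash(n2) << 8)
        return key & 0xFFFFFFFF

    # session 42, nodes 1 and 2 always map to the same 32-bit key
    assert tunnel_key(42, 1, 2) == tunnel_key(42, 1, 2)
    assert tunnel_key(42, 1, 2) != tunnel_key(43, 1, 2)
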
- servers = self.getserversbynode(n) - if len(servers) < 2: + :param int node_id: node id of network to add tunnel to + :return: list of gre taps + :rtype: list + """ + try: + net = self.session.get_object(node_id) + except KeyError: + raise KeyError("network node %s not found" % node_id) + + # add other nets here that do not require tunnels + if nodeutils.is_node(net, NodeTypes.EMANE_NET): + logger.warn("emane network does not require a tunnel") return None + + server_interface = getattr(net, "serverintf", None) + if nodeutils.is_node(net, NodeTypes.CONTROL_NET) and server_interface is not None: + logger.warn("control networks with server interfaces do not need a tunnel") + return None + + servers = self.getserversbynode(node_id) + if len(servers) < 2: + logger.warn("not enough servers to create a tunnel: %s", servers) + return None + hosts = [] for server in servers: if server.host is None: continue hosts.append(server.host) - if len(hosts) == 0: + + if len(hosts) == 0 and self.session_handler.client_address != "": # get IP address from API message sender (master) - self.session._handlerslock.acquire() - for h in self.session._handlers: - if h.client_address != "": - hosts.append(h.client_address[0]) - self.session._handlerslock.release() + hosts.append(self.session_handler.client_address[0]) r = [] for host in hosts: @@ -389,35 +472,44 @@ class CoreBroker(ConfigurableManager): else: # we are the session master myip = host - key = self.tunnelkey(n, IPAddr.toint(myip)) + key = self.tunnelkey(node_id, IpAddress.to_int(myip)) if key in self.tunnels.keys(): continue - self.session.info("Adding tunnel for net %s to %s with key %s" % \ - (n, host, key)) - gt = GreTap(node=None, name=None, session=self.session, - remoteip=host, key=key) + logger.info("Adding tunnel for net %s to %s with key %s" % (node_id, host, key)) + gt = GreTap(node=None, name=None, session=self.session, remoteip=host, key=key) self.tunnels[key] = gt r.append(gt) # attaching to net will later allow gt to be destroyed # during net.shutdown() net.attach(gt) + return r def deltunnel(self, n1num, n2num): - ''' Cleanup of the GreTapBridge. - ''' + """ + Delete tunnel between nodes. + + :param int n1num: node one id + :param int n2num: node two id + :return: nothing + """ key = self.tunnelkey(n1num, n2num) try: gt = self.tunnels.pop(key) except KeyError: gt = None if gt: - self.session.delobj(gt.objid) + self.session.delete_object(gt.objid) del gt - + def gettunnel(self, n1num, n2num): - ''' Return the GreTap between two nodes if it exists. - ''' + """ + Return the GreTap between two nodes if it exists. + + :param int n1num: node one id + :param int n2num: node two id + :return: gre tap between nodes or none + """ key = self.tunnelkey(n1num, n2num) if key in self.tunnels.keys(): return self.tunnels[key] @@ -425,8 +517,13 @@ class CoreBroker(ConfigurableManager): return None def addnodemap(self, server, nodenum): - ''' Record a node number to emulation server mapping. - ''' + """ + Record a node number to emulation server mapping. 
+ + :param CoreServer server: core server to associate node with + :param int nodenum: node id + :return: nothing + """ with self.nodemap_lock: if nodenum in self.nodemap: if server in self.nodemap[nodenum]: @@ -434,124 +531,163 @@ class CoreBroker(ConfigurableManager): self.nodemap[nodenum].add(server) else: self.nodemap[nodenum] = {server} + if server in self.nodecounts: self.nodecounts[server] += 1 else: self.nodecounts[server] = 1 def delnodemap(self, server, nodenum): - ''' Remove a node number to emulation server mapping. - Return the number of nodes left on this server. - ''' + """ + Remove a node number to emulation server mapping. + Return the number of nodes left on this server. + + :param CoreServer server: server to remove from node map + :param int nodenum: node id + :return: number of nodes left on server + :rtype: int + """ count = None with self.nodemap_lock: if nodenum not in self.nodemap: return count + self.nodemap[nodenum].remove(server) if server in self.nodecounts: count = self.nodecounts[server] count -= 1 self.nodecounts[server] = count + return count def getserversbynode(self, nodenum): - ''' Retrieve a set of emulation servers given a node number. - ''' + """ + Retrieve a set of emulation servers given a node number. + + :param int nodenum: node id + :return: core server associated with node + :rtype: set + """ with self.nodemap_lock: if nodenum not in self.nodemap: return set() return self.nodemap[nodenum] def addnet(self, nodenum): - ''' Add a node number to the list of link-layer nodes. - ''' - self.nets.add(nodenum) + """ + Add a node number to the list of link-layer nodes. + + :param int nodenum: node id to add + :return: nothing + """ + self.network_nodes.add(nodenum) def addphys(self, nodenum): - ''' Add a node number to the list of physical nodes. - ''' - self.phys.add(nodenum) + """ + Add a node number to the list of physical nodes. - def configure_reset(self, msg): - ''' Ignore reset messages, because node delete responses may still - arrive and require the use of nodecounts. - ''' + :param int nodenum: node id to add + :return: nothing + """ + self.physical_nodes.add(nodenum) + + def configure_reset(self, config_data): + """ + Ignore reset messages, because node delete responses may still + arrive and require the use of nodecounts. + + :param core.conf.ConfigData config_data: configuration data for carrying out a configuration + :return: nothing + """ return None - def configure_values(self, msg, values): - ''' Receive configuration message with a list of server:host:port - combinations that we'll need to connect with. - ''' - objname = msg.gettlv(coreapi.CORE_TLV_CONF_OBJ) - conftype = msg.gettlv(coreapi.CORE_TLV_CONF_TYPE) - + def configure_values(self, config_data): + """ + Receive configuration message with a list of server:host:port + combinations that we"ll need to connect with. + + :param core.conf.ConfigData config_data: configuration data for carrying out a configuration + :return: nothing + """ + values = config_data.data_values + session_id = config_data.session + if values is None: - self.session.info("emulation server data missing") + logger.info("emulation server data missing") return None - values = values.split('|') + values = values.split("|") + # string of "server:ip:port,server:ip:port,..." 
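
The first pipe-separated field of the configuration value is itself a comma-separated list of name:host:port triples, where an empty host or port marks the local server. A parsing sketch with illustrative input (4038 is the default CORE API port):

    def parse_server_string(value):
        # "name:host:port,name:host:port,..." -> [(name, host, port), ...]
        servers = []
        for item in value.split(","):
            name, host, port = item.split(":")[:3]
            servers.append((name, host or None, int(port) if port else None))
        return servers

    assert parse_server_string("core2:10.0.0.2:4038,localhost::") == [
        ("core2", "10.0.0.2", 4038),
        ("localhost", None, None),
    ]
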
- serverstrings = values[0] - server_list = serverstrings.split(',') + server_strings = values[0] + server_list = server_strings.split(",") + for server in server_list: - server_items = server.split(':') + server_items = server.split(":") (name, host, port) = server_items[:3] - if host == '': + + if host == "": host = None - if port == '': + + if port == "": port = None else: port = int(port) - sid = msg.gettlv(coreapi.CORE_TLV_CONF_SESSION) - if sid is not None: + + if session_id is not None: # receive session ID and my IP from master - self.session_id_master = int(sid.split('|')[0]) + self.session_id_master = int(session_id.split("|")[0]) self.myip = host host = None port = None + # this connects to the server immediately; maybe we should wait # or spin off a new "client" thread here self.addserver(name, host, port) self.setupserver(name) + return None - def handlemsg(self, msg): - ''' Handle an API message. Determine whether this needs to be handled - by the local server or forwarded on to another one. - Returns True when message does not need to be handled locally, - and performs forwarding if required. - Returning False indicates this message should be handled locally. - ''' + def handle_message(self, message): + """ + Handle an API message. Determine whether this needs to be handled + by the local server or forwarded on to another one. + Returns True when message does not need to be handled locally, + and performs forwarding if required. + Returning False indicates this message should be handled locally. + + :param core.api.coreapi.CoreMessage message: message to handle + :return: true or false for handling locally + :rtype: bool + """ servers = set() handle_locally = False # Do not forward messages when in definition state # (for e.g. configuring services) - if self.session.getstate() == coreapi.CORE_EVENT_DEFINITION_STATE: + if self.session.state == EventTypes.DEFINITION_STATE.value: return False # Decide whether message should be handled locally or forwarded, or both - if msg.msgtype == coreapi.CORE_API_NODE_MSG: - handle_locally, servers = self.handlenodemsg(msg) - elif msg.msgtype == coreapi.CORE_API_EVENT_MSG: + if message.message_type == MessageTypes.NODE.value: + handle_locally, servers = self.handlenodemsg(message) + elif message.message_type == MessageTypes.EVENT.value: # broadcast events everywhere servers = self.getservers() - elif msg.msgtype == coreapi.CORE_API_CONF_MSG: + elif message.message_type == MessageTypes.CONFIG.value: # broadcast location and services configuration everywhere - confobj = msg.gettlv(coreapi.CORE_TLV_CONF_OBJ) + confobj = message.get_tlv(ConfigTlvs.OBJECT.value) if confobj == "location" or confobj == "services" or \ - confobj == "session" or confobj == "all": + confobj == "session" or confobj == "all": servers = self.getservers() - elif msg.msgtype == coreapi.CORE_API_FILE_MSG: + elif message.message_type == MessageTypes.FILE.value: # broadcast hook scripts and custom service files everywhere - filetype = msg.gettlv(coreapi.CORE_TLV_FILE_TYPE) - if filetype is not None and \ - (filetype[:5] == "hook:" or filetype[:8] == "service:"): + filetype = message.get_tlv(FileTlvs.TYPE.value) + if filetype is not None and (filetype[:5] == "hook:" or filetype[:8] == "service:"): servers = self.getservers() - - if msg.msgtype == coreapi.CORE_API_LINK_MSG: + if message.message_type == MessageTypes.LINK.value: # prepare a server list from two node numbers in link message - handle_locally, servers, msg = self.handlelinkmsg(msg) + handle_locally, servers, 
message = self.handlelinkmsg(message) elif len(servers) == 0: # check for servers based on node numbers in all messages but link - nn = msg.nodenumbers() + nn = message.node_numbers() if len(nn) == 0: return False servers = self.getserversbynode(nn[0]) @@ -560,95 +696,105 @@ class CoreBroker(ConfigurableManager): # by e.g. EMANE to use the link add message to keep counts of # interfaces on other servers) for handler in self.handlers: - handler(msg) + handler(message) # Perform any message forwarding - handle_locally |= self.forwardmsg(msg, servers) + handle_locally |= self.forwardmsg(message, servers) return not handle_locally def setupserver(self, servername): - ''' Send the appropriate API messages for configuring the specified - emulation server. - ''' + """ + Send the appropriate API messages for configuring the specified emulation server. + + :param str servername: name of server to configure + :return: nothing + """ server = self.getserverbyname(servername) if server is None: - msg = 'ignoring unknown server: \'%s\'' % servername - self.session.warn(msg) + logger.warn("ignoring unknown server: %s" % servername) return if server.sock is None or server.host is None or server.port is None: - if self.verbose: - msg = 'ignoring disconnected server: \'%s\'' % servername - self.session.info(msg) + logger.info("ignoring disconnected server: %s" % servername) return - # communicate this session's current state to the server - tlvdata = coreapi.CoreEventTlv.pack(coreapi.CORE_TLV_EVENT_TYPE, - self.session.getstate()) + + # communicate this session"s current state to the server + tlvdata = coreapi.CoreEventTlv.pack(EventTlvs.TYPE.value, self.session.state) msg = coreapi.CoreEventMessage.pack(0, tlvdata) server.sock.send(msg) + # send a Configuration message for the broker object and inform the # server of its local name tlvdata = "" - tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_OBJ, "broker") - tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_TYPE, - coreapi.CONF_TYPE_FLAGS_UPDATE) - tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_DATA_TYPES, - (coreapi.CONF_DATA_TYPE_STRING,)) - tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_VALUES, - "%s:%s:%s" % (server.name, server.host, server.port)) - tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_SESSION, - "%s" % self.session.sessionid) + tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.OBJECT.value, "broker") + tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.TYPE.value, ConfigFlags.UPDATE.value) + tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.DATA_TYPES.value, (ConfigDataTypes.STRING.value,)) + tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.VALUES.value, + "%s:%s:%s" % (server.name, server.host, server.port)) + tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.SESSION.value, "%s" % self.session.session_id) msg = coreapi.CoreConfMessage.pack(0, tlvdata) server.sock.send(msg) @staticmethod def fixupremotetty(msghdr, msgdata, host): - ''' When an interactive TTY request comes from the GUI, snoop the reply - and add an SSH command to the appropriate remote server. - ''' - msgtype, msgflags, msglen = coreapi.CoreMessage.unpackhdr(msghdr) - msgcls = coreapi.msg_class(msgtype) + """ + When an interactive TTY request comes from the GUI, snoop the reply + and add an SSH command to the appropriate remote server. 
+ + :param msghdr: message header + :param msgdata: message data + :param str host: host address + :return: packed core execute tlv data + """ + msgtype, msgflags, msglen = coreapi.CoreMessage.unpack_header(msghdr) + msgcls = coreapi.CLASS_MAP[msgtype] msg = msgcls(msgflags, msghdr, msgdata) - nodenum = msg.gettlv(coreapi.CORE_TLV_EXEC_NODE) - execnum = msg.gettlv(coreapi.CORE_TLV_EXEC_NUM) - cmd = msg.gettlv(coreapi.CORE_TLV_EXEC_CMD) - res = msg.gettlv(coreapi.CORE_TLV_EXEC_RESULT) + nodenum = msg.get_tlv(ExecuteTlvs.NODE.value) + execnum = msg.get_tlv(ExecuteTlvs.NUMBER.value) + cmd = msg.get_tlv(ExecuteTlvs.COMMAND.value) + res = msg.get_tlv(ExecuteTlvs.RESULT.value) tlvdata = "" - tlvdata += coreapi.CoreExecTlv.pack(coreapi.CORE_TLV_EXEC_NODE, nodenum) - tlvdata += coreapi.CoreExecTlv.pack(coreapi.CORE_TLV_EXEC_NUM, execnum) - tlvdata += coreapi.CoreExecTlv.pack(coreapi.CORE_TLV_EXEC_CMD, cmd) + tlvdata += coreapi.CoreExecuteTlv.pack(ExecuteTlvs.NODE.value, nodenum) + tlvdata += coreapi.CoreExecuteTlv.pack(ExecuteTlvs.NUMBER.value, execnum) + tlvdata += coreapi.CoreExecuteTlv.pack(ExecuteTlvs.COMMAND.value, cmd) title = "\\\"CORE: n%s @ %s\\\"" % (nodenum, host) res = "ssh -X -f " + host + " xterm -e " + res - tlvdata += coreapi.CoreExecTlv.pack(coreapi.CORE_TLV_EXEC_RESULT, res) + tlvdata += coreapi.CoreExecuteTlv.pack(ExecuteTlvs.RESULT.value, res) return coreapi.CoreExecMessage.pack(msgflags, tlvdata) - def handlenodemsg(self, msg): - ''' Determine and return the servers to which this node message should - be forwarded. Also keep track of link-layer nodes and the mapping of - nodes to servers. - ''' + def handlenodemsg(self, message): + """ + Determine and return the servers to which this node message should + be forwarded. Also keep track of link-layer nodes and the mapping of + nodes to servers. 
+ + :param core.api.coreapi.CoreMessage message: message to handle + :return: boolean for handling locally and set of servers + :rtype: tuple + """ servers = set() handle_locally = False serverfiletxt = None + # snoop Node Message for emulation server TLV and record mapping - n = msg.tlvdata[coreapi.CORE_TLV_NODE_NUMBER] + n = message.tlv_data[NodeTlvs.NUMBER.value] + # replicate link-layer nodes on all servers - nodetype = msg.gettlv(coreapi.CORE_TLV_NODE_TYPE) + nodetype = message.get_tlv(NodeTlvs.TYPE.value) if nodetype is not None: try: - nodecls = coreapi.node_class(nodetype) + nodecls = nodeutils.get_node_class(NodeTypes(nodetype)) except KeyError: - self.session.warn("broker invalid node type %s" % nodetype) + logger.warn("broker invalid node type %s" % nodetype) return handle_locally, servers if nodecls is None: - self.session.warn("broker unimplemented node type %s" % nodetype) + logger.warn("broker unimplemented node type %s" % nodetype) return handle_locally, servers - if issubclass(nodecls, PyCoreNet) and \ - nodetype != coreapi.CORE_NODE_WLAN: + if issubclass(nodecls, PyCoreNet) and nodetype != NodeTypes.WIRELESS_LAN.value: # network node replicated on all servers; could be optimized - # don't replicate WLANs, because ebtables rules won't work + # don"t replicate WLANs, because ebtables rules won"t work servers = self.getservers() handle_locally = True self.addnet(n) @@ -658,7 +804,7 @@ class CoreBroker(ConfigurableManager): # nodes are replicated across all server return handle_locally, servers elif issubclass(nodecls, PyCoreNode): - name = msg.gettlv(coreapi.CORE_TLV_NODE_NAME) + name = message.get_tlv(NodeTlvs.NAME.value) if name: serverfiletxt = "%s %s %s" % (n, name, nodecls) if issubclass(nodecls, PhysicalNode): @@ -666,7 +812,7 @@ class CoreBroker(ConfigurableManager): self.addphys(n) # emulation server TLV specifies server - servername = msg.gettlv(coreapi.CORE_TLV_NODE_EMUSRV) + servername = message.get_tlv(NodeTlvs.EMULATION_SERVER.value) server = self.getserverbyname(servername) if server is not None: self.addnodemap(server, n) @@ -674,29 +820,36 @@ class CoreBroker(ConfigurableManager): servers.add(server) if serverfiletxt and self.session.master: self.writenodeserver(serverfiletxt, server) + # hook to update coordinates of physical nodes - if n in self.phys: - self.session.mobility.physnodeupdateposition(msg) + if n in self.physical_nodes: + self.session.mobility.physnodeupdateposition(message) + return handle_locally, servers - def handlelinkmsg(self, msg): - ''' Determine and return the servers to which this link message should - be forwarded. Also build tunnels between different servers or add - opaque data to the link message before forwarding. - ''' + def handlelinkmsg(self, message): + """ + Determine and return the servers to which this link message should + be forwarded. Also build tunnels between different servers or add + opaque data to the link message before forwarding. 
+ + :param core.api.coreapi.CoreMessage message: message to handle + :return: boolean to handle locally, a set of server, and message + :rtype: tuple + """ servers = set() handle_locally = False # determine link message destination using non-network nodes - nn = msg.nodenumbers() - if nn[0] in self.nets: - if nn[1] in self.nets: + nn = message.node_numbers() + if nn[0] in self.network_nodes: + if nn[1] in self.network_nodes: # two network nodes linked together - prevent loops caused by # the automatic tunnelling handle_locally = True else: servers = self.getserversbynode(nn[1]) - elif nn[1] in self.nets: + elif nn[1] in self.network_nodes: servers = self.getserversbynode(nn[0]) else: servers1 = self.getserversbynode(nn[0]) @@ -727,136 +880,191 @@ class CoreBroker(ConfigurableManager): elif len(servers2) == 0: localn = nn[1] if host is None: - host = self.getlinkendpoint(msg, localn == nn[0]) + host = self.getlinkendpoint(message, localn == nn[0]) if localn is None: - msg = self.addlinkendpoints(msg, servers1, servers2) - elif msg.flags & coreapi.CORE_API_ADD_FLAG: + message = self.addlinkendpoints(message, servers1, servers2) + elif message.flags & MessageFlags.ADD.value: self.addtunnel(host, nn[0], nn[1], localn) - elif msg.flags & coreapi.CORE_API_DEL_FLAG: + elif message.flags & MessageFlags.DELETE.value: self.deltunnel(nn[0], nn[1]) handle_locally = False else: servers = servers1.union(servers2) - return handle_locally, servers, msg + return handle_locally, servers, message - def addlinkendpoints(self, msg, servers1, servers2): - ''' For a link message that is not handled locally, inform the remote - servers of the IP addresses used as tunnel endpoints by adding - opaque data to the link message. - ''' - ip1 = '' + def addlinkendpoints(self, message, servers1, servers2): + """ + For a link message that is not handled locally, inform the remote + servers of the IP addresses used as tunnel endpoints by adding + opaque data to the link message. + + :param core.api.coreapi.CoreMessage message: message to link end points + :param servers1: + :param servers2: + :return: core link message + :rtype: coreapi.CoreLinkMessage + """ + ip1 = "" for server in servers1: if server.host is not None: ip1 = server.host break - ip2 = '' + ip2 = "" for server in servers2: if server.host is not None: ip2 = server.host break - tlvdata = msg.rawmsg[coreapi.CoreMessage.hdrsiz:] - tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_OPAQUE, - "%s:%s" % (ip1, ip2)) - newraw = coreapi.CoreLinkMessage.pack(msg.flags, tlvdata) - msghdr = newraw[:coreapi.CoreMessage.hdrsiz] - return coreapi.CoreLinkMessage(msg.flags, msghdr, tlvdata) + tlvdata = message.raw_message[coreapi.CoreMessage.header_len:] + tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.OPAQUE.value, "%s:%s" % (ip1, ip2)) + newraw = coreapi.CoreLinkMessage.pack(message.flags, tlvdata) + msghdr = newraw[:coreapi.CoreMessage.header_len] + return coreapi.CoreLinkMessage(message.flags, msghdr, tlvdata) def getlinkendpoint(self, msg, first_is_local): - ''' A link message between two different servers has been received, - and we need to determine the tunnel endpoint. First look for - opaque data in the link message, otherwise use the IP of the message - sender (the master server). - ''' + """ + A link message between two different servers has been received, + and we need to determine the tunnel endpoint. First look for + opaque data in the link message, otherwise use the IP of the message + sender (the master server). 
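
When a link spans two remote servers, the master packs the two endpoint addresses into the link message's opaque TLV as "ip1:ip2", and each side then picks the address of the other endpoint. A sketch of that convention with illustrative addresses:

    def pack_endpoints(ip1, ip2):
        return "%s:%s" % (ip1, ip2)

    def pick_remote_endpoint(opaque, first_is_local):
        ip1, ip2 = opaque.split(":")[:2]
        host = ip2 if first_is_local else ip1
        return host or None

    opaque = pack_endpoints("10.0.0.1", "10.0.0.2")
    assert pick_remote_endpoint(opaque, first_is_local=True) == "10.0.0.2"
    assert pick_remote_endpoint(opaque, first_is_local=False) == "10.0.0.1"
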
+ + :param coreapi.CoreLinkMessage msg: + :param bool first_is_local: is first local + :return: host address + :rtype: str + """ host = None - opaque = msg.gettlv(coreapi.CORE_TLV_LINK_OPAQUE) + opaque = msg.get_tlv(LinkTlvs.OPAQUE.value) if opaque is not None: if first_is_local: - host = opaque.split(':')[1] + host = opaque.split(":")[1] else: - host = opaque.split(':')[0] + host = opaque.split(":")[0] if host == "": host = None - if host is None: + if host is None and self.session_handler.client_address != "": # get IP address from API message sender (master) - self.session._handlerslock.acquire() - for h in self.session._handlers: - if h.client_address != "": - host = h.client_address[0] - self.session._handlerslock.release() + host = self.session_handler.client_address[0] return host def handlerawmsg(self, msg): - ''' Helper to invoke handlemsg() using raw (packed) message bytes. - ''' - hdr = msg[:coreapi.CoreMessage.hdrsiz] - msgtype, flags, msglen = coreapi.CoreMessage.unpackhdr(hdr) - msgcls = coreapi.msg_class(msgtype) - return self.handlemsg(msgcls(flags, hdr, msg[coreapi.CoreMessage.hdrsiz:])) + """ + Helper to invoke message handler, using raw (packed) message bytes. - def forwardmsg(self, msg, servers): - ''' Forward API message to all given servers. + :param msg: raw message butes + :return: should handle locally or not + :rtype: bool + """ + hdr = msg[:coreapi.CoreMessage.header_len] + msgtype, flags, msglen = coreapi.CoreMessage.unpack_header(hdr) + msgcls = coreapi.CLASS_MAP[msgtype] + return self.handle_message(msgcls(flags, hdr, msg[coreapi.CoreMessage.header_len:])) - Return True if an empty host/port is encountered, indicating - the message should be handled locally. - ''' + def forwardmsg(self, message, servers): + """ + Forward API message to all given servers. + + Return True if an empty host/port is encountered, indicating + the message should be handled locally. 
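
forwardmsg() treats a server with no host and port as this machine, so such a message is flagged for local handling while still being sent to every connected peer. A compact sketch of that rule (send callables stand in for the real server sockets):

    def forward(raw_message, servers):
        # servers: list of (host, port, send) tuples
        handle_locally = len(servers) == 0
        for host, port, send in servers:
            if host is None and port is None:
                handle_locally = True     # empty host/port marks the local emulation server
            else:
                send(raw_message)
        return handle_locally

    sent = []
    assert forward(b"msg", [(None, None, None), ("10.0.0.2", 4038, sent.append)])
    assert sent == [b"msg"]
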
+ + :param core.api.coreapi.CoreMessage message: message to forward + :param list servers: server to forward message to + :return: handle locally value + :rtype: bool + """ handle_locally = len(servers) == 0 for server in servers: if server.host is None and server.port is None: # local emulation server, handle this locally handle_locally = True elif server.sock is None: - self.session.info("server %s @ %s:%s is disconnected" % \ - (server.name, server.host, server.port)) + logger.info("server %s @ %s:%s is disconnected" % ( + server.name, server.host, server.port)) else: - server.sock.send(msg.rawmsg) + server.sock.send(message.raw_message) return handle_locally def writeservers(self): - ''' Write the server list to a text file in the session directory upon + """ + Write the server list to a text file in the session directory upon startup: /tmp/pycore.nnnnn/servers - ''' + + :return: nothing + """ servers = self.getservers() - filename = os.path.join(self.session.sessiondir, "servers") + filename = os.path.join(self.session.session_dir, "servers") master = self.session_id_master if master is None: - master = self.session.sessionid + master = self.session.session_id try: - with open(filename, 'w') as f: + with open(filename, "w") as f: f.write("master=%s\n" % master) for server in servers: if server.name == "localhost": continue try: - (lhost, lport) = server.sock.getsockname() - except: + lhost, lport = server.sock.getsockname() + except IOError: lhost, lport = None, None - f.write('%s %s %s %s %s\n' % (server.name, server.host, - server.port, lhost, lport)) - except Exception as e: - msg = 'Error writing server list to the file: \'%s\'\n%s' % \ - (filename, e) - self.session.warn(msg) + f.write("%s %s %s %s %s\n" % (server.name, server.host, server.port, lhost, lport)) + except IOError: + logger.exception("error writing server list to the file: %s" % filename) def writenodeserver(self, nodestr, server): - ''' Creates a /tmp/pycore.nnnnn/nX.conf/server file having the node + """ + Creates a /tmp/pycore.nnnnn/nX.conf/server file having the node and server info. This may be used by scripts for accessing nodes on other machines, much like local nodes may be accessed via the VnodeClient class. - ''' + + :param str nodestr: node string + :param CoreServer server: core server + :return: nothing + """ serverstr = "%s %s %s" % (server.name, server.host, server.port) name = nodestr.split()[1] - dirname = os.path.join(self.session.sessiondir, name + ".conf") + dirname = os.path.join(self.session.session_dir, name + ".conf") filename = os.path.join(dirname, "server") try: os.makedirs(dirname) except OSError: # directory may already exist from previous distributed run - pass + logger.exception("error creating directory: %s", dirname) + try: - with open(filename, 'w') as f: - f.write('%s\n%s\n' % (serverstr, nodestr)) - except Exception as e: - msg = 'Error writing server file \'%s\' for node %s:\n%s' % \ - (filename, name, e) - self.session.warn(msg) + with open(filename, "w") as f: + f.write("%s\n%s\n" % (serverstr, nodestr)) + except IOError: + logger.exception("error writing server file %s for node %s" % (filename, name)) + + def local_instantiation_complete(self): + """ + Set the local server"s instantiation-complete status to True. + + :return: nothing + """ + # TODO: do we really want to allow a localhost to not exist? 
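
writeservers() leaves a plain-text inventory in the session directory so outside scripts can locate the distributed servers: a master= line followed by one "name host port local-host local-port" line per remote server. A hypothetical /tmp/pycore.12345/servers might read:

    master=12345
    core2 10.0.0.2 4038 10.0.0.1 48321
    core3 10.0.0.3 4038 10.0.0.1 48322
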
+ with self.servers_lock: + server = self.servers.get("localhost") + if server is not None: + server.instantiation_complete = True + + if self.session_handler: + tlvdata = "" + tlvdata += coreapi.CoreEventTlv.pack(EventTlvs.TYPE.value, EventTypes.INSTANTIATION_COMPLETE.value) + msg = coreapi.CoreEventMessage.pack(0, tlvdata) + self.session_handler.sendall(msg) + + def instantiation_complete(self): + """ + Return True if all servers have completed instantiation, False + otherwise. + + :return: have all server completed instantiation + :rtype: bool + """ + with self.servers_lock: + for server in self.servers.itervalues(): + if not server.instantiation_complete: + return False + return True diff --git a/daemon/core/bsd/netgraph.py b/daemon/core/bsd/netgraph.py index 3cb9bffb..c1cd4eb0 100644 --- a/daemon/core/bsd/netgraph.py +++ b/daemon/core/bsd/netgraph.py @@ -3,63 +3,71 @@ # Copyright (c)2010-2012 the Boeing Company. # See the LICENSE file included in this distribution. # -# authors: core-dev@pf.itd.nrl.navy.mil +# authors: core-dev@pf.itd.nrl.navy.mil # -''' + +""" netgraph.py: Netgraph helper functions; for now these are wrappers around ngctl commands. -''' +""" import subprocess -from core.misc.utils import * -from core.constants import * -checkexec([NGCTL_BIN]) +from core import constants +from core.misc import utils + +utils.checkexec([constants.NGCTL_BIN]) + def createngnode(type, hookstr, name=None): - ''' Create a new Netgraph node of type and optionally assign name. The - hook string hookstr should contain two names. This is a string so - other commands may be inserted after the two names. - Return the name and netgraph ID of the new node. - ''' + """ + Create a new Netgraph node of type and optionally assign name. The + hook string hookstr should contain two names. This is a string so + other commands may be inserted after the two names. + Return the name and netgraph ID of the new node. + """ hook1 = hookstr.split()[0] ngcmd = "mkpeer %s %s \n show .%s" % (type, hookstr, hook1) - cmd = [NGCTL_BIN, "-f", "-"] - cmdid = subprocess.Popen(cmd, stdin = subprocess.PIPE, - stdout = subprocess.PIPE, - stderr = subprocess.STDOUT) - result, err = cmdid.communicate(input = ngcmd) # err will always be None + cmd = [constants.NGCTL_BIN, "-f", "-"] + cmdid = subprocess.Popen(cmd, stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + # err will always be None + result, err = cmdid.communicate(input=ngcmd) status = cmdid.wait() if status > 0: - raise Exception, "error creating Netgraph node %s (%s): %s" % \ - (type, ngcmd, result) + raise Exception("error creating Netgraph node %s (%s): %s" % (type, ngcmd, result)) results = result.split() ngname = results[1] ngid = results[5] if name: - check_call([NGCTL_BIN, "name", "[0x%s]:" % ngid, name]) - return (ngname, ngid) + utils.check_call([constants.NGCTL_BIN, "name", "[0x%s]:" % ngid, name]) + return ngname, ngid + def destroyngnode(name): - ''' Shutdown a Netgraph node having the given name. - ''' - check_call([NGCTL_BIN, "shutdown", "%s:" % name]) + """ Shutdown a Netgraph node having the given name. + """ + utils.check_call([constants.NGCTL_BIN, "shutdown", "%s:" % name]) + def connectngnodes(name1, name2, hook1, hook2): - ''' Connect two hooks of two Netgraph nodes given by their names. - ''' + """ Connect two hooks of two Netgraph nodes given by their names. 
+ """ node1 = "%s:" % name1 node2 = "%s:" % name2 - check_call([NGCTL_BIN, "connect", node1, node2, hook1, hook2]) + utils.check_call([constants.NGCTL_BIN, "connect", node1, node2, hook1, hook2]) + def ngmessage(name, msg): - ''' Send a Netgraph message to the node named name. - ''' - cmd = [NGCTL_BIN, "msg", "%s:" % name] + msg - check_call(cmd) + """ Send a Netgraph message to the node named name. + """ + cmd = [constants.NGCTL_BIN, "msg", "%s:" % name] + msg + utils.check_call(cmd) + def ngloadkernelmodule(name): - ''' Load a kernel module by invoking kldstat. This is needed for the + """ Load a kernel module by invoking kldstat. This is needed for the ng_ether module which automatically creates Netgraph nodes when loaded. - ''' - mutecall(["kldload", name]) + """ + utils.mutecall(["kldload", name]) diff --git a/daemon/core/bsd/nodes.py b/daemon/core/bsd/nodes.py index 2eea4815..df696796 100644 --- a/daemon/core/bsd/nodes.py +++ b/daemon/core/bsd/nodes.py @@ -6,172 +6,173 @@ # author: core-dev@pf.itd.nrl.navy.mil # -''' +""" nodes.py: definition of CoreNode classes and other node classes that inherit from the CoreNode, implementing specific node types. -''' +""" -from vnode import * -from vnet import * -from core.constants import * -from core.misc.ipaddr import * +import socket + +from core import constants from core.api import coreapi +from core.bsd.netgraph import connectngnodes from core.bsd.netgraph import ngloadkernelmodule +from core.bsd.vnet import NetgraphNet +from core.bsd.vnet import NetgraphPipeNet +from core.bsd.vnode import JailNode +from core.enumerations import LinkTlvs +from core.enumerations import LinkTypes +from core.enumerations import NodeTypes +from core.enumerations import RegisterTlvs +from core.misc import ipaddress +from core.misc import utils + +utils.checkexec([constants.IFCONFIG_BIN]) -checkexec([IFCONFIG_BIN]) class CoreNode(JailNode): - apitype = coreapi.CORE_NODE_DEF + apitype = NodeTypes.DEFAULT.value + class PtpNet(NetgraphPipeNet): def tonodemsg(self, flags): - ''' Do not generate a Node Message for point-to-point links. They are + """ Do not generate a Node Message for point-to-point links. They are built using a link message instead. - ''' + """ pass def tolinkmsgs(self, flags): - ''' Build CORE API TLVs for a point-to-point link. One Link message + """ Build CORE API TLVs for a point-to-point link. One Link message describes this network. 
- ''' + """ tlvdata = "" if len(self._netif) != 2: return tlvdata (if1, if2) = self._netif.items() if1 = if1[1] if2 = if2[1] - tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_N1NUMBER, - if1.node.objid) - tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_N2NUMBER, - if2.node.objid) - delay = if1.getparam('delay') - bw = if1.getparam('bw') - loss = if1.getparam('loss') - duplicate = if1.getparam('duplicate') - jitter = if1.getparam('jitter') + tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.N1_NUMBER.value, if1.node.objid) + tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.N2_NUMBER.value, if2.node.objid) + delay = if1.getparam("delay") + bw = if1.getparam("bw") + loss = if1.getparam("loss") + duplicate = if1.getparam("duplicate") + jitter = if1.getparam("jitter") if delay is not None: - tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_DELAY, - delay) + tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.DELAY.value, delay) if bw is not None: - tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_BW, bw) + tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.BANDWIDTH.value, bw) if loss is not None: - tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_PER, - str(loss)) + tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.PER.value, str(loss)) if duplicate is not None: - tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_DUP, - str(duplicate)) + tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.DUP.value, str(duplicate)) if jitter is not None: - tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_JITTER, - jitter) - tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_TYPE, - self.linktype) + tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.JITTER.value, jitter) + tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.TYPE.value, self.linktype) - tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_IF1NUM, \ - if1.node.getifindex(if1)) + tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.INTERFACE1_NUMBER.value, if1.node.getifindex(if1)) if if1.hwaddr: - tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_IF1MAC, - if1.hwaddr) + tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.INTERFACE1_MAC.value, if1.hwaddr) for addr in if1.addrlist: - (ip, sep, mask) = addr.partition('/') + (ip, sep, mask) = addr.partition("/") mask = int(mask) - if isIPv4Address(ip): - family = AF_INET - tlvtypeip = coreapi.CORE_TLV_LINK_IF1IP4 - tlvtypemask = coreapi.CORE_TLV_LINK_IF1IP4MASK + if ipaddress.is_ipv4_address(ip): + family = socket.AF_INET + tlvtypeip = LinkTlvs.INTERFACE1_IP4.value + tlvtypemask = LinkTlvs.INTERFACE1_IP4_MASK else: - family = AF_INET6 - tlvtypeip = coreapi.CORE_TLV_LINK_IF1IP6 - tlvtypemask = coreapi.CORE_TLV_LINK_IF1IP6MASK + family = socket.AF_INET6 + tlvtypeip = LinkTlvs.INTERFACE1_IP6.value + tlvtypemask = LinkTlvs.INTERFACE1_IP6_MASK.value ipl = socket.inet_pton(family, ip) - tlvdata += coreapi.CoreLinkTlv.pack(tlvtypeip, - IPAddr(af=family, addr=ipl)) + tlvdata += coreapi.CoreLinkTlv.pack(tlvtypeip, ipaddress.IpAddress(af=family, address=ipl)) tlvdata += coreapi.CoreLinkTlv.pack(tlvtypemask, mask) - tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_IF2NUM, \ - if2.node.getifindex(if2)) + tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.INTERFACE2_NUMBER.value, if2.node.getifindex(if2)) if if2.hwaddr: - tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_IF2MAC, - if2.hwaddr) + tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.INTERFACE2_MAC.value, if2.hwaddr) for addr in if2.addrlist: - (ip, sep, mask) = addr.partition('/') + (ip, sep, mask) = addr.partition("/") mask 
= int(mask) - if isIPv4Address(ip): - family = AF_INET - tlvtypeip = coreapi.CORE_TLV_LINK_IF2IP4 - tlvtypemask = coreapi.CORE_TLV_LINK_IF2IP4MASK + if ipaddress.is_ipv4_address(ip): + family = socket.AF_INET + tlvtypeip = LinkTlvs.INTERFACE2_IP4.value + tlvtypemask = LinkTlvs.INTERFACE2_IP4_MASK else: - family = AF_INET6 - tlvtypeip = coreapi.CORE_TLV_LINK_IF2IP6 - tlvtypemask = coreapi.CORE_TLV_LINK_IF2IP6MASK + family = socket.AF_INET6 + tlvtypeip = LinkTlvs.INTERFACE2_IP6.value + tlvtypemask = LinkTlvs.INTERFACE2_IP6_MASK.value ipl = socket.inet_pton(family, ip) - tlvdata += coreapi.CoreLinkTlv.pack(tlvtypeip, - IPAddr(af=family, addr=ipl)) + tlvdata += coreapi.CoreLinkTlv.pack(tlvtypeip, ipaddress.IpAddress(af=family, address=ipl)) tlvdata += coreapi.CoreLinkTlv.pack(tlvtypemask, mask) msg = coreapi.CoreLinkMessage.pack(flags, tlvdata) - return [msg,] + return [msg, ] + class SwitchNode(NetgraphNet): ngtype = "bridge" nghooks = "link0 link0\nmsg .link0 setpersistent" - apitype = coreapi.CORE_NODE_SWITCH + apitype = NodeTypes.SWITCH.value policy = "ACCEPT" + class HubNode(NetgraphNet): ngtype = "hub" nghooks = "link0 link0\nmsg .link0 setpersistent" - apitype = coreapi.CORE_NODE_HUB + apitype = NodeTypes.HUB.value policy = "ACCEPT" - + + class WlanNode(NetgraphNet): ngtype = "wlan" nghooks = "anchor anchor" - apitype = coreapi.CORE_NODE_WLAN - linktype = coreapi.CORE_LINK_WIRELESS + apitype = NodeTypes.WIRELESS_LAN.value + linktype = LinkTypes.WIRELESS.value policy = "DROP" - - def __init__(self, session, objid = None, name = None, verbose = False, - start = True, policy = None): + + def __init__(self, session, objid=None, name=None, verbose=False, + start=True, policy=None): NetgraphNet.__init__(self, session, objid, name, verbose, start, policy) # wireless model such as basic range self.model = None # mobility model such as scripted self.mobility = None - + def attach(self, netif): NetgraphNet.attach(self, netif) if self.model: netif.poshook = self.model._positioncallback if netif.node is None: return - (x,y,z) = netif.node.position.get() + x, y, z = netif.node.position.get() netif.poshook(netif, x, y, z) def setmodel(self, model, config): - ''' Mobility and wireless model. - ''' - if (self.verbose): + """ Mobility and wireless model. 
+ """ + if self.verbose: self.info("adding model %s" % model._name) - if model._type == coreapi.CORE_TLV_REG_WIRELESS: + if model._type == RegisterTlvs.WIRELESS.value: self.model = model(session=self.session, objid=self.objid, verbose=self.verbose, values=config) if self.model._positioncallback: for netif in self.netifs(): netif.poshook = self.model._positioncallback if netif.node is not None: - (x,y,z) = netif.node.position.get() + (x, y, z) = netif.node.position.get() netif.poshook(netif, x, y, z) self.model.setlinkparams() - elif model._type == coreapi.CORE_TLV_REG_MOBILITY: + elif model._type == RegisterTlvs.MOBILITY.value: self.mobility = model(session=self.session, objid=self.objid, verbose=self.verbose, values=config) class RJ45Node(NetgraphPipeNet): - apitype = coreapi.CORE_NODE_RJ45 + apitype = NodeTypes.RJ45.value policy = "ACCEPT" - def __init__(self, session, objid, name, verbose, start = True): + def __init__(self, session, objid, name, verbose, start=True): if start: ngloadkernelmodule("ng_ether") NetgraphPipeNet.__init__(self, session, objid, name, verbose, start) @@ -186,18 +187,18 @@ class RJ45Node(NetgraphPipeNet): p = "promisc" if not promisc: p = "-" + p - check_call([IFCONFIG_BIN, self.name, "up", p]) + utils.check_call([constants.IFCONFIG_BIN, self.name, "up", p]) def attach(self, netif): if len(self._netif) > 0: raise ValueError, \ - "RJ45 networks support at most 1 network interface" + "RJ45 networks support at most 1 network interface" NetgraphPipeNet.attach(self, netif) connectngnodes(self.ngname, self.name, self.gethook(), "lower") + class TunnelNode(NetgraphNet): ngtype = "pipe" nghooks = "upper lower" - apitype = coreapi.CORE_NODE_TUNNEL + apitype = NodeTypes.TUNNEL.value policy = "ACCEPT" - diff --git a/daemon/core/bsd/vnet.py b/daemon/core/bsd/vnet.py index a92eb849..0ff62dfb 100644 --- a/daemon/core/bsd/vnet.py +++ b/daemon/core/bsd/vnet.py @@ -5,25 +5,25 @@ # # authors: core-dev@pf.itd.nrl.navy.mil # -''' + +""" vnet.py: NetgraphNet and NetgraphPipeNet classes that implement virtual networks using the FreeBSD Netgraph subsystem. -''' +""" -import sys, threading +from core.bsd.netgraph import connectngnodes +from core.bsd.netgraph import createngnode +from core.bsd.netgraph import destroyngnode +from core.bsd.netgraph import ngmessage +from core.coreobj import PyCoreNet -from core.misc.utils import * -from core.constants import * -from core.coreobj import PyCoreNet, PyCoreObj -from core.bsd.netgraph import * -from core.bsd.vnode import VEth class NetgraphNet(PyCoreNet): ngtype = None nghooks = () - def __init__(self, session, objid = None, name = None, verbose = False, - start = True, policy = None): + def __init__(self, session, objid=None, name=None, verbose=False, + start=True, policy=None): PyCoreNet.__init__(self, session, objid, name) if name is None: name = str(self.objid) @@ -40,8 +40,7 @@ class NetgraphNet(PyCoreNet): self.startup() def startup(self): - tmp, self.ngid = createngnode(type=self.ngtype, hookstr=self.nghooks, - name=self.ngname) + tmp, self.ngid = createngnode(type=self.ngtype, hookstr=self.nghooks, name=self.ngname) self.up = True def shutdown(self): @@ -62,13 +61,12 @@ class NetgraphNet(PyCoreNet): destroyngnode(self.ngname) def attach(self, netif): - ''' Attach an interface to this netgraph node. Create a pipe between + """ Attach an interface to this netgraph node. Create a pipe between the interface and the hub/switch/wlan node. (Note that the PtpNet subclass overrides this method.) 
- ''' + """ if self.up: - pipe = self.session.addobj(cls = NetgraphPipeNet, - verbose = self.verbose, start = True) + pipe = self.session.addobj(cls=NetgraphPipeNet, verbose=self.verbose, start=True) pipe.attach(netif) hook = "link%d" % len(self._netif) pipe.attachnet(self, hook) @@ -82,14 +80,16 @@ class NetgraphNet(PyCoreNet): def linked(self, netif1, netif2): # check if the network interfaces are attached to this network if self._netif[netif1] != netif1: - raise ValueError, "inconsistency for netif %s" % netif1.name + raise ValueError("inconsistency for netif %s" % netif1.name) if self._netif[netif2] != netif2: - raise ValueError, "inconsistency for netif %s" % netif2.name + raise ValueError("inconsistency for netif %s" % netif2.name) + try: linked = self._linked[netif1][netif2] except KeyError: linked = False self._linked[netif1][netif2] = linked + return linked def unlink(self, netif1, netif2): @@ -109,108 +109,106 @@ class NetgraphNet(PyCoreNet): self._linked[netif1][netif2] = True def linknet(self, net): - ''' Link this bridge with another by creating a veth pair and installing + """ Link this bridge with another by creating a veth pair and installing each device into each bridge. - ''' + """ raise NotImplementedError - def linkconfig(self, netif, bw = None, delay = None, - loss = None, duplicate = None, jitter = None, netif2=None): - ''' Set link effects by modifying the pipe connected to an interface. - ''' + def linkconfig(self, netif, bw=None, delay=None, + loss=None, duplicate=None, jitter=None, netif2=None): + """ Set link effects by modifying the pipe connected to an interface. + """ if not netif.pipe: - self.warn("linkconfig for %s but interface %s has no pipe" % \ - (self.name, netif.name)) + self.warn("linkconfig for %s but interface %s has no pipe" % (self.name, netif.name)) return - return netif.pipe.linkconfig(netif, bw, delay, loss, duplicate, jitter, - netif2) + return netif.pipe.linkconfig(netif, bw, delay, loss, duplicate, jitter, netif2) + class NetgraphPipeNet(NetgraphNet): ngtype = "pipe" nghooks = "upper lower" - def __init__(self, session, objid = None, name = None, verbose = False, - start = True, policy = None): + def __init__(self, session, objid=None, name=None, verbose=False, + start=True, policy=None): NetgraphNet.__init__(self, session, objid, name, verbose, start, policy) if start: # account for Ethernet header ngmessage(self.ngname, ["setcfg", "{", "header_offset=14", "}"]) def attach(self, netif): - ''' Attach an interface to this pipe node. + """ Attach an interface to this pipe node. The first interface is connected to the "upper" hook, the second connected to the "lower" hook. - ''' + """ if len(self._netif) > 1: raise ValueError, \ - "Netgraph pipes support at most 2 network interfaces" + "Netgraph pipes support at most 2 network interfaces" if self.up: hook = self.gethook() connectngnodes(self.ngname, netif.localname, hook, netif.hook) if netif.pipe: raise ValueError, \ - "Interface %s already attached to pipe %s" % \ - (netif.name, netif.pipe.name) + "Interface %s already attached to pipe %s" % \ + (netif.name, netif.pipe.name) netif.pipe = self self._netif[netif] = netif self._linked[netif] = {} def attachnet(self, net, hook): - ''' Attach another NetgraphNet to this pipe node. - ''' + """ Attach another NetgraphNet to this pipe node. + """ localhook = self.gethook() connectngnodes(self.ngname, net.ngname, localhook, hook) def gethook(self): - ''' Returns the first hook (e.g. "upper") then the second hook + """ Returns the first hook (e.g. 
"upper") then the second hook (e.g. "lower") based on the number of connections. - ''' + """ hooks = self.nghooks.split() if len(self._netif) == 0: return hooks[0] else: return hooks[1] - def linkconfig(self, netif, bw = None, delay = None, - loss = None, duplicate = None, jitter = None, netif2 = None): - ''' Set link effects by sending a Netgraph setcfg message to the pipe. - ''' - netif.setparam('bw', bw) - netif.setparam('delay', delay) - netif.setparam('loss', loss) - netif.setparam('duplicate', duplicate) - netif.setparam('jitter', jitter) + def linkconfig(self, netif, bw=None, delay=None, + loss=None, duplicate=None, jitter=None, netif2=None): + """ Set link effects by sending a Netgraph setcfg message to the pipe. + """ + netif.setparam("bw", bw) + netif.setparam("delay", delay) + netif.setparam("loss", loss) + netif.setparam("duplicate", duplicate) + netif.setparam("jitter", jitter) if not self.up: return params = [] upstream = [] downstream = [] if bw is not None: - if str(bw)=="0": - bw="-1" - params += ["bandwidth=%s" % bw,] + if str(bw) == "0": + bw = "-1" + params += ["bandwidth=%s" % bw, ] if delay is not None: - if str(delay)=="0": - delay="-1" - params += ["delay=%s" % delay,] + if str(delay) == "0": + delay = "-1" + params += ["delay=%s" % delay, ] if loss is not None: - if str(loss)=="0": - loss="-1" - upstream += ["BER=%s" % loss,] - downstream += ["BER=%s" % loss,] + if str(loss) == "0": + loss = "-1" + upstream += ["BER=%s" % loss, ] + downstream += ["BER=%s" % loss, ] if duplicate is not None: - if str(duplicate)=="0": - duplicate="-1" - upstream += ["duplicate=%s" % duplicate,] - downstream += ["duplicate=%s" % duplicate,] + if str(duplicate) == "0": + duplicate = "-1" + upstream += ["duplicate=%s" % duplicate, ] + downstream += ["duplicate=%s" % duplicate, ] if jitter: self.warn("jitter parameter ignored for link %s" % self.name) if len(params) > 0 or len(upstream) > 0 or len(downstream) > 0: - setcfg = ["setcfg", "{",] + params + setcfg = ["setcfg", "{", ] + params if len(upstream) > 0: - setcfg += ["upstream={",] + upstream + ["}",] + setcfg += ["upstream={", ] + upstream + ["}", ] if len(downstream) > 0: - setcfg += ["downstream={",] + downstream + ["}",] - setcfg += ["}",] + setcfg += ["downstream={", ] + downstream + ["}", ] + setcfg += ["}", ] ngmessage(self.ngname, setcfg) - diff --git a/daemon/core/bsd/vnode.py b/daemon/core/bsd/vnode.py index 9f723d21..df8c1c4b 100644 --- a/daemon/core/bsd/vnode.py +++ b/daemon/core/bsd/vnode.py @@ -3,27 +3,32 @@ # Copyright (c)2010-2012 the Boeing Company. # See the LICENSE file included in this distribution. # -# authors: core-dev@pf.itd.nrl.navy.mil +# authors: core-dev@pf.itd.nrl.navy.mil # -''' + +""" vnode.py: SimpleJailNode and JailNode classes that implement the FreeBSD jail-based virtual node. 
-''' +""" -import os, signal, sys, subprocess, threading, string -import random, time -from core.misc.utils import * -from core.constants import * -from core.coreobj import PyCoreObj, PyCoreNode, PyCoreNetIf, Position -from core.emane.nodes import EmaneNode -from core.bsd.netgraph import * +import os +import subprocess +import threading + +from core import constants +from core.bsd.netgraph import createngnode +from core.bsd.netgraph import destroyngnode +from core.coreobj import PyCoreNetIf +from core.coreobj import PyCoreNode +from core.misc import utils + +utils.checkexec([constants.IFCONFIG_BIN, constants.VIMAGE_BIN]) -checkexec([IFCONFIG_BIN, VIMAGE_BIN]) class VEth(PyCoreNetIf): - def __init__(self, node, name, localname, mtu = 1500, net = None, - start = True): - PyCoreNetIf.__init__(self, node = node, name = name, mtu = mtu) + def __init__(self, node, name, localname, mtu=1500, net=None, + start=True): + PyCoreNetIf.__init__(self, node=node, name=name, mtu=mtu) # name is the device name (e.g. ngeth0, ngeth1, etc.) before it is # installed in a node; the Netgraph name is renamed to localname # e.g. before install: name = ngeth0 localname = n0_0_123 @@ -45,7 +50,7 @@ class VEth(PyCoreNetIf): name=self.localname) self.name = ngname self.ngid = ngid - check_call([IFCONFIG_BIN, ngname, "up"]) + utils.check_call([constants.IFCONFIG_BIN, ngname, "up"]) self.up = True def shutdown(self): @@ -74,15 +79,18 @@ class VEth(PyCoreNetIf): def sethwaddr(self, addr): self.hwaddr = addr + class TunTap(PyCoreNetIf): - '''TUN/TAP virtual device in TAP mode''' - def __init__(self, node, name, localname, mtu = None, net = None, - start = True): + """TUN/TAP virtual device in TAP mode""" + + def __init__(self, node, name, localname, mtu=None, net=None, + start=True): raise NotImplementedError + class SimpleJailNode(PyCoreNode): - def __init__(self, session, objid = None, name = None, nodedir = None, - verbose = False): + def __init__(self, session, objid=None, name=None, nodedir=None, + verbose=False): PyCoreNode.__init__(self, session, objid, name) self.nodedir = nodedir self.verbose = verbose @@ -94,17 +102,17 @@ class SimpleJailNode(PyCoreNode): def startup(self): if self.up: raise Exception, "already up" - vimg = [VIMAGE_BIN, "-c", self.name] + vimg = [constants.VIMAGE_BIN, "-c", self.name] try: - os.spawnlp(os.P_WAIT, VIMAGE_BIN, *vimg) + os.spawnlp(os.P_WAIT, constants.VIMAGE_BIN, *vimg) except OSError: raise Exception, ("vimage command not found while running: %s" % \ - vimg) + vimg) self.info("bringing up loopback interface") - self.cmd([IFCONFIG_BIN, "lo0", "127.0.0.1"]) + self.cmd([constants.IFCONFIG_BIN, "lo0", "127.0.0.1"]) self.info("setting hostname: %s" % self.name) self.cmd(["hostname", self.name]) - self.cmd([SYSCTL_BIN, "vfs.morphing_symlinks=1"]) + self.cmd([constants.SYSCTL_BIN, "vfs.morphing_symlinks=1"]) self.up = True def shutdown(self): @@ -114,27 +122,26 @@ class SimpleJailNode(PyCoreNode): netif.shutdown() self._netif.clear() del self.session - vimg = [VIMAGE_BIN, "-d", self.name] + vimg = [constants.VIMAGE_BIN, "-d", self.name] try: - os.spawnlp(os.P_WAIT, VIMAGE_BIN, *vimg) + os.spawnlp(os.P_WAIT, constants.VIMAGE_BIN, *vimg) except OSError: - raise Exception, ("vimage command not found while running: %s" % \ - vimg) + raise Exception("vimage command not found while running: %s" % vimg) self.up = False - def cmd(self, args, wait = True): + def cmd(self, args, wait=True): if wait: mode = os.P_WAIT else: mode = os.P_NOWAIT - tmp = call([VIMAGE_BIN, self.name] + args, 
cwd=self.nodedir) + tmp = utils.call([constants.VIMAGE_BIN, self.name] + args, cwd=self.nodedir) if not wait: tmp = None if tmp: self.warn("cmd exited with status %s: %s" % (tmp, str(args))) return tmp - def cmdresult(self, args, wait = True): + def cmdresult(self, args, wait=True): cmdid, cmdin, cmdout, cmderr = self.popen(args) result = cmdout.read() result += cmderr.read() @@ -145,30 +152,30 @@ class SimpleJailNode(PyCoreNode): status = cmdid.wait() else: status = 0 - return (status, result) + return status, result def popen(self, args): - cmd = [VIMAGE_BIN, self.name] + cmd = [constants.VIMAGE_BIN, self.name] cmd.extend(args) - tmp = subprocess.Popen(cmd, stdin = subprocess.PIPE, - stdout = subprocess.PIPE, - stderr = subprocess.PIPE, cwd=self.nodedir) + tmp = subprocess.Popen(cmd, stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, cwd=self.nodedir) return tmp, tmp.stdin, tmp.stdout, tmp.stderr def icmd(self, args): - return os.spawnlp(os.P_WAIT, VIMAGE_BIN, VIMAGE_BIN, self.name, *args) + return os.spawnlp(os.P_WAIT, constants.VIMAGE_BIN, constants.VIMAGE_BIN, self.name, *args) - def term(self, sh = "/bin/sh"): - return os.spawnlp(os.P_WAIT, "xterm", "xterm", "-ut", - "-title", self.name, "-e", VIMAGE_BIN, self.name, sh) + def term(self, sh="/bin/sh"): + return os.spawnlp(os.P_WAIT, "xterm", "xterm", "-ut", + "-title", self.name, "-e", constants.VIMAGE_BIN, self.name, sh) - def termcmdstring(self, sh = "/bin/sh"): - ''' We add 'sudo' to the command string because the GUI runs as a + def termcmdstring(self, sh="/bin/sh"): + """ We add "sudo" to the command string because the GUI runs as a normal user. - ''' - return "cd %s && sudo %s %s %s" % (self.nodedir, VIMAGE_BIN, self.name, sh) + """ + return "cd %s && sudo %s %s %s" % (self.nodedir, constants.VIMAGE_BIN, self.name, sh) - def shcmd(self, cmdstr, sh = "/bin/sh"): + def shcmd(self, cmdstr, sh="/bin/sh"): return self.cmd([sh, "-c", cmdstr]) def boot(self): @@ -180,9 +187,9 @@ class SimpleJailNode(PyCoreNode): self.addsymlink(path=target, file=None) def umount(self, target): - self.info("unmounting '%s'" % target) + self.info("unmounting %s" % target) - def newveth(self, ifindex = None, ifname = None, net = None): + def newveth(self, ifindex=None, ifname=None, net=None): self.lock.acquire() try: if ifindex is None: @@ -191,15 +198,17 @@ class SimpleJailNode(PyCoreNode): ifname = "eth%d" % ifindex sessionid = self.session.shortsessionid() name = "n%s_%s_%s" % (self.objid, ifindex, sessionid) - localname = name + localname = name ifclass = VEth - veth = ifclass(node = self, name = name, localname = localname, - mtu = 1500, net = net, start = self.up) + veth = ifclass(node=self, name=name, localname=localname, + mtu=1500, net=net, start=self.up) if self.up: # install into jail - check_call([IFCONFIG_BIN, veth.name, "vnet", self.name]) + utils.check_call([constants.IFCONFIG_BIN, veth.name, "vnet", self.name]) + # rename from "ngeth0" to "eth0" - self.cmd([IFCONFIG_BIN, veth.name, "name", ifname]) + self.cmd([constants.IFCONFIG_BIN, veth.name, "name", ifname]) + veth.name = ifname try: self.addnetif(veth, ifindex) @@ -214,17 +223,17 @@ class SimpleJailNode(PyCoreNode): def sethwaddr(self, ifindex, addr): self._netif[ifindex].sethwaddr(addr) if self.up: - self.cmd([IFCONFIG_BIN, self.ifname(ifindex), "link", - str(addr)]) + self.cmd([constants.IFCONFIG_BIN, self.ifname(ifindex), "link", + str(addr)]) def addaddr(self, ifindex, addr): if self.up: - if ':' in addr: + if ":" in addr: family = "inet6" else: family = 
"inet" - self.cmd([IFCONFIG_BIN, self.ifname(ifindex), family, "alias", - str(addr)]) + self.cmd([constants.IFCONFIG_BIN, self.ifname(ifindex), family, "alias", + str(addr)]) self._netif[ifindex].addaddr(addr) def deladdr(self, ifindex, addr): @@ -233,40 +242,40 @@ class SimpleJailNode(PyCoreNode): except ValueError: self.warn("trying to delete unknown address: %s" % addr) if self.up: - if ':' in addr: + if ":" in addr: family = "inet6" else: family = "inet" - self.cmd([IFCONFIG_BIN, self.ifname(ifindex), family, "-alias", - str(addr)]) + self.cmd([constants.IFCONFIG_BIN, self.ifname(ifindex), family, "-alias", + str(addr)]) valid_deladdrtype = ("inet", "inet6", "inet6link") - def delalladdr(self, ifindex, addrtypes = valid_deladdrtype): - addr = self.getaddr(self.ifname(ifindex), rescan = True) + + def delalladdr(self, ifindex, addrtypes=valid_deladdrtype): + addr = self.getaddr(self.ifname(ifindex), rescan=True) for t in addrtypes: if t not in self.valid_deladdrtype: raise ValueError, "addr type must be in: " + \ - " ".join(self.valid_deladdrtype) + " ".join(self.valid_deladdrtype) for a in addr[t]: self.deladdr(ifindex, a) # update cached information - self.getaddr(self.ifname(ifindex), rescan = True) + self.getaddr(self.ifname(ifindex), rescan=True) def ifup(self, ifindex): if self.up: - self.cmd([IFCONFIG_BIN, self.ifname(ifindex), "up"]) + self.cmd([constants.IFCONFIG_BIN, self.ifname(ifindex), "up"]) - def newnetif(self, net = None, addrlist = [], hwaddr = None, - ifindex = None, ifname = None): + def newnetif(self, net=None, addrlist=[], hwaddr=None, + ifindex=None, ifname=None): self.lock.acquire() try: - ifindex = self.newveth(ifindex = ifindex, ifname = ifname, - net = net) + ifindex = self.newveth(ifindex=ifindex, ifname=ifname, net=net) if net is not None: self.attachnet(ifindex, net) if hwaddr: self.sethwaddr(ifindex, hwaddr) - for addr in maketuple(addrlist): + for addr in utils.maketuple(addrlist): self.addaddr(ifindex, addr) self.ifup(ifindex) return ifindex @@ -280,18 +289,17 @@ class SimpleJailNode(PyCoreNode): self._netif[ifindex].detachnet() def addfile(self, srcname, filename): - shcmd = "mkdir -p $(dirname '%s') && mv '%s' '%s' && sync" % \ - (filename, srcname, filename) + shcmd = 'mkdir -p $(dirname "%s") && mv "%s" "%s" && sync' % (filename, srcname, filename) self.shcmd(shcmd) - def getaddr(self, ifname, rescan = False): + def getaddr(self, ifname, rescan=False): return None - #return self.vnodeclient.getaddr(ifname = ifname, rescan = rescan) + # return self.vnodeclient.getaddr(ifname = ifname, rescan = rescan) def addsymlink(self, path, file): - ''' Create a symbolic link from /path/name/file -> + """ Create a symbolic link from /path/name/file -> /tmp/pycore.nnnnn/@.conf/path.name/file - ''' + """ dirname = path if dirname and dirname[0] == "/": dirname = dirname[1:] @@ -316,14 +324,14 @@ class SimpleJailNode(PyCoreNode): self.info("creating symlink %s -> %s" % (pathname, sym)) os.symlink(sym, pathname) -class JailNode(SimpleJailNode): - def __init__(self, session, objid = None, name = None, - nodedir = None, bootsh = "boot.sh", verbose = False, - start = True): - super(JailNode, self).__init__(session = session, objid = objid, - name = name, nodedir = nodedir, - verbose = verbose) +class JailNode(SimpleJailNode): + def __init__(self, session, objid=None, name=None, + nodedir=None, bootsh="boot.sh", verbose=False, + start=True): + super(JailNode, self).__init__(session=session, objid=objid, + name=name, nodedir=nodedir, + verbose=verbose) self.bootsh = bootsh 
if not start: return @@ -341,8 +349,8 @@ class JailNode(SimpleJailNode): self.lock.acquire() try: super(JailNode, self).startup() - #self.privatedir("/var/run") - #self.privatedir("/var/log") + # self.privatedir("/var/run") + # self.privatedir("/var/log") finally: self.lock.release() @@ -351,7 +359,7 @@ class JailNode(SimpleJailNode): return self.lock.acquire() # services are instead stopped when session enters datacollect state - #self.session.services.stopnodeservices(self) + # self.session.services.stopnodeservices(self) try: super(JailNode, self).shutdown() finally: @@ -362,7 +370,7 @@ class JailNode(SimpleJailNode): if path[0] != "/": raise ValueError, "path not fully qualified: " + path hostpath = os.path.join(self.nodedir, - os.path.normpath(path).strip('/').replace('/', '.')) + os.path.normpath(path).strip("/").replace("/", ".")) try: os.mkdir(hostpath) except OSError: @@ -371,9 +379,9 @@ class JailNode(SimpleJailNode): raise Exception, e self.mount(hostpath, path) - def opennodefile(self, filename, mode = "w"): + def opennodefile(self, filename, mode="w"): dirname, basename = os.path.split(filename) - #self.addsymlink(path=dirname, file=basename) + # self.addsymlink(path=dirname, file=basename) if not basename: raise ValueError, "no basename for filename: " + filename if dirname and dirname[0] == "/": @@ -381,14 +389,13 @@ class JailNode(SimpleJailNode): dirname = dirname.replace("/", ".") dirname = os.path.join(self.nodedir, dirname) if not os.path.isdir(dirname): - os.makedirs(dirname, mode = 0755) + os.makedirs(dirname, mode=0755) hostfilename = os.path.join(dirname, basename) return open(hostfilename, mode) - def nodefile(self, filename, contents, mode = 0644): + def nodefile(self, filename, contents, mode=0644): f = self.opennodefile(filename, "w") f.write(contents) os.chmod(f.name, mode) f.close() - self.info("created nodefile: '%s'; mode: 0%o" % (f.name, mode)) - + self.info("created nodefile: %s; mode: 0%o" % (f.name, mode)) diff --git a/daemon/core/conf.py b/daemon/core/conf.py index 08967729..3ccdb75d 100644 --- a/daemon/core/conf.py +++ b/daemon/core/conf.py @@ -1,81 +1,112 @@ -# -# CORE -# Copyright (c)2012 the Boeing Company. -# See the LICENSE file included in this distribution. -# -# authors: Jeff Ahrenholz -# -''' -conf.py: common support for configurable objects -''' +""" +Common support for configurable CORE objects. +""" + import string -from core.api import coreapi + +from core.data import ConfigData +from core.enumerations import ConfigDataTypes +from core.enumerations import ConfigFlags +from core.misc import log + +logger = log.get_logger(__name__) + class ConfigurableManager(object): - ''' A generic class for managing Configurables. This class can register - with a session to receive Config Messages for setting some parameters - for itself or for the Configurables that it manages. - ''' + """ + A generic class for managing Configurables. This class can register + with a session to receive Config Messages for setting some parameters + for itself or for the Configurables that it manages. + """ # name corresponds to configuration object field - _name = "" - # type corresponds with register message types - _type = None - - def __init__(self, session=None): - self.session = session - self.session.addconfobj(self._name, self._type, self.configure) - # Configurable key=values, indexed by node number + name = "" + + # type corresponds with register message types + config_type = None + + def __init__(self): + """ + Creates a ConfigurableManager instance. 
+ + :param core.session.Session session: session this manager is tied to + :return: nothing + """ + # configurable key=values, indexed by node number self.configs = {} - - def configure(self, session, msg): - ''' Handle configure messages. The configuration message sent to a - ConfigurableManager usually is used to: - 1. Request a list of Configurables (request flag) - 2. Reset manager and clear configs (reset flag) - 3. Send values that configure the manager or one of its - Configurables - - Returns any reply messages. - ''' - objname = msg.gettlv(coreapi.CORE_TLV_CONF_OBJ) - conftype = msg.gettlv(coreapi.CORE_TLV_CONF_TYPE) - if conftype == coreapi.CONF_TYPE_FLAGS_REQUEST: - return self.configure_request(msg) - elif conftype == coreapi.CONF_TYPE_FLAGS_RESET: - if objname == "all" or objname == self._name: - return self.configure_reset(msg) + # TODO: fix the need for this and isolate to the mobility class that wants it + self._modelclsmap = {} + + def configure(self, session, config_data): + """ + Handle configure messages. The configuration message sent to a + ConfigurableManager usually is used to: + 1. Request a list of Configurables (request flag) + 2. Reset manager and clear configs (reset flag) + 3. Send values that configure the manager or one of its + Configurables + + Returns any reply messages. + + :param core.session.Session session: CORE session object + :param ConfigData config_data: configuration data for carrying out a configuration + :return: response messages + """ + + if config_data.type == ConfigFlags.REQUEST.value: + return self.configure_request(config_data) + elif config_data.type == ConfigFlags.RESET.value: + return self.configure_reset(config_data) else: - return self.configure_values(msg, - msg.gettlv(coreapi.CORE_TLV_CONF_VALUES)) + return self.configure_values(config_data) - def configure_request(self, msg): - ''' Request configuration data. - ''' + def configure_request(self, config_data): + """ + Request configuration data. + + :param ConfigData config_data: configuration data for carrying out a configuration + :return: nothing + """ return None - def configure_reset(self, msg): - ''' By default, resets this manager to clear configs. - ''' + def configure_reset(self, config_data): + """ + By default, resets this manager to clear configs. + + :param ConfigData config_data: configuration data for carrying out a configuration + :return: reset response messages, or None + """ return self.reset() - - def configure_values(self, msg, values): - ''' Values have been sent to this manager. - ''' + + def configure_values(self, config_data): + """ + Values have been sent to this manager. + + :param ConfigData config_data: configuration data for carrying out a configuration + :return: nothing + """ return None - - def configure_values_keyvalues(self, msg, values, target, keys): - ''' Helper that can be used for configure_values for parsing in - 'key=value' strings from a values field. The key name must be - in the keys list, and target.key=value is set. - ''' + + def configure_values_keyvalues(self, config_data, target, keys): + """ + Helper that can be used for configure_values for parsing in + 'key=value' strings from a values field. The key name must be + in the keys list, and target.key=value is set. 
+ + :param ConfigData config_data: configuration data for carrying out a configuration + :param target: target to set attribute values on + :param keys: list of keys to verify validity + :return: nothing + """ + values = config_data.data_values + if values is None: return None + kvs = values.split('|') for kv in kvs: try: - # key=value - (key, value) = kv.split('=', 1) + key, value = kv.split('=', 1) if value is not None and not value.strip(): value = None except ValueError: @@ -83,25 +114,38 @@ class ConfigurableManager(object): key = keys[kvs.index(kv)] value = kv if key not in keys: - raise ValueError, "invalid key: %s" % key + raise ValueError("invalid key: %s" % key) if value is not None: setattr(target, key, value) + return None def reset(self): + """ + Reset functionality for the configurable class. + + :return: nothing + """ return None - + def setconfig(self, nodenum, conftype, values): - ''' add configuration values for a node to a dictionary; values are - usually received from a Configuration Message, and may refer to a - node for which no object exists yet - ''' + """ + Add configuration values for a node to a dictionary; values are + usually received from a Configuration Message, and may refer to a + node for which no object exists yet + + :param int nodenum: node id + :param conftype: configuration types + :param values: configuration values + :return: nothing + """ + logger.info("setting config for node(%s): %s - %s", nodenum, conftype, values) conflist = [] if nodenum in self.configs: oldlist = self.configs[nodenum] found = False - for (t, v) in oldlist: - if (t == conftype): + for t, v in oldlist: + if t == conftype: # replace existing config found = True conflist.append((conftype, values)) @@ -114,34 +158,52 @@ class ConfigurableManager(object): self.configs[nodenum] = conflist def getconfig(self, nodenum, conftype, defaultvalues): - ''' get configuration values for a node; if the values don't exist in - our dictionary then return the default values supplied - ''' + """ + Get configuration values for a node; if the values don't exist in + our dictionary then return the default values supplied + + :param int nodenum: node id + :param conftype: configuration type + :param defaultvalues: default values + :return: configuration type and default values + :type: tuple + """ + logger.info("getting config for node(%s): %s - default(%s)", + nodenum, conftype, defaultvalues) if nodenum in self.configs: # return configured values conflist = self.configs[nodenum] - for (t, v) in conflist: - if (conftype is None) or (t == conftype): - return (t, v) + for t, v in conflist: + if conftype is None or t == conftype: + return t, v # return default values provided (may be None) - return (conftype, defaultvalues) - + return conftype, defaultvalues + def getallconfigs(self, use_clsmap=True): - ''' Return (nodenum, conftype, values) tuples for all stored configs. + """ + Return (nodenum, conftype, values) tuples for all stored configs. Used when reconnecting to a session. 
- ''' + + :param bool use_clsmap: should a class map be used, default to True + :return: list of all configurations + :rtype: list + """ r = [] for nodenum in self.configs: - for (t, v) in self.configs[nodenum]: + for t, v in self.configs[nodenum]: if use_clsmap: t = self._modelclsmap[t] - r.append( (nodenum, t, v) ) + r.append((nodenum, t, v)) return r def clearconfig(self, nodenum): - ''' remove configuration values for the specified node; - when nodenum is None, remove all configuration values - ''' + """ + remove configuration values for the specified node; + when nodenum is None, remove all configuration values + + :param int nodenum: node id + :return: nothing + """ if nodenum is None: self.configs = {} return @@ -149,10 +211,16 @@ class ConfigurableManager(object): self.configs.pop(nodenum) def setconfig_keyvalues(self, nodenum, conftype, keyvalues): - ''' keyvalues list of tuples - ''' + """ + Key values list of tuples for a node. + + :param int nodenum: node id + :param conftype: configuration type + :param keyvalues: key valyes + :return: nothing + """ if conftype not in self._modelclsmap: - self.warn("Unknown model type '%s'" % (conftype)) + logger.warn("Unknown model type '%s'" % conftype) return model = self._modelclsmap[conftype] keys = model.getnames() @@ -160,19 +228,24 @@ class ConfigurableManager(object): values = list(model.getdefaultvalues()) for key, value in keyvalues: if key not in keys: - self.warn("Skipping unknown configuration key for %s: '%s'" % \ - (conftype, key)) + logger.warn("Skipping unknown configuration key for %s: '%s'" % \ + (conftype, key)) continue i = keys.index(key) values[i] = value self.setconfig(nodenum, conftype, values) def getmodels(self, n): - ''' Return a list of model classes and values for a net if one has been + """ + Return a list of model classes and values for a net if one has been configured. This is invoked when exporting a session to XML. This assumes self.configs contains an iterable of (model-names, values) and a self._modelclsmapdict exists. - ''' + + :param n: network node to get models for + :return: list of model and values tuples for the network node + :rtype: list + """ r = [] if n.objid in self.configs: v = self.configs[n.objid] @@ -183,92 +256,119 @@ class ConfigurableManager(object): return r - def info(self, msg): - self.session.info(msg) - - def warn(self, msg): - self.session.warn(msg) - - class Configurable(object): - ''' A generic class for managing configuration parameters. - Parameters are sent via Configuration Messages, which allow the GUI - to build dynamic dialogs depending on what is being configured. - ''' - _name = "" + """ + A generic class for managing configuration parameters. + Parameters are sent via Configuration Messages, which allow the GUI + to build dynamic dialogs depending on what is being configured. + """ + name = "" # Configuration items: # ('name', 'type', 'default', 'possible-value-list', 'caption') - _confmatrix = [] - _confgroups = None - _bitmap = None - - def __init__(self, session=None, objid=None): + config_matrix = [] + config_groups = None + bitmap = None + + def __init__(self, session=None, object_id=None): + """ + Creates a Configurable instance. + + :param core.session.Session session: session for this configurable + :param object_id: + """ self.session = session - self.objid = objid - + self.object_id = object_id + def reset(self): + """ + Reset method. + + :return: nothing + """ pass - + def register(self): + """ + Register method. 
+ + :return: nothing + """ pass - + @classmethod def getdefaultvalues(cls): - return tuple( map(lambda x: x[2], cls._confmatrix) ) - + """ + Retrieve default values from configuration matrix. + + :return: tuple of default values + :rtype: tuple + """ + # TODO: why the need for a tuple? + return tuple(map(lambda x: x[2], cls.config_matrix)) + @classmethod def getnames(cls): - return tuple( map( lambda x: x[0], cls._confmatrix) ) + """ + Retrieve name values from configuration matrix. + + :return: tuple of name values + :rtype: tuple + """ + # TODO: why the need for a tuple? + return tuple(map(lambda x: x[0], cls.config_matrix)) @classmethod - def configure(cls, mgr, msg): - ''' Handle configuration messages for this object. - ''' - reply = None - nodenum = msg.gettlv(coreapi.CORE_TLV_CONF_NODE) - objname = msg.gettlv(coreapi.CORE_TLV_CONF_OBJ) - conftype = msg.gettlv(coreapi.CORE_TLV_CONF_TYPE) - - ifacenum = msg.gettlv(coreapi.CORE_TLV_CONF_IFNUM) - if ifacenum is not None: - nodenum = nodenum*1000 + ifacenum + def configure(cls, manager, config_data): + """ + Handle configuration messages for this object. - if mgr.verbose: - mgr.info("received configure message for %s nodenum:%s" % (cls._name, str(nodenum))) - if conftype == coreapi.CONF_TYPE_FLAGS_REQUEST: - if mgr.verbose: - mgr.info("replying to configure request for %s model" % - cls._name) + :param ConfigurableManager manager: configuration manager + :param config_data: configuration data + :return: configuration data object + :rtype: ConfigData + """ + reply = None + node_id = config_data.node + object_name = config_data.object + config_type = config_data.type + interface_id = config_data.interface_number + values_str = config_data.data_values + + if interface_id is not None: + node_id = node_id * 1000 + interface_id + + logger.info("received configure message for %s nodenum:%s", cls.name, str(node_id)) + if config_type == ConfigFlags.REQUEST.value: + logger.info("replying to configure request for %s model", cls.name) # when object name is "all", the reply to this request may be None # if this node has not been configured for this model; otherwise we # reply with the defaults for this model - if objname == "all": + if object_name == "all": defaults = None - typeflags = coreapi.CONF_TYPE_FLAGS_UPDATE + typeflags = ConfigFlags.UPDATE.value else: defaults = cls.getdefaultvalues() - typeflags = coreapi.CONF_TYPE_FLAGS_NONE - values = mgr.getconfig(nodenum, cls._name, defaults)[1] + typeflags = ConfigFlags.coreapi.CONF_TYPE_FLAGS_NONE + values = manager.getconfig(node_id, cls.name, defaults)[1] if values is None: # node has no active config for this model (don't send defaults) return None # reply with config options - reply = cls.toconfmsg(0, nodenum, typeflags, values) - elif conftype == coreapi.CONF_TYPE_FLAGS_RESET: - if objname == "all": - mgr.clearconfig(nodenum) - #elif conftype == coreapi.CONF_TYPE_FLAGS_UPDATE: + reply = cls.config_data(0, node_id, typeflags, values) + elif config_type == ConfigFlags.RESET.value: + if object_name == "all": + manager.clearconfig(node_id) + # elif conftype == coreapi.CONF_TYPE_FLAGS_UPDATE: else: # store the configuration values for later use, when the node # object has been created - if objname is None: - mgr.info("no configuration object for node %s" % nodenum) + if object_name is None: + logger.info("no configuration object for node %s", node_id) return None - values_str = msg.gettlv(coreapi.CORE_TLV_CONF_VALUES) defaults = cls.getdefaultvalues() if values_str is None: # use default or 
preconfigured values - values = mgr.getconfig(nodenum, cls._name, defaults)[1] + values = manager.getconfig(node_id, cls.name, defaults)[1] else: # use new values supplied from the conf message values = values_str.split('|') @@ -282,61 +382,69 @@ class Configurable(object): try: new_values[keys.index(key)] = value except ValueError: - mgr.info("warning: ignoring invalid key '%s'" % key) + logger.info("warning: ignoring invalid key '%s'" % key) values = new_values - mgr.setconfig(nodenum, objname, values) + manager.setconfig(node_id, object_name, values) + return reply @classmethod - def toconfmsg(cls, flags, nodenum, typeflags, values): - ''' Convert this class to a Config API message. Some TLVs are defined - by the class, but node number, conf type flags, and values must - be passed in. - ''' + def config_data(cls, flags, node_id, type_flags, values): + """ + Convert this class to a Config API message. Some TLVs are defined + by the class, but node number, conf type flags, and values must + be passed in. + + :param flags: message flags + :param int node_id: node id + :param type_flags: type flags + :param values: values + :return: configuration data object + :rtype: ConfigData + """ keys = cls.getnames() - keyvalues = map(lambda a,b: "%s=%s" % (a,b), keys, values) + keyvalues = map(lambda a, b: "%s=%s" % (a, b), keys, values) values_str = string.join(keyvalues, '|') - tlvdata = "" - if nodenum is not None: - tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_NODE, - nodenum) - tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_OBJ, - cls._name) - tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_TYPE, - typeflags) - datatypes = tuple( map(lambda x: x[1], cls._confmatrix) ) - tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_DATA_TYPES, - datatypes) - tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_VALUES, - values_str) - captions = reduce( lambda a,b: a + '|' + b, \ - map(lambda x: x[4], cls._confmatrix)) - tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_CAPTIONS, - captions) - possiblevals = reduce( lambda a,b: a + '|' + b, \ - map(lambda x: x[3], cls._confmatrix)) - tlvdata += coreapi.CoreConfTlv.pack( - coreapi.CORE_TLV_CONF_POSSIBLE_VALUES, possiblevals) - if cls._bitmap is not None: - tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_BITMAP, - cls._bitmap) - if cls._confgroups is not None: - tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_GROUPS, - cls._confgroups) - msg = coreapi.CoreConfMessage.pack(flags, tlvdata) - return msg + datatypes = tuple(map(lambda x: x[1], cls.config_matrix)) + captions = reduce(lambda a, b: a + '|' + b, map(lambda x: x[4], cls.config_matrix)) + possible_valuess = reduce(lambda a, b: a + '|' + b, map(lambda x: x[3], cls.config_matrix)) + + return ConfigData( + message_type=flags, + node=node_id, + object=cls.name, + type=type_flags, + data_types=datatypes, + data_values=values_str, + captions=captions, + possible_values=possible_valuess, + bitmap=cls.bitmap, + groups=cls.config_groups + ) @staticmethod def booltooffon(value): - ''' Convenience helper turns bool into on (True) or off (False) string. - ''' + """ + Convenience helper turns bool into on (True) or off (False) string. + + :param str value: value to retrieve on/off value for + :return: on or off string + :rtype: str + """ if value == "1" or value == "true" or value == "on": return "on" else: return "off" - + @staticmethod def offontobool(value): + """ + Convenience helper for converting an on/off string to a integer. 
+ + :param str value: on/off string + :return: on/off integer value + :rtype: int + """ if type(value) == str: if value.lower() == "on": return 1 @@ -345,36 +453,51 @@ class Configurable(object): return value @classmethod - def valueof(cls, name, values): - ''' Helper to return a value by the name defined in confmatrix. - Checks if it is boolean''' + def valueof(cls, name, values): + """ + Helper to return a value by the name defined in confmatrix. + Checks if it is boolean + + :param str name: name to get value of + :param values: values to get value from + :return: value for name + """ i = cls.getnames().index(name) - if cls._confmatrix[i][1] == coreapi.CONF_DATA_TYPE_BOOL and \ - values[i] != "": + if cls.config_matrix[i][1] == ConfigDataTypes.BOOL.value and values[i] != "": return cls.booltooffon(values[i]) else: return values[i] @staticmethod def haskeyvalues(values): - ''' Helper to check for list of key=value pairs versus a plain old - list of values. Returns True if all elements are "key=value". - ''' + """ + Helper to check for list of key=value pairs versus a plain old + list of values. Returns True if all elements are "key=value". + + :param values: items to check for key/value pairs + :return: True if all values are key/value pairs, False otherwise + :rtype: bool + """ if len(values) == 0: return False for v in values: if "=" not in v: return False return True - + def getkeyvaluelist(self): - ''' Helper to return a list of (key, value) tuples. Keys come from - self._confmatrix and values are instance attributes. - ''' - r = [] - for k in self.getnames(): - if hasattr(self, k): - r.append((k, getattr(self, k))) - return r + """ + Helper to return a list of (key, value) tuples. Keys come from + configuration matrix and values are instance attributes. + :return: tuples of key value pairs + :rtype: list + """ + key_values = [] + for name in self.getnames(): + if hasattr(self, name): + value = getattr(self, name) + key_values.append((name, value)) + + return key_values diff --git a/daemon/core/coreobj.py b/daemon/core/coreobj.py index 2bedbfa8..f82cf194 100644 --- a/daemon/core/coreobj.py +++ b/daemon/core/coreobj.py @@ -1,32 +1,50 @@ -# -# CORE -# Copyright (c)2010-2013 the Boeing Company. -# See the LICENSE file included in this distribution. -# -# authors: Tom Goff -# Jeff Ahrenholz -# -''' -coreobj.py: defines the basic objects for emulation: the PyCoreObj base class, -along with PyCoreNode, PyCoreNet, and PyCoreNetIf -''' -import sys, threading, os, shutil +""" +Defines the basic objects for CORE emulation: the PyCoreObj base class, along with PyCoreNode, +PyCoreNet, and PyCoreNetIf. +""" + +import os +import shutil +import socket +import threading +from socket import AF_INET +from socket import AF_INET6 from core.api import coreapi -from core.misc.ipaddr import * +from core.data import NodeData, LinkData +from core.enumerations import LinkTlvs +from core.enumerations import LinkTypes +from core.misc import ipaddress + class Position(object): - ''' Helper class for Cartesian coordinate position - ''' - def __init__(self, x = None, y = None, z = None): - self.x = None - self.y = None - self.z = None - self.set(x, y, z) + """ + Helper class for Cartesian coordinate position + """ - def set(self, x = None, y = None, z = None): - ''' Returns True if the position has actually changed. - ''' + def __init__(self, x=None, y=None, z=None): + """ + Creates a Position instance. 
+ + :param x: x position + :param y: y position + :param z: z position + :return: + """ + self.x = x + self.y = y + self.z = z + + def set(self, x=None, y=None, z=None): + """ + Returns True if the position has actually changed. + + :param x: x position + :param y: y position + :param z: z position + :return: True if position changed, False otherwise + :rtype: bool + """ if self.x == x and self.y == y and self.z == z: return False self.x = x @@ -35,20 +53,36 @@ class Position(object): return True def get(self): - ''' Fetch the (x,y,z) position tuple. - ''' - return (self.x, self.y, self.z) + """ + Retrieve x,y,z position. + + :return: x,y,z position tuple + :rtype: tuple + """ + return self.x, self.y, self.z + class PyCoreObj(object): - ''' Base class for pycore objects (nodes and nets) - ''' + """ + Base class for CORE objects (nodes and networks) + """ apitype = None - def __init__(self, session, objid = None, name = None, verbose = False, - start = True): + # TODO: appears start has no usage, verify and remove + def __init__(self, session, objid=None, name=None, start=True): + """ + Creates a PyCoreObj instance. + + :param core.session.Session session: CORE session object + :param int objid: object id + :param str name: object name + :param bool start: start value + :return: + """ + self.session = session if objid is None: - objid = session.getobjid() + objid = session.get_object_id() self.objid = objid if name is None: name = "o%s" % self.objid @@ -59,286 +93,414 @@ class PyCoreObj(object): self.canvas = None self.icon = None self.opaque = None - self.verbose = verbose self.position = Position() def startup(self): - ''' Each object implements its own startup method. - ''' + """ + Each object implements its own startup method. + + :return: nothing + """ raise NotImplementedError def shutdown(self): - ''' Each object implements its own shutdown method. - ''' + """ + Each object implements its own shutdown method. + + :return: nothing + """ raise NotImplementedError - def setposition(self, x = None, y = None, z = None): - ''' Set the (x,y,z) position of the object. - ''' - return self.position.set(x = x, y = y, z = z) + def setposition(self, x=None, y=None, z=None): + """ + Set the (x,y,z) position of the object. + + :param x: x position + :param y: y position + :param z: z position + :return: True if position changed, False otherwise + :rtype: bool + """ + return self.position.set(x=x, y=y, z=z) def getposition(self): - ''' Return an (x,y,z) tuple representing this object's position. - ''' + """ + Return an (x,y,z) tuple representing this object's position. + + :return: x,y,z position tuple + :rtype: tuple + """ return self.position.get() def ifname(self, ifindex): - return self.netif(ifindex).name + """ + Retrieve interface name for index. + + :param int ifindex: interface index + :return: interface name + :rtype: str + """ + return self._netif[ifindex].name def netifs(self, sort=False): - ''' Iterate over attached network interfaces. - ''' + """ + Retrieve network interfaces, sorted if desired. + + :param bool sort: boolean used to determine if interfaces should be sorted + :return: network interfaces + :rtype: list + """ if sort: return map(lambda k: self._netif[k], sorted(self._netif.keys())) else: return self._netif.itervalues() def numnetif(self): - ''' Return the attached interface count. - ''' + """ + Return the attached interface count. 
+ + :return: number of network interfaces + :rtype: int + """ return len(self._netif) - + def getifindex(self, netif): + """ + Retrieve index for an interface. + + :param PyCoreNetIf netif: interface to get index for + :return: interface index if found, -1 otherwise + :rtype: int + """ + for ifindex in self._netif: if self._netif[ifindex] is netif: return ifindex + return -1 def newifindex(self): + """ + Create a new interface index. + + :return: interface index + :rtype: int + """ while self.ifindex in self._netif: self.ifindex += 1 ifindex = self.ifindex self.ifindex += 1 return ifindex - def tonodemsg(self, flags): - ''' Build a CORE API Node Message for this object. Both nodes and - networks can be represented by a Node Message. - ''' + def data(self, message_type): + """ + Build a data object for this node. + + :param message_type: purpose for the data object we are creating + :return: node data object + :rtype: core.data.NodeData + """ if self.apitype is None: return None - tlvdata = "" - (x, y, z) = self.getposition() - tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_NUMBER, - self.objid) - tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_TYPE, - self.apitype) - tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_NAME, - self.name) - if hasattr(self, "type") and self.type is not None: - tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_MODEL, - self.type) - if hasattr(self, "server") and self.server is not None: - tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_EMUSRV, - self.server) + + x, y, z = self.getposition() + + model = None + if hasattr(self, "type"): + model = self.type + + emulation_server = None + if hasattr(self, "server"): + emulation_server = self.server + + services = None if hasattr(self, "services") and len(self.services) != 0: nodeservices = [] for s in self.services: - nodeservices.append(s._name) - tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_SERVICES, - "|".join(nodeservices)) + nodeservices.append(s._name) + services = "|".join(nodeservices) + node_data = NodeData( + message_type=message_type, + id=self.objid, + node_type=self.apitype, + name=self.name, + emulation_id=self.objid, + canvas=self.canvas, + icon=self.icon, + opaque=self.opaque, + x_position=x, + y_position=y, + model=model, + emulation_server=emulation_server, + services=services + ) - if x is not None: - tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_XPOS, x) - if y is not None: - tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_YPOS, y) - if self.canvas is not None: - tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_CANVAS, - self.canvas) - tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_EMUID, - self.objid) - if self.icon is not None: - tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_ICON, - self.icon) - if self.opaque is not None: - tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_OPAQUE, - self.opaque) - msg = coreapi.CoreNodeMessage.pack(flags, tlvdata) - return msg + return node_data - def tolinkmsgs(self, flags): - ''' Build CORE API Link Messages for this object. There is no default - method for PyCoreObjs as PyCoreNodes do not implement this but - PyCoreNets do. - ''' + def all_link_data(self, flags): + """ + Build CORE Link data for this object. There is no default + method for PyCoreObjs as PyCoreNodes do not implement this but + PyCoreNets do. 
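newifindex() above hands out interface indexes by advancing a counter past any index already present in the interface dictionary. A simplified, stateless version of that allocation, shown against a hypothetical interface table:

    def next_free_ifindex(netifs, start=0):
        """Return the first interface index at or after start that is unused."""
        index = start
        while index in netifs:
            index += 1
        return index

    netifs = {0: "eth0", 1: "eth1", 3: "tun0"}
    print(next_free_ifindex(netifs))     # 2
    print(next_free_ifindex(netifs, 3))  # 4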
+ + :param flags: message flags + :return: list of link data + :rtype: link + """ return [] - - def info(self, msg): - ''' Utility method for printing informational messages when verbose - is turned on. - ''' - if self.verbose: - print "%s: %s" % (self.name, msg) - sys.stdout.flush() - - def warn(self, msg): - ''' Utility method for printing warning/error messages - ''' - print >> sys.stderr, "%s: %s" % (self.name, msg) - sys.stderr.flush() - - def exception(self, level, source, text): - ''' Generate an Exception Message for this session, providing this - object number. - ''' - if self.session: - id = None - if isinstance(self.objid, int): - id = self.objid - elif isinstance(self.objid, str) and self.objid.isdigit(): - id = int(self.objid) - self.session.exception(level, source, id, text) class PyCoreNode(PyCoreObj): - ''' Base class for nodes - ''' - def __init__(self, session, objid = None, name = None, verbose = False, - start = True): - ''' Initialization for node objects. - ''' - PyCoreObj.__init__(self, session, objid, name, verbose=verbose, - start=start) + """ + Base class for CORE nodes. + """ + + # TODO: start seems like it should go away + def __init__(self, session, objid=None, name=None, start=True): + """ + Create a PyCoreNode instance. + + :param core.session.Session session: CORE session object + :param int objid: object id + :param str name: object name + :param bool start: boolean for starting + """ + PyCoreObj.__init__(self, session, objid, name, start=start) self.services = [] if not hasattr(self, "type"): self.type = None self.nodedir = None + self.tmpnodedir = False + # TODO: getter method that should not be needed def nodeid(self): + """ + Retrieve node id. + + :return: node id + :rtype: int + """ return self.objid - - def addservice(self, service): + + def addservice(self, service): + """ + Add a services to the service list. + + :param core.service.CoreService service: service to add + :return: nothing + """ if service is not None: self.services.append(service) def makenodedir(self): + """ + Create the node directory. + + :return: nothing + """ if self.nodedir is None: - self.nodedir = \ - os.path.join(self.session.sessiondir, self.name + ".conf") + self.nodedir = os.path.join(self.session.session_dir, self.name + ".conf") os.makedirs(self.nodedir) self.tmpnodedir = True else: self.tmpnodedir = False - + def rmnodedir(self): - if hasattr(self.session.options, 'preservedir'): - if self.session.options.preservedir == '1': - return + """ + Remove the node directory, unless preserve directory has been set. + + :return: nothing + """ + preserve = getattr(self.session.options, "preservedir", None) + if preserve == "1": + return + if self.tmpnodedir: - shutil.rmtree(self.nodedir, ignore_errors = True) + shutil.rmtree(self.nodedir, ignore_errors=True) def addnetif(self, netif, ifindex): + """ + Add network interface to node and set the network interface index if successful. 
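makenodedir() and rmnodedir() above give each node a per-session working directory named "<node>.conf" and only delete it on teardown when the directory was auto-created and the preservedir option is not set. A hedged sketch of that layout using a temporary session directory:

    import os
    import shutil
    import tempfile

    def make_node_dir(session_dir, node_name):
        # per-node working directory, following the "<name>.conf" convention
        node_dir = os.path.join(session_dir, node_name + ".conf")
        os.makedirs(node_dir)
        return node_dir

    session_dir = tempfile.mkdtemp()
    node_dir = make_node_dir(session_dir, "n1")
    print(os.path.isdir(node_dir))  # True
    shutil.rmtree(session_dir, ignore_errors=True)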
+ + :param PyCoreNetIf netif: network interface to add + :param int ifindex: interface index + :return: nothing + """ if ifindex in self._netif: - raise ValueError, "ifindex %s already exists" % ifindex + raise ValueError("ifindex %s already exists" % ifindex) self._netif[ifindex] = netif + # TODO: this hould have probably been set ahead, seems bad to me, check for failure and fix netif.netindex = ifindex def delnetif(self, ifindex): + """ + Delete a network interface + + :param int ifindex: interface index to delete + :return: nothing + """ if ifindex not in self._netif: - raise ValueError, "ifindex %s does not exist" % ifindex + raise ValueError("ifindex %s does not exist" % ifindex) netif = self._netif.pop(ifindex) netif.shutdown() del netif - def netif(self, ifindex, net = None): + # TODO: net parameter is not used, remove + def netif(self, ifindex, net=None): + """ + Retrieve network interface. + + :param int ifindex: index of interface to retrieve + :param PyCoreNetIf net: network node + :return: network interface, or None if not found + :rtype: PyCoreNetIf + """ if ifindex in self._netif: return self._netif[ifindex] else: return None - + def attachnet(self, ifindex, net): + """ + Attach a network. + + :param int ifindex: interface of index to attach + :param PyCoreNetIf net: network to attach + :return: + """ if ifindex not in self._netif: - raise ValueError, "ifindex %s does not exist" % ifindex + raise ValueError("ifindex %s does not exist" % ifindex) self._netif[ifindex].attachnet(net) def detachnet(self, ifindex): + """ + Detach network interface. + + :param int ifindex: interface index to detach + :return: nothing + """ if ifindex not in self._netif: - raise ValueError, "ifindex %s does not exist" % ifindex + raise ValueError("ifindex %s does not exist" % ifindex) self._netif[ifindex].detachnet() - def setposition(self, x = None, y = None, z = None): - changed = PyCoreObj.setposition(self, x = x, y = y, z = z) - if not changed: - # save extra interface range calculations - return - for netif in self.netifs(sort=True): - netif.setposition(x, y, z) + def setposition(self, x=None, y=None, z=None): + """ + Set position. + + :param x: x position + :param y: y position + :param z: z position + :return: nothing + """ + changed = super(PyCoreNode, self).setposition(x, y, z) + if changed: + for netif in self.netifs(sort=True): + netif.setposition(x, y, z) def commonnets(self, obj, want_ctrl=False): - ''' Given another node or net object, return common networks between - this node and that object. A list of tuples is returned, with each tuple - consisting of (network, interface1, interface2). - ''' - r = [] + """ + Given another node or net object, return common networks between + this node and that object. A list of tuples is returned, with each tuple + consisting of (network, interface1, interface2). 
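commonnets() above intersects the interface lists of two objects and returns (network, interface1, interface2) tuples for every network they share. The same idea with throwaway stand-in objects:

    class FakeInterface(object):
        def __init__(self, net):
            self.net = net

    def common_nets(node1_netifs, node2_netifs):
        common = []
        for netif1 in node1_netifs:
            for netif2 in node2_netifs:
                if netif1.net is not None and netif1.net == netif2.net:
                    common.append((netif1.net, netif1, netif2))
        return common

    lan = "lan0"
    node1_netifs = [FakeInterface(lan), FakeInterface("ptp1")]
    node2_netifs = [FakeInterface(lan)]
    print(len(common_nets(node1_netifs, node2_netifs)))  # 1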
+ + :param obj: object to get common network with + :param want_ctrl: flag set to determine if control network are wanted + :return: tuples of common networks + :rtype: list + """ + common = [] for netif1 in self.netifs(): - if not want_ctrl and hasattr(netif1, 'control'): + if not want_ctrl and hasattr(netif1, "control"): continue for netif2 in obj.netifs(): if netif1.net == netif2.net: - r += (netif1.net, netif1, netif2), - return r + common.append((netif1.net, netif1, netif2)) + return common class PyCoreNet(PyCoreObj): - ''' Base class for networks - ''' - linktype = coreapi.CORE_LINK_WIRED + """ + Base class for networks + """ + linktype = LinkTypes.WIRED.value - def __init__(self, session, objid, name, verbose = False, start = True): - ''' Initialization for network objects. - ''' - PyCoreObj.__init__(self, session, objid, name, verbose=verbose, - start=start) + # TODO: remove start if appropriate + def __init__(self, session, objid, name, start=True): + """ + Create a PyCoreNet instance. + + :param core.session.Session session: CORE session object + :param int objid: object id + :param str name: object name + :param bool start: should object start + """ + PyCoreObj.__init__(self, session, objid, name, start=start) self._linked = {} self._linked_lock = threading.Lock() def attach(self, netif): + """ + Attach network interface. + + :param PyCoreNetIf netif: network interface to attach + :return: nothing + """ i = self.newifindex() self._netif[i] = netif netif.netifi = i with self._linked_lock: self._linked[netif] = {} - + def detach(self, netif): + """ + Detach network interface. + + :param PyCoreNetIf netif: network interface to detach + :return: nothing + """ del self._netif[netif.netifi] netif.netifi = None with self._linked_lock: del self._linked[netif] + # TODO: needs to be abstracted out, seems like it may be ok to remove def netifparamstolink(self, netif): - ''' Helper for tolinkmsgs() to build TLVs having link parameters - from interface parameters. - ''' + """ + Helper for tolinkmsgs() to build TLVs having link parameters from interface parameters. + + :param PyCoreNetIf netif: network interface to retrieve params from + :return: tlv data + """ + + delay = netif.getparam("delay") + bw = netif.getparam("bw") + loss = netif.getparam("loss") + duplicate = netif.getparam("duplicate") + jitter = netif.getparam("jitter") + tlvdata = "" - delay = netif.getparam('delay') - bw = netif.getparam('bw') - loss = netif.getparam('loss') - duplicate = netif.getparam('duplicate') - jitter = netif.getparam('jitter') if delay is not None: - tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_DELAY, - delay) + tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.DELAY.value, delay) if bw is not None: - tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_BW, bw) + tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.BANDWIDTH.value, bw) if loss is not None: - tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_PER, - str(loss)) + tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.PER.value, str(loss)) if duplicate is not None: - tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_DUP, - str(duplicate)) + tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.DUP.value, str(duplicate)) if jitter is not None: - tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_JITTER, - jitter) + tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.JITTER.value, jitter) + return tlvdata - - def tolinkmsgs(self, flags): - ''' Build CORE API Link Messages for this network. 
Each link message - describes a link between this network and a node. - ''' - msgs = [] + def all_link_data(self, flags): + """ + Build link data objects for this network. Each link object describes a link + between this network and a node. + """ + all_links = [] + # build a link message from this network node to each node having a # connected interface for netif in self.netifs(sort=True): @@ -358,62 +520,86 @@ class PyCoreNet(PyCoreObj): netif.swapparams('_params_up') if netif.getparams() != upstream_params: uni = True - - tlvdata = "" - tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_N1NUMBER, - self.objid) - tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_N2NUMBER, - otherobj.objid) - tlvdata += self.netifparamstolink(netif) - tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_TYPE, - self.linktype) + + unidirectional = 0 if uni: - tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_UNI, - 1) - tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_IF2NUM, - otherobj.getifindex(netif)) - if netif.hwaddr: - tlvdata += \ - coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_IF2MAC, - netif.hwaddr) - for addr in netif.addrlist: - (ip, sep, mask) = addr.partition('/') + unidirectional = 1 + + interface2_ip4 = None + interface2_ip4_mask = None + interface2_ip6 = None + interface2_ip6_mask = None + for address in netif.addrlist: + ip, sep, mask = address.partition('/') mask = int(mask) - if isIPv4Address(ip): + if ipaddress.is_ipv4_address(ip): family = AF_INET - tlvtypeip = coreapi.CORE_TLV_LINK_IF2IP4 - tlvtypemask = coreapi.CORE_TLV_LINK_IF2IP4MASK + ipl = socket.inet_pton(family, ip) + interface2_ip4 = ipaddress.IpAddress(af=family, address=ipl) + interface2_ip4_mask = mask else: family = AF_INET6 - tlvtypeip = coreapi.CORE_TLV_LINK_IF2IP6 - tlvtypemask = coreapi.CORE_TLV_LINK_IF2IP6MASK - ipl = socket.inet_pton(family, ip) - tlvdata += coreapi.CoreLinkTlv.pack(tlvtypeip, \ - IPAddr(af=family, addr=ipl)) - tlvdata += coreapi.CoreLinkTlv.pack(tlvtypemask, mask) + ipl = socket.inet_pton(family, ip) + interface2_ip6 = ipaddress.IpAddress(af=family, address=ipl) + interface2_ip6_mask = mask + + # TODO: not currently used + # loss = netif.getparam('loss') + link_data = LinkData( + message_type=flags, + node1_id=self.objid, + node2_id=otherobj.objid, + link_type=self.linktype, + unidirectional=unidirectional, + interface2_id=otherobj.getifindex(netif), + interface2_mac=netif.hwaddr, + interface2_ip4=interface2_ip4, + interface2_ip4_mask=interface2_ip4_mask, + interface2_ip6=interface2_ip6, + interface2_ip6_mask=interface2_ip6_mask, + delay=netif.getparam("delay"), + bandwidth=netif.getparam("bw"), + dup=netif.getparam("duplicate"), + jitter=netif.getparam("jitter") + ) + + all_links.append(link_data) - msg = coreapi.CoreLinkMessage.pack(flags, tlvdata) - msgs.append(msg) if not uni: continue - # build a 2nd link message for any upstream link parameters - tlvdata = "" - tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_N1NUMBER, - otherobj.objid) - tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_N2NUMBER, - self.objid) + netif.swapparams('_params_up') - tlvdata += self.netifparamstolink(netif) + link_data = LinkData( + message_type=0, + node1_id=otherobj.objid, + node2_id=self.objid, + unidirectional=1, + delay=netif.getparam("delay"), + bandwidth=netif.getparam("bw"), + dup=netif.getparam("duplicate"), + jitter=netif.getparam("jitter") + ) netif.swapparams('_params_up') - tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_UNI, 1) - msg = 
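all_link_data() above splits each interface address of the form "ip/prefix" and uses the address family to decide which LinkData fields to fill. A standalone sketch of that parsing using only the standard socket module (the new ipaddress.is_ipv4_address helper is assumed to behave like the inet_pton probe below):

    import socket

    def split_address(address):
        """Split "ip/prefix" and report whether the address is IPv4."""
        ip, _, prefix = address.partition("/")
        try:
            socket.inet_pton(socket.AF_INET, ip)
            is_ipv4 = True
        except socket.error:
            socket.inet_pton(socket.AF_INET6, ip)
            is_ipv4 = False
        return ip, int(prefix), is_ipv4

    print(split_address("10.0.0.1/24"))     # ('10.0.0.1', 24, True)
    print(split_address("2001:db8::1/64"))  # ('2001:db8::1', 64, False)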
coreapi.CoreLinkMessage.pack(0, tlvdata) - msgs.append(msg) - return msgs + + all_links.append(link_data) + + return all_links + class PyCoreNetIf(object): - ''' Base class for interfaces. - ''' + """ + Base class for network interfaces. + """ + def __init__(self, node, name, mtu): + """ + Creates a PyCoreNetIf instance. + + :param node: node for interface + :param str name: interface name + :param mtu: mtu value + """ + self.node = node self.name = name if not isinstance(mtu, (int, long)): @@ -428,67 +614,119 @@ class PyCoreNetIf(object): self.transport_type = None # interface index on the network self.netindex = None + # index used to find flow data + self.flow_id = None def startup(self): + """ + Startup method for the interface. + + :return: nothing + """ pass def shutdown(self): + """ + Shutdown method for the interface. + + :return: nothing + """ pass - + def attachnet(self, net): + """ + Attach network. + + :param PyCoreNet net: network to attach to + :return:nothing + """ if self.net: self.detachnet() self.net = None + net.attach(self) self.net = net def detachnet(self): + """ + Detach from a network. + + :return: nothing + """ if self.net is not None: self.net.detach(self) def addaddr(self, addr): + """ + Add address. + + :param str addr: address to add + :return: nothing + """ + self.addrlist.append(addr) def deladdr(self, addr): + """ + Delete address. + + :param str addr: address to delete + :return: nothing + """ self.addrlist.remove(addr) def sethwaddr(self, addr): + """ + Set hardware address. + + :param core.misc.ipaddress.MacAddress addr: hardware address to set to. + :return: nothing + """ self.hwaddr = addr def getparam(self, key): - ''' Retrieve a parameter from the _params dict, - or None if the parameter does not exist. - ''' - if key not in self._params: - return None - return self._params[key] - + """ + Retrieve a parameter from the, or None if the parameter does not exist. + + :param key: parameter to get value for + :return: parameter value + """ + return self._params.get(key) + def getparams(self): - ''' Return (key, value) pairs from the _params dict. - ''' - r = [] + """ + Return (key, value) pairs for parameters. + """ + parameters = [] for k in sorted(self._params.keys()): - r.append((k, self._params[k])) - return r - + parameters.append((k, self._params[k])) + return parameters + def setparam(self, key, value): - ''' Set a parameter in the _params dict. - Returns True if the parameter has changed. - ''' - if key in self._params: - if self._params[key] == value: - return False - elif self._params[key] <= 0 and value <= 0: - # treat None and 0 as unchanged values - return False + """ + Set a parameter value, returns True if the parameter has changed. + + :param key: parameter name to set + :param value: parameter value + :return: True if parameter changed, False otherwise + """ + # treat None and 0 as unchanged values + current_value = self._params.get(key) + if current_value == value or current_value <= 0 and value <= 0: + return False + self._params[key] = value return True - + def swapparams(self, name): - ''' Swap out the _params dict for name. If name does not exist, + """ + Swap out parameters dict for name. If name does not exist, intialize it. This is for supporting separate upstream/downstream parameters when two layer-2 nodes are linked together. 
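setparam() above only reports a change when the stored value really differs, treating None and non-positive values as the same "unset" state so repeated link updates do not trigger needless reconfiguration. A sketch of that rule, rewritten to avoid comparing None with 0 (the original relies on None <= 0 being True under Python 2):

    def set_param(params, key, value):
        """Store value under key; return True only if the parameter changed."""
        current = params.get(key)
        if current == value:
            return False
        # treat None and non-positive values as equivalent "unset" values
        if (current or 0) <= 0 and (value or 0) <= 0:
            return False
        params[key] = value
        return True

    params = {}
    print(set_param(params, "bw", 54000000))  # True  - new value stored
    print(set_param(params, "bw", 54000000))  # False - unchanged
    print(set_param(params, "loss", 0))       # False - 0 and unset are treated alike
    print(set_param(params, "delay", 20000))  # True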
- ''' + + :param str name: name of parameter to swap + :return: nothing + """ tmp = self._params if not hasattr(self, name): setattr(self, name, {}) @@ -496,8 +734,13 @@ class PyCoreNetIf(object): setattr(self, name, tmp) def setposition(self, x, y, z): - ''' Dispatch to any position hook (self.poshook) handler. - ''' + """ + Dispatch position hook handler. + + :param x: x position + :param y: y position + :param z: z position + :return: nothing + """ if self.poshook is not None: self.poshook(self, x, y, z) - diff --git a/daemon/core/coreserver.py b/daemon/core/coreserver.py index 4d54dc3e..8620945d 100644 --- a/daemon/core/coreserver.py +++ b/daemon/core/coreserver.py @@ -1,1717 +1,377 @@ -#!/usr/bin/env python -# -# CORE -# Copyright (c)2010-2016 the Boeing Company. -# See the LICENSE file included in this distribution. -# -# authors: Tom Goff -# Jeff Ahrenholz -# Rod Santiago -# +""" +Defines server classes and request handlers for TCP and UDP. Also defined here is a TCP based +auxiliary server class for supporting externally defined handlers. +""" +import SocketServer +import os +import threading +import time -import SocketServer, sys, threading, time, traceback -import os, gc, shlex, shutil -from core import pycore from core.api import coreapi -from core.misc.utils import hexdump, cmdresult, mutedetach, closeonexec -from core.misc.xmlsession import opensessionxml, savesessionxml - - -''' -Defines server classes and request handlers for TCP and UDP. Also defined here is a TCP based auxiliary server class for supporting externally defined handlers. -''' +from core.enumerations import EventTypes +from core.enumerations import SessionTlvs +from core.misc import log +from core.session import Session +logger = log.get_logger(__name__) class CoreServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer): - ''' TCP server class, manages sessions and spawns request handlers for - incoming connections. - ''' + """ + TCP server class, manages sessions and spawns request handlers for + incoming connections. + """ daemon_threads = True allow_reuse_address = True servers = set() - def __init__(self, server_address, RequestHandlerClass, cfg = None): - ''' Server class initialization takes configuration data and calls - the SocketServer constructor - ''' - self.cfg = cfg - self._sessions = {} - self._sessionslock = threading.Lock() - self.newserver(self) - SocketServer.TCPServer.__init__(self, server_address, - RequestHandlerClass) + def __init__(self, server_address, handler_class, config=None): + """ + Server class initialization takes configuration data and calls + the SocketServer constructor + + :param tuple[str, int] server_address: server host and port to use + :param class handler_class: request handler + :param dict config: configuration setting + :return: + """ + self.config = config + self.sessions = {} + self.udpserver = None + self.udpthread = None + self.auxserver = None + self.auxthread = None + self._sessions_lock = threading.Lock() + CoreServer.add_server(self) + SocketServer.TCPServer.__init__(self, server_address, handler_class) @classmethod - def newserver(cls, server): + def add_server(cls, server): + """ + Add a core server to the known servers set. + + :param CoreServer server: server to add + :return: nothing + """ cls.servers.add(server) @classmethod - def delserver(cls, server): - try: + def remove_server(cls, server): + """ + Remove a core server from the known servers set. 
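PyCoreNetIf.setposition() above does nothing itself; it dispatches to an optional poshook callback, which lets wireless models react when an attached node moves. A tiny sketch of that callback pattern with an invented logging hook:

    class FakeNetIf(object):
        """Interface stand-in that forwards position updates to a hook."""

        def __init__(self):
            self.poshook = None

        def setposition(self, x, y, z):
            if self.poshook is not None:
                self.poshook(self, x, y, z)

    def log_position(netif, x, y, z):
        print("interface moved to (%s, %s, %s)" % (x, y, z))

    netif = FakeNetIf()
    netif.setposition(1, 2, 0)    # no hook registered, silently ignored
    netif.poshook = log_position
    netif.setposition(10, 20, 0)  # interface moved to (10, 20, 0)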
+ + :param CoreServer server: server to remove + :return: nothing + """ + if server in cls.servers: cls.servers.remove(server) - except KeyError: - pass def shutdown(self): - for session in self._sessions.values(): + """ + Shutdown the server, all known sessions, and remove server from known servers set. + + :return: nothing + """ + # shutdown all known sessions + for session in self.sessions.values(): session.shutdown() - if self.cfg['daemonize']: - pidfilename = self.cfg['pidfile'] + + # if we are a daemon remove pid file + if self.config["daemonize"]: + pid_file = self.config["pidfile"] try: - os.unlink(pidfilename) + os.unlink(pid_file) except OSError: - pass - self.delserver(self) + logger.exception("error daemon pid file: %s", pid_file) + + # remove server from server list + CoreServer.remove_server(self) + + def add_session(self, session): + """ + Add a session to our dictionary of sessions, ensuring a unique session number. + + :param core.session.Session session: session to add + :return: added session + :raise KeyError: when a session with the same id already exists + """ + with self._sessions_lock: + if session.session_id in self.sessions: + raise KeyError("non-unique session id %s for %s" % (session.session_id, session)) + self.sessions[session.session_id] = session - def addsession(self, session): - ''' Add a session to our dictionary of sessions, ensuring a unique - session number - ''' - self._sessionslock.acquire() - try: - if session.sessionid in self._sessions: - raise KeyError, "non-unique session id %s for %s" % \ - (session.sessionid, session) - self._sessions[session.sessionid] = session - finally: - self._sessionslock.release() return session - def delsession(self, session): - ''' Remove a session from our dictionary of sessions. - ''' - with self._sessionslock: - if session.sessionid not in self._sessions: - print "session id %s not found (sessions=%s)" % \ - (session.sessionid, self._sessions.keys()) + def remove_session(self, session): + """ + Remove a session from our dictionary of sessions. + + :param core.session.Session session: session to remove + :return: removed session + :rtype: core.session.Session + """ + with self._sessions_lock: + if session.session_id not in self.sessions: + logger.info("session id %s not found (sessions=%s)", session.session_id, self.sessions.keys()) else: - del(self._sessions[session.sessionid]) - return session - - def getsessionids(self): - ''' Return a list of active session numbers. - ''' - with self._sessionslock: - sids = self._sessions.keys() - return sids + del self.sessions[session.session_id] + + return session + + def get_session_ids(self): + """ + Return a list of active session numbers. + + :return: known session ids + :rtype: list + """ + with self._sessions_lock: + session_ids = self.sessions.keys() + + return session_ids + + def create_session(self, session_id=None): + """ + Convenience method for creating sessions with the servers config. 
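add_session() and remove_session() above guard the session dictionary with a lock and refuse duplicate session ids. The same bookkeeping reduced to a standalone sketch:

    import threading

    class SessionRegistry(object):
        """Locked session dictionary enforcing unique session ids."""

        def __init__(self):
            self.sessions = {}
            self._lock = threading.Lock()

        def add(self, session_id, session):
            with self._lock:
                if session_id in self.sessions:
                    raise KeyError("non-unique session id %s" % session_id)
                self.sessions[session_id] = session
            return session

        def remove(self, session_id):
            with self._lock:
                return self.sessions.pop(session_id, None)

    registry = SessionRegistry()
    registry.add(1, "session-1")
    print(registry.remove(1))  # session-1
    print(registry.remove(1))  # None - already removed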
+ + :param int session_id: session id for new session + :return: create session + :rtype: core.session.Session + """ + + # create random id when necessary, seems to be 1 case wanted, based on legacy code + # creating a value so high, typical client side generation schemes hopefully wont collide + if not session_id: + session_id = next( + session_id for session_id in xrange(60000, 65000) + if session_id not in self.sessions + ) + + # create and add session to local manager + session = Session(session_id, config=self.config) + self.add_session(session) + + # add shutdown handler to remove session from manager + session.shutdown_handlers.append(self.session_shutdown) + + return session + + def get_session(self, session_id=None): + """ + Create a new session or retrieve an existing one from our + dictionary of sessions. When the session_id=0 and the use_existing + flag is set, return on of the existing sessions. + + :param int session_id: session id of session to retrieve, defaults to returning random session + :return: session + :rtype: core.session.Session + """ + + with self._sessions_lock: + # return specified session or none + if session_id: + return self.sessions.get(session_id) + + # retrieving known session + session = None + + # find runtime session with highest node count + for known_session in filter(lambda x: x.state == EventTypes.RUNTIME_STATE.value, + self.sessions.itervalues()): + if not session or known_session.get_node_count() > session.get_node_count(): + session = known_session + + # return first known session otherwise + if not session: + for known_session in self.sessions.itervalues(): + session = known_session + break - def getsession(self, sessionid = None, useexisting = True): - ''' Create a new session or retrieve an existing one from our - dictionary of sessions. When the sessionid=0 and the useexisting - flag is set, return on of the existing sessions. - ''' - if not useexisting: - session = pycore.Session(sessionid, cfg = self.cfg, server = self) - self.addsession(session) return session - with self._sessionslock: - # look for the specified session id - if sessionid in self._sessions: - session = self._sessions[sessionid] - else: - session = None - # pick an existing session - if sessionid == 0: - for s in self._sessions.itervalues(): - if s.getstate() == coreapi.CORE_EVENT_RUNTIME_STATE: - if session is None: - session = s - elif s.node_count > session.node_count: - session = s - if session is None: - for s in self._sessions.itervalues(): - session = s - break - return session + def session_shutdown(self, session): + """ + Handler method to be used as a callback when a session has shutdown. - def tosessionmsg(self, flags = 0): - ''' Build CORE API Sessions message based on current session info. - ''' - idlist = [] - namelist = [] - filelist = [] - nclist = [] - datelist = [] - thumblist = [] + :param core.session.Session session: session shutting down + :return: nothing + """ + self.remove_session(session) + + def to_session_message(self, flags=0): + """ + Build CORE API Sessions message based on current session info. 
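create_session() above picks an id from the 60000-65000 range when the client does not supply one, on the assumption that client-generated ids stay well below that range. The same search as a plain function (range() is used instead of xrange() so the sketch also runs on Python 3):

    def free_session_id(sessions, low=60000, high=65000):
        """Return the first id in [low, high) not present in sessions."""
        for candidate in range(low, high):
            if candidate not in sessions:
                return candidate
        raise RuntimeError("no free session id available")

    print(free_session_id({}))                           # 60000
    print(free_session_id({60000: None, 60001: None}))   # 60002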
+ + :param int flags: message flags + :return: session message + """ + id_list = [] + name_list = [] + file_list = [] + node_count_list = [] + date_list = [] + thumb_list = [] num_sessions = 0 - with self._sessionslock: - for sessionid in self._sessions: - session = self._sessions[sessionid] + with self._sessions_lock: + for session_id in self.sessions: + session = self.sessions[session_id] # debug: session.dumpsession() num_sessions += 1 - idlist.append(str(sessionid)) + id_list.append(str(session_id)) + name = session.name - if name is None: + if not name: name = "" - namelist.append(name) - file = session.filename - if file is None: + name_list.append(name) + + file = session.file_name + if not file: file = "" - filelist.append(file) - nc = session.node_count - if nc is None: - nc = "" - nclist.append(str(nc)) - datelist.append(time.ctime(session._time)) + file_list.append(file) + + node_count_list.append(str(session.get_node_count())) + + date_list.append(time.ctime(session._state_time)) + thumb = session.thumbnail - if thumb is None: + if not thumb: thumb = "" - thumblist.append(thumb) - sids = "|".join(idlist) - names = "|".join(namelist) - files = "|".join(filelist) - ncs = "|".join(nclist) - dates = "|".join(datelist) - thumbs = "|".join(thumblist) + thumb_list.append(thumb) + + session_ids = "|".join(id_list) + names = "|".join(name_list) + files = "|".join(file_list) + node_counts = "|".join(node_count_list) + dates = "|".join(date_list) + thumbs = "|".join(thumb_list) if num_sessions > 0: - tlvdata = "" - if len(sids) > 0: - tlvdata += coreapi.CoreSessionTlv.pack( \ - coreapi.CORE_TLV_SESS_NUMBER, sids) + tlv_data = "" + if len(session_ids) > 0: + tlv_data += coreapi.CoreSessionTlv.pack(SessionTlvs.NUMBER.value, session_ids) if len(names) > 0: - tlvdata += coreapi.CoreSessionTlv.pack( \ - coreapi.CORE_TLV_SESS_NAME, names) + tlv_data += coreapi.CoreSessionTlv.pack(SessionTlvs.NAME.value, names) if len(files) > 0: - tlvdata += coreapi.CoreSessionTlv.pack( \ - coreapi.CORE_TLV_SESS_FILE, files) - if len(ncs) > 0: - tlvdata += coreapi.CoreSessionTlv.pack( \ - coreapi.CORE_TLV_SESS_NODECOUNT, ncs) + tlv_data += coreapi.CoreSessionTlv.pack(SessionTlvs.FILE.value, files) + if len(node_counts) > 0: + tlv_data += coreapi.CoreSessionTlv.pack(SessionTlvs.NODE_COUNT.value, node_counts) if len(dates) > 0: - tlvdata += coreapi.CoreSessionTlv.pack( \ - coreapi.CORE_TLV_SESS_DATE, dates) + tlv_data += coreapi.CoreSessionTlv.pack(SessionTlvs.DATE.value, dates) if len(thumbs) > 0: - tlvdata += coreapi.CoreSessionTlv.pack( \ - coreapi.CORE_TLV_SESS_THUMB, thumbs) - msg = coreapi.CoreSessionMessage.pack(flags, tlvdata) + tlv_data += coreapi.CoreSessionTlv.pack(SessionTlvs.THUMB.value, thumbs) + message = coreapi.CoreSessionMessage.pack(flags, tlv_data) else: - msg = None - return(msg) + message = None - def dumpsessions(self): - ''' Debug print all session info. - ''' - print "sessions:" - self._sessionslock.acquire() - try: - for sessionid in self._sessions: - print sessionid, - finally: - self._sessionslock.release() - print "" - sys.stdout.flush() + return message - def setsessionmaster(self, handler): - ''' Call the setmaster() method for every session. Returns True when - a session having the given handler was updated. 
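to_session_message() above flattens each per-session field (ids, names, files, node counts, dates, thumbnails) into a single "|"-delimited string, substituting an empty string when a value is missing. A minimal helper showing that encoding:

    def join_field(values):
        """Join per-session values with "|", using "" for missing entries."""
        return "|".join("" if value is None else str(value) for value in values)

    print(join_field(["routing demo", None, "wlan test"]))  # routing demo||wlan test
    print(join_field([3, 10, 0]))                           # 3|10|0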
- ''' - found = False - self._sessionslock.acquire() - try: - for sessionid in self._sessions: - found = self._sessions[sessionid].setmaster(handler) - if found is True: - break - finally: - self._sessionslock.release() - return found + def dump_sessions(self): + """ + Log currently known session information. + """ + logger.info("sessions:") + with self._sessions_lock: + for session_id in self.sessions: + logger.info(session_id) + + # def set_session_master(self, handler): + # """ + # Call the setmaster() method for every session. Returns True when + # a session having the given handler was updated. + # """ + # found = False + # + # with self._sessions_lock: + # for session_id in self.sessions: + # found = self.sessions[session_id].set_master(handler) + # if found is True: + # break + # + # return found class CoreUdpServer(SocketServer.ThreadingMixIn, SocketServer.UDPServer): - ''' UDP server class, manages sessions and spawns request handlers for - incoming connections. - ''' + """ + UDP server class, manages sessions and spawns request handlers for + incoming connections. + """ daemon_threads = True allow_reuse_address = True - def __init__(self, server_address, RequestHandlerClass, mainserver): - ''' Server class initialization takes configuration data and calls - the SocketServer constructor - ''' - self.mainserver = mainserver - SocketServer.UDPServer.__init__(self, server_address, - RequestHandlerClass) - - def start(self): - ''' Thread target to run concurrently with the TCP server. - ''' - self.serve_forever() - + def __init__(self, server_address, handler_class, main_server): + """ + Server class initialization takes configuration data and calls + the SocketServer constructor + :param tuple[str, int] server_address: server address + :param class handler_class: class for handling requests + :param main_server: main server to associate with + """ + self.mainserver = main_server + SocketServer.UDPServer.__init__(self, server_address, handler_class) + + def start(self): + """ + Thread target to run concurrently with the TCP server. + + :return: nothing + """ + self.serve_forever() class CoreAuxServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer): - ''' An auxiliary TCP server. - ''' + """ + An auxiliary TCP server. + """ daemon_threads = True allow_reuse_address = True - def __init__(self, server_address, RequestHandlerClass, mainserver): - self.mainserver = mainserver - sys.stdout.write("auxiliary server started, listening on: %s:%s\n" % server_address) - sys.stdout.flush() - SocketServer.TCPServer.__init__(self, server_address, RequestHandlerClass) + def __init__(self, server_address, handler_class, main_server): + """ + Create a CoreAuxServer instance. + + :param tuple[str, int] server_address: server address + :param class handler_class: class for handling requests + :param main_server: main server to associate with + """ + + self.mainserver = main_server + logger.info("auxiliary server started, listening on: %s", server_address) + SocketServer.TCPServer.__init__(self, server_address, handler_class) def start(self): + """ + Start the core auxiliary server. 
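CoreServer, CoreUdpServer, and CoreAuxServer above are all thin SocketServer.ThreadingMixIn subclasses whose start() methods simply call serve_forever(), so the UDP and auxiliary servers can run on their own threads next to the main TCP server. A hedged, self-contained sketch of that pattern (Python 2 module name to match the code above; the echo handler is made up):

    import SocketServer
    import threading

    class EchoHandler(SocketServer.BaseRequestHandler):
        def handle(self):
            # echo a single request back to the client
            data = self.request.recv(1024)
            self.request.sendall(data)

    class EchoServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
        daemon_threads = True
        allow_reuse_address = True

    server = EchoServer(("localhost", 0), EchoHandler)
    thread = threading.Thread(target=server.serve_forever)
    thread.daemon = True
    thread.start()
    print("listening on %s:%s" % server.server_address)
    server.shutdown()
    server.server_close()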
+ + :return: nothing + """ self.serve_forever() - def setsessionmaster(self, handler): - return self.mainserver.setsessionmaster(handler) - - def getsession(self, sessionid = None, useexisting = True): - return self.mainserver.getsession(sessionid, useexisting) - - def tosessionmsg(self, flags = 0): - return self.mainserver.tosessionmsg(flags) - - - - - - -class CoreRequestHandler(SocketServer.BaseRequestHandler): - ''' The SocketServer class uses the RequestHandler class for servicing - requests, mainly through the handle() method. The CoreRequestHandler - has the following basic flow: - 1. Client connects and request comes in via handle(). - 2. handle() calls recvmsg() in a loop. - 3. recvmsg() does a recv() call on the socket performs basic - checks that this we received a CoreMessage, returning it. - 4. The message data is queued using queuemsg(). - 5. The handlerthread() thread pops messages from the queue and uses - handlemsg() to invoke the appropriate handler for that message type. - - ''' - - maxmsgqueuedtimes = 8 - - def __init__(self, request, client_address, server): - self.done = False - self.msghandler = { - coreapi.CORE_API_NODE_MSG: self.handlenodemsg, - coreapi.CORE_API_LINK_MSG: self.handlelinkmsg, - coreapi.CORE_API_EXEC_MSG: self.handleexecmsg, - coreapi.CORE_API_REG_MSG: self.handleregmsg, - coreapi.CORE_API_CONF_MSG: self.handleconfmsg, - coreapi.CORE_API_FILE_MSG: self.handlefilemsg, - coreapi.CORE_API_IFACE_MSG: self.handleifacemsg, - coreapi.CORE_API_EVENT_MSG: self.handleeventmsg, - coreapi.CORE_API_SESS_MSG: self.handlesessionmsg, - } - self.msgq = [] - self.msgcv = threading.Condition() - self.nodestatusreq = {} - self._shutdownlock = threading.Lock() - numthreads = int(server.cfg['numthreads']) - if numthreads < 1: - raise ValueError, \ - "invalid number of threads: %s" % numthreads - self.handlerthreads = [] - while numthreads: - t = threading.Thread(target = self.handlerthread) - self.handlerthreads.append(t) - t.start() - numthreads -= 1 - self.master = False - self.verbose = bool(server.cfg['verbose'].lower() == "true") - self.debug = bool(server.cfg['debug'].lower() == "true") - self.session = None - #self.numwlan = 0 - closeonexec(request.fileno()) - SocketServer.BaseRequestHandler.__init__(self, request, - client_address, server) - - def setup(self): - ''' Client has connected, set up a new connection. - ''' - self.info("new TCP connection: %s:%s" % self.client_address) - #self.register() - - - def finish(self): - ''' Client has disconnected, end this request handler and disconnect - from the session. Shutdown sessions that are not running. 
- ''' - if self.verbose: - self.info("client disconnected: notifying threads") - max_attempts = 5 - timeout = 0.0625 # wait for 1.9375s max - while len(self.msgq) > 0 and max_attempts > 0: - if self.verbose: - self.info("%d messages remain in queue (%d)" % \ - (len(self.msgq), max_attempts)) - max_attempts -= 1 - self.msgcv.acquire() - self.msgcv.notifyAll() # drain msgq before dying - self.msgcv.release() - time.sleep(timeout) # allow time for msg processing - timeout *= 2 # backoff timer - self.msgcv.acquire() - self.done = True - self.msgcv.notifyAll() - self.msgcv.release() - for t in self.handlerthreads: - if self.verbose: - self.info("waiting for thread: %s" % t.getName()) - timeout = 2.0 # seconds - t.join(timeout) - if t.isAlive(): - self.warn("joining %s failed: still alive after %s sec" % - (t.getName(), timeout)) - self.info("connection closed: %s:%s" % self.client_address) - if self.session: - self.session.disconnect(self) - return SocketServer.BaseRequestHandler.finish(self) - - - def info(self, msg): - ''' Utility method for writing output to stdout. - ''' - print msg - sys.stdout.flush() - - - def warn(self, msg): - ''' Utility method for writing output to stderr. - ''' - print >> sys.stderr, msg - sys.stderr.flush() - - def register(self): - ''' Return a Register Message - ''' - self.info("GUI has connected to session %d at %s" % \ - (self.session.sessionid, time.ctime())) - tlvdata = "" - tlvdata += coreapi.CoreRegTlv.pack(coreapi.CORE_TLV_REG_EXECSRV, - "core-daemon") - tlvdata += coreapi.CoreRegTlv.pack(coreapi.CORE_TLV_REG_EMULSRV, - "core-daemon") - tlvdata += self.session.confobjs_to_tlvs() - return coreapi.CoreRegMessage.pack(coreapi.CORE_API_ADD_FLAG, tlvdata) - - def sendall(self, data): - ''' Send raw data to the other end of this TCP connection - using socket's sendall(). - ''' - return self.request.sendall(data) - - def recvmsg(self): - ''' Receive data and return a CORE API message object. - ''' - try: - msghdr = self.request.recv(coreapi.CoreMessage.hdrsiz) - if self.debug and len(msghdr) > 0: - self.info("received message header:\n%s" % hexdump(msghdr)) - except Exception, e: - raise IOError, "error receiving header (%s)" % e - if len(msghdr) != coreapi.CoreMessage.hdrsiz: - if len(msghdr) == 0: - raise EOFError, "client disconnected" - else: - raise IOError, "invalid message header size" - msgtype, msgflags, msglen = coreapi.CoreMessage.unpackhdr(msghdr) - if msglen == 0: - self.warn("received message with no data") - data = "" - while len(data) < msglen: - data += self.request.recv(msglen - len(data)) - if self.debug: - self.info("received message data:\n%s" % hexdump(data)) - if len(data) > msglen: - self.warn("received message length does not match received data " \ - "(%s != %s)" % (len(data), msglen)) - raise IOError - try: - msgcls = coreapi.msg_class(msgtype) - msg = msgcls(msgflags, msghdr, data) - except KeyError: - msg = coreapi.CoreMessage(msgflags, msghdr, data) - msg.msgtype = msgtype - self.warn("unimplemented core message type: %s" % msg.typestr()) - return msg - - - def queuemsg(self, msg): - ''' Queue an API message for later processing. 
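recvmsg() above reads a fixed-size header, unpacks the payload length from it, and then loops on recv() until the whole payload has arrived. The sketch below shows that framing pattern with a stand-in 4-byte header (1-byte type, 1-byte flags, 2-byte length); it is an illustration, not necessarily the exact CORE API header layout:

    import socket
    import struct

    HEADER_FORMAT = "!BBH"  # stand-in header: type, flags, payload length
    HEADER_SIZE = struct.calcsize(HEADER_FORMAT)

    def recv_exact(sock, size):
        """Keep calling recv() until exactly size bytes have been read."""
        data = b""
        while len(data) < size:
            chunk = sock.recv(size - len(data))
            if not chunk:
                raise EOFError("client disconnected")
            data += chunk
        return data

    def recv_message(sock):
        header = recv_exact(sock, HEADER_SIZE)
        message_type, flags, length = struct.unpack(HEADER_FORMAT, header)
        return message_type, flags, recv_exact(sock, length)

    left, right = socket.socketpair()
    left.sendall(struct.pack(HEADER_FORMAT, 7, 0, 5) + b"hello")
    print(recv_message(right))  # (7, 0, 'hello')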
- ''' - if msg.queuedtimes >= self.maxmsgqueuedtimes: - self.warn("dropping message queued %d times: %s" % - (msg.queuedtimes, msg)) - return - if self.debug: - self.info("queueing msg (queuedtimes = %s): type %s" % - (msg.queuedtimes, msg.msgtype)) - msg.queuedtimes += 1 - self.msgcv.acquire() - self.msgq.append(msg) - self.msgcv.notify() - self.msgcv.release() - - def handlerthread(self): - ''' CORE API message handling loop that is spawned for each server - thread; get CORE API messages from the incoming message queue, - and call handlemsg() for processing. - ''' - while not self.done: - # get a coreapi.CoreMessage() from the incoming queue - self.msgcv.acquire() - while not self.msgq: - self.msgcv.wait() - if self.done: - self.msgcv.release() - return - msg = self.msgq.pop(0) - self.msgcv.release() - self.handlemsg(msg) - - - def handlemsg(self, msg): - ''' Handle an incoming message; dispatch based on message type, - optionally sending replies. - ''' - if self.session and self.session.broker.handlemsg(msg): - if self.debug: - self.info("%s forwarding message:\n%s" % - (threading.currentThread().getName(), msg)) - return - - if self.debug: - self.info("%s handling message:\n%s" % - (threading.currentThread().getName(), msg)) - - if msg.msgtype not in self.msghandler: - self.warn("no handler for message type: %s" % - msg.typestr()) - return - msghandler = self.msghandler[msg.msgtype] - - try: - replies = msghandler(msg) - self.dispatchreplies(replies,msg) - except Exception, e: - self.warn("%s: exception while handling msg:\n%s\n%s" % - (threading.currentThread().getName(), msg, - traceback.format_exc())) - - # Added to allow the auxiliary handlers to define a different behavior when replying - # to messages from clients - def dispatchreplies(self, replies, msg): - ''' - Dispatch replies by CORE to message msg previously received from the client. - ''' - for reply in replies: - if self.debug: - msgtype, msgflags, msglen = \ - coreapi.CoreMessage.unpackhdr(reply) - try: - rmsg = coreapi.msg_class(msgtype)(msgflags, - reply[:coreapi.CoreMessage.hdrsiz], - reply[coreapi.CoreMessage.hdrsiz:]) - except KeyError: - # multiple TLVs of same type cause KeyError exception - rmsg = "CoreMessage (type %d flags %d length %d)" % \ - (msgtype, msgflags, msglen) - self.info("%s: reply msg:\n%s" % - (threading.currentThread().getName(), rmsg)) - try: - self.sendall(reply) - except Exception, e: - self.warn("Error sending reply data: %s" % e) - - - def handle(self): - ''' Handle a new connection request from a client. Dispatch to the - recvmsg() method for receiving data into CORE API messages, and - add them to an incoming message queue. 
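queuemsg() and handlerthread() above form a small producer/consumer pair: incoming messages are appended under a condition variable and one or more handler threads pop and process them until the connection is done. A self-contained sketch of that loop:

    import threading

    class MessageQueue(object):
        """Condition-variable producer/consumer queue, one handler thread."""

        def __init__(self):
            self.queue = []
            self.condition = threading.Condition()
            self.done = False

        def put(self, message):
            with self.condition:
                self.queue.append(message)
                self.condition.notify()

        def worker(self, handler):
            while True:
                with self.condition:
                    while not self.queue and not self.done:
                        self.condition.wait()
                    if self.done and not self.queue:
                        return
                    message = self.queue.pop(0)
                handler(message)

    message_queue = MessageQueue()
    thread = threading.Thread(target=message_queue.worker, args=(lambda m: None,))
    thread.daemon = True
    thread.start()
    message_queue.put("node message")
    with message_queue.condition:
        message_queue.done = True
        message_queue.condition.notify_all()
    thread.join(2.0)
    print("handler thread finished: %s" % (not thread.is_alive()))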
- ''' - # use port as session id - port = self.request.getpeername()[1] - self.session = self.server.getsession(sessionid = port, - useexisting = False) - self.session.connect(self) - while True: - try: - msg = self.recvmsg() - except EOFError: - break - except IOError, e: - self.warn("IOError: %s" % e) - break - msg.queuedtimes = 0 - self.queuemsg(msg) - if (msg.msgtype == coreapi.CORE_API_SESS_MSG): - # delay is required for brief connections, allow session joining - time.sleep(0.125) - self.session.broadcast(self, msg) - #self.session.shutdown() - #del self.session - gc.collect() -# print "gc count:", gc.get_count() -# for o in gc.get_objects(): -# if isinstance(o, pycore.PyCoreObj): -# print "XXX XXX XXX PyCoreObj:", o -# for r in gc.get_referrers(o): -# print "XXX XXX XXX referrer:", gc.get_referrers(o) - - - def handlenodemsg(self, msg): - ''' Node Message handler - ''' - replies = [] - if msg.flags & coreapi.CORE_API_ADD_FLAG and \ - msg.flags & coreapi.CORE_API_DEL_FLAG: - self.warn("ignoring invalid message: " - "add and delete flag both set") - return () - nodenum = msg.tlvdata[coreapi.CORE_TLV_NODE_NUMBER] - nodexpos = msg.gettlv(coreapi.CORE_TLV_NODE_XPOS) - nodeypos = msg.gettlv(coreapi.CORE_TLV_NODE_YPOS) - canvas = msg.gettlv(coreapi.CORE_TLV_NODE_CANVAS) - icon = msg.gettlv(coreapi.CORE_TLV_NODE_ICON) - lat = msg.gettlv(coreapi.CORE_TLV_NODE_LAT) - lng = msg.gettlv(coreapi.CORE_TLV_NODE_LONG) - alt = msg.gettlv(coreapi.CORE_TLV_NODE_ALT) - if nodexpos is None and nodeypos is None and \ - lat is not None and lng is not None and alt is not None: - (x, y, z) = self.session.location.getxyz(float(lat), float(lng), - float(alt)) - nodexpos = int(x) - nodeypos = int(y) - # GUI can't handle lat/long, so generate another X/Y position message - tlvdata = "" - tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_NUMBER, - nodenum) - tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_XPOS, - nodexpos) - tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_YPOS, - nodeypos) - self.session.broadcastraw(self, coreapi.CoreNodeMessage.pack(0, tlvdata)) - - if msg.flags & coreapi.CORE_API_ADD_FLAG: - nodetype = msg.tlvdata[coreapi.CORE_TLV_NODE_TYPE] - try: - nodecls = coreapi.node_class(nodetype) - except KeyError: - try: - nodetypestr = " (%s)" % coreapi.node_types[nodetype] - except KeyError: - nodetypestr = "" - self.warn("warning: unimplemented node type: %s%s" % \ - (nodetype, nodetypestr)) - return () - start = False - if self.session.getstate() > coreapi.CORE_EVENT_DEFINITION_STATE: - start = True - - nodename = msg.tlvdata[coreapi.CORE_TLV_NODE_NAME] - model = msg.gettlv(coreapi.CORE_TLV_NODE_MODEL) - clsargs = { 'verbose': self.verbose, 'start': start } - if nodetype == coreapi.CORE_NODE_XEN: - clsargs['model'] = model - if nodetype == coreapi.CORE_NODE_RJ45: - if hasattr(self.session.options, 'enablerj45'): - if self.session.options.enablerj45 == '0': - clsargs['start'] = False - # this instantiates an object of class nodecls, - # creating the node or network - n = self.session.addobj(cls = nodecls, objid = nodenum, - name = nodename, **clsargs) - if nodexpos is not None and nodeypos is not None: - n.setposition(nodexpos, nodeypos, None) - if canvas is not None: - n.canvas = canvas - if icon is not None: - n.icon = icon - opaque = msg.gettlv(coreapi.CORE_TLV_NODE_OPAQUE) - if opaque is not None: - n.opaque = opaque - - # add services to a node, either from its services TLV or - # through the configured defaults for this node type - if nodetype == 
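handlemsg() above looks the incoming message type up in a handler dictionary built in __init__ and warns when no handler is registered, so a new message type only needs one more dictionary entry. A small sketch of that dispatch-table pattern with invented message types:

    class Dispatcher(object):
        """Dispatch messages to per-type handler methods via a lookup table."""

        def __init__(self):
            self.handlers = {
                "node": self.handle_node,
                "link": self.handle_link,
            }

        def handle(self, message_type, message):
            handler = self.handlers.get(message_type)
            if handler is None:
                print("no handler for message type: %s" % message_type)
                return []
            return handler(message)

        def handle_node(self, message):
            return ["node reply for %s" % message]

        def handle_link(self, message):
            return []

    dispatcher = Dispatcher()
    print(dispatcher.handle("node", "n1"))      # ['node reply for n1']
    print(dispatcher.handle("event", "start"))  # warns, then []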
coreapi.CORE_NODE_DEF or \ - nodetype == coreapi.CORE_NODE_PHYS or \ - nodetype == coreapi.CORE_NODE_XEN: - if model is None: - # TODO: default model from conf file? - model = "router" - n.type = model - services_str = msg.gettlv(coreapi.CORE_TLV_NODE_SERVICES) - self.session.services.addservicestonode(n, model, services_str, - self.verbose) - # boot nodes if they are added after runtime (like - # session.bootnodes()) - if self.session.getstate() == coreapi.CORE_EVENT_RUNTIME_STATE: - if isinstance(n, pycore.nodes.PyCoreNode) and \ - not isinstance(n, pycore.nodes.RJ45Node): - self.session.writeobjs() - self.session.addremovectrlif(node=n, remove=False) - n.boot() - # self.session.updatectrlifhosts() - # n.validate() - if msg.flags & coreapi.CORE_API_STR_FLAG: - self.nodestatusreq[nodenum] = True - self.session.sendnodeemuid(self, nodenum) - - elif msg.flags & coreapi.CORE_API_STR_FLAG: - self.nodestatusreq[nodenum] = True - - elif msg.flags & coreapi.CORE_API_DEL_FLAG: - n = None - try: - n = self.session.obj(nodenum) - except KeyError: - pass - - with self._shutdownlock: - self.session.delobj(nodenum) - - if msg.flags & coreapi.CORE_API_STR_FLAG: - tlvdata = "" - tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_NUMBER, - nodenum) - flags = coreapi.CORE_API_DEL_FLAG | coreapi.CORE_API_LOC_FLAG - replies.append(coreapi.CoreNodeMessage.pack(flags, tlvdata)) - - for reply in self.session.checkshutdown(): - replies.append(reply) - # Node modify message (no add/del flag) - else: - n = None - try: - n = self.session.obj(nodenum) - except KeyError: - if self.verbose: - self.warn("ignoring node message: unknown node number %s" \ - % nodenum) - #nodeemuid = msg.gettlv(coreapi.CORE_TLV_NODE_EMUID) - if nodexpos is None or nodeypos is None: - if self.verbose: - self.info("ignoring node message: nothing to do") - else: - if n: - n.setposition(nodexpos, nodeypos, None) - if n: - if canvas is not None: - n.canvas = canvas - if icon is not None: - n.icon = icon - - return replies - - - def handlelinkmsg(self, msg): - ''' Link Message handler - ''' - - nodenum1 = msg.gettlv(coreapi.CORE_TLV_LINK_N1NUMBER) - ifindex1 = msg.gettlv(coreapi.CORE_TLV_LINK_IF1NUM) - ipv41 = msg.gettlv(coreapi.CORE_TLV_LINK_IF1IP4) - ipv4mask1 = msg.gettlv(coreapi.CORE_TLV_LINK_IF1IP4MASK) - mac1 = msg.gettlv(coreapi.CORE_TLV_LINK_IF1MAC) - ipv61 = msg.gettlv(coreapi.CORE_TLV_LINK_IF1IP6) - ipv6mask1 = msg.gettlv(coreapi.CORE_TLV_LINK_IF1IP6MASK) - ifname1 = msg.gettlv(coreapi.CORE_TLV_LINK_IF1NAME) - - nodenum2 = msg.gettlv(coreapi.CORE_TLV_LINK_N2NUMBER) - ifindex2 = msg.gettlv(coreapi.CORE_TLV_LINK_IF2NUM) - ipv42 = msg.gettlv(coreapi.CORE_TLV_LINK_IF2IP4) - ipv4mask2 = msg.gettlv(coreapi.CORE_TLV_LINK_IF2IP4MASK) - mac2 = msg.gettlv(coreapi.CORE_TLV_LINK_IF2MAC) - ipv62 = msg.gettlv(coreapi.CORE_TLV_LINK_IF2IP6) - ipv6mask2 = msg.gettlv(coreapi.CORE_TLV_LINK_IF2IP6MASK) - ifname2 = msg.gettlv(coreapi.CORE_TLV_LINK_IF2NAME) - - node1 = None - node2 = None - net = None - net2 = None - - uni = msg.gettlv(coreapi.CORE_TLV_LINK_UNI) - if uni is not None and uni == 1: - unidirectional = True - else: - unidirectional = False - - - # one of the nodes may exist on a remote server - if nodenum1 is not None and nodenum2 is not None: - t = self.session.broker.gettunnel(nodenum1, nodenum2) - if isinstance(t, pycore.nodes.PyCoreNet): - net = t - if t.remotenum == nodenum1: - nodenum1 = None - else: - nodenum2 = None - # PhysicalNode connected via GreTap tunnel; uses adoptnetif() below - elif t is not None: - if t.remotenum == 
nodenum1: - nodenum1 = None - else: - nodenum2 = None - - - if nodenum1 is not None: - try: - n = self.session.obj(nodenum1) - except KeyError: - # XXX wait and queue this message to try again later - # XXX maybe this should be done differently - time.sleep(0.125) - self.queuemsg(msg) - return () - if isinstance(n, pycore.nodes.PyCoreNode): - node1 = n - elif isinstance(n, pycore.nodes.PyCoreNet): - if net is None: - net = n - else: - net2 = n - else: - raise ValueError, "unexpected object class: %s" % n - - if nodenum2 is not None: - try: - n = self.session.obj(nodenum2) - except KeyError: - # XXX wait and queue this message to try again later - # XXX maybe this should be done differently - time.sleep(0.125) - self.queuemsg(msg) - return () - if isinstance(n, pycore.nodes.PyCoreNode): - node2 = n - elif isinstance(n, pycore.nodes.PyCoreNet): - if net is None: - net = n - else: - net2 = n - else: - raise ValueError, "unexpected object class: %s" % n - - link_msg_type = msg.gettlv(coreapi.CORE_TLV_LINK_TYPE) - - if node1: - node1.lock.acquire() - if node2: - node2.lock.acquire() - - try: - if link_msg_type == coreapi.CORE_LINK_WIRELESS: - ''' Wireless link/unlink event - ''' - numwlan = 0 - objs = [node1, node2, net, net2] - objs = filter( lambda(x): x is not None, objs ) - if len(objs) < 2: - raise ValueError, "wireless link/unlink message between unknown objects" - - nets = objs[0].commonnets(objs[1]) - for (netcommon, netif1, netif2) in nets: - if not isinstance(netcommon, pycore.nodes.WlanNode) and \ - not isinstance(netcommon, pycore.nodes.EmaneNode): - continue - if msg.flags & coreapi.CORE_API_ADD_FLAG: - netcommon.link(netif1, netif2) - elif msg.flags & coreapi.CORE_API_DEL_FLAG: - netcommon.unlink(netif1, netif2) - else: - raise ValueError, "invalid flags for wireless link/unlink message" - numwlan += 1 - if numwlan == 0: - raise ValueError, \ - "no common network found for wireless link/unlink" - - elif msg.flags & coreapi.CORE_API_ADD_FLAG: - ''' Add a new link. 
- ''' - start = False - if self.session.getstate() > coreapi.CORE_EVENT_DEFINITION_STATE: - start = True - - if node1 and node2 and not net: - # a new wired link - net = self.session.addobj(cls = pycore.nodes.PtpNet, - verbose = self.verbose, - start = start) - - bw = msg.gettlv(coreapi.CORE_TLV_LINK_BW) - delay = msg.gettlv(coreapi.CORE_TLV_LINK_DELAY) - loss = msg.gettlv(coreapi.CORE_TLV_LINK_PER) - duplicate = msg.gettlv(coreapi.CORE_TLV_LINK_DUP) - jitter = msg.gettlv(coreapi.CORE_TLV_LINK_JITTER) - key = msg.gettlv(coreapi.CORE_TLV_LINK_KEY) - - netaddrlist = [] - #print " n1=%s n2=%s net=%s net2=%s" % (node1, node2, net, net2) - if node1 and net: - addrlist = [] - if ipv41 is not None and ipv4mask1 is not None: - addrlist.append("%s/%s" % (ipv41, ipv4mask1)) - if ipv61 is not None and ipv6mask1 is not None: - addrlist.append("%s/%s" % (ipv61, ipv6mask1)) - if ipv42 is not None and ipv4mask2 is not None: - netaddrlist.append("%s/%s" % (ipv42, ipv4mask2)) - if ipv62 is not None and ipv6mask2 is not None: - netaddrlist.append("%s/%s" % (ipv62, ipv6mask2)) - ifindex1 = node1.newnetif(net, addrlist = addrlist, - hwaddr = mac1, ifindex = ifindex1, ifname=ifname1) - net.linkconfig(node1.netif(ifindex1, net), bw = bw, - delay = delay, loss = loss, - duplicate = duplicate, jitter = jitter) - if node1 is None and net: - if ipv41 is not None and ipv4mask1 is not None: - netaddrlist.append("%s/%s" % (ipv41, ipv4mask1)) - # don't add this address again if node2 and net - ipv41 = None - if ipv61 is not None and ipv6mask1 is not None: - netaddrlist.append("%s/%s" % (ipv61, ipv6mask1)) - # don't add this address again if node2 and net - ipv61 = None - if node2 and net: - addrlist = [] - if ipv42 is not None and ipv4mask2 is not None: - addrlist.append("%s/%s" % (ipv42, ipv4mask2)) - if ipv62 is not None and ipv6mask2 is not None: - addrlist.append("%s/%s" % (ipv62, ipv6mask2)) - if ipv41 is not None and ipv4mask1 is not None: - netaddrlist.append("%s/%s" % (ipv41, ipv4mask1)) - if ipv61 is not None and ipv6mask1 is not None: - netaddrlist.append("%s/%s" % (ipv61, ipv6mask1)) - ifindex2 = node2.newnetif(net, addrlist = addrlist, - hwaddr = mac2, ifindex = ifindex2, ifname=ifname2) - if not unidirectional: - net.linkconfig(node2.netif(ifindex2, net), bw = bw, - delay = delay, loss = loss, - duplicate = duplicate, jitter = jitter) - if node2 is None and net2: - if ipv42 is not None and ipv4mask2 is not None: - netaddrlist.append("%s/%s" % (ipv42, ipv4mask2)) - if ipv62 is not None and ipv6mask2 is not None: - netaddrlist.append("%s/%s" % (ipv62, ipv6mask2)) - - # tunnel node finalized with this link message - if key and isinstance(net, pycore.nodes.TunnelNode): - net.setkey(key) - if len(netaddrlist) > 0: - net.addrconfig(netaddrlist) - if key and isinstance(net2, pycore.nodes.TunnelNode): - net2.setkey(key) - if len(netaddrlist) > 0: - net2.addrconfig(netaddrlist) - - if net and net2: - # two layer-2 networks linked together - if isinstance(net2, pycore.nodes.RJ45Node): - netif = net2.linknet(net) # RJ45 nodes have different linknet() - else: - netif = net.linknet(net2) - net.linkconfig(netif, bw = bw, delay = delay, loss = loss, - duplicate = duplicate, jitter = jitter) - if not unidirectional: - netif.swapparams('_params_up') - net2.linkconfig(netif, bw = bw, delay = delay, loss = loss, - duplicate = duplicate, jitter = jitter, - devname = netif.name) - netif.swapparams('_params_up') - - - elif net is None and net2 is None and \ - (node1 is None or node2 is None): - # apply address/parameters to 
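The link handler above assembles interface address lists by pairing each optional IPv4/IPv6 address TLV with its mask and formatting them as "address/prefix" strings before calling newnetif(). That assembly step in isolation:

    def build_addr_list(ipv4=None, ipv4_mask=None, ipv6=None, ipv6_mask=None):
        """Collect "address/prefix" strings for whichever families are present."""
        addresses = []
        if ipv4 is not None and ipv4_mask is not None:
            addresses.append("%s/%s" % (ipv4, ipv4_mask))
        if ipv6 is not None and ipv6_mask is not None:
            addresses.append("%s/%s" % (ipv6, ipv6_mask))
        return addresses

    print(build_addr_list("10.0.0.1", 24))                     # ['10.0.0.1/24']
    print(build_addr_list("10.0.0.2", 24, "2001:db8::2", 64))  # ['10.0.0.2/24', '2001:db8::2/64']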
PhysicalNodes - fx = (bw, delay, loss, duplicate, jitter) - addrlist = [] - if node1 and isinstance(node1, pycore.pnodes.PhysicalNode): - if ipv41 is not None and ipv4mask1 is not None: - addrlist.append("%s/%s" % (ipv41, ipv4mask1)) - if ipv61 is not None and ipv6mask1 is not None: - addrlist.append("%s/%s" % (ipv61, ipv6mask1)) - node1.adoptnetif(t, ifindex1, mac1, addrlist) - node1.linkconfig(t, bw, delay, loss, duplicate, jitter) - elif node2 and isinstance(node2, pycore.pnodes.PhysicalNode): - if ipv42 is not None and ipv4mask2 is not None: - addrlist.append("%s/%s" % (ipv42, ipv4mask2)) - if ipv62 is not None and ipv6mask2 is not None: - addrlist.append("%s/%s" % (ipv62, ipv6mask2)) - node2.adoptnetif(t, ifindex2, mac2, addrlist) - node2.linkconfig(t, bw, delay, loss, duplicate, jitter) - # delete a link - elif msg.flags & coreapi.CORE_API_DEL_FLAG: - ''' Remove a link. - ''' - if node1 and node2: - # TODO: fix this for the case where ifindex[1,2] are - # not specified - # a wired unlink event, delete the connecting bridge - netif1 = node1.netif(ifindex1) - netif2 = node2.netif(ifindex2) - if netif1 is None and netif2 is None: - nets = node1.commonnets(node2) - for (netcommon, tmp1, tmp2) in nets: - if (net and netcommon == net) or net is None: - netif1 = tmp1 - netif2 = tmp2 - break - if netif1 is None or netif2 is None: - pass - elif netif1.net or netif2.net: - if netif1.net != netif2.net: - if not netif1.up or not netif2.up: - pass - else: - raise ValueError, "no common network found" - net = netif1.net - netif1.detachnet() - netif2.detachnet() - if net.numnetif() == 0: - self.session.delobj(net.objid) - node1.delnetif(ifindex1) - node2.delnetif(ifindex2) - else: - ''' Modify a link. - ''' - bw = msg.gettlv(coreapi.CORE_TLV_LINK_BW) - delay = msg.gettlv(coreapi.CORE_TLV_LINK_DELAY) - loss = msg.gettlv(coreapi.CORE_TLV_LINK_PER) - duplicate = msg.gettlv(coreapi.CORE_TLV_LINK_DUP) - jitter = msg.gettlv(coreapi.CORE_TLV_LINK_JITTER) - numnet = 0 - # TODO: clean up all this logic. Having the add flag or not - # should use the same code block. 
- if node1 is None and node2 is None: - if net and net2: - # modify link between nets - netif = net.getlinknetif(net2) - upstream = False - if netif is None: - upstream = True - netif = net2.getlinknetif(net) - if netif is None: - raise ValueError, "modify unknown link between nets" - if upstream: - netif.swapparams('_params_up') - net.linkconfig(netif, bw = bw, delay = delay, - loss = loss, duplicate = duplicate, - jitter = jitter, devname = netif.name) - netif.swapparams('_params_up') - else: - net.linkconfig(netif, bw = bw, delay = delay, - loss = loss, duplicate = duplicate, - jitter = jitter) - if not unidirectional: - if upstream: - net2.linkconfig(netif, bw = bw, delay = delay, - loss = loss, - duplicate = duplicate, - jitter = jitter) - else: - netif.swapparams('_params_up') - net2.linkconfig(netif, bw = bw, delay = delay, - loss = loss, - duplicate = duplicate, - jitter = jitter, - devname = netif.name) - netif.swapparams('_params_up') - else: - raise ValueError, "modify link for unknown nodes" - elif node1 is None: - # node1 = layer 2node, node2 = layer3 node - net.linkconfig(node2.netif(ifindex2, net), bw = bw, - delay = delay, loss = loss, - duplicate = duplicate, jitter = jitter) - elif node2 is None: - # node2 = layer 2node, node1 = layer3 node - net.linkconfig(node1.netif(ifindex1, net), bw = bw, - delay = delay, loss = loss, - duplicate = duplicate, jitter = jitter) - else: - nets = node1.commonnets(node2) - for (net, netif1, netif2) in nets: - if ifindex1 is not None and \ - ifindex1 != node1.getifindex(netif1): - continue - net.linkconfig(netif1, bw = bw, delay = delay, - loss = loss, duplicate = duplicate, - jitter = jitter, netif2 = netif2) - if not unidirectional: - net.linkconfig(netif2, bw = bw, delay = delay, - loss = loss, duplicate = duplicate, - jitter = jitter, netif2 = netif1) - numnet += 1 - if numnet == 0: - raise ValueError, "no common network found" - - - finally: - if node1: - node1.lock.release() - if node2: - node2.lock.release() - return () - - def handleexecmsg(self, msg): - ''' Execute Message handler - ''' - nodenum = msg.gettlv(coreapi.CORE_TLV_EXEC_NODE) - execnum = msg.gettlv(coreapi.CORE_TLV_EXEC_NUM) - exectime = msg.gettlv(coreapi.CORE_TLV_EXEC_TIME) - cmd = msg.gettlv(coreapi.CORE_TLV_EXEC_CMD) - - # local flag indicates command executed locally, not on a node - if nodenum is None and not msg.flags & coreapi.CORE_API_LOC_FLAG: - raise ValueError, "Execute Message is missing node number." - if execnum is None: - raise ValueError, "Execute Message is missing execution number." 
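# ---- Editor's note: illustrative sketch, not part of the original patch ----
# Example of the Execute Message this handler parses; the TLV types and the
# CoreExecTlv/CoreExecMessage pack() helpers appear in the reply-building
# code further below, while the node number, execution number and command
# shown here are hypothetical.
from core.api import coreapi  # legacy import path used elsewhere in this tree

def _example_exec_message():
    tlvdata = coreapi.CoreExecTlv.pack(coreapi.CORE_TLV_EXEC_NODE, 1)
    tlvdata += coreapi.CoreExecTlv.pack(coreapi.CORE_TLV_EXEC_NUM, 1000)
    tlvdata += coreapi.CoreExecTlv.pack(coreapi.CORE_TLV_EXEC_CMD, "ifconfig")
    # STR_FLAG asks for the exit-status TLV, TXT_FLAG for the command output
    flags = coreapi.CORE_API_STR_FLAG | coreapi.CORE_API_TXT_FLAG
    return coreapi.CoreExecMessage.pack(flags, tlvdata)
# ---- end editor's sketch ----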
- if exectime is not None: - self.session.addevent(exectime, node=nodenum, name=None, data=cmd) - return () - - try: - n = self.session.obj(nodenum) - except KeyError: - # XXX wait and queue this message to try again later - # XXX maybe this should be done differently - if not msg.flags & coreapi.CORE_API_LOC_FLAG: - time.sleep(0.125) - self.queuemsg(msg) - return () - else: - pass - # build common TLV items for reply - tlvdata = "" - if nodenum is not None: - tlvdata += coreapi.CoreExecTlv.pack(coreapi.CORE_TLV_EXEC_NODE, - nodenum) - tlvdata += coreapi.CoreExecTlv.pack(coreapi.CORE_TLV_EXEC_NUM, execnum) - tlvdata += coreapi.CoreExecTlv.pack(coreapi.CORE_TLV_EXEC_CMD, cmd) - - if msg.flags & coreapi.CORE_API_TTY_FLAG: - if nodenum is None: - raise NotImplementedError - # echo back exec message with cmd for spawning interactive terminal - if cmd == "bash": - cmd = "/bin/bash" - res = n.termcmdstring(cmd) - tlvdata += coreapi.CoreExecTlv.pack(coreapi.CORE_TLV_EXEC_RESULT, - res) - reply = coreapi.CoreExecMessage.pack(coreapi.CORE_API_TTY_FLAG, - tlvdata) - return (reply, ) - else: - if self.verbose: - self.info("execute message with cmd = '%s'" % cmd) - # execute command and send a response - if msg.flags & coreapi.CORE_API_STR_FLAG or \ - msg.flags & coreapi.CORE_API_TXT_FLAG: - # shlex.split() handles quotes within the string - if msg.flags & coreapi.CORE_API_LOC_FLAG: - status, res = cmdresult(shlex.split(cmd)) - else: - status, res = n.cmdresult(shlex.split(cmd)) - if self.verbose: - self.info("done exec cmd='%s' with status=%d res=(%d bytes)" - % (cmd, status, len(res))) - if msg.flags & coreapi.CORE_API_TXT_FLAG: - tlvdata += coreapi.CoreExecTlv.pack( \ - coreapi.CORE_TLV_EXEC_RESULT, res) - if msg.flags & coreapi.CORE_API_STR_FLAG: - tlvdata += coreapi.CoreExecTlv.pack( \ - coreapi.CORE_TLV_EXEC_STATUS, status) - reply = coreapi.CoreExecMessage.pack(0, tlvdata) - return (reply, ) - # execute the command with no response - else: - if msg.flags & coreapi.CORE_API_LOC_FLAG: - mutedetach(shlex.split(cmd)) - else: - n.cmd(shlex.split(cmd), wait=False) - return () - - - def handleregmsg(self, msg): - ''' Register Message Handler - ''' - replies = [] - # execute a Python script or XML file - ex = msg.gettlv(coreapi.CORE_TLV_REG_EXECSRV) - if ex: - try: - self.info("executing '%s'" % ex) - if not isinstance(self.server, CoreServer): # CoreUdpServer): - server = self.server.mainserver - # elif isinstance(self.server, CoreAuxServer): - # server = self.server.mainserver - else: - server = self.server - if msg.flags & coreapi.CORE_API_STR_FLAG: - old_session_ids = set(server.getsessionids()) - sys.argv = shlex.split(ex) - filename = sys.argv[0] - if os.path.splitext(filename)[1].lower() == '.xml': - session = server.getsession(useexisting=False) - try: - opensessionxml(session, filename, start=True) - except: - session.shutdown() - server.delsession(session) - raise - else: - t = threading.Thread(target = execfile, - args=(filename, {'__file__': filename, - 'server': server})) - t.daemon = True - t.start() - time.sleep(0.25) # allow time for session creation - if msg.flags & coreapi.CORE_API_STR_FLAG: - new_session_ids = set(server.getsessionids()) - new_sid = new_session_ids.difference(old_session_ids) - try: - sid = new_sid.pop() - self.info("executed '%s' as session %d" % (ex, sid)) - except KeyError: - self.info("executed '%s' with unknown session ID" % ex) - return replies - self.info("checking session %d for RUNTIME state" % sid) - session = self.server.getsession(sessionid=sid, 
useexisting=True) - retries = 10 - # wait for session to enter RUNTIME state, to prevent GUI from - # connecting while nodes are still being instantiated - while session.getstate() != coreapi.CORE_EVENT_RUNTIME_STATE: - self.info("waiting for session %d to enter RUNTIME state" % sid) - time.sleep(1) - retries -= 1 - if retries <= 0: - self.info("session %d did not enter RUNTIME state" % sid) - return replies - tlvdata = coreapi.CoreRegTlv.pack( \ - coreapi.CORE_TLV_REG_EXECSRV, ex) - tlvdata += coreapi.CoreRegTlv.pack( \ - coreapi.CORE_TLV_REG_SESSION, "%s" % sid) - msg = coreapi.CoreRegMessage.pack(0, tlvdata) - replies.append(msg) - except Exception, e: - self.warn("error executing '%s': %s" % \ - (ex, traceback.format_exc())) - tlvdata = coreapi.CoreExceptionTlv.pack( \ - coreapi.CORE_TLV_EXCP_LEVEL, 2) - tlvdata += coreapi.CoreExceptionTlv.pack( \ - coreapi.CORE_TLV_EXCP_TEXT, str(e)) - msg = coreapi.CoreExceptionMessage.pack(0, tlvdata) - replies.append(msg) - return replies - - gui = msg.gettlv(coreapi.CORE_TLV_REG_GUI) - if gui is None: - self.info("ignoring Register message") - else: - # register capabilities with the GUI - self.master = True - found = self.server.setsessionmaster(self) - replies.append(self.register()) - replies.append(self.server.tosessionmsg()) - return replies - - def handleconfmsg(self, msg): - ''' Configuration Message handler - ''' - nodenum = msg.gettlv(coreapi.CORE_TLV_CONF_NODE) - objname = msg.gettlv(coreapi.CORE_TLV_CONF_OBJ) - if self.verbose: - self.info("Configuration message for %s node %s" % \ - (objname, nodenum)) - # dispatch to any registered callback for this object type - replies = self.session.confobj(objname, self.session, msg) - # config requests usually have a reply with default data - return replies - - def handlefilemsg(self, msg): - ''' File Message handler - ''' - if msg.flags & coreapi.CORE_API_ADD_FLAG: - nodenum = msg.gettlv(coreapi.CORE_TLV_NODE_NUMBER) - filename = msg.gettlv(coreapi.CORE_TLV_FILE_NAME) - type = msg.gettlv(coreapi.CORE_TLV_FILE_TYPE) - srcname = msg.gettlv(coreapi.CORE_TLV_FILE_SRCNAME) - data = msg.gettlv(coreapi.CORE_TLV_FILE_DATA) - cmpdata = msg.gettlv(coreapi.CORE_TLV_FILE_CMPDATA) - - if cmpdata is not None: - self.warn("Compressed file data not implemented for File " \ - "message.") - return () - if srcname is not None and data is not None: - self.warn("ignoring invalid File message: source and data " \ - "TLVs are both present") - return () - - # some File Messages store custom files in services, - # prior to node creation - if type is not None: - if type[:8] == "service:": - self.session.services.setservicefile(nodenum, type, - filename, srcname, data) - return () - elif type[:5] == "hook:": - self.session.sethook(type, filename, srcname, data) - return () - # writing a file to the host - if nodenum is None: - if srcname is not None: - shutil.copy2(srcname, filename) - else: - with open(filename, "w") as f: - f.write(data) - return () - try: - n = self.session.obj(nodenum) - except KeyError: - # XXX wait and queue this message to try again later - # XXX maybe this should be done differently - self.warn("File message for %s for node number %s queued." 
% \ - (filename, nodenum)) - time.sleep(0.125) - self.queuemsg(msg) - return () - if srcname is not None: - n.addfile(srcname, filename) - elif data is not None: - n.nodefile(filename, data) - else: - raise NotImplementedError - return () - - def handleifacemsg(self, msg): - ''' Interface Message handler - ''' - self.info("ignoring Interface message") - return () - - def handleeventmsg(self, msg): - ''' Event Message handler - ''' - eventtype = msg.gettlv(coreapi.CORE_TLV_EVENT_TYPE) - if eventtype is None: - raise NotImplementedError, "Event message missing event type" - node = msg.gettlv(coreapi.CORE_TLV_EVENT_NODE) - - if self.verbose: - self.info("EVENT %d: %s at %s" % \ - (eventtype, coreapi.event_types[eventtype], time.ctime())) - if eventtype <= coreapi.CORE_EVENT_SHUTDOWN_STATE: - if node is not None: - try: - n = self.session.obj(node) - except KeyError: - raise KeyError, "Event message for unknown node %d" % node - if eventtype == coreapi.CORE_EVENT_INSTANTIATION_STATE: - # configure mobility models for WLAN added during runtime - if isinstance(n, pycore.nodes.WlanNode): - return (self.session.mobility.startup(nodenums=(n.objid,))) - self.warn("dropping unhandled Event message with node number") - return () - self.session.setstate(state=eventtype, info=True, sendevent=False) - - if eventtype == coreapi.CORE_EVENT_DEFINITION_STATE: - # clear all session objects in order to receive new definitions - self.session.delobjs() - self.session.delhooks() - self.session.broker.reset() - elif eventtype == coreapi.CORE_EVENT_CONFIGURATION_STATE: - pass - elif eventtype == coreapi.CORE_EVENT_INSTANTIATION_STATE: - if len(self.handlerthreads) > 1: - # TODO: sync handler threads here before continuing - time.sleep(2.0) # XXX - # done receiving node/link configuration, ready to instantiate - self.session.instantiate(handler=self) - elif eventtype == coreapi.CORE_EVENT_RUNTIME_STATE: - if self.session.master: - self.warn("Unexpected event message: RUNTIME state received " \ - "at session master") - else: - # master event queue is started in session.checkruntime() - self.session.evq.run() - elif eventtype == coreapi.CORE_EVENT_DATACOLLECT_STATE: - self.session.datacollect() - elif eventtype == coreapi.CORE_EVENT_SHUTDOWN_STATE: - if self.session.master: - self.warn("Unexpected event message: SHUTDOWN state received " \ - "at session master") - elif eventtype in (coreapi.CORE_EVENT_START, coreapi.CORE_EVENT_STOP, \ - coreapi.CORE_EVENT_RESTART, \ - coreapi.CORE_EVENT_PAUSE, \ - coreapi.CORE_EVENT_RECONFIGURE): - handled = False - name = msg.gettlv(coreapi.CORE_TLV_EVENT_NAME) - if name: - # TODO: register system for event message handlers, - # like confobjs - if name.startswith("service:"): - self.session.services.handleevent(msg) - handled = True - elif name.startswith("mobility:"): - self.session.mobility.handleevent(msg) - handled = True - else: - pass - if not handled: - self.warn("Unhandled event message: event type %s (%s)" % \ - (eventtype, coreapi.state_name(eventtype))) - elif eventtype == coreapi.CORE_EVENT_FILE_OPEN: - self.session.delobjs() - self.session.delhooks() - self.session.broker.reset() - filename = msg.tlvdata[coreapi.CORE_TLV_EVENT_NAME] - opensessionxml(self.session, filename) - return self.session.sendobjs() - elif eventtype == coreapi.CORE_EVENT_FILE_SAVE: - filename = msg.tlvdata[coreapi.CORE_TLV_EVENT_NAME] - savesessionxml(self.session, filename, self.session.cfg['xmlfilever']) - elif eventtype == coreapi.CORE_EVENT_SCHEDULED: - etime = 
msg.gettlv(coreapi.CORE_TLV_EVENT_TIME) - node = msg.gettlv(coreapi.CORE_TLV_EVENT_NODE) - name = msg.gettlv(coreapi.CORE_TLV_EVENT_NAME) - data = msg.gettlv(coreapi.CORE_TLV_EVENT_DATA) - if etime is None: - self.warn("Event message scheduled event missing start time") - return () - if msg.flags & coreapi.CORE_API_ADD_FLAG: - self.session.addevent(float(etime), node=node, name=name, - data=data) - else: - raise NotImplementedError - else: - self.warn("Unhandled event message: event type %d" % eventtype) - return () - - def handlesessionmsg(self, msg): - ''' Session Message handler - ''' - replies = [] - sid_str = msg.gettlv(coreapi.CORE_TLV_SESS_NUMBER) - name_str = msg.gettlv(coreapi.CORE_TLV_SESS_NAME) - file_str = msg.gettlv(coreapi.CORE_TLV_SESS_FILE) - nc_str = msg.gettlv(coreapi.CORE_TLV_SESS_NODECOUNT) - thumb = msg.gettlv(coreapi.CORE_TLV_SESS_THUMB) - user = msg.gettlv(coreapi.CORE_TLV_SESS_USER) - sids = coreapi.str_to_list(sid_str) - names = coreapi.str_to_list(name_str) - files = coreapi.str_to_list(file_str) - ncs = coreapi.str_to_list(nc_str) - self.info("SESSION message flags=0x%x sessions=%s" % (msg.flags, sid_str)) - - if msg.flags == 0: - # modify a session - i = 0 - for sid in sids: - sid = int(sid) - if sid == 0: - session = self.session - else: - session = self.server.getsession(sessionid = sid, - useexisting = True) - if session is None: - self.info("session %s not found" % sid) - i += 1 - continue - self.info("request to modify to session %s" % session.sessionid) - if names is not None: - session.name = names[i] - if files is not None: - session.filename = files[i] - if ncs is not None: - session.node_count = ncs[i] - if thumb is not None: - session.setthumbnail(thumb) - if user is not None: - session.setuser(user) - i += 1 - else: - if msg.flags & coreapi.CORE_API_STR_FLAG and not \ - msg.flags & coreapi.CORE_API_ADD_FLAG: - # status request flag: send list of sessions - return (self.server.tosessionmsg(), ) - # handle ADD or DEL flags - for sid in sids: - sid = int(sid) - session = self.server.getsession(sessionid = sid, - useexisting = True) - if session is None: - self.info("session %s not found (flags=0x%x)" % \ - (sid, msg.flags)) - continue - if session.server is None: - # this needs to be set when executing a Python script - session.server = self.server - if msg.flags & coreapi.CORE_API_ADD_FLAG: - # connect to the first session that exists - self.info("request to connect to session %s" % sid) - # this may shutdown the session if no handlers exist - self.session.disconnect(self) - self.session = session - self.session.connect(self) - if user is not None: - self.session.setuser(user) - if msg.flags & coreapi.CORE_API_STR_FLAG: - replies.extend(self.session.sendobjs()) - elif msg.flags & coreapi.CORE_API_DEL_FLAG: - # shut down the specified session(s) - self.info("request to terminate session %s" % sid) - session.setstate(state=coreapi.CORE_EVENT_DATACOLLECT_STATE, - info=True, sendevent=True) - session.setstate(state=coreapi.CORE_EVENT_SHUTDOWN_STATE, - info=True, sendevent=True) - session.shutdown() - else: - self.warn("unhandled session flags for session %s" % sid) - return replies - -class CoreDatagramRequestHandler(CoreRequestHandler): - ''' A child of the CoreRequestHandler class for handling connectionless - UDP messages. No new session is created; messages are handled immediately or - sometimes queued on existing session handlers. 
- ''' - - def __init__(self, request, client_address, server): - # TODO: decide which messages cannot be handled with connectionless UDP - self.msghandler = { - coreapi.CORE_API_NODE_MSG: self.handlenodemsg, - coreapi.CORE_API_LINK_MSG: self.handlelinkmsg, - coreapi.CORE_API_EXEC_MSG: self.handleexecmsg, - coreapi.CORE_API_REG_MSG: self.handleregmsg, - coreapi.CORE_API_CONF_MSG: self.handleconfmsg, - coreapi.CORE_API_FILE_MSG: self.handlefilemsg, - coreapi.CORE_API_IFACE_MSG: self.handleifacemsg, - coreapi.CORE_API_EVENT_MSG: self.handleeventmsg, - coreapi.CORE_API_SESS_MSG: self.handlesessionmsg, - } - self.nodestatusreq = {} - self.master = False - self.session = None - self.verbose = bool(server.mainserver.cfg['verbose'].lower() == "true") - self.debug = bool(server.mainserver.cfg['debug'].lower() == "true") - SocketServer.BaseRequestHandler.__init__(self, request, - client_address, server) - - def setup(self): - ''' Client has connected, set up a new connection. - ''' - if self.verbose: - self.info("new UDP connection: %s:%s" % self.client_address) - - def handle(self): - msg = self.recvmsg() - - def finish(self): - return SocketServer.BaseRequestHandler.finish(self) - - def recvmsg(self): - ''' Receive data, parse a CoreMessage and queue it onto an existing - session handler's queue, if available. - ''' - data = self.request[0] - socket = self.request[1] - msghdr = data[:coreapi.CoreMessage.hdrsiz] - if len(msghdr) < coreapi.CoreMessage.hdrsiz: - raise IOError, "error receiving header (received %d bytes)" % \ - len(msghdr) - msgtype, msgflags, msglen = coreapi.CoreMessage.unpackhdr(msghdr) - if msglen == 0: - self.warn("received message with no data") - return - if len(data) != coreapi.CoreMessage.hdrsiz + msglen: - self.warn("received message length does not match received data " \ - "(%s != %s)" % \ - (len(data), coreapi.CoreMessage.hdrsiz + msglen)) - raise IOError - elif self.verbose: - self.info("UDP socket received message type=%d len=%d" % \ - (msgtype, msglen)) - try: - msgcls = coreapi.msg_class(msgtype) - msg = msgcls(msgflags, msghdr, data[coreapi.CoreMessage.hdrsiz:]) - except KeyError: - msg = coreapi.CoreMessage(msgflags, msghdr, - data[coreapi.CoreMessage.hdrsiz:]) - msg.msgtype = msgtype - self.warn("unimplemented core message type: %s" % msg.typestr()) - return - sids = msg.sessionnumbers() - msg.queuedtimes = 0 - #self.info("UDP message has session numbers: %s" % sids) - if len(sids) > 0: - for sid in sids: - sess = self.server.mainserver.getsession(sessionid=sid, - useexisting=True) - if sess: - self.session = sess - sess.broadcast(self, msg) - self.handlemsg(msg) - else: - self.warn("Session %d in %s message not found." % \ - (sid, msg.typestr())) - else: - # no session specified, find an existing one - sess = self.server.mainserver.getsession(sessionid=0, - useexisting=True) - if sess or msg.msgtype == coreapi.CORE_API_REG_MSG: - self.session = sess - if sess: - sess.broadcast(self, msg) - self.handlemsg(msg) - else: - self.warn("No active session, dropping %s message." % \ - msg.typestr()) - - def queuemsg(self, msg): - ''' UDP handlers are short-lived and do not have message queues. - ''' - raise Exception, "Unable to queue %s message for later processing " \ - "using UDP!" % msg.typestr() - - def sendall(self, data): - ''' Use sendto() on the connectionless UDP socket. - ''' - self.request[1].sendto(data, self.client_address) - - - - -class BaseAuxRequestHandler(CoreRequestHandler): - ''' - This is the superclass for auxiliary handlers in CORE. 
A concrete auxiliary handler class - must, at a minimum, define the recvmsg(), sendall(), and dispatchreplies() methods. - See SockerServer.BaseRequestHandler for parameter details. - ''' - - def __init__(self, request, client_address, server): - self.msghandler = { - coreapi.CORE_API_NODE_MSG: self.handlenodemsg, - coreapi.CORE_API_LINK_MSG: self.handlelinkmsg, - coreapi.CORE_API_EXEC_MSG: self.handleexecmsg, - coreapi.CORE_API_REG_MSG: self.handleregmsg, - coreapi.CORE_API_CONF_MSG: self.handleconfmsg, - coreapi.CORE_API_FILE_MSG: self.handlefilemsg, - coreapi.CORE_API_IFACE_MSG: self.handleifacemsg, - coreapi.CORE_API_EVENT_MSG: self.handleeventmsg, - coreapi.CORE_API_SESS_MSG: self.handlesessionmsg, - } - self.handlerthreads = [] - self.nodestatusreq = {} - self.master = False - self.session = None - self.verbose = bool(server.mainserver.cfg['verbose'].lower() == "true") - self.debug = bool(server.mainserver.cfg['debug'].lower() == "true") - SocketServer.BaseRequestHandler.__init__(self, request, - client_address, server) - - def setup(self): - ''' New client has connected to the auxiliary server. - ''' - if self.verbose: - self.info("new auxiliary server client: %s:%s" % self.client_address) - - def handle(self): - ''' - The handler main loop - ''' - port = self.request.getpeername()[1] - self.session = self.server.mainserver.getsession(sessionid = port, - useexisting = False) - self.session.connect(self) - while True: - try: - msgs = self.recvmsg() - if msgs: - for msg in msgs: - self.session.broadcast(self, msg) - self.handlemsg(msg) - except EOFError: - break; - except IOError, e: - self.warn("IOError in CoreAuxRequestHandler: %s" % e) - break; - - def finish(self): - ''' - Disconnect the client - ''' - if self.session: - self.session.disconnect(self) - return SocketServer.BaseRequestHandler.finish(self) - - ''' - ======================================================================= - Concrete AuxRequestHandler classes must redefine the following methods - ======================================================================= - ''' - - - def recvmsg(self): - ''' - Receive data from the client in the supported format. Parse, transform to CORE API format and - return transformed messages. - - EXAMPLE: - return self.handler.request.recv(siz) - - ''' - pass - return None - - def dispatchreplies(self, replies, msg): - ''' - Dispatch CORE 'replies' to a previously received message 'msg' from a client. - Replies passed to this method follow the CORE API. This method allows transformation to - the form supported by the auxiliary handler and within the context of 'msg'. - Add transformation and transmission code here. - - EXAMPLE: - transformed_replies = stateful_transform (replies, msg) # stateful_transform method needs to be defined - if transformed_replies: - for reply in transformed_replies: - try: - self.request.sendall(reply) - except Exception, e: - if self.debug: - self.info("-"*60) - traceback.print_exc(file=sys.stdout) - self.info("-"*60) - raise e - - ''' - pass - - - def sendall(self, data): - ''' - CORE calls this method when data needs to be asynchronously sent to a client. The data is - in CORE API format. This method allows transformation to the required format supported by this - handler prior to transmission. 
- - EXAMPLE: - msgs = self.transform(data) # transform method needs to be defined - if msgs: - for msg in msgs: - try: - self.request.sendall(reply) - except Exception, e: - if self.debug: - self.info("-"*60) - traceback.print_exc(file=sys.stdout) - self.info("-"*60) - raise e - ''' - pass - - - - - - - + def set_session_master(self, handler): + """ + Set the session master handler. + + :param func handler: session master handler + :return: + """ + return self.mainserver.set_session_master(handler) + + def get_session(self, session_id=None): + """ + Retrieve a session. + + :param int session_id: id of session to retrieve + :return: core.session.Session + """ + return self.mainserver.get_session(session_id) + + def to_session_message(self, flags=0): + """ + Retrieve a session message. + + :param flags: message flags + :return: session message + """ + return self.mainserver.to_session_message(flags) diff --git a/daemon/core/emane/__init__.py b/daemon/core/emane/__init__.py index e69de29b..698aa63d 100644 --- a/daemon/core/emane/__init__.py +++ b/daemon/core/emane/__init__.py @@ -0,0 +1,54 @@ +import subprocess + +from core.misc import log +from core.misc import utils + +logger = log.get_logger(__name__) + +EMANEUNK = 0 +EMANE074 = 7 +EMANE081 = 8 +EMANE091 = 91 +EMANE092 = 92 +EMANE093 = 93 +EMANE101 = 101 + +VERSION = None +VERSIONSTR = None + + +def emane_version(): + """ + Return the locally installed EMANE version identifier and string. + """ + global VERSION + global VERSIONSTR + cmd = ("emane", "--version") + + try: + status, result = utils.cmdresult(cmd) + except (OSError, subprocess.CalledProcessError): + logger.exception("error checking emane version") + status = -1 + result = "" + + VERSION = EMANEUNK + if status == 0: + if result.startswith("0.7.4"): + VERSION = EMANE074 + elif result.startswith("0.8.1"): + VERSION = EMANE081 + elif result.startswith("0.9.1"): + VERSION = EMANE091 + elif result.startswith("0.9.2"): + VERSION = EMANE092 + elif result.startswith("0.9.3"): + VERSION = EMANE093 + elif result.startswith("1.0.1"): + VERSION = EMANE101 + + VERSIONSTR = result.strip() + + +# set version variables for the Emane class +emane_version() diff --git a/daemon/core/emane/bypass.py b/daemon/core/emane/bypass.py index 1b9935e1..3cb2861c 100644 --- a/daemon/core/emane/bypass.py +++ b/daemon/core/emane/bypass.py @@ -1,48 +1,38 @@ -# -# CORE -# Copyright (c)2011-2012 the Boeing Company. -# See the LICENSE file included in this distribution. -# -# author: Jeff Ahrenholz -# -''' +""" bypass.py: EMANE Bypass model for CORE -''' +""" -import sys -import string -from core.api import coreapi +from core.emane.emanemodel import EmaneModel +from core.enumerations import ConfigDataTypes -from core.constants import * -from emane import EmaneModel class EmaneBypassModel(EmaneModel): - def __init__(self, session, objid = None, verbose = False): - EmaneModel.__init__(self, session, objid, verbose) + def __init__(self, session, object_id=None): + EmaneModel.__init__(self, session, object_id) - _name = "emane_bypass" - _confmatrix = [ - ("none",coreapi.CONF_DATA_TYPE_BOOL, '0', - 'True,False','There are no parameters for the bypass model.'), + name = "emane_bypass" + config_matrix = [ + ("none", ConfigDataTypes.BOOL.value, '0', + 'True,False', 'There are no parameters for the bypass model.'), ] # value groupings - _confgroups = "Bypass Parameters:1-1" + config_groups = "Bypass Parameters:1-1" def buildnemxmlfiles(self, e, ifc): - ''' Build the necessary nem, mac, and phy XMLs in the given path. 
- If an individual NEM has a nonstandard config, we need to build - that file also. Otherwise the WLAN-wide nXXemane_bypassnem.xml, - nXXemane_bypassmac.xml, nXXemane_bypassphy.xml are used. - ''' - values = e.getifcconfig(self.objid, self._name, - self.getdefaultvalues(), ifc) + """ + Build the necessary nem, mac, and phy XMLs in the given path. + If an individual NEM has a nonstandard config, we need to build + that file also. Otherwise the WLAN-wide nXXemane_bypassnem.xml, + nXXemane_bypassmac.xml, nXXemane_bypassphy.xml are used. + """ + values = e.getifcconfig(self.object_id, self.name, self.getdefaultvalues(), ifc) if values is None: return nemdoc = e.xmldoc("nem") nem = nemdoc.getElementsByTagName("nem").pop() nem.setAttribute("name", "BYPASS NEM") - e.appendtransporttonem(nemdoc, nem, self.objid, ifc) + e.appendtransporttonem(nemdoc, nem, self.object_id, ifc) mactag = nemdoc.createElement("mac") mactag.setAttribute("definition", self.macxmlname(ifc)) nem.appendChild(mactag) @@ -62,5 +52,3 @@ class EmaneBypassModel(EmaneModel): phy.setAttribute("name", "BYPASS PHY") phy.setAttribute("library", "bypassphylayer") e.xmlwrite(phydoc, self.phyxmlname(ifc)) - - diff --git a/daemon/core/emane/commeffect.py b/daemon/core/emane/commeffect.py index 91c0ea9f..6ad8c351 100644 --- a/daemon/core/emane/commeffect.py +++ b/daemon/core/emane/commeffect.py @@ -1,76 +1,70 @@ -# -# CORE -# Copyright (c)2010-2014 the Boeing Company. -# See the LICENSE file included in this distribution. -# -# authors: Jeff Ahrenholz -# Randy Charland -# -''' +""" commeffect.py: EMANE CommEffect model for CORE -''' +""" + +from core import emane +from core.emane.emanemodel import EmaneModel +from core.enumerations import ConfigDataTypes +from core.misc import log + +logger = log.get_logger(__name__) -import sys -import string try: from emanesh.events import EventService -except: - pass -from core.api import coreapi -from core.constants import * -from emane import Emane, EmaneModel +except ImportError: + logger.error("error importing emanesh") try: import emaneeventservice import emaneeventcommeffect -except Exception, e: - pass +except ImportError: + logger.error("error importing emaneeventservice and emaneeventcommeffect") + class EmaneCommEffectModel(EmaneModel): - def __init__(self, session, objid = None, verbose = False): - EmaneModel.__init__(self, session, objid, verbose) + def __init__(self, session, object_id=None): + EmaneModel.__init__(self, session, object_id) # model name - _name = "emane_commeffect" + name = "emane_commeffect" # CommEffect parameters _confmatrix_shim_base = [ - ("filterfile", coreapi.CONF_DATA_TYPE_STRING, '', + ("filterfile", ConfigDataTypes.STRING.value, '', '', 'filter file'), - ("groupid", coreapi.CONF_DATA_TYPE_UINT32, '0', + ("groupid", ConfigDataTypes.UINT32.value, '0', '', 'NEM Group ID'), - ("enablepromiscuousmode", coreapi.CONF_DATA_TYPE_BOOL, '0', + ("enablepromiscuousmode", ConfigDataTypes.BOOL.value, '0', 'On,Off', 'enable promiscuous mode'), - ("receivebufferperiod", coreapi.CONF_DATA_TYPE_FLOAT, '1.0', + ("receivebufferperiod", ConfigDataTypes.FLOAT.value, '1.0', '', 'receivebufferperiod'), ] _confmatrix_shim_081 = [ - ("defaultconnectivity", coreapi.CONF_DATA_TYPE_BOOL, '0', + ("defaultconnectivity", ConfigDataTypes.BOOL.value, '0', 'On,Off', 'defaultconnectivity'), - ("enabletighttimingmode", coreapi.CONF_DATA_TYPE_BOOL, '0', + ("enabletighttimingmode", ConfigDataTypes.BOOL.value, '0', 'On,Off', 'enable tight timing mode'), ] _confmatrix_shim_091 = [ - 
("defaultconnectivitymode", coreapi.CONF_DATA_TYPE_BOOL, '0', + ("defaultconnectivitymode", ConfigDataTypes.BOOL.value, '0', 'On,Off', 'defaultconnectivity'), ] - if Emane.version >= Emane.EMANE091: + if emane.VERSION >= emane.EMANE091: _confmatrix_shim = _confmatrix_shim_base + _confmatrix_shim_091 else: _confmatrix_shim = _confmatrix_shim_base + _confmatrix_shim_081 - _confmatrix = _confmatrix_shim + config_matrix = _confmatrix_shim # value groupings - _confgroups = "CommEffect SHIM Parameters:1-%d" \ - % len(_confmatrix_shim) + config_groups = "CommEffect SHIM Parameters:1-%d" % len(_confmatrix_shim) def buildnemxmlfiles(self, e, ifc): - ''' Build the necessary nem and commeffect XMLs in the given path. - If an individual NEM has a nonstandard config, we need to build - that file also. Otherwise the WLAN-wide - nXXemane_commeffectnem.xml, nXXemane_commeffectshim.xml are used. - ''' - values = e.getifcconfig(self.objid, self._name, - self.getdefaultvalues(), ifc) + """ + Build the necessary nem and commeffect XMLs in the given path. + If an individual NEM has a nonstandard config, we need to build + that file also. Otherwise the WLAN-wide + nXXemane_commeffectnem.xml, nXXemane_commeffectshim.xml are used. + """ + values = e.getifcconfig(self.object_id, self.name, self.getdefaultvalues(), ifc) if values is None: return shimdoc = e.xmldoc("shim") @@ -83,8 +77,7 @@ class EmaneCommEffectModel(EmaneModel): shimnames.remove("filterfile") # append all shim options (except filterfile) to shimdoc - map( lambda n: shim.appendChild(e.xmlparam(shimdoc, n, \ - self.valueof(n, values))), shimnames) + map(lambda n: shim.appendChild(e.xmlparam(shimdoc, n, self.valueof(n, values))), shimnames) # empty filterfile is not allowed ff = self.valueof("filterfile", values) if ff.strip() != '': @@ -95,20 +88,24 @@ class EmaneCommEffectModel(EmaneModel): nem = nemdoc.getElementsByTagName("nem").pop() nem.setAttribute("name", "commeffect NEM") nem.setAttribute("type", "unstructured") - e.appendtransporttonem(nemdoc, nem, self.objid, ifc) + e.appendtransporttonem(nemdoc, nem, self.object_id, ifc) nem.appendChild(e.xmlshimdefinition(nemdoc, self.shimxmlname(ifc))) e.xmlwrite(nemdoc, self.nemxmlname(ifc)) - def linkconfig(self, netif, bw = None, delay = None, - loss = None, duplicate = None, jitter = None, netif2 = None): - ''' Generate CommEffect events when a Link Message is received having + def linkconfig(self, netif, bw=None, delay=None, + loss=None, duplicate=None, jitter=None, netif2=None): + """ + Generate CommEffect events when a Link Message is received having link parameters. - ''' - if self.session.emane.version >= self.session.emane.EMANE091: + """ + if emane.VERSION >= emane.EMANE091: raise NotImplementedError, \ - "CommEffect linkconfig() not implemented for EMANE 0.9.1+" + "CommEffect linkconfig() not implemented for EMANE 0.9.1+" + def z(x): - ''' Helper to use 0 for None values. ''' + """ + Helper to use 0 for None values. 
+ """ if type(x) is str: x = float(x) if x is None: @@ -118,17 +115,16 @@ class EmaneCommEffectModel(EmaneModel): service = self.session.emane.service if service is None: - self.session.warn("%s: EMANE event service unavailable" % \ - self._name) + logger.warn("%s: EMANE event service unavailable" % self.name) return if netif is None or netif2 is None: - self.session.warn("%s: missing NEM information" % self._name) + logger.warn("%s: missing NEM information" % self.name) return # TODO: batch these into multiple events per transmission # TODO: may want to split out seconds portion of delay and jitter event = emaneeventcommeffect.EventCommEffect(1) index = 0 - e = self.session.obj(self.objid) + e = self.session.get_object(self.object_id) nemid = e.getnemid(netif) nemid2 = e.getnemid(netif2) mbw = bw @@ -139,6 +135,3 @@ class EmaneCommEffectModel(EmaneModel): emaneeventservice.PLATFORMID_ANY, nemid2, emaneeventservice.COMPONENTID_ANY, event.export()) - - - diff --git a/daemon/core/emane/emane.py b/daemon/core/emane/emane.py deleted file mode 100644 index 671abfea..00000000 --- a/daemon/core/emane/emane.py +++ /dev/null @@ -1,1437 +0,0 @@ -# -# CORE -# Copyright (c)2010-2014 the Boeing Company. -# See the LICENSE file included in this distribution. -# -# author: Jeff Ahrenholz -# -''' -emane.py: definition of an Emane class for implementing configuration - control of an EMANE emulation. -''' - -import sys, os, threading, subprocess, time, string -from xml.dom.minidom import parseString, Document -from core.constants import * -from core.api import coreapi -from core.misc.ipaddr import MacAddr -from core.misc.utils import maketuplefromstr, cmdresult, closeonexec -from core.misc.xmlutils import addtextelementsfromtuples, addparamlisttoparent -from core.conf import ConfigurableManager, Configurable -from core.mobility import WirelessModel -from core.emane.nodes import EmaneNode - -# EMANE 0.7.4/0.8.1 -try: - import emaneeventservice - import emaneeventlocation -except Exception, e: - pass -# EMANE 0.9.1+ -try: - from emanesh.events import EventService - from emanesh.events import LocationEvent -except Exception, e: - pass - -class Emane(ConfigurableManager): - ''' EMANE controller object. Lives in a Session instance and is used for - building EMANE config files from all of the EmaneNode objects in this - emulation, and for controlling the EMANE daemons. 
- ''' - _name = "emane" - _type = coreapi.CORE_TLV_REG_EMULSRV - _hwaddr_prefix = "02:02" - (SUCCESS, NOT_NEEDED, NOT_READY) = (0, 1, 2) - EVENTCFGVAR = 'LIBEMANEEVENTSERVICECONFIG' - # possible self.version values - (EMANEUNK, EMANE074, EMANE081, EMANE091, EMANE092, EMANE093, EMANE101) = \ - (0, 7, 8, 91, 92, 93, 101) - DEFAULT_LOG_LEVEL = 3 - - def __init__(self, session): - ConfigurableManager.__init__(self, session) - self.verbose = self.session.getcfgitembool('verbose', False) - self._objs = {} - self._objslock = threading.Lock() - self._ifccounts = {} - self._ifccountslock = threading.Lock() - self._modelclsmap = {} - # Port numbers are allocated from these counters - self.platformport = self.session.getcfgitemint('emane_platform_port', - 8100) - self.transformport = self.session.getcfgitemint('emane_transform_port', - 8200) - self.doeventloop = False - self.eventmonthread = None - self.logversion() - # model for global EMANE configuration options - self.emane_config = EmaneGlobalModel(session, None, self.verbose) - session.broker.handlers.add(self.handledistributed) - self.loadmodels() - self.service = None - - def logversion(self): - 'Log the installed EMANE version.' - if self.verbose: - self.info("using EMANE version: %s" % self.versionstr) - - def deleteeventservice(self): - if hasattr(self, 'service'): - if self.service: - for fd in self.service._readFd, self.service._writeFd: - if fd >= 0: - os.close(fd) - for f in self.service._socket, self.service._socketOTA: - if f: - f.close() - del self.service - - def initeventservice(self, filename=None, shutdown=False): - ''' (Re-)initialize the EMANE Event service. - The multicast group and/or port may be configured. - - For versions < 0.9.1 this can be changed via XML config file - and an environment variable pointing to that file. - - For version >= 0.9.1 this is passed into the EventService - constructor. - ''' - self.deleteeventservice() - self.service = None - - # EMANE 0.9.1+ does not require event service XML config - if self.version >= self.EMANE091: - if shutdown: - return - #Get the control network to be used for events - values = self.getconfig(None, "emane", - self.emane_config.getdefaultvalues())[1] - group, port = self.emane_config.valueof('eventservicegroup', values).split(':') - eventdev = self.emane_config.valueof('eventservicedevice', values) - eventnetidx = self.session.getctrlnetidx(eventdev) - if self.version > self.EMANE091: - if eventnetidx < 0: - msg = "Invalid Event Service device provided: %s" % eventdev - self.session.exception(coreapi.CORE_EXCP_LEVEL_ERROR, - "Emane.initeventservice()", None, msg) - self.info(msg) - return False - - # Make sure the event control network is in place - eventnet = self.session.addremovectrlnet(netidx=eventnetidx, - remove=False, - conf_reqd=False) - if eventnet is not None: - # direct EMANE events towards control net bridge - eventdev = eventnet.brname - eventchannel = (group, int(port), eventdev) - - - # disabled otachannel for event service - # only needed for e.g. 
antennaprofile events xmit by models - self.info("Using %s for event service traffic" % eventdev) - try: - self.service = EventService(eventchannel=eventchannel, - otachannel=None) - except Exception, e: - msg = "Error instantiating EMANE event service: %s" % e - self.session.exception(coreapi.CORE_EXCP_LEVEL_ERROR, - "Emane.initeventservice()", None, msg) - return True - if filename is not None: - tmp = os.getenv(self.EVENTCFGVAR) - os.environ.update( {self.EVENTCFGVAR: filename} ) - rc = True - try: - self.service = emaneeventservice.EventService() - except: - self.service = None - rc = False - if self.service: - for f in self.service._readFd, self.service._writeFd, \ - self.service._socket, self.service._socketOTA: - if f: - closeonexec(f) - if filename is not None: - os.environ.pop(self.EVENTCFGVAR) - if tmp is not None: - os.environ.update( {self.EVENTCFGVAR: tmp} ) - return rc - - def loadmodels(self): - ''' dynamically load EMANE models that were specified in the config file - ''' - self._modelclsmap.clear() - self._modelclsmap[self.emane_config._name] = self.emane_config - emane_models = self.session.getcfgitem('emane_models') - if emane_models is None: - return - emane_models = emane_models.split(',') - for model in emane_models: - model = model.strip() - try: - modelfile = "%s" % model.lower() - clsname = "Emane%sModel" % model - importcmd = "from %s import %s" % (modelfile, clsname) - exec(importcmd) - except Exception, e: - warntxt = "unable to load the EMANE model '%s'" % modelfile - warntxt += " specified in the config file (%s)" % e - self.session.exception(coreapi.CORE_EXCP_LEVEL_WARNING, "emane", - None, warntxt) - self.warn(warntxt) - continue - # record the model name to class name mapping - # this should match clsname._name - confname = "emane_%s" % model.lower() - self._modelclsmap[confname] = eval(clsname) - # each EmaneModel must have ModelName.configure() defined - confmethod = eval("%s.configure_emane" % clsname) - self.session.addconfobj(confname, coreapi.CORE_TLV_REG_WIRELESS, - confmethod) - - def addobj(self, obj): - ''' add a new EmaneNode object to this Emane controller object - ''' - self._objslock.acquire() - if obj.objid in self._objs: - self._objslock.release() - raise KeyError, "non-unique EMANE object id %s for %s" % \ - (obj.objid, obj) - self._objs[obj.objid] = obj - self._objslock.release() - - def getnodes(self): - ''' Return a set of CoreNodes that are linked to an EmaneNode, - e.g. containers having one or more radio interfaces. - ''' - # assumes self._objslock already held - r = set() - for e in self._objs.values(): - for netif in e.netifs(): - r.add(netif.node) - return r - - def getmodels(self, n): - ''' Used with XML export; see ConfigurableManager.getmodels() - ''' - r = ConfigurableManager.getmodels(self, n) - # EMANE global params are stored with first EMANE node (if non-default - # values are configured) - sorted_ids = sorted(self.configs.keys()) - if None in self.configs and len(sorted_ids) > 1 and \ - n.objid == sorted_ids[1]: - v = self.configs[None] - for model in v: - cls = self._modelclsmap[model[0]] - vals = model[1] - r.append((cls, vals)) - return r - - def getifcconfig(self, nodenum, conftype, defaultvalues, ifc): - # use the network-wide config values or interface(NEM)-specific values? 
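# ---- Editor's note: illustrative sketch, not part of the original patch ----
# The per-interface lookup key built just below combines the node id and the
# interface (net) index, so one node can carry a different configuration per
# interface of the same model type, e.g. node 5, netindex 2 -> key 5002.
def _example_ifc_config_key(node_objid, netindex):
    key = 1000 * node_objid
    if netindex is not None:
        key += netindex
    return key
# ---- end editor's sketch ----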
- if ifc is None: - return self.getconfig(nodenum, conftype, defaultvalues)[1] - else: - # don't use default values when interface config is the same as net - # note here that using ifc.node.objid as key allows for only one type - # of each model per node; TODO: use both node and interface as key - - # Adamson change: first check for iface config keyed by "node:ifc.name" - # (so that nodes w/ multiple interfaces of same conftype can have - # different configs for each separate interface) - key = 1000*ifc.node.objid - if ifc.netindex is not None: - key += ifc.netindex - values = self.getconfig(key, conftype, None)[1] - if not values: - values = self.getconfig(ifc.node.objid, conftype, None)[1] - if not values and self.version > self.EMANE091: - # with EMANE 0.9.2+, we need an extra NEM XML from - # model.buildnemxmlfiles(), so defaults are returned here - if ifc.transport_type == "raw": - values = self.getconfig(nodenum, conftype, defaultvalues)[1] - return values - - - def setup(self): - ''' Populate self._objs with EmaneNodes; perform distributed setup; - associate models with EmaneNodes from self.config. Returns - Emane.(SUCCESS, NOT_NEEDED, NOT_READY) in order to delay session - instantiation. - ''' - with self.session._objslock: - for obj in self.session.objs(): - if isinstance(obj, EmaneNode): - self.addobj(obj) - if len(self._objs) == 0: - return Emane.NOT_NEEDED - if self.version == self.EMANEUNK: - raise ValueError, 'EMANE version not properly detected' - # control network bridge required for EMANE 0.9.2 - # - needs to be configured before checkdistributed() for distributed - # - needs to exist when eventservice binds to it (initeventservice) - if self.version > self.EMANE091 and self.session.master: - values = self.getconfig(None, "emane", - self.emane_config.getdefaultvalues())[1] - otadev = self.emane_config.valueof('otamanagerdevice', values) - netidx = self.session.getctrlnetidx(otadev) - if netidx < 0: - msg = "EMANE cannot be started. "\ - "Invalid OTA device provided: %s. Check core.conf." % otadev - self.session.exception(coreapi.CORE_EXCP_LEVEL_ERROR, - "Emane.setup()", None, msg) - self.info(msg) - return Emane.NOT_READY - - ctrlnet = self.session.addremovectrlnet(netidx=netidx, - remove=False, - conf_reqd=False) - self.distributedctrlnet(ctrlnet) - eventdev = self.emane_config.valueof('eventservicedevice', values) - if eventdev != otadev: - netidx = self.session.getctrlnetidx(eventdev) - if netidx < 0: - msg = "EMANE cannot be started."\ - "Invalid Event Service device provided: %s. Check core.conf." % eventdev - self.session.exception(coreapi.CORE_EXCP_LEVEL_ERROR, - "Emane.setup()", None, msg) - self.info(msg) - return Emane.NOT_READY - - - ctrlnet = self.session.addremovectrlnet(netidx=netidx, - remove=False, - conf_reqd=False) - self.distributedctrlnet(ctrlnet) - - if self.checkdistributed(): - # we are slave, but haven't received a platformid yet - cfgval = self.getconfig(None, self.emane_config._name, - self.emane_config.getdefaultvalues())[1] - i = self.emane_config.getnames().index('platform_id_start') - if cfgval[i] == self.emane_config.getdefaultvalues()[i]: - return Emane.NOT_READY - self.setnodemodels() - return Emane.SUCCESS - - def startup(self): - ''' After all the EmaneNode objects have been added, build XML files - and start the daemons. Returns Emane.(SUCCESS, NOT_NEEDED, or - NOT_READY) which is used to delay session instantiation. 
- ''' - self.reset() - r = self.setup() - if r != Emane.SUCCESS: - return r # NOT_NEEDED or NOT_READY - if self.versionstr == "": - raise ValueError, "EMANE version not properly detected" - nems = [] - with self._objslock: - if self.version < self.EMANE092: - self.buildxml() - self.initeventservice() - self.starteventmonitor() - if self.numnems() > 0: - # TODO: check and return failure for these methods - self.startdaemons() - self.installnetifs() - else: - self.buildxml2() - self.initeventservice() - self.starteventmonitor() - if self.numnems() > 0: - self.startdaemons2() - self.installnetifs(do_netns=False) - for e in self._objs.itervalues(): - for netif in e.netifs(): - nems.append((netif.node.name, netif.name, - e.getnemid(netif))) - if nems: - emane_nems_filename = os.path.join(self.session.sessiondir, - 'emane_nems') - try: - with open(emane_nems_filename, 'w') as f: - for nodename, ifname, nemid in nems: - f.write('%s %s %s\n' % (nodename, ifname, nemid)) - except Exception as e: - self.warn('Error writing EMANE NEMs file: %s' % e) - return Emane.SUCCESS - - def poststartup(self): - ''' Retransmit location events now that all NEMs are active. - ''' - if not self.genlocationevents(): - return - with self._objslock: - for n in sorted(self._objs.keys()): - e = self._objs[n] - for netif in e.netifs(): - (x, y, z) = netif.node.position.get() - e.setnemposition(netif, x, y, z) - - def reset(self): - ''' remove all EmaneNode objects from the dictionary, - reset port numbers and nem id counters - ''' - with self._objslock: - self._objs.clear() - # don't clear self._ifccounts here; NEM counts are needed for buildxml - self.platformport = self.session.getcfgitemint('emane_platform_port', - 8100) - self.transformport = self.session.getcfgitemint('emane_transform_port', - 8200) - - def shutdown(self): - ''' stop all EMANE daemons - ''' - self._ifccountslock.acquire() - self._ifccounts.clear() - self._ifccountslock.release() - self._objslock.acquire() - if len(self._objs) == 0: - self._objslock.release() - return - self.info("Stopping EMANE daemons.") - self.deinstallnetifs() - self.stopdaemons() - self.stopeventmonitor() - self._objslock.release() - - def handledistributed(self, msg): - ''' Broker handler for processing CORE API messages as they are - received. This is used to snoop the Link add messages to get NEM - counts of NEMs that exist on other servers. - ''' - if msg.msgtype == coreapi.CORE_API_LINK_MSG and \ - msg.flags & coreapi.CORE_API_ADD_FLAG: - nn = msg.nodenumbers() - # first node is always link layer node in Link add message - if nn[0] in self.session.broker.nets: - serverlist = self.session.broker.getserversbynode(nn[1]) - for server in serverlist: - self._ifccountslock.acquire() - if server not in self._ifccounts: - self._ifccounts[server] = 1 - else: - self._ifccounts[server] += 1 - self._ifccountslock.release() - - def checkdistributed(self): - ''' Check for EMANE nodes that exist on multiple emulation servers and - coordinate the NEM id and port number space. - If we are the master EMANE node, return False so initialization will - proceed as normal; otherwise slaves return True here and - initialization is deferred. - ''' - # check with the session if we are the "master" Emane object? - master = False - self._objslock.acquire() - if len(self._objs) > 0: - master = self.session.master - self.info("Setup EMANE with master=%s." 
% master) - self._objslock.release() - - # we are not the master Emane object, wait for nem id and ports - if not master: - return True - - cfgval = self.getconfig(None, self.emane_config._name, - self.emane_config.getdefaultvalues())[1] - values = list(cfgval) - - nemcount = 0 - self._objslock.acquire() - for n in self._objs: - emanenode = self._objs[n] - nemcount += emanenode.numnetif() - nemid = int(self.emane_config.valueof("nem_id_start", values)) - nemid += nemcount - platformid = int(self.emane_config.valueof("platform_id_start", values)) - names = list(self.emane_config.getnames()) - - # build an ordered list of servers so platform ID is deterministic - servers = [] - for n in sorted(self._objs): - for s in self.session.broker.getserversbynode(n): - if s not in servers: - servers.append(s) - self._objslock.release() - - servers.sort(key = lambda x: x.name) - for server in servers: - if server.name == "localhost": - continue - if server.sock is None: - continue - platformid += 1 - typeflags = coreapi.CONF_TYPE_FLAGS_UPDATE - values[names.index("platform_id_start")] = str(platformid) - values[names.index("nem_id_start")] = str(nemid) - msg = EmaneGlobalModel.toconfmsg(flags=0, nodenum=None, - typeflags=typeflags, values=values) - server.sock.send(msg) - # increment nemid for next server by number of interfaces - with self._ifccountslock: - if server in self._ifccounts: - nemid += self._ifccounts[server] - - return False - - def buildxml(self): - ''' Build all of the XML files required to run EMANE on the host. - NEMs run in a single host emane process, with TAP devices pushed - into namespaces. - ''' - # assume self._objslock is already held here - if self.verbose: - self.info("Emane.buildxml()") - self.buildplatformxml() - self.buildnemxml() - self.buildtransportxml() - self.buildeventservicexml() - - def buildxml2(self): - ''' Build XML files required to run EMANE on each node. - NEMs run inside containers using the control network for passing - events and data. - ''' - # assume self._objslock is already held here - if self.verbose: - self.info("Emane.buildxml2()") - # on master, control network bridge added earlier in startup() - ctrlnet = self.session.addremovectrlnet(netidx=0, remove=False, conf_reqd=False) - self.buildplatformxml2(ctrlnet) - self.buildnemxml() - self.buildeventservicexml() - - def distributedctrlnet(self, ctrlnet): - ''' Distributed EMANE requires multiple control network prefixes to - be configured. This generates configuration for slave control nets - using the default list of prefixes. 
- ''' - session = self.session - if not session.master: - return # slave server - servers = session.broker.getservernames() - if len(servers) < 2: - return # not distributed - prefix = session.cfg.get('controlnet') - prefix = getattr(session.options, 'controlnet', prefix) - prefixes = prefix.split() - if len(prefixes) >= len(servers): - return # normal Config messaging will distribute controlnets - # this generates a config message having controlnet prefix assignments - self.info("Setting up default controlnet prefixes for distributed " \ - "(%d configured)" % len(prefixes)) - prefixes = ctrlnet.DEFAULT_PREFIX_LIST[0] - vals = "controlnet='%s'" % prefixes - tlvdata = "" - tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_OBJ, - "session") - tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_TYPE, 0) - tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_VALUES, vals) - rawmsg = coreapi.CoreConfMessage.pack(0, tlvdata) - msghdr = rawmsg[:coreapi.CoreMessage.hdrsiz] - msg = coreapi.CoreConfMessage(flags=0, hdr=msghdr, - data=rawmsg[coreapi.CoreMessage.hdrsiz:]) - self.session.broker.handlemsg(msg) - - def xmldoc(self, doctype): - ''' Returns an XML xml.minidom.Document with a DOCTYPE tag set to the - provided doctype string, and an initial element having the same - name. - ''' - # we hack in the DOCTYPE using the parser - docstr = """ - - <%s/>""" % (doctype, doctype, doctype) - # normally this would be: doc = Document() - return parseString(docstr) - - def xmlparam(self, doc, name, value): - ''' Convenience function for building a parameter tag of the format: - - ''' - p = doc.createElement("param") - p.setAttribute("name", name) - p.setAttribute("value", value) - return p - - def xmlshimdefinition(self, doc, name): - ''' Convenience function for building a definition tag of the format: - - ''' - p = doc.createElement("shim") - p.setAttribute("definition", name) - return p - - def xmlwrite(self, doc, filename): - ''' Write the given XML document to the specified filename. - ''' - #self.info("%s" % doc.toprettyxml(indent=" ")) - pathname = os.path.join(self.session.sessiondir, filename) - f = open(pathname, "w") - doc.writexml(writer=f, indent="", addindent=" ", newl="\n", \ - encoding="UTF-8") - f.close() - - def setnodemodels(self): - ''' Associate EmaneModel classes with EmaneNode nodes. The model - configurations are stored in self.configs. - ''' - for n in self._objs: - self.setnodemodel(n) - - def setnodemodel(self, n): - emanenode = self._objs[n] - if n not in self.configs: - return False - for (t, v) in self.configs[n]: - if t is None: - continue - if t == self.emane_config._name: - continue - # only use the first valid EmaneModel - # convert model name to class (e.g. emane_rfpipe -> EmaneRfPipe) - cls = self._modelclsmap[t] - emanenode.setmodel(cls, v) - return True - # no model has been configured for this EmaneNode - return False - - def nemlookup(self, nemid): - ''' Look for the given numerical NEM ID and return the first matching - EmaneNode and NEM interface. - ''' - emanenode = None - netif = None - - for n in self._objs: - emanenode = self._objs[n] - netif = emanenode.getnemnetif(nemid) - if netif is not None: - break - else: - emanenode = None - return (emanenode, netif) - - def numnems(self): - ''' Return the number of NEMs emulated locally. - ''' - count = 0 - for o in self._objs.values(): - count += len(o.netifs()) - return count - - def buildplatformxml(self): - ''' Build a platform.xml file now that all nodes are configured. 
- ''' - values = self.getconfig(None, "emane", - self.emane_config.getdefaultvalues())[1] - doc = self.xmldoc("platform") - plat = doc.getElementsByTagName("platform").pop() - if self.version < self.EMANE091: - platformid = self.emane_config.valueof("platform_id_start", values) - plat.setAttribute("name", "Platform %s" % platformid) - plat.setAttribute("id", platformid) - - names = list(self.emane_config.getnames()) - platform_names = names[:len(self.emane_config._confmatrix_platform)] - platform_names.remove('platform_id_start') - - # append all platform options (except starting id) to doc - map( lambda n: plat.appendChild(self.xmlparam(doc, n, \ - self.emane_config.valueof(n, values))), platform_names) - - nemid = int(self.emane_config.valueof("nem_id_start", values)) - # assume self._objslock is already held here - for n in sorted(self._objs.keys()): - emanenode = self._objs[n] - nems = emanenode.buildplatformxmlentry(doc) - for netif in sorted(nems, key=lambda n: n.node.objid): - # set ID, endpoints here - nementry = nems[netif] - nementry.setAttribute("id", "%d" % nemid) - if self.version < self.EMANE092: - # insert nem options (except nem id) to doc - trans_addr = self.emane_config.valueof("transportendpoint", \ - values) - nementry.insertBefore(self.xmlparam(doc, "transportendpoint", \ - "%s:%d" % (trans_addr, self.transformport)), - nementry.firstChild) - platform_addr = self.emane_config.valueof("platformendpoint", \ - values) - nementry.insertBefore(self.xmlparam(doc, "platformendpoint", \ - "%s:%d" % (platform_addr, self.platformport)), - nementry.firstChild) - plat.appendChild(nementry) - emanenode.setnemid(netif, nemid) - # NOTE: MAC address set before here is incorrect, including the one - # sent from the GUI via link message - # MAC address determined by NEM ID: 02:02:00:00:nn:nn" - macstr = self._hwaddr_prefix + ":00:00:" - macstr += "%02X:%02X" % ((nemid >> 8) & 0xFF, nemid & 0xFF) - netif.sethwaddr(MacAddr.fromstring(macstr)) - # increment counters used to manage IDs, endpoint port numbers - nemid += 1 - self.platformport += 1 - self.transformport += 1 - self.xmlwrite(doc, "platform.xml") - - def newplatformxmldoc(self, values, otadev=None, eventdev=None): - ''' Start a new platform XML file. Use global EMANE config values - as keys. Override OTA manager and event service devices if - specified (in order to support Raw Transport). - ''' - doc = self.xmldoc("platform") - plat = doc.getElementsByTagName("platform").pop() - names = list(self.emane_config.getnames()) - platform_names = names[:len(self.emane_config._confmatrix_platform)] - platform_names.remove('platform_id_start') - platform_values = list(values) - if otadev: - i = platform_names.index('otamanagerdevice') - platform_values[i] = otadev - if eventdev: - i = platform_names.index('eventservicedevice') - platform_values[i] = eventdev - # append all platform options (except starting id) to doc - map( lambda n: plat.appendChild(self.xmlparam(doc, n, \ - self.emane_config.valueof(n, platform_values))), \ - platform_names) - return doc - - def buildplatformxml2(self, ctrlnet): - ''' Build a platform.xml file now that all nodes are configured. 
- ''' - values = self.getconfig(None, "emane", - self.emane_config.getdefaultvalues())[1] - nemid = int(self.emane_config.valueof("nem_id_start", values)) - platformxmls = {} - - # assume self._objslock is already held here - for n in sorted(self._objs.keys()): - emanenode = self._objs[n] - nems = emanenode.buildplatformxmlentry(self.xmldoc("platform")) - for netif in sorted(nems, key=lambda n: n.node.objid): - nementry = nems[netif] - nementry.setAttribute("id", "%d" % nemid) - k = netif.node.objid - if netif.transport_type == "raw": - k = 'host' - otadev = ctrlnet.brname - eventdev = ctrlnet.brname - else: - otadev = None - eventdev = None - if k not in platformxmls: - platformxmls[k] = self.newplatformxmldoc(values, otadev, - eventdev) - doc = platformxmls[k] - plat = doc.getElementsByTagName("platform").pop() - plat.appendChild(nementry) - emanenode.setnemid(netif, nemid) - macstr = self._hwaddr_prefix + ":00:00:" - macstr += "%02X:%02X" % ((nemid >> 8) & 0xFF, nemid & 0xFF) - netif.sethwaddr(MacAddr.fromstring(macstr)) - nemid += 1 - for k in sorted(platformxmls.keys()): - if k == 'host': - self.xmlwrite(platformxmls['host'], "platform.xml") - continue - self.xmlwrite(platformxmls[k], "platform%d.xml" % k) - - def buildnemxml(self): - ''' Builds the xxxnem.xml, xxxmac.xml, and xxxphy.xml files which - are defined on a per-EmaneNode basis. - ''' - for n in sorted(self._objs.keys()): - emanenode = self._objs[n] - emanenode.buildnemxmlfiles(self) - - def appendtransporttonem(self, doc, nem, nodenum, ifc=None): - ''' Given a nem XML node and EMANE WLAN node number, append - a tag to the NEM definition, required for using - EMANE's internal transport. - ''' - if self.version < self.EMANE092: - return - emanenode = self._objs[nodenum] - transtag = doc.createElement("transport") - transtypestr = "virtual" - if ifc and ifc.transport_type == "raw": - transtypestr = "raw" - transtag.setAttribute("definition", - emanenode.transportxmlname(transtypestr)) - nem.appendChild(transtag) - - def buildtransportxml(self): - ''' Calls emanegentransportxml using a platform.xml file to build - the transportdaemon*.xml. - ''' - try: - subprocess.check_call(["emanegentransportxml", "platform.xml"], \ - cwd=self.session.sessiondir) - except Exception, e: - self.info("error running emanegentransportxml: %s" % e) - - def buildeventservicexml(self): - ''' Build the libemaneeventservice.xml file if event service options - were changed in the global config. 
- ''' - defaults = self.emane_config.getdefaultvalues() - values = self.getconfig(None, "emane", - self.emane_config.getdefaultvalues())[1] - need_xml = False - keys = ('eventservicegroup', 'eventservicedevice') - for k in keys: - a = self.emane_config.valueof(k, defaults) - b = self.emane_config.valueof(k, values) - if a != b: - need_xml = True - - if not need_xml: - # reset to using default config - self.initeventservice() - return - - try: - group, port = self.emane_config.valueof('eventservicegroup', - values).split(':') - except ValueError: - self.warn("invalid eventservicegroup in EMANE config") - return - dev = self.emane_config.valueof('eventservicedevice', values) - - doc = self.xmldoc("emaneeventmsgsvc") - es = doc.getElementsByTagName("emaneeventmsgsvc").pop() - kvs = ( ('group', group), ('port', port), ('device', dev), - ('mcloop', '1'), ('ttl', '32') ) - addtextelementsfromtuples(doc, es, kvs) - filename = 'libemaneeventservice.xml' - self.xmlwrite(doc, filename) - pathname = os.path.join(self.session.sessiondir, filename) - self.initeventservice(filename=pathname) - - def startdaemons(self): - ''' Start the appropriate EMANE daemons. The transport daemon will - bind to the TAP interfaces. - ''' - if self.verbose: - self.info("Emane.startdaemons()") - path = self.session.sessiondir - loglevel = str(self.DEFAULT_LOG_LEVEL) - cfgloglevel = self.session.getcfgitemint("emane_log_level") - realtime = self.session.getcfgitembool("emane_realtime", True) - if cfgloglevel: - self.info("setting user-defined EMANE log level: %d" % cfgloglevel) - loglevel = str(cfgloglevel) - emanecmd = ["emane", "-d", "--logl", loglevel, "-f", \ - os.path.join(path, "emane.log")] - if realtime: - emanecmd += "-r", - try: - cmd = emanecmd + [os.path.join(path, "platform.xml")] - if self.verbose: - self.info("Emane.startdaemons() running %s" % str(cmd)) - subprocess.check_call(cmd, cwd=path) - except Exception, e: - errmsg = "error starting emane: %s" % e - self.session.exception(coreapi.CORE_EXCP_LEVEL_FATAL, "emane", - None, errmsg) - self.info(errmsg) - - # start one transport daemon per transportdaemon*.xml file - transcmd = ["emanetransportd", "-d", "--logl", loglevel, "-f", \ - os.path.join(path, "emanetransportd.log")] - if realtime: - transcmd += "-r", - files = os.listdir(path) - for file in files: - if file[-3:] == "xml" and file[:15] == "transportdaemon": - cmd = transcmd + [os.path.join(path, file)] - try: - if self.verbose: - self.info("Emane.startdaemons() running %s" % str(cmd)) - subprocess.check_call(cmd, cwd=path) - except Exception, e: - errmsg = "error starting emanetransportd: %s" % e - self.session.exception(coreapi.CORE_EXCP_LEVEL_FATAL, "emane", - None, errmsg) - self.info(errmsg) - - def startdaemons2(self): - ''' Start one EMANE daemon per node having a radio. - Add a control network even if the user has not configured one. 
- ''' - if self.verbose: - self.info("Emane.startdaemons()") - loglevel = str(self.DEFAULT_LOG_LEVEL) - cfgloglevel = self.session.getcfgitemint("emane_log_level") - realtime = self.session.getcfgitembool("emane_realtime", True) - if cfgloglevel: - self.info("setting user-defined EMANE log level: %d" % cfgloglevel) - loglevel = str(cfgloglevel) - emanecmd = ["emane", "-d", "--logl", loglevel] - if realtime: - emanecmd += "-r", - - values = self.getconfig(None, "emane", - self.emane_config.getdefaultvalues())[1] - otagroup, otaport = self.emane_config.valueof('otamanagergroup', - values).split(':') - otadev = self.emane_config.valueof('otamanagerdevice', values) - otanetidx = self.session.getctrlnetidx(otadev) - - eventgroup, eventport = self.emane_config.valueof('eventservicegroup', - values).split(':') - eventdev = self.emane_config.valueof('eventservicedevice', values) - eventservicenetidx = self.session.getctrlnetidx(eventdev) - - run_emane_on_host = False - for node in self.getnodes(): - if hasattr(node, 'transport_type') and \ - node.transport_type == "raw": - run_emane_on_host = True - continue - path = self.session.sessiondir - n = node.objid - - # control network not yet started here - self.session.addremovectrlif(node, 0, remove=False, conf_reqd=False) - - if otanetidx > 0: - self.info("adding ota device ctrl%d" % otanetidx) - self.session.addremovectrlif(node, otanetidx, remove=False, conf_reqd=False) - - if eventservicenetidx >= 0: - self.info("adding event service device ctrl%d" % eventservicenetidx) - self.session.addremovectrlif(node, eventservicenetidx, remove=False, conf_reqd=False) - - # multicast route is needed for OTA data - cmd = [IP_BIN, "route", "add", otagroup, "dev", otadev] - #rc = node.cmd(cmd, wait=True) - node.cmd(cmd, wait=True) - # multicast route is also needed for event data if on control network - if eventservicenetidx >= 0 and eventgroup != otagroup: - cmd = [IP_BIN, "route", "add", eventgroup, "dev", eventdev] - node.cmd(cmd, wait=True) - - try: - cmd = emanecmd + ["-f", os.path.join(path, "emane%d.log" % n), - os.path.join(path, "platform%d.xml" % n)] - if self.verbose: - self.info("Emane.startdaemons2() running %s" % str(cmd)) - status = node.cmd(cmd, wait=True) - if self.verbose: - self.info("Emane.startdaemons2() return code %d" % status) - except Exception, e: - errmsg = "error starting emane: %s" % e - self.session.exception(coreapi.CORE_EXCP_LEVEL_FATAL, "emane", - n, errmsg) - self.info(errmsg) - if not run_emane_on_host: - return - path = self.session.sessiondir - try: - emanecmd += ["-f", os.path.join(path, "emane.log")] - cmd = emanecmd + [os.path.join(path, "platform.xml")] - if self.verbose: - self.info("Emane.startdaemons2() running %s" % str(cmd)) - subprocess.check_call(cmd, cwd=path) - except Exception, e: - errmsg = "error starting emane: %s" % e - self.session.exception(coreapi.CORE_EXCP_LEVEL_FATAL, "emane", - None, errmsg) - self.info(errmsg) - - def stopdaemons(self): - ''' Kill the appropriate EMANE daemons. 
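Condensed, the per-node work in startdaemons2() above amounts to adding multicast routes for the OTA and event groups and launching one emane daemon per platformN.xml. A sketch of the command lists it builds, assuming the default group and device values from the global EMANE configuration below and an illustrative node number and log level:

    node_id = 3                # illustrative node number
    loglevel = "3"             # whatever emane_log_level / DEFAULT_LOG_LEVEL resolves to
    ota_route = ["ip", "route", "add", "224.1.2.8", "dev", "ctrl0"]
    emane_cmd = ["emane", "-d", "--logl", loglevel, "-r",
                 "-f", "emane%d.log" % node_id,
                 "platform%d.xml" % node_id]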
- ''' - # TODO: we may want to improve this if we had the PIDs from the - # specific EMANE daemons that we've started - cmd = ["killall", "-q", "emane"] - stop_emane_on_host = False - if self.version > self.EMANE091: - for node in self.getnodes(): - if hasattr(node, 'transport_type') and \ - node.transport_type == "raw": - stop_emane_on_host = True - continue - if node.up: - node.cmd(cmd, wait=False) - # TODO: RJ45 node - else: - stop_emane_on_host = True - if stop_emane_on_host: - subprocess.call(cmd) - subprocess.call(["killall", "-q", "emanetransportd"]) - - def installnetifs(self, do_netns=True): - ''' Install TUN/TAP virtual interfaces into their proper namespaces - now that the EMANE daemons are running. - ''' - for n in sorted(self._objs.keys()): - emanenode = self._objs[n] - if self.verbose: - self.info("Emane.installnetifs() for node %d" % n) - emanenode.installnetifs(do_netns) - - def deinstallnetifs(self): - ''' Uninstall TUN/TAP virtual interfaces. - ''' - for n in sorted(self._objs.keys()): - emanenode = self._objs[n] - emanenode.deinstallnetifs() - - def configure(self, session, msg): - ''' Handle configuration messages for global EMANE config. - ''' - r = self.emane_config.configure_emane(session, msg) - - # extra logic to start slave Emane object after nemid has been - # configured from the master - conftype = msg.gettlv(coreapi.CORE_TLV_CONF_TYPE) - if conftype == coreapi.CONF_TYPE_FLAGS_UPDATE and \ - self.session.master == False: - # instantiation was previously delayed by self.setup() - # returning Emane.NOT_READY - h = None - with self.session._handlerslock: - for h in self.session._handlers: - break - self.session.instantiate(handler=h) - - return r - - def doeventmonitor(self): - ''' Returns boolean whether or not EMANE events will be monitored. - ''' - # this support must be explicitly turned on; by default, CORE will - # generate the EMANE events when nodes are moved - return self.session.getcfgitembool('emane_event_monitor', False) - - def genlocationevents(self): - ''' Returns boolean whether or not EMANE events will be generated. - ''' - # By default, CORE generates EMANE location events when nodes - # are moved; this can be explicitly disabled in core.conf - tmp = self.session.getcfgitembool('emane_event_generate') - if tmp is None: - tmp = not self.doeventmonitor() - return tmp - - def starteventmonitor(self): - ''' Start monitoring EMANE location events if configured to do so. - ''' - if self.verbose: - self.info("Emane.starteventmonitor()") - if not self.doeventmonitor(): - return - if self.service is None: - errmsg = "Warning: EMANE events will not be generated " \ - "because the emaneeventservice\n binding was " \ - "unable to load " \ - "(install the python-emaneeventservice bindings)" - self.session.exception(coreapi.CORE_EXCP_LEVEL_WARNING, "emane", - None, errmsg) - self.warn(errmsg) - - return - self.doeventloop = True - self.eventmonthread = threading.Thread(target = self.eventmonitorloop) - self.eventmonthread.daemon = True - self.eventmonthread.start() - - - def stopeventmonitor(self): - ''' Stop monitoring EMANE location events. 
- ''' - self.doeventloop = False - if self.service is not None: - self.service.breakloop() - # reset the service, otherwise nextEvent won't work - self.initeventservice(shutdown=True) - if self.eventmonthread is not None: - if self.version >= self.EMANE091: - self.eventmonthread._Thread__stop() - self.eventmonthread.join() - self.eventmonthread = None - - def eventmonitorloop(self): - ''' Thread target that monitors EMANE location events. - ''' - if self.service is None: - return - self.info("Subscribing to EMANE location events (not generating them). " \ - "(%s) " % threading.currentThread().getName()) - while self.doeventloop is True: - if self.version >= self.EMANE091: - (uuid, seq, events) = self.service.nextEvent() - if not self.doeventloop: - break # this occurs with 0.9.1 event service - for event in events: - (nem, eid, data) = event - if eid == LocationEvent.IDENTIFIER: - self.handlelocationevent2(nem, eid, data) - else: - (event, platform, nem, cmp, data) = self.service.nextEvent() - if event == emaneeventlocation.EVENT_ID: - self.handlelocationevent(event, platform, nem, cmp, data) - self.info("Unsubscribing from EMANE location events. (%s) " % \ - threading.currentThread().getName()) - - def handlelocationevent(self, event, platform, nem, component, data): - ''' Handle an EMANE location event (EMANE 0.8.1 and earlier). - ''' - event = emaneeventlocation.EventLocation(data) - entries = event.entries() - for e in entries.values(): - # yaw,pitch,roll,azimuth,elevation,velocity are unhandled - (nemid, lat, long, alt) = e[:4] - self.handlelocationeventtoxyz(nemid, lat, long, alt) - - def handlelocationevent2(self, rxnemid, eid, data): - ''' Handle an EMANE location event (EMANE 0.9.1+). - ''' - events = LocationEvent() - events.restore(data) - for event in events: - (txnemid, attrs) = event - if 'latitude' not in attrs or 'longitude' not in attrs or \ - 'altitude' not in attrs: - self.warn("dropped invalid location event") - continue - # yaw,pitch,roll,azimuth,elevation,velocity are unhandled - lat = attrs['latitude'] - long = attrs['longitude'] - alt = attrs['altitude'] - self.handlelocationeventtoxyz(txnemid, lat, long, alt) - - def handlelocationeventtoxyz(self, nemid, lat, long, alt): - ''' Convert the (NEM ID, lat, long, alt) from a received location event - into a node and x,y,z coordinate values, sending a Node Message. - Returns True if successfully parsed and a Node Message was sent. 
- ''' - # convert nemid to node number - (emanenode, netif) = self.nemlookup(nemid) - if netif is None: - if self.verbose: - self.info("location event for unknown NEM %s" % nemid) - return False - n = netif.node.objid - # convert from lat/long/alt to x,y,z coordinates - (x, y, z) = self.session.location.getxyz(lat, long, alt) - x = int(x) - y = int(y) - z = int(z) - if self.verbose: - self.info("location event NEM %s (%s, %s, %s) -> (%s, %s, %s)" \ - % (nemid, lat, long, alt, x, y, z)) - try: - if (x.bit_length() > 16) or (y.bit_length() > 16) or \ - (z.bit_length() > 16) or (x < 0) or (y < 0) or (z < 0): - warntxt = "Unable to build node location message since " \ - "received lat/long/alt exceeds coordinate " \ - "space: NEM %s (%d, %d, %d)" % (nemid, x, y, z) - self.info(warntxt) - self.session.exception(coreapi.CORE_EXCP_LEVEL_ERROR, - "emane", None, warntxt) - return False - except AttributeError: - # int.bit_length() not present on Python 2.6 - pass - - # generate a node message for this location update - try: - node = self.session.obj(n) - except KeyError: - self.warn("location event NEM %s has no corresponding node %s" \ - % (nemid, n)) - return False - # don't use node.setposition(x,y,z) which generates an event - node.position.set(x,y,z) - msg = node.tonodemsg(flags=0) - self.session.broadcastraw(None, msg) - self.session.sdt.updatenodegeo(node.objid, lat, long, alt) - return True - - def emanerunning(self, node): - '''\ - Return True if an EMANE process associated with the given node - is running, False otherwise. - ''' - status = -1 - cmd = ['pkill', '-0', '-x', 'emane'] - try: - if self.version < self.EMANE092: - status = subprocess.call(cmd) - else: - status = node.cmd(cmd, wait=True) - except: - pass - return status == 0 - -def emane_version(): - 'Return the locally installed EMANE version identifier and string.' - cmd = ('emane', '--version') - try: - status, result = cmdresult(cmd) - except: - status = -1 - result = '' - v = Emane.EMANEUNK - if status == 0: - if result.startswith('0.7.4'): - v = Emane.EMANE074 - elif result.startswith('0.8.1'): - v = Emane.EMANE081 - elif result.startswith('0.9.1'): - v = Emane.EMANE091 - elif result.startswith('0.9.2'): - v = Emane.EMANE092 - elif result.startswith('0.9.3'): - v = Emane.EMANE093 - elif result.startswith('1.0.1'): - v = Emane.EMANE101 - return v, result.strip() - -# set version variables for the Emane class -Emane.version, Emane.versionstr = emane_version() - -class EmaneModel(WirelessModel): - ''' EMANE models inherit from this parent class, which takes care of - handling configuration messages based on the _confmatrix list of - configurable parameters. Helper functions also live here. - ''' - _prefix = {'y': 1e-24, # yocto - 'z': 1e-21, # zepto - 'a': 1e-18, # atto - 'f': 1e-15, # femto - 'p': 1e-12, # pico - 'n': 1e-9, # nano - 'u': 1e-6, # micro - 'm': 1e-3, # mili - 'c': 1e-2, # centi - 'd': 1e-1, # deci - 'k': 1e3, # kilo - 'M': 1e6, # mega - 'G': 1e9, # giga - 'T': 1e12, # tera - 'P': 1e15, # peta - 'E': 1e18, # exa - 'Z': 1e21, # zetta - 'Y': 1e24, # yotta - } - - @classmethod - def configure_emane(cls, session, msg): - ''' Handle configuration messages for setting up a model. - Pass the Emane object as the manager object. - ''' - return cls.configure(session.emane, msg) - - @classmethod - def emane074_fixup(cls, value, div=1.0): - ''' Helper for converting 0.8.1 and newer values to EMANE 0.7.4 - compatible values. - NOTE: This should be removed when support for 0.7.4 has been - deprecated. 
- ''' - if div == 0: - return "0" - if type(value) is not str: - return str(value / div) - if value.endswith(tuple(cls._prefix.keys())): - suffix = value[-1] - value = float(value[:-1]) * cls._prefix[suffix] - return str(int(value / div)) - - def buildnemxmlfiles(self, e, ifc): - ''' Build the necessary nem, mac, and phy XMLs in the given path. - ''' - raise NotImplementedError - - def buildplatformxmlnementry(self, doc, n, ifc): - ''' Build the NEM definition that goes into the platform.xml file. - This returns an XML element that will be added to the element. - This default method supports per-interface config - (e.g. or per-EmaneNode - config (e.g. . - This can be overriden by a model for NEM flexibility; n is the EmaneNode. - ''' - nem = doc.createElement("nem") - nem.setAttribute("name", ifc.localname) - # if this netif contains a non-standard (per-interface) config, - # then we need to use a more specific xml file here - nem.setAttribute("definition", self.nemxmlname(ifc)) - return nem - - def buildplatformxmltransportentry(self, doc, n, ifc): - ''' Build the transport definition that goes into the platform.xml file. - This returns an XML element that will added to the nem definition. - This default method supports raw and virtual transport types, but may be - overriden by a model to support the e.g. pluggable virtual transport. - n is the EmaneNode. - ''' - ttype = ifc.transport_type - if not ttype: - self.session.info("warning: %s interface type unsupported!" % ifc.name) - ttype = "raw" - trans = doc.createElement("transport") - trans.setAttribute("definition", n.transportxmlname(ttype)) - if self.session.emane.version < self.session.emane.EMANE092: - trans.setAttribute("group", "1") - param = doc.createElement("param") - param.setAttribute("name", "device") - if ttype == "raw": - # raw RJ45 name e.g. 'eth0' - param.setAttribute("value", ifc.name) - else: - # virtual TAP name e.g. 'n3.0.17' - param.setAttribute("value", ifc.localname) - if self.session.emane.version > self.session.emane.EMANE091: - param.setAttribute("value", ifc.name) - - trans.appendChild(param) - return trans - - def basename(self, ifc = None): - ''' Return the string that other names are based on. - If a specific config is stored for a node's interface, a unique - filename is needed; otherwise the name of the EmaneNode is used. - ''' - emane = self.session.emane - name = "n%s" % self.objid - if ifc is not None: - nodenum = ifc.node.objid - # Adamson change - use getifcconfig() to get proper result - #if emane.getconfig(nodenum, self._name, None)[1] is not None: - if emane.getifcconfig(nodenum, self._name, None, ifc) is not None: - name = ifc.localname.replace('.','_') - return "%s%s" % (name, self._name) - - def nemxmlname(self, ifc = None): - ''' Return the string name for the NEM XML file, e.g. 'n3rfpipenem.xml' - ''' - append = "" - if self.session.emane.version > self.session.emane.EMANE091: - if ifc and ifc.transport_type == "raw": - append = "_raw" - return "%snem%s.xml" % (self.basename(ifc), append) - - def shimxmlname(self, ifc = None): - ''' Return the string name for the SHIM XML file, e.g. 'commeffectshim.xml' - ''' - return "%sshim.xml" % self.basename(ifc) - - def macxmlname(self, ifc = None): - ''' Return the string name for the MAC XML file, e.g. 'n3rfpipemac.xml' - ''' - return "%smac.xml" % self.basename(ifc) - - def phyxmlname(self, ifc = None): - ''' Return the string name for the PHY XML file, e.g. 
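A few worked examples of the emane074_fixup() helper defined above, assuming the EmaneModel class is importable: SI-style suffixes are expanded through the _prefix table and the result is divided by div to produce a 0.7.4-compatible value.

    EmaneModel.emane074_fixup("1M", div=1000)   # -> "1000"  (1e6 / 1000)
    EmaneModel.emane074_fixup("64k", div=1000)  # -> "64"
    EmaneModel.emane074_fixup(2.5)              # -> "2.5"   (non-strings are simply divided and stringified)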
'n3rfpipephy.xml' - ''' - return "%sphy.xml" % self.basename(ifc) - - def update(self, moved, moved_netifs): - ''' invoked from MobilityModel when nodes are moved; this causes - EMANE location events to be generated for the nodes in the moved - list, making EmaneModels compatible with Ns2ScriptedMobility - ''' - try: - wlan = self.session.obj(self.objid) - except KeyError: - return - wlan.setnempositions(moved_netifs) - - def linkconfig(self, netif, bw = None, delay = None, - loss = None, duplicate = None, jitter = None, netif2 = None): - ''' Invoked when a Link Message is received. Default is unimplemented. - ''' - warntxt = "EMANE model %s does not support link " % self._name - warntxt += "configuration, dropping Link Message" - self.session.warn(warntxt) - - @staticmethod - def valuestrtoparamlist(dom, name, value): - ''' Helper to convert a parameter to a paramlist. - Returns a an XML paramlist, or None if the value does not expand to - multiple values. - ''' - try: - values = maketuplefromstr(value, str) - except SyntaxError: - return None - if not hasattr(values, '__iter__'): - return None - if len(values) < 2: - return None - return addparamlisttoparent(dom, parent=None, name=name, values=values) - -class EmaneGlobalModel(EmaneModel): - ''' Global EMANE configuration options. - ''' - def __init__(self, session, objid = None, verbose = False): - EmaneModel.__init__(self, session, objid, verbose) - - # Over-The-Air channel required for EMANE 0.9.2 - _DEFAULT_OTA = '0' - _DEFAULT_DEV = 'lo' - if Emane.version >= Emane.EMANE092: - _DEFAULT_OTA = '1' - _DEFAULT_DEV = 'ctrl0' - - _name = "emane" - _confmatrix_platform_base = [ - ("otamanagerchannelenable", coreapi.CONF_DATA_TYPE_BOOL, _DEFAULT_OTA, - 'on,off', 'enable OTA Manager channel'), - ("otamanagergroup", coreapi.CONF_DATA_TYPE_STRING, '224.1.2.8:45702', - '', 'OTA Manager group'), - ("otamanagerdevice", coreapi.CONF_DATA_TYPE_STRING, _DEFAULT_DEV, - '', 'OTA Manager device'), - ("eventservicegroup", coreapi.CONF_DATA_TYPE_STRING, '224.1.2.8:45703', - '', 'Event Service group'), - ("eventservicedevice", coreapi.CONF_DATA_TYPE_STRING, _DEFAULT_DEV, - '', 'Event Service device'), - ("platform_id_start", coreapi.CONF_DATA_TYPE_INT32, '1', - '', 'starting Platform ID'), - ] - _confmatrix_platform_081 = [ - ("debugportenable", coreapi.CONF_DATA_TYPE_BOOL, '0', - 'on,off', 'enable debug port'), - ("debugport", coreapi.CONF_DATA_TYPE_UINT16, '47000', - '', 'debug port number'), - ] - _confmatrix_platform_091 = [ - ("controlportendpoint", coreapi.CONF_DATA_TYPE_STRING, '0.0.0.0:47000', - '', 'Control port address'), - ("antennaprofilemanifesturi", coreapi.CONF_DATA_TYPE_STRING, '', - '','antenna profile manifest URI'), - ] - _confmatrix_nem = [ - ("transportendpoint", coreapi.CONF_DATA_TYPE_STRING, 'localhost', - '', 'Transport endpoint address (port is automatic)'), - ("platformendpoint", coreapi.CONF_DATA_TYPE_STRING, 'localhost', - '', 'Platform endpoint address (port is automatic)'), - ("nem_id_start", coreapi.CONF_DATA_TYPE_INT32, '1', - '', 'starting NEM ID'), - ] - _confmatrix_nem_092 = [ - ("nem_id_start", coreapi.CONF_DATA_TYPE_INT32, '1', - '', 'starting NEM ID'), - ] - - if Emane.version >= Emane.EMANE091: - _confmatrix_platform = _confmatrix_platform_base + \ - _confmatrix_platform_091 - if Emane.version >= Emane.EMANE092: - _confmatrix_nem = _confmatrix_nem_092 - else: - _confmatrix_platform = _confmatrix_platform_base + \ - _confmatrix_platform_081 - _confmatrix = _confmatrix_platform + _confmatrix_nem - _confgroups = 
"Platform Attributes:1-%d|NEM Parameters:%d-%d" % \ - (len(_confmatrix_platform), len(_confmatrix_platform) + 1, - len(_confmatrix)) diff --git a/daemon/core/emane/ieee80211abg.py b/daemon/core/emane/ieee80211abg.py index 97ceeebc..abb592c7 100644 --- a/daemon/core/emane/ieee80211abg.py +++ b/daemon/core/emane/ieee80211abg.py @@ -1,117 +1,113 @@ -# -# CORE -# Copyright (c)2010-2014 the Boeing Company. -# See the LICENSE file included in this distribution. -# -# author: Jeff Ahrenholz -# -''' +""" ieee80211abg.py: EMANE IEEE 802.11abg model for CORE -''' +""" + +from core import emane +from core.emane.emanemodel import EmaneModel +from core.emane.universal import EmaneUniversalModel +from core.enumerations import ConfigDataTypes +from core.misc import log + +logger = log.get_logger(__name__) -import sys -import string try: from emanesh.events import EventService except: - pass -from core.api import coreapi -from core.constants import * -from emane import Emane, EmaneModel -from universal import EmaneUniversalModel + logger.error("error importing emanesh") + class EmaneIeee80211abgModel(EmaneModel): - def __init__(self, session, objid = None, verbose = False): - EmaneModel.__init__(self, session, objid, verbose) + def __init__(self, session, object_id=None): + EmaneModel.__init__(self, session, object_id) # model name - _name = "emane_ieee80211abg" + name = "emane_ieee80211abg" _80211rates = '1 1 Mbps,2 2 Mbps,3 5.5 Mbps,4 11 Mbps,5 6 Mbps,' + \ - '6 9 Mbps,7 12 Mbps,8 18 Mbps,9 24 Mbps,10 36 Mbps,11 48 Mbps,' + \ - '12 54 Mbps' - if Emane.version >= Emane.EMANE091: + '6 9 Mbps,7 12 Mbps,8 18 Mbps,9 24 Mbps,10 36 Mbps,11 48 Mbps,' + \ + '12 54 Mbps' + if emane.VERSION >= emane.EMANE091: xml_path = '/usr/share/emane/xml/models/mac/ieee80211abg' else: xml_path = "/usr/share/emane/models/ieee80211abg/xml" # MAC parameters _confmatrix_mac_base = [ - ("mode", coreapi.CONF_DATA_TYPE_UINT8, '0', + ("mode", ConfigDataTypes.UINT8.value, '0', '0 802.11b (DSSS only),1 802.11b (DSSS only),' + \ '2 802.11a or g (OFDM),3 802.11b/g (DSSS and OFDM)', 'mode'), - ("enablepromiscuousmode", coreapi.CONF_DATA_TYPE_BOOL, '0', + ("enablepromiscuousmode", ConfigDataTypes.BOOL.value, '0', 'On,Off', 'enable promiscuous mode'), - ("distance", coreapi.CONF_DATA_TYPE_UINT32, '1000', + ("distance", ConfigDataTypes.UINT32.value, '1000', '', 'max distance (m)'), - ("unicastrate", coreapi.CONF_DATA_TYPE_UINT8, '4', _80211rates, + ("unicastrate", ConfigDataTypes.UINT8.value, '4', _80211rates, 'unicast rate (Mbps)'), - ("multicastrate", coreapi.CONF_DATA_TYPE_UINT8, '1', _80211rates, + ("multicastrate", ConfigDataTypes.UINT8.value, '1', _80211rates, 'multicast rate (Mbps)'), - ("rtsthreshold", coreapi.CONF_DATA_TYPE_UINT16, '0', + ("rtsthreshold", ConfigDataTypes.UINT16.value, '0', '', 'RTS threshold (bytes)'), - ("pcrcurveuri", coreapi.CONF_DATA_TYPE_STRING, + ("pcrcurveuri", ConfigDataTypes.STRING.value, '%s/ieee80211pcr.xml' % xml_path, '', 'SINR/PCR curve file'), - ("flowcontrolenable", coreapi.CONF_DATA_TYPE_BOOL, '0', + ("flowcontrolenable", ConfigDataTypes.BOOL.value, '0', 'On,Off', 'enable traffic flow control'), - ("flowcontroltokens", coreapi.CONF_DATA_TYPE_UINT16, '10', + ("flowcontroltokens", ConfigDataTypes.UINT16.value, '10', '', 'number of flow control tokens'), ] - # mac parameters introduced in EMANE 0.8.1 + # mac parameters introduced in EMANE 0.8.1 # Note: The entry format for category queue parameters (queuesize, aifs, etc) were changed in # EMANE 9.x, but are being preserved for the time being due to 
space constraints in the - # CORE GUI. A conversion function (get9xmacparamequivalent) has been defined to support this. + # CORE GUI. A conversion function (get9xmacparamequivalent) has been defined to support this. _confmatrix_mac_extended = [ - ("wmmenable", coreapi.CONF_DATA_TYPE_BOOL, '0', + ("wmmenable", ConfigDataTypes.BOOL.value, '0', 'On,Off', 'WiFi Multimedia (WMM)'), - ("queuesize", coreapi.CONF_DATA_TYPE_STRING, '0:255 1:255 2:255 3:255', + ("queuesize", ConfigDataTypes.STRING.value, '0:255 1:255 2:255 3:255', '', 'queue size (0-4:size)'), - ("cwmin", coreapi.CONF_DATA_TYPE_STRING, '0:32 1:32 2:16 3:8', + ("cwmin", ConfigDataTypes.STRING.value, '0:32 1:32 2:16 3:8', '', 'min contention window (0-4:minw)'), - ("cwmax", coreapi.CONF_DATA_TYPE_STRING, '0:1024 1:1024 2:64 3:16', + ("cwmax", ConfigDataTypes.STRING.value, '0:1024 1:1024 2:64 3:16', '', 'max contention window (0-4:maxw)'), - ("aifs", coreapi.CONF_DATA_TYPE_STRING, '0:2 1:2 2:2 3:1', + ("aifs", ConfigDataTypes.STRING.value, '0:2 1:2 2:2 3:1', '', 'arbitration inter frame space (0-4:aifs)'), - ("txop", coreapi.CONF_DATA_TYPE_STRING, '0:0 1:0 2:0 3:0', + ("txop", ConfigDataTypes.STRING.value, '0:0 1:0 2:0 3:0', '', 'txop (0-4:usec)'), - ("retrylimit", coreapi.CONF_DATA_TYPE_STRING, '0:3 1:3 2:3 3:3', + ("retrylimit", ConfigDataTypes.STRING.value, '0:3 1:3 2:3 3:3', '', 'retry limit (0-4:numretries)'), ] _confmatrix_mac_091 = [ - ('radiometricenable', coreapi.CONF_DATA_TYPE_BOOL, '0', + ('radiometricenable', ConfigDataTypes.BOOL.value, '0', 'On,Off', 'report radio metrics via R2RI'), - ('radiometricreportinterval', coreapi.CONF_DATA_TYPE_FLOAT, '1.0', + ('radiometricreportinterval', ConfigDataTypes.FLOAT.value, '1.0', '', 'R2RI radio metric report interval (sec)'), - ('neighbormetricdeletetime', coreapi.CONF_DATA_TYPE_FLOAT, '60.0', + ('neighbormetricdeletetime', ConfigDataTypes.FLOAT.value, '60.0', '', 'R2RI neighbor table inactivity time (sec)'), ] _confmatrix_mac = _confmatrix_mac_base + _confmatrix_mac_extended - if Emane.version >= Emane.EMANE091: + if emane.VERSION >= emane.EMANE091: _confmatrix_mac += _confmatrix_mac_091 # PHY parameters from Universal PHY - _confmatrix_phy = EmaneUniversalModel._confmatrix + _confmatrix_phy = EmaneUniversalModel.config_matrix - _confmatrix = _confmatrix_mac + _confmatrix_phy + config_matrix = _confmatrix_mac + _confmatrix_phy # value groupings - _confgroups = "802.11 MAC Parameters:1-%d|Universal PHY Parameters:%d-%d" \ - % (len(_confmatrix_mac), len(_confmatrix_mac) + 1, len(_confmatrix)) + config_groups = "802.11 MAC Parameters:1-%d|Universal PHY Parameters:%d-%d" % ( + len(_confmatrix_mac), len(_confmatrix_mac) + 1, len(config_matrix)) def buildnemxmlfiles(self, e, ifc): - ''' Build the necessary nem, mac, and phy XMLs in the given path. - If an individual NEM has a nonstandard config, we need to build - that file also. Otherwise the WLAN-wide - nXXemane_ieee80211abgnem.xml, nXXemane_ieee80211abgemac.xml, - nXXemane_ieee80211abgphy.xml are used. - ''' - values = e.getifcconfig(self.objid, self._name, - self.getdefaultvalues(), ifc) + """ + Build the necessary nem, mac, and phy XMLs in the given path. + If an individual NEM has a nonstandard config, we need to build + that file also. Otherwise the WLAN-wide + nXXemane_ieee80211abgnem.xml, nXXemane_ieee80211abgemac.xml, + nXXemane_ieee80211abgphy.xml are used. 
+ """ + values = e.getifcconfig(self.object_id, self.name, self.getdefaultvalues(), ifc) if values is None: return nemdoc = e.xmldoc("nem") nem = nemdoc.getElementsByTagName("nem").pop() nem.setAttribute("name", "ieee80211abg NEM") - e.appendtransporttonem(nemdoc, nem, self.objid, ifc) + e.appendtransporttonem(nemdoc, nem, self.object_id, ifc) mactag = nemdoc.createElement("mac") mactag.setAttribute("definition", self.macxmlname(ifc)) nem.appendChild(mactag) @@ -130,15 +126,14 @@ class EmaneIeee80211abgModel(EmaneModel): phynames = names[len(self._confmatrix_mac):] # append all MAC options to macdoc - if Emane.version >= Emane.EMANE091: + if emane.VERSION >= emane.EMANE091: for macname in macnames: mac9xnvpairlist = self.get9xmacparamequivalent(macname, values) for nvpair in mac9xnvpairlist: mac.appendChild(e.xmlparam(macdoc, nvpair[0], nvpair[1])) else: - map( lambda n: mac.appendChild(e.xmlparam(macdoc, n, \ - self.valueof(n, values))), macnames) - + map(lambda n: mac.appendChild(e.xmlparam(macdoc, n, self.valueof(n, values))), macnames) + e.xmlwrite(macdoc, self.macxmlname(ifc)) phydoc = EmaneUniversalModel.getphydoc(e, self, values, phynames) @@ -149,25 +144,25 @@ class EmaneIeee80211abgModel(EmaneModel): # This allows CORE to preserve the entry layout for the mac 'category' parameters # and work with EMANE 9.x onwards. # - def get9xmacparamequivalent(self, macname, values): - ''' Generate a list of 80211abg mac parameters in 0.9.x layout for a given mac parameter - in 8.x layout.For mac category parameters, the list returned will contain the four + def get9xmacparamequivalent(self, macname, values): + """ + Generate a list of 80211abg mac parameters in 0.9.x layout for a given mac parameter + in 8.x layout.For mac category parameters, the list returned will contain the four equivalent 9.x parameter and value pairs. Otherwise, the list returned will only contain a single name and value pair. - ''' + """ nvpairlist = [] macparmval = self.valueof(macname, values) - if macname in ["queuesize","aifs","cwmin","cwmax","txop","retrylimit"]: + if macname in ["queuesize", "aifs", "cwmin", "cwmax", "txop", "retrylimit"]: for catval in macparmval.split(): idx_and_val = catval.split(":") idx = int(idx_and_val[0]) val = idx_and_val[1] # aifs and tx are in microseconds. Convert to seconds. - if macname in ["aifs","txop"]: - val = "%f" % (float(val)*(1e-6)) + if macname in ["aifs", "txop"]: + val = "%f" % (float(val) * 1e-6) name9x = "%s%d" % (macname, idx) nvpairlist.append([name9x, val]) else: nvpairlist.append([macname, macparmval]) return nvpairlist - diff --git a/daemon/core/emane/nodes.py b/daemon/core/emane/nodes.py index e240e913..950a057f 100644 --- a/daemon/core/emane/nodes.py +++ b/daemon/core/emane/nodes.py @@ -1,69 +1,72 @@ -# -# CORE -# Copyright (c)2010-2014 the Boeing Company. -# See the LICENSE file included in this distribution. -# -# author: Jeff Ahrenholz -# -''' +""" nodes.py: definition of an EmaneNode class for implementing configuration control of an EMANE emulation. An EmaneNode has several attached NEMs that share the same MAC+PHY model. 
-''' +""" -import sys -import os.path +from os import path -from core.api import coreapi +from core import emane from core.coreobj import PyCoreNet +from core.enumerations import LinkTypes +from core.enumerations import NodeTypes +from core.enumerations import RegisterTlvs +from core.misc import log + +logger = log.get_logger(__name__) + try: from emanesh.events import EventService from emanesh.events import LocationEvent -except Exception, e: - pass +except ImportError: + logger.error("error loading emanesh") try: import emaneeventservice import emaneeventlocation -except Exception, e: - ''' Don't require all CORE users to have EMANE libeventservice and its - Python bindings installed. - ''' - pass +except ImportError: + """ + Don't require all CORE users to have EMANE libeventservice and its + Python bindings installed. + """ + logger.error("error loading emaneeventservice and emaneeventlocation") + class EmaneNet(PyCoreNet): - ''' EMANE network base class. - ''' - apitype = coreapi.CORE_NODE_EMANE - linktype = coreapi.CORE_LINK_WIRELESS - type = "wlan" # icon used + """ + EMANE network base class. + """ + apitype = NodeTypes.EMANE.value + linktype = LinkTypes.WIRELESS.value + # icon used + type = "wlan" + class EmaneNode(EmaneNet): - ''' EMANE node contains NEM configuration and causes connected nodes - to have TAP interfaces (instead of VEth). These are managed by the - Emane controller object that exists in a session. - ''' - def __init__(self, session, objid = None, name = None, verbose = False, - start = True): - PyCoreNet.__init__(self, session, objid, name, verbose, start) - self.verbose = verbose + """ + EMANE node contains NEM configuration and causes connected nodes + to have TAP interfaces (instead of VEth). These are managed by the + Emane controller object that exists in a session. + """ + + def __init__(self, session, objid=None, name=None, start=True): + PyCoreNet.__init__(self, session, objid, name, start) self.conf = "" self.up = False self.nemidmap = {} self.model = None self.mobility = None - def linkconfig(self, netif, bw = None, delay = None, - loss = None, duplicate = None, jitter = None, netif2 = None): - ''' The CommEffect model supports link configuration. - ''' + def linkconfig(self, netif, bw=None, delay=None, loss=None, duplicate=None, jitter=None, netif2=None): + """ + The CommEffect model supports link configuration. 
+ """ if not self.model: return return self.model.linkconfig(netif=netif, bw=bw, delay=delay, loss=loss, - duplicate=duplicate, jitter=jitter, netif2=netif2) + duplicate=duplicate, jitter=jitter, netif2=netif2) def config(self, conf): - #print "emane", self.name, "got config:", conf self.conf = conf def shutdown(self): @@ -76,55 +79,57 @@ class EmaneNode(EmaneNet): pass def setmodel(self, model, config): - ''' set the EmaneModel associated with this node - ''' - if (self.verbose): - self.info("adding model %s" % model._name) - if model._type == coreapi.CORE_TLV_REG_WIRELESS: + """ + set the EmaneModel associated with this node + """ + logger.info("adding model: %s", model.name) + if model.config_type == RegisterTlvs.WIRELESS.value: # EmaneModel really uses values from ConfigurableManager # when buildnemxml() is called, not during init() - self.model = model(session=self.session, objid=self.objid, - verbose=self.verbose) - elif model._type == coreapi.CORE_TLV_REG_MOBILITY: - self.mobility = model(session=self.session, objid=self.objid, - verbose=self.verbose, values=config) + self.model = model(session=self.session, object_id=self.objid) + elif model.config_type == RegisterTlvs.MOBILITY.value: + self.mobility = model(session=self.session, object_id=self.objid, values=config) def setnemid(self, netif, nemid): - ''' Record an interface to numerical ID mapping. The Emane controller - object manages and assigns these IDs for all NEMs. - ''' + """ + Record an interface to numerical ID mapping. The Emane controller + object manages and assigns these IDs for all NEMs. + """ self.nemidmap[netif] = nemid def getnemid(self, netif): - ''' Given an interface, return its numerical ID. - ''' + """ + Given an interface, return its numerical ID. + """ if netif not in self.nemidmap: return None else: return self.nemidmap[netif] def getnemnetif(self, nemid): - ''' Given a numerical NEM ID, return its interface. This returns the - first interface that matches the given NEM ID. - ''' + """ + Given a numerical NEM ID, return its interface. This returns the + first interface that matches the given NEM ID. + """ for netif in self.nemidmap: if self.nemidmap[netif] == nemid: return netif return None def netifs(self, sort=True): - ''' Retrieve list of linked interfaces sorted by node number. - ''' + """ + Retrieve list of linked interfaces sorted by node number. + """ return sorted(self._netif.values(), key=lambda ifc: ifc.node.objid) def buildplatformxmlentry(self, doc): - ''' Return a dictionary of XML elements describing the NEMs - connected to this EmaneNode for inclusion in the platform.xml file. - ''' + """ + Return a dictionary of XML elements describing the NEMs + connected to this EmaneNode for inclusion in the platform.xml file. + """ ret = {} if self.model is None: - self.info("warning: EmaneNode %s has no associated model" % \ - self.name) + logger.info("warning: EmaneNode %s has no associated model" % self.name) return ret for netif in self.netifs(): # @@ -139,9 +144,9 @@ class EmaneNode(EmaneNet): return ret def buildnemxmlfiles(self, emane): - ''' Let the configured model build the necessary nem, mac, and phy - XMLs. - ''' + """ + Let the configured model build the necessary nem, mac, and phy XMLs. + """ if self.model is None: return # build XML for overall network (EmaneNode) configs @@ -166,8 +171,9 @@ class EmaneNode(EmaneNet): self.buildtransportxml(emane, rtype) def buildtransportxml(self, emane, type): - ''' Write a transport XML file for the Virtual or Raw Transport. 
- ''' + """ + Write a transport XML file for the Virtual or Raw Transport. + """ transdoc = emane.xmldoc("transport") trans = transdoc.getElementsByTagName("transport").pop() trans.setAttribute("name", "%s Transport" % type.capitalize()) @@ -176,7 +182,7 @@ class EmaneNode(EmaneNet): flowcontrol = False names = self.model.getnames() - values = emane.getconfig(self.objid, self.model._name, + values = emane.getconfig(self.objid, self.model.name, self.model.getdefaultvalues())[1] if "flowcontrolenable" in names and values: i = names.index("flowcontrolenable") @@ -184,35 +190,30 @@ class EmaneNode(EmaneNet): flowcontrol = True if "virtual" in type.lower(): - if os.path.exists("/dev/net/tun_flowctl"): - trans.appendChild(emane.xmlparam(transdoc, "devicepath", - "/dev/net/tun_flowctl")) + if path.exists("/dev/net/tun_flowctl"): + trans.appendChild(emane.xmlparam(transdoc, "devicepath", "/dev/net/tun_flowctl")) else: - trans.appendChild(emane.xmlparam(transdoc, "devicepath", - "/dev/net/tun")) + trans.appendChild(emane.xmlparam(transdoc, "devicepath", "/dev/net/tun")) if flowcontrol: - trans.appendChild(emane.xmlparam(transdoc, "flowcontrolenable", - "on")) + trans.appendChild(emane.xmlparam(transdoc, "flowcontrolenable", "on")) emane.xmlwrite(transdoc, self.transportxmlname(type.lower())) def transportxmlname(self, type): - ''' Return the string name for the Transport XML file, - e.g. 'n3transvirtual.xml' - ''' + """ + Return the string name for the Transport XML file, e.g. 'n3transvirtual.xml' + """ return "n%strans%s.xml" % (self.objid, type) - def installnetifs(self, do_netns=True): - ''' Install TAP devices into their namespaces. This is done after - EMANE daemons have been started, because that is their only chance - to bind to the TAPs. - ''' - if self.session.emane.genlocationevents() and \ - self.session.emane.service is None: + """ + Install TAP devices into their namespaces. This is done after + EMANE daemons have been started, because that is their only chance + to bind to the TAPs. + """ + if self.session.emane.genlocationevents() and self.session.emane.service is None: warntxt = "unable to publish EMANE events because the eventservice " warntxt += "Python bindings failed to load" - self.session.exception(coreapi.CORE_EXCP_LEVEL_ERROR, self.name, - self.objid, warntxt) + logger.error(warntxt) for netif in self.netifs(): if do_netns and "virtual" in netif.transport_type.lower(): @@ -224,98 +225,99 @@ class EmaneNode(EmaneNet): # at this point we register location handlers for generating # EMANE location events netif.poshook = self.setnemposition - (x,y,z) = netif.node.position.get() + (x, y, z) = netif.node.position.get() self.setnemposition(netif, x, y, z) def deinstallnetifs(self): - ''' Uninstall TAP devices. This invokes their shutdown method for - any required cleanup; the device may be actually removed when - emanetransportd terminates. - ''' + """ + Uninstall TAP devices. This invokes their shutdown method for + any required cleanup; the device may be actually removed when + emanetransportd terminates. + """ for netif in self.netifs(): if "virtual" in netif.transport_type.lower(): netif.shutdown() netif.poshook = None def setnemposition(self, netif, x, y, z): - ''' Publish a NEM location change event using the EMANE event service. - ''' + """ + Publish a NEM location change event using the EMANE event service. 
+ """ if self.session.emane.service is None: - if self.verbose: - self.info("position service not available") + logger.info("position service not available") return - nemid = self.getnemid(netif) + nemid = self.getnemid(netif) ifname = netif.localname if nemid is None: - self.info("nemid for %s is unknown" % ifname) + logger.info("nemid for %s is unknown" % ifname) return (lat, long, alt) = self.session.location.getgeo(x, y, z) - if self.verbose: - self.info("setnemposition %s (%s) x,y,z=(%d,%d,%s)" - "(%.6f,%.6f,%.6f)" % \ - (ifname, nemid, x, y, z, lat, long, alt)) - if self.session.emane.version >= self.session.emane.EMANE091: + logger.info("setnemposition %s (%s) x,y,z=(%d,%d,%s)" + "(%.6f,%.6f,%.6f)" % \ + (ifname, nemid, x, y, z, lat, long, alt)) + if emane.VERSION >= emane.EMANE091: event = LocationEvent() else: event = emaneeventlocation.EventLocation(1) # altitude must be an integer or warning is printed # unused: yaw, pitch, roll, azimuth, elevation, velocity alt = int(round(alt)) - if self.session.emane.version >= self.session.emane.EMANE091: + if emane.VERSION >= emane.EMANE091: event.append(nemid, latitude=lat, longitude=long, altitude=alt) self.session.emane.service.publish(0, event) else: event.set(0, nemid, lat, long, alt) - self.session.emane.service.publish(emaneeventlocation.EVENT_ID, - emaneeventservice.PLATFORMID_ANY, - emaneeventservice.NEMID_ANY, - emaneeventservice.COMPONENTID_ANY, - event.export()) + self.session.emane.service.publish( + emaneeventlocation.EVENT_ID, + emaneeventservice.PLATFORMID_ANY, + emaneeventservice.NEMID_ANY, + emaneeventservice.COMPONENTID_ANY, + event.export() + ) def setnempositions(self, moved_netifs): - ''' Several NEMs have moved, from e.g. a WaypointMobilityModel - calculation. Generate an EMANE Location Event having several - entries for each netif that has moved. - ''' + """ + Several NEMs have moved, from e.g. a WaypointMobilityModel + calculation. Generate an EMANE Location Event having several + entries for each netif that has moved. 
+ """ if len(moved_netifs) == 0: return if self.session.emane.service is None: - if self.verbose: - self.info("position service not available") + logger.info("position service not available") return - if self.session.emane.version >= self.session.emane.EMANE091: + if emane.VERSION >= emane.EMANE091: event = LocationEvent() else: event = emaneeventlocation.EventLocation(len(moved_netifs)) i = 0 for netif in moved_netifs: - nemid = self.getnemid(netif) + nemid = self.getnemid(netif) ifname = netif.localname if nemid is None: - self.info("nemid for %s is unknown" % ifname) + logger.info("nemid for %s is unknown" % ifname) continue (x, y, z) = netif.node.getposition() (lat, long, alt) = self.session.location.getgeo(x, y, z) - if self.verbose: - self.info("setnempositions %d %s (%s) x,y,z=(%d,%d,%s)" - "(%.6f,%.6f,%.6f)" % \ - (i, ifname, nemid, x, y, z, lat, long, alt)) + logger.info("setnempositions %d %s (%s) x,y,z=(%d,%d,%s)" + "(%.6f,%.6f,%.6f)" % + (i, ifname, nemid, x, y, z, lat, long, alt)) # altitude must be an integer or warning is printed alt = int(round(alt)) - if self.session.emane.version >= self.session.emane.EMANE091: + if emane.VERSION >= emane.EMANE091: event.append(nemid, latitude=lat, longitude=long, altitude=alt) else: event.set(i, nemid, lat, long, alt) i += 1 - if self.session.emane.version >= self.session.emane.EMANE091: + if emane.VERSION >= emane.EMANE091: self.session.emane.service.publish(0, event) else: - self.session.emane.service.publish(emaneeventlocation.EVENT_ID, - emaneeventservice.PLATFORMID_ANY, - emaneeventservice.NEMID_ANY, - emaneeventservice.COMPONENTID_ANY, - event.export()) - - + self.session.emane.service.publish( + emaneeventlocation.EVENT_ID, + emaneeventservice.PLATFORMID_ANY, + emaneeventservice.NEMID_ANY, + emaneeventservice.COMPONENTID_ANY, + event.export() + ) diff --git a/daemon/core/emane/rfpipe.py b/daemon/core/emane/rfpipe.py index 6eb329e9..0d56e990 100644 --- a/daemon/core/emane/rfpipe.py +++ b/daemon/core/emane/rfpipe.py @@ -1,33 +1,28 @@ -# -# CORE -# Copyright (c)2010-2014 the Boeing Company. -# See the LICENSE file included in this distribution. 
-# -# authors: Jeff Ahrenholz -# Harry Bullen -# -''' +""" rfpipe.py: EMANE RF-PIPE model for CORE -''' +""" + +from core import emane +from core.emane.emanemodel import EmaneModel +from core.emane.universal import EmaneUniversalModel +from core.enumerations import ConfigDataTypes +from core.misc import log + +logger = log.get_logger(__name__) -import sys -import string try: from emanesh.events import EventService -except: - pass -from core.api import coreapi -from core.constants import * -from emane import Emane, EmaneModel -from universal import EmaneUniversalModel +except ImportError: + logger.error("error importing emanesh") + class EmaneRfPipeModel(EmaneModel): - def __init__(self, session, objid = None, verbose = False): - EmaneModel.__init__(self, session, objid, verbose) + def __init__(self, session, object_id=None): + EmaneModel.__init__(self, session, object_id) # model name - _name = "emane_rfpipe" - if Emane.version >= Emane.EMANE091: + name = "emane_rfpipe" + if emane.VERSION >= emane.EMANE091: xml_path = '/usr/share/emane/xml/models/mac/rfpipe' else: xml_path = "/usr/share/emane/models/rfpipe/xml" @@ -36,68 +31,69 @@ class EmaneRfPipeModel(EmaneModel): # ( 'name', 'type', 'default', 'possible-value-list', 'caption') # MAC parameters _confmatrix_mac_base = [ - ("enablepromiscuousmode", coreapi.CONF_DATA_TYPE_BOOL, '0', + ("enablepromiscuousmode", ConfigDataTypes.BOOL.value, '0', 'True,False', 'enable promiscuous mode'), - ("datarate", coreapi.CONF_DATA_TYPE_UINT32, '1M', + ("datarate", ConfigDataTypes.UINT32.value, '1M', '', 'data rate (bps)'), - ("flowcontrolenable", coreapi.CONF_DATA_TYPE_BOOL, '0', + ("flowcontrolenable", ConfigDataTypes.BOOL.value, '0', 'On,Off', 'enable traffic flow control'), - ("flowcontroltokens", coreapi.CONF_DATA_TYPE_UINT16, '10', + ("flowcontroltokens", ConfigDataTypes.UINT16.value, '10', '', 'number of flow control tokens'), - ("pcrcurveuri", coreapi.CONF_DATA_TYPE_STRING, + ("pcrcurveuri", ConfigDataTypes.STRING.value, '%s/rfpipepcr.xml' % xml_path, '', 'SINR/PCR curve file'), ] _confmatrix_mac_081 = [ - ("jitter", coreapi.CONF_DATA_TYPE_FLOAT, '0.0', + ("jitter", ConfigDataTypes.FLOAT.value, '0.0', '', 'transmission jitter (usec)'), - ("delay", coreapi.CONF_DATA_TYPE_FLOAT, '0.0', + ("delay", ConfigDataTypes.FLOAT.value, '0.0', '', 'transmission delay (usec)'), - ("transmissioncontrolmap", coreapi.CONF_DATA_TYPE_STRING, '', + ("transmissioncontrolmap", ConfigDataTypes.STRING.value, '', '', 'tx control map (nem:rate:freq:tx_dBm)'), - ("enabletighttiming", coreapi.CONF_DATA_TYPE_BOOL, '0', + ("enabletighttiming", ConfigDataTypes.BOOL.value, '0', 'On,Off', 'enable tight timing for pkt delay'), ] _confmatrix_mac_091 = [ - ("jitter", coreapi.CONF_DATA_TYPE_FLOAT, '0.0', + ("jitter", ConfigDataTypes.FLOAT.value, '0.0', '', 'transmission jitter (sec)'), - ("delay", coreapi.CONF_DATA_TYPE_FLOAT, '0.0', + ("delay", ConfigDataTypes.FLOAT.value, '0.0', '', 'transmission delay (sec)'), - ('radiometricenable', coreapi.CONF_DATA_TYPE_BOOL, '0', + ('radiometricenable', ConfigDataTypes.BOOL.value, '0', 'On,Off', 'report radio metrics via R2RI'), - ('radiometricreportinterval', coreapi.CONF_DATA_TYPE_FLOAT, '1.0', + ('radiometricreportinterval', ConfigDataTypes.FLOAT.value, '1.0', '', 'R2RI radio metric report interval (sec)'), - ('neighbormetricdeletetime', coreapi.CONF_DATA_TYPE_FLOAT, '60.0', + ('neighbormetricdeletetime', ConfigDataTypes.FLOAT.value, '60.0', '', 'R2RI neighbor table inactivity time (sec)'), ] - if Emane.version >= Emane.EMANE091: + if 
emane.VERSION >= emane.EMANE091: _confmatrix_mac = _confmatrix_mac_base + _confmatrix_mac_091 else: _confmatrix_mac = _confmatrix_mac_base + _confmatrix_mac_081 # PHY parameters from Universal PHY - _confmatrix_phy = EmaneUniversalModel._confmatrix + _confmatrix_phy = EmaneUniversalModel.config_matrix - _confmatrix = _confmatrix_mac + _confmatrix_phy + config_matrix = _confmatrix_mac + _confmatrix_phy # value groupings - _confgroups = "RF-PIPE MAC Parameters:1-%d|Universal PHY Parameters:%d-%d" \ - % ( len(_confmatrix_mac), len(_confmatrix_mac) + 1, len(_confmatrix)) + config_groups = "RF-PIPE MAC Parameters:1-%d|Universal PHY Parameters:%d-%d" % ( + len(_confmatrix_mac), len(_confmatrix_mac) + 1, len(config_matrix)) def buildnemxmlfiles(self, e, ifc): - ''' Build the necessary nem, mac, and phy XMLs in the given path. - If an individual NEM has a nonstandard config, we need to build - that file also. Otherwise the WLAN-wide nXXemane_rfpipenem.xml, - nXXemane_rfpipemac.xml, nXXemane_rfpipephy.xml are used. - ''' - values = e.getifcconfig(self.objid, self._name, + """ + Build the necessary nem, mac, and phy XMLs in the given path. + If an individual NEM has a nonstandard config, we need to build + that file also. Otherwise the WLAN-wide nXXemane_rfpipenem.xml, + nXXemane_rfpipemac.xml, nXXemane_rfpipephy.xml are used. + """ + values = e.getifcconfig(self.object_id, self.name, self.getdefaultvalues(), ifc) if values is None: return nemdoc = e.xmldoc("nem") nem = nemdoc.getElementsByTagName("nem").pop() nem.setAttribute("name", "RF-PIPE NEM") - e.appendtransporttonem(nemdoc, nem, self.objid, ifc) + e.appendtransporttonem(nemdoc, nem, self.object_id, ifc) mactag = nemdoc.createElement("mac") mactag.setAttribute("definition", self.macxmlname(ifc)) nem.appendChild(mactag) @@ -115,7 +111,7 @@ class EmaneRfPipeModel(EmaneModel): mac.setAttribute("name", "RF-PIPE MAC") mac.setAttribute("library", "rfpipemaclayer") if e.version < e.EMANE091 and \ - self.valueof("transmissioncontrolmap", values) is "": + self.valueof("transmissioncontrolmap", values) is "": macnames.remove("transmissioncontrolmap") # EMANE 0.7.4 support if e.version == e.EMANE074: @@ -124,10 +120,8 @@ class EmaneRfPipeModel(EmaneModel): values = list(values) values[i] = self.emane074_fixup(values[i], 1000) # append MAC options to macdoc - map(lambda n: mac.appendChild(e.xmlparam(macdoc, n, \ - self.valueof(n, values))), macnames) + map(lambda n: mac.appendChild(e.xmlparam(macdoc, n, self.valueof(n, values))), macnames) e.xmlwrite(macdoc, self.macxmlname(ifc)) phydoc = EmaneUniversalModel.getphydoc(e, self, values, phynames) e.xmlwrite(phydoc, self.phyxmlname(ifc)) - diff --git a/daemon/core/emane/tdma.py b/daemon/core/emane/tdma.py index cc705f56..0e748a77 100644 --- a/daemon/core/emane/tdma.py +++ b/daemon/core/emane/tdma.py @@ -1,91 +1,85 @@ - -# -# CORE -# Copyright (c)2013 Company. -# See the LICENSE file included in this distribution. 
-# -# author: Name -# -''' +""" tdma.py: EMANE TDMA model bindings for CORE -''' +""" + +from core import emane +from core.emane.emanemodel import EmaneModel +from core.emane.universal import EmaneUniversalModel +from core.enumerations import ConfigDataTypes +from core.misc import log + +logger = log.get_logger(__name__) -import sys -import string try: from emanesh.events import EventService except: - pass -from core.api import coreapi -from core.constants import * -from emane import Emane, EmaneModel -from universal import EmaneUniversalModel + logger.error("error importing emanesh") + class EmaneTdmaModel(EmaneModel): - def __init__(self, session, objid = None, verbose = False): - EmaneModel.__init__(self, session, objid, verbose) + def __init__(self, session, object_id=None): + EmaneModel.__init__(self, session, object_id) # model name - _name = "emane_tdma" - if Emane.version >= Emane.EMANE101: + name = "emane_tdma" + if emane.VERSION >= emane.EMANE101: xml_path = '/usr/share/emane/xml/models/mac/tdmaeventscheduler' else: raise Exception("EMANE TDMA requires EMANE 1.0.1 or greater") - - + # MAC parameters _confmatrix_mac = [ - ("enablepromiscuousmode", coreapi.CONF_DATA_TYPE_BOOL, '0', + ("enablepromiscuousmode", ConfigDataTypes.BOOL.value, '0', 'True,False', 'enable promiscuous mode'), - ("flowcontrolenable", coreapi.CONF_DATA_TYPE_BOOL, '0', + ("flowcontrolenable", ConfigDataTypes.BOOL.value, '0', 'On,Off', 'enable traffic flow control'), - ("flowcontroltokens", coreapi.CONF_DATA_TYPE_UINT16, '10', + ("flowcontroltokens", ConfigDataTypes.UINT16.value, '10', '', 'number of flow control tokens'), - ("fragmentcheckthreshold", coreapi.CONF_DATA_TYPE_UINT16, '2', + ("fragmentcheckthreshold", ConfigDataTypes.UINT16.value, '2', '', 'rate in seconds for check if fragment reassembly efforts should be abandoned'), - ("fragmenttimeoutthreshold", coreapi.CONF_DATA_TYPE_UINT16, '5', + ("fragmenttimeoutthreshold", ConfigDataTypes.UINT16.value, '5', '', 'threshold in seconds to wait for another packet fragment for reassembly'), - ('neighbormetricdeletetime', coreapi.CONF_DATA_TYPE_FLOAT, '60.0', + ('neighbormetricdeletetime', ConfigDataTypes.FLOAT.value, '60.0', '', 'neighbor RF reception timeout for removal from neighbor table (sec)'), - ('neighbormetricupdateinterval', coreapi.CONF_DATA_TYPE_FLOAT, '1.0', + ('neighbormetricupdateinterval', ConfigDataTypes.FLOAT.value, '1.0', '', 'neighbor table update interval (sec)'), - ("pcrcurveuri", coreapi.CONF_DATA_TYPE_STRING, '%s/tdmabasemodelpcr.xml' % xml_path, + ("pcrcurveuri", ConfigDataTypes.STRING.value, '%s/tdmabasemodelpcr.xml' % xml_path, '', 'SINR/PCR curve file'), - ("queue.aggregationenable", coreapi.CONF_DATA_TYPE_BOOL, '1', + ("queue.aggregationenable", ConfigDataTypes.BOOL.value, '1', 'On,Off', 'enable transmit packet aggregation'), - ('queue.aggregationslotthreshold', coreapi.CONF_DATA_TYPE_FLOAT, '90.0', + ('queue.aggregationslotthreshold', ConfigDataTypes.FLOAT.value, '90.0', '', 'percentage of a slot that must be filled in order to conclude aggregation'), - ("queue.depth", coreapi.CONF_DATA_TYPE_UINT16, '256', + ("queue.depth", ConfigDataTypes.UINT16.value, '256', '', 'size of the per service class downstream packet queues (packets)'), - ("queue.fragmentationenable", coreapi.CONF_DATA_TYPE_BOOL, '1', + ("queue.fragmentationenable", ConfigDataTypes.BOOL.value, '1', 'On,Off', 'enable packet fragmentation (over multiple slots)'), - ("queue.strictdequeueenable", coreapi.CONF_DATA_TYPE_BOOL, '0', + ("queue.strictdequeueenable", 
ConfigDataTypes.BOOL.value, '0', 'On,Off', 'enable strict dequeueing to specified queues only'), ] # PHY parameters from Universal PHY - _confmatrix_phy = EmaneUniversalModel._confmatrix + _confmatrix_phy = EmaneUniversalModel.config_matrix - _confmatrix = _confmatrix_mac + _confmatrix_phy + config_matrix = _confmatrix_mac + _confmatrix_phy # value groupings - _confgroups = "TDMA MAC Parameters:1-%d|Universal PHY Parameters:%d-%d" % \ - (len(_confmatrix_mac), len(_confmatrix_mac) + 1, len(_confmatrix)) + config_groups = "TDMA MAC Parameters:1-%d|Universal PHY Parameters:%d-%d" % ( + len(_confmatrix_mac), len(_confmatrix_mac) + 1, len(config_matrix)) def buildnemxmlfiles(self, e, ifc): - ''' Build the necessary nem, mac, and phy XMLs in the given path. - If an individual NEM has a nonstandard config, we need to build - that file also. Otherwise the WLAN-wide nXXemane_tdmanem.xml, - nXXemane_tdmamac.xml, nXXemane_tdmaphy.xml are used. - ''' - values = e.getifcconfig(self.objid, self._name, - self.getdefaultvalues(), ifc) + """ + Build the necessary nem, mac, and phy XMLs in the given path. + If an individual NEM has a nonstandard config, we need to build + that file also. Otherwise the WLAN-wide nXXemane_tdmanem.xml, + nXXemane_tdmamac.xml, nXXemane_tdmaphy.xml are used. + """ + values = e.getifcconfig(self.object_id, self.name, self.getdefaultvalues(), ifc) if values is None: return nemdoc = e.xmldoc("nem") nem = nemdoc.getElementsByTagName("nem").pop() nem.setAttribute("name", "TDMA NEM") - e.appendtransporttonem(nemdoc, nem, self.objid, ifc) + e.appendtransporttonem(nemdoc, nem, self.object_id, ifc) mactag = nemdoc.createElement("mac") mactag.setAttribute("definition", self.macxmlname(ifc)) nem.appendChild(mactag) @@ -105,10 +99,8 @@ class EmaneTdmaModel(EmaneModel): mac.setAttribute("name", "TDMA MAC") mac.setAttribute("library", "tdmaeventschedulerradiomodel") # append MAC options to macdoc - map(lambda n: mac.appendChild(e.xmlparam(macdoc, n, \ - self.valueof(n, values))), macnames) + map(lambda n: mac.appendChild(e.xmlparam(macdoc, n, self.valueof(n, values))), macnames) e.xmlwrite(macdoc, self.macxmlname(ifc)) phydoc = EmaneUniversalModel.getphydoc(e, self, values, phynames) e.xmlwrite(phydoc, self.phyxmlname(ifc)) - diff --git a/daemon/core/emane/universal.py b/daemon/core/emane/universal.py index a5878234..ec37d0ea 100644 --- a/daemon/core/emane/universal.py +++ b/daemon/core/emane/universal.py @@ -1,99 +1,97 @@ -# -# CORE -# Copyright (c)2010-2014 the Boeing Company. -# See the LICENSE file included in this distribution. -# -# author: Jeff Ahrenholz -# -''' +""" universal.py: EMANE Universal PHY model for CORE. Enumerates configuration items used for the Universal PHY. -''' +""" + +from core import emane +from core.emane.emanemodel import EmaneModel +from core.enumerations import ConfigDataTypes +from core.misc import log + +logger = log.get_logger(__name__) -import sys -import string try: from emanesh.events import EventService -except: - pass -from core.api import coreapi -from core.constants import * -from emane import Emane, EmaneModel +except ImportError: + logger.error("error importing emanesh") + class EmaneUniversalModel(EmaneModel): - ''' This Univeral PHY model is meant to be imported by other models, - not instantiated. - ''' - def __init__(self, session, objid = None, verbose = False): - raise SyntaxError + """ + This Univeral PHY model is meant to be imported by other models, + not instantiated. 
+ """ - _name = "emane_universal" + def __init__(self, session, object_id=None): + raise NotImplemented("Cannot use this class directly") + + name = "emane_universal" _xmlname = "universalphy" _xmllibrary = "universalphylayer" # universal PHY parameters _confmatrix_base = [ - ("bandwidth", coreapi.CONF_DATA_TYPE_UINT64, '1M', - '', 'rf bandwidth (hz)'), - ("frequency", coreapi.CONF_DATA_TYPE_UINT64, '2.347G', - '','frequency (Hz)'), - ("frequencyofinterest", coreapi.CONF_DATA_TYPE_UINT64, '2.347G', - '','frequency of interest (Hz)'), - ("subid", coreapi.CONF_DATA_TYPE_UINT16, '1', - '','subid'), - ("systemnoisefigure", coreapi.CONF_DATA_TYPE_FLOAT, '4.0', - '','system noise figure (dB)'), - ("txpower", coreapi.CONF_DATA_TYPE_FLOAT, '0.0', - '','transmit power (dBm)'), + ("bandwidth", ConfigDataTypes.UINT64.value, '1M', + '', 'rf bandwidth (hz)'), + ("frequency", ConfigDataTypes.UINT64.value, '2.347G', + '', 'frequency (Hz)'), + ("frequencyofinterest", ConfigDataTypes.UINT64.value, '2.347G', + '', 'frequency of interest (Hz)'), + ("subid", ConfigDataTypes.UINT16.value, '1', + '', 'subid'), + ("systemnoisefigure", ConfigDataTypes.FLOAT.value, '4.0', + '', 'system noise figure (dB)'), + ("txpower", ConfigDataTypes.FLOAT.value, '0.0', + '', 'transmit power (dBm)'), ] _confmatrix_081 = [ - ("antennagain", coreapi.CONF_DATA_TYPE_FLOAT, '0.0', - '','antenna gain (dBi)'), - ("antennaazimuth", coreapi.CONF_DATA_TYPE_FLOAT, '0.0', - '','antenna azimuth (deg)'), - ("antennaelevation", coreapi.CONF_DATA_TYPE_FLOAT, '0.0', - '','antenna elevation (deg)'), - ("antennaprofileid", coreapi.CONF_DATA_TYPE_STRING, '1', - '','antenna profile ID'), - ("antennaprofilemanifesturi", coreapi.CONF_DATA_TYPE_STRING, '', - '','antenna profile manifest URI'), - ("antennaprofileenable", coreapi.CONF_DATA_TYPE_BOOL, '0', - 'On,Off','antenna profile mode'), - ("defaultconnectivitymode", coreapi.CONF_DATA_TYPE_BOOL, '1', - 'On,Off','default connectivity'), - ("frequencyofinterestfilterenable", coreapi.CONF_DATA_TYPE_BOOL, '1', - 'On,Off','frequency of interest filter enable'), - ("noiseprocessingmode", coreapi.CONF_DATA_TYPE_BOOL, '0', - 'On,Off','enable noise processing'), - ("pathlossmode", coreapi.CONF_DATA_TYPE_STRING, '2ray', - 'pathloss,2ray,freespace','path loss mode'), + ("antennagain", ConfigDataTypes.FLOAT.value, '0.0', + '', 'antenna gain (dBi)'), + ("antennaazimuth", ConfigDataTypes.FLOAT.value, '0.0', + '', 'antenna azimuth (deg)'), + ("antennaelevation", ConfigDataTypes.FLOAT.value, '0.0', + '', 'antenna elevation (deg)'), + ("antennaprofileid", ConfigDataTypes.STRING.value, '1', + '', 'antenna profile ID'), + ("antennaprofilemanifesturi", ConfigDataTypes.STRING.value, '', + '', 'antenna profile manifest URI'), + ("antennaprofileenable", ConfigDataTypes.BOOL.value, '0', + 'On,Off', 'antenna profile mode'), + ("defaultconnectivitymode", ConfigDataTypes.BOOL.value, '1', + 'On,Off', 'default connectivity'), + ("frequencyofinterestfilterenable", ConfigDataTypes.BOOL.value, '1', + 'On,Off', 'frequency of interest filter enable'), + ("noiseprocessingmode", ConfigDataTypes.BOOL.value, '0', + 'On,Off', 'enable noise processing'), + ("pathlossmode", ConfigDataTypes.STRING.value, '2ray', + 'pathloss,2ray,freespace', 'path loss mode'), ] _confmatrix_091 = [ - ("fixedantennagain", coreapi.CONF_DATA_TYPE_FLOAT, '0.0', - '','antenna gain (dBi)'), - ("fixedantennagainenable", coreapi.CONF_DATA_TYPE_BOOL, '1', - 'On,Off','enable fixed antenna gain'), - ("noisemode", coreapi.CONF_DATA_TYPE_STRING, 'none', - 
'none,all,outofband','noise processing mode'), - ("noisebinsize", coreapi.CONF_DATA_TYPE_UINT64, '20', - '','noise bin size in microseconds'), - ("propagationmodel", coreapi.CONF_DATA_TYPE_STRING, '2ray', - 'precomputed,2ray,freespace','path loss mode'), + ("fixedantennagain", ConfigDataTypes.FLOAT.value, '0.0', + '', 'antenna gain (dBi)'), + ("fixedantennagainenable", ConfigDataTypes.BOOL.value, '1', + 'On,Off', 'enable fixed antenna gain'), + ("noisemode", ConfigDataTypes.STRING.value, 'none', + 'none,all,outofband', 'noise processing mode'), + ("noisebinsize", ConfigDataTypes.UINT64.value, '20', + '', 'noise bin size in microseconds'), + ("propagationmodel", ConfigDataTypes.STRING.value, '2ray', + 'precomputed,2ray,freespace', 'path loss mode'), ] - if Emane.version >= Emane.EMANE091: - _confmatrix = _confmatrix_base + _confmatrix_091 + if emane.VERSION >= emane.EMANE091: + config_matrix = _confmatrix_base + _confmatrix_091 else: - _confmatrix = _confmatrix_base + _confmatrix_081 + config_matrix = _confmatrix_base + _confmatrix_081 # old parameters _confmatrix_ver074 = [ - ("antennaazimuthbeamwidth", coreapi.CONF_DATA_TYPE_FLOAT, '360.0', - '','azimith beam width (deg)'), - ("antennaelevationbeamwidth", coreapi.CONF_DATA_TYPE_FLOAT, '180.0', - '','elevation beam width (deg)'), - ("antennatype", coreapi.CONF_DATA_TYPE_STRING, 'omnidirectional', - 'omnidirectional,unidirectional','antenna type'), - ] + ("antennaazimuthbeamwidth", ConfigDataTypes.FLOAT.value, '360.0', + '', 'azimith beam width (deg)'), + ("antennaelevationbeamwidth", ConfigDataTypes.FLOAT.value, '180.0', + '', 'elevation beam width (deg)'), + ("antennatype", ConfigDataTypes.STRING.value, 'omnidirectional', + 'omnidirectional,unidirectional', 'antenna type'), + ] # parameters that require unit conversion for 0.7.4 _update_ver074 = ("bandwidth", "frequency", "frequencyofinterest") @@ -102,16 +100,15 @@ class EmaneUniversalModel(EmaneModel): "antennaprofilemanifesturi", "frequencyofinterestfilterenable") - @classmethod def getphydoc(cls, e, mac, values, phynames): phydoc = e.xmldoc("phy") phy = phydoc.getElementsByTagName("phy").pop() phy.setAttribute("name", cls._xmlname) - if e.version < e.EMANE091: + if emane.VERSION < emane.EMANE091: phy.setAttribute("library", cls._xmllibrary) # EMANE 0.7.4 suppport - to be removed when 0.7.4 support is deprecated - if e.version == e.EMANE074: + if emane.VERSION == emane.EMANE074: names = mac.getnames() values = list(values) phynames = list(phynames) @@ -128,7 +125,7 @@ class EmaneUniversalModel(EmaneModel): phy.appendChild(e.xmlparam(phydoc, old[0], old[2])) frequencies = None - if e.version >= e.EMANE091: + if emane.VERSION >= emane.EMANE091: name = "frequencyofinterest" value = mac.valueof(name, values) frequencies = cls.valuestrtoparamlist(phydoc, name, value) @@ -137,10 +134,7 @@ class EmaneUniversalModel(EmaneModel): phynames.remove("frequencyofinterest") # append all PHY options to phydoc - map( lambda n: phy.appendChild(e.xmlparam(phydoc, n, \ - mac.valueof(n, values))), phynames) + map(lambda n: phy.appendChild(e.xmlparam(phydoc, n, mac.valueof(n, values))), phynames) if frequencies: phy.appendChild(frequencies) return phydoc - - diff --git a/daemon/core/location.py b/daemon/core/location.py index 824a54df..858f8af5 100644 --- a/daemon/core/location.py +++ b/daemon/core/location.py @@ -1,42 +1,50 @@ -# -# CORE -# Copyright (c)2010-2013 the Boeing Company. -# See the LICENSE file included in this distribution. 
-# -# author: Jeff Ahrenholz -# -''' +""" location.py: definition of CoreLocation class that is a member of the Session object. Provides conversions between Cartesian and geographic coordinate systems. Depends on utm contributed module, from https://pypi.python.org/pypi/utm (version 0.3.0). -''' +""" from core.conf import ConfigurableManager -from core.api import coreapi +from core.enumerations import RegisterTlvs +from core.misc import log from core.misc import utm +logger = log.get_logger(__name__) + + class CoreLocation(ConfigurableManager): - ''' Member of session class for handling global location data. This keeps - track of a latitude/longitude/altitude reference point and scale in - order to convert between X,Y and geo coordinates. - - TODO: this could be updated to use more generic - Configurable/ConfigurableManager code like other Session objects - ''' - _name = "location" - _type = coreapi.CORE_TLV_REG_UTILITY - - def __init__(self, session): - ConfigurableManager.__init__(self, session) + """ + Member of session class for handling global location data. This keeps + track of a latitude/longitude/altitude reference point and scale in + order to convert between X,Y and geo coordinates. + + TODO: this could be updated to use more generic + Configurable/ConfigurableManager code like other Session objects + """ + name = "location" + config_type = RegisterTlvs.UTILITY.value + + def __init__(self): + """ + Creates a MobilityManager instance. + + :return: nothing + """ + ConfigurableManager.__init__(self) self.reset() self.zonemap = {} + self.refxyz = (0.0, 0.0, 0.0) + self.refscale = 1.0 + self.zoneshifts = {} + self.refgeo = (0.0, 0.0, 0.0) for n, l in utm.ZONE_LETTERS: self.zonemap[l] = n def reset(self): - ''' Reset to initial state. - ''' + """ + Reset to initial state. + """ # (x, y, z) coordinates of the point given by self.refgeo self.refxyz = (0.0, 0.0, 0.0) # decimal latitude, longitude, and altitude at the point (x, y, z) @@ -46,70 +54,97 @@ class CoreLocation(ConfigurableManager): # cached distance to refpt in other zones self.zoneshifts = {} - def configure_values(self, msg, values): - ''' Receive configuration message for setting the reference point - and scale. - ''' + def configure_values(self, config_data): + """ + Receive configuration message for setting the reference point + and scale. 
+ + :param core.conf.ConfigData config_data: configuration data for carrying out a configuration + :return: nothing + """ + values = config_data.data_values + if values is None: - self.session.info("location data missing") + logger.info("location data missing") return None values = values.split('|') + # Cartesian coordinate reference point - refx,refy = map(lambda x: float(x), values[0:2]) + refx, refy = map(lambda x: float(x), values[0:2]) refz = 0.0 self.refxyz = (refx, refy, refz) # Geographic reference point - lat,long,alt = map(lambda x: float(x), values[2:5]) - self.setrefgeo(lat, long, alt) + lat, lon, alt = map(lambda x: float(x), values[2:5]) + self.setrefgeo(lat, lon, alt) self.refscale = float(values[5]) - self.session.info("location configured: (%.2f,%.2f,%.2f) = " - "(%.5f,%.5f,%.5f) scale=%.2f" % - (self.refxyz[0], self.refxyz[1], self.refxyz[2], self.refgeo[0], - self.refgeo[1], self.refgeo[2], self.refscale)) - self.session.info("location configured: UTM(%.5f,%.5f,%.5f)" % - (self.refutm[1], self.refutm[2], self.refutm[3])) + logger.info("location configured: (%.2f,%.2f,%.2f) = (%.5f,%.5f,%.5f) scale=%.2f" % + (self.refxyz[0], self.refxyz[1], self.refxyz[2], self.refgeo[0], + self.refgeo[1], self.refgeo[2], self.refscale)) + logger.info("location configured: UTM(%.5f,%.5f,%.5f)" % + (self.refutm[1], self.refutm[2], self.refutm[3])) def px2m(self, val): - ''' Convert the specified value in pixels to meters using the - configured scale. The scale is given as s, where - 100 pixels = s meters. - ''' + """ + Convert the specified value in pixels to meters using the + configured scale. The scale is given as s, where + 100 pixels = s meters. + + :param val: value to use in converting to meters + :return: value converted to meters + """ return (val / 100.0) * self.refscale def m2px(self, val): - ''' Convert the specified value in meters to pixels using the - configured scale. The scale is given as s, where - 100 pixels = s meters. - ''' + """ + Convert the specified value in meters to pixels using the + configured scale. The scale is given as s, where + 100 pixels = s meters. + + :param val: value to convert to pixels + :return: value converted to pixels + """ if self.refscale == 0.0: return 0.0 return 100.0 * (val / self.refscale) def setrefgeo(self, lat, lon, alt): - ''' Record the geographical reference point decimal (lat, lon, alt) - and convert and store its UTM equivalent for later use. - ''' + """ + Record the geographical reference point decimal (lat, lon, alt) + and convert and store its UTM equivalent for later use. + + :param lat: latitude + :param lon: longitude + :param alt: altitude + :return: nothing + """ self.refgeo = (lat, lon, alt) # easting, northing, zone - (e, n, zonen, zonel) = utm.from_latlon(lat, lon) - self.refutm = ( (zonen, zonel), e, n, alt) + e, n, zonen, zonel = utm.from_latlon(lat, lon) + self.refutm = ((zonen, zonel), e, n, alt) def getgeo(self, x, y, z): - ''' Given (x, y, z) Cartesian coordinates, convert them to latitude, - longitude, and altitude based on the configured reference point - and scale. - ''' + """ + Given (x, y, z) Cartesian coordinates, convert them to latitude, + longitude, and altitude based on the configured reference point + and scale. 
+ + :param x: x value + :param y: y value + :param z: z value + :return: lat, lon, alt values for provided coordinates + :rtype: tuple + """ # shift (x,y,z) over to reference point (x,y,z) - x = x - self.refxyz[0] + x -= self.refxyz[0] y = -(y - self.refxyz[1]) if z is None: z = self.refxyz[2] else: - z = z - self.refxyz[2] + z -= self.refxyz[2] # use UTM coordinates since unit is meters zone = self.refutm[0] if zone == "": - raise ValueError, "reference point not configured" + raise ValueError("reference point not configured") e = self.refutm[1] + self.px2m(x) n = self.refutm[2] + self.px2m(y) alt = self.refutm[3] + self.px2m(z) @@ -117,23 +152,29 @@ class CoreLocation(ConfigurableManager): try: lat, lon = utm.to_latlon(e, n, zone[0], zone[1]) except utm.OutOfRangeError: - self.info("UTM out of range error for e=%s n=%s zone=%s" \ - "xyz=(%s,%s,%s)" % (e, n, zone, x, y, z)) - (lat, lon) = self.refgeo[:2] - #self.info("getgeo(%s,%s,%s) e=%s n=%s zone=%s lat,lon,alt=" \ + logger.exception("UTM out of range error for n=%s zone=%s xyz=(%s,%s,%s)", n, zone, x, y, z) + lat, lon = self.refgeo[:2] + # self.info("getgeo(%s,%s,%s) e=%s n=%s zone=%s lat,lon,alt=" \ # "%.3f,%.3f,%.3f" % (x, y, z, e, n, zone, lat, lon, alt)) - return (lat, lon, alt) + return lat, lon, alt def getxyz(self, lat, lon, alt): - ''' Given latitude, longitude, and altitude location data, convert them - to (x, y, z) Cartesian coordinates based on the configured - reference point and scale. Lat/lon is converted to UTM meter - coordinates, UTM zones are accounted for, and the scale turns - meters to pixels. - ''' + """ + Given latitude, longitude, and altitude location data, convert them + to (x, y, z) Cartesian coordinates based on the configured + reference point and scale. Lat/lon is converted to UTM meter + coordinates, UTM zones are accounted for, and the scale turns + meters to pixels. + + :param lat: latitude + :param lon: longitude + :param alt: altitude + :return: converted x, y, z coordinates + :rtype: tuple + """ # convert lat/lon to UTM coordinates in meters - (e, n, zonen, zonel) = utm.from_latlon(lat, lon) - (rlat, rlon, ralt) = self.refgeo + e, n, zonen, zonel = utm.from_latlon(lat, lon) + rlat, rlon, ralt = self.refgeo xshift = self.geteastingshift(zonen, zonel) if xshift is None: xm = e - self.refutm[1] @@ -145,31 +186,40 @@ class CoreLocation(ConfigurableManager): else: ym = n + yshift zm = alt - ralt - + # shift (x,y,z) over to reference point (x,y,z) x = self.m2px(xm) + self.refxyz[0] y = -(self.m2px(ym) + self.refxyz[1]) z = self.m2px(zm) + self.refxyz[2] - return (x, y, z) + return x, y, z def geteastingshift(self, zonen, zonel): - ''' If the lat, lon coordinates being converted are located in a + """ + If the lat, lon coordinates being converted are located in a different UTM zone than the canvas reference point, the UTM meters may need to be shifted. - This picks a reference point in the same longitudinal band + This picks a reference point in the same longitudinal band (UTM zone number) as the provided zone, to calculate the shift in meters for the x coordinate. 
- ''' + + :param zonen: zonen + :param zonel: zone1 + :return: the x shift value + """ rzonen = int(self.refutm[0][0]) + # same zone number, no x shift required if zonen == rzonen: - return None # same zone number, no x shift required + return None z = (zonen, zonel) + # x shift already calculated, cached if z in self.zoneshifts and self.zoneshifts[z][0] is not None: - return self.zoneshifts[z][0] # x shift already calculated, cached - - (rlat, rlon, ralt) = self.refgeo - lon2 = rlon + 6*(zonen - rzonen) # ea. zone is 6deg band - (e2, n2, zonen2, zonel2) = utm.from_latlon(rlat, lon2) # ignore northing + return self.zoneshifts[z][0] + + rlat, rlon, ralt = self.refgeo + # ea. zone is 6deg band + lon2 = rlon + 6 * (zonen - rzonen) + # ignore northing + e2, n2, zonen2, zonel2 = utm.from_latlon(rlat, lon2) # NOTE: great circle distance used here, not reference ellipsoid! xshift = utm.haversine(rlon, rlat, lon2, rlat) - e2 # cache the return value @@ -178,27 +228,35 @@ class CoreLocation(ConfigurableManager): yshift = self.zoneshifts[z][1] self.zoneshifts[z] = (xshift, yshift) return xshift - + def getnorthingshift(self, zonen, zonel): - ''' If the lat, lon coordinates being converted are located in a + """ + If the lat, lon coordinates being converted are located in a different UTM zone than the canvas reference point, the UTM meters may need to be shifted. This picks a reference point in the same latitude band (UTM zone letter) as the provided zone, to calculate the shift in meters for the y coordinate. - ''' + + :param zonen: zonen + :param zonel: zone1 + :return: calculated y shift + """ rzonel = self.refutm[0][1] + # same zone letter, no y shift required if zonel == rzonel: - return None # same zone letter, no y shift required + return None z = (zonen, zonel) + # y shift already calculated, cached if z in self.zoneshifts and self.zoneshifts[z][1] is not None: - return self.zoneshifts[z][1] # y shift already calculated, cached - - (rlat, rlon, ralt) = self.refgeo + return self.zoneshifts[z][1] + + rlat, rlon, ralt = self.refgeo # zonemap is used to calculate degrees difference between zone letters latshift = self.zonemap[zonel] - self.zonemap[rzonel] - lat2 = rlat + latshift # ea. latitude band is 8deg high - (e2, n2, zonen2, zonel2) = utm.from_latlon(lat2, rlon) + # ea. latitude band is 8deg high + lat2 = rlat + latshift + e2, n2, zonen2, zonel2 = utm.from_latlon(lat2, rlon) # NOTE: great circle distance used here, not reference ellipsoid yshift = -(utm.haversine(rlon, rlat, rlon, lat2) + n2) # cache the return value @@ -209,26 +267,32 @@ class CoreLocation(ConfigurableManager): return yshift def getutmzoneshift(self, e, n): - ''' Given UTM easting and northing values, check if they fall outside + """ + Given UTM easting and northing values, check if they fall outside the reference point's zone boundary. Return the UTM coordinates in a different zone and the new zone if they do. Zone lettering is only changed when the reference point is in the opposite hemisphere. 
- ''' + + :param e: easting value + :param n: northing value + :return: modified easting, northing, and zone values + :rtype: tuple + """ zone = self.refutm[0] - (rlat, rlon, ralt) = self.refgeo + rlat, rlon, ralt = self.refgeo if e > 834000 or e < 166000: - num_zones = (int(e) - 166000) / (utm.R/10) + num_zones = (int(e) - 166000) / (utm.R / 10) # estimate number of zones to shift, E (positive) or W (negative) rlon2 = self.refgeo[1] + (num_zones * 6) - (e2, n2, zonen2, zonel2) = utm.from_latlon(rlat, rlon2) + e2, n2, zonen2, zonel2 = utm.from_latlon(rlat, rlon2) xshift = utm.haversine(rlon, rlat, rlon2, rlat) # after >3 zones away from refpt, the above estimate won't work # (the above estimate could be improved) if not 100000 <= (e - xshift) < 1000000: # move one more zone away - num_zones = (abs(num_zones)+1) * (abs(num_zones)/num_zones) + num_zones = (abs(num_zones) + 1) * (abs(num_zones) / num_zones) rlon2 = self.refgeo[1] + (num_zones * 6) - (e2, n2, zonen2, zonel2) = utm.from_latlon(rlat, rlon2) + e2, n2, zonen2, zonel2 = utm.from_latlon(rlat, rlon2) xshift = utm.haversine(rlon, rlat, rlon2, rlat) e = e - xshift zone = (zonen2, zonel2) @@ -240,7 +304,4 @@ class CoreLocation(ConfigurableManager): # refpt in southern hemisphere and we crossed north of equator n -= 10000000 zone = (zone[0], 'N') - return (e, n, zone) - - - + return e, n, zone diff --git a/daemon/core/misc/LatLongUTMconversion.py b/daemon/core/misc/LatLongUTMconversion.py index 2c3bf238..4f7c13dc 100755 --- a/daemon/core/misc/LatLongUTMconversion.py +++ b/daemon/core/misc/LatLongUTMconversion.py @@ -5,9 +5,9 @@ from math import pi, sin, cos, tan, sqrt -#LatLong- UTM conversion..h -#definitions for lat/long to UTM and UTM to lat/lng conversions -#include +# LatLong- UTM conversion..h +# definitions for lat/long to UTM and UTM to lat/lng conversions +# include _deg2rad = pi / 180.0 _rad2deg = 180.0 / pi @@ -16,48 +16,49 @@ _EquatorialRadius = 2 _eccentricitySquared = 3 _ellipsoid = [ -# id, Ellipsoid name, Equatorial Radius, square of eccentricity -# first once is a placeholder only, To allow array indices to match id numbers - [ -1, "Placeholder", 0, 0], - [ 1, "Airy", 6377563, 0.00667054], - [ 2, "Australian National", 6378160, 0.006694542], - [ 3, "Bessel 1841", 6377397, 0.006674372], - [ 4, "Bessel 1841 (Nambia] ", 6377484, 0.006674372], - [ 5, "Clarke 1866", 6378206, 0.006768658], - [ 6, "Clarke 1880", 6378249, 0.006803511], - [ 7, "Everest", 6377276, 0.006637847], - [ 8, "Fischer 1960 (Mercury] ", 6378166, 0.006693422], - [ 9, "Fischer 1968", 6378150, 0.006693422], - [ 10, "GRS 1967", 6378160, 0.006694605], - [ 11, "GRS 1980", 6378137, 0.00669438], - [ 12, "Helmert 1906", 6378200, 0.006693422], - [ 13, "Hough", 6378270, 0.00672267], - [ 14, "International", 6378388, 0.00672267], - [ 15, "Krassovsky", 6378245, 0.006693422], - [ 16, "Modified Airy", 6377340, 0.00667054], - [ 17, "Modified Everest", 6377304, 0.006637847], - [ 18, "Modified Fischer 1960", 6378155, 0.006693422], - [ 19, "South American 1969", 6378160, 0.006694542], - [ 20, "WGS 60", 6378165, 0.006693422], - [ 21, "WGS 66", 6378145, 0.006694542], - [ 22, "WGS-72", 6378135, 0.006694318], - [ 23, "WGS-84", 6378137, 0.00669438] + # id, Ellipsoid name, Equatorial Radius, square of eccentricity + # first once is a placeholder only, To allow array indices to match id numbers + [-1, "Placeholder", 0, 0], + [1, "Airy", 6377563, 0.00667054], + [2, "Australian National", 6378160, 0.006694542], + [3, "Bessel 1841", 6377397, 0.006674372], + [4, "Bessel 1841 (Nambia] 
", 6377484, 0.006674372], + [5, "Clarke 1866", 6378206, 0.006768658], + [6, "Clarke 1880", 6378249, 0.006803511], + [7, "Everest", 6377276, 0.006637847], + [8, "Fischer 1960 (Mercury] ", 6378166, 0.006693422], + [9, "Fischer 1968", 6378150, 0.006693422], + [10, "GRS 1967", 6378160, 0.006694605], + [11, "GRS 1980", 6378137, 0.00669438], + [12, "Helmert 1906", 6378200, 0.006693422], + [13, "Hough", 6378270, 0.00672267], + [14, "International", 6378388, 0.00672267], + [15, "Krassovsky", 6378245, 0.006693422], + [16, "Modified Airy", 6377340, 0.00667054], + [17, "Modified Everest", 6377304, 0.006637847], + [18, "Modified Fischer 1960", 6378155, 0.006693422], + [19, "South American 1969", 6378160, 0.006694542], + [20, "WGS 60", 6378165, 0.006693422], + [21, "WGS 66", 6378145, 0.006694542], + [22, "WGS-72", 6378135, 0.006694318], + [23, "WGS-84", 6378137, 0.00669438] ] -#Reference ellipsoids derived from Peter H. Dana's website- -#http://www.utexas.edu/depts/grg/gcraft/notes/datum/elist.html -#Department of Geography, University of Texas at Austin -#Internet: pdana@mail.utexas.edu -#3/22/95 -#Source -#Defense Mapping Agency. 1987b. DMA Technical Report: Supplement to Department of Defense World Geodetic System -#1984 Technical Report. Part I and II. Washington, DC: Defense Mapping Agency +# Reference ellipsoids derived from Peter H. Dana's website- +# http://www.utexas.edu/depts/grg/gcraft/notes/datum/elist.html +# Department of Geography, University of Texas at Austin +# Internet: pdana@mail.utexas.edu +# 3/22/95 -#def LLtoUTM(int ReferenceEllipsoid, const double Lat, const double Long, +# Source +# Defense Mapping Agency. 1987b. DMA Technical Report: Supplement to Department of Defense World Geodetic System +# 1984 Technical Report. Part I and II. Washington, DC: Defense Mapping Agency + +# def LLtoUTM(int ReferenceEllipsoid, const double Lat, const double Long, # double &UTMNorthing, double &UTMEasting, char* UTMZone) -def LLtoUTM(ReferenceEllipsoid, Lat, Long, zone = None): +def LLtoUTM(ReferenceEllipsoid, Lat, Long, zone=None): """converts lat/long to UTM coords. Equations from USGS Bulletin 1532 East Longitudes are positive, West longitudes are negative. North latitudes are positive, South latitudes are negative @@ -68,14 +69,14 @@ def LLtoUTM(ReferenceEllipsoid, Lat, Long, zone = None): eccSquared = _ellipsoid[ReferenceEllipsoid][_eccentricitySquared] k0 = 0.9996 - #Make sure the longitude is between -180.00 .. 179.9 - LongTemp = (Long+180)-int((Long+180)/360)*360-180 # -180.00 .. 179.9 + # Make sure the longitude is between -180.00 .. 179.9 + LongTemp = (Long + 180) - int((Long + 180) / 360) * 360 - 180 # -180.00 .. 
179.9 - LatRad = Lat*_deg2rad - LongRad = LongTemp*_deg2rad + LatRad = Lat * _deg2rad + LongRad = LongTemp * _deg2rad if zone is None: - ZoneNumber = int((LongTemp + 180)/6) + 1 + ZoneNumber = int((LongTemp + 180) / 6) + 1 else: ZoneNumber = zone @@ -84,46 +85,50 @@ def LLtoUTM(ReferenceEllipsoid, Lat, Long, zone = None): # Special zones for Svalbard if Lat >= 72.0 and Lat < 84.0: - if LongTemp >= 0.0 and LongTemp < 9.0:ZoneNumber = 31 - elif LongTemp >= 9.0 and LongTemp < 21.0: ZoneNumber = 33 - elif LongTemp >= 21.0 and LongTemp < 33.0: ZoneNumber = 35 - elif LongTemp >= 33.0 and LongTemp < 42.0: ZoneNumber = 37 + if LongTemp >= 0.0 and LongTemp < 9.0: + ZoneNumber = 31 + elif LongTemp >= 9.0 and LongTemp < 21.0: + ZoneNumber = 33 + elif LongTemp >= 21.0 and LongTemp < 33.0: + ZoneNumber = 35 + elif LongTemp >= 33.0 and LongTemp < 42.0: + ZoneNumber = 37 - LongOrigin = (ZoneNumber - 1)*6 - 180 + 3 #+3 puts origin in middle of zone + LongOrigin = (ZoneNumber - 1) * 6 - 180 + 3 # +3 puts origin in middle of zone LongOriginRad = LongOrigin * _deg2rad - #compute the UTM Zone from the latitude and longitude + # compute the UTM Zone from the latitude and longitude UTMZone = "%d%c" % (ZoneNumber, _UTMLetterDesignator(Lat)) - eccPrimeSquared = (eccSquared)/(1-eccSquared) - N = a/sqrt(1-eccSquared*sin(LatRad)*sin(LatRad)) - T = tan(LatRad)*tan(LatRad) - C = eccPrimeSquared*cos(LatRad)*cos(LatRad) - A = cos(LatRad)*(LongRad-LongOriginRad) + eccPrimeSquared = (eccSquared) / (1 - eccSquared) + N = a / sqrt(1 - eccSquared * sin(LatRad) * sin(LatRad)) + T = tan(LatRad) * tan(LatRad) + C = eccPrimeSquared * cos(LatRad) * cos(LatRad) + A = cos(LatRad) * (LongRad - LongOriginRad) - M = a*((1 - - eccSquared/4 - - 3*eccSquared*eccSquared/64 - - 5*eccSquared*eccSquared*eccSquared/256)*LatRad - - (3*eccSquared/8 - + 3*eccSquared*eccSquared/32 - + 45*eccSquared*eccSquared*eccSquared/1024)*sin(2*LatRad) - + (15*eccSquared*eccSquared/256 + 45*eccSquared*eccSquared*eccSquared/1024)*sin(4*LatRad) - - (35*eccSquared*eccSquared*eccSquared/3072)*sin(6*LatRad)) + M = a * ((1 + - eccSquared / 4 + - 3 * eccSquared * eccSquared / 64 + - 5 * eccSquared * eccSquared * eccSquared / 256) * LatRad + - (3 * eccSquared / 8 + + 3 * eccSquared * eccSquared / 32 + + 45 * eccSquared * eccSquared * eccSquared / 1024) * sin(2 * LatRad) + + (15 * eccSquared * eccSquared / 256 + 45 * eccSquared * eccSquared * eccSquared / 1024) * sin(4 * LatRad) + - (35 * eccSquared * eccSquared * eccSquared / 3072) * sin(6 * LatRad)) - UTMEasting = (k0*N*(A+(1-T+C)*A*A*A/6 - + (5-18*T+T*T+72*C-58*eccPrimeSquared)*A*A*A*A*A/120) + UTMEasting = (k0 * N * (A + (1 - T + C) * A * A * A / 6 + + (5 - 18 * T + T * T + 72 * C - 58 * eccPrimeSquared) * A * A * A * A * A / 120) + 500000.0) - UTMNorthing = (k0*(M+N*tan(LatRad)*(A*A/2+(5-T+9*C+4*C*C)*A*A*A*A/24 - + (61 - -58*T - +T*T - +600*C - -330*eccPrimeSquared)*A*A*A*A*A*A/720))) + UTMNorthing = (k0 * (M + N * tan(LatRad) * (A * A / 2 + (5 - T + 9 * C + 4 * C * C) * A * A * A * A / 24 + + (61 + - 58 * T + + T * T + + 600 * C + - 330 * eccPrimeSquared) * A * A * A * A * A * A / 720))) if Lat < 0: - UTMNorthing = UTMNorthing + 10000000.0; #10000000 meter offset for southern hemisphere + UTMNorthing = UTMNorthing + 10000000.0; # 10000000 meter offset for southern hemisphere return (UTMZone, UTMEasting, UTMNorthing) @@ -132,29 +137,51 @@ def _UTMLetterDesignator(Lat): latitude returns 'Z' if latitude is outside the UTM limits of 84N to 80S Written by Chuck Gantz- chuck.gantz@globalstar.com""" - if 84 >= Lat >= 
72: return 'X' - elif 72 > Lat >= 64: return 'W' - elif 64 > Lat >= 56: return 'V' - elif 56 > Lat >= 48: return 'U' - elif 48 > Lat >= 40: return 'T' - elif 40 > Lat >= 32: return 'S' - elif 32 > Lat >= 24: return 'R' - elif 24 > Lat >= 16: return 'Q' - elif 16 > Lat >= 8: return 'P' - elif 8 > Lat >= 0: return 'N' - elif 0 > Lat >= -8: return 'M' - elif -8> Lat >= -16: return 'L' - elif -16 > Lat >= -24: return 'K' - elif -24 > Lat >= -32: return 'J' - elif -32 > Lat >= -40: return 'H' - elif -40 > Lat >= -48: return 'G' - elif -48 > Lat >= -56: return 'F' - elif -56 > Lat >= -64: return 'E' - elif -64 > Lat >= -72: return 'D' - elif -72 > Lat >= -80: return 'C' - else: return 'Z' # if the Latitude is outside the UTM limits + if 84 >= Lat >= 72: + return 'X' + elif 72 > Lat >= 64: + return 'W' + elif 64 > Lat >= 56: + return 'V' + elif 56 > Lat >= 48: + return 'U' + elif 48 > Lat >= 40: + return 'T' + elif 40 > Lat >= 32: + return 'S' + elif 32 > Lat >= 24: + return 'R' + elif 24 > Lat >= 16: + return 'Q' + elif 16 > Lat >= 8: + return 'P' + elif 8 > Lat >= 0: + return 'N' + elif 0 > Lat >= -8: + return 'M' + elif -8 > Lat >= -16: + return 'L' + elif -16 > Lat >= -24: + return 'K' + elif -24 > Lat >= -32: + return 'J' + elif -32 > Lat >= -40: + return 'H' + elif -40 > Lat >= -48: + return 'G' + elif -48 > Lat >= -56: + return 'F' + elif -56 > Lat >= -64: + return 'E' + elif -64 > Lat >= -72: + return 'D' + elif -72 > Lat >= -80: + return 'C' + else: + return 'Z' # if the Latitude is outside the UTM limits -#void UTMtoLL(int ReferenceEllipsoid, const double UTMNorthing, const double UTMEasting, const char* UTMZone, + +# void UTMtoLL(int ReferenceEllipsoid, const double UTMNorthing, const double UTMEasting, const char* UTMZone, # double& Lat, double& Long ) def UTMtoLL(ReferenceEllipsoid, northing, easting, zone): @@ -168,10 +195,10 @@ Converted to Python by Russ Nelson """ k0 = 0.9996 a = _ellipsoid[ReferenceEllipsoid][_EquatorialRadius] eccSquared = _ellipsoid[ReferenceEllipsoid][_eccentricitySquared] - e1 = (1-sqrt(1-eccSquared))/(1+sqrt(1-eccSquared)) - #NorthernHemisphere; //1 for northern hemispher, 0 for southern + e1 = (1 - sqrt(1 - eccSquared)) / (1 + sqrt(1 - eccSquared)) + # NorthernHemisphere; //1 for northern hemispher, 0 for southern - x = easting - 500000.0 #remove 500,000 meter offset for longitude + x = easting - 500000.0 # remove 500,000 meter offset for longitude y = northing ZoneLetter = zone[-1] @@ -180,37 +207,40 @@ Converted to Python by Russ Nelson """ NorthernHemisphere = 1 # point is in northern hemisphere else: NorthernHemisphere = 0 # point is in southern hemisphere - y -= 10000000.0 # remove 10,000,000 meter offset used for southern hemisphere + y -= 10000000.0 # remove 10,000,000 meter offset used for southern hemisphere - LongOrigin = (ZoneNumber - 1)*6 - 180 + 3 # +3 puts origin in middle of zone + LongOrigin = (ZoneNumber - 1) * 6 - 180 + 3 # +3 puts origin in middle of zone - eccPrimeSquared = (eccSquared)/(1-eccSquared) + eccPrimeSquared = (eccSquared) / (1 - eccSquared) M = y / k0 - mu = M/(a*(1-eccSquared/4-3*eccSquared*eccSquared/64-5*eccSquared*eccSquared*eccSquared/256)) + mu = M / ( + a * (1 - eccSquared / 4 - 3 * eccSquared * eccSquared / 64 - 5 * eccSquared * eccSquared * eccSquared / 256)) - phi1Rad = (mu + (3*e1/2-27*e1*e1*e1/32)*sin(2*mu) - + (21*e1*e1/16-55*e1*e1*e1*e1/32)*sin(4*mu) - +(151*e1*e1*e1/96)*sin(6*mu)) - phi1 = phi1Rad*_rad2deg; + phi1Rad = (mu + (3 * e1 / 2 - 27 * e1 * e1 * e1 / 32) * sin(2 * mu) + + (21 * e1 * e1 / 16 - 55 * e1 * 
e1 * e1 * e1 / 32) * sin(4 * mu) + + (151 * e1 * e1 * e1 / 96) * sin(6 * mu)) + phi1 = phi1Rad * _rad2deg; - N1 = a/sqrt(1-eccSquared*sin(phi1Rad)*sin(phi1Rad)) - T1 = tan(phi1Rad)*tan(phi1Rad) - C1 = eccPrimeSquared*cos(phi1Rad)*cos(phi1Rad) - R1 = a*(1-eccSquared)/pow(1-eccSquared*sin(phi1Rad)*sin(phi1Rad), 1.5) - D = x/(N1*k0) + N1 = a / sqrt(1 - eccSquared * sin(phi1Rad) * sin(phi1Rad)) + T1 = tan(phi1Rad) * tan(phi1Rad) + C1 = eccPrimeSquared * cos(phi1Rad) * cos(phi1Rad) + R1 = a * (1 - eccSquared) / pow(1 - eccSquared * sin(phi1Rad) * sin(phi1Rad), 1.5) + D = x / (N1 * k0) - Lat = phi1Rad - (N1*tan(phi1Rad)/R1)*(D*D/2-(5+3*T1+10*C1-4*C1*C1-9*eccPrimeSquared)*D*D*D*D/24 - +(61+90*T1+298*C1+45*T1*T1-252*eccPrimeSquared-3*C1*C1)*D*D*D*D*D*D/720) + Lat = phi1Rad - (N1 * tan(phi1Rad) / R1) * ( + D * D / 2 - (5 + 3 * T1 + 10 * C1 - 4 * C1 * C1 - 9 * eccPrimeSquared) * D * D * D * D / 24 + + (61 + 90 * T1 + 298 * C1 + 45 * T1 * T1 - 252 * eccPrimeSquared - 3 * C1 * C1) * D * D * D * D * D * D / 720) Lat = Lat * _rad2deg - Long = (D-(1+2*T1+C1)*D*D*D/6+(5-2*C1+28*T1-3*C1*C1+8*eccPrimeSquared+24*T1*T1) - *D*D*D*D*D/120)/cos(phi1Rad) + Long = (D - (1 + 2 * T1 + C1) * D * D * D / 6 + ( + 5 - 2 * C1 + 28 * T1 - 3 * C1 * C1 + 8 * eccPrimeSquared + 24 * T1 * T1) + * D * D * D * D * D / 120) / cos(phi1Rad) Long = LongOrigin + Long * _rad2deg return (Lat, Long) + if __name__ == '__main__': (z, e, n) = LLtoUTM(23, 45.00, -75.00) print z, e, n print UTMtoLL(23, n, e, z) - diff --git a/daemon/core/misc/event.py b/daemon/core/misc/event.py index 1f6cd795..e4714a73 100644 --- a/daemon/core/misc/event.py +++ b/daemon/core/misc/event.py @@ -1,76 +1,141 @@ -# -# CORE -# Copyright (c)2012 the Boeing Company. -# See the LICENSE file included in this distribution. -# -# authors: Tom Goff -# -''' +""" event.py: event loop implementation using a heap queue and threads. -''' -import time -import threading +""" + import heapq +import threading +import time + +from core.misc import log + +logger = log.get_logger(__name__) + + +class Timer(threading.Thread): + """ + Based on threading.Timer but cancel() returns if the timer was + already running. + """ + + def __init__(self, interval, function, args=None, kwargs=None): + """ + Create a Timer instance. + + :param interval: time interval + :param function: function to call when timer finishes + :param args: function arguments + :param kwargs: function keyword arguments + """ + super(Timer, self).__init__() + self.interval = interval + self.function = function + + self.finished = threading.Event() + self._running = threading.Lock() + + # validate arguments were provided + if args: + self.args = args + else: + self.args = [] + + # validate keyword arguments were provided + if kwargs: + self.kwargs = kwargs + else: + self.kwargs = {} + + def cancel(self): + """ + Stop the timer if it hasn't finished yet. Return False if + the timer was already running. + + :return: True if canceled, False otherwise + :rtype: bool + """ + locked = self._running.acquire(False) + if locked: + self.finished.set() + self._running.release() + return locked + + def run(self): + """ + Run the timer. + + :return: nothing + """ + self.finished.wait(self.interval) + with self._running: + if not self.finished.is_set(): + self.function(*self.args, **self.kwargs) + self.finished.set() + + +class Event(object): + """ + Provides event objects that can be used within the EventLoop class. + """ + + def __init__(self, eventnum, event_time, func, *args, **kwds): + """ + Create an Event instance. 
+ + :param eventnum: event number + :param event_time: event time + :param func: event function + :param args: function arguments + :param kwds: function keyword arguments + """ + self.eventnum = eventnum + self.time = event_time + self.func = func + self.args = args + self.kwds = kwds + self.canceled = False + + def __cmp__(self, other): + """ + Comparison function. + + :param Event other: event to compare with + :return: comparison result + :rtype: int + """ + tmp = cmp(self.time, other.time) + if tmp == 0: + tmp = cmp(self.eventnum, other.eventnum) + return tmp + + def run(self): + """ + Run an event. + + :return: nothing + """ + if self.canceled: + return + self.func(*self.args, **self.kwds) + + def cancel(self): + """ + Cancel event. + + :return: nothing + """ + # XXX not thread-safe + self.canceled = True + class EventLoop(object): - - class Timer(threading.Thread): - '''\ - Based on threading.Timer but cancel() returns if the timer was - already running. - ''' - - def __init__(self, interval, function, args=[], kwargs={}): - super(EventLoop.Timer, self).__init__() - self.interval = interval - self.function = function - self.args = args - self.kwargs = kwargs - self.finished = threading.Event() - self._running = threading.Lock() - - def cancel(self): - '''\ - Stop the timer if it hasn't finished yet. Return False if - the timer was already running. - ''' - locked = self._running.acquire(False) - if locked: - self.finished.set() - self._running.release() - return locked - - def run(self): - self.finished.wait(self.interval) - with self._running: - if not self.finished.is_set(): - self.function(*self.args, **self.kwargs) - self.finished.set() - - class Event(object): - def __init__(self, eventnum, time, func, *args, **kwds): - self.eventnum = eventnum - self.time = time - self.func = func - self.args = args - self.kwds = kwds - self.canceled = False - - def __cmp__(self, other): - tmp = cmp(self.time, other.time) - if tmp == 0: - tmp = cmp(self.eventnum, other.eventnum) - return tmp - - def run(self): - if self.canceled: - return - self.func(*self.args, **self.kwds) - - def cancel(self): - self.canceled = True # XXX not thread-safe + """ + Provides an event loop for running events. + """ def __init__(self): + """ + Creates a EventLoop instance. + """ self.lock = threading.RLock() self.queue = [] self.eventnum = 0 @@ -79,6 +144,11 @@ class EventLoop(object): self.start = None def __run_events(self): + """ + Run events. + + :return: nothing + """ schedule = False while True: with self.lock: @@ -91,23 +161,34 @@ class EventLoop(object): event = heapq.heappop(self.queue) assert event.time <= now event.run() + with self.lock: self.timer = None if schedule: self.__schedule_event() def __schedule_event(self): + """ + Schedule event. + + :return: nothing + """ with self.lock: assert self.running if not self.queue: return delay = self.queue[0].time - time.time() assert self.timer is None - self.timer = EventLoop.Timer(delay, self.__run_events) + self.timer = Timer(delay, self.__run_events) self.timer.daemon = True self.timer.start() def run(self): + """ + Start event loop. + + :return: nothing + """ with self.lock: if self.running: return @@ -118,6 +199,11 @@ class EventLoop(object): self.__schedule_event() def stop(self): + """ + Stop event loop. + + :return: nothing + """ with self.lock: if not self.running: return @@ -130,13 +216,23 @@ class EventLoop(object): self.start = None def add_event(self, delaysec, func, *args, **kwds): + """ + Add an event to the event loop. 
+ + :param int delaysec: delay in seconds for event + :param func: event function + :param args: event arguments + :param kwds: event keyword arguments + :return: created event + :rtype: Event + """ with self.lock: eventnum = self.eventnum self.eventnum += 1 evtime = float(delaysec) if self.running: evtime += time.time() - event = self.Event(eventnum, evtime, func, *args, **kwds) + event = Event(eventnum, evtime, func, *args, **kwds) if self.queue: prevhead = self.queue[0] @@ -152,12 +248,14 @@ class EventLoop(object): self.__schedule_event() return event + +# TODO: move example to documentation def example(): loop = EventLoop() def msg(arg): delta = time.time() - loop.start - print delta, 'arg:', arg + logger.debug("%s arg: %s", delta, arg) def repeat(interval, count): count -= 1 diff --git a/daemon/core/misc/ipaddr.py b/daemon/core/misc/ipaddr.py deleted file mode 100644 index d6600f7d..00000000 --- a/daemon/core/misc/ipaddr.py +++ /dev/null @@ -1,230 +0,0 @@ -# -# CORE -# Copyright (c)2010-2012 the Boeing Company. -# See the LICENSE file included in this distribution. -# -# author: Tom Goff -# -''' -ipaddr.py: helper objects for dealing with IPv4/v6 addresses. -''' - -import socket -import struct -import random - -AF_INET = socket.AF_INET -AF_INET6 = socket.AF_INET6 - -class MacAddr(object): - def __init__(self, addr): - self.addr = addr - - def __str__(self): - return ":".join(map(lambda x: ("%02x" % ord(x)), self.addr)) - - def tolinklocal(self): - ''' Convert the MAC address to a IPv6 link-local address, using EUI 48 - to EUI 64 conversion process per RFC 5342. - ''' - if not self.addr: - return IPAddr.fromstring("::") - tmp = struct.unpack("!Q", '\x00\x00' + self.addr)[0] - nic = long(tmp) & 0x000000FFFFFFL - oui = long(tmp) & 0xFFFFFF000000L - # toggle U/L bit - oui ^= 0x020000000000L - # append EUI-48 octets - oui = (oui << 16) | 0xFFFE000000L - return IPAddr(AF_INET6, struct.pack("!QQ", 0xfe80 << 48, oui | nic)) - - @classmethod - def fromstring(cls, s): - addr = "".join(map(lambda x: chr(int(x, 16)), s.split(":"))) - return cls(addr) - - @classmethod - def random(cls): - tmp = random.randint(0, 0xFFFFFF) - tmp |= 0x00163E << 24 # use the Xen OID 00:16:3E - tmpbytes = struct.pack("!Q", tmp) - return cls(tmpbytes[2:]) - -class IPAddr(object): - def __init__(self, af, addr): - # check if (af, addr) is valid - if not socket.inet_ntop(af, addr): - raise ValueError, "invalid af/addr" - self.af = af - self.addr = addr - - def isIPv4(self): - return self.af == AF_INET - - def isIPv6(self): - return self.af == AF_INET6 - - def __str__(self): - return socket.inet_ntop(self.af, self.addr) - - def __eq__(self, other): - try: - return other.af == self.af and other.addr == self.addr - except: - return False - - def __add__(self, other): - try: - carry = int(other) - except: - return NotImplemented - tmp = map(lambda x: ord(x), self.addr) - for i in xrange(len(tmp) - 1, -1, -1): - x = tmp[i] + carry - tmp[i] = x & 0xff - carry = x >> 8 - if carry == 0: - break - addr = "".join(map(lambda x: chr(x), tmp)) - return self.__class__(self.af, addr) - - def __sub__(self, other): - try: - tmp = -int(other) - except: - return NotImplemented - return self.__add__(tmp) - - @classmethod - def fromstring(cls, s): - for af in AF_INET, AF_INET6: - try: - return cls(af, socket.inet_pton(af, s)) - except Exception, e: - pass - raise e - - @staticmethod - def toint(s): - ''' convert IPv4 string to 32-bit integer - ''' - bin = socket.inet_pton(AF_INET, s) - return(struct.unpack('!I', bin)[0]) - -class 
IPPrefix(object): - def __init__(self, af, prefixstr): - "prefixstr format: address/prefixlen" - tmp = prefixstr.split("/") - if len(tmp) > 2: - raise ValueError, "invalid prefix: '%s'" % prefixstr - self.af = af - if self.af == AF_INET: - self.addrlen = 32 - elif self.af == AF_INET6: - self.addrlen = 128 - else: - raise ValueError, "invalid address family: '%s'" % self.af - if len(tmp) == 2: - self.prefixlen = int(tmp[1]) - else: - self.prefixlen = self.addrlen - self.prefix = socket.inet_pton(self.af, tmp[0]) - if self.addrlen > self.prefixlen: - addrbits = self.addrlen - self.prefixlen - netmask = ((1L << self.prefixlen) - 1) << addrbits - prefix = "" - for i in xrange(-1, -(addrbits >> 3) - 2, -1): - prefix = chr(ord(self.prefix[i]) & (netmask & 0xff)) + prefix - netmask >>= 8 - self.prefix = self.prefix[:i] + prefix - - def __str__(self): - return "%s/%s" % (socket.inet_ntop(self.af, self.prefix), - self.prefixlen) - - def __eq__(self, other): - try: - return other.af == self.af and \ - other.prefixlen == self.prefixlen and \ - other.prefix == self.prefix - except: - return False - - def __add__(self, other): - try: - tmp = int(other) - except: - return NotImplemented - a = IPAddr(self.af, self.prefix) + \ - (tmp << (self.addrlen - self.prefixlen)) - prefixstr = "%s/%s" % (a, self.prefixlen) - if self.__class__ == IPPrefix: - return self.__class__(self.af, prefixstr) - else: - return self.__class__(prefixstr) - - def __sub__(self, other): - try: - tmp = -int(other) - except: - return NotImplemented - return self.__add__(tmp) - - def addr(self, hostid): - tmp = int(hostid) - if (tmp == 1 or tmp == 0 or tmp == -1) and self.addrlen == self.prefixlen: - return IPAddr(self.af, self.prefix) - if tmp == 0 or \ - tmp > (1 << (self.addrlen - self.prefixlen)) - 1 or \ - (self.af == AF_INET and tmp == (1 << (self.addrlen - self.prefixlen)) - 1): - raise ValueError, "invalid hostid for prefix %s: %s" % (self, hostid) - addr = "" - for i in xrange(-1, -(self.addrlen >> 3) - 1, -1): - addr = chr(ord(self.prefix[i]) | (tmp & 0xff)) + addr - tmp >>= 8 - if not tmp: - break - addr = self.prefix[:i] + addr - return IPAddr(self.af, addr) - - def minaddr(self): - return self.addr(1) - - def maxaddr(self): - if self.af == AF_INET: - return self.addr((1 << (self.addrlen - self.prefixlen)) - 2) - else: - return self.addr((1 << (self.addrlen - self.prefixlen)) - 1) - - def numaddr(self): - return max(0, (1 << (self.addrlen - self.prefixlen)) - 2) - - def prefixstr(self): - return "%s" % socket.inet_ntop(self.af, self.prefix) - - def netmaskstr(self): - addrbits = self.addrlen - self.prefixlen - netmask = ((1L << self.prefixlen) - 1) << addrbits - netmaskbytes = struct.pack("!L", netmask) - return IPAddr(af=AF_INET, addr=netmaskbytes).__str__() - -class IPv4Prefix(IPPrefix): - def __init__(self, prefixstr): - IPPrefix.__init__(self, AF_INET, prefixstr) - -class IPv6Prefix(IPPrefix): - def __init__(self, prefixstr): - IPPrefix.__init__(self, AF_INET6, prefixstr) - -def isIPAddress(af, addrstr): - try: - tmp = socket.inet_pton(af, addrstr) - return True - except: - return False - -def isIPv4Address(addrstr): - return isIPAddress(AF_INET, addrstr) - -def isIPv6Address(addrstr): - return isIPAddress(AF_INET6, addrstr) diff --git a/daemon/core/misc/quagga.py b/daemon/core/misc/quagga.py index 4e8511ba..8df3361f 100644 --- a/daemon/core/misc/quagga.py +++ b/daemon/core/misc/quagga.py @@ -5,35 +5,76 @@ # # author: Tom Goff # -''' -quagga.py: helper class for generating Quagga configuration. 
-''' -import os.path +""" +quagga.py: helper class for generating Quagga configuration. +""" + from string import Template -def maketuple(obj): - if hasattr(obj, "__iter__"): - return tuple(obj) +from core.misc import utils + + +def addrstr(x): + if x.find(".") >= 0: + return "ip address %s" % x + elif x.find(":") >= 0: + return "ipv6 address %s" % x else: - return (obj,) + raise ValueError("invalid address: %s" % x) + class NetIf(object): - def __init__(self, name, addrlist = []): + """ + Represents a network interface. + """ + + def __init__(self, name, addrlist=None): + """ + Create a NetIf instance. + + :param str name: interface name + :param addrlist: address list for the interface + """ self.name = name - self.addrlist = addrlist + + if addrlist: + self.addrlist = addrlist + else: + self.addrlist = [] + class Conf(object): - def __init__(self, **kwds): - self.kwds = kwds + """ + Provides a configuration object. + """ + + def __init__(self, **kwargs): + """ + Create a Conf instance. + + :param dict kwargs: configuration keyword arguments + """ + self.kwargs = kwargs def __str__(self): - tmp = self.template.substitute(**self.kwds) - if tmp[-1] == '\n': + """ + Provides a string representation of a configuration object. + + :return: string representation + :rtype: str + """ + # TODO: seems like an error here + tmp = self.template.substitute(**self.kwargs) + if tmp[-1] == "\n": tmp = tmp[:-1] return tmp + class QuaggaOSPF6Interface(Conf): + """ + Provides quagga ospf6 interface functionality. + """ AF_IPV6_ID = 0 AF_IPV4_ID = 65 @@ -50,32 +91,40 @@ interface $interface ipv6 ospf6 lsafullness mincostlsa """) -# ip address $ipaddr/32 -# ipv6 ospf6 simhelloLLtoULRecv :$simhelloport -# !$ipaddr:$simhelloport + # ip address $ipaddr/32 + # ipv6 ospf6 simhelloLLtoULRecv :$simhelloport + # !$ipaddr:$simhelloport - def __init__(self, netif, instanceid = AF_IPV4_ID, - network = "manet-designated-router", **kwds): + def __init__(self, netif, instanceid=AF_IPV4_ID, network="manet-designated-router", **kwargs): + """ + Create a QuaggaOSPF6Interface instance. + + :param netif: network interface + :param int instanceid: instance id + :param network: network + :param dict kwargs: keyword arguments + """ self.netif = netif - def addrstr(x): - if x.find(".") >= 0: - return "ip address %s" % x - elif x.find(":") >= 0: - return "ipv6 address %s" % x - else: - raise Value, "invalid address: %s", x addr = "\n ".join(map(addrstr, netif.addrlist)) - self.instanceid = instanceid self.network = network - Conf.__init__(self, interface = netif.name, addr = addr, - instanceid = instanceid, network = network, **kwds) + Conf.__init__(self, interface=netif.name, addr=addr, + instanceid=instanceid, network=network, **kwargs) def name(self): + """ + Retrieve network interface name. + + :return: network interface name + :rtype: str + """ return self.netif.name -class QuaggaOSPF6(Conf): +class QuaggaOSPF6(Conf): + """ + Provides quagga ospf6 functionality. + """ template = Template("""\ $interfaces ! @@ -85,17 +134,25 @@ router ospf6 $redistribute """) - def __init__(self, ospf6ifs, area, routerid, - redistribute = "! no redistribute"): - ospf6ifs = maketuple(ospf6ifs) + def __init__(self, ospf6ifs, area, routerid, redistribute="! no redistribute"): + """ + Create a QuaggaOSPF6 instance. 
+ + :param list ospf6ifs: ospf6 interfaces + :param area: area + :param routerid: router id + :param str redistribute: redistribute value + """ + ospf6ifs = utils.maketuple(ospf6ifs) interfaces = "\n!\n".join(map(str, ospf6ifs)) - ospfifs = "\n ".join(map(lambda x: "interface %s area %s" % \ - (x.name(), area), ospf6ifs)) - Conf.__init__(self, interfaces = interfaces, routerid = routerid, - ospfifs = ospfifs, redistribute = redistribute) + ospfifs = "\n ".join(map(lambda x: "interface %s area %s" % (x.name(), area), ospf6ifs)) + Conf.__init__(self, interfaces=interfaces, routerid=routerid, ospfifs=ospfifs, redistribute=redistribute) class QuaggaConf(Conf): + """ + Provides quagga configuration functionality. + """ template = Template("""\ log file $logfile $debugs @@ -105,12 +162,18 @@ $routers $forwarding """) - def __init__(self, routers, logfile, debugs = ()): - routers = "\n!\n".join(map(str, maketuple(routers))) + def __init__(self, routers, logfile, debugs=()): + """ + Create a QuaggaConf instance. + + :param list routers: routers + :param str logfile: log file name + :param debugs: debug options + """ + routers = "\n!\n".join(map(str, utils.maketuple(routers))) if debugs: - debugs = "\n".join(maketuple(debugs)) + debugs = "\n".join(utils.maketuple(debugs)) else: debugs = "! no debugs" forwarding = "ip forwarding\nipv6 forwarding" - Conf.__init__(self, logfile = logfile, debugs = debugs, - routers = routers, forwarding = forwarding) + Conf.__init__(self, logfile=logfile, debugs=debugs, routers=routers, forwarding=forwarding) diff --git a/daemon/core/misc/utils.py b/daemon/core/misc/utils.py index 14bbcb46..32e24340 100644 --- a/daemon/core/misc/utils.py +++ b/daemon/core/misc/utils.py @@ -1,123 +1,235 @@ -# -# CORE -# Copyright (c)2010-2012 the Boeing Company. -# See the LICENSE file included in this distribution. -# -# authors: Tom Goff -# Jeff Ahrenholz -# -''' -utils.py: miscellaneous utility functions, wrappers around some subprocess -procedures. -''' +""" +Miscellaneous utility functions, wrappers around some subprocess procedures. +""" + +import ast +import os +import subprocess -import subprocess, os, ast import fcntl +import resource + +from core.misc import log + +logger = log.get_logger(__name__) + def closeonexec(fd): + """ + Close on execution of a shell process. + + :param fd: file descriptor to close + :return: nothing + """ fdflags = fcntl.fcntl(fd, fcntl.F_GETFD) fcntl.fcntl(fd, fcntl.F_SETFD, fdflags | fcntl.FD_CLOEXEC) -def checkexec(execlist): - for bin in execlist: - if which(bin) is None: - raise EnvironmentError, "executable not found: %s" % bin + +def check_executables(executables): + """ + Check executables, verify they exist and are executable. + + :param list[str] executables: executable to check + :return: nothing + :raises EnvironmentError: when an executable doesn't exist or is not executable + """ + for executable in executables: + if not is_exe(executable): + raise EnvironmentError("executable not found: %s" % executable) + + +def is_exe(file_path): + """ + Check if a given file path exists and is an executable file. 
+ + :param str file_path: file path to check + :return: True if the file is considered and executable file, False otherwise + :rtype: bool + """ + return os.path.isfile(file_path) and os.access(file_path, os.X_OK) + def which(program): - ''' From: http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python - ''' - def is_exe(fpath): - return os.path.isfile(fpath) and os.access(fpath, os.X_OK) + """ + From: http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python + :param str program: program to check for + :return: path if it exists, none otherwise + """ fpath, fname = os.path.split(program) if fpath: if is_exe(program): return program else: for path in os.environ["PATH"].split(os.pathsep): - path = path.strip('"') + path = path.strip("\"") exe_file = os.path.join(path, program) if is_exe(exe_file): return exe_file return None + def ensurepath(pathlist): + """ + Checks a list of paths are contained within the environment path, if not add it to the path. + + :param list[str] pathlist: list of paths to check + :return: nothing + """ searchpath = os.environ["PATH"].split(":") for p in set(pathlist): if p not in searchpath: os.environ["PATH"] += ":" + p + def maketuple(obj): + """ + Create a tuple from an object, or return the object itself. + + :param obj: object to convert to a tuple + :return: converted tuple or the object itself + :rtype: tuple + """ if hasattr(obj, "__iter__"): return tuple(obj) else: - return (obj,) - + return obj, + + +# TODO: remove unused parameter type def maketuplefromstr(s, type): - s.replace('\\', '\\\\') + """ + Create a tuple from a string. + + :param str s: string to convert to a tuple + :param type: type of tuple to convert to + :return: tuple from string + :rtype: tuple + """ + s.replace("\\", "\\\\") return ast.literal_eval(s) - #return tuple(type(i) for i in s[1:-1].split(',')) - #r = () - #for i in s.strip("()").split(','): - # r += (i.strip("' "), ) - # chop empty last element from "('a',)" strings - #if r[-1] == '': - # r = r[:-1] - #return r -def call(*args, **kwds): - return subprocess.call(*args, **kwds) -def mutecall(*args, **kwds): - kwds["stdout"] = open(os.devnull, "w") - kwds["stderr"] = subprocess.STDOUT - return call(*args, **kwds) +def mutecall(*args, **kwargs): + """ + Run a muted call command. -def check_call(*args, **kwds): - return subprocess.check_call(*args, **kwds) + :param list args: arguments for the command + :param dict kwargs: keyword arguments for the command + :return: command result + :rtype: int + """ + kwargs["stdout"] = open(os.devnull, "w") + kwargs["stderr"] = subprocess.STDOUT + return subprocess.call(*args, **kwargs) -def mutecheck_call(*args, **kwds): - kwds["stdout"] = open(os.devnull, "w") - kwds["stderr"] = subprocess.STDOUT - return subprocess.check_call(*args, **kwds) -def spawn(*args, **kwds): - return subprocess.Popen(*args, **kwds).pid +def mutecheck_call(*args, **kwargs): + """ + Run a muted check call command. + + :param list args: arguments for the command + :param dict kwargs: keyword arguments for the command + :return: command result + :rtype: int + """ + kwargs["stdout"] = open(os.devnull, "w") + kwargs["stderr"] = subprocess.STDOUT + return subprocess.check_call(*args, **kwargs) + + +def spawn(*args, **kwargs): + """ + Wrapper for running a spawn command and returning the process id. 
+ + :param list args: arguments for the command + :param dict kwargs: keyword arguments for the command + :return: process id of the command + :rtype: int + """ + return subprocess.Popen(*args, **kwargs).pid + + +def mutespawn(*args, **kwargs): + """ + Wrapper for running a muted spawned command. + + :param list args: arguments for the command + :param dict kwargs: keyword arguments for the command + :return: process id of the command + :rtype: int + """ + kwargs["stdout"] = open(os.devnull, "w") + kwargs["stderr"] = subprocess.STDOUT + return subprocess.Popen(*args, **kwargs).pid -def mutespawn(*args, **kwds): - kwds["stdout"] = open(os.devnull, "w") - kwds["stderr"] = subprocess.STDOUT - return subprocess.Popen(*args, **kwds).pid def detachinit(): + """ + Fork a child process and exit. + + :return: nothing + """ if os.fork(): - os._exit(0) # parent exits + # parent exits + os._exit(0) os.setsid() -def detach(*args, **kwds): - kwds["preexec_fn"] = detachinit - return subprocess.Popen(*args, **kwds).pid -def mutedetach(*args, **kwds): - kwds["preexec_fn"] = detachinit - kwds["stdout"] = open(os.devnull, "w") - kwds["stderr"] = subprocess.STDOUT - return subprocess.Popen(*args, **kwds).pid +def detach(*args, **kwargs): + """ + Run a detached process by forking it. + + :param list args: arguments for the command + :param dict kwargs: keyword arguments for the command + :return: process id of the command + :rtype: int + """ + kwargs["preexec_fn"] = detachinit + return subprocess.Popen(*args, **kwargs).pid + + +def mutedetach(*args, **kwargs): + """ + Run a muted detached process by forking it. + + :param list args: arguments for the command + :param dict kwargs: keyword arguments for the command + :return: process id of the command + :rtype: int + """ + kwargs["preexec_fn"] = detachinit + kwargs["stdout"] = open(os.devnull, "w") + kwargs["stderr"] = subprocess.STDOUT + return subprocess.Popen(*args, **kwargs).pid + def cmdresult(args): - ''' Execute a command on the host and return a tuple containing the - exit status and result string. stderr output - is folded into the stdout result string. - ''' - cmdid = subprocess.Popen(args, stdin = open(os.devnull, 'r'), - stdout = subprocess.PIPE, - stderr = subprocess.STDOUT) - result, err = cmdid.communicate() # err will always be None - status = cmdid.wait() - return (status, result) + """ + Execute a command on the host and return a tuple containing the exit status and result string. stderr output + is folded into the stdout result string. -def hexdump(s, bytes_per_word = 2, words_per_line = 8): + :param list args: command arguments + :return: command status and stdout + :rtype: tuple[int, str] + """ + cmdid = subprocess.Popen(args, stdin=open(os.devnull, "r"), stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + # err will always be None + result, err = cmdid.communicate() + status = cmdid.wait() + return status, result + + +def hexdump(s, bytes_per_word=2, words_per_line=8): + """ + Hex dump of a string. + + :param str s: string to hex dump + :param bytes_per_word: number of bytes per word + :param words_per_line: number of words per line + :return: hex dump of string + """ dump = "" count = 0 bytes = bytes_per_word * words_per_line @@ -132,20 +244,34 @@ def hexdump(s, bytes_per_word = 2, words_per_line = 8): count += len(line) return dump[:-1] + def filemunge(pathname, header, text): - ''' Insert text at the end of a file, surrounded by header comments. 
- ''' - filedemunge(pathname, header) # prevent duplicates - f = open(pathname, 'a') + """ + Insert text at the end of a file, surrounded by header comments. + + :param str pathname: file path to add text to + :param str header: header text comments + :param str text: text to append to file + :return: nothing + """ + # prevent duplicates + filedemunge(pathname, header) + f = open(pathname, "a") f.write("# BEGIN %s\n" % header) f.write(text) f.write("# END %s\n" % header) f.close() + def filedemunge(pathname, header): - ''' Remove text that was inserted in a file surrounded by header comments. - ''' - f = open(pathname, 'r') + """ + Remove text that was inserted in a file surrounded by header comments. + + :param str pathname: file path to open for removing a header + :param str header: header text to target for removal + :return: nothing + """ + f = open(pathname, "r") lines = f.readlines() f.close() start = None @@ -157,66 +283,101 @@ def filedemunge(pathname, header): end = i + 1 if start is None or end is None: return - f = open(pathname, 'w') + f = open(pathname, "w") lines = lines[:start] + lines[end:] f.write("".join(lines)) f.close() - + + def expandcorepath(pathname, session=None, node=None): - ''' Expand a file path given session information. - ''' + """ + Expand a file path given session information. + + :param str pathname: file path to expand + :param core.session.Session session: core session object to expand path with + :param core.netns.LxcNode node: node to expand path with + :return: expanded path + :rtype: str + """ if session is not None: - pathname = pathname.replace('~', "/home/%s" % session.user) - pathname = pathname.replace('%SESSION%', str(session.sessionid)) - pathname = pathname.replace('%SESSION_DIR%', session.sessiondir) - pathname = pathname.replace('%SESSION_USER%', session.user) + pathname = pathname.replace("~", "/home/%s" % session.user) + pathname = pathname.replace("%SESSION%", str(session.session_id)) + pathname = pathname.replace("%SESSION_DIR%", session.session_dir) + pathname = pathname.replace("%SESSION_USER%", session.user) if node is not None: - pathname = pathname.replace('%NODE%', str(node.objid)) - pathname = pathname.replace('%NODENAME%', node.name) + pathname = pathname.replace("%NODE%", str(node.objid)) + pathname = pathname.replace("%NODENAME%", node.name) return pathname - + + def sysctldevname(devname): - ''' Translate a device name to the name used with sysctl. - ''' + """ + Translate a device name to the name used with sysctl. + + :param str devname: device name to translate + :return: translated device name + :rtype: str + """ if devname is None: return None return devname.replace(".", "/") -def daemonize(rootdir = "/", umask = 0, close_fds = False, dontclose = (), - stdin = os.devnull, stdout = os.devnull, stderr = os.devnull, - stdoutmode = 0644, stderrmode = 0644, pidfilename = None, - defaultmaxfd = 1024): - ''' Run the background process as a daemon. - ''' + +def daemonize(rootdir="/", umask=0, close_fds=False, dontclose=(), + stdin=os.devnull, stdout=os.devnull, stderr=os.devnull, + stdoutmode=0644, stderrmode=0644, pidfilename=None, + defaultmaxfd=1024): + """ + Run the background process as a daemon. 
+ + :param str rootdir: root directory for daemon + :param int umask: umask for daemon + :param bool close_fds: flag to close file descriptors + :param dontclose: dont close options + :param stdin: stdin for daemon + :param stdout: stdout for daemon + :param stderr: stderr for daemon + :param int stdoutmode: stdout mode + :param int stderrmode: stderr mode + :param str pidfilename: pid file name + :param int defaultmaxfd: default max file descriptors + :return: nothing + """ if not hasattr(dontclose, "__contains__"): if not isinstance(dontclose, int): - raise TypeError, "dontclose must be an integer" + raise TypeError("dontclose must be an integer") dontclose = (int(dontclose),) else: for fd in dontclose: if not isinstance(fd, int): - raise TypeError, "dontclose must contain only integers" + raise TypeError("dontclose must contain only integers") + # redirect stdin if stdin: fd = os.open(stdin, os.O_RDONLY) os.dup2(fd, 0) os.close(fd) + # redirect stdout if stdout: fd = os.open(stdout, os.O_WRONLY | os.O_CREAT | os.O_APPEND, stdoutmode) os.dup2(fd, 1) - if (stdout == stderr): + if stdout == stderr: os.dup2(1, 2) os.close(fd) + # redirect stderr if stderr and (stderr != stdout): fd = os.open(stderr, os.O_WRONLY | os.O_CREAT | os.O_APPEND, stderrmode) os.dup2(fd, 2) os.close(fd) + if os.fork(): - os._exit(0) # parent exits + # parent exits + os._exit(0) + os.setsid() pid = os.fork() if pid: @@ -225,11 +386,14 @@ def daemonize(rootdir = "/", umask = 0, close_fds = False, dontclose = (), f = open(pidfilename, "w") f.write("%s\n" % pid) f.close() - except: - pass - os._exit(0) # parent exits + except IOError: + logger.exception("error writing to file: %s", pidfilename) + # parent exits + os._exit(0) + if rootdir: os.chdir(rootdir) + os.umask(umask) if close_fds: try: @@ -238,38 +402,49 @@ def daemonize(rootdir = "/", umask = 0, close_fds = False, dontclose = (), raise ValueError except: maxfd = defaultmaxfd + for fd in xrange(3, maxfd): if fd in dontclose: continue try: os.close(fd) - except: - pass + except IOError: + logger.exception("error closing file descriptor") + def readfileintodict(filename, d): - ''' Read key=value pairs from a file, into a dict. - Skip comments; strip newline characters and spacing. - ''' - with open(filename, 'r') as f: + """ + Read key=value pairs from a file, into a dict. Skip comments; strip newline characters and spacing. + + :param str filename: file to read into a dictionary + :param dict d: dictionary to read file into + :return: nothing + """ + with open(filename, "r") as f: lines = f.readlines() for l in lines: - if l[:1] == '#': + if l[:1] == "#": continue try: - key, value = l.split('=', 1) + key, value = l.split("=", 1) d[key] = value.strip() except ValueError: - pass + logger.exception("error reading file to dict: %s", filename) def checkforkernelmodule(name): - ''' Return a string if a Linux kernel module is loaded, None otherwise. + """ + Return a string if a Linux kernel module is loaded, None otherwise. The string is the line from /proc/modules containing the module name, memory size (bytes), number of loaded instances, dependencies, state, and kernel memory offset. 
- ''' - with open('/proc/modules', 'r') as f: + + :param str name: name of kernel module to check for + :return: kernel module line, None otherwise + :rtype: str + """ + with open("/proc/modules", "r") as f: for line in f: - if line.startswith(name + ' '): + if line.startswith(name + " "): return line.rstrip() return None diff --git a/daemon/core/misc/utm.py b/daemon/core/misc/utm.py index 8e54f3ab..b80a7d6d 100644 --- a/daemon/core/misc/utm.py +++ b/daemon/core/misc/utm.py @@ -66,6 +66,7 @@ import math __all__ = ['to_latlon', 'from_latlon'] + class OutOfRangeError(ValueError): pass @@ -139,7 +140,7 @@ def to_latlon(easting, northing, zone_number, zone_letter): n = R / ep_sin_sqrt r = (1 - E) / ep_sin - c = _E * p_cos**2 + c = _E * p_cos ** 2 c2 = c * c d = x / (n * K0) @@ -152,7 +153,7 @@ def to_latlon(easting, northing, zone_number, zone_letter): latitude = (p_rad - (p_tan / r) * (d2 / 2 - d4 / 24 * (5 + 3 * p_tan2 + 10 * c - 4 * c2 - 9 * E_P2)) + - d6 / 720 * (61 + 90 * p_tan2 + 298 * c + 45 * p_tan4 - 252 * E_P2 - 3 * c2)) + d6 / 720 * (61 + 90 * p_tan2 + 298 * c + 45 * p_tan4 - 252 * E_P2 - 3 * c2)) longitude = (d - d3 / 6 * (1 + 2 * p_tan2 + c) + @@ -184,8 +185,8 @@ def from_latlon(latitude, longitude): zone_letter = latitude_to_zone_letter(latitude) - n = R / math.sqrt(1 - E * lat_sin**2) - c = E_P2 * lat_cos**2 + n = R / math.sqrt(1 - E * lat_sin ** 2) + c = E_P2 * lat_cos ** 2 a = lat_cos * (lon_rad - central_lon_rad) a2 = a * a @@ -204,7 +205,7 @@ def from_latlon(latitude, longitude): a5 / 120 * (5 - 18 * lat_tan2 + lat_tan4 + 72 * c - 58 * E_P2)) + 500000 northing = K0 * (m + n * lat_tan * (a2 / 2 + - a4 / 24 * (5 - lat_tan2 + 9 * c + 4 * c**2) + + a4 / 24 * (5 - lat_tan2 + 9 * c + 4 * c ** 2) + a6 / 720 * (61 - 58 * lat_tan2 + lat_tan4 + 600 * c - 330 * E_P2))) if latitude < 0: @@ -244,16 +245,15 @@ def zone_number_to_central_longitude(zone_number): def haversine(lon1, lat1, lon2, lat2): """ - Calculate the great circle distance between two points + Calculate the great circle distance between two points on the earth (specified in decimal degrees) """ - # convert decimal degrees to radians + # convert decimal degrees to radians lon1, lat1, lon2, lat2 = map(math.radians, [lon1, lat1, lon2, lat2]) - # haversine formula - dlon = lon2 - lon1 - dlat = lat2 - lat1 - a = math.sin(dlat/2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon/2)**2 - c = 2 * math.asin(math.sqrt(a)) + # haversine formula + dlon = lon2 - lon1 + dlat = lat2 - lat1 + a = math.sin(dlat / 2) ** 2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2) ** 2 + c = 2 * math.asin(math.sqrt(a)) m = 6367000 * c - return m - + return m diff --git a/daemon/core/misc/xmldeployment.py b/daemon/core/misc/xmldeployment.py deleted file mode 100644 index 0544f9f6..00000000 --- a/daemon/core/misc/xmldeployment.py +++ /dev/null @@ -1,205 +0,0 @@ -import socket -import subprocess -import os -import xmlutils - -from core.netns import nodes -from core.misc import ipaddr -from core import constants - -class CoreDeploymentWriter(object): - def __init__(self, dom, root, session): - self.dom = dom - self.root = root - self.session = session - self.hostname = socket.gethostname() - if self.session.emane.version < self.session.emane.EMANE092: - self.transport = None - self.platform = None - - @staticmethod - def get_ipv4_addresses(hostname): - if hostname == 'localhost': - addr_list = [] - cmd = (constants.IP_BIN, '-o', '-f', 'inet', 'addr', 'show') - output = subprocess.check_output(cmd) - for line in output.split(os.linesep): - split 
= line.split() - if not split: - continue - addr = split[3] - if not addr.startswith('127.'): - addr_list.append(addr) - return addr_list - else: - # TODO: handle other hosts - raise NotImplementedError - - @staticmethod - def get_interface_names(hostname): - '''Uses same methodology of get_ipv4_addresses() to get - parallel list of interface names to go with ...''' - if hostname == 'localhost': - iface_list = [] - cmd = (constants.IP_BIN, '-o', '-f', 'inet', 'addr', 'show') - output = subprocess.check_output(cmd) - for line in output.split(os.linesep): - split = line.split() - if not split: - continue - ifaceName = split[1] - addr = split[3] - if not addr.startswith('127.'): - iface_list.append(ifaceName) - return iface_list - else: - # TODO: handle other hosts - raise NotImplementedError - - @staticmethod - def find_device(scenario, name): - tagName = ('device', 'host', 'router') - for d in xmlutils.iterDescendantsWithAttribute(scenario, tagName, - 'name', name): - return d - return None - - @staticmethod - def find_interface(device, name): - for i in xmlutils.iterDescendantsWithAttribute(device, 'interface', - 'name', name): - return i - return None - - def add_deployment(self): - testbed = self.dom.createElement('container') - testbed.setAttribute('name', 'TestBed') - testbed.setAttribute('id', 'TestBed') - self.root.baseEle.appendChild(testbed) - nodelist = [] - for obj in self.session.objs(): - if isinstance(obj, nodes.PyCoreNode): - nodelist.append(obj) - name = self.hostname - ipv4_addresses = self.get_ipv4_addresses('localhost') - iface_names = self.get_interface_names('localhost') - testhost = self.add_physical_host(testbed, name, ipv4_addresses, iface_names) - for n in nodelist: - self.add_virtual_host(testhost, n) - # TODO: handle other servers - # servers = self.session.broker.getservernames() - # servers.remove('localhost') - - def add_child_element(self, parent, tagName): - el = self.dom.createElement(tagName) - parent.appendChild(el) - return el - - def add_child_element_with_nameattr(self, parent, tagName, - name, setid = True): - el = self.add_child_element(parent, tagName) - el.setAttribute('name', name) - if setid: - el.setAttribute('id', '%s/%s' % (parent.getAttribute('id'), name)) - return el - - def add_address(self, parent, address_type, address_str, address_iface=None): - el = self.add_child_element(parent, 'address') - el.setAttribute('type', address_type) - if address_iface is not None: - el.setAttribute('iface', address_iface) - el.appendChild(self.dom.createTextNode(address_str)) - return el - - def add_type(self, parent, type_str): - el = self.add_child_element(parent, 'type') - el.appendChild(self.dom.createTextNode(type_str)) - return el - - def add_platform(self, parent, name): - el = self.add_child_element_with_nameattr(parent, - 'emanePlatform', name) - return el - - def add_transport(self, parent, name): - el = self.add_child_element_with_nameattr(parent, 'transport', name) - return el - - def add_nem(self, parent, name): - el = self.add_child_element_with_nameattr(parent, 'nem', name) - return el - - def add_parameter(self, parent, name, val): - el = self.add_child_element_with_nameattr(parent, 'parameter', - name, False) - el.appendChild(self.dom.createTextNode(val)) - return el - - def add_mapping(self, parent, maptype, mapref): - el = self.add_child_element(parent, 'mapping') - el.setAttribute('type', maptype) - el.setAttribute('ref', mapref) - return el - - def add_host(self, parent, name): - el = self.add_child_element_with_nameattr(parent, 
'testHost', name) - return el - - def add_physical_host(self, parent, name, ipv4_addresses, iface_names): - el = self.add_host(parent, name) - self.add_type(el, 'physical') - for i in range(0, len(ipv4_addresses)): - addr = ipv4_addresses[i] - if iface_names: - ifaceName = iface_names[i] - else: - ifaceName = None - self.add_address(el, 'IPv4', addr, ifaceName) - return el - - def add_virtual_host(self, parent, obj): - assert isinstance(obj, nodes.PyCoreNode) - el = self.add_host(parent, obj.name) - device = self.find_device(self.root.baseEle, obj.name) - if device is None: - self.session.warn('corresponding XML device not found for %s' % - (obj.name)) - return - self.add_mapping(device, 'testHost', el.getAttribute('id')) - self.add_type(el, 'virtual') - for netif in obj.netifs(): - for address in netif.addrlist: - addr, slash, prefixlen= address.partition('/') - if ipaddr.isIPv4Address(addr): - addr_type = 'IPv4' - elif ipaddr.isIPv6Address(addr): - addr_type = 'IPv6' - else: - raise NotImplementedError - self.add_address(el, addr_type, address, netif.name) - if isinstance(netif.net, nodes.EmaneNode): - nem = self.add_emane_interface(parent, el, netif) - interface = self.find_interface(device, netif.name) - self.add_mapping(interface, 'nem', nem.getAttribute('id')) - return el - - def add_emane_interface(self, physical_host, virtual_host, netif, - platform_name = 'p1', transport_name = 't1'): - nemid = netif.net.nemidmap[netif] - if self.session.emane.version < self.session.emane.EMANE092: - if self.platform is None: - self.platform = \ - self.add_platform(physical_host, name = platform_name) - platform = self.platform - if self.transport is None: - self.transport = \ - self.add_transport(physical_host, name = transport_name) - transport = self.transport - else: - platform = self.add_platform(virtual_host, name = platform_name) - transport = self.add_transport(virtual_host, name = transport_name) - nem_name = 'nem%s' % nemid - nem = self.add_nem(platform, nem_name) - self.add_parameter(nem, 'nemid', str(nemid)) - self.add_mapping(transport, 'nem', nem.getAttribute('id')) - return nem diff --git a/daemon/core/misc/xmlparser.py b/daemon/core/misc/xmlparser.py deleted file mode 100644 index 529dbf4b..00000000 --- a/daemon/core/misc/xmlparser.py +++ /dev/null @@ -1,46 +0,0 @@ -# CORE -# Copyright (c) 2014 The Boeing Company. -# See the LICENSE file included in this distribution. - -from xml.dom.minidom import parse -from xmlutils import getFirstChildByTagName -from xmlparser0 import CoreDocumentParser0 -from xmlparser1 import CoreDocumentParser1 - -class CoreVersionParser(object): - DEFAULT_SCENARIO_VERSION = '1.0' - - '''\ - Helper class to check the version of Network Plan document. This - simply looks for a "Scenario" element; when present, this - indicates a 0.0 version document. The dom member is set in order - to prevent parsing a file twice (it can be passed to the - appropriate CoreDocumentParser class.) 
- ''' - def __init__(self, filename, options={}): - if 'dom' in options: - self.dom = options['dom'] - else: - self.dom = parse(filename) - scenario = getFirstChildByTagName(self.dom, 'scenario') - if scenario: - version = scenario.getAttribute('version') - if not version: - version = self.DEFAULT_SCENARIO_VERSION - self.version = version - elif getFirstChildByTagName(self.dom, 'Scenario'): - self.version = '0.0' - else: - self.version = 'unknown' - -def core_document_parser(session, filename, options): - vp = CoreVersionParser(filename, options) - if 'dom' not in options: - options['dom'] = vp.dom - if vp.version == '0.0': - doc = CoreDocumentParser0(session, filename, options) - elif vp.version == '1.0': - doc = CoreDocumentParser1(session, filename, options) - else: - raise ValueError, 'unsupported document version: %s' % vp.version - return doc diff --git a/daemon/core/misc/xmlparser0.py b/daemon/core/misc/xmlparser0.py deleted file mode 100644 index c945326d..00000000 --- a/daemon/core/misc/xmlparser0.py +++ /dev/null @@ -1,420 +0,0 @@ -# -# CORE -# Copyright (c)2011-2014 the Boeing Company. -# See the LICENSE file included in this distribution. -# -# author: Jeff Ahrenholz -# - -from core.netns import nodes -from xml.dom.minidom import parse -from xmlutils import * - -class CoreDocumentParser0(object): - def __init__(self, session, filename, options): - self.session = session - self.verbose = self.session.getcfgitembool('verbose', False) - self.filename = filename - if 'dom' in options: - # this prevents parsing twice when detecting file versions - self.dom = options['dom'] - else: - self.dom = parse(filename) - self.start = options['start'] - self.nodecls = options['nodecls'] - - self.np = getoneelement(self.dom, "NetworkPlan") - if self.np is None: - raise ValueError, "missing NetworkPlan!" - self.mp = getoneelement(self.dom, "MotionPlan") - self.sp = getoneelement(self.dom, "ServicePlan") - self.meta = getoneelement(self.dom, "CoreMetaData") - - self.coords = self.getmotiondict(self.mp) - # link parameters parsed in parsenets(), applied in parsenodes() - self.linkparams = {} - - self.parsedefaultservices() - self.parseorigin() - self.parsenets() - self.parsenodes() - self.parseservices() - self.parsemeta() - - - def warn(self, msg): - if self.session: - warnstr = "XML parsing '%s':" % (self.filename) - self.session.warn("%s %s" % (warnstr, msg)) - - def getmotiondict(self, mp): - ''' Parse a MotionPlan into a dict with node names for keys and coordinates - for values. - ''' - if mp is None: - return {} - coords = {} - for node in mp.getElementsByTagName("Node"): - nodename = str(node.getAttribute("name")) - if nodename == '': - continue - for m in node.getElementsByTagName("motion"): - if m.getAttribute("type") != "stationary": - continue - point = m.getElementsByTagName("point") - if len(point) == 0: - continue - txt = point[0].firstChild - if txt is None: - continue - xyz = map(int, txt.nodeValue.split(',')) - z = None - x, y = xyz[0:2] - if (len(xyz) == 3): - z = xyz[2] - coords[nodename] = (x, y, z) - return coords - - @staticmethod - def getcommonattributes(obj): - ''' Helper to return tuple of attributes common to nodes and nets. 
- ''' - id = int(obj.getAttribute("id")) - name = str(obj.getAttribute("name")) - type = str(obj.getAttribute("type")) - return(id, name, type) - - def parsenets(self): - linkednets = [] - for net in self.np.getElementsByTagName("NetworkDefinition"): - id, name, type = self.getcommonattributes(net) - nodecls = xmltypetonodeclass(self.session, type) - if not nodecls: - self.warn("skipping unknown network node '%s' type '%s'" % \ - (name, type)) - continue - n = self.session.addobj(cls = nodecls, objid = id, name = name, - start = self.start) - if name in self.coords: - x, y, z = self.coords[name] - n.setposition(x, y, z) - getparamssetattrs(net, ("icon", "canvas", "opaque"), n) - if hasattr(n, "canvas") and n.canvas is not None: - n.canvas = int(n.canvas) - # links between two nets (e.g. switch-switch) - for ifc in net.getElementsByTagName("interface"): - netid = str(ifc.getAttribute("net")) - ifcname = str(ifc.getAttribute("name")) - linkednets.append((n, netid, ifcname)) - self.parsemodels(net, n) - # link networks together now that they all have been parsed - for (n, netid, ifcname) in linkednets: - try: - n2 = n.session.objbyname(netid) - except KeyError: - n.warn("skipping net %s interface: unknown net %s" % \ - (n.name, netid)) - continue - upstream = False - netif = n.getlinknetif(n2) - if netif is None: - netif = n2.linknet(n) - else: - netif.swapparams('_params_up') - upstream = True - key = (n2.name, ifcname) - if key in self.linkparams: - for (k, v) in self.linkparams[key]: - netif.setparam(k, v) - if upstream: - netif.swapparams('_params_up') - - def parsenodes(self): - for node in self.np.getElementsByTagName("Node"): - id, name, type = self.getcommonattributes(node) - if type == "rj45": - nodecls = nodes.RJ45Node - else: - nodecls = self.nodecls - n = self.session.addobj(cls = nodecls, objid = id, name = name, - start = self.start) - if name in self.coords: - x, y, z = self.coords[name] - n.setposition(x, y, z) - n.type = type - getparamssetattrs(node, ("icon", "canvas", "opaque"), n) - if hasattr(n, "canvas") and n.canvas is not None: - n.canvas = int(n.canvas) - for ifc in node.getElementsByTagName("interface"): - self.parseinterface(n, ifc) - - def parseinterface(self, n, ifc): - ''' Parse a interface block such as: - -
-            <interface name="eth0" net="37278">
-                <address type="mac">00:00:00:aa:00:01</address>
-                <address>10.0.0.2/24</address>
-                <address>2001::2/64</address>
-            </interface>
- ''' - name = str(ifc.getAttribute("name")) - netid = str(ifc.getAttribute("net")) - hwaddr = None - addrlist = [] - try: - net = n.session.objbyname(netid) - except KeyError: - n.warn("skipping node %s interface %s: unknown net %s" % \ - (n.name, name, netid)) - return - for addr in ifc.getElementsByTagName("address"): - addrstr = gettextchild(addr) - if addrstr is None: - continue - if addr.getAttribute("type") == "mac": - hwaddr = addrstr - else: - addrlist.append(addrstr) - i = n.newnetif(net, addrlist = addrlist, hwaddr = hwaddr, - ifindex = None, ifname = name) - for model in ifc.getElementsByTagName("model"): - self.parsemodel(model, n, n.objid) - key = (n.name, name) - if key in self.linkparams: - netif = n.netif(i) - for (k, v) in self.linkparams[key]: - netif.setparam(k, v) - - def parsemodels(self, dom, obj): - ''' Mobility/wireless model config is stored in a ConfigurableManager's - config dict. - ''' - nodenum = int(dom.getAttribute("id")) - for model in dom.getElementsByTagName("model"): - self.parsemodel(model, obj, nodenum) - - def parsemodel(self, model, obj, nodenum): - ''' Mobility/wireless model config is stored in a ConfigurableManager's - config dict. - ''' - name = model.getAttribute("name") - if name == '': - return - type = model.getAttribute("type") - # convert child text nodes into key=value pairs - kvs = gettextelementstolist(model) - - mgr = self.session.mobility - # TODO: the session.confobj() mechanism could be more generic; - # it only allows registering Conf Message callbacks, but here - # we want access to the ConfigurableManager, not the callback - if name[:5] == "emane": - mgr = self.session.emane - elif name[:5] == "netem": - mgr = None - self.parsenetem(model, obj, kvs) - - elif name[:3] == "xen": - mgr = self.session.xen - # TODO: assign other config managers here - if mgr: - mgr.setconfig_keyvalues(nodenum, name, kvs) - - def parsenetem(self, model, obj, kvs): - ''' Determine interface and invoke setparam() using the parsed - (key, value) pairs. - ''' - ifname = model.getAttribute("netif") - peer = model.getAttribute("peer") - key = (peer, ifname) - # nodes and interfaces do not exist yet, at this point of the parsing, - # save (key, value) pairs for later - try: - #kvs = map(lambda(k, v): (int(v)), kvs) - kvs = map(self.numericvalue, kvs) - except ValueError: - self.warn("error parsing link parameters for '%s' on '%s'" % \ - (ifname, peer)) - self.linkparams[key] = kvs - - @staticmethod - def numericvalue(keyvalue): - (key, value) = keyvalue - if '.' in str(value): - value = float(value) - else: - value = int(value) - return (key, value) - - def parseorigin(self): - ''' Parse any origin tag from the Mobility Plan and set the CoreLocation - reference point appropriately. 
- ''' - origin = getoneelement(self.mp, "origin") - if not origin: - return - location = self.session.location - geo = [] - attrs = ("lat","lon","alt") - for i in xrange(3): - a = origin.getAttribute(attrs[i]) - if a is not None: - a = float(a) - geo.append(a) - location.setrefgeo(geo[0], geo[1], geo[2]) - scale = origin.getAttribute("scale100") - if scale is not None: - location.refscale = float(scale) - point = getoneelement(origin, "point") - if point is not None and point.firstChild is not None: - xyz = point.firstChild.nodeValue.split(',') - if len(xyz) == 2: - xyz.append('0.0') - if len(xyz) == 3: - xyz = map(lambda(x): float(x), xyz) - location.refxyz = (xyz[0], xyz[1], xyz[2]) - - def parsedefaultservices(self): - ''' Prior to parsing nodes, use session.services manager to store - default services for node types - ''' - for node in self.sp.getElementsByTagName("Node"): - type = node.getAttribute("type") - if type == '': - continue # node-specific service config - services = [] - for service in node.getElementsByTagName("Service"): - services.append(str(service.getAttribute("name"))) - self.session.services.defaultservices[type] = services - self.session.info("default services for type %s set to %s" % \ - (type, services)) - - def parseservices(self): - ''' After node objects exist, parse service customizations and add them - to the nodes. - ''' - svclists = {} - # parse services and store configs into session.services.configs - for node in self.sp.getElementsByTagName("Node"): - name = node.getAttribute("name") - if name == '': - continue # node type without name - n = self.session.objbyname(name) - if n is None: - self.warn("skipping service config for unknown node '%s'" % \ - name) - continue - for service in node.getElementsByTagName("Service"): - svcname = service.getAttribute("name") - if self.parseservice(service, n): - if n.objid in svclists: - svclists[n.objid] += "|" + svcname - else: - svclists[n.objid] = svcname - # nodes in NetworkPlan but not in ServicePlan use the - # default services for their type - for node in self.np.getElementsByTagName("Node"): - id, name, type = self.getcommonattributes(node) - if id in svclists: - continue # custom config exists - else: - svclists[int(id)] = None # use defaults - - # associate nodes with services - for objid in sorted(svclists.keys()): - n = self.session.obj(objid) - self.session.services.addservicestonode(node=n, nodetype=n.type, - services_str=svclists[objid], - verbose=self.verbose) - - def parseservice(self, service, n): - ''' Use session.services manager to store service customizations before - they are added to a node. 
- ''' - name = service.getAttribute("name") - svc = self.session.services.getservicebyname(name) - if svc is None: - return False - values = [] - startup_idx = service.getAttribute("startup_idx") - if startup_idx is not None: - values.append("startidx=%s" % startup_idx) - startup_time = service.getAttribute("start_time") - if startup_time is not None: - values.append("starttime=%s" % startup_time) - dirs = [] - for dir in service.getElementsByTagName("Directory"): - dirname = dir.getAttribute("name") - dirs.append(dirname) - if len(dirs): - values.append("dirs=%s" % dirs) - - startup = [] - shutdown = [] - validate = [] - for cmd in service.getElementsByTagName("Command"): - type = cmd.getAttribute("type") - cmdstr = gettextchild(cmd) - if cmdstr is None: - continue - if type == "start": - startup.append(cmdstr) - elif type == "stop": - shutdown.append(cmdstr) - elif type == "validate": - validate.append(cmdstr) - if len(startup): - values.append("cmdup=%s" % startup) - if len(shutdown): - values.append("cmddown=%s" % shutdown) - if len(validate): - values.append("cmdval=%s" % validate) - - files = [] - for file in service.getElementsByTagName("File"): - filename = file.getAttribute("name") - files.append(filename) - data = gettextchild(file) - typestr = "service:%s:%s" % (name, filename) - self.session.services.setservicefile(nodenum=n.objid, type=typestr, - filename=filename, - srcname=None, data=data) - if len(files): - values.append("files=%s" % files) - if not bool(service.getAttribute("custom")): - return True - self.session.services.setcustomservice(n.objid, svc, values) - return True - - def parsehooks(self, hooks): - ''' Parse hook scripts from XML into session._hooks. - ''' - for hook in hooks.getElementsByTagName("Hook"): - filename = hook.getAttribute("name") - state = hook.getAttribute("state") - data = gettextchild(hook) - if data is None: - data = "" # allow for empty file - type = "hook:%s" % state - self.session.sethook(type, filename=filename, - srcname=None, data=data) - - def parsemeta(self): - opt = getoneelement(self.meta, "SessionOptions") - if opt: - for param in opt.getElementsByTagName("param"): - k = str(param.getAttribute("name")) - v = str(param.getAttribute("value")) - if v == '': - v = gettextchild(param) # allow attribute/text for newlines - setattr(self.session.options, k, v) - hooks = getoneelement(self.meta, "Hooks") - if hooks: - self.parsehooks(hooks) - meta = getoneelement(self.meta, "MetaData") - if meta: - for param in meta.getElementsByTagName("param"): - k = str(param.getAttribute("name")) - v = str(param.getAttribute("value")) - if v == '': - v = gettextchild(param) - self.session.metadata.additem(k, v) diff --git a/daemon/core/misc/xmlparser1.py b/daemon/core/misc/xmlparser1.py deleted file mode 100644 index 88782917..00000000 --- a/daemon/core/misc/xmlparser1.py +++ /dev/null @@ -1,942 +0,0 @@ -# -# CORE -# Copyright (c) 2015 the Boeing Company. -# See the LICENSE file included in this distribution. 
-# - -import sys -import random -from core.netns import nodes -from core import constants -from core.misc.ipaddr import MacAddr -from xml.dom.minidom import parse -from xmlutils import * - -class CoreDocumentParser1(object): - - layer2_device_types = 'hub', 'switch' - layer3_device_types = 'host', 'router' - device_types = layer2_device_types + layer3_device_types - - # TODO: support CORE interface classes: - # RJ45Node - # TunnelNode - - def __init__(self, session, filename, options): - self.session = session - self.verbose = self.session.getcfgitembool('verbose', False) - self.filename = filename - if 'dom' in options: - # this prevents parsing twice when detecting file versions - self.dom = options['dom'] - else: - self.dom = parse(filename) - self.start = options['start'] - self.nodecls = options['nodecls'] - self.scenario = self.get_scenario(self.dom) - self.location_refgeo_set = False - self.location_refxyz_set = False - # saved link parameters saved when parsing networks and applied later - self.link_params = {} - # map from id-string to objid, for files having node names but - # not node numbers - self.objidmap = {} - self.objids = set() - self.default_services = {} - if self.scenario: - self.parse_scenario() - - def info(self, msg): - s = 'XML parsing \'%s\': %s' % (self.filename, msg) - if self.session: - self.session.info(s) - else: - sys.stdout.write(s + '\n') - - def warn(self, msg): - s = 'WARNING XML parsing \'%s\': %s' % (self.filename, msg) - if self.session: - self.session.warn(s) - else: - sys.stderr.write(s + '\n') - - @staticmethod - def get_scenario(dom): - scenario = getFirstChildByTagName(dom, 'scenario') - if not scenario: - raise ValueError, 'no scenario element found' - version = scenario.getAttribute('version') - if version and version != '1.0': - raise ValueError, \ - 'unsupported scenario version found: \'%s\'' % version - return scenario - - def parse_scenario(self): - self.parse_default_services() - self.parse_session_config() - self.parse_network_plan() - - def assign_id(self, idstr, idval): - if idstr in self.objidmap: - assert self.objidmap[idstr] == idval and idval in self.objids - return - self.objidmap[idstr] = idval - self.objids.add(idval) - - def rand_id(self): - while True: - x = random.randint(0, 0xffff) - if x not in self.objids: - return x - - def get_id(self, idstr): - '''\ - Get a, possibly new, object id (node number) corresponding to - the given XML string id. - ''' - if not idstr: - idn = self.rand_id() - self.objids.add(idn) - return idn - elif idstr in self.objidmap: - return self.objidmap[idstr] - else: - try: - idn = int(idstr) - except ValueError: - idn = self.rand_id() - self.assign_id(idstr, idn) - return idn - - def get_common_attributes(self, node): - '''\ - Return id, name attributes for the given XML element. These - attributes are common to nodes and networks. 
- ''' - idstr = node.getAttribute('id') - # use an explicit set COREID if it exists - coreid = self.find_core_id(node) - if coreid: - idn = int(coreid) - if idstr: - self.assign_id(idstr, idn) - else: - idn = self.get_id(idstr) - # TODO: consider supporting unicode; for now convert to an - # ascii string - namestr = str(node.getAttribute('name')) - return idn, namestr - - def iter_network_member_devices(self, element): - # element can be a network or a channel - for interface in iterChildrenWithAttribute(element, 'member', - 'type', 'interface'): - if_id = getChildTextTrim(interface) - assert if_id # XXX for testing - if not if_id: - continue - device, if_name = self.find_device_with_interface(if_id) - assert device, 'no device for if_id: %s' % if_id # XXX for testing - if device: - yield device, if_name - - def network_class(self, network, network_type): - '''\ - Return the corresponding CORE network class for the given - network/network_type. - ''' - if network_type == 'ethernet': - return nodes.PtpNet - elif network_type == 'satcom': - return nodes.PtpNet - elif network_type == 'wireless': - channel = getFirstChildByTagName(network, 'channel') - if channel: - # use an explicit CORE type if it exists - coretype = getFirstChildTextTrimWithAttribute(channel, 'type', - 'domain', 'CORE') - if coretype: - if coretype == 'basic_range': - return nodes.WlanNode - elif coretype.startswith('emane'): - return nodes.EmaneNode - else: - self.warn('unknown network type: \'%s\'' % coretype) - return xmltypetonodeclass(self.session, coretype) - return nodes.WlanNode - self.warn('unknown network type: \'%s\'' % network_type) - return None - - def create_core_object(self, objcls, objid, objname, element, node_type): - obj = self.session.addobj(cls = objcls, objid = objid, - name = objname, start = self.start) - if self.verbose: - self.info('added object objid=%s name=%s cls=%s' % \ - (objid, objname, objcls)) - self.set_object_position(obj, element) - self.set_object_presentation(obj, element, node_type) - return obj - - def get_core_object(self, idstr): - if idstr and idstr in self.objidmap: - objid = self.objidmap[idstr] - return self.session.obj(objid) - return None - - def parse_network_plan(self): - # parse the scenario in the following order: - # 1. layer-2 devices - # 2. other networks (ptp/wlan) - # 3. 
layer-3 devices - self.parse_layer2_devices() - self.parse_networks() - self.parse_layer3_devices() - - def set_ethernet_link_parameters(self, channel, link_params, - mobility_model_name, mobility_params): - # save link parameters for later use, indexed by the tuple - # (device_id, interface_name) - for dev, if_name in self.iter_network_member_devices(channel): - if self.device_type(dev) in self.device_types: - dev_id = dev.getAttribute('id') - key = (dev_id, if_name) - self.link_params[key] = link_params - if mobility_model_name or mobility_params: - raise NotImplementedError - - def set_wireless_link_parameters(self, channel, link_params, - mobility_model_name, mobility_params): - network = self.find_channel_network(channel) - network_id = network.getAttribute('id') - if network_id in self.objidmap: - nodenum = self.objidmap[network_id] - else: - self.warn('unknown network: %s' % network.toxml('utf-8')) - assert False # XXX for testing - return - model_name = getFirstChildTextTrimWithAttribute(channel, 'type', - 'domain', 'CORE') - if not model_name: - model_name = 'basic_range' - if model_name == 'basic_range': - mgr = self.session.mobility - elif model_name.startswith('emane'): - mgr = self.session.emane - elif model_name.startswith('xen'): - mgr = self.session.xen - else: - # TODO: any other config managers? - raise NotImplementedError - mgr.setconfig_keyvalues(nodenum, model_name, link_params.items()) - if mobility_model_name and mobility_params: - mgr.setconfig_keyvalues(nodenum, mobility_model_name, - mobility_params.items()) - - def link_layer2_devices(self, device1, ifname1, device2, ifname2): - '''\ - Link two layer-2 devices together. - ''' - devid1 = device1.getAttribute('id') - dev1 = self.get_core_object(devid1) - devid2 = device2.getAttribute('id') - dev2 = self.get_core_object(devid2) - assert dev1 and dev2 # XXX for testing - if dev1 and dev2: - # TODO: review this - if isinstance(dev2, nodes.RJ45Node): - # RJ45 nodes have different linknet() - netif = dev2.linknet(dev1) - else: - netif = dev1.linknet(dev2) - self.set_wired_link_parameters(dev1, netif, devid1, ifname1) - - @classmethod - def parse_xml_value(cls, valtext): - if not valtext: - return None - try: - if not valtext.translate(None, '0123456789'): - val = int(valtext) - else: - val = float(valtext) - except ValueError: - val = str(valtext) - return val - - @classmethod - def parse_parameter_children(cls, parent): - params = {} - for parameter in iterChildrenWithName(parent, 'parameter'): - param_name = parameter.getAttribute('name') - assert param_name # XXX for testing - if not param_name: - continue - # TODO: consider supporting unicode; for now convert - # to an ascii string - param_name = str(param_name) - param_val = cls.parse_xml_value(getChildTextTrim(parameter)) - # TODO: check if the name already exists? 
- if param_name and param_val: - params[param_name] = param_val - return params - - def parse_network_channel(self, channel): - element = self.search_for_element(channel, 'type', - lambda x: not x.hasAttributes()) - channel_type = getChildTextTrim(element) - link_params = self.parse_parameter_children(channel) - - mobility = getFirstChildByTagName(channel, 'CORE:mobility') - if mobility: - mobility_model_name = \ - getFirstChildTextTrimByTagName(mobility, 'type') - mobility_params = self.parse_parameter_children(mobility) - else: - mobility_model_name = None - mobility_params = None - if channel_type == 'wireless': - self.set_wireless_link_parameters(channel, link_params, - mobility_model_name, - mobility_params) - elif channel_type == 'ethernet': - # TODO: maybe this can be done in the loop below to avoid - # iterating through channel members multiple times - self.set_ethernet_link_parameters(channel, link_params, - mobility_model_name, - mobility_params) - else: - raise NotImplementedError - layer2_device = [] - for dev, if_name in self.iter_network_member_devices(channel): - if self.device_type(dev) in self.layer2_device_types: - layer2_device.append((dev, if_name)) - assert len(layer2_device) <= 2 - if len(layer2_device) == 2: - self.link_layer2_devices(layer2_device[0][0], layer2_device[0][1], - layer2_device[1][0], layer2_device[1][1]) - - def parse_network(self, network): - '''\ - Each network element should have an 'id' and 'name' attribute - and include the following child elements: - - type (one) - member (zero or more with type="interface" or type="channel") - channel (zero or more) - ''' - layer2_members = set() - layer3_members = 0 - for dev, if_name in self.iter_network_member_devices(network): - if not dev: - continue - devtype = self.device_type(dev) - if devtype in self.layer2_device_types: - layer2_members.add(dev) - elif devtype in self.layer3_device_types: - layer3_members += 1 - else: - raise NotImplementedError - if len(layer2_members) == 0: - net_type = getFirstChildTextTrimByTagName(network, 'type') - if not net_type: - msg = 'no network type found for network: \'%s\'' % \ - network.toxml('utf-8') - self.warn(msg) - assert False # XXX for testing - return - net_cls = self.network_class(network, net_type) - objid, net_name = self.get_common_attributes(network) - if self.verbose: - self.info('parsing network: %s %s' % (net_name, objid)) - if objid in self.session._objs: - return - n = self.create_core_object(net_cls, objid, net_name, - network, None) - # handle channel parameters - for channel in iterChildrenWithName(network, 'channel'): - self.parse_network_channel(channel) - - def parse_networks(self): - '''\ - Parse all 'network' elements. 
- ''' - for network in iterDescendantsWithName(self.scenario, 'network'): - self.parse_network(network) - - def parse_addresses(self, interface): - mac = [] - ipv4 = [] - ipv6= [] - hostname = [] - for address in iterChildrenWithName(interface, 'address'): - addr_type = address.getAttribute('type') - if not addr_type: - msg = 'no type attribute found for address ' \ - 'in interface: \'%s\'' % interface.toxml('utf-8') - self.warn(msg) - assert False # XXX for testing - continue - addr_text = getChildTextTrim(address) - if not addr_text: - msg = 'no text found for address ' \ - 'in interface: \'%s\'' % interface.toxml('utf-8') - self.warn(msg) - assert False # XXX for testing - continue - if addr_type == 'mac': - mac.append(addr_text) - elif addr_type == 'IPv4': - ipv4.append(addr_text) - elif addr_type == 'IPv6': - ipv6.append(addr_text) - elif addr_type == 'hostname': - hostname.append(addr_text) - else: - msg = 'skipping unknown address type \'%s\' in ' \ - 'interface: \'%s\'' % (addr_type, interface.toxml('utf-8')) - self.warn(msg) - assert False # XXX for testing - continue - return mac, ipv4, ipv6, hostname - - def parse_interface(self, node, device_id, interface): - '''\ - Each interface can have multiple 'address' elements. - ''' - if_name = interface.getAttribute('name') - network = self.find_interface_network_object(interface) - if not network: - msg = 'skipping node \'%s\' interface \'%s\': ' \ - 'unknown network' % (node.name, if_name) - self.warn(msg) - assert False # XXX for testing - return - mac, ipv4, ipv6, hostname = self.parse_addresses(interface) - if mac: - hwaddr = MacAddr.fromstring(mac[0]) - else: - hwaddr = None - ifindex = node.newnetif(network, addrlist = ipv4 + ipv6, - hwaddr = hwaddr, ifindex = None, - ifname = if_name) - # TODO: 'hostname' addresses are unused - if self.verbose: - msg = 'node \'%s\' interface \'%s\' connected ' \ - 'to network \'%s\'' % (node.name, if_name, network.name) - self.info(msg) - # set link parameters for wired links - if isinstance(network, - (nodes.HubNode, nodes.PtpNet, nodes.SwitchNode)): - netif = node.netif(ifindex) - self.set_wired_link_parameters(network, netif, device_id) - - def set_wired_link_parameters(self, network, netif, - device_id, netif_name = None): - if netif_name is None: - netif_name = netif.name - key = (device_id, netif_name) - if key in self.link_params: - link_params = self.link_params[key] - if self.start: - bw = link_params.get('bw') - delay = link_params.get('delay') - loss = link_params.get('loss') - duplicate = link_params.get('duplicate') - jitter = link_params.get('jitter') - network.linkconfig(netif, bw = bw, delay = delay, loss = loss, - duplicate = duplicate, jitter = jitter) - else: - for k, v in link_params.iteritems(): - netif.setparam(k, v) - - @staticmethod - def search_for_element(node, tagName, match = None): - '''\ - Search the given node and all ancestors for an element named - tagName that satisfies the given matching function. 
- ''' - while True: - for child in iterChildren(node, Node.ELEMENT_NODE): - if child.tagName == tagName and \ - (match is None or match(child)): - return child - node = node.parentNode - if not node: - break - return None - - @classmethod - def find_core_id(cls, node): - def match(x): - domain = x.getAttribute('domain') - return domain == 'COREID' - alias = cls.search_for_element(node, 'alias', match) - if alias: - return getChildTextTrim(alias) - return None - - @classmethod - def find_point(cls, node): - return cls.search_for_element(node, 'point') - - @staticmethod - def find_channel_network(channel): - p = channel.parentNode - if p and p.tagName == 'network': - return p - return None - - def find_interface_network_object(self, interface): - network_id = getFirstChildTextTrimWithAttribute(interface, 'member', - 'type', 'network') - if not network_id: - # support legacy notation: -# - -''' -Helpers for loading and saving XML files. savesessionxml(session, filename) is -the main public interface here. -''' - -import os.path -from core.netns import nodes -from xmlparser import core_document_parser -from xmlwriter import core_document_writer - -def opensessionxml(session, filename, start=False, nodecls=nodes.CoreNode): - ''' Import a session from the EmulationScript XML format. - ''' - options = {'start': start, 'nodecls': nodecls} - doc = core_document_parser(session, filename, options) - if start: - session.name = os.path.basename(filename) - session.filename = filename - session.node_count = str(session.getnodecount()) - session.instantiate() - -def savesessionxml(session, filename, version): - ''' Export a session to the EmulationScript XML format. - ''' - doc = core_document_writer(session, version) - doc.writexml(filename) diff --git a/daemon/core/misc/xmlutils.py b/daemon/core/misc/xmlutils.py deleted file mode 100644 index 915e5100..00000000 --- a/daemon/core/misc/xmlutils.py +++ /dev/null @@ -1,303 +0,0 @@ -# -# CORE -# Copyright (c)2011-2013 the Boeing Company. -# See the LICENSE file included in this distribution. -# -# author: Jeff Ahrenholz -# - -from core.netns import nodes -from xml.dom.minidom import Node - -def addelementsfromlist(dom, parent, iterable, name, attr_name): - ''' XML helper to iterate through a list and add items to parent using tags - of the given name and the item value as an attribute named attr_name. - Example: addelementsfromlist(dom, parent, ('a','b','c'), "letter", "value") - - - - - - ''' - for item in iterable: - element = dom.createElement(name) - element.setAttribute(attr_name, item) - parent.appendChild(element) - -def addtextelementsfromlist(dom, parent, iterable, name, attrs): - ''' XML helper to iterate through a list and add items to parent using tags - of the given name, attributes specified in the attrs tuple, and having the - text of the item within the tags. - Example: addtextelementsfromlist(dom, parent, ('a','b','c'), "letter", - (('show','True'),)) - - a - b - c - - ''' - for item in iterable: - element = dom.createElement(name) - for k,v in attrs: - element.setAttribute(k, v) - parent.appendChild(element) - txt = dom.createTextNode(item) - element.appendChild(txt) - -def addtextelementsfromtuples(dom, parent, iterable, attrs=()): - ''' XML helper to iterate through a list of tuples and add items to - parent using tags named for the first tuple element, - attributes specified in the attrs tuple, and having the - text of second tuple element. 
- Example: addtextelementsfromtuples(dom, parent, - (('first','a'),('second','b'),('third','c')), - (('show','True'),)) - - a - b - c - - ''' - for name, value in iterable: - element = dom.createElement(name) - for k,v in attrs: - element.setAttribute(k, v) - parent.appendChild(element) - txt = dom.createTextNode(value) - element.appendChild(txt) - -def gettextelementstolist(parent): - ''' XML helper to parse child text nodes from the given parent and return - a list of (key, value) tuples. - ''' - r = [] - for n in parent.childNodes: - if n.nodeType != Node.ELEMENT_NODE: - continue - k = str(n.nodeName) - v = '' # sometimes want None here? - for c in n.childNodes: - if c.nodeType != Node.TEXT_NODE: - continue - v = str(c.nodeValue) - break - r.append((k,v)) - return r - -def addparamtoparent(dom, parent, name, value): - ''' XML helper to add a tag to the parent - element, when value is not None. - ''' - if value is None: - return None - p = dom.createElement("param") - parent.appendChild(p) - p.setAttribute("name", name) - p.setAttribute("value", "%s" % value) - return p - -def addtextparamtoparent(dom, parent, name, value): - ''' XML helper to add a value tag to the parent - element, when value is not None. - ''' - if value is None: - return None - p = dom.createElement("param") - parent.appendChild(p) - p.setAttribute("name", name) - txt = dom.createTextNode(value) - p.appendChild(txt) - return p - -def addparamlisttoparent(dom, parent, name, values): - ''' XML helper to return a parameter list and optionally add it to the - parent element: - - - - - ''' - if values is None: - return None - p = dom.createElement("paramlist") - if parent: - parent.appendChild(p) - p.setAttribute("name", name) - for v in values: - item = dom.createElement("item") - item.setAttribute("value", str(v)) - p.appendChild(item) - return p - -def getoneelement(dom, name): - e = dom.getElementsByTagName(name) - if len(e) == 0: - return None - return e[0] - -def iterDescendants(dom, max_depth = 0): - '''\ - Iterate over all descendant element nodes in breadth first order. - Only consider nodes up to max_depth deep when max_depth is greater - than zero. - ''' - nodes = [dom] - depth = 0 - current_depth_nodes = 1 - next_depth_nodes = 0 - while nodes: - n = nodes.pop(0) - for child in n.childNodes: - if child.nodeType == Node.ELEMENT_NODE: - yield child - nodes.append(child) - next_depth_nodes += 1 - current_depth_nodes -= 1 - if current_depth_nodes == 0: - depth += 1 - if max_depth > 0 and depth == max_depth: - return - current_depth_nodes = next_depth_nodes - next_depth_nodes = 0 - -def iterMatchingDescendants(dom, matchFunction, max_depth = 0): - '''\ - Iterate over descendant elements where matchFunction(descendant) - returns true. Only consider nodes up to max_depth deep when - max_depth is greater than zero. - ''' - for d in iterDescendants(dom, max_depth): - if matchFunction(d): - yield d - -def iterDescendantsWithName(dom, tagName, max_depth = 0): - '''\ - Iterate over descendant elements whose name is contained in - tagName (or is named tagName if tagName is a string). Only - consider nodes up to max_depth deep when max_depth is greater than - zero. 
- ''' - if isinstance(tagName, basestring): - tagName = (tagName,) - def match(d): - return d.tagName in tagName - return iterMatchingDescendants(dom, match, max_depth) - -def iterDescendantsWithAttribute(dom, tagName, attrName, attrValue, - max_depth = 0): - '''\ - Iterate over descendant elements whose name is contained in - tagName (or is named tagName if tagName is a string) and have an - attribute named attrName with value attrValue. Only consider - nodes up to max_depth deep when max_depth is greater than zero. - ''' - if isinstance(tagName, basestring): - tagName = (tagName,) - def match(d): - return d.tagName in tagName and \ - d.getAttribute(attrName) == attrValue - return iterMatchingDescendants(dom, match, max_depth) - -def iterChildren(dom, nodeType): - '''\ - Iterate over all child elements of the given type. - ''' - for child in dom.childNodes: - if child.nodeType == nodeType: - yield child - -def gettextchild(dom): - '''\ - Return the text node of the given element. - ''' - for child in iterChildren(dom, Node.TEXT_NODE): - return str(child.nodeValue) - return None - -def getChildTextTrim(dom): - text = gettextchild(dom) - if text: - text = text.strip() - return text - -def getparamssetattrs(dom, param_names, target): - ''' XML helper to get tags and set - the attribute in the target object. String type is used. Target object - attribute is unchanged if the XML attribute is not present. - ''' - params = dom.getElementsByTagName("param") - for param in params: - param_name = param.getAttribute("name") - value = param.getAttribute("value") - if value is None: - continue # never reached? - if param_name in param_names: - setattr(target, param_name, str(value)) - -def xmltypetonodeclass(session, type): - ''' Helper to convert from a type string to a class name in nodes.*. - ''' - if hasattr(nodes, type): - return eval("nodes.%s" % type) - else: - return None - -def iterChildrenWithName(dom, tagName): - return iterDescendantsWithName(dom, tagName, 1) - -def iterChildrenWithAttribute(dom, tagName, attrName, attrValue): - return iterDescendantsWithAttribute(dom, tagName, attrName, attrValue, 1) - -def getFirstChildByTagName(dom, tagName): - '''\ - Return the first child element whose name is contained in tagName - (or is named tagName if tagName is a string). - ''' - for child in iterChildrenWithName(dom, tagName): - return child - return None - -def getFirstChildTextByTagName(dom, tagName): - '''\ - Return the corresponding text of the first child element whose - name is contained in tagName (or is named tagName if tagName is a - string). - ''' - child = getFirstChildByTagName(dom, tagName) - if child: - return gettextchild(child) - return None - -def getFirstChildTextTrimByTagName(dom, tagName): - text = getFirstChildTextByTagName(dom, tagName) - if text: - text = text.strip() - return text - -def getFirstChildWithAttribute(dom, tagName, attrName, attrValue): - '''\ - Return the first child element whose name is contained in tagName - (or is named tagName if tagName is a string) that has an attribute - named attrName with value attrValue. - ''' - for child in \ - iterChildrenWithAttribute(dom, tagName, attrName, attrValue): - return child - return None - -def getFirstChildTextWithAttribute(dom, tagName, attrName, attrValue): - '''\ - Return the corresponding text of the first child element whose - name is contained in tagName (or is named tagName if tagName is a - string) that has an attribute named attrName with value attrValue. 
- ''' - child = getFirstChildWithAttribute(dom, tagName, attrName, attrValue) - if child: - return gettextchild(child) - return None - -def getFirstChildTextTrimWithAttribute(dom, tagName, attrName, attrValue): - text = getFirstChildTextWithAttribute(dom, tagName, attrName, attrValue) - if text: - text = text.strip() - return text diff --git a/daemon/core/misc/xmlwriter.py b/daemon/core/misc/xmlwriter.py deleted file mode 100644 index d630faa8..00000000 --- a/daemon/core/misc/xmlwriter.py +++ /dev/null @@ -1,15 +0,0 @@ -# CORE -# Copyright (c) 2015 The Boeing Company. -# See the LICENSE file included in this distribution. - -from xmlwriter0 import CoreDocumentWriter0 -from xmlwriter1 import CoreDocumentWriter1 - -def core_document_writer(session, version): - if version == '0.0': - doc = CoreDocumentWriter0(session) - elif version == '1.0': - doc = CoreDocumentWriter1(session) - else: - raise ValueError, 'unsupported document version: %s' % version - return doc diff --git a/daemon/core/misc/xmlwriter0.py b/daemon/core/misc/xmlwriter0.py deleted file mode 100644 index ce025477..00000000 --- a/daemon/core/misc/xmlwriter0.py +++ /dev/null @@ -1,377 +0,0 @@ -# -# CORE -# Copyright (c)2011-2013 the Boeing Company. -# See the LICENSE file included in this distribution. -# -# author: Jeff Ahrenholz -# - -import os -import pwd -from core.netns import nodes -from core.api import coreapi -from xml.dom.minidom import Document -from xmlutils import * - -class CoreDocumentWriter0(Document): - ''' Utility class for writing a CoreSession to XML. The init method builds - an xml.dom.minidom.Document, and the writexml() method saves the XML file. - ''' - def __init__(self, session): - ''' Create an empty Scenario XML Document, then populate it with - objects from the given session. - ''' - Document.__init__(self) - self.session = session - self.scenario = self.createElement("Scenario") - self.np = self.createElement("NetworkPlan") - self.mp = self.createElement("MotionPlan") - self.sp = self.createElement("ServicePlan") - self.meta = self.createElement("CoreMetaData") - - self.appendChild(self.scenario) - self.scenario.appendChild(self.np) - self.scenario.appendChild(self.mp) - self.scenario.appendChild(self.sp) - self.scenario.appendChild(self.meta) - - self.populatefromsession() - - def populatefromsession(self): - self.session.emane.setup() # not during runtime? - self.addorigin() - self.adddefaultservices() - self.addnets() - self.addnodes() - self.addmetadata() - - def writexml(self, filename): - self.session.info("saving session XML file %s" % filename) - f = open(filename, "w") - Document.writexml(self, writer=f, indent="", addindent=" ", newl="\n", \ - encoding="UTF-8") - f.close() - if self.session.user is not None: - uid = pwd.getpwnam(self.session.user).pw_uid - gid = os.stat(self.session.sessiondir).st_gid - os.chown(filename, uid, gid) - - def addnets(self): - ''' Add PyCoreNet objects as NetworkDefinition XML elements. - ''' - with self.session._objslock: - for net in self.session.objs(): - if not isinstance(net, nodes.PyCoreNet): - continue - self.addnet(net) - - def addnet(self, net): - ''' Add one PyCoreNet object as a NetworkDefinition XML element. 
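The deleted xmlwriter.py selected a writer class from the document version string (using Python 2 raise syntax). A hedged sketch of the same dispatch, with placeholder classes standing in for CoreDocumentWriter0/CoreDocumentWriter1 (not part of the patch):

class Writer0(object):
    def __init__(self, session):
        self.session = session

class Writer1(object):
    def __init__(self, session):
        self.session = session

# Map each supported schema version to its writer class.
_WRITERS = {"0.0": Writer0, "1.0": Writer1}

def core_document_writer(session, version):
    try:
        cls = _WRITERS[version]
    except KeyError:
        raise ValueError("unsupported document version: %s" % version)
    return cls(session)

writer = core_document_writer(session=None, version="1.0")
print(type(writer).__name__)  # Writer1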
- ''' - n = self.createElement("NetworkDefinition") - self.np.appendChild(n) - n.setAttribute("name", net.name) - # could use net.brname - n.setAttribute("id", "%s" % net.objid) - n.setAttribute("type", "%s" % net.__class__.__name__) - self.addnetinterfaces(n, net) - # key used with tunnel node - if hasattr(net, 'grekey') and net.grekey is not None: - n.setAttribute("key", "%s" % net.grekey) - # link parameters - for netif in net.netifs(sort=True): - self.addnetem(n, netif) - # wireless/mobility models - modelconfigs = net.session.mobility.getmodels(net) - modelconfigs += net.session.emane.getmodels(net) - self.addmodels(n, modelconfigs) - self.addposition(net) - - def addnetem(self, n, netif): - ''' Similar to addmodels(); used for writing netem link effects - parameters. TODO: Interface parameters should be moved to the model - construct, then this separate method shouldn't be required. - ''' - params = netif.getparams() - if len(params) == 0: - return - model = self.createElement("model") - model.setAttribute("name", "netem") - model.setAttribute("netif", netif.name) - if hasattr(netif, "node") and netif.node is not None: - model.setAttribute("peer", netif.node.name) - # link between switches uses one veth interface - elif hasattr(netif, "othernet") and netif.othernet is not None: - if netif.othernet.name == n.getAttribute("name"): - model.setAttribute("peer", netif.net.name) - else: - model.setAttribute("peer", netif.othernet.name) - model.setAttribute("netif", netif.localname) - # hack used for upstream parameters for link between switches - # (see LxBrNet.linknet()) - if netif.othernet.objid == int(n.getAttribute("id")): - netif.swapparams('_params_up') - params = netif.getparams() - netif.swapparams('_params_up') - has_params = False - for k, v in params: - # default netem parameters are 0 or None - if v is None or v == 0: - continue - if k == "has_netem" or k == "has_tbf": - continue - key = self.createElement(k) - key.appendChild(self.createTextNode("%s" % v)) - model.appendChild(key) - has_params = True - if has_params: - n.appendChild(model) - - def addmodels(self, n, configs): - ''' Add models from a list of model-class, config values tuples. - ''' - for (m, conf) in configs: - model = self.createElement("model") - n.appendChild(model) - model.setAttribute("name", m._name) - type = "wireless" - if m._type == coreapi.CORE_TLV_REG_MOBILITY: - type = "mobility" - model.setAttribute("type", type) - for i, k in enumerate(m.getnames()): - key = self.createElement(k) - value = conf[i] - if value is None: - value = "" - key.appendChild(self.createTextNode("%s" % value)) - model.appendChild(key) - - def addnodes(self): - ''' Add PyCoreNode objects as node XML elements. - ''' - with self.session._objslock: - for node in self.session.objs(): - if not isinstance(node, nodes.PyCoreNode): - continue - self.addnode(node) - - def addnode(self, node): - ''' Add a PyCoreNode object as node XML elements. - ''' - n = self.createElement("Node") - self.np.appendChild(n) - n.setAttribute("name", node.name) - n.setAttribute("id", "%s" % node.nodeid()) - if node.type: - n.setAttribute("type", node.type) - self.addinterfaces(n, node) - self.addposition(node) - addparamtoparent(self, n, "icon", node.icon) - addparamtoparent(self, n, "canvas", node.canvas) - self.addservices(node) - - def addinterfaces(self, n, node): - ''' Add PyCoreNetIfs to node XML elements. 
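addnetem() above only emits a netem model element when at least one non-default link parameter survives the filter. A standalone sketch of that filtering, assuming only xml.dom.minidom (not part of the patch; netem_model is an illustrative helper):

from xml.dom.minidom import Document

def netem_model(doc, netif_name, params):
    """Build a <model name="netem"> element from (key, value) pairs,
    skipping defaults (0/None) and the has_netem/has_tbf flags."""
    model = doc.createElement("model")
    model.setAttribute("name", "netem")
    model.setAttribute("netif", netif_name)
    for key, value in params:
        if value is None or value == 0 or key in ("has_netem", "has_tbf"):
            continue
        element = doc.createElement(key)
        element.appendChild(doc.createTextNode("%s" % value))
        model.appendChild(element)
    # Only return a model when it carries at least one parameter.
    return model if model.hasChildNodes() else None

doc = Document()
params = [("bw", 0), ("delay", 5000), ("loss", None), ("has_netem", 1)]
model = netem_model(doc, "eth0", params)
print(model.toxml())  # <model name="netem" netif="eth0"><delay>5000</delay></model>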
- ''' - for ifc in node.netifs(sort=True): - i = self.createElement("interface") - n.appendChild(i) - i.setAttribute("name", ifc.name) - netmodel = None - if ifc.net: - i.setAttribute("net", ifc.net.name) - if hasattr(ifc.net, "model"): - netmodel = ifc.net.model - if ifc.mtu and ifc.mtu != 1500: - i.setAttribute("mtu", "%s" % ifc.mtu) - # could use ifc.params, transport_type - self.addaddresses(i, ifc) - # per-interface models - if netmodel and netmodel._name[:6] == "emane_": - cfg = self.session.emane.getifcconfig(node.objid, netmodel._name, - None, ifc) - if cfg: - self.addmodels(i, ((netmodel, cfg),) ) - - - def addnetinterfaces(self, n, net): - ''' Similar to addinterfaces(), but only adds interface elements to the - supplied XML node that would not otherwise appear in the Node elements. - These are any interfaces that link two switches/hubs together. - ''' - for ifc in net.netifs(sort=True): - if not hasattr(ifc, "othernet") or not ifc.othernet: - continue - i = self.createElement("interface") - n.appendChild(i) - if net.objid == ifc.net.objid: - i.setAttribute("name", ifc.localname) - i.setAttribute("net", ifc.othernet.name) - else: - i.setAttribute("name", ifc.name) - i.setAttribute("net", ifc.net.name) - - def addposition(self, node): - ''' Add object coordinates as location XML element. - ''' - (x,y,z) = node.position.get() - if x is None or y is None: - return - # - mpn = self.createElement("Node") - mpn.setAttribute("name", node.name) - self.mp.appendChild(mpn) - - # - motion = self.createElement("motion") - motion.setAttribute("type", "stationary") - mpn.appendChild(motion) - - # $X$,$Y$,$Z$ - pt = self.createElement("point") - motion.appendChild(pt) - coordstxt = "%s,%s" % (x,y) - if z: - coordstxt += ",%s" % z - coords = self.createTextNode(coordstxt) - pt.appendChild(coords) - - def addorigin(self): - ''' Add origin to Motion Plan using canvas reference point. - The CoreLocation class maintains this reference point. - ''' - refgeo = self.session.location.refgeo - origin = self.createElement("origin") - attrs = ("lat","lon","alt") - have_origin = False - for i in xrange(3): - if refgeo[i] is not None: - origin.setAttribute(attrs[i], str(refgeo[i])) - have_origin = True - if not have_origin: - return - if self.session.location.refscale != 1.0: # 100 pixels = refscale m - origin.setAttribute("scale100", str(self.session.location.refscale)) - if self.session.location.refxyz != (0.0, 0.0, 0.0): - pt = self.createElement("point") - origin.appendChild(pt) - x,y,z = self.session.location.refxyz - coordstxt = "%s,%s" % (x,y) - if z: - coordstxt += ",%s" % z - coords = self.createTextNode(coordstxt) - pt.appendChild(coords) - - self.mp.appendChild(origin) - - def adddefaultservices(self): - ''' Add default services and node types to the ServicePlan. - ''' - for type in self.session.services.defaultservices: - defaults = self.session.services.getdefaultservices(type) - spn = self.createElement("Node") - spn.setAttribute("type", type) - self.sp.appendChild(spn) - for svc in defaults: - s = self.createElement("Service") - spn.appendChild(s) - s.setAttribute("name", str(svc._name)) - - def addservices(self, node): - ''' Add services and their customizations to the ServicePlan. 
- ''' - if len(node.services) == 0: - return - defaults = self.session.services.getdefaultservices(node.type) - if node.services == defaults: - return - spn = self.createElement("Node") - spn.setAttribute("name", node.name) - self.sp.appendChild(spn) - - for svc in node.services: - s = self.createElement("Service") - spn.appendChild(s) - s.setAttribute("name", str(svc._name)) - s.setAttribute("startup_idx", str(svc._startindex)) - if svc._starttime != "": - s.setAttribute("start_time", str(svc._starttime)) - # only record service names if not a customized service - if not svc._custom: - continue - s.setAttribute("custom", str(svc._custom)) - addelementsfromlist(self, s, svc._dirs, "Directory", "name") - - for fn in svc._configs: - if len(fn) == 0: - continue - f = self.createElement("File") - f.setAttribute("name", fn) - # all file names are added to determine when a file has been deleted - s.appendChild(f) - data = self.session.services.getservicefiledata(svc, fn) - if data is None: - # this includes only customized file contents and skips - # the auto-generated files - continue - txt = self.createTextNode(data) - f.appendChild(txt) - - addtextelementsfromlist(self, s, svc._startup, "Command", - (("type","start"),)) - addtextelementsfromlist(self, s, svc._shutdown, "Command", - (("type","stop"),)) - addtextelementsfromlist(self, s, svc._validate, "Command", - (("type","validate"),)) - - def addaddresses(self, i, netif): - ''' Add MAC and IP addresses to interface XML elements. - ''' - if netif.hwaddr: - h = self.createElement("address") - i.appendChild(h) - h.setAttribute("type", "mac") - htxt = self.createTextNode("%s" % netif.hwaddr) - h.appendChild(htxt) - for addr in netif.addrlist: - a = self.createElement("address") - i.appendChild(a) - # a.setAttribute("type", ) - atxt = self.createTextNode("%s" % addr) - a.appendChild(atxt) - - def addhooks(self): - ''' Add hook script XML elements to the metadata tag. - ''' - hooks = self.createElement("Hooks") - for state in sorted(self.session._hooks.keys()): - for (filename, data) in self.session._hooks[state]: - hook = self.createElement("Hook") - hook.setAttribute("name", filename) - hook.setAttribute("state", str(state)) - txt = self.createTextNode(data) - hook.appendChild(txt) - hooks.appendChild(hook) - if hooks.hasChildNodes(): - self.meta.appendChild(hooks) - - def addmetadata(self): - ''' Add CORE-specific session meta-data XML elements. - ''' - # options - options = self.createElement("SessionOptions") - defaults = self.session.options.getdefaultvalues() - for i, (k, v) in enumerate(self.session.options.getkeyvaluelist()): - if str(v) != str(defaults[i]): - addtextparamtoparent(self, options, k, v) - #addparamtoparent(self, options, k, v) - if options.hasChildNodes(): - self.meta.appendChild(options) - # hook scripts - self.addhooks() - # meta - meta = self.createElement("MetaData") - self.meta.appendChild(meta) - for (k, v) in self.session.metadata.items(): - addtextparamtoparent(self, meta, k, v) - #addparamtoparent(self, meta, k, v) diff --git a/daemon/core/misc/xmlwriter1.py b/daemon/core/misc/xmlwriter1.py deleted file mode 100644 index 2ee36b78..00000000 --- a/daemon/core/misc/xmlwriter1.py +++ /dev/null @@ -1,989 +0,0 @@ -# -# CORE -# Copyright (c)2011-2015 the Boeing Company. -# See the LICENSE file included in this distribution. 
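addmetadata() above records a SessionOptions parameter only when its value differs from the default at the same index. A small standalone sketch of that rule (not part of the patch; the option names are illustrative only):

def changed_options(defaults, current):
    """Return the (key, value) pairs whose value differs from its default."""
    return [(key, value) for (key, value), default in zip(current, defaults)
            if str(value) != str(default)]

defaults = ["0", "1", "50"]
current = [("enablerj45", "0"), ("enablesdt", "1"), ("refscale", "100")]
print(changed_options(defaults, current))  # [('refscale', '100')]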
-# -# Created on Dec 18, 2014 -# -# @author: santiago -# - -import os -import pwd -import collections -from core.netns import nodes -from core.api import coreapi -from core.misc.ipaddr import * - -from xml.dom.minidom import Document -from xmlutils import * -from xmldeployment import CoreDeploymentWriter - -def enum(**enums): - return type('Enum', (), enums) - -class Attrib(object): - ''' NMF scenario plan attribute constants - ''' - NetType = enum(WIRELESS = 'wireless', ETHERNET = 'ethernet', - PTP_WIRED = 'point-to-point-wired', - PTP_WIRELESS = 'point-to-point-wireless') - MembType = enum(INTERFACE = 'interface', CHANNEL = 'channel', - SWITCH = 'switch', HUB = 'hub', TUNNEL = 'tunnel', - NETWORK = "network") - DevType = enum(HOST = 'host', ROUTER = 'router', SWITCH = 'switch', - HUB = 'hub') - ''' Node types in CORE - ''' - NodeType = enum(ROUTER = 'router', HOST = 'host', MDR = 'mdr', - PC = 'PC', RJ45 = 'rj45', SWITCH = 'lanswitch', - HUB = 'hub') - Alias = enum(ID = "COREID") - -''' A link endpoint in CORE -net: the network that the endpoint belongs to -netif: the network interface at this end -id: the identifier for the endpoint -l2devport: if the other end is a layer 2 device, this is the assigned port in that device -params: link/interface parameters -''' -Endpoint = collections.namedtuple('Endpoint', - ['net', 'netif', 'type', 'id', 'l2devport', 'params']) - - - -class CoreDocumentWriter1(Document): - ''' Utility class for writing a CoreSession to XML in the NMF scenPlan schema. The init - method builds an xml.dom.minidom.Document, and the writexml() method saves the XML file. - ''' - - def __init__(self, session): - ''' Create an empty Scenario XML Document, then populate it with - objects from the given session. - ''' - Document.__init__(self) - session.info('Exporting to NMF XML version 1.0') - with session._objslock: - self.scenarioPlan = ScenarioPlan(self, session) - if session.getstate() == coreapi.CORE_EVENT_RUNTIME_STATE: - deployment = CoreDeploymentWriter(self, self.scenarioPlan, - session) - deployment.add_deployment() - self.scenarioPlan.setAttribute('deployed', 'true') - - def writexml(self, filename): - ''' Commit to file - ''' - self.scenarioPlan.coreSession.info("saving session XML file %s" % filename) - f = open(filename, "w") - Document.writexml(self, writer=f, indent="", addindent=" ", newl="\n", \ - encoding="UTF-8") - f.close() - if self.scenarioPlan.coreSession.user is not None: - uid = pwd.getpwnam(self.scenarioPlan.coreSession.user).pw_uid - gid = os.stat(self.scenarioPlan.coreSession.sessiondir).st_gid - os.chown(filename, uid, gid) - - -class XmlElement(object): - ''' The base class for all XML elements in the scenario plan. Includes - convenience functions. 
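The enum() helper and Endpoint namedtuple above are the two small building blocks the version-1.0 writer leans on; a short standalone sketch of how they are used (not part of the patch):

import collections

def enum(**enums):
    # Build a simple constants holder; this code predates the stdlib enum module.
    return type('Enum', (), enums)

NetType = enum(WIRELESS='wireless', ETHERNET='ethernet')
print(NetType.WIRELESS)  # wireless

Endpoint = collections.namedtuple(
    'Endpoint', ['net', 'netif', 'type', 'id', 'l2devport', 'params'])

ep = Endpoint(net=None, netif=None, type='interface',
              id='n1/eth0', l2devport=None, params=[])
print(ep.id, ep.type)  # n1/eth0 interface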
- ''' - def __init__(self, document, parent, elementType): - self.document = document - self.parent = parent - self.baseEle = document.createElement("%s" % elementType) - if self.parent is not None: - self.parent.appendChild(self.baseEle) - - def createElement(self, elementTag): - return self.document.createElement(elementTag) - - def getTagName(self): - return self.baseEle.tagName - - def createTextNode(self, nodeTag): - return self.document.createTextNode(nodeTag) - - def appendChild(self, child): - if isinstance(child, XmlElement): - self.baseEle.appendChild(child.baseEle) - else: - self.baseEle.appendChild(child) - - @staticmethod - def add_parameter(doc, parent, key, value): - if key and value: - parm = doc.createElement("parameter") - parm.setAttribute("name", str(key)) - parm.appendChild(doc.createTextNode(str(value))) - parent.appendChild(parm) - - def addParameter(self, key, value): - ''' - Add a parameter to the xml element - ''' - self.add_parameter(self.document, self, key, value) - - def setAttribute(self, name, val): - self.baseEle.setAttribute(name, val) - - def getAttribute(self, name): - return self.baseEle.getAttribute(name) - - -class NamedXmlElement(XmlElement): - ''' The base class for all "named" xml elements. Named elements are - xml elements in the scenario plan that have an id and a name attribute. - ''' - def __init__(self, scenPlan, parent, elementType, elementName): - XmlElement.__init__(self, scenPlan.document, parent, elementType) - - self.scenPlan = scenPlan - self.coreSession = scenPlan.coreSession - - elementPath = '' - self.id=None - if self.parent is not None and isinstance(self.parent, XmlElement) and self.parent.getTagName() != "scenario": - elementPath="%s/" % self.parent.getAttribute("id") - - self.id = "%s%s" % (elementPath,elementName) - self.setAttribute("name", elementName) - self.setAttribute("id", self.id) - - - def addPoint(self, coreObj): - ''' Add position to an object - ''' - (x,y,z) = coreObj.position.get() - if x is None or y is None: - return - lat, lon, alt = self.coreSession.location.getgeo(x, y, z) - - pt = self.createElement("point") - pt.setAttribute("type", "gps") - pt.setAttribute("lat", "%s" % lat) - pt.setAttribute("lon", "%s" % lon) - if z: - pt.setAttribute("z", "%s" % alt) - self.appendChild(pt) - - def createAlias(self, domain, valueStr): - ''' Create an alias element for CORE specific information - ''' - a = self.createElement("alias") - a.setAttribute("domain", "%s" % domain) - a.appendChild(self.createTextNode(valueStr)) - return a - - - - - -class ScenarioPlan(XmlElement): - ''' Container class for ScenarioPlan. - ''' - def __init__(self, document, session): - XmlElement.__init__(self, document, parent=document, elementType='scenario') - - self.coreSession = session - - self.setAttribute('version', '1.0') - self.setAttribute("name", "%s" % session.name) - - self.setAttribute('xmlns', 'nmfPlan') - self.setAttribute('xmlns:CORE', 'coreSpecific') - self.setAttribute('compiled', 'true') - - self.allChannelMembers = dict() - self.lastNetIdx = 0 - self.addNetworks() - self.addDevices() - - # XXX Do we need these? - #self.session.emane.setup() # not during runtime? - #self.addorigin() - - self.addDefaultServices() - - self.addSessionConfiguration() - - - - def addNetworks(self): - ''' Add networks in the session to the scenPlan. 
- ''' - for net in self.coreSession.objs(): - if not isinstance(net, nodes.PyCoreNet): - continue - - if isinstance(net, nodes.CtrlNet): - continue - - # Do not add switches and hubs that belong to another network - if isinstance(net, (nodes.SwitchNode, nodes.HubNode)): - if inOtherNetwork(net): - continue - - try: - NetworkElement(self, self, net) - except: - if hasattr(net, "name") and net.name: - self.coreSession.warn('Unsupported net: %s' % net.name) - else: - self.coreSession.warn('Unsupported net: %s' % net.__class__.__name__) - - - def addDevices(self): - ''' Add device elements to the scenario plan. - ''' - for node in self.coreSession.objs(): - if not isinstance(node, (nodes.PyCoreNode)): - continue - try: - DeviceElement(self, self, node) - except: - if hasattr(node, "name") and node.name: - self.coreSession.warn('Unsupported device: %s' % node.name) - else: - self.coreSession.warn('Unsupported device: %s' % node.__class__.__name__) - - - def addDefaultServices(self): - ''' Add default services and node types to the ServicePlan. - ''' - defaultservices = self.createElement("CORE:defaultservices") - for type in self.coreSession.services.defaultservices: - defaults = self.coreSession.services.getdefaultservices(type) - spn = self.createElement("device") - spn.setAttribute("type", type) - defaultservices.appendChild(spn) - for svc in defaults: - s = self.createElement("service") - spn.appendChild(s) - s.setAttribute("name", str(svc._name)) - if defaultservices.hasChildNodes(): - self.appendChild(defaultservices) - - def addSessionConfiguration(self): - ''' Add CORE-specific session configuration XML elements. - ''' - config = self.createElement("CORE:sessionconfig") - - # origin: geolocation of cartesian coordinate 0,0,0 - refgeo = self.coreSession.location.refgeo - origin = self.createElement("origin") - attrs = ("lat","lon","alt") - have_origin = False - for i in xrange(3): - if refgeo[i] is not None: - origin.setAttribute(attrs[i], str(refgeo[i])) - have_origin = True - if have_origin: - if self.coreSession.location.refscale != 1.0: # 100 pixels = refscale m - origin.setAttribute("scale100", str(self.coreSession.location.refscale)) - if self.coreSession.location.refxyz != (0.0, 0.0, 0.0): - pt = self.createElement("point") - origin.appendChild(pt) - x,y,z = self.coreSession.location.refxyz - coordstxt = "%s,%s" % (x,y) - if z: - coordstxt += ",%s" % z - coords = self.createTextNode(coordstxt) - pt.appendChild(coords) - config.appendChild(origin) - - - # options - options = self.createElement("options") - defaults = self.coreSession.options.getdefaultvalues() - for i, (k, v) in enumerate(self.coreSession.options.getkeyvaluelist()): - if str(v) != str(defaults[i]): - XmlElement.add_parameter(self.document, options, k, v) - if options.hasChildNodes(): - config.appendChild(options) - - # hook scripts - hooks = self.createElement("hooks") - for state in sorted(self.coreSession._hooks.keys()): - for (filename, data) in self.coreSession._hooks[state]: - hook = self.createElement("hook") - hook.setAttribute("name", filename) - hook.setAttribute("state", str(state)) - txt = self.createTextNode(data) - hook.appendChild(txt) - hooks.appendChild(hook) - if hooks.hasChildNodes(): - config.appendChild(hooks) - - # metadata - meta = self.createElement("metadata") - for (k, v) in self.coreSession.metadata.items(): - XmlElement.add_parameter(self.document, meta, k, v) - if meta.hasChildNodes(): - config.appendChild(meta) - - if config.hasChildNodes(): - self.appendChild(config) - - -class 
NetworkElement(NamedXmlElement): - def __init__(self, scenPlan, parent, netObj): - ''' Add one PyCoreNet object as one network XML element. - ''' - elementName = self.getNetworkName(scenPlan, netObj) - NamedXmlElement.__init__(self, scenPlan, parent, "network", elementName) - - self.scenPlan = scenPlan - - self.addPoint(netObj) - - netType = None - if isinstance(netObj, (nodes.WlanNode, nodes.EmaneNode)): - netType = Attrib.NetType.WIRELESS - elif isinstance(netObj, (nodes.SwitchNode, nodes.HubNode, - nodes.PtpNet, nodes.TunnelNode)): - netType = Attrib.NetType.ETHERNET - else: - netType ="%s" % netObj.__class__.__name__ - - typeEle = self.createElement("type") - typeEle.appendChild(self.createTextNode(netType)) - self.appendChild(typeEle) - - # Gather all endpoints belonging to this network - self.endpoints = getEndpoints(netObj) - - # Special case for a network of switches and hubs - createAlias = True - self.l2devices = [] - if isinstance(netObj, (nodes.SwitchNode, nodes.HubNode)): - createAlias = False - self.appendChild(typeEle) - self.addL2Devices(netObj) - - if createAlias: - a = self.createAlias(Attrib.Alias.ID, "%d" % int(netObj.objid)) - self.appendChild(a) - - # XXXX TODO: Move this to channel? - # key used with tunnel node - if hasattr(netObj, 'grekey') and netObj.grekey is not None: - a = self.createAlias("COREGREKEY", "%s" % netObj.grekey) - self.appendChild(a) - - self.addNetMembers(netObj) - self.addChannels(netObj) - - presentationEle = self.createElement("CORE:presentation") - addPresentationEle = False - if netObj.icon and not netObj.icon.isspace(): - presentationEle.setAttribute("icon", netObj.icon) - addPresentationEle = True - if netObj.canvas: - presentationEle.setAttribute("canvas", str(netObj.canvas)) - addPresentationEle = True - if addPresentationEle: - self.appendChild(presentationEle) - - def getNetworkName(self, scenPlan, netObj): - ''' Determine the name to use for this network element - ''' - if isinstance(netObj, (nodes.PtpNet, nodes.TunnelNode)): - name = "net%s" % scenPlan.lastNetIdx - scenPlan.lastNetIdx += 1 - elif netObj.name: - name = str(netObj.name) # could use net.brname for bridges? - elif isinstance(netObj, (nodes.SwitchNode, nodes.HubNode)): - name = "lan%s" % netObj.objid - else: - name = '' - return name - - - def addL2Devices(self, netObj): - ''' Add switches and hubs - ''' - - # Add the netObj as a device - self.l2devices.append(DeviceElement(self.scenPlan, self, netObj)) - - # Add downstream switches/hubs - l2devs = [] - neweps = [] - for ep in self.endpoints: - if ep.type and ep.net.objid != netObj.objid: - l2s, eps = getDowmstreamL2Devices(ep.net) - l2devs.extend(l2s) - neweps.extend(eps) - - for l2dev in l2devs: - self.l2devices.append(DeviceElement(self.scenPlan, self, l2dev)) - - self.endpoints.extend(neweps) - - # XXX: Optimize later - def addNetMembers(self, netObj): - ''' Add members to a network XML element. 
- ''' - - for ep in self.endpoints: - if ep.type: - MemberElement(self.scenPlan, self, referencedType=ep.type, referencedId=ep.id) - - if ep.l2devport: - MemberElement(self.scenPlan, - self, - referencedType=Attrib.MembType.INTERFACE, - referencedId="%s/%s" % (self.id,ep.l2devport)) - - # XXX Revisit this - # Create implied members given the network type - if isinstance(netObj, nodes.TunnelNode): - MemberElement(self.scenPlan, - self, - referencedType=Attrib.MembType.TUNNEL, - referencedId="%s/%s" % (netObj.name, netObj.name)) - - # XXX: Optimize later - def addChannels(self, netObj): - ''' Add channels to a network XML element - ''' - - if isinstance(netObj, (nodes.WlanNode, nodes.EmaneNode)): - modelconfigs = netObj.session.mobility.getmodels(netObj) - modelconfigs += netObj.session.emane.getmodels(netObj) - chan = None - for (model, conf) in modelconfigs: - # Handle mobility parameters below - if model._type == coreapi.CORE_TLV_REG_MOBILITY: - continue - - # Create the channel - if chan is None: - name = "wireless" - chan = ChannelElement(self.scenPlan, self, netObj, - channelType=model._name, - channelName=name, - channelDomain="CORE") - - # Add wireless model parameters - for i, key in enumerate(model.getnames()): - value = conf[i] - if value is not None: - chan.addParameter(key, model.valueof(key, conf)) - - for (model, conf) in modelconfigs: - if model._type == coreapi.CORE_TLV_REG_MOBILITY: - # Add wireless mobility parameters - mobility = XmlElement(self.scenPlan, chan, "CORE:mobility") - # Add a type child - typeEle = self.createElement("type") - typeEle.appendChild(self.createTextNode(model._name)) - mobility.appendChild(typeEle) - for i, key in enumerate(model.getnames()): - value = conf[i] - if value is not None: - mobility.addParameter(key, value) - - # Add members to the channel - if chan is not None: - chan.addChannelMembers(self.endpoints) - self.appendChild(chan.baseEle) - elif isinstance(netObj, nodes.PtpNet) : - if len(self.endpoints) < 2: - if len(self.endpoints) == 1: - self.coreSession.warn('Pt2Pt network with only 1 endpoint: %s' % self.endpoints[0].id) - else: - self.coreSession.warn('Pt2Pt network with no endpoints encountered in %s' % netObj.name) - return - name = "chan%d" % (0) - chan = ChannelElement(self.scenPlan, self, netObj, - channelType=Attrib.NetType.ETHERNET, - channelName=name) - - # Add interface parameters - if self.endpoints[0].params != self.endpoints[1].params: - self.coreSession.warn('Pt2Pt Endpoint parameters do not match in %s' % netObj.name) - for key, value in self.endpoints[0].params: - # XXX lifted from original addnetem function. revisit this. - # default netem parameters are 0 or None - if value is None or value == 0: - continue - if key == "has_netem" or key == "has_tbf": - continue - chan.addParameter(key, value) - - # Add members to the channel - chan.addChannelMembers(self.endpoints) - self.appendChild(chan) - - elif isinstance(netObj, (nodes.SwitchNode, - nodes.HubNode, nodes.TunnelNode)): - cidx=0 - channels = [] - for ep in self.endpoints: - # Create one channel member per ep - if ep.type: - name = "chan%d" % (cidx) - chan = ChannelElement(self.scenPlan, self, netObj, - channelType=Attrib.NetType.ETHERNET, - channelName=name) - - # Add interface parameters - for key, value in ep.params: - # XXX lifted from original addnetem function. revisit this. 
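addChannels() above walks a modelconfigs list that mixes wireless and mobility models and treats them differently based on a registration type. A hedged sketch of that split using plain tuples; WIRELESS and MOBILITY below are stand-ins for the CORE registration constants (not part of the patch):

WIRELESS, MOBILITY = 1, 2

def split_models(modelconfigs):
    """Separate (model_type, name, config) tuples into wireless and mobility lists."""
    wireless = [m for m in modelconfigs if m[0] == WIRELESS]
    mobility = [m for m in modelconfigs if m[0] == MOBILITY]
    return wireless, mobility

configs = [
    (WIRELESS, "basic_range", {"range": "275"}),
    (MOBILITY, "ns2script", {"file": "sample.scen"}),
]
wireless, mobility = split_models(configs)
print([m[1] for m in wireless])  # ['basic_range']
print([m[1] for m in mobility])  # ['ns2script']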
- # default netem parameters are 0 or None - if value is None or value == 0: - continue - if key == "has_netem" or key == "has_tbf": - continue - chan.addParameter(key, value) - - # Add members to the channel - chan.addChannelMembers(ep) - channels.append(chan) - cidx += 1 - - for chan in channels: - self.appendChild(chan) - - - - -class DeviceElement(NamedXmlElement): - ''' A device element in the scenario plan. - ''' - def __init__(self, scenPlan, parent, devObj): - ''' Add a PyCoreNode object as a device element. - ''' - - devType = None - coreDevType = None - if hasattr(devObj, "type") and devObj.type: - coreDevType = devObj.type - if devObj.type == Attrib.NodeType.ROUTER: - devType = Attrib.DevType.ROUTER - elif devObj.type == Attrib.NodeType.MDR: - devType = Attrib.DevType.ROUTER - elif devObj.type == Attrib.NodeType.HOST: - devType = Attrib.DevType.HOST - elif devObj.type == Attrib.NodeType.PC: - devType = Attrib.DevType.HOST - elif devObj.type == Attrib.NodeType.RJ45: - devType = Attrib.DevType.HOST - nodeId = "EMULATOR-HOST" - elif devObj.type == Attrib.NodeType.HUB: - devType = Attrib.DevType.HUB - elif devObj.type == Attrib.NodeType.SWITCH: - devType = Attrib.DevType.SWITCH - else: - # Default custom types (defined in ~/.core/nodes.conf) to HOST - devType = Attrib.DevType.HOST - - - if devType is None: - raise Exception - - - NamedXmlElement.__init__(self, scenPlan, parent, devType, devObj.name) - - if coreDevType is not None: - typeEle = self.createElement("type") - typeEle.setAttribute("domain", "CORE") - typeEle.appendChild(self.createTextNode("%s" % coreDevType)) - self.appendChild(typeEle) - - self.interfaces = [] - self.addInterfaces(devObj) - alias = self.createAlias(Attrib.Alias.ID, "%s" % devObj.objid) - self.appendChild(alias) - self.addPoint(devObj) - self.addServices(devObj) - - - presentationEle = self.createElement("CORE:presentation") - addPresentationEle = False - if devObj.icon and not devObj.icon.isspace(): - presentationEle.setAttribute("icon", devObj.icon) - addPresentationEle = True - if devObj.canvas: - presentationEle.setAttribute("canvas", str(devObj.canvas)) - addPresentationEle = True - if addPresentationEle: - self.appendChild(presentationEle) - - def addInterfaces(self, devObj): - ''' Add interfaces to a device element. - ''' - idx=0 - for ifcObj in devObj.netifs(sort=True): - if ifcObj.net and isinstance(ifcObj.net, nodes.CtrlNet): - continue - if isinstance(devObj, nodes.PyCoreNode): - ifcEle = InterfaceElement(self.scenPlan, self, devObj, ifcObj) - else: # isinstance(node, (nodes.HubNode nodes.SwitchNode)): - ifcEle = InterfaceElement(self.scenPlan, self, devObj, ifcObj, idx) - idx += 1 - - netmodel = None - if ifcObj.net: - if hasattr(ifcObj.net, "model"): - netmodel = ifcObj.net.model - if ifcObj.mtu and ifcObj.mtu != 1500: - ifcEle.setAttribute("mtu", "%s" % ifcObj.mtu) - - # The interfaces returned for Switches and Hubs are the interfaces of the nodes connected to them. - # The addresses are for those interfaces. Don't include them here. - if isinstance(devObj, nodes.PyCoreNode): - # could use ifcObj.params, transport_type - ifcEle.addAddresses(ifcObj) - # per-interface models - # XXX Remove??? - if netmodel and netmodel._name[:6] == "emane_": - cfg = self.coreSession.emane.getifcconfig(devObj.objid, netmodel._name, - None, ifcObj) - if cfg: - ifcEle.addModels(((netmodel, cfg),) ) - - self.interfaces.append(ifcEle) - - - def addServices(self, devObj): - ''' Add services and their customizations to the ServicePlan. 
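DeviceElement.__init__() above maps CORE node types onto scenario-plan device types with a chain of elif branches, defaulting custom types to host. The same mapping expressed as a lookup table (standalone sketch, not part of the patch):

# CORE node type -> scenario-plan device type.
NODE_TO_DEV = {
    "router": "router",
    "mdr": "router",
    "host": "host",
    "PC": "host",
    "rj45": "host",
    "hub": "hub",
    "lanswitch": "switch",
}

def device_type(core_type):
    """Return the scenario-plan device type for a CORE node type."""
    return NODE_TO_DEV.get(core_type, "host")

print(device_type("mdr"))       # router
print(device_type("myrouter"))  # host (custom node types default to host)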
- ''' - if not hasattr(devObj, "services") : - return - - if len(devObj.services) == 0: - return - - defaults = self.coreSession.services.getdefaultservices(devObj.type) - if devObj.services == defaults: - return - spn = self.createElement("CORE:services") - spn.setAttribute("name", devObj.name) - self.appendChild(spn) - - for svc in devObj.services: - s = self.createElement("service") - spn.appendChild(s) - s.setAttribute("name", str(svc._name)) - s.setAttribute("startup_idx", str(svc._startindex)) - if svc._starttime != "": - s.setAttribute("start_time", str(svc._starttime)) - # only record service names if not a customized service - if not svc._custom: - continue - s.setAttribute("custom", str(svc._custom)) - addelementsfromlist(self, s, svc._dirs, "directory", "name") - - for fn in svc._configs: - if len(fn) == 0: - continue - f = self.createElement("file") - f.setAttribute("name", fn) - # all file names are added to determine when a file has been deleted - s.appendChild(f) - data = self.coreSession.services.getservicefiledata(svc, fn) - if data is None: - # this includes only customized file contents and skips - # the auto-generated files - continue - txt = self.createTextNode("\n" + data) - f.appendChild(txt) - - addtextelementsfromlist(self, s, svc._startup, "command", - (("type","start"),)) - addtextelementsfromlist(self, s, svc._shutdown, "command", - (("type","stop"),)) - addtextelementsfromlist(self, s, svc._validate, "command", - (("type","validate"),)) - - - -class ChannelElement(NamedXmlElement): - ''' A channel element in the scenario plan - ''' - def __init__(self, scenPlan, parent, netObj, channelType, channelName, channelDomain=None): - NamedXmlElement.__init__(self, scenPlan, parent, "channel", channelName) - ''' - Create a channel element and append a member child referencing this channel element - in the parent element. - ''' - # Create a member element for this channel in the parent - MemberElement(self.scenPlan, - parent, - referencedType=Attrib.MembType.CHANNEL, - referencedId=self.id) - - # Add a type child - typeEle = self.createElement("type") - if channelDomain is not None: - typeEle.setAttribute("domain", "%s" % channelDomain) - typeEle.appendChild(self.createTextNode(channelType)) - self.appendChild(typeEle) - - - def addChannelMembers(self, endpoints): - ''' - Add network channel members referencing interfaces in the channel - ''' - if isinstance(endpoints, list): - # A list of endpoints is given. Create one channel member per endpoint - idx = 0 - for ep in endpoints: - self.addChannelMember(ep.type, ep.id, idx) - idx += 1 - else: - # A single endpoint is given. Create one channel member for the endpoint, - # and if the endpoint is associated with a Layer 2 device port, add the - # port as a second member - ep = endpoints - self.addChannelMember(ep.type, ep.id, 0) - if ep.l2devport is not None: - memId = "%s/%s" % (self.parent.getAttribute("id"), ep.l2devport) - self.addChannelMember(ep.type, memId, 1) - - - def addChannelMember(self, memIfcType, memIfcId, memIdx): - ''' - add a member to a given channel - ''' - - m = MemberElement(self.scenPlan, - self, - referencedType=memIfcType, - referencedId=memIfcId, - index=memIdx) - self.scenPlan.allChannelMembers[memIfcId] = m - - - -class InterfaceElement(NamedXmlElement): - ''' - A network interface element - ''' - def __init__(self, scenPlan, parent, devObj, ifcObj, ifcIdx=None): - ''' - Create a network interface element with references to channel that this - interface is used. 
- ''' - elementName=None - if ifcIdx is not None: - elementName = "e%d" % ifcIdx - else: - elementName = ifcObj.name - NamedXmlElement.__init__(self, scenPlan, parent, "interface", elementName) - self.ifcObj = ifcObj - self.addChannelReference() - - def addChannelReference(self): - ''' - Add a reference to the channel that uses this interface - ''' - try: - cm = self.scenPlan.allChannelMembers[self.id] - if cm is not None: - ch = cm.baseEle.parentNode - if ch is not None: - net = ch.parentNode - if net is not None: - MemberElement(self.scenPlan, - self, - referencedType=Attrib.MembType.CHANNEL, - referencedId=ch.getAttribute("id"), - index=int(cm.getAttribute("index"))) - MemberElement(self.scenPlan, - self, - referencedType=Attrib.MembType.NETWORK, - referencedId=net.getAttribute("id")) - except KeyError: - pass # Not an error. This occurs when an interface belongs to a switch or a hub within a network and the channel is yet to be defined - - - def addAddresses(self, ifcObj): - ''' - Add MAC and IP addresses to interface XML elements. - ''' - if ifcObj.hwaddr: - h = self.createElement("address") - self.appendChild(h) - h.setAttribute("type", "mac") - htxt = self.createTextNode("%s" % ifcObj.hwaddr) - h.appendChild(htxt) - for addr in ifcObj.addrlist: - a = self.createElement("address") - self.appendChild(a) - (ip, sep, mask) = addr.partition('/') - # mask = int(mask) XXX? - if isIPv4Address(ip): - a.setAttribute("type", "IPv4") - else: - a.setAttribute("type", "IPv6") - - # a.setAttribute("type", ) - atxt = self.createTextNode("%s" % addr) - a.appendChild(atxt) - - - # XXX Remove? - def addModels(self, configs): - ''' - Add models from a list of model-class, config values tuples. - ''' - for (m, conf) in configs: - modelEle = self.createElement("model") - modelEle.setAttribute("name", m._name) - typeStr = "wireless" - if m._type == coreapi.CORE_TLV_REG_MOBILITY: - typeStr = "mobility" - modelEle.setAttribute("type", typeStr) - for i, k in enumerate(m.getnames()): - key = self.createElement(k) - value = conf[i] - if value is None: - value = "" - key.appendChild(self.createTextNode("%s" % value)) - modelEle.appendChild(key) - self.appendChild(modelEle) - - -class MemberElement(XmlElement): - ''' - Member elements are references to other elements in the network plan elements of the scenario. - They are used in networks to reference channels, in channels to reference interfaces, - and in interfaces to reference networks/channels. Member elements provided allow bi-directional - traversal of network plan components. 
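addAddresses() above classifies each interface address as IPv4 or IPv6 via isIPv4Address() from core.misc.ipaddr. A standalone Python 3 sketch of the same check using the stdlib ipaddress module (not part of the patch; address_type is a hypothetical helper):

import ipaddress

def address_type(addr):
    """Return 'IPv4' or 'IPv6' for an address with an optional /prefix."""
    ip, _, _ = addr.partition('/')
    return "IPv4" if ipaddress.ip_address(ip).version == 4 else "IPv6"

print(address_type("10.0.0.1/24"))     # IPv4
print(address_type("2001:db8::1/64"))  # IPv6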
- ''' - def __init__(self, scenPlan, parent, referencedType, referencedId, index=None): - ''' - Create a member element - ''' - XmlElement.__init__(self, scenPlan.document, parent, "member") - self.setAttribute("type", "%s" % referencedType) - # See'Understanding the Network Modeling Framework document' - if index is not None: - self.setAttribute("index", "%d" % index) - self.appendChild(self.createTextNode("%s" % referencedId)) - - -# -# ======================================================================================= -# Helpers -# ======================================================================================= -def getEndpoint(netObj, ifcObj): - ''' - Create an Endpoint object given the network and the interface of interest - ''' - ep = None - l2devport=None - - # if ifcObj references an interface of a node and is part of this network - if ifcObj.net.objid == netObj.objid and hasattr(ifcObj,'node') and ifcObj.node: - params = ifcObj.getparams() - if isinstance(ifcObj.net, (nodes.HubNode, nodes.SwitchNode)): - l2devport="%s/e%d" % (ifcObj.net.name, ifcObj.net.getifindex(ifcObj)) - ep = Endpoint(netObj, - ifcObj, - type = Attrib.MembType.INTERFACE, - id="%s/%s" % (ifcObj.node.name, ifcObj.name), - l2devport=l2devport, - params=params) - - # else if ifcObj references another node and is connected to this network - elif hasattr(ifcObj,"othernet"): - if ifcObj.othernet.objid == netObj.objid: - # #hack used for upstream parameters for link between switches - # #(see LxBrNet.linknet()) - ifcObj.swapparams('_params_up') - params = ifcObj.getparams() - ifcObj.swapparams('_params_up') - owner = ifcObj.net - l2devport="%s/e%d" % (ifcObj.othernet.name, ifcObj.othernet.getifindex(ifcObj)) - - # Create the endpoint. - # XXX the interface index might not match what is shown in the gui. For switches and hubs, - # The gui assigns its index but doesn't pass it to the daemon and vice versa. - # The gui stores it's index in the IMN file, which it reads and writes without daemon intervention. - # Fix this! - ep = Endpoint(owner, - ifcObj, - type = Attrib.MembType.INTERFACE, - id="%s/%s/e%d" % (netObj.name, owner.name, owner.getifindex(ifcObj)), - l2devport=l2devport, - params=params) - # else this node has an interface that belongs to another network - # i.e. a switch/hub interface connected to another switch/hub and CORE has the other switch/hub - # as the containing network - else : - ep = Endpoint(netObj, ifcObj,type=None, id=None, l2devport=None, params=None) - - - return ep - -def getEndpoints(netObj): - ''' - Gather all endpoints of the given network - ''' - # Get all endpoints - endpoints = [] - - # XXX TODO: How to represent physical interfaces. - # - # NOTE: The following code works except it would be missing physical (rj45) interfaces from Pt2pt links - # TODO: Fix data in net.netifs to include Pt2Pt physical interfaces - # - # Iterate through all the nodes in the scenario, then iterate through all the interface for each node, - # and check if the interface is connected to this network. 
- - for ifcObj in netObj.netifs(sort=True): - try: - ep = getEndpoint(netObj, ifcObj) - if ep is not None: - endpoints.append(ep) - except Exception: - pass - return endpoints - -def getDowmstreamL2Devices(netObj): - ''' - Helper function for getting a list of all downstream layer 2 devices from the given netObj - ''' - l2devObjs = [netObj] - allendpoints = [] - myendpoints = getEndpoints(netObj) - allendpoints.extend(myendpoints) - for ep in myendpoints: - if ep.type and ep.net.objid != netObj.objid: - l2s, eps = getDowmstreamL2Devices(ep.net) - l2devObjs.extend(l2s) - allendpoints.extend(eps) - - return l2devObjs, allendpoints - - - -def getAllNetworkInterfaces(session): - ''' - Gather all network interfacecs in the session - ''' - netifs = [] - for node in session.objs(): - for netif in node.netifs(sort=True): - if netif not in netifs: - netifs.append(netif) - return netifs - -def inOtherNetwork(netObj): - ''' - Determine if CORE considers a given network object to be part of another network. - Note: CORE considers layer 2 devices to be their own networks. However, if a l2 device - is connected to another device, it is possible that one of its ports belong to the other - l2 device's network (thus, "othernet"). - ''' - for netif in netObj.netifs(sort=True): - if hasattr(netif,"othernet"): - if netif.othernet.objid != netObj.objid: - return True - return False diff --git a/daemon/core/mobility.py b/daemon/core/mobility.py index 00a60564..f868ce6d 100644 --- a/daemon/core/mobility.py +++ b/daemon/core/mobility.py @@ -1,326 +1,415 @@ -# -# CORE -# Copyright (c)2011-2012 the Boeing Company. -# See the LICENSE file included in this distribution. -# -# author: Jeff Ahrenholz -# -''' +""" mobility.py: mobility helpers for moving nodes and calculating wireless range. -''' -import sys, os, time, string, math, threading +""" + import heapq -from core.api import coreapi -from core.conf import ConfigurableManager, Configurable +import math +import os +import subprocess +import threading +import time + +from core.conf import Configurable +from core.conf import ConfigurableManager from core.coreobj import PyCoreNode -from core.misc.utils import check_call -from core.misc.ipaddr import IPAddr +from core.data import EventData, LinkData +from core.enumerations import ConfigDataTypes +from core.enumerations import EventTypes +from core.enumerations import LinkTypes +from core.enumerations import MessageFlags +from core.enumerations import MessageTypes +from core.enumerations import NodeTlvs +from core.enumerations import RegisterTlvs +from core.misc import log +from core.misc.ipaddress import IpAddress + +logger = log.get_logger(__name__) + class MobilityManager(ConfigurableManager): - ''' Member of session class for handling configuration data for mobility and + """ + Member of session class for handling configuration data for mobility and range models. - ''' - _name = "MobilityManager" - _type = coreapi.CORE_TLV_REG_WIRELESS - + """ + name = "MobilityManager" + config_type = RegisterTlvs.WIRELESS.value + def __init__(self, session): - ConfigurableManager.__init__(self, session) - self.verbose = self.session.getcfgitembool('verbose', False) + """ + Creates a MobilityManager instance. 
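The refactored MobilityManager above drops the old register() step and instead keeps a static name-to-class map (_modelclsmap). The lookup is plain dictionary dispatch; a standalone sketch with placeholder classes standing in for the real CORE models (not part of the patch):

class BasicRangeModel(object):
    name = "basic_range"

class Ns2ScriptedMobility(object):
    name = "ns2script"

# Register each model class under its advertised name.
MODEL_CLASSES = {cls.name: cls for cls in (BasicRangeModel, Ns2ScriptedMobility)}

def model_class(name):
    """Return the model class registered under name, or None if unknown."""
    return MODEL_CLASSES.get(name)

print(model_class("basic_range"))  # <class '__main__.BasicRangeModel'>
print(model_class("unknown"))      # None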
+ + :param core.session.Session session: session this manager is tied to + """ + ConfigurableManager.__init__(self) + self.session = session # configurations for basic range, indexed by WLAN node number, are # stored in self.configs # mapping from model names to their classes - self._modelclsmap = {} + self._modelclsmap = { + BasicRangeModel.name: BasicRangeModel, + Ns2ScriptedMobility.name: Ns2ScriptedMobility + } # dummy node objects for tracking position of nodes on other servers self.phys = {} self.physnets = {} self.session.broker.handlers.add(self.physnodehandlelink) - self.register() - def startup(self, nodenums=None): - ''' Session is transitioning from instantiation to runtime state. + def startup(self, node_ids=None): + """ + Session is transitioning from instantiation to runtime state. Instantiate any mobility models that have been configured for a WLAN. - ''' - if nodenums is None: - nodenums = self.configs.keys() - - for nodenum in nodenums: + + :param list node_ids: node ids to startup + :return: nothing + """ + if node_ids is None: + node_ids = self.configs.keys() + + for node_id in node_ids: + logger.info("checking mobility startup for node: %s", node_id) + try: - n = self.session.obj(nodenum) + node = self.session.get_object(node_id) except KeyError: - self.session.warn("Skipping mobility configuration for unknown" - "node %d." % nodenum) + logger.warn("skipping mobility configuration for unknown node %d." % node_id) continue - if nodenum not in self.configs: - self.session.warn("Missing mobility configuration for node " - "%d." % nodenum) + + if node_id not in self.configs: + logger.warn("missing mobility configuration for node %d." % node_id) continue - v = self.configs[nodenum] + + v = self.configs[node_id] + for model in v: try: + logger.info("setting mobility model to node: %s", model) cls = self._modelclsmap[model[0]] + node.setmodel(cls, model[1]) except KeyError: - self.session.warn("Skipping mobility configuration for " - "unknown model '%s'" % model[0]) + logger.warn("skipping mobility configuration for unknown model '%s'" % model[0]) continue - n.setmodel(cls, model[1]) - if self.session.master: - self.installphysnodes(n) - if n.mobility: - self.session.evq.add_event(0.0, n.mobility.startup) - return () + if self.session.master: + self.installphysnodes(node) + + if node.mobility: + self.session.event_loop.add_event(0.0, node.mobility.startup) def reset(self): - ''' Reset all configs. - ''' + """ + Reset all configs. + + :return: nothing + """ self.clearconfig(nodenum=None) - - def setconfig(self, nodenum, conftype, values): - ''' Normal setconfig() with check for run-time updates for WLANs. - ''' - super(MobilityManager, self).setconfig(nodenum, conftype, values) + + def setconfig(self, node_id, config_type, values): + """ + Normal setconfig() with check for run-time updates for WLANs. + + :param int node_id: node id + :param config_type: configuration type + :param values: configuration value + :return: nothing + """ + super(MobilityManager, self).setconfig(node_id, config_type, values) if self.session is None: return - if self.session.getstate() == coreapi.CORE_EVENT_RUNTIME_STATE: + if self.session.state == EventTypes.RUNTIME_STATE.value: try: - n = self.session.obj(nodenum) + node = self.session.get_object(node_id) + node.updatemodel(config_type, values) except KeyError: - self.session.warn("Skipping mobility configuration for unknown" - "node %d." 
% nodenum) - n.updatemodel(conftype, values) + logger.exception("Skipping mobility configuration for unknown node %d.", node_id) + + def handleevent(self, event_data): + """ + Handle an Event Message used to start, stop, or pause + mobility scripts for a given WlanNode. + + :param EventData event_data: event data to handle + :return: nothing + """ + event_type = event_data.event_type + node_id = event_data.node + name = event_data.name - def register(self): - ''' Register models as configurable object(s) with the Session object. - ''' - models = [BasicRangeModel, Ns2ScriptedMobility] - for m in models: - self.session.addconfobj(m._name, m._type, m.configure_mob) - self._modelclsmap[m._name] = m - - def handleevent(self, msg): - ''' Handle an Event Message used to start, stop, or pause - mobility scripts for a given WlanNode. - ''' - eventtype = msg.gettlv(coreapi.CORE_TLV_EVENT_TYPE) - nodenum = msg.gettlv(coreapi.CORE_TLV_EVENT_NODE) - name = msg.gettlv(coreapi.CORE_TLV_EVENT_NAME) try: - node = self.session.obj(nodenum) + node = self.session.get_object(node_id) except KeyError: - self.session.warn("Ignoring event for model '%s', unknown node " \ - "'%s'" % (name, nodenum)) + logger.exception("Ignoring event for model '%s', unknown node '%s'", name, node_id) return - + # name is e.g. "mobility:ns2script" models = name[9:].split(',') - for m in models: + for model in models: try: - cls = self._modelclsmap[m] + cls = self._modelclsmap[model] except KeyError: - self.session.warn("Ignoring event for unknown model '%s'" % m) + logger.warn("Ignoring event for unknown model '%s'", model) continue - _name = "waypoint" - if cls._type == coreapi.CORE_TLV_REG_WIRELESS: - model = node.mobility - elif cls._type == coreapi.CORE_TLV_REG_MOBILITY: + + if cls.config_type in [RegisterTlvs.WIRELESS.value, RegisterTlvs.MOBILITY.value]: model = node.mobility else: continue + if model is None: - self.session.warn("Ignoring event, %s has no model" % node.name) + logger.warn("Ignoring event, %s has no model", node.name) continue - if cls._name != model._name: - self.session.warn("Ignoring event for %s wrong model %s,%s" % \ - (node.name, cls._name, model._name)) + + if cls.name != model.name: + logger.warn("Ignoring event for %s wrong model %s,%s", node.name, cls.name, model.name) continue - - if eventtype == coreapi.CORE_EVENT_STOP or \ - eventtype == coreapi.CORE_EVENT_RESTART: + + if event_type == EventTypes.STOP.value or event_type == EventTypes.RESTART.value: model.stop(move_initial=True) - if eventtype == coreapi.CORE_EVENT_START or \ - eventtype == coreapi.CORE_EVENT_RESTART: + if event_type == EventTypes.START.value or event_type == EventTypes.RESTART.value: model.start() - if eventtype == coreapi.CORE_EVENT_PAUSE: + if event_type == EventTypes.PAUSE.value: model.pause() - + def sendevent(self, model): - ''' Send an event message on behalf of a mobility model. - This communicates the current and end (max) times to the GUI. - ''' + """ + Send an event message on behalf of a mobility model. + This communicates the current and end (max) times to the GUI. 
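handleevent() above expects event names of the form mobility:<model>[,<model>...] and slices off the nine-character "mobility:" prefix before splitting on commas. A standalone sketch of that parsing (not part of the patch; parse_event_models is a hypothetical helper):

def parse_event_models(name):
    """Return the model names carried in a mobility event name."""
    prefix = "mobility:"
    if not name.startswith(prefix):
        return []
    return name[len(prefix):].split(',')

print(parse_event_models("mobility:ns2script"))              # ['ns2script']
print(parse_event_models("mobility:basic_range,ns2script"))  # ['basic_range', 'ns2script']
print(parse_event_models("session"))                         # []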
+ + :param WayPointMobility model: mobility model to send event for + :return: nothing + """ + event_type = EventTypes.NONE.value if model.state == model.STATE_STOPPED: - eventtype = coreapi.CORE_EVENT_STOP + event_type = EventTypes.STOP.value elif model.state == model.STATE_RUNNING: - eventtype = coreapi.CORE_EVENT_START + event_type = EventTypes.START.value elif model.state == model.STATE_PAUSED: - eventtype = coreapi.CORE_EVENT_PAUSE + event_type = EventTypes.PAUSE.value + data = "start=%d" % int(model.lasttime - model.timezero) data += " end=%d" % int(model.endtime) - tlvdata = "" - tlvdata += coreapi.CoreEventTlv.pack(coreapi.CORE_TLV_EVENT_NODE, - model.objid) - tlvdata += coreapi.CoreEventTlv.pack(coreapi.CORE_TLV_EVENT_TYPE, - eventtype) - tlvdata += coreapi.CoreEventTlv.pack(coreapi.CORE_TLV_EVENT_NAME, - "mobility:%s" % model._name) - tlvdata += coreapi.CoreEventTlv.pack(coreapi.CORE_TLV_EVENT_DATA, - data) - tlvdata += coreapi.CoreEventTlv.pack(coreapi.CORE_TLV_EVENT_TIME, - "%s" % time.time()) - msg = coreapi.CoreEventMessage.pack(0, tlvdata) - try: - self.session.broadcastraw(None, msg) - except Exception, e: - self.warn("Error sending Event Message: %s" % e) - + + event_data = EventData( + node=model.object_id, + event_type=event_type, + name="mobility:%s" % model.name, + data=data, + time="%s" % time.time() + ) + + self.session.broadcast_event(event_data) + def updatewlans(self, moved, moved_netifs): - ''' A mobility script has caused nodes in the 'moved' list to move. - Update every WlanNode. This saves range calculations if the model - were to recalculate for each individual node movement. - ''' + """ + A mobility script has caused nodes in the 'moved' list to move. + Update every WlanNode. This saves range calculations if the model + were to recalculate for each individual node movement. + + :param list moved: moved nodes + :param list moved_netifs: moved network interfaces + :return: nothing + """ for nodenum in self.configs: try: - n = self.session.obj(nodenum) + n = self.session.get_object(nodenum) except KeyError: + logger.exception("error getting session object") continue if n.model: n.model.update(moved, moved_netifs) - + def addphys(self, netnum, node): - ''' Keep track of PhysicalNodes and which network they belong to. - ''' + """ + Keep track of PhysicalNodes and which network they belong to. + + :param int netnum: network number + :param core.coreobj.PyCoreNode node: node to add physical network to + :return: nothing + """ nodenum = node.objid self.phys[nodenum] = node if netnum not in self.physnets: - self.physnets[netnum] = [nodenum,] + self.physnets[netnum] = [nodenum, ] else: self.physnets[netnum].append(nodenum) - - def physnodehandlelink(self, msg): - ''' Broker handler. Snoop Link add messages to get - node numbers of PhyiscalNodes and their nets. - Physical nodes exist only on other servers, but a shadow object is - created here for tracking node position. - ''' - if msg.msgtype == coreapi.CORE_API_LINK_MSG and \ - msg.flags & coreapi.CORE_API_ADD_FLAG: - nn = msg.nodenumbers() + + # TODO: remove need for handling old style message + def physnodehandlelink(self, message): + """ + Broker handler. Snoop Link add messages to get + node numbers of PhyiscalNodes and their nets. + Physical nodes exist only on other servers, but a shadow object is + created here for tracking node position. 
+ + :param message: link message to handle + :return: nothing + """ + if message.message_type == MessageTypes.LINK.value and message.flags & MessageFlags.ADD.value: + nn = message.node_numbers() # first node is always link layer node in Link add message - if nn[0] not in self.session.broker.nets: + if nn[0] not in self.session.broker.network_nodes: return - if nn[1] in self.session.broker.phys: + if nn[1] in self.session.broker.physical_nodes: # record the fact that this PhysicalNode is linked to a net dummy = PyCoreNode(session=self.session, objid=nn[1], - name="n%d" % nn[1], start=False) + name="n%d" % nn[1], start=False) self.addphys(nn[0], dummy) - - def physnodeupdateposition(self, msg): - ''' Snoop node messages belonging to physical nodes. The dummy object + + # TODO: remove need to handling old style messages + def physnodeupdateposition(self, message): + """ + Snoop node messages belonging to physical nodes. The dummy object in self.phys[] records the node position. - ''' - nodenum = msg.nodenumbers()[0] + + :param message: message to handle + :return: nothing + """ + nodenum = message.node_numbers()[0] try: dummy = self.phys[nodenum] - nodexpos = msg.gettlv(coreapi.CORE_TLV_NODE_XPOS) - nodeypos = msg.gettlv(coreapi.CORE_TLV_NODE_YPOS) + nodexpos = message.get_tlv(NodeTlvs.X_POSITION.value) + nodeypos = message.get_tlv(NodeTlvs.Y_POSITION.value) dummy.setposition(nodexpos, nodeypos, None) except KeyError: - pass - + logger.exception("error retrieving physical node: %s", nodenum) + def installphysnodes(self, net): - ''' After installing a mobility model on a net, include any physical + """ + After installing a mobility model on a net, include any physical nodes that we have recorded. Use the GreTap tunnel to the physical node as the node's interface. - ''' + + :param net: network to install + :return: nothing + """ try: nodenums = self.physnets[net.objid] except KeyError: + logger.exception("error retriving physical net object") return + for nodenum in nodenums: node = self.phys[nodenum] + # TODO: fix this bad logic, relating to depending on a break to get a valid server for server in self.session.broker.getserversbynode(nodenum): break - netif = self.session.broker.gettunnel(net.objid, - IPAddr.toint(server.host)) + netif = self.session.broker.gettunnel(net.objid, IpAddress.to_int(server.host)) node.addnetif(netif, 0) netif.node = node - (x,y,z) = netif.node.position.get() + x, y, z = netif.node.position.get() netif.poshook(netif, x, y, z) class WirelessModel(Configurable): - ''' Base class used by EMANE models and the basic range model. + """ + Base class used by EMANE models and the basic range model. Used for managing arbitrary configuration parameters. - ''' - _type = coreapi.CORE_TLV_REG_WIRELESS - _bitmap = None - _positioncallback = None + """ + config_type = RegisterTlvs.WIRELESS.value + bitmap = None + position_callback = None - def __init__(self, session, objid, verbose = False, values = None): - Configurable.__init__(self, session, objid) - self.verbose = verbose + def __init__(self, session, object_id, values=None): + """ + Create a WirelessModel instance. + + :param core.session.Session session: core session we are tied to + :param int object_id: object id + :param values: values + """ + Configurable.__init__(self, session, object_id) # 'values' can be retrieved from a ConfigurableManager, or used here # during initialization, depending on the model. 
- - def tolinkmsgs(self, flags): - ''' May be used if the model can populate the GUI with wireless (green) - link lines. - ''' + + def all_link_data(self, flags): + """ + May be used if the model can populate the GUI with wireless (green) + link lines. + + :param flags: link data flags + :return: link data + :rtype: list + """ return [] - + def update(self, moved, moved_netifs): + """ + Update this wireless model. + + :param bool moved: flag is it was moved + :param list moved_netifs: moved network interfaces + :return: nothing + """ raise NotImplementedError - + def updateconfig(self, values): - ''' For run-time updates of model config. - Returns True when self._positioncallback() and self.setlinkparams() - should be invoked. - ''' + """ + For run-time updates of model config. Returns True when position callback and set link + parameters should be invoked. + + :param values: value to update + :return: False + :rtype: bool + """ return False class BasicRangeModel(WirelessModel): - ''' Basic Range wireless model, calculates range between nodes and links + """ + Basic Range wireless model, calculates range between nodes and links and unlinks nodes based on this distance. This was formerly done from the GUI. - ''' - _name = "basic_range" + """ + name = "basic_range" # configuration parameters are # ( 'name', 'type', 'default', 'possible-value-list', 'caption') - _confmatrix = [ - ("range", coreapi.CONF_DATA_TYPE_UINT32, '275', + config_matrix = [ + ("range", ConfigDataTypes.UINT32.value, '275', '', 'wireless range (pixels)'), - ("bandwidth", coreapi.CONF_DATA_TYPE_UINT32, '54000', + ("bandwidth", ConfigDataTypes.UINT32.value, '54000', '', 'bandwidth (bps)'), - ("jitter", coreapi.CONF_DATA_TYPE_FLOAT, '0.0', + ("jitter", ConfigDataTypes.FLOAT.value, '0.0', '', 'transmission jitter (usec)'), - ("delay", coreapi.CONF_DATA_TYPE_FLOAT, '5000.0', + ("delay", ConfigDataTypes.FLOAT.value, '5000.0', '', 'transmission delay (usec)'), - ("error", coreapi.CONF_DATA_TYPE_FLOAT, '0.0', + ("error", ConfigDataTypes.FLOAT.value, '0.0', '', 'error rate (%)'), ] # value groupings - _confgroups = "Basic Range Parameters:1-%d" % len(_confmatrix) - - def __init__(self, session, objid, verbose = False, values=None): - ''' Range model is only instantiated during runtime. - ''' - super(BasicRangeModel, self).__init__(session = session, objid = objid, - verbose = verbose) - self.wlan = session.obj(objid) + config_groups = "Basic Range Parameters:1-%d" % len(config_matrix) + + def __init__(self, session, object_id, values=None): + """ + Create a BasicRangeModel instance. 
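The configuration matrix used by BasicRangeModel is a list of (name, type, default, possible-values, caption) rows. A standalone sketch of pulling the defaults out of such a matrix and reading one value back by name (the helper names here are illustrative, not the Configurable API):

    # (name, type, default, possible-value-list, caption) rows, as in config_matrix
    matrix = [
        ("range", "uint32", "275", "", "wireless range (pixels)"),
        ("bandwidth", "uint32", "54000", "", "bandwidth (bps)"),
        ("delay", "float", "5000.0", "", "transmission delay (usec)"),
    ]

    def default_values(matrix):
        """Return the default values in matrix order."""
        return [row[2] for row in matrix]

    def value_of(name, values, matrix):
        """Look up a value by its parameter name."""
        names = [row[0] for row in matrix]
        return values[names.index(name)]

    values = default_values(matrix)
    assert float(value_of("range", values, matrix)) == 275.0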
+ + :param core.session.Session session: related core session + :param int object_id: object id + :param values: values + """ + super(BasicRangeModel, self).__init__(session=session, object_id=object_id) + self.wlan = session.get_object(object_id) self._netifs = {} self._netifslock = threading.Lock() if values is None: - values = session.mobility.getconfig(objid, self._name, - self.getdefaultvalues())[1] - self.range = float(self.valueof("range", values)) - if self.verbose: - self.session.info("Basic range model configured for WLAN %d using" \ - " range %d" % (objid, self.range)) + values = session.mobility.getconfig(object_id, self.name, self.getdefaultvalues())[1] + self.range = float(self.valueof("range", values)) + logger.info("Basic range model configured for WLAN %d using range %d", object_id, self.range) self.valuestolinkparams(values) + # link parameters + self.bw = None + self.delay = None + self.loss = None + self.jitter = None + def valuestolinkparams(self, values): + """ + Values to convert to link parameters. + + :param values: values to convert + :return: nothing + """ self.bw = int(self.valueof("bandwidth", values)) if self.bw == 0.0: self.bw = None @@ -335,32 +424,52 @@ class BasicRangeModel(WirelessModel): self.jitter = None @classmethod - def configure_mob(cls, session, msg): - ''' Handle configuration messages for setting up a model. + def configure_mob(cls, session, config_data): + """ + Handle configuration messages for setting up a model. Pass the MobilityManager object as the manager object. - ''' - return cls.configure(session.mobility, msg) - + + :param core.session.Session session: current session calling function + :param core.conf.ConfigData config_data: configuration data for carrying out a configuration + :return: configuration data + :rtype: core.data.ConfigData + """ + return cls.configure(session.mobility, config_data) + def setlinkparams(self): - ''' Apply link parameters to all interfaces. This is invoked from + """ + Apply link parameters to all interfaces. This is invoked from WlanNode.setmodel() after the position callback has been set. - ''' + """ with self._netifslock: for netif in self._netifs: self.wlan.linkconfig(netif, bw=self.bw, delay=self.delay, - loss=self.loss, duplicate=None, - jitter=self.jitter) + loss=self.loss, duplicate=None, + jitter=self.jitter) def get_position(self, netif): + """ + Retrieve network interface position. + + :param netif: network interface position to retrieve + :return: network interface position + """ with self._netifslock: return self._netifs[netif] - def set_position(self, netif, x = None, y = None, z = None): - ''' A node has moved; given an interface, a new (x,y,z) position has + def set_position(self, netif, x=None, y=None, z=None): + """ + A node has moved; given an interface, a new (x,y,z) position has been set; calculate the new distance between other nodes and link or unlink node pairs based on the configured range. 
- ''' - #print "set_position(%s, x=%s, y=%s, z=%s)" % (netif.localname, x, y, z) + + :param netif: network interface to set position for + :param x: x position + :param y: y position + :param z: z position + :return: nothing + """ + # print "set_position(%s, x=%s, y=%s, z=%s)" % (netif.localname, x, y, z) self._netifslock.acquire() self._netifs[netif] = (x, y, z) if x is None or y is None: @@ -369,15 +478,20 @@ class BasicRangeModel(WirelessModel): for netif2 in self._netifs: self.calclink(netif, netif2) self._netifslock.release() - - _positioncallback = set_position + + position_callback = set_position def update(self, moved, moved_netifs): - ''' Node positions have changed without recalc. Update positions from + """ + Node positions have changed without recalc. Update positions from node.position, then re-calculate links for those that have moved. Assumes bidirectional links, with one calculation per node pair, where one of the nodes has moved. - ''' + + :param bool moved: flag is it was moved + :param list moved_netifs: moved network interfaces + :return: nothing + """ with self._netifslock: while len(moved_netifs): netif = moved_netifs.pop() @@ -390,130 +504,188 @@ class BasicRangeModel(WirelessModel): self.calclink(netif, netif2) def calclink(self, netif, netif2): - ''' Helper used by set_position() and update() to - calculate distance between two interfaces and perform - linking/unlinking. Sends link/unlink messages and updates the - WlanNode's linked dict. - ''' + """ + Helper used by set_position() and update() to + calculate distance between two interfaces and perform + linking/unlinking. Sends link/unlink messages and updates the + WlanNode's linked dict. + + :param netif: interface one + :param netif2: interface two + :return: nothing + """ if netif == netif2: return - try: - (x, y, z) = self._netifs[netif] - (x2, y2, z2) = self._netifs[netif2] - except KeyError: - return - if x2 is None or y2 is None: - return - - d = self.calcdistance( (x,y,z), (x2,y2,z2) ) - # ordering is important, to keep the wlan._linked dict organized - a = min(netif, netif2) - b = max(netif, netif2) - try: - self.wlan._linked_lock.acquire() - linked = self.wlan.linked(a, b) - except KeyError: - return - finally: - self.wlan._linked_lock.release() - if d > self.range: - if linked: - self.wlan.unlink(a, b) - self.sendlinkmsg(a, b, unlink=True) - else: - if not linked: - self.wlan.link(a, b) - self.sendlinkmsg(a, b) + try: + x, y, z = self._netifs[netif] + x2, y2, z2 = self._netifs[netif2] + + if x2 is None or y2 is None: + return + + d = self.calcdistance((x, y, z), (x2, y2, z2)) + + # ordering is important, to keep the wlan._linked dict organized + a = min(netif, netif2) + b = max(netif, netif2) + + with self.wlan._linked_lock: + linked = self.wlan.linked(a, b) + + logger.info("checking if link distance is out of range: %s > %s", d, self.range) + if d > self.range: + if linked: + self.wlan.unlink(a, b) + self.sendlinkmsg(a, b, unlink=True) + else: + if not linked: + self.wlan.link(a, b) + self.sendlinkmsg(a, b) + except KeyError: + logger.exception("error getting interfaces during calclinkS") @staticmethod def calcdistance(p1, p2): - ''' Calculate the distance between two three-dimensional points. - ''' + """ + Calculate the distance between two three-dimensional points. 
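The link/unlink decision in calclink() reduces to a Euclidean distance between two (x, y, z) points compared against the configured range; a self-contained version of that check:

    import math

    def distance(p1, p2):
        """Euclidean distance between two 3-D points; z may be None (2-D case)."""
        a = p1[0] - p2[0]
        b = p1[1] - p2[1]
        c = 0
        if p1[2] is not None and p2[2] is not None:
            c = p1[2] - p2[2]
        return math.hypot(math.hypot(a, b), c)

    def within_range(p1, p2, wireless_range):
        """True when the two points are close enough to remain linked."""
        return distance(p1, p2) <= wireless_range

    assert within_range((0, 0, None), (100, 0, None), 275)
    assert not within_range((0, 0, 0), (300, 0, 0), 275)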
+ + :param tuple p1: point one + :param tuple p2: point two + :return: distance petween the points + :rtype: float + """ a = p1[0] - p2[0] b = p1[1] - p2[1] c = 0 if p1[2] is not None and p2[2] is not None: c = p1[2] - p2[2] return math.hypot(math.hypot(a, b), c) - - def updateconfig(self, values): - ''' Configuration has changed during runtime. - MobilityManager.setconfig() -> WlanNode.updatemodel() -> - WirelessModel.updateconfig() - ''' - self.valuestolinkparams(values) - self.range = float(self.valueof("range", values)) - return True - - def linkmsg(self, netif, netif2, flags): - ''' Create a wireless link/unlink API message. - ''' - n1 = netif.localname.split('.')[0] - n2 = netif2.localname.split('.')[0] - tlvdata = coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_N1NUMBER, - netif.node.objid) - tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_N2NUMBER, - netif2.node.objid) - tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_NETID, - self.wlan.objid) - #tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_IF1NUM, - # netif.index) - #tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_IF2NUM, - # netif2.index) - tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_TYPE, - coreapi.CORE_LINK_WIRELESS) - return coreapi.CoreLinkMessage.pack(flags, tlvdata) - - def sendlinkmsg(self, netif, netif2, unlink=False): - ''' Send a wireless link/unlink API message to the GUI. - ''' - if unlink: - flags = coreapi.CORE_API_DEL_FLAG - else: - flags = coreapi.CORE_API_ADD_FLAG - msg = self.linkmsg(netif, netif2, flags) - self.session.broadcastraw(src=None, data=msg) - self.session.sdt.updatelink(netif.node.objid, netif2.node.objid, flags, - wireless=True) - def tolinkmsgs(self, flags): - ''' Return a list of wireless link messages for when the GUI reconnects. - ''' - r = [] + def updateconfig(self, values): + """ + Configuration has changed during runtime. + MobilityManager.setconfig() -> WlanNode.updatemodel() -> + WirelessModel.updateconfig() + + :param values: values to update configuration + :return: was update successful + :rtype: bool + """ + self.valuestolinkparams(values) + self.range = float(self.valueof("range", values)) + return True + + def create_link_data(self, interface1, interface2, message_type): + """ + Create a wireless link/unlink data message. + + :param core.coreobj.PyCoreNetIf interface1: interface one + :param core.coreobj.PyCoreNetIf interface2: interface two + :param message_type: link message type + :return: link data + :rtype: LinkData + """ + + return LinkData( + message_type=message_type, + node1_id=interface1.node.objid, + node2_id=interface2.node.objid, + network_id=self.wlan.objid, + link_type=LinkTypes.WIRELESS.value + ) + + def sendlinkmsg(self, netif, netif2, unlink=False): + """ + Send a wireless link/unlink API message to the GUI. + + :param core.coreobj.PyCoreNetIf netif: interface one + :param core.coreobj.PyCoreNetIf netif2: interface two + :param bool unlink: unlink or not + :return: nothing + """ + if unlink: + message_type = MessageFlags.DELETE.value + else: + message_type = MessageFlags.ADD.value + + link_data = self.create_link_data(netif, netif2, message_type) + self.session.broadcast_link(link_data) + + # TODO: account for SDT wanting to listen as well + # self.session.sdt.updatelink(netif.node.objid, netif2.node.objid, flags, wireless=True) + + def all_link_data(self, flags): + """ + Return a list of wireless link messages for when the GUI reconnects. 
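all_link_data() walks the WLAN's nested linked dict to report the pairs that are currently linked; the traversal can be sketched with plain data in place of interface objects:

    # linked[a][b] is True when the pair (a, b) is currently linked
    linked = {
        "eth0": {"eth1": True, "eth2": False},
        "eth1": {"eth2": True},
    }

    def linked_pairs(linked):
        """Return all (a, b) pairs whose link state is True."""
        pairs = []
        for a in linked:
            for b in linked[a]:
                if linked[a][b]:
                    pairs.append((a, b))
        return pairs

    assert sorted(linked_pairs(linked)) == [("eth0", "eth1"), ("eth1", "eth2")]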
+ + :param flags: link flags + :return: all link data + :rtype: list + """ + all_links = [] with self.wlan._linked_lock: for a in self.wlan._linked: for b in self.wlan._linked[a]: if self.wlan._linked[a][b]: - r.append(self.linkmsg(a, b, flags)) - return r + all_links.append(self.create_link_data(a, b, flags)) + return all_links + + +class WayPoint(object): + """ + Maintains information regarding waypoints. + """ + + def __init__(self, time, nodenum, coords, speed): + """ + Creates a WayPoint instance. + + :param time: waypoint time + :param int nodenum: node id + :param coords: waypoint coordinates + :param speed: waypoint speed + """ + self.time = time + self.nodenum = nodenum + self.coords = coords + self.speed = speed + + def __cmp__(self, other): + """ + Custom comparison method for waypoints. + + :param WayPoint other: waypoint to compare to + :return: the comparison result against the other waypoint + :rtype: int + """ + tmp = cmp(self.time, other.time) + if tmp == 0: + tmp = cmp(self.nodenum, other.nodenum) + return tmp + class WayPointMobility(WirelessModel): - ''' Abstract class for mobility models that set node waypoints. - ''' - _name = "waypoint" - _type = coreapi.CORE_TLV_REG_MOBILITY - + """ + Abstract class for mobility models that set node waypoints. + """ + name = "waypoint" + config_type = RegisterTlvs.MOBILITY.value + STATE_STOPPED = 0 STATE_RUNNING = 1 STATE_PAUSED = 2 - - class WayPoint(object): - def __init__(self, time, nodenum, coords, speed): - self.time = time - self.nodenum = nodenum - self.coords = coords - self.speed = speed - - def __cmp__(self, other): - tmp = cmp(self.time, other.time) - if tmp == 0: - tmp = cmp(self.nodenum, other.nodenum) - return tmp - def __init__(self, session, objid, verbose = False, values = None): - super(WayPointMobility, self).__init__(session = session, objid = objid, - verbose = verbose, values = values) + def __init__(self, session, object_id, values=None): + """ + Create a WayPointMobility instance. + + :param core.session.Session session: CORE session instance + :param int object_id: object id + :param values: values for this model + :return: + """ + super(WayPointMobility, self).__init__(session=session, object_id=object_id, values=values) self.state = self.STATE_STOPPED self.queue = [] self.queue_copy = [] @@ -521,34 +693,37 @@ class WayPointMobility(WirelessModel): self.initial = {} self.lasttime = None self.endtime = None - self.wlan = session.obj(objid) + self.wlan = session.get_object(object_id) # these are really set in child class via confmatrix self.loop = False self.refresh_ms = 50 # flag whether to stop scheduling when queue is empty # (ns-3 sets this to False as new waypoints may be added from trace) self.empty_queue_stop = True - + def runround(self): - ''' Advance script time and move nodes. - ''' + """ + Advance script time and move nodes. 
+ + :return: nothing + """ if self.state != self.STATE_RUNNING: - return + return t = self.lasttime self.lasttime = time.time() now = self.lasttime - self.timezero dt = self.lasttime - t - #print "runround(now=%.2f, dt=%.2f)" % (now, dt) - + # print "runround(now=%.2f, dt=%.2f)" % (now, dt) + # keep current waypoints up-to-date self.updatepoints(now) - + if not len(self.points): if len(self.queue): # more future waypoints, allow time for self.lasttime update nexttime = self.queue[0].time - now if nexttime > (0.001 * self.refresh_ms): - nexttime -= (0.001 * self.refresh_ms) + nexttime -= 0.001 * self.refresh_ms self.session.evq.add_event(nexttime, self.runround) return else: @@ -563,7 +738,7 @@ class WayPointMobility(WirelessModel): # prevent busy loop return return self.run() - + # only move netifs attached to self.wlan, or all nodenum in script? moved = [] moved_netifs = [] @@ -572,15 +747,20 @@ class WayPointMobility(WirelessModel): if self.movenode(node, dt): moved.append(node) moved_netifs.append(netif) - + # calculate all ranges after moving nodes; this saves calculations - #self.wlan.model.update(moved) + # self.wlan.model.update(moved) self.session.mobility.updatewlans(moved, moved_netifs) - + # TODO: check session state self.session.evq.add_event(0.001 * self.refresh_ms, self.runround) def run(self): + """ + Run the waypoint mobility scenario. + + :return: nothing + """ self.timezero = time.time() self.lasttime = self.timezero - (0.001 * self.refresh_ms) self.movenodesinitial() @@ -588,9 +768,15 @@ class WayPointMobility(WirelessModel): self.session.mobility.sendevent(self) def movenode(self, node, dt): - ''' Calculate next node location and update its coordinates. - Returns True if the node's position has changed. - ''' + """ + Calculate next node location and update its coordinates. + Returns True if the node's position has changed. + + :param core.netns.nodes.CoreNode node: node to move + :param dt: move factor + :return: True if node was moved, False otherwise + :rtype: bool + """ if node.objid not in self.points: return False x1, y1, z1 = node.getposition() @@ -627,17 +813,20 @@ class WayPointMobility(WirelessModel): self.endtime = self.lasttime - self.timezero del self.points[node.objid] return False - #print "node %s dx,dy= <%s, %d>" % (node.name, dx, dy) + # print "node %s dx,dy= <%s, %d>" % (node.name, dx, dy) if (x1 + dx) < 0.0: dx = 0.0 - x1 if (y1 + dy) < 0.0: dy = 0.0 - y1 self.setnodeposition(node, x1 + dx, y1 + dy, z1) return True - + def movenodesinitial(self): - ''' Move nodes to their initial positions. Then calculate the ranges. - ''' + """ + Move nodes to their initial positions. Then calculate the ranges. + + :return: nothing + """ moved = [] moved_netifs = [] for netif in self.wlan.netifs(): @@ -648,69 +837,110 @@ class WayPointMobility(WirelessModel): self.setnodeposition(node, x, y, z) moved.append(node) moved_netifs.append(netif) - #self.wlan.model.update(moved) + # self.wlan.model.update(moved) self.session.mobility.updatewlans(moved, moved_netifs) def addwaypoint(self, time, nodenum, x, y, z, speed): - ''' Waypoints are pushed to a heapq, sorted by time. - ''' - #print "addwaypoint: %s %s %s,%s,%s %s" % (time, nodenum, x, y, z, speed) - wp = self.WayPoint(time, nodenum, coords=(x,y,z), speed=speed) + """ + Waypoints are pushed to a heapq, sorted by time. 
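Waypoint scheduling is a priority queue keyed on time. The sketch below uses plain (time, nodenum, coords, speed) tuples, which heapq orders the same way as WayPoint.__cmp__ (time first, then node number), and shows how due waypoints are moved into a per-node dict:

    import heapq

    queue = []
    heapq.heappush(queue, (5.0, 2, (100.0, 50.0, None), 2.5))
    heapq.heappush(queue, (1.0, 1, (10.0, 10.0, None), 1.0))
    heapq.heappush(queue, (1.0, 3, (20.0, 20.0, None), 1.0))

    def pop_due(queue, now):
        """Move waypoints whose time has come into a per-node dict."""
        points = {}
        while queue and queue[0][0] <= now:
            wp = heapq.heappop(queue)
            points[wp[1]] = wp
        return points

    points = pop_due(queue, now=2.0)
    assert sorted(points) == [1, 3] and queue[0][0] == 5.0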
+ + :param time: waypoint time + :param int nodenum: node id + :param x: x position + :param y: y position + :param z: z position + :param speed: speed + :return: nothing + """ + # print "addwaypoint: %s %s %s,%s,%s %s" % (time, nodenum, x, y, z, speed) + wp = WayPoint(time, nodenum, coords=(x, y, z), speed=speed) heapq.heappush(self.queue, wp) - + def addinitial(self, nodenum, x, y, z): - ''' Record initial position in a dict. - ''' - wp = self.WayPoint(0, nodenum, coords=(x,y,z), speed=0) + """ + Record initial position in a dict. + + :param int nodenum: node id + :param x: x position + :param y: y position + :param z: z position + :return: nothing + """ + wp = WayPoint(0, nodenum, coords=(x, y, z), speed=0) self.initial[nodenum] = wp - + def updatepoints(self, now): - ''' Move items from self.queue to self.points when their time has come. - ''' + """ + Move items from self.queue to self.points when their time has come. + + :param int now: current timestamp + :return: nothing + """ while len(self.queue): if self.queue[0].time > now: - break + break wp = heapq.heappop(self.queue) self.points[wp.nodenum] = wp - + def copywaypoints(self): - ''' Store backup copy of waypoints for looping and stopping. - ''' + """ + Store backup copy of waypoints for looping and stopping. + + :return: nothing + """ self.queue_copy = list(self.queue) - + def loopwaypoints(self): - ''' Restore backup copy of waypoints when looping. - ''' + """ + Restore backup copy of waypoints when looping. + + :return: nothing + """ self.queue = list(self.queue_copy) return self.loop def setnodeposition(self, node, x, y, z): - ''' Helper to move a node, notify any GUI (connected session handlers), - without invoking the interface poshook callback that may perform - range calculation. - ''' + """ + Helper to move a node, notify any GUI (connected session handlers), + without invoking the interface poshook callback that may perform + range calculation. + + :param core.netns.nodes.CoreNode node: node to set position for + :param x: x position + :param y: y position + :param z: z position + :return: nothing + """ # this would cause PyCoreNetIf.poshook() callback (range calculation) - #node.setposition(x, y, z) + # node.setposition(x, y, z) node.position.set(x, y, z) - msg = node.tonodemsg(flags=0) - self.session.broadcastraw(None, msg) - self.session.sdt.updatenode(node.objid, flags=0, x=x, y=y, z=z) - + node_data = node.data(message_type=0) + self.session.broadcast_node(node_data) + + # TODO: determine how to add handler for SDT + # self.session.sdt.updatenode(node.objid, flags=0, x=x, y=y, z=z) + def setendtime(self): - ''' Set self.endtime to the time of the last waypoint in the queue of - waypoints. This is just an estimate. The endtime will later be - adjusted, after one round of the script has run, to be the time - that the last moving node has reached its final waypoint. - ''' + """ + Set self.endtime to the time of the last waypoint in the queue of + waypoints. This is just an estimate. The endtime will later be + adjusted, after one round of the script has run, to be the time + that the last moving node has reached its final waypoint. + + :return: nothing + """ try: self.endtime = self.queue[-1].time except IndexError: self.endtime = 0 def start(self): - ''' Run the script from the beginning or unpause from where it - was before. - ''' + """ + Run the script from the beginning or unpause from where it + was before. 
+ + :return: nothing + """ laststate = self.state self.state = self.STATE_RUNNING if laststate == self.STATE_STOPPED or laststate == self.STATE_RUNNING: @@ -723,10 +953,14 @@ class WayPointMobility(WirelessModel): self.timezero += now - self.lasttime self.lasttime = now - (0.001 * self.refresh_ms) self.runround() - + def stop(self, move_initial=True): - ''' Stop the script and move nodes to initial positions. - ''' + """ + Stop the script and move nodes to initial positions. + + :param bool move_initial: flag to check if we should move nodes to initial position + :return: nothing + """ self.state = self.STATE_STOPPED self.loopwaypoints() self.timezero = 0 @@ -734,86 +968,96 @@ class WayPointMobility(WirelessModel): if move_initial: self.movenodesinitial() self.session.mobility.sendevent(self) - + def pause(self): - ''' Pause the script; pause time is stored to self.lasttime. - ''' + """ + Pause the script; pause time is stored to self.lasttime. + + :return: nothing + """ self.state = self.STATE_PAUSED self.lasttime = time.time() class Ns2ScriptedMobility(WayPointMobility): - ''' Handles the ns-2 script format, generated by scengen/setdest or - BonnMotion. - ''' - _name = "ns2script" + """ + Handles the ns-2 script format, generated by scengen/setdest or + BonnMotion. + """ + name = "ns2script" - _confmatrix = [ - ("file", coreapi.CONF_DATA_TYPE_STRING, '', + config_matrix = [ + ("file", ConfigDataTypes.STRING.value, '', '', 'mobility script file'), - ("refresh_ms", coreapi.CONF_DATA_TYPE_UINT32, '50', + ("refresh_ms", ConfigDataTypes.UINT32.value, '50', '', 'refresh time (ms)'), - ("loop", coreapi.CONF_DATA_TYPE_BOOL, '1', + ("loop", ConfigDataTypes.BOOL.value, '1', 'On,Off', 'loop'), - ("autostart", coreapi.CONF_DATA_TYPE_STRING, '', + ("autostart", ConfigDataTypes.STRING.value, '', '', 'auto-start seconds (0.0 for runtime)'), - ("map", coreapi.CONF_DATA_TYPE_STRING, '', + ("map", ConfigDataTypes.STRING.value, '', '', 'node mapping (optional, e.g. 0:1,1:2,2:3)'), - ("script_start", coreapi.CONF_DATA_TYPE_STRING, '', + ("script_start", ConfigDataTypes.STRING.value, '', '', 'script file to run upon start'), - ("script_pause", coreapi.CONF_DATA_TYPE_STRING, '', + ("script_pause", ConfigDataTypes.STRING.value, '', '', 'script file to run upon pause'), - ("script_stop", coreapi.CONF_DATA_TYPE_STRING, '', + ("script_stop", ConfigDataTypes.STRING.value, '', '', 'script file to run upon stop'), ] - _confgroups = "ns-2 Mobility Script Parameters:1-%d" % len(_confmatrix) + config_groups = "ns-2 Mobility Script Parameters:1-%d" % len(config_matrix) - def __init__(self, session, objid, verbose = False, values = None): - ''' - ''' - super(Ns2ScriptedMobility, self).__init__(session = session, objid = objid, - verbose = verbose, values = values) + def __init__(self, session, object_id, values=None): + """ + Creates a Ns2ScriptedMobility instance. 
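Unpausing works by shifting the script's zero point forward by the length of the pause, so elapsed script time excludes the paused interval. A compressed, illustrative clock with only that bookkeeping (a simplification of start()/pause(), not the model itself):

    import time

    class ScriptClock(object):
        """Minimal pause-aware script clock, for illustration only."""

        def __init__(self):
            self.timezero = time.time()
            self.lasttime = self.timezero

        def pause(self):
            # remember when the pause began
            self.lasttime = time.time()

        def resume(self):
            now = time.time()
            # shift the origin so the paused interval does not count as script time
            self.timezero += now - self.lasttime
            self.lasttime = now

        def elapsed(self):
            return time.time() - self.timezero

    clock = ScriptClock()
    clock.pause()
    clock.resume()
    # elapsed() now reflects only un-paused wall time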
+ + :param core.session.Session session: CORE session instance + :param int object_id: object id + :param values: values + """ + super(Ns2ScriptedMobility, self).__init__(session=session, object_id=object_id, values=values) self._netifs = {} self._netifslock = threading.Lock() if values is None: - values = session.mobility.getconfig(objid, self._name, - self.getdefaultvalues())[1] - self.file = self.valueof("file", values) + values = session.mobility.getconfig(object_id, self.name, self.getdefaultvalues())[1] + self.file = self.valueof("file", values) self.refresh_ms = int(self.valueof("refresh_ms", values)) - self.loop = (self.valueof("loop", values).lower() == "on") + self.loop = self.valueof("loop", values).lower() == "on" self.autostart = self.valueof("autostart", values) self.parsemap(self.valueof("map", values)) self.script_start = self.valueof("script_start", values) self.script_pause = self.valueof("script_pause", values) self.script_stop = self.valueof("script_stop", values) - if self.verbose: - self.session.info("ns-2 scripted mobility configured for WLAN %d" \ - " using file: %s" % (objid, self.file)) + logger.info("ns-2 scripted mobility configured for WLAN %d using file: %s", object_id, self.file) self.readscriptfile() self.copywaypoints() self.setendtime() @classmethod - def configure_mob(cls, session, msg): - ''' Handle configuration messages for setting up a model. + def configure_mob(cls, session, config_data): + """ + Handle configuration messages for setting up a model. Pass the MobilityManager object as the manager object. - ''' - return cls.configure(session.mobility, msg) - + + :param core.session.Session session: current session calling function + :param core.conf.ConfigData config_data: configuration data for carrying out a configuration + """ + return cls.configure(session.mobility, config_data) + def readscriptfile(self): - ''' Read in mobility script from a file. This adds waypoints to a - priority queue, sorted by waypoint time. Initial waypoints are - stored in a separate dict. - ''' + """ + Read in mobility script from a file. This adds waypoints to a + priority queue, sorted by waypoint time. Initial waypoints are + stored in a separate dict. 
+ + :return: nothing + """ filename = self.findfile(self.file) try: f = open(filename, 'r') - except IOError, e: - self.session.warn("ns-2 scripted mobility failed to load file " \ - " '%s' (%s)" % (self.file, e)) + except IOError: + logger.exception("ns-2 scripted mobility failed to load file '%s'", self.file) return - if self.verbose: - self.session.info("reading ns-2 script file: %s" % filename) + logger.info("reading ns-2 script file: %s" % filename) ln = 0 ix = iy = iz = None inodenum = None @@ -830,7 +1074,7 @@ class Ns2ScriptedMobility(WayPointMobility): # $ns_ at 1.00 "$node_(6) setdest 500.0 178.0 25.0" parts = line.split() time = float(parts[2]) - nodenum = parts[3][1+parts[3].index('('):parts[3].index(')')] + nodenum = parts[3][1 + parts[3].index('('):parts[3].index(')')] x = float(parts[5]) y = float(parts[6]) z = None @@ -841,7 +1085,7 @@ class Ns2ScriptedMobility(WayPointMobility): # $node_(6) set X_ 780.0 parts = line.split() time = 0.0 - nodenum = parts[0][1+parts[0].index('('):parts[0].index(')')] + nodenum = parts[0][1 + parts[0].index('('):parts[0].index(')')] if parts[2] == 'X_': if ix is not None and iy is not None: self.addinitial(self.map(inodenum), ix, iy, iz) @@ -856,104 +1100,142 @@ class Ns2ScriptedMobility(WayPointMobility): inodenum = nodenum else: raise ValueError - except ValueError, e: - self.session.warn("skipping line %d of file %s '%s' (%s)" % \ - (ln, self.file, line, e)) + except ValueError: + logger.exception("skipping line %d of file %s '%s'", ln, self.file, line) continue if ix is not None and iy is not None: self.addinitial(self.map(inodenum), ix, iy, iz) - - def findfile(self, fn): - ''' Locate a script file. If the specified file doesn't exist, look in the - same directory as the scenario file (session.filename), or in the default - configs directory (~/.core/configs). This allows for sample files without - absolute pathnames. - ''' - if os.path.exists(fn): - return fn + + def findfile(self, file_name): + """ + Locate a script file. If the specified file doesn't exist, look in the + same directory as the scenario file (session.filename), or in the default + configs directory (~/.core/configs). This allows for sample files without + absolute pathnames. + + :param str file_name: file name to find + :return: absolute path to the file + :rtype: str + """ + if os.path.exists(file_name): + return file_name + if self.session.filename is not None: d = os.path.dirname(self.session.filename) - sessfn = os.path.join(d, fn) - if (os.path.exists(sessfn)): + sessfn = os.path.join(d, file_name) + if os.path.exists(sessfn): return sessfn + if self.session.user is not None: - userfn = os.path.join('/home', self.session.user, '.core', 'configs', fn) - if (os.path.exists(userfn)): + userfn = os.path.join('/home', self.session.user, '.core', 'configs', file_name) + if os.path.exists(userfn): return userfn - return fn - + + return file_name + def parsemap(self, mapstr): - ''' Parse a node mapping string, given as a configuration parameter. - ''' + """ + Parse a node mapping string, given as a configuration parameter. 
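readscriptfile() pulls the node number and coordinates straight out of ns-2 setdest lines with split() and index(). A standalone parse of one such line:

    def parse_setdest(line):
        """Parse an ns-2 setdest line into (time, nodenum, x, y)."""
        parts = line.split()
        when = float(parts[2])
        nodenum = parts[3][1 + parts[3].index('('):parts[3].index(')')]
        x, y = float(parts[5]), float(parts[6])
        return when, int(nodenum), x, y

    line = '$ns_ at 1.00 "$node_(6) setdest 500.0 178.0 25.0"'
    assert parse_setdest(line) == (1.0, 6, 500.0, 178.0)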
+ + :param str mapstr: mapping string to parse + :return: nothing + """ self.nodemap = {} - if mapstr.strip() == '': + if mapstr.strip() == "": return - for pair in mapstr.split(','): - parts = pair.split(':') + + for pair in mapstr.split(","): + parts = pair.split(":") try: if len(parts) != 2: raise ValueError self.nodemap[int(parts[0])] = int(parts[1]) except ValueError: - self.session.warn("ns-2 mobility node map error") - return - + logger.exception("ns-2 mobility node map error") + def map(self, nodenum): - ''' Map one node number (from a script file) to another. - ''' + """ + Map one node number (from a script file) to another. + + :param str nodenum: node id to map + :return: mapped value or the node id itself + :rtype: int + """ nodenum = int(nodenum) try: return self.nodemap[nodenum] except KeyError: + logger.exception("error find value in node map") return nodenum - + def startup(self): - ''' Start running the script if autostart is enabled. - Move node to initial positions when any autostart time is specified. - Ignore the script if autostart is an empty string (can still be - started via GUI controls). - ''' + """ + Start running the script if autostart is enabled. + Move node to initial positions when any autostart time is specified. + Ignore the script if autostart is an empty string (can still be + started via GUI controls). + + :return: nothing + """ if self.autostart == '': - if self.verbose: - self.session.info("not auto-starting ns-2 script for %s" % \ - self.wlan.name) + logger.info("not auto-starting ns-2 script for %s" % self.wlan.name) return try: t = float(self.autostart) except ValueError: - self.session.warn("Invalid auto-start seconds specified '%s' for " \ - "%s" % (self.autostart, self.wlan.name)) + logger.exception("Invalid auto-start seconds specified '%s' for %s", self.autostart, self.wlan.name) return self.movenodesinitial() - if self.verbose: - self.session.info("scheduling ns-2 script for %s autostart at %s" \ - % (self.wlan.name, t)) + logger.info("scheduling ns-2 script for %s autostart at %s" % (self.wlan.name, t)) self.state = self.STATE_RUNNING self.session.evq.add_event(t, self.run) def start(self): - ''' Handle the case when un-paused. - ''' + """ + Handle the case when un-paused. + + :return: nothing + """ laststate = self.state super(Ns2ScriptedMobility, self).start() if laststate == self.STATE_PAUSED: self.statescript("unpause") def run(self): - ''' Start is pressed or autostart is triggered. - ''' + """ + Start is pressed or autostart is triggered. + + :return: nothing + """ super(Ns2ScriptedMobility, self).run() self.statescript("run") - + def pause(self): + """ + Pause the mobility script. + + :return: nothing + """ super(Ns2ScriptedMobility, self).pause() self.statescript("pause") - + def stop(self, move_initial=True): + """ + Stop the mobility script. + + :param bool move_initial: flag to check if we should move node to initial position + :return: nothing + """ super(Ns2ScriptedMobility, self).stop(move_initial=move_initial) self.statescript("stop") - + def statescript(self, typestr): + """ + State of the mobility script. 
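The optional node mapping string ("0:1,1:2,2:3") translates script node numbers to CORE node numbers; a minimal standalone parser, with the same fall-back-to-self lookup used by map():

    def parse_map(mapstr):
        """Parse a mapping string such as "0:1,1:2,2:3" into a dict."""
        nodemap = {}
        if not mapstr.strip():
            return nodemap
        for pair in mapstr.split(","):
            parts = pair.split(":")
            if len(parts) != 2:
                raise ValueError("bad node map entry: %r" % pair)
            nodemap[int(parts[0])] = int(parts[1])
        return nodemap

    nodemap = parse_map("0:1,1:2,2:3")
    # unmapped numbers fall back to themselves, as in Ns2ScriptedMobility.map()
    assert nodemap.get(1, 1) == 2 and nodemap.get(7, 7) == 7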
+ + :param str typestr: state type string + :return: nothing + """ filename = None if typestr == "run" or typestr == "unpause": filename = self.script_start @@ -965,11 +1247,10 @@ class Ns2ScriptedMobility(WayPointMobility): return filename = self.findfile(filename) try: - check_call(["/bin/sh", filename, typestr], - cwd=self.session.sessiondir, - env=self.session.getenviron()) - except Exception, e: - self.session.warn("Error running script '%s' for WLAN state %s: " \ - "%s" % (filename, typestr, e)) - - + subprocess.check_call( + ["/bin/sh", filename, typestr], + cwd=self.session.sessiondir, + env=self.session.get_environment() + ) + except subprocess.CalledProcessError: + logger.exception("Error running script '%s' for WLAN state %s", filename, typestr) diff --git a/daemon/core/netns/nodes.py b/daemon/core/netns/nodes.py index fd0659c8..f9ded836 100644 --- a/daemon/core/netns/nodes.py +++ b/daemon/core/netns/nodes.py @@ -1,80 +1,114 @@ -# -# CORE -# Copyright (c)2010-2013 the Boeing Company. -# See the LICENSE file included in this distribution. -# -# authors: Tom Goff -# Jeff Ahrenholz -# -''' -nodes.py: definition of an LxcNode and CoreNode classes, and other node classes -that inherit from the CoreNode, implementing specific node types. -''' +""" +Definition of LxcNode, CoreNode, and other node classes that inherit from the CoreNode, +implementing specific node types. +""" -from vnode import * -from vnet import * -from core.misc.ipaddr import * -from core.api import coreapi +import socket +import subprocess +import threading +from socket import AF_INET +from socket import AF_INET6 + +from core import constants +from core.coreobj import PyCoreNetIf from core.coreobj import PyCoreNode +from core.coreobj import PyCoreObj +from core.data import LinkData +from core.enumerations import LinkTypes +from core.enumerations import NodeTypes +from core.enumerations import RegisterTlvs +from core.misc import ipaddress +from core.misc import log +from core.misc import utils +from core.netns.vnet import GreTapBridge +from core.netns.vnet import LxBrNet +from core.netns.vnode import LxcNode + +logger = log.get_logger(__name__) + class CtrlNet(LxBrNet): + """ + Control network functionality. + """ policy = "ACCEPT" - CTRLIF_IDX_BASE = 99 # base control interface index - DEFAULT_PREFIX_LIST = ["172.16.0.0/24 172.16.1.0/24 172.16.2.0/24 172.16.3.0/24 172.16.4.0/24", - "172.17.0.0/24 172.17.1.0/24 172.17.2.0/24 172.17.3.0/24 172.17.4.0/24", - "172.18.0.0/24 172.18.1.0/24 172.18.2.0/24 172.18.3.0/24 172.18.4.0/24", - "172.19.0.0/24 172.19.1.0/24 172.19.2.0/24 172.19.3.0/24 172.19.4.0/24"] - - def __init__(self, session, objid = "ctrlnet", name = None, - verbose = False, prefix = None, - hostid = None, start = True, assign_address = True, - updown_script = None, serverintf = None): - self.prefix = IPv4Prefix(prefix) + # base control interface index + CTRLIF_IDX_BASE = 99 + DEFAULT_PREFIX_LIST = [ + "172.16.0.0/24 172.16.1.0/24 172.16.2.0/24 172.16.3.0/24 172.16.4.0/24", + "172.17.0.0/24 172.17.1.0/24 172.17.2.0/24 172.17.3.0/24 172.17.4.0/24", + "172.18.0.0/24 172.18.1.0/24 172.18.2.0/24 172.18.3.0/24 172.18.4.0/24", + "172.19.0.0/24 172.19.1.0/24 172.19.2.0/24 172.19.3.0/24 172.19.4.0/24" + ] + + def __init__(self, session, objid="ctrlnet", name=None, prefix=None, + hostid=None, start=True, assign_address=True, + updown_script=None, serverintf=None): + """ + Creates a CtrlNet instance. 
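The state hooks are plain shell scripts run with subprocess.check_call from the session directory. The same pattern in isolation, with a hypothetical hook path and failures logged rather than raised:

    import logging
    import os
    import subprocess

    logger = logging.getLogger(__name__)

    def run_state_hook(script, state, workdir=None, env=None):
        """Run a shell hook such as 'myhook.sh run' and log failures."""
        try:
            subprocess.check_call(["/bin/sh", script, state],
                                  cwd=workdir or os.getcwd(),
                                  env=env)
            return True
        except (OSError, subprocess.CalledProcessError):
            logger.exception("error running script '%s' for state %s", script, state)
            return False

    # run_state_hook("/tmp/myhook.sh", "run")  # hypothetical hook path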
+ + :param core.session.Session session: core session instance + :param int objid: node id + :param str name: node namee + :param prefix: control network ipv4 prefix + :param hostid: host id + :param bool start: start flag + :param str assign_address: assigned address + :param str updown_script: updown script + :param serverintf: server interface + :return: + """ + self.prefix = ipaddress.Ipv4Prefix(prefix) self.hostid = hostid self.assign_address = assign_address self.updown_script = updown_script self.serverintf = serverintf - LxBrNet.__init__(self, session, objid = objid, name = name, - verbose = verbose, start = start) + LxBrNet.__init__(self, session, objid=objid, name=name, start=start) def startup(self): + """ + Startup functionality for the control network. + + :return: nothing + """ if self.detectoldbridge(): return - + LxBrNet.startup(self) if self.hostid: addr = self.prefix.addr(self.hostid) else: - addr = self.prefix.maxaddr() + addr = self.prefix.max_addr() msg = "Added control network bridge: %s %s" % \ - (self.brname, self.prefix) + (self.brname, self.prefix) addrlist = ["%s/%s" % (addr, self.prefix.prefixlen)] if self.assign_address: - self.addrconfig(addrlist = addrlist) + self.addrconfig(addrlist=addrlist) msg += " address %s" % addr - self.session.info(msg) + logger.info(msg) if self.updown_script is not None: - self.info("interface %s updown script '%s startup' called" % \ - (self.brname, self.updown_script)) - check_call([self.updown_script, self.brname, "startup"]) + logger.info("interface %s updown script '%s startup' called" % \ + (self.brname, self.updown_script)) + subprocess.check_call([self.updown_script, self.brname, "startup"]) if self.serverintf is not None: try: - check_call([BRCTL_BIN, "addif", self.brname, self.serverintf]) - check_call([IP_BIN, "link", "set", self.serverintf, "up"]) - except Exception, e: - self.exception(coreapi.CORE_EXCP_LEVEL_FATAL, self.brname, - "Error joining server interface %s to controlnet bridge %s: %s" % \ - (self.serverintf, self.brname, e)) - + subprocess.check_call([constants.BRCTL_BIN, "addif", self.brname, self.serverintf]) + subprocess.check_call([constants.IP_BIN, "link", "set", self.serverintf, "up"]) + except subprocess.CalledProcessError: + logger.exception("Error joining server interface %s to controlnet bridge %s", + self.serverintf, self.brname) def detectoldbridge(self): - ''' Occassionally, control net bridges from previously closed sessions are not cleaned up. + """ + Occassionally, control net bridges from previously closed sessions are not cleaned up. Check if there are old control net bridges and delete them - ''' - retstat, retstr = cmdresult([BRCTL_BIN,'show']) + + :return: True if an old bridge was detected, False otherwise + :rtype: bool + """ + retstat, retstr = utils.cmdresult([constants.BRCTL_BIN, 'show']) if retstat != 0: - self.exception(coreapi.CORE_EXCP_LEVEL_FATAL, None, - "Unable to retrieve list of installed bridges") + logger.error("Unable to retrieve list of installed bridges") lines = retstr.split('\n') for line in lines[1:]: cols = line.split('\t') @@ -82,248 +116,346 @@ class CtrlNet(LxBrNet): flds = cols[0].split('.') if len(flds) == 3: if flds[0] == 'b' and flds[1] == self.objid: - self.session.exception(coreapi.CORE_EXCP_LEVEL_FATAL, "CtrlNet.startup()", None, - "Error: An active control net bridge (%s) found. "\ - "An older session might still be running. " \ - "Stop all sessions and, if needed, delete %s to continue." 
% \ - (oldbr, oldbr)) + logger.error( + "Error: An active control net bridge (%s) found. " \ + "An older session might still be running. " \ + "Stop all sessions and, if needed, delete %s to continue." % \ + (oldbr, oldbr) + ) return True - ''' + """ # Do this if we want to delete the old bridge - self.warn("Warning: Old %s bridge found: %s" % (self.objid, oldbr)) + logger.warn("Warning: Old %s bridge found: %s" % (self.objid, oldbr)) try: check_call([BRCTL_BIN, 'delbr', oldbr]) - except Exception, e: - self.exception(coreapi.CORE_EXCP_LEVEL_ERROR, oldbr, - "Error deleting old bridge %s" % oldbr) - self.info("Deleted %s" % oldbr) - ''' + except subprocess.CalledProcessError as e: + logger.exception("Error deleting old bridge %s", oldbr, e) + logger.info("Deleted %s", oldbr) + """ return False - + def shutdown(self): + """ + Control network shutdown. + + :return: nothing + """ if self.serverintf is not None: try: - check_call([BRCTL_BIN, "delif", self.brname, self.serverintf]) - except Exception, e: - self.exception(coreapi.CORE_EXCP_LEVEL_ERROR, self.brname, - "Error deleting server interface %s to controlnet bridge %s: %s" % \ - (self.serverintf, self.brname, e)) - + subprocess.check_call([constants.BRCTL_BIN, "delif", self.brname, self.serverintf]) + except subprocess.CalledProcessError: + logger.exception("Error deleting server interface %s to controlnet bridge %s", + self.serverintf, self.brname) + if self.updown_script is not None: - self.info("interface %s updown script '%s shutdown' called" % \ - (self.brname, self.updown_script)) - check_call([self.updown_script, self.brname, "shutdown"]) + logger.info("interface %s updown script '%s shutdown' called" % (self.brname, self.updown_script)) + subprocess.check_call([self.updown_script, self.brname, "shutdown"]) LxBrNet.shutdown(self) - def tolinkmsgs(self, flags): - ''' Do not include CtrlNet in link messages describing this session. - ''' + def all_link_data(self, flags): + """ + Do not include CtrlNet in link messages describing this session. + + :return: nothing + """ return [] + class CoreNode(LxcNode): - apitype = coreapi.CORE_NODE_DEF + """ + Basic core node class for nodes to extend. + """ + apitype = NodeTypes.DEFAULT.value + class PtpNet(LxBrNet): + """ + Peer to peer network node. + """ policy = "ACCEPT" def attach(self, netif): - if len(self._netif) > 1: - raise ValueError, \ - "Point-to-point links support at most 2 network interfaces" + """ + Attach a network interface, but limit attachment to two interfaces. + + :param core.coreobj.PyCoreNetIf netif: network interface + :return: nothing + """ + if len(self._netif) >= 2: + raise ValueError("Point-to-point links support at most 2 network interfaces") LxBrNet.attach(self, netif) - def tonodemsg(self, flags): - ''' Do not generate a Node Message for point-to-point links. They are - built using a link message instead. - ''' + def data(self, message_type): + """ + Do not generate a Node Message for point-to-point links. They are + built using a link message instead. + + :return: nothing + """ pass - def tolinkmsgs(self, flags): - ''' Build CORE API TLVs for a point-to-point link. One Link message - describes this network. - ''' - tlvdata = "" + def all_link_data(self, flags): + """ + Build CORE API TLVs for a point-to-point link. One Link message + describes this network. 
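detectoldbridge() scans brctl show output for bridge names of the form b.<objid>.<suffix> left over from an earlier session. The parsing can be exercised against a captured sample (the sample text below is illustrative, not real command output):

    SAMPLE = (
        "bridge name\tbridge id\t\tSTP enabled\tinterfaces\n"
        "b.ctrlnet.42\t8000.000000000000\tno\t\tveth0\n"
        "docker0\t8000.0242ac110002\tno\t\t\n"
    )

    def find_old_bridges(brctl_output, objid):
        """Return bridge names that look like leftovers from an old session."""
        old = []
        for line in brctl_output.split("\n")[1:]:
            cols = line.split("\t")
            flds = cols[0].split(".")
            if len(flds) == 3 and flds[0] == "b" and flds[1] == str(objid):
                old.append(cols[0])
        return old

    assert find_old_bridges(SAMPLE, "ctrlnet") == ["b.ctrlnet.42"]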
+ + :return: all link data + :rtype: list[LinkData] + """ + + all_links = [] + if len(self._netif) != 2: - return tlvdata - (if1, if2) = self._netif.items() + return all_links + + if1, if2 = self._netif.items() if1 = if1[1] if2 = if2[1] - tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_N1NUMBER, - if1.node.objid) - tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_N2NUMBER, - if2.node.objid) - uni = False + + unidirectional = 0 if if1.getparams() != if2.getparams(): - uni = True - tlvdata += self.netifparamstolink(if1) - tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_TYPE, - self.linktype) - if uni: - tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_UNI, 1) + unidirectional = 1 - tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_IF1NUM, \ - if1.node.getifindex(if1)) - if if1.hwaddr: - tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_IF1MAC, - if1.hwaddr) - for addr in if1.addrlist: - (ip, sep, mask) = addr.partition('/') + interface1_ip4 = None + interface1_ip4_mask = None + interface1_ip6 = None + interface1_ip6_mask = None + for address in if1.addrlist: + ip, sep, mask = address.partition('/') mask = int(mask) - if isIPv4Address(ip): + if ipaddress.is_ipv4_address(ip): family = AF_INET - tlvtypeip = coreapi.CORE_TLV_LINK_IF1IP4 - tlvtypemask = coreapi.CORE_TLV_LINK_IF1IP4MASK + ipl = socket.inet_pton(family, ip) + interface1_ip4 = ipaddress.IpAddress(af=family, address=ipl) + interface1_ip4_mask = mask else: family = AF_INET6 - tlvtypeip = coreapi.CORE_TLV_LINK_IF1IP6 - tlvtypemask = coreapi.CORE_TLV_LINK_IF1IP6MASK - ipl = socket.inet_pton(family, ip) - tlvdata += coreapi.CoreLinkTlv.pack(tlvtypeip, - IPAddr(af=family, addr=ipl)) - tlvdata += coreapi.CoreLinkTlv.pack(tlvtypemask, mask) + ipl = socket.inet_pton(family, ip) + interface1_ip6 = ipaddress.IpAddress(af=family, address=ipl) + interface1_ip6_mask = mask - tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_IF2NUM, \ - if2.node.getifindex(if2)) - if if2.hwaddr: - tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_IF2MAC, - if2.hwaddr) - for addr in if2.addrlist: - (ip, sep, mask) = addr.partition('/') + interface2_ip4 = None + interface2_ip4_mask = None + interface2_ip6 = None + interface2_ip6_mask = None + for address in if2.addrlist: + ip, sep, mask = address.partition('/') mask = int(mask) - if isIPv4Address(ip): + if ipaddress.is_ipv4_address(ip): family = AF_INET - tlvtypeip = coreapi.CORE_TLV_LINK_IF2IP4 - tlvtypemask = coreapi.CORE_TLV_LINK_IF2IP4MASK + ipl = socket.inet_pton(family, ip) + interface2_ip4 = ipaddress.IpAddress(af=family, address=ipl) + interface2_ip4_mask = mask else: family = AF_INET6 - tlvtypeip = coreapi.CORE_TLV_LINK_IF2IP6 - tlvtypemask = coreapi.CORE_TLV_LINK_IF2IP6MASK - ipl = socket.inet_pton(family, ip) - tlvdata += coreapi.CoreLinkTlv.pack(tlvtypeip, - IPAddr(af=family, addr=ipl)) - tlvdata += coreapi.CoreLinkTlv.pack(tlvtypemask, mask) - msg = coreapi.CoreLinkMessage.pack(flags, tlvdata) - if not uni: - return [msg,] + ipl = socket.inet_pton(family, ip) + interface2_ip6 = ipaddress.IpAddress(af=family, address=ipl) + interface2_ip6_mask = mask + + # TODO: not currently used + # loss=netif.getparam('loss') + link_data = LinkData( + message_type=flags, + node1_id=if1.node.objid, + node2_id=if2.node.objid, + link_type=self.linktype, + unidirectional=unidirectional, + delay=if1.getparam("delay"), + bandwidth=if1.getparam("bw"), + dup=if1.getparam("duplicate"), + jitter=if1.getparam("jitter"), + 
interface1_id=if1.node.getifindex(if1), + interface1_mac=if1.hwaddr, + interface1_ip4=interface1_ip4, + interface1_ip4_mask=interface1_ip4_mask, + interface1_ip6=interface1_ip6, + interface1_ip6_mask=interface1_ip6_mask, + interface2_id=if2.node.getifindex(if2), + interface2_mac=if2.hwaddr, + interface2_ip4=interface2_ip4, + interface2_ip4_mask=interface2_ip4_mask, + interface2_ip6=interface2_ip6, + interface2_ip6_mask=interface2_ip6_mask, + ) + + all_links.append(link_data) + # build a 2nd link message for the upstream link parameters # (swap if1 and if2) - tlvdata = "" - tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_N1NUMBER, - if2.node.objid) - tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_N2NUMBER, - if1.node.objid) - tlvdata += self.netifparamstolink(if2) - tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_UNI, 1) - tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_IF1NUM, \ - if2.node.getifindex(if2)) - tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_IF2NUM, \ - if1.node.getifindex(if1)) - msg2 = coreapi.CoreLinkMessage.pack(0, tlvdata) - return [msg, msg2] + if unidirectional: + link_data = LinkData( + message_type=0, + node1_id=if2.node.objid, + node2_id=if1.node.objid, + delay=if1.getparam("delay"), + bandwidth=if1.getparam("bw"), + dup=if1.getparam("duplicate"), + jitter=if1.getparam("jitter"), + unidirectional=1, + interface1_id=if2.node.getifindex(if2), + interface2_id=if1.node.getifindex(if1) + ) + all_links.append(link_data) + + return all_links + class SwitchNode(LxBrNet): - apitype = coreapi.CORE_NODE_SWITCH + """ + Provides switch functionality within a core node. + """ + apitype = NodeTypes.SWITCH.value policy = "ACCEPT" type = "lanswitch" + class HubNode(LxBrNet): - apitype = coreapi.CORE_NODE_HUB + """ + Provides hub functionality within a core node, forwards packets to all bridge + ports by turning off MAC address learning. + """ + apitype = NodeTypes.HUB.value policy = "ACCEPT" type = "hub" - def __init__(self, session, objid = None, name = None, verbose = False, - start = True): - ''' the Hub node forwards packets to all bridge ports by turning off - the MAC address learning - ''' - LxBrNet.__init__(self, session, objid, name, verbose, start) + def __init__(self, session, objid=None, name=None, start=True): + """ + Creates a HubNode instance. + + :param core.session.Session session: core session instance + :param int objid: node id + :param str name: node namee + :param bool start: start flag + """ + LxBrNet.__init__(self, session, objid, name, start) if start: - check_call([BRCTL_BIN, "setageing", self.brname, "0"]) + subprocess.check_call([constants.BRCTL_BIN, "setageing", self.brname, "0"]) class WlanNode(LxBrNet): - apitype = coreapi.CORE_NODE_WLAN - linktype = coreapi.CORE_LINK_WIRELESS + """ + Provides wireless lan functionality within a core node. + """ + apitype = NodeTypes.WIRELESS_LAN.value + linktype = LinkTypes.WIRELESS.value policy = "DROP" type = "wlan" - def __init__(self, session, objid = None, name = None, verbose = False, - start = True, policy = None): - LxBrNet.__init__(self, session, objid, name, verbose, start, policy) + def __init__(self, session, objid=None, name=None, start=True, policy=None): + """ + Create a WlanNode instance. 
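Interface addresses are stored as "address/prefixlen" strings, split with partition() and packed with inet_pton before being placed in the link data. A standalone version using only the socket module, approximating the IPv4/IPv6 check by attempting an IPv4 parse first:

    import socket

    def split_addr(address):
        """Split "10.0.0.1/24" into (family, packed_address, prefix_length)."""
        ip, _, mask = address.partition("/")
        try:
            packed = socket.inet_pton(socket.AF_INET, ip)
            family = socket.AF_INET
        except socket.error:
            packed = socket.inet_pton(socket.AF_INET6, ip)
            family = socket.AF_INET6
        return family, packed, int(mask)

    family, packed, prefixlen = split_addr("10.0.0.1/24")
    assert family == socket.AF_INET and prefixlen == 24
    family, packed, prefixlen = split_addr("2001:db8::1/64")
    assert family == socket.AF_INET6 and prefixlen == 64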
+ + :param core.session.Session session: core session instance + :param int objid: node id + :param str name: node name + :param bool start: start flag + :param policy: wlan policy + """ + LxBrNet.__init__(self, session, objid, name, start, policy) # wireless model such as basic range self.model = None # mobility model such as scripted self.mobility = None def attach(self, netif): + """ + Attach a network interface. + + :param core.coreobj.PyCoreNetIf netif: network interface + :return: nothing + """ LxBrNet.attach(self, netif) if self.model: - netif.poshook = self.model._positioncallback + netif.poshook = self.model.position_callback if netif.node is None: return - (x,y,z) = netif.node.position.get() + x, y, z = netif.node.position.get() # invokes any netif.poshook netif.setposition(x, y, z) - #self.model.setlinkparams() + # self.model.setlinkparams() def setmodel(self, model, config): - ''' Mobility and wireless model. - ''' - if (self.verbose): - self.info("adding model %s" % model._name) - if model._type == coreapi.CORE_TLV_REG_WIRELESS: - self.model = model(session=self.session, objid=self.objid, - verbose=self.verbose, values=config) - if self.model._positioncallback: + """ + Sets the mobility and wireless model. + + :param core.mobility.WirelessModel.cls model: wireless model to set to + :param config: model configuration + :return: nothing + """ + logger.info("adding model %s" % model.name) + if model.config_type == RegisterTlvs.WIRELESS.value: + self.model = model(session=self.session, object_id=self.objid, values=config) + if self.model.position_callback: for netif in self.netifs(): - netif.poshook = self.model._positioncallback + netif.poshook = self.model.position_callback if netif.node is not None: - (x,y,z) = netif.node.position.get() + x, y, z = netif.node.position.get() netif.poshook(netif, x, y, z) self.model.setlinkparams() - elif model._type == coreapi.CORE_TLV_REG_MOBILITY: - self.mobility = model(session=self.session, objid=self.objid, - verbose=self.verbose, values=config) + elif model.config_type == RegisterTlvs.MOBILITY.value: + self.mobility = model(session=self.session, object_id=self.objid, values=config) def updatemodel(self, model_name, values): - ''' Allow for model updates during runtime (similar to setmodel().) - ''' - if (self.verbose): - self.info("updating model %s" % model_name) - if self.model is None or self.model._name != model_name: + """ + Allow for model updates during runtime (similar to setmodel().) + + :param model_name: model name to update + :param values: values to update model with + :return: nothing + """ + logger.info("updating model %s" % model_name) + if self.model is None or self.model.name != model_name: return model = self.model - if model._type == coreapi.CORE_TLV_REG_WIRELESS: + if model.config_type == RegisterTlvs.WIRELESS.value: if not model.updateconfig(values): return - if self.model._positioncallback: + if self.model.position_callback: for netif in self.netifs(): - netif.poshook = self.model._positioncallback + netif.poshook = self.model.position_callback if netif.node is not None: - (x,y,z) = netif.node.position.get() + (x, y, z) = netif.node.position.get() netif.poshook(netif, x, y, z) self.model.setlinkparams() - def tolinkmsgs(self, flags): - msgs = LxBrNet.tolinkmsgs(self, flags) + def all_link_data(self, flags): + """ + Retrieve all link data. 
+ + :param flags: link flags + :return: all link data + :rtype: list[LinkData] + """ + all_links = LxBrNet.all_link_data(self, flags) + if self.model: - msgs += self.model.tolinkmsgs(flags) - return msgs + all_links.extend(self.model.all_link_data(flags)) + + return all_links class RJ45Node(PyCoreNode, PyCoreNetIf): - ''' RJ45Node is a physical interface on the host linked to the emulated - network. - ''' - apitype = coreapi.CORE_NODE_RJ45 + """ + RJ45Node is a physical interface on the host linked to the emulated + network. + """ + apitype = NodeTypes.RJ45.value type = "rj45" - def __init__(self, session, objid = None, name = None, mtu = 1500, - verbose = False, start = True): - PyCoreNode.__init__(self, session, objid, name, verbose=verbose, - start=start) + def __init__(self, session, objid=None, name=None, mtu=1500, start=True): + """ + Create an RJ45Node instance. + + :param core.session.Session session: core session instance + :param int objid: node id + :param str name: node name + :param mtu: rj45 mtu + :param bool start: start flag + :return: + """ + PyCoreNode.__init__(self, session, objid, name, start=start) # this initializes net, params, poshook - PyCoreNetIf.__init__(self, node=self, name=name, mtu = mtu) + PyCoreNetIf.__init__(self, node=self, name=name, mtu=mtu) self.up = False self.lock = threading.RLock() self.ifindex = None @@ -334,65 +466,101 @@ class RJ45Node(PyCoreNode, PyCoreNetIf): self.startup() def startup(self): - ''' Set the interface in the up state. - ''' + """ + Set the interface in the up state. + + :return: nothing + """ # interface will also be marked up during net.attach() self.savestate() + try: - check_call([IP_BIN, "link", "set", self.localname, "up"]) - except: - self.warn("Failed to run command: %s link set %s up" % \ - (IP_BIN, self.localname)) - return - self.up = True + subprocess.check_call([constants.IP_BIN, "link", "set", self.localname, "up"]) + self.up = True + except subprocess.CalledProcessError: + logger.exception("failed to run command: %s link set %s up", constants.IP_BIN, self.localname) def shutdown(self): - ''' Bring the interface down. Remove any addresses and queuing - disciplines. - ''' + """ + Bring the interface down. Remove any addresses and queuing + disciplines. + + :return: nothing + """ if not self.up: return - check_call([IP_BIN, "link", "set", self.localname, "down"]) - check_call([IP_BIN, "addr", "flush", "dev", self.localname]) - mutecall([TC_BIN, "qdisc", "del", "dev", self.localname, "root"]) + subprocess.check_call([constants.IP_BIN, "link", "set", self.localname, "down"]) + subprocess.check_call([constants.IP_BIN, "addr", "flush", "dev", self.localname]) + utils.mutecall([constants.TC_BIN, "qdisc", "del", "dev", self.localname, "root"]) self.up = False self.restorestate() + # TODO: issue in that both classes inherited from provide the same method with different signatures def attachnet(self, net): + """ + Attach a network. + + :param core.coreobj.PyCoreNet net: network to attach + :return: nothing + """ PyCoreNetIf.attachnet(self, net) def detachnet(self): + """ + Detach a network. + + :return: nothing + """ PyCoreNetIf.detachnet(self) - def newnetif(self, net = None, addrlist = [], hwaddr = None, - ifindex = None, ifname = None): - ''' This is called when linking with another node. Since this node - represents an interface, we do not create another object here, - but attach ourselves to the given network. 
- ''' - self.lock.acquire() - try: + # TODO: parameters are not used + def newnetif(self, net=None, addrlist=None, hwaddr=None, ifindex=None, ifname=None): + """ + This is called when linking with another node. Since this node + represents an interface, we do not create another object here, + but attach ourselves to the given network. + + :param core.coreobj.PyCoreNet net: new network instance + :param list[str] addrlist: address list + :param str hwaddr: hardware address + :param int ifindex: interface index + :param str ifname: interface name + :return: + """ + with self.lock: if ifindex is None: ifindex = 0 + if self.net is not None: - raise ValueError, \ - "RJ45 nodes support at most 1 network interface" + raise ValueError("RJ45 nodes support at most 1 network interface") + self._netif[ifindex] = self - self.node = self # PyCoreNetIf.node is self + # PyCoreNetIf.node is self + self.node = self self.ifindex = ifindex + if net is not None: self.attachnet(net) - for addr in maketuple(addrlist): - self.addaddr(addr) + + if addrlist: + for addr in utils.maketuple(addrlist): + self.addaddr(addr) + return ifindex - finally: - self.lock.release() def delnetif(self, ifindex): + """ + Delete a network interface. + + :param int ifindex: interface index to delete + :return: nothing + """ if ifindex is None: ifindex = 0 + if ifindex not in self._netif: raise ValueError, "ifindex %s does not exist" % ifindex + self._netif.pop(ifindex) if ifindex == self.ifindex: self.shutdown() @@ -400,46 +568,78 @@ class RJ45Node(PyCoreNode, PyCoreNetIf): raise ValueError, "ifindex %s does not exist" % ifindex def netif(self, ifindex, net=None): - ''' This object is considered the network interface, so we only - return self here. This keeps the RJ45Node compatible with - real nodes. - ''' + """ + This object is considered the network interface, so we only + return self here. This keeps the RJ45Node compatible with + real nodes. + + :param int ifindex: interface index to retrieve + :param net: network to retrieve + :return: a network interface + :rtype: core.coreobj.PyCoreNetIf + """ if net is not None and net == self.net: return self + if ifindex is None: ifindex = 0 + if ifindex == self.ifindex: return self + return None def getifindex(self, netif): + """ + Retrieve network interface index. + + :param core.coreobj.PyCoreNetIf netif: network interface to retrieve index for + :return: interface index, None otherwise + :rtype: int + """ if netif != self: return None + return self.ifindex def addaddr(self, addr): + """ + Add address to to network interface. + + :param str addr: address to add + :return: nothing + """ if self.up: - check_call([IP_BIN, "addr", "add", str(addr), "dev", self.name]) + subprocess.check_call([constants.IP_BIN, "addr", "add", str(addr), "dev", self.name]) PyCoreNetIf.addaddr(self, addr) def deladdr(self, addr): + """ + Delete address from network interface. + + :param str addr: address to delete + :return: nothing + """ if self.up: - check_call([IP_BIN, "addr", "del", str(addr), "dev", self.name]) + subprocess.check_call([constants.IP_BIN, "addr", "del", str(addr), "dev", self.name]) PyCoreNetIf.deladdr(self, addr) def savestate(self): - ''' Save the addresses and other interface state before using the + """ + Save the addresses and other interface state before using the interface for emulation purposes. 
TODO: save/restore the PROMISC flag - ''' + + :return: nothing + """ self.old_up = False self.old_addrs = [] - cmd = [IP_BIN, "addr", "show", "dev", self.localname] + cmd = [constants.IP_BIN, "addr", "show", "dev", self.localname] try: - tmp = subprocess.Popen(cmd, stdout = subprocess.PIPE) + tmp = subprocess.Popen(cmd, stdout=subprocess.PIPE) except OSError: - self.warn("Failed to run %s command: %s" % (IP_BIN, cmd)) + logger.exception("Failed to run %s command: %s", constants.IP_BIN, cmd) if tmp.wait(): - self.warn("Command failed: %s" % cmd) + logger.warn("Command failed: %s", cmd) return lines = tmp.stdout.read() tmp.stdout.close() @@ -459,31 +659,34 @@ class RJ45Node(PyCoreNode, PyCoreNetIf): self.old_addrs.append((items[1], None)) def restorestate(self): - ''' Restore the addresses and other interface state after using it. - ''' + """ + Restore the addresses and other interface state after using it. + + :return: nothing + """ for addr in self.old_addrs: if addr[1] is None: - check_call([IP_BIN, "addr", "add", addr[0], "dev", - self.localname]) + subprocess.check_call([constants.IP_BIN, "addr", "add", addr[0], "dev", self.localname]) else: - check_call([IP_BIN, "addr", "add", addr[0], "brd", addr[1], - "dev", self.localname]) + subprocess.check_call([constants.IP_BIN, "addr", "add", addr[0], "brd", addr[1], "dev", self.localname]) if self.old_up: - check_call([IP_BIN, "link", "set", self.localname, "up"]) + subprocess.check_call([constants.IP_BIN, "link", "set", self.localname, "up"]) def setposition(self, x=None, y=None, z=None): - ''' Use setposition() from both parent classes. - ''' + """ + Use setposition() from both parent classes. + + :return: nothing + """ PyCoreObj.setposition(self, x, y, z) # invoke any poshook PyCoreNetIf.setposition(self, x, y, z) - - - class TunnelNode(GreTapBridge): - apitype = coreapi.CORE_NODE_TUNNEL + """ + Provides tunnel functionality in a core node. + """ + apitype = NodeTypes.TUNNEL.value policy = "ACCEPT" type = "tunnel" - diff --git a/daemon/core/netns/vif.py b/daemon/core/netns/vif.py index 5b5be036..87226713 100644 --- a/daemon/core/netns/vif.py +++ b/daemon/core/netns/vif.py @@ -1,58 +1,91 @@ -# -# CORE -# Copyright (c)2011-2014 the Boeing Company. -# See the LICENSE file included in this distribution. -# -# authors: Tom Goff -# Jeff Ahrenholz -# -''' -vif.py: PyCoreNetIf classes that implement the interfaces available -under Linux. -''' +""" +virtual ethernet classes that implement the interfaces available under Linux. +""" -import os, signal, shutil, sys, subprocess, vnodeclient, threading, string -import random, time -from core.api import coreapi -from core.misc.utils import * -from core.constants import * -from core.coreobj import PyCoreObj, PyCoreNode, PyCoreNetIf, Position -from core.emane.nodes import EmaneNode +import subprocess +import time + +from core import constants +from core.coreobj import PyCoreNetIf +from core.enumerations import NodeTypes +from core.misc import log +from core.misc import nodeutils +from core.misc import utils + +logger = log.get_logger(__name__) + +utils.check_executables([constants.IP_BIN]) -checkexec([IP_BIN]) class VEth(PyCoreNetIf): - def __init__(self, node, name, localname, mtu = 1500, net = None, - start = True): + """ + Provides virtual ethernet functionality for core nodes. + """ + + # TODO: network is not used, why was it needed? + def __init__(self, node, name, localname, mtu=1500, net=None, start=True): + """ + Creates a VEth instance. 
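For reference, the save/restore logic above keeps whatever `inet`/`inet6` entries `ip addr show` reports so they can be re-applied once the emulation releases the device. A minimal standalone sketch of that round trip (addresses only; the device name is hypothetical), assuming the `ip` binary is on the PATH:

    import subprocess

    def save_addrs(devname):
        """Collect (address, broadcast) pairs reported by 'ip addr show'."""
        output = subprocess.check_output(["ip", "addr", "show", "dev", devname])
        addrs = []
        for line in output.decode().splitlines():
            items = line.split()
            if not items:
                continue
            if items[0] == "inet" and "brd" in items:
                # e.g. "inet 10.0.0.1/24 brd 10.0.0.255 scope global eth0"
                addrs.append((items[1], items[items.index("brd") + 1]))
            elif items[0] in ("inet", "inet6"):
                addrs.append((items[1], None))
        return addrs

    def restore_addrs(devname, addrs):
        """Re-apply previously saved addresses to the device."""
        for addr, brd in addrs:
            cmd = ["ip", "addr", "add", addr, "dev", devname]
            if brd:
                cmd[4:4] = ["brd", brd]
            subprocess.check_call(cmd)

    saved = save_addrs("eth0")  # "eth0" is a hypothetical device name
    restore_addrs("eth0", saved)
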
+ + :param core.netns.nodes.CoreNode node: related core node + :param str name: interface name + :param str localname: interface local name + :param mtu: interface mtu + :param net: network + :param bool start: start flag + :return: + """ # note that net arg is ignored - PyCoreNetIf.__init__(self, node = node, name = name, mtu = mtu) + PyCoreNetIf.__init__(self, node=node, name=name, mtu=mtu) self.localname = localname self.up = False if start: self.startup() def startup(self): - check_call([IP_BIN, "link", "add", "name", self.localname, - "type", "veth", "peer", "name", self.name]) - check_call([IP_BIN, "link", "set", self.localname, "up"]) + """ + Interface startup logic. + + :return: nothing + """ + subprocess.check_call([constants.IP_BIN, "link", "add", "name", self.localname, + "type", "veth", "peer", "name", self.name]) + subprocess.check_call([constants.IP_BIN, "link", "set", self.localname, "up"]) self.up = True def shutdown(self): + """ + Interface shutdown logic. + + :return: nothing + """ if not self.up: return if self.node: - self.node.cmd([IP_BIN, "-6", "addr", "flush", "dev", self.name]) + self.node.cmd([constants.IP_BIN, "-6", "addr", "flush", "dev", self.name]) if self.localname: - mutedetach([IP_BIN, "link", "delete", self.localname]) + utils.mutedetach([constants.IP_BIN, "link", "delete", self.localname]) self.up = False class TunTap(PyCoreNetIf): - ''' TUN/TAP virtual device in TAP mode - ''' - def __init__(self, node, name, localname, mtu = 1500, net = None, - start = True): - PyCoreNetIf.__init__(self, node = node, name = name, mtu = mtu) + """ + TUN/TAP virtual device in TAP mode + """ + + # TODO: network is not used, why was it needed? + def __init__(self, node, name, localname, mtu=1500, net=None, start=True): + """ + Create a TunTap instance. + + :param core.netns.nodes.CoreNode node: related core node + :param str name: interface name + :param str localname: local interface name + :param mtu: interface mtu + :param net: related network + :param bool start: start flag + """ + PyCoreNetIf.__init__(self, node=node, name=name, mtu=mtu) self.localname = localname self.up = False self.transport_type = "virtual" @@ -60,26 +93,41 @@ class TunTap(PyCoreNetIf): self.startup() def startup(self): + """ + Startup logic for a tunnel tap. + + :return: nothing + """ # TODO: more sophisticated TAP creation here # Debian does not support -p (tap) option, RedHat does. - # For now, this is disabled to allow the TAP to be created by another + # For now, this is disabled to allow the TAP to be created by another # system (e.g. EMANE's emanetransportd) - #check_call(["tunctl", "-t", self.name]) + # check_call(["tunctl", "-t", self.name]) # self.install() self.up = True def shutdown(self): + """ + Shutdown functionality for a tunnel tap. + + :return: nothing + """ if not self.up: return - self.node.cmd([IP_BIN, "-6", "addr", "flush", "dev", self.name]) - #if self.name: + self.node.cmd([constants.IP_BIN, "-6", "addr", "flush", "dev", self.name]) + # if self.name: # mutedetach(["tunctl", "-d", self.localname]) self.up = False - def waitfor(self, func, attempts = 10, maxretrydelay = 0.25): - '''\ - Wait for func() to return zero with exponential backoff - ''' + def waitfor(self, func, attempts=10, maxretrydelay=0.25): + """ + Wait for func() to return zero with exponential backoff. 
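The VEth startup/shutdown pair above reduces to a handful of `ip` invocations. A minimal sketch outside of CORE, assuming root privileges and hypothetical interface names (kernel interface names must stay under 16 characters):

    import subprocess

    IP_BIN = "ip"  # assumed to be on the PATH

    def create_veth_pair(localname, name):
        """Create a veth pair and bring the local end up."""
        subprocess.check_call([IP_BIN, "link", "add", "name", localname,
                               "type", "veth", "peer", "name", name])
        subprocess.check_call([IP_BIN, "link", "set", localname, "up"])

    def delete_veth(localname):
        """Deleting one end of the pair removes both ends."""
        subprocess.check_call([IP_BIN, "link", "delete", localname])

    create_veth_pair("veth0.a", "veth0.ap")  # hypothetical names
    delete_veth("veth0.a")
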
+ + :param func: function to wait for a result of zero + :param int attempts: number of attempts to wait for a zero result + :param float maxretrydelay: maximum retry delay + :return: nothing + """ delay = 0.01 for i in xrange(1, attempts + 1): r = func() @@ -88,98 +136,130 @@ class TunTap(PyCoreNetIf): msg = 'attempt %s failed with nonzero exit status %s' % (i, r) if i < attempts + 1: msg += ', retrying...' - self.node.info(msg) + logger.info(msg) time.sleep(delay) delay = delay + delay if delay > maxretrydelay: delay = maxretrydelay else: msg += ', giving up' - self.node.info(msg) - raise RuntimeError, 'command failed after %s attempts' % attempts + logger.info(msg) + + raise RuntimeError('command failed after %s attempts' % attempts) def waitfordevicelocal(self): - '''\ + """ Check for presence of a local device - tap device may not appear right away waits - ''' + + :return: wait for device local response + :rtype: int + """ + def localdevexists(): - cmd = (IP_BIN, 'link', 'show', self.localname) - return mutecall(cmd) + cmd = (constants.IP_BIN, 'link', 'show', self.localname) + return utils.mutecall(cmd) + self.waitfor(localdevexists) def waitfordevicenode(self): - '''\ - Check for presence of a node device - tap device may not - appear right away waits - ''' + """ + Check for presence of a node device - tap device may not appear right away waits. + + :return: nothing + """ + def nodedevexists(): - cmd = (IP_BIN, 'link', 'show', self.name) + cmd = (constants.IP_BIN, 'link', 'show', self.name) return self.node.cmd(cmd) + count = 0 while True: try: self.waitfor(nodedevexists) break - except RuntimeError: + except RuntimeError as e: # check if this is an EMANE interface; if so, continue # waiting if EMANE is still running - if count < 5 and isinstance(self.net, EmaneNode) and \ - self.node.session.emane.emanerunning(self.node): + # TODO: remove emane code + if count < 5 and nodeutils.is_node(self.net, NodeTypes.EMANE) and \ + self.node.session.emane.emanerunning(self.node): count += 1 else: - raise + raise e def install(self): - ''' Install this TAP into its namespace. This is not done from the - startup() method but called at a later time when a userspace - program (running on the host) has had a chance to open the socket - end of the TAP. - ''' + """ + Install this TAP into its namespace. This is not done from the + startup() method but called at a later time when a userspace + program (running on the host) has had a chance to open the socket + end of the TAP. + + :return: nothing + """ self.waitfordevicelocal() netns = str(self.node.pid) + try: - check_call([IP_BIN, "link", "set", self.localname, "netns", netns]) - except Exception, e: + subprocess.check_call([constants.IP_BIN, "link", "set", self.localname, "netns", netns]) + except subprocess.CalledProcessError: msg = "error installing TAP interface %s, command:" % self.localname msg += "ip link set %s netns %s" % (self.localname, netns) - self.node.exception(coreapi.CORE_EXCP_LEVEL_ERROR, self.localname, msg) - self.node.warn(msg) + logger.exception(msg) return - self.node.cmd([IP_BIN, "link", "set", self.localname, - "name", self.name]) - self.node.cmd([IP_BIN, "link", "set", self.name, "up"]) - + + self.node.cmd([constants.IP_BIN, "link", "set", self.localname, "name", self.name]) + self.node.cmd([constants.IP_BIN, "link", "set", self.name, "up"]) + def setaddrs(self): - ''' Set interface addresses based on self.addrlist. - ''' + """ + Set interface addresses based on self.addrlist. 
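The waitfor helper above retries a check with exponential backoff until it exits zero. A slightly simplified, self-contained sketch of the same pattern; the device name being checked is hypothetical:

    import os
    import subprocess
    import time

    def waitfor(func, attempts=10, maxretrydelay=0.25):
        """Retry func() until it returns 0, doubling the delay up to a cap."""
        delay = 0.01
        for i in range(1, attempts + 1):
            if func() == 0:
                return
            if i < attempts:
                time.sleep(delay)
                delay = min(delay * 2, maxretrydelay)
        raise RuntimeError("command failed after %s attempts" % attempts)

    def device_exists():
        """Return 0 once the hypothetical device 'dummy0' is visible to ip(8)."""
        with open(os.devnull, "w") as null:
            return subprocess.call(["ip", "link", "show", "dummy0"],
                                   stdout=null, stderr=null)

    waitfor(device_exists)
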
+ + :return: nothing + """ self.waitfordevicenode() for addr in self.addrlist: - self.node.cmd([IP_BIN, "addr", "add", str(addr), - "dev", self.name]) + self.node.cmd([constants.IP_BIN, "addr", "add", str(addr), "dev", self.name]) + class GreTap(PyCoreNetIf): - ''' GRE TAP device for tunneling between emulation servers. - Uses the "gretap" tunnel device type from Linux which is a GRE device - having a MAC address. The MAC address is required for bridging. - ''' - def __init__(self, node = None, name = None, session = None, mtu = 1458, - remoteip = None, objid = None, localip = None, ttl = 255, - key = None, start = True): - PyCoreNetIf.__init__(self, node = node, name = name, mtu = mtu) + """ + GRE TAP device for tunneling between emulation servers. + Uses the "gretap" tunnel device type from Linux which is a GRE device + having a MAC address. The MAC address is required for bridging. + """ + + def __init__(self, node=None, name=None, session=None, mtu=1458, + remoteip=None, objid=None, localip=None, ttl=255, + key=None, start=True): + """ + Creates a GreTap instance. + + :param core.netns.nodes.CoreNode node: related core node + :param str name: interface name + :param core.session.Session session: core session instance + :param mtu: interface mtu + :param str remoteip: remote address + :param int objid: object id + :param str localip: local address + :param ttl: ttl value + :param key: gre tap key + :param bool start: start flag + """ + PyCoreNetIf.__init__(self, node=node, name=name, mtu=mtu) self.session = session if objid is None: # from PyCoreObj - objid = (((id(self) >> 16) ^ (id(self) & 0xffff)) & 0xffff) + objid = ((id(self) >> 16) ^ (id(self) & 0xffff)) & 0xffff self.objid = objid - sessionid = self.session.shortsessionid() + sessionid = self.session.short_session_id() # interface name on the local host machine self.localname = "gt.%s.%s" % (self.objid, sessionid) self.transport_type = "raw" if not start: self.up = False return - + if remoteip is None: raise ValueError, "missing remote IP required for GRE TAP device" cmd = ("ip", "link", "add", self.localname, "type", "gretap", @@ -190,21 +270,39 @@ class GreTap(PyCoreNetIf): cmd += ("ttl", str(ttl)) if key: cmd += ("key", str(key)) - check_call(cmd) + subprocess.check_call(cmd) cmd = ("ip", "link", "set", self.localname, "up") - check_call(cmd) + subprocess.check_call(cmd) self.up = True def shutdown(self): + """ + Shutdown logic for a GreTap. + + :return: nothing + """ if self.localname: cmd = ("ip", "link", "set", self.localname, "down") - check_call(cmd) + subprocess.check_call(cmd) cmd = ("ip", "link", "del", self.localname) - check_call(cmd) + subprocess.check_call(cmd) self.localname = None - - def tonodemsg(self, flags): + + def data(self, message_type): + """ + Data for a gre tap. + + :param message_type: message type for data + :return: None + """ return None - - def tolinkmsgs(self, flags): + + def all_link_data(self, flags): + """ + Retrieve link data. + + :param flags: link flags + :return: link data + :rtype: list[core.data.LinkData] + """ return [] diff --git a/daemon/core/netns/vnet.py b/daemon/core/netns/vnet.py index 8dd1c494..c1a17a19 100644 --- a/daemon/core/netns/vnet.py +++ b/daemon/core/netns/vnet.py @@ -1,42 +1,48 @@ -# -# CORE -# Copyright (c)2010-2016 the Boeing Company. -# See the LICENSE file included in this distribution. 
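GreTap builds its device with a single `ip link add ... type gretap` command, appending the local address, ttl, and key only when they are given. A sketch of the same command construction with placeholder endpoint addresses:

    import subprocess

    def create_gretap(localname, remoteip, localip=None, ttl=255, key=None):
        """Create and bring up a gretap device tunneling to a remote host."""
        cmd = ["ip", "link", "add", localname, "type", "gretap",
               "remote", str(remoteip)]
        if localip:
            cmd += ["local", str(localip)]
        if ttl:
            cmd += ["ttl", str(ttl)]
        if key:
            cmd += ["key", str(key)]
        subprocess.check_call(cmd)
        subprocess.check_call(["ip", "link", "set", localname, "up"])

    # hypothetical tunnel endpoints and key
    create_gretap("gt.1.abcd", "192.0.2.2", localip="192.0.2.1", key=42)
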
-# -# authors: Tom Goff -# Jeff Ahrenholz -# -''' -vnet.py: PyCoreNet and LxBrNet classes that implement virtual networks using +""" +PyCoreNet and LxBrNet classes that implement virtual networks using Linux Ethernet bridging and ebtables rules. -''' +""" -import os, sys, threading, time, subprocess +import os +import subprocess +import threading +import time -from core.api import coreapi -from core.misc.utils import * -from core.constants import * -from core.coreobj import PyCoreNet, PyCoreObj -from core.netns.vif import VEth, GreTap +from core import constants +from core.coreobj import PyCoreNet +from core.misc import log +from core.misc import utils +from core.netns.vif import GreTap +from core.netns.vif import VEth -checkexec([BRCTL_BIN, IP_BIN, EBTABLES_BIN, TC_BIN]) +logger = log.get_logger(__name__) + +utils.check_executables([ + constants.BRCTL_BIN, + constants.IP_BIN, + constants.EBTABLES_BIN, + constants.TC_BIN +]) ebtables_lock = threading.Lock() + class EbtablesQueue(object): - ''' Helper class for queuing up ebtables commands into rate-limited + """ + Helper class for queuing up ebtables commands into rate-limited atomic commits. This improves performance and reliability when there are many WLAN link updates. - ''' + """ # update rate is every 300ms rate = 0.3 # ebtables atomic_file = "/tmp/pycore.ebtables.atomic" - + def __init__(self): - ''' Initialize the helper class, but don't start the update thread + """ + Initialize the helper class, but don't start the update thread until a WLAN is instantiated. - ''' + """ self.doupdateloop = False self.updatethread = None # this lock protects cmds and updates lists @@ -48,28 +54,35 @@ class EbtablesQueue(object): # timestamps of last WLAN update; this keeps track of WLANs that are # using this queue self.last_update_time = {} - + def startupdateloop(self, wlan): - ''' Kick off the update loop; only needs to be invoked once. - ''' + """ + Kick off the update loop; only needs to be invoked once. + + :return: nothing + """ self.updatelock.acquire() self.last_update_time[wlan] = time.time() self.updatelock.release() if self.doupdateloop: return self.doupdateloop = True - self.updatethread = threading.Thread(target = self.updateloop) + self.updatethread = threading.Thread(target=self.updateloop) self.updatethread.daemon = True self.updatethread.start() - + def stopupdateloop(self, wlan): - ''' Kill the update loop thread if there are no more WLANs using it. - ''' + """ + Kill the update loop thread if there are no more WLANs using it. + + :return: nothing + """ self.updatelock.acquire() try: del self.last_update_time[wlan] except KeyError: - pass + logger.exception("error deleting last update time for wlan: %s", wlan) + self.updatelock.release() if len(self.last_update_time) > 0: return @@ -77,152 +90,186 @@ class EbtablesQueue(object): if self.updatethread: self.updatethread.join() self.updatethread = None - + def ebatomiccmd(self, cmd): - ''' Helper for building ebtables atomic file command list. - ''' - r = [EBTABLES_BIN, "--atomic-file", self.atomic_file] + """ + Helper for building ebtables atomic file command list. + + :param list[str] cmd: ebtable command + :return: ebtable atomic command + :rtype: list[str] + """ + r = [constants.EBTABLES_BIN, "--atomic-file", self.atomic_file] if cmd: r.extend(cmd) return r - + def lastupdate(self, wlan): - ''' Return the time elapsed since this WLAN was last updated. - ''' + """ + Return the time elapsed since this WLAN was last updated. 
+ + :param wlan: wlan entity + :return: elpased time + :rtype: float + """ try: elapsed = time.time() - self.last_update_time[wlan] except KeyError: self.last_update_time[wlan] = time.time() elapsed = 0.0 + return elapsed - + def updated(self, wlan): - ''' Keep track of when this WLAN was last updated. - ''' + """ + Keep track of when this WLAN was last updated. + + :param wlan: wlan entity + :return: nothing + """ self.last_update_time[wlan] = time.time() self.updates.remove(wlan) - + def updateloop(self): - ''' Thread target that looks for WLANs needing update, and + """ + Thread target that looks for WLANs needing update, and rate limits the amount of ebtables activity. Only one userspace program should use ebtables at any given time, or results can be unpredictable. - ''' + + :return: nothing + """ while self.doupdateloop: self.updatelock.acquire() for wlan in self.updates: - ''' - Check if wlan is from a previously closed session. Because of the - rate limiting scheme employed here, this may happen if a new session + """ + Check if wlan is from a previously closed session. Because of the + rate limiting scheme employed here, this may happen if a new session is started soon after closing a previous session. - ''' + """ try: wlan.session except: - # Just mark as updated to remove from self.updates. + # Just mark as updated to remove from self.updates. self.updated(wlan) continue if self.lastupdate(wlan) > self.rate: self.buildcmds(wlan) - #print "ebtables commit %d rules" % len(self.cmds) + # print "ebtables commit %d rules" % len(self.cmds) self.ebcommit(wlan) self.updated(wlan) self.updatelock.release() time.sleep(self.rate) - + def ebcommit(self, wlan): - ''' Perform ebtables atomic commit using commands built in the - self.cmds list. - ''' + """ + Perform ebtables atomic commit using commands built in the self.cmds list. + + :return: nothing + """ # save kernel ebtables snapshot to a file - cmd = self.ebatomiccmd(["--atomic-save",]) + cmd = self.ebatomiccmd(["--atomic-save", ]) try: - check_call(cmd) - except Exception, e: - self.eberror(wlan, "atomic-save (%s)" % cmd, e) + subprocess.check_call(cmd) + except subprocess.CalledProcessError: + logger.exception("atomic-save (%s)", cmd) # no atomic file, exit return # modify the table file using queued ebtables commands for c in self.cmds: cmd = self.ebatomiccmd(c) try: - check_call(cmd) - except Exception, e: - self.eberror(wlan, "cmd=%s" % cmd, e) - pass + subprocess.check_call(cmd) + except subprocess.CalledProcessError: + logger.exception("cmd=%s", cmd) + self.cmds = [] # commit the table file to the kernel - cmd = self.ebatomiccmd(["--atomic-commit",]) + cmd = self.ebatomiccmd(["--atomic-commit", ]) + try: - check_call(cmd) + subprocess.check_call(cmd) os.unlink(self.atomic_file) - except Exception, e: - self.eberror(wlan, "atomic-commit (%s)" % cmd, e) - + except OSError: + logger.exception("atomic-commit (%s)", cmd) + def ebchange(self, wlan): - ''' Flag a change to the given WLAN's _linked dict, so the ebtables + """ + Flag a change to the given WLAN's _linked dict, so the ebtables chain will be rebuilt at the next interval. - ''' + + :return: nothing + """ self.updatelock.acquire() if wlan not in self.updates: self.updates.append(wlan) self.updatelock.release() - + def buildcmds(self, wlan): - ''' Inspect a _linked dict from a wlan, and rebuild the ebtables chain - for that WLAN. - ''' + """ + Inspect a _linked dict from a wlan, and rebuild the ebtables chain for that WLAN. 
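ebcommit batches the queued rules by snapshotting the kernel tables into an atomic file, editing that file, and committing it back in one step. A minimal sketch of that sequence, assuming ebtables is installed; the chain and rule below are hypothetical:

    import os
    import subprocess

    EBTABLES_BIN = "ebtables"
    ATOMIC_FILE = "/tmp/example.ebtables.atomic"

    def atomic(cmd):
        """Prefix an ebtables command so it operates on the atomic file."""
        return [EBTABLES_BIN, "--atomic-file", ATOMIC_FILE] + cmd

    def commit_rules(chain, rules):
        """Flush a chain and apply a batch of rules in one atomic commit."""
        subprocess.check_call(atomic(["--atomic-save"]))    # snapshot kernel tables
        subprocess.check_call(atomic(["-F", chain]))        # edits the file, not the kernel
        for rule in rules:
            subprocess.check_call(atomic(rule))
        subprocess.check_call(atomic(["--atomic-commit"]))  # push the file to the kernel
        os.unlink(ATOMIC_FILE)

    commit_rules("b.1.abcd", [
        ["-A", "b.1.abcd", "-i", "veth1.0.abcd", "-o", "veth2.0.abcd", "-j", "ACCEPT"],
    ])
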
+ + :return: nothing + """ wlan._linked_lock.acquire() # flush the chain - self.cmds.extend([["-F", wlan.brname],]) + self.cmds.extend([["-F", wlan.brname], ]) # rebuild the chain - for (netif1, v) in wlan._linked.items(): - for (netif2, linked) in v.items(): + for netif1, v in wlan._linked.items(): + for netif2, linked in v.items(): if wlan.policy == "DROP" and linked: self.cmds.extend([["-A", wlan.brname, "-i", netif1.localname, - "-o", netif2.localname, "-j", "ACCEPT"], - ["-A", wlan.brname, "-o", netif1.localname, - "-i", netif2.localname, "-j", "ACCEPT"]]) + "-o", netif2.localname, "-j", "ACCEPT"], + ["-A", wlan.brname, "-o", netif1.localname, + "-i", netif2.localname, "-j", "ACCEPT"]]) elif wlan.policy == "ACCEPT" and not linked: self.cmds.extend([["-A", wlan.brname, "-i", netif1.localname, - "-o", netif2.localname, "-j", "DROP"], - ["-A", wlan.brname, "-o", netif1.localname, - "-i", netif2.localname, "-j", "DROP"]]) + "-o", netif2.localname, "-j", "DROP"], + ["-A", wlan.brname, "-o", netif1.localname, + "-i", netif2.localname, "-j", "DROP"]]) wlan._linked_lock.release() - - def eberror(self, wlan, source, error): - ''' Log an ebtables command error and send an exception. - ''' - if not wlan: - return - wlan.exception(coreapi.CORE_EXCP_LEVEL_ERROR, wlan.brname, - "ebtables command error: %s\n%s\n" % (source, error)) - + # a global object because all WLANs share the same queue # cannot have multiple threads invoking the ebtables commnd ebq = EbtablesQueue() + def ebtablescmds(call, cmds): - ebtables_lock.acquire() - try: + """ + Run ebtable commands. + + :param func call: function to call commands + :param list cmds: commands to call + :return: nothing + """ + with ebtables_lock: for cmd in cmds: call(cmd) - finally: - ebtables_lock.release() + class LxBrNet(PyCoreNet): - + """ + Provides linux bridge network functionlity for core nodes. + """ policy = "DROP" - def __init__(self, session, objid = None, name = None, verbose = False, - start = True, policy = None): - PyCoreNet.__init__(self, session, objid, name, verbose, start) + def __init__(self, session, objid=None, name=None, start=True, policy=None): + """ + Creates a LxBrNet instance. + + :param core.session.Session session: core session instance + :param int objid: object id + :param str name: object name + :param bool start: start flag + :param policy: network policy + """ + PyCoreNet.__init__(self, session, objid, name, start) if name is None: name = str(self.objid) if policy is not None: self.policy = policy self.name = name - sessionid = self.session.shortsessionid() + sessionid = self.session.short_session_id() self.brname = "b.%s.%s" % (str(self.objid), sessionid) self.up = False if start: @@ -230,42 +277,50 @@ class LxBrNet(PyCoreNet): ebq.startupdateloop(self) def startup(self): + """ + Linux bridge starup logic. 
+ + :return: nothing + """ try: - check_call([BRCTL_BIN, "addbr", self.brname]) - except Exception, e: - self.exception(coreapi.CORE_EXCP_LEVEL_FATAL, self.brname, - "Error adding bridge: %s" % e) + subprocess.check_call([constants.BRCTL_BIN, "addbr", self.brname]) + except subprocess.CalledProcessError: + logger.exception("Error adding bridge") + try: # turn off spanning tree protocol and forwarding delay - check_call([BRCTL_BIN, "stp", self.brname, "off"]) - check_call([BRCTL_BIN, "setfd", self.brname, "0"]) - check_call([IP_BIN, "link", "set", self.brname, "up"]) + subprocess.check_call([constants.BRCTL_BIN, "stp", self.brname, "off"]) + subprocess.check_call([constants.BRCTL_BIN, "setfd", self.brname, "0"]) + subprocess.check_call([constants.IP_BIN, "link", "set", self.brname, "up"]) # create a new ebtables chain for this bridge - ebtablescmds(check_call, [ - [EBTABLES_BIN, "-N", self.brname, "-P", self.policy], - [EBTABLES_BIN, "-A", "FORWARD", - "--logical-in", self.brname, "-j", self.brname]]) + ebtablescmds(subprocess.check_call, [ + [constants.EBTABLES_BIN, "-N", self.brname, "-P", self.policy], + [constants.EBTABLES_BIN, "-A", "FORWARD", "--logical-in", self.brname, "-j", self.brname] + ]) # turn off multicast snooping so mcast forwarding occurs w/o IGMP joins - snoop = "/sys/devices/virtual/net/%s/bridge/multicast_snooping" % \ - self.brname + snoop = "/sys/devices/virtual/net/%s/bridge/multicast_snooping" % self.brname if os.path.exists(snoop): open(snoop, "w").write('0') - except Exception, e: - self.exception(coreapi.CORE_EXCP_LEVEL_WARNING, self.brname, - "Error setting bridge parameters: %s" % e) + except subprocess.CalledProcessError: + logger.exception("Error setting bridge parameters") self.up = True def shutdown(self): + """ + Linux bridge shutdown logic. + + :return: nothing + """ if not self.up: return ebq.stopupdateloop(self) - mutecall([IP_BIN, "link", "set", self.brname, "down"]) - mutecall([BRCTL_BIN, "delbr", self.brname]) - ebtablescmds(mutecall, [ - [EBTABLES_BIN, "-D", "FORWARD", + utils.mutecall([constants.IP_BIN, "link", "set", self.brname, "down"]) + utils.mutecall([constants.BRCTL_BIN, "delbr", self.brname]) + ebtablescmds(utils.mutecall, [ + [constants.EBTABLES_BIN, "-D", "FORWARD", "--logical-in", self.brname, "-j", self.brname], - [EBTABLES_BIN, "-X", self.brname]]) + [constants.EBTABLES_BIN, "-X", self.brname]]) for netif in self.netifs(): # removes veth pairs used for bridge-to-bridge connections netif.shutdown() @@ -275,34 +330,52 @@ class LxBrNet(PyCoreNet): self.up = False def attach(self, netif): + """ + Attach a network interface. + + :param core.netns.vif.VEth netif: network interface to attach + :return: nothing + """ if self.up: try: - check_call([BRCTL_BIN, "addif", self.brname, netif.localname]) - check_call([IP_BIN, "link", "set", netif.localname, "up"]) - except Exception, e: - self.exception(coreapi.CORE_EXCP_LEVEL_ERROR, self.brname, - "Error joining interface %s to bridge %s: %s" % \ - (netif.localname, self.brname, e)) + subprocess.check_call([constants.BRCTL_BIN, "addif", self.brname, netif.localname]) + subprocess.check_call([constants.IP_BIN, "link", "set", netif.localname, "up"]) + except subprocess.CalledProcessError: + logger.exception("Error joining interface %s to bridge %s", netif.localname, self.brname) return PyCoreNet.attach(self, netif) def detach(self, netif): + """ + Detach a network interface. 
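LxBrNet.startup above creates the bridge, turns off STP and the forwarding delay, and disables multicast snooping so multicast traffic is forwarded without IGMP joins. A standalone sketch of that bridge setup, assuming root privileges and the brctl/ip binaries; the bridge name is hypothetical:

    import os
    import subprocess

    def create_bridge(brname):
        """Create a Linux bridge tuned for emulation (no STP, no forward delay)."""
        subprocess.check_call(["brctl", "addbr", brname])
        subprocess.check_call(["brctl", "stp", brname, "off"])
        subprocess.check_call(["brctl", "setfd", brname, "0"])
        subprocess.check_call(["ip", "link", "set", brname, "up"])
        # forward multicast without requiring IGMP joins
        snoop = "/sys/devices/virtual/net/%s/bridge/multicast_snooping" % brname
        if os.path.exists(snoop):
            with open(snoop, "w") as f:
                f.write("0")

    create_bridge("b.1.abcd")  # hypothetical bridge name
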
+ + :param core.netns.vif.Veth netif: network interface to detach + :return: nothing + """ if self.up: try: - check_call([BRCTL_BIN, "delif", self.brname, netif.localname]) - except Exception, e: - self.exception(coreapi.CORE_EXCP_LEVEL_ERROR, self.brname, - "Error removing interface %s from bridge %s: %s" % \ - (netif.localname, self.brname, e)) + subprocess.check_call([constants.BRCTL_BIN, "delif", self.brname, netif.localname]) + except subprocess.CalledProcessError: + logger.exception("Error removing interface %s from bridge %s", netif.localname, self.brname) return PyCoreNet.detach(self, netif) def linked(self, netif1, netif2): + """ + Determine if the provided network interfaces are linked. + + :param core.netns.vif.Veth netif1: interface one + :param core.netns.vif.Veth netif2: interface two + :return: True if interfaces are linked, False otherwise + :rtype: bool + """ # check if the network interfaces are attached to this network if self._netif[netif1.netifi] != netif1: - raise ValueError, "inconsistency for netif %s" % netif1.name + raise ValueError("inconsistency for netif %s" % netif1.name) + if self._netif[netif2.netifi] != netif2: - raise ValueError, "inconsistency for netif %s" % netif2.name + raise ValueError("inconsistency for netif %s" % netif2.name) + try: linked = self._linked[netif1][netif2] except KeyError: @@ -311,14 +384,20 @@ class LxBrNet(PyCoreNet): elif self.policy == "DROP": linked = False else: - raise Exception, "unknown policy: %s" % self.policy + raise Exception("unknown policy: %s" % self.policy) self._linked[netif1][netif2] = linked + return linked def unlink(self, netif1, netif2): - ''' Unlink two PyCoreNetIfs, resulting in adding or removing ebtables + """ + Unlink two PyCoreNetIfs, resulting in adding or removing ebtables filtering rules. - ''' + + :param core.netns.vif.Veth netif1: interface one + :param core.netns.vif.Veth netif2: interface two + :return: nothing + """ self._linked_lock.acquire() if not self.linked(netif1, netif2): self._linked_lock.release() @@ -328,9 +407,14 @@ class LxBrNet(PyCoreNet): ebq.ebchange(self) def link(self, netif1, netif2): - ''' Link two PyCoreNetIfs together, resulting in adding or removing + """ + Link two PyCoreNetIfs together, resulting in adding or removing ebtables filtering rules. - ''' + + :param core.netns.vif.Veth netif1: interface one + :param core.netns.vif.Veth netif2: interface two + :return: nothing + """ self._linked_lock.acquire() if self.linked(netif1, netif2): self._linked_lock.release() @@ -339,37 +423,45 @@ class LxBrNet(PyCoreNet): self._linked_lock.release() ebq.ebchange(self) - def linkconfig(self, netif, bw = None, delay = None, - loss = None, duplicate = None, jitter = None, netif2 = None, - devname = None): - ''' Configure link parameters by applying tc queuing disciplines on the - interface. - ''' + def linkconfig(self, netif, bw=None, delay=None, loss=None, duplicate=None, + jitter=None, netif2=None, devname=None): + """ + Configure link parameters by applying tc queuing disciplines on the interface. 
+ + :param core.netns.vif.Veth netif: interface one + :param bw: bandwidth to set to + :param delay: packet delay to set to + :param loss: packet loss to set to + :param duplicate: duplicate percentage to set to + :param jitter: jitter to set to + :param core.netns.vif.Veth netif2: interface two + :param devname: device name + :return: nothing + """ if devname is None: devname = netif.localname - tc = [TC_BIN, "qdisc", "replace", "dev", devname] + tc = [constants.TC_BIN, "qdisc", "replace", "dev", devname] parent = ["root"] changed = False if netif.setparam('bw', bw): # from tc-tbf(8): minimum value for burst is rate / kernel_hz if bw is not None: burst = max(2 * netif.mtu, bw / 1000) - limit = 0xffff # max IP payload + # max IP payload + limit = 0xffff tbf = ["tbf", "rate", str(bw), "burst", str(burst), "limit", str(limit)] if bw > 0: if self.up: - if (self.verbose): - self.info("linkconfig: %s" % \ - ([tc + parent + ["handle", "1:"] + tbf],)) - check_call(tc + parent + ["handle", "1:"] + tbf) + logger.info("linkconfig: %s" % ([tc + parent + ["handle", "1:"] + tbf],)) + subprocess.check_call(tc + parent + ["handle", "1:"] + tbf) netif.setparam('has_tbf', True) changed = True elif netif.getparam('has_tbf') and bw <= 0: tcd = [] + tc tcd[2] = "delete" if self.up: - check_call(tcd + parent) + subprocess.check_call(tcd + parent) netif.setparam('has_tbf', False) # removing the parent removes the child netif.setparam('has_netem', False) @@ -395,7 +487,7 @@ class LxBrNet(PyCoreNet): netem += ["delay", "0us", "%sus" % jitter, "25%"] else: netem += ["%sus" % jitter, "25%"] - + if loss is not None: netem += ["loss", "%s%%" % min(loss, 100)] if duplicate is not None: @@ -406,47 +498,47 @@ class LxBrNet(PyCoreNet): return tc[2] = "delete" if self.up: - if self.verbose: - self.info("linkconfig: %s" % \ - ([tc + parent + ["handle", "10:"]],)) - check_call(tc + parent + ["handle", "10:"]) + logger.info("linkconfig: %s" % ([tc + parent + ["handle", "10:"]],)) + subprocess.check_call(tc + parent + ["handle", "10:"]) netif.setparam('has_netem', False) elif len(netem) > 1: if self.up: - if self.verbose: - self.info("linkconfig: %s" % \ - ([tc + parent + ["handle", "10:"] + netem],)) - check_call(tc + parent + ["handle", "10:"] + netem) + logger.info("linkconfig: %s" % ([tc + parent + ["handle", "10:"] + netem],)) + subprocess.check_call(tc + parent + ["handle", "10:"] + netem) netif.setparam('has_netem', True) def linknet(self, net): - ''' Link this bridge with another by creating a veth pair and installing - each device into each bridge. - ''' - sessionid = self.session.shortsessionid() + """ + Link this bridge with another by creating a veth pair and installing + each device into each bridge. 
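linkconfig expresses link effects as two stacked qdiscs: a tbf qdisc at the root for bandwidth and a netem qdisc beneath it for delay, jitter, loss, and duplication. A sketch of the equivalent tc commands for a hypothetical device; attaching netem under tbf via parent handle 1:1 is an assumption here:

    import subprocess

    def shape_link(devname, bw=None, delay_us=None, jitter_us=None,
                   loss=None, duplicate=None):
        """Apply tbf (rate limiting) and netem (impairment) qdiscs to a device."""
        has_tbf = False
        if bw:
            # from tc-tbf(8): burst must be at least rate / kernel HZ
            burst = max(2 * 1500, bw // 1000)
            subprocess.check_call(["tc", "qdisc", "replace", "dev", devname,
                                   "root", "handle", "1:", "tbf",
                                   "rate", str(bw), "burst", str(burst),
                                   "limit", "65535"])
            has_tbf = True
        netem = ["netem"]
        if delay_us is not None:
            netem += ["delay", "%dus" % delay_us]
            if jitter_us is not None:
                netem += ["%dus" % jitter_us, "25%"]
        elif jitter_us is not None:
            netem += ["delay", "0us", "%dus" % jitter_us, "25%"]
        if loss is not None:
            netem += ["loss", "%s%%" % min(loss, 100)]
        if duplicate is not None:
            netem += ["duplicate", "%s%%" % min(duplicate, 100)]
        if len(netem) > 1:
            parent = ["parent", "1:1"] if has_tbf else ["root"]
            subprocess.check_call(["tc", "qdisc", "replace", "dev", devname] +
                                  parent + ["handle", "10:"] + netem)

    # hypothetical device: 1 Mbps, 10 ms +/- 2 ms delay, 1% loss
    shape_link("veth1.0.abcd", bw=1000000, delay_us=10000, jitter_us=2000, loss=1)
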
+ + :param core.netns.vnet.LxBrNet net: network to link with + :return: created interface + :rtype: Veth + """ + sessionid = self.session.short_session_id() try: - self_objid = '%x' % self.objid + self_objid = "%x" % self.objid except TypeError: - self_objid = '%s' % self.objid + self_objid = "%s" % self.objid try: - net_objid = '%x' % net.objid + net_objid = "%x" % net.objid except TypeError: - net_objid = '%s' % net.objid - localname = 'veth%s.%s.%s' % (self_objid, net_objid, sessionid) + net_objid = "%s" % net.objid + localname = "veth%s.%s.%s" % (self_objid, net_objid, sessionid) if len(localname) >= 16: - raise ValueError, "interface local name '%s' too long" % \ - localname - name = 'veth%s.%s.%s' % (net_objid, self_objid, sessionid) + raise ValueError("interface local name %s too long" % localname) + name = "veth%s.%s.%s" % (net_objid, self_objid, sessionid) if len(name) >= 16: - raise ValueError, "interface name '%s' too long" % name - netif = VEth(node = None, name = name, localname = localname, - mtu = 1500, net = self, start = self.up) + raise ValueError("interface name %s too long" % name) + netif = VEth(node=None, name=name, localname=localname, + mtu=1500, net=self, start=self.up) self.attach(netif) if net.up: - # this is similar to net.attach() but uses netif.name instead + # this is similar to net.attach() but uses netif.name instead # of localname - check_call([BRCTL_BIN, "addif", net.brname, netif.name]) - check_call([IP_BIN, "link", "set", netif.name, "up"]) + subprocess.check_call([constants.BRCTL_BIN, "addif", net.brname, netif.name]) + subprocess.check_call([constants.IP_BIN, "link", "set", netif.name, "up"]) i = net.newifindex() net._netif[i] = netif with net._linked_lock: @@ -454,38 +546,60 @@ class LxBrNet(PyCoreNet): netif.net = self netif.othernet = net return netif - + def getlinknetif(self, net): - ''' Return the interface of that links this net with another net + """ + Return the interface of that links this net with another net (that were linked using linknet()). - ''' + + :param core.netns.vnet.LxBrNet net: interface to get link for + :return: interface the provided network is linked to + :rtype: core.netns.vnet.LxBrNet + """ for netif in self.netifs(): - if hasattr(netif, 'othernet') and netif.othernet == net: + if hasattr(netif, "othernet") and netif.othernet == net: return netif return None def addrconfig(self, addrlist): - ''' Set addresses on the bridge. - ''' + """ + Set addresses on the bridge. + + :param list[str] addrlist: address list + :return: nothing + """ if not self.up: return for addr in addrlist: try: - check_call([IP_BIN, "addr", "add", str(addr), "dev", self.brname]) - except Exception, e: - self.exception(coreapi.CORE_EXCP_LEVEL_ERROR, self.brname, - "Error adding IP address: %s" % e) + subprocess.check_call([constants.IP_BIN, "addr", "add", str(addr), "dev", self.brname]) + except subprocess.CalledProcessError: + logger.exception("Error adding IP address") + class GreTapBridge(LxBrNet): - ''' A network consisting of a bridge with a gretap device for tunneling to - another system. - ''' - def __init__(self, session, remoteip = None, objid = None, name = None, - policy = "ACCEPT", localip = None, ttl = 255, key = None, - verbose = False, start = True): - LxBrNet.__init__(self, session = session, objid = objid, - name = name, verbose = verbose, policy = policy, - start = False) + """ + A network consisting of a bridge with a gretap device for tunneling to + another system. 
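linknet joins two LxBrNet bridges by creating a veth pair and installing one end in each bridge. A minimal sketch of that operation; the bridge and interface names are hypothetical and must stay under the 16-character kernel limit:

    import subprocess

    def link_bridges(br1, br2, if1, if2):
        """Create a veth pair and add one end to each bridge."""
        subprocess.check_call(["ip", "link", "add", "name", if1,
                               "type", "veth", "peer", "name", if2])
        for brname, ifname in ((br1, if1), (br2, if2)):
            subprocess.check_call(["brctl", "addif", brname, ifname])
            subprocess.check_call(["ip", "link", "set", ifname, "up"])

    link_bridges("b.1.abcd", "b.2.abcd", "veth1.2.abcd", "veth2.1.abcd")
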
+ """ + + def __init__(self, session, remoteip=None, objid=None, name=None, + policy="ACCEPT", localip=None, ttl=255, key=None, start=True): + """ + Create a GreTapBridge instance. + + :param core.session.Session session: core session instance + :param str remoteip: remote address + :param int objid: object id + :param str name: object name + :param policy: network policy + :param str localip: local address + :param ttl: ttl value + :param key: gre tap key + :param bool start: start flag + :return: + """ + LxBrNet.__init__(self, session=session, objid=objid, name=name, policy=policy, start=False) self.grekey = key if self.grekey is None: self.grekey = self.session.sessionid ^ self.objid @@ -497,47 +611,59 @@ class GreTapBridge(LxBrNet): if remoteip is None: self.gretap = None else: - self.gretap = GreTap(node = self, name = None, session = session, - remoteip = remoteip, objid = None, localip = localip, ttl = ttl, - key = self.grekey) + self.gretap = GreTap(node=self, name=None, session=session, remoteip=remoteip, + objid=None, localip=localip, ttl=ttl, key=self.grekey) if start: self.startup() def startup(self): - ''' Creates a bridge and adds the gretap device to it. - ''' + """ + Creates a bridge and adds the gretap device to it. + + :return: nothing + """ LxBrNet.startup(self) if self.gretap: self.attach(self.gretap) def shutdown(self): - ''' Detach the gretap device and remove the bridge. - ''' + """ + Detach the gretap device and remove the bridge. + + :return: nothing + """ if self.gretap: self.detach(self.gretap) self.gretap.shutdown() self.gretap = None LxBrNet.shutdown(self) - + def addrconfig(self, addrlist): - ''' Set the remote tunnel endpoint. This is a one-time method for - creating the GreTap device, which requires the remoteip at startup. - The 1st address in the provided list is remoteip, 2nd optionally - specifies localip. - ''' + """ + Set the remote tunnel endpoint. This is a one-time method for + creating the GreTap device, which requires the remoteip at startup. + The 1st address in the provided list is remoteip, 2nd optionally + specifies localip. + + :param list addrlist: address list + :return: nothing + """ if self.gretap: - raise ValueError, "gretap already exists for %s" % self.name + raise ValueError("gretap already exists for %s" % self.name) remoteip = addrlist[0].split('/')[0] localip = None if len(addrlist) > 1: localip = addrlist[1].split('/')[0] - self.gretap = GreTap(session = self.session, remoteip = remoteip, - objid = None, name = None, - localip = localip, ttl = self.ttl, key = self.grekey) + self.gretap = GreTap(session=self.session, remoteip=remoteip, objid=None, name=None, + localip=localip, ttl=self.ttl, key=self.grekey) self.attach(self.gretap) def setkey(self, key): - ''' Set the GRE key used for the GreTap device. This needs to be set - prior to instantiating the GreTap device (before addrconfig). - ''' + """ + Set the GRE key used for the GreTap device. This needs to be set + prior to instantiating the GreTap device (before addrconfig). + + :param key: gre key + :return: nothing + """ self.grekey = key diff --git a/daemon/core/netns/vnode.py b/daemon/core/netns/vnode.py index 9ec605af..ea4e35a2 100644 --- a/daemon/core/netns/vnode.py +++ b/daemon/core/netns/vnode.py @@ -1,35 +1,50 @@ -# -# CORE -# Copyright (c)2010-2012 the Boeing Company. -# See the LICENSE file included in this distribution. -# -# authors: Tom Goff -# Jeff Ahrenholz -# -''' -vnode.py: PyCoreNode and LxcNode classes that implement the network namespace -virtual node. 
-''' +""" +PyCoreNode and LxcNode classes that implement the network namespac virtual node. +""" -import os, signal, sys, subprocess, vnodeclient, threading, string, shutil -import random, time -from core.api import coreapi -from core.misc.utils import * -from core.constants import * -from core.coreobj import PyCoreObj, PyCoreNode, PyCoreNetIf, Position -from core.netns.vif import VEth, TunTap -from core.emane.nodes import EmaneNode +import os +import random +import shutil +import signal +import string +import subprocess +import threading + +from core import constants +from core.coreobj import PyCoreNetIf +from core.coreobj import PyCoreNode +from core.enumerations import NodeTypes +from core.misc import log +from core.misc import nodeutils +from core.misc import utils +from core.netns import vnodeclient +from core.netns.vif import TunTap +from core.netns.vif import VEth + +logger = log.get_logger(__name__) + +utils.check_executables([constants.IP_BIN]) -checkexec([IP_BIN]) class SimpleLxcNode(PyCoreNode): - def __init__(self, session, objid = None, name = None, nodedir = None, - verbose = False, start = True): - PyCoreNode.__init__(self, session, objid, name, verbose=verbose, - start=start) + """ + Provides simple lxc functionality for core nodes. + """ + valid_deladdrtype = ("inet", "inet6", "inet6link") + + def __init__(self, session, objid=None, name=None, nodedir=None, start=True): + """ + Create a SimpleLxcNode instance. + + :param core.session.Session session: core session instance + :param int objid: object id + :param str name: object name + :param str nodedir: node directory + :param bool start: start flag + """ + PyCoreNode.__init__(self, session, objid, name, start=start) self.nodedir = nodedir - self.ctrlchnlname = \ - os.path.abspath(os.path.join(self.session.sessiondir, self.name)) + self.ctrlchnlname = os.path.abspath(os.path.join(self.session.session_dir, self.name)) self.vnodeclient = None self.pid = None self.up = False @@ -37,234 +52,411 @@ class SimpleLxcNode(PyCoreNode): self._mounts = [] def alive(self): + """ + Check if the node is alive. + + :return: True if node is alive, False otherwise + :rtype: bool + """ try: os.kill(self.pid, 0) except OSError: return False + return True def startup(self): - ''' Start a new namespace node by invoking the vnoded process that - allocates a new namespace. Bring up the loopback device and set - the hostname. - ''' + """ + Start a new namespace node by invoking the vnoded process that + allocates a new namespace. Bring up the loopback device and set + the hostname. 
+ + :return: nothing + """ if self.up: - raise Exception, "already up" - vnoded = ["%s/vnoded" % CORE_SBIN_DIR, "-v", "-c", self.ctrlchnlname, + raise Exception("already up") + vnoded = ["%s/vnoded" % constants.CORE_SBIN_DIR, "-v", "-c", self.ctrlchnlname, "-l", self.ctrlchnlname + ".log", "-p", self.ctrlchnlname + ".pid"] if self.nodedir: vnoded += ["-C", self.nodedir] - env = self.session.getenviron(state=False) + env = self.session.get_environment(state=False) env['NODE_NUMBER'] = str(self.objid) env['NODE_NAME'] = str(self.name) try: - tmp = subprocess.Popen(vnoded, stdout = subprocess.PIPE, env = env) - except OSError, e: - msg = "error running vnoded command: %s (%s)" % (vnoded, e) - self.exception(coreapi.CORE_EXCP_LEVEL_FATAL, - "SimpleLxcNode.startup()", msg) - raise Exception, msg + tmp = subprocess.Popen(vnoded, stdout=subprocess.PIPE, env=env) + except OSError: + msg = "error running vnoded command: %s" % vnoded + logger.exception("SimpleLxcNode.startup(): %s", msg) + raise Exception(msg) + try: self.pid = int(tmp.stdout.read()) tmp.stdout.close() - except Exception: + except ValueError: msg = "vnoded failed to create a namespace; " msg += "check kernel support and user priveleges" - self.exception(coreapi.CORE_EXCP_LEVEL_FATAL, - "SimpleLxcNode.startup()", msg) + logger.exception("SimpleLxcNode.startup(): %s", msg) + if tmp.wait(): - raise Exception, ("command failed: %s" % vnoded) - self.vnodeclient = vnodeclient.VnodeClient(self.name, - self.ctrlchnlname) - self.info("bringing up loopback interface") - self.cmd([IP_BIN, "link", "set", "lo", "up"]) - self.info("setting hostname: %s" % self.name) + raise Exception("command failed: %s" % vnoded) + + self.vnodeclient = vnodeclient.VnodeClient(self.name, self.ctrlchnlname) + logger.info("bringing up loopback interface") + self.cmd([constants.IP_BIN, "link", "set", "lo", "up"]) + logger.info("setting hostname: %s" % self.name) self.cmd(["hostname", self.name]) self.up = True def shutdown(self): + """ + Shutdown logic for simple lxc nodes. + + :return: nothing + """ + # nothing to do if node is not up if not self.up: return + + # unmount all targets while self._mounts: source, target = self._mounts.pop(-1) self.umount(target) + + # shutdown all interfaces for netif in self.netifs(): netif.shutdown() + + # attempt to kill node process and wait for termination of children try: os.kill(self.pid, signal.SIGTERM) os.waitpid(self.pid, 0) except OSError: - pass + logger.exception("error killing process") + + # remove node directory if present try: - os.unlink(self.ctrlchnlname) + if os.path.exists(self.ctrlchnlname): + os.unlink(self.ctrlchnlname) except OSError: - pass + logger.exception("error removing file") + + # clear interface data, close client, and mark self and not up self._netif.clear() self.vnodeclient.close() self.up = False - def cmd(self, args, wait = True): + # TODO: potentially remove all these wrapper methods, just make use of object itself. + def cmd(self, args, wait=True): + """ + Wrapper around vnodeclient cmd. + + :param args: arguments for ocmmand + :param wait: wait or not + :return: + """ return self.vnodeclient.cmd(args, wait) def cmdresult(self, args): + """ + Wrapper around vnodeclient cmdresult. + + :param args: arguments for ocmmand + :return: + """ return self.vnodeclient.cmdresult(args) def popen(self, args): + """ + Wrapper around vnodeclient popen. + + :param args: arguments for ocmmand + :return: + """ return self.vnodeclient.popen(args) def icmd(self, args): + """ + Wrapper around vnodeclient icmd. 
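SimpleLxcNode.startup launches vnoded, which prints the PID of the new namespace process on stdout; moving interfaces into the namespace and signalling the node both key off that PID. A simplified sketch of launching a command and capturing the PID it reports, using a plain shell as a hypothetical stand-in for vnoded:

    import subprocess

    def start_and_read_pid(cmd, env=None):
        """Run a command that prints a PID on stdout and return that PID."""
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env)
        output = proc.stdout.read().decode().strip()
        proc.stdout.close()
        if proc.wait():
            raise RuntimeError("command failed: %s" % cmd)
        try:
            return int(output)
        except ValueError:
            raise RuntimeError("no PID reported by: %s" % cmd)

    # a shell that prints its own PID stands in for vnoded here
    pid = start_and_read_pid(["sh", "-c", "echo $$"])
    print(pid)
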
+ + :param args: arguments for ocmmand + :return: + """ return self.vnodeclient.icmd(args) - def redircmd(self, infd, outfd, errfd, args, wait = True): + def redircmd(self, infd, outfd, errfd, args, wait=True): + """ + Wrapper around vnodeclient redircmd. + + :param infd: input file descriptor + :param outfd: output file descriptor + :param errfd: err file descriptor + :param args: command arguments + :param wait: wait or not + :return: + """ return self.vnodeclient.redircmd(infd, outfd, errfd, args, wait) - def term(self, sh = "/bin/sh"): - return self.vnodeclient.term(sh = sh) + def term(self, sh="/bin/sh"): + """ + Wrapper around vnodeclient term. - def termcmdstring(self, sh = "/bin/sh"): - return self.vnodeclient.termcmdstring(sh = sh) + :param sh: shell to create terminal for + :return: + """ + return self.vnodeclient.term(sh=sh) - def shcmd(self, cmdstr, sh = "/bin/sh"): - return self.vnodeclient.shcmd(cmdstr, sh = sh) + def termcmdstring(self, sh="/bin/sh"): + """ + Wrapper around vnodeclient termcmdstring. + + :param sh: shell to run command in + :return: + """ + return self.vnodeclient.termcmdstring(sh=sh) + + def shcmd(self, cmdstr, sh="/bin/sh"): + """ + Wrapper around vnodeclient shcmd. + + :param str cmdstr: command string + :param sh: shell to run command in + :return: + """ + return self.vnodeclient.shcmd(cmdstr, sh=sh) def boot(self): + """ + Boot logic. + + :return: nothing + """ pass def mount(self, source, target): + """ + Create and mount a directory. + + :param str source: source directory to mount + :param str target: target directory to create + :return: nothing + """ source = os.path.abspath(source) - self.info("mounting %s at %s" % (source, target)) + logger.info("mounting %s at %s" % (source, target)) try: - shcmd = "mkdir -p '%s' && %s -n --bind '%s' '%s'" % \ - (target, MOUNT_BIN, source, target) + shcmd = "mkdir -p '%s' && %s -n --bind '%s' '%s'" % ( + target, constants.MOUNT_BIN, source, target) self.shcmd(shcmd) self._mounts.append((source, target)) - except: - self.warn("mounting failed for %s at %s" % (source, target)) + except IOError: + logger.exception("mounting failed for %s at %s", source, target) def umount(self, target): - self.info("unmounting '%s'" % target) + """ + Unmount a target directory. + + :param str target: target directory to unmount + :return: nothing + """ + logger.info("unmounting '%s'" % target) try: - self.cmd([UMOUNT_BIN, "-n", "-l", target]) - except: - self.warn("unmounting failed for %s" % target) + self.cmd([constants.UMOUNT_BIN, "-n", "-l", target]) + except IOError: + logger.exception("unmounting failed for %s" % target) def newifindex(self): - with self.lock: - return PyCoreNode.newifindex(self) + """ + Retrieve a new interface index. - def newveth(self, ifindex = None, ifname = None, net = None): + :return: new interface index + :rtype: int + """ + with self.lock: + return super(SimpleLxcNode, self).newifindex() + + def newveth(self, ifindex=None, ifname=None, net=None): + """ + Create a new interface. 
+ + :param int ifindex: index for the new interface + :param str ifname: name for the new interface + :param net: network to associate interface with + :return: nothing + """ self.lock.acquire() try: if ifindex is None: ifindex = self.newifindex() + if ifname is None: ifname = "eth%d" % ifindex - sessionid = self.session.shortsessionid() + + sessionid = self.session.short_session_id() + try: suffix = '%x.%s.%s' % (self.objid, ifindex, sessionid) except TypeError: suffix = '%s.%s.%s' % (self.objid, ifindex, sessionid) + localname = 'veth' + suffix if len(localname) >= 16: - raise ValueError, "interface local name '%s' too long" % \ - localname + raise ValueError("interface local name '%s' too long" % localname) name = localname + 'p' if len(name) >= 16: raise ValueError, "interface name '%s' too long" % name - ifclass = VEth - veth = ifclass(node = self, name = name, localname = localname, - mtu = 1500, net = net, start = self.up) + veth = VEth(node=self, name=name, localname=localname, mtu=1500, net=net, start=self.up) + if self.up: - check_call([IP_BIN, "link", "set", veth.name, - "netns", str(self.pid)]) - self.cmd([IP_BIN, "link", "set", veth.name, "name", ifname]) + subprocess.check_call([constants.IP_BIN, "link", "set", veth.name, "netns", str(self.pid)]) + self.cmd([constants.IP_BIN, "link", "set", veth.name, "name", ifname]) + veth.name = ifname + + # retrieve interface information + result, output = self.cmdresult(["ip", "link", "show", veth.name]) + logger.info("interface command output: %s", output) + output = output.split("\n") + veth.flow_id = int(output[0].strip().split(":")[0]) + 1 + logger.info("interface flow index: %s - %s", veth.name, veth.flow_id) + veth.hwaddr = output[1].strip().split()[1] + logger.info("interface mac: %s - %s", veth.name, veth.hwaddr) + try: self.addnetif(veth, ifindex) except: veth.shutdown() del veth raise + return ifindex finally: self.lock.release() - def newtuntap(self, ifindex = None, ifname = None, net = None): + def newtuntap(self, ifindex=None, ifname=None, net=None): + """ + Create a new tunnel tap. + + :param int ifindex: interface index + :param str ifname: interface name + :param net: network to associate with + :return: interface index + :rtype: int + """ self.lock.acquire() try: if ifindex is None: ifindex = self.newifindex() if ifname is None: ifname = "eth%d" % ifindex - sessionid = self.session.shortsessionid() + sessionid = self.session.short_session_id() localname = "tap%s.%s.%s" % (self.objid, ifindex, sessionid) name = ifname ifclass = TunTap - tuntap = ifclass(node = self, name = name, localname = localname, - mtu = 1500, net = net, start = self.up) + tuntap = ifclass(node=self, name=name, localname=localname, + mtu=1500, net=net, start=self.up) try: self.addnetif(tuntap, ifindex) - except: + except Exception as e: tuntap.shutdown() del tuntap - raise + raise e return ifindex finally: self.lock.release() def sethwaddr(self, ifindex, addr): + """ + Set hardware addres for an interface. 
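newveth above now records the interface index and MAC address by parsing `ip link show` output (the flow id is the reported link index plus one). A standalone sketch of that parsing against a device visible on the host:

    import subprocess

    def interface_info(ifname):
        """Return (link index, MAC address) parsed from 'ip link show'."""
        output = subprocess.check_output(["ip", "link", "show", ifname]).decode()
        lines = output.split("\n")
        # first line: "2: eth0: <BROADCAST,MULTICAST,...> mtu 1500 ..."
        index = int(lines[0].strip().split(":")[0])
        # second line: "    link/ether 0a:1b:2c:3d:4e:5f brd ff:ff:ff:ff:ff:ff"
        hwaddr = lines[1].strip().split()[1]
        return index, hwaddr

    print(interface_info("lo"))  # e.g. (1, '00:00:00:00:00:00')
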
+ + :param int ifindex: index of interface to set hardware address for + :param core.misc.ipaddress.MacAddress addr: hardware address to set + :return: mothing + """ self._netif[ifindex].sethwaddr(addr) if self.up: - (status, result) = self.cmdresult([IP_BIN, "link", "set", "dev", - self.ifname(ifindex), "address", str(addr)]) + (status, result) = self.cmdresult([constants.IP_BIN, "link", "set", "dev", + self.ifname(ifindex), "address", str(addr)]) if status: - self.exception(coreapi.CORE_EXCP_LEVEL_ERROR, - "SimpleLxcNode.sethwaddr()", - "error setting MAC address %s" % str(addr)) + logger.error("error setting MAC address %s", str(addr)) + def addaddr(self, ifindex, addr): + """ + Add interface address. + + :param int ifindex: index of interface to add address to + :param str addr: address to add to interface + :return: nothing + """ if self.up: if ":" in str(addr): # check if addr is ipv6 - self.cmd([IP_BIN, "addr", "add", str(addr), + self.cmd([constants.IP_BIN, "addr", "add", str(addr), "dev", self.ifname(ifindex)]) else: - self.cmd([IP_BIN, "addr", "add", str(addr), "broadcast", "+", + self.cmd([constants.IP_BIN, "addr", "add", str(addr), "broadcast", "+", "dev", self.ifname(ifindex)]) self._netif[ifindex].addaddr(addr) def deladdr(self, ifindex, addr): + """ + Delete address from an interface. + + :param int ifindex: index of interface to delete address from + :param str addr: address to delete from interface + :return: nothing + """ try: self._netif[ifindex].deladdr(addr) except ValueError: - self.warn("trying to delete unknown address: %s" % addr) - if self.up: - self.cmd([IP_BIN, "addr", "del", str(addr), - "dev", self.ifname(ifindex)]) + logger.exception("trying to delete unknown address: %s" % addr) - valid_deladdrtype = ("inet", "inet6", "inet6link") - def delalladdr(self, ifindex, addrtypes = valid_deladdrtype): - addr = self.getaddr(self.ifname(ifindex), rescan = True) + if self.up: + self.cmd([constants.IP_BIN, "addr", "del", str(addr), "dev", self.ifname(ifindex)]) + + def delalladdr(self, ifindex, addrtypes=valid_deladdrtype): + """ + Delete all addresses from an interface. + + :param int ifindex: index of interface to delete all addresses from + :param tuple addrtypes: address types to delete + :return: nothing + """ + addr = self.getaddr(self.ifname(ifindex), rescan=True) for t in addrtypes: if t not in self.valid_deladdrtype: - raise ValueError, "addr type must be in: " + \ - " ".join(self.valid_deladdrtype) + raise ValueError("addr type must be in: " + " ".join(self.valid_deladdrtype)) for a in addr[t]: self.deladdr(ifindex, a) # update cached information - self.getaddr(self.ifname(ifindex), rescan = True) + self.getaddr(self.ifname(ifindex), rescan=True) def ifup(self, ifindex): - if self.up: - self.cmd([IP_BIN, "link", "set", self.ifname(ifindex), "up"]) + """ + Bring an interface up. - def newnetif(self, net = None, addrlist = [], hwaddr = None, - ifindex = None, ifname = None): + :param int ifindex: index of interface to bring up + :return: nothing + """ + if self.up: + self.cmd([constants.IP_BIN, "link", "set", self.ifname(ifindex), "up"]) + + def newnetif(self, net=None, addrlist=None, hwaddr=None, ifindex=None, ifname=None): + """ + Create a new network interface. 
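addaddr above distinguishes IPv6 (no broadcast) from IPv4, where `broadcast +` lets the kernel derive the broadcast address from the prefix. A small sketch of that branch as a standalone helper; the interface and addresses are hypothetical:

    import subprocess

    def add_addr(ifname, addr):
        """Add an address; IPv4 gets an automatic broadcast, IPv6 does not."""
        cmd = ["ip", "addr", "add", str(addr)]
        if ":" not in str(addr):
            cmd += ["broadcast", "+"]
        cmd += ["dev", ifname]
        subprocess.check_call(cmd)

    add_addr("eth0", "10.0.0.1/24")
    add_addr("eth0", "2001:db8::1/64")
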
+ + :param net: network to associate with + :param list addrlist: addresses to add on the interface + :param core.misc.ipaddress.MacAddress hwaddr: hardware address to set for interface + :param int ifindex: index of interface to create + :param str ifname: name for interface + :return: interface index + :rtype: int + """ self.lock.acquire() try: - if isinstance(net, EmaneNode): - ifindex = self.newtuntap(ifindex = ifindex, ifname = ifname, - net = net) + # TODO: see if you can move this to emane specific code + if nodeutils.is_node(net, NodeTypes.EMANE): + ifindex = self.newtuntap(ifindex=ifindex, ifname=ifname, net=net) # TUN/TAP is not ready for addressing yet; the device may # take some time to appear, and installing it into a # namespace after it has been bound removes addressing; @@ -272,143 +464,235 @@ class SimpleLxcNode(PyCoreNode): self.attachnet(ifindex, net) netif = self.netif(ifindex) netif.sethwaddr(hwaddr) - for addr in maketuple(addrlist): + for addr in utils.maketuple(addrlist): netif.addaddr(addr) return ifindex else: - ifindex = self.newveth(ifindex = ifindex, ifname = ifname, - net = net) + ifindex = self.newveth(ifindex=ifindex, ifname=ifname, net=net) + if net is not None: self.attachnet(ifindex, net) + if hwaddr: self.sethwaddr(ifindex, hwaddr) - for addr in maketuple(addrlist): - self.addaddr(ifindex, addr) + + if addrlist: + for addr in utils.maketuple(addrlist): + self.addaddr(ifindex, addr) + self.ifup(ifindex) return ifindex finally: self.lock.release() def connectnode(self, ifname, othernode, otherifname): + """ + Connect a node. + + :param str ifname: name of interface to connect + :param core.netns.nodes.LxcNode othernode: node to connect to + :param str otherifname: interface name to connect to + :return: nothing + """ tmplen = 8 tmp1 = "tmp." + "".join([random.choice(string.ascii_lowercase) for x in xrange(tmplen)]) tmp2 = "tmp." + "".join([random.choice(string.ascii_lowercase) for x in xrange(tmplen)]) - check_call([IP_BIN, "link", "add", "name", tmp1, - "type", "veth", "peer", "name", tmp2]) + subprocess.check_call([constants.IP_BIN, "link", "add", "name", tmp1, + "type", "veth", "peer", "name", tmp2]) - check_call([IP_BIN, "link", "set", tmp1, "netns", str(self.pid)]) - self.cmd([IP_BIN, "link", "set", tmp1, "name", ifname]) + subprocess.call([constants.IP_BIN, "link", "set", tmp1, "netns", str(self.pid)]) + self.cmd([constants.IP_BIN, "link", "set", tmp1, "name", ifname]) self.addnetif(PyCoreNetIf(self, ifname), self.newifindex()) - check_call([IP_BIN, "link", "set", tmp2, "netns", str(othernode.pid)]) - othernode.cmd([IP_BIN, "link", "set", tmp2, "name", otherifname]) + subprocess.check_call([constants.IP_BIN, "link", "set", tmp2, "netns", str(othernode.pid)]) + othernode.cmd([constants.IP_BIN, "link", "set", tmp2, "name", otherifname]) othernode.addnetif(PyCoreNetIf(othernode, otherifname), othernode.newifindex()) def addfile(self, srcname, filename): - shcmd = "mkdir -p $(dirname '%s') && mv '%s' '%s' && sync" % \ - (filename, srcname, filename) + """ + Add a file. + + :param str srcname: source file name + :param str filename: file name to add + :return: nothing + """ + shcmd = "mkdir -p $(dirname '%s') && mv '%s' '%s' && sync" % (filename, srcname, filename) self.shcmd(shcmd) - def getaddr(self, ifname, rescan = False): - return self.vnodeclient.getaddr(ifname = ifname, rescan = rescan) + def getaddr(self, ifname, rescan=False): + """ + Wrapper around vnodeclient getaddr. 
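
For reference, the veth-pair handling in connectnode() above reduces to a handful of iproute2 calls. A simplified sketch; the binary path and namespace PIDs are placeholders and the commands require root:

    import subprocess

    IP_BIN = "/sbin/ip"               # placeholder; the code above uses constants.IP_BIN
    node_pid, other_pid = 1234, 5678  # hypothetical vnoded PIDs identifying the namespaces

    # create a temporary veth pair in the host namespace
    subprocess.check_call([IP_BIN, "link", "add", "name", "tmp.abc", "type", "veth",
                           "peer", "name", "tmp.xyz"])
    # move one end into each node's namespace; each node then renames its end,
    # e.g. "ip link set tmp.abc name eth0" executed via node.cmd()
    subprocess.check_call([IP_BIN, "link", "set", "tmp.abc", "netns", str(node_pid)])
    subprocess.check_call([IP_BIN, "link", "set", "tmp.xyz", "netns", str(other_pid)])
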
-    def netifstats(self, ifname = None):
-        return self.vnodeclient.netifstats(ifname = ifname)
+        :param str ifname: interface name to get address for
+        :param bool rescan: rescan flag
+        :return: interface address information
+        """
+        return self.vnodeclient.getaddr(ifname=ifname, rescan=rescan)
+
+    def netifstats(self, ifname=None):
+        """
+        Wrapper around vnodeclient netifstats.
+
+        :param str ifname: interface name to get state for
+        :return: interface statistics
+        """
+        return self.vnodeclient.netifstats(ifname=ifname)


 class LxcNode(SimpleLxcNode):
-    def __init__(self, session, objid = None, name = None,
-                 nodedir = None, bootsh = "boot.sh", verbose = False,
-                 start = True):
-        super(LxcNode, self).__init__(session = session, objid = objid,
-                                      name = name, nodedir = nodedir,
-                                      verbose = verbose, start = start)
+    """
+    Provides lxc node functionality for core nodes.
+    """
+
+    def __init__(self, session, objid=None, name=None,
+                 nodedir=None, bootsh="boot.sh", start=True):
+        """
+        Create a LxcNode instance.
+
+        :param core.session.Session session: core session instance
+        :param int objid: object id
+        :param str name: object name
+        :param str nodedir: node directory
+        :param str bootsh: boot shell script name
+        :param bool start: start flag
+        """
+        super(LxcNode, self).__init__(session=session, objid=objid,
+                                      name=name, nodedir=nodedir, start=start)
         self.bootsh = bootsh
         if start:
             self.startup()

     def boot(self):
+        """
+        Boot the node.
+
+        :return: nothing
+        """
         self.session.services.bootnodeservices(self)
-
+
     def validate(self):
+        """
+        Validate the node.
+
+        :return: nothing
+        """
         self.session.services.validatenodeservices(self)

     def startup(self):
+        """
+        Startup logic for the node.
+
+        :return: nothing
+        """
         self.lock.acquire()
         try:
             self.makenodedir()
             super(LxcNode, self).startup()
             self.privatedir("/var/run")
             self.privatedir("/var/log")
-        except OSError, e:
-            self.warn("Error with LxcNode.startup(): %s" % e)
-            self.exception(coreapi.CORE_EXCP_LEVEL_ERROR,
-                           "LxcNode.startup()", "%s" % e)
+        except OSError:
+            logger.exception("error during LxcNode.startup()")
         finally:
             self.lock.release()

     def shutdown(self):
+        """
+        Shutdown logic for the node.
+
+        :return: nothing
+        """
         if not self.up:
             return
         self.lock.acquire()
         # services are instead stopped when session enters datacollect state
-        #self.session.services.stopnodeservices(self)
+        # self.session.services.stopnodeservices(self)
         try:
             super(LxcNode, self).shutdown()
+        except:
+            logger.exception("error during shutdown")
         finally:
             self.rmnodedir()
             self.lock.release()

     def privatedir(self, path):
+        """
+        Create a private directory.
+
+        :param str path: path to create
+        :return: nothing
+        """
         if path[0] != "/":
-            raise ValueError, "path not fully qualified: " + path
-        hostpath = os.path.join(self.nodedir,
-            os.path.normpath(path).strip('/').replace('/', '.'))
+            raise ValueError("path not fully qualified: %s" % path)
+        hostpath = os.path.join(self.nodedir, os.path.normpath(path).strip('/').replace('/', '.'))
+
         try:
             os.mkdir(hostpath)
         except OSError:
-            pass
-        except Exception, e:
-            raise Exception, e
+            logger.exception("error creating directory: %s", hostpath)
+
         self.mount(hostpath, path)

     def hostfilename(self, filename):
-        ''' Return the name of a node's file on the host filesystem.
-        '''
+        """
+        Return the name of a node's file on the host filesystem.
+ + :param str filename: host file name + :return: path to file + """ dirname, basename = os.path.split(filename) if not basename: - raise ValueError, "no basename for filename: " + filename + raise ValueError("no basename for filename: " + filename) if dirname and dirname[0] == "/": dirname = dirname[1:] dirname = dirname.replace("/", ".") dirname = os.path.join(self.nodedir, dirname) return os.path.join(dirname, basename) - def opennodefile(self, filename, mode = "w"): + def opennodefile(self, filename, mode="w"): + """ + Open a node file, within it's directory. + + :param str filename: file name to open + :param str mode: mode to open file in + :return: open file + :rtype: file + """ hostfilename = self.hostfilename(filename) dirname, basename = os.path.split(hostfilename) if not os.path.isdir(dirname): - os.makedirs(dirname, mode = 0755) + os.makedirs(dirname, mode=0755) return open(hostfilename, mode) - def nodefile(self, filename, contents, mode = 0644): + def nodefile(self, filename, contents, mode=0644): + """ + Create a node file with a given mode. + + :param str filename: name of file to create + :param contents: contents of file + :param int mode: mode for file + :return: nothing + """ f = self.opennodefile(filename, "w") f.write(contents) os.chmod(f.name, mode) f.close() - self.info("created nodefile: '%s'; mode: 0%o" % (f.name, mode)) - - def nodefilecopy(self, filename, srcfilename, mode = None): - ''' Copy a file to a node, following symlinks and preserving metadata. + logger.info("created nodefile: '%s'; mode: 0%o" % (f.name, mode)) + + def nodefilecopy(self, filename, srcfilename, mode=None): + """ + Copy a file to a node, following symlinks and preserving metadata. Change file mode if specified. - ''' + + :param str filename: file name to copy file to + :param str srcfilename: file to copy + :param int mode: mode to copy to + :return: nothing + """ hostfilename = self.hostfilename(filename) shutil.copy2(srcfilename, hostfilename) if mode is not None: os.chmod(hostfilename, mode) - self.info("copied nodefile: '%s'; mode: %s" % (hostfilename, mode)) - - + logger.info("copied nodefile: '%s'; mode: %s" % (hostfilename, mode)) diff --git a/daemon/core/netns/vnodeclient.py b/daemon/core/netns/vnodeclient.py index df8194c2..114415e9 100644 --- a/daemon/core/netns/vnodeclient.py +++ b/daemon/core/netns/vnodeclient.py @@ -1,19 +1,17 @@ -# -# CORE -# Copyright (c)2010-2012 the Boeing Company. -# See the LICENSE file included in this distribution. -# -# author: Tom Goff -# -''' +""" vnodeclient.py: implementation of the VnodeClient class for issuing commands over a control channel to the vnoded process running in a network namespace. The control channel can be accessed via calls to the vcmd Python module or by invoking the vcmd shell command. -''' +""" -import os, stat, sys -from core.constants import * +import os +import stat + +from core import constants +from core.misc import log + +logger = log.get_logger(__name__) USE_VCMD_MODULE = True @@ -22,10 +20,21 @@ if USE_VCMD_MODULE: else: import subprocess -VCMD = os.path.join(CORE_SBIN_DIR, "vcmd") +VCMD = os.path.join(constants.CORE_SBIN_DIR, "vcmd") + class VnodeClient(object): + """ + Provides client functionality for interacting with a virtual node. + """ + def __init__(self, name, ctrlchnlname): + """ + Create a VnodeClient instance. 
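
The module-level `logger = log.get_logger(__name__)` used here (and throughout these files) replaces the old per-object warn()/info() helpers. core.misc.log itself is not shown in this patch; a minimal stand-in built on the standard logging package could look like the following, which only illustrates the pattern and is not the project's actual implementation:

    import logging

    def get_logger(name):
        """Return a named logger with a single console handler attached."""
        logger = logging.getLogger(name)
        if not logger.handlers:
            handler = logging.StreamHandler()
            handler.setFormatter(logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s"))
            logger.addHandler(handler)
            logger.setLevel(logging.INFO)
        return logger

    logger = get_logger(__name__)
    logger.warn("cmd exited with status %s: %s", 1, ["ip", "link", "show"])
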
+ + :param str name: name for client + :param str ctrlchnlname: control channel name + """ self.name = name self.ctrlchnlname = ctrlchnlname if USE_VCMD_MODULE: @@ -34,25 +43,39 @@ class VnodeClient(object): self.cmdchnl = None self._addr = {} - def warn(self, msg): - print >> sys.stderr, "%s: %s" % (self.name, msg) - def connected(self): + """ + Check if node is connected or not. + + :return: True if connected, False otherwise + :rtype: bool + """ if USE_VCMD_MODULE: return self.cmdchnl.connected() else: return True def close(self): + """ + Close the client connection. + + :return: nothing + """ if USE_VCMD_MODULE: self.cmdchnl.close() - def cmd(self, args, wait = True): - ''' Execute a command on a node and return the status (return code). - ''' + def cmd(self, args, wait=True): + """ + Execute a command on a node and return the status (return code). + + :param list args: command arguments + :param bool wait: wait for command to end or not + :return: command status + :rtype: int + """ if USE_VCMD_MODULE: if not self.cmdchnl.connected(): - raise ValueError, "self.cmdchnl not connected" + raise ValueError("self.cmdchnl not connected") tmp = self.cmdchnl.qcmd(args) if not wait: return tmp @@ -62,19 +85,25 @@ class VnodeClient(object): mode = os.P_WAIT else: mode = os.P_NOWAIT - tmp = os.spawnlp(mode, VCMD, VCMD, "-c", - self.ctrlchnlname, "-q", "--", *args) + tmp = os.spawnlp(mode, VCMD, VCMD, "-c", self.ctrlchnlname, "-q", "--", *args) if not wait: return tmp + if tmp: - self.warn("cmd exited with status %s: %s" % (tmp, str(args))) + logger.warn("cmd exited with status %s: %s" % (tmp, str(args))) + return tmp def cmdresult(self, args): - ''' Execute a command on a node and return a tuple containing the - exit status and result string. stderr output - is folded into the stdout result string. - ''' + """ + Execute a command on a node and return a tuple containing the + exit status and result string. stderr output + is folded into the stdout result string. + + :param list args: command arguments + :return: command status and combined stdout and stderr output + :rtype: tuple[int, str] + """ cmdid, cmdin, cmdout, cmderr = self.popen(args) result = cmdout.read() result += cmderr.read() @@ -82,43 +111,69 @@ class VnodeClient(object): cmdout.close() cmderr.close() status = cmdid.wait() - return (status, result) + return status, result def popen(self, args): + """ + Execute a popen command against the node. + + :param list args: command arguments + :return: popen object, stdin, stdout, and stderr + :rtype: tuple + """ if USE_VCMD_MODULE: if not self.cmdchnl.connected(): - raise ValueError, "self.cmdchnl not connected" + raise ValueError("self.cmdchnl not connected") return self.cmdchnl.popen(args) else: cmd = [VCMD, "-c", self.ctrlchnlname, "--"] cmd.extend(args) - tmp = subprocess.Popen(cmd, stdin = subprocess.PIPE, - stdout = subprocess.PIPE, - stderr = subprocess.PIPE) + tmp = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) return tmp, tmp.stdin, tmp.stdout, tmp.stderr def icmd(self, args): - return os.spawnlp(os.P_WAIT, VCMD, VCMD, "-c", self.ctrlchnlname, - "--", *args) + """ + Execute an icmd against a node. 
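
Taken together, cmd(), cmdresult() and popen() give callers three levels of control over running programs inside a node's namespace. A usage sketch; the control channel path below is hypothetical and assumes a running session already created the socket:

    from core.netns.vnodeclient import VnodeClient

    client = VnodeClient("n1", "/tmp/pycore.12345/n1")   # name, control channel socket

    status = client.cmd(["ip", "link", "set", "lo", "up"])           # return code only
    status, output = client.cmdresult(["ip", "addr", "show", "lo"])  # code plus stdout+stderr
    if status:
        print "command failed: %s" % output
    client.close()
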
- def redircmd(self, infd, outfd, errfd, args, wait = True): - ''' + :param list args: command arguments + :return: command result + :rtype: int + """ + return os.spawnlp(os.P_WAIT, VCMD, VCMD, "-c", self.ctrlchnlname, "--", *args) + + def redircmd(self, infd, outfd, errfd, args, wait=True): + """ Execute a command on a node with standard input, output, and error redirected according to the given file descriptors. - ''' + + :param infd: stdin file descriptor + :param outfd: stdout file descriptor + :param errfd: stderr file descriptor + :param list args: command arguments + :param bool wait: wait flag + :return: command status + :rtype: int + """ if not USE_VCMD_MODULE: raise NotImplementedError if not self.cmdchnl.connected(): - raise ValueError, "self.cmdchnl not connected" + raise ValueError("self.cmdchnl not connected") tmp = self.cmdchnl.redircmd(infd, outfd, errfd, args) if not wait: return tmp tmp = tmp.wait() if tmp: - self.warn("cmd exited with status %s: %s" % (tmp, str(args))) + logger.warn("cmd exited with status %s: %s" % (tmp, str(args))) return tmp - def term(self, sh = "/bin/sh"): + def term(self, sh="/bin/sh"): + """ + Open a terminal on a node. + + :param str sh: shell to open terminal with + :return: terminal command result + :rtype: int + """ cmd = ("xterm", "-ut", "-title", self.name, "-e", VCMD, "-c", self.ctrlchnlname, "--", sh) if "SUDO_USER" in os.environ: @@ -127,19 +182,42 @@ class VnodeClient(object): os.environ["SUDO_USER"]) return os.spawnvp(os.P_NOWAIT, cmd[0], cmd) - def termcmdstring(self, sh = "/bin/sh"): + def termcmdstring(self, sh="/bin/sh"): + """ + Create a terminal command string. + + :param str sh: shell to execute command in + :return: str + """ return "%s -c %s -- %s" % (VCMD, self.ctrlchnlname, sh) - def shcmd(self, cmdstr, sh = "/bin/sh"): + def shcmd(self, cmdstr, sh="/bin/sh"): + """ + Execute a shell command. + + :param str cmdstr: command string + :param str sh: shell to run command in + :return: command result + :rtype: int + """ return self.cmd([sh, "-c", cmdstr]) - def getaddr(self, ifname, rescan = False): + def getaddr(self, ifname, rescan=False): + """ + Get address for interface on node. + + :param str ifname: interface name to get address for + :param bool rescan: rescan flag + :return: interface information + :rtype: dict + """ if ifname in self._addr and not rescan: return self._addr[ifname] tmp = {"ether": [], "inet": [], "inet6": [], "inet6link": []} - cmd = [IP_BIN, "addr", "show", "dev", ifname] + cmd = [constants.IP_BIN, "addr", "show", "dev", ifname] cmdid, cmdin, cmdout, cmderr = self.popen(cmd) cmdin.close() + for line in cmdout: line = line.strip().split() if line[0] == "link/ether": @@ -152,21 +230,27 @@ class VnodeClient(object): elif line[3] == "link": tmp["inet6link"].append(line[1]) else: - self.warn("unknown scope: %s" % line[3]) - else: - pass + logger.warn("unknown scope: %s" % line[3]) + err = cmderr.read() cmdout.close() cmderr.close() status = cmdid.wait() if status: - self.warn("nonzero exist status (%s) for cmd: %s" % (status, cmd)) + logger.warn("nonzero exist status (%s) for cmd: %s" % (status, cmd)) if err: - self.warn("error output: %s" % err) + logger.warn("error output: %s" % err) self._addr[ifname] = tmp return tmp - def netifstats(self, ifname = None): + def netifstats(self, ifname=None): + """ + Retrieve network interface state. 
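
getaddr() above buckets each line of `ip addr show dev <ifname>` by address family and, for IPv6, by scope. The classification on its own, run against made-up sample lines:

    def classify_addresses(lines):
        """Sort 'ip addr show' output lines into ether/inet/inet6/inet6link buckets."""
        info = {"ether": [], "inet": [], "inet6": [], "inet6link": []}
        for line in lines:
            fields = line.strip().split()
            if not fields:
                continue
            if fields[0] == "link/ether":
                info["ether"].append(fields[1])
            elif fields[0] == "inet":
                info["inet"].append(fields[1])
            elif fields[0] == "inet6":
                if fields[3] == "global":
                    info["inet6"].append(fields[1])
                elif fields[3] == "link":
                    info["inet6link"].append(fields[1])
        return info

    sample = [
        "link/ether 00:11:22:33:44:55 brd ff:ff:ff:ff:ff:ff",
        "inet 10.0.0.1/24 brd 10.0.0.255 scope global eth0",
        "inet6 fe80::211:22ff:fe33:4455/64 scope link",
    ]
    assert classify_addresses(sample)["inet6link"] == ["fe80::211:22ff:fe33:4455/64"]
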
+ + :param str ifname: name of interface to get state for + :return: interface state information + :rtype: dict + """ stats = {} cmd = ["cat", "/proc/net/dev"] cmdid, cmdin, cmdout, cmderr = self.popen(cmd) @@ -195,34 +279,47 @@ class VnodeClient(object): cmderr.close() status = cmdid.wait() if status: - self.warn("nonzero exist status (%s) for cmd: %s" % (status, cmd)) + logger.warn("nonzero exist status (%s) for cmd: %s" % (status, cmd)) if err: - self.warn("error output: %s" % err) + logger.warn("error output: %s" % err) if ifname is not None: return stats[ifname] else: return stats -def createclients(sessiondir, clientcls = VnodeClient, - cmdchnlfilterfunc = None): - direntries = map(lambda x: os.path.join(sessiondir, x), - os.listdir(sessiondir)) + +def createclients(sessiondir, clientcls=VnodeClient, cmdchnlfilterfunc=None): + """ + Create clients + + :param str sessiondir: session directory to create clients + :param class clientcls: class to create clients from + :param func cmdchnlfilterfunc: command channel filter function + :return: list of created clients + :rtype: list + """ + direntries = map(lambda x: os.path.join(sessiondir, x), os.listdir(sessiondir)) cmdchnls = filter(lambda x: stat.S_ISSOCK(os.stat(x).st_mode), direntries) if cmdchnlfilterfunc: cmdchnls = filter(cmdchnlfilterfunc, cmdchnls) cmdchnls.sort() return map(lambda x: clientcls(os.path.basename(x), x), cmdchnls) -def createremoteclients(sessiondir, clientcls = VnodeClient, - filterfunc = None): - ''' Creates remote VnodeClients, for nodes emulated on other machines. The + +def createremoteclients(sessiondir, clientcls=VnodeClient, filterfunc=None): + """ + Creates remote VnodeClients, for nodes emulated on other machines. The session.Broker writes a n1.conf/server file having the server's info. - ''' - direntries = map(lambda x: os.path.join(sessiondir, x), - os.listdir(sessiondir)) + + :param str sessiondir: session directory to create clients + :param class clientcls: class to create clients from + :param func filterfunc: filter function + :return: list of remove clients + :rtype: list + """ + direntries = map(lambda x: os.path.join(sessiondir, x), os.listdir(sessiondir)) nodedirs = filter(lambda x: stat.S_ISDIR(os.stat(x).st_mode), direntries) - nodedirs = filter(lambda x: os.path.exists(os.path.join(x, "server")), - nodedirs) + nodedirs = filter(lambda x: os.path.exists(os.path.join(x, "server")), nodedirs) if filterfunc: nodedirs = filter(filterfunc, nodedirs) nodedirs.sort() diff --git a/daemon/core/phys/pnodes.py b/daemon/core/phys/pnodes.py index ebc0342c..81ac4f56 100644 --- a/daemon/core/phys/pnodes.py +++ b/daemon/core/phys/pnodes.py @@ -1,42 +1,34 @@ -# -# CORE -# Copyright (c)2011-2012 the Boeing Company. -# See the LICENSE file included in this distribution. -# -# author: Jeff Ahrenholz -# -''' PhysicalNode class for including real systems in the emulated network. -''' -import os, threading, subprocess +""" +PhysicalNode class for including real systems in the emulated network. 
+""" -from core.misc.ipaddr import * -from core.misc.utils import * -from core.constants import * -from core.api import coreapi -from core.coreobj import PyCoreNode, PyCoreNetIf -from core.emane.nodes import EmaneNode -if os.uname()[0] == "Linux": - from core.netns.vnet import LxBrNet - from core.netns.vif import GreTap -elif os.uname()[0] == "FreeBSD": - from core.bsd.vnet import NetgraphNet +import os +import subprocess +import threading + +from core import constants +from core.coreobj import PyCoreNode +from core.misc import log +from core.misc import utils +from core.netns.vnet import GreTap +from core.netns.vnet import LxBrNet + +logger = log.get_logger(__name__) class PhysicalNode(PyCoreNode): - def __init__(self, session, objid = None, name = None, - nodedir = None, verbose = False, start = True): - PyCoreNode.__init__(self, session, objid, name, verbose=verbose, - start=start) + def __init__(self, session, objid=None, name=None, nodedir=None, start=True): + PyCoreNode.__init__(self, session, objid, name, start=start) self.nodedir = nodedir self.up = start self.lock = threading.RLock() self._mounts = [] if start: self.startup() - + def boot(self): self.session.services.bootnodeservices(self) - + def validate(self): self.session.services.validatenodeservices(self) @@ -44,11 +36,10 @@ class PhysicalNode(PyCoreNode): self.lock.acquire() try: self.makenodedir() - #self.privatedir("/var/run") - #self.privatedir("/var/log") - except OSError, e: - self.exception(coreapi.CORE_EXCP_LEVEL_ERROR, - "PhysicalNode.startup()", e) + # self.privatedir("/var/run") + # self.privatedir("/var/log") + except OSError: + logger.exception("startup error") finally: self.lock.release() @@ -64,16 +55,17 @@ class PhysicalNode(PyCoreNode): self.rmnodedir() self.lock.release() - - def termcmdstring(self, sh = "/bin/sh"): - ''' The broker will add the appropriate SSH command to open a terminal + def termcmdstring(self, sh="/bin/sh"): + """ + The broker will add the appropriate SSH command to open a terminal on this physical node. 
- ''' + """ return sh - - def cmd(self, args, wait = True): - ''' run a command on the physical node - ''' + + def cmd(self, args, wait=True): + """ + run a command on the physical node + """ os.chdir(self.nodedir) try: if wait: @@ -82,109 +74,105 @@ class PhysicalNode(PyCoreNode): else: # os.spawnlp(os.P_NOWAIT, args) subprocess.Popen(args) - except CalledProcessError, e: - self.warn("cmd exited with status %s: %s" % (e, str(args))) - + except subprocess.CalledProcessError: + logger.exception("cmd exited with status: %s", str(args)) + def cmdresult(self, args): - ''' run a command on the physical node and get the result - ''' + """ + run a command on the physical node and get the result + """ os.chdir(self.nodedir) # in Python 2.7 we can use subprocess.check_output() here - tmp = subprocess.Popen(args, stdin = open(os.devnull, 'r'), - stdout = subprocess.PIPE, - stderr = subprocess.STDOUT) - result, err = tmp.communicate() # err will always be None + tmp = subprocess.Popen(args, stdin=open(os.devnull, 'r'), + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + # err will always be None + result, err = tmp.communicate() status = tmp.wait() - return (status, result) - - def shcmd(self, cmdstr, sh = "/bin/sh"): + return status, result + + def shcmd(self, cmdstr, sh="/bin/sh"): return self.cmd([sh, "-c", cmdstr]) def sethwaddr(self, ifindex, addr): - ''' same as SimpleLxcNode.sethwaddr() - ''' + """ + same as SimpleLxcNode.sethwaddr() + """ self._netif[ifindex].sethwaddr(addr) ifname = self.ifname(ifindex) if self.up: - (status, result) = self.cmdresult([IP_BIN, "link", "set", "dev", - ifname, "address", str(addr)]) + (status, result) = self.cmdresult( + [constants.IP_BIN, "link", "set", "dev", ifname, "address", str(addr)]) if status: - self.exception(coreapi.CORE_EXCP_LEVEL_ERROR, - "PhysicalNode.sethwaddr()", - "error setting MAC address %s" % str(addr)) - + logger.error("error setting MAC address %s", str(addr)) + def addaddr(self, ifindex, addr): - ''' same as SimpleLxcNode.addaddr() - ''' + """ + same as SimpleLxcNode.addaddr() + """ if self.up: - self.cmd([IP_BIN, "addr", "add", str(addr), - "dev", self.ifname(ifindex)]) + self.cmd([constants.IP_BIN, "addr", "add", str(addr), "dev", self.ifname(ifindex)]) + self._netif[ifindex].addaddr(addr) def deladdr(self, ifindex, addr): - ''' same as SimpleLxcNode.deladdr() - ''' + """ + same as SimpleLxcNode.deladdr() + """ try: self._netif[ifindex].deladdr(addr) except ValueError: - self.warn("trying to delete unknown address: %s" % addr) + logger.exception("trying to delete unknown address: %s", addr) + if self.up: - self.cmd([IP_BIN, "addr", "del", str(addr), - "dev", self.ifname(ifindex)]) + self.cmd([constants.IP_BIN, "addr", "del", str(addr), "dev", self.ifname(ifindex)]) def adoptnetif(self, netif, ifindex, hwaddr, addrlist): - ''' The broker builds a GreTap tunnel device to this physical node. + """ + The broker builds a GreTap tunnel device to this physical node. When a link message is received linking this node to another part of the emulation, no new interface is created; instead, adopt the GreTap netif as the node interface. - ''' + """ netif.name = "gt%d" % ifindex netif.node = self self.addnetif(netif, ifindex) # use a more reasonable name, e.g. 
"gt0" instead of "gt.56286.150" if self.up: - self.cmd([IP_BIN, "link", "set", "dev", netif.localname, "down"]) - self.cmd([IP_BIN, "link", "set", netif.localname, "name", netif.name]) + self.cmd([constants.IP_BIN, "link", "set", "dev", netif.localname, "down"]) + self.cmd([constants.IP_BIN, "link", "set", netif.localname, "name", netif.name]) netif.localname = netif.name if hwaddr: self.sethwaddr(ifindex, hwaddr) - for addr in maketuple(addrlist): + for addr in utils.maketuple(addrlist): self.addaddr(ifindex, addr) if self.up: - self.cmd([IP_BIN, "link", "set", "dev", netif.localname, "up"]) - - def linkconfig(self, netif, bw = None, delay = None, - loss = None, duplicate = None, jitter = None, netif2 = None): - ''' Apply tc queing disciplines using LxBrNet.linkconfig() - ''' - if os.uname()[0] == "Linux": - netcls = LxBrNet - elif os.uname()[0] == "FreeBSD": - netcls = NetgraphNet - else: - raise NotImplementedError, "unsupported platform" + self.cmd([constants.IP_BIN, "link", "set", "dev", netif.localname, "up"]) + + def linkconfig(self, netif, bw=None, delay=None, + loss=None, duplicate=None, jitter=None, netif2=None): + """ + Apply tc queing disciplines using LxBrNet.linkconfig() + """ # borrow the tc qdisc commands from LxBrNet.linkconfig() - tmp = netcls(session=self.session, start=False) - tmp.up = True - tmp.linkconfig(netif, bw=bw, delay=delay, loss=loss, - duplicate=duplicate, jitter=jitter, netif2=netif2) - del tmp + linux_bridge = LxBrNet(session=self.session, start=False) + linux_bridge.up = True + linux_bridge.linkconfig(netif, bw=bw, delay=delay, loss=loss, duplicate=duplicate, + jitter=jitter, netif2=netif2) + del linux_bridge def newifindex(self): - self.lock.acquire() - try: + with self.lock: while self.ifindex in self._netif: self.ifindex += 1 ifindex = self.ifindex self.ifindex += 1 return ifindex - finally: - self.lock.release() - def newnetif(self, net = None, addrlist = [], hwaddr = None, - ifindex = None, ifname = None): + def newnetif(self, net=None, addrlist=[], hwaddr=None, ifindex=None, ifname=None): if self.up and net is None: raise NotImplementedError + if ifindex is None: ifindex = self.newifindex() @@ -193,56 +181,52 @@ class PhysicalNode(PyCoreNode): # tunnel to net not built yet, so build it now and adopt it gt = self.session.broker.addnettunnel(net.objid) if gt is None or len(gt) != 1: - self.session.warn("Error building tunnel from PhysicalNode." 
- "newnetif()") + raise ValueError("error building tunnel from adding a new network interface: %s" % gt) gt = gt[0] net.detach(gt) self.adoptnetif(gt, ifindex, hwaddr, addrlist) return ifindex - + # this is reached when configuring services (self.up=False) if ifname is None: ifname = "gt%d" % ifindex - netif = GreTap(node = self, name = ifname, session = self.session, - start = False) + + netif = GreTap(node=self, name=ifname, session=self.session, start=False) self.adoptnetif(netif, ifindex, hwaddr, addrlist) return ifindex - - + def privatedir(self, path): if path[0] != "/": raise ValueError, "path not fully qualified: " + path - hostpath = os.path.join(self.nodedir, - os.path.normpath(path).strip('/').replace('/', '.')) + hostpath = os.path.join(self.nodedir, os.path.normpath(path).strip('/').replace('/', '.')) try: os.mkdir(hostpath) except OSError: - pass - except Exception, e: - raise Exception, e + logger.exception("error creating directory: %s", hostpath) + self.mount(hostpath, path) def mount(self, source, target): source = os.path.abspath(source) - self.info("mounting %s at %s" % (source, target)) + logger.info("mounting %s at %s" % (source, target)) + try: os.makedirs(target) - except OSError: - pass - try: - self.cmd([MOUNT_BIN, "--bind", source, target]) + self.cmd([constants.MOUNT_BIN, "--bind", source, target]) self._mounts.append((source, target)) + except OSError: + logger.exception("error making directories") except: - self.warn("mounting failed for %s at %s" % (source, target)) + logger.exception("mounting failed for %s at %s", source, target) def umount(self, target): - self.info("unmounting '%s'" % target) + logger.info("unmounting '%s'" % target) try: - self.cmd([UMOUNT_BIN, "-l", target]) + self.cmd([constants.UMOUNT_BIN, "-l", target]) except: - self.warn("unmounting failed for %s" % target) + logger.exception("unmounting failed for %s", target) - def opennodefile(self, filename, mode = "w"): + def opennodefile(self, filename, mode="w"): dirname, basename = os.path.split(filename) if not basename: raise ValueError, "no basename for filename: " + filename @@ -251,15 +235,13 @@ class PhysicalNode(PyCoreNode): dirname = dirname.replace("/", ".") dirname = os.path.join(self.nodedir, dirname) if not os.path.isdir(dirname): - os.makedirs(dirname, mode = 0755) + os.makedirs(dirname, mode=0755) hostfilename = os.path.join(dirname, basename) return open(hostfilename, mode) - def nodefile(self, filename, contents, mode = 0644): + def nodefile(self, filename, contents, mode=0644): f = self.opennodefile(filename, "w") f.write(contents) os.chmod(f.name, mode) f.close() - self.info("created nodefile: '%s'; mode: 0%o" % (f.name, mode)) - - + logger.info("created nodefile: '%s'; mode: 0%o" % (f.name, mode)) diff --git a/daemon/core/pycore.py b/daemon/core/pycore.py deleted file mode 100644 index 5743c4b9..00000000 --- a/daemon/core/pycore.py +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright (c)2010-2012 the Boeing Company. -# See the LICENSE file included in this distribution. - -""" -This is a convenience module that imports a set of platform-dependent -defaults. -""" - -from misc.utils import ensurepath -ensurepath(["/sbin", "/bin", "/usr/sbin", "/usr/bin"]) -del ensurepath - -from session import Session - -import os - -if os.uname()[0] == "Linux": - from netns import nodes - try: - from xen import xen - except ImportError: - #print "Xen support disabled." 
- pass -elif os.uname()[0] == "FreeBSD": - from bsd import nodes -from phys import pnodes -del os diff --git a/daemon/core/sdt.py b/daemon/core/sdt.py index 2eeb0db2..785fe5d8 100644 --- a/daemon/core/sdt.py +++ b/daemon/core/sdt.py @@ -1,74 +1,102 @@ -# -# CORE -# Copyright (c)2012-2013 the Boeing Company. -# See the LICENSE file included in this distribution. -# -# author: Jeff Ahrenholz -# -''' +""" sdt.py: Scripted Display Tool (SDT3D) helper -''' +""" -from core.constants import * -from core.api import coreapi -from coreobj import PyCoreNet, PyCoreObj -from core.netns import nodes -from urlparse import urlparse import socket +from urlparse import urlparse + +from core import constants +from core.api import coreapi +from core.coreobj import PyCoreNet +from core.coreobj import PyCoreObj +from core.enumerations import EventTypes +from core.enumerations import LinkTlvs +from core.enumerations import LinkTypes +from core.enumerations import MessageFlags +from core.enumerations import MessageTypes +from core.enumerations import NodeTlvs +from core.enumerations import NodeTypes +from core.misc import log +from core.misc import nodeutils + +logger = log.get_logger(__name__) + + +# TODO: A named tuple may be more appropriate, than abusing a class dict like this +class Bunch(object): + """ + Helper class for recording a collection of attributes. + """ + + def __init__(self, **kwds): + """ + Create a Bunch instance. + + :param dict kwds: keyword arguments + :return: + """ + self.__dict__.update(kwds) + class Sdt(object): - ''' Helper class for exporting session objects to NRL's SDT3D. + """ + Helper class for exporting session objects to NRL"s SDT3D. The connect() method initializes the display, and can be invoked when a node position or link has changed. - ''' + """ DEFAULT_SDT_URL = "tcp://127.0.0.1:50000/" # default altitude (in meters) for flyto view DEFAULT_ALT = 2500 - # TODO: read in user's nodes.conf here; below are default node types - # from the GUI - DEFAULT_SPRITES = [('router', 'router.gif'), ('host', 'host.gif'), - ('PC', 'pc.gif'), ('mdr', 'mdr.gif'), - ('prouter', 'router_green.gif'), ('xen', 'xen.gif'), - ('hub', 'hub.gif'), ('lanswitch','lanswitch.gif'), - ('wlan', 'wlan.gif'), ('rj45','rj45.gif'), - ('tunnel','tunnel.gif'), - ] + # TODO: read in user"s nodes.conf here; below are default node types from the GUI + DEFAULT_SPRITES = [ + ("router", "router.gif"), ("host", "host.gif"), + ("PC", "pc.gif"), ("mdr", "mdr.gif"), + ("prouter", "router_green.gif"), ("xen", "xen.gif"), + ("hub", "hub.gif"), ("lanswitch", "lanswitch.gif"), + ("wlan", "wlan.gif"), ("rj45", "rj45.gif"), + ("tunnel", "tunnel.gif"), + ] - class Bunch: - ''' Helper class for recording a collection of attributes. - ''' - def __init__(self, **kwds): - self.__dict__.update(kwds) - def __init__(self, session): + """ + Creates a Sdt instance. + + :param core.session.Session session: session this manager is tied to + """ self.session = session self.sock = None self.connected = False self.showerror = True self.url = self.DEFAULT_SDT_URL - self.verbose = self.session.getcfgitembool('verbose', False) # node information for remote nodes not in session._objs # local nodes also appear here since their obj may not exist yet self.remotes = {} session.broker.handlers.add(self.handledistributed) - + def is_enabled(self): - ''' Check for 'enablesdt' session option. Return False by default if - the option is missing. - ''' - if not hasattr(self.session.options, 'enablesdt'): + """ + Check for "enablesdt" session option. 
Return False by default if + the option is missing. + + :return: True if enabled, False otherwise + :rtype: bool + """ + if not hasattr(self.session.options, "enablesdt"): return False enabled = self.session.options.enablesdt - if enabled in ('1', 'true', 1, True): + if enabled in ("1", "true", 1, True): return True return False def seturl(self): - ''' Read 'sdturl' from session options, or use the default value. - Set self.url, self.address, self.protocol - ''' + """ + Read "sdturl" from session options, or use the default value. + Set self.url, self.address, self.protocol + + :return: nothing + """ url = None - if hasattr(self.session.options,'sdturl'): + if hasattr(self.session.options, "sdturl"): if self.session.options.sdturl != "": url = self.session.options.sdturl if url is None or url == "": @@ -78,147 +106,193 @@ class Sdt(object): self.protocol = self.url.scheme def connect(self, flags=0): - ''' Connect to the SDT address/port if enabled. - ''' + """ + Connect to the SDT address/port if enabled. + + :return: True if connected, False otherwise + :rtype: bool + """ if not self.is_enabled(): return False if self.connected: return True - if self.session.getstate() == coreapi.CORE_EVENT_SHUTDOWN_STATE: + if self.session.state == EventTypes.SHUTDOWN_STATE.value: return False self.seturl() - if self.showerror: - self.session.info("connecting to SDT at %s://%s" \ - % (self.protocol, self.address)) + logger.info("connecting to SDT at %s://%s" % (self.protocol, self.address)) if self.sock is None: try: - if (self.protocol.lower() == 'udp'): + if self.protocol.lower() == "udp": self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.sock.connect(self.address) else: # Default to tcp self.sock = socket.create_connection(self.address, 5) - except Exception, e: - if self.showerror: - self.session.warn("SDT socket connect error: %s" % e) - self.showerror = False + except IOError: + logger.exception("SDT socket connect error") return False + if not self.initialize(): return False + self.connected = True # refresh all objects in SDT3D when connecting after session start - if not flags & coreapi.CORE_API_ADD_FLAG: - if not self.sendobjs(): - return False + if not flags & MessageFlags.ADD.value and not self.sendobjs(): + return False + return True - + def initialize(self): - ''' Load icon sprites, and fly to the reference point location on - the virtual globe. - ''' - if not self.cmd('path "%s/icons/normal"' % CORE_DATA_DIR): + """ + Load icon sprites, and fly to the reference point location on + the virtual globe. + + :return: initialize command status + :rtype: bool + """ + if not self.cmd("path \"%s/icons/normal\"" % constants.CORE_DATA_DIR): return False # send node type to icon mappings - for (type, icon) in self.DEFAULT_SPRITES: - if not self.cmd('sprite %s image %s' % (type, icon)): + for type, icon in self.DEFAULT_SPRITES: + if not self.cmd("sprite %s image %s" % (type, icon)): return False (lat, long) = self.session.location.refgeo[:2] - return self.cmd('flyto %.6f,%.6f,%d' % (long, lat, self.DEFAULT_ALT)) - + return self.cmd("flyto %.6f,%.6f,%d" % (long, lat, self.DEFAULT_ALT)) + def disconnect(self): - try: - self.sock.close() - except: - pass - self.sock = None + """ + Disconnect from SDT. + + :return: nothing + """ + if self.sock: + try: + self.sock.close() + except IOError: + logger.error("error closing socket") + finally: + self.sock = None + self.connected = False - + def shutdown(self): - ''' Invoked from Session.shutdown() and Session.checkshutdown(). 
- ''' - self.cmd('clear all') + """ + Invoked from Session.shutdown() and Session.checkshutdown(). + + :return: nothing + """ + self.cmd("clear all") self.disconnect() self.showerror = True - + def cmd(self, cmdstr): - ''' Send an SDT command over a UDP socket. socket.sendall() is used - as opposed to socket.sendto() because an exception is raised when - there is no socket listener. - ''' + """ + Send an SDT command over a UDP socket. socket.sendall() is used + as opposed to socket.sendto() because an exception is raised when + there is no socket listener. + + :param str cmdstr: command to send + :return: True if command was successful, False otherwise + :rtype: bool + """ if self.sock is None: return False try: - if self.verbose: - self.session.info("sdt: %s" % cmdstr) + logger.info("sdt: %s" % cmdstr) self.sock.sendall("%s\n" % cmdstr) return True - except Exception, e: - if self.showerror: - self.session.warn("SDT connection error: %s" % e) - self.showerror = False + except IOError: + logger.exception("SDT connection error") self.sock = None self.connected = False return False - - def updatenode(self, nodenum, flags, x, y, z, - name=None, type=None, icon=None): - ''' Node is updated from a Node Message or mobility script. - ''' + + def updatenode(self, nodenum, flags, x, y, z, name=None, type=None, icon=None): + """ + Node is updated from a Node Message or mobility script. + + :param int nodenum: node id to update + :param flags: update flags + :param x: x position + :param y: y position + :param z: z position + :param str name: node name + :param type: node type + :param icon: node icon + :return: nothing + """ if not self.connect(): return - if flags & coreapi.CORE_API_DEL_FLAG: - self.cmd('delete node,%d' % nodenum) + if flags & MessageFlags.DELETE.value: + self.cmd("delete node,%d" % nodenum) return if x is None or y is None: return - (lat, long, alt) = self.session.location.getgeo(x, y, z) + lat, long, alt = self.session.location.getgeo(x, y, z) pos = "pos %.6f,%.6f,%.6f" % (long, lat, alt) - if flags & coreapi.CORE_API_ADD_FLAG: + if flags & MessageFlags.ADD.value: if icon is not None: type = name - icon = icon.replace("$CORE_DATA_DIR", CORE_DATA_DIR) - icon = icon.replace("$CORE_CONF_DIR", CORE_CONF_DIR) - self.cmd('sprite %s image %s' % (type, icon)) - self.cmd('node %d type %s label on,"%s" %s' % \ - (nodenum, type, name, pos)) + icon = icon.replace("$CORE_DATA_DIR", constants.CORE_DATA_DIR) + icon = icon.replace("$CORE_CONF_DIR", constants.CORE_CONF_DIR) + self.cmd("sprite %s image %s" % (type, icon)) + self.cmd("node %d type %s label on,\"%s\" %s" % (nodenum, type, name, pos)) else: - self.cmd('node %d %s' % (nodenum, pos)) + self.cmd("node %d %s" % (nodenum, pos)) def updatenodegeo(self, nodenum, lat, long, alt): - ''' Node is updated upon receiving an EMANE Location Event. - TODO: received Node Message with lat/long/alt. - ''' + """ + Node is updated upon receiving an EMANE Location Event. + + :param int nodenum: node id to update geospatial for + :param lat: latitude + :param long: longitude + :param alt: altitude + :return: nothing + """ + + # TODO: received Node Message with lat/long/alt. if not self.connect(): return pos = "pos %.6f,%.6f,%.6f" % (long, lat, alt) - self.cmd('node %d %s' % (nodenum, pos)) - + self.cmd("node %d %s" % (nodenum, pos)) + def updatelink(self, node1num, node2num, flags, wireless=False): - ''' Link is updated from a Link Message or by a wireless model. - ''' + """ + Link is updated from a Link Message or by a wireless model. 
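
The Sdt helper ultimately just streams newline-terminated text commands like the ones formatted in updatenode() and updatelink(). Driving an SDT3D listener directly looks roughly like this; the address matches DEFAULT_SDT_URL and the icon path assumes the default install prefix, while the node numbers and coordinates are examples only:

    import socket

    sdt = socket.create_connection(("127.0.0.1", 50000), 5)
    for cmdstr in (
        'path "/usr/local/share/core/icons/normal"',
        "sprite router image router.gif",
        "flyto -77.000000,40.000000,2500",
        'node 1 type router label on,"n1" pos -77.000000,40.000000,2.0',
        "link 1,2 line green,2",
    ):
        sdt.sendall("%s\n" % cmdstr)
    sdt.close()
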
+ + :param int node1num: node one id + :param int node2num: node two id + :param flags: link flags + :param bool wireless: flag to check if wireless or not + :return: nothing + """ if node1num is None or node2num is None: return if not self.connect(): return - if flags & coreapi.CORE_API_DEL_FLAG: - self.cmd('delete link,%s,%s' % (node1num, node2num)) - elif flags & coreapi.CORE_API_ADD_FLAG: + if flags & MessageFlags.DELETE.value: + self.cmd("delete link,%s,%s" % (node1num, node2num)) + elif flags & MessageFlags.ADD.value: attr = "" if wireless: attr = " line green,2" else: attr = " line red,2" - self.cmd('link %s,%s%s' % (node1num, node2num, attr)) - + self.cmd("link %s,%s%s" % (node1num, node2num, attr)) + def sendobjs(self): - ''' Session has already started, and the SDT3D GUI later connects. - Send all node and link objects for display. Otherwise, nodes and - links will only be drawn when they have been updated (e.g. moved). - ''' + """ + Session has already started, and the SDT3D GUI later connects. + Send all node and link objects for display. Otherwise, nodes and + links will only be drawn when they have been updated (e.g. moved). + + :return: nothing + """ nets = [] - with self.session._objslock: - for obj in self.session.objs(): + with self.session._objects_lock: + for obj in self.session.objects.itervalues(): if isinstance(obj, PyCoreNet): nets.append(obj) if not isinstance(obj, PyCoreObj): @@ -226,88 +300,96 @@ class Sdt(object): (x, y, z) = obj.getposition() if x is None or y is None: continue - self.updatenode(obj.objid, coreapi.CORE_API_ADD_FLAG, x, y, z, + self.updatenode(obj.objid, MessageFlags.ADD.value, x, y, z, obj.name, obj.type, obj.icon) for nodenum in sorted(self.remotes.keys()): r = self.remotes[nodenum] - (x, y, z) = r.pos - self.updatenode(nodenum, coreapi.CORE_API_ADD_FLAG, x, y, z, + x, y, z = r.pos + self.updatenode(nodenum, MessageFlags.ADD.value, x, y, z, r.name, r.type, r.icon) for net in nets: # use tolinkmsgs() to handle various types of links - msgs = net.tolinkmsgs(flags = coreapi.CORE_API_ADD_FLAG) - for msg in msgs: - msghdr = msg[:coreapi.CoreMessage.hdrsiz] - flags = coreapi.CoreMessage.unpackhdr(msghdr)[1] - m = coreapi.CoreLinkMessage(flags, msghdr, - msg[coreapi.CoreMessage.hdrsiz:]) - n1num = m.gettlv(coreapi.CORE_TLV_LINK_N1NUMBER) - n2num = m.gettlv(coreapi.CORE_TLV_LINK_N2NUMBER) - link_msg_type = m.gettlv(coreapi.CORE_TLV_LINK_TYPE) - if isinstance(net, nodes.WlanNode) or \ - isinstance(net, nodes.EmaneNode): - if (n1num == net.objid): + messages = net.all_link_data(flags=MessageFlags.ADD.value) + for message in messages: + msghdr = message[:coreapi.CoreMessage.header_len] + flags = coreapi.CoreMessage.unpack_header(msghdr)[1] + m = coreapi.CoreLinkMessage(flags, msghdr, message[coreapi.CoreMessage.header_len:]) + n1num = m.get_tlv(LinkTlvs.N1_NUMBER.value) + n2num = m.get_tlv(LinkTlvs.N2_NUMBER.value) + link_msg_type = m.get_tlv(LinkTlvs.TYPE.value) + if nodeutils.is_node(net, (NodeTypes.WIRELESS_LAN, NodeTypes.EMANE)): + if n1num == net.objid: continue - wl = (link_msg_type == coreapi.CORE_LINK_WIRELESS) - self.updatelink(n1num, n2num, coreapi.CORE_API_ADD_FLAG, wl) + wl = link_msg_type == LinkTypes.WIRELESS.value + self.updatelink(n1num, n2num, MessageFlags.ADD.value, wl) + for n1num in sorted(self.remotes.keys()): r = self.remotes[n1num] - for (n2num, wl) in r.links: - self.updatelink(n1num, n2num, coreapi.CORE_API_ADD_FLAG, wl) - - def handledistributed(self, msg): - ''' Broker handler for processing CORE API messages as they are - 
received. This is used to snoop the Node messages and update - node positions. - ''' - if msg.msgtype == coreapi.CORE_API_LINK_MSG: - return self.handlelinkmsg(msg) - elif msg.msgtype == coreapi.CORE_API_NODE_MSG: - return self.handlenodemsg(msg) - + for n2num, wl in r.links: + self.updatelink(n1num, n2num, MessageFlags.ADD.value, wl) + + # TODO: remove the need for this + def handledistributed(self, message): + """ + Broker handler for processing CORE API messages as they are + received. This is used to snoop the Node messages and update + node positions. + + :param message: message to handle + :return: replies + """ + if message.message_type == MessageTypes.LINK.value: + return self.handlelinkmsg(message) + elif message.message_type == MessageTypes.NODE.value: + return self.handlenodemsg(message) + + # TODO: remove the need for this def handlenodemsg(self, msg): - ''' Process a Node Message to add/delete or move a node on - the SDT display. Node properties are found in session._objs or - self.remotes for remote nodes (or those not yet instantiated). - ''' + """ + Process a Node Message to add/delete or move a node on + the SDT display. Node properties are found in session._objs or + self.remotes for remote nodes (or those not yet instantiated). + + :param msg: node message to handle + :return: nothing + """ # for distributed sessions to work properly, the SDT option should be # enabled prior to starting the session if not self.is_enabled(): return False # node.(objid, type, icon, name) are used. - nodenum = msg.gettlv(coreapi.CORE_TLV_NODE_NUMBER) + nodenum = msg.get_tlv(NodeTlvs.NUMBER.value) if not nodenum: return - x = msg.gettlv(coreapi.CORE_TLV_NODE_XPOS) - y = msg.gettlv(coreapi.CORE_TLV_NODE_YPOS) + x = msg.get_tlv(NodeTlvs.X_POSITION.value) + y = msg.get_tlv(NodeTlvs.Y_POSITION.value) z = None - name = msg.gettlv(coreapi.CORE_TLV_NODE_NAME) - - nodetype = msg.gettlv(coreapi.CORE_TLV_NODE_TYPE) - model = msg.gettlv(coreapi.CORE_TLV_NODE_MODEL) - icon = msg.gettlv(coreapi.CORE_TLV_NODE_ICON) + name = msg.get_tlv(NodeTlvs.NAME.value) + + nodetype = msg.get_tlv(NodeTlvs.TYPE.value) + model = msg.get_tlv(NodeTlvs.MODEL.value) + icon = msg.get_tlv(NodeTlvs.ICON.value) net = False - if nodetype == coreapi.CORE_NODE_DEF or \ - nodetype == coreapi.CORE_NODE_PHYS or \ - nodetype == coreapi.CORE_NODE_XEN: + if nodetype == NodeTypes.DEFAULT.value or \ + nodetype == NodeTypes.PHYSICAL.value or \ + nodetype == NodeTypes.XEN.value: if model is None: model = "router" type = model - elif nodetype != None: - type = coreapi.node_class(nodetype).type + elif nodetype is not None: + type = nodeutils.get_node_class(NodeTypes(nodetype)).type net = True else: type = None - + try: - node = self.session.obj(nodenum) + node = self.session.get_object(nodenum) except KeyError: node = None if node: - self.updatenode(node.objid, msg.flags, x, y, z, - node.name, node.type, node.icon) + self.updatenode(node.objid, msg.flags, x, y, z, node.name, node.type, node.icon) else: if nodenum in self.remotes: remote = self.remotes[nodenum] @@ -318,29 +400,33 @@ class Sdt(object): if icon is None: icon = remote.icon else: - remote = self.Bunch(objid=nodenum, type=type, icon=icon, - name=name, net=net, links=set()) + remote = Bunch(objid=nodenum, type=type, icon=icon, name=name, net=net, links=set()) self.remotes[nodenum] = remote remote.pos = (x, y, z) self.updatenode(nodenum, msg.flags, x, y, z, name, type, icon) - + + # TODO: remove the need for this def handlelinkmsg(self, msg): - ''' Process a Link Message to add/remove 
links on the SDT display. - Links are recorded in the remotes[nodenum1].links set for updating - the SDT display at a later time. - ''' + """ + Process a Link Message to add/remove links on the SDT display. + Links are recorded in the remotes[nodenum1].links set for updating + the SDT display at a later time. + + :param msg: link message to handle + :return: nothing + """ if not self.is_enabled(): return False - nodenum1 = msg.gettlv(coreapi.CORE_TLV_LINK_N1NUMBER) - nodenum2 = msg.gettlv(coreapi.CORE_TLV_LINK_N2NUMBER) - link_msg_type = msg.gettlv(coreapi.CORE_TLV_LINK_TYPE) + nodenum1 = msg.get_tlv(LinkTlvs.N1_NUMBER.value) + nodenum2 = msg.get_tlv(LinkTlvs.N2_NUMBER.value) + link_msg_type = msg.get_tlv(LinkTlvs.TYPE.value) # this filters out links to WLAN and EMANE nodes which are not drawn if self.wlancheck(nodenum1): return - wl = (link_msg_type == coreapi.CORE_LINK_WIRELESS) + wl = link_msg_type == LinkTypes.WIRELESS.value if nodenum1 in self.remotes: r = self.remotes[nodenum1] - if msg.flags & coreapi.CORE_API_DEL_FLAG: + if msg.flags & MessageFlags.DELETE.value: if (nodenum2, wl) in r.links: r.links.remove((nodenum2, wl)) else: @@ -348,18 +434,22 @@ class Sdt(object): self.updatelink(nodenum1, nodenum2, msg.flags, wireless=wl) def wlancheck(self, nodenum): - ''' Helper returns True if a node number corresponds to a WlanNode - or EmaneNode. - ''' + """ + Helper returns True if a node number corresponds to a WlanNode or EmaneNode. + + :param int nodenum: node id to check + :return: True if node is wlan or emane, False otherwise + :rtype: bool + """ if nodenum in self.remotes: type = self.remotes[nodenum].type if type in ("wlan", "emane"): return True else: try: - n = self.session.obj(nodenum) + n = self.session.get_object(nodenum) except KeyError: return False - if isinstance(n, (nodes.WlanNode, nodes.EmaneNode)): + if nodeutils.is_node(n, (NodeTypes.WIRELESS_LAN, NodeTypes.EMANE)): return True return False diff --git a/daemon/core/service.py b/daemon/core/service.py index e6d5ee44..92b735fe 100644 --- a/daemon/core/service.py +++ b/daemon/core/service.py @@ -1,154 +1,197 @@ -# -# CORE -# Copyright (c)2010-2012 the Boeing Company. -# See the LICENSE file included in this distribution. -# -# author: Jeff Ahrenholz -# -''' -service.py: definition of CoreService class that is subclassed to define +""" +Definition of CoreService class that is subclassed to define startup services and routing for nodes. A service is typically a daemon -program launched when a node starts that provides some sort of -service. The CoreServices class handles configuration messages for sending +program launched when a node starts that provides some sort of service. +The CoreServices class handles configuration messages for sending a list of available services to the GUI and for configuring individual services. 
-''' - -import sys, os, shlex -import imp +""" +import os +import shlex +import sys +import time from itertools import repeat + from core.api import coreapi -from core.conf import ConfigurableManager, Configurable -from core.misc.utils import maketuplefromstr, expandcorepath +from core.conf import Configurable +from core.conf import ConfigurableManager +from core.data import EventData, ConfigData +from core.enumerations import ConfigDataTypes +from core.enumerations import ConfigFlags +from core.enumerations import EventTypes +from core.enumerations import FileTlvs +from core.enumerations import MessageFlags +from core.enumerations import RegisterTlvs +from core.misc import log +from core.misc import utils -servicelist = [] +logger = log.get_logger(__name__) + + +class ServiceManager(object): + """ + Manages services available for CORE nodes to use. + """ + services = [] + + @classmethod + def add(cls, service): + """ + Add a service to manager. + + :param CoreService service: service to add + :return: nothing + """ + insert = 0 + for index, known_service in enumerate(cls.services): + if known_service._group == service._group: + insert = index + 1 + break + + logger.info("loading service: %s - %s: %s", insert, service, service._name) + cls.services.insert(insert, service) + + @classmethod + def get(cls, name): + """ + Retrieve a service from the manager. + + :param str name: name of the service to retrieve + :return: service if it exists, None otherwise + :rtype: CoreService + """ + for service in cls.services: + if service._name == name: + return service + return None -def addservice(service): - global servicelist - i = 0 - found = -1 - for s in servicelist: - if s._group == service._group: - found = i - elif (found >= 0): - # insert service into list next to existing group - i = found + 1 - break - i += 1 - servicelist.insert(i, service) class CoreServices(ConfigurableManager): - ''' Class for interacting with a list of available startup services for - nodes. Mostly used to convert a CoreService into a Config API - message. This class lives in the Session object and remembers - the default services configured for each node type, and any - custom service configuration. A CoreService is not a Configurable. - ''' - _name = "services" - _type = coreapi.CORE_TLV_REG_UTILITY + """ + Class for interacting with a list of available startup services for + nodes. Mostly used to convert a CoreService into a Config API + message. This class lives in the Session object and remembers + the default services configured for each node type, and any + custom service configuration. A CoreService is not a Configurable. + """ + name = "services" + config_type = RegisterTlvs.UTILITY.value - service_path = set() + _invalid_custom_names = ( + 'core', 'addons', 'api', 'bsd', 'emane', 'misc', 'netns', 'phys', 'services', 'xen' + ) def __init__(self, session): - ConfigurableManager.__init__(self, session) + """ + Creates a CoreServices instance. 
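
ServiceManager replaces the old module-level servicelist/addservice() pair: add() inserts a service class next to others sharing its _group, and get() looks one up by _name. A toy registration using a made-up service class (only the attributes exercised by the code above are shown):

    from core.service import CoreService, ServiceManager

    class ExampleService(CoreService):
        """Hypothetical service used only to illustrate registration."""
        _name = "ExampleService"
        _group = "Utility"

    ServiceManager.add(ExampleService)
    assert ServiceManager.get("ExampleService") is ExampleService
    assert ServiceManager.get("DoesNotExist") is None
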
+ + :param core.session.Session session: session this manager is tied to + :return: nothing + """ + ConfigurableManager.__init__(self) + self.session = session # dict of default services tuples, key is node type self.defaultservices = {} # dict of tuple of service objects, key is node number self.customservices = {} - importcmd = "from core.services import *" - exec(importcmd) - paths = self.session.getcfgitem('custom_services_dir') + + paths = self.session.get_config_item('custom_services_dir') if paths: for path in paths.split(','): path = path.strip() self.importcustom(path) - self.isStartupService = startup.Startup.isStartupService + + # TODO: remove need for cyclic import + from core.services import startup + self.is_startup_service = startup.Startup.is_startup_service @classmethod def add_service_path(cls, path): cls.service_path.add(path) def importcustom(self, path): - ''' Import services from a myservices directory. - ''' - if not path or path in self.service_path: + """ + Import services from a myservices directory. + + :param str path: path to import custom services from + :return: nothing + """ + if not path or len(path) == 0: return + if not os.path.isdir(path): - self.session.warn("invalid custom service directory specified" \ - ": %s" % path) + logger.warn("invalid custom service directory specified" ": %s" % path) return - self.add_service_path(path) + try: parentdir, childdir = os.path.split(path) - f, pathname, description = imp.find_module(childdir, [parentdir]) - name = 'core.services.custom.' + childdir - if name in sys.modules: - i = 1 - while name + str(i) in sys.modules: - i += 1 - name += str(i) - m = imp.load_module(name, f, pathname, description) - if hasattr(m, '__all__'): - for x in m.__all__: - f, pathname, description = imp.find_module(x, [path]) - imp.load_module(name + '.' + x, f, pathname, description) - except Exception, e: - self.session.warn("error importing custom services from " \ - "%s:\n%s" % (path, e)) + if childdir in self._invalid_custom_names: + raise ValueError("use a unique custom services dir name, " "not '%s'" % childdir) + if parentdir not in sys.path: + sys.path.append(parentdir) + # TODO: remove use of this exec statement + statement = "from %s import *" % childdir + logger.info("custom import: %s", statement) + exec (statement) + except: + logger.exception("error importing custom services from %s", path) def reset(self): - ''' Called when config message with reset flag is received - ''' + """ + Called when config message with reset flag is received + """ self.defaultservices.clear() self.customservices.clear() - - def get(self): - ''' Get the list of available services. - ''' - global servicelist - return servicelist - - def getservicebyname(self, name): - ''' Get a service class from the global servicelist given its name. - Returns None when the name is not found. - ''' - global servicelist - for s in servicelist: - if s._name == name: - return s - return None - - def getdefaultservices(self, type): - ''' Get the list of default services that should be enabled for a - node for the given node type. - ''' - r = [] - if type in self.defaultservices: - defaults = self.defaultservices[type] + + def getdefaultservices(self, service_type): + """ + Get the list of default services that should be enabled for a + node for the given node type. 
+ + :param service_type: service type to get default services for + :return: default services + :rtype: list + """ + logger.debug("getting default services for type: %s", service_type) + results = [] + if service_type in self.defaultservices: + defaults = self.defaultservices[service_type] for name in defaults: - s = self.getservicebyname(name) - if s is None: - self.session.warn("default service %s is unknown" % name) + logger.debug("checking for service with service manager: %s", name) + service = ServiceManager.get(name) + if not service: + logger.warn("default service %s is unknown", name) else: - r.append(s) - return r - - def getcustomservice(self, objid, service): - ''' Get any custom service configured for the given node that - matches the specified service name. If no custom service - is found, return the specified service. - ''' - if objid in self.customservices: - for s in self.customservices[objid]: + results.append(service) + return results + + def getcustomservice(self, object_id, service): + """ + Get any custom service configured for the given node that matches the specified service name. + If no custom service is found, return the specified service. + + :param int object_id: object id to get service from + :param CoreService service: custom service to retrieve + :return: custom service from the node + :rtype: CoreService + """ + if object_id in self.customservices: + for s in self.customservices[object_id]: if s._name == service._name: return s return service - def setcustomservice(self, objid, service, values): - ''' Store service customizations in an instantiated service object - using a list of values that came from a config message. - ''' + def setcustomservice(self, object_id, service, values): + """ + Store service customizations in an instantiated service object + using a list of values that came from a config message. + + :param int object_id: object id to set custom service for + :param class service: service to set + :param list values: values to + :return: + """ if service._custom: s = service else: @@ -163,361 +206,404 @@ class CoreServices(ConfigurableManager): # old-style config, list of values else: s.fromvaluelist(values) - + # assume custom service already in dict if service._custom: return # add the custom service to dict - if objid in self.customservices: - self.customservices[objid] += (s, ) + if object_id in self.customservices: + self.customservices[object_id] += (s,) else: - self.customservices[objid] = (s, ) + self.customservices[object_id] = (s,) - def addservicestonode(self, node, nodetype, services_str, verbose): - ''' Populate the node.service list using (1) the list of services - requested from the services TLV, (2) using any custom service - configuration, or (3) using the default services for this node type. - ''' + def addservicestonode(self, node, nodetype, services_str): + """ + Populate the node.service list using (1) the list of services + requested from the services TLV, (2) using any custom service + configuration, or (3) using the default services for this node type. 
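# Sketch of the resolution order addservicestonode() implements, assuming
# core_services is the session's CoreServices manager and node is a CoreNode;
# the "zebra|OSPFv2" services string is illustrative.
services_str = "zebra|OSPFv2"
for name in services_str.split("|"):
    service = ServiceManager.get(name)
    if service is None:
        continue  # unknown names are logged and skipped
    # a stored per-node customization overrides the registered class
    service = core_services.getcustomservice(node.objid, service)
    node.addservice(service)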
+ + :param core.coreobj.PyCoreNode node: node to add services to + :param str nodetype: node type to add services to + :param str services_str: string formatted service list + :return: nothing + """ if services_str is not None: - services = services_str.split('|') + services = services_str.split("|") for name in services: - s = self.getservicebyname(name) + s = ServiceManager.get(name) if s is None: - self.session.warn("configured service %s for node %s is " \ - "unknown" % (name, node.name)) + logger.warn("configured service %s for node %s is unknown", name, node.name) continue - if verbose: - self.session.info("adding configured service %s to " \ - "node %s" % (s._name, node.name)) + logger.info("adding configured service %s to node %s", s._name, node.name) s = self.getcustomservice(node.objid, s) node.addservice(s) else: services = self.getdefaultservices(nodetype) for s in services: - if verbose: - self.session.info("adding default service %s to node %s" % \ - (s._name, node.name)) + logger.info("adding default service %s to node %s", s._name, node.name) s = self.getcustomservice(node.objid, s) node.addservice(s) - def getallconfigs(self): - ''' Return (nodenum, service) tuples for all stored configs. - Used when reconnecting to a session or opening XML. - ''' - r = [] + def getallconfigs(self, use_clsmap=True): + """ + Return (nodenum, service) tuples for all stored configs. Used when reconnecting to a + session or opening XML. + + :param bool use_clsmap: should a class map be used, default to True + :return: list of tuples of node ids and services + :rtype: list + """ + configs = [] for nodenum in self.customservices: - for s in self.customservices[nodenum]: - r.append( (nodenum, s) ) - return r + for service in self.customservices[nodenum]: + configs.append((nodenum, service)) + return configs def getallfiles(self, service): - ''' Return all customized files stored with a service. + """ + Return all customized files stored with a service. Used when reconnecting to a session or opening XML. - ''' - r = [] + + :param CoreService service: service to get files for + :return: + """ + files = [] + if not service._custom: - return r + return files + for filename in service._configs: data = self.getservicefiledata(service, filename) if data is None: continue - r.append( (filename, data) ) - return r + files.append((filename, data)) + + return files def bootnodeservices(self, node): - ''' Start all services on a node. - ''' - services = sorted(node.services, - key=lambda service: service._startindex) - useStartupService = any(map(self.isStartupService, services)) + """ + Start all services on a node. + + :param core.netns.nodes.CoreNode node: node to start services on + :return: + """ + services = sorted(node.services, key=lambda service: service._startindex) + use_startup_service = any(map(self.is_startup_service, services)) for s in services: if len(str(s._starttime)) > 0: try: t = float(s._starttime) if t > 0.0: - fn = self.bootnodeservice - self.session.evq.add_event(t, fn, node, s, services, False) + fn = self.bootnodeservice + self.session.event_loop.add_event(t, fn, node, s, services, False) continue except ValueError: - pass - self.bootnodeservice(node, s, services, useStartupService) - - def bootnodeservice(self, node, s, services, useStartupService): - ''' Start a service on a node. Create private dirs, generate config - files, and execute startup commands. 
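# Sketch of the boot ordering used by bootnodeservices(), assuming it runs as
# a method of the CoreServices manager (self) with node a booted CoreNode:
# services sort by _startindex, and a positive _starttime defers the start
# through the session event loop.
services = sorted(node.services, key=lambda svc: svc._startindex)
use_startup = any(map(self.is_startup_service, services))
for svc in services:
    try:
        delay = float(svc._starttime)
    except ValueError:
        delay = 0.0
    if delay > 0.0:
        self.session.event_loop.add_event(delay, self.bootnodeservice, node, svc, services, False)
    else:
        self.bootnodeservice(node, svc, services, use_startup)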
- ''' - if s._custom: - self.bootnodecustomservice(node, s, services, useStartupService) + logger.exception("error converting start time to float") + self.bootnodeservice(node, s, services, use_startup_service) + + def bootnodeservice(self, node, service, services, use_startup_service): + """ + Start a service on a node. Create private dirs, generate config + files, and execute startup commands. + + :param core.netns.nodes.CoreNode node: node to boot services on + :param CoreService service: service to start + :param list services: service list + :param bool use_startup_service: flag to use startup services or not + :return: nothing + """ + if service._custom: + self.bootnodecustomservice(node, service, services, use_startup_service) return - if node.verbose: - node.info("starting service %s (%s)" % (s._name, s._startindex)) - for d in s._dirs: + + logger.info("starting service %s (%s)" % (service._name, service._startindex)) + for directory in service._dirs: try: - node.privatedir(d) - except Exception, e: - node.warn("Error making node %s dir %s: %s" % \ - (node.name, d, e)) - for filename in s.getconfigfilenames(node.objid, services): - cfg = s.generateconfig(node, filename, services) - node.nodefile(filename, cfg) - if useStartupService and not self.isStartupService(s): + node.privatedir(directory) + except: + logger.exception("Error making node %s dir %s", node.name, directory) + + for filename in service.getconfigfilenames(node.objid, services): + cfg = service.generateconfig(node, filename, services) + node.nodefile(filename, cfg) + + if use_startup_service and not self.is_startup_service(service): return - for cmd in s.getstartup(node, services): + + for cmd in service.getstartup(node, services): try: # NOTE: this wait=False can be problematic! - node.cmd(shlex.split(cmd), wait = False) - except Exception, e: - node.warn("error starting command %s: %s" % (cmd, e)) + node.cmd(shlex.split(cmd), wait=False) + except: + logger.exception("error starting command %s", cmd) - def bootnodecustomservice(self, node, s, services, useStartupService): - ''' Start a custom service on a node. Create private dirs, use supplied - config files, and execute supplied startup commands. - ''' - if node.verbose: - node.info("starting service %s (%s)(custom)" % (s._name, s._startindex)) - for d in s._dirs: + def bootnodecustomservice(self, node, service, services, use_startup_service): + """ + Start a custom service on a node. Create private dirs, use supplied + config files, and execute supplied startup commands. 
+ + :param core.netns.nodes.CoreNode node: node to boot services on + :param CoreService service: service to start + :param list services: service list + :param bool use_startup_service: flag to use startup services or not + :return: nothing + """ + logger.info("starting service %s (%s)(custom)" % (service._name, service._startindex)) + for directory in service._dirs: try: - node.privatedir(d) - except Exception, e: - node.warn("Error making node %s dir %s: %s" % \ - (node.name, d, e)) - for i, filename in enumerate(s._configs): + node.privatedir(directory) + except: + logger.exception("Error making node %s dir %s", node.name, directory) + + for i, filename in enumerate(service._configs): if len(filename) == 0: continue - cfg = self.getservicefiledata(s, filename) + cfg = self.getservicefiledata(service, filename) if cfg is None: - cfg = s.generateconfig(node, filename, services) + cfg = service.generateconfig(node, filename, services) # cfg may have a file:/// url for copying from a file try: if self.copyservicefile(node, filename, cfg): continue - except IOError, e: - node.warn("Error copying service file %s" % filename) - node.exception(coreapi.CORE_EXCP_LEVEL_ERROR, - "service:%s" % s._name, - "error copying service file '%s': %s" % (filename, e)) + except IOError: + logger.exception("error copying service file '%s'", filename) continue - node.nodefile(filename, cfg) - - if useStartupService and not self.isStartupService(s): + node.nodefile(filename, cfg) + + if use_startup_service and not self.is_startup_service(service): return - for cmd in s._startup: + for cmd in service._startup: try: # NOTE: this wait=False can be problematic! - node.cmd(shlex.split(cmd), wait = False) - except Exception, e: - node.warn("error starting command %s: %s" % (cmd, e)) - + node.cmd(shlex.split(cmd), wait=False) + except: + logger.exception("error starting command %s", cmd) + def copyservicefile(self, node, filename, cfg): - ''' Given a configured service filename and config, determine if the + """ + Given a configured service filename and config, determine if the config references an existing file that should be copied. Returns True for local files, False for generated. - ''' + + :param core.netns.nodes.CoreNode node: node to copy service for + :param str filename: file name for a configured service + :param str cfg: configuration string + :return: True if successful, False otherwise + :rtype: bool + """ if cfg[:7] == 'file://': src = cfg[7:] src = src.split('\n')[0] - src = expandcorepath(src, node.session, node) + src = utils.expandcorepath(src, node.session, node) # TODO: glob here - node.nodefilecopy(filename, src, mode = 0644) + node.nodefilecopy(filename, src, mode=0644) return True return False - def validatenodeservices(self, node): - ''' Run validation commands for all services on a node. - ''' - services = sorted(node.services, - key=lambda service: service._startindex) + """ + Run validation commands for all services on a node. + + :param core.netns.nodes.CoreNode node: node to validate services for + :return: nothing + """ + services = sorted(node.services, key=lambda service: service._startindex) for s in services: self.validatenodeservice(node, s, services) - def validatenodeservice(self, node, s, services): - ''' Run the validation command(s) for a service. 
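# Sketch of the "file://" convention checked by copyservicefile(): a config
# whose first line is a file:// URL is copied into the node instead of being
# written from generated text (node is assumed; paths are illustrative).
filename = "template.conf"                  # destination name inside the node
cfg = "file:///etc/myservice/template.conf\n"
if cfg[:7] == "file://":
    src = cfg[7:].split("\n")[0]
    src = utils.expandcorepath(src, node.session, node)  # resolve CORE path placeholders
    node.nodefilecopy(filename, src, mode=0644)
else:
    node.nodefile(filename, cfg)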
- ''' - if node.verbose: - node.info("validating service %s (%s)" % (s._name, s._startindex)) - if s._custom: - validate_cmds = s._validate - else: - validate_cmds = s.getvalidate(node, services) - if len(validate_cmds) == 0: - # doesn't have a validate command - status = 0 + def validatenodeservice(self, node, service, services): + """ + Run the validation command(s) for a service. + + :param core.netns.nodes.CoreNode node: node to validate service for + :param CoreService service: service to validate + :param list services: services for node + :return: service validation status + :rtype: int + """ + logger.info("validating service for node (%s - %s): %s (%s)", + node.objid, node.name, service._name, service._startindex) + if service._custom: + validate_cmds = service._validate else: + validate_cmds = service.getvalidate(node, services) + + status = 0 + # has validate commands + if len(validate_cmds) > 0: for cmd in validate_cmds: - if node.verbose: - node.info("validating service %s using: %s" % (s._name, cmd)) + logger.info("validating service %s using: %s", service._name, cmd) try: - (status, result) = node.cmdresult(shlex.split(cmd)) + status, result = node.cmdresult(shlex.split(cmd)) if status != 0: - raise ValueError, "non-zero exit status" + raise ValueError("non-zero exit status") except: - node.warn("validation command '%s' failed" % cmd) - node.exception(coreapi.CORE_EXCP_LEVEL_ERROR, - "service:%s" % s._name, - "validate command failed: %s" % cmd) + logger.exception("validate command failed: %s", cmd) status = -1 + return status - + def stopnodeservices(self, node): - ''' Stop all services on a node. - ''' - services = sorted(node.services, - key=lambda service: service._startindex) + """ + Stop all services on a node. + + :param core.netns.nodes.CoreNode node: node to stop services on + :return: nothing + """ + services = sorted(node.services, key=lambda service: service._startindex) for s in services: self.stopnodeservice(node, s) - - def stopnodeservice(self, node, s): - ''' Stop a service on a node. - ''' + + def stopnodeservice(self, node, service): + """ + Stop a service on a node. + + :param core.netns.nodes.CoreNode node: node to stop a service on + :param CoreService service: service to stop + :return: status for stopping the services + :rtype: str + """ status = "" - if len(s._shutdown) == 0: + if len(service._shutdown) == 0: # doesn't have a shutdown command status += "0" else: - for cmd in s._shutdown: + for cmd in service._shutdown: try: - tmp = node.cmd(shlex.split(cmd), wait = True) - status += "%s" % (tmp) + tmp = node.cmd(shlex.split(cmd), wait=True) + status += "%s" % tmp except: - node.warn("error running stop command %s" % cmd) + logger.exception("error running stop command %s", cmd) status += "-1" return status + def configure_request(self, config_data): + """ + Receive configuration message for configuring services. + With a request flag set, a list of services has been requested. + When the opaque field is present, a specific service is being + configured or requested. - def configure_request(self, msg): - ''' Receive configuration message for configuring services. - With a request flag set, a list of services has been requested. - When the opaque field is present, a specific service is being - configured or requested. 
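# Sketch of the validation contract in validatenodeservice(): every validate
# command must exit 0; any failure marks the service invalid (-1), but the
# remaining commands are still attempted (node is assumed; commands are
# examples).
import shlex

status = 0
for cmd in ("pidof zebra", "pidof ospfd"):
    exit_status, _output = node.cmdresult(shlex.split(cmd))
    if exit_status != 0:
        status = -1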
- ''' - objname = msg.gettlv(coreapi.CORE_TLV_CONF_OBJ) - conftype = msg.gettlv(coreapi.CORE_TLV_CONF_TYPE) - nodenum = msg.gettlv(coreapi.CORE_TLV_CONF_NODE) - sessionnum = msg.gettlv(coreapi.CORE_TLV_CONF_SESSION) - opaque = msg.gettlv(coreapi.CORE_TLV_CONF_OPAQUE) + :param core.conf.ConfigData config_data: configuration data for carrying out a configuration + :return: response messages + :rtype: ConfigData + """ + node_id = config_data.node + session_id = config_data.session + opaque = config_data.opaque # send back a list of available services if opaque is None: - global servicelist - tf = coreapi.CONF_TYPE_FLAGS_NONE - datatypes = tuple(repeat(coreapi.CONF_DATA_TYPE_BOOL, - len(servicelist))) - vals = "|".join(repeat('0', len(servicelist))) - names = map(lambda x: x._name, servicelist) + type_flag = ConfigFlags.NONE.value + data_types = tuple(repeat(ConfigDataTypes.BOOL.value, len(ServiceManager.services))) + values = "|".join(repeat('0', len(ServiceManager.services))) + names = map(lambda x: x._name, ServiceManager.services) captions = "|".join(names) - possiblevals = "" - for s in servicelist: + possible_values = "" + for s in ServiceManager.services: if s._custom_needed: - possiblevals += '1' - possiblevals += '|' - groups = self.buildgroups(servicelist) + possible_values += '1' + possible_values += '|' + groups = self.buildgroups(ServiceManager.services) # send back the properties for this service else: - if nodenum is None: + if node_id is None: return None - n = self.session.obj(nodenum) + n = self.session.get_object(node_id) if n is None: - self.session.warn("Request to configure service %s for " \ - "unknown node %s" % (svc._name, nodenum)) + logger.warn("Request to configure service for unknown node %s", node_id) return None servicesstring = opaque.split(':') - services,unknown = self.servicesfromopaque(opaque, n.objid) + services, unknown = self.servicesfromopaque(opaque, n.objid) for u in unknown: - self.session.warn("Request for unknown service '%s'" % u) + logger.warn("Request for unknown service '%s'" % u) if len(services) < 1: return None if len(servicesstring) == 3: # a file request: e.g. 
"service:zebra:quagga.conf" return self.getservicefile(services, n, servicesstring[2]) - + # the first service in the list is the one being configured svc = services[0] # send back: # dirs, configs, startindex, startup, shutdown, metadata, config - tf = coreapi.CONF_TYPE_FLAGS_UPDATE - datatypes = tuple(repeat(coreapi.CONF_DATA_TYPE_STRING, - len(svc.keys))) - vals = svc.tovaluelist(n, services) + type_flag = ConfigFlags.UPDATE.value + data_types = tuple(repeat(ConfigDataTypes.STRING.value, len(svc.keys))) + values = svc.tovaluelist(n, services) captions = None - possiblevals = None + possible_values = None groups = None - - tlvdata = "" - if nodenum is not None: - tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_NODE, - nodenum) - tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_OBJ, - self._name) - tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_TYPE, tf) - tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_DATA_TYPES, - datatypes) - tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_VALUES, - vals) - if captions: - tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_CAPTIONS, - captions) - if possiblevals: - tlvdata += coreapi.CoreConfTlv.pack( - coreapi.CORE_TLV_CONF_POSSIBLE_VALUES, possiblevals) - if groups: - tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_GROUPS, - groups) - if sessionnum is not None: - tlvdata += coreapi.CoreConfTlv.pack( - coreapi.CORE_TLV_CONF_SESSION, sessionnum) - if opaque: - tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_OPAQUE, - opaque) - return coreapi.CoreConfMessage.pack(0, tlvdata) + return ConfigData( + message_type=0, + node=node_id, + object=self.name, + type=type_flag, + data_types=data_types, + data_values=values, + captions=captions, + possible_values=possible_values, + groups=groups, + session=session_id, + opaque=opaque + ) - def configure_values(self, msg, values): - ''' Receive configuration message for configuring services. - With a request flag set, a list of services has been requested. - When the opaque field is present, a specific service is being - configured or requested. - ''' - nodenum = msg.gettlv(coreapi.CORE_TLV_CONF_NODE) - opaque = msg.gettlv(coreapi.CORE_TLV_CONF_OPAQUE) - - errmsg = "services config message that I don't know how to handle" + def configure_values(self, config_data): + """ + Receive configuration message for configuring services. + With a request flag set, a list of services has been requested. + When the opaque field is present, a specific service is being + configured or requested. 
+ + :param core.conf.ConfigData config_data: configuration data for carrying out a configuration + :return: None + """ + data_types = config_data.data_types + values = config_data.data_values + node_id = config_data.node + opaque = config_data.opaque + + error_message = "services config message that I don't know how to handle" if values is None: - self.session.info(errmsg) + logger.error(error_message) return None else: values = values.split('|') if opaque is None: # store default services for a node type in self.defaultservices[] - data_types = msg.gettlv(coreapi.CORE_TLV_CONF_DATA_TYPES) - if values is None or data_types is None or \ - data_types[0] != coreapi.CONF_DATA_TYPE_STRING: - self.session.info(errmsg) + if data_types is None or data_types[0] != ConfigDataTypes.STRING.value: + logger.info(error_message) return None key = values.pop(0) self.defaultservices[key] = values - self.session.info("default services for type %s set to %s" % \ - (key, values)) + logger.info("default services for type %s set to %s" % (key, values)) else: # store service customized config in self.customservices[] - if nodenum is None: + if node_id is None: return None - services,unknown = self.servicesfromopaque(opaque, nodenum) + services, unknown = self.servicesfromopaque(opaque, node_id) for u in unknown: - self.session.warn("Request for unknown service '%s'" % u) + logger.warn("Request for unknown service '%s'" % u) if len(services) < 1: return None svc = services[0] - self.setcustomservice(nodenum, svc, values) + self.setcustomservice(node_id, svc, values) + return None - def servicesfromopaque(self, opaque, objid): - ''' Build a list of services from an opaque data string. - ''' + def servicesfromopaque(self, opaque, object_id): + """ + Build a list of services from an opaque data string. + + :param str opaque: opaque data string + :param int object_id: object id + :return: services and unknown services lists tuple + :rtype: tuple + """ services = [] unknown = [] servicesstring = opaque.split(':') @@ -525,21 +611,26 @@ class CoreServices(ConfigurableManager): return [] servicenames = servicesstring[1].split(',') for name in servicenames: - s = self.getservicebyname(name) - s = self.getcustomservice(objid, s) + s = ServiceManager.get(name) + s = self.getcustomservice(object_id, s) if s is None: unknown.append(name) else: services.append(s) - return services,unknown + return services, unknown - def buildgroups(self, servicelist): - ''' Build a string of groups for use in a configuration message given - a list of services. The group list string has the format - "title1:1-5|title2:6-9|10-12", where title is an optional group title - and i-j is a numeric range of value indices; groups are - separated by commas. - ''' + def buildgroups(self, servicelist): + """ + Build a string of groups for use in a configuration message given + a list of services. The group list string has the format + "title1:1-5|title2:6-9|10-12", where title is an optional group title + and i-j is a numeric range of value indices; groups are + separated by commas. 
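# Sketch of the opaque string format parsed by servicesfromopaque(), assuming
# core_services is the CoreServices manager and the named services are loaded:
#   "service:zebra,OSPFv2"       -> comma-separated list of services
#   "service:zebra:quagga.conf"  -> a third field names one file of a service
services, unknown = core_services.servicesfromopaque("service:zebra,OSPFv2", node.objid)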
+ + :param list servicelist: service list to build group string from + :return: groups string + :rtype: str + """ i = 0 r = "" lastgroup = "" @@ -550,32 +641,38 @@ class CoreServices(ConfigurableManager): lastgroup = group # finish previous group if i > 1: - r += "-%d|" % (i -1) + r += "-%d|" % (i - 1) # optionally include group title if group == "": r += "%d" % i else: - r += "%s:%d" % (group, i) + r += "%s:%d" % (group, i) # finish the last group list if i > 0: r += "-%d" % i return r - + + # TODO: need to remove depenency on old message structure below def getservicefile(self, services, node, filename): - ''' Send a File Message when the GUI has requested a service file. + """ + Send a File Message when the GUI has requested a service file. The file data is either auto-generated or comes from an existing config. - ''' + + :param list services: service list + :param core.netns.nodes.CoreNode node: node to get service file from + :param str filename: file name to retrieve + :return: file message for node + """ svc = services[0] # get the filename and determine the config file index if svc._custom: cfgfiles = svc._configs else: - cfgfiles = svc.getconfigfilenames(node.objid, services) + cfgfiles = svc.getconfigfilenames(node.objid, services) if filename not in cfgfiles: - self.session.warn("Request for unknown file '%s' for service '%s'" \ - % (filename, services[0])) + logger.warn("Request for unknown file '%s' for service '%s'" % (filename, services[0])) return None - + # get the file data data = self.getservicefiledata(svc, filename) if data is None: @@ -583,20 +680,25 @@ class CoreServices(ConfigurableManager): else: data = "%s" % data filetypestr = "service:%s" % svc._name - + # send a file message - flags = coreapi.CORE_API_ADD_FLAG - tlvdata = coreapi.CoreFileTlv.pack(coreapi.CORE_TLV_FILE_NODE, node.objid) - tlvdata += coreapi.CoreFileTlv.pack(coreapi.CORE_TLV_FILE_NAME, filename) - tlvdata += coreapi.CoreFileTlv.pack(coreapi.CORE_TLV_FILE_TYPE, filetypestr) - tlvdata += coreapi.CoreFileTlv.pack(coreapi.CORE_TLV_FILE_DATA, data) + flags = MessageFlags.ADD.value + tlvdata = coreapi.CoreFileTlv.pack(FileTlvs.NODE.value, node.objid) + tlvdata += coreapi.CoreFileTlv.pack(FileTlvs.NAME.value, filename) + tlvdata += coreapi.CoreFileTlv.pack(FileTlvs.TYPE.value, filetypestr) + tlvdata += coreapi.CoreFileTlv.pack(FileTlvs.FILE_DATA.value, data) reply = coreapi.CoreFileMessage.pack(flags, tlvdata) return reply - + def getservicefiledata(self, service, filename): - ''' Get the customized file data associated with a service. Return None + """ + Get the customized file data associated with a service. Return None for invalid filenames or missing file data. - ''' + + :param CoreService service: service to get file data from + :param str filename: file name to get data from + :return: file data + """ try: i = service._configs.index(filename) except ValueError: @@ -604,63 +706,71 @@ class CoreServices(ConfigurableManager): if i >= len(service._configtxt) or service._configtxt[i] is None: return None return service._configtxt[i] - + def setservicefile(self, nodenum, type, filename, srcname, data): - ''' Receive a File Message from the GUI and store the customized file + """ + Receive a File Message from the GUI and store the customized file in the service config. The filename must match one from the list of config files in the service. 
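# Worked example of the group string buildgroups() produces: 1-based index
# ranges per _group, joined with "|" (core_services is an assumed CoreServices
# instance; the stub classes stand in for registered services).
class _Stub(object):
    def __init__(self, group):
        self._group = group

groups = core_services.buildgroups(
    [_Stub("Quagga"), _Stub("Quagga"), _Stub("Quagga"), _Stub("Utility"), _Stub("Utility")])
assert groups == "Quagga:1-3|Utility:4-5"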
- ''' + + :param int nodenum: node id to set service file + :param str type: file type to set + :param str filename: file name to set + :param str srcname: source name of file to set + :param data: data for file to set + :return: nothing + """ if len(type.split(':')) < 2: - self.session.warn("Received file type did not contain service info.") + logger.warn("Received file type did not contain service info.") return if srcname is not None: raise NotImplementedError - (svcid, svcname) = type.split(':')[:2] - svc = self.getservicebyname(svcname) + svcid, svcname = type.split(':')[:2] + svc = ServiceManager.get(svcname) svc = self.getcustomservice(nodenum, svc) if svc is None: - self.session.warn("Received filename for unknown service '%s'" % \ - svcname) + logger.warn("Received filename for unknown service '%s'" % svcname) return cfgfiles = svc._configs if filename not in cfgfiles: - self.session.warn("Received unknown file '%s' for service '%s'" \ - % (filename, svcname)) + logger.warn("Received unknown file '%s' for service '%s'" % (filename, svcname)) return i = cfgfiles.index(filename) configtxtlist = list(svc._configtxt) numitems = len(configtxtlist) - if numitems < i+1: + if numitems < i + 1: # add empty elements to list to support index assignment for j in range(1, (i + 2) - numitems): configtxtlist += None, configtxtlist[i] = data svc._configtxt = configtxtlist - - def handleevent(self, msg): - ''' Handle an Event Message used to start, stop, restart, or validate - a service on a given node. - ''' - eventtype = msg.gettlv(coreapi.CORE_TLV_EVENT_TYPE) - nodenum = msg.gettlv(coreapi.CORE_TLV_EVENT_NODE) - name = msg.gettlv(coreapi.CORE_TLV_EVENT_NAME) + + def handleevent(self, event_data): + """ + Handle an Event Message used to start, stop, restart, or validate + a service on a given node. 
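# Sketch of the File Message convention handled by setservicefile(): the type
# field carries "service:<name>" and the filename must already appear in that
# service's _configs (core_services is assumed; the path and data shown are
# illustrative, and the zebra service is assumed to be loaded).
core_services.setservicefile(
    nodenum=1,
    type="service:zebra",
    filename="/usr/local/etc/quagga/Quagga.conf",
    srcname=None,
    data="! customized Quagga.conf\n",
)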
+ + :param EventData event_data: event data to handle + :return: nothing + """ + event_type = event_data.event_type + node_id = event_data.node + name = event_data.name + try: - node = self.session.obj(nodenum) + node = self.session.get_object(node_id) except KeyError: - self.session.warn("Ignoring event for service '%s', unknown node " \ - "'%s'" % (name, nodenum)) + logger.warn("Ignoring event for service '%s', unknown node '%s'", name, node_id) return - + fail = "" - services,unknown = self.servicesfromopaque(name, nodenum) + services, unknown = self.servicesfromopaque(name, node_id) for s in services: - if eventtype == coreapi.CORE_EVENT_STOP or \ - eventtype == coreapi.CORE_EVENT_RESTART: + if event_type == EventTypes.STOP.value or event_type == EventTypes.RESTART.value: status = self.stopnodeservice(node, s) if status != "0": - fail += "Stop %s," % (s._name) - if eventtype == coreapi.CORE_EVENT_START or \ - eventtype == coreapi.CORE_EVENT_RESTART: + fail += "Stop %s," % s._name + if event_type == EventTypes.START.value or event_type == EventTypes.RESTART.value: if s._custom: cmds = s._startup else: @@ -668,22 +778,22 @@ class CoreServices(ConfigurableManager): if len(cmds) > 0: for cmd in cmds: try: - #node.cmd(shlex.split(cmd), wait = False) - status = node.cmd(shlex.split(cmd), wait = True) + # node.cmd(shlex.split(cmd), wait = False) + status = node.cmd(shlex.split(cmd), wait=True) if status != 0: fail += "Start %s(%s)," % (s._name, cmd) except: - node.warn("error starting command %s" % cmd) - fail += "Start %s," % (s._name) - if eventtype == coreapi.CORE_EVENT_PAUSE: + logger.exception("error starting command %s", cmd) + fail += "Start %s," % s._name + if event_type == EventTypes.PAUSE.value: status = self.validatenodeservice(node, s, services) if status != 0: - fail += "%s," % (s._name) - if eventtype == coreapi.CORE_EVENT_RECONFIGURE: + fail += "%s," % s._name + if event_type == EventTypes.RECONFIGURE.value: if s._custom: cfgfiles = s._configs else: - cfgfiles = s.getconfigfilenames(node.objid, services) + cfgfiles = s.getconfigfilenames(node.objid, services) if len(cfgfiles) > 0: for filename in cfgfiles: if filename[:7] == "file:///": @@ -694,49 +804,45 @@ class CoreServices(ConfigurableManager): try: node.nodefile(filename, cfg) except: - self.warn("error in configure file: %s" % filename) - fail += "%s," % (s._name) + logger.exception("error in configure file: %s", filename) + fail += "%s," % s._name - fdata = "" + fail_data = "" if len(fail) > 0: - fdata += "Fail:" + fail - udata = "" - num = len(unknown) + fail_data += "Fail:" + fail + unknown_data = "" + num = len(unknown) if num > 0: for u in unknown: - udata += u + unknown_data += u if num > 1: - udata += ", " + unknown_data += ", " num -= 1 - self.session.warn("Event requested for unknown service(s): %s" % udata); - udata = "Unknown:" + udata + logger.warn("Event requested for unknown service(s): %s", unknown_data) + unknown_data = "Unknown:" + unknown_data + + event_data = EventData( + node=node_id, + event_type=event_type, + name=name, + data=fail_data + ";" + unknown_data, + time="%s" % time.time() + ) + + self.session.broadcast_event(event_data) - tlvdata = "" - tlvdata += coreapi.CoreEventTlv.pack(coreapi.CORE_TLV_EVENT_NODE, - nodenum) - tlvdata += coreapi.CoreEventTlv.pack(coreapi.CORE_TLV_EVENT_TYPE, - eventtype) - tlvdata += coreapi.CoreEventTlv.pack(coreapi.CORE_TLV_EVENT_NAME, - name) - tlvdata += coreapi.CoreEventTlv.pack(coreapi.CORE_TLV_EVENT_DATA, - fdata + ";" + udata) - msg = 
coreapi.CoreEventMessage.pack(0, tlvdata) - try: - self.session.broadcastraw(None, msg) - except Exception, e: - self.warn("Error sending Event Message: %s" % e) - class CoreService(object): - ''' Parent class used for defining services. - ''' + """ + Parent class used for defining services. + """ # service name should not include spaces _name = "" # group string allows grouping services together _group = "" # list name(s) of services that this service depends upon _depends = () - keys = ["dirs","files","startidx","cmdup","cmddown","cmdval","meta","starttime"] + keys = ["dirs", "files", "startidx", "cmdup", "cmddown", "cmdval", "meta", "starttime"] # private, per-node directories required by this service _dirs = () # config files written by this service @@ -759,77 +865,119 @@ class CoreService(object): _custom_needed = False def __init__(self): - ''' Services are not necessarily instantiated. Classmethods may be used - against their config. Services are instantiated when a custom - configuration is used to override their default parameters. - ''' + """ + Services are not necessarily instantiated. Classmethods may be used + against their config. Services are instantiated when a custom + configuration is used to override their default parameters. + """ self._custom = True - + @classmethod - def getconfigfilenames(cls, nodenum, services): - ''' Return the tuple of configuration file filenames. This default method - returns the cls._configs tuple, but this method may be overriden to - provide node-specific filenames that may be based on other services. - ''' + def getconfigfilenames(cls, nodenum, services): + """ + Return the tuple of configuration file filenames. This default method + returns the cls._configs tuple, but this method may be overriden to + provide node-specific filenames that may be based on other services. + + :param int nodenum: node id to get config file names for + :param list services: node services + :return: class configuration files + :rtype: tuple + """ return cls._configs - + @classmethod - def generateconfig(cls, node, filename, services): - ''' Generate configuration file given a node object. The filename is - provided to allow for multiple config files. The other services are - provided to allow interdependencies (e.g. zebra and OSPF). - Return the configuration string to be written to a file or sent - to the GUI for customization. - ''' + def generateconfig(cls, node, filename, services): + """ + Generate configuration file given a node object. The filename is + provided to allow for multiple config files. The other services are + provided to allow interdependencies (e.g. zebra and OSPF). + Return the configuration string to be written to a file or sent + to the GUI for customization. + + :param core.netns.nodes.CoreNode node: node to generate config for + :param str filename: file name to generate config for + :param list services: services for node + :return: nothing + """ raise NotImplementedError - + @classmethod - def getstartup(cls, node, services): - ''' Return the tuple of startup commands. This default method - returns the cls._startup tuple, but this method may be - overriden to provide node-specific commands that may be - based on other services. - ''' + def getstartup(cls, node, services): + """ + Return the tuple of startup commands. This default method + returns the cls._startup tuple, but this method may be + overridden to provide node-specific commands that may be + based on other services. 
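# A minimal, hypothetical CoreService subclass illustrating the class-level
# API described here; all names, commands and the generated script are
# examples rather than part of CORE.
class ExampleHttpd(CoreService):
    _name = "ExampleHTTPD"
    _group = "Utility"
    _configs = ("examplehttpd.sh",)
    _startindex = 50
    _startup = ("sh examplehttpd.sh",)
    _shutdown = ("killall examplehttpd",)
    _validate = ("pidof examplehttpd",)

    @classmethod
    def generateconfig(cls, node, filename, services):
        return "#!/bin/sh\n# hypothetical start script\nexamplehttpd &\n"

ServiceManager.add(ExampleHttpd)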
+ + :param core.netns.nodes.CoreNode node: node to get startup for + :param list services: services for node + :return: startup commands + :rtype: tuple + """ return cls._startup @classmethod - def getvalidate(cls, node, services): - ''' Return the tuple of validate commands. This default method - returns the cls._validate tuple, but this method may be - overriden to provide node-specific commands that may be - based on other services. - ''' + def getvalidate(cls, node, services): + """ + Return the tuple of validate commands. This default method + returns the cls._validate tuple, but this method may be + overriden to provide node-specific commands that may be + based on other services. + + :param core.netns.nodes.CoreNode node: node to validate + :param list services: services for node + :return: validation commands + :rtype: tuple + """ return cls._validate - + @classmethod def tovaluelist(cls, node, services): - ''' Convert service properties into a string list of key=value pairs, - separated by "|". - ''' - valmap = [cls._dirs, cls._configs, cls._startindex, cls._startup, + """ + Convert service properties into a string list of key=value pairs, + separated by "|". + + :param core.netns.nodes.CoreNode node: node to get value list for + :param list services: services for node + :return: value list string + :rtype: str + """ + valmap = [cls._dirs, cls._configs, cls._startindex, cls._startup, cls._shutdown, cls._validate, cls._meta, cls._starttime] if not cls._custom: # this is always reached due to classmethod valmap[valmap.index(cls._configs)] = \ cls.getconfigfilenames(node.objid, services) valmap[valmap.index(cls._startup)] = \ - cls.getstartup(node, services) - vals = map( lambda a,b: "%s=%s" % (a, str(b)), cls.keys, valmap) + cls.getstartup(node, services) + vals = map(lambda a, b: "%s=%s" % (a, str(b)), cls.keys, valmap) return "|".join(vals) def fromvaluelist(self, values): - ''' Convert list of values into properties for this instantiated - (customized) service. - ''' + """ + Convert list of values into properties for this instantiated + (customized) service. + + :param list values: value list to set properties from + :return: nothing + """ # TODO: support empty value? e.g. override default meta with '' for key in self.keys: try: self.setvalue(key, values[self.keys.index(key)]) except IndexError: # old config does not need to have new keys - pass + logger.exception("error indexing into key") def setvalue(self, key, value): + """ + Set values for this service. + + :param str key: key to set value for + :param value: value of key to set + :return: nothing + """ if key not in self.keys: raise ValueError('key `%s` not in `%s`' % (key, self.keys)) # this handles data conversion to int, string, and tuples @@ -839,8 +987,8 @@ class CoreService(object): elif key == "meta": value = str(value) else: - value = maketuplefromstr(value, str) - + value = utils.maketuplefromstr(value, str) + if key == "dirs": self._dirs = value elif key == "files": diff --git a/daemon/core/services/__init__.py b/daemon/core/services/__init__.py index b1117fe3..46d5755d 100644 --- a/daemon/core/services/__init__.py +++ b/daemon/core/services/__init__.py @@ -1,6 +1,6 @@ -"""Services +""" +Services Services available to nodes can be put in this directory. Everything listed in __all__ is automatically loaded by the main core module. 
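# Illustration of the "key=value" string that tovaluelist() produces and that
# fromvaluelist()/setvalue() consume, using the hypothetical ExampleHttpd
# sketch above (node and services are assumed); fields follow the order of
# CoreService.keys and are joined with "|":
values = ExampleHttpd.tovaluelist(node, services)
# e.g. "dirs=()|files=('examplehttpd.sh',)|startidx=50|cmdup=('sh examplehttpd.sh',)|
#       cmddown=('killall examplehttpd',)|cmdval=('pidof examplehttpd',)|meta=|starttime="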
""" -__all__ = ["quagga", "nrl", "xorp", "bird", "utility", "security", "ucarp", "dockersvc", 'startup'] diff --git a/daemon/core/services/bird.py b/daemon/core/services/bird.py index 3c1a41f5..c24fe341 100644 --- a/daemon/core/services/bird.py +++ b/daemon/core/services/bird.py @@ -1,24 +1,15 @@ -# -# CORE -# Copyright (c)2012 Jean-Tiare Le Bigot. -# See the LICENSE file included in this distribution. -# -# authors: Jean-Tiare Le Bigot -# Jeff Ahrenholz -# -''' +""" bird.py: defines routing services provided by the BIRD Internet Routing Daemon. -''' +""" -import os +from core.service import CoreService +from core.service import ServiceManager -from core.service import CoreService, addservice -from core.misc.ipaddr import IPv4Prefix -from core.constants import * class Bird(CoreService): - ''' Bird router support - ''' + """ + Bird router support + """ _name = "bird" _group = "BIRD" _depends = () @@ -26,13 +17,14 @@ class Bird(CoreService): _configs = ("/etc/bird/bird.conf",) _startindex = 35 _startup = ("bird -c %s" % (_configs[0]),) - _shutdown = ("killall bird", ) - _validate = ("pidof bird", ) + _shutdown = ("killall bird",) + _validate = ("pidof bird",) @classmethod def generateconfig(cls, node, filename, services): - ''' Return the bird.conf file contents. - ''' + """ + Return the bird.conf file contents. + """ if filename == cls._configs[0]: return cls.generateBirdConf(node, services) else: @@ -40,28 +32,30 @@ class Bird(CoreService): @staticmethod def routerid(node): - ''' Helper to return the first IPv4 address of a node as its router ID. - ''' + """ + Helper to return the first IPv4 address of a node as its router ID. + """ for ifc in node.netifs(): if hasattr(ifc, 'control') and ifc.control == True: continue for a in ifc.addrlist: if a.find(".") >= 0: - return a .split('/') [0] - #raise ValueError, "no IPv4 address found for router ID" + return a.split('/')[0] + # raise ValueError, "no IPv4 address found for router ID" return "0.0.0.0" @classmethod def generateBirdConf(cls, node, services): - ''' Returns configuration file text. Other services that depend on bird - will have generatebirdifcconfig() and generatebirdconfig() - hooks that are invoked here. - ''' - cfg = """\ + """ + Returns configuration file text. Other services that depend on bird + will have generatebirdifcconfig() and generatebirdconfig() + hooks that are invoked here. + """ + cfg = """\ /* Main configuration file for BIRD. This is ony a template, * you will *need* to customize it according to your needs * Beware that only double quotes \'"\' are valid. No singles. */ - + log "/var/log/%s.log" all; #debug protocols all; @@ -90,14 +84,16 @@ protocol device { return cfg + class BirdService(CoreService): - ''' Parent class for Bird services. Defines properties and methods + """ + Parent class for Bird services. Defines properties and methods common to Bird's routing daemons. - ''' + """ _name = "BirdDaemon" _group = "BIRD" - _depends = ("bird", ) + _depends = ("bird",) _dirs = () _configs = () _startindex = 40 @@ -106,7 +102,7 @@ class BirdService(CoreService): _meta = "The config file for this service can be found in the bird service." 
@classmethod - def generatebirdconfig(cls, node): + def generatebirdconfig(cls, node): return "" @classmethod @@ -118,20 +114,23 @@ class BirdService(CoreService): cfg = "" for ifc in node.netifs(): - if hasattr(ifc, 'control') and ifc.control == True: continue - cfg += ' interface "%s";\n'% ifc.name + if hasattr(ifc, 'control') and ifc.control == True: + continue + cfg += ' interface "%s";\n' % ifc.name return cfg class BirdBgp(BirdService): - '''BGP BIRD Service (configuration generation)''' + """ + BGP BIRD Service (configuration generation) + """ _name = "BIRD_BGP" _custom_needed = True @classmethod - def generatebirdconfig(cls, node): + def generatebirdconfig(cls, node): return """ /* This is a sample config that should be customized with appropriate AS numbers * and peers; add one section like this for each neighbor */ @@ -152,13 +151,16 @@ protocol bgp { """ + class BirdOspf(BirdService): - '''OSPF BIRD Service (configuration generation)''' + """ + OSPF BIRD Service (configuration generation) + """ _name = "BIRD_OSPFv2" @classmethod - def generatebirdconfig(cls, node): + def generatebirdconfig(cls, node): cfg = 'protocol ospf {\n' cfg += ' export filter {\n' cfg += ' if source = RTS_BGP then {\n' @@ -168,7 +170,7 @@ class BirdOspf(BirdService): cfg += ' accept;\n' cfg += ' };\n' cfg += ' area 0.0.0.0 {\n' - cfg += cls.generatebirdifcconfig(node) + cfg += cls.generatebirdifcconfig(node) cfg += ' };\n' cfg += '}\n\n' @@ -176,17 +178,19 @@ class BirdOspf(BirdService): class BirdRadv(BirdService): - '''RADV BIRD Service (configuration generation)''' + """ + RADV BIRD Service (configuration generation) + """ _name = "BIRD_RADV" @classmethod - def generatebirdconfig(cls, node): - cfg = '/* This is a sample config that must be customized */\n' + def generatebirdconfig(cls, node): + cfg = '/* This is a sample config that must be customized */\n' cfg += 'protocol radv {\n' cfg += ' # auto configuration on all interfaces\n' - cfg += cls.generatebirdifcconfig(node) + cfg += cls.generatebirdifcconfig(node) cfg += ' # Advertise DNS\n' cfg += ' rdnss {\n' cfg += '# lifetime mult 10;\n' @@ -202,16 +206,18 @@ class BirdRadv(BirdService): class BirdRip(BirdService): - '''RIP BIRD Service (configuration generation)''' + """ + RIP BIRD Service (configuration generation) + """ _name = "BIRD_RIP" @classmethod - def generatebirdconfig(cls, node): + def generatebirdconfig(cls, node): cfg = 'protocol rip {\n' cfg += ' period 10;\n' cfg += ' garbage time 60;\n' - cfg += cls.generatebirdifcconfig(node) + cfg += cls.generatebirdifcconfig(node) cfg += ' honor neighbor;\n' cfg += ' authentication none;\n' cfg += ' import all;\n' @@ -222,13 +228,15 @@ class BirdRip(BirdService): class BirdStatic(BirdService): - '''Static Bird Service (configuration generation)''' + """ + Static Bird Service (configuration generation) + """ _name = "BIRD_static" _custom_needed = True @classmethod - def generatebirdconfig(cls, node): + def generatebirdconfig(cls, node): cfg = '/* This is a sample config that must be customized */\n' cfg += 'protocol static {\n' @@ -240,10 +248,11 @@ class BirdStatic(BirdService): return cfg -# Register all protocols -addservice(Bird) -addservice(BirdOspf) -addservice(BirdBgp) -#addservice(BirdRadv) # untested -addservice(BirdRip) -addservice(BirdStatic) +def load_services(): + # Register all protocols + ServiceManager.add(Bird) + ServiceManager.add(BirdOspf) + ServiceManager.add(BirdBgp) + # ServiceManager.add(BirdRadv) # untested + ServiceManager.add(BirdRip) + ServiceManager.add(BirdStatic) diff 
--git a/daemon/core/services/dockersvc.py b/daemon/core/services/dockersvc.py index 78fb1262..2f682e26 100644 --- a/daemon/core/services/dockersvc.py +++ b/daemon/core/services/dockersvc.py @@ -1,12 +1,5 @@ -# -# CORE -# Copyright (c)2014 the Boeing Company. -# See the LICENSE file included in this distribution. -# -# authors: Stuart Marsden -# Jeff Ahrenholz -# -''' Docker service allows running docker containers within CORE nodes. +""" +Docker service allows running docker containers within CORE nodes. The running of Docker within a CORE node allows for additional extensibility to the CORE services. This allows network applications and protocols to be easily @@ -20,7 +13,7 @@ service to the Docker group. The image will then be auto run if that service is selected. This requires a recent version of Docker. This was tested using a PPA on Ubuntu - with version 1.2.0. The version in the standard Ubuntu repo is to old for + with version 1.2.0. The version in the standard Ubuntu repo is to old for this purpose (we need --net host). It also requires docker-py (https://pypi.python.org/pypi/docker-py) which can be @@ -47,13 +40,13 @@ The id will be different on your machine so use it in the following command: sudo docker tag 4833487e66d2 stuartmarsden/multicastping:core -This image will be listed in the services after we restart the core-daemon: +This image will be listed in the services after we restart the core-daemon: sudo service core-daemon restart You can set up a simple network with a number of PCs connected to a switch. Set the stuartmarsden/multicastping service for all the PCs. When started they will -all begin sending Multicast pings. +all begin sending Multicast pings. In order to see what is happening you can go in to the terminal of a node and look at the docker log. Easy shorthand is: @@ -89,11 +82,11 @@ Datagram 'Client: Ping' received from ('10.0.5.20', 8005) Limitations: -1. Docker images must be downloaded on the host as usually a CORE node does not +1. Docker images must be downloaded on the host as usually a CORE node does not have access to the internet. 2. Each node isolates running containers (keeps things simple) -3. Recent version of docker needed so that --net host can be used. This does - not further abstract the network within a node and allows multicast which +3. Recent version of docker needed so that --net host can be used. This does + not further abstract the network within a node and allows multicast which is not enabled within Docker containers at the moment. 4. The core-daemon must be restarted for new images to show up. 5. A Docker-daemon is run within each node but the images are shared. This @@ -101,43 +94,46 @@ Limitations: host. At startup all the nodes will try to access this and it will be locked for most due to contention. The service just does a hackish wait for 1 second and retry. This means all the docker containers can take a while to come up - depending on how many nodes you have. + depending on how many nodes you have. +""" -''' +from core.misc import log +from core.service import CoreService +from core.service import ServiceManager + +logger = log.get_logger(__name__) -import os -import sys try: from docker import Client -except Exception: - pass +except ImportError: + logger.error("failure to import docker") -from core.service import CoreService, addservice -from core.misc.ipaddr import IPv4Prefix, IPv6Prefix class DockerService(CoreService): - ''' This is a service which will allow running docker containers in a CORE - node. 
- ''' + """ + This is a service which will allow running docker containers in a CORE + node. + """ _name = "Docker" _group = "Docker" _depends = () _dirs = ('/var/lib/docker/containers/', '/run/shm', '/run/resolvconf',) - _configs = ('docker.sh', ) + _configs = ('docker.sh',) _startindex = 50 _startup = ('sh docker.sh',) - _shutdown = ('service docker stop', ) + _shutdown = ('service docker stop',) # Container image to start _image = "" @classmethod def generateconfig(cls, node, filename, services): - ''' Returns a string having contents of a docker.sh script that - can be modified to start a specific docker image. - ''' + """ + Returns a string having contents of a docker.sh script that + can be modified to start a specific docker image. + """ cfg = "#!/bin/sh\n" cfg += "# auto-generated by Docker (docker.py)\n" - # Docker likes to think it has DNS set up or it complains. + # Docker likes to think it has DNS set up or it complains. # Unless your network was attached to the Internet this is # non-functional but hides error messages. cfg += 'echo "nameserver 8.8.8.8" > /run/resolvconf/resolv.conf\n' @@ -156,27 +152,30 @@ until [ $result -eq 0 ]; do # this is to alleviate contention to docker's SQLite database sleep 0.3 done -""" % (cls._image, ) +""" % (cls._image,) return cfg -addservice(DockerService) -# This auto-loads Docker images having a :core tag, adding them to the list -# of services under the "Docker" group. -if 'Client' in globals(): - client = Client(version='1.10') - images = client.images() - del client -else: - images = [] -for image in images: - if u'' in image['RepoTags'][0]: - continue - for repo in image['RepoTags']: - if u':core' not in repo: +def load_services(): + ServiceManager.add(DockerService) + + # This auto-loads Docker images having a :core tag, adding them to the list + # of services under the "Docker" group. + # TODO: change this logic, should be a proper configurable, or docker needs to be a required library + # TODO: also should make this call possible real time for reloading removing "magic" auto loading on import + if 'Client' in globals(): + client = Client(version='1.10') + images = client.images() + del client + else: + images = [] + for image in images: + if u'' in image['RepoTags'][0]: continue - dockerid = repo.encode('ascii','ignore').split(':')[0] - SubClass = type('SubClass', (DockerService,), - {'_name': dockerid, '_image': dockerid}) - addservice(SubClass) -del images + for repo in image['RepoTags']: + if u':core' not in repo: + continue + dockerid = repo.encode('ascii', 'ignore').split(':')[0] + sub_class = type('SubClass', (DockerService,), {'_name': dockerid, '_image': dockerid}) + ServiceManager.add(sub_class) + del images diff --git a/daemon/core/services/nrl.py b/daemon/core/services/nrl.py index 481185bf..22266319 100644 --- a/daemon/core/services/nrl.py +++ b/daemon/core/services/nrl.py @@ -1,24 +1,19 @@ -# -# CORE -# Copyright (c)2010-2012 the Boeing Company. -# See the LICENSE file included in this distribution. 
-# -# author: Jeff Ahrenholz -# -''' +""" nrl.py: defines services provided by NRL protolib tools hosted here: http://www.nrl.navy.mil/itd/ncs/products -''' +""" + +from core.misc import utils +from core.misc.ipaddress import Ipv4Prefix +from core.service import CoreService +from core.service import ServiceManager -from core.service import CoreService, addservice -from core.misc.ipaddr import IPv4Prefix, IPv6Prefix -from core.misc.utils import * -from core.constants import * class NrlService(CoreService): - ''' Parent class for NRL services. Defines properties and methods - common to NRL's routing daemons. - ''' + """ + Parent class for NRL services. Defines properties and methods + common to NRL's routing daemons. + """"" _name = "Protean" _group = "ProtoSvc" _depends = () @@ -29,108 +24,111 @@ class NrlService(CoreService): _shutdown = () @classmethod - def generateconfig(cls, node, filename, services): + def generateconfig(cls, node, filename, services): return "" - + @staticmethod def firstipv4prefix(node, prefixlen=24): - ''' Similar to QuaggaService.routerid(). Helper to return the first IPv4 + """ + Similar to QuaggaService.routerid(). Helper to return the first IPv4 prefix of a node, using the supplied prefix length. This ignores the interface's prefix length, so e.g. '/32' can turn into '/24'. - ''' + """ for ifc in node.netifs(): if hasattr(ifc, 'control') and ifc.control == True: continue for a in ifc.addrlist: if a.find(".") >= 0: addr = a.split('/')[0] - pre = IPv4Prefix("%s/%s" % (addr, prefixlen)) + pre = Ipv4Prefix("%s/%s" % (addr, prefixlen)) return str(pre) - #raise ValueError, "no IPv4 address found" + # raise ValueError, "no IPv4 address found" return "0.0.0.0/%s" % prefixlen + class MgenSinkService(NrlService): _name = "MGEN_Sink" - _configs = ("sink.mgen", ) + _configs = ("sink.mgen",) _startindex = 5 - _startup = ("mgen input sink.mgen", ) - _validate = ("pidof mgen", ) - _shutdown = ("killall mgen", ) + _startup = ("mgen input sink.mgen",) + _validate = ("pidof mgen",) + _shutdown = ("killall mgen",) @classmethod def generateconfig(cls, node, filename, services): cfg = "0.0 LISTEN UDP 5000\n" for ifc in node.netifs(): - name = sysctldevname(ifc.name) + name = utils.sysctldevname(ifc.name) cfg += "0.0 Join 224.225.1.2 INTERFACE %s\n" % name return cfg @classmethod - def getstartup(cls, node, services): - cmd =cls._startup[0] + def getstartup(cls, node, services): + cmd = cls._startup[0] cmd += " output /tmp/mgen_%s.log" % node.name - return (cmd, ) + return cmd, -addservice(MgenSinkService) class NrlNhdp(NrlService): - ''' NeighborHood Discovery Protocol for MANET networks. - ''' + """ + NeighborHood Discovery Protocol for MANET networks. + """ _name = "NHDP" - _startup = ("nrlnhdp", ) - _shutdown = ("killall nrlnhdp", ) - _validate = ("pidof nrlnhdp", ) + _startup = ("nrlnhdp",) + _shutdown = ("killall nrlnhdp",) + _validate = ("pidof nrlnhdp",) @classmethod - def getstartup(cls, node, services): - ''' Generate the appropriate command-line based on node interfaces. - ''' + def getstartup(cls, node, services): + """ + Generate the appropriate command-line based on node interfaces. 
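# Worked example of NrlService.firstipv4prefix(): the first non-control IPv4
# address is re-expressed with the supplied prefix length, so an interface
# address of 10.0.3.2/32 with the default prefixlen of 24 is expected to give
# "10.0.3.0/24" (addresses are illustrative).
from core.misc.ipaddress import Ipv4Prefix

prefix = Ipv4Prefix("%s/%s" % ("10.0.3.2", 24))
print str(prefix)   # expected: 10.0.3.0/24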
+ """ cmd = cls._startup[0] cmd += " -l /var/log/nrlnhdp.log" cmd += " -rpipe %s_nhdp" % node.name - - servicenames = map(lambda x: x._name, services) + + servicenames = map(lambda x: x._name, services) if "SMF" in servicenames: cmd += " -flooding ecds-etx sticky" cmd += " -smfClient %s_smf" % node.name - + netifs = filter(lambda x: not getattr(x, 'control', False), \ node.netifs()) if len(netifs) > 0: interfacenames = map(lambda x: x.name, netifs) cmd += " -i " cmd += " -i ".join(interfacenames) - - return (cmd, ) - -addservice(NrlNhdp) + + return cmd, + class NrlSmf(NrlService): - ''' Simplified Multicast Forwarding for MANET networks. - ''' + """ + Simplified Multicast Forwarding for MANET networks. + """ _name = "SMF" - _startup = ("sh startsmf.sh", ) - _shutdown = ("killall nrlsmf", ) - _validate = ("pidof nrlsmf", ) - _configs = ("startsmf.sh", ) - + _startup = ("sh startsmf.sh",) + _shutdown = ("killall nrlsmf",) + _validate = ("pidof nrlsmf",) + _configs = ("startsmf.sh",) + @classmethod def generateconfig(cls, node, filename, services): - ''' Generate a startup script for SMF. Because nrlsmf does not + """ + Generate a startup script for SMF. Because nrlsmf does not daemonize, it can cause problems in some situations when launched directly using vcmd. - ''' + """ cfg = "#!/bin/sh\n" cfg += "# auto-generated by nrl.py:NrlSmf.generateconfig()\n" comments = "" - cmd = "nrlsmf instance %s_smf" % (node.name) + cmd = "nrlsmf instance %s_smf" % node.name - servicenames = map(lambda x: x._name, services) - netifs = filter(lambda x: not getattr(x, 'control', False), \ - node.netifs()) + servicenames = map(lambda x: x._name, services) + netifs = filter(lambda x: not getattr(x, 'control', False), node.netifs()) if len(netifs) == 0: return () - + if "arouted" in servicenames: comments += "# arouted service is enabled\n" cmd += " tap %s_tap" % (node.name,) @@ -145,29 +143,30 @@ class NrlSmf(NrlService): cmd += " smpr " else: cmd += " cf " - interfacenames = map(lambda x: x.name, netifs) + interfacenames = map(lambda x: x.name, netifs) cmd += ",".join(interfacenames) - + cmd += " hash MD5" cmd += " log /var/log/nrlsmf.log" cfg += comments + cmd + " < /dev/null > /dev/null 2>&1 &\n\n" return cfg - -addservice(NrlSmf) + class NrlOlsr(NrlService): - ''' Optimized Link State Routing protocol for MANET networks. - ''' + """ + Optimized Link State Routing protocol for MANET networks. + """ _name = "OLSR" - _startup = ("nrlolsrd", ) - _shutdown = ("killall nrlolsrd", ) - _validate = ("pidof nrlolsrd", ) - + _startup = ("nrlolsrd",) + _shutdown = ("killall nrlolsrd",) + _validate = ("pidof nrlolsrd",) + @classmethod - def getstartup(cls, node, services): - ''' Generate the appropriate command-line based on node interfaces. - ''' + def getstartup(cls, node, services): + """ + Generate the appropriate command-line based on node interfaces. + """ cmd = cls._startup[0] # are multiple interfaces supported? No. netifs = list(node.netifs()) @@ -177,78 +176,80 @@ class NrlOlsr(NrlService): cmd += " -l /var/log/nrlolsrd.log" cmd += " -rpipe %s_olsr" % node.name - servicenames = map(lambda x: x._name, services) + servicenames = map(lambda x: x._name, services) if "SMF" in servicenames and not "NHDP" in servicenames: cmd += " -flooding s-mpr" cmd += " -smfClient %s_smf" % node.name if "zebra" in servicenames: cmd += " -z" - return (cmd, ) - -addservice(NrlOlsr) + return cmd, + class NrlOlsrv2(NrlService): - ''' Optimized Link State Routing protocol version 2 for MANET networks. 
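# Sketch of the command-line composition shared by the NRL daemons'
# getstartup() methods: skip control interfaces, pass the rest via repeated
# "-i" options, hook into SMF when that service is enabled, and return a
# one-element tuple (node and services are assumed; nrlnhdp is the example).
netifs = [ifc for ifc in node.netifs() if not getattr(ifc, "control", False)]
cmd = "nrlnhdp -l /var/log/nrlnhdp.log -rpipe %s_nhdp" % node.name
if "SMF" in [svc._name for svc in services]:
    cmd += " -flooding ecds-etx sticky -smfClient %s_smf" % node.name
if netifs:
    cmd += " -i " + " -i ".join(ifc.name for ifc in netifs)
startup_commands = (cmd,)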
- ''' + """ + Optimized Link State Routing protocol version 2 for MANET networks. + """ _name = "OLSRv2" - _startup = ("nrlolsrv2", ) - _shutdown = ("killall nrlolsrv2", ) - _validate = ("pidof nrlolsrv2", ) + _startup = ("nrlolsrv2",) + _shutdown = ("killall nrlolsrv2",) + _validate = ("pidof nrlolsrv2",) @classmethod - def getstartup(cls, node, services): - ''' Generate the appropriate command-line based on node interfaces. - ''' + def getstartup(cls, node, services): + """ + Generate the appropriate command-line based on node interfaces. + """ cmd = cls._startup[0] cmd += " -l /var/log/nrlolsrv2.log" cmd += " -rpipe %s_olsrv2" % node.name - - servicenames = map(lambda x: x._name, services) + + servicenames = map(lambda x: x._name, services) if "SMF" in servicenames: cmd += " -flooding ecds" cmd += " -smfClient %s_smf" % node.name cmd += " -p olsr" - netifs = filter(lambda x: not getattr(x, 'control', False), \ - node.netifs()) + netifs = filter(lambda x: not getattr(x, 'control', False), node.netifs()) if len(netifs) > 0: interfacenames = map(lambda x: x.name, netifs) cmd += " -i " cmd += " -i ".join(interfacenames) - - return (cmd, ) - -addservice(NrlOlsrv2) + + return cmd, + class OlsrOrg(NrlService): - ''' Optimized Link State Routing protocol from olsr.org for MANET networks. - ''' + """ + Optimized Link State Routing protocol from olsr.org for MANET networks. + """ _name = "OLSRORG" _configs = ("/etc/olsrd/olsrd.conf",) _dirs = ("/etc/olsrd",) - _startup = ("olsrd", ) - _shutdown = ("killall olsrd", ) - _validate = ("pidof olsrd", ) + _startup = ("olsrd",) + _shutdown = ("killall olsrd",) + _validate = ("pidof olsrd",) @classmethod - def getstartup(cls, node, services): - ''' Generate the appropriate command-line based on node interfaces. - ''' + def getstartup(cls, node, services): + """ + Generate the appropriate command-line based on node interfaces. + """ cmd = cls._startup[0] - netifs = filter(lambda x: not getattr(x, 'control', False), \ - node.netifs()) + netifs = filter(lambda x: not getattr(x, 'control', False), node.netifs()) if len(netifs) > 0: interfacenames = map(lambda x: x.name, netifs) cmd += " -i " cmd += " -i ".join(interfacenames) - return (cmd, ) + return cmd, + @classmethod def generateconfig(cls, node, filename, services): - ''' Generate a default olsrd config file to use the broadcast address of 255.255.255.255. - ''' + """ + Generate a default olsrd config file to use the broadcast address of 255.255.255.255. + """ cfg = """\ # # OLSR.org routing daemon config file @@ -314,7 +315,7 @@ class OlsrOrg(NrlService): # 1 gets remapped by olsrd to 0 UNSPECIFIED (1 is reserved for ICMP redirects) # 2 KERNEL routes (not very wise to use) # 3 BOOT (should in fact not be used by routing daemons) -# 4 STATIC +# 4 STATIC # 8 .. 15 various routing daemons (gated, zebra, bird, & co) # (defaults to 0 which gets replaced by an OS-specific default value # under linux 3 (BOOT) (for backward compatibility) @@ -510,7 +511,7 @@ LinkQualityFishEye 0 # Olsrd plugins to load # This must be the absolute path to the file # or the loader will use the following scheme: -# - Try the paths in the LD_LIBRARY_PATH +# - Try the paths in the LD_LIBRARY_PATH # environment variable. # - The list of libraries cached in /etc/ld.so.cache # - /lib, followed by /usr/lib @@ -566,11 +567,11 @@ InterfaceDefaults { """ return cfg -addservice(OlsrOrg) class MgenActor(NrlService): - ''' ZpcMgenActor. - ''' + """ + ZpcMgenActor. 
+ """ # a unique name is required, without spaces _name = "MgenActor" @@ -582,53 +583,53 @@ class MgenActor(NrlService): _dirs = () # generated files (without a full path this file goes in the node's dir, # e.g. /tmp/pycore.12345/n1.conf/) - _configs = ('start_mgen_actor.sh', ) + _configs = ('start_mgen_actor.sh',) # this controls the starting order vs other enabled services _startindex = 50 # list of startup commands, also may be generated during startup - _startup = ("sh start_mgen_actor.sh", ) + _startup = ("sh start_mgen_actor.sh",) # list of validation commands - _validate = ("pidof mgen", ) + _validate = ("pidof mgen",) # list of shutdown commands - _shutdown = ("killall mgen", ) + _shutdown = ("killall mgen",) @classmethod def generateconfig(cls, node, filename, services): - ''' Generate a startup script for MgenActor. Because mgenActor does not + """ + Generate a startup script for MgenActor. Because mgenActor does not daemonize, it can cause problems in some situations when launched directly using vcmd. - ''' + """ cfg = "#!/bin/sh\n" cfg += "# auto-generated by nrl.py:MgenActor.generateconfig()\n" comments = "" - cmd = "mgenBasicActor.py -n %s -a 0.0.0.0" % (node.name) + cmd = "mgenBasicActor.py -n %s -a 0.0.0.0" % node.name - servicenames = map(lambda x: x._name, services) - netifs = filter(lambda x: not getattr(x, 'control', False), \ - node.netifs()) + servicenames = map(lambda x: x._name, services) + netifs = filter(lambda x: not getattr(x, 'control', False), node.netifs()) if len(netifs) == 0: return () cfg += comments + cmd + " < /dev/null > /dev/null 2>&1 &\n\n" return cfg -# this line is required to add the above class to the list of available services -addservice(MgenActor) class Arouted(NrlService): - ''' Adaptive Routing - ''' + """ + Adaptive Routing + """ _name = "arouted" - _configs = ("startarouted.sh", ) + _configs = ("startarouted.sh",) _startindex = NrlService._startindex + 10 - _startup = ("sh startarouted.sh", ) - _shutdown = ("pkill arouted", ) - _validate = ("pidof arouted", ) - + _startup = ("sh startarouted.sh",) + _shutdown = ("pkill arouted",) + _validate = ("pidof arouted",) + @classmethod def generateconfig(cls, node, filename, services): - ''' Return the Quagga.conf or quaggaboot.sh file contents. - ''' + """ + Return the Quagga.conf or quaggaboot.sh file contents. + """ cfg = """ #!/bin/sh for f in "/tmp/%s_smf"; do @@ -643,12 +644,23 @@ for f in "/tmp/%s_smf"; do done done -""" % (node.name) +""" % node.name cfg += "ip route add %s dev lo\n" % cls.firstipv4prefix(node, 24) cfg += "arouted instance %s_smf tap %s_tap" % (node.name, node.name) - cfg += " stability 10" # seconds to consider a new route valid + # seconds to consider a new route valid + cfg += " stability 10" cfg += " 2>&1 > /var/log/arouted.log &\n\n" return cfg -# experimental -#addservice(Arouted) + +def load_services(): + ServiceManager.add(MgenSinkService) + ServiceManager.add(NrlNhdp) + ServiceManager.add(NrlSmf) + ServiceManager.add(NrlOlsr) + ServiceManager.add(NrlOlsrv2) + ServiceManager.add(OlsrOrg) + # this line is required to add the above class to the list of available services + ServiceManager.add(MgenActor) + # experimental + # ServiceManager.add(Arouted) diff --git a/daemon/core/services/quagga.py b/daemon/core/services/quagga.py index 7d57ae92..1a0943aa 100644 --- a/daemon/core/services/quagga.py +++ b/daemon/core/services/quagga.py @@ -1,42 +1,33 @@ -# -# CORE -# Copyright (c)2010-2012 the Boeing Company. -# See the LICENSE file included in this distribution. 
-# -# author: Jeff Ahrenholz -# -''' -quagga.py: defines routing services provided by Quagga. -''' +""" +quagga.py: defines routing services provided by Quagga. +""" import os -if os.uname()[0] == "Linux": - from core.netns import nodes -elif os.uname()[0] == "FreeBSD": - from core.bsd import nodes -from core.service import CoreService, addservice -from core.misc.ipaddr import IPv4Prefix, isIPv4Address, isIPv6Address -from core.api import coreapi -from core.constants import * +from core import constants +from core.enumerations import LinkTypes, NodeTypes +from core.misc import ipaddress +from core.misc import nodeutils +from core.service import CoreService +from core.service import ServiceManager + class Zebra(CoreService): - ''' - ''' _name = "zebra" _group = "Quagga" - _dirs = ("/usr/local/etc/quagga", "/var/run/quagga") + _dirs = ("/usr/local/etc/quagga", "/var/run/quagga") _configs = ("/usr/local/etc/quagga/Quagga.conf", - "quaggaboot.sh","/usr/local/etc/quagga/vtysh.conf") + "quaggaboot.sh", "/usr/local/etc/quagga/vtysh.conf") _startindex = 35 _startup = ("sh quaggaboot.sh zebra",) - _shutdown = ("killall zebra", ) - _validate = ("pidof zebra", ) + _shutdown = ("killall zebra",) + _validate = ("pidof zebra",) @classmethod def generateconfig(cls, node, filename, services): - ''' Return the Quagga.conf or quaggaboot.sh file contents. - ''' + """ + Return the Quagga.conf or quaggaboot.sh file contents. + """ if filename == cls._configs[0]: return cls.generateQuaggaConf(node, services) elif filename == cls._configs[1]: @@ -45,19 +36,21 @@ class Zebra(CoreService): return cls.generateVtyshConf(node, services) else: raise ValueError - + @classmethod def generateVtyshConf(cls, node, services): - ''' Returns configuration file text. - ''' + """ + Returns configuration file text. + """ return "service integrated-vtysh-config\n" @classmethod def generateQuaggaConf(cls, node, services): - ''' Returns configuration file text. Other services that depend on zebra - will have generatequaggaifcconfig() and generatequaggaconfig() - hooks that are invoked here. - ''' + """ + Returns configuration file text. Other services that depend on zebra + will have generatequaggaifcconfig() and generatequaggaconfig() + hooks that are invoked here. 
+ """ # we could verify here that filename == Quagga.conf cfg = "" for ifc in node.netifs(): @@ -75,7 +68,7 @@ class Zebra(CoreService): for s in services: if cls._name not in s._depends: continue - ifccfg = s.generatequaggaifcconfig(node, ifc) + ifccfg = s.generatequaggaifcconfig(node, ifc) if s._ipv4_routing: want_ipv4 = True if s._ipv6_routing: @@ -83,47 +76,47 @@ class Zebra(CoreService): cfgv6 += ifccfg else: cfgv4 += ifccfg - + if want_ipv4: - ipv4list = filter(lambda x: isIPv4Address(x.split('/')[0]), - ifc.addrlist) + ipv4list = filter(lambda x: ipaddress.is_ipv4_address(x.split('/')[0]), ifc.addrlist) cfg += " " cfg += "\n ".join(map(cls.addrstr, ipv4list)) cfg += "\n" cfg += cfgv4 if want_ipv6: - ipv6list = filter(lambda x: isIPv6Address(x.split('/')[0]), - ifc.addrlist) + ipv6list = filter(lambda x: ipaddress.is_ipv6_address(x.split('/')[0]), ifc.addrlist) cfg += " " cfg += "\n ".join(map(cls.addrstr, ipv6list)) cfg += "\n" cfg += cfgv6 cfg += "!\n" - + for s in services: if cls._name not in s._depends: continue cfg += s.generatequaggaconfig(node) return cfg - + @staticmethod def addrstr(x): - ''' helper for mapping IP addresses to zebra config statements - ''' + """ + helper for mapping IP addresses to zebra config statements + """ if x.find(".") >= 0: return "ip address %s" % x elif x.find(":") >= 0: return "ipv6 address %s" % x else: - raise Value, "invalid address: %s", x - + raise ValueError("invalid address: %s", x) + @classmethod def generateQuaggaBoot(cls, node, services): - ''' Generate a shell script used to boot the Quagga daemons. - ''' + """ + Generate a shell script used to boot the Quagga daemons. + """ try: - quagga_bin_search = node.session.cfg['quagga_bin_search'] - quagga_sbin_search = node.session.cfg['quagga_sbin_search'] + quagga_bin_search = node.session.config['quagga_bin_search'] + quagga_sbin_search = node.session.config['quagga_sbin_search'] except KeyError: quagga_bin_search = '"/usr/local/bin /usr/bin /usr/lib/quagga"' quagga_sbin_search = '"/usr/local/sbin /usr/sbin /usr/lib/quagga"' @@ -220,73 +213,74 @@ if [ "$1" != "zebra" ]; then fi confcheck bootquagga -""" % (cls._configs[0], quagga_sbin_search, quagga_bin_search, \ - QUAGGA_STATE_DIR) +""" % (cls._configs[0], quagga_sbin_search, quagga_bin_search, constants.QUAGGA_STATE_DIR) -addservice(Zebra) class QuaggaService(CoreService): - ''' Parent class for Quagga services. Defines properties and methods - common to Quagga's routing daemons. - ''' + """ + Parent class for Quagga services. Defines properties and methods + common to Quagga's routing daemons. + """ _name = "QuaggaDaemon" _group = "Quagga" - _depends = ("zebra", ) + _depends = ("zebra",) _dirs = () _configs = () _startindex = 40 _startup = () _shutdown = () _meta = "The config file for this service can be found in the Zebra service." - + _ipv4_routing = False _ipv6_routing = False @staticmethod def routerid(node): - ''' Helper to return the first IPv4 address of a node as its router ID. - ''' + """ + Helper to return the first IPv4 address of a node as its router ID. 
+ """ for ifc in node.netifs(): if hasattr(ifc, 'control') and ifc.control == True: continue for a in ifc.addrlist: if a.find(".") >= 0: - return a .split('/') [0] - #raise ValueError, "no IPv4 address found for router ID" + return a.split('/')[0] + # raise ValueError, "no IPv4 address found for router ID" return "0.0.0.0" - + @staticmethod def rj45check(ifc): - ''' Helper to detect whether interface is connected an external RJ45 + """ + Helper to detect whether interface is connected an external RJ45 link. - ''' + """ if ifc.net: for peerifc in ifc.net.netifs(): if peerifc == ifc: continue - if isinstance(peerifc, nodes.RJ45Node): + if nodeutils.is_node(peerifc, NodeTypes.RJ45): return True return False @classmethod - def generateconfig(cls, node, filename, services): + def generateconfig(cls, node, filename, services): return "" @classmethod - def generatequaggaifcconfig(cls, node, ifc): + def generatequaggaifcconfig(cls, node, ifc): return "" @classmethod - def generatequaggaconfig(cls, node): + def generatequaggaconfig(cls, node): return "" - class Ospfv2(QuaggaService): - ''' The OSPFv2 service provides IPv4 routing for wired networks. It does - not build its own configuration file but has hooks for adding to the - unified Quagga.conf file. - ''' + """ + The OSPFv2 service provides IPv4 routing for wired networks. It does + not build its own configuration file but has hooks for adding to the + unified Quagga.conf file. + """ _name = "OSPFv2" _startup = () _shutdown = ("killall ospfd", ) @@ -295,10 +289,11 @@ class Ospfv2(QuaggaService): @staticmethod def mtucheck(ifc): - ''' Helper to detect MTU mismatch and add the appropriate OSPF + """ + Helper to detect MTU mismatch and add the appropriate OSPF mtu-ignore command. This is needed when e.g. a node is linked via a GreTap device. - ''' + """ if ifc.mtu != 1500: # a workaround for PhysicalNode GreTap, which has no knowledge of # the other nodes/nets @@ -312,52 +307,55 @@ class Ospfv2(QuaggaService): @staticmethod def ptpcheck(ifc): - ''' Helper to detect whether interface is connected to a notional + """ + Helper to detect whether interface is connected to a notional point-to-point link. 
- ''' - if isinstance(ifc.net, nodes.PtpNet): + """ + if nodeutils.is_node(ifc.net, NodeTypes.PEER_TO_PEER): return " ip ospf network point-to-point\n" return "" @classmethod - def generatequaggaconfig(cls, node): + def generatequaggaconfig(cls, node): cfg = "router ospf\n" rtrid = cls.routerid(node) cfg += " router-id %s\n" % rtrid # network 10.0.0.0/24 area 0 for ifc in node.netifs(): - if hasattr(ifc, 'control') and ifc.control == True: + if hasattr(ifc, 'control') and ifc.control is True: continue for a in ifc.addrlist: if a.find(".") < 0: continue - net = IPv4Prefix(a) + net = ipaddress.Ipv4Prefix(a) cfg += " network %s area 0\n" % net - cfg += "!\n" + cfg += "!\n" return cfg - - @classmethod - def generatequaggaifcconfig(cls, node, ifc): - return cls.mtucheck(ifc) - #cfg = cls.mtucheck(ifc) - # external RJ45 connections will use default OSPF timers - #if cls.rj45check(ifc): - # return cfg - #cfg += cls.ptpcheck(ifc) - #return cfg + """\ -# ip ospf hello-interval 2 + @classmethod + def generatequaggaifcconfig(cls, node, ifc): + return cls.mtucheck(ifc) + # cfg = cls.mtucheck(ifc) + # external RJ45 connections will use default OSPF timers + # if cls.rj45check(ifc): + # return cfg + # cfg += cls.ptpcheck(ifc) + + # return cfg + """\ + + +# ip ospf hello-interval 2 # ip ospf dead-interval 6 # ip ospf retransmit-interval 5 -#""" - -addservice(Ospfv2) +# """ + class Ospfv3(QuaggaService): - ''' The OSPFv3 service provides IPv6 routing for wired networks. It does - not build its own configuration file but has hooks for adding to the - unified Quagga.conf file. - ''' + """ + The OSPFv3 service provides IPv6 routing for wired networks. It does + not build its own configuration file but has hooks for adding to the + unified Quagga.conf file. + """ _name = "OSPFv3" _startup = () _shutdown = ("killall ospf6d", ) @@ -367,9 +365,10 @@ class Ospfv3(QuaggaService): @staticmethod def minmtu(ifc): - ''' Helper to discover the minimum MTU of interfaces linked with the + """ + Helper to discover the minimum MTU of interfaces linked with the given interface. - ''' + """ mtu = ifc.mtu if not ifc.net: return mtu @@ -377,13 +376,14 @@ class Ospfv3(QuaggaService): if i.mtu < mtu: mtu = i.mtu return mtu - + @classmethod def mtucheck(cls, ifc): - ''' Helper to detect MTU mismatch and add the appropriate OSPFv3 + """ + Helper to detect MTU mismatch and add the appropriate OSPFv3 ifmtu command. This is needed when e.g. a node is linked via a GreTap device. - ''' + """ minmtu = cls.minmtu(ifc) if minmtu < ifc.mtu: return " ipv6 ospf6 ifmtu %d\n" % minmtu @@ -392,57 +392,59 @@ class Ospfv3(QuaggaService): @staticmethod def ptpcheck(ifc): - ''' Helper to detect whether interface is connected to a notional + """ + Helper to detect whether interface is connected to a notional point-to-point link. 
- ''' - if isinstance(ifc.net, nodes.PtpNet): + """ + if nodeutils.is_node(ifc.net, NodeTypes.PEER_TO_PEER): return " ipv6 ospf6 network point-to-point\n" return "" @classmethod - def generatequaggaconfig(cls, node): + def generatequaggaconfig(cls, node): cfg = "router ospf6\n" rtrid = cls.routerid(node) cfg += " router-id %s\n" % rtrid for ifc in node.netifs(): - if hasattr(ifc, 'control') and ifc.control == True: + if hasattr(ifc, 'control') and ifc.control is True: continue cfg += " interface %s area 0.0.0.0\n" % ifc.name cfg += "!\n" return cfg - - @classmethod - def generatequaggaifcconfig(cls, node, ifc): - return cls.mtucheck(ifc) - #cfg = cls.mtucheck(ifc) - # external RJ45 connections will use default OSPF timers - #if cls.rj45check(ifc): - # return cfg - #cfg += cls.ptpcheck(ifc) - #return cfg + """\ -# ipv6 ospf6 hello-interval 2 + @classmethod + def generatequaggaifcconfig(cls, node, ifc): + return cls.mtucheck(ifc) + # cfg = cls.mtucheck(ifc) + # external RJ45 connections will use default OSPF timers + # if cls.rj45check(ifc): + # return cfg + # cfg += cls.ptpcheck(ifc) + + # return cfg + """\ + + +# ipv6 ospf6 hello-interval 2 # ipv6 ospf6 dead-interval 6 # ipv6 ospf6 retransmit-interval 5 -#""" +# """ -addservice(Ospfv3) class Ospfv3mdr(Ospfv3): - ''' The OSPFv3 MANET Designated Router (MDR) service provides IPv6 - routing for wireless networks. It does not build its own - configuration file but has hooks for adding to the - unified Quagga.conf file. - ''' + """ + The OSPFv3 MANET Designated Router (MDR) service provides IPv6 + routing for wireless networks. It does not build its own + configuration file but has hooks for adding to the + unified Quagga.conf file. + """ _name = "OSPFv3MDR" _ipv4_routing = True @classmethod - def generatequaggaifcconfig(cls, node, ifc): + def generatequaggaifcconfig(cls, node, ifc): cfg = cls.mtucheck(ifc) cfg += " ipv6 ospf6 instance-id 65\n" - if ifc.net is not None and \ - isinstance(ifc.net, (nodes.WlanNode, nodes.EmaneNode)): + if ifc.net is not None and nodeutils.is_node(ifc.net, (NodeTypes.WIRELESS_LAN, NodeTypes.EMANE)): return cfg + """\ ipv6 ospf6 hello-interval 2 ipv6 ospf6 dead-interval 6 @@ -455,13 +457,13 @@ class Ospfv3mdr(Ospfv3): else: return cfg -addservice(Ospfv3mdr) class Bgp(QuaggaService): - '''' The BGP service provides interdomain routing. - Peers must be manually configured, with a full mesh for those - having the same AS number. - ''' + """ + The BGP service provides interdomain routing. + Peers must be manually configured, with a full mesh for those + having the same AS number. + """ _name = "BGP" _startup = () _shutdown = ("killall bgpd", ) @@ -471,7 +473,7 @@ class Bgp(QuaggaService): _ipv6_routing = True @classmethod - def generatequaggaconfig(cls, node): + def generatequaggaconfig(cls, node): cfg = "!\n! BGP configuration\n!\n" cfg += "! You should configure the AS number below,\n" cfg += "! along with this router's peers.\n!\n" @@ -482,11 +484,11 @@ class Bgp(QuaggaService): cfg += "! neighbor 1.2.3.4 remote-as 555\n!\n" return cfg -addservice(Bgp) class Rip(QuaggaService): - ''' The RIP service provides IPv4 routing for wired networks. - ''' + """ + The RIP service provides IPv4 routing for wired networks. 
+ """ _name = "RIP" _startup = () _shutdown = ("killall ripd", ) @@ -494,7 +496,7 @@ class Rip(QuaggaService): _ipv4_routing = True @classmethod - def generatequaggaconfig(cls, node): + def generatequaggaconfig(cls, node): cfg = """\ router rip redistribute static @@ -505,11 +507,11 @@ router rip """ return cfg -addservice(Rip) class Ripng(QuaggaService): - ''' The RIP NG service provides IPv6 routing for wired networks. - ''' + """ + The RIP NG service provides IPv6 routing for wired networks. + """ _name = "RIPNG" _startup = () _shutdown = ("killall ripngd", ) @@ -517,7 +519,7 @@ class Ripng(QuaggaService): _ipv6_routing = True @classmethod - def generatequaggaconfig(cls, node): + def generatequaggaconfig(cls, node): cfg = """\ router ripng redistribute static @@ -528,12 +530,12 @@ router ripng """ return cfg -addservice(Ripng) class Babel(QuaggaService): - ''' The Babel service provides a loop-avoiding distance-vector routing + """ + The Babel service provides a loop-avoiding distance-vector routing protocol for IPv6 and IPv4 with fast convergence properties. - ''' + """ _name = "Babel" _startup = () _shutdown = ("killall babeld", ) @@ -541,29 +543,28 @@ class Babel(QuaggaService): _ipv6_routing = True @classmethod - def generatequaggaconfig(cls, node): + def generatequaggaconfig(cls, node): cfg = "router babel\n" for ifc in node.netifs(): - if hasattr(ifc, 'control') and ifc.control == True: + if hasattr(ifc, 'control') and ifc.control is True: continue cfg += " network %s\n" % ifc.name cfg += " redistribute static\n redistribute connected\n" return cfg - + @classmethod - def generatequaggaifcconfig(cls, node, ifc): + def generatequaggaifcconfig(cls, node, ifc): type = "wired" - if ifc.net and ifc.net.linktype == coreapi.CORE_LINK_WIRELESS: + if ifc.net and ifc.net.linktype == LinkTypes.WIRELESS.value: return " babel wireless\n no babel split-horizon\n" else: return " babel wired\n babel split-horizon\n" -addservice(Babel) class Xpimd(QuaggaService): - '''\ + """ PIM multicast routing based on XORP. - ''' + """ _name = 'Xpimd' _startup = () _shutdown = ('killall xpimd', ) @@ -571,7 +572,7 @@ class Xpimd(QuaggaService): _ipv4_routing = True @classmethod - def generatequaggaconfig(cls, node): + def generatequaggaconfig(cls, node): ifname = 'eth0' for ifc in node.netifs(): if ifc.name != 'lo': @@ -587,7 +588,17 @@ class Xpimd(QuaggaService): return cfg @classmethod - def generatequaggaifcconfig(cls, node, ifc): + def generatequaggaifcconfig(cls, node, ifc): return ' ip mfea\n ip igmp\n ip pim\n' -addservice(Xpimd) + +def load_services(): + ServiceManager.add(Zebra) + ServiceManager.add(Ospfv2) + ServiceManager.add(Ospfv3) + ServiceManager.add(Ospfv3mdr) + ServiceManager.add(Bgp) + ServiceManager.add(Rip) + ServiceManager.add(Ripng) + ServiceManager.add(Babel) + ServiceManager.add(Xpimd) diff --git a/daemon/core/services/security.py b/daemon/core/services/security.py index 29f8ab3b..5331ef18 100644 --- a/daemon/core/services/security.py +++ b/daemon/core/services/security.py @@ -1,83 +1,75 @@ -# -# CORE - define security services : vpnclient, vpnserver, ipsec and firewall -# -# Copyright (c)2011-2012 the Boeing Company. -# See the LICENSE file included in this distribution. 
-# -''' -security.py: defines security services (vpnclient, vpnserver, ipsec and +""" +security.py: defines security services (vpnclient, vpnserver, ipsec and firewall) -''' +""" -import os +from core import constants +from core.misc import log +from core.service import CoreService +from core.service import ServiceManager + +logger = log.get_logger(__name__) -from core.service import CoreService, addservice -from core.constants import * class VPNClient(CoreService): - ''' - ''' _name = "VPNClient" _group = "Security" - _configs = ('vpnclient.sh', ) + _configs = ('vpnclient.sh',) _startindex = 60 _startup = ('sh vpnclient.sh',) _shutdown = ("killall openvpn",) - _validate = ("pidof openvpn", ) + _validate = ("pidof openvpn",) _custom_needed = True @classmethod def generateconfig(cls, node, filename, services): - ''' Return the client.conf and vpnclient.sh file contents to - ''' + """ + Return the client.conf and vpnclient.sh file contents to + """ cfg = "#!/bin/sh\n" cfg += "# custom VPN Client configuration for service (security.py)\n" - fname = "%s/examples/services/sampleVPNClient" % CORE_DATA_DIR + fname = "%s/examples/services/sampleVPNClient" % constants.CORE_DATA_DIR + try: cfg += open(fname, "rb").read() - except e: - print "Error opening VPN client configuration template (%s): %s" % \ - (fname, e) + except IOError: + logger.exception("Error opening VPN client configuration template (%s)", fname) + return cfg -# this line is required to add the above class to the list of available services -addservice(VPNClient) class VPNServer(CoreService): - ''' - ''' _name = "VPNServer" _group = "Security" - _configs = ('vpnserver.sh', ) + _configs = ('vpnserver.sh',) _startindex = 50 _startup = ('sh vpnserver.sh',) _shutdown = ("killall openvpn",) - _validate = ("pidof openvpn", ) + _validate = ("pidof openvpn",) _custom_needed = True @classmethod def generateconfig(cls, node, filename, services): - ''' Return the sample server.conf and vpnserver.sh file contents to - GUI for user customization. - ''' + """ + Return the sample server.conf and vpnserver.sh file contents to + GUI for user customization. + """ cfg = "#!/bin/sh\n" cfg += "# custom VPN Server Configuration for service (security.py)\n" - fname = "%s/examples/services/sampleVPNServer" % CORE_DATA_DIR + fname = "%s/examples/services/sampleVPNServer" % constants.CORE_DATA_DIR + try: cfg += open(fname, "rb").read() - except e: - print "Error opening VPN server configuration template (%s): %s" % \ - (fname, e) + except IOError: + logger.exception("Error opening VPN server configuration template (%s)", fname) + return cfg -addservice(VPNServer) class IPsec(CoreService): - ''' - ''' _name = "IPsec" _group = "Security" - _configs = ('ipsec.sh', ) + _configs = ('ipsec.sh',) _startindex = 60 _startup = ('sh ipsec.sh',) _shutdown = ("killall racoon",) @@ -85,45 +77,51 @@ class IPsec(CoreService): @classmethod def generateconfig(cls, node, filename, services): - ''' Return the ipsec.conf and racoon.conf file contents to - GUI for user customization. - ''' + """ + Return the ipsec.conf and racoon.conf file contents to + GUI for user customization. 
+ """ cfg = "#!/bin/sh\n" cfg += "# set up static tunnel mode security assocation for service " cfg += "(security.py)\n" - fname = "%s/examples/services/sampleIPsec" % CORE_DATA_DIR + fname = "%s/examples/services/sampleIPsec" % constants.CORE_DATA_DIR + try: cfg += open(fname, "rb").read() - except e: - print "Error opening IPsec configuration template (%s): %s" % \ - (fname, e) + except IOError: + logger.exception("Error opening IPsec configuration template (%s)", fname) + return cfg -addservice(IPsec) class Firewall(CoreService): - ''' - ''' _name = "Firewall" _group = "Security" - _configs = ('firewall.sh', ) + _configs = ('firewall.sh',) _startindex = 20 _startup = ('sh firewall.sh',) _custom_needed = True @classmethod def generateconfig(cls, node, filename, services): - ''' Return the firewall rule examples to GUI for user customization. - ''' + """ + Return the firewall rule examples to GUI for user customization. + """ cfg = "#!/bin/sh\n" cfg += "# custom node firewall rules for service (security.py)\n" - fname = "%s/examples/services/sampleFirewall" % CORE_DATA_DIR + fname = "%s/examples/services/sampleFirewall" % constants.CORE_DATA_DIR + try: cfg += open(fname, "rb").read() - except e: - print "Error opening Firewall configuration template (%s): %s" % \ - (fname, e) + except IOError: + logger.exception("Error opening Firewall configuration template (%s)", fname) + return cfg -addservice(Firewall) +def load_services(): + # this line is required to add the above class to the list of available services + ServiceManager.add(VPNClient) + ServiceManager.add(VPNServer) + ServiceManager.add(IPsec) + ServiceManager.add(Firewall) diff --git a/daemon/core/services/startup.py b/daemon/core/services/startup.py index 8039ee62..0e749bcd 100644 --- a/daemon/core/services/startup.py +++ b/daemon/core/services/startup.py @@ -1,23 +1,27 @@ -from core.service import CoreService, addservice -from sys import maxint from inspect import isclass +from sys import maxint + +from core.service import CoreService +from core.service import ServiceManager + class Startup(CoreService): - 'A CORE service to start other services in order, serially' + """ + A CORE service to start other services in order, serially + """ _name = 'startup' _group = 'Utility' _depends = () _dirs = () - _configs = ('startup.sh', ) + _configs = ('startup.sh',) _startindex = maxint - _startup = ('sh startup.sh', ) + _startup = ('sh startup.sh',) _shutdown = () _validate = () @staticmethod - def isStartupService(s): - return isinstance(s, Startup) or \ - (isclass(s) and issubclass(s, Startup)) + def is_startup_service(s): + return isinstance(s, Startup) or (isclass(s) and issubclass(s, Startup)) @classmethod def generateconfig(cls, node, filename, services): @@ -26,12 +30,14 @@ class Startup(CoreService): script = '#!/bin/sh\n' \ '# auto-generated by Startup (startup.py)\n\n' \ 'exec > startup.log 2>&1\n\n' - for s in sorted(services, key = lambda x: x._startindex): - if cls.isStartupService(s) or len(str(s._starttime)) > 0: + for s in sorted(services, key=lambda x: x._startindex): + if cls.is_startup_service(s) or len(str(s._starttime)) > 0: continue start = '\n'.join(s.getstartup(node, services)) if start: script += start + '\n' return script -addservice(Startup) + +def load_services(): + ServiceManager.add(Startup) diff --git a/daemon/core/services/ucarp.py b/daemon/core/services/ucarp.py index b3c5c411..4a8fb2ea 100644 --- a/daemon/core/services/ucarp.py +++ b/daemon/core/services/ucarp.py @@ -1,189 +1,185 @@ -# -# CORE configuration 
for UCARP -# Copyright (c) 2012 Jonathan deBoer -# See the LICENSE file included in this distribution. -# -# -# author: Jonathan deBoer -# -''' -ucarp.py: defines high-availability IP address controlled by ucarp -''' - -import os - -from core.service import CoreService, addservice -from core.misc.ipaddr import IPv4Prefix -from core.constants import * - - -UCARP_ETC="/usr/local/etc/ucarp" - -class Ucarp(CoreService): - ''' - ''' - _name = "ucarp" - _group = "Utility" - _depends = ( ) - _dirs = (UCARP_ETC, ) - _configs = (UCARP_ETC + "/default.sh", UCARP_ETC + "/default-up.sh", UCARP_ETC + "/default-down.sh", "ucarpboot.sh",) - _startindex = 65 - _startup = ("sh ucarpboot.sh",) - _shutdown = ("killall ucarp", ) - _validate = ("pidof ucarp", ) - - @classmethod - def generateconfig(cls, node, filename, services): - ''' Return the default file contents - ''' - if filename == cls._configs[0]: - return cls.generateUcarpConf(node, services) - elif filename == cls._configs[1]: - return cls.generateVipUp(node, services) - elif filename == cls._configs[2]: - return cls.generateVipDown(node, services) - elif filename == cls._configs[3]: - return cls.generateUcarpBoot(node, services) - else: - raise ValueError - - @classmethod - def generateUcarpConf(cls, node, services): - ''' Returns configuration file text. - ''' - try: - ucarp_bin = node.session.cfg['ucarp_bin'] - except KeyError: - ucarp_bin = "/usr/sbin/ucarp" - return """\ -#!/bin/sh -# Location of UCARP executable -UCARP_EXEC=%s - -# Location of the UCARP config directory -UCARP_CFGDIR=%s - -# Logging Facility -FACILITY=daemon - -# Instance ID -# Any number from 1 to 255 -INSTANCE_ID=1 - -# Password -# Master and Backup(s) need to be the same -PASSWORD="changeme" - -# The failover application address -VIRTUAL_ADDRESS=127.0.0.254 -VIRTUAL_NET=8 - -# Interface for IP Address -INTERFACE=lo - -# Maintanence address of the local machine -SOURCE_ADDRESS=127.0.0.1 - -# The ratio number to be considered before marking the node as dead -DEAD_RATIO=3 - -# UCARP base, lower number will be preferred master -# set to same to have master stay as long as possible -UCARP_BASE=1 -SKEW=0 - -# UCARP options -# -z run shutdown script on exit -# -P force preferred master -# -n don't run down script at start up when we are backup -# -M use broadcast instead of multicast -# -S ignore interface state -OPTIONS="-z -n -M" - -# Send extra parameter to down and up scripts -#XPARAM="-x " -XPARAM="-x ${VIRTUAL_NET}" - -# The start and stop scripts -START_SCRIPT=${UCARP_CFGDIR}/default-up.sh -STOP_SCRIPT=${UCARP_CFGDIR}/default-down.sh - -# These line should not need to be touched -UCARP_OPTS="$OPTIONS -b $UCARP_BASE -k $SKEW -i $INTERFACE -v $INSTANCE_ID -p $PASSWORD -u $START_SCRIPT -d $STOP_SCRIPT -a $VIRTUAL_ADDRESS -s $SOURCE_ADDRESS -f $FACILITY $XPARAM" - -${UCARP_EXEC} -B ${UCARP_OPTS} -""" % (ucarp_bin, UCARP_ETC) - - @classmethod - def generateUcarpBoot(cls, node, services): - ''' Generate a shell script used to boot the Ucarp daemons. 
- ''' - try: - ucarp_bin = node.session.cfg['ucarp_bin'] - except KeyError: - ucarp_bin = "/usr/sbin/ucarp" - return """\ -#!/bin/sh -# Location of the UCARP config directory -UCARP_CFGDIR=%s - -chmod a+x ${UCARP_CFGDIR}/*.sh - -# Start the default ucarp daemon configuration -${UCARP_CFGDIR}/default.sh - -""" % (UCARP_ETC) - - @classmethod - def generateVipUp(cls, node, services): - ''' Generate a shell script used to start the virtual ip - ''' - try: - ucarp_bin = node.session.cfg['ucarp_bin'] - except KeyError: - ucarp_bin = "/usr/sbin/ucarp" - return """\ -#!/bin/bash - -# Should be invoked as "default-up.sh " -exec 2> /dev/null - -IP="${2}" -NET="${3}" -if [ -z "$NET" ]; then - NET="24" -fi - -/sbin/ip addr add ${IP}/${NET} dev "$1" - - -""" - - @classmethod - def generateVipDown(cls, node, services): - ''' Generate a shell script used to stop the virtual ip - ''' - try: - ucarp_bin = node.session.cfg['ucarp_bin'] - except KeyError: - ucarp_bin = "/usr/sbin/ucarp" - return """\ -#!/bin/bash - -# Should be invoked as "default-down.sh " -exec 2> /dev/null - -IP="${2}" -NET="${3}" -if [ -z "$NET" ]; then - NET="24" -fi - -/sbin/ip addr del ${IP}/${NET} dev "$1" - - -""" - - -addservice(Ucarp) - +""" +ucarp.py: defines high-availability IP address controlled by ucarp +""" + +from core.service import CoreService +from core.service import ServiceManager + +UCARP_ETC = "/usr/local/etc/ucarp" + + +class Ucarp(CoreService): + _name = "ucarp" + _group = "Utility" + _depends = ( ) + _dirs = (UCARP_ETC,) + _configs = ( + UCARP_ETC + "/default.sh", UCARP_ETC + "/default-up.sh", UCARP_ETC + "/default-down.sh", "ucarpboot.sh",) + _startindex = 65 + _startup = ("sh ucarpboot.sh",) + _shutdown = ("killall ucarp",) + _validate = ("pidof ucarp",) + + @classmethod + def generateconfig(cls, node, filename, services): + """ + Return the default file contents + """ + if filename == cls._configs[0]: + return cls.generateUcarpConf(node, services) + elif filename == cls._configs[1]: + return cls.generateVipUp(node, services) + elif filename == cls._configs[2]: + return cls.generateVipDown(node, services) + elif filename == cls._configs[3]: + return cls.generateUcarpBoot(node, services) + else: + raise ValueError + + @classmethod + def generateUcarpConf(cls, node, services): + """ + Returns configuration file text. 
+ """ + try: + ucarp_bin = node.session.cfg['ucarp_bin'] + except KeyError: + ucarp_bin = "/usr/sbin/ucarp" + + return """\ +#!/bin/sh +# Location of UCARP executable +UCARP_EXEC=%s + +# Location of the UCARP config directory +UCARP_CFGDIR=%s + +# Logging Facility +FACILITY=daemon + +# Instance ID +# Any number from 1 to 255 +INSTANCE_ID=1 + +# Password +# Master and Backup(s) need to be the same +PASSWORD="changeme" + +# The failover application address +VIRTUAL_ADDRESS=127.0.0.254 +VIRTUAL_NET=8 + +# Interface for IP Address +INTERFACE=lo + +# Maintanence address of the local machine +SOURCE_ADDRESS=127.0.0.1 + +# The ratio number to be considered before marking the node as dead +DEAD_RATIO=3 + +# UCARP base, lower number will be preferred master +# set to same to have master stay as long as possible +UCARP_BASE=1 +SKEW=0 + +# UCARP options +# -z run shutdown script on exit +# -P force preferred master +# -n don't run down script at start up when we are backup +# -M use broadcast instead of multicast +# -S ignore interface state +OPTIONS="-z -n -M" + +# Send extra parameter to down and up scripts +#XPARAM="-x " +XPARAM="-x ${VIRTUAL_NET}" + +# The start and stop scripts +START_SCRIPT=${UCARP_CFGDIR}/default-up.sh +STOP_SCRIPT=${UCARP_CFGDIR}/default-down.sh + +# These line should not need to be touched +UCARP_OPTS="$OPTIONS -b $UCARP_BASE -k $SKEW -i $INTERFACE -v $INSTANCE_ID -p $PASSWORD -u $START_SCRIPT -d $STOP_SCRIPT -a $VIRTUAL_ADDRESS -s $SOURCE_ADDRESS -f $FACILITY $XPARAM" + +${UCARP_EXEC} -B ${UCARP_OPTS} +""" % (ucarp_bin, UCARP_ETC) + + @classmethod + def generateUcarpBoot(cls, node, services): + """ + Generate a shell script used to boot the Ucarp daemons. + """ + + try: + ucarp_bin = node.session.cfg['ucarp_bin'] + except KeyError: + ucarp_bin = "/usr/sbin/ucarp" + return """\ +#!/bin/sh +# Location of the UCARP config directory +UCARP_CFGDIR=%s + +chmod a+x ${UCARP_CFGDIR}/*.sh + +# Start the default ucarp daemon configuration +${UCARP_CFGDIR}/default.sh + +""" % UCARP_ETC + + @classmethod + def generateVipUp(cls, node, services): + """ + Generate a shell script used to start the virtual ip + """ + try: + ucarp_bin = node.session.cfg['ucarp_bin'] + except KeyError: + ucarp_bin = "/usr/sbin/ucarp" + + return """\ +#!/bin/bash + +# Should be invoked as "default-up.sh " +exec 2> /dev/null + +IP="${2}" +NET="${3}" +if [ -z "$NET" ]; then + NET="24" +fi + +/sbin/ip addr add ${IP}/${NET} dev "$1" + + +""" + + @classmethod + def generateVipDown(cls, node, services): + """ + Generate a shell script used to stop the virtual ip + """ + try: + ucarp_bin = node.session.cfg['ucarp_bin'] + except KeyError: + ucarp_bin = "/usr/sbin/ucarp" + return """\ +#!/bin/bash + +# Should be invoked as "default-down.sh " +exec 2> /dev/null + +IP="${2}" +NET="${3}" +if [ -z "$NET" ]; then + NET="24" +fi + +/sbin/ip addr del ${IP}/${NET} dev "$1" + + +""" + + +def load_services(): + ServiceManager.add(Ucarp) diff --git a/daemon/core/services/utility.py b/daemon/core/services/utility.py index 49ddf844..2c64492a 100644 --- a/daemon/core/services/utility.py +++ b/daemon/core/services/utility.py @@ -1,24 +1,22 @@ -# -# CORE -# Copyright (c)2010-2014 the Boeing Company. -# See the LICENSE file included in this distribution. -# -# author: Jeff Ahrenholz -# -''' -utility.py: defines miscellaneous utility services. -''' +""" +utility.py: defines miscellaneous utility services. 
+""" import os +import subprocess + +from core import constants +from core.misc import utils +from core.misc.ipaddress import Ipv4Prefix +from core.misc.ipaddress import Ipv6Prefix +from core.service import CoreService +from core.service import ServiceManager -from core.service import CoreService, addservice -from core.misc.ipaddr import IPv4Prefix, IPv6Prefix -from core.misc.utils import * -from core.constants import * class UtilService(CoreService): - ''' Parent class for utility services. - ''' + """ + Parent class for utility services. + """ _name = "UtilityProcess" _group = "Utility" _depends = () @@ -29,15 +27,16 @@ class UtilService(CoreService): _shutdown = () @classmethod - def generateconfig(cls, node, filename, services): + def generateconfig(cls, node, filename, services): return "" + class IPForwardService(UtilService): _name = "IPForward" - _configs = ("ipforward.sh", ) + _configs = ("ipforward.sh",) _startindex = 5 - _startup = ("sh ipforward.sh", ) - + _startup = ("sh ipforward.sh",) + @classmethod def generateconfig(cls, node, filename, services): if os.uname()[0] == "Linux": @@ -60,13 +59,13 @@ class IPForwardService(UtilService): %(sysctl)s -w net.ipv4.conf.default.send_redirects=0 %(sysctl)s -w net.ipv4.conf.all.rp_filter=0 %(sysctl)s -w net.ipv4.conf.default.rp_filter=0 -""" % {'sysctl': SYSCTL_BIN} +""" % {'sysctl': constants.SYSCTL_BIN} for ifc in node.netifs(): - name = sysctldevname(ifc.name) - cfg += "%s -w net.ipv4.conf.%s.forwarding=1\n" % (SYSCTL_BIN, name) + name = utils.sysctldevname(ifc.name) + cfg += "%s -w net.ipv4.conf.%s.forwarding=1\n" % (constants.SYSCTL_BIN, name) cfg += "%s -w net.ipv4.conf.%s.send_redirects=0\n" % \ - (SYSCTL_BIN, name) - cfg += "%s -w net.ipv4.conf.%s.rp_filter=0\n" % (SYSCTL_BIN, name) + (constants.SYSCTL_BIN, name) + cfg += "%s -w net.ipv4.conf.%s.rp_filter=0\n" % (constants.SYSCTL_BIN, name) return cfg @classmethod @@ -78,9 +77,8 @@ class IPForwardService(UtilService): %s -w net.inet6.ip6.forwarding=1 %s -w net.inet.icmp.bmcastecho=1 %s -w net.inet.icmp.icmplim=0 -""" % (SYSCTL_BIN, SYSCTL_BIN, SYSCTL_BIN, SYSCTL_BIN) +""" % (constants.SYSCTL_BIN, constants.SYSCTL_BIN, constants.SYSCTL_BIN, constants.SYSCTL_BIN) -addservice(IPForwardService) class DefaultRouteService(UtilService): _name = "DefaultRoute" @@ -92,21 +90,21 @@ class DefaultRouteService(UtilService): cfg = "#!/bin/sh\n" cfg += "# auto-generated by DefaultRoute service (utility.py)\n" for ifc in node.netifs(): - if hasattr(ifc, 'control') and ifc.control == True: + if hasattr(ifc, 'control') and ifc.control is True: continue cfg += "\n".join(map(cls.addrstr, ifc.addrlist)) cfg += "\n" return cfg - + @staticmethod def addrstr(x): if x.find(":") >= 0: - net = IPv6Prefix(x) + net = Ipv6Prefix(x) fam = "inet6 ::" else: - net = IPv4Prefix(x) + net = Ipv4Prefix(x) fam = "inet 0.0.0.0" - if net.maxaddr() == net.minaddr(): + if net.max_addr() == net.min_addr(): return "" else: if os.uname()[0] == "Linux": @@ -115,9 +113,8 @@ class DefaultRouteService(UtilService): rtcmd = "route add -%s" % fam else: raise Exception, "unknown platform" - return "%s %s" % (rtcmd, net.minaddr()) - -addservice(DefaultRouteService) + return "%s %s" % (rtcmd, net.min_addr()) + class DefaultMulticastRouteService(UtilService): _name = "DefaultMulticastRoute" @@ -144,8 +141,7 @@ class DefaultMulticastRouteService(UtilService): cfg += "\n" break return cfg - -addservice(DefaultMulticastRouteService) + class StaticRouteService(UtilService): _name = "StaticRoute" @@ -160,23 +156,23 @@ class 
StaticRouteService(UtilService): cfg += "# NOTE: this service must be customized to be of any use\n" cfg += "# Below are samples that you can uncomment and edit.\n#\n" for ifc in node.netifs(): - if hasattr(ifc, 'control') and ifc.control == True: + if hasattr(ifc, 'control') and ifc.control is True: continue cfg += "\n".join(map(cls.routestr, ifc.addrlist)) cfg += "\n" return cfg - + @staticmethod def routestr(x): if x.find(":") >= 0: - net = IPv6Prefix(x) + net = Ipv6Prefix(x) fam = "inet6" dst = "3ffe:4::/64" else: - net = IPv4Prefix(x) + net = Ipv4Prefix(x) fam = "inet" dst = "10.9.8.0/24" - if net.maxaddr() == net.minaddr(): + if net.max_addr() == net.min_addr(): return "" else: if os.uname()[0] == "Linux": @@ -185,9 +181,8 @@ class StaticRouteService(UtilService): rtcmd = "#/sbin/route add -%s %s" % (fam, dst) else: raise Exception, "unknown platform" - return "%s %s" % (rtcmd, net.minaddr()) + return "%s %s" % (rtcmd, net.min_addr()) -addservice(StaticRouteService) class SshService(UtilService): _name = "SSH" @@ -200,12 +195,13 @@ class SshService(UtilService): _startup = ("sh startsshd.sh",) _shutdown = ("killall sshd",) _validate = () - + @classmethod def generateconfig(cls, node, filename, services): - ''' Use a startup script for launching sshd in order to wait for host - key generation. - ''' + """ + Use a startup script for launching sshd in order to wait for host + key generation. + """ if os.uname()[0] == "FreeBSD": sshcfgdir = node.nodedir sshstatedir = node.nodedir @@ -264,7 +260,6 @@ UsePAM yes UseDNS no """ % (sshcfgdir, sshstatedir, sshlibdir) -addservice(SshService) class DhcpService(UtilService): _name = "DHCP" @@ -273,12 +268,13 @@ class DhcpService(UtilService): _startup = ("dhcpd",) _shutdown = ("killall dhcpd",) _validate = ("pidof dhcpd",) - + @classmethod def generateconfig(cls, node, filename, services): - ''' Generate a dhcpd config file using the network address of - each interface. - ''' + """ + Generate a dhcpd config file using the network address of + each interface. + """ cfg = """\ # auto-generated by DHCP service (utility.py) # NOTE: move these option lines into the desired pool { } block(s) below @@ -294,25 +290,26 @@ max-lease-time 7200; ddns-update-style none; """ for ifc in node.netifs(): - if hasattr(ifc, 'control') and ifc.control == True: + if hasattr(ifc, 'control') and ifc.control is True: continue cfg += "\n".join(map(cls.subnetentry, ifc.addrlist)) cfg += "\n" return cfg - + @staticmethod def subnetentry(x): - ''' Generate a subnet declaration block given an IPv4 prefix string - for inclusion in the dhcpd3 config file. - ''' + """ + Generate a subnet declaration block given an IPv4 prefix string + for inclusion in the dhcpd3 config file. + """ if x.find(":") >= 0: return "" else: addr = x.split("/")[0] - net = IPv4Prefix(x) + net = Ipv4Prefix(x) # divide the address space in half - rangelow = net.addr(net.numaddr() / 2) - rangehigh = net.maxaddr() + rangelow = net.addr(net.num_addr() / 2) + rangehigh = net.max_addr() return """ subnet %s netmask %s { pool { @@ -321,23 +318,24 @@ subnet %s netmask %s { option routers %s; } } -""" % (net.prefixstr(), net.netmaskstr(), rangelow, rangehigh, addr) +""" % (net.prefix_str(), net.netmask_str(), rangelow, rangehigh, addr) -addservice(DhcpService) class DhcpClientService(UtilService): - ''' Use a DHCP client for all interfaces for addressing. - ''' + """ + Use a DHCP client for all interfaces for addressing. 
+ """ _name = "DHCPClient" _configs = ("startdhcpclient.sh",) _startup = ("sh startdhcpclient.sh",) _shutdown = ("killall dhclient",) _validate = ("pidof dhclient",) - + @classmethod def generateconfig(cls, node, filename, services): - ''' Generate a script to invoke dhclient on all interfaces. - ''' + """ + Generate a script to invoke dhclient on all interfaces. + """ cfg = "#!/bin/sh\n" cfg += "# auto-generated by DHCPClient service (utility.py)\n" cfg += "# uncomment this mkdir line and symlink line to enable client-" @@ -350,25 +348,26 @@ class DhcpClientService(UtilService): cfg += "#ln -s /var/run/resolvconf/interface/%s.dhclient" % ifc.name cfg += " /var/run/resolvconf/resolv.conf\n" cfg += "/sbin/dhclient -nw -pf /var/run/dhclient-%s.pid" % ifc.name - cfg += " -lf /var/run/dhclient-%s.lease %s\n" % (ifc.name, ifc.name) + cfg += " -lf /var/run/dhclient-%s.lease %s\n" % (ifc.name, ifc.name) return cfg - -addservice(DhcpClientService) + class FtpService(UtilService): - ''' Start a vsftpd server. - ''' + """ + Start a vsftpd server. + """ _name = "FTP" _configs = ("vsftpd.conf",) _dirs = ("/var/run/vsftpd/empty", "/var/ftp",) _startup = ("vsftpd ./vsftpd.conf",) _shutdown = ("killall vsftpd",) _validate = ("pidof vsftpd",) - + @classmethod def generateconfig(cls, node, filename, services): - ''' Generate a vsftpd.conf configuration file. - ''' + """ + Generate a vsftpd.conf configuration file. + """ return """\ # vsftpd.conf auto-generated by FTP service (utility.py) listen=YES @@ -384,26 +383,27 @@ secure_chroot_dir=/var/run/vsftpd/empty anon_root=/var/ftp """ -addservice(FtpService) class HttpService(UtilService): - ''' Start an apache server. - ''' + """ + Start an apache server. + """ _name = "HTTP" _configs = ("/etc/apache2/apache2.conf", "/etc/apache2/envvars", "/var/www/index.html",) _dirs = ("/etc/apache2", "/var/run/apache2", "/var/log/apache2", - "/run/lock", "/var/lock/apache2", "/var/www", ) + "/run/lock", "/var/lock/apache2", "/var/www",) _startup = ("chown www-data /var/lock/apache2", "apache2ctl start",) _shutdown = ("apache2ctl stop",) _validate = ("pidof apache2",) APACHEVER22, APACHEVER24 = (22, 24) - + @classmethod def generateconfig(cls, node, filename, services): - ''' Generate an apache2.conf configuration file. - ''' + """ + Generate an apache2.conf configuration file. + """ if filename == cls._configs[0]: return cls.generateapache2conf(node, filename, services) elif filename == cls._configs[1]: @@ -415,43 +415,45 @@ class HttpService(UtilService): @classmethod def detectversionfromcmd(cls): - ''' Detect the apache2 version using the 'a2query' command. - ''' + """ + Detect the apache2 version using the 'a2query' command. 
+ """ try: - status, result = cmdresult(['a2query', '-v']) - except Exception: + status, result = utils.cmdresult(['a2query', '-v']) + except subprocess.CalledProcessError: status = -1 + if status == 0 and result[:3] == '2.4': return cls.APACHEVER24 - return cls.APACHEVER22 + return cls.APACHEVER22 @classmethod def generateapache2conf(cls, node, filename, services): - lockstr = { cls.APACHEVER22: - 'LockFile ${APACHE_LOCK_DIR}/accept.lock\n', - cls.APACHEVER24: - 'Mutex file:${APACHE_LOCK_DIR} default\n', } - mpmstr = { cls.APACHEVER22: '', cls.APACHEVER24: - 'LoadModule mpm_worker_module /usr/lib/apache2/modules/mod_mpm_worker.so\n', } + lockstr = {cls.APACHEVER22: + 'LockFile ${APACHE_LOCK_DIR}/accept.lock\n', + cls.APACHEVER24: + 'Mutex file:${APACHE_LOCK_DIR} default\n', } + mpmstr = {cls.APACHEVER22: '', cls.APACHEVER24: + 'LoadModule mpm_worker_module /usr/lib/apache2/modules/mod_mpm_worker.so\n', } - permstr = { cls.APACHEVER22: - ' Order allow,deny\n Deny from all\n Satisfy all\n', - cls.APACHEVER24: - ' Require all denied\n', } + permstr = {cls.APACHEVER22: + ' Order allow,deny\n Deny from all\n Satisfy all\n', + cls.APACHEVER24: + ' Require all denied\n', } - authstr = { cls.APACHEVER22: - 'LoadModule authz_default_module /usr/lib/apache2/modules/mod_authz_default.so\n', - cls.APACHEVER24: - 'LoadModule authz_core_module /usr/lib/apache2/modules/mod_authz_core.so\n', } + authstr = {cls.APACHEVER22: + 'LoadModule authz_default_module /usr/lib/apache2/modules/mod_authz_default.so\n', + cls.APACHEVER24: + 'LoadModule authz_core_module /usr/lib/apache2/modules/mod_authz_core.so\n', } - permstr2 = { cls.APACHEVER22: - '\t\tOrder allow,deny\n\t\tallow from all\n', + permstr2 = {cls.APACHEVER22: + '\t\tOrder allow,deny\n\t\tallow from all\n', cls.APACHEVER24: - '\t\tRequire all granted\n', } + '\t\tRequire all granted\n', } version = cls.detectversionfromcmd() - cfg ="# apache2.conf generated by utility.py:HttpService\n" + cfg = "# apache2.conf generated by utility.py:HttpService\n" cfg += lockstr[version] cfg += """\ PidFile ${APACHE_PID_FILE} @@ -474,7 +476,7 @@ KeepAliveTimeout 5 StartServers 2 MinSpareThreads 25 - MaxSpareThreads 75 + MaxSpareThreads 75 ThreadLimit 64 ThreadsPerChild 25 MaxClients 150 @@ -484,7 +486,7 @@ KeepAliveTimeout 5 StartServers 2 MinSpareThreads 25 - MaxSpareThreads 75 + MaxSpareThreads 75 ThreadLimit 64 ThreadsPerChild 25 MaxClients 150 @@ -590,16 +592,16 @@ export LANG for ifc in node.netifs(): if hasattr(ifc, 'control') and ifc.control == True: continue - body += "
<li>%s - %s</li>\n" % (ifc.name, ifc.addrlist)
+            body += "<li>%s - %s</li>
  • \n" % (ifc.name, ifc.addrlist) return "%s" % body -addservice(HttpService) class PcapService(UtilService): - ''' Pcap service for logging packets. - ''' + """ + Pcap service for logging packets. + """ _name = "pcap" - _configs = ("pcap.sh", ) + _configs = ("pcap.sh",) _dirs = () _startindex = 1 _startup = ("sh pcap.sh start",) @@ -609,8 +611,9 @@ class PcapService(UtilService): @classmethod def generateconfig(cls, node, filename, services): - ''' Generate a startpcap.sh traffic logging script. - ''' + """ + Generate a startpcap.sh traffic logging script. + """ cfg = """ #!/bin/sh # set tcpdump options here (see 'man tcpdump' for help) @@ -625,7 +628,7 @@ if [ "x$1" = "xstart" ]; then cfg += '# ' redir = "< /dev/null" cfg += "tcpdump ${DUMPOPTS} -w %s.%s.pcap -i %s %s &\n" % \ - (node.name, ifc.name, ifc.name, redir) + (node.name, ifc.name, ifc.name, redir) cfg += """ elif [ "x$1" = "xstop" ]; then @@ -635,7 +638,6 @@ fi; """ return cfg -addservice(PcapService) class RadvdService(UtilService): _name = "radvd" @@ -644,12 +646,13 @@ class RadvdService(UtilService): _startup = ("radvd -C /etc/radvd/radvd.conf -m logfile -l /var/log/radvd.log",) _shutdown = ("pkill radvd",) _validate = ("pidof radvd",) - + @classmethod def generateconfig(cls, node, filename, services): - ''' Generate a RADVD router advertisement daemon config file + """ + Generate a RADVD router advertisement daemon config file using the network address of each interface. - ''' + """ cfg = "# auto-generated by RADVD service (utility.py)\n" for ifc in node.netifs(): if hasattr(ifc, 'control') and ifc.control == True: @@ -679,29 +682,30 @@ interface %s """ % prefix cfg += "};\n" return cfg - + @staticmethod def subnetentry(x): - ''' Generate a subnet declaration block given an IPv6 prefix string - for inclusion in the RADVD config file. - ''' + """ + Generate a subnet declaration block given an IPv6 prefix string + for inclusion in the RADVD config file. + """ if x.find(":") >= 0: - net = IPv6Prefix(x) + net = Ipv6Prefix(x) return str(net) else: return "" -addservice(RadvdService) class AtdService(UtilService): - ''' Atd service for scheduling at jobs - ''' + """ + Atd service for scheduling at jobs + """ _name = "atd" _configs = ("startatd.sh",) _dirs = ("/var/spool/cron/atjobs", "/var/spool/cron/atspool") - _startup = ("sh startatd.sh", ) - _shutdown = ("pkill atd", ) - + _startup = ("sh startatd.sh",) + _shutdown = ("pkill atd",) + @classmethod def generateconfig(cls, node, filename, services): return """ @@ -711,14 +715,28 @@ chown -R daemon /var/spool/cron/* chmod -R 700 /var/spool/cron/* atd """ - -addservice(AtdService) + class UserDefinedService(UtilService): - ''' Dummy service allowing customization of anything. - ''' + """ + Dummy service allowing customization of anything. + """ _name = "UserDefined" _startindex = 50 _meta = "Customize this service to do anything upon startup." 
-addservice(UserDefinedService) + +def load_services(): + ServiceManager.add(IPForwardService) + ServiceManager.add(DefaultRouteService) + ServiceManager.add(DefaultMulticastRouteService) + ServiceManager.add(StaticRouteService) + ServiceManager.add(SshService) + ServiceManager.add(DhcpService) + ServiceManager.add(DhcpClientService) + ServiceManager.add(FtpService) + ServiceManager.add(HttpService) + ServiceManager.add(PcapService) + ServiceManager.add(RadvdService) + ServiceManager.add(AtdService) + ServiceManager.add(UserDefinedService) diff --git a/daemon/core/services/xorp.py b/daemon/core/services/xorp.py index 062f4901..f03b6731 100644 --- a/daemon/core/services/xorp.py +++ b/daemon/core/services/xorp.py @@ -1,24 +1,19 @@ -# -# CORE -# Copyright (c)2011-2012 the Boeing Company. -# See the LICENSE file included in this distribution. -# -# author: Jeff Ahrenholz -# -''' +""" xorp.py: defines routing services provided by the XORP routing suite. -''' +""" -import os +from core.misc import log +from core.service import CoreService +from core.service import ServiceManager + +logger = log.get_logger(__name__) -from core.service import CoreService, addservice -from core.misc.ipaddr import IPv4Prefix -from core.constants import * class XorpRtrmgr(CoreService): - ''' XORP router manager service builds a config.boot file based on other + """ + XORP router manager service builds a config.boot file based on other enabled XORP services, and launches necessary daemons upon startup. - ''' + """ _name = "xorp_rtrmgr" _group = "XORP" _depends = () @@ -26,15 +21,16 @@ class XorpRtrmgr(CoreService): _configs = ("/etc/xorp/config.boot",) _startindex = 35 _startup = ("xorp_rtrmgr -d -b %s -l /var/log/%s.log -P /var/run/%s.pid" % (_configs[0], _name, _name),) - _shutdown = ("killall xorp_rtrmgr", ) - _validate = ("pidof xorp_rtrmgr", ) + _shutdown = ("killall xorp_rtrmgr",) + _validate = ("pidof xorp_rtrmgr",) @classmethod def generateconfig(cls, node, filename, services): - ''' Returns config.boot configuration file text. Other services that - depend on this will have generatexorpconfig() hooks that are + """ + Returns config.boot configuration file text. Other services that + depend on this will have generatexorpconfig() hooks that are invoked here. Filename currently ignored. - ''' + """ cfg = "interfaces {\n" for ifc in node.netifs(): cfg += " interface %s {\n" % ifc.name @@ -50,40 +46,40 @@ class XorpRtrmgr(CoreService): s._depends.index(cls._name) cfg += s.generatexorpconfig(node) except ValueError: - pass + logger.exception("error getting value from service: %s", cls._name) + return cfg - + @staticmethod def addrstr(x): - ''' helper for mapping IP addresses to XORP config statements - ''' - try: - (addr, plen) = x.split("/") - except Exception: - raise ValueError, "invalid address" + """ + helper for mapping IP addresses to XORP config statements + """ + addr, plen = x.split("/") cfg = "\t address %s {\n" % addr cfg += "\t\tprefix-length: %s\n" % plen - cfg +="\t }\n" + cfg += "\t }\n" return cfg - + @staticmethod def lladdrstr(ifc): - ''' helper for adding link-local address entries (required by OSPFv3) - ''' + """ + helper for adding link-local address entries (required by OSPFv3) + """ cfg = "\t address %s {\n" % ifc.hwaddr.tolinklocal() cfg += "\t\tprefix-length: 64\n" cfg += "\t }\n" return cfg - -addservice(XorpRtrmgr) + class XorpService(CoreService): - ''' Parent class for XORP services. Defines properties and methods - common to XORP's routing daemons. 
- ''' + """ + Parent class for XORP services. Defines properties and methods + common to XORP's routing daemons. + """ _name = "XorpDaemon" _group = "XORP" - _depends = ("xorp_rtrmgr", ) + _depends = ("xorp_rtrmgr",) _dirs = () _configs = () _startindex = 40 @@ -93,22 +89,24 @@ class XorpService(CoreService): @staticmethod def fea(forwarding): - ''' Helper to add a forwarding engine entry to the config file. - ''' + """ + Helper to add a forwarding engine entry to the config file. + """ cfg = "fea {\n" cfg += " %s {\n" % forwarding cfg += "\tdisable:false\n" cfg += " }\n" cfg += "}\n" return cfg - + @staticmethod def mfea(forwarding, ifcs): - ''' Helper to add a multicast forwarding engine entry to the config file. - ''' + """ + Helper to add a multicast forwarding engine entry to the config file. + """ names = [] for ifc in ifcs: - if hasattr(ifc, 'control') and ifc.control == True: + if hasattr(ifc, 'control') and ifc.control is True: continue names.append(ifc.name) names.append("register_vif") @@ -125,11 +123,11 @@ class XorpService(CoreService): cfg += "}\n" return cfg - @staticmethod def policyexportconnected(): - ''' Helper to add a policy statement for exporting connected routes. - ''' + """ + Helper to add a policy statement for exporting connected routes. + """ cfg = "policy {\n" cfg += " policy-statement export-connected {\n" cfg += "\tterm 100 {\n" @@ -143,34 +141,37 @@ class XorpService(CoreService): @staticmethod def routerid(node): - ''' Helper to return the first IPv4 address of a node as its router ID. - ''' + """ + Helper to return the first IPv4 address of a node as its router ID. + """ for ifc in node.netifs(): - if hasattr(ifc, 'control') and ifc.control == True: + if hasattr(ifc, 'control') and ifc.control is True: continue for a in ifc.addrlist: if a.find(".") >= 0: - return a.split('/')[0] - #raise ValueError, "no IPv4 address found for router ID" + return a.split('/')[0] + # raise ValueError, "no IPv4 address found for router ID" return "0.0.0.0" @classmethod - def generateconfig(cls, node, filename, services): + def generateconfig(cls, node, filename, services): return "" @classmethod - def generatexorpconfig(cls, node): + def generatexorpconfig(cls, node): return "" + class XorpOspfv2(XorpService): - ''' The OSPFv2 service provides IPv4 routing for wired networks. It does - not build its own configuration file but has hooks for adding to the - unified XORP configuration file. - ''' + """ + The OSPFv2 service provides IPv4 routing for wired networks. It does + not build its own configuration file but has hooks for adding to the + unified XORP configuration file. + """ _name = "XORP_OSPFv2" @classmethod - def generatexorpconfig(cls, node): + def generatexorpconfig(cls, node): cfg = cls.fea("unicast-forwarding4") rtrid = cls.routerid(node) cfg += "\nprotocols {\n" @@ -178,7 +179,7 @@ class XorpOspfv2(XorpService): cfg += "\trouter-id: %s\n" % rtrid cfg += "\tarea 0.0.0.0 {\n" for ifc in node.netifs(): - if hasattr(ifc, 'control') and ifc.control == True: + if hasattr(ifc, 'control') and ifc.control is True: continue cfg += "\t interface %s {\n" % ifc.name cfg += "\t\tvif %s {\n" % ifc.name @@ -194,18 +195,18 @@ class XorpOspfv2(XorpService): cfg += " }\n" cfg += "}\n" return cfg - -addservice(XorpOspfv2) + class XorpOspfv3(XorpService): - ''' The OSPFv3 service provides IPv6 routing. It does - not build its own configuration file but has hooks for adding to the - unified XORP configuration file. - ''' + """ + The OSPFv3 service provides IPv6 routing. 
It does + not build its own configuration file but has hooks for adding to the + unified XORP configuration file. + """ _name = "XORP_OSPFv3" @classmethod - def generatexorpconfig(cls, node): + def generatexorpconfig(cls, node): cfg = cls.fea("unicast-forwarding6") rtrid = cls.routerid(node) cfg += "\nprotocols {\n" @@ -213,7 +214,7 @@ class XorpOspfv3(XorpService): cfg += "\trouter-id: %s\n" % rtrid cfg += "\tarea 0.0.0.0 {\n" for ifc in node.netifs(): - if hasattr(ifc, 'control') and ifc.control == True: + if hasattr(ifc, 'control') and ifc.control is True: continue cfg += "\t interface %s {\n" % ifc.name cfg += "\t\tvif %s {\n" % ifc.name @@ -223,15 +224,15 @@ class XorpOspfv3(XorpService): cfg += " }\n" cfg += "}\n" return cfg - -addservice(XorpOspfv3) + class XorpBgp(XorpService): - ''' IPv4 inter-domain routing. AS numbers and peers must be customized. - ''' + """ + IPv4 inter-domain routing. AS numbers and peers must be customized. + """ _name = "XORP_BGP" _custom_needed = True - + @classmethod def generatexorpconfig(cls, node): cfg = "/* This is a sample config that should be customized with\n" @@ -253,22 +254,23 @@ class XorpBgp(XorpService): cfg += "}\n" return cfg -addservice(XorpBgp) class XorpRip(XorpService): - ''' RIP IPv4 unicast routing. - ''' + """ + RIP IPv4 unicast routing. + """ + _name = "XORP_RIP" @classmethod - def generatexorpconfig(cls, node): + def generatexorpconfig(cls, node): cfg = cls.fea("unicast-forwarding4") cfg += cls.policyexportconnected() cfg += "\nprotocols {\n" cfg += " rip {\n" cfg += "\texport: \"export-connected\"\n" for ifc in node.netifs(): - if hasattr(ifc, 'control') and ifc.control == True: + if hasattr(ifc, 'control') and ifc.control is True: continue cfg += "\tinterface %s {\n" % ifc.name cfg += "\t vif %s {\n" % ifc.name @@ -284,68 +286,68 @@ class XorpRip(XorpService): cfg += " }\n" cfg += "}\n" return cfg - -addservice(XorpRip) + class XorpRipng(XorpService): - ''' RIP NG IPv6 unicast routing. - ''' + """ + RIP NG IPv6 unicast routing. + """ _name = "XORP_RIPNG" @classmethod - def generatexorpconfig(cls, node): + def generatexorpconfig(cls, node): cfg = cls.fea("unicast-forwarding6") cfg += cls.policyexportconnected() cfg += "\nprotocols {\n" cfg += " ripng {\n" cfg += "\texport: \"export-connected\"\n" for ifc in node.netifs(): - if hasattr(ifc, 'control') and ifc.control == True: + if hasattr(ifc, 'control') and ifc.control is True: continue cfg += "\tinterface %s {\n" % ifc.name cfg += "\t vif %s {\n" % ifc.name -# for a in ifc.addrlist: -# if a.find(":") < 0: -# continue -# addr = a.split("/")[0] -# cfg += "\t\taddress %s {\n" % addr -# cfg += "\t\t disable: false\n" -# cfg += "\t\t}\n" + # for a in ifc.addrlist: + # if a.find(":") < 0: + # continue + # addr = a.split("/")[0] + # cfg += "\t\taddress %s {\n" % addr + # cfg += "\t\t disable: false\n" + # cfg += "\t\t}\n" cfg += "\t\taddress %s {\n" % ifc.hwaddr.tolinklocal() cfg += "\t\t disable: false\n" cfg += "\t\t}\n" cfg += "\t }\n" - cfg += "\t}\n" + cfg += "\t}\n" cfg += " }\n" cfg += "}\n" return cfg - -addservice(XorpRipng) + class XorpPimSm4(XorpService): - ''' PIM Sparse Mode IPv4 multicast routing. - ''' + """ + PIM Sparse Mode IPv4 multicast routing. 
+ """ _name = "XORP_PIMSM4" @classmethod - def generatexorpconfig(cls, node): + def generatexorpconfig(cls, node): cfg = cls.mfea("mfea4", node.netifs()) - + cfg += "\nprotocols {\n" cfg += " igmp {\n" names = [] for ifc in node.netifs(): - if hasattr(ifc, 'control') and ifc.control == True: + if hasattr(ifc, 'control') and ifc.control is True: continue names.append(ifc.name) cfg += "\tinterface %s {\n" % ifc.name cfg += "\t vif %s {\n" % ifc.name cfg += "\t\tdisable: false\n" cfg += "\t }\n" - cfg += "\t}\n" + cfg += "\t}\n" cfg += " }\n" cfg += "}\n" - + cfg += "\nprotocols {\n" cfg += " pimsm4 {\n" @@ -368,46 +370,46 @@ class XorpPimSm4(XorpService): cfg += "\t\t}\n" cfg += "\t }\n" cfg += "\t}\n" - + cfg += " }\n" cfg += "}\n" - + cfg += "\nprotocols {\n" cfg += " fib2mrib {\n" cfg += "\tdisable: false\n" cfg += " }\n" cfg += "}\n" return cfg - -addservice(XorpPimSm4) + class XorpPimSm6(XorpService): - ''' PIM Sparse Mode IPv6 multicast routing. - ''' + """ + PIM Sparse Mode IPv6 multicast routing. + """ _name = "XORP_PIMSM6" @classmethod - def generatexorpconfig(cls, node): + def generatexorpconfig(cls, node): cfg = cls.mfea("mfea6", node.netifs()) - + cfg += "\nprotocols {\n" cfg += " mld {\n" names = [] for ifc in node.netifs(): - if hasattr(ifc, 'control') and ifc.control == True: + if hasattr(ifc, 'control') and ifc.control is True: continue names.append(ifc.name) cfg += "\tinterface %s {\n" % ifc.name cfg += "\t vif %s {\n" % ifc.name cfg += "\t\tdisable: false\n" cfg += "\t }\n" - cfg += "\t}\n" + cfg += "\t}\n" cfg += " }\n" cfg += "}\n" - + cfg += "\nprotocols {\n" cfg += " pimsm6 {\n" - + names.append("register_vif") for name in names: cfg += "\tinterface %s {\n" % name @@ -427,33 +429,33 @@ class XorpPimSm6(XorpService): cfg += "\t\t}\n" cfg += "\t }\n" cfg += "\t}\n" - + cfg += " }\n" cfg += "}\n" - + cfg += "\nprotocols {\n" cfg += " fib2mrib {\n" cfg += "\tdisable: false\n" cfg += " }\n" cfg += "}\n" return cfg - -addservice(XorpPimSm6) + class XorpOlsr(XorpService): - ''' OLSR IPv4 unicast MANET routing. - ''' + """ + OLSR IPv4 unicast MANET routing. + """ _name = "XORP_OLSR" @classmethod - def generatexorpconfig(cls, node): + def generatexorpconfig(cls, node): cfg = cls.fea("unicast-forwarding4") rtrid = cls.routerid(node) cfg += "\nprotocols {\n" cfg += " olsr4 {\n" cfg += "\tmain-address: %s\n" % rtrid for ifc in node.netifs(): - if hasattr(ifc, 'control') and ifc.control == True: + if hasattr(ifc, 'control') and ifc.control is True: continue cfg += "\tinterface %s {\n" % ifc.name cfg += "\t vif %s {\n" % ifc.name @@ -468,5 +470,15 @@ class XorpOlsr(XorpService): cfg += " }\n" cfg += "}\n" return cfg - -addservice(XorpOlsr) + + +def load_services(): + ServiceManager.add(XorpRtrmgr) + ServiceManager.add(XorpOspfv2) + ServiceManager.add(XorpOspfv3) + ServiceManager.add(XorpBgp) + ServiceManager.add(XorpRip) + ServiceManager.add(XorpRipng) + ServiceManager.add(XorpPimSm4) + ServiceManager.add(XorpPimSm6) + ServiceManager.add(XorpOlsr) diff --git a/daemon/core/session.py b/daemon/core/session.py index f078151c..d171fd8f 100644 --- a/daemon/core/session.py +++ b/daemon/core/session.py @@ -1,1257 +1,1589 @@ -# -# CORE -# Copyright (c)2010-2013 the Boeing Company. -# See the LICENSE file included in this distribution. -# -# authors: Tom Goff -# Jeff Ahrenholz -# -''' +""" session.py: defines the Session class used by the core-daemon daemon program that manages a CORE session. 
-''' +""" -import os, sys, tempfile, shutil, shlex, atexit, gc, pwd -import threading, time, random -import traceback +import atexit +import os +import random +import shlex +import shutil import subprocess +import tempfile +import threading +import time +import pwd + +from core import constants from core.api import coreapi -if os.uname()[0] == "Linux": - from core.netns import nodes - from core.netns.vnet import GreTapBridge -elif os.uname()[0] == "FreeBSD": - from core.bsd import nodes -from core.emane import emane -from core.misc.utils import check_call, mutedetach, readfileintodict, \ - filemunge, filedemunge - -from core.conf import ConfigurableManager, Configurable -from core.location import CoreLocation -from core.service import CoreServices from core.broker import CoreBroker -from core.mobility import MobilityManager -from core.sdt import Sdt -from core.misc.ipaddr import MacAddr +from core.conf import Configurable +from core.conf import ConfigurableManager +from core.data import ConfigData +from core.data import EventData +from core.data import ExceptionData +from core.data import FileData +from core.emane.emanemanager import EmaneManager +from core.enumerations import ConfigDataTypes +from core.enumerations import ConfigFlags +from core.enumerations import ConfigTlvs +from core.enumerations import EventTypes +from core.enumerations import ExceptionLevels +from core.enumerations import MessageFlags +from core.enumerations import MessageTypes +from core.enumerations import NodeTypes +from core.enumerations import RegisterTlvs +from core.location import CoreLocation +from core.misc import log +from core.misc import nodeutils +from core.misc import utils from core.misc.event import EventLoop -from core.constants import * -from core.misc.xmlsession import savesessionxml +from core.misc.ipaddress import MacAddress +from core.mobility import BasicRangeModel +from core.mobility import MobilityManager +from core.mobility import Ns2ScriptedMobility +from core.netns import nodes +from core.sdt import Sdt +from core.service import CoreServices +from core.xen.xenconfig import XenConfigManager +from core.xml.xmlsession import save_session_xml + +logger = log.get_logger(__name__) + + +class SessionManager(object): + """ + Manages currently known sessions. + """ + sessions = set() + session_lock = threading.Lock() + + @classmethod + def add(cls, session): + """ + Add a session to the manager. + + :param Session session: session to add + :return: nothing + """ + with cls.session_lock: + logger.info("adding session to manager: %s", session.session_id) + cls.sessions.add(session) + + @classmethod + def remove(cls, session): + """ + Remove session from the manager. + + :param Session session: session to remove + :return: nothing + """ + with cls.session_lock: + logger.info("removing session from manager: %s", session.session_id) + if session in cls.sessions: + cls.sessions.remove(session) + else: + logger.info("session was already removed: %s", session.session_id) + + @classmethod + def on_exit(cls): + """ + Method used to shutdown all currently known sessions, in case of unexpected exit. + + :return: nothing + """ + logger.info("caught program exit, shutting down all known sessions") + while cls.sessions: + with cls.session_lock: + session = cls.sessions.pop() + logger.error("WARNING: automatically shutting down non-persistent session %s - %s", + session.session_id, session.name) + session.shutdown() -from core.xen import xenconfig class Session(object): + """ + CORE session manager. 
+ """ - # sessions that get automatically shutdown when the process - # terminates normally - __sessions = set() + def __init__(self, session_id, config=None, persistent=False, mkdir=True): + """ + Create a Session instance. - ''' CORE session manager. - ''' - def __init__(self, sessionid = None, cfg = {}, server = None, - persistent = False, mkdir = True): - if sessionid is None: - # try to keep this short since it's used to construct - # network interface names - pid = os.getpid() - sessionid = ((pid >> 16) ^ - (pid & ((1 << 16) - 1))) - sessionid ^= ((id(self) >> 16) ^ (id(self) & ((1 << 16) - 1))) - sessionid &= 0xffff - self.sessionid = sessionid - self.sessiondir = os.path.join(tempfile.gettempdir(), - "pycore.%s" % self.sessionid) + :param int session_id: session id + :param dict config: session configuration + :param bool persistent: flag is session is considered persistent + :param bool mkdir: flag to determine if a directory should be made + """ + self.session_id = session_id + + # dict of configuration items from /etc/core/core.conf config file + if not config: + config = {} + self.config = config + + # define and create session directory when desired + self.session_dir = os.path.join(tempfile.gettempdir(), "pycore.%s" % self.session_id) if mkdir: - os.mkdir(self.sessiondir) + os.mkdir(self.session_dir) + self.name = None - self.filename = None + self.file_name = None self.thumbnail = None self.user = None - self.node_count = None - self._time = time.time() - self.evq = EventLoop() + self._state_time = time.time() + self.event_loop = EventLoop() + # dict of objects: all nodes and nets - self._objs = {} - self._objslock = threading.Lock() + self.objects = {} + self._objects_lock = threading.Lock() + # dict of configurable objects - self._confobjs = {} - self._confobjslock = threading.Lock() - self._handlers = set() - self._handlerslock = threading.Lock() - self._state = None + self.config_objects = {} + self._config_objects_lock = threading.Lock() + + # TODO: should the default state be definition? 
+ self.state = EventTypes.NONE.value + self._state_file = os.path.join(self.session_dir, "state") + self._hooks = {} self._state_hooks = {} - # dict of configuration items from /etc/core/core.conf config file - self.cfg = cfg - self.add_state_hook(coreapi.CORE_EVENT_RUNTIME_STATE, - self.runtime_state_hook) - self.setstate(state=coreapi.CORE_EVENT_DEFINITION_STATE, - info=False, sendevent=False) - self.server = server + + self.add_state_hook(state=EventTypes.RUNTIME_STATE.value, hook=self.runtime_state_hook) + if not persistent: - self.addsession(self) + SessionManager.add(self) + self.master = False - self.broker = CoreBroker(session=self, verbose=True) - self.location = CoreLocation(self) - self.mobility = MobilityManager(self) - self.services = CoreServices(self) - self.emane = emane.Emane(self) - self.xen = xenconfig.XenConfigManager(self) - self.sdt = Sdt(self) + + # setup broker + self.broker = CoreBroker(session=self) + self.add_config_object(CoreBroker.name, CoreBroker.config_type, self.broker.configure) + + # setup location + self.location = CoreLocation() + self.add_config_object(CoreLocation.name, CoreLocation.config_type, self.location.configure) + + # setup mobiliy + self.mobility = MobilityManager(session=self) + self.add_config_object(MobilityManager.name, MobilityManager.config_type, self.mobility.configure) + self.add_config_object(BasicRangeModel.name, BasicRangeModel.config_type, BasicRangeModel.configure_mob) + self.add_config_object(Ns2ScriptedMobility.name, Ns2ScriptedMobility.config_type, + Ns2ScriptedMobility.configure_mob) + + # setup services + self.services = CoreServices(session=self) + self.add_config_object(CoreServices.name, CoreServices.config_type, self.services.configure) + + # setup emane + self.emane = EmaneManager(session=self) + self.add_config_object(EmaneManager.name, EmaneManager.config_type, self.emane.configure) + + # setup xen + self.xen = XenConfigManager(session=self) + self.add_config_object(XenConfigManager.name, XenConfigManager.config_type, self.xen.configure) + + # setup sdt + self.sdt = Sdt(session=self) + # future parameters set by the GUI may go here - self.options = SessionConfig(self) - self.metadata = SessionMetaData(self) + self.options = SessionConfig(session=self) + self.add_config_object(SessionConfig.name, SessionConfig.config_type, self.options.configure) + self.metadata = SessionMetaData() + self.add_config_object(SessionMetaData.name, SessionMetaData.config_type, self.metadata.configure) - @classmethod - def addsession(cls, session): - cls.__sessions.add(session) - - @classmethod - def delsession(cls, session): - try: - cls.__sessions.remove(session) - except KeyError: - pass - - @classmethod - def atexit(cls): - while cls.__sessions: - s = cls.__sessions.pop() - print >> sys.stderr, "WARNING: automatically shutting down " \ - "non-persistent session %s" % s.sessionid - s.shutdown() + # handlers for broadcasting information + self.event_handlers = [] + self.exception_handlers = [] + self.node_handlers = [] + self.link_handlers = [] + self.file_handlers = [] + self.config_handlers = [] + self.shutdown_handlers = [] def shutdown(self): - ''' Shut down all emulation objects and remove the session directory. - ''' - if hasattr(self, 'emane'): - self.emane.shutdown() - if hasattr(self, 'broker'): - self.broker.shutdown() - if hasattr(self, 'sdt'): - self.sdt.shutdown() - self.delobjs() + """ + Shutdown all emulation objects and remove the session directory. 
+ """ + + # shutdown emane + self.emane.shutdown() + + # shutdown broker + self.broker.shutdown() + + # shutdown NRL's SDT3D + self.sdt.shutdown() + + # delete all current objects + self.delete_objects() + preserve = False - if hasattr(self.options, 'preservedir'): - if self.options.preservedir == '1': - preserve = True + if hasattr(self.options, "preservedir") and self.options.preservedir == "1": + preserve = True + + # remove this sessions working directory if not preserve: - shutil.rmtree(self.sessiondir, ignore_errors = True) - if self.server: - self.server.delsession(self) - self.delsession(self) + shutil.rmtree(self.session_dir, ignore_errors=True) - def isconnected(self): - ''' Returns true if this session has a request handler. - ''' - with self._handlerslock: - if len(self._handlers) == 0: - return False - else: - return True + # remove this session from the manager + SessionManager.remove(self) - def connect(self, handler): - ''' Set the request handler for this session, making it connected. - ''' - # the master flag will only be set after a GUI has connected with the - # handler, e.g. not during normal startup - if handler.master is True: - self.master = True - with self._handlerslock: - self._handlers.add(handler) + # call session shutdown handlers + for handler in self.shutdown_handlers: + handler(self) - def disconnect(self, handler): - ''' Disconnect a request handler from this session. Shutdown this - session if there is no running emulation. - ''' - with self._handlerslock: - try: - self._handlers.remove(handler) - except KeyError: - raise ValueError, \ - "Handler %s not associated with this session" % handler - num_handlers = len(self._handlers) - if num_handlers == 0: - # shut down this session unless we are instantiating, running, - # or collecting final data - if self.getstate() < coreapi.CORE_EVENT_INSTANTIATION_STATE or \ - self.getstate() > coreapi.CORE_EVENT_DATACOLLECT_STATE: - self.shutdown() + def broadcast_event(self, event_data): + """ + Handle event data that should be provided to event handler. - def broadcast(self, src, msg): - ''' Send Node and Link CORE API messages to all handlers connected to this session. - ''' - self._handlerslock.acquire() - for handler in self._handlers: - if handler == src: - continue - if isinstance(msg, coreapi.CoreNodeMessage) or \ - isinstance(msg, coreapi.CoreLinkMessage): - try: - handler.sendall(msg.rawmsg) - except Exception, e: - self.warn("sendall() error: %s" % e) - self._handlerslock.release() + :param core.data.EventData event_data: event data to send out + :return: nothing + """ - def broadcastraw(self, src, data): - ''' Broadcast raw data to all handlers except src. - ''' - self._handlerslock.acquire() - for handler in self._handlers: - if handler == src: - continue - try: - handler.sendall(data) - except Exception, e: - self.warn("sendall() error: %s" % e) - self._handlerslock.release() + for handler in self.event_handlers: + handler(event_data) - def gethandler(self): - ''' Get one of the connected handlers, preferrably the master. - ''' - with self._handlerslock: - if len(self._handlers) == 0: - return None - for handler in self._handlers: - if handler.master: - return handler - for handler in self._handlers: - return handler + def broadcast_exception(self, exception_data): + """ + Handle exception data that should be provided to exception handlers. - def setstate(self, state, info = False, sendevent = False, - returnevent = False): - ''' Set the session state. 
When info is true, log the state change - event using the session handler's info method. When sendevent is - true, generate a CORE API Event Message and send to the connected - entity. - ''' - if state == self._state: - return [] - self._time = time.time() - self._state = state + :param core.data.ExceptionData exception_data: exception data to send out + :return: nothing + """ + + for handler in self.exception_handlers: + handler(exception_data) + + def broadcast_node(self, node_data): + """ + Handle node data that should be provided to node handlers. + + :param core.data.ExceptionData node_data: node data to send out + :return: nothing + """ + + for handler in self.node_handlers: + handler(node_data) + + def broadcast_file(self, file_data): + """ + Handle file data that should be provided to file handlers. + + :param core.data.FileData file_data: file data to send out + :return: nothing + """ + + for handler in self.file_handlers: + handler(file_data) + + def broadcast_config(self, config_data): + """ + Handle config data that should be provided to config handlers. + + :param core.data.ConfigData config_data: config data to send out + :return: nothing + """ + + for handler in self.config_handlers: + handler(config_data) + + def broadcast_link(self, link_data): + """ + Handle link data that should be provided to link handlers. + + :param core.data.ExceptionData link_data: link data to send out + :return: nothing + """ + + for handler in self.link_handlers: + handler(link_data) + + def set_state(self, state, send_event=False): + """ + Set the session's current state. + + :param int state: state to set to + :param send_event: if true, generate core API event messages + :return: nothing + """ + state_name = coreapi.state_name(state) + + if self.state == state: + logger.info("session is already in state: %s, skipping change", state_name) + return + + self.state = state + self._state_time = time.time() + logger.info("changing session %s to state %s(%s) at %s", + self.session_id, state, state_name, self._state_time) + + self.write_state(state) + self.run_hooks(state) self.run_state_hooks(state) - replies = [] - if self.isconnected() and info: - statename = coreapi.state_name(state) - with self._handlerslock: - for handler in self._handlers: - handler.info("SESSION %s STATE %d: %s at %s" % \ - (self.sessionid, state, statename, - time.ctime())) - self.writestate(state) - self.runhook(state) - if sendevent: - tlvdata = "" - tlvdata += coreapi.CoreEventTlv.pack(coreapi.CORE_TLV_EVENT_TYPE, - state) - msg = coreapi.CoreEventMessage.pack(0, tlvdata) - # send Event Message to connected handlers (e.g. GUI) - if self.isconnected(): - try: - if returnevent: - replies.append(msg) - else: - self.broadcastraw(None, msg) - except Exception, e: - self.warn("Error sending Event Message: %s" % e) + + if send_event: + event_data = EventData(event_type=state, time="%s" % time.time()) + self.broadcast_event(event_data) + # also inform slave servers - tmp = self.broker.handlerawmsg(msg) - return replies + # TODO: deal with broker, potentially broker should really live within the core server/handlers + # self.broker.handlerawmsg(message) + def write_state(self, state): + """ + Write the current state to a state file in the session dir. - def getstate(self): - ''' Retrieve the current state of the session. - ''' - return self._state - - def writestate(self, state): - ''' Write the current state to a state file in the session dir. 
- ''' + :param int state: state to write to file + :return: nothing + """ try: - f = open(os.path.join(self.sessiondir, "state"), "w") - f.write("%d %s\n" % (state, coreapi.state_name(state))) - f.close() - except Exception, e: - self.warn("Error writing state file: %s" % e) + state_file = open(self._state_file, "w") + state_file.write("%d %s\n" % (state, coreapi.state_name(state))) + state_file.close() + except IOError: + logger.exception("error writing state file: %s", state) - def runhook(self, state, hooks=None): - ''' Run hook scripts upon changing states. - If hooks is not specified, run all hooks in the given state. - ''' + def run_hooks(self, state): + """ + Run hook scripts upon changing states. If hooks is not specified, run all hooks in the given state. + + :param int state: state to run hooks for + :return: nothing + """ + + # check that state change hooks exist if state not in self._hooks: return - if hooks is None: - hooks = self._hooks[state] - for (filename, data) in hooks: - try: - f = open(os.path.join(self.sessiondir, filename), "w") - f.write(data) - f.close() - except Exception, e: - self.warn("Error writing hook '%s': %s" % (filename, e)) - self.info("Running hook %s for state %s" % (filename, state)) - try: - stdout = open(os.path.join(self.sessiondir, - filename + '.log'), 'w') - stderr = subprocess.STDOUT - except: - stdout = None - stderr = None - try: - check_call(["/bin/sh", filename], stdin=open(os.devnull, 'r'), - stdout=stdout, stderr=stderr, close_fds=True, - cwd=self.sessiondir, env=self.getenviron()) - except Exception, e: - self.warn("Error running hook '%s' for state %s: %s" % - (filename, state, e)) - def sethook(self, type, filename, srcname, data): - ''' Store a hook from a received File Message. - ''' - if srcname is not None: - raise NotImplementedError - (hookid, state) = type.split(':')[:2] - if not state.isdigit(): - self.warn("Error setting hook having state '%s'" % state) - return - state = int(state) - hook = (filename, data) - if state not in self._hooks: - self._hooks[state] = [hook,] + # retrieve all state hooks + hooks = self._hooks.get(state, []) + + # execute all state hooks + for hook in hooks: + self.run_hook(hook) else: - self._hooks[state].append(hook) + logger.info("no state hooks for %s", state) + + def set_hook(self, hook_type, file_name, source_name, data): + """ + Store a hook from a received file message. + + :param str hook_type: hook type + :param str file_name: file name for hook + :param str source_name: source name + :param data: hook data + :return: nothing + """ + logger.info("setting state hook: %s - %s from %s", hook_type, file_name, source_name) + + hook_id, state = hook_type.split(':')[:2] + if not state.isdigit(): + logger.error("error setting hook having state '%s'", state) + return + + state = int(state) + hook = file_name, data + + # append hook to current state hooks + state_hooks = self._hooks.setdefault(state, []) + state_hooks.append(hook) + # immediately run a hook if it is in the current state # (this allows hooks in the definition and configuration states) - if self.getstate() == state: - self.runhook(state, hooks = [hook,]) + if self.state == state: + logger.info("immediately running new state hook") + self.run_hook(hook) - def delhooks(self): - ''' Clear the hook scripts dict. - ''' - self._hooks = {} + def del_hooks(self): + """ + Clear the hook scripts dict. + """ + self._hooks.clear() + + def run_hook(self, hook): + """ + Run a hook. 
+ + :param tuple hook: hook to run + :return: nothing + """ + file_name, data = hook + logger.info("running hook %s", file_name) + + # write data to hook file + try: + hook_file = open(os.path.join(self.session_dir, file_name), "w") + hook_file.write(data) + hook_file.close() + except IOError: + logger.exception("error writing hook '%s'", file_name) + + # setup hook stdout and stderr + try: + stdout = open(os.path.join(self.session_dir, file_name + ".log"), "w") + stderr = subprocess.STDOUT + except IOError: + logger.exception("error setting up hook stderr and stdout") + stdout = None + stderr = None + + # execute hook file + try: + subprocess.check_call(["/bin/sh", file_name], stdin=open(os.devnull, 'r'), + stdout=stdout, stderr=stderr, close_fds=True, + cwd=self.session_dir, env=self.get_environment()) + except subprocess.CalledProcessError: + logger.exception("error running hook '%s'", file_name) def run_state_hooks(self, state): - if state not in self._state_hooks: - return - for hook in self._state_hooks[state]: + """ + Run state hooks. + + :param int state: state to run hooks for + :return: nothing + """ + for hook in self._state_hooks.get(state, []): try: hook(state) - except Exception, e: - self.warn("ERROR: exception occured when running %s state " - "hook: %s: %s\n%s" % (coreapi.state_name(state), - hook, e, - traceback.format_exc())) + except: + message = "exception occured when running %s state hook: %s" % (coreapi.state_name(state), hook) + logger.exception(message) + self.exception( + ExceptionLevels.ERROR, + "Session.run_state_hooks", + None, + message + ) def add_state_hook(self, state, hook): - try: - hooks = self._state_hooks[state] - assert hook not in hooks - hooks.append(hook) - except KeyError: - self._state_hooks[state] = [hook] - if self._state == state: + """ + Add a state hook. + + :param int state: state to add hook for + :param func hook: hook callback for the state + :return: nothing + """ + hooks = self._state_hooks.setdefault(state, []) + assert hook not in hooks + hooks.append(hook) + + if self.state == state: hook(state) def del_state_hook(self, state, hook): - try: - hooks = self._state_hooks[state] - self._state_hooks[state] = filter(lambda x: x != hook, hooks) - except KeyError: - pass + """ + Delete a state hook. + + :param int state: state to delete hook for + :param func hook: hook to delete + :return: + """ + hooks = self._state_hooks.setdefault(state, []) + hooks.remove(hook) def runtime_state_hook(self, state): - if state == coreapi.CORE_EVENT_RUNTIME_STATE: - self.emane.poststartup() - xmlfilever = self.getcfgitem('xmlfilever') - if xmlfilever in ('1.0',): - xmlfilename = os.path.join(self.sessiondir, - 'session-deployed.xml') - savesessionxml(self, xmlfilename, xmlfilever) + """ + Runtime state hook check. - def getenviron(self, state=True): - ''' Get an environment suitable for a subprocess.Popen call. - This is the current process environment with some session-specific - variables. - ''' + :param int state: state to check + :return: nothing + """ + if state == EventTypes.RUNTIME_STATE.value: + self.emane.poststartup() + xml_file_version = self.get_config_item("xmlfilever") + if xml_file_version in ('1.0',): + xml_file_name = os.path.join(self.session_dir, "session-deployed.xml") + save_session_xml(self, xml_file_name, xml_file_version) + + def get_environment(self, state=True): + """ + Get an environment suitable for a subprocess.Popen call. + This is the current process environment with some session-specific + variables. 
+ + :param bool state: flag to determine if session state should be included + :return: + """ env = os.environ.copy() - env['SESSION'] = "%s" % self.sessionid - env['SESSION_SHORT'] = "%s" % self.shortsessionid() - env['SESSION_DIR'] = "%s" % self.sessiondir - env['SESSION_NAME'] = "%s" % self.name - env['SESSION_FILENAME'] = "%s" % self.filename - env['SESSION_USER'] = "%s" % self.user - env['SESSION_NODE_COUNT'] = "%s" % self.node_count + env["SESSION"] = "%s" % self.session_id + env["SESSION_SHORT"] = "%s" % self.short_session_id() + env["SESSION_DIR"] = "%s" % self.session_dir + env["SESSION_NAME"] = "%s" % self.name + env["SESSION_FILENAME"] = "%s" % self.file_name + env["SESSION_USER"] = "%s" % self.user + env["SESSION_NODE_COUNT"] = "%s" % self.get_node_count() + if state: - env['SESSION_STATE'] = "%s" % self.getstate() + env["SESSION_STATE"] = "%s" % self.state + + # attempt to read and add environment config file + environment_config_file = os.path.join(constants.CORE_CONF_DIR, "environment") try: - readfileintodict(os.path.join(CORE_CONF_DIR, "environment"), env) + if os.path.isfile(environment_config_file): + utils.readfileintodict(environment_config_file, env) except IOError: - pass + logger.exception("error reading environment configuration file: %s", environment_config_file) + + # attempt to read and add user environment file if self.user: + environment_user_file = os.path.join("/home", self.user, ".core", "environment") try: - readfileintodict(os.path.join('/home', self.user, ".core", - "environment"), env) + utils.readfileintodict(environment_user_file, env) except IOError: - pass + logger.exception("error reading user core environment settings file: %s", environment_user_file) + return env - def setthumbnail(self, thumbfile): - ''' Set the thumbnail filename. Move files from /tmp to session dir. - ''' - if not os.path.exists(thumbfile): + def set_thumbnail(self, thumb_file): + """ + Set the thumbnail filename. Move files from /tmp to session dir. + + :param str thumb_file: tumbnail file to set for session + :return: nothing + """ + if not os.path.exists(thumb_file): + logger.error("thumbnail file to set does not exist: %s", thumb_file) self.thumbnail = None return - dstfile = os.path.join(self.sessiondir, os.path.basename(thumbfile)) - shutil.move(thumbfile, dstfile) - #print "thumbnail: %s -> %s" % (thumbfile, dstfile) - self.thumbnail = dstfile - def setuser(self, user): - ''' Set the username for this session. Update the permissions of the - session dir to allow the user write access. - ''' - if user is not None: + destination_file = os.path.join(self.session_dir, os.path.basename(thumb_file)) + shutil.copy(thumb_file, destination_file) + self.thumbnail = destination_file + + def set_user(self, user): + """ + Set the username for this session. Update the permissions of the + session dir to allow the user write access. + + :param str user: user to give write permissions to for the session directory + :return: nothing + """ + if user: try: uid = pwd.getpwnam(user).pw_uid - gid = os.stat(self.sessiondir).st_gid - os.chown(self.sessiondir, uid, gid) - except Exception, e: - self.warn("Failed to set permission on %s: %s" % (self.sessiondir, e)) + gid = os.stat(self.session_dir).st_gid + os.chown(self.session_dir, uid, gid) + except IOError: + logger.exception("failed to set permission on %s", self.session_dir) + self.user = user - def objs(self): - ''' Return iterator over the emulation object dictionary. 
- ''' - return self._objs.itervalues() + def get_object_id(self): + """ + Return a unique, new random object id. + """ + object_id = None - def getobjid(self): - ''' Return a unique, random object id. - ''' - self._objslock.acquire() - while True: - id = random.randint(1, 0xFFFF) - if id not in self._objs: - break - self._objslock.release() - return id + with self._objects_lock: + while True: + object_id = random.randint(1, 0xFFFF) + if object_id not in self.objects: + break - def addobj(self, cls, *clsargs, **clskwds): - ''' Add an emulation object. - ''' + return object_id + + def add_object(self, cls, *clsargs, **clskwds): + """ + Add an emulation object. + + :param class cls: object class to add + :param list clsargs: list of arguments for the class to create + :param dict clskwds: dictionary of arguments for the class to create + :return: the created class instance + """ obj = cls(self, *clsargs, **clskwds) - self._objslock.acquire() - if obj.objid in self._objs: - self._objslock.release() + + self._objects_lock.acquire() + if obj.objid in self.objects: + self._objects_lock.release() obj.shutdown() - raise KeyError, "non-unique object id %s for %s" % (obj.objid, obj) - self._objs[obj.objid] = obj - self._objslock.release() + raise KeyError("duplicate object id %s for %s" % (obj.objid, obj)) + self.objects[obj.objid] = obj + self._objects_lock.release() + return obj - def obj(self, objid): - ''' Get an emulation object. - ''' - if objid not in self._objs: - raise KeyError, "unknown object id %s" % (objid) - return self._objs[objid] + def get_object(self, object_id): + """ + Get an emulation object. - def objbyname(self, name): - ''' Get an emulation object using its name attribute. - ''' - with self._objslock: - for obj in self.objs(): + :param int object_id: object id to retrieve + :return: object for the given id + """ + if object_id not in self.objects: + raise KeyError("unknown object id %s" % object_id) + return self.objects[object_id] + + def get_object_by_name(self, name): + """ + Get an emulation object using its name attribute. + + :param str name: name of object to retrieve + :return: object for the name given + """ + with self._objects_lock: + for obj in self.objects.itervalues(): if hasattr(obj, "name") and obj.name == name: return obj - raise KeyError, "unknown object with name %s" % (name) + raise KeyError("unknown object with name %s" % name) - def delobj(self, objid): - ''' Remove an emulation object. - ''' - self._objslock.acquire() + def delete_object(self, object_id): + """ + Remove an emulation object. + + :param int object_id: object id to remove + :return: nothing + """ + with self._objects_lock: + try: + obj = self.objects.pop(object_id) + obj.shutdown() + except KeyError: + logger.error("failed to remove object, object with id was not found: %s", object_id) + + def delete_objects(self): + """ + Clear the objects dictionary, and call shutdown for each object. + """ + with self._objects_lock: + while self.objects: + _, obj = self.objects.popitem() + obj.shutdown() + + def write_objects(self): + """ + Write objects to a 'nodes' file in the session dir. 
+ The 'nodes' file lists: number, name, api-type, class-type + """ try: - o = self._objs.pop(objid) - except KeyError: - o = None - self._objslock.release() - if o: - o.shutdown() - del o - gc.collect() -# print "gc count:", gc.get_count() -# for o in gc.get_objects(): -# if isinstance(o, PyCoreObj): -# print "XXX XXX XXX PyCoreObj:", o -# for r in gc.get_referrers(o): -# print "XXX XXX XXX referrer:", gc.get_referrers(o) + nodes_file = open(os.path.join(self.session_dir, "nodes"), "w") + with self._objects_lock: + for object_id in sorted(self.objects.keys()): + obj = self.objects[object_id] + nodes_file.write("%s %s %s %s\n" % (object_id, obj.name, obj.apitype, type(obj))) + nodes_file.close() + except IOError: + logger.exception("error writing nodes file") - def delobjs(self): - ''' Clear the _objs dictionary, and call each obj.shutdown() routine. - ''' - self._objslock.acquire() - while self._objs: - k, o = self._objs.popitem() - o.shutdown() - self._objslock.release() + def add_config_object(self, name, object_type, callback): + """ + Objects can register configuration objects that are included in + the Register Message and may be configured via the Configure + Message. The callback is invoked when receiving a Configure Message. - def writeobjs(self): - ''' Write objects to a 'nodes' file in the session dir. - The 'nodes' file lists: - number, name, api-type, class-type - ''' - try: - f = open(os.path.join(self.sessiondir, "nodes"), "w") - with self._objslock: - for objid in sorted(self._objs.keys()): - o = self._objs[objid] - f.write("%s %s %s %s\n" % (objid, o.name, o.apitype, type(o))) - f.close() - except Exception, e: - self.warn("Error writing nodes file: %s" % e) + :param str name: name of configuration object to add + :param int object_type: register tlv type + :param func callback: callback function for object + :return: nothing + """ + register_tlv = RegisterTlvs(object_type) + logger.info("adding config object callback: %s - %s", name, register_tlv) + with self._config_objects_lock: + self.config_objects[name] = (object_type, callback) - def addconfobj(self, objname, type, callback): - ''' Objects can register configuration objects that are included in - the Register Message and may be configured via the Configure - Message. The callback is invoked when receiving a Configure Message. - ''' - if type not in coreapi.reg_tlvs: - raise Exception, "invalid configuration object type" - self._confobjslock.acquire() - self._confobjs[objname] = (type, callback) - self._confobjslock.release() + def config_object(self, config_data): + """ + Invoke the callback for an object upon receipt of configuration data for that object. + A no-op if the object doesn't exist. + + :param core.data.ConfigData config_data: configuration data to execute against + :return: responses to the configuration data + :rtype: list + """ + name = config_data.object + logger.info("session(%s): handling config message(%s): \n%s", + self.session_id, name, config_data) - def confobj(self, objname, session, msg): - ''' Invoke the callback for an object upon receipt of a Configure - Message for that object. A no-op if the object doesn't exist. 
- ''' replies = [] - self._confobjslock.acquire() - if objname == "all": - for objname in self._confobjs: - (type, callback) = self._confobjs[objname] - reply = callback(session, msg) - if reply is not None: - replies.append(reply) - self._confobjslock.release() - return replies - if objname in self._confobjs: - (type, callback) = self._confobjs[objname] - self._confobjslock.release() - reply = callback(session, msg) - if reply is not None: + + if name == "all": + with self._config_objects_lock: + for name in self.config_objects: + config_type, callback = self.config_objects[name] + reply = callback(self, config_data) + + if reply: + replies.append(reply) + + return replies + + if name in self.config_objects: + with self._config_objects_lock: + config_type, callback = self.config_objects[name] + + reply = callback(self, config_data) + + if reply: replies.append(reply) + return replies else: - self.info("session object doesn't own model '%s', ignoring" % \ - objname) - self._confobjslock.release() + logger.info("session object doesn't own model '%s', ignoring", name) + return replies - def confobjs_to_tlvs(self): - ''' Turn the configuration objects into a list of Register Message TLVs. - ''' - tlvdata = "" - self._confobjslock.acquire() - for objname in self._confobjs: - (type, callback) = self._confobjs[objname] - # type must be in coreapi.reg_tlvs - tlvdata += coreapi.CoreRegTlv.pack(type, objname) - self._confobjslock.release() - return tlvdata + def dump_session(self): + """ + Log information about the session in its current state. + """ + logger.info("session id=%s name=%s state=%s", self.session_id, self.name, self.state) + logger.info("file=%s thumbnail=%s node_count=%s/%s", + self.file_name, self.thumbnail, self.get_node_count(), len(self.objects)) - def info(self, msg): - ''' Utility method for writing output to stdout. - ''' - print msg - sys.stdout.flush() + def exception(self, level, source, object_id, text): + """ + Generate and broadcast an exception event. - def warn(self, msg): - ''' Utility method for writing output to stderr. - ''' - print >> sys.stderr, msg - sys.stderr.flush() + :param str level: exception level + :param str source: source name + :param int object_id: object id + :param str text: exception message + :return: nothing + """ - def dumpsession(self): - ''' Debug print this session. - ''' - self.info("session id=%s name=%s state=%s connected=%s" % \ - (self.sessionid, self.name, self._state, self.isconnected())) - num = len(self._objs) - self.info(" file=%s thumb=%s nc=%s/%s" % \ - (self.filename, self.thumbnail, self.node_count, num)) + exception_data = ExceptionData( + node=object_id, + session=str(self.session_id), + level=level, + source=source, + date=time.ctime(), + text=text + ) - def exception(self, level, source, objid, text): - ''' Generate an Exception Message - ''' - vals = (objid, str(self.sessionid), level, source, time.ctime(), text) - types = ("NODE", "SESSION", "LEVEL", "SOURCE", "DATE", "TEXT") - tlvdata = "" - for (t,v) in zip(types, vals): - if v is not None: - tlvdata += coreapi.CoreExceptionTlv.pack( - eval("coreapi.CORE_TLV_EXCP_%s" % t), v) - msg = coreapi.CoreExceptionMessage.pack(0, tlvdata) - self.warn("exception: %s (%s) %s" % (source, objid, text)) - # send Exception Message to connected handlers (e.g. 
GUI) - self.broadcastraw(None, msg) + self.broadcast_exception(exception_data) - def getcfgitem(self, cfgname): - ''' Return an entry from the configuration dictionary that comes from - command-line arguments and/or the core.conf config file. - ''' - if cfgname not in self.cfg: - return None - else: - return self.cfg[cfgname] + def get_config_item(self, name): + """ + Return an entry from the configuration dictionary that comes from + command-line arguments and/or the core.conf config file. - def getcfgitembool(self, cfgname, defaultifnone = None): - ''' Return a boolean entry from the configuration dictionary, may - return None if undefined. - ''' - item = self.getcfgitem(cfgname) + :param str name: name of configuration to retrieve + :return: config value + """ + return self.config.get(name) + + def get_config_item_bool(self, name, default=None): + """ + Return a boolean entry from the configuration dictionary, may + return None if undefined. + + :param str name: configuration item name + :param default: default value to return if not found + :return: boolean value of the configuration item + :rtype: bool + """ + item = self.get_config_item(name) if item is None: - return defaultifnone + return default return bool(item.lower() == "true") - def getcfgitemint(self, cfgname, defaultifnone = None): - ''' Return an integer entry from the configuration dictionary, may - return None if undefined. - ''' - item = self.getcfgitem(cfgname) + def get_config_item_int(self, name, default=None): + """ + Return an integer entry from the configuration dictionary, may + return None if undefined. + + :param str name: configuration item name + :param default: default value to return if not found + :return: integer value of the configuration item + :rtype: int + """ + item = self.get_config_item(name) if item is None: - return defaultifnone + return default return int(item) - def instantiate(self, handler=None): - ''' We have entered the instantiation state, invoke startup methods - of various managers and boot the nodes. Validate nodes and check - for transition to the runtime state. - ''' - - self.writeobjs() + def instantiate(self): + """ + We have entered the instantiation state, invoke startup methods + of various managers and boot the nodes. Validate nodes and check + for transition to the runtime state. 
+ """ + + # write current objects out to session directory file + self.write_objects() + # controlnet may be needed by some EMANE models - self.addremovectrlif(node=None, remove=False) + self.add_remove_control_interface(node=None, remove=False) + + # instantiate will be invoked again upon Emane configure if self.emane.startup() == self.emane.NOT_READY: - return # instantiate() will be invoked again upon Emane.configure() + return + + # startup broker self.broker.startup() + + # startup mobility self.mobility.startup() + # boot the services on each node - self.bootnodes(handler) + self.boot_nodes() + # allow time for processes to start time.sleep(0.125) - self.validatenodes() + + # validate nodes + self.validate_nodes() + + # set broker local instantiation to complete self.broker.local_instantiation_complete() - if self.isconnected(): - tlvdata = '' - tlvdata += coreapi.CoreEventTlv.pack(coreapi.CORE_TLV_EVENT_TYPE, - coreapi.CORE_EVENT_INSTANTIATION_COMPLETE) - msg = coreapi.CoreEventMessage.pack(0, tlvdata) - self.broadcastraw(None, msg) + # assume either all nodes have booted already, or there are some # nodes on slave servers that will be booted and those servers will - # send a status response message - self.checkruntime() + # send a node status response message + self.check_runtime() - def getnodecount(self): - ''' Returns the number of CoreNodes and CoreNets, except for those + def get_node_count(self): + """ + Returns the number of CoreNodes and CoreNets, except for those that are not considered in the GUI's node count. - ''' + """ + + with self._objects_lock: + count = len(filter(lambda x: not nodeutils.is_node(x, (NodeTypes.PEER_TO_PEER, NodeTypes.CONTROL_NET)), + self.objects)) + + # on Linux, GreTapBridges are auto-created, not part of GUI's node count + count -= len(filter( + lambda (x): nodeutils.is_node(x, NodeTypes.TAP_BRIDGE) and not nodeutils.is_node(x, NodeTypes.TUNNEL), + self.objects)) - with self._objslock: - count = len(filter(lambda(x): \ - not isinstance(x, (nodes.PtpNet, nodes.CtrlNet)), - self.objs())) - # on Linux, GreTapBridges are auto-created, not part of - # GUI's node count - if 'GreTapBridge' in globals(): - count -= len(filter(lambda(x): \ - isinstance(x, GreTapBridge) and not \ - isinstance(x, nodes.TunnelNode), - self.objs())) return count - def checkruntime(self): - ''' Check if we have entered the runtime state, that all nodes have been - started and the emulation is running. Start the event loop once we - have entered runtime (time=0). - ''' + def check_runtime(self): + """ + Check if we have entered the runtime state, that all nodes have been + started and the emulation is running. Start the event loop once we + have entered runtime (time=0). + """ # this is called from instantiate() after receiving an event message # for the instantiation state, and from the broker when distributed # nodes have been started - if self.node_count is None: + if self.state == EventTypes.RUNTIME_STATE.value: + logger.info("valid runtime state found, returning") return - if self.getstate() == coreapi.CORE_EVENT_RUNTIME_STATE: - return - # check if all servers have completed instantiation + + # check to verify that all nodes and networks are running if not self.broker.instantiation_complete(): return - state = coreapi.CORE_EVENT_RUNTIME_STATE - self.evq.run() - self.setstate(state, info=True, sendevent=True) - def datacollect(self): - ''' Tear down a running session. Stop the event loop and any running - nodes, and perform clean-up. 
- ''' - self.evq.stop() - with self._objslock: - for obj in self.objs(): + # start event loop and set to runtime + self.event_loop.run() + self.set_state(EventTypes.RUNTIME_STATE.value, send_event=True) + + def data_collect(self): + """ + Tear down a running session. Stop the event loop and any running + nodes, and perform clean-up. + """ + # stop event loop + self.event_loop.stop() + + # stop node services + with self._objects_lock: + for obj in self.objects.itervalues(): + # TODO: determine if checking for CoreNode alone is ok if isinstance(obj, nodes.PyCoreNode): self.services.stopnodeservices(obj) + + # shutdown emane self.emane.shutdown() - self.updatectrlifhosts(remove=True) - # Remove all four possible control networks. Does nothing if ctrlnet is not installed. - self.addremovectrlif(node=None, remove=True) - self.addremovectrlif(node=None, netidx=1, remove=True) - self.addremovectrlif(node=None, netidx=2, remove=True) - self.addremovectrlif(node=None, netidx=3, remove=True) - # self.checkshutdown() is currently invoked from node delete handler + # update control interface hosts + self.update_control_interface_hosts(remove=True) - def checkshutdown(self): - ''' Check if we have entered the shutdown state, when no running nodes - and links remain. - ''' - nc = self.getnodecount() - # TODO: this doesn't consider slave server node counts - # wait for slave servers to enter SHUTDOWN state, then master session - # can enter SHUTDOWN - replies = () - if self.getcfgitembool('verbose', False): - self.info("Session %d shutdown: %d nodes remaining" % \ - (self.sessionid, nc)) - if nc == 0: - replies = self.setstate(state=coreapi.CORE_EVENT_SHUTDOWN_STATE, - info=True, sendevent=True, returnevent=True) + # remove all four possible control networks. Does nothing if ctrlnet is not installed. + self.add_remove_control_interface(node=None, net_index=0, remove=True) + self.add_remove_control_interface(node=None, net_index=1, remove=True) + self.add_remove_control_interface(node=None, net_index=2, remove=True) + self.add_remove_control_interface(node=None, net_index=3, remove=True) + + def check_shutdown(self): + """ + Check if we have entered the shutdown state, when no running nodes + and links remain. + """ + node_count = self.get_node_count() + + logger.info("checking shutdown for session %d: %d nodes remaining", self.session_id, node_count) + + # TODO: do we really want a check that finds 0 nodes to initiate a shutdown state? + if node_count == 0: + self.set_state(state=EventTypes.SHUTDOWN_STATE.value, send_event=True) self.sdt.shutdown() - return replies - def setmaster(self, handler): - ''' Look for the specified handler and set our master flag - appropriately. Returns True if we are connected to the given - handler. - ''' - with self._handlerslock: - for h in self._handlers: - if h != handler: - continue - self.master = h.master - return True - return False - - def shortsessionid(self): - ''' Return a shorter version of the session ID, appropriate for - interface names, where length may be limited. - ''' - ssid = (self.sessionid >> 8) ^ (self.sessionid & ((1 << 8) - 1)) + def short_session_id(self): + """ + Return a shorter version of the session ID, appropriate for + interface names, where length may be limited. + """ + ssid = (self.session_id >> 8) ^ (self.session_id & ((1 << 8) - 1)) return "%x" % ssid - def sendnodeemuid(self, handler, nodenum): - ''' Send back node messages to the GUI for node messages that had - the status request flag. 
- ''' - if handler is None: - return - if nodenum in handler.nodestatusreq: - tlvdata = "" - tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_NUMBER, - nodenum) - tlvdata += coreapi.CoreNodeTlv.pack(coreapi.CORE_TLV_NODE_EMUID, - nodenum) - reply = coreapi.CoreNodeMessage.pack(coreapi.CORE_API_ADD_FLAG \ - | coreapi.CORE_API_LOC_FLAG, - tlvdata) - try: - handler.sendall(reply) - except Exception, e: - self.warn("sendall() for node: %d error: %s" % (nodenum, e)) - del handler.nodestatusreq[nodenum] - - def bootnodes(self, handler): - ''' Invoke the boot() procedure for all nodes and send back node - messages to the GUI for node messages that had the status - request flag. - ''' - with self._objslock: - for n in self.objs(): - if isinstance(n, nodes.PyCoreNode) and \ - not isinstance(n, nodes.RJ45Node): + def boot_nodes(self): + """ + Invoke the boot() procedure for all nodes and send back node + messages to the GUI for node messages that had the status + request flag. + """ + with self._objects_lock: + for obj in self.objects.itervalues(): + # TODO: determine instance type we need to check, due to method issue below + if isinstance(obj, nodes.PyCoreNode) and not nodeutils.is_node(obj, NodeTypes.RJ45): # add a control interface if configured - self.addremovectrlif(node=n, remove=False) - n.boot() - self.sendnodeemuid(handler, n.objid) - self.updatectrlifhosts() + self.add_remove_control_interface(node=obj, remove=False) + obj.boot() - def validatenodes(self): - with self._objslock: - for n in self.objs(): - # TODO: this can be extended to validate everything + self.update_control_interface_hosts() + + def validate_nodes(self): + """ + Validate all nodes that are known by the session. + + :return: nothing + """ + with self._objects_lock: + for obj in self.objects.itervalues(): + # TODO: this can be extended to validate everything, bad node check here as well # such as vnoded process, bridges, etc. - if not isinstance(n, nodes.PyCoreNode): + if not isinstance(obj, nodes.PyCoreNode): continue - if isinstance(n, nodes.RJ45Node): - continue - n.validate() - def getctrlnetprefixes(self): - p = getattr(self.options, 'controlnet', self.cfg.get('controlnet')) - p0 = getattr(self.options, 'controlnet0', self.cfg.get('controlnet0')) - p1 = getattr(self.options, 'controlnet1', self.cfg.get('controlnet1')) - p2 = getattr(self.options, 'controlnet2', self.cfg.get('controlnet2')) - p3 = getattr(self.options, 'controlnet3', self.cfg.get('controlnet3')) + if nodeutils.is_node(obj, NodeTypes.RJ45): + continue + + obj.validate() + + def get_control_net_prefixes(self): + """ + Retrieve control net prefixes. + + :return: control net prefix list + :rtype: list + """ + p = getattr(self.options, "controlnet", self.config.get("controlnet")) + p0 = getattr(self.options, "controlnet0", self.config.get("controlnet0")) + p1 = getattr(self.options, "controlnet1", self.config.get("controlnet1")) + p2 = getattr(self.options, "controlnet2", self.config.get("controlnet2")) + p3 = getattr(self.options, "controlnet3", self.config.get("controlnet3")) + if not p0 and p: p0 = p - return [p0,p1,p2,p3] - - def getctrlnetserverintf(self): - d0 = self.cfg.get('controlnetif0') + return [p0, p1, p2, p3] + + def get_control_net_server_interfaces(self): + """ + Retrieve control net server interfaces. 
+ + :return: list of control net server interfaces + :rtype: list + """ + d0 = self.config.get("controlnetif0") if d0: - self.warn("controlnet0 cannot be assigned with a host interface") - d1 = self.cfg.get('controlnetif1') - d2 = self.cfg.get('controlnetif2') - d3 = self.cfg.get('controlnetif3') - return [None,d1,d2,d3] - - def getctrlnetidx(self, dev): - if dev[0:4] == 'ctrl' and int(dev[4]) in [0,1,2,3]: - idx = int(dev[4]) - if idx == 0: - return idx - if idx < 4 and self.getctrlnetprefixes()[idx] is not None: - return idx + logger.error("controlnet0 cannot be assigned with a host interface") + d1 = self.config.get("controlnetif1") + d2 = self.config.get("controlnetif2") + d3 = self.config.get("controlnetif3") + return [None, d1, d2, d3] + + def get_control_net_index(self, dev): + """ + Retrieve control net index. + + :param str dev: device to get control net index for + :return: control net index, -1 otherwise + :rtype: int + """ + if dev[0:4] == "ctrl" and int(dev[4]) in [0, 1, 2, 3]: + index = int(dev[4]) + if index == 0: + return index + if index < 4 and self.get_control_net_prefixes()[index] is not None: + return index return -1 - - def getctrlnetobj(self, netidx): - oid = "ctrl%dnet" % netidx - return self.obj(oid) - + def get_control_net_object(self, net_index): + # TODO: all nodes use an integer id and now this wants to use a string =( + object_id = "ctrl%dnet" % net_index + return self.get_object(object_id) - def addremovectrlnet(self, netidx, remove=False, conf_reqd=True): - ''' Create a control network bridge as necessary. + def add_remove_control_net(self, net_index, remove=False, conf_required=True): + """ + Create a control network bridge as necessary. When the remove flag is True, remove the bridge that connects control interfaces. The conf_reqd flag, when False, causes a control network bridge to be added even if one has not been configured. - ''' - prefixspeclist = self.getctrlnetprefixes() - prefixspec = prefixspeclist[netidx] - if not prefixspec: - if conf_reqd: - return None # no controlnet needed + + :param int net_index: network index + :param bool remove: flag to check if it should be removed + :param bool conf_required: flag to check if conf is required + :return: control net object + :rtype: core.netns.nodes.CtrlNet + """ + prefix_spec_list = self.get_control_net_prefixes() + prefix_spec = prefix_spec_list[net_index] + if not prefix_spec: + if conf_required: + # no controlnet needed + return None else: - prefixspec = nodes.CtrlNet.DEFAULT_PREFIX_LIST[netidx] - - serverintf = self.getctrlnetserverintf()[netidx] + control_net_class = nodeutils.get_node_class(NodeTypes.CONTROL_NET) + prefix_spec = control_net_class.DEFAULT_PREFIX_LIST[net_index] + + server_interface = self.get_control_net_server_interfaces()[net_index] # return any existing controlnet bridge try: - ctrlnet = self.getctrlnetobj(netidx) + control_net = self.get_control_net_object(net_index) + if remove: - self.delobj(ctrlnet.objid) + self.delete_object(control_net.objid) return None - return ctrlnet + + return control_net except KeyError: if remove: return None # build a new controlnet bridge - oid = "ctrl%dnet" % netidx + object_id = "ctrl%dnet" % net_index # use the updown script for control net 0 only. 
         updown_script = None
-        if netidx == 0:
-            try:
-                if self.cfg['controlnet_updown_script']:
-                    updown_script = self.cfg['controlnet_updown_script']
-            except KeyError:
-                pass
-            # Check if session option set, overwrite if so
-            if hasattr(self.options, 'controlnet_updown_script'):
-                new_uds = self.options.controlnet_updown_script
-                if new_uds:
-                    updown_script = new_uds
-
-        prefixes = prefixspec.split()
-        if len(prefixes) > 1:
-            # A list of per-host prefixes is provided
+        if net_index == 0:
+            try:
+                if self.config["controlnet_updown_script"]:
+                    updown_script = self.config["controlnet_updown_script"]
+            except KeyError:
+                logger.exception("error retrieving controlnet updown script")
+
+            # Check if session option set, overwrite if so
+            new_updown_script = getattr(self.options, "controlnet_updown_script", None)
+            if new_updown_script:
+                updown_script = new_updown_script
+
+        prefixes = prefix_spec.split()
+        if len(prefixes) > 1:
+            # a list of per-host prefixes is provided
             assign_address = True
             if self.master:
                 try:
                     # split first (master) entry into server and prefix
-                    prefix = prefixes[0].split(':', 1)[1]
+                    prefix = prefixes[0].split(":", 1)[1]
                 except IndexError:
                     # no server name. possibly only one server
-                    prefix = prefixes[0]
+                    prefix = prefixes[0]
             else:
                 # slave servers have their name and localhost in the serverlist
                 servers = self.broker.getservernames()
-                servers.remove('localhost')
+                servers.remove("localhost")
                 prefix = None
+
                 for server_prefix in prefixes:
                     try:
                         # split each entry into server and prefix
-                        server, p = server_prefix.split(':')
+                        server, p = server_prefix.split(":")
                     except ValueError:
                         server = ""
                         p = None
-
+
                     if server == servers[0]:
                         # the server name in the list matches this server
                         prefix = p
                         break
+
                 if not prefix:
-                    msg = "Control network prefix not found for server '%s'" % \
-                        servers[0]
-                    self.exception(coreapi.CORE_EXCP_LEVEL_ERROR,
-                                   "Session.addremovectrlnet()", None, msg)
+                    logger.error("Control network prefix not found for server '%s'" % servers[0])
                     assign_address = False
                     try:
                         prefix = prefixes[0].split(':', 1)[1]
                     except IndexError:
                         prefix = prefixes[0]
-        else: # len(prefixes) == 1
+        # len(prefixes) == 1
+        else:
+            # TODO: can we get the server name from the servers.conf or from the node assignments?
             # with one prefix, only master gets a ctrlnet address
             assign_address = self.master
             prefix = prefixes[0]
 
-        ctrlnet = self.addobj(cls=nodes.CtrlNet, objid=oid, prefix=prefix,
-                              assign_address=assign_address,
-                              updown_script=updown_script, serverintf=serverintf)
+        control_net_class = nodeutils.get_node_class(NodeTypes.CONTROL_NET)
+        control_net = self.add_object(cls=control_net_class, objid=object_id, prefix=prefix,
+                                      assign_address=assign_address,
+                                      updown_script=updown_script, serverintf=server_interface)
 
         # tunnels between controlnets will be built with Broker.addnettunnels()
-        self.broker.addnet(oid)
+        # TODO: potentially remove documentation saying object ids are ints
+        self.broker.addnet(object_id)
         for server in self.broker.getservers():
-            self.broker.addnodemap(server, oid)
-
-        return ctrlnet
+            self.broker.addnodemap(server, object_id)
 
-    def addremovectrlif(self, node, netidx=0, remove=False, conf_reqd=True):
-        ''' Add a control interface to a node when a 'controlnet' prefix is
-            listed in the config file or session options. Uses
-            addremovectrlnet() to build or remove the control bridge.
-            If conf_reqd is False, the control network may be built even
-            when the user has not configured one (e.g. for EMANE.)
- ''' - ctrlnet = self.addremovectrlnet(netidx, remove, conf_reqd) - if ctrlnet is None: + return control_net + + def add_remove_control_interface(self, node, net_index=0, remove=False, conf_required=True): + """ + Add a control interface to a node when a 'controlnet' prefix is + listed in the config file or session options. Uses + addremovectrlnet() to build or remove the control bridge. + If conf_reqd is False, the control network may be built even + when the user has not configured one (e.g. for EMANE.) + + :param core.netns.nodes.CoreNode node: node to add or remove control interface + :param int net_index: network index + :param bool remove: flag to check if it should be removed + :param bool conf_required: flag to check if conf is required + :return: nothing + """ + control_net = self.add_remove_control_net(net_index, remove, conf_required) + if not control_net: return - if node is None: + + if not node: return - if node.netif(ctrlnet.CTRLIF_IDX_BASE + netidx): - return # ctrl# already exists - ctrlip = node.objid + + # ctrl# already exists + if node.netif(control_net.CTRLIF_IDX_BASE + net_index): + return + + control_ip = node.objid + try: - addrlist = ["%s/%s" % (ctrlnet.prefix.addr(ctrlip), - ctrlnet.prefix.prefixlen)] + addrlist = ["%s/%s" % (control_net.prefix.addr(control_ip), control_net.prefix.prefixlen)] except ValueError: msg = "Control interface not added to node %s. " % node.objid - msg += "Invalid control network prefix (%s). " % ctrlnet.prefix + msg += "Invalid control network prefix (%s). " % control_net.prefix msg += "A longer prefix length may be required for this many nodes." - node.exception(coreapi.CORE_EXCP_LEVEL_ERROR, - "Session.addremovectrlif()", msg) + logger.exception(msg) return - ifi = node.newnetif(net = ctrlnet, ifindex = ctrlnet.CTRLIF_IDX_BASE + netidx, - ifname = "ctrl%d" % netidx, hwaddr = MacAddr.random(), - addrlist = addrlist) - node.netif(ifi).control = True - def updatectrlifhosts(self, netidx=0, remove=False): - ''' Add the IP addresses of control interfaces to the /etc/hosts file. - ''' - if not self.getcfgitembool('update_etc_hosts', False): + interface1 = node.newnetif(net=control_net, + ifindex=control_net.CTRLIF_IDX_BASE + net_index, + ifname="ctrl%d" % net_index, hwaddr=MacAddress.random(), + addrlist=addrlist) + node.netif(interface1).control = True + + def update_control_interface_hosts(self, net_index=0, remove=False): + """ + Add the IP addresses of control interfaces to the /etc/hosts file. + + :param int net_index: network index to update + :param bool remove: flag to check if it should be removed + :return: nothing + """ + if not self.get_config_item_bool("update_etc_hosts", False): return - + try: - ctrlnet = self.getctrlnetobj(netidx) + control_net = self.get_control_net_object(net_index) except KeyError: + logger.exception("error retrieving control net object") return - header = "CORE session %s host entries" % self.sessionid + + header = "CORE session %s host entries" % self.session_id if remove: - if self.getcfgitembool('verbose', False): - self.info("Removing /etc/hosts file entries.") - filedemunge('/etc/hosts', header) + logger.info("Removing /etc/hosts file entries.") + utils.filedemunge("/etc/hosts", header) return + entries = [] - for ifc in ctrlnet.netifs(): - name = ifc.node.name - for addr in ifc.addrlist: - entries.append("%s %s" % (addr.split('/')[0], ifc.node.name)) - if self.getcfgitembool('verbose', False): - self.info("Adding %d /etc/hosts file entries." 
% len(entries))
-        filemunge('/etc/hosts', header, '\n'.join(entries) + '\n')
+        for interface in control_net.netifs():
+            name = interface.node.name
+            for address in interface.addrlist:
+                entries.append("%s %s" % (address.split("/")[0], name))
+
+        logger.info("Adding %d /etc/hosts file entries." % len(entries))
+
+        utils.filemunge("/etc/hosts", header, "\n".join(entries) + "\n")
 
     def runtime(self):
-        ''' Return the current time we have been in the runtime state, or zero
-            if not in runtime.
-        '''
-        if self.getstate() == coreapi.CORE_EVENT_RUNTIME_STATE:
-            return time.time() - self._time
+        """
+        Return the current time we have been in the runtime state, or zero
+        if not in runtime.
+        """
+        if self.state == EventTypes.RUNTIME_STATE.value:
+            return time.time() - self._state_time
         else:
             return 0.0
 
-    def addevent(self, etime, node=None, name=None, data=None):
-        ''' Add an event to the event queue, with a start time relative to the
-            start of the runtime state.
-        '''
-        etime = float(etime)
-        runtime = self.runtime()
-        if runtime > 0.0:
-            if time <= runtime:
-                self.warn("Could not schedule past event for time %s " \
-                          "(run time is now %s)" % (time, runtime))
+    def add_event(self, event_time, node=None, name=None, data=None):
+        """
+        Add an event to the event queue, with a start time relative to the
+        start of the runtime state.
+
+        :param event_time: event time
+        :param core.netns.nodes.CoreNode node: node to add event for
+        :param str name: name of event
+        :param data: data for event
+        :return: nothing
+        """
+        event_time = float(event_time)
+        current_time = self.runtime()
+
+        if current_time > 0.0:
+            if event_time <= current_time:
+                logger.warn("could not schedule past event for time %s (run time is now %s)", event_time, current_time)
                 return
-            etime = etime - runtime
-        func = self.runevent
-        self.evq.add_event(etime, func, node=node, name=name, data=data)
-        if name is None:
-            name = ""
-        self.info("scheduled event %s at time %s data=%s" % \
-                  (name, etime + runtime, data))
+            event_time = event_time - current_time
 
-    def runevent(self, node=None, name=None, data=None):
-        ''' Run a scheduled event, executing commands in the data string.
-        '''
+        self.event_loop.add_event(event_time, self.run_event, node=node, name=name, data=data)
+
+        if not name:
+            name = ""
+        logger.info("scheduled event %s at time %s data=%s", name, event_time + current_time, data)
+
+    def run_event(self, node_id=None, name=None, data=None):
+        """
+        Run a scheduled event, executing commands in the data string.
+
+        :param int node_id: node id to run event
+        :param str name: event name
+        :param data: event data
+        :return: nothing
+        """
         now = self.runtime()
-        if name is None:
+        if not name:
             name = ""
-        self.info("running event %s at time %s cmd=%s" % (name, now, data))
-        if node is None:
-            mutedetach(shlex.split(data))
+
+        logger.info("running event %s at time %s cmd=%s" % (name, now, data))
+        commands = shlex.split(data)
+        if not node_id:
+            utils.mutedetach(commands)
         else:
-            n = self.obj(node)
-            n.cmd(shlex.split(data), wait=False)
+            node = self.get_object(node_id)
+            node.cmd(commands, wait=False)
+
+    def send_objects(self):
+        """
+        Return API messages that describe the current session.
+        """
 
-    def sendobjs(self):
-        ''' Return API messages that describe the current session.
- ''' - replies = [] - nn = 0 # send node messages for node and network objects - with self._objslock: - for obj in self.objs(): - msg = obj.tonodemsg(flags = coreapi.CORE_API_ADD_FLAG) - if msg is not None: - replies.append(msg) - nn += 1 - - nl = 0 # send link messages from net objects - with self._objslock: - for obj in self.objs(): - linkmsgs = obj.tolinkmsgs(flags = coreapi.CORE_API_ADD_FLAG) - for msg in linkmsgs: - replies.append(msg) - nl += 1 + number_nodes = 0 + number_links = 0 + with self._objects_lock: + for obj in self.objects.itervalues(): + node_data = obj.data(message_type=MessageFlags.ADD.value) + if node_data: + self.broadcast_node(node_data) + # replies.append(message) + number_nodes += 1 + + links_data = obj.all_link_data(flags=MessageFlags.ADD.value) + for link_data in links_data: + self.broadcast_link(link_data) + # replies.append(link_data) + number_links += 1 + # send model info configs = self.mobility.getallconfigs() configs += self.emane.getallconfigs() - for (nodenum, cls, values) in configs: - #cls = self.mobility._modelclsmap[conftype] - msg = cls.toconfmsg(flags=0, nodenum=nodenum, - typeflags=coreapi.CONF_TYPE_FLAGS_UPDATE, - values=values) - replies.append(msg) + for node_number, cls, values in configs: + config_data = cls.config_data( + flags=0, + node_id=node_number, + type_flags=ConfigFlags.UPDATE.value, + values=values + ) + self.broadcast_config(config_data) + # service customizations - svc_configs = self.services.getallconfigs() - for (nodenum, svc) in svc_configs: - opaque = "service:%s" % svc._name - tlvdata = "" - tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_NODE, - nodenum) - tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_OPAQUE, - opaque) - tmp = coreapi.CoreConfMessage(flags=0, hdr="", data=tlvdata) - replies.append(self.services.configure_request(tmp)) - for (filename, data) in self.services.getallfiles(svc): - flags = coreapi.CORE_API_ADD_FLAG - tlvdata = coreapi.CoreFileTlv.pack(coreapi.CORE_TLV_FILE_NODE, - nodenum) - tlvdata += coreapi.CoreFileTlv.pack(coreapi.CORE_TLV_FILE_NAME, - str(filename)) - tlvdata += coreapi.CoreFileTlv.pack(coreapi.CORE_TLV_FILE_TYPE, - opaque) - tlvdata += coreapi.CoreFileTlv.pack(coreapi.CORE_TLV_FILE_DATA, - str(data)) - replies.append(coreapi.CoreFileMessage.pack(flags, tlvdata)) + service_configs = self.services.getallconfigs() + for node_number, service in service_configs: + opaque = "service:%s" % service._name + config_data = ConfigData( + node=node_number, + opaque=opaque + ) + # replies.append(self.services.configure_request(config_data)) + config_response = self.services.configure_request(config_data) + self.broadcast_config(config_response) + + for file_name, config_data in self.services.getallfiles(service): + # flags = MessageFlags.ADD.value + # tlv_data = coreapi.CoreFileTlv.pack(FileTlvs.NODE.value, node_number) + # tlv_data += coreapi.CoreFileTlv.pack(FileTlvs.NAME.value, str(file_name)) + # tlv_data += coreapi.CoreFileTlv.pack(FileTlvs.TYPE.value, opaque) + # tlv_data += coreapi.CoreFileTlv.pack(FileTlvs.DATA.value, str(config_data)) + # replies.append(coreapi.CoreFileMessage.pack(flags, tlv_data)) + + file_data = FileData( + message_type=MessageFlags.ADD.value, + node=node_number, + name=str(file_name), + type=opaque, + data=str(config_data) + ) + self.broadcast_file(file_data) # TODO: send location info - # replies.append(self.location.toconfmsg()) + # send hook scripts for state in sorted(self._hooks.keys()): - for (filename, data) in self._hooks[state]: - flags = 
coreapi.CORE_API_ADD_FLAG - tlvdata = coreapi.CoreFileTlv.pack(coreapi.CORE_TLV_FILE_NAME, - str(filename)) - tlvdata += coreapi.CoreFileTlv.pack(coreapi.CORE_TLV_FILE_TYPE, - "hook:%s" % state) - tlvdata += coreapi.CoreFileTlv.pack(coreapi.CORE_TLV_FILE_DATA, - str(data)) - replies.append(coreapi.CoreFileMessage.pack(flags, tlvdata)) + for file_name, config_data in self._hooks[state]: + # flags = MessageFlags.ADD.value + # tlv_data = coreapi.CoreFileTlv.pack(FileTlvs.NAME.value, str(file_name)) + # tlv_data += coreapi.CoreFileTlv.pack(FileTlvs.TYPE.value, "hook:%s" % state) + # tlv_data += coreapi.CoreFileTlv.pack(FileTlvs.DATA.value, str(config_data)) + # replies.append(coreapi.CoreFileMessage.pack(flags, tlv_data)) - # send meta data - tmp = coreapi.CoreConfMessage(flags=0, hdr="", data="") - opts = self.options.configure_request(tmp, - typeflags = coreapi.CONF_TYPE_FLAGS_UPDATE) - if opts: - replies.append(opts) - meta = self.metadata.configure_request(tmp, - typeflags = coreapi.CONF_TYPE_FLAGS_UPDATE) - if meta: - replies.append(meta) + file_data = FileData( + message_type=MessageFlags.ADD.value, + name=str(file_name), + type="hook:%s" % state, + data=str(config_data) + ) + self.broadcast_file(file_data) - self.info("informing GUI about %d nodes and %d links" % (nn, nl)) - return replies + config_data = ConfigData() + # retrieve session configuration data + options_config = self.options.configure_request(config_data, type_flags=ConfigFlags.UPDATE.value) + self.broadcast_config(options_config) + + # retrieve session metadata + metadata_config = self.metadata.configure_request(config_data, type_flags=ConfigFlags.UPDATE.value) + self.broadcast_config(metadata_config) + + logger.info("informing GUI about %d nodes and %d links", number_nodes, number_links) class SessionConfig(ConfigurableManager, Configurable): - _name = 'session' - _type = coreapi.CORE_TLV_REG_UTILITY - _confmatrix = [ - ("controlnet", coreapi.CONF_DATA_TYPE_STRING, '', '', - 'Control network'), - ("controlnet_updown_script", coreapi.CONF_DATA_TYPE_STRING, '', '', - 'Control network script'), - ("enablerj45", coreapi.CONF_DATA_TYPE_BOOL, '1', 'On,Off', - 'Enable RJ45s'), - ("preservedir", coreapi.CONF_DATA_TYPE_BOOL, '0', 'On,Off', - 'Preserve session dir'), - ("enablesdt", coreapi.CONF_DATA_TYPE_BOOL, '0', 'On,Off', - 'Enable SDT3D output'), - ("sdturl", coreapi.CONF_DATA_TYPE_STRING, Sdt.DEFAULT_SDT_URL, '', - 'SDT3D URL'), - ] - _confgroups = "Options:1-%d" % len(_confmatrix) + """ + Session configuration object. + """ + name = "session" + config_type = RegisterTlvs.UTILITY.value + config_matrix = [ + ("controlnet", ConfigDataTypes.STRING.value, "", "", "Control network"), + ("controlnet_updown_script", ConfigDataTypes.STRING.value, "", "", "Control network script"), + ("enablerj45", ConfigDataTypes.BOOL.value, "1", "On,Off", "Enable RJ45s"), + ("preservedir", ConfigDataTypes.BOOL.value, "0", "On,Off", "Preserve session dir"), + ("enablesdt", ConfigDataTypes.BOOL.value, "0", "On,Off", "Enable SDT3D output"), + ("sdturl", ConfigDataTypes.STRING.value, Sdt.DEFAULT_SDT_URL, "", "SDT3D URL"), + ] + config_groups = "Options:1-%d" % len(config_matrix) def __init__(self, session): - ConfigurableManager.__init__(self, session) - session.broker.handlers.add(self.handledistributed) + """ + Creates a SessionConfig instance. 
+ + :param core.session.Session session: session this manager is tied to + :return: nothing + """ + ConfigurableManager.__init__(self) + self.session = session + self.session.broker.handlers.add(self.handle_distributed) self.reset() def reset(self): + """ + Reset the session configuration. + + :return: nothing + """ defaults = self.getdefaultvalues() - for k in self.getnames(): + for key in self.getnames(): # value may come from config file - v = self.session.getcfgitem(k) - if v is None: - v = self.valueof(k, defaults) - v = self.offontobool(v) - setattr(self, k, v) + value = self.session.get_config_item(key) + if value is None: + value = self.valueof(key, defaults) + value = self.offontobool(value) + setattr(self, key, value) - def configure_values(self, msg, values): - return self.configure_values_keyvalues(msg, values, self, - self.getnames()) + def configure_values(self, config_data): + """ + Handle configuration values. - def configure_request(self, msg, typeflags = coreapi.CONF_TYPE_FLAGS_NONE): - nodenum = msg.gettlv(coreapi.CORE_TLV_CONF_NODE) + :param core.conf.ConfigData config_data: configuration data for carrying out a configuration + :return: None + """ + return self.configure_values_keyvalues(config_data, self, self.getnames()) + + def configure_request(self, config_data, type_flags=ConfigFlags.NONE.value): + """ + Handle a configuration request. + + :param core.conf.ConfigData config_data: configuration data for carrying out a configuration + :param type_flags: + :return: + """ + node_id = config_data.node values = [] - for k in self.getnames(): - v = getattr(self, k) - if v is None: - v = "" - values.append("%s" % v) - return self.toconfmsg(0, nodenum, typeflags, values) - def handledistributed(self, msg): - ''' Handle the session options config message as it has reached the + for key in self.getnames(): + value = getattr(self, key) + if value is None: + value = "" + values.append("%s" % value) + + return self.config_data(0, node_id, type_flags, values) + + # TODO: update logic to not be tied to old style messages + def handle_distributed(self, message): + """ + Handle the session options config message as it has reached the broker. Options requiring modification for distributed operation should be handled here. 
- ''' + + :param message: message to handle + :return: nothing + """ if not self.session.master: return - if msg.msgtype != coreapi.CORE_API_CONF_MSG or \ - msg.gettlv(coreapi.CORE_TLV_CONF_OBJ) != "session": + + if message.message_type != MessageTypes.CONFIG.value or message.get_tlv(ConfigTlvs.OBJECT.value) != "session": return - values_str = msg.gettlv(coreapi.CORE_TLV_CONF_VALUES) + + values_str = message.get_tlv(ConfigTlvs.VALUES.value) if values_str is None: return - values = values_str.split('|') - if not self.haskeyvalues(values): - return - for v in values: - key, value = v.split('=', 1) - if key == "controlnet": - self.handledistributedcontrolnet(msg, values, values.index(v)) - def handledistributedcontrolnet(self, msg, values, idx): - ''' Modify Config Message if multiple control network prefixes are + value_strings = values_str.split('|') + if not self.haskeyvalues(value_strings): + return + + for value_string in value_strings: + key, value = value_string.split('=', 1) + if key == "controlnet": + self.handle_distributed_control_net(message, value_strings, value_strings.index(value_string)) + + # TODO: update logic to not be tied to old style messages + def handle_distributed_control_net(self, message, values, index): + """ + Modify Config Message if multiple control network prefixes are defined. Map server names to prefixes and repack the message before it is forwarded to slave servers. - ''' - kv = values[idx] - key, value = kv.split('=', 1) - controlnets = value.split() - if len(controlnets) < 2: - return # multiple controlnet prefixes do not exist + + :param message: message to handle + :param list values: values to handle + :param int index: index ti get key value from + :return: nothing + """ + key_value = values[index] + key, value = key_value.split('=', 1) + control_nets = value.split() + + if len(control_nets) < 2: + logger.warn("multiple controlnet prefixes do not exist") + return + servers = self.session.broker.getservernames() if len(servers) < 2: - return # not distributed + logger.warn("not distributed") + return + servers.remove("localhost") - servers.insert(0, "localhost") # master always gets first prefix + # master always gets first prefix + servers.insert(0, "localhost") # create list of "server1:ctrlnet1 server2:ctrlnet2 ..." - controlnets = map(lambda(x): "%s:%s" % (x[0],x[1]), - zip(servers, controlnets)) - values[idx] = "controlnet=%s" % (' '.join(controlnets)) - values_str = '|'.join(values) - msg.tlvdata[coreapi.CORE_TLV_CONF_VALUES] = values_str - msg.repack() + control_nets = map(lambda x: "%s:%s" % (x[0], x[1]), zip(servers, control_nets)) + values[index] = "controlnet=%s" % (" ".join(control_nets)) + values_str = "|".join(values) + message.tlvdata[ConfigTlvs.VALUES.value] = values_str + message.repack() class SessionMetaData(ConfigurableManager): - ''' Metadata is simply stored in a configs[] dict. Key=value pairs are + """ + Metadata is simply stored in a configs[] dict. Key=value pairs are passed in from configure messages destined to the "metadata" object. The data is not otherwise interpreted or processed. - ''' - _name = "metadata" - _type = coreapi.CORE_TLV_REG_UTILITY + """ + name = "metadata" + config_type = RegisterTlvs.UTILITY.value - def configure_values(self, msg, values): + def configure_values(self, config_data): + """ + Handle configuration values. 
+ + :param core.conf.ConfigData config_data: configuration data for carrying out a configuration + :return: None + """ + values = config_data.data_values if values is None: return None - kvs = values.split('|') - for kv in kvs: + + key_values = values.split('|') + for key_value in key_values: try: - (key, value) = kv.split('=', 1) + key, value = key_value.split('=', 1) except ValueError: - raise ValueError, "invalid key in metdata: %s" % kv - self.additem(key, value) + raise ValueError("invalid key in metdata: %s", key_value) + + self.add_item(key, value) + return None - def configure_request(self, msg, typeflags = coreapi.CONF_TYPE_FLAGS_NONE): - nodenum = msg.gettlv(coreapi.CORE_TLV_CONF_NODE) - values_str = "|".join(map(lambda(k,v): "%s=%s" % (k,v), self.items())) - return self.toconfmsg(0, nodenum, typeflags, values_str) + def configure_request(self, config_data, type_flags=ConfigFlags.NONE.value): + """ + Handle a configuration request. - def toconfmsg(self, flags, nodenum, typeflags, values_str): - tlvdata = "" - if nodenum is not None: - tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_NODE, - nodenum) - tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_OBJ, - self._name) - tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_TYPE, - typeflags) - datatypes = tuple( map(lambda(k,v): coreapi.CONF_DATA_TYPE_STRING, - self.items()) ) - tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_DATA_TYPES, - datatypes) - tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_VALUES, - values_str) - msg = coreapi.CoreConfMessage.pack(flags, tlvdata) - return msg + :param core.conf.ConfigData config_data: configuration data for carrying out a configuration + :param int type_flags: configuration request flag value + :return: configuration data + :rtype: ConfigData + """ + node_number = config_data.node + values_str = "|".join(map(lambda item: "%s=%s" % item, self.items())) + return self.config_data(0, node_number, type_flags, values_str) - def additem(self, key, value): + def config_data(self, flags, node_id, type_flags, values_str): + """ + Retrieve configuration data object, leveraging provided data. + + :param flags: configuration data flags + :param int node_id: node id + :param type_flags: type flags + :param values_str: values string + :return: configuration data + :rtype: ConfigData + """ + data_types = tuple(map(lambda (k, v): ConfigDataTypes.STRING.value, self.items())) + + return ConfigData( + message_type=flags, + node=node_id, + object=self.name, + type=type_flags, + data_types=data_types, + data_values=values_str + ) + + def add_item(self, key, value): + """ + Add configuration key/value pair. + + :param key: configuration key + :param value: configuration value + :return: nothing + """ self.configs[key] = value - def getitem(self, key): + def get_item(self, key): + """ + Retrieve configuration value. + + :param key: key for configuration value to retrieve + :return: configuration value + """ try: return self.configs[key] except KeyError: - pass + logger.exception("error retrieving item from configs: %s", key) + return None def items(self): + """ + Retrieve configuration items. 
+ + :return: configuration items iterator + """ return self.configs.iteritems() -atexit.register(Session.atexit) + +# configure the program exit function to run +atexit.register(SessionManager.on_exit) diff --git a/daemon/core/xen/xen.py b/daemon/core/xen/xen.py index eae3f770..8d7e4ac5 100644 --- a/daemon/core/xen/xen.py +++ b/daemon/core/xen/xen.py @@ -1,51 +1,43 @@ -# -# CORE -# Copyright (c)2011-2012 the Boeing Company. -# See the LICENSE file included in this distribution. -# -''' -xen.py: implementation of the XenNode and XenVEth classes that support +""" +xen.py: implementation of the XenNode and XenVEth classes that support generating Xen domUs based on an ISO image and persistent configuration area -''' +""" -from core.netns.vnet import * +import base64 +import os +import shutil +import string +import subprocess +import sys +import threading + +import crypt + +from core import constants +from core.coreobj import PyCoreNetIf +from core.coreobj import PyCoreNode +from core.enumerations import NodeTypes +from core.misc import log +from core.misc import nodeutils +from core.misc import utils from core.netns.vnode import LxcNode -from core.coreobj import PyCoreObj, PyCoreNode, PyCoreNetIf -from core.misc.ipaddr import * -from core.misc.utils import * -from core.constants import * -from core.api import coreapi -from core.netns.vif import TunTap -from core.emane.nodes import EmaneNode + +logger = log.get_logger(__name__) try: import parted -except ImportError, e: - #print "Failed to load parted Python module required by Xen support." - #print "Error was:", e - raise ImportError +except ImportError: + logger.error("failed to import parted for xen nodes") -import base64 -import crypt -import subprocess try: import fsimage -except ImportError, e: +except ImportError: # fix for fsimage under Ubuntu sys.path.append("/usr/lib/xen-default/lib/python") try: import fsimage - except ImportError, e: - #print "Failed to load fsimage Python module required by Xen support." 
- #print "Error was:", e - raise ImportError - - - -import os -import time -import shutil -import string + except ImportError: + logger.error("failed to import fsimage for xen nodes") # XXX move these out to config file AWK_PATH = "/bin/awk" @@ -60,11 +52,12 @@ SED_PATH = "/bin/sed" XM_PATH = "/usr/sbin/xm" UDEVADM_PATH = "/sbin/udevadm" + class XenVEth(PyCoreNetIf): - def __init__(self, node, name, localname, mtu = 1500, net = None, - start = True, hwaddr = None): + def __init__(self, node, name, localname, mtu=1500, net=None, + start=True, hwaddr=None): # note that net arg is ignored - PyCoreNetIf.__init__(self, node = node, name = name, mtu = mtu) + PyCoreNetIf.__init__(self, node=node, name=name, mtu=mtu) self.localname = localname self.up = False self.hwaddr = hwaddr @@ -76,8 +69,8 @@ class XenVEth(PyCoreNetIf): 'vifname=%s' % self.localname, 'script=vif-core'] if self.hwaddr is not None: cmd.append('mac=%s' % self.hwaddr) - check_call(cmd) - check_call([IP_BIN, "link", "set", self.localname, "up"]) + subprocess.check_call(cmd) + subprocess.check_call([constants.IP_BIN, "link", "set", self.localname, "up"]) self.up = True def shutdown(self): @@ -87,28 +80,28 @@ class XenVEth(PyCoreNetIf): if self.hwaddr is not None: pass # this should be doable, but some argument isn't a string - #check_call([XM_PATH, 'network-detach', self.node.vmname, + # check_call([XM_PATH, 'network-detach', self.node.vmname, # self.hwaddr]) self.up = False class XenNode(PyCoreNode): - apitype = coreapi.CORE_NODE_XEN + apitype = NodeTypes.XEN.value FilesToIgnore = frozenset([ - #'ipforward.sh', + # 'ipforward.sh', 'quaggaboot.sh', ]) FilesRedirection = { - 'ipforward.sh' : '/core-tmp/ipforward.sh', + 'ipforward.sh': '/core-tmp/ipforward.sh', } CmdsToIgnore = frozenset([ - #'sh ipforward.sh', - #'sh quaggaboot.sh zebra', - #'sh quaggaboot.sh ospfd', - #'sh quaggaboot.sh ospf6d', + # 'sh ipforward.sh', + # 'sh quaggaboot.sh zebra', + # 'sh quaggaboot.sh ospfd', + # 'sh quaggaboot.sh ospf6d', 'killall zebra', 'killall ospfd', 'killall ospf6d', @@ -116,43 +109,39 @@ class XenNode(PyCoreNode): ]) def RedirCmd_ipforward(self): - sysctlFile = open(os.path.join(self.mountdir, self.etcdir, - 'sysctl.conf'), 'a') - p1 = subprocess.Popen([AWK_PATH, - '/^\/sbin\/sysctl -w/ {print $NF}', - os.path.join(self.nodedir, - 'core-tmp/ipforward.sh') ], - stdout=sysctlFile) + sysctlFile = open(os.path.join(self.mountdir, self.etcdir, 'sysctl.conf'), 'a') + p1 = subprocess.Popen([AWK_PATH, '/^\/sbin\/sysctl -w/ {print $NF}', + os.path.join(self.nodedir, 'core-tmp/ipforward.sh')], stdout=sysctlFile) p1.wait() sysctlFile.close() def RedirCmd_zebra(self): - check_call([SED_PATH, '-i', '-e', 's/^zebra=no/zebra=yes/', - os.path.join(self.mountdir, self.etcdir, 'quagga/daemons')]) + subprocess.check_call([SED_PATH, '-i', '-e', 's/^zebra=no/zebra=yes/', + os.path.join(self.mountdir, self.etcdir, 'quagga/daemons')]) + def RedirCmd_ospfd(self): - check_call([SED_PATH, '-i', '-e', 's/^ospfd=no/ospfd=yes/', - os.path.join(self.mountdir, self.etcdir, 'quagga/daemons')]) + subprocess.check_call([SED_PATH, '-i', '-e', 's/^ospfd=no/ospfd=yes/', + os.path.join(self.mountdir, self.etcdir, 'quagga/daemons')]) + def RedirCmd_ospf6d(self): - check_call([SED_PATH, '-i', '-e', - 's/^ospf6d=no/ospf6d=yes/', - os.path.join(self.mountdir, self.etcdir, 'quagga/daemons')]) + subprocess.check_call([SED_PATH, '-i', '-e', + 's/^ospf6d=no/ospf6d=yes/', + os.path.join(self.mountdir, self.etcdir, 'quagga/daemons')]) CmdsRedirection = { - 'sh ipforward.sh' : 
RedirCmd_ipforward, - 'sh quaggaboot.sh zebra' : RedirCmd_zebra, - 'sh quaggaboot.sh ospfd' : RedirCmd_ospfd, - 'sh quaggaboot.sh ospf6d' : RedirCmd_ospf6d, + 'sh ipforward.sh': RedirCmd_ipforward, + 'sh quaggaboot.sh zebra': RedirCmd_zebra, + 'sh quaggaboot.sh ospfd': RedirCmd_ospfd, + 'sh quaggaboot.sh ospf6d': RedirCmd_ospf6d, } # CoreNode: no __init__, take from LxcNode & SimpleLxcNode - def __init__(self, session, objid = None, name = None, - nodedir = None, bootsh = "boot.sh", verbose = False, - start = True, model = None, - vgname = None, ramsize = None, disksize = None, - isofile = None): + def __init__(self, session, objid=None, name=None, + nodedir=None, bootsh="boot.sh", start=True, model=None, + vgname=None, ramsize=None, disksize=None, + isofile=None): # SimpleLxcNode initialization - PyCoreNode.__init__(self, session = session, objid = objid, name = name, - verbose = verbose) + PyCoreNode.__init__(self, session=session, objid=objid, name=name) self.nodedir = nodedir self.model = model # indicates startup() has been invoked and disk has been initialized @@ -180,36 +169,35 @@ class XenNode(PyCoreNode): # TODO: remove this temporary hack self.FilesRedirection['/usr/local/etc/quagga/Quagga.conf'] = \ os.path.join(self.getconfigitem('mount_path'), self.etcdir, - 'quagga/Quagga.conf') + 'quagga/Quagga.conf') # LxcNode initialization # self.makenodedir() if self.nodedir is None: - self.nodedir = \ - os.path.join(session.sessiondir, self.name + ".conf") + self.nodedir = os.path.join(session.sessiondir, self.name + ".conf") self.mountdir = self.nodedir + self.getconfigitem('mount_path') if not os.path.isdir(self.mountdir): os.makedirs(self.mountdir) self.tmpnodedir = True else: - raise Exception, "Xen PVM node requires a temporary nodedir" + raise Exception("Xen PVM node requires a temporary nodedir") self.tmpnodedir = False self.bootsh = bootsh if start: self.startup() def getconfigitem(self, name, default=None): - ''' Configuration items come from the xen.conf file and/or input from - the GUI, and are stored in the session using the XenConfigManager - object. self.model is used to identify particular profiles - associated with a node type in the GUI. - ''' - return self.session.xen.getconfigitem(name=name, model=self.model, - node=self, value=default) + """ + Configuration items come from the xen.conf file and/or input from + the GUI, and are stored in the session using the XenConfigManager + object. self.model is used to identify particular profiles + associated with a node type in the GUI. 
+ """ + return self.session.xen.getconfigitem(name=name, model=self.model, node=self, value=default) # from class LxcNode (also SimpleLxcNode) def startup(self): - self.warn("XEN PVM startup() called: preparing disk for %s" % self.name) + logger.warn("XEN PVM startup() called: preparing disk for %s" % self.name) self.lock.acquire() try: if self.up: @@ -217,10 +205,10 @@ class XenNode(PyCoreNode): self.createlogicalvolume() self.createpartitions() persistdev = self.createfilesystems() - check_call([MOUNT_BIN, '-t', 'ext4', persistdev, self.mountdir]) + subprocess.check_call([constants.MOUNT_BIN, '-t', 'ext4', persistdev, self.mountdir]) self.untarpersistent(tarname=self.getconfigitem('persist_tar_iso'), iso=True) - self.setrootpassword(pw = self.getconfigitem('root_password')) + self.setrootpassword(pw=self.getconfigitem('root_password')) self.sethostname(old='UBASE', new=self.name) self.setupssh(keypath=self.getconfigitem('ssh_key_path')) self.createvm() @@ -230,11 +218,11 @@ class XenNode(PyCoreNode): # from class LxcNode (also SimpleLxcNode) def boot(self): - self.warn("XEN PVM boot() called") + logger.warn("XEN PVM boot() called") self.lock.acquire() if not self.up: - raise Exception, "Can't boot VM without initialized disk" + raise Exception("Can't boot VM without initialized disk") if self.booted: self.lock.release() @@ -246,18 +234,17 @@ class XenNode(PyCoreNode): self.untarpersistent(tarname=tarname, iso=False) try: - check_call([UMOUNT_BIN, self.mountdir]) + subprocess.check_call([constants.UMOUNT_BIN, self.mountdir]) self.unmount_all(self.mountdir) - check_call([UDEVADM_PATH, 'settle']) - check_call([KPARTX_PATH, '-d', self.lvpath]) + subprocess.check_call([UDEVADM_PATH, 'settle']) + subprocess.check_call([KPARTX_PATH, '-d', self.lvpath]) - #time.sleep(5) - #time.sleep(1) + # time.sleep(5) + # time.sleep(1) # unpause VM - if self.verbose: - self.warn("XEN PVM boot() unpause domU %s" % self.vmname) - mutecheck_call([XM_PATH, 'unpause', self.vmname]) + logger.warn("XEN PVM boot() unpause domU %s" % self.vmname) + utils.mutecheck_call([XM_PATH, 'unpause', self.vmname]) self.booted = True finally: @@ -265,10 +252,10 @@ class XenNode(PyCoreNode): def validate(self): self.session.services.validatenodeservices(self) - + # from class LxcNode (also SimpleLxcNode) def shutdown(self): - self.warn("XEN PVM shutdown() called") + logger.warn("XEN PVM shutdown() called") if not self.up: return self.lock.acquire() @@ -281,27 +268,25 @@ class XenNode(PyCoreNode): try: # RJE XXX what to do here if self.booted: - mutecheck_call([XM_PATH, 'destroy', self.vmname]) + utils.mutecheck_call([XM_PATH, 'destroy', self.vmname]) self.booted = False - except OSError: - pass - except subprocess.CalledProcessError: + except (OSError, subprocess.CalledProcessError): # ignore this error too, the VM may have exited already - pass + logger.exception("error during shutdown") # discard LVM volume lvmRemoveCount = 0 while os.path.exists(self.lvpath): try: - check_call([UDEVADM_PATH, 'settle']) - mutecall([LVCHANGE_PATH, '-an', self.lvpath]) + subprocess.check_call([UDEVADM_PATH, 'settle']) + utils.mutecall([LVCHANGE_PATH, '-an', self.lvpath]) lvmRemoveCount += 1 - mutecall([LVREMOVE_PATH, '-f', self.lvpath]) + utils.mutecall([LVREMOVE_PATH, '-f', self.lvpath]) except OSError: - pass - if (lvmRemoveCount > 1): - self.warn("XEN PVM shutdown() required %d lvremove " \ - "executions." 
% lvmRemoveCount) + logger.exception("error during shutdown") + + if lvmRemoveCount > 1: + logger.warn("XEN PVM shutdown() required %d lvremove executions." % lvmRemoveCount) self._netif.clear() del self.session @@ -313,117 +298,124 @@ class XenNode(PyCoreNode): self.lock.release() def createlogicalvolume(self): - ''' Create a logical volume for this Xen domU. Called from startup(). - ''' + """ + Create a logical volume for this Xen domU. Called from startup(). + """ if os.path.exists(self.lvpath): raise Exception, "LVM volume already exists" - mutecheck_call([LVCREATE_PATH, '--size', self.disksize, - '--name', self.lvname, self.vgname]) + utils.mutecheck_call([LVCREATE_PATH, '--size', self.disksize, + '--name', self.lvname, self.vgname]) def createpartitions(self): - ''' Partition the LVM volume into persistent and swap partitions - using the parted module. - ''' + """ + Partition the LVM volume into persistent and swap partitions + using the parted module. + """ dev = parted.Device(path=self.lvpath) dev.removeFromCache() disk = parted.freshDisk(dev, 'msdos') constraint = parted.Constraint(device=dev) - persist_size = int(0.75 * constraint.maxSize); + persist_size = int(0.75 * constraint.maxSize) self.createpartition(device=dev, disk=disk, start=1, - end=(persist_size - 1) , type="ext4") + end=persist_size - 1, type="ext4") self.createpartition(device=dev, disk=disk, start=persist_size, - end=(constraint.maxSize - 1) , type="linux-swap(v1)") + end=constraint.maxSize - 1, type="linux-swap(v1)") disk.commit() def createpartition(self, device, disk, start, end, type): - ''' Create a single partition of the specified type and size and add - it to the disk object, using the parted module. - ''' + """ + Create a single partition of the specified type and size and add + it to the disk object, using the parted module. + """ geo = parted.Geometry(device=device, start=start, end=end) fs = parted.FileSystem(type=type, geometry=geo) - part = parted.Partition(disk=disk, fs=fs, type=parted.PARTITION_NORMAL, - geometry=geo) + part = parted.Partition(disk=disk, fs=fs, type=parted.PARTITION_NORMAL, geometry=geo) constraint = parted.Constraint(exactGeom=geo) disk.addPartition(partition=part, constraint=constraint) def createfilesystems(self): - ''' Make an ext4 filesystem and swap space. Return the device name for - the persistent partition so we can mount it. - ''' + """ + Make an ext4 filesystem and swap space. Return the device name for + the persistent partition so we can mount it. + """ output = subprocess.Popen([KPARTX_PATH, '-l', self.lvpath], stdout=subprocess.PIPE).communicate()[0] lines = output.splitlines() persistdev = '/dev/mapper/' + lines[0].strip().split(' ')[0].strip() swapdev = '/dev/mapper/' + lines[1].strip().split(' ')[0].strip() - check_call([KPARTX_PATH, '-a', self.lvpath]) - mutecheck_call([MKFSEXT4_PATH, '-L', 'persist', persistdev]) - mutecheck_call([MKSWAP_PATH, '-f', '-L', 'swap', swapdev]) + subprocess.check_call([KPARTX_PATH, '-a', self.lvpath]) + utils.mutecheck_call([MKFSEXT4_PATH, '-L', 'persist', persistdev]) + utils.mutecheck_call([MKSWAP_PATH, '-f', '-L', 'swap', swapdev]) return persistdev def untarpersistent(self, tarname, iso): - ''' Unpack a persistent template tar file to the mounted mount dir. - Uses fsimage library to read from an ISO file. - ''' - tarname = tarname.replace('%h', self.name) # filename may use hostname + """ + Unpack a persistent template tar file to the mounted mount dir. + Uses fsimage library to read from an ISO file. 
+ """ + tarname = tarname.replace('%h', self.name) # filename may use hostname if iso: try: fs = fsimage.open(self.isofile, 0) - except IOError, e: - self.warn("Failed to open ISO file: %s (%s)" % (self.isofile,e)) + except IOError: + logger.exception("Failed to open ISO file: %s", self.isofile) return try: - tardata = fs.open_file(tarname).read(); - except IOError, e: - self.warn("Failed to open tar file: %s (%s)" % (tarname, e)) + tardata = fs.open_file(tarname).read() + except IOError: + logger.exception("Failed to open tar file: %s", tarname) return finally: - del fs; + del fs else: try: f = open(tarname) tardata = f.read() f.close() - except IOError, e: - self.warn("Failed to open tar file: %s (%s)" % (tarname, e)) + except IOError: + logger.exception("Failed to open tar file: %s", tarname) return p = subprocess.Popen([TAR_PATH, '-C', self.mountdir, '--numeric-owner', - '-xf', '-'], stdin=subprocess.PIPE) + '-xf', '-'], stdin=subprocess.PIPE) p.communicate(input=tardata) p.wait() def setrootpassword(self, pw): - ''' Set the root password by updating the shadow password file that - is on the filesystem mounted in the temporary area. - ''' - saltedpw = crypt.crypt(pw, '$6$'+base64.b64encode(os.urandom(12))) - check_call([SED_PATH, '-i', '-e', - '/^root:/s_^root:\([^:]*\):_root:' + saltedpw + ':_', - os.path.join(self.mountdir, self.etcdir, 'shadow')]) + """ + Set the root password by updating the shadow password file that + is on the filesystem mounted in the temporary area. + """ + saltedpw = crypt.crypt(pw, '$6$' + base64.b64encode(os.urandom(12))) + subprocess.check_call([SED_PATH, '-i', '-e', + '/^root:/s_^root:\([^:]*\):_root:' + saltedpw + ':_', + os.path.join(self.mountdir, self.etcdir, 'shadow')]) def sethostname(self, old, new): - ''' Set the hostname by updating the hostname and hosts files that - reside on the filesystem mounted in the temporary area. - ''' - check_call([SED_PATH, '-i', '-e', 's/%s/%s/' % (old, new), - os.path.join(self.mountdir, self.etcdir, 'hostname')]) - check_call([SED_PATH, '-i', '-e', 's/%s/%s/' % (old, new), - os.path.join(self.mountdir, self.etcdir, 'hosts')]) + """ + Set the hostname by updating the hostname and hosts files that + reside on the filesystem mounted in the temporary area. + """ + subprocess.check_call([SED_PATH, '-i', '-e', 's/%s/%s/' % (old, new), + os.path.join(self.mountdir, self.etcdir, 'hostname')]) + subprocess.check_call([SED_PATH, '-i', '-e', 's/%s/%s/' % (old, new), + os.path.join(self.mountdir, self.etcdir, 'hosts')]) def setupssh(self, keypath): - ''' Configure SSH access by installing host keys and a system-wide - authorized_keys file. - ''' + """ + Configure SSH access by installing host keys and a system-wide + authorized_keys file. 
+ """ sshdcfg = os.path.join(self.mountdir, self.etcdir, 'ssh/sshd_config') - check_call([SED_PATH, '-i', '-e', - 's/PermitRootLogin no/PermitRootLogin yes/', sshdcfg]) + subprocess.check_call([SED_PATH, '-i', '-e', + 's/PermitRootLogin no/PermitRootLogin yes/', sshdcfg]) sshdir = os.path.join(self.getconfigitem('mount_path'), self.etcdir, 'ssh') - sshdir = sshdir.replace('/','\\/') # backslash slashes for use in sed - check_call([SED_PATH, '-i', '-e', - 's/#AuthorizedKeysFile %h\/.ssh\/authorized_keys/' + \ - 'AuthorizedKeysFile ' + sshdir + '\/authorized_keys/', - sshdcfg]) - for f in ('ssh_host_rsa_key','ssh_host_rsa_key.pub','authorized_keys'): + sshdir = sshdir.replace('/', '\\/') # backslash slashes for use in sed + subprocess.check_call([SED_PATH, '-i', '-e', + 's/#AuthorizedKeysFile %h\/.ssh\/authorized_keys/' + \ + 'AuthorizedKeysFile ' + sshdir + '\/authorized_keys/', + sshdcfg]) + for f in 'ssh_host_rsa_key', 'ssh_host_rsa_key.pub', 'authorized_keys': src = os.path.join(keypath, f) dst = os.path.join(self.mountdir, self.etcdir, 'ssh', f) shutil.copy(src, dst) @@ -431,10 +423,11 @@ class XenNode(PyCoreNode): os.chmod(dst, 0600) def createvm(self): - ''' Instantiate a *paused* domU VM - Instantiate it now, so we can add network interfaces, - pause it so we can have the filesystem open for configuration. - ''' + """ + Instantiate a *paused* domU VM + Instantiate it now, so we can add network interfaces, + pause it so we can have the filesystem open for configuration. + """ args = [XM_PATH, 'create', os.devnull, '--paused'] args.extend(['name=' + self.vmname, 'memory=' + str(self.ramsize)]) args.append('disk=tap:aio:' + self.isofile + ',hda,r') @@ -445,110 +438,109 @@ class XenNode(PyCoreNode): for action in ('poweroff', 'reboot', 'suspend', 'crash', 'halt'): args.append('on_%s=destroy' % action) args.append('extra=' + self.getconfigitem('xm_create_extra')) - mutecheck_call(args) + utils.mutecheck_call(args) # from class LxcNode def privatedir(self, path): - #self.warn("XEN PVM privatedir() called") + # self.warn("XEN PVM privatedir() called") # Do nothing, Xen PVM nodes are fully private pass # from class LxcNode - def opennodefile(self, filename, mode = "w"): - self.warn("XEN PVM opennodefile() called") - raise Exception, "Can't open VM file with opennodefile()" + def opennodefile(self, filename, mode="w"): + logger.warn("XEN PVM opennodefile() called") + raise Exception("Can't open VM file with opennodefile()") # from class LxcNode # open a file on a paused Xen node - def openpausednodefile(self, filename, mode = "w"): + def openpausednodefile(self, filename, mode="w"): dirname, basename = os.path.split(filename) if not basename: raise ValueError, "no basename for filename: " + filename if dirname and dirname[0] == "/": dirname = dirname[1:] - #dirname = dirname.replace("/", ".") + # dirname = dirname.replace("/", ".") dirname = os.path.join(self.nodedir, dirname) if not os.path.isdir(dirname): - os.makedirs(dirname, mode = 0755) + os.makedirs(dirname, mode=0755) hostfilename = os.path.join(dirname, basename) return open(hostfilename, mode) # from class LxcNode - def nodefile(self, filename, contents, mode = 0644): + def nodefile(self, filename, contents, mode=0644): if filename in self.FilesToIgnore: - #self.warn("XEN PVM nodefile(filename=%s) ignored" % [filename]) + # self.warn("XEN PVM nodefile(filename=%s) ignored" % [filename]) return if filename in self.FilesRedirection: redirFilename = self.FilesRedirection[filename] - self.warn("XEN PVM nodefile(filename=%s) redirected 
to %s" % (filename, redirFilename))
+            logger.warn("XEN PVM nodefile(filename=%s) redirected to %s" % (filename, redirFilename))
             filename = redirFilename
-
-        self.warn("XEN PVM nodefile(filename=%s) called" % [filename])
+
+        logger.warn("XEN PVM nodefile(filename=%s) called" % [filename])
         self.lock.acquire()
         if not self.up:
             self.lock.release()
-            raise Exception, "Can't access VM file as VM disk isn't ready"
-            return
+            raise Exception("Can't access VM file as VM disk isn't ready")
 
         if self.booted:
             self.lock.release()
-            raise Exception, "Can't access VM file as VM is already running"
-            return
+            raise Exception("Can't access VM file as VM is already running")
 
         try:
             f = self.openpausednodefile(filename, "w")
             f.write(contents)
             os.chmod(f.name, mode)
             f.close()
-            self.info("created nodefile: '%s'; mode: 0%o" % (f.name, mode))
+            logger.info("created nodefile: '%s'; mode: 0%o" % (f.name, mode))
         finally:
             self.lock.release()
 
     # from class SimpleLxcNode
     def alive(self):
         # is VM running?
-        return False # XXX
+        return False  # XXX
 
-    def cmd(self, args, wait = True):
+    def cmd(self, args, wait=True):
         cmdAsString = string.join(args, ' ')
         if cmdAsString in self.CmdsToIgnore:
-            #self.warn("XEN PVM cmd(args=[%s]) called and ignored" % cmdAsString)
+            # self.warn("XEN PVM cmd(args=[%s]) called and ignored" % cmdAsString)
             return 0
         if cmdAsString in self.CmdsRedirection:
             self.CmdsRedirection[cmdAsString](self)
             return 0
-        self.warn("XEN PVM cmd(args=[%s]) called, but not yet implemented" % cmdAsString)
-        return 0
+        logger.warn("XEN PVM cmd(args=[%s]) called, but not yet implemented" % cmdAsString)
+        return 0
 
     def cmdresult(self, args):
         cmdAsString = string.join(args, ' ')
         if cmdAsString in self.CmdsToIgnore:
-            #self.warn("XEN PVM cmd(args=[%s]) called and ignored" % cmdAsString)
-            return (0, "")
-        self.warn("XEN PVM cmdresult(args=[%s]) called, but not yet implemented" % cmdAsString)
-        return (0, "")
+            # self.warn("XEN PVM cmd(args=[%s]) called and ignored" % cmdAsString)
+            return 0, ""
+        logger.warn("XEN PVM cmdresult(args=[%s]) called, but not yet implemented" % cmdAsString)
+        return 0, ""
 
     def popen(self, args):
         cmdAsString = string.join(args, ' ')
-        self.warn("XEN PVM popen(args=[%s]) called, but not yet implemented" % cmdAsString)
+        logger.warn("XEN PVM popen(args=[%s]) called, but not yet implemented" % cmdAsString)
         return
 
     def icmd(self, args):
         cmdAsString = string.join(args, ' ')
-        self.warn("XEN PVM icmd(args=[%s]) called, but not yet implemented" % cmdAsString)
+        logger.warn("XEN PVM icmd(args=[%s]) called, but not yet implemented" % cmdAsString)
         return
 
-    def term(self, sh = "/bin/sh"):
-        self.warn("XEN PVM term() called, but not yet implemented")
+    def term(self, sh="/bin/sh"):
+        logger.warn("XEN PVM term() called, but not yet implemented")
         return
 
-    def termcmdstring(self, sh = "/bin/sh"):
-        ''' We may add 'sudo' to the command string because the GUI runs as a
-            normal user. Use SSH if control interface is available, otherwise
-            use Xen console with a keymapping for easy login.
-        '''
+    def termcmdstring(self, sh="/bin/sh"):
+        """
+        We may add 'sudo' to the command string because the GUI runs as a
+        normal user. Use SSH if control interface is available, otherwise
+        use Xen console with a keymapping for easy login.
+        """
         controlifc = None
         for ifc in self.netifs():
             if hasattr(ifc, 'control') and ifc.control == True:
@@ -560,33 +552,22 @@ class XenNode(PyCoreNode):
             controlip = controlifc.addrlist[0].split('/')[0]
             cmd += "-e ssh root@%s" % controlip
             return cmd
-        # otherwise use 'xm console'
-        #pw = self.getconfigitem('root_password')
-        #cmd += "-xrm 'XTerm*VT100.translations: #override F1: "
-        #cmd += "string(\"root\\n\") \\n F2: string(\"%s\\n\")' " % pw
+        # otherwise use 'xm console'
+        # pw = self.getconfigitem('root_password')
+        # cmd += "-xrm 'XTerm*VT100.translations: #override F1: "
+        # cmd += "string(\"root\\n\") \\n F2: string(\"%s\\n\")' " % pw
         cmd += "-e sudo %s console %s" % (XM_PATH, self.vmname)
         return cmd
 
-    def shcmd(self, cmdstr, sh = "/bin/sh"):
-        self.warn("XEN PVM shcmd(args=[%s]) called, but not yet implemented" % cmdstr)
+    def shcmd(self, cmdstr, sh="/bin/sh"):
+        logger.warn("XEN PVM shcmd(args=[%s]) called, but not yet implemented" % cmdstr)
         return
 
-    # from class SimpleLxcNode
-    def info(self, msg):
-        if self.verbose:
-            print "%s: %s" % (self.name, msg)
-            sys.stdout.flush()
-
-    # from class SimpleLxcNode
-    def warn(self, msg):
-        print >> sys.stderr, "%s: %s" % (self.name, msg)
-        sys.stderr.flush()
-
     def mount(self, source, target):
-        self.warn("XEN PVM Nodes can't bind-mount filesystems")
+        logger.warn("XEN PVM Nodes can't bind-mount filesystems")
 
     def umount(self, target):
-        self.warn("XEN PVM Nodes can't bind-mount filesystems")
+        logger.warn("XEN PVM Nodes can't bind-mount filesystems")
 
     def newifindex(self):
         self.lock.acquire()
@@ -606,16 +587,16 @@ class XenNode(PyCoreNode):
             return -1
 
     def addnetif(self, netif, ifindex):
-        self.warn("XEN PVM addnetif() called")
+        logger.warn("XEN PVM addnetif() called")
         PyCoreNode.addnetif(self, netif, ifindex)
 
     def delnetif(self, ifindex):
-        self.warn("XEN PVM delnetif() called")
+        logger.warn("XEN PVM delnetif() called")
         PyCoreNode.delnetif(self, ifindex)
 
-    def newveth(self, ifindex = None, ifname = None, net = None, hwaddr = None):
-        self.warn("XEN PVM newveth(ifindex=%s, ifname=%s) called" %
-                  (ifindex, ifname))
+    def newveth(self, ifindex=None, ifname=None, net=None, hwaddr=None):
+        logger.warn("XEN PVM newveth(ifindex=%s, ifname=%s) called" %
+                    (ifindex, ifname))
 
         self.lock.acquire()
         try:
@@ -623,12 +604,12 @@ class XenNode(PyCoreNode):
                 ifindex = self.newifindex()
             if ifname is None:
                 ifname = "eth%d" % ifindex
-            sessionid = self.session.shortsessionid()
+            sessionid = self.session.short_session_id()
             name = "n%s.%s.%s" % (self.objid, ifindex, sessionid)
             localname = "n%s.%s.%s" % (self.objid, ifname, sessionid)
             ifclass = XenVEth
-            veth = ifclass(node = self, name = name, localname = localname,
-                           mtu = 1500, net = net, hwaddr = hwaddr)
+            veth = ifclass(node=self, name=name, localname=localname,
+                           mtu=1500, net=net, hwaddr=hwaddr)
 
             veth.name = ifname
             try:
@@ -641,14 +622,14 @@ class XenNode(PyCoreNode):
         finally:
             self.lock.release()
 
-    def newtuntap(self, ifindex = None, ifname = None, net = None):
-        self.warn("XEN PVM newtuntap() called but not implemented")
+    def newtuntap(self, ifindex=None, ifname=None, net=None):
+        logger.warn("XEN PVM newtuntap() called but not implemented")
 
     def sethwaddr(self, ifindex, addr):
         self._netif[ifindex].sethwaddr(addr)
         if self.up:
             pass
-            #self.cmd([IP_BIN, "link", "set", "dev", self.ifname(ifindex),
+            # self.cmd([IP_BIN, "link", "set", "dev", self.ifname(ifindex),
             #          "address", str(addr)])
 
     def addaddr(self, ifindex, addr):
@@ -662,49 +643,49 @@ class XenNode(PyCoreNode):
         try:
             self._netif[ifindex].deladdr(addr)
         except
ValueError: - self.warn("trying to delete unknown address: %s" % addr) + logger.exception("trying to delete unknown address: %s", addr) + if self.up: pass # self.cmd([IP_BIN, "addr", "del", str(addr), # "dev", self.ifname(ifindex)]) valid_deladdrtype = ("inet", "inet6", "inet6link") - def delalladdr(self, ifindex, addrtypes = valid_deladdrtype): - addr = self.getaddr(self.ifname(ifindex), rescan = True) + + def delalladdr(self, ifindex, addrtypes=valid_deladdrtype): + addr = self.getaddr(self.ifname(ifindex), rescan=True) for t in addrtypes: if t not in self.valid_deladdrtype: raise ValueError, "addr type must be in: " + \ - " ".join(self.valid_deladdrtype) + " ".join(self.valid_deladdrtype) for a in addr[t]: self.deladdr(ifindex, a) # update cached information - self.getaddr(self.ifname(ifindex), rescan = True) + self.getaddr(self.ifname(ifindex), rescan=True) # Xen PVM relies on boot process to bring up links - #def ifup(self, ifindex): + # def ifup(self, ifindex): # if self.up: # self.cmd([IP_BIN, "link", "set", self.ifname(ifindex), "up"]) - def newnetif(self, net = None, addrlist = [], hwaddr = None, - ifindex = None, ifname = None): - self.warn("XEN PVM newnetif(ifindex=%s, ifname=%s) called" % - (ifindex, ifname)) + def newnetif(self, net=None, addrlist=[], hwaddr=None, + ifindex=None, ifname=None): + logger.warn("XEN PVM newnetif(ifindex=%s, ifname=%s) called" % + (ifindex, ifname)) self.lock.acquire() if not self.up: self.lock.release() - raise Exception, "Can't access add veth as VM disk isn't ready" - return + raise Exception("Can't access add veth as VM disk isn't ready") if self.booted: self.lock.release() - raise Exception, "Can't access add veth as VM is already running" - return + raise Exception("Can't access add veth as VM is already running") try: - if isinstance(net, EmaneNode): - raise Exception, "Xen PVM doesn't yet support Emane nets" + if nodeutils.is_node(net, NodeTypes.EMANE): + raise Exception("Xen PVM doesn't yet support Emane nets") # ifindex = self.newtuntap(ifindex = ifindex, ifname = ifname, # net = net) @@ -719,8 +700,8 @@ class XenNode(PyCoreNode): # netif.addaddr(addr) # return ifindex else: - ifindex = self.newveth(ifindex = ifindex, ifname = ifname, - net = net, hwaddr = hwaddr) + ifindex = self.newveth(ifindex=ifindex, ifname=ifname, + net=net, hwaddr=hwaddr) if net is not None: self.attachnet(ifindex, net) @@ -728,24 +709,27 @@ class XenNode(PyCoreNode): self.etcdir, 'udev/rules.d/70-persistent-net.rules') f = self.openpausednodefile(rulefile, "a") - f.write('\n# Xen PVM virtual interface #%s %s with MAC address %s\n' % (ifindex, self.ifname(ifindex), hwaddr)) + f.write( + '\n# Xen PVM virtual interface #%s %s with MAC address %s\n' % (ifindex, self.ifname(ifindex), hwaddr)) # Using MAC address as we're now loading PVM net driver "early" # OLD: Would like to use MAC address, but udev isn't working with paravirtualized NICs. Perhaps the "set hw address" isn't triggering a rescan. 
- f.write('SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{address}=="%s", KERNEL=="eth*", NAME="%s"\n' % (hwaddr, self.ifname(ifindex))) - #f.write('SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", DEVPATH=="/devices/vif-%s/?*", KERNEL=="eth*", NAME="%s"\n' % (ifindex, self.ifname(ifindex))) + f.write( + 'SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{address}=="%s", KERNEL=="eth*", NAME="%s"\n' % ( + hwaddr, self.ifname(ifindex))) + # f.write('SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", DEVPATH=="/devices/vif-%s/?*", KERNEL=="eth*", NAME="%s"\n' % (ifindex, self.ifname(ifindex))) f.close() if hwaddr: self.sethwaddr(ifindex, hwaddr) - for addr in maketuple(addrlist): + for addr in utils.maketuple(addrlist): self.addaddr(ifindex, addr) - #self.ifup(ifindex) + # self.ifup(ifindex) return ifindex finally: self.lock.release() def connectnode(self, ifname, othernode, otherifname): - self.warn("XEN PVM connectnode() called") + logger.warn("XEN PVM connectnode() called") # tmplen = 8 # tmp1 = "tmp." + "".join([random.choice(string.ascii_lowercase) @@ -768,21 +752,19 @@ class XenNode(PyCoreNode): self.lock.acquire() if not self.up: self.lock.release() - raise Exception, "Can't access VM file as VM disk isn't ready" - return + raise Exception("Can't access VM file as VM disk isn't ready") if self.booted: self.lock.release() - raise Exception, "Can't access VM file as VM is already running" - return + raise Exception("Can't access VM file as VM is already running") if filename in self.FilesToIgnore: - #self.warn("XEN PVM addfile(filename=%s) ignored" % [filename]) + # self.warn("XEN PVM addfile(filename=%s) ignored" % [filename]) return if filename in self.FilesRedirection: redirFilename = self.FilesRedirection[filename] - self.warn("XEN PVM addfile(filename=%s) redirected to %s" % (filename, redirFilename)) + logger.warn("XEN PVM addfile(filename=%s) redirected to %s" % (filename, redirFilename)) filename = redirFilename try: @@ -794,24 +776,24 @@ class XenNode(PyCoreNode): fout.write(contents) os.chmod(fout.name, mode) fout.close() - self.info("created nodefile: '%s'; mode: 0%o" % (fout.name, mode)) + logger.info("created nodefile: '%s'; mode: 0%o" % (fout.name, mode)) finally: self.lock.release() - self.warn("XEN PVM addfile(filename=%s) called" % [filename]) + logger.warn("XEN PVM addfile(filename=%s) called" % [filename]) - #shcmd = "mkdir -p $(dirname '%s') && mv '%s' '%s' && sync" % \ + # shcmd = "mkdir -p $(dirname '%s') && mv '%s' '%s' && sync" % \ # (filename, srcname, filename) - #self.shcmd(shcmd) + # self.shcmd(shcmd) def unmount_all(self, path): - ''' Namespaces inherit the host mounts, so we need to ensure that all - namespaces have unmounted our temporary mount area so that the - kpartx command will succeed. - ''' + """ + Namespaces inherit the host mounts, so we need to ensure that all + namespaces have unmounted our temporary mount area so that the + kpartx command will succeed. + """ # Session.bootnodes() already has self.session._objslock - for o in self.session.objs(): + for o in self.session.objects.itervalues(): if not isinstance(o, LxcNode): continue o.umount(path) - diff --git a/daemon/core/xen/xenconfig.py b/daemon/core/xen/xenconfig.py index 7e9ad829..c5cbd390 100644 --- a/daemon/core/xen/xenconfig.py +++ b/daemon/core/xen/xenconfig.py @@ -1,11 +1,4 @@ -# -# CORE -# Copyright (c)2011-2012 the Boeing Company. -# See the LICENSE file included in this distribution. 
-# -# author: Jeff Ahrenholz -# -''' +""" xenconfig.py: Implementation of the XenConfigManager class for managing configurable items for XenNodes. @@ -17,72 +10,93 @@ Node type config: XenConfigManager.configs[0] = (type='mytype', values) All nodes of this type have this config. Node-specific config: XenConfigManager.configs[nodenumber] = (type, values) The node having this specific number has this config. -''' +""" -import sys, os, threading, subprocess, time, string import ConfigParser -from xml.dom.minidom import parseString, Document -from core.constants import * +import os +import string + +from core import constants from core.api import coreapi -from core.conf import ConfigurableManager, Configurable +from core.conf import Configurable +from core.conf import ConfigurableManager +from core.enumerations import ConfigDataTypes +from core.enumerations import ConfigFlags +from core.enumerations import ConfigTlvs +from core.enumerations import RegisterTlvs +from core.misc import log + +logger = log.get_logger(__name__) class XenConfigManager(ConfigurableManager): - ''' Xen controller object. Lives in a Session instance and is used for - building Xen profiles. - ''' - _name = "xen" - _type = coreapi.CORE_TLV_REG_EMULSRV - + """ + Xen controller object. Lives in a Session instance and is used for + building Xen profiles. + """ + name = "xen" + config_type = RegisterTlvs.EMULATION_SERVER.value + def __init__(self, session): - ConfigurableManager.__init__(self, session) - self.verbose = self.session.getcfgitembool('verbose', False) - self.default_config = XenDefaultConfig(session, objid=None) + """ + Creates a XenConfigManager instance. + + :param core.session.Session session: session this manager is tied to + :return: nothing + """ + ConfigurableManager.__init__(self) + self.default_config = XenDefaultConfig(session, object_id=None) self.loadconfigfile() def setconfig(self, nodenum, conftype, values): - ''' add configuration values for a node to a dictionary; values are - usually received from a Configuration Message, and may refer to a - node for which no object exists yet - ''' - if nodenum is None: - nodenum = 0 # used for storing the global default config + """ + add configuration values for a node to a dictionary; values are + usually received from a Configuration Message, and may refer to a + node for which no object exists yet + """ + if nodenum is None: + nodenum = 0 # used for storing the global default config return ConfigurableManager.setconfig(self, nodenum, conftype, values) def getconfig(self, nodenum, conftype, defaultvalues): - ''' get configuration values for a node; if the values don't exist in - our dictionary then return the default values supplied; if conftype - is None then we return a match on any conftype. - ''' - if nodenum is None: - nodenum = 0 # used for storing the global default config - return ConfigurableManager.getconfig(self, nodenum, conftype, - defaultvalues) + """ + get configuration values for a node; if the values don't exist in + our dictionary then return the default values supplied; if conftype + is None then we return a match on any conftype. 
+ """ + if nodenum is None: + nodenum = 0 # used for storing the global default config + return ConfigurableManager.getconfig(self, nodenum, conftype, defaultvalues) def clearconfig(self, nodenum): - ''' remove configuration values for a node - ''' + """ + remove configuration values for a node + """ ConfigurableManager.clearconfig(self, nodenum) if 0 in self.configs: self.configs.pop(0) - def configure(self, session, msg): - ''' Handle configuration messages for global Xen config. - ''' - return self.default_config.configure(self, msg) + def configure(self, session, config_data): + """ + Handle configuration messages for global Xen config. + + :param core.conf.ConfigData config_data: configuration data for carrying out a configuration + """ + return self.default_config.configure(self, config_data) def loadconfigfile(self, filename=None): - ''' Load defaults from the /etc/core/xen.conf file into dict object. - ''' + """ + Load defaults from the /etc/core/xen.conf file into dict object. + """ if filename is None: - filename = os.path.join(CORE_CONF_DIR, 'xen.conf') + filename = os.path.join(constants.CORE_CONF_DIR, 'xen.conf') cfg = ConfigParser.SafeConfigParser() if filename not in cfg.read(filename): - self.session.warn("unable to read Xen config file: %s" % filename) + logger.warn("unable to read Xen config file: %s" % filename) return section = "xen" if not cfg.has_section(section): - self.session.warn("%s is missing a xen section!" % filename) + logger.warn("%s is missing a xen section!" % filename) return self.configfile = dict(cfg.items(section)) # populate default config items from config file entries @@ -92,13 +106,14 @@ class XenConfigManager(ConfigurableManager): if names[i] in self.configfile: vals[i] = self.configfile[names[i]] # this sets XenConfigManager.configs[0] = (type='xen', vals) - self.setconfig(None, self.default_config._name, vals) + self.setconfig(None, self.default_config.name, vals) def getconfigitem(self, name, model=None, node=None, value=None): - ''' Get a config item of the given name, first looking for node-specific - configuration, then model specific, and finally global defaults. - If a value is supplied, it will override any stored config. - ''' + """ + Get a config item of the given name, first looking for node-specific + configuration, then model specific, and finally global defaults. + If a value is supplied, it will override any stored config. + """ if value is not None: return value n = None @@ -111,8 +126,8 @@ class XenConfigManager(ConfigurableManager): defaultvalues=None) if v is None: # get item from default config for the machine type - (t, v) = self.getconfig(nodenum=None, - conftype=self.default_config._name, + (t, v) = self.getconfig(nodenum=None, + conftype=self.default_config.name, defaultvalues=None) confignames = self.default_config.getnames() @@ -124,142 +139,136 @@ class XenConfigManager(ConfigurableManager): if name in self.configfile: return self.configfile[name] else: - #self.warn("missing config item '%s'" % name) + # logger.warn("missing config item '%s'" % name) return None class XenConfig(Configurable): - ''' Manage Xen configuration profiles. - ''' - - @classmethod - def configure(cls, xen, msg): - ''' Handle configuration messages for setting up a model. - Similar to Configurable.configure(), but considers opaque data - for indicating node types. 
- ''' - reply = None - nodenum = msg.gettlv(coreapi.CORE_TLV_CONF_NODE) - objname = msg.gettlv(coreapi.CORE_TLV_CONF_OBJ) - conftype = msg.gettlv(coreapi.CORE_TLV_CONF_TYPE) - opaque = msg.gettlv(coreapi.CORE_TLV_CONF_OPAQUE) + """ + Manage Xen configuration profiles. + """ - nodetype = objname + @classmethod + def configure(cls, xen, config_data): + """ + Handle configuration messages for setting up a model. + Similar to Configurable.configure(), but considers opaque data + for indicating node types. + + :param xen: xen instance to configure + :param core.conf.ConfigData config_data: configuration data for carrying out a configuration + """ + reply = None + node_id = config_data.node + object_name = config_data.object + config_type = config_data.type + opaque = config_data.opaque + values_str = config_data.data_values + + nodetype = object_name if opaque is not None: opaque_items = opaque.split(':') if len(opaque_items) != 2: - xen.warn("xen config: invalid opaque data in conf message") + logger.warn("xen config: invalid opaque data in conf message") return None nodetype = opaque_items[1] - if xen.verbose: - xen.info("received configure message for %s" % nodetype) - if conftype == coreapi.CONF_TYPE_FLAGS_REQUEST: - if xen.verbose: - xen.info("replying to configure request for %s " % nodetype) + logger.info("received configure message for %s", nodetype) + if config_type == ConfigFlags.REQUEST.value: + logger.info("replying to configure request for %s " % nodetype) # when object name is "all", the reply to this request may be None # if this node has not been configured for this model; otherwise we # reply with the defaults for this model - if objname == "all": - typeflags = coreapi.CONF_TYPE_FLAGS_UPDATE + if object_name == "all": + typeflags = ConfigFlags.UPDATE.value else: - typeflags = coreapi.CONF_TYPE_FLAGS_NONE - values = xen.getconfig(nodenum, nodetype, defaultvalues=None)[1] + typeflags = ConfigFlags.NONE.value + values = xen.getconfig(node_id, nodetype, defaultvalues=None)[1] if values is None: # get defaults from default "xen" config which includes - # settings from both cls._confdefaultvalues and xen.conf + # settings from both cls._confdefaultvalues and xen.conf defaults = cls.getdefaultvalues() - values = xen.getconfig(nodenum, cls._name, defaults)[1] + values = xen.getconfig(node_id, cls.name, defaults)[1] if values is None: return None # reply with config options - if nodenum is None: - nodenum = 0 - reply = cls.toconfmsg(0, nodenum, typeflags, nodetype, values) - elif conftype == coreapi.CONF_TYPE_FLAGS_RESET: - if objname == "all": - xen.clearconfig(nodenum) - #elif conftype == coreapi.CONF_TYPE_FLAGS_UPDATE: + if node_id is None: + node_id = 0 + reply = cls.config_data(0, node_id, typeflags, nodetype, values) + elif config_type == ConfigFlags.RESET.value: + if object_name == "all": + xen.clearconfig(node_id) + # elif conftype == coreapi.CONF_TYPE_FLAGS_UPDATE: else: # store the configuration values for later use, when the XenNode # object has been created - if objname is None: - xen.info("no configuration object for node %s" % nodenum) + if object_name is None: + logger.info("no configuration object for node %s" % node_id) return None - values_str = msg.gettlv(coreapi.CORE_TLV_CONF_VALUES) if values_str is None: # use default or preconfigured values defaults = cls.getdefaultvalues() - values = xen.getconfig(nodenum, cls._name, defaults)[1] + values = xen.getconfig(node_id, cls.name, defaults)[1] else: # use new values supplied from the conf message values = 
values_str.split('|')

-            xen.setconfig(nodenum, nodetype, values)
+            xen.setconfig(node_id, nodetype, values)
+
         return reply

     @classmethod
-    def toconfmsg(cls, flags, nodenum, typeflags, nodetype, values):
-        ''' Convert this class to a Config API message. Some TLVs are defined
-            by the class, but node number, conf type flags, and values must
-            be passed in.
-        '''
+    def config_data(cls, flags, node_id, type_flags, nodetype, values):
+        """
+        Convert this class to a Config API message. Some TLVs are defined
+        by the class, but node number, conf type flags, and values must
+        be passed in.
+        """
         values_str = string.join(values, '|')
         tlvdata = ""
-        tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_NODE, nodenum)
-        tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_OBJ,
-                                            cls._name)
-        tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_TYPE,
-                                            typeflags)
-        datatypes = tuple( map(lambda x: x[1], cls._confmatrix) )
-        tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_DATA_TYPES,
-                                            datatypes)
-        tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_VALUES,
-                                            values_str)
-        captions = reduce( lambda a,b: a + '|' + b, \
-            map(lambda x: x[4], cls._confmatrix))
-        tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_CAPTIONS,
-                                            captions)
-        possiblevals = reduce( lambda a,b: a + '|' + b, \
-            map(lambda x: x[3], cls._confmatrix))
-        tlvdata += coreapi.CoreConfTlv.pack(
-            coreapi.CORE_TLV_CONF_POSSIBLE_VALUES, possiblevals)
-        if cls._bitmap is not None:
-            tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_BITMAP,
-                                                cls._bitmap)
-        if cls._confgroups is not None:
-            tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_GROUPS,
-                                                cls._confgroups)
-        opaque = "%s:%s" % (cls._name, nodetype)
-        tlvdata += coreapi.CoreConfTlv.pack(coreapi.CORE_TLV_CONF_OPAQUE,
-                                            opaque)
+        tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.NODE.value, node_id)
+        tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.OBJECT.value, cls.name)
+        tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.TYPE.value, type_flags)
+        datatypes = tuple(map(lambda x: x[1], cls.config_matrix))
+        tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.DATA_TYPES.value, datatypes)
+        tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.VALUES.value, values_str)
+        captions = reduce(lambda a, b: a + '|' + b, map(lambda x: x[4], cls.config_matrix))
+        tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.CAPTIONS.value, captions)
+        possiblevals = reduce(lambda a, b: a + '|' + b, map(lambda x: x[3], cls.config_matrix))
+        tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.POSSIBLE_VALUES.value, possiblevals)
+        if cls.bitmap is not None:
+            tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.BITMAP.value, cls.bitmap)
+        if cls.config_groups is not None:
+            tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.GROUPS.value, cls.config_groups)
+        opaque = "%s:%s" % (cls.name, nodetype)
+        tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.OPAQUE.value, opaque)
        msg = coreapi.CoreConfMessage.pack(flags, tlvdata)
        return msg


 class XenDefaultConfig(XenConfig):
-    ''' Global default Xen configuration options.
-    '''
-    _name = "xen"
+    """
+    Global default Xen configuration options.
+ """ + name = "xen" # Configuration items: # ('name', 'type', 'default', 'possible-value-list', 'caption') - _confmatrix = [ - ('ram_size', coreapi.CONF_DATA_TYPE_STRING, '256', '', + config_matrix = [ + ('ram_size', ConfigDataTypes.STRING.value, '256', '', 'ram size (MB)'), - ('disk_size', coreapi.CONF_DATA_TYPE_STRING, '256M', '', + ('disk_size', ConfigDataTypes.STRING.value, '256M', '', 'disk size (use K/M/G suffix)'), - ('iso_file', coreapi.CONF_DATA_TYPE_STRING, '', '', + ('iso_file', ConfigDataTypes.STRING.value, '', '', 'iso file'), - ('mount_path', coreapi.CONF_DATA_TYPE_STRING, '', '', + ('mount_path', ConfigDataTypes.STRING.value, '', '', 'mount path'), - ('etc_path', coreapi.CONF_DATA_TYPE_STRING, '', '', + ('etc_path', ConfigDataTypes.STRING.value, '', '', 'etc path'), - ('persist_tar_iso', coreapi.CONF_DATA_TYPE_STRING, '', '', + ('persist_tar_iso', ConfigDataTypes.STRING.value, '', '', 'iso persist tar file'), - ('persist_tar', coreapi.CONF_DATA_TYPE_STRING, '', '', + ('persist_tar', ConfigDataTypes.STRING.value, '', '', 'persist tar file'), - ('root_password', coreapi.CONF_DATA_TYPE_STRING, 'password', '', + ('root_password', ConfigDataTypes.STRING.value, 'password', '', 'root password'), - ] - - _confgroups = "domU properties:1-%d" % len(_confmatrix) + ] + config_groups = "domU properties:1-%d" % len(config_matrix) diff --git a/daemon/data/core.conf b/daemon/data/core.conf index ebee4778..8ee2a532 100644 --- a/daemon/data/core.conf +++ b/daemon/data/core.conf @@ -23,29 +23,29 @@ quagga_sbin_search = "/usr/local/sbin /usr/sbin /usr/lib/quagga" # and not named 'services' #custom_services_dir = /home/username/.core/myservices # -# uncomment to establish a standalone control backchannel for accessing nodes +# uncomment to establish a standalone control backchannel for accessing nodes # (overriden by the session option of the same name) -#controlnet = 172.16.0.0/24 -# +#controlnet = 172.16.0.0/24 +# # # uncomment and edit to establish a distributed control backchannel -#controlnet = core1:172.16.1.0/24 core2:172.16.2.0/24 core3:172.16.3.0/24 core4 :172.16.4.0/24 core5:172.16.5.0/24 +#controlnet = core1:172.16.1.0/24 core:172.16.2.0/24 core3:172.16.3.0/24 core4 :172.16.4.0/24 core5:172.16.5.0/24 # uncomment and edit to establish distributed auxiliary control channels. -#controlnet1 = core1:172.17.1.0/24 core2:172.17.2.0/24 core3:172.17.3.0/24 core4 :172.17.4.0/24 core5:172.17.5.0/24 -#controlnet2 = core1:172.18.1.0/24 core2:172.18.2.0/24 core3:172.18.3.0/24 core4 :172.18.4.0/24 core5:172.18.5.0/24 -#controlnet3 = core1:172.19.1.0/24 core2:172.19.2.0/24 core3:172.19.3.0/24 core4 :172.19.4.0/24 core5:172.19.5.0/24 +#controlnet1 = core1:172.17.1.0/24 core:172.17.2.0/24 core3:172.17.3.0/24 core4 :172.17.4.0/24 core5:172.17.5.0/24 +#controlnet2 = core1:172.18.1.0/24 core:172.18.2.0/24 core3:172.18.3.0/24 core4 :172.18.4.0/24 core5:172.18.5.0/24 +#controlnet3 = core1:172.19.1.0/24 core:172.19.2.0/24 core3:172.19.3.0/24 core4 :172.19.4.0/24 core5:172.19.5.0/24 -# uncomment and edit to assign host interfaces to auxilary control channels -# for use in connecting with other servers in a distributed environments. +# uncomment and edit to assign host interfaces to auxilary control channels +# for use in connecting with other servers in a distributed environments. # Note: This is valid for auxiliary control channels only. The primary control -# channel, specified by 'controlnet', is tunneled across servers. +# channel, specified by 'controlnet', is tunneled across servers. 
#controlnetif1 = eth2 #controlnetif2 = eth3 #controlnetif3 = eth4 -# optional controlnet configuration script for controlnet, uncomment to -# activate, and likely edit the script. +# optional controlnet configuration script for controlnet, uncomment to +# activate, and likely edit the script. # Note: the controlnet_updown_script is not used by the auxiliary control # channels. # controlnet_updown_script = /usr/local/share/core/examples/controlnet_updown @@ -62,4 +62,4 @@ emane_models = RfPipe, Ieee80211abg, CommEffect, Bypass, Tdma #emane_log_level = 2 emane_realtime = True -aux_request_handler = core.addons.api2handler.CoreApi2RequestHandler:12222 +#aux_request_handler = core.addons.api2handler.CoreApi2RequestHandler:12222 diff --git a/daemon/examples/netns/basicrange.py b/daemon/examples/netns/basicrange.py index 0b93400c..d8fdd869 100755 --- a/daemon/examples/netns/basicrange.py +++ b/daemon/examples/netns/basicrange.py @@ -8,65 +8,83 @@ # -import optparse, sys, os, datetime, time +import datetime +import optparse +import sys +import time -from core import pycore -from core.misc import ipaddr -from core.misc.utils import mutecall +from core.misc import ipaddress, nodeutils +from core.misc import nodemaps from core.mobility import BasicRangeModel +from core.netns.nodes import WlanNode from core.netns.vnet import EbtablesQueue +from core.netns.vnode import LxcNode +from core.session import Session # node list - global so you can play using 'python -i' # e.g. >>> n[0].session.shutdown() n = [] + def test(options): - prefix = ipaddr.IPv4Prefix("10.83.0.0/16") - session = pycore.Session(persistent = True) + prefix = ipaddress.Ipv4Prefix("10.83.0.0/16") + session = Session(1, persistent=True) if options.enablesdt: - session.location.setrefgeo(47.57917,-122.13232,50.0) # GUI default + # GUI default + session.location.setrefgeo(47.57917, -122.13232, 50.0) session.location.refscale = 100.0 session.options.enablesdt = True session.options.sdturl = options.sdturl wlanid = options.numnodes + 1 - net = session.addobj(cls = pycore.nodes.WlanNode, name = "wlan%d" % wlanid, - objid = wlanid, verbose = True) + net = session.add_object( + cls=WlanNode, + name="wlan%d" % wlanid, + objid=wlanid + ) + values = list(BasicRangeModel.getdefaultvalues()) - #values[0] = 5000000 # 5000km range + # values[0] = 5000000 # 5000km range net.setmodel(BasicRangeModel, values) for i in xrange(1, options.numnodes + 1): - tmp = session.addobj(cls = pycore.nodes.LxcNode, name = "n%d" % i, - objid = i) - tmp.newnetif(net, ["%s/%s" % (prefix.addr(i), prefix.prefixlen)]) + node = session.add_object(cls=LxcNode, name="n%d" % i, objid=i) + address = "%s/%s" % (prefix.addr(i), prefix.prefixlen) + print "setting node address: %s - %s" % (node.objid, address) + node.newnetif(net, [address]) # set increasing Z coordinates - tmp.setposition(10, 10, 100*i) - n.append(tmp) + node.setposition(10, 10, 100) + n.append(node) # example setting node n2 to a high altitude - #n[1].setposition(10, 10, 2000000) # 2000km - #session.sdt.updatenode(n[1].objid, 0, 10, 10, 2000000) + # n[1].setposition(10, 10, 2000000) # 2000km + # session.sdt.updatenode(n[1].objid, 0, 10, 10, 2000000) + + # launches terminal for the first node + # n[0].term("bash") + n[0].icmd(["ping", "-c", "5", "127.0.0.1"]) - n[0].term("bash") # wait for rate seconds to allow ebtables commands to commit time.sleep(EbtablesQueue.rate) - #session.shutdown() + + raw_input("press enter to exit") + session.shutdown() + def main(): usagestr = "usage: %prog [-h] [options] [args]" - 
parser = optparse.OptionParser(usage = usagestr) + parser = optparse.OptionParser(usage=usagestr) - parser.set_defaults(numnodes = 2, enablesdt = False, - sdturl = "tcp://127.0.0.1:50000/") - parser.add_option("-n", "--numnodes", dest = "numnodes", type = int, - help = "number of nodes to test; default = %s" % - parser.defaults["numnodes"]) - parser.add_option("-s", "--sdt", dest = "enablesdt", action = "store_true", - help = "enable SDT output") - parser.add_option("-u", "--sdturl", dest = "sdturl", type = "string", - help = "URL for SDT connection, default = %s" % \ - parser.defaults["sdturl"]) + parser.set_defaults(numnodes=2, enablesdt=False, sdturl="tcp://127.0.0.1:50000/") + parser.add_option( + "-n", "--numnodes", dest="numnodes", type=int, + help="number of nodes to test; default = %s" % parser.defaults["numnodes"] + ) + parser.add_option("-s", "--sdt", dest="enablesdt", action="store_true", help="enable SDT output") + parser.add_option( + "-u", "--sdturl", dest="sdturl", type="string", + help="URL for SDT connection, default = %s" % parser.defaults["sdturl"] + ) - def usage(msg = None, err = 0): + def usage(msg=None, err=0): sys.stdout.write("\n") if msg: sys.stdout.write(msg + "\n\n") @@ -86,8 +104,12 @@ def main(): test(options) - print >> sys.stderr, \ - "elapsed time: %s" % (datetime.datetime.now() - start) + print >> sys.stderr, "elapsed time: %s" % (datetime.datetime.now() - start) + if __name__ == "__main__": + # configure nodes to use + node_map = nodemaps.CLASSIC_NODES + nodeutils.set_node_map(node_map) + main() diff --git a/daemon/examples/netns/daemonnodes.py b/daemon/examples/netns/daemonnodes.py index a3112392..0f9399bd 100755 --- a/daemon/examples/netns/daemonnodes.py +++ b/daemon/examples/netns/daemonnodes.py @@ -5,71 +5,86 @@ # A distributed example where CORE API messaging is used to create a session # on a daemon server. The daemon server defaults to 127.0.0.1:4038 -# to target a remote machine specify '-d ' parameter, it needs to be +# to target a remote machine specify "-d " parameter, it needs to be # running the daemon with listenaddr=0.0.0.0 in the core.conf file. # This script creates no nodes locally and therefore can be run as an # unprivileged user. 
-import sys, datetime, optparse, time +import datetime +import optparse +import sys -from core import pycore -from core.misc import ipaddr -from core.constants import * from core.api import coreapi +from core.api import dataconversion +from core.api.coreapi import CoreExecuteTlv +from core.enumerations import CORE_API_PORT +from core.enumerations import EventTlvs +from core.enumerations import EventTypes +from core.enumerations import ExecuteTlvs +from core.enumerations import LinkTlvs +from core.enumerations import LinkTypes +from core.enumerations import MessageFlags +from core.enumerations import MessageTypes +from core.misc import ipaddress, nodeutils, nodemaps +from core.netns import nodes # declare classes for use with Broker -import select -coreapi.add_node_class("CORE_NODE_DEF", - coreapi.CORE_NODE_DEF, pycore.nodes.CoreNode) -coreapi.add_node_class("CORE_NODE_SWITCH", - coreapi.CORE_NODE_SWITCH, pycore.nodes.SwitchNode) +from core.session import Session # node list (count from 1) n = [None] exec_num = 1 + def cmd(node, exec_cmd): - ''' + """ :param node: The node the command should be issued too :param exec_cmd: A string with the command to be run :return: Returns the result of the command - ''' + """ global exec_num # Set up the command api message - tlvdata = coreapi.CoreExecTlv.pack(coreapi.CORE_TLV_EXEC_NODE, node.objid) - tlvdata += coreapi.CoreExecTlv.pack(coreapi.CORE_TLV_EXEC_NUM, exec_num) - tlvdata += coreapi.CoreExecTlv.pack(coreapi.CORE_TLV_EXEC_CMD, exec_cmd) - msg = coreapi.CoreExecMessage.pack(coreapi.CORE_API_STR_FLAG | coreapi.CORE_API_TXT_FLAG, tlvdata) + tlvdata = CoreExecuteTlv.pack(ExecuteTlvs.NODE.value, node.objid) + tlvdata += CoreExecuteTlv.pack(ExecuteTlvs.NUMBER.value, exec_num) + tlvdata += CoreExecuteTlv.pack(ExecuteTlvs.COMMAND.value, exec_cmd) + msg = coreapi.CoreExecMessage.pack(MessageFlags.STRING.value | MessageFlags.TEXT.value, tlvdata) node.session.broker.handlerawmsg(msg) exec_num += 1 # Now wait for the response - (h, p, sock) = node.session.broker.servers['localhost'] - sock.settimeout(50.0) - msghdr = sock.recv(coreapi.CoreMessage.hdrsiz) - msgtype, msgflags, msglen = coreapi.CoreMessage.unpackhdr(msghdr) - msgdata = sock.recv(msglen) + server = node.session.broker.servers["localhost"] + server.sock.settimeout(50.0) + + # receive messages until we get our execute response + result = None + while True: + msghdr = server.sock.recv(coreapi.CoreMessage.header_len) + msgtype, msgflags, msglen = coreapi.CoreMessage.unpack_header(msghdr) + msgdata = server.sock.recv(msglen) + + # If we get the right response return the results + print "received response message: %s" % MessageTypes(msgtype) + if msgtype == MessageTypes.EXECUTE.value: + msg = coreapi.CoreExecMessage(msgflags, msghdr, msgdata) + result = msg.get_tlv(ExecuteTlvs.RESULT.value) + break + + return result - # If we get the right response return the results - if msgtype == coreapi.CORE_API_EXEC_MSG: - msg = coreapi.CoreExecMessage(msgflags, msghdr, msgdata) - return msg.gettlv(coreapi.CORE_TLV_EXEC_RESULT) - else: - return None def main(): usagestr = "usage: %prog [-n] number of nodes [-d] daemon address" - parser = optparse.OptionParser(usage = usagestr) - parser.set_defaults(numnodes = 5, daemon = '127.0.0.1:'+str(coreapi.CORE_API_PORT)) + parser = optparse.OptionParser(usage=usagestr) + parser.set_defaults(numnodes=5, daemon="127.0.0.1:" + str(CORE_API_PORT)) - parser.add_option("-n", "--numnodes", dest = "numnodes", type = int, - help = "number of nodes") - parser.add_option("-d", 
"--daemon-server", dest = "daemon", type = str, - help = "daemon server IP address") + parser.add_option("-n", "--numnodes", dest="numnodes", type=int, + help="number of nodes") + parser.add_option("-d", "--daemon-server", dest="daemon", type=str, + help="daemon server IP address") - def usage(msg = None, err = 0): + def usage(msg=None, err=0): sys.stdout.write("\n") if msg: sys.stdout.write(msg + "\n\n") @@ -85,94 +100,95 @@ def main(): usage("daemon server IP address (-d) is a required argument") for a in args: - sys.stderr.write("ignoring command line argument: '%s'\n" % a) + sys.stderr.write("ignoring command line argument: %s\n" % a) start = datetime.datetime.now() - prefix = ipaddr.IPv4Prefix("10.83.0.0/16") - session = pycore.Session(persistent=True) - if 'server' in globals(): + prefix = ipaddress.Ipv4Prefix("10.83.0.0/16") + session = Session(1, persistent=True) + if "server" in globals(): server.addsession(session) # distributed setup - connect to daemon server - daemonport = options.daemon.split(':') + daemonport = options.daemon.split(":") daemonip = daemonport[0] # Localhost is already set in the session but we change it to be the remote daemon # This stops the remote daemon trying to build a tunnel back which would fail - daemon = 'localhost' + daemon = "localhost" if len(daemonport) > 1: port = int(daemonport[1]) else: - port = coreapi.CORE_API_PORT + port = CORE_API_PORT print "connecting to daemon at %s:%d" % (daemon, port) session.broker.addserver(daemon, daemonip, port) # Set the local session id to match the port. # Not necessary but seems neater. - session.sessionid = session.broker.getserver('localhost')[2].getsockname()[1] + # session.sessionid = session.broker.getserver("localhost")[2].getsockname()[1] session.broker.setupserver(daemon) # We do not want the recvloop running as we will deal ourselves session.broker.dorecvloop = False # Change to configuration state on both machines - session.setstate(coreapi.CORE_EVENT_CONFIGURATION_STATE) - tlvdata = coreapi.CoreEventTlv.pack(coreapi.CORE_TLV_EVENT_TYPE, - coreapi.CORE_EVENT_CONFIGURATION_STATE) + session.set_state(EventTypes.CONFIGURATION_STATE.value) + tlvdata = coreapi.CoreEventTlv.pack(EventTlvs.TYPE.value, EventTypes.CONFIGURATION_STATE.value) session.broker.handlerawmsg(coreapi.CoreEventMessage.pack(0, tlvdata)) - flags = coreapi.CORE_API_ADD_FLAG - switch = pycore.nodes.SwitchNode(session = session, name='switch', start=False) - switch.setposition(x=80,y=50) + flags = MessageFlags.ADD.value + switch = nodes.SwitchNode(session=session, name="switch", start=False) + switch.setposition(x=80, y=50) switch.server = daemon - session.broker.handlerawmsg(switch.tonodemsg(flags=flags)) + switch_data = switch.data(flags) + switch_message = dataconversion.convert_node(switch_data) + session.broker.handlerawmsg(switch_message) - numberOfNodes = options.numnodes + number_of_nodes = options.numnodes - print "creating %d remote nodes with addresses from %s" % \ - (options.numnodes, prefix) + print "creating %d remote nodes with addresses from %s" % (options.numnodes, prefix) # create remote nodes via API - for i in xrange(1, numberOfNodes + 1): - tmp = pycore.nodes.CoreNode(session = session, objid = i, - name = "n%d" % i, start=False) - tmp.setposition(x=150*i,y=150) - tmp.server = daemon - session.broker.handlerawmsg(tmp.tonodemsg(flags=flags)) - n.append(tmp) + for i in xrange(1, number_of_nodes + 1): + node = nodes.CoreNode(session=session, objid=i, name="n%d" % i, start=False) + node.setposition(x=150 * i, y=150) + 
node.server = daemon + node_data = node.data(flags) + node_message = dataconversion.convert_node(node_data) + session.broker.handlerawmsg(node_message) + n.append(node) # create remote links via API - for i in xrange(1, numberOfNodes + 1): - tlvdata = coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_N1NUMBER, - switch.objid) - tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_N2NUMBER, i) - tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_TYPE, - coreapi.CORE_LINK_WIRED) - tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_IF2NUM, 0) - tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_IF2IP4, - prefix.addr(i)) - tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_IF2IP4MASK, - prefix.prefixlen) + for i in xrange(1, number_of_nodes + 1): + tlvdata = coreapi.CoreLinkTlv.pack(LinkTlvs.N1_NUMBER.value, switch.objid) + tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.N2_NUMBER.value, i) + tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.TYPE.value, LinkTypes.WIRED.value) + tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.INTERFACE2_NUMBER.value, 0) + tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.INTERFACE2_IP4.value, prefix.addr(i)) + tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.INTERFACE2_IP4_MASK.value, prefix.prefixlen) msg = coreapi.CoreLinkMessage.pack(flags, tlvdata) session.broker.handlerawmsg(msg) # We change the daemon to Instantiation state # We do not change the local session as it would try and build a tunnel and fail - tlvdata = coreapi.CoreEventTlv.pack(coreapi.CORE_TLV_EVENT_TYPE, - coreapi.CORE_EVENT_INSTANTIATION_STATE) + tlvdata = coreapi.CoreEventTlv.pack(EventTlvs.TYPE.value, EventTypes.INSTANTIATION_STATE.value) msg = coreapi.CoreEventMessage.pack(0, tlvdata) session.broker.handlerawmsg(msg) # Get the ip or last node and ping it from the first - print 'Pinging from the first to the last node' - pingip = cmd(n[-1], 'ip -4 -o addr show dev eth0').split()[3].split('/')[0] - print cmd(n[1], 'ping -c 5 ' + pingip) + print "Pinging from the first to the last node" + pingip = cmd(n[-1], "ip -4 -o addr show dev eth0").split()[3].split("/")[0] + print cmd(n[1], "ping -c 5 " + pingip) print "elapsed time: %s" % (datetime.datetime.now() - start) - print "To stop this session, use the 'core-cleanup' script on the remote daemon server." + print "To stop this session, use the core-cleanup script on the remote daemon server." + raw_input("press enter to exit") + if __name__ == "__main__" or __name__ == "__builtin__": - main() + # configure nodes to use + node_map = nodemaps.CLASSIC_NODES + nodeutils.set_node_map(node_map) + main() diff --git a/daemon/examples/netns/distributed.py b/daemon/examples/netns/distributed.py index 5badf053..62caeb92 100755 --- a/daemon/examples/netns/distributed.py +++ b/daemon/examples/netns/distributed.py @@ -9,33 +9,32 @@ # running the daemon with listenaddr=0.0.0.0 in the core.conf file. 
# -import sys, datetime, optparse, time +import datetime +import optparse +import sys -from core import pycore -from core.misc import ipaddr -from core.constants import * -from core.api import coreapi - -# declare classes for use with Broker -coreapi.add_node_class("CORE_NODE_DEF", - coreapi.CORE_NODE_DEF, pycore.nodes.CoreNode) -coreapi.add_node_class("CORE_NODE_SWITCH", - coreapi.CORE_NODE_SWITCH, pycore.nodes.SwitchNode) +from core import constants +from core.api import coreapi, dataconversion +from core.enumerations import CORE_API_PORT, EventTypes, EventTlvs, LinkTlvs, LinkTypes, MessageFlags +from core.misc import ipaddress, nodeutils, nodemaps +from core.netns import nodes +from core.session import Session # node list (count from 1) n = [None] + def main(): usagestr = "usage: %prog [-h] [options] [args]" - parser = optparse.OptionParser(usage = usagestr) - parser.set_defaults(numnodes = 5, slave = None) + parser = optparse.OptionParser(usage=usagestr) + parser.set_defaults(numnodes=5, slave=None) - parser.add_option("-n", "--numnodes", dest = "numnodes", type = int, - help = "number of nodes") - parser.add_option("-s", "--slave-server", dest = "slave", type = str, - help = "slave server IP address") + parser.add_option("-n", "--numnodes", dest="numnodes", type=int, + help="number of nodes") + parser.add_option("-s", "--slave-server", dest="slave", type=str, + help="slave server IP address") - def usage(msg = None, err = 0): + def usage(msg=None, err=0): sys.stdout.write("\n") if msg: sys.stdout.write(msg + "\n\n") @@ -55,8 +54,8 @@ def main(): start = datetime.datetime.now() - prefix = ipaddr.IPv4Prefix("10.83.0.0/16") - session = pycore.Session(persistent=True) + prefix = ipaddress.Ipv4Prefix("10.83.0.0/16") + session = Session(1, persistent=True) if 'server' in globals(): server.addsession(session) @@ -66,59 +65,53 @@ def main(): if len(slaveport) > 1: port = int(slaveport[1]) else: - port = coreapi.CORE_API_PORT + port = CORE_API_PORT print "connecting to slave at %s:%d" % (slave, port) session.broker.addserver(slave, slave, port) session.broker.setupserver(slave) - session.setstate(coreapi.CORE_EVENT_CONFIGURATION_STATE) - tlvdata = coreapi.CoreEventTlv.pack(coreapi.CORE_TLV_EVENT_TYPE, - coreapi.CORE_EVENT_CONFIGURATION_STATE) + session.set_state(EventTypes.CONFIGURATION_STATE.value) + tlvdata = coreapi.CoreEventTlv.pack(EventTlvs.TYPE.value, EventTypes.CONFIGURATION_STATE.value) session.broker.handlerawmsg(coreapi.CoreEventMessage.pack(0, tlvdata)) - switch = session.addobj(cls = pycore.nodes.SwitchNode, name = "switch") - switch.setposition(x=80,y=50) + switch = session.add_object(cls=nodes.SwitchNode, name="switch") + switch.setposition(x=80, y=50) num_local = options.numnodes / 2 - num_remote = options.numnodes / 2 + options.numnodes % 2 + num_remote = options.numnodes / 2 + options.numnodes % 2 print "creating %d (%d local / %d remote) nodes with addresses from %s" % \ (options.numnodes, num_local, num_remote, prefix) for i in xrange(1, num_local + 1): - tmp = session.addobj(cls = pycore.nodes.CoreNode, name = "n%d" % i, - objid=i) - tmp.newnetif(switch, ["%s/%s" % (prefix.addr(i), prefix.prefixlen)]) - tmp.cmd([SYSCTL_BIN, "net.ipv4.icmp_echo_ignore_broadcasts=0"]) - tmp.setposition(x=150*i,y=150) - n.append(tmp) + node = session.add_object(cls=nodes.CoreNode, name="n%d" % i, objid=i) + node.newnetif(switch, ["%s/%s" % (prefix.addr(i), prefix.prefixlen)]) + node.cmd([constants.SYSCTL_BIN, "net.ipv4.icmp_echo_ignore_broadcasts=0"]) + node.setposition(x=150 * i, y=150) + 
n.append(node) - flags = coreapi.CORE_API_ADD_FLAG + flags = MessageFlags.ADD.value session.broker.handlerawmsg(switch.tonodemsg(flags=flags)) # create remote nodes via API for i in xrange(num_local + 1, options.numnodes + 1): - tmp = pycore.nodes.CoreNode(session = session, objid = i, - name = "n%d" % i, start=False) - tmp.setposition(x=150*i,y=150) - tmp.server = slave - n.append(tmp) - session.broker.handlerawmsg(tmp.tonodemsg(flags=flags)) + node = nodes.CoreNode(session=session, objid=i, name="n%d" % i, start=False) + node.setposition(x=150 * i, y=150) + node.server = slave + n.append(node) + node_data = node.data(flags) + node_message = dataconversion.convert_node(node_data) + session.broker.handlerawmsg(node_message) # create remote links via API for i in xrange(num_local + 1, options.numnodes + 1): - tlvdata = coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_N1NUMBER, - switch.objid) - tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_N2NUMBER, i) - tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_TYPE, - coreapi.CORE_LINK_WIRED) - tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_IF2NUM, 0) - tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_IF2IP4, - prefix.addr(i)) - tlvdata += coreapi.CoreLinkTlv.pack(coreapi.CORE_TLV_LINK_IF2IP4MASK, - prefix.prefixlen) + tlvdata = coreapi.CoreLinkTlv.pack(LinkTlvs.N1_NUMBER.value, switch.objid) + tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.N2_NUMBER.value, i) + tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.TYPE.value, LinkTypes.WIRED.value) + tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.INTERFACE2_NUMBER.value, 0) + tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.INTERFACE2_IP4.value, prefix.addr(i)) + tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.INTERFACE2_IP4_MASK.value, prefix.prefixlen) msg = coreapi.CoreLinkMessage.pack(flags, tlvdata) session.broker.handlerawmsg(msg) session.instantiate() - tlvdata = coreapi.CoreEventTlv.pack(coreapi.CORE_TLV_EVENT_TYPE, - coreapi.CORE_EVENT_INSTANTIATION_STATE) + tlvdata = coreapi.CoreEventTlv.pack(EventTlvs.TYPE.value, EventTypes.INSTANTIATION_STATE.value) msg = coreapi.CoreEventMessage.pack(0, tlvdata) session.broker.handlerawmsg(msg) @@ -132,6 +125,10 @@ def main(): print "To stop this session, use the 'core-cleanup' script on this server" print "and on the remote slave server." -if __name__ == "__main__" or __name__ == "__builtin__": - main() +if __name__ == "__main__" or __name__ == "__builtin__": + # configure nodes to use + node_map = nodemaps.CLASSIC_NODES + nodeutils.set_node_map(node_map) + + main() diff --git a/daemon/examples/netns/emane80211.py b/daemon/examples/netns/emane80211.py index 1a494721..857b577d 100755 --- a/daemon/examples/netns/emane80211.py +++ b/daemon/examples/netns/emane80211.py @@ -6,25 +6,31 @@ # Example CORE Python script that attaches N nodes to an EMANE 802.11abg # network. One of the parameters is changed, the pathloss mode. 
-import sys, datetime, optparse +import datetime +import optparse +import sys -from core import pycore -from core.misc import ipaddr -from core.constants import * +from core import constants from core.emane.ieee80211abg import EmaneIeee80211abgModel +from core.emane.nodes import EmaneNode +from core.misc import ipaddress, nodeutils, nodemaps +from core.netns import nodes # node list (count from 1) +from core.session import Session + n = [None] + def main(): usagestr = "usage: %prog [-h] [options] [args]" - parser = optparse.OptionParser(usage = usagestr) - parser.set_defaults(numnodes = 5) + parser = optparse.OptionParser(usage=usagestr) + parser.set_defaults(numnodes=5) - parser.add_option("-n", "--numnodes", dest = "numnodes", type = int, - help = "number of nodes") + parser.add_option("-n", "--numnodes", dest="numnodes", type=int, + help="number of nodes") - def usage(msg = None, err = 0): + def usage(msg=None, err=0): sys.stdout.write("\n") if msg: sys.stdout.write(msg + "\n\n") @@ -43,44 +49,44 @@ def main(): start = datetime.datetime.now() # IP subnet - prefix = ipaddr.IPv4Prefix("10.83.0.0/16") + prefix = ipaddress.Ipv4Prefix("10.83.0.0/16") # session with some EMANE initialization cfg = {'verbose': 'false'} - session = pycore.Session(cfg = cfg, persistent = True) + session = Session(1, config=cfg, persistent=True) session.master = True - session.location.setrefgeo(47.57917,-122.13232,2.00000) + session.location.setrefgeo(47.57917, -122.13232, 2.00000) session.location.refscale = 150.0 - session.cfg['emane_models'] = "RfPipe, Ieee80211abg, Bypass" + session.config['emane_models'] = "RfPipe, Ieee80211abg, Bypass" session.emane.loadmodels() if 'server' in globals(): server.addsession(session) # EMANE WLAN print "creating EMANE WLAN wlan1" - wlan = session.addobj(cls = pycore.nodes.EmaneNode, name = "wlan1") - wlan.setposition(x=80,y=50) + wlan = session.add_object(cls=EmaneNode, name="wlan1") + wlan.setposition(x=80, y=50) names = EmaneIeee80211abgModel.getnames() values = list(EmaneIeee80211abgModel.getdefaultvalues()) # TODO: change any of the EMANE 802.11 parameter values here for i in range(0, len(names)): print "EMANE 80211 \"%s\" = \"%s\"" % (names[i], values[i]) try: - values[ names.index('pathlossmode') ] = '2ray' + values[names.index('pathlossmode')] = '2ray' except ValueError: - values[ names.index('propagationmodel') ] = '2ray' - - session.emane.setconfig(wlan.objid, EmaneIeee80211abgModel._name, values) + values[names.index('propagationmodel')] = '2ray' + + session.emane.setconfig(wlan.objid, EmaneIeee80211abgModel.name, values) services_str = "zebra|OSPFv3MDR|IPForward" print "creating %d nodes with addresses from %s" % \ (options.numnodes, prefix) for i in xrange(1, options.numnodes + 1): - tmp = session.addobj(cls = pycore.nodes.CoreNode, name = "n%d" % i, - objid=i) + tmp = session.add_object(cls=nodes.CoreNode, name="n%d" % i, + objid=i) tmp.newnetif(wlan, ["%s/%s" % (prefix.addr(i), prefix.prefixlen)]) - tmp.cmd([SYSCTL_BIN, "net.ipv4.icmp_echo_ignore_broadcasts=0"]) - tmp.setposition(x=150*i,y=150) - session.services.addservicestonode(tmp, "", services_str, verbose=False) + tmp.cmd([constants.SYSCTL_BIN, "net.ipv4.icmp_echo_ignore_broadcasts=0"]) + tmp.setposition(x=150 * i, y=150) + session.services.addservicestonode(tmp, "", services_str) n.append(tmp) # this starts EMANE, etc. 
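# A hedged aside on the hunk above: EMANE models are still configured through
# parallel name/value lists. Only getnames(), getdefaultvalues() and
# session.emane.setconfig() are taken from the patch itself; the dict view
# below is an illustrative sketch for readability, not part of the change.
#
#     names = list(EmaneIeee80211abgModel.getnames())
#     values = list(EmaneIeee80211abgModel.getdefaultvalues())
#     config = dict(zip(names, values))
#     key = "pathlossmode" if "pathlossmode" in config else "propagationmodel"
#     config[key] = "2ray"
#     session.emane.setconfig(wlan.objid, EmaneIeee80211abgModel.name,
#                             [config[name] for name in names])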
@@ -92,6 +98,10 @@ def main(): print "elapsed time: %s" % (datetime.datetime.now() - start) -if __name__ == "__main__" or __name__ == "__builtin__": - main() +if __name__ == "__main__" or __name__ == "__builtin__": + # configure nodes to use + node_map = nodemaps.CLASSIC_NODES + nodeutils.set_node_map(node_map) + + main() diff --git a/daemon/examples/netns/howmanynodes.py b/daemon/examples/netns/howmanynodes.py index ab3f94cc..90f8b727 100755 --- a/daemon/examples/netns/howmanynodes.py +++ b/daemon/examples/netns/howmanynodes.py @@ -5,49 +5,48 @@ # # author: Jeff Ahrenholz # -''' + +""" howmanynodes.py - This is a CORE script that creates network namespace nodes having one virtual Ethernet interface connected to a bridge. It continues to add nodes until an exception occurs. The number of nodes per bridge can be -specified. -''' +specified. +""" -import optparse, sys, os, datetime, time, shutil -try: - from core import pycore -except ImportError: - # hack for Fedora autoconf that uses the following pythondir: - if "/usr/lib/python2.6/site-packages" in sys.path: - sys.path.append("/usr/local/lib/python2.6/site-packages") - if "/usr/lib64/python2.6/site-packages" in sys.path: - sys.path.append("/usr/local/lib64/python2.6/site-packages") - if "/usr/lib/python2.7/site-packages" in sys.path: - sys.path.append("/usr/local/lib/python2.7/site-packages") - if "/usr/lib64/python2.7/site-packages" in sys.path: - sys.path.append("/usr/local/lib64/python2.7/site-packages") - from core import pycore -from core.misc import ipaddr -from core.constants import * +import datetime +import optparse +import shutil +import sys +import time + +from core import constants +from core.misc import ipaddress, nodeutils, nodemaps +from core.netns import nodes +from core.session import Session GBD = 1024.0 * 1024.0 + def linuxversion(): - ''' Return a string having the Linux kernel version. - ''' - f = open('/proc/version', 'r') + """ Return a string having the Linux kernel version. + """ + f = open("/proc/version", "r") v = f.readline().split() - version_str = ' '.join(v[:3]) + version_str = " ".join(v[:3]) f.close() return version_str -MEMKEYS = ('total', 'free', 'buff', 'cached', 'stotal', 'sfree') + +MEMKEYS = ("total", "free", "buff", "cached", "stotal", "sfree") + + def memfree(): - ''' Returns kilobytes memory [total, free, buff, cached, stotal, sfree]. + """ Returns kilobytes memory [total, free, buff, cached, stotal, sfree]. 
useful stats are: free memory = free + buff + cached swap used = stotal - sfree - ''' - f = open('/proc/meminfo', 'r') + """ + f = open("/proc/meminfo", "r") lines = f.readlines() f.close() kbs = {} @@ -55,20 +54,21 @@ def memfree(): kbs[k] = 0 for l in lines: if l[:9] == "MemTotal:": - kbs['total'] = int(l.split()[1]) + kbs["total"] = int(l.split()[1]) elif l[:8] == "MemFree:": - kbs['free'] = int(l.split()[1]) + kbs["free"] = int(l.split()[1]) elif l[:8] == "Buffers:": - kbs['buff'] = int(l.split()[1]) + kbs["buff"] = int(l.split()[1]) elif l[:8] == "Cached:": - kbs['cache'] = int(l.split()[1]) + kbs["cache"] = int(l.split()[1]) elif l[:10] == "SwapTotal:": - kbs['stotal'] = int(l.split()[1]) + kbs["stotal"] = int(l.split()[1]) elif l[:9] == "SwapFree:": - kbs['sfree'] = int(l.split()[1]) + kbs["sfree"] = int(l.split()[1]) break return kbs + # node list (count from 1) nodelist = [None] switchlist = [] @@ -76,30 +76,30 @@ switchlist = [] def main(): usagestr = "usage: %prog [-h] [options] [args]" - parser = optparse.OptionParser(usage = usagestr) - parser.set_defaults(waittime = 0.2, numnodes = 0, bridges = 0, retries = 0, - logfile = None, services = None) + parser = optparse.OptionParser(usage=usagestr) + parser.set_defaults(waittime=0.2, numnodes=0, bridges=0, retries=0, + logfile=None, services=None) - parser.add_option("-w", "--waittime", dest = "waittime", type = float, - help = "number of seconds to wait between node creation" \ - " (default = %s)" % parser.defaults["waittime"]) - parser.add_option("-n", "--numnodes", dest = "numnodes", type = int, - help = "number of nodes (default = unlimited)") - parser.add_option("-b", "--bridges", dest = "bridges", type = int, - help = "number of nodes per bridge; 0 = one bridge " \ - "(def. = %s)" % parser.defaults["bridges"]) - parser.add_option("-r", "--retry", dest = "retries", type = int, - help = "number of retries on error (default = %s)" % \ - parser.defaults["retries"]) - parser.add_option("-l", "--log", dest = "logfile", type = str, - help = "log memory usage to this file (default = %s)" % \ - parser.defaults["logfile"]) - parser.add_option("-s", "--services", dest = "services", type = str, - help = "pipe-delimited list of services added to each " \ - "node (default = %s)\n(Example: 'zebra|OSPFv2|OSPFv3|" \ - "IPForward')" % parser.defaults["services"]) + parser.add_option("-w", "--waittime", dest="waittime", type=float, + help="number of seconds to wait between node creation" \ + " (default = %s)" % parser.defaults["waittime"]) + parser.add_option("-n", "--numnodes", dest="numnodes", type=int, + help="number of nodes (default = unlimited)") + parser.add_option("-b", "--bridges", dest="bridges", type=int, + help="number of nodes per bridge; 0 = one bridge " \ + "(def. 
= %s)" % parser.defaults["bridges"]) + parser.add_option("-r", "--retry", dest="retries", type=int, + help="number of retries on error (default = %s)" % \ + parser.defaults["retries"]) + parser.add_option("-l", "--log", dest="logfile", type=str, + help="log memory usage to this file (default = %s)" % \ + parser.defaults["logfile"]) + parser.add_option("-s", "--services", dest="services", type=str, + help="pipe-delimited list of services added to each " + "node (default = %s)\n(Example: zebra|OSPFv2|OSPFv3|" + "IPForward)" % parser.defaults["services"]) - def usage(msg = None, err = 0): + def usage(msg=None, err=0): sys.stdout.write("\n") if msg: sys.stdout.write(msg + "\n\n") @@ -109,16 +109,16 @@ def main(): (options, args) = parser.parse_args() for a in args: - sys.stderr.write("ignoring command line argument: '%s'\n" % a) + sys.stderr.write("ignoring command line argument: %s\n" % a) start = datetime.datetime.now() - prefix = ipaddr.IPv4Prefix("10.83.0.0/16") + prefix = ipaddress.Ipv4Prefix("10.83.0.0/16") print "Testing how many network namespace nodes this machine can create." print " - %s" % linuxversion() mem = memfree() print " - %.02f GB total memory (%.02f GB swap)" % \ - (mem['total']/GBD, mem['stotal']/GBD) + (mem["total"] / GBD, mem["stotal"] / GBD) print " - using IPv4 network prefix %s" % prefix print " - using wait time of %s" % options.waittime print " - using %d nodes per bridge" % options.bridges @@ -132,11 +132,11 @@ def main(): lfp = open(options.logfile, "a") lfp.write("# log from howmanynodes.py %s\n" % time.ctime()) lfp.write("# options = %s\n#\n" % options) - lfp.write("# numnodes,%s\n" % ','.join(MEMKEYS)) + lfp.write("# numnodes,%s\n" % ",".join(MEMKEYS)) lfp.flush() - session = pycore.Session(persistent=True) - switch = session.addobj(cls = pycore.nodes.SwitchNode) + session = Session(1, persistent=True) + switch = session.add_object(cls=nodes.SwitchNode) switchlist.append(switch) print "Added bridge %s (%d)." % (switch.brname, len(switchlist)) @@ -147,33 +147,32 @@ def main(): # optionally add a bridge (options.bridges nodes per bridge) try: if options.bridges > 0 and switch.numnetif() >= options.bridges: - switch = session.addobj(cls = pycore.nodes.SwitchNode) + switch = session.add_object(cls=nodes.SwitchNode) switchlist.append(switch) print "\nAdded bridge %s (%d) for node %d." 
% \ - (switch.brname, len(switchlist), i) + (switch.brname, len(switchlist), i) except Exception, e: print "At %d bridges (%d nodes) caught exception:\n%s\n" % \ - (len(switchlist), i-1, e) + (len(switchlist), i - 1, e) break # create a node try: - n = session.addobj(cls = pycore.nodes.LxcNode, name = "n%d" % i) + n = session.add_object(cls=nodes.LxcNode, name="n%d" % i) n.newnetif(switch, ["%s/%s" % (prefix.addr(i), prefix.prefixlen)]) - n.cmd([SYSCTL_BIN, "net.ipv4.icmp_echo_ignore_broadcasts=0"]) + n.cmd([constants.SYSCTL_BIN, "net.ipv4.icmp_echo_ignore_broadcasts=0"]) if options.services is not None: - session.services.addservicestonode(n, "", options.services, - verbose=False) + session.services.addservicestonode(n, "", options.services) n.boot() nodelist.append(n) if i % 25 == 0: print "\n%s nodes created " % i, mem = memfree() - free = mem['free'] + mem['buff'] + mem['cached'] - swap = mem['stotal'] - mem['sfree'] - print "(%.02f/%.02f GB free/swap)" % (free/GBD , swap/GBD), + free = mem["free"] + mem["buff"] + mem["cached"] + swap = mem["stotal"] - mem["sfree"] + print "(%.02f/%.02f GB free/swap)" % (free / GBD, swap / GBD), if lfp: lfp.write("%d," % i) - lfp.write("%s\n" % ','.join(str(mem[x]) for x in MEMKEYS)) + lfp.write("%s\n" % ",".join(str(mem[x]) for x in MEMKEYS)) lfp.flush() else: sys.stdout.write(".") @@ -183,7 +182,7 @@ def main(): print "At %d nodes caught exception:\n" % i, e if retry_count > 0: print "\nWill retry creating node %d." % i - shutil.rmtree(n.nodedir, ignore_errors = True) + shutil.rmtree(n.nodedir, ignore_errors=True) retry_count -= 1 i -= 1 time.sleep(options.waittime) @@ -205,5 +204,10 @@ def main(): print "elapsed time: %s" % (datetime.datetime.now() - start) print "Use the core-cleanup script to remove nodes and bridges." + if __name__ == "__main__": + # configure nodes to use + node_map = nodemaps.CLASSIC_NODES + nodeutils.set_node_map(node_map) + main() diff --git a/daemon/examples/netns/iperf-performance-chain.py b/daemon/examples/netns/iperf-performance-chain.py index b905174d..d2ac98ba 100755 --- a/daemon/examples/netns/iperf-performance-chain.py +++ b/daemon/examples/netns/iperf-performance-chain.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/python # Copyright (c)2013 the Boeing Company. # See the LICENSE file included in this distribution. @@ -14,24 +14,29 @@ # Use core-cleanup to clean up after this script as the session is left running. 
# -import sys, datetime, optparse +import datetime +import optparse +import sys -from core import pycore -from core.misc import ipaddr -from core.constants import * +from core import constants +from core.misc import ipaddress, nodeutils, nodemaps +from core.netns import nodes # node list (count from 1) +from core.session import Session + n = [None] + def main(): usagestr = "usage: %prog [-h] [options] [args]" - parser = optparse.OptionParser(usage = usagestr) - parser.set_defaults(numnodes = 5) + parser = optparse.OptionParser(usage=usagestr) + parser.set_defaults(numnodes=5) - parser.add_option("-n", "--numnodes", dest = "numnodes", type = int, - help = "number of nodes") + parser.add_option("-n", "--numnodes", dest="numnodes", type=int, + help="number of nodes") - def usage(msg = None, err = 0): + def usage(msg=None, err=0): sys.stdout.write("\n") if msg: sys.stdout.write(msg + "\n\n") @@ -52,48 +57,51 @@ def main(): start = datetime.datetime.now() - session = pycore.Session(persistent=True) + session = Session(1, persistent=True) if 'server' in globals(): server.addsession(session) - print "creating %d nodes" % options.numnodes + print "creating %d nodes" % options.numnodes left = None prefix = None for i in xrange(1, options.numnodes + 1): - tmp = session.addobj(cls = pycore.nodes.CoreNode, name = "n%d" % i, - objid=i) + tmp = session.add_object(cls=nodes.CoreNode, name="n%d" % i, objid=i) if left: tmp.newnetif(left, ["%s/%s" % (prefix.addr(2), prefix.prefixlen)]) - prefix = ipaddr.IPv4Prefix("10.83.%d.0/24" % i) # limit: i < 255 - right = session.addobj(cls = pycore.nodes.PtpNet) + # limit: i < 255 + prefix = ipaddress.Ipv4Prefix("10.83.%d.0/24" % i) + right = session.add_object(cls=nodes.PtpNet) tmp.newnetif(right, ["%s/%s" % (prefix.addr(1), prefix.prefixlen)]) - tmp.cmd([SYSCTL_BIN, "net.ipv4.icmp_echo_ignore_broadcasts=0"]) - tmp.cmd([SYSCTL_BIN, "net.ipv4.conf.all.forwarding=1"]) - tmp.cmd([SYSCTL_BIN, "net.ipv4.conf.default.rp_filter=0"]) - tmp.setposition(x=100*i,y=150) + tmp.cmd([constants.SYSCTL_BIN, "net.ipv4.icmp_echo_ignore_broadcasts=0"]) + tmp.cmd([constants.SYSCTL_BIN, "net.ipv4.conf.all.forwarding=1"]) + tmp.cmd([constants.SYSCTL_BIN, "net.ipv4.conf.default.rp_filter=0"]) + tmp.setposition(x=100 * i, y=150) n.append(tmp) left = right - - prefixes = map(lambda(x): ipaddr.IPv4Prefix("10.83.%d.0/24" % x), + + prefixes = map(lambda (x): ipaddress.Ipv4Prefix("10.83.%d.0/24" % x), xrange(1, options.numnodes + 1)) - + # set up static routing in the chain for i in xrange(1, options.numnodes + 1): for j in xrange(1, options.numnodes + 1): if j < i - 1: - gw = prefixes[i-2].addr(1) + gw = prefixes[i - 2].addr(1) elif j > i: if i > len(prefixes) - 1: continue - gw = prefixes[i-1].addr(2) + gw = prefixes[i - 1].addr(2) else: continue - net = prefixes[j-1] - n[i].cmd([IP_BIN, "route", "add", str(net), "via", str(gw)]) - + net = prefixes[j - 1] + n[i].cmd([constants.IP_BIN, "route", "add", str(net), "via", str(gw)]) print "elapsed time: %s" % (datetime.datetime.now() - start) -if __name__ == "__main__" or __name__ == "__builtin__": - main() +if __name__ == "__main__" or __name__ == "__builtin__": + # configure nodes to use + node_map = nodemaps.CLASSIC_NODES + nodeutils.set_node_map(node_map) + + main() diff --git a/daemon/examples/netns/ospfmanetmdrtest.py b/daemon/examples/netns/ospfmanetmdrtest.py index 303834b7..97391cc3 100755 --- a/daemon/examples/netns/ospfmanetmdrtest.py +++ b/daemon/examples/netns/ospfmanetmdrtest.py @@ -7,26 +7,22 @@ # that all neighbor states are either 
full or two-way, and check the routes # in zebra vs those installed in the kernel. -import os, sys, random, time, optparse, datetime +import datetime +import optparse +import os +import random +import sys +import time from string import Template -try: - from core import pycore -except ImportError: - # hack for Fedora autoconf that uses the following pythondir: - if "/usr/lib/python2.6/site-packages" in sys.path: - sys.path.append("/usr/local/lib/python2.6/site-packages") - if "/usr/lib64/python2.6/site-packages" in sys.path: - sys.path.append("/usr/local/lib64/python2.6/site-packages") - if "/usr/lib/python2.7/site-packages" in sys.path: - sys.path.append("/usr/local/lib/python2.7/site-packages") - if "/usr/lib64/python2.7/site-packages" in sys.path: - sys.path.append("/usr/local/lib64/python2.7/site-packages") - from core import pycore -from core.misc import ipaddr -from core.misc.utils import mutecall + from core.constants import QUAGGA_STATE_DIR +from core.misc import ipaddress, nodeutils, nodemaps +from core.misc.utils import mutecall +from core.netns import nodes # this is the /etc/core/core.conf default +from core.session import Session + quagga_sbin_search = ("/usr/local/sbin", "/usr/sbin", "/usr/lib/quagga") quagga_path = "zebra" @@ -37,12 +33,13 @@ try: quagga_path = p break mutecall([os.path.join(quagga_path, "zebra"), - "-u", "root", "-g", "root", "-v"]) + "-u", "root", "-g", "root", "-v"]) except OSError: sys.stderr.write("ERROR: running zebra failed\n") sys.exit(1) -class ManetNode(pycore.nodes.LxcNode): + +class ManetNode(nodes.LxcNode): """ An Lxc namespace node configured for Quagga OSPFv3 MANET MDR """ conftemp = Template("""\ @@ -66,19 +63,19 @@ ip forwarding confdir = "/usr/local/etc/quagga" - def __init__(self, core, ipaddr, routerid = None, - objid = None, name = None, nodedir = None): + def __init__(self, core, ipaddr, routerid=None, + objid=None, name=None, nodedir=None): if routerid is None: routerid = ipaddr.split("/")[0] self.ipaddr = ipaddr self.routerid = routerid - pycore.nodes.LxcNode.__init__(self, core, objid, name, nodedir) + nodes.LxcNode.__init__(self, core, objid, name, nodedir) self.privatedir(self.confdir) self.privatedir(QUAGGA_STATE_DIR) def qconf(self): - return self.conftemp.substitute(ipaddr = self.ipaddr, - routerid = self.routerid) + return self.conftemp.substitute(ipaddr=self.ipaddr, + routerid=self.routerid) def config(self): filename = os.path.join(self.confdir, "Quagga.conf") @@ -87,7 +84,7 @@ ip forwarding f.close() tmp = self.bootscript() if tmp: - self.nodefile(self.bootsh, tmp, mode = 0755) + self.nodefile(self.bootsh, tmp, mode=0755) def boot(self): self.config() @@ -125,14 +122,16 @@ waitfile $STATEDIR/ospf6d.vty vtysh -b """ % (QUAGGA_STATE_DIR, quagga_path, quagga_path) + class Route(object): """ Helper class for organzing routing table entries. """ - def __init__(self, prefix = None, gw = None, metric = None): - try: - self.prefix = ipaddr.IPv4Prefix(prefix) + + def __init__(self, prefix=None, gw=None, metric=None): + try: + self.prefix = ipaddress.Ipv4Prefix(prefix) except Exception, e: raise ValueError, "Invalid prefix given to Route object: %s\n%s" % \ - (prefix, e) + (prefix, e) self.gw = gw self.metric = metric @@ -156,7 +155,8 @@ class Route(object): class ManetExperiment(object): """ A class for building an MDR network and checking and logging its state. """ - def __init__(self, options, start): + + def __init__(self, options, start): """ Initialize with options and start time. 
""" self.session = None # node list @@ -168,8 +168,8 @@ class ManetExperiment(object): self.options = options self.start = start self.logbegin() - - def info(self, msg): + + def info(self, msg): ''' Utility method for writing output to stdout. ''' print msg sys.stdout.flush() @@ -180,7 +180,7 @@ class ManetExperiment(object): print >> sys.stderr, msg sys.stderr.flush() self.log(msg) - + def logbegin(self): """ Start logging. """ self.logfp = None @@ -188,32 +188,32 @@ class ManetExperiment(object): return self.logfp = open(self.options.logfile, "w") self.log("ospfmanetmdrtest begin: %s\n" % self.start.ctime()) - + def logend(self): """ End logging. """ if not self.logfp: return end = datetime.datetime.now() self.log("ospfmanetmdrtest end: %s (%s)\n" % \ - (end.ctime(), end - self.start)) + (end.ctime(), end - self.start)) self.logfp.flush() self.logfp.close() self.logfp = None - + def log(self, msg): """ Write to the log file, if any. """ if not self.logfp: return - print >> self.logfp, msg - - def logdata(self, nbrs, mdrs, lsdbs, krs, zrs): + print >> self.logfp, msg + + def logdata(self, nbrs, mdrs, lsdbs, krs, zrs): """ Dump experiment parameters and data to the log file. """ self.log("ospfmantetmdrtest data:") self.log("----- parameters -----") self.log("%s" % self.options) self.log("----- neighbors -----") for rtrid in sorted(nbrs.keys()): - self.log("%s: %s" % (rtrid, nbrs[rtrid])) + self.log("%s: %s" % (rtrid, nbrs[rtrid])) self.log("----- mdr levels -----") self.log(mdrs) self.log("----- link state databases -----") @@ -233,20 +233,20 @@ class ManetExperiment(object): for rt in zrs[rtrid]: msg += "%s" % rt self.log(msg) - - def topology(self, numnodes, linkprob, verbose = False): + + def topology(self, numnodes, linkprob, verbose=False): """ Build a topology consisting of the given number of ManetNodes connected to a WLAN and probabilty of links and set the session, WLAN, and node list objects. 
""" # IP subnet - prefix = ipaddr.IPv4Prefix("10.14.0.0/16") - self.session = pycore.Session() + prefix = ipaddress.Ipv4Prefix("10.14.0.0/16") + self.session = Session(1) # emulated network - self.net = self.session.addobj(cls = pycore.nodes.WlanNode) + self.net = self.session.add_object(cls=nodes.WlanNode) for i in xrange(1, numnodes + 1): addr = "%s/%s" % (prefix.addr(i), 32) - tmp = self.session.addobj(cls = ManetNode, ipaddr = addr, objid= "%d" % i, name = "n%d" % i) + tmp = self.session.add_object(cls=ManetNode, ipaddr=addr, objid="%d" % i, name="n%d" % i) tmp.newnetif(self.net, [addr]) self.nodes.append(tmp) # connect nodes with probability linkprob @@ -277,9 +277,9 @@ class ManetExperiment(object): if kr != zr: self.warn("kernel and zebra routes differ") if self.verbose: - msg = "kernel: " + msg = "kernel: " for r in kr: - msg += "%s " % r + msg += "%s " % r msg += "\nzebra: " for r in zr: msg += "%s " % r @@ -317,15 +317,15 @@ class ManetExperiment(object): if lsdbs[prev.routerid] != db: msg = "LSDBs of all routers are not consistent" self.warn("XXX LSDBs inconsistent for %s and %s" % \ - (n.routerid, prev.routerid)) + (n.routerid, prev.routerid)) i = 0 for entry in lsdbs[n.routerid].split("\n"): preventries = lsdbs[prev.routerid].split("\n") try: preventry = preventries[i] except IndexError: - preventry = None - if entry != preventry: + preventry = None + if entry != preventry: self.warn("%s: %s" % (n.routerid, entry)) self.warn("%s: %s" % (prev.routerid, preventry)) i += 1 @@ -343,19 +343,21 @@ class ManetExperiment(object): v = self.verbose for n in self.nodes: self.info("checking %s" % n.name) - nbrs[n.routerid] = Ospf6NeighState(n, verbose=v).run() - krs[n.routerid] = KernelRoutes(n, verbose=v).run() - zrs[n.routerid] = ZebraRoutes(n, verbose=v).run() + nbrs[n.routerid] = Ospf6NeighState(n, verbose=v).run() + krs[n.routerid] = KernelRoutes(n, verbose=v).run() + zrs[n.routerid] = ZebraRoutes(n, verbose=v).run() self.compareroutes(n, krs[n.routerid], zrs[n.routerid]) - mdrs[n.routerid] = Ospf6MdrLevel(n, verbose=v).run() - lsdbs[n.routerid] = Ospf6Database(n, verbose=v).run() + mdrs[n.routerid] = Ospf6MdrLevel(n, verbose=v).run() + lsdbs[n.routerid] = Ospf6Database(n, verbose=v).run() self.comparemdrlevels(nbrs, mdrs) self.comparelsdbs(lsdbs) - self.logdata(nbrs, mdrs, lsdbs, krs, zrs) + self.logdata(nbrs, mdrs, lsdbs, krs, zrs) + class Cmd: """ Helper class for running a command on a node and parsing the result. """ args = "" + def __init__(self, node, verbose=False): """ Initialize with a CoreNode (LxcNode) """ self.id = None @@ -363,35 +365,35 @@ class Cmd: self.out = None self.node = node self.verbose = verbose - - def info(self, msg): + + def info(self, msg): ''' Utility method for writing output to stdout.''' print msg sys.stdout.flush() def warn(self, msg): ''' Utility method for writing output to stderr. ''' - print >> sys.stderr, "XXX %s:" % self.node.routerid, msg + print >> sys.stderr, "XXX %s:" % self.node.routerid, msg sys.stderr.flush() - + def run(self): """ This is the primary method used for running this command. """ self.open() r = self.parse() self.cleanup() return r - + def open(self): """ Exceute call to node.popen(). """ self.id, self.stdin, self.out, self.err = \ - self.node.popen((self.args)) - + self.node.popen(self.args) + def parse(self): """ This method is overloaded by child classes and should return some result. 
""" return None - + def cleanup(self): """ Close the Popen channels.""" self.stdin.close() @@ -401,18 +403,22 @@ class Cmd: if tmp: self.warn("nonzero exit status:", tmp) + class VtyshCmd(Cmd): """ Runs a vtysh command. """ + def open(self): - args = ("vtysh", "-c", self.args) - self.id, self.stdin, self.out, self.err = self.node.popen((args)) - + args = ("vtysh", "-c", self.args) + self.id, self.stdin, self.out, self.err = self.node.popen(args) + + class Ospf6NeighState(VtyshCmd): """ Check a node for OSPFv3 neighbors in the full/two-way states. """ args = "show ipv6 ospf6 neighbor" - + def parse(self): - self.out.readline() # skip first line + # skip first line + self.out.readline() nbrlist = [] for line in self.out: field = line.split() @@ -428,13 +434,14 @@ class Ospf6NeighState(VtyshCmd): self.info(" %s has %d neighbors" % (self.node.routerid, len(nbrlist))) return nbrlist + class Ospf6MdrLevel(VtyshCmd): """ Retrieve the OSPFv3 MDR level for a node. """ args = "show ipv6 ospf6 mdrlevel" - + def parse(self): line = self.out.readline() - # TODO: handle multiple interfaces + # TODO: handle multiple interfaces field = line.split() mdrlevel = field[4] if not mdrlevel in ("MDR", "BMDR", "OTHER"): @@ -443,12 +450,13 @@ class Ospf6MdrLevel(VtyshCmd): self.info(" %s is %s" % (self.node.routerid, mdrlevel)) return mdrlevel + class Ospf6Database(VtyshCmd): """ Retrieve the OSPFv3 LSDB summary for a node. """ args = "show ipv6 ospf6 database" - + def parse(self): - db = "" + db = "" for line in self.out: field = line.split() if len(field) < 8: @@ -458,15 +466,17 @@ class Ospf6Database(VtyshCmd): db += " ".join(filtered) + "\n" return db + class ZebraRoutes(VtyshCmd): """ Return a list of Route objects for a node based on its zebra routing table. """ args = "show ip route" - + def parse(self): - for i in xrange(0,3): - self.out.readline() # skip first three lines + for i in xrange(0, 3): + # skip first three lines + self.out.readline() r = [] prefix = None for line in self.out: @@ -497,12 +507,13 @@ class ZebraRoutes(VtyshCmd): self.info(" %s has %d zebra routes" % (self.node.routerid, len(r))) return r + class KernelRoutes(Cmd): - """ Return a list of Route objects for a node based on its kernel + """ Return a list of Route objects for a node based on its kernel routing table. 
""" - args = ("/sbin/ip", "route", "show") - + args = ("/sbin/ip", "route", "show") + def parse(self): r = [] prefix = None @@ -521,7 +532,8 @@ class KernelRoutes(Cmd): if field[1] == "proto": # nexthop entry is on the next line continue - gw = field[2] # nexthop IP or interface + # nexthop IP or interface + gw = field[2] r.append(Route(prefix, gw, metric)) prefix = None @@ -531,25 +543,26 @@ class KernelRoutes(Cmd): self.info(" %s has %d kernel routes" % (self.node.routerid, len(r))) return r + def main(): usagestr = "usage: %prog [-h] [options] [args]" - parser = optparse.OptionParser(usage = usagestr) - parser.set_defaults(numnodes = 10, linkprob = 0.35, delay = 20, seed = None) + parser = optparse.OptionParser(usage=usagestr) + parser.set_defaults(numnodes=10, linkprob=0.35, delay=20, seed=None) - parser.add_option("-n", "--numnodes", dest = "numnodes", type = int, - help = "number of nodes") - parser.add_option("-p", "--linkprob", dest = "linkprob", type = float, - help = "link probabilty") - parser.add_option("-d", "--delay", dest = "delay", type = float, - help = "wait time before checking") - parser.add_option("-s", "--seed", dest = "seed", type = int, - help = "specify integer to use for random seed") - parser.add_option("-v", "--verbose", dest = "verbose", - action = "store_true", help = "be more verbose") - parser.add_option("-l", "--logfile", dest = "logfile", type = str, - help = "log detailed output to the specified file") + parser.add_option("-n", "--numnodes", dest="numnodes", type=int, + help="number of nodes") + parser.add_option("-p", "--linkprob", dest="linkprob", type=float, + help="link probabilty") + parser.add_option("-d", "--delay", dest="delay", type=float, + help="wait time before checking") + parser.add_option("-s", "--seed", dest="seed", type=int, + help="specify integer to use for random seed") + parser.add_option("-v", "--verbose", dest="verbose", + action="store_true", help="be more verbose") + parser.add_option("-l", "--logfile", dest="logfile", type=str, + help="log detailed output to the specified file") - def usage(msg = None, err = 0): + def usage(msg=None, err=0): sys.stdout.write("\n") if msg: sys.stdout.write(msg + "\n\n") @@ -572,11 +585,11 @@ def main(): if options.seed: random.seed(options.seed) - me = ManetExperiment(options = options, start=datetime.datetime.now()) + me = ManetExperiment(options=options, start=datetime.datetime.now()) me.info("creating topology: numnodes = %s; linkprob = %s" % \ - (options.numnodes, options.linkprob)) + (options.numnodes, options.linkprob)) me.topology(options.numnodes, options.linkprob) - + me.info("waiting %s sec" % options.delay) time.sleep(options.delay) me.info("checking neighbor state and routes") @@ -584,8 +597,13 @@ def main(): me.info("done") me.info("elapsed time: %s" % (datetime.datetime.now() - me.start)) me.logend() - + return me + if __name__ == "__main__": + # configure nodes to use + node_map = nodemaps.CLASSIC_NODES + nodeutils.set_node_map(node_map) + me = main() diff --git a/daemon/examples/netns/switch.py b/daemon/examples/netns/switch.py index 594c37fa..30fefc52 100755 --- a/daemon/examples/netns/switch.py +++ b/daemon/examples/netns/switch.py @@ -5,24 +5,29 @@ # connect n nodes to a virtual switch/hub -import sys, datetime, optparse +import datetime +import optparse +import sys -from core import pycore -from core.misc import ipaddr -from core.constants import * +from core import constants +from core.misc import ipaddress, nodeutils, nodemaps +from core.netns import nodes # node list (count 
from 1) +from core.session import Session + n = [None] + def main(): usagestr = "usage: %prog [-h] [options] [args]" - parser = optparse.OptionParser(usage = usagestr) - parser.set_defaults(numnodes = 5) + parser = optparse.OptionParser(usage=usagestr) + parser.set_defaults(numnodes=5) - parser.add_option("-n", "--numnodes", dest = "numnodes", type = int, - help = "number of nodes") + parser.add_option("-n", "--numnodes", dest="numnodes", type=int, + help="number of nodes") - def usage(msg = None, err = 0): + def usage(msg=None, err=0): sys.stdout.write("\n") if msg: sys.stdout.write(msg + "\n\n") @@ -41,30 +46,35 @@ def main(): start = datetime.datetime.now() # IP subnet - prefix = ipaddr.IPv4Prefix("10.83.0.0/16") - session = pycore.Session(persistent=True) + prefix = ipaddress.Ipv4Prefix("10.83.0.0/16") + session = Session(1, persistent=True) if 'server' in globals(): server.addsession(session) # emulated Ethernet switch - switch = session.addobj(cls = pycore.nodes.SwitchNode, name = "switch") - switch.setposition(x=80,y=50) - print "creating %d nodes with addresses from %s" % \ - (options.numnodes, prefix) + switch = session.add_object(cls=nodes.SwitchNode, name="switch") + switch.setposition(x=80, y=50) + print "creating %d nodes with addresses from %s" % (options.numnodes, prefix) for i in xrange(1, options.numnodes + 1): - tmp = session.addobj(cls = pycore.nodes.CoreNode, name = "n%d" % i, - objid=i) + tmp = session.add_object(cls=nodes.CoreNode, name="n%d" % i, objid=i) tmp.newnetif(switch, ["%s/%s" % (prefix.addr(i), prefix.prefixlen)]) - tmp.cmd([SYSCTL_BIN, "net.ipv4.icmp_echo_ignore_broadcasts=0"]) - tmp.setposition(x=150*i,y=150) + tmp.cmd([constants.SYSCTL_BIN, "net.ipv4.icmp_echo_ignore_broadcasts=0"]) + tmp.setposition(x=150 * i, y=150) n.append(tmp) session.node_count = str(options.numnodes + 1) session.instantiate() + print "elapsed time: %s" % (datetime.datetime.now() - start) + # start a shell on node 1 n[1].term("bash") - print "elapsed time: %s" % (datetime.datetime.now() - start) + raw_input("press enter to exit") + session.shutdown() + if __name__ == "__main__" or __name__ == "__builtin__": - main() + # configure nodes to use + node_map = nodemaps.CLASSIC_NODES + nodeutils.set_node_map(node_map) + main() diff --git a/daemon/examples/netns/switchtest.py b/daemon/examples/netns/switchtest.py index defc5400..dcfac37c 100755 --- a/daemon/examples/netns/switchtest.py +++ b/daemon/examples/netns/switchtest.py @@ -8,11 +8,14 @@ # and repeat for minnodes <= n <= maxnodes with a step size of # nodestep -import optparse, sys, os, datetime +import datetime +import optparse +import sys -from core import pycore -from core.misc import ipaddr +from core.misc import ipaddress, nodeutils, nodemaps from core.misc.utils import mutecall +from core.netns import nodes +from core.session import Session try: mutecall(["iperf", "-v"]) @@ -20,48 +23,52 @@ except OSError: sys.stderr.write("ERROR: running iperf failed\n") sys.exit(1) + def test(numnodes, testsec): # node list n = [] # IP subnet - prefix = ipaddr.IPv4Prefix("10.83.0.0/16") - session = pycore.Session() + prefix = ipaddress.Ipv4Prefix("10.83.0.0/16") + session = Session(1) # emulated network - net = session.addobj(cls = pycore.nodes.SwitchNode) + net = session.add_object(cls=nodes.SwitchNode) for i in xrange(1, numnodes + 1): - tmp = session.addobj(cls = pycore.nodes.LxcNode, name = "n%d" % i) + tmp = session.add_object(cls=nodes.LxcNode, name="n%d" % i) tmp.newnetif(net, ["%s/%s" % (prefix.addr(i), prefix.prefixlen)]) 
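+        # keep a handle on each node; after this loop an iperf server is
+        # started on the first node and a timed iperf client on the last node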
n.append(tmp) n[0].cmd(["iperf", "-s", "-D"]) n[-1].icmd(["iperf", "-t", str(int(testsec)), "-c", str(prefix.addr(1))]) n[0].cmd(["killall", "-9", "iperf"]) + + raw_input("press enter to exit") session.shutdown() + def main(): usagestr = "usage: %prog [-h] [options] [args]" - parser = optparse.OptionParser(usage = usagestr) + parser = optparse.OptionParser(usage=usagestr) - parser.set_defaults(minnodes = 2) - parser.add_option("-m", "--minnodes", dest = "minnodes", type = int, - help = "min number of nodes to test; default = %s" % - parser.defaults["minnodes"]) + parser.set_defaults(minnodes=2) + parser.add_option("-m", "--minnodes", dest="minnodes", type=int, + help="min number of nodes to test; default = %s" % + parser.defaults["minnodes"]) - parser.set_defaults(maxnodes = 2) - parser.add_option("-n", "--maxnodes", dest = "maxnodes", type = int, - help = "max number of nodes to test; default = %s" % - parser.defaults["maxnodes"]) + parser.set_defaults(maxnodes=2) + parser.add_option("-n", "--maxnodes", dest="maxnodes", type=int, + help="max number of nodes to test; default = %s" % + parser.defaults["maxnodes"]) - parser.set_defaults(testsec = 10) - parser.add_option("-t", "--testsec", dest = "testsec", type = int, - help = "test time in seconds; default = %s" % - parser.defaults["testsec"]) + parser.set_defaults(testsec=10) + parser.add_option("-t", "--testsec", dest="testsec", type=int, + help="test time in seconds; default = %s" % + parser.defaults["testsec"]) - parser.set_defaults(nodestep = 1) - parser.add_option("-s", "--nodestep", dest = "nodestep", type = int, - help = "number of nodes step size; default = %s" % - parser.defaults["nodestep"]) + parser.set_defaults(nodestep=1) + parser.add_option("-s", "--nodestep", dest="nodestep", type=int, + help="number of nodes step size; default = %s" % + parser.defaults["nodestep"]) - def usage(msg = None, err = 0): + def usage(msg=None, err=0): sys.stdout.write("\n") if msg: sys.stdout.write(msg + "\n\n") @@ -90,8 +97,12 @@ def main(): test(i, options.testsec) print >> sys.stderr, "" - print >> sys.stderr, \ - "elapsed time: %s" % (datetime.datetime.now() - start) + print >> sys.stderr, "elapsed time: %s" % (datetime.datetime.now() - start) + if __name__ == "__main__": + # configure nodes to use + node_map = nodemaps.CLASSIC_NODES + nodeutils.set_node_map(node_map) + main() diff --git a/daemon/examples/netns/wlanemanetests.py b/daemon/examples/netns/wlanemanetests.py index 7414114d..3db769ad 100755 --- a/daemon/examples/netns/wlanemanetests.py +++ b/daemon/examples/netns/wlanemanetests.py @@ -5,14 +5,14 @@ # # author: Jeff Ahrenholz # -''' +""" wlanemanetests.py - This script tests the performance of the WLAN device in CORE by measuring various metrics: - delay experienced when pinging end-to-end - maximum TCP throughput achieved using iperf end-to-end - the CPU used and loss experienced when running an MGEN flow of UDP traffic -All MANET nodes are arranged in a row, so that any given node can only +All MANET nodes are arranged in a row, so that any given node can only communicate with the node to its right or to its left. Performance is measured using traffic that travels across each hop in the network. Static /32 routing is used instead of any dynamic routing protocol. @@ -28,29 +28,22 @@ Various underlying network types are tested: Results are printed/logged in CSV format. 
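+
+For example, a run over 10 nodes might be started with (assuming iperf, mgen,
+and the EMANE Python bindings are installed, and the script is run as root):
+
+    sudo python wlanemanetests.py -n 10 -t 10 -r 512 -l results.log
+
+Each line of the CSV summary has the form
+    netname:latency,mdev,throughput,cpu,loss
+with latency/mdev in milliseconds, throughput in bits per second, and
+cpu/loss as percentages.
+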
-''' +""" -import os, sys, time, optparse, datetime, math -from string import Template -try: - from core import pycore -except ImportError: - # hack for Fedora autoconf that uses the following pythondir: - if "/usr/lib/python2.6/site-packages" in sys.path: - sys.path.append("/usr/local/lib/python2.6/site-packages") - if "/usr/lib64/python2.6/site-packages" in sys.path: - sys.path.append("/usr/local/lib64/python2.6/site-packages") - if "/usr/lib/python2.7/site-packages" in sys.path: - sys.path.append("/usr/local/lib/python2.7/site-packages") - if "/usr/lib64/python2.7/site-packages" in sys.path: - sys.path.append("/usr/local/lib64/python2.7/site-packages") - from core import pycore -from core.misc import ipaddr -from core.misc.utils import mutecall -from core.constants import QUAGGA_STATE_DIR -from core.emane.emane import Emane +import datetime +import math +import optparse +import os +import sys +import time + +from core import emane from core.emane.bypass import EmaneBypassModel +from core.emane.nodes import EmaneNode from core.emane.rfpipe import EmaneRfPipeModel +from core.misc import ipaddress, nodemaps, nodeutils +from core.netns import nodes +from core.session import Session try: import emaneeventservice @@ -60,12 +53,12 @@ except Exception, e: from emanesh.events import EventService from emanesh.events import PathlossEvent except Exception, e2: - raise ImportError, "failed to import EMANE Python bindings:\n%s\n%s" % \ - (e, e2) + raise ImportError("failed to import EMANE Python bindings:\n%s\n%s" % (e, e2)) -# global Experiment object (for interaction with 'python -i') +# global Experiment object (for interaction with "python -i") exp = None + # move these to core.misc.utils def readstat(): f = open("/proc/stat", "r") @@ -73,6 +66,7 @@ def readstat(): f.close() return lines + def numcpus(): lines = readstat() n = 0 @@ -82,14 +76,16 @@ def numcpus(): n += 1 return n + def getcputimes(line): # return (user, nice, sys, idle) from a /proc/stat cpu line # assume columns are: # cpu# user nice sys idle iowait irq softirq steal guest (man 5 proc) items = line.split() - (user, nice, sys, idle) = map(lambda(x): int(x), items[1:5]) + (user, nice, sys, idle) = map(lambda (x): int(x), items[1:5]) return [user, nice, sys, idle] + def calculatecpu(timesa, timesb): for i in range(len(timesa)): timesb[i] -= timesa[i] @@ -100,50 +96,51 @@ def calculatecpu(timesa, timesb): # subtract % time spent in idle time return 100 - ((100.0 * timesb[-1]) / total) + # end move these to core.misc.utils class Cmd(object): - ''' Helper class for running a command on a node and parsing the result. ''' + """ Helper class for running a command on a node and parsing the result. """ args = "" + def __init__(self, node, verbose=False): - ''' Initialize with a CoreNode (LxcNode) ''' + """ Initialize with a CoreNode (LxcNode) """ self.id = None self.stdin = None self.out = None self.node = node self.verbose = verbose - - def info(self, msg): - ''' Utility method for writing output to stdout.''' + + def info(self, msg): + """ Utility method for writing output to stdout.""" print msg sys.stdout.flush() def warn(self, msg): - ''' Utility method for writing output to stderr. ''' - print >> sys.stderr, "XXX %s:" % self.node.name, msg + """ Utility method for writing output to stderr. """ + print >> sys.stderr, "XXX %s:" % self.node.name, msg sys.stderr.flush() - + def run(self): - ''' This is the primary method used for running this command. ''' + """ This is the primary method used for running this command. 
""" self.open() status = self.id.wait() r = self.parse() self.cleanup() return r - + def open(self): - ''' Exceute call to node.popen(). ''' - self.id, self.stdin, self.out, self.err = \ - self.node.popen((self.args)) - + """ Exceute call to node.popen(). """ + self.id, self.stdin, self.out, self.err = self.node.popen(self.args) + def parse(self): - ''' This method is overloaded by child classes and should return some + """ This method is overloaded by child classes and should return some result. - ''' + """ return None - + def cleanup(self): - ''' Close the Popen channels.''' + """ Close the Popen channels.""" self.stdin.close() self.out.close() self.err.close() @@ -153,70 +150,71 @@ class Cmd(object): class ClientServerCmd(Cmd): - ''' Helper class for running a command on a node and parsing the result. ''' + """ Helper class for running a command on a node and parsing the result. """ args = "" client_args = "" + def __init__(self, node, client_node, verbose=False): - ''' Initialize with two CoreNodes, node is the server ''' + """ Initialize with two CoreNodes, node is the server """ Cmd.__init__(self, node, verbose) self.client_node = client_node - + def run(self): - ''' Run the server command, then the client command, then - kill the server ''' - self.open() # server - self.client_open() # client + """ Run the server command, then the client command, then + kill the server """ + self.open() # server + self.client_open() # client status = self.client_id.wait() - self.node.cmdresult(['killall', self.args[0]]) # stop the server + # stop the server + self.node.cmdresult(["killall", self.args[0]]) r = self.parse() self.cleanup() return r - + def client_open(self): - ''' Exceute call to client_node.popen(). ''' + """ Exceute call to client_node.popen(). """ self.client_id, self.client_stdin, self.client_out, self.client_err = \ - self.client_node.popen((self.client_args)) - + self.client_node.popen(self.client_args) + def parse(self): - ''' This method is overloaded by child classes and should return some + """ This method is overloaded by child classes and should return some result. - ''' + """ return None - + def cleanup(self): - ''' Close the Popen channels.''' + """ Close the Popen channels.""" self.stdin.close() self.out.close() self.err.close() tmp = self.id.wait() if tmp: self.warn("nonzero exit status: %s" % tmp) - self.warn("command was: %s" % ((self.args, ))) + self.warn("command was: %s" % (self.args,)) class PingCmd(Cmd): - ''' Test latency using ping. - ''' + """ Test latency using ping. + """ + def __init__(self, node, verbose=False, addr=None, count=50, interval=0.1, ): Cmd.__init__(self, node, verbose) self.addr = addr self.count = count self.interval = interval - self.args = ['ping', '-q', '-c', '%s' % count, '-i', '%s' % interval, - addr] + self.args = ["ping", "-q", "-c", "%s" % count, "-i", "%s" % interval, addr] def run(self): if self.verbose: self.info("%s initial test ping (max 1 second)..." % self.node.name) - (status, result) = self.node.cmdresult(["ping", "-q", "-c", "1", "-w", - "1", self.addr]) + (status, result) = self.node.cmdresult(["ping", "-q", "-c", "1", "-w", "1", self.addr]) if status != 0: - self.warn("initial ping from %s to %s failed! result:\n%s" % \ + self.warn("initial ping from %s to %s failed! result:\n%s" % (self.node.name, self.addr, result)) - return (0.0, 0.0) + return 0.0, 0.0 if self.verbose: - self.info("%s pinging %s (%d seconds)..." % \ - (self.node.name, self.addr, self.count * self.interval)) + self.info("%s pinging %s (%d seconds)..." 
% + (self.node.name, self.addr, self.count * self.interval)) return Cmd.run(self) def parse(self): @@ -224,17 +222,19 @@ class PingCmd(Cmd): avg_latency = 0 mdev = 0 try: - stats_str = lines[-1].split('=')[1] - stats = stats_str.split('/') + stats_str = lines[-1].split("=")[1] + stats = stats_str.split("/") avg_latency = float(stats[1]) - mdev = float(stats[3].split(' ')[0]) + mdev = float(stats[3].split(" ")[0]) except Exception, e: self.warn("ping parsing exception: %s" % e) - return (avg_latency, mdev) + return avg_latency, mdev + class IperfCmd(ClientServerCmd): - ''' Test throughput using iperf. - ''' + """ Test throughput using iperf. + """ + def __init__(self, node, client_node, verbose=False, addr=None, time=10): # node is the server ClientServerCmd.__init__(self, node, client_node, verbose) @@ -248,41 +248,43 @@ class IperfCmd(ClientServerCmd): if self.verbose: self.info("Launching the iperf server on %s..." % self.node.name) self.info("Running the iperf client on %s (%s seconds)..." % \ - (self.client_node.name, self.time)) + (self.client_node.name, self.time)) return ClientServerCmd.run(self) def parse(self): lines = self.out.readlines() try: - bps = int(lines[-1].split(',')[-1].strip('\n')) + bps = int(lines[-1].split(",")[-1].strip("\n")) except Exception, e: self.warn("iperf parsing exception: %s" % e) bps = 0 return bps - + + class MgenCmd(ClientServerCmd): - ''' Run a test traffic flow using an MGEN sender and receiver. - ''' + """ Run a test traffic flow using an MGEN sender and receiver. + """ + def __init__(self, node, client_node, verbose=False, addr=None, time=10, rate=512): ClientServerCmd.__init__(self, node, client_node, verbose) self.addr = addr self.time = time - self.args = ['mgen', 'event', 'listen udp 5000', 'output', - '/var/log/mgen.log'] + self.args = ["mgen", "event", "listen udp 5000", "output", + "/var/log/mgen.log"] self.rate = rate sendevent = "ON 1 UDP DST %s/5000 PERIODIC [%s]" % \ - (addr, self.mgenrate(self.rate)) + (addr, self.mgenrate(self.rate)) stopevent = "%s OFF 1" % time - self.client_args = ['mgen', 'event', sendevent, 'event', stopevent, - 'output', '/var/log/mgen.log'] + self.client_args = ["mgen", "event", sendevent, "event", stopevent, + "output", "/var/log/mgen.log"] @staticmethod def mgenrate(kbps): - ''' Return a MGEN periodic rate string for the given kilobits-per-sec. + """ Return a MGEN periodic rate string for the given kilobits-per-sec. Assume 1500 byte MTU, 20-byte IP + 8-byte UDP headers, leaving 1472 bytes for data. - ''' + """ bps = (kbps / 8) * 1000.0 maxdata = 1472 pps = math.ceil(bps / maxdata) @@ -292,30 +294,31 @@ class MgenCmd(ClientServerCmd): if self.verbose: self.info("Launching the MGEN receiver on %s..." % self.node.name) self.info("Running the MGEN sender on %s (%s seconds)..." % \ - (self.client_node.name, self.time)) + (self.client_node.name, self.time)) return ClientServerCmd.run(self) - + def cleanup(self): - ''' Close the Popen channels.''' + """ Close the Popen channels.""" self.stdin.close() self.out.close() self.err.close() - tmp = self.id.wait() # non-zero mgen exit status OK - + # non-zero mgen exit status OK + tmp = self.id.wait() + def parse(self): - ''' Check MGEN receiver's log file for packet sequence numbers, and + """ Check MGEN receiver"s log file for packet sequence numbers, and return the percentage of lost packets. 
- ''' - logfile = os.path.join(self.node.nodedir, 'var.log/mgen.log') - f = open(logfile, 'r') + """ + logfile = os.path.join(self.node.nodedir, "var.log/mgen.log") + f = open(logfile, "r") numlost = 0 lastseq = 0 for line in f.readlines(): fields = line.split() - if fields[1] != 'RECV': + if fields[1] != "RECV": continue try: - seq = int(fields[4].split('>')[1]) + seq = int(fields[4].split(">")[1]) except: self.info("Unexpected MGEN line:\n%s" % fields) if seq > (lastseq + 1): @@ -333,10 +336,11 @@ class MgenCmd(ClientServerCmd): class Experiment(object): - ''' Experiment object to organize tests. - ''' - def __init__(self, opt, start): - ''' Initialize with opt and start time. ''' + """ Experiment object to organize tests. + """ + + def __init__(self, opt, start): + """ Initialize with opt and start time. """ self.session = None # node list self.nodes = [] @@ -350,21 +354,21 @@ class Experiment(object): self.numiperf = opt.numiperf self.nummgen = opt.nummgen self.logbegin() - - def info(self, msg): - ''' Utility method for writing output to stdout. ''' + + def info(self, msg): + """ Utility method for writing output to stdout. """ print msg sys.stdout.flush() self.log(msg) def warn(self, msg): - ''' Utility method for writing output to stderr. ''' + """ Utility method for writing output to stderr. """ print >> sys.stderr, msg sys.stderr.flush() self.log(msg) - + def logbegin(self): - ''' Start logging. ''' + """ Start logging. """ self.logfp = None if not self.opt.logfile: return @@ -373,117 +377,110 @@ class Experiment(object): self.log("%s args: %s\n" % (sys.argv[0], sys.argv[1:])) (sysname, rel, ver, machine, nodename) = os.uname() self.log("%s %s %s %s on %s" % (sysname, rel, ver, machine, nodename)) - + def logend(self): - ''' End logging. ''' + """ End logging. """ if not self.logfp: return end = datetime.datetime.now() self.log("%s end: %s (%s)\n" % \ - (sys.argv[0], end.ctime(), end - self.start)) + (sys.argv[0], end.ctime(), end - self.start)) self.logfp.flush() self.logfp.close() self.logfp = None - + def log(self, msg): - ''' Write to the log file, if any. ''' + """ Write to the log file, if any. """ if not self.logfp: return - print >> self.logfp, msg + print >> self.logfp, msg def reset(self): - ''' Prepare for another experiment run. - ''' + """ Prepare for another experiment run. + """ if self.session: self.session.shutdown() del self.session self.session = None self.nodes = [] self.net = None - - def createbridgedsession(self, numnodes, verbose = False): - ''' Build a topology consisting of the given number of LxcNodes + + def createbridgedsession(self, numnodes, verbose=False): + """ Build a topology consisting of the given number of LxcNodes connected to a WLAN. 
- ''' + """ # IP subnet - prefix = ipaddr.IPv4Prefix("10.0.0.0/16") - self.session = pycore.Session() + prefix = ipaddress.Ipv4Prefix("10.0.0.0/16") + self.session = Session(1) # emulated network - self.net = self.session.addobj(cls = pycore.nodes.WlanNode, - name = "wlan1") + self.net = self.session.add_object(cls=nodes.WlanNode, name="wlan1") prev = None for i in xrange(1, numnodes + 1): addr = "%s/%s" % (prefix.addr(i), 32) - tmp = self.session.addobj(cls = pycore.nodes.CoreNode, objid = i, - name = "n%d" % i) + tmp = self.session.add_object(cls=nodes.CoreNode, objid=i, name="n%d" % i) tmp.newnetif(self.net, [addr]) self.nodes.append(tmp) - self.session.services.addservicestonode(tmp, "router", - "IPForward", self.verbose) + self.session.services.addservicestonode(tmp, "router", "IPForward") self.session.services.bootnodeservices(tmp) self.staticroutes(i, prefix, numnodes) - + # link each node in a chain, with the previous node if prev: self.net.link(prev.netif(0), tmp.netif(0)) prev = tmp - - def createemanesession(self, numnodes, verbose = False, cls = None, - values = None): - ''' Build a topology consisting of the given number of LxcNodes + + def createemanesession(self, numnodes, verbose=False, cls=None, + values=None): + """ Build a topology consisting of the given number of LxcNodes connected to an EMANE WLAN. - ''' - prefix = ipaddr.IPv4Prefix("10.0.0.0/16") - self.session = pycore.Session() + """ + prefix = ipaddress.Ipv4Prefix("10.0.0.0/16") + self.session = Session(2) self.session.node_count = str(numnodes + 1) self.session.master = True - self.session.location.setrefgeo(47.57917,-122.13232,2.00000) + self.session.location.setrefgeo(47.57917, -122.13232, 2.00000) self.session.location.refscale = 150.0 - self.session.cfg['emane_models'] = "RfPipe, Ieee80211abg, Bypass" + self.session.config["emane_models"] = "RfPipe, Ieee80211abg, Bypass" self.session.emane.loadmodels() - self.net = self.session.addobj(cls = pycore.nodes.EmaneNode, - objid = numnodes + 1, name = "wlan1") + self.net = self.session.add_object(cls=EmaneNode, objid=numnodes + 1, name="wlan1") self.net.verbose = verbose - #self.session.emane.addobj(self.net) + # self.session.emane.addobj(self.net) for i in xrange(1, numnodes + 1): addr = "%s/%s" % (prefix.addr(i), 32) - tmp = self.session.addobj(cls = pycore.nodes.CoreNode, objid = i, - name = "n%d" % i) - #tmp.setposition(i * 20, 50, None) + tmp = self.session.add_object(cls=nodes.CoreNode, objid=i, + name="n%d" % i) + # tmp.setposition(i * 20, 50, None) tmp.setposition(50, 50, None) tmp.newnetif(self.net, [addr]) self.nodes.append(tmp) - self.session.services.addservicestonode(tmp, "router", - "IPForward", self.verbose) + self.session.services.addservicestonode(tmp, "router", "IPForward") if values is None: values = cls.getdefaultvalues() - self.session.emane.setconfig(self.net.objid, cls._name, values) + self.session.emane.setconfig(self.net.objid, cls.name, values) self.session.instantiate() self.info("waiting %s sec (TAP bring-up)" % 2) time.sleep(2) for i in xrange(1, numnodes + 1): - tmp = self.nodes[i-1] + tmp = self.nodes[i - 1] self.session.services.bootnodeservices(tmp) self.staticroutes(i, prefix, numnodes) - - + def setnodes(self): - ''' Set the sender and receiver nodes for use in this experiment, + """ Set the sender and receiver nodes for use in this experiment, along with the address of the receiver to be used. 
- ''' + """ self.firstnode = self.nodes[0] self.lastnode = self.nodes[-1] - self.lastaddr = self.lastnode.netif(0).addrlist[0].split('/')[0] - + self.lastaddr = self.lastnode.netif(0).addrlist[0].split("/")[0] def staticroutes(self, i, prefix, numnodes): - ''' Add static routes on node number i to the other nodes in the chain. - ''' + """ Add static routes on node number i to the other nodes in the chain. + """ routecmd = ["/sbin/ip", "route", "add"] - node = self.nodes[i-1] + node = self.nodes[i - 1] neigh_left = "" neigh_right = "" # add direct interface routes first @@ -515,17 +512,17 @@ class Experiment(object): self.warn("failed to add route: %s" % cmd) def setpathloss(self, numnodes): - ''' Send EMANE pathloss events to connect all NEMs in a chain. - ''' + """ Send EMANE pathloss events to connect all NEMs in a chain. + """ if self.session.emane.version < self.session.emane.EMANE091: service = emaneeventservice.EventService() e = emaneeventpathloss.EventPathloss(1) old = True else: if self.session.emane.version == self.session.emane.EMANE091: - dev = 'lo' + dev = "lo" else: - dev = self.session.obj('ctrlnet').brname + dev = self.session.obj("ctrlnet").brname service = EventService(eventchannel=("224.1.2.8", 45703, dev), otachannel=None) old = False @@ -538,8 +535,8 @@ class Experiment(object): if old: e.set(0, txnem, 10.0, 10.0) service.publish(emaneeventpathloss.EVENT_ID, - emaneeventservice.PLATFORMID_ANY, rxnem, - emaneeventservice.COMPONENTID_ANY, e.export()) + emaneeventservice.PLATFORMID_ANY, rxnem, + emaneeventservice.COMPONENTID_ANY, e.export()) else: e = PathlossEvent() e.append(txnem, forward=10.0, reverse=10.0) @@ -558,26 +555,26 @@ class Experiment(object): e.append(txnem, forward=10.0, reverse=10.0) service.publish(rxnem, e) - def setneteffects(self, bw = None, delay = None): - ''' Set link effects for all interfaces attached to the network node. - ''' + def setneteffects(self, bw=None, delay=None): + """ Set link effects for all interfaces attached to the network node. + """ if not self.net: self.warn("failed to set effects: no network node") return for netif in self.net.netifs(): - self.net.linkconfig(netif, bw = bw, delay = delay) + self.net.linkconfig(netif, bw=bw, delay=delay) def runalltests(self, title=""): - ''' Convenience helper to run all defined experiment tests. + """ Convenience helper to run all defined experiment tests. If tests are run multiple times, this returns the average of those runs. 
- ''' + """ duration = self.opt.duration rate = self.opt.rate if len(title) > 0: self.info("----- running %s tests (duration=%s, rate=%s) -----" % \ (title, duration, rate)) - (latency, mdev, throughput, cpu, loss) = (0,0,0,0,0) + (latency, mdev, throughput, cpu, loss) = (0, 0, 0, 0, 0) self.info("number of runs: ping=%d, iperf=%d, mgen=%d" % \ (self.numping, self.numiperf, self.nummgen)) @@ -591,7 +588,8 @@ class Experiment(object): throughput = self.iperftest(time=duration) if self.numiperf > 1: throughputs += throughput - time.sleep(1) # iperf is very CPU intensive + # iperf is very CPU intensive + time.sleep(1) if self.numiperf > 1: throughput = sum(throughputs) / len(throughputs) self.info("throughputs=%s" % ["%.2f" % v for v in throughputs]) @@ -610,30 +608,30 @@ class Experiment(object): self.info("cpus=%s" % ["%.2f" % v for v in cpus]) self.info("losses=%s" % ["%.2f" % v for v in losses]) - return (latency, mdev, throughput, cpu, loss) + return latency, mdev, throughput, cpu, loss def pingtest(self, count=50): - ''' Ping through a chain of nodes and report the average latency. - ''' - p = PingCmd(node=self.firstnode, verbose=self.verbose, - addr = self.lastaddr, count=count, interval=0.1).run() + """ Ping through a chain of nodes and report the average latency. + """ + p = PingCmd(node=self.firstnode, verbose=self.verbose, + addr=self.lastaddr, count=count, interval=0.1).run() (latency, mdev) = p self.info("latency (ms): %.03f, %.03f" % (latency, mdev)) return p def iperftest(self, time=10): - ''' Run iperf through a chain of nodes and report the maximum + """ Run iperf through a chain of nodes and report the maximum throughput. - ''' + """ bps = IperfCmd(node=self.lastnode, client_node=self.firstnode, verbose=False, addr=self.lastaddr, time=time).run() self.info("throughput (bps): %s" % bps) return bps def cputest(self, time=10, rate=512): - ''' Run MGEN through a chain of nodes and report the CPU usage and + """ Run MGEN through a chain of nodes and report the CPU usage and percent of lost packets. Rate is in kbps. - ''' + """ if self.verbose: self.info("%s initial test ping (max 1 second)..." % \ self.firstnode.name) @@ -642,7 +640,7 @@ class Experiment(object): if status != 0: self.warn("initial ping from %s to %s failed! result:\n%s" % \ (self.firstnode.name, self.lastaddr, result)) - return (0.0, 0.0) + return 0.0, 0.0 lines = readstat() cpustart = getcputimes(lines[0]) loss = MgenCmd(node=self.lastnode, client_node=self.firstnode, @@ -654,35 +652,36 @@ class Experiment(object): self.info("CPU usage (%%): %.02f, %.02f loss" % (percent, loss)) return percent, loss + def main(): - ''' Main routine when running from command-line. - ''' + """ Main routine when running from command-line. 
+ """ usagestr = "usage: %prog [-h] [options] [args]" - parser = optparse.OptionParser(usage = usagestr) - parser.set_defaults(numnodes = 10, delay = 3, duration = 10, rate = 512, - verbose = False, - numping = 50, numiperf = 1, nummgen = 1) + parser = optparse.OptionParser(usage=usagestr) + parser.set_defaults(numnodes=10, delay=3, duration=10, rate=512, + verbose=False, + numping=50, numiperf=1, nummgen=1) - parser.add_option("-d", "--delay", dest = "delay", type = float, - help = "wait time before testing") - parser.add_option("-l", "--logfile", dest = "logfile", type = str, - help = "log detailed output to the specified file") - parser.add_option("-n", "--numnodes", dest = "numnodes", type = int, - help = "number of nodes") - parser.add_option("-r", "--rate", dest = "rate", type = float, - help = "kbps rate to use for MGEN CPU tests") - parser.add_option("--numping", dest = "numping", type = int, - help = "number of ping latency test runs") - parser.add_option("--numiperf", dest = "numiperf", type = int, - help = "number of iperf throughput test runs") - parser.add_option("--nummgen", dest = "nummgen", type = int, - help = "number of MGEN CPU tests runs") - parser.add_option("-t", "--time", dest = "duration", type = int, - help = "duration in seconds of throughput and CPU tests") - parser.add_option("-v", "--verbose", dest = "verbose", - action = "store_true", help = "be more verbose") + parser.add_option("-d", "--delay", dest="delay", type=float, + help="wait time before testing") + parser.add_option("-l", "--logfile", dest="logfile", type=str, + help="log detailed output to the specified file") + parser.add_option("-n", "--numnodes", dest="numnodes", type=int, + help="number of nodes") + parser.add_option("-r", "--rate", dest="rate", type=float, + help="kbps rate to use for MGEN CPU tests") + parser.add_option("--numping", dest="numping", type=int, + help="number of ping latency test runs") + parser.add_option("--numiperf", dest="numiperf", type=int, + help="number of iperf throughput test runs") + parser.add_option("--nummgen", dest="nummgen", type=int, + help="number of MGEN CPU tests runs") + parser.add_option("-t", "--time", dest="duration", type=int, + help="duration in seconds of throughput and CPU tests") + parser.add_option("-v", "--verbose", dest="verbose", + action="store_true", help="be more verbose") - def usage(msg = None, err = 0): + def usage(msg=None, err=0): sys.stdout.write("\n") if msg: sys.stdout.write(msg + "\n\n") @@ -700,157 +699,160 @@ def main(): usage("invalid rate: %s" % opt.rate) for a in args: - sys.stderr.write("ignoring command line argument: '%s'\n" % a) + sys.stderr.write("ignoring command line argument: %s\n" % a) - results = {} - starttime = datetime.datetime.now() - exp = Experiment(opt = opt, start=starttime) - exp.info("Starting wlanemanetests.py tests %s" % starttime.ctime()) + results = {} + starttime = datetime.datetime.now() + exp = Experiment(opt=opt, start=starttime) + exp.info("Starting wlanemanetests.py tests %s" % starttime.ctime()) - # system sanity checks here - emanever, emaneverstr = Emane.detectversionfromcmd() - if opt.verbose: - exp.info("Detected EMANE version %s" % (emaneverstr,)) + # system sanity checks here + emanever, emaneverstr = emane.VERSION, emane.VERSIONSTR + if opt.verbose: + exp.info("Detected EMANE version %s" % (emaneverstr,)) - # bridged - exp.info("setting up bridged tests 1/2 no link effects") - exp.info("creating topology: numnodes = %s" % \ - (opt.numnodes, )) - exp.createbridgedsession(numnodes=opt.numnodes, 
verbose=opt.verbose) - exp.setnodes() - exp.info("waiting %s sec (node/route bring-up)" % opt.delay) - time.sleep(opt.delay) - results['0 bridged'] = exp.runalltests("bridged") - exp.info("done; elapsed time: %s" % (datetime.datetime.now() - exp.start)) + # bridged + exp.info("setting up bridged tests 1/2 no link effects") + exp.info("creating topology: numnodes = %s" % (opt.numnodes,)) + exp.createbridgedsession(numnodes=opt.numnodes, verbose=opt.verbose) + exp.setnodes() + exp.info("waiting %s sec (node/route bring-up)" % opt.delay) + time.sleep(opt.delay) + results["0 bridged"] = exp.runalltests("bridged") + exp.info("done; elapsed time: %s" % (datetime.datetime.now() - exp.start)) - # bridged with netem - exp.info("setting up bridged tests 2/2 with netem") - exp.setneteffects(bw=54000000, delay=0) - exp.info("waiting %s sec (queue bring-up)" % opt.delay) - results['1.0 netem'] = exp.runalltests("netem") - exp.info("shutting down bridged session") + # bridged with netem + exp.info("setting up bridged tests 2/2 with netem") + exp.setneteffects(bw=54000000, delay=0) + exp.info("waiting %s sec (queue bring-up)" % opt.delay) + results["1.0 netem"] = exp.runalltests("netem") + exp.info("shutting down bridged session") - # bridged with netem (1 Mbps,200ms) - exp.info("setting up bridged tests 3/2 with netem") - exp.setneteffects(bw=1000000, delay=20000) - exp.info("waiting %s sec (queue bring-up)" % opt.delay) - results['1.2 netem_1M'] = exp.runalltests("netem_1M") - exp.info("shutting down bridged session") + # bridged with netem (1 Mbps,200ms) + exp.info("setting up bridged tests 3/2 with netem") + exp.setneteffects(bw=1000000, delay=20000) + exp.info("waiting %s sec (queue bring-up)" % opt.delay) + results["1.2 netem_1M"] = exp.runalltests("netem_1M") + exp.info("shutting down bridged session") - # bridged with netem (54 kbps,500ms) - exp.info("setting up bridged tests 3/2 with netem") - exp.setneteffects(bw=54000, delay=100000) - exp.info("waiting %s sec (queue bring-up)" % opt.delay) - results['1.4 netem_54K'] = exp.runalltests("netem_54K") - exp.info("shutting down bridged session") - exp.reset() + # bridged with netem (54 kbps,500ms) + exp.info("setting up bridged tests 3/2 with netem") + exp.setneteffects(bw=54000, delay=100000) + exp.info("waiting %s sec (queue bring-up)" % opt.delay) + results["1.4 netem_54K"] = exp.runalltests("netem_54K") + exp.info("shutting down bridged session") + exp.reset() - # EMANE bypass model - exp.info("setting up EMANE tests 1/2 with bypass model") - exp.createemanesession(numnodes=opt.numnodes, verbose=opt.verbose, - cls=EmaneBypassModel, values=None) - exp.setnodes() - exp.info("waiting %s sec (node/route bring-up)" % opt.delay) - time.sleep(opt.delay) - results['2.0 bypass'] = exp.runalltests("bypass") - exp.info("shutting down bypass session") - exp.reset() - - exp.info("waiting %s sec (between EMANE tests)" % opt.delay) - time.sleep(opt.delay) + # EMANE bypass model + exp.info("setting up EMANE tests 1/2 with bypass model") + exp.createemanesession(numnodes=opt.numnodes, verbose=opt.verbose, cls=EmaneBypassModel, values=None) + exp.setnodes() + exp.info("waiting %s sec (node/route bring-up)" % opt.delay) + time.sleep(opt.delay) + results["2.0 bypass"] = exp.runalltests("bypass") + exp.info("shutting down bypass session") + exp.reset() - # EMANE RF-PIPE model: no restrictions (max datarate) - exp.info("setting up EMANE tests 2/4 with RF-PIPE model") - rfpipevals = list(EmaneRfPipeModel.getdefaultvalues()) - rfpnames = EmaneRfPipeModel.getnames() - 
rfpipevals[ rfpnames.index('datarate') ] = '4294967295' # max value - if emanever < Emane.EMANE091: - rfpipevals[ rfpnames.index('pathlossmode') ] = '2ray' - rfpipevals[ rfpnames.index('defaultconnectivitymode') ] = '1' - else: - rfpipevals[ rfpnames.index('propagationmodel') ] = '2ray' - exp.createemanesession(numnodes=opt.numnodes, verbose=opt.verbose, - cls=EmaneRfPipeModel, values=rfpipevals) - exp.setnodes() - exp.info("waiting %s sec (node/route bring-up)" % opt.delay) - time.sleep(opt.delay) - results['3.0 rfpipe'] = exp.runalltests("rfpipe") - exp.info("shutting down RF-PIPE session") - exp.reset() + exp.info("waiting %s sec (between EMANE tests)" % opt.delay) + time.sleep(opt.delay) - # EMANE RF-PIPE model: 54M datarate - exp.info("setting up EMANE tests 3/4 with RF-PIPE model 54M") - rfpipevals = list(EmaneRfPipeModel.getdefaultvalues()) - rfpnames = EmaneRfPipeModel.getnames() - rfpipevals[ rfpnames.index('datarate') ] = '54000000' - # TX delay != propagation delay - #rfpipevals[ rfpnames.index('delay') ] = '5000' - if emanever < Emane.EMANE091: - rfpipevals[ rfpnames.index('pathlossmode') ] = '2ray' - rfpipevals[ rfpnames.index('defaultconnectivitymode') ] = '1' - else: - rfpipevals[ rfpnames.index('propagationmodel') ] = '2ray' - exp.createemanesession(numnodes=opt.numnodes, verbose=opt.verbose, - cls=EmaneRfPipeModel, values=rfpipevals) - exp.setnodes() - exp.info("waiting %s sec (node/route bring-up)" % opt.delay) - time.sleep(opt.delay) - results['4.0 rfpipe54m'] = exp.runalltests("rfpipe54m") - exp.info("shutting down RF-PIPE session") - exp.reset() + # EMANE RF-PIPE model: no restrictions (max datarate) + exp.info("setting up EMANE tests 2/4 with RF-PIPE model") + rfpipevals = list(EmaneRfPipeModel.getdefaultvalues()) + rfpnames = EmaneRfPipeModel.getnames() + # max value + rfpipevals[rfpnames.index("datarate")] = "4294967295" + if emanever < emane.EMANE091: + rfpipevals[rfpnames.index("pathlossmode")] = "2ray" + rfpipevals[rfpnames.index("defaultconnectivitymode")] = "1" + else: + rfpipevals[rfpnames.index("propagationmodel")] = "2ray" + exp.createemanesession(numnodes=opt.numnodes, verbose=opt.verbose, cls=EmaneRfPipeModel, values=rfpipevals) + exp.setnodes() + exp.info("waiting %s sec (node/route bring-up)" % opt.delay) + time.sleep(opt.delay) + results["3.0 rfpipe"] = exp.runalltests("rfpipe") + exp.info("shutting down RF-PIPE session") + exp.reset() - # EMANE RF-PIPE model: 54K datarate - exp.info("setting up EMANE tests 4/4 with RF-PIPE model pathloss") - rfpipevals = list(EmaneRfPipeModel.getdefaultvalues()) - rfpnames = EmaneRfPipeModel.getnames() - rfpipevals[ rfpnames.index('datarate') ] = '54000' - if emanever < Emane.EMANE091: - rfpipevals[ rfpnames.index('pathlossmode') ] = 'pathloss' - rfpipevals[ rfpnames.index('defaultconnectivitymode') ] = '0' - else: - rfpipevals[ rfpnames.index('propagationmodel') ] = 'precomputed' - exp.createemanesession(numnodes=opt.numnodes, verbose=opt.verbose, - cls=EmaneRfPipeModel, values=rfpipevals) - exp.setnodes() - exp.info("waiting %s sec (node/route bring-up)" % opt.delay) - time.sleep(opt.delay) - exp.info("sending pathloss events to govern connectivity") - exp.setpathloss(opt.numnodes) - results['5.0 pathloss'] = exp.runalltests("pathloss") - exp.info("shutting down RF-PIPE session") - exp.reset() + # EMANE RF-PIPE model: 54M datarate + exp.info("setting up EMANE tests 3/4 with RF-PIPE model 54M") + rfpipevals = list(EmaneRfPipeModel.getdefaultvalues()) + rfpnames = EmaneRfPipeModel.getnames() + 
rfpipevals[rfpnames.index("datarate")] = "54000000" + # TX delay != propagation delay + # rfpipevals[ rfpnames.index("delay") ] = "5000" + if emanever < emane.EMANE091: + rfpipevals[rfpnames.index("pathlossmode")] = "2ray" + rfpipevals[rfpnames.index("defaultconnectivitymode")] = "1" + else: + rfpipevals[rfpnames.index("propagationmodel")] = "2ray" + exp.createemanesession(numnodes=opt.numnodes, verbose=opt.verbose, + cls=EmaneRfPipeModel, values=rfpipevals) + exp.setnodes() + exp.info("waiting %s sec (node/route bring-up)" % opt.delay) + time.sleep(opt.delay) + results["4.0 rfpipe54m"] = exp.runalltests("rfpipe54m") + exp.info("shutting down RF-PIPE session") + exp.reset() - # EMANE RF-PIPE model (512K, 200ms) - exp.info("setting up EMANE tests 4/4 with RF-PIPE model pathloss") - rfpipevals = list(EmaneRfPipeModel.getdefaultvalues()) - rfpnames = EmaneRfPipeModel.getnames() - rfpipevals[ rfpnames.index('datarate') ] = '512000' - rfpipevals[ rfpnames.index('delay') ] = '200' - rfpipevals[ rfpnames.index('pathlossmode') ] = 'pathloss' - rfpipevals[ rfpnames.index('defaultconnectivitymode') ] = '0' - exp.createemanesession(numnodes=opt.numnodes, verbose=opt.verbose, - cls=EmaneRfPipeModel, values=rfpipevals) - exp.setnodes() - exp.info("waiting %s sec (node/route bring-up)" % opt.delay) - time.sleep(opt.delay) - exp.info("sending pathloss events to govern connectivity") - exp.setpathloss(opt.numnodes) - results['5.1 pathloss'] = exp.runalltests("pathloss") - exp.info("shutting down RF-PIPE session") - exp.reset() - - # summary of results in CSV format - exp.info("----- summary of results (%s nodes, rate=%s, duration=%s) -----" \ - % (opt.numnodes, opt.rate, opt.duration)) - exp.info("netname:latency,mdev,throughput,cpu,loss") + # EMANE RF-PIPE model: 54K datarate + exp.info("setting up EMANE tests 4/4 with RF-PIPE model pathloss") + rfpipevals = list(EmaneRfPipeModel.getdefaultvalues()) + rfpnames = EmaneRfPipeModel.getnames() + rfpipevals[rfpnames.index("datarate")] = "54000" + if emanever < emane.EMANE091: + rfpipevals[rfpnames.index("pathlossmode")] = "pathloss" + rfpipevals[rfpnames.index("defaultconnectivitymode")] = "0" + else: + rfpipevals[rfpnames.index("propagationmodel")] = "precomputed" + exp.createemanesession(numnodes=opt.numnodes, verbose=opt.verbose, + cls=EmaneRfPipeModel, values=rfpipevals) + exp.setnodes() + exp.info("waiting %s sec (node/route bring-up)" % opt.delay) + time.sleep(opt.delay) + exp.info("sending pathloss events to govern connectivity") + exp.setpathloss(opt.numnodes) + results["5.0 pathloss"] = exp.runalltests("pathloss") + exp.info("shutting down RF-PIPE session") + exp.reset() - for test in sorted(results.keys()): - (latency, mdev, throughput, cpu, loss) = results[test] - exp.info("%s:%.03f,%.03f,%d,%.02f,%.02f" % \ - (test, latency, mdev, throughput, cpu,loss)) + # EMANE RF-PIPE model (512K, 200ms) + exp.info("setting up EMANE tests 4/4 with RF-PIPE model pathloss") + rfpipevals = list(EmaneRfPipeModel.getdefaultvalues()) + rfpnames = EmaneRfPipeModel.getnames() + rfpipevals[rfpnames.index("datarate")] = "512000" + rfpipevals[rfpnames.index("delay")] = "200" + rfpipevals[rfpnames.index("pathlossmode")] = "pathloss" + rfpipevals[rfpnames.index("defaultconnectivitymode")] = "0" + exp.createemanesession(numnodes=opt.numnodes, verbose=opt.verbose, + cls=EmaneRfPipeModel, values=rfpipevals) + exp.setnodes() + exp.info("waiting %s sec (node/route bring-up)" % opt.delay) + time.sleep(opt.delay) + exp.info("sending pathloss events to govern connectivity") + 
exp.setpathloss(opt.numnodes) + results["5.1 pathloss"] = exp.runalltests("pathloss") + exp.info("shutting down RF-PIPE session") + exp.reset() + + # summary of results in CSV format + exp.info("----- summary of results (%s nodes, rate=%s, duration=%s) -----" \ + % (opt.numnodes, opt.rate, opt.duration)) + exp.info("netname:latency,mdev,throughput,cpu,loss") + + for test in sorted(results.keys()): + (latency, mdev, throughput, cpu, loss) = results[test] + exp.info("%s:%.03f,%.03f,%d,%.02f,%.02f" % \ + (test, latency, mdev, throughput, cpu, loss)) + + exp.logend() + return exp - exp.logend() - return exp if __name__ == "__main__": - exp = main() + # configure nodes to use + node_map = nodemaps.CLASSIC_NODES + nodeutils.set_node_map(node_map) + + main() diff --git a/daemon/examples/netns/wlantest.py b/daemon/examples/netns/wlantest.py index ba17f66f..f4d46519 100755 --- a/daemon/examples/netns/wlantest.py +++ b/daemon/examples/netns/wlantest.py @@ -8,11 +8,14 @@ # and repeat for minnodes <= n <= maxnodes with a step size of # nodestep -import optparse, sys, os, datetime +import datetime +import optparse +import sys -from core import pycore -from core.misc import ipaddr +from core.misc import ipaddress, nodeutils, nodemaps from core.misc.utils import mutecall +from core.netns import nodes +from core.session import Session try: mutecall(["iperf", "-v"]) @@ -20,16 +23,17 @@ except OSError: sys.stderr.write("ERROR: running iperf failed\n") sys.exit(1) + def test(numnodes, testsec): # node list n = [] # IP subnet - prefix = ipaddr.IPv4Prefix("10.83.0.0/16") - session = pycore.Session() + prefix = ipaddress.Ipv4Prefix("10.83.0.0/16") + session = Session(1) # emulated network - net = session.addobj(cls = pycore.nodes.WlanNode) + net = session.add_object(cls=nodes.WlanNode) for i in xrange(1, numnodes + 1): - tmp = session.addobj(cls = pycore.nodes.LxcNode, objid= "%d" % i, name = "n%d" % i) + tmp = session.add_object(cls=nodes.LxcNode, objid="%d" % i, name="n%d" % i) tmp.newnetif(net, ["%s/%s" % (prefix.addr(i), prefix.prefixlen)]) n.append(tmp) net.link(n[0].netif(0), n[-1].netif(0)) @@ -38,31 +42,31 @@ def test(numnodes, testsec): n[0].cmd(["killall", "-9", "iperf"]) session.shutdown() + def main(): usagestr = "usage: %prog [-h] [options] [args]" - parser = optparse.OptionParser(usage = usagestr) + parser = optparse.OptionParser(usage=usagestr) - parser.set_defaults(minnodes = 2) - parser.add_option("-m", "--minnodes", dest = "minnodes", type = int, - help = "min number of nodes to test; default = %s" % - parser.defaults["minnodes"]) + parser.set_defaults(minnodes=2) + parser.add_option("-m", "--minnodes", dest="minnodes", type=int, + help="min number of nodes to test; default = %s" % parser.defaults["minnodes"]) - parser.set_defaults(maxnodes = 2) - parser.add_option("-n", "--maxnodes", dest = "maxnodes", type = int, - help = "max number of nodes to test; default = %s" % - parser.defaults["maxnodes"]) + parser.set_defaults(maxnodes=2) + parser.add_option("-n", "--maxnodes", dest="maxnodes", type=int, + help="max number of nodes to test; default = %s" % + parser.defaults["maxnodes"]) - parser.set_defaults(testsec = 10) - parser.add_option("-t", "--testsec", dest = "testsec", type = int, - help = "test time in seconds; default = %s" % - parser.defaults["testsec"]) + parser.set_defaults(testsec=10) + parser.add_option("-t", "--testsec", dest="testsec", type=int, + help="test time in seconds; default = %s" % + parser.defaults["testsec"]) - parser.set_defaults(nodestep = 1) - parser.add_option("-s", 
"--nodestep", dest = "nodestep", type = int, - help = "number of nodes step size; default = %s" % - parser.defaults["nodestep"]) + parser.set_defaults(nodestep=1) + parser.add_option("-s", "--nodestep", dest="nodestep", type=int, + help="number of nodes step size; default = %s" % + parser.defaults["nodestep"]) - def usage(msg = None, err = 0): + def usage(msg=None, err=0): sys.stdout.write("\n") if msg: sys.stdout.write(msg + "\n\n") @@ -91,8 +95,12 @@ def main(): test(i, options.testsec) print >> sys.stderr, "" - print >> sys.stderr, \ - "elapsed time: %s" % (datetime.datetime.now() - start) + print >> sys.stderr, "elapsed time: %s" % (datetime.datetime.now() - start) + if __name__ == "__main__": + # configure nodes to use + node_map = nodemaps.CLASSIC_NODES + nodeutils.set_node_map(node_map) + main() diff --git a/daemon/examples/stopsession.py b/daemon/examples/stopsession.py index 3b890e2f..9cbfc2cb 100755 --- a/daemon/examples/stopsession.py +++ b/daemon/examples/stopsession.py @@ -5,41 +5,45 @@ # List and stop CORE sessions from the command line. # -import socket, optparse -from core.constants import * +import optparse +import socket + from core.api import coreapi +from core.enumerations import MessageFlags, SessionTlvs, CORE_API_PORT + def main(): - parser = optparse.OptionParser(usage = "usage: %prog [-l] ") - parser.add_option("-l", "--list", dest = "list", action = "store_true", - help = "list running sessions") + parser = optparse.OptionParser(usage="usage: %prog [-l] ") + parser.add_option("-l", "--list", dest="list", action="store_true", + help="list running sessions") (options, args) = parser.parse_args() if options.list is True: num = '0' - flags = coreapi.CORE_API_STR_FLAG + flags = MessageFlags.STRING.value else: num = args[0] - flags = coreapi.CORE_API_DEL_FLAG - tlvdata = coreapi.CoreSessionTlv.pack(coreapi.CORE_TLV_SESS_NUMBER, num) - msg = coreapi.CoreSessionMessage.pack(flags, tlvdata) + flags = MessageFlags.DELETE.value + tlvdata = coreapi.CoreSessionTlv.pack(SessionTlvs.NUMBER.value, num) + message = coreapi.CoreSessionMessage.pack(flags, tlvdata) sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - sock.connect(('localhost', coreapi.CORE_API_PORT)) - sock.send(msg) + sock.connect(('localhost', CORE_API_PORT)) + sock.send(message) # receive and print a session list if options.list is True: - hdr = sock.recv(coreapi.CoreMessage.hdrsiz) - msgtype, msgflags, msglen = coreapi.CoreMessage.unpackhdr(hdr) + hdr = sock.recv(coreapi.CoreMessage.header_len) + msgtype, msgflags, msglen = coreapi.CoreMessage.unpack_header(hdr) data = "" if msglen: data = sock.recv(msglen) - msg = coreapi.CoreMessage(msgflags, hdr, data) - sessions = msg.gettlv(coreapi.CORE_TLV_SESS_NUMBER) - print "sessions:", sessions + message = coreapi.CoreMessage(msgflags, hdr, data) + sessions = message.get_tlv(coreapi.SessionTlvs.NUMBER.value) + print "sessions:", sessions sock.close() + if __name__ == "__main__": main() diff --git a/daemon/sbin/core-daemon b/daemon/sbin/core-daemon index 2229c866..e25be6c9 100755 --- a/daemon/sbin/core-daemon +++ b/daemon/sbin/core-daemon @@ -7,160 +7,177 @@ # authors: Tom Goff # Jeff Ahrenholz # -''' -core-daemon: the CORE daemon is a server process that receives CORE API + +""" +core-daemon: the CORE daemon is a server process that receives CORE API messages and instantiates emulated nodes and networks within the kernel. Various message handlers are defined and some support for sending messages. 
-''' +""" -import os, optparse, ConfigParser, gc, shlex, socket, shutil +import ConfigParser import atexit -import signal import importlib +import logging +import optparse +import os +import signal +import socket +import sys +import threading +import time -try: - from core import pycore -except ImportError: - # hack for Fedora autoconf that uses the following pythondir: - if "/usr/lib/python2.6/site-packages" in sys.path: - sys.path.append("/usr/local/lib/python2.6/site-packages") - if "/usr/lib64/python2.6/site-packages" in sys.path: - sys.path.append("/usr/local/lib64/python2.6/site-packages") - if "/usr/lib/python2.7/site-packages" in sys.path: - sys.path.append("/usr/local/lib/python2.7/site-packages") - if "/usr/lib64/python2.7/site-packages" in sys.path: - sys.path.append("/usr/local/lib64/python2.7/site-packages") - from core import pycore -from core.coreserver import * -from core.constants import * +from core import constants +from core import corehandlers +from core import coreserver +from core import enumerations from core.api import coreapi -from core.misc.utils import daemonize, closeonexec +from core.corehandlers import CoreDatagramRequestHandler +from core.enumerations import MessageFlags +from core.enumerations import RegisterTlvs +from core.misc import log +from core.misc import nodemaps +from core.misc import nodeutils +from core.misc.utils import closeonexec +from core.misc.utils import daemonize +from core.services import bird +from core.services import dockersvc +from core.services import nrl +from core.services import quagga +from core.services import security +from core.services import startup +from core.services import ucarp +from core.services import utility +from core.services import xorp + +logger = log.get_logger(__name__) DEFAULT_MAXFD = 1024 -# garbage collection debugging -# gc.set_debug(gc.DEBUG_STATS | gc.DEBUG_LEAK) + +def startudp(core_server, server_address): + """ + Start a thread running a UDP server on the same host,port for connectionless requests. + + :param core.coreserver.CoreServer core_server: core server instance + :param tuple[str, int] server_address: server address + :return: created core udp server + :rtype: core.coreserver.CoreUdpServer + """ + core_server.udpserver = coreserver.CoreUdpServer(server_address, CoreDatagramRequestHandler, core_server) + core_server.udpthread = threading.Thread(target=core_server.udpserver.start) + core_server.udpthread.daemon = True + core_server.udpthread.start() + return core_server.udpserver -coreapi.add_node_class("CORE_NODE_DEF", - coreapi.CORE_NODE_DEF, pycore.nodes.CoreNode) -coreapi.add_node_class("CORE_NODE_PHYS", - coreapi.CORE_NODE_PHYS, pycore.pnodes.PhysicalNode) -try: - coreapi.add_node_class("CORE_NODE_XEN", - coreapi.CORE_NODE_XEN, pycore.xen.XenNode) -except Exception: - #print "XenNode class unavailable." 
- pass -coreapi.add_node_class("CORE_NODE_TBD", - coreapi.CORE_NODE_TBD, None) -coreapi.add_node_class("CORE_NODE_SWITCH", - coreapi.CORE_NODE_SWITCH, pycore.nodes.SwitchNode) -coreapi.add_node_class("CORE_NODE_HUB", - coreapi.CORE_NODE_HUB, pycore.nodes.HubNode) -coreapi.add_node_class("CORE_NODE_WLAN", - coreapi.CORE_NODE_WLAN, pycore.nodes.WlanNode) -coreapi.add_node_class("CORE_NODE_RJ45", - coreapi.CORE_NODE_RJ45, pycore.nodes.RJ45Node) -coreapi.add_node_class("CORE_NODE_TUNNEL", - coreapi.CORE_NODE_TUNNEL, pycore.nodes.TunnelNode) -coreapi.add_node_class("CORE_NODE_EMANE", - coreapi.CORE_NODE_EMANE, pycore.nodes.EmaneNode) - -# -# UDP server startup -# -def startudp(mainserver, server_address): - ''' Start a thread running a UDP server on the same host,port for - connectionless requests. - ''' - mainserver.udpserver = CoreUdpServer(server_address, - CoreDatagramRequestHandler, mainserver) - mainserver.udpthread = threading.Thread(target = mainserver.udpserver.start) - mainserver.udpthread.daemon = True - mainserver.udpthread.start() - return mainserver.udpserver - - -# -# Auxiliary server startup -# -def startaux(mainserver, aux_address, aux_handler): - ''' Start a thread running an auxiliary TCP server on the given address. +def startaux(core_server, aux_address, aux_handler): + """ + Start a thread running an auxiliary TCP server on the given address. This server will communicate with client requests using a handler - using the aux_handler class. The aux_handler can provide an alternative + using the aux_handler class. The aux_handler can provide an alternative API to CORE. - ''' - handlermodname,dot,handlerclassname = aux_handler.rpartition('.') + + :param core.coreserver.CoreServer core_server: core server instance + :param tuple[str, int] aux_address: auxiliary server address + :param str aux_handler: auxiliary handler string to import + :return: auxiliary server + """ + handlermodname, dot, handlerclassname = aux_handler.rpartition(".") handlermod = importlib.import_module(handlermodname) handlerclass = getattr(handlermod, handlerclassname) - mainserver.auxserver = CoreAuxServer(aux_address, - handlerclass, - mainserver) - mainserver.auxthread = threading.Thread(target = mainserver.auxserver.start) - mainserver.auxthread.daemon = True - mainserver.auxthread.start() - return mainserver.auxserver + core_server.auxserver = coreserver.CoreAuxServer(aux_address, handlerclass, core_server) + core_server.auxthread = threading.Thread(target=core_server.auxserver.start) + core_server.auxthread.daemon = True + core_server.auxthread.start() + return core_server.auxserver def banner(): - ''' Output the program banner printed to the terminal or log file. - ''' - sys.stdout.write("CORE daemon v.%s started %s\n" % \ - (COREDPY_VERSION, time.ctime())) - sys.stdout.flush() + """ + Output the program banner printed to the terminal or log file. + + :return: nothing + """ + logger.info("CORE daemon v.%s started %s\n" % (constants.COREDPY_VERSION, time.ctime())) -def cored(cfg = None): - ''' Start the CoreServer object and enter the server loop. - ''' - host = cfg['listenaddr'] - port = int(cfg['port']) - if host == '' or host is None: +def cored(cfg=None): + """ + Start the CoreServer object and enter the server loop. 
+ + :param dict cfg: core configuration + :return: nothing + """ + host = cfg["listenaddr"] + port = int(cfg["port"]) + if host == "" or host is None: host = "localhost" try: - server = CoreServer((host, port), CoreRequestHandler, cfg) - except Exception, e: - sys.stderr.write("error starting main server on: %s:%s\n\t%s\n" % \ - (host, port, e)) - sys.stderr.flush() + server = coreserver.CoreServer((host, port), corehandlers.CoreRequestHandler, cfg) + except: + logger.exception("error starting main server on: %s:%s", host, port) sys.exit(1) - closeonexec(server.fileno()) - sys.stdout.write("main server started, listening on: %s:%s\n" % (host, port)) - sys.stdout.flush() - udpserver = startudp(server, (host,port)) + closeonexec(server.fileno()) + logger.info("main server started, listening on: %s:%s\n" % (host, port)) + + udpserver = startudp(server, (host, port)) closeonexec(udpserver.fileno()) - auxreqhandler = cfg['aux_request_handler'] + auxreqhandler = cfg["aux_request_handler"] if auxreqhandler: - try: - handler, auxport = auxreqhandler.rsplit(':') - auxserver = startaux(server, (host,int(auxport)), handler) - closeonexec(auxserver.fileno()) - except Exception as e: - raise ValueError, "invalid auxreqhandler:(%s)\nError: %s" % (auxreqhandler, e) + handler, auxport = auxreqhandler.rsplit(":") + auxserver = startaux(server, (host, int(auxport)), handler) + closeonexec(auxserver.fileno()) server.serve_forever() + +# TODO: should sessions and the main core daemon both catch at exist to shutdown independently? def cleanup(): - while CoreServer.servers: - server = CoreServer.servers.pop() + """ + Runs server shutdown and cleanup when catching an exit signal. + + :return: nothing + """ + while coreserver.CoreServer.servers: + server = coreserver.CoreServer.servers.pop() server.shutdown() + atexit.register(cleanup) + def sighandler(signum, stackframe): - print >> sys.stderr, "terminated by signal:", signum + """ + Signal handler when different signals are sent. + + :param int signum: singal number sent + :param stackframe: stack frame sent + :return: nothing + """ + logger.error("terminated by signal: %s", signum) sys.exit(signum) + signal.signal(signal.SIGHUP, sighandler) signal.signal(signal.SIGINT, sighandler) signal.signal(signal.SIGTERM, sighandler) signal.signal(signal.SIGUSR1, sighandler) signal.signal(signal.SIGUSR2, sighandler) -def logrotate(stdout, stderr, stdoutmode = 0644, stderrmode = 0644): + +def logrotate(stdout, stderr, stdoutmode=0644, stderrmode=0644): + """ + Log rotation method. + + :param stdout: stdout + :param stderr: stderr + :param int stdoutmode: stdout mode + :param int stderrmode: stderr mode + :return: + """ + def reopen(fileno, filename, mode): err = 0 fd = -1 @@ -174,6 +191,7 @@ def logrotate(stdout, stderr, stdoutmode = 0644, stderrmode = 0644): if fd >= 0: os.close(fd) return err + if stdout: err = reopen(1, stdout, stdoutmode) if stderr: @@ -185,59 +203,64 @@ def logrotate(stdout, stderr, stdoutmode = 0644, stderrmode = 0644): else: reopen(2, stderr, stderrmode) -def getMergedConfig(filename): - ''' Return a configuration after merging config file and command-line - arguments. - ''' + +def get_merged_config(filename): + """ + Return a configuration after merging config file and command-line arguments. 
+ + :param str filename: file name to merge configuration settings with + :return: merged configuration + :rtype: dict + """ # these are the defaults used in the config file - defaults = { 'port' : '%d' % coreapi.CORE_API_PORT, - 'listenaddr' : 'localhost', - 'pidfile' : '%s/run/core-daemon.pid' % CORE_STATE_DIR, - 'logfile' : '%s/log/core-daemon.log' % CORE_STATE_DIR, - 'xmlfilever' : '1.0', - 'numthreads' : '1', - 'verbose' : 'False', - 'daemonize' : 'False', - 'debug' : 'False', - 'execfile' : None, - 'aux_request_handler' : None, - } + defaults = {"port": "%d" % enumerations.CORE_API_PORT, + "listenaddr": "localhost", + "pidfile": "%s/run/core-daemon.pid" % constants.CORE_STATE_DIR, + "logfile": "%s/log/core-daemon.log" % constants.CORE_STATE_DIR, + "xmlfilever": "1.0", + "numthreads": "1", + "verbose": "False", + "daemonize": "False", + "debug": "False", + "execfile": None, + "aux_request_handler": None, + } usagestr = "usage: %prog [-h] [options] [args]\n\n" + \ "CORE daemon v.%s instantiates Linux network namespace " \ - "nodes." % COREDPY_VERSION - parser = optparse.OptionParser(usage = usagestr) - parser.add_option("-f", "--configfile", dest = "configfile", - type = "string", - help = "read config from specified file; default = %s" % - filename) - parser.add_option("-d", "--daemonize", dest = "daemonize", + "nodes." % constants.COREDPY_VERSION + parser = optparse.OptionParser(usage=usagestr) + parser.add_option("-f", "--configfile", dest="configfile", + type="string", + help="read config from specified file; default = %s" % + filename) + parser.add_option("-d", "--daemonize", dest="daemonize", action="store_true", - help = "run in background as daemon; default=%s" % \ - defaults["daemonize"]) - parser.add_option("-e", "--execute", dest = "execfile", type = "string", - help = "execute a Python/XML-based session") - parser.add_option("-l", "--logfile", dest = "logfile", type = "string", - help = "log output to specified file; default = %s" % - defaults["logfile"]) - parser.add_option("-p", "--port", dest = "port", type = int, - help = "port number to listen on; default = %s" % \ - defaults["port"]) - parser.add_option("-i", "--pidfile", dest = "pidfile", - help = "filename to write pid to; default = %s" % \ - defaults["pidfile"]) - parser.add_option("-t", "--numthreads", dest = "numthreads", type = int, - help = "number of server threads; default = %s" % \ - defaults["numthreads"]) - parser.add_option("-v", "--verbose", dest = "verbose", action="store_true", - help = "enable verbose logging; default = %s" % \ - defaults["verbose"]) - parser.add_option("-g", "--debug", dest = "debug", action="store_true", - help = "enable debug logging; default = %s" % \ - defaults["debug"]) + help="run in background as daemon; default=%s" % \ + defaults["daemonize"]) + parser.add_option("-e", "--execute", dest="execfile", type="string", + help="execute a Python/XML-based session") + parser.add_option("-l", "--logfile", dest="logfile", type="string", + help="log output to specified file; default = %s" % + defaults["logfile"]) + parser.add_option("-p", "--port", dest="port", type=int, + help="port number to listen on; default = %s" % \ + defaults["port"]) + parser.add_option("-i", "--pidfile", dest="pidfile", + help="filename to write pid to; default = %s" % \ + defaults["pidfile"]) + parser.add_option("-t", "--numthreads", dest="numthreads", type=int, + help="number of server threads; default = %s" % \ + defaults["numthreads"]) + parser.add_option("-v", "--verbose", dest="verbose", 
action="store_true", + help="enable verbose logging; default = %s" % \ + defaults["verbose"]) + parser.add_option("-g", "--debug", dest="debug", action="store_true", + help="enable debug logging; default = %s" % \ + defaults["debug"]) # parse command line options - (options, args) = parser.parse_args() + options, args = parser.parse_args() # read the config file if options.configfile is not None: @@ -252,17 +275,17 @@ def getMergedConfig(filename): # gracefully support legacy configs (cored.py/cored now core-daemon) if cfg.has_section("cored.py"): for name, val in cfg.items("cored.py"): - if name == 'pidfile' or name == 'logfile': - bn = os.path.basename(val).replace('coredpy', 'core-daemon') + if name == "pidfile" or name == "logfile": + bn = os.path.basename(val).replace("coredpy", "core-daemon") val = os.path.join(os.path.dirname(val), bn) cfg.set(section, name, val) if cfg.has_section("cored"): for name, val in cfg.items("cored"): - if name == 'pidfile' or name == 'logfile': - bn = os.path.basename(val).replace('cored', 'core-daemon') + if name == "pidfile" or name == "logfile": + bn = os.path.basename(val).replace("cored", "core-daemon") val = os.path.join(os.path.dirname(val), bn) cfg.set(section, name, val) - + # merge command line with config file for opt in options.__dict__: val = options.__dict__[opt] @@ -271,49 +294,74 @@ def getMergedConfig(filename): return dict(cfg.items(section)), args + def exec_file(cfg): - ''' Send a Register Message to execute a new session based on XML or Python - script file. - ''' - filename = cfg['execfile'] - sys.stdout.write("Telling daemon to execute file: '%s'...\n" % filename) - sys.stdout.flush() - tlvdata = coreapi.CoreRegTlv.pack(coreapi.CORE_TLV_REG_EXECSRV, filename) - msg = coreapi.CoreRegMessage.pack(coreapi.CORE_API_ADD_FLAG, tlvdata) + """ + Send a Register Message to execute a new session based on XML or Python script file. + + :param dict cfg: configuration settings + :return: 0 + """ + filename = cfg["execfile"] + logger.info("Telling daemon to execute file: %s...", filename) + tlvdata = coreapi.CoreRegisterTlv.pack(RegisterTlvs.EXECUTE_SERVER.value, filename) + msg = coreapi.CoreRegMessage.pack(MessageFlags.ADD.value, tlvdata) sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - sock.connect(("localhost", int(cfg['port']))) # TODO: connect address option + sock.connect(("localhost", int(cfg["port"]))) # TODO: connect address option sock.sendall(msg) return 0 -def main(): - ''' Main program startup. - ''' - # get a configuration merged from config file and command-line arguments - cfg, args = getMergedConfig("%s/core.conf" % CORE_CONF_DIR) - for a in args: - sys.stderr.write("ignoring command line argument: '%s'\n" % a) - if cfg['daemonize'] == 'True': - daemonize(rootdir = None, umask = 0, close_fds = False, - stdin = os.devnull, - stdout = cfg['logfile'], stderr = cfg['logfile'], - pidfilename = cfg['pidfile'], - defaultmaxfd = DEFAULT_MAXFD) +def main(): + """ + Main program startup. 
+ + :return: nothing + """ + # get a configuration merged from config file and command-line arguments + cfg, args = get_merged_config("%s/core.conf" % constants.CORE_CONF_DIR) + for a in args: + logger.error("ignoring command line argument: %s", a) + + if cfg["daemonize"] == "True": + daemonize(rootdir=None, umask=0, close_fds=False, + stdin=os.devnull, + stdout=cfg["logfile"], stderr=cfg["logfile"], + pidfilename=cfg["pidfile"], + defaultmaxfd=DEFAULT_MAXFD) signal.signal(signal.SIGUSR1, lambda signum, stackframe: - logrotate(stdout = cfg['logfile'], - stderr = cfg['logfile'])) + logrotate(stdout=cfg["logfile"], stderr=cfg["logfile"])) banner() - if cfg['execfile']: - cfg['execfile'] = os.path.abspath(cfg['execfile']) + if cfg["execfile"]: + cfg["execfile"] = os.path.abspath(cfg["execfile"]) sys.exit(exec_file(cfg)) try: cored(cfg) except KeyboardInterrupt: - pass + logger.info("keyboard interrupt, stopping core daemon") sys.exit(0) if __name__ == "__main__": + log.setup(level=logging.INFO) + + # configure nodes to use + node_map = nodemaps.CLASSIC_NODES + if len(sys.argv) == 2 and sys.argv[1] == "ovs": + node_map = nodemaps.OVS_NODES + nodeutils.set_node_map(node_map) + + # load default services + quagga.load_services() + nrl.load_services() + xorp.load_services() + bird.load_services() + utility.load_services() + security.load_services() + ucarp.load_services() + dockersvc.load_services() + startup.load_services() + main() diff --git a/daemon/sbin/core-manage b/daemon/sbin/core-manage index 6d2d4efe..82857fe5 100755 --- a/daemon/sbin/core-manage +++ b/daemon/sbin/core-manage @@ -6,29 +6,31 @@ # # author: Jeff Ahrenholz # -''' +""" core-manage: Helper tool to add, remove, or check for services, models, and node types in a CORE installation. -''' +""" -import os -import sys import ast import optparse +import os import re +import sys -from core import pycore +from core import services from core.constants import CORE_CONF_DIR + class FileUpdater(object): - ''' Helper class for changing configuration files. - ''' + """ + Helper class for changing configuration files. + """ actions = ("add", "remove", "check") targets = ("service", "model", "nodetype") def __init__(self, action, target, data, options): - ''' - ''' + """ + """ self.action = action self.target = target self.data = data @@ -37,13 +39,13 @@ class FileUpdater(object): self.search, self.filename = self.get_filename(target) def process(self): - ''' Invoke update_file() using a helper method depending on target. - ''' + """ Invoke update_file() using a helper method depending on target. + """ if self.verbose: txt = "Updating" if self.action == "check": txt = "Checking" - sys.stdout.write("%s file: '%s'\n" % (txt, self.filename)) + sys.stdout.write("%s file: %s\n" % (txt, self.filename)) if self.target == "service": r = self.update_file(fn=self.update_services) @@ -64,41 +66,40 @@ class FileUpdater(object): return r def update_services(self, line): - ''' Modify the __init__.py file having this format: + """ Modify the __init__.py file having this format: __all__ = ["quagga", "nrl", "xorp", "bird", ] Returns True or False when "check" is the action, a modified line otherwise. 
- ''' - line = line.strip('\n') - key, valstr = line.split('= ') + """ + line = line.strip("\n") + key, valstr = line.split("= ") vals = ast.literal_eval(valstr) r = self.update_keyvals(key, vals) if self.action == "check": return r - valstr = '%s' % r - return '= '.join([key, valstr]) + '\n' - + valstr = "%s" % r + return "= ".join([key, valstr]) + "\n" def update_emane_models(self, line): - ''' Modify the core.conf file having this format: + """ Modify the core.conf file having this format: emane_models = RfPipe, Ieee80211abg, CommEffect, Bypass Returns True or False when "check" is the action, a modified line otherwise. - ''' - line = line.strip('\n') - key, valstr = line.split('= ') - vals = valstr.split(', ') + """ + line = line.strip("\n") + key, valstr = line.split("= ") + vals = valstr.split(", ") r = self.update_keyvals(key, vals) if self.action == "check": return r - valstr = ', '.join(r) - return '= '.join([key, valstr]) + '\n' + valstr = ", ".join(r) + return "= ".join([key, valstr]) + "\n" def update_keyvals(self, key, vals): - ''' Perform self.action on (key, vals). + """ Perform self.action on (key, vals). Returns True or False when "check" is the action, a modified line otherwise. - ''' + """ if self.action == "check": if self.data in vals: return True @@ -115,11 +116,10 @@ class FileUpdater(object): return vals def get_filename(self, target): - ''' Return search string and filename based on target. - ''' + """ Return search string and filename based on target. + """ if target == "service": - pypath = os.path.dirname(pycore.__file__) - filename = os.path.join(pypath, "services", "__init__.py") + filename = os.path.abspath(services.__file__) search = "__all__ =" elif target == "model": filename = os.path.join(CORE_CONF_DIR, "core.conf") @@ -132,21 +132,21 @@ class FileUpdater(object): else: raise ValueError, "unknown target" if not os.path.exists(filename): - raise ValueError, "file '%s' does not exist" % filename + raise ValueError, "file %s does not exist" % filename return search, filename def update_file(self, fn=None): - ''' Open a file and search for self.search, invoking the supplied + """ Open a file and search for self.search, invoking the supplied function on the matching line. Write file changes if necessary. Returns True if the file has changed (or action is "check" and the search string is found), False otherwise. - ''' + """ changed = False - output = "" # this accumulates output, assumes input is small + output = "" # this accumulates output, assumes input is small with open(self.filename, "r") as f: for line in f: if line[:len(self.search)] == self.search: - r = fn(line) # line may be modified by fn() here + r = fn(line) # line may be modified by fn() here if self.action == "check": return r else: @@ -157,17 +157,17 @@ class FileUpdater(object): if changed: with open(self.filename, "w") as f: f.write(output) - + return changed def update_nodes_conf(self): - ''' Add/remove/check entries from nodes.conf. This file + """ Add/remove/check entries from nodes.conf. This file contains a Tcl-formatted array of node types. The array index must be properly set for new entries. Uses self.{action, filename, search, data} variables as input and returns the same value as update_file(). 
- ''' + """ changed = False - output = "" # this accumulates output, assumes input is small + output = "" # this accumulates output, assumes input is small with open(self.filename, "r") as f: for line in f: # make sure data is not added twice @@ -181,14 +181,15 @@ class FileUpdater(object): continue else: output += line + if self.action == "add": - index = int(re.match('^\d+', line).group(0)) - output += str(index + 1) + ' ' + self.data + '\n' + index = int(re.match("^\d+", line).group(0)) + output += str(index + 1) + " " + self.data + "\n" changed = True if changed: with open(self.filename, "w") as f: f.write(output) - + return changed @@ -200,21 +201,21 @@ def main(): usagestr += "\n %prog -v check model RfPipe" usagestr += "\n %prog --userpath=\"$HOME/.core\" add nodetype \"{ftp ftp.gif ftp.gif {DefaultRoute FTP} netns {FTP server} }\" \n" usagestr += "\nArguments:\n should be one of: %s" % \ - ', '.join(FileUpdater.actions) + ", ".join(FileUpdater.actions) usagestr += "\n should be one of: %s" % \ - ', '.join(FileUpdater.targets) + ", ".join(FileUpdater.targets) usagestr += "\n is the text to %s" % \ - ', '.join(FileUpdater.actions) - parser = optparse.OptionParser(usage = usagestr) - parser.set_defaults(userpath = None, verbose = False,) + ", ".join(FileUpdater.actions) + parser = optparse.OptionParser(usage=usagestr) + parser.set_defaults(userpath=None, verbose=False, ) - parser.add_option("--userpath", dest = "userpath", type = "string", - help = "use the specified user path (e.g. \"$HOME/.core" \ - "\") to access nodes.conf") - parser.add_option("-v", "--verbose", dest = "verbose", action="store_true", - help = "be verbose when performing action") + parser.add_option("--userpath", dest="userpath", type="string", + help="use the specified user path (e.g. \"$HOME/.core" \ + "\") to access nodes.conf") + parser.add_option("-v", "--verbose", dest="verbose", action="store_true", + help="be verbose when performing action") - def usage(msg = None, err = 0): + def usage(msg=None, err=0): sys.stdout.write("\n") if msg: sys.stdout.write(msg + "\n\n") @@ -228,11 +229,11 @@ def main(): action = args[0] if action not in FileUpdater.actions: - usage("invalid action '%s'" % action, 1) + usage("invalid action %s" % action, 1) target = args[1] if target not in FileUpdater.targets: - usage("invalid target '%s'" % target, 1) + usage("invalid target %s" % target, 1) if target == "nodetype" and not options.userpath: usage("user path option required for this target (%s)" % target) @@ -249,5 +250,6 @@ def main(): sys.exit(1) sys.exit(0) + if __name__ == "__main__": main() diff --git a/daemon/setup.py b/daemon/setup.py index fd033e23..e610ab86 100644 --- a/daemon/setup.py +++ b/daemon/setup.py @@ -1,28 +1,37 @@ # Copyright (c)2010-2012 the Boeing Company. # See the LICENSE file included in this distribution. -import os, glob -from distutils.core import setup +""" +Defines how CORE will be built for installation. 
+""" + +from setuptools import setup + from core.constants import COREDPY_VERSION -setup(name = "core-python", - version = COREDPY_VERSION, - packages = [ - "core", - "core.addons", - "core.api", - "core.emane", - "core.misc", - "core.bsd", - "core.netns", - "core.phys", - "core.xen", - "core.services", - ], - description = "Python components of CORE", - url = "http://www.nrl.navy.mil/itd/ncs/products/core", - author = "Boeing Research & Technology", - author_email = "core-dev@pf.itd.nrl.navy.mil", - license = "BSD", - long_description="Python scripts and modules for building virtual " \ - "emulated networks.") +setup(name="core-python", + version=COREDPY_VERSION, + packages=[ + "core", + "core.addons", + "core.api", + "core.bsd", + "core.emane", + "core.misc", + "core.netns", + "core.phys", + "core.services", + "core.xen", + "core.xml", + ], + install_requires=[ + "enum34" + ], + setup_requires=["pytest-runner"], + tests_require=["pytest"], + description="Python components of CORE", + url="http://www.nrl.navy.mil/itd/ncs/products/core", + author="Boeing Research & Technology", + author_email="core-dev@pf.itd.nrl.navy.mil", + license="BSD", + long_description="Python scripts and modules for building virtual emulated networks.") diff --git a/daemon/src/setup.py b/daemon/src/setup.py index 89de2535..b653f51b 100644 --- a/daemon/src/setup.py +++ b/daemon/src/setup.py @@ -1,29 +1,43 @@ # Copyright (c)2010-2012 the Boeing Company. # See the LICENSE file included in this distribution. -import os, glob -from distutils.core import setup, Extension +from setuptools import setup, Extension -netns = Extension("netns", sources = ["netnsmodule.c", "netns.c"]) -vcmd = Extension("vcmd", - sources = ["vcmdmodule.c", - "vnode_client.c", - "vnode_chnl.c", - "vnode_io.c", - "vnode_msg.c", - "vnode_cmd.c", - ], - library_dirs = ["build/lib"], - libraries = ["ev"]) +netns = Extension( + "netns", + sources=[ + "netnsmodule.c", + "netns.c" + ] +) -setup(name = "core-python-netns", - version = "1.0", - description = "Extension modules to support virtual nodes using " \ - "Linux network namespaces", - ext_modules = [netns, vcmd], - url = "http://www.nrl.navy.mil/itd/ncs/products/core", - author = "Boeing Research & Technology", - author_email = "core-dev@pf.itd.nrl.navy.mil", - license = "BSD", - long_description="Extension modules and utilities to support virtual " \ - "nodes using Linux network namespaces") +vcmd = Extension( + "vcmd", + sources=[ + "vcmdmodule.c", + "vnode_client.c", + "vnode_chnl.c", + "vnode_io.c", + "vnode_msg.c", + "vnode_cmd.c", + ], + library_dirs=["build/lib"], + libraries=["ev"] +) + +setup( + name="core-python-netns", + version="1.0", + description="Extension modules to support virtual nodes using " + "Linux network namespaces", + ext_modules=[ + netns, + vcmd + ], + url="http://www.nrl.navy.mil/itd/ncs/products/core", + author="Boeing Research & Technology", + author_email="core-dev@pf.itd.nrl.navy.mil", + license="BSD", + long_description="Extension modules and utilities to support virtual " + "nodes using Linux network namespaces" +) diff --git a/python-prefix.py b/python-prefix.py index c5ebdde8..20a50d75 100755 --- a/python-prefix.py +++ b/python-prefix.py @@ -1,26 +1,26 @@ #!/usr/bin/env python -import sys import os.path import site +import sys + def main(): - '''\ + """ Check if the given prefix is included in sys.path for the given python version; if not find an alternate valid prefix. Print the result to standard out. 
- ''' + """ if len(sys.argv) != 3: - msg = 'usage: %s \n' % \ - os.path.basename(sys.argv[0]) + msg = "usage: %s \n" % os.path.basename(sys.argv[0]) sys.stderr.write(msg) return 1 python_prefix = sys.argv[1] python_version = sys.argv[2] - path = '%s/lib/python%s' % (python_prefix, python_version) + path = "%s/lib/python%s" % (python_prefix, python_version) path = os.path.normpath(path) - if path[-1] != '/': - path = path + '/' + if path[-1] != "/": + path += "/" prefix = None for p in sys.path: if p.startswith(path): @@ -28,8 +28,9 @@ def main(): break if not prefix: prefix = site.PREFIXES[-1] - sys.stdout.write('%s\n' % prefix) + sys.stdout.write("%s\n" % prefix) return 0 -if __name__ == '__main__': + +if __name__ == "__main__": sys.exit(main())
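
As a rough illustration of the kind of check python-prefix.py performs, the
sketch below uses hard-coded inputs; the prefix and version values are
hypothetical stand-ins for the two command-line arguments the real script
reads from sys.argv, and the fallback behavior is assumed from the visible
portion of the diff above.

    import site
    import sys

    # Hypothetical inputs; python-prefix.py takes these as its two arguments.
    python_prefix = "/usr/local"
    python_version = "2.7"

    # Accept the requested prefix only if <prefix>/lib/python<version> is
    # actually searched by this interpreter; otherwise fall back to one of
    # the interpreter's own site prefixes.
    path = "%s/lib/python%s/" % (python_prefix, python_version)
    if any(p.startswith(path) for p in sys.path):
        prefix = python_prefix
    else:
        prefix = site.PREFIXES[-1]
    print(prefix)

The printed value is presumably what the build then uses as the installation
root for the Python packages, so that installed modules land somewhere the
target interpreter will actually import from.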