Merge branch 'rel/5.1'

Blake J. Harnden 2018-06-01 08:43:21 -07:00
commit f7e2bb73b9
303 changed files with 6907 additions and 35200 deletions

.gitignore

@ -4,6 +4,7 @@
.version
.version.date
Makefile
!kernel/**/Makefile
Makefile.in
aclocal.m4
autom4te.cache
@ -13,10 +14,12 @@ config.h.in
config.log
config.status
configure
core-*.tar.gz
debian
stamp-h1
# python build directory
dist
# intellij
*.iml
.idea
@ -26,3 +29,17 @@ stamp-h1
# ignore test coverage files
coverage.xml
# python files
*.egg-info
# ignore package files
*.rpm
*.deb
*.tar.gz
# pytest cache files
.cache
# ignore swap files
*.swp
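
The new `!kernel/**/Makefile` entry re-includes kernel Makefiles that the broad `Makefile` pattern above would otherwise hide. A quick way to sanity-check the rules is `git check-ignore -v`; the kernel path below is only an illustrative example.

```shell
# Show which .gitignore rule (if any) decides each path; the kernel path is illustrative.
git check-ignore -v Makefile kernel/linux/Makefile
```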

Changelog

@ -1,3 +1,17 @@
2018-XX-XX CORE 5.1
* DAEMON:
- default nodes are now set in the node map
- moved ns3 and netns directories to the top of the repo
- changes to make use of fpm as the tool for building packages
- removed usage of logzero to avoid dependency issues for built packages
- removed daemon addons directory
* TEST:
- fixed some broken tests
* BUGFIXES:
- #142 - duplication of custom services
- #136 - sphinx-apidoc command not found
- #137 - make command fails when using distclean
2017-09-01 CORE 5.0
* DEVELOPMENT:
- support for editorconfig to help standardize development across IDEs, from the defined configuration file

LICENSE

@ -1,4 +1,4 @@
Copyright (c) 2005-2017, the Boeing Company.
Copyright (c) 2005-2018, the Boeing Company.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

Makefile.am

@ -8,77 +8,189 @@
#
if WANT_DOCS
DOCS = doc daemon/doc
DOCS = doc
endif
if WANT_GUI
GUI = gui
GUI = gui
endif
if WANT_DAEMON
DAEMON = scripts daemon
DAEMON = scripts daemon
endif
if WANT_NETNS
NETNS = netns ns3
endif
# keep docs last due to dependencies on binaries
SUBDIRS = ${GUI} ${DAEMON} ${DOCS}
SUBDIRS = $(GUI) $(DAEMON) $(NETNS) $(DOCS)
ACLOCAL_AMFLAGS = -I config
ACLOCAL_AMFLAGS = -I config
# extra files to include with distribution tarball
EXTRA_DIST = bootstrap.sh LICENSE README-Xen Changelog kernel \
python-prefix.py revision.sh \
.version .version.date \
packaging/bsd \
packaging/deb/compat \
packaging/deb/copyright \
packaging/deb/changelog \
packaging/deb/rules \
packaging/deb/control \
packaging/deb/core-daemon.install.in \
packaging/deb/core-daemon.prerm.in \
packaging/deb/core-gui.install.in \
packaging/rpm/core.spec.in \
packaging/rpm/specfiles.sh
EXTRA_DIST = bootstrap.sh \
LICENSE \
README.md \
ASSIGNMENT_OF_COPYRIGHT.pdf \
Changelog \
.version \
.version.date
DISTCLEAN_TARGETS = aclocal.m4 config.h.in
DISTCLEAN_TARGETS = aclocal.m4 config.h.in
# extra cruft to remove
DISTCLEANFILES = aclocal.m4 config.h.in configure Makefile.in config/compile
DISTCLEANFILES = aclocal.m4 \
config.h.in \
configure \
Makefile.in \
config/compile
MAINTAINERCLEANFILES = \
.version \
.version.date
MAINTAINERCLEANFILES = .version \
.version.date
# don't include svn dirs in source tarball
dist-hook:
rm -rf `find $(distdir)/kernel -name .svn`
rm -rf $(distdir)/packaging/bsd/.svn
define fpm-python =
fpm -s python -t $1 \
-m "$(PACKAGE_MAINTAINERS)" \
--vendor "$(PACKAGE_VENDOR)" \
$2
endef
# build a source RPM
.PHONY: rpm
rpm: dist
mkdir -p $(HOME)/rpmbuild/{BUILD,RPMS,SOURCES,SPECS,SRPMS}
echo '%_topdir $(HOME)/rpmbuild' > ~/.rpmmacros
cp -afv core-@CORE_VERSION@.tar.gz ~/rpmbuild/SOURCES
cp -afv packaging/rpm/core.spec ~/rpmbuild/SPECS
rpmbuild -bs ~/rpmbuild/SPECS/core.spec
define fpm-gui =
fpm -s dir -t $1 -n core-gui \
-m "$(PACKAGE_MAINTAINERS)" \
--license "BSD" \
--description "Common Open Research Emulator GUI front-end" \
--url http://www.nrl.navy.mil/itd/ncs/products/core \
--vendor "$(PACKAGE_VENDOR)" \
-p core-gui_VERSION_ARCH.$1 \
-v $(PACKAGE_VERSION) \
-d "bash" \
-d "tcl" \
-d "tk" \
$2 \
-C $(DESTDIR)
endef
# build a Ubuntu deb package using CDBS
.PHONY: deb
deb:
rm -rf debian
mkdir -p debian
cp -vf packaging/deb/* debian/
@echo "First create source archive with: dpkg-source -b core-@CORE_VERSION@"
@echo "Then build with: pbuilder-dist precise i386 build core*.dsc"
define fpm-daemon-rpm =
fpm -s python -t rpm \
-p NAME_sysv_VERSION_ARCH.rpm \
--rpm-init scripts/core-daemon \
--python-install-bin $(bindir) \
--python-install-data $(prefix) \
--python-install-lib $(pythondir) \
-m "$(PACKAGE_MAINTAINERS)" \
--vendor "$(PACKAGE_VENDOR)" \
-d "procps-ng" \
-d "bash >= 3.0" \
-d "bridge-utils" \
-d "ebtables" \
-d "iproute" \
-d "libev" \
-d "net-tools" \
-d "python >= 2.7, python < 3.0" \
netns/setup.py daemon/setup.py
endef
.PHONY: core-restart
core-restart:
/etc/init.d/core-daemon stop
daemon/sbin/core-cleanup
rm -f /var/log/core-daemon.log
/etc/init.d/core-daemon start
define fpm-daemon-deb =
fpm -s python -t deb \
-p NAME_$1_VERSION_ARCH.deb \
--python-install-bin $(bindir) \
--python-install-data $(prefix) \
--python-install-lib $(pythondir) \
$2 $3 \
-m "$(PACKAGE_MAINTAINERS)" \
--vendor "$(PACKAGE_VENDOR)" \
-d "procps" \
-d "libc6 >= 2.14" \
-d "bash >= 3.0" \
-d "bridge-utils" \
-d "ebtables" \
-d "iproute2" \
-d "libev4" \
-d "python (>= 2.7), python (<< 3.0)" \
--deb-recommends quagga \
netns/setup.py daemon/setup.py
endef
.PHONY: fpm
fpm: clean-local-fpm
$(MAKE) -C gui install DESTDIR=$(DESTDIR)
$(call fpm-gui,rpm)
$(call fpm-gui,deb,-d "libtk-img")
$(call fpm-python,rpm,ns3/setup.py)
$(call fpm-python,deb,ns3/setup.py)
$(call fpm-daemon-rpm)
$(call fpm-daemon-deb,sysv,--deb-init,scripts/core-daemon)
$(call fpm-daemon-deb,systemd,--deb-systemd,scripts/core-daemon.service)
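For readers unfamiliar with the fpm macros above, the sketch below hand-expands one invocation, `$(call fpm-gui,deb,-d "libtk-img")`, with placeholder values for the configure-provided variables; the real version, maintainer, and staging directory come from configure.ac and the `make fpm DESTDIR=...` invocation.

```shell
# Approximate expansion of $(call fpm-gui,deb,-d "libtk-img"); version,
# maintainer, and the -C staging directory are placeholders here.
fpm -s dir -t deb -n core-gui \
    -m "CORE Developers <core-dev@nrl.navy.mil>" \
    --license "BSD" \
    --description "Common Open Research Emulator GUI front-end" \
    --url http://www.nrl.navy.mil/itd/ncs/products/core \
    --vendor "CORE Developers" \
    -p core-gui_VERSION_ARCH.deb \
    -v 5.1 \
    -d "bash" -d "tcl" -d "tk" -d "libtk-img" \
    -C /tmp/core-gui
```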
.PHONY: clean-local-fpm
clean-local-fpm:
-rm -rf *.deb
-rm -rf *.rpm
clean-local: clean-local-fpm
.version: Makefile
echo $(CORE_VERSION) > $@
echo $(PACKAGE_VERSION) > $@
.version.date: Makefile
echo $(CORE_VERSION_DATE) > $@
echo $(PACKAGE_DATE) > $@
define change-files =
$(info creating file $1 from $1.in)
@$(SED) -e 's,[@]sbindir[@],$(sbindir),g' \
-e 's,[@]bindir[@],$(bindir),g' \
-e 's,[@]pythondir[@],$(pythondir),g' \
-e 's,[@]PYTHON[@],$(PYTHON),g' \
-e 's,[@]PACKAGE_VERSION[@],$(PACKAGE_VERSION),g' \
-e 's,[@]PACKAGE_DATE[@],$(PACKAGE_DATE),g' \
-e 's,[@]CORE_LIB_DIR[@],$(CORE_LIB_DIR),g' \
-e 's,[@]CORE_STATE_DIR[@],$(CORE_STATE_DIR),g' \
-e 's,[@]CORE_DATA_DIR[@],$(CORE_DATA_DIR),g' \
-e 's,[@]CORE_CONF_DIR[@],$(CORE_CONF_DIR),g' \
-e 's,[@]CORE_GUI_CONF_DIR[@],$(CORE_GUI_CONF_DIR),g' \
-e 's,[@]brctl_path[@],$(brctl_path),g' \
-e 's,[@]sysctl_path[@],$(sysctl_path),g' \
-e 's,[@]ip_path[@],$(ip_path),g' \
-e 's,[@]tc_path[@],$(tc_path),g' \
-e 's,[@]ebtables_path[@],$(ebtables_path),g' \
-e 's,[@]mount_path[@],$(mount_path),g' \
-e 's,[@]umount_path[@],$(umount_path),g' \
-e 's,[@]ovs_vs_path[@],$(ovs_vs_path),g' \
-e 's,[@]ovs_of_path[@],$(ovs_of_path),g' \
< $1.in > $1
endef
all: change-files
.PHONY: change-files
change-files:
$(call change-files,gui/core-gui)
$(call change-files,scripts/core-daemon.service)
$(call change-files,scripts/core-daemon)
$(call change-files,daemon/core/constants.py)
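The change-files macro is a plain sed template expansion: each listed file is generated from its `.in` counterpart by replacing `@token@` markers with configure-time values. A minimal sketch of a single expansion, with illustrative values standing in for the configured paths:

```shell
# One-off sketch of what change-files does for a single template;
# /usr/local/bin and /etc/core stand in for the configure-chosen values.
sed -e 's,[@]bindir[@],/usr/local/bin,g' \
    -e 's,[@]CORE_CONF_DIR[@],/etc/core,g' \
    < scripts/core-daemon.in > scripts/core-daemon
```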
CORE_DOC_HTML = core-html-$(PACKAGE_VERSION)
CORE_DOC_PDF = core-manual-$(PACKAGE_VERSION)
CORE_DOC_SRC = core-python-$(PACKAGE_VERSION)
.PHONY: doc
doc: doc-clean
$(MAKE) -C doc html
mv doc/_build/html doc/$(CORE_DOC_HTML)
tar -C doc -czf $(CORE_DOC_HTML).tgz $(CORE_DOC_HTML)
$(MAKE) -C doc latexpdf
mv doc/_build/latex/CORE.pdf $(CORE_DOC_PDF).pdf
$(MAKE) -C daemon/doc html
mv daemon/doc/_build/html daemon/doc/$(CORE_DOC_SRC)
tar -C daemon/doc -czf $(CORE_DOC_SRC).tgz $(CORE_DOC_SRC)
.PHONY: doc-clean
doc-clean:
-rm -rf doc/_build
-rm -rf doc/$(CORE_DOC_HTML)
-rm -rf daemon/doc/_build
-rm -rf daemon/doc/$(CORE_DOC_SRC)
-rm -f $(CORE_DOC_HTML).tgz
-rm -f $(CORE_DOC_SRC).tgz
-rm -f $(CORE_DOC_PDF).pdf

README-Xen

@ -1,87 +0,0 @@
CORE Xen README
This file describes the xen branch of the CORE development tree which enables
machines based on Xen domUs. When you edit node types, you are given the option
of changing the machine type (netns, physical, or xen) and the profile for
each node type.
CORE will create each domU machine on the fly, having a bootable ISO image that
contains the root filesystem, and a special persistent area (/rtr/persistent)
using an LVM volume where configuration is stored. See the /etc/core/xen.conf
file for related settings here.
INSTALLATION
1. Tested under OpenSUSE 11.3 which allows installing a Xen dom0 during the
install process.
2. Create an LVM volume group having enough free space available for CORE to
build logical volumes for domU nodes. The name of this group is set with the
'vg_name=' option in /etc/core/xen.conf. (With 256M per persistent area,
10GB would allow for 40 nodes.)
3. To get libev-devel in OpenSUSE, use:
zypper ar http://download.opensuse.org/repositories/X11:/windowmanagers/openSUSE_11.3 WindowManagers
zypper install libev-devel
4. In addition to the normal CORE dependencies
(see http://code.google.com/p/coreemu/wiki/Quickstart), pyparted-3.2 is used
when creating LVM partitions and decorator-3.3.0 is a dependency for
pyparted. The 'python setup.py install' and 'make install' need to be
performed on these source tarballs as no packages are available.
tar xzf decorator-3.3.0.tar.gz
cd decorator-3.3.0
python setup.py build
python setup.py install
tar xzf pyparted-3.2.tar.gz
cd pyparted-3.2
./configure
make
make install
5. These Xen parameters were used for dom0, by editing /boot/grub/menu.lst:
a) Add options to "kernel /xen.gz" line:
gnttab_max_nr_frames=128 dom0_mem=1G dom0_max_vcpus=2 dom0_vcpus_pin
b) Make Xen default boot by editing the "default" line with the
index for the Xen boot option. e.g. change "default 0" to "default 2"
Reboot to enable the Xen kernel.
6. Run CORE's ./configure script as root to properly discover sbin binaries.
tar xzf core-xen.tgz
cd core-xen
./bootstrap.sh
./configure
make
make install
7. Put your ISO images in /opt/core-xen/iso-files and set the "iso_file="
xen.conf option appropriately.
8. Uncomment the controlnet entry in /etc/core/core.conf:
# establish a control backchannel for accessing nodes
controlnet = 172.16.0.0/24
This setting governs what IP addresses will be used for a control channel.
Given this default setting the host dom0 will have the address 172.16.0.254
assigned to a bridge device; domU VMs will get interfaces joined to this
bridge, having addresses such as 172.16.0.1 for node n1, 172.16.0.2 for n2,
etc.
When 'controlnet =' is unspecified in the core.conf file, double-clicking on
a node results in the 'xm console' method. A key mapping is set up so you
can press 'F1' then 'F2' to login as root. The key ctrl+']' detaches from the
console. Only one console is available per domU VM.
9. When 'controlnet =' is specified, double-clicking on a node results in an
attempt to ssh to that node's control IP address.
Put a host RSA key for use on the domUs in /opt/core-xen/ssh:
mkdir -p /opt/core-xen/ssh
ssh-keygen -t rsa -f /opt/core-xen/ssh/ssh_host_rsa_key
cp ~/.ssh/id_rsa.pub /opt/core-xen/ssh/authorized_keys
chmod 600 /opt/core-xen/ssh/authorized_keys

README.md

@ -0,0 +1,103 @@
# CORE
CORE: Common Open Research Emulator
Copyright (c)2005-2018 the Boeing Company.
See the LICENSE file included in this distribution.
## About
The Common Open Research Emulator (CORE) is a tool for emulating
networks on one or more machines. You can connect these emulated
networks to live networks. CORE consists of a GUI for drawing
topologies of lightweight virtual machines, and Python modules for
scripting network emulation.
## Documentation and Examples
* Documentation hosted on GitHub
* http://coreemu.github.io/core/
* Basic Script Examples
* [Examples](daemon/examples/api)
* Custom Service Example
* [sample.py](daemon/examples/myservices/sample.py)
* Custom Emane Model Example
* [examplemodel.py](daemon/examples/myemane/examplemodel.py)
## Support
We are leveraging Discord for persistent chat rooms, voice chat, and
GitHub integration. This allows for more dynamic conversations and the
capability to respond faster. Feel free to join us at the link below.
https://discord.gg/AKd7kmP
You can also get help with questions, comments, or trouble, by using
the CORE mailing lists:
* [core-users](https://pf.itd.nrl.navy.mil/mailman/listinfo/core-users) for general comments and questions
* [core-dev](https://pf.itd.nrl.navy.mil/mailman/listinfo/core-dev) for bugs, compile errors, and other development issues
## Building CORE
```shell
./bootstrap.sh
./configure
make
sudo make install
```
## Building Documentation
```shell
./bootstrap.sh
./configure
make doc
```
## Building Packages
Install fpm: http://fpm.readthedocs.io/en/latest/installing.html
Build package commands; DESTDIR is used for GUI packaging only:
```shell
./bootstrap.sh
./configure
make
mkdir /tmp/core-gui
make fpm DESTDIR=/tmp/core-gui
```
This will produce:
* CORE GUI rpm/deb files
* core-gui_$VERSION_$ARCH
* CORE ns3 rpm/deb files
* python-core-ns3_$VERSION_$ARCH
* CORE python rpm/deb files for SysV and systemd service types
* python-core-sysv_$VERSION_$ARCH
* python-core-systemd_$VERSION_$ARCH
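The generated packages can then be installed with the native package tools; the file names below are only examples, since the actual names include the built version and architecture.

```shell
# Example install of the generated packages (names vary by version/arch);
# dpkg does not resolve dependencies, so follow up with apt-get if needed.
sudo dpkg -i core-gui_5.1_amd64.deb python-core-systemd_5.1_all.deb
sudo apt-get -f install
# RPM-based systems
sudo yum localinstall core-gui_5.1_x86_64.rpm python-core-sysv_5.1_noarch.rpm
```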
## Running CORE
First start the CORE services:
```shell
# sysv
sudo service core-daemon start
# systemd
sudo systemctl start core-daemon
```
This automatically runs the core-daemon program.
Assuming the GUI is in your PATH, run the CORE GUI by typing the following:
```shell
core-gui
```
This launches the CORE GUI. You do not need to run the GUI as root.

README

@ -1,97 +0,0 @@
====
CORE
====
CORE: Common Open Research Emulator
Copyright (c)2005-2017 the Boeing Company.
See the LICENSE file included in this distribution.
About
=====
CORE is a tool for emulating networks using a GUI or Python scripts. The CORE
project site (1) is a good source of introductory information, with a manual,
screenshots, and demos about this software. The GitHub project (2) hosts the
source repos, wiki, and bug tracker. There is a deprecated
Google Code page (3) with the old wiki, blog, bug tracker, and quickstart guide.
1. http://www.nrl.navy.mil/itd/ncs/products/core
2. https://github.com/coreemu/core
3. http://code.google.com/p/coreemu/
4. `Official Documentation`_
.. _Official Documentation: https://downloads.pf.itd.nrl.navy.mil/docs/core/core-html/index.html
Building CORE
=============
To build this software you should use:
./bootstrap.sh
./configure
make
sudo make install
Note: You may need to pass the proxy settings to sudo make install:
sudo make install HTTP_PROXY=<proxy>
Here is what is installed with 'make install':
/usr/local/bin/core-gui
/usr/local/sbin/core-daemon
/usr/local/sbin/[vcmd, vnoded, coresendmsg, core-cleanup.sh]
/usr/local/lib/core/*
/usr/local/share/core/*
/usr/local/lib/python2.6/dist-packages/core/*
/usr/local/lib/python2.6/dist-packages/[netns,vcmd].so
/etc/core/*
/etc/init.d/core
See the manual for the software required for building CORE.
Building Documentation
======================
Being able to build documentation depends on help2man being installed.
Once that has been done you can run the following commands:
./bootstrap.sh
./configure
make html
Running CORE
============
First start the CORE services:
sudo /etc/init.d/core-daemon start
This automatically runs the core-daemon program.
Assuming the GUI is in your PATH, run the CORE GUI by typing the following:
core-gui
This launches the CORE GUI. You do not need to run the GUI as root.
Support
=======
If you have questions, comments, or trouble, please use the CORE mailing lists:
- `core-users`_ for general comments and questions
- `core-dev`_ for bugs, compile errors, and other development issues
.. _core-users: https://pf.itd.nrl.navy.mil/mailman/listinfo/core-users
.. _core-dev: https://pf.itd.nrl.navy.mil/mailman/listinfo/core-dev

bootstrap.sh

@ -7,16 +7,17 @@
# Bootstrap the autoconf system.
#
if [ x$1 = x ]; then # PASS
# PASS
if [ x$1 = x ]; then
echo "Bootstrapping the autoconf system..."
# echo " These autotools programs should be installed for this script to work:"
# echo " aclocal, libtoolize, autoheader, automake, autoconf"
echo "(Messages below about copying and installing files are normal.)"
elif [ x$1 = xclean ]; then # clean - take out the trash
# clean - take out the trash
elif [ x$1 = xclean ]; then
echo "Cleaning up the autoconf mess..."
rm -rf autom4te.cache config BSDmakefile
rm -rf autom4te.cache config
exit 0;
else # help text
# help text
else
echo "usage: $0 [clean]"
echo -n " Use this script to bootstrap the autoconf build system prior to "
echo "running the "
@ -29,15 +30,6 @@ if ! [ -d "config" ]; then
mkdir config
fi
# on FreeBSD, discourage use of make
UNAME=`uname`
if [ x${UNAME} = xFreeBSD ]; then
echo "all:" > BSDmakefile
echo ' @echo "Please use GNU make instead by typing:"' >> BSDmakefile
echo ' @echo " gmake"' >> BSDmakefile
echo ' @echo ""' >> BSDmakefile
fi
# bootstrapping
echo "(1/4) Running aclocal..." && aclocal -I config \
&& echo "(2/4) Running autoheader..." && autoheader \

configure.ac

@ -1,86 +1,37 @@
#
# Copyright (c) 2010-2013 the Boeing Company
# See the LICENSE file included in this distribution.
#
# CORE configure script
#
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
#
# -*- Autoconf -*-
# Process this file with autoconf to produce a configure script.
#
# this defines the CORE version number, must be static for AC_INIT
#
AC_INIT(core, m4_esyscmd_s([./revision.sh 5.0]), core-dev@nrl.navy.mil)
VERSION=$PACKAGE_VERSION
CORE_VERSION=$PACKAGE_VERSION
CORE_VERSION_DATE=m4_esyscmd_s([./revision.sh -d])
COREDPY_VERSION=$PACKAGE_VERSION
AC_INIT(core, 5.1, core-dev@nrl.navy.mil)
#
# autoconf and automake initialization
#
AC_CONFIG_SRCDIR([daemon/src/version.h.in])
AC_CONFIG_SRCDIR([netns/version.h.in])
AC_CONFIG_AUX_DIR(config)
AC_CONFIG_MACRO_DIR(config)
AC_CONFIG_HEADERS([config.h])
AM_INIT_AUTOMAKE([tar-ustar])
AC_SUBST(CORE_VERSION)
AC_SUBST(CORE_VERSION_DATE)
AC_SUBST(COREDPY_VERSION)
# define variables used for packaging and date display
PACKAGE_DATE=m4_esyscmd_s([date +%Y%m%d])
PACKAGE_VENDOR="CORE Developers"
PACKAGE_MAINTAINERS="$PACKAGE_VENDOR <$PACKAGE_BUGREPORT>"
#
# some of the following directory variables are not expanded at configure-time,
# so we have special checks to expand them
#
# core specific variables
CORE_LIB_DIR="\${prefix}/lib/core"
CORE_CONF_DIR="/etc/core"
CORE_DATA_DIR="\${datadir}/core"
CORE_STATE_DIR="/var"
# CORE GUI files in LIBDIR
# AC_PREFIX_DEFAULT is /usr/local, but not expanded yet
if test "x$prefix" = "xNONE" ; then
prefix="/usr/local"
fi
if test "x$exec_prefix" = "xNONE" ; then
exec_prefix="$prefix"
fi
if test "$libdir" = "\${exec_prefix}/lib" ; then
libdir="${exec_prefix}/lib"
fi
if test "$sbindir" = "\${exec_prefix}/sbin" ; then
sbindir="${exec_prefix}/sbin"
fi
if test "$bindir" = "\${exec_prefix}/bin" ; then
bindir="${exec_prefix}/bin"
fi
# this can be /usr/lib or /usr/lib64
LIB_DIR="${libdir}"
# don't let the Tcl files install to /usr/lib64/core
CORE_LIB_DIR="${prefix}/lib/core"
AC_SUBST(LIB_DIR)
AC_SUBST(PACKAGE_DATE)
AC_SUBST(PACKAGE_MAINTAINERS)
AC_SUBST(PACKAGE_VENDOR)
AC_SUBST(CORE_LIB_DIR)
SBINDIR="${sbindir}"
AC_SUBST(SBINDIR)
BINDIR="${bindir}"
AC_SUBST(BINDIR)
# CORE daemon configuration file (core.conf) in CORE_CONF_DIR
if test "$sysconfdir" = "\${prefix}/etc" ; then
sysconfdir="/etc"
CORE_CONF_DIR="/etc/core"
else
CORE_CONF_DIR="$sysconfdir/core"
fi
AC_SUBST(CORE_CONF_DIR)
if test "$datarootdir" = "\${prefix}/share" ; then
datarootdir="${prefix}/share"
fi
CORE_DATA_DIR="$datarootdir/core"
AC_SUBST(CORE_DATA_DIR)
AC_SUBST(CORE_STATE_DIR)
# CORE GUI configuration files and preferences in CORE_GUI_CONF_DIR
# scenario files in ~/.core/configs/
#AC_ARG_VAR(CORE_GUI_CONF_DIR, [GUI configuration directory.])
AC_ARG_WITH([guiconfdir],
[AS_HELP_STRING([--with-guiconfdir=dir],
[specify GUI configuration directory])],
@ -98,86 +49,27 @@ AC_ARG_ENABLE([daemon],
(default is yes)])],
[], [enable_daemon=yes])
AC_SUBST(enable_daemon)
if test "x$enable_daemon" = "xno"; then
want_python=no
want_bsd=no
want_linux_netns=no
fi
# CORE state files
if test "$localstatedir" = "\${prefix}/var" ; then
# use /var instead of /usr/local/var (/usr/local/var/log isn't standard)
CORE_STATE_DIR="/var"
else
CORE_STATE_DIR="$localstatedir"
fi
AC_SUBST(CORE_STATE_DIR)
SEARCHPATH="/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/sbin:/usr/local/bin"
# default compiler flags
# _GNU_SOURCE is defined to get c99 defines for lrint()
CFLAGS="$CFLAGS -O3 -Werror -Wall -D_GNU_SOURCE"
# debug flags
#CFLAGS="$CFLAGS -g -Werror -Wall -D_GNU_SOURCE"
# Checks for programs.
# checks for programs
AC_PROG_AWK
AC_PROG_CC
AC_PROG_INSTALL
AC_PROG_MAKE_SET
AC_PROG_RANLIB
AC_PROG_SED
AM_PATH_PYTHON(2.6, want_python=yes, want_python=no)
SEARCHPATH="/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/sbin:/usr/local/bin"
#
# daemon dependencies
#
if test "x$enable_daemon" = "xyes" ; then
AC_CHECK_PROG(brctl_path, brctl, $as_dir, no, $SEARCHPATH)
AC_CHECK_PROG(sysctl_path, sysctl, $as_dir, no, $SEARCHPATH)
AC_CHECK_PROG(ebtables_path, ebtables, $as_dir, no, $SEARCHPATH)
AC_CHECK_PROG(ip_path, ip, $as_dir, no, $SEARCHPATH)
AC_CHECK_PROG(tc_path, tc, $as_dir, no, $SEARCHPATH)
AC_CHECK_PROG(ifconfig_path, ifconfig, $as_dir, no, $SEARCHPATH)
AC_CHECK_PROG(ngctl_path, ngctl, $as_dir, no, $SEARCHPATH)
AC_CHECK_PROG(vimage_path, vimage, $as_dir, no, $SEARCHPATH)
AC_CHECK_PROG(mount_path, mount, $as_dir, no, $SEARCHPATH)
AC_CHECK_PROG(umount_path, umount, $as_dir, no, $SEARCHPATH)
AC_CHECK_PROG(convert, convert, yes, no, $SEARCHPATH)
AC_CHECK_PROG(ovs_vs_path, ovs-vsctl, $as_dir, no, $SEARCHPATH)
AC_CHECK_PROG(ovs_of_path, ovs-ofctl, $as_dir, no, $SEARCHPATH)
fi
want_python=no
want_linux_netns=no
if test "x$enable_daemon" = "xyes"; then
want_python=yes
want_linux_netns=yes
#AC_CHECK_PROG(dia, dia, yes, no)
AC_CHECK_PROG(help2man, help2man, yes, no, $SEARCHPATH)
if test "x$convert" = "xno" ; then
AC_MSG_WARN([Could not locate ImageMagick convert.])
#want_docs_missing="convert"
fi
#if test "x$dia" = "xno" ; then
# AC_MSG_WARN([Could not locate dia.])
# want_docs_missing="dia"
#fi
if test "x$help2man" = "xno" ; then
AC_MSG_WARN([Could not locate help2man.])
want_docs_missing="$want_docs_missing help2man"
fi
if test "x$want_docs_missing" = "x" ; then
want_docs=yes
else
AC_MSG_WARN([Could not find required helper utilities (${want_docs_missing}) so the CORE documentation will not be built.])
want_docs=no
fi
#AC_PATH_PROGS(tcl_path, [tclsh tclsh8.5 tclsh8.4], no)
#if test "x$tcl_path" = "xno" ; then
# AC_MSG_ERROR([Could not locate tclsh. Please install Tcl/Tk.])
#fi
#AC_PATH_PROGS(wish_path, [wish wish8.5 wish8.4], no)
#if test "x$wish_path" = "xno" ; then
# AC_MSG_ERROR([Could not locate wish. Please install Tcl/Tk.])
#fi
if test "x$enable_daemon" = "xyes" ; then
# Checks for libraries.
AC_CHECK_LIB([netgraph], [NgMkSockNode])
@ -198,92 +90,93 @@ if test "x$enable_daemon" = "xyes" ; then
AC_FUNC_MALLOC
AC_FUNC_REALLOC
AC_CHECK_FUNCS([atexit dup2 gettimeofday memset socket strerror uname])
AM_PATH_PYTHON(2.7)
AC_CHECK_PROG(brctl_path, brctl, $as_dir, no, $SEARCHPATH)
if test "x$brctl_path" = "xno" ; then
AC_MSG_ERROR([Could not locate brctl (from bridge-utils package).])
fi
AC_CHECK_PROG(sysctl_path, sysctl, $as_dir, no, $SEARCHPATH)
AC_CHECK_PROG(ebtables_path, ebtables, $as_dir, no, $SEARCHPATH)
if test "x$ebtables_path" = "xno" ; then
AC_MSG_ERROR([Could not locate ebtables (from ebtables package).])
fi
AC_CHECK_PROG(ip_path, ip, $as_dir, no, $SEARCHPATH)
if test "x$ip_path" = "xno" ; then
AC_MSG_ERROR([Could not locate ip (from iproute package).])
fi
AC_CHECK_PROG(tc_path, tc, $as_dir, no, $SEARCHPATH)
if test "x$tc_path" = "xno" ; then
AC_MSG_ERROR([Could not locate tc (from iproute package).])
fi
AC_CHECK_PROG(mount_path, mount, $as_dir, no, $SEARCHPATH)
AC_CHECK_PROG(umount_path, umount, $as_dir, no, $SEARCHPATH)
AC_CHECK_PROG(convert, convert, yes, no, $SEARCHPATH)
if test "x$convert" = "xno" ; then
AC_MSG_WARN([Could not locate ImageMagick convert.])
fi
AC_CHECK_PROG(ovs_vs_path, ovs-vsctl, $as_dir, no, $SEARCHPATH)
if test "x$ovs_vs_path" = "xno" ; then
AC_MSG_WARN([Could not locate ovs-vsctl, cannot use OVS nodes])
fi
AC_CHECK_PROG(ovs_of_path, ovs-ofctl, $as_dir, no, $SEARCHPATH)
if test "x$ovs_of_path" = "xno" ; then
AC_MSG_WARN([Could not locate ovs-ofctl, cannot use OVS nodes])
fi
CFLAGS_save=$CFLAGS
CPPFLAGS_save=$CPPFLAGS
if test "x$PYTHON_INCLUDE_DIR" = "x"; then
PYTHON_INCLUDE_DIR=`$PYTHON -c "import distutils.sysconfig; print distutils.sysconfig.get_python_inc()"`
fi
CFLAGS="-I$PYTHON_INCLUDE_DIR"
CPPFLAGS="-I$PYTHON_INCLUDE_DIR"
AC_CHECK_HEADERS([Python.h], [],
AC_MSG_ERROR([Python bindings require Python development headers (try installing your 'python-devel' or 'python-dev' package)]))
CFLAGS=$CFLAGS_save
CPPFLAGS=$CPPFLAGS_save
PKG_CHECK_MODULES(libev, libev,
AC_MSG_RESULT([found libev using pkgconfig OK])
AC_SUBST(libev_CFLAGS)
AC_SUBST(libev_LIBS),
AC_MSG_RESULT([did not find libev using pkgconfig...])
AC_CHECK_LIB([ev], ev_set_allocator,
AC_MSG_RESULT([found libev OK])
AC_SUBST(libev_CFLAGS)
AC_SUBST(libev_LIBS, [-lev]),
AC_MSG_ERROR([Python bindings require libev (try installing your 'libev-devel' or 'libev-dev' package)])))
fi
# Host-specific detection
want_linux_netns=no
want_bsd=no
if test `uname -s` = "FreeBSD"; then
want_bsd=yes
AC_CHECK_PROGS(gmake)
# FreeBSD fix for linking libev port below
CFLAGS="$CFLAGS -L/usr/local/lib"
AC_CHECK_PROG(help2man, help2man, yes, no, $SEARCHPATH)
if test "x$help2man" = "xno" ; then
AC_MSG_WARN([Could not locate help2man.])
want_docs_missing="$want_docs_missing help2man"
fi
if test "x$want_docs_missing" = "x" ; then
want_docs=yes
else
want_linux_netns=yes
fi
if test "x$want_python" = "xno"; then
want_bsd=no
want_linux_netns=no
AC_MSG_WARN([Could not find required helper utilities (${want_docs_missing}) so the CORE documentation will not be built.])
want_docs=no
fi
if test "x$want_python" = "xyes"; then
if test "x$want_linux_netns" = "xyes"; then
CFLAGS_save=$CFLAGS
CPPFLAGS_save=$CPPFLAGS
if test "x$PYTHON_INCLUDE_DIR" = "x"; then
PYTHON_INCLUDE_DIR=`$PYTHON -c "import distutils.sysconfig; print distutils.sysconfig.get_python_inc()"`
fi
CFLAGS="-I$PYTHON_INCLUDE_DIR"
CPPFLAGS="-I$PYTHON_INCLUDE_DIR"
AC_CHECK_HEADERS([Python.h], [],
AC_MSG_ERROR([Python bindings require Python development headers (try installing your 'python-devel' or 'python-dev' package)]))
CFLAGS=$CFLAGS_save
CPPFLAGS=$CPPFLAGS_save
PKG_CHECK_MODULES(libev, libev,
AC_MSG_RESULT([found libev using pkgconfig OK])
AC_SUBST(libev_CFLAGS)
AC_SUBST(libev_LIBS),
AC_MSG_RESULT([did not find libev using pkgconfig...])
AC_CHECK_LIB([ev], ev_set_allocator,
AC_MSG_RESULT([found libev OK])
AC_SUBST(libev_CFLAGS)
AC_SUBST(libev_LIBS, [-lev]),
AC_MSG_ERROR([Python bindings require libev (try installing your 'libev-devel' or 'libev-dev' package)])))
fi
AC_SUBST(pyprefix, `eval ${PYTHON} ./python-prefix.py ${PYTHON_PREFIX} ${PYTHON_VERSION}`)
if test "${pyprefix}" != "${PYTHON_PREFIX}"; then
pythondir=`echo ${pythondir} | sed -e 's,[$][{]prefix[}],${pyprefix},g'`
pyexecdir=`echo ${pyexecdir} | sed -e 's,[$][{]exec_prefix[}],${pyprefix},g'`
fi
else
# Namespace support requires Python support
want_linux_netns=no
# check for sphinx required during make
AC_CHECK_PROG(sphinxapi_path, sphinx-apidoc, $as_dir, no, $SEARCHPATH)
if test "x$sphinxapi_path" = "xno" ; then
AC_MSG_ERROR(["Could not locate sphinx-apidoc, from the python-sphinx package"])
fi
progs_missing=""
if test "x$want_linux_netns" = "xyes"; then
if test "x$brctl_path" = "xno" ; then
progs_missing="${progs_missing}brctl "
brctl_path="/usr/sbin"
AC_MSG_ERROR([Could not locate brctl (from bridge-utils package).])
fi
if test "x$ebtables_path" = "xno" ; then
progs_missing="${progs_missing}ebtables "
ebtables_path="/sbin"
AC_MSG_ERROR([Could not locate ebtables (from ebtables package).])
fi
if test "x$ip_path" = "xno" ; then
progs_missing="${progs_missing}ip "
ip_path="/sbin"
AC_MSG_ERROR([Could not locate ip (from iproute package).])
fi
if test "x$tc_path" = "xno" ; then
progs_missing="${progs_missing}tc "
tc_path="/sbin"
AC_MSG_ERROR([Could not locate tc (from iproute package).])
fi
fi
if test "x$want_bsd" = "xyes"; then
if test "x$ifconfig_path" = "xno" ; then
AC_MSG_ERROR([Could not locate the 'ifconfig' utility.])
fi
if test "x$ngctl_path" = "xno" ; then
AC_MSG_ERROR([Could not locate the 'ngctl' utility.])
fi
if test "x$vimage_path" = "xno" ; then
AC_MSG_ERROR([Could not locate the 'vimage' utility.])
fi
fi
#AC_PATH_PROGS(tcl_path, [tclsh tclsh8.5 tclsh8.4], no)
#if test "x$tcl_path" = "xno" ; then
# AC_MSG_ERROR([Could not locate tclsh. Please install Tcl/Tk.])
#fi
#AC_PATH_PROGS(wish_path, [wish wish8.5 wish8.4], no)
#if test "x$wish_path" = "xno" ; then
# AC_MSG_ERROR([Could not locate wish. Please install Tcl/Tk.])
#fi
AC_ARG_WITH([startup],
[AS_HELP_STRING([--with-startup=option],
@ -296,14 +189,11 @@ AC_MSG_RESULT([using startup option $with_startup])
# Variable substitutions
AM_CONDITIONAL(WANT_GUI, test x$enable_gui = xyes)
AM_CONDITIONAL(WANT_DAEMON, test x$enable_daemon = xyes)
AM_CONDITIONAL(WANT_BSD, test x$want_bsd = xyes)
AM_CONDITIONAL(WANT_DOCS, test x$want_docs = xyes)
AM_CONDITIONAL(WANT_PYTHON, test x$want_python = xyes)
AM_CONDITIONAL(WANT_NETNS, test x$want_linux_netns = xyes)
AM_CONDITIONAL(WANT_INITD, test x$with_startup = xinitd)
AM_CONDITIONAL(WANT_SYSTEMD, test x$with_startup = xsystemd)
AM_CONDITIONAL(WANT_SUSE, test x$with_startup = xsuse)
if test $cross_compiling = no; then
AM_MISSING_PROG(HELP2MAN, help2man)
@ -311,33 +201,23 @@ else
HELP2MAN=:
fi
# Output files
AC_CONFIG_FILES([Makefile
gui/core-gui
gui/version.tcl
gui/Makefile
gui/icons/Makefile
scripts/Makefile
scripts/core-daemon.service
scripts/perf/Makefile
scripts/xen/Makefile
doc/Makefile
doc/conf.py
doc/man/Makefile
doc/figures/Makefile
daemon/Makefile
daemon/src/Makefile
daemon/src/version.h
daemon/core/constants.py
daemon/ns3/Makefile
daemon/ns3/corens3/constants.py
daemon/doc/Makefile
daemon/doc/conf.py
packaging/deb/core-daemon.install
packaging/deb/core-daemon.prerm
packaging/deb/core-gui.install
packaging/rpm/core.spec],)
netns/Makefile
netns/version.h
ns3/Makefile],)
AC_OUTPUT
# Summary text
@ -345,36 +225,33 @@ echo \
"------------------------------------------------------------------------
${PACKAGE_STRING} Configuration:
Build:
Host System Type: ${host}
C Compiler and flags: ${CC} ${CFLAGS}
Install prefix: ${prefix}
Build GUI: ${enable_gui}
Prefix: ${prefix}
Exec Prefix: ${exec_prefix}
GUI:
GUI path: ${CORE_LIB_DIR}
GUI config: ${CORE_GUI_CONF_DIR}
Daemon path: ${SBINDIR}
Daemon:
Daemon path: ${bindir}
Daemon config: ${CORE_CONF_DIR}
Python install prefix: ${pyprefix}
Python modules: ${pythondir}
Logs: ${CORE_STATE_DIR}/log
Startup: ${with_startup}
Features to build:
Python bindings: ${want_python}
Linux Namespaces emulation: ${want_linux_netns}
FreeBSD Jails emulation: ${want_bsd}
Build GUI: ${enable_gui}
Build Daemon: ${enable_daemon}
Documentation: ${want_docs}
------------------------------------------------------------------------"
if test "x${want_bsd}" = "xyes" ; then
# TODO: more sophisticated checks of gmake vs make
echo ">>> NOTE: on FreeBSD you should use 'gmake' instead of 'make'
------------------------------------------------------------------------"
fi
if test "x${want_linux_netns}" = "xyes" ; then
echo "On this platform you should run core-gui as a normal user.
------------------------------------------------------------------------"
fi
if test "x${progs_missing}" != "x" ; then
echo ">>> NOTE: the following programs could not be found:"
echo " $progs_missing
------------------------------------------------------------------------"
fi
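
As a worked example of the options defined above, a typical configuration run might look like the following; the prefix and startup choice are illustrative (valid --with-startup values, per the conditionals above, are initd, systemd, and suse).

```shell
# Illustrative configure run; --prefix and --with-startup values are examples.
./bootstrap.sh
./configure --prefix=/usr --with-startup=systemd
make
sudo make install
```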

daemon/CORE.e4p

@ -1,223 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE Project SYSTEM "Project-4.6.dtd">
<!-- eric4 project file for project CORE -->
<!-- Saved: 2013-08-06, 13:58:14 -->
<!-- Copyright (C) 2013 , -->
<Project version="4.6">
<Language>en</Language>
<ProgLanguage mixed="0">Python</ProgLanguage>
<ProjectType>Console</ProjectType>
<Description></Description>
<Version>4.0</Version>
<Author></Author>
<Email></Email>
<Sources>
<Source>setup.py</Source>
<Source>examples/netns/switchtest.py</Source>
<Source>examples/netns/ospfmanetmdrtest.py</Source>
<Source>examples/netns/switch.py</Source>
<Source>examples/netns/wlantest.py</Source>
<Source>examples/stopsession.py</Source>
<Source>src/setup.py</Source>
<Source>core/emane/__init__.py</Source>
<Source>core/emane/emane.py</Source>
<Source>core/emane/ieee80211abg.py</Source>
<Source>core/emane/rfpipe.py</Source>
<Source>core/emane/nodes.py</Source>
<Source>core/netns/vif.py</Source>
<Source>core/netns/vnet.py</Source>
<Source>core/netns/__init__.py</Source>
<Source>core/netns/vnode.py</Source>
<Source>core/netns/vnodeclient.py</Source>
<Source>core/netns/nodes.py</Source>
<Source>core/service.py</Source>
<Source>core/__init__.py</Source>
<Source>core/addons/__init__.py</Source>
<Source>core/broker.py</Source>
<Source>core/services/__init__.py</Source>
<Source>core/services/quagga.py</Source>
<Source>core/misc/LatLongUTMconversion.py</Source>
<Source>core/misc/__init__.py</Source>
<Source>core/misc/ipaddr.py</Source>
<Source>core/misc/quagga.py</Source>
<Source>core/misc/utils.py</Source>
<Source>core/pycore.py</Source>
<Source>core/coreobj.py</Source>
<Source>core/location.py</Source>
<Source>core/session.py</Source>
<Source>core/api/__init__.py</Source>
<Source>core/api/data.py</Source>
<Source>core/api/coreapi.py</Source>
<Source>core/services/nrl.py</Source>
<Source>core/services/utility.py</Source>
<Source>core/bsd/netgraph.py</Source>
<Source>core/bsd/__init__.py</Source>
<Source>core/bsd/nodes.py</Source>
<Source>core/bsd/vnet.py</Source>
<Source>core/bsd/vnode.py</Source>
<Source>core/xen/xen.py</Source>
<Source>core/xen/xenconfig.py</Source>
<Source>core/xen/__init__.py</Source>
<Source>examples/myservices/sample.py</Source>
<Source>examples/myservices/__init__.py</Source>
<Source>core/services/security.py</Source>
<Source>core/emane/universal.py</Source>
<Source>examples/netns/wlanemanetests.py</Source>
<Source>core/services/xorp.py</Source>
<Source>core/misc/xmlutils.py</Source>
<Source>core/mobility.py</Source>
<Source>core/phys/pnodes.py</Source>
<Source>core/phys/__init__.py</Source>
<Source>ns3/setup.py</Source>
<Source>ns3/corens3/__init__.py</Source>
<Source>ns3/corens3/constants.py</Source>
<Source>ns3/corens3/obj.py</Source>
<Source>ns3/examples/ns3wifi.py</Source>
<Source>ns3/examples/ns3lte.py</Source>
<Source>ns3/examples/ns3wimax.py</Source>
<Source>core/emane/commeffect.py</Source>
<Source>core/services/ucarp.py</Source>
<Source>core/emane/bypass.py</Source>
<Source>core/conf.py</Source>
<Source>core/misc/event.py</Source>
<Source>core/sdt.py</Source>
<Source>core/services/bird.py</Source>
<Source>examples/netns/basicrange.py</Source>
<Source>examples/netns/howmanynodes.py</Source>
<Source>sbin/core-daemon</Source>
<Source>sbin/coresendmsg</Source>
<Source>sbin/core-cleanup</Source>
<Source>sbin/core-xen-cleanup</Source>
<Source>ns3/examples/ns3wifirandomwalk.py</Source>
<Source>core/misc/utm.py</Source>
</Sources>
<Forms>
</Forms>
<Translations>
</Translations>
<Resources>
</Resources>
<Interfaces>
</Interfaces>
<Others>
</Others>
<Vcs>
<VcsType>Subversion</VcsType>
<VcsOptions>
<dict>
<key>
<string>add</string>
</key>
<value>
<list>
<string></string>
</list>
</value>
<key>
<string>checkout</string>
</key>
<value>
<list>
<string></string>
</list>
</value>
<key>
<string>commit</string>
</key>
<value>
<list>
<string></string>
</list>
</value>
<key>
<string>diff</string>
</key>
<value>
<list>
<string></string>
</list>
</value>
<key>
<string>export</string>
</key>
<value>
<list>
<string></string>
</list>
</value>
<key>
<string>global</string>
</key>
<value>
<list>
<string></string>
</list>
</value>
<key>
<string>history</string>
</key>
<value>
<list>
<string></string>
</list>
</value>
<key>
<string>log</string>
</key>
<value>
<list>
<string></string>
</list>
</value>
<key>
<string>remove</string>
</key>
<value>
<list>
<string></string>
</list>
</value>
<key>
<string>status</string>
</key>
<value>
<list>
<string></string>
</list>
</value>
<key>
<string>tag</string>
</key>
<value>
<list>
<string></string>
</list>
</value>
<key>
<string>update</string>
</key>
<value>
<list>
<string></string>
</list>
</value>
</dict>
</VcsOptions>
<VcsOtherData>
<dict>
<key>
<string>standardLayout</string>
</key>
<value>
<bool>True</bool>
</value>
</dict>
</VcsOtherData>
</Vcs>
<FiletypeAssociations>
<FiletypeAssociation pattern="*.pyw" type="SOURCES" />
<FiletypeAssociation pattern="*.idl" type="INTERFACES" />
<FiletypeAssociation pattern="*.py" type="SOURCES" />
<FiletypeAssociation pattern="*.ptl" type="SOURCES" />
</FiletypeAssociations>
</Project>

daemon/MANIFEST.in

@ -1,4 +0,0 @@
recursive-include sbin *.sh *.py
include data/core.conf
recursive-include examples/netns *.py *.sh
recursive-exclude examples/netns *.pyc *.pyo

daemon/Makefile.am

@ -7,76 +7,15 @@
# Makefile for building netns components.
#
SETUPPY = setup.py
SETUPPYFLAGS = -v
SETUPPY = setup.py
SETUPPYFLAGS = -v
if WANT_NETNS
SUBDIRS = src ns3
if WANT_DOCS
SUBDIRS = doc
endif
SBIN_FILES = \
sbin/core-cleanup \
sbin/core-daemon \
sbin/core-manage \
sbin/core-xen-cleanup \
sbin/coresendmsg
dist_sbin_SCRIPTS = $(SBIN_FILES)
CONF_FILES = \
data/core.conf \
data/xen.conf
coreconfdir = $(CORE_CONF_DIR)
dist_coreconf_DATA = $(CONF_FILES)
EXAMPLE_FILES = \
examples/controlnet_updown \
examples/emanemanifest2core.py \
examples/emanemodel2core.py \
examples/findcore.py \
examples/stopsession.py
coreexdir = $(datadir)/core/examples
dist_coreex_SCRIPTS = $(EXAMPLE_FILES)
EXAMPLE_MYSERVICES_FILES = \
examples/myservices/README.txt \
examples/myservices/__init__.py \
examples/myservices/sample.py
coreexmyservicesdir = $(coreexdir)/myservices
dist_coreexmyservices_DATA = $(EXAMPLE_MYSERVICES_FILES)
EXAMPLE_NETNS_FILES = \
examples/netns/basicrange.py \
examples/netns/daemonnodes.py \
examples/netns/distributed.py \
examples/netns/emane80211.py \
examples/netns/howmanynodes.py \
examples/netns/iperf-performance-chain.py \
examples/netns/iperf-performance.sh \
examples/netns/ospfmanetmdrtest.py \
examples/netns/switch.py \
examples/netns/switchtest.py \
examples/netns/twonodes.sh \
examples/netns/wlanemanetests.py \
examples/netns/wlantest.py
coreexnetnsdir = $(coreexdir)/netns
dist_coreexnetns_SCRIPTS= $(EXAMPLE_NETNS_FILES)
EXAMPLE_SERVICES_FILES = \
examples/services/sampleFirewall \
examples/services/sampleIPsec \
examples/services/sampleVPNClient \
examples/services/sampleVPNServer
coreexservicesdir = $(coreexdir)/services
dist_coreexservices_DATA= $(EXAMPLE_SERVICES_FILES)
LOGROTATE_DIR = $(sysconfdir)/logrotate.d
LOGROTATE_FILE = data/core-daemon.logrotate
SCRIPT_FILES := $(notdir $(wildcard scripts/*))
MAN_FILES := $(notdir $(wildcard ../doc/man/*.1))
# Python package build
noinst_SCRIPTS = build
@ -85,48 +24,44 @@ build:
# Python package install
install-exec-hook:
$(MKDIR_P) ${DESTDIR}/${pythondir}
$(MKDIR_P) ${DESTDIR}/${pyexecdir}
PYTHONPATH=${DESTDIR}/${pythondir} $(PYTHON) $(SETUPPY) $(SETUPPYFLAGS) install \
--prefix=${DESTDIR}/${pyprefix} \
--install-purelib=${DESTDIR}/${pythondir} \
--install-platlib=${DESTDIR}/${pyexecdir}
install-data-local:
$(MKDIR_P) $(DESTDIR)$(LOGROTATE_DIR)
$(INSTALL_DATA) $(LOGROTATE_FILE) \
$(DESTDIR)$(LOGROTATE_DIR)/`basename $(LOGROTATE_FILE) .logrotate`
uninstall-local:
rm -f $(DESTDIR)$(LOGROTATE_DIR)/`basename $(LOGROTATE_FILE) .logrotate`
$(PYTHON) $(SETUPPY) $(SETUPPYFLAGS) install \
--root=/$(DESTDIR) \
--prefix=$(prefix) \
--install-lib=$(pythondir) \
--single-version-externally-managed
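The new install-exec-hook amounts to a staged setuptools install; a rough manual equivalent is sketched below, with the staging root, prefix, and Python library path as illustrative stand-ins for the configure/DESTDIR values.

```shell
# Rough manual equivalent of the install-exec-hook above; paths are examples.
cd daemon
python setup.py -v install \
    --root=/tmp/stage \
    --prefix=/usr/local \
    --install-lib=/usr/local/lib/python2.7/dist-packages \
    --single-version-externally-managed
```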
# Python package uninstall
uninstall-hook:
rm -rf ${pythondir}/core_python-${COREDPY_VERSION}-py${PYTHON_VERSION}.egg-info
rm -f ${pythondir}/core_python_netns-1.0-py${PYTHON_VERSION}.egg-info
rm -rf ${pythondir}/core
rmdir -p $(coreexservicesdir) || true
rmdir -p $(coreexnetnsdir) || true
rmdir -p $(coreexmyservicesdir) || true
rmdir -p $(coreexdir) || true
rmdir -p $(coreconfdir) || true
rmdir -p $(LOGROTATE_DIR) || true
rm -rf $(DESTDIR)/etc/core
rm -rf $(DESTDIR)/$(datadir)/core
rm -f $(addprefix $(DESTDIR)/$(datarootdir)/man/man1/, $(MAN_FILES))
rm -f $(addprefix $(DESTDIR)/$(bindir)/,$(SCRIPT_FILES))
rm -rf $(DESTDIR)/$(pythondir)/core-$(PACKAGE_VERSION)-py$(PYTHON_VERSION).egg-info
rm -rf $(DESTDIR)/$(pythondir)/core
# Python package cleanup
clean-local:
-rm -rf build
# Python RPM package
rpm:
$(PYTHON) $(SETUPPY) $(SETUPPYFLAGS) bdist_rpm
# because we include entire directories with EXTRA_DIST, we need to clean up
# the source control files
dist-hook:
rm -rf `find $(distdir)/ -name .svn` `find $(distdir)/ -name '*.pyc'`
-rm -rf `find $(distdir)/ -name '*.pyc'`
DISTCLEANFILES = Makefile.in core/*.pyc MANIFEST doc/Makefile.in doc/Makefile \
doc/conf.py core/addons/*.pyc
distclean-local:
-rm -rf core.egg-info
DISTCLEANFILES = Makefile.in
# files to include with distribution tarball
EXTRA_DIST = $(SETUPPY) MANIFEST.in CORE.e4p core doc $(LOGROTATE_FILE)
EXTRA_DIST = $(SETUPPY) \
core \
data \
doc/conf.py.in \
examples \
scripts \
tests \
test.py \
setup.cfg \
requirements.txt

daemon/core/__init__.py

@ -1,9 +1,24 @@
import logzero
import json
import logging
import logging.config
import os
import subprocess
# configure custom format with function name
_format_template = "%(color)s[%(levelname)1.1s %(asctime)s %(module)s:%(funcName)s:" \
"%(lineno)d]%(end_color)s %(message)s"
_formatter = logzero.LogFormatter(fmt=_format_template)
logzero.formatter(_formatter)
from core import constants
logger = logzero.logger
# setup logging
log_config_path = os.path.join(constants.CORE_CONF_DIR, "logging.conf")
with open(log_config_path, "r") as log_config_file:
log_config = json.load(log_config_file)
logging.config.dictConfig(log_config)
logger = logging.getLogger()
class CoreCommandError(subprocess.CalledProcessError):
"""
Used when encountering internal CORE command errors.
"""
def __str__(self):
return "Command(%s), Status(%s):\n%s" % (self.cmd, self.returncode, self.output)

daemon/core/addons/__init__.py

@ -1,6 +0,0 @@
"""
Optional add ons can be put in this directory. Everything listed in __all__ is automatically
loaded by the main core module.
"""
__all__ = []

daemon/core/api/coreapi.py

@ -10,7 +10,6 @@ import struct
from enum import Enum
from core import logger
from core.enumerations import ConfigTlvs
from core.enumerations import EventTlvs
from core.enumerations import EventTypes
@ -275,7 +274,6 @@ class CoreTlvDataIpv4Addr(CoreTlvDataObj):
:return: Ipv4 address
:rtype: core.misc.ipaddress.IpAddress
"""
logger.info("getting new ipv4 address for: %s", value)
return IpAddress(af=socket.AF_INET, address=value)

daemon/core/broker.py

@ -142,14 +142,13 @@ class CoreBroker(ConfigurableManager):
Close all active sockets; called when the session enters the
data collect state
"""
self.reset()
with self.servers_lock:
while len(self.servers) > 0:
name, server = self.servers.popitem()
if server.sock is not None:
logger.info("closing connection with %s @ %s:%s" %
(name, server.host, server.port))
logger.info("closing connection with %s: %s:%s", name, server.host, server.port)
server.close()
self.reset()
self.dorecvloop = False
if self.recvthread is not None:
self.recvthread.join()
@ -158,7 +157,7 @@ class CoreBroker(ConfigurableManager):
"""
Reset to initial state.
"""
logger.info("broker reset")
logger.info("clearing state")
self.nodemap_lock.acquire()
self.nodemap.clear()
for server, count in self.nodecounts.iteritems():
@ -214,8 +213,7 @@ class CoreBroker(ConfigurableManager):
continue
rcvlen = self.recv(server)
if rcvlen == 0:
logger.info("connection with %s @ %s:%s has closed" % (
server.name, server.host, server.port))
logger.info("connection with server(%s) closed: %s:%s", server.name, server.host, server.port)
def recv(self, server):
"""
@ -236,18 +234,18 @@ class CoreBroker(ConfigurableManager):
return 0
if len(msghdr) != coreapi.CoreMessage.header_len:
logger.info("warning: broker received not enough data len=%s" % len(msghdr))
logger.warn("warning: broker received not enough data len=%s", len(msghdr))
return len(msghdr)
msgtype, msgflags, msglen = coreapi.CoreMessage.unpack_header(msghdr)
msgdata = server.sock.recv(msglen)
data = msghdr + msgdata
count = None
logger.info("received message type: %s", MessageTypes(msgtype))
logger.debug("received message type: %s", MessageTypes(msgtype))
# snoop exec response for remote interactive TTYs
if msgtype == MessageTypes.EXECUTE.value and msgflags & MessageFlags.TTY.value:
data = self.fixupremotetty(msghdr, msgdata, server.host)
logger.info("created remote tty message: %s", data)
logger.debug("created remote tty message: %s", data)
elif msgtype == MessageTypes.NODE.value:
# snoop node delete response to decrement node counts
if msgflags & MessageFlags.DELETE.value:
@ -293,22 +291,21 @@ class CoreBroker(ConfigurableManager):
with self.servers_lock:
server = self.servers.get(name)
if server is not None:
if host == server.host and port == server.port and \
server.sock is not None:
if host == server.host and port == server.port and server.sock is not None:
# leave this socket connected
return
logger.info("closing connection with %s @ %s:%s" % (name, server.host, server.port))
logger.info("closing connection with %s @ %s:%s", name, server.host, server.port)
server.close()
del self.servers[name]
logger.info("adding server: %s @ %s:%s" % (name, host, port))
logger.info("adding broker server(%s): %s:%s", name, host, port)
server = CoreDistributedServer(name, host, port)
if host is not None and port is not None:
try:
server.connect()
except IOError:
logger.exception("error connecting to server %s:%s" % (host, port))
logger.exception("error connecting to server(%s): %s:%s", name, host, port)
if server.sock is not None:
self.startrecvloop()
self.servers[name] = server
@ -328,7 +325,7 @@ class CoreBroker(ConfigurableManager):
logger.exception("error deleting server")
if server.sock is not None:
logger.info("closing connection with %s @ %s:%s" % (server.name, server.host, server.port))
logger.info("closing connection with %s @ %s:%s", server.name, server.host, server.port)
server.close()
def getserverbyname(self, name):
@ -412,7 +409,7 @@ class CoreBroker(ConfigurableManager):
remotenum = n2num
if key in self.tunnels.keys():
logger.warn("tunnel with key %s (%s-%s) already exists!" % (key, n1num, n2num))
logger.warn("tunnel with key %s (%s-%s) already exists!", key, n1num, n2num)
else:
objid = key & ((1 << 16) - 1)
logger.info("adding tunnel for %s-%s to %s with key %s", n1num, n2num, remoteip, key)
@ -432,7 +429,7 @@ class CoreBroker(ConfigurableManager):
Add GreTaps between network devices on different machines.
The GreTapBridge is not used since that would add an extra bridge.
"""
logger.info("adding network tunnels for nodes: %s", self.network_nodes)
logger.debug("adding network tunnels for nodes: %s", self.network_nodes)
for n in self.network_nodes:
self.addnettunnel(n)
@ -494,7 +491,7 @@ class CoreBroker(ConfigurableManager):
gt = self.tunnels[key]
r.append(gt)
continue
logger.info("adding tunnel for net %s to %s with key %s" % (node_id, host, key))
logger.info("adding tunnel for net %s to %s with key %s", node_id, host, key)
gt = GreTap(node=None, name=None, session=self.session, remoteip=host, key=key)
self.tunnels[key] = gt
r.append(gt)
@ -531,7 +528,7 @@ class CoreBroker(ConfigurableManager):
:return: gre tap between nodes or none
"""
key = self.tunnelkey(n1num, n2num)
logger.info("checking for tunnel(%s) in: %s", key, self.tunnels.keys())
logger.debug("checking for tunnel(%s) in: %s", key, self.tunnels.keys())
if key in self.tunnels.keys():
return self.tunnels[key]
else:
@ -698,8 +695,7 @@ class CoreBroker(ConfigurableManager):
elif message.message_type == MessageTypes.CONFIG.value:
# broadcast location and services configuration everywhere
confobj = message.get_tlv(ConfigTlvs.OBJECT.value)
if confobj == "location" or confobj == "services" or \
confobj == "session" or confobj == "all":
if confobj == "location" or confobj == "services" or confobj == "session" or confobj == "all":
servers = self.getservers()
elif message.message_type == MessageTypes.FILE.value:
# broadcast hook scripts and custom service files everywhere
@ -709,8 +705,6 @@ class CoreBroker(ConfigurableManager):
if message.message_type == MessageTypes.LINK.value:
# prepare a server list from two node numbers in link message
handle_locally, servers, message = self.handlelinkmsg(message)
logger.info("broker handle link message: %s - %s", handle_locally,
map(lambda x: "%s:%s" % (x.host, x.port), servers))
elif len(servers) == 0:
# check for servers based on node numbers in all messages but link
nn = message.node_numbers()
@ -737,10 +731,10 @@ class CoreBroker(ConfigurableManager):
"""
server = self.getserverbyname(servername)
if server is None:
logger.warn("ignoring unknown server: %s" % servername)
logger.warn("ignoring unknown server: %s", servername)
return
if server.sock is None or server.host is None or server.port is None:
logger.info("ignoring disconnected server: %s" % servername)
logger.info("ignoring disconnected server: %s", servername)
return
# communicate this session"s current state to the server
@ -813,10 +807,10 @@ class CoreBroker(ConfigurableManager):
try:
nodecls = nodeutils.get_node_class(NodeTypes(nodetype))
except KeyError:
logger.warn("broker invalid node type %s" % nodetype)
logger.warn("broker invalid node type %s", nodetype)
return handle_locally, servers
if nodecls is None:
logger.warn("broker unimplemented node type %s" % nodetype)
logger.warn("broker unimplemented node type %s", nodetype)
return handle_locally, servers
if issubclass(nodecls, PyCoreNet) and nodetype != NodeTypes.WIRELESS_LAN.value:
# network node replicated on all servers; could be optimized
@ -868,7 +862,7 @@ class CoreBroker(ConfigurableManager):
# determine link message destination using non-network nodes
nn = message.node_numbers()
logger.info("checking link nodes (%s) with network nodes (%s)", nn, self.network_nodes)
logger.debug("checking link nodes (%s) with network nodes (%s)", nn, self.network_nodes)
if nn[0] in self.network_nodes:
if nn[1] in self.network_nodes:
# two network nodes linked together - prevent loops caused by
@ -879,11 +873,11 @@ class CoreBroker(ConfigurableManager):
elif nn[1] in self.network_nodes:
servers = self.getserversbynode(nn[0])
else:
logger.info("link nodes are not network nodes")
logger.debug("link nodes are not network nodes")
servers1 = self.getserversbynode(nn[0])
logger.info("servers for node(%s): %s", nn[0], servers1)
logger.debug("servers for node(%s): %s", nn[0], servers1)
servers2 = self.getserversbynode(nn[1])
logger.info("servers for node(%s): %s", nn[1], servers2)
logger.debug("servers for node(%s): %s", nn[1], servers2)
# nodes are on two different servers, build tunnels as needed
if servers1 != servers2:
localn = None
@ -912,7 +906,7 @@ class CoreBroker(ConfigurableManager):
if host is None:
host = self.getlinkendpoint(message, localn == nn[0])
logger.info("handle locally(%s) and local node(%s)", handle_locally, localn)
logger.debug("handle locally(%s) and local node(%s)", handle_locally, localn)
if localn is None:
message = self.addlinkendpoints(message, servers1, servers2)
elif message.flags & MessageFlags.ADD.value:
@ -1015,11 +1009,10 @@ class CoreBroker(ConfigurableManager):
# local emulation server, handle this locally
handle_locally = True
elif server.sock is None:
logger.info("server %s @ %s:%s is disconnected" % (
server.name, server.host, server.port))
logger.info("server %s @ %s:%s is disconnected", server.name, server.host, server.port)
else:
logger.info("forwarding message to server: %s - %s:\n%s",
server.host, server.port, message)
logger.info("forwarding message to server(%s): %s:%s", server.name, server.host, server.port)
logger.debug("message being forwarded:\n%s", message)
server.sock.send(message.raw_message)
return handle_locally
@ -1047,7 +1040,7 @@ class CoreBroker(ConfigurableManager):
lhost, lport = server.sock.getsockname()
f.write("%s %s %s %s %s\n" % (server.name, server.host, server.port, lhost, lport))
except IOError:
logger.exception("error writing server list to the file: %s" % filename)
logger.exception("error writing server list to the file: %s", filename)
def writenodeserver(self, nodestr, server):
"""
@ -1074,7 +1067,7 @@ class CoreBroker(ConfigurableManager):
with open(filename, "w") as f:
f.write("%s\n%s\n" % (serverstr, nodestr))
except IOError:
logger.exception("error writing server file %s for node %s" % (filename, name))
logger.exception("error writing server file %s for node %s", filename, name)
def local_instantiation_complete(self):
"""
@ -1128,9 +1121,9 @@ class CoreBroker(ConfigurableManager):
if values_str is None:
return
value_strings = values_str.split('|')
value_strings = values_str.split("|")
for value_string in value_strings:
key, value = value_string.split('=', 1)
key, value = value_string.split("=", 1)
if key == "controlnet":
self.handle_distributed_control_net(message, value_strings, value_strings.index(value_string))
@ -1146,7 +1139,7 @@ class CoreBroker(ConfigurableManager):
:return: nothing
"""
key_value = values[index]
key, value = key_value.split('=', 1)
key, value = key_value.split("=", 1)
control_nets = value.split()
if len(control_nets) < 2:
@ -1165,5 +1158,5 @@ class CoreBroker(ConfigurableManager):
control_nets = map(lambda x: "%s:%s" % (x[0], x[1]), zip(servers, control_nets))
values[index] = "controlnet=%s" % (" ".join(control_nets))
values_str = "|".join(values)
message.tlvdata[ConfigTlvs.VALUES.value] = values_str
message.tlv_data[ConfigTlvs.VALUES.value] = values_str
message.repack()

daemon/core/bsd/netgraph.py

@ -1,89 +0,0 @@
"""
netgraph.py: Netgraph helper functions; for now these are wrappers around
ngctl commands.
"""
import subprocess
from core import constants
from core.misc import utils
utils.check_executables([constants.NGCTL_BIN])
def createngnode(node_type, hookstr, name=None):
"""
Create a new Netgraph node of type and optionally assign name. The
hook string hookstr should contain two names. This is a string so
other commands may be inserted after the two names.
Return the name and netgraph ID of the new node.
:param node_type: node type to create
:param hookstr: hook string
:param name: name
:return: name and id
:rtype: tuple
"""
hook1 = hookstr.split()[0]
ngcmd = "mkpeer %s %s \n show .%s" % (node_type, hookstr, hook1)
cmd = [constants.NGCTL_BIN, "-f", "-"]
cmdid = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
# err will always be None
result, err = cmdid.communicate(input=ngcmd)
status = cmdid.wait()
if status > 0:
raise Exception("error creating Netgraph node %s (%s): %s" % (node_type, ngcmd, result))
results = result.split()
ngname = results[1]
ngid = results[5]
if name:
subprocess.check_call([constants.NGCTL_BIN, "name", "[0x%s]:" % ngid, name])
return ngname, ngid
def destroyngnode(name):
"""
Shutdown a Netgraph node having the given name.
:param str name: node name
:return: nothing
"""
subprocess.check_call([constants.NGCTL_BIN, "shutdown", "%s:" % name])
def connectngnodes(name1, name2, hook1, hook2):
"""
Connect two hooks of two Netgraph nodes given by their names.
:param str name1: name one
:param str name2: name two
:param str hook1: hook one
:param str hook2: hook two
:return: nothing
"""
node1 = "%s:" % name1
node2 = "%s:" % name2
subprocess.check_call([constants.NGCTL_BIN, "connect", node1, node2, hook1, hook2])
def ngmessage(name, msg):
"""
Send a Netgraph message to the node named name.
:param str name: node name
:param list msg: message
:return: nothing
"""
cmd = [constants.NGCTL_BIN, "msg", "%s:" % name] + msg
subprocess.check_call(cmd)
def ngloadkernelmodule(name):
"""
Load a kernel module by invoking kldstat. This is needed for the
ng_ether module which automatically creates Netgraph nodes when loaded.
:param str name: module name
:return: nothing
"""
utils.mutecall(["kldload", name])

daemon/core/bsd/nodes.py

@ -1,212 +0,0 @@
"""
nodes.py: definition of CoreNode classes and other node classes that inherit
from the CoreNode, implementing specific node types.
"""
import socket
import subprocess
from core import constants
from core import logger
from core.api import coreapi
from core.bsd.netgraph import connectngnodes
from core.bsd.netgraph import ngloadkernelmodule
from core.bsd.vnet import NetgraphNet
from core.bsd.vnet import NetgraphPipeNet
from core.bsd.vnode import JailNode
from core.enumerations import LinkTlvs
from core.enumerations import LinkTypes
from core.enumerations import NodeTypes
from core.enumerations import RegisterTlvs
from core.misc import ipaddress
from core.misc import utils
utils.check_executables([constants.IFCONFIG_BIN])
class CoreNode(JailNode):
apitype = NodeTypes.DEFAULT.value
class PtpNet(NetgraphPipeNet):
def tonodemsg(self, flags):
"""
Do not generate a Node Message for point-to-point links. They are
built using a link message instead.
"""
pass
def tolinkmsgs(self, flags):
"""
Build CORE API TLVs for a point-to-point link. One Link message
describes this network.
"""
tlvdata = ""
if len(self._netif) != 2:
return tlvdata
(if1, if2) = self._netif.items()
if1 = if1[1]
if2 = if2[1]
tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.N1_NUMBER.value, if1.node.objid)
tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.N2_NUMBER.value, if2.node.objid)
delay = if1.getparam("delay")
bw = if1.getparam("bw")
loss = if1.getparam("loss")
duplicate = if1.getparam("duplicate")
jitter = if1.getparam("jitter")
if delay is not None:
tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.DELAY.value, delay)
if bw is not None:
tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.BANDWIDTH.value, bw)
if loss is not None:
tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.PER.value, str(loss))
if duplicate is not None:
tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.DUP.value, str(duplicate))
if jitter is not None:
tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.JITTER.value, jitter)
tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.TYPE.value, self.linktype)
tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.INTERFACE1_NUMBER.value, if1.node.getifindex(if1))
if if1.hwaddr:
tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.INTERFACE1_MAC.value, if1.hwaddr)
for addr in if1.addrlist:
ip, sep, mask = addr.partition("/")
mask = int(mask)
if ipaddress.is_ipv4_address(ip):
family = socket.AF_INET
tlvtypeip = LinkTlvs.INTERFACE1_IP4.value
tlvtypemask = LinkTlvs.INTERFACE1_IP4_MASK
else:
family = socket.AF_INET6
tlvtypeip = LinkTlvs.INTERFACE1_IP6.value
tlvtypemask = LinkTlvs.INTERFACE1_IP6_MASK.value
ipl = socket.inet_pton(family, ip)
tlvdata += coreapi.CoreLinkTlv.pack(tlvtypeip, ipaddress.IpAddress(af=family, address=ipl))
tlvdata += coreapi.CoreLinkTlv.pack(tlvtypemask, mask)
tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.INTERFACE2_NUMBER.value, if2.node.getifindex(if2))
if if2.hwaddr:
tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.INTERFACE2_MAC.value, if2.hwaddr)
for addr in if2.addrlist:
ip, sep, mask = addr.partition("/")
mask = int(mask)
if ipaddress.is_ipv4_address(ip):
family = socket.AF_INET
tlvtypeip = LinkTlvs.INTERFACE2_IP4.value
tlvtypemask = LinkTlvs.INTERFACE2_IP4_MASK
else:
family = socket.AF_INET6
tlvtypeip = LinkTlvs.INTERFACE2_IP6.value
tlvtypemask = LinkTlvs.INTERFACE2_IP6_MASK.value
ipl = socket.inet_pton(family, ip)
tlvdata += coreapi.CoreLinkTlv.pack(tlvtypeip, ipaddress.IpAddress(af=family, address=ipl))
tlvdata += coreapi.CoreLinkTlv.pack(tlvtypemask, mask)
msg = coreapi.CoreLinkMessage.pack(flags, tlvdata)
return [msg, ]
class SwitchNode(NetgraphNet):
ngtype = "bridge"
nghooks = "link0 link0\nmsg .link0 setpersistent"
apitype = NodeTypes.SWITCH.value
policy = "ACCEPT"
class HubNode(NetgraphNet):
ngtype = "hub"
nghooks = "link0 link0\nmsg .link0 setpersistent"
apitype = NodeTypes.HUB.value
policy = "ACCEPT"
class WlanNode(NetgraphNet):
ngtype = "wlan"
nghooks = "anchor anchor"
apitype = NodeTypes.WIRELESS_LAN.value
linktype = LinkTypes.WIRELESS.value
policy = "DROP"
def __init__(self, session, objid=None, name=None, start=True, policy=None):
NetgraphNet.__init__(self, session, objid, name, start, policy)
# wireless model such as basic range
self.model = None
# mobility model such as scripted
self.mobility = None
def attach(self, netif):
NetgraphNet.attach(self, netif)
if self.model:
netif.poshook = self.model.position_callback
if netif.node is None:
return
x, y, z = netif.node.position.get()
netif.poshook(netif, x, y, z)
def setmodel(self, model, config):
"""
Mobility and wireless model.
:param core.mobility.WirelessModel.cls model: model to set
:param dict config: configuration for model
:return:
"""
logger.info("adding model %s" % model.name)
if model.config_type == RegisterTlvs.WIRELESS.value:
self.model = model(session=self.session, objid=self.objid, values=config)
if self.model.position_callback:
for netif in self.netifs():
netif.poshook = self.model.position_callback
if netif.node is not None:
x, y, z = netif.node.position.get()
netif.poshook(netif, x, y, z)
self.model.setlinkparams()
elif model.config_type == RegisterTlvs.MOBILITY.value:
self.mobility = model(session=self.session, objid=self.objid, values=config)
class RJ45Node(NetgraphPipeNet):
apitype = NodeTypes.RJ45.value
policy = "ACCEPT"
def __init__(self, session, objid, name, start=True):
if start:
ngloadkernelmodule("ng_ether")
NetgraphPipeNet.__init__(self, session, objid, name, start)
if start:
self.setpromisc(True)
def shutdown(self):
self.setpromisc(False)
NetgraphPipeNet.shutdown(self)
def setpromisc(self, promisc):
p = "promisc"
if not promisc:
p = "-" + p
subprocess.check_call([constants.IFCONFIG_BIN, self.name, "up", p])
def attach(self, netif):
if len(self._netif) > 0:
raise ValueError("RJ45 networks support at most 1 network interface")
NetgraphPipeNet.attach(self, netif)
connectngnodes(self.ngname, self.name, self.gethook(), "lower")
class TunnelNode(NetgraphNet):
ngtype = "pipe"
nghooks = "upper lower"
apitype = NodeTypes.TUNNEL.value
policy = "ACCEPT"
BSD_NODES = {
NodeTypes.DEFAULT: CoreNode,
NodeTypes.SWITCH: SwitchNode,
NodeTypes.HUB: HubNode,
NodeTypes.WIRELESS_LAN: WlanNode,
NodeTypes.RJ45: RJ45Node,
NodeTypes.TUNNEL: TunnelNode,
NodeTypes.PEER_TO_PEER: PtpNet,
NodeTypes.CONTROL_NET: None
}
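A short usage sketch of the mapping above; the session call is only indicative of how the resolved class would be consumed:
from core.bsd.nodes import BSD_NODES
from core.enumerations import NodeTypes

node_class = BSD_NODES[NodeTypes.SWITCH]      # resolves to SwitchNode
# with a running session, a node instance would then be created along the lines of:
# node = session.addobj(cls=node_class, name="switch1")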


@@ -1,206 +0,0 @@
"""
vnet.py: NetgraphNet and NetgraphPipeNet classes that implement virtual networks
using the FreeBSD Netgraph subsystem.
"""
from core import logger
from core.bsd.netgraph import connectngnodes
from core.bsd.netgraph import createngnode
from core.bsd.netgraph import destroyngnode
from core.bsd.netgraph import ngmessage
from core.coreobj import PyCoreNet
class NetgraphNet(PyCoreNet):
ngtype = None
nghooks = ()
def __init__(self, session, objid=None, name=None, start=True, policy=None):
PyCoreNet.__init__(self, session, objid, name)
if name is None:
name = str(self.objid)
if policy is not None:
self.policy = policy
self.name = name
self.ngname = "n_%s_%s" % (str(self.objid), self.session.session_id)
self.ngid = None
self._netif = {}
self._linked = {}
self.up = False
if start:
self.startup()
def startup(self):
tmp, self.ngid = createngnode(node_type=self.ngtype, hookstr=self.nghooks, name=self.ngname)
self.up = True
def shutdown(self):
if not self.up:
return
self.up = False
while self._netif:
k, netif = self._netif.popitem()
if netif.pipe:
pipe = netif.pipe
netif.pipe = None
pipe.shutdown()
else:
netif.shutdown()
self._netif.clear()
self._linked.clear()
del self.session
destroyngnode(self.ngname)
def attach(self, netif):
"""
Attach an interface to this netgraph node. Create a pipe between
the interface and the hub/switch/wlan node.
(Note that the PtpNet subclass overrides this method.)
"""
if self.up:
pipe = self.session.addobj(cls=NetgraphPipeNet, start=True)
pipe.attach(netif)
hook = "link%d" % len(self._netif)
pipe.attachnet(self, hook)
PyCoreNet.attach(self, netif)
def detach(self, netif):
PyCoreNet.detach(self, netif)
def linked(self, netif1, netif2):
# check if the network interfaces are attached to this network
if self._netif[netif1] != netif1:
raise ValueError("inconsistency for netif %s" % netif1.name)
if self._netif[netif2] != netif2:
raise ValueError("inconsistency for netif %s" % netif2.name)
try:
linked = self._linked[netif1][netif2]
except KeyError:
linked = False
self._linked[netif1][netif2] = linked
return linked
def unlink(self, netif1, netif2):
if not self.linked(netif1, netif2):
return
msg = ["unlink", "{", "node1=0x%s" % netif1.pipe.ngid]
msg += ["node2=0x%s" % netif2.pipe.ngid, "}"]
ngmessage(self.ngname, msg)
self._linked[netif1][netif2] = False
def link(self, netif1, netif2):
if self.linked(netif1, netif2):
return
msg = ["link", "{", "node1=0x%s" % netif1.pipe.ngid]
msg += ["node2=0x%s" % netif2.pipe.ngid, "}"]
ngmessage(self.ngname, msg)
self._linked[netif1][netif2] = True
def linknet(self, net):
"""
Link this bridge with another by creating a veth pair and installing
each device into each bridge.
"""
raise NotImplementedError
def linkconfig(self, netif, bw=None, delay=None,
loss=None, duplicate=None, jitter=None, netif2=None):
"""
Set link effects by modifying the pipe connected to an interface.
"""
if not netif.pipe:
logger.warn("linkconfig for %s but interface %s has no pipe", self.name, netif.name)
return
return netif.pipe.linkconfig(netif, bw, delay, loss, duplicate, jitter, netif2)
class NetgraphPipeNet(NetgraphNet):
ngtype = "pipe"
nghooks = "upper lower"
def __init__(self, session, objid=None, name=None, start=True, policy=None):
NetgraphNet.__init__(self, session, objid, name, start, policy)
if start:
# account for Ethernet header
ngmessage(self.ngname, ["setcfg", "{", "header_offset=14", "}"])
def attach(self, netif):
"""
Attach an interface to this pipe node.
The first interface is connected to the "upper" hook, the second
connected to the "lower" hook.
"""
if len(self._netif) > 1:
raise ValueError("Netgraph pipes support at most 2 network interfaces")
if self.up:
hook = self.gethook()
connectngnodes(self.ngname, netif.localname, hook, netif.hook)
if netif.pipe:
raise ValueError("Interface %s already attached to pipe %s" % (netif.name, netif.pipe.name))
netif.pipe = self
self._netif[netif] = netif
self._linked[netif] = {}
def attachnet(self, net, hook):
"""
Attach another NetgraphNet to this pipe node.
"""
localhook = self.gethook()
connectngnodes(self.ngname, net.ngname, localhook, hook)
def gethook(self):
"""
Returns the first hook (e.g. "upper") then the second hook
(e.g. "lower") based on the number of connections.
"""
hooks = self.nghooks.split()
if len(self._netif) == 0:
return hooks[0]
else:
return hooks[1]
def linkconfig(self, netif, bw=None, delay=None,
loss=None, duplicate=None, jitter=None, netif2=None):
"""
Set link effects by sending a Netgraph setcfg message to the pipe.
"""
netif.setparam("bw", bw)
netif.setparam("delay", delay)
netif.setparam("loss", loss)
netif.setparam("duplicate", duplicate)
netif.setparam("jitter", jitter)
if not self.up:
return
params = []
upstream = []
downstream = []
if bw is not None:
if str(bw) == "0":
bw = "-1"
params += ["bandwidth=%s" % bw, ]
if delay is not None:
if str(delay) == "0":
delay = "-1"
params += ["delay=%s" % delay, ]
if loss is not None:
if str(loss) == "0":
loss = "-1"
upstream += ["BER=%s" % loss, ]
downstream += ["BER=%s" % loss, ]
if duplicate is not None:
if str(duplicate) == "0":
duplicate = "-1"
upstream += ["duplicate=%s" % duplicate, ]
downstream += ["duplicate=%s" % duplicate, ]
if jitter:
logger.warn("jitter parameter ignored for link %s", self.name)
if len(params) > 0 or len(upstream) > 0 or len(downstream) > 0:
setcfg = ["setcfg", "{", ] + params
if len(upstream) > 0:
setcfg += ["upstream={", ] + upstream + ["}", ]
if len(downstream) > 0:
setcfg += ["downstream={", ] + downstream + ["}", ]
setcfg += ["}", ]
ngmessage(self.ngname, setcfg)
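To illustrate the setcfg path above: a call such as linkconfig(netif, bw=1000000, delay=10) builds the following message list (the node name n_5_60001 is hypothetical), which ngmessage() hands to ngctl:
setcfg = ["setcfg", "{", "bandwidth=1000000", "delay=10", "}"]
# ngmessage("n_5_60001", setcfg) then runs roughly:
#   ngctl msg n_5_60001: setcfg { bandwidth=1000000 delay=10 }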


@@ -1,387 +0,0 @@
"""
vnode.py: SimpleJailNode and JailNode classes that implement the FreeBSD
jail-based virtual node.
"""
import os
import subprocess
import threading
from core import constants
from core import logger
from core.bsd.netgraph import createngnode
from core.bsd.netgraph import destroyngnode
from core.coreobj import PyCoreNetIf
from core.coreobj import PyCoreNode
from core.misc import utils
utils.check_executables([constants.IFCONFIG_BIN, constants.VIMAGE_BIN])
class VEth(PyCoreNetIf):
def __init__(self, node, name, localname, mtu=1500, net=None,
start=True):
PyCoreNetIf.__init__(self, node=node, name=name, mtu=mtu)
# name is the device name (e.g. ngeth0, ngeth1, etc.) before it is
# installed in a node; the Netgraph name is renamed to localname
# e.g. before install: name = ngeth0 localname = n0_0_123
# after install: name = eth0 localname = n0_0_123
self.localname = localname
self.ngid = None
self.net = None
self.pipe = None
self.addrlist = []
self.hwaddr = None
self.up = False
self.hook = "ether"
if start:
self.startup()
def startup(self):
hookstr = "%s %s" % (self.hook, self.hook)
ngname, ngid = createngnode(node_type="eiface", hookstr=hookstr, name=self.localname)
self.name = ngname
self.ngid = ngid
subprocess.check_call([constants.IFCONFIG_BIN, ngname, "up"])
self.up = True
def shutdown(self):
if not self.up:
return
destroyngnode(self.localname)
self.up = False
def attachnet(self, net):
if self.net:
self.detachnet()
self.net = None
net.attach(self)
self.net = net
def detachnet(self):
if self.net is not None:
self.net.detach(self)
def addaddr(self, addr):
self.addrlist.append(addr)
def deladdr(self, addr):
self.addrlist.remove(addr)
def sethwaddr(self, addr):
self.hwaddr = addr
class TunTap(PyCoreNetIf):
"""
TUN/TAP virtual device in TAP mode
"""
def __init__(self, node, name, localname, mtu=None, net=None, start=True):
raise NotImplementedError
class SimpleJailNode(PyCoreNode):
def __init__(self, session, objid=None, name=None, nodedir=None):
PyCoreNode.__init__(self, session, objid, name)
self.nodedir = nodedir
self.pid = None
self.up = False
self.lock = threading.RLock()
self._mounts = []
def startup(self):
if self.up:
raise Exception("already up")
vimg = [constants.VIMAGE_BIN, "-c", self.name]
try:
os.spawnlp(os.P_WAIT, constants.VIMAGE_BIN, *vimg)
except OSError:
raise Exception("vimage command not found while running: %s" % vimg)
logger.info("bringing up loopback interface")
self.cmd([constants.IFCONFIG_BIN, "lo0", "127.0.0.1"])
logger.info("setting hostname: %s", self.name)
self.cmd(["hostname", self.name])
self.cmd([constants.SYSCTL_BIN, "vfs.morphing_symlinks=1"])
self.up = True
def shutdown(self):
if not self.up:
return
for netif in self.netifs():
netif.shutdown()
self._netif.clear()
del self.session
vimg = [constants.VIMAGE_BIN, "-d", self.name]
try:
os.spawnlp(os.P_WAIT, constants.VIMAGE_BIN, *vimg)
except OSError:
raise Exception("vimage command not found while running: %s" % vimg)
self.up = False
def cmd(self, args, wait=True):
if wait:
mode = os.P_WAIT
else:
mode = os.P_NOWAIT
tmp = subprocess.call([constants.VIMAGE_BIN, self.name] + args, cwd=self.nodedir)
if not wait:
tmp = None
if tmp:
logger.warn("cmd exited with status %s: %s", tmp, str(args))
return tmp
def cmdresult(self, args, wait=True):
cmdid, cmdin, cmdout, cmderr = self.popen(args)
result = cmdout.read()
result += cmderr.read()
cmdin.close()
cmdout.close()
cmderr.close()
if wait:
status = cmdid.wait()
else:
status = 0
return status, result
def popen(self, args):
cmd = [constants.VIMAGE_BIN, self.name]
cmd.extend(args)
tmp = subprocess.Popen(cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=self.nodedir)
return tmp, tmp.stdin, tmp.stdout, tmp.stderr
def icmd(self, args):
return os.spawnlp(os.P_WAIT, constants.VIMAGE_BIN, constants.VIMAGE_BIN, self.name, *args)
def term(self, sh="/bin/sh"):
return os.spawnlp(os.P_WAIT, "xterm", "xterm", "-ut",
"-title", self.name, "-e", constants.VIMAGE_BIN, self.name, sh)
def termcmdstring(self, sh="/bin/sh"):
"""
We add "sudo" to the command string because the GUI runs as a
normal user.
"""
return "cd %s && sudo %s %s %s" % (self.nodedir, constants.VIMAGE_BIN, self.name, sh)
def shcmd(self, cmdstr, sh="/bin/sh"):
return self.cmd([sh, "-c", cmdstr])
def boot(self):
pass
def mount(self, source, target):
source = os.path.abspath(source)
logger.info("mounting %s at %s", source, target)
self.addsymlink(path=target, file=None)
def umount(self, target):
logger.info("unmounting %s", target)
def newveth(self, ifindex=None, ifname=None, net=None):
self.lock.acquire()
try:
if ifindex is None:
ifindex = self.newifindex()
if ifname is None:
ifname = "eth%d" % ifindex
sessionid = self.session.short_session_id()
name = "n%s_%s_%s" % (self.objid, ifindex, sessionid)
localname = name
ifclass = VEth
veth = ifclass(node=self, name=name, localname=localname,
mtu=1500, net=net, start=self.up)
if self.up:
# install into jail
subprocess.check_call([constants.IFCONFIG_BIN, veth.name, "vnet", self.name])
# rename from "ngeth0" to "eth0"
self.cmd([constants.IFCONFIG_BIN, veth.name, "name", ifname])
veth.name = ifname
try:
self.addnetif(veth, ifindex)
except:
veth.shutdown()
del veth
raise
return ifindex
finally:
self.lock.release()
def sethwaddr(self, ifindex, addr):
self._netif[ifindex].sethwaddr(addr)
if self.up:
self.cmd([constants.IFCONFIG_BIN, self.ifname(ifindex), "link", str(addr)])
def addaddr(self, ifindex, addr):
if self.up:
if ":" in addr:
family = "inet6"
else:
family = "inet"
self.cmd([constants.IFCONFIG_BIN, self.ifname(ifindex), family, "alias", str(addr)])
self._netif[ifindex].addaddr(addr)
def deladdr(self, ifindex, addr):
try:
self._netif[ifindex].deladdr(addr)
except ValueError:
logger.warn("trying to delete unknown address: %s", addr)
if self.up:
if ":" in addr:
family = "inet6"
else:
family = "inet"
self.cmd([constants.IFCONFIG_BIN, self.ifname(ifindex), family, "-alias",
str(addr)])
valid_deladdrtype = ("inet", "inet6", "inet6link")
def delalladdr(self, ifindex, addrtypes=valid_deladdrtype):
addr = self.getaddr(self.ifname(ifindex), rescan=True)
for t in addrtypes:
if t not in self.valid_deladdrtype:
raise ValueError("addr type must be in: " + " ".join(self.valid_deladdrtype))
for a in addr[t]:
self.deladdr(ifindex, a)
# update cached information
self.getaddr(self.ifname(ifindex), rescan=True)
def ifup(self, ifindex):
if self.up:
self.cmd([constants.IFCONFIG_BIN, self.ifname(ifindex), "up"])
def newnetif(self, net=None, addrlist=[], hwaddr=None,
ifindex=None, ifname=None):
self.lock.acquire()
try:
ifindex = self.newveth(ifindex=ifindex, ifname=ifname, net=net)
if net is not None:
self.attachnet(ifindex, net)
if hwaddr:
self.sethwaddr(ifindex, hwaddr)
for addr in utils.maketuple(addrlist):
self.addaddr(ifindex, addr)
self.ifup(ifindex)
return ifindex
finally:
self.lock.release()
def attachnet(self, ifindex, net):
self._netif[ifindex].attachnet(net)
def detachnet(self, ifindex):
self._netif[ifindex].detachnet()
def addfile(self, srcname, filename):
shcmd = 'mkdir -p $(dirname "%s") && mv "%s" "%s" && sync' % (filename, srcname, filename)
self.shcmd(shcmd)
def getaddr(self, ifname, rescan=False):
return None
# return self.vnodeclient.getaddr(ifname = ifname, rescan = rescan)
def addsymlink(self, path, file):
"""
Create a symbolic link from /path/name/file ->
/tmp/pycore.nnnnn/@.conf/path.name/file
"""
dirname = path
if dirname and dirname[0] == "/":
dirname = dirname[1:]
dirname = dirname.replace("/", ".")
if file:
pathname = os.path.join(path, file)
sym = os.path.join(self.session.session_dir, "@.conf", dirname, file)
else:
pathname = path
sym = os.path.join(self.session.session_dir, "@.conf", dirname)
if os.path.islink(pathname):
if os.readlink(pathname) == sym:
# this link already exists - silently return
return
os.unlink(pathname)
else:
if os.path.exists(pathname):
logger.warn("did not create symlink for %s since path exists on host", pathname)
return
logger.info("creating symlink %s -> %s", pathname, sym)
os.symlink(sym, pathname)
class JailNode(SimpleJailNode):
def __init__(self, session, objid=None, name=None, nodedir=None, bootsh="boot.sh", start=True):
super(JailNode, self).__init__(session=session, objid=objid, name=name, nodedir=nodedir)
self.bootsh = bootsh
if not start:
return
# below here is considered node startup/instantiation code
self.makenodedir()
self.startup()
def boot(self):
self.session.services.bootnodeservices(self)
def validate(self):
self.session.services.validatenodeservices(self)
def startup(self):
self.lock.acquire()
try:
super(JailNode, self).startup()
# self.privatedir("/var/run")
# self.privatedir("/var/log")
finally:
self.lock.release()
def shutdown(self):
if not self.up:
return
self.lock.acquire()
# services are instead stopped when session enters datacollect state
# self.session.services.stopnodeservices(self)
try:
super(JailNode, self).shutdown()
finally:
self.rmnodedir()
self.lock.release()
def privatedir(self, path):
if path[0] != "/":
raise ValueError, "path not fully qualified: " + path
hostpath = os.path.join(
self.nodedir,
os.path.normpath(path).strip("/").replace("/", ".")
)
try:
os.mkdir(hostpath)
except OSError:
pass
except Exception, e:
raise Exception, e
self.mount(hostpath, path)
def opennodefile(self, filename, mode="w"):
dirname, basename = os.path.split(filename)
# self.addsymlink(path=dirname, file=basename)
if not basename:
raise ValueError("no basename for filename: %s" % filename)
if dirname and dirname[0] == "/":
dirname = dirname[1:]
dirname = dirname.replace("/", ".")
dirname = os.path.join(self.nodedir, dirname)
if not os.path.isdir(dirname):
os.makedirs(dirname, mode=0755)
hostfilename = os.path.join(dirname, basename)
return open(hostfilename, mode)
def nodefile(self, filename, contents, mode=0644):
f = self.opennodefile(filename, "w")
f.write(contents)
os.chmod(f.name, mode)
f.close()
logger.info("created nodefile: %s; mode: 0%o", f.name, mode)


@@ -212,7 +212,7 @@ class ConfigurableManager(object):
:return: nothing
"""
if conftype not in self._modelclsmap:
logger.warn("Unknown model type '%s'" % conftype)
logger.warn("unknown model type '%s'", conftype)
return
model = self._modelclsmap[conftype]
keys = model.getnames()
@@ -220,8 +220,7 @@ class ConfigurableManager(object):
values = list(model.getdefaultvalues())
for key, value in keyvalues:
if key not in keys:
logger.warn("Skipping unknown configuration key for %s: '%s'" % \
(conftype, key))
logger.warn("Skipping unknown configuration key for %s: '%s'", conftype, key)
continue
i = keys.index(key)
values[i] = value
@@ -327,7 +326,7 @@ class Configurable(object):
if interface_id is not None:
node_id = node_id * 1000 + interface_id
logger.info("received configure message for %s nodenum:%s", cls.name, str(node_id))
logger.debug("received configure message for %s nodenum:%s", cls.name, str(node_id))
if config_type == ConfigFlags.REQUEST.value:
logger.info("replying to configure request for %s model", cls.name)
# when object name is "all", the reply to this request may be None
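The logging changes in the hunks above move from eager %-formatting to passing arguments through to the logger, which defers interpolation until a record is actually emitted. A minimal sketch of the difference, using standard logging semantics and a hypothetical model type:
import logging
logger = logging.getLogger("core")

conftype = "emane_rfpipe"                            # hypothetical model type
logger.warn("Unknown model type '%s'" % conftype)    # old style: string is built even if the record is filtered
logger.warn("unknown model type '%s'", conftype)     # new style: formatting deferred to the logging framework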


@@ -1,21 +1,27 @@
# Constants created by autoconf ./configure script
COREDPY_VERSION = "@COREDPY_VERSION@"
CORE_STATE_DIR = "@CORE_STATE_DIR@"
CORE_CONF_DIR = "@CORE_CONF_DIR@"
CORE_DATA_DIR = "@CORE_DATA_DIR@"
CORE_LIB_DIR = "@CORE_LIB_DIR@"
CORE_SBIN_DIR = "@SBINDIR@"
import os
BRCTL_BIN = "@brctl_path@/brctl"
SYSCTL_BIN = "@sysctl_path@/sysctl"
IP_BIN = "@ip_path@/ip"
TC_BIN = "@tc_path@/tc"
EBTABLES_BIN = "@ebtables_path@/ebtables"
IFCONFIG_BIN = "@ifconfig_path@/ifconfig"
NGCTL_BIN = "@ngctl_path@/ngctl"
VIMAGE_BIN = "@vimage_path@/vimage"
QUAGGA_STATE_DIR = "@CORE_STATE_DIR@/run/quagga"
MOUNT_BIN = "@mount_path@/mount"
UMOUNT_BIN = "@umount_path@/umount"
OVS_BIN = "@ovs_vs_path@/ovs-vsctl"
OVS_FLOW_BIN = "@ovs_of_path@/ovs-ofctl"
COREDPY_VERSION = "@PACKAGE_VERSION@"
CORE_STATE_DIR = "@CORE_STATE_DIR@"
CORE_CONF_DIR = "@CORE_CONF_DIR@"
CORE_DATA_DIR = "@CORE_DATA_DIR@"
QUAGGA_STATE_DIR = "@CORE_STATE_DIR@/run/quagga"
def which(command):
for path in os.environ["PATH"].split(os.pathsep):
command_path = os.path.join(path, command)
if os.path.isfile(command_path) and os.access(command_path, os.X_OK):
return command_path
VNODED_BIN = which("vnoded")
VCMD_BIN = which("vcmd")
BRCTL_BIN = which("brctl")
SYSCTL_BIN = which("sysctl")
IP_BIN = which("ip")
TC_BIN = which("tc")
EBTABLES_BIN = which("ebtables")
MOUNT_BIN = which("mount")
UMOUNT_BIN = which("umount")
OVS_BIN = which("ovs-vsctl")
OVS_FLOW_BIN = which("ovs-ofctl")
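With the autoconf-substituted paths gone, binaries are now resolved from PATH at import time. Since which() falls through without a return when nothing matches, the constants are None for missing tools; a small sketch of guarding against that:
from core import constants

if constants.EBTABLES_BIN is None:
    raise RuntimeError("ebtables not found in PATH, install it before starting core-daemon")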

File diff suppressed because it is too large.


@@ -37,9 +37,9 @@ class Position(object):
"""
Returns True if the position has actually changed.
:param x: x position
:param y: y position
:param z: z position
:param float x: x position
:param float y: y position
:param float z: z position
:return: True if position changed, False otherwise
:rtype: bool
"""
@@ -113,9 +113,9 @@ class PyCoreObj(object):
"""
Set the (x,y,z) position of the object.
:param x: x position
:param y: y position
:param z: z position
:param float x: x position
:param float y: y position
:param float z: z position
:return: True if position changed, False otherwise
:rtype: bool
"""
@@ -323,7 +323,7 @@ class PyCoreNode(PyCoreObj):
if ifindex in self._netif:
raise ValueError("ifindex %s already exists" % ifindex)
self._netif[ifindex] = netif
# TODO: this hould have probably been set ahead, seems bad to me, check for failure and fix
# TODO: this should have probably been set ahead, seems bad to me, check for failure and fix
netif.netindex = ifindex
def delnetif(self, ifindex):
@@ -412,6 +412,47 @@ class PyCoreNode(PyCoreObj):
return common
def check_cmd(self, args):
"""
Runs shell command on node.
:param list[str]|str args: command to run
:return: combined stdout and stderr
:rtype: str
:raises CoreCommandError: when a non-zero exit status occurs
"""
raise NotImplementedError
def cmd(self, args, wait=True):
"""
Runs shell command on node, with option to not wait for a result.
:param list[str]|str args: command to run
:param bool wait: wait for command to exit, defaults to True
:return: exit status for command
:rtype: int
"""
raise NotImplementedError
def cmd_output(self, args):
"""
Runs shell command on node and get exit status and output.
:param list[str]|str args: command to run
:return: exit status and combined stdout and stderr
:rtype: tuple[int, str]
"""
raise NotImplementedError
def termcmdstring(self, sh):
"""
Create a terminal command string.
:param str sh: shell to execute command in
:return: str
"""
raise NotImplementedError
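The abstract methods added above define the command contract concrete nodes implement. A hypothetical, minimal subclass sketch (not part of this change set) showing the expected semantics with plain subprocess calls; the real node classes raise CoreCommandError on failure:
import subprocess
from core.coreobj import PyCoreNode

class ShellNode(PyCoreNode):                       # hypothetical example class
    def cmd(self, args, wait=True):
        if not wait:
            subprocess.Popen(args)
            return 0
        return subprocess.call(args)

    def cmd_output(self, args):
        p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        output, _ = p.communicate()
        return p.returncode, output

    def check_cmd(self, args):
        status, output = self.cmd_output(args)
        if status != 0:
            # the concrete node classes raise CoreCommandError here instead
            raise Exception("command failed (%s): %s" % (status, output))
        return output

    def termcmdstring(self, sh):
        return sh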
class PyCoreNet(PyCoreObj):
"""
@@ -419,6 +460,22 @@ class PyCoreNet(PyCoreObj):
"""
linktype = LinkTypes.WIRED.value
def startup(self):
"""
Each object implements its own startup method.
:return: nothing
"""
raise NotImplementedError
def shutdown(self):
"""
Each object implements its own shutdown method.
:return: nothing
"""
raise NotImplementedError
def __init__(self, session, objid, name, start=True):
"""
Create a PyCoreNet instance.
@@ -556,7 +613,7 @@ class PyCoreNetIf(object):
"""
Creates a PyCoreNetIf instance.
:param node: node for interface
:param core.coreobj.PyCoreNode node: node for interface
:param str name: interface name
:param mtu: mtu value
"""
@@ -598,8 +655,8 @@ class PyCoreNetIf(object):
"""
Attach network.
:param core.coreobj.PyCoreNet net: network to attach to
:return:nothing
:param core.coreobj.PyCoreNet net: network to attach
:return: nothing
"""
if self.net:
self.detachnet()


@@ -1,18 +1,10 @@
"""
Defines server classes and request handlers for TCP and UDP. Also defined here is a TCP based
auxiliary server class for supporting externally defined handlers.
Defines core server for handling TCP connections.
"""
import SocketServer
import os
import threading
import time
from core import logger
from core.api import coreapi
from core.enumerations import EventTypes
from core.enumerations import SessionTlvs
from core.session import Session
from core.emulator.coreemu import CoreEmu
class CoreServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
@@ -22,7 +14,6 @@ class CoreServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
"""
daemon_threads = True
allow_reuse_address = True
servers = set()
def __init__(self, server_address, handler_class, config=None):
"""
@@ -34,342 +25,6 @@ class CoreServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
:param dict config: configuration setting
:return:
"""
self.coreemu = CoreEmu(config)
self.config = config
self.sessions = {}
self.udpserver = None
self.udpthread = None
self.auxserver = None
self.auxthread = None
self._sessions_lock = threading.Lock()
CoreServer.add_server(self)
SocketServer.TCPServer.__init__(self, server_address, handler_class)
@classmethod
def add_server(cls, server):
"""
Add a core server to the known servers set.
:param CoreServer server: server to add
:return: nothing
"""
cls.servers.add(server)
@classmethod
def remove_server(cls, server):
"""
Remove a core server from the known servers set.
:param CoreServer server: server to remove
:return: nothing
"""
if server in cls.servers:
cls.servers.remove(server)
def shutdown(self):
"""
Shutdown the server, all known sessions, and remove server from known servers set.
:return: nothing
"""
# shutdown all known sessions
for session in self.sessions.values():
session.shutdown()
# if we are a daemon remove pid file
if self.config["daemonize"]:
pid_file = self.config["pidfile"]
try:
os.unlink(pid_file)
except OSError:
logger.exception("error daemon pid file: %s", pid_file)
# remove server from server list
CoreServer.remove_server(self)
def add_session(self, session):
"""
Add a session to our dictionary of sessions, ensuring a unique session number.
:param core.session.Session session: session to add
:return: added session
:raise KeyError: when a session with the same id already exists
"""
with self._sessions_lock:
if session.session_id in self.sessions:
raise KeyError("non-unique session id %s for %s" % (session.session_id, session))
self.sessions[session.session_id] = session
return session
def remove_session(self, session):
"""
Remove a session from our dictionary of sessions.
:param core.session.Session session: session to remove
:return: removed session
:rtype: core.session.Session
"""
with self._sessions_lock:
if session.session_id not in self.sessions:
logger.info("session id %s not found (sessions=%s)", session.session_id, self.sessions.keys())
else:
del self.sessions[session.session_id]
return session
def get_session_ids(self):
"""
Return a list of active session numbers.
:return: known session ids
:rtype: list
"""
with self._sessions_lock:
session_ids = self.sessions.keys()
return session_ids
def create_session(self, session_id=None):
"""
Convenience method for creating sessions with the servers config.
:param int session_id: session id for new session
:return: create session
:rtype: core.session.Session
"""
# create random id when necessary, seems to be 1 case wanted, based on legacy code
# creating a value so high, typical client side generation schemes hopefully won't collide
if not session_id:
session_id = next(
session_id for session_id in xrange(60000, 65000)
if session_id not in self.sessions
)
# create and add session to local manager
session = Session(session_id, config=self.config)
self.add_session(session)
# add shutdown handler to remove session from manager
session.shutdown_handlers.append(self.session_shutdown)
return session
def get_session(self, session_id=None):
"""
Create a new session or retrieve an existing one from our
dictionary of sessions. When the session_id=0 and the use_existing
flag is set, return one of the existing sessions.
:param int session_id: session id of session to retrieve, defaults to returning random session
:return: session
:rtype: core.session.Session
"""
with self._sessions_lock:
# return specified session or none
if session_id:
return self.sessions.get(session_id)
# retrieving known session
session = None
# find runtime session with highest node count
for known_session in filter(lambda x: x.state == EventTypes.RUNTIME_STATE.value,
self.sessions.itervalues()):
if not session or known_session.get_node_count() > session.get_node_count():
session = known_session
# return first known session otherwise
if not session:
for known_session in self.sessions.itervalues():
session = known_session
break
return session
def session_shutdown(self, session):
"""
Handler method to be used as a callback when a session has shutdown.
:param core.session.Session session: session shutting down
:return: nothing
"""
self.remove_session(session)
def to_session_message(self, flags=0):
"""
Build CORE API Sessions message based on current session info.
:param int flags: message flags
:return: session message
"""
id_list = []
name_list = []
file_list = []
node_count_list = []
date_list = []
thumb_list = []
num_sessions = 0
with self._sessions_lock:
for session_id in self.sessions:
session = self.sessions[session_id]
# debug: session.dumpsession()
num_sessions += 1
id_list.append(str(session_id))
name = session.name
if not name:
name = ""
name_list.append(name)
file = session.file_name
if not file:
file = ""
file_list.append(file)
node_count_list.append(str(session.get_node_count()))
date_list.append(time.ctime(session._state_time))
thumb = session.thumbnail
if not thumb:
thumb = ""
thumb_list.append(thumb)
session_ids = "|".join(id_list)
names = "|".join(name_list)
files = "|".join(file_list)
node_counts = "|".join(node_count_list)
dates = "|".join(date_list)
thumbs = "|".join(thumb_list)
if num_sessions > 0:
tlv_data = ""
if len(session_ids) > 0:
tlv_data += coreapi.CoreSessionTlv.pack(SessionTlvs.NUMBER.value, session_ids)
if len(names) > 0:
tlv_data += coreapi.CoreSessionTlv.pack(SessionTlvs.NAME.value, names)
if len(files) > 0:
tlv_data += coreapi.CoreSessionTlv.pack(SessionTlvs.FILE.value, files)
if len(node_counts) > 0:
tlv_data += coreapi.CoreSessionTlv.pack(SessionTlvs.NODE_COUNT.value, node_counts)
if len(dates) > 0:
tlv_data += coreapi.CoreSessionTlv.pack(SessionTlvs.DATE.value, dates)
if len(thumbs) > 0:
tlv_data += coreapi.CoreSessionTlv.pack(SessionTlvs.THUMB.value, thumbs)
message = coreapi.CoreSessionMessage.pack(flags, tlv_data)
else:
message = None
return message
def dump_sessions(self):
"""
Log currently known session information.
"""
logger.info("sessions:")
with self._sessions_lock:
for session_id in self.sessions:
logger.info(session_id)
# def set_session_master(self, handler):
# """
# Call the setmaster() method for every session. Returns True when
# a session having the given handler was updated.
# """
# found = False
#
# with self._sessions_lock:
# for session_id in self.sessions:
# found = self.sessions[session_id].set_master(handler)
# if found is True:
# break
#
# return found
class CoreUdpServer(SocketServer.ThreadingMixIn, SocketServer.UDPServer):
"""
UDP server class, manages sessions and spawns request handlers for
incoming connections.
"""
daemon_threads = True
allow_reuse_address = True
def __init__(self, server_address, handler_class, main_server):
"""
Server class initialization takes configuration data and calls
the SocketServer constructor
:param tuple[str, int] server_address: server address
:param class handler_class: class for handling requests
:param main_server: main server to associate with
"""
self.mainserver = main_server
SocketServer.UDPServer.__init__(self, server_address, handler_class)
def start(self):
"""
Thread target to run concurrently with the TCP server.
:return: nothing
"""
self.serve_forever()
class CoreAuxServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
"""
An auxiliary TCP server.
"""
daemon_threads = True
allow_reuse_address = True
def __init__(self, server_address, handler_class, main_server):
"""
Create a CoreAuxServer instance.
:param tuple[str, int] server_address: server address
:param class handler_class: class for handling requests
:param main_server: main server to associate with
"""
self.mainserver = main_server
logger.info("auxiliary server started, listening on: %s", server_address)
SocketServer.TCPServer.__init__(self, server_address, handler_class)
def start(self):
"""
Start the core auxiliary server.
:return: nothing
"""
self.serve_forever()
def set_session_master(self, handler):
"""
Set the session master handler.
:param func handler: session master handler
:return:
"""
return self.mainserver.set_session_master(handler)
def get_session(self, session_id=None):
"""
Retrieve a session.
:param int session_id: id of session to retrieve
:return: core.session.Session
"""
return self.mainserver.get_session(session_id)
def to_session_message(self, flags=0):
"""
Retrieve a session message.
:param flags: message flags
:return: session message
"""
return self.mainserver.to_session_message(flags)
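Session bookkeeping has moved out of CoreServer and into the CoreEmu object constructed in __init__ above. A rough sketch of the replacement flow; create_session(), sessions, and shutdown() on CoreEmu are assumed from context rather than shown in this diff:
from core.emulator.coreemu import CoreEmu

coreemu = CoreEmu({})                   # same daemon configuration dict as before
session = coreemu.create_session()      # assumed helper replacing CoreServer.create_session()
node_count = session.get_node_count()   # Session API used by the removed code above
coreemu.shutdown()                      # assumed to shut down all managed sessions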


@@ -1,52 +0,0 @@
import subprocess
from core import logger
from core.misc import utils
EMANEUNK = 0
EMANE074 = 7
EMANE081 = 8
EMANE091 = 91
EMANE092 = 92
EMANE093 = 93
EMANE101 = 101
VERSION = None
VERSIONSTR = None
def emane_version():
"""
Return the locally installed EMANE version identifier and string.
"""
global VERSION
global VERSIONSTR
cmd = ("emane", "--version")
try:
status, result = utils.cmdresult(cmd)
except (OSError, subprocess.CalledProcessError):
logger.exception("error checking emane version")
status = -1
result = ""
VERSION = EMANEUNK
if status == 0:
if result.startswith("0.7.4"):
VERSION = EMANE074
elif result.startswith("0.8.1"):
VERSION = EMANE081
elif result.startswith("0.9.1"):
VERSION = EMANE091
elif result.startswith("0.9.2"):
VERSION = EMANE092
elif result.startswith("0.9.3"):
VERSION = EMANE093
elif result.startswith("1.0.1"):
VERSION = EMANE101
VERSIONSTR = result.strip()
# set version variables for the Emane class
emane_version()
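For context, these identifiers were consumed with simple comparisons in the emane models (see the commeffect and emanemodel diffs below); a sketch of the pattern this removal retires:
from core import emane

if emane.VERSION >= emane.EMANE091:
    pass    # newer python bindings (emane.events / emanesh.events)
else:
    pass    # legacy emaneeventservice bindings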


@@ -1,54 +1,27 @@
"""
bypass.py: EMANE Bypass model for CORE
EMANE Bypass model for CORE
"""
from core.emane.emanemodel import EmaneModel
from core.emane import emanemodel
from core.enumerations import ConfigDataTypes
class EmaneBypassModel(EmaneModel):
def __init__(self, session, object_id=None):
EmaneModel.__init__(self, session, object_id)
class EmaneBypassModel(emanemodel.EmaneModel):
name = "emane_bypass"
config_matrix = [
("none", ConfigDataTypes.BOOL.value, "0",
"True,False", "There are no parameters for the bypass model."),
# values to ignore, when writing xml files
config_ignore = {"none"}
# mac definitions
mac_library = "bypassmaclayer"
mac_config = [
("none", ConfigDataTypes.BOOL.value, "0", "True,False",
"There are no parameters for the bypass model."),
]
# value groupings
config_groups = "Bypass Parameters:1-1"
# phy definitions
phy_library = "bypassphylayer"
phy_config = []
def buildnemxmlfiles(self, e, ifc):
"""
Build the necessary nem, mac, and phy XMLs in the given path.
If an individual NEM has a nonstandard config, we need to build
that file also. Otherwise the WLAN-wide nXXemane_bypassnem.xml,
nXXemane_bypassmac.xml, nXXemane_bypassphy.xml are used.
"""
values = e.getifcconfig(self.object_id, self.name, self.getdefaultvalues(), ifc)
if values is None:
return
nemdoc = e.xmldoc("nem")
nem = nemdoc.getElementsByTagName("nem").pop()
nem.setAttribute("name", "BYPASS NEM")
e.appendtransporttonem(nemdoc, nem, self.object_id, ifc)
mactag = nemdoc.createElement("mac")
mactag.setAttribute("definition", self.macxmlname(ifc))
nem.appendChild(mactag)
phytag = nemdoc.createElement("phy")
phytag.setAttribute("definition", self.phyxmlname(ifc))
nem.appendChild(phytag)
e.xmlwrite(nemdoc, self.nemxmlname(ifc))
macdoc = e.xmldoc("mac")
mac = macdoc.getElementsByTagName("mac").pop()
mac.setAttribute("name", "BYPASS MAC")
mac.setAttribute("library", "bypassmaclayer")
e.xmlwrite(macdoc, self.macxmlname(ifc))
phydoc = e.xmldoc("phy")
phy = phydoc.getElementsByTagName("phy").pop()
phy.setAttribute("name", "BYPASS PHY")
phy.setAttribute("library", "bypassphylayer")
e.xmlwrite(phydoc, self.phyxmlname(ifc))
# override gui display tabs
config_groups_override = "Bypass Parameters:1-1"


@@ -2,128 +2,121 @@
commeffect.py: EMANE CommEffect model for CORE
"""
from core import emane
from core import logger
from core.emane.emanemodel import EmaneModel
from core.enumerations import ConfigDataTypes
from core.emane import emanemanifest
from core.emane import emanemodel
try:
import emaneeventservice
import emaneeventcommeffect
from emane.events.commeffectevent import CommEffectEvent
except ImportError:
logger.error("error importing emaneeventservice and emaneeventcommeffect")
try:
from emanesh.events.commeffectevent import CommEffectEvent
except ImportError:
logger.warn("compatible emane python bindings not installed")
class EmaneCommEffectModel(EmaneModel):
def __init__(self, session, object_id=None):
EmaneModel.__init__(self, session, object_id)
# model name
name = "emane_commeffect"
# CommEffect parameters
_confmatrix_shim_base = [
("filterfile", ConfigDataTypes.STRING.value, "",
"", "filter file"),
("groupid", ConfigDataTypes.UINT32.value, "0",
"", "NEM Group ID"),
("enablepromiscuousmode", ConfigDataTypes.BOOL.value, "0",
"On,Off", "enable promiscuous mode"),
("receivebufferperiod", ConfigDataTypes.FLOAT.value, "1.0",
"", "receivebufferperiod"),
]
_confmatrix_shim_081 = [
("defaultconnectivity", ConfigDataTypes.BOOL.value, "0",
"On,Off", "defaultconnectivity"),
("enabletighttimingmode", ConfigDataTypes.BOOL.value, "0",
"On,Off", "enable tight timing mode"),
]
_confmatrix_shim_091 = [
("defaultconnectivitymode", ConfigDataTypes.BOOL.value, "0",
"On,Off", "defaultconnectivity"),
]
if emane.VERSION >= emane.EMANE091:
_confmatrix_shim = _confmatrix_shim_base + _confmatrix_shim_091
def convert_none(x):
"""
Helper to use 0 for None values.
"""
if type(x) is str:
x = float(x)
if x is None:
return 0
else:
_confmatrix_shim = _confmatrix_shim_base + _confmatrix_shim_081
return int(x)
config_matrix = _confmatrix_shim
# value groupings
config_groups = "CommEffect SHIM Parameters:1-%d" % len(_confmatrix_shim)
def buildnemxmlfiles(self, e, ifc):
class EmaneCommEffectModel(emanemodel.EmaneModel):
name = "emane_commeffect"
shim_library = "commeffectshim"
shim_xml = "/usr/share/emane/manifest/commeffectshim.xml"
shim_defaults = {}
config_shim = emanemanifest.parse(shim_xml, shim_defaults)
config_groups_override = "CommEffect SHIM Parameters:1-%d" % len(config_shim)
config_matrix_override = config_shim
def build_xml_files(self, emane_manager, interface):
"""
Build the necessary nem and commeffect XMLs in the given path.
If an individual NEM has a nonstandard config, we need to build
that file also. Otherwise the WLAN-wide
nXXemane_commeffectnem.xml, nXXemane_commeffectshim.xml are used.
:param core.emane.emanemanager.EmaneManager emane_manager: core emane manager
:param interface: interface for the emane node
:return: nothing
"""
values = e.getifcconfig(self.object_id, self.name, self.getdefaultvalues(), ifc)
values = emane_manager.getifcconfig(self.object_id, self.name, self.getdefaultvalues(), interface)
if values is None:
return
shimdoc = e.xmldoc("shim")
shim = shimdoc.getElementsByTagName("shim").pop()
shim.setAttribute("name", "commeffect SHIM")
shim.setAttribute("library", "commeffectshim")
# retrieve xml names
nem_name = self.nem_name(interface)
shim_name = self.shim_name(interface)
nem_document = emane_manager.xmldoc("nem")
nem_element = nem_document.getElementsByTagName("nem").pop()
nem_element.setAttribute("name", "%s NEM" % self.name)
nem_element.setAttribute("type", "unstructured")
emane_manager.appendtransporttonem(nem_document, nem_element, self.object_id, interface)
shim_xml = emane_manager.xmlshimdefinition(nem_document, shim_name)
nem_element.appendChild(shim_xml)
emane_manager.xmlwrite(nem_document, nem_name)
names = self.getnames()
shimnames = list(names[:len(self._confmatrix_shim)])
shimnames.remove("filterfile")
shim_names = list(names)
shim_names.remove("filterfile")
shim_document = emane_manager.xmldoc("shim")
shim_element = shim_document.getElementsByTagName("shim").pop()
shim_element.setAttribute("name", "%s SHIM" % self.name)
shim_element.setAttribute("library", self.shim_library)
# append all shim options (except filterfile) to shimdoc
map(lambda n: shim.appendChild(e.xmlparam(shimdoc, n, self.valueof(n, values))), shimnames)
for name in shim_names:
value = self.valueof(name, values)
param = emane_manager.xmlparam(shim_document, name, value)
shim_element.appendChild(param)
# empty filterfile is not allowed
ff = self.valueof("filterfile", values)
if ff.strip() != "":
shim.appendChild(e.xmlparam(shimdoc, "filterfile", ff))
e.xmlwrite(shimdoc, self.shimxmlname(ifc))
shim_element.appendChild(emane_manager.xmlparam(shim_document, "filterfile", ff))
emane_manager.xmlwrite(shim_document, shim_name)
nemdoc = e.xmldoc("nem")
nem = nemdoc.getElementsByTagName("nem").pop()
nem.setAttribute("name", "commeffect NEM")
nem.setAttribute("type", "unstructured")
e.appendtransporttonem(nemdoc, nem, self.object_id, ifc)
nem.appendChild(e.xmlshimdefinition(nemdoc, self.shimxmlname(ifc)))
e.xmlwrite(nemdoc, self.nemxmlname(ifc))
def linkconfig(self, netif, bw=None, delay=None,
loss=None, duplicate=None, jitter=None, netif2=None):
def linkconfig(self, netif, bw=None, delay=None, loss=None, duplicate=None, jitter=None, netif2=None):
"""
Generate CommEffect events when a Link Message is received having
link parameters.
"""
if emane.VERSION >= emane.EMANE091:
raise NotImplementedError("CommEffect linkconfig() not implemented for EMANE 0.9.1+")
def z(x):
"""
Helper to use 0 for None values.
"""
if type(x) is str:
x = float(x)
if x is None:
return 0
else:
return int(x)
service = self.session.emane.service
if service is None:
logger.warn("%s: EMANE event service unavailable" % self.name)
logger.warn("%s: EMANE event service unavailable", self.name)
return
if netif is None or netif2 is None:
logger.warn("%s: missing NEM information" % self.name)
logger.warn("%s: missing NEM information", self.name)
return
# TODO: batch these into multiple events per transmission
# TODO: may want to split out seconds portion of delay and jitter
event = emaneeventcommeffect.EventCommEffect(1)
index = 0
e = self.session.get_object(self.object_id)
nemid = e.getnemid(netif)
nemid2 = e.getnemid(netif2)
event = CommEffectEvent()
emane_node = self.session.get_object(self.object_id)
nemid = emane_node.getnemid(netif)
nemid2 = emane_node.getnemid(netif2)
mbw = bw
event.set(index, nemid, 0, z(delay), 0, z(jitter), z(loss),
z(duplicate), long(z(bw)), long(z(mbw)))
service.publish(emaneeventcommeffect.EVENT_ID,
emaneeventservice.PLATFORMID_ANY,
nemid2, emaneeventservice.COMPONENTID_ANY,
event.export())
logger.info("sending comm effect event")
event.append(
nemid,
latency=convert_none(delay),
jitter=convert_none(jitter),
loss=convert_none(loss),
duplicate=convert_none(duplicate),
unicast=long(convert_none(bw)),
broadcast=long(convert_none(mbw))
)
service.publish(nemid2, event)
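convert_none() above maps unset link parameters to 0 and coerces numeric strings before the values are packed into the CommEffectEvent; a quick sketch of its behaviour:
convert_none(None)     # -> 0   (unset parameter)
convert_none("2.5")    # -> 2   (numeric strings go through float, then int truncation)
convert_none(100)      # -> 100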

File diff suppressed because it is too large.


@@ -0,0 +1,122 @@
from core import logger
from core.enumerations import ConfigDataTypes
manifest = None
try:
from emane.shell import manifest
except ImportError:
try:
from emanesh import manifest
except ImportError:
logger.warn("compatible emane python bindings not installed")
def _type_value(config_type):
"""
Convert emane configuration type to core configuration value.
:param str config_type: emane configuration type
:return:
"""
config_type = config_type.upper()
if config_type == "DOUBLE":
config_type = "FLOAT"
elif config_type == "INETADDR":
config_type = "STRING"
return ConfigDataTypes[config_type].value
def _get_possible(config_type, config_regex):
"""
Retrieve possible config value options based on emane regexes.
:param str config_type: emane configuration type
:param str config_regex: emane configuration regex
:return: a string listing comma delimited values, if needed, empty string otherwise
:rtype: str
"""
if config_type == "bool":
return "On,Off"
if config_type == "string" and config_regex:
possible = config_regex[2:-2]
possible = possible.replace("|", ",")
return possible
return ""
def _get_default(config_type_name, config_value):
"""
Convert default configuration values to one used by core.
:param str config_type_name: emane configuration type name
:param list config_value: emane configuration value list
:return: default core config value
:rtype: str
"""
config_default = ""
if config_type_name == "bool":
if config_value and config_value[0] == "true":
config_default = "1"
else:
config_default = "0"
elif config_value:
config_default = config_value[0]
if config_default is None:
config_default = ""
return config_default
def parse(manifest_path, defaults):
"""
Parses a valid emane manifest file and converts the provided configuration values into ones used by core.
:param str manifest_path: absolute manifest file path
:param dict defaults: used to override default values for configurations
:return: list of core configuration values
:rtype: list
"""
# no results when emane bindings are not present
if not manifest:
return []
# load configuration file
manifest_file = manifest.Manifest(manifest_path)
manifest_configurations = manifest_file.getAllConfiguration()
configurations = []
for config_name in sorted(manifest_configurations):
config_info = manifest_file.getConfigurationInfo(config_name)
# map type to internal config data type value for core
config_type = config_info.get("numeric")
if not config_type:
config_type = config_info.get("nonnumeric")
config_type_name = config_type["type"]
config_type_value = _type_value(config_type_name)
# get default values, using provided defaults
if config_name in defaults:
config_default = defaults[config_name]
else:
config_value = config_info["values"]
config_default = _get_default(config_type_name, config_value)
# map to possible values used as options within the gui
config_regex = config_info.get("regex")
possible = _get_possible(config_type_name, config_regex)
# define description and account for gui quirks
config_descriptions = config_name
if config_name.endswith("uri"):
config_descriptions = "%s file" % config_descriptions
config_tuple = (config_name, config_type_value, config_default, possible, config_descriptions)
configurations.append(config_tuple)
return configurations
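A usage sketch of parse(), mirroring how the phy configuration is loaded in emanemodel.py below; the manifest path and defaults are the ones used there, and an empty list comes back when the emane bindings are absent:
from core import logger
from core.emane import emanemanifest

defaults = {"subid": "1", "propagationmodel": "2ray", "noisemode": "none"}
options = emanemanifest.parse("/usr/share/emane/manifest/emanephy.xml", defaults)
for name, config_type, default, possible, description in options:
    logger.info("%s (type %s) default=%s", name, config_type, default)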


@@ -2,75 +2,251 @@
Defines Emane Models used within CORE.
"""
from core import emane
from core import logger
from core.emane import emanemanifest
from core.misc import utils
from core.mobility import WirelessModel
from core.xml import xmlutils
def value_to_params(doc, name, value):
"""
Helper to convert a parameter to a paramlist. Returns an XML paramlist, or None if the value does not expand to
multiple values.
:param xml.dom.minidom.Document doc: xml document
:param name: name of element for params
:param str value: value string to convert to tuple
:return: xml document with added params or None, when an invalid value has been provided
"""
try:
values = utils.make_tuple_fromstr(value, str)
except SyntaxError:
logger.exception("error in value string to param list")
return None
if not hasattr(values, "__iter__"):
return None
if len(values) < 2:
return None
return xmlutils.add_param_list_to_parent(doc, parent=None, name=name, values=values)
class EmaneModelMetaClass(type):
"""
Hack into making class level properties to streamline emane model creation, until the Configurable class is
removed or refactored.
"""
@property
def config_matrix(cls):
"""
Convenience method for creating the config matrix, allow for a custom override.
:param EmaneModel cls: emane class
:return: config matrix value
:rtype: list
"""
if cls.config_matrix_override:
return cls.config_matrix_override
else:
return cls.mac_config + cls.phy_config
@property
def config_groups(cls):
"""
Convenience method for creating the config groups, allow for a custom override.
:param EmaneModel cls: emane class
:return: config groups value
:rtype: str
"""
if cls.config_groups_override:
return cls.config_groups_override
else:
mac_len = len(cls.mac_config)
config_len = len(cls.config_matrix)
return "MAC Parameters:1-%d|PHY Parameters:%d-%d" % (mac_len, mac_len + 1, config_len)
class EmaneModel(WirelessModel):
"""
EMANE models inherit from this parent class, which takes care of
handling configuration messages based on the _confmatrix list of
handling configuration messages based on the list of
configurable parameters. Helper functions also live here.
"""
_prefix = {
"y": 1e-24, # yocto
"z": 1e-21, # zepto
"a": 1e-18, # atto
"f": 1e-15, # femto
"p": 1e-12, # pico
"n": 1e-9, # nano
"u": 1e-6, # micro
"m": 1e-3, # mili
"c": 1e-2, # centi
"d": 1e-1, # deci
"k": 1e3, # kilo
"M": 1e6, # mega
"G": 1e9, # giga
"T": 1e12, # tera
"P": 1e15, # peta
"E": 1e18, # exa
"Z": 1e21, # zetta
"Y": 1e24, # yotta
__metaclass__ = EmaneModelMetaClass
# default mac configuration settings
mac_library = None
mac_xml = None
mac_defaults = {}
mac_config = []
# default phy configuration settings, using the universal model
phy_library = None
phy_xml = "/usr/share/emane/manifest/emanephy.xml"
phy_defaults = {
"subid": "1",
"propagationmodel": "2ray",
"noisemode": "none"
}
phy_config = emanemanifest.parse(phy_xml, phy_defaults)
config_ignore = set()
config_groups_override = None
config_matrix_override = None
def __init__(self, session, object_id=None):
WirelessModel.__init__(self, session, object_id)
def build_xml_files(self, emane_manager, interface):
"""
Builds xml files for emane. Includes a nem.xml file that points to both mac.xml and phy.xml definitions.
:param core.emane.emanemanager.EmaneManager emane_manager: core emane manager
:param interface: interface for the emane node
:return: nothing
"""
# retrieve configuration values
values = emane_manager.getifcconfig(self.object_id, self.name, self.getdefaultvalues(), interface)
if values is None:
return
# create document and write to disk
nem_name = self.nem_name(interface)
nem_document = self.create_nem_doc(emane_manager, interface)
emane_manager.xmlwrite(nem_document, nem_name)
# create mac document and write to disk
mac_name = self.mac_name(interface)
mac_document = self.create_mac_doc(emane_manager, values)
emane_manager.xmlwrite(mac_document, mac_name)
# create phy document and write to disk
phy_name = self.phy_name(interface)
phy_document = self.create_phy_doc(emane_manager, values)
emane_manager.xmlwrite(phy_document, phy_name)
def create_nem_doc(self, emane_manager, interface):
"""
Create the nem xml document.
:param core.emane.emanemanager.EmaneManager emane_manager: core emane manager
:param interface: interface for the emane node
:return: nem document
:rtype: xml.dom.minidom.Document
"""
mac_name = self.mac_name(interface)
phy_name = self.phy_name(interface)
nem_document = emane_manager.xmldoc("nem")
nem_element = nem_document.getElementsByTagName("nem").pop()
nem_element.setAttribute("name", "%s NEM" % self.name)
emane_manager.appendtransporttonem(nem_document, nem_element, self.object_id, interface)
mac_element = nem_document.createElement("mac")
mac_element.setAttribute("definition", mac_name)
nem_element.appendChild(mac_element)
phy_element = nem_document.createElement("phy")
phy_element.setAttribute("definition", phy_name)
nem_element.appendChild(phy_element)
return nem_document
def create_mac_doc(self, emane_manager, values):
"""
Create the mac xml document.
:param core.emane.emanemanager.EmaneManager emane_manager: core emane manager
:param tuple values: all current configuration values, mac + phy
:return: nem document
:rtype: xml.dom.minidom.Document
"""
names = list(self.getnames())
mac_names = names[:len(self.mac_config)]
mac_document = emane_manager.xmldoc("mac")
mac_element = mac_document.getElementsByTagName("mac").pop()
mac_element.setAttribute("name", "%s MAC" % self.name)
if not self.mac_library:
raise ValueError("must define emane model library")
mac_element.setAttribute("library", self.mac_library)
for name in mac_names:
# ignore custom configurations
if name in self.config_ignore:
continue
# check if value is a multi param
value = self.valueof(name, values)
param = value_to_params(mac_document, name, value)
if not param:
param = emane_manager.xmlparam(mac_document, name, value)
mac_element.appendChild(param)
return mac_document
def create_phy_doc(self, emane_manager, values):
"""
Create the phy xml document.
:param core.emane.emanemanager.EmaneManager emane_manager: core emane manager
:param tuple values: all current configuration values, mac + phy
:return: nem document
:rtype: xml.dom.minidom.Document
"""
names = list(self.getnames())
phy_names = names[len(self.mac_config):]
phy_document = emane_manager.xmldoc("phy")
phy_element = phy_document.getElementsByTagName("phy").pop()
phy_element.setAttribute("name", "%s PHY" % self.name)
if self.phy_library:
phy_element.setAttribute("library", self.phy_library)
# append all phy options
for name in phy_names:
# ignore custom configurations
if name in self.config_ignore:
continue
# check if value is a multi param
value = self.valueof(name, values)
param = value_to_params(phy_document, name, value)
if not param:
param = emane_manager.xmlparam(phy_document, name, value)
phy_element.appendChild(param)
return phy_document
@classmethod
def configure_emane(cls, session, config_data):
"""
Handle configuration messages for setting up a model.
Pass the Emane object as the manager object.
Handle configuration messages for configuring an emane model.
:param core.session.Session session: session to configure emane
:param core.conf.ConfigData config_data: configuration data for carrying out a configuration
"""
return cls.configure(session.emane, config_data)
@classmethod
def emane074_fixup(cls, value, div=1.0):
"""
Helper for converting 0.8.1 and newer values to EMANE 0.7.4
compatible values.
NOTE: This should be removed when support for 0.7.4 has been
deprecated.
"""
if div == 0:
return "0"
if type(value) is not str:
return str(value / div)
if value.endswith(tuple(cls._prefix.keys())):
suffix = value[-1]
value = float(value[:-1]) * cls._prefix[suffix]
return str(int(value / div))
def buildnemxmlfiles(self, e, ifc):
"""
Build the necessary nem, mac, and phy XMLs in the given path.
"""
raise NotImplementedError
def post_startup(self, emane_manager):
"""
Logic to execute after the emane manager is finished with startup.
:param core.emane.emanemanager.EmaneManager emane_manager: emane manager for the session
:return: nothing
"""
logger.info("emane model(%s) has no post setup tasks", self.name)
def buildplatformxmlnementry(self, doc, n, ifc):
def build_nem_xml(self, doc, emane_node, interface):
"""
Build the NEM definition that goes into the platform.xml file.
@ -80,93 +256,125 @@ class EmaneModel(WirelessModel):
or per-EmaneNode config (e.g. <nem definition="n1emane_rfpipe.xml" id="1">.
This can be overriden by a model for NEM flexibility; n is the EmaneNode.
<nem name="NODE-001" definition="rfpipenem.xml">
:param xml.dom.minidom.Document doc: xml document
:param core.emane.nodes.EmaneNode emane_node: emane node to get information from
:param interface: interface for the emane node
:return: created platform xml
"""
nem = doc.createElement("nem")
nem.setAttribute("name", ifc.localname)
# if this netif contains a non-standard (per-interface) config,
# then we need to use a more specific xml file here
nem.setAttribute("definition", self.nemxmlname(ifc))
nem_name = self.nem_name(interface)
nem = doc.createElement("nem")
nem.setAttribute("name", interface.localname)
nem.setAttribute("definition", nem_name)
return nem
def buildplatformxmltransportentry(self, doc, n, ifc):
def build_transport_xml(self, doc, emane_node, interface):
"""
Build the transport definition that goes into the platform.xml file.
This returns an XML element that will added to the nem definition.
This returns an XML element that will be added to the nem definition.
This default method supports raw and virtual transport types, but may be
overriden by a model to support the e.g. pluggable virtual transport.
n is the EmaneNode.
overridden by a model to support the e.g. pluggable virtual transport.
<transport definition="transvirtual.xml" group="1">
<param name="device" value="n1.0.158" />
</transport>
:param xml.dom.minidom.Document doc: xml document
:param core.emane.nodes.EmaneNode emane_node: emane node to get information from
:param interface: interface for the emane node
:return: created transport xml
"""
ttype = ifc.transport_type
if not ttype:
logger.info("warning: %s interface type unsupported!" % ifc.name)
ttype = "raw"
trans = doc.createElement("transport")
trans.setAttribute("definition", n.transportxmlname(ttype))
if emane.VERSION < emane.EMANE092:
trans.setAttribute("group", "1")
transport_type = interface.transport_type
if not transport_type:
logger.info("warning: %s interface type unsupported!", interface.name)
transport_type = "raw"
transport_name = emane_node.transportxmlname(transport_type)
transport = doc.createElement("transport")
transport.setAttribute("definition", transport_name)
param = doc.createElement("param")
param.setAttribute("name", "device")
if ttype == "raw":
# raw RJ45 name e.g. "eth0"
param.setAttribute("value", ifc.name)
else:
# virtual TAP name e.g. "n3.0.17"
param.setAttribute("value", ifc.localname)
if emane.VERSION > emane.EMANE091:
param.setAttribute("value", ifc.name)
param.setAttribute("value", interface.name)
trans.appendChild(param)
return trans
transport.appendChild(param)
return transport
def basename(self, interface=None):
def _basename(self, interface=None):
"""
Return the string that other names are based on.
If a specific config is stored for a node"s interface, a unique
filename is needed; otherwise the name of the EmaneNode is used.
Create name that is leveraged for configuration file creation.
:param interface: interface for this model
:return: basename used for file creation
:rtype: str
"""
emane = self.session.emane
name = "n%s" % self.object_id
if interface is not None:
nodenum = interface.node.objid
# Adamson change - use getifcconfig() to get proper result
# if emane.getconfig(nodenum, self._name, None)[1] is not None:
if emane.getifcconfig(nodenum, self.name, None, interface) is not None:
emane_manager = self.session.emane
if interface:
node_id = interface.node.objid
if emane_manager.getifcconfig(node_id, self.name, None, interface) is not None:
name = interface.localname.replace(".", "_")
return "%s%s" % (name, self.name)
def nemxmlname(self, interface=None):
def nem_name(self, interface=None):
"""
Return the string name for the NEM XML file, e.g. "n3rfpipenem.xml"
"""
append = ""
if emane.VERSION > emane.EMANE091:
if interface and interface.transport_type == "raw":
append = "_raw"
return "%snem%s.xml" % (self.basename(interface), append)
def shimxmlname(self, ifc=None):
:param interface: interface for this model
:return: nem xml filename
:rtype: str
"""
basename = self._basename(interface)
append = ""
if interface and interface.transport_type == "raw":
append = "_raw"
return "%snem%s.xml" % (basename, append)
def shim_name(self, interface=None):
"""
Return the string name for the SHIM XML file, e.g. "commeffectshim.xml"
"""
return "%sshim.xml" % self.basename(ifc)
def macxmlname(self, ifc=None):
:param interface: interface for this model
:return: shim xml filename
:rtype: str
"""
return "%sshim.xml" % self._basename(interface)
def mac_name(self, interface=None):
"""
Return the string name for the MAC XML file, e.g. "n3rfpipemac.xml"
"""
return "%smac.xml" % self.basename(ifc)
def phyxmlname(self, ifc=None):
:param interface: interface for this model
:return: mac xml filename
:rtype: str
"""
return "%smac.xml" % self._basename(interface)
def phy_name(self, interface=None):
"""
Return the string name for the PHY XML file, e.g. "n3rfpipephy.xml"
:param interface: interface for this model
:return: phy xml filename
:rtype: str
"""
return "%sphy.xml" % self.basename(ifc)
return "%sphy.xml" % self._basename(interface)
def update(self, moved, moved_netifs):
"""
invoked from MobilityModel when nodes are moved; this causes
EMANE location events to be generated for the nodes in the moved
list, making EmaneModels compatible with Ns2ScriptedMobility
Invoked from MobilityModel when nodes are moved; this causes
emane location events to be generated for the nodes in the moved
list, making EmaneModels compatible with Ns2ScriptedMobility.
:param bool moved: were nodes moved
:param list moved_netifs: interfaces that were moved
:return: nothing
"""
try:
wlan = self.session.get_object(self.object_id)
@ -177,28 +385,14 @@ class EmaneModel(WirelessModel):
def linkconfig(self, netif, bw=None, delay=None, loss=None, duplicate=None, jitter=None, netif2=None):
"""
Invoked when a Link Message is received. Default is unimplemented.
:param core.netns.vif.Veth netif: interface one
:param bw: bandwidth to set to
:param delay: packet delay to set to
:param loss: packet loss to set to
:param duplicate: duplicate percentage to set to
:param jitter: jitter to set to
:param core.netns.vif.Veth netif2: interface two
:return: nothing
"""
warntxt = "EMANE model %s does not support link " % self.name
warntxt += "configuration, dropping Link Message"
logger.warn(warntxt)
@staticmethod
def valuestrtoparamlist(dom, name, value):
"""
Helper to convert a parameter to a paramlist.
Returns a an XML paramlist, or None if the value does not expand to
multiple values.
"""
try:
values = utils.maketuplefromstr(value, str)
except SyntaxError:
logger.exception("error in value string to param list")
return None
if not hasattr(values, "__iter__"):
return None
if len(values) < 2:
return None
return xmlutils.add_param_list_to_parent(dom, parent=None, name=name, values=values)
logger.warn("emane model(%s) does not support link configuration", self.name)

View file

@ -2,159 +2,18 @@
ieee80211abg.py: EMANE IEEE 802.11abg model for CORE
"""
from core import emane
from core.emane.emanemodel import EmaneModel
from core.emane.universal import EmaneUniversalModel
from core.enumerations import ConfigDataTypes
from core.emane import emanemanifest
from core.emane import emanemodel
class EmaneIeee80211abgModel(EmaneModel):
def __init__(self, session, object_id=None):
EmaneModel.__init__(self, session, object_id)
class EmaneIeee80211abgModel(emanemodel.EmaneModel):
# model name
name = "emane_ieee80211abg"
_80211rates = "1 1 Mbps,2 2 Mbps,3 5.5 Mbps,4 11 Mbps,5 6 Mbps," + \
"6 9 Mbps,7 12 Mbps,8 18 Mbps,9 24 Mbps,10 36 Mbps,11 48 Mbps," + \
"12 54 Mbps"
if emane.VERSION >= emane.EMANE091:
xml_path = "/usr/share/emane/xml/models/mac/ieee80211abg"
else:
xml_path = "/usr/share/emane/models/ieee80211abg/xml"
# MAC parameters
_confmatrix_mac_base = [
("mode", ConfigDataTypes.UINT8.value, "0",
"0 802.11b (DSSS only),1 802.11b (DSSS only)," +
"2 802.11a or g (OFDM),3 802.11b/g (DSSS and OFDM)", "mode"),
("enablepromiscuousmode", ConfigDataTypes.BOOL.value, "0",
"On,Off", "enable promiscuous mode"),
("distance", ConfigDataTypes.UINT32.value, "1000",
"", "max distance (m)"),
("unicastrate", ConfigDataTypes.UINT8.value, "4", _80211rates,
"unicast rate (Mbps)"),
("multicastrate", ConfigDataTypes.UINT8.value, "1", _80211rates,
"multicast rate (Mbps)"),
("rtsthreshold", ConfigDataTypes.UINT16.value, "0",
"", "RTS threshold (bytes)"),
("pcrcurveuri", ConfigDataTypes.STRING.value,
"%s/ieee80211pcr.xml" % xml_path,
"", "SINR/PCR curve file"),
("flowcontrolenable", ConfigDataTypes.BOOL.value, "0",
"On,Off", "enable traffic flow control"),
("flowcontroltokens", ConfigDataTypes.UINT16.value, "10",
"", "number of flow control tokens"),
]
# mac parameters introduced in EMANE 0.8.1
# Note: The entry format for category queue parameters (queuesize, aifs, etc) were changed in
# EMANE 9.x, but are being preserved for the time being due to space constraints in the
# CORE GUI. A conversion function (get9xmacparamequivalent) has been defined to support this.
_confmatrix_mac_extended = [
("wmmenable", ConfigDataTypes.BOOL.value, "0",
"On,Off", "WiFi Multimedia (WMM)"),
("queuesize", ConfigDataTypes.STRING.value, "0:255 1:255 2:255 3:255",
"", "queue size (0-4:size)"),
("cwmin", ConfigDataTypes.STRING.value, "0:32 1:32 2:16 3:8",
"", "min contention window (0-4:minw)"),
("cwmax", ConfigDataTypes.STRING.value, "0:1024 1:1024 2:64 3:16",
"", "max contention window (0-4:maxw)"),
("aifs", ConfigDataTypes.STRING.value, "0:2 1:2 2:2 3:1",
"", "arbitration inter frame space (0-4:aifs)"),
("txop", ConfigDataTypes.STRING.value, "0:0 1:0 2:0 3:0",
"", "txop (0-4:usec)"),
("retrylimit", ConfigDataTypes.STRING.value, "0:3 1:3 2:3 3:3",
"", "retry limit (0-4:numretries)"),
]
_confmatrix_mac_091 = [
("radiometricenable", ConfigDataTypes.BOOL.value, "0",
"On,Off", "report radio metrics via R2RI"),
("radiometricreportinterval", ConfigDataTypes.FLOAT.value, "1.0",
"", "R2RI radio metric report interval (sec)"),
("neighbormetricdeletetime", ConfigDataTypes.FLOAT.value, "60.0",
"", "R2RI neighbor table inactivity time (sec)"),
]
_confmatrix_mac = _confmatrix_mac_base + _confmatrix_mac_extended
if emane.VERSION >= emane.EMANE091:
_confmatrix_mac += _confmatrix_mac_091
# PHY parameters from Universal PHY
_confmatrix_phy = EmaneUniversalModel.config_matrix
config_matrix = _confmatrix_mac + _confmatrix_phy
# value groupings
config_groups = "802.11 MAC Parameters:1-%d|Universal PHY Parameters:%d-%d" % (
len(_confmatrix_mac), len(_confmatrix_mac) + 1, len(config_matrix))
def buildnemxmlfiles(self, e, ifc):
"""
Build the necessary nem, mac, and phy XMLs in the given path.
If an individual NEM has a nonstandard config, we need to build
that file also. Otherwise the WLAN-wide
nXXemane_ieee80211abgnem.xml, nXXemane_ieee80211abgemac.xml,
nXXemane_ieee80211abgphy.xml are used.
"""
values = e.getifcconfig(self.object_id, self.name, self.getdefaultvalues(), ifc)
if values is None:
return
nemdoc = e.xmldoc("nem")
nem = nemdoc.getElementsByTagName("nem").pop()
nem.setAttribute("name", "ieee80211abg NEM")
e.appendtransporttonem(nemdoc, nem, self.object_id, ifc)
mactag = nemdoc.createElement("mac")
mactag.setAttribute("definition", self.macxmlname(ifc))
nem.appendChild(mactag)
phytag = nemdoc.createElement("phy")
phytag.setAttribute("definition", self.phyxmlname(ifc))
nem.appendChild(phytag)
e.xmlwrite(nemdoc, self.nemxmlname(ifc))
macdoc = e.xmldoc("mac")
mac = macdoc.getElementsByTagName("mac").pop()
mac.setAttribute("name", "ieee80211abg MAC")
mac.setAttribute("library", "ieee80211abgmaclayer")
names = self.getnames()
macnames = names[:len(self._confmatrix_mac)]
phynames = names[len(self._confmatrix_mac):]
# append all MAC options to macdoc
if emane.VERSION >= emane.EMANE091:
for macname in macnames:
mac9xnvpairlist = self.get9xmacparamequivalent(macname, values)
for nvpair in mac9xnvpairlist:
mac.appendChild(e.xmlparam(macdoc, nvpair[0], nvpair[1]))
else:
map(lambda n: mac.appendChild(e.xmlparam(macdoc, n, self.valueof(n, values))), macnames)
e.xmlwrite(macdoc, self.macxmlname(ifc))
phydoc = EmaneUniversalModel.getphydoc(e, self, values, phynames)
e.xmlwrite(phydoc, self.phyxmlname(ifc))
#
# TEMP HACK: Account for parameter convention change in EMANE 9.x
# This allows CORE to preserve the entry layout for the mac "category" parameters
# and work with EMANE 9.x onwards.
#
def get9xmacparamequivalent(self, macname, values):
"""
Generate a list of 80211abg mac parameters in 0.9.x layout for a given mac parameter
in 8.x layout.For mac category parameters, the list returned will contain the four
equivalent 9.x parameter and value pairs. Otherwise, the list returned will only
contain a single name and value pair.
"""
nvpairlist = []
macparmval = self.valueof(macname, values)
if macname in ["queuesize", "aifs", "cwmin", "cwmax", "txop", "retrylimit"]:
for catval in macparmval.split():
idx_and_val = catval.split(":")
idx = int(idx_and_val[0])
val = idx_and_val[1]
# aifs and tx are in microseconds. Convert to seconds.
if macname in ["aifs", "txop"]:
val = "%f" % (float(val) * 1e-6)
name9x = "%s%d" % (macname, idx)
nvpairlist.append([name9x, val])
else:
nvpairlist.append([macname, macparmval])
return nvpairlist
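# Worked example of the conversion above: the 8.x-style category entry
#   aifs = "0:2 1:2 2:2 3:1"
# expands to the 9.x parameters (aifs/txop values are converted from usec to sec)
#   aifs0 = "0.000002", aifs1 = "0.000002", aifs2 = "0.000002", aifs3 = "0.000001"
# while a non-category parameter such as unicastrate passes through as a single
# name/value pair.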
# mac configuration
mac_library = "ieee80211abgmaclayer"
mac_xml = "/usr/share/emane/manifest/ieee80211abgmaclayer.xml"
mac_defaults = {
"pcrcurveuri": "/usr/share/emane/xml/models/mac/ieee80211abg/ieee80211pcr.xml",
}
mac_config = emanemanifest.parse(mac_xml, mac_defaults)
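# With this change a model reduces to class-level metadata parsed from an EMANE
# manifest. A custom model could follow the same pattern; the library name,
# manifest path and defaults below are hypothetical:
#
#   class EmaneExampleModel(emanemodel.EmaneModel):
#       name = "emane_example"
#       mac_library = "examplemaclayer"
#       mac_xml = "/usr/share/emane/manifest/examplemaclayer.xml"
#       mac_defaults = {"pcrcurveuri": "/usr/share/emane/xml/models/mac/example/examplepcr.xml"}
#       mac_config = emanemanifest.parse(mac_xml, mac_defaults)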

View file

@ -4,9 +4,8 @@ control of an EMANE emulation. An EmaneNode has several attached NEMs that
share the same MAC+PHY model.
"""
from os import path
import os
from core import emane
from core import logger
from core.coreobj import PyCoreNet
from core.enumerations import LinkTypes
@ -14,19 +13,12 @@ from core.enumerations import NodeTypes
from core.enumerations import RegisterTlvs
try:
from emanesh.events import LocationEvent
from emane.events import LocationEvent
except ImportError:
logger.error("error loading emanesh")
try:
import emaneeventservice
import emaneeventlocation
except ImportError:
"""
Don't require all CORE users to have EMANE libeventservice and its
Python bindings installed.
"""
logger.error("error loading emaneeventservice and emaneeventlocation")
try:
from emanesh.events import LocationEvent
except ImportError:
logger.warn("compatible emane python bindings not installed")
class EmaneNet(PyCoreNet):
@ -126,74 +118,80 @@ class EmaneNode(EmaneNet):
"""
ret = {}
if self.model is None:
logger.info("warning: EmaneNode %s has no associated model" % self.name)
logger.info("warning: EmaneNode %s has no associated model", self.name)
return ret
for netif in self.netifs():
# <nem name="NODE-001" definition="rfpipenem.xml">
nementry = self.model.buildplatformxmlnementry(doc, self, netif)
# <transport definition="transvirtual.xml" group="1">
# <param name="device" value="n1.0.158" />
# </transport>
trans = self.model.buildplatformxmltransportentry(doc, self, netif)
nementry = self.model.build_nem_xml(doc, self, netif)
trans = self.model.build_transport_xml(doc, self, netif)
nementry.appendChild(trans)
ret[netif] = nementry
return ret
def buildnemxmlfiles(self, emane):
def build_xml_files(self, emane_manager):
"""
Let the configured model build the necessary nem, mac, and phy XMLs.
:param core.emane.emanemanager.EmaneManager emane_manager: core emane manager
:return: nothing
"""
if self.model is None:
return
# build XML for overall network (EmaneNode) configs
self.model.buildnemxmlfiles(emane, ifc=None)
self.model.build_xml_files(emane_manager, interface=None)
# build XML for specific interface (NEM) configs
need_virtual = False
need_raw = False
vtype = "virtual"
rtype = "raw"
for netif in self.netifs():
self.model.buildnemxmlfiles(emane, netif)
self.model.build_xml_files(emane_manager, netif)
if "virtual" in netif.transport_type:
need_virtual = True
vtype = netif.transport_type
else:
need_raw = True
rtype = netif.transport_type
# build transport XML files depending on type of interfaces involved
if need_virtual:
self.buildtransportxml(emane, vtype)
if need_raw:
self.buildtransportxml(emane, rtype)
if need_virtual:
self.buildtransportxml(emane_manager, vtype)
if need_raw:
self.buildtransportxml(emane_manager, rtype)
def buildtransportxml(self, emane, type):
def buildtransportxml(self, emane, transport_type):
"""
Write a transport XML file for the Virtual or Raw Transport.
"""
transdoc = emane.xmldoc("transport")
trans = transdoc.getElementsByTagName("transport").pop()
trans.setAttribute("name", "%s Transport" % type.capitalize())
trans.setAttribute("library", "trans%s" % type.lower())
trans.setAttribute("name", "%s Transport" % transport_type.capitalize())
trans.setAttribute("library", "trans%s" % transport_type.lower())
trans.appendChild(emane.xmlparam(transdoc, "bitrate", "0"))
flowcontrol = False
names = self.model.getnames()
values = emane.getconfig(self.objid, self.model.name,
self.model.getdefaultvalues())[1]
values = emane.getconfig(self.objid, self.model.name, self.model.getdefaultvalues())[1]
if "flowcontrolenable" in names and values:
i = names.index("flowcontrolenable")
if self.model.booltooffon(values[i]) == "on":
flowcontrol = True
if "virtual" in type.lower():
if path.exists("/dev/net/tun_flowctl"):
if "virtual" in transport_type.lower():
if os.path.exists("/dev/net/tun_flowctl"):
trans.appendChild(emane.xmlparam(transdoc, "devicepath", "/dev/net/tun_flowctl"))
else:
trans.appendChild(emane.xmlparam(transdoc, "devicepath", "/dev/net/tun"))
if flowcontrol:
trans.appendChild(emane.xmlparam(transdoc, "flowcontrolenable", "on"))
emane.xmlwrite(transdoc, self.transportxmlname(type.lower()))
emane.xmlwrite(transdoc, self.transportxmlname(transport_type.lower()))
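# For a virtual transport with flow control enabled, the document written above
# serializes roughly as follows (illustrative only):
#
#   <transport name="Virtual Transport" library="transvirtual">
#     <param name="bitrate" value="0" />
#     <param name="devicepath" value="/dev/net/tun_flowctl" />
#     <param name="flowcontrolenable" value="on" />
#   </transport>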
def transportxmlname(self, type):
"""
@ -222,7 +220,7 @@ class EmaneNode(EmaneNet):
# at this point we register location handlers for generating
# EMANE location events
netif.poshook = self.setnemposition
(x, y, z) = netif.node.position.get()
x, y, z = netif.node.position.get()
self.setnemposition(netif, x, y, z)
def deinstallnetifs(self):
@ -248,29 +246,15 @@ class EmaneNode(EmaneNet):
if nemid is None:
logger.info("nemid for %s is unknown" % ifname)
return
(lat, long, alt) = self.session.location.getgeo(x, y, z)
logger.info("setnemposition %s (%s) x,y,z=(%d,%d,%s)"
"(%.6f,%.6f,%.6f)" % \
(ifname, nemid, x, y, z, lat, long, alt))
if emane.VERSION >= emane.EMANE091:
event = LocationEvent()
else:
event = emaneeventlocation.EventLocation(1)
lat, long, alt = self.session.location.getgeo(x, y, z)
logger.info("setnemposition %s (%s) x,y,z=(%d,%d,%s)(%.6f,%.6f,%.6f)", ifname, nemid, x, y, z, lat, long, alt)
event = LocationEvent()
# altitude must be an integer or warning is printed
# unused: yaw, pitch, roll, azimuth, elevation, velocity
alt = int(round(alt))
if emane.VERSION >= emane.EMANE091:
event.append(nemid, latitude=lat, longitude=long, altitude=alt)
self.session.emane.service.publish(0, event)
else:
event.set(0, nemid, lat, long, alt)
self.session.emane.service.publish(
emaneeventlocation.EVENT_ID,
emaneeventservice.PLATFORMID_ANY,
emaneeventservice.NEMID_ANY,
emaneeventservice.COMPONENTID_ANY,
event.export()
)
event.append(nemid, latitude=lat, longitude=long, altitude=alt)
self.session.emane.service.publish(0, event)
def setnempositions(self, moved_netifs):
"""
@ -280,14 +264,12 @@ class EmaneNode(EmaneNet):
"""
if len(moved_netifs) == 0:
return
if self.session.emane.service is None:
logger.info("position service not available")
return
if emane.VERSION >= emane.EMANE091:
event = LocationEvent()
else:
event = emaneeventlocation.EventLocation(len(moved_netifs))
event = LocationEvent()
i = 0
for netif in moved_netifs:
nemid = self.getnemid(netif)
@ -295,26 +277,13 @@ class EmaneNode(EmaneNet):
if nemid is None:
logger.info("nemid for %s is unknown" % ifname)
continue
(x, y, z) = netif.node.getposition()
(lat, long, alt) = self.session.location.getgeo(x, y, z)
logger.info("setnempositions %d %s (%s) x,y,z=(%d,%d,%s)"
"(%.6f,%.6f,%.6f)" %
(i, ifname, nemid, x, y, z, lat, long, alt))
x, y, z = netif.node.getposition()
lat, long, alt = self.session.location.getgeo(x, y, z)
logger.info("setnempositions %d %s (%s) x,y,z=(%d,%d,%s)(%.6f,%.6f,%.6f)",
i, ifname, nemid, x, y, z, lat, long, alt)
# altitude must be an integer or warning is printed
alt = int(round(alt))
if emane.VERSION >= emane.EMANE091:
event.append(nemid, latitude=lat, longitude=long, altitude=alt)
else:
event.set(i, nemid, lat, long, alt)
event.append(nemid, latitude=lat, longitude=long, altitude=alt)
i += 1
if emane.VERSION >= emane.EMANE091:
self.session.emane.service.publish(0, event)
else:
self.session.emane.service.publish(
emaneeventlocation.EVENT_ID,
emaneeventservice.PLATFORMID_ANY,
emaneeventservice.NEMID_ANY,
emaneeventservice.COMPONENTID_ANY,
event.export()
)
self.session.emane.service.publish(0, event)

View file

@ -2,118 +2,18 @@
rfpipe.py: EMANE RF-PIPE model for CORE
"""
from core import emane
from core.emane.emanemodel import EmaneModel
from core.emane.universal import EmaneUniversalModel
from core.enumerations import ConfigDataTypes
from core.emane import emanemanifest
from core.emane import emanemodel
class EmaneRfPipeModel(EmaneModel):
def __init__(self, session, object_id=None):
EmaneModel.__init__(self, session, object_id)
class EmaneRfPipeModel(emanemodel.EmaneModel):
# model name
name = "emane_rfpipe"
if emane.VERSION >= emane.EMANE091:
xml_path = "/usr/share/emane/xml/models/mac/rfpipe"
else:
xml_path = "/usr/share/emane/models/rfpipe/xml"
# configuration parameters are
# ( "name", "type", "default", "possible-value-list", "caption")
# MAC parameters
_confmatrix_mac_base = [
("enablepromiscuousmode", ConfigDataTypes.BOOL.value, "0",
"True,False", "enable promiscuous mode"),
("datarate", ConfigDataTypes.UINT32.value, "1M",
"", "data rate (bps)"),
("flowcontrolenable", ConfigDataTypes.BOOL.value, "0",
"On,Off", "enable traffic flow control"),
("flowcontroltokens", ConfigDataTypes.UINT16.value, "10",
"", "number of flow control tokens"),
("pcrcurveuri", ConfigDataTypes.STRING.value,
"%s/rfpipepcr.xml" % xml_path,
"", "SINR/PCR curve file"),
]
_confmatrix_mac_081 = [
("jitter", ConfigDataTypes.FLOAT.value, "0.0",
"", "transmission jitter (usec)"),
("delay", ConfigDataTypes.FLOAT.value, "0.0",
"", "transmission delay (usec)"),
("transmissioncontrolmap", ConfigDataTypes.STRING.value, "",
"", "tx control map (nem:rate:freq:tx_dBm)"),
("enabletighttiming", ConfigDataTypes.BOOL.value, "0",
"On,Off", "enable tight timing for pkt delay"),
]
_confmatrix_mac_091 = [
("jitter", ConfigDataTypes.FLOAT.value, "0.0",
"", "transmission jitter (sec)"),
("delay", ConfigDataTypes.FLOAT.value, "0.0",
"", "transmission delay (sec)"),
("radiometricenable", ConfigDataTypes.BOOL.value, "0",
"On,Off", "report radio metrics via R2RI"),
("radiometricreportinterval", ConfigDataTypes.FLOAT.value, "1.0",
"", "R2RI radio metric report interval (sec)"),
("neighbormetricdeletetime", ConfigDataTypes.FLOAT.value, "60.0",
"", "R2RI neighbor table inactivity time (sec)"),
]
if emane.VERSION >= emane.EMANE091:
_confmatrix_mac = _confmatrix_mac_base + _confmatrix_mac_091
else:
_confmatrix_mac = _confmatrix_mac_base + _confmatrix_mac_081
# PHY parameters from Universal PHY
_confmatrix_phy = EmaneUniversalModel.config_matrix
config_matrix = _confmatrix_mac + _confmatrix_phy
# value groupings
config_groups = "RF-PIPE MAC Parameters:1-%d|Universal PHY Parameters:%d-%d" % (
len(_confmatrix_mac), len(_confmatrix_mac) + 1, len(config_matrix))
def buildnemxmlfiles(self, e, ifc):
"""
Build the necessary nem, mac, and phy XMLs in the given path.
If an individual NEM has a nonstandard config, we need to build
that file also. Otherwise the WLAN-wide nXXemane_rfpipenem.xml,
nXXemane_rfpipemac.xml, nXXemane_rfpipephy.xml are used.
"""
values = e.getifcconfig(self.object_id, self.name,
self.getdefaultvalues(), ifc)
if values is None:
return
nemdoc = e.xmldoc("nem")
nem = nemdoc.getElementsByTagName("nem").pop()
nem.setAttribute("name", "RF-PIPE NEM")
e.appendtransporttonem(nemdoc, nem, self.object_id, ifc)
mactag = nemdoc.createElement("mac")
mactag.setAttribute("definition", self.macxmlname(ifc))
nem.appendChild(mactag)
phytag = nemdoc.createElement("phy")
phytag.setAttribute("definition", self.phyxmlname(ifc))
nem.appendChild(phytag)
e.xmlwrite(nemdoc, self.nemxmlname(ifc))
names = list(self.getnames())
macnames = names[:len(self._confmatrix_mac)]
phynames = names[len(self._confmatrix_mac):]
macdoc = e.xmldoc("mac")
mac = macdoc.getElementsByTagName("mac").pop()
mac.setAttribute("name", "RF-PIPE MAC")
mac.setAttribute("library", "rfpipemaclayer")
if emane.VERSION < emane.EMANE091 and \
self.valueof("transmissioncontrolmap", values) is "":
macnames.remove("transmissioncontrolmap")
# EMANE 0.7.4 support
if emane.VERSION == emane.EMANE074:
# convert datarate from bps to kbps
i = names.index("datarate")
values = list(values)
values[i] = self.emane074_fixup(values[i], 1000)
# append MAC options to macdoc
map(lambda n: mac.appendChild(e.xmlparam(macdoc, n, self.valueof(n, values))), macnames)
e.xmlwrite(macdoc, self.macxmlname(ifc))
phydoc = EmaneUniversalModel.getphydoc(e, self, values, phynames)
e.xmlwrite(phydoc, self.phyxmlname(ifc))
# mac configuration
mac_library = "rfpipemaclayer"
mac_xml = "/usr/share/emane/manifest/rfpipemaclayer.xml"
mac_defaults = {
"pcrcurveuri": "/usr/share/emane/xml/models/mac/rfpipe/rfpipepcr.xml",
}
mac_config = emanemanifest.parse(mac_xml, mac_defaults)

View file

@ -2,97 +2,52 @@
tdma.py: EMANE TDMA model bindings for CORE
"""
from core import emane
import os
from core import constants
from core import logger
from core.emane.emanemodel import EmaneModel
from core.emane.universal import EmaneUniversalModel
from core.emane import emanemanifest
from core.emane import emanemodel
from core.enumerations import ConfigDataTypes
from core.misc import utils
class EmaneTdmaModel(EmaneModel):
def __init__(self, session, object_id=None):
EmaneModel.__init__(self, session, object_id)
class EmaneTdmaModel(emanemodel.EmaneModel):
# model name
name = "emane_tdma"
xml_path = "/usr/share/emane/xml/models/mac/tdmaeventscheduler"
if emane.VERSION < emane.EMANE101:
logger.error("EMANE TDMA requires EMANE 1.0.1 or greater")
# MAC parameters
_confmatrix_mac = [
("enablepromiscuousmode", ConfigDataTypes.BOOL.value, "0",
"True,False", "enable promiscuous mode"),
("flowcontrolenable", ConfigDataTypes.BOOL.value, "0",
"On,Off", "enable traffic flow control"),
("flowcontroltokens", ConfigDataTypes.UINT16.value, "10",
"", "number of flow control tokens"),
("fragmentcheckthreshold", ConfigDataTypes.UINT16.value, "2",
"", "rate in seconds for check if fragment reassembly efforts should be abandoned"),
("fragmenttimeoutthreshold", ConfigDataTypes.UINT16.value, "5",
"", "threshold in seconds to wait for another packet fragment for reassembly"),
("neighbormetricdeletetime", ConfigDataTypes.FLOAT.value, "60.0",
"", "neighbor RF reception timeout for removal from neighbor table (sec)"),
("neighbormetricupdateinterval", ConfigDataTypes.FLOAT.value, "1.0",
"", "neighbor table update interval (sec)"),
("pcrcurveuri", ConfigDataTypes.STRING.value, "%s/tdmabasemodelpcr.xml" % xml_path,
"", "SINR/PCR curve file"),
("queue.aggregationenable", ConfigDataTypes.BOOL.value, "1",
"On,Off", "enable transmit packet aggregation"),
("queue.aggregationslotthreshold", ConfigDataTypes.FLOAT.value, "90.0",
"", "percentage of a slot that must be filled in order to conclude aggregation"),
("queue.depth", ConfigDataTypes.UINT16.value, "256",
"", "size of the per service class downstream packet queues (packets)"),
("queue.fragmentationenable", ConfigDataTypes.BOOL.value, "1",
"On,Off", "enable packet fragmentation (over multiple slots)"),
("queue.strictdequeueenable", ConfigDataTypes.BOOL.value, "0",
"On,Off", "enable strict dequeueing to specified queues only"),
]
# mac configuration
mac_library = "tdmaeventschedulerradiomodel"
mac_xml = "/usr/share/emane/manifest/tdmaeventschedulerradiomodel.xml"
mac_defaults = {
"pcrcurveuri": "/usr/share/emane/xml/models/mac/tdmaeventscheduler/tdmabasemodelpcr.xml",
}
mac_config = emanemanifest.parse(mac_xml, mac_defaults)
# PHY parameters from Universal PHY
_confmatrix_phy = EmaneUniversalModel.config_matrix
# add custom schedule options and ignore it when writing emane xml
schedule_name = "schedule"
default_schedule = os.path.join(constants.CORE_DATA_DIR, "examples", "tdma", "schedule.xml")
mac_config.insert(
0,
(schedule_name, ConfigDataTypes.STRING.value, default_schedule, "", "TDMA schedule file (core)")
)
config_ignore = {schedule_name}
config_matrix = _confmatrix_mac + _confmatrix_phy
# value groupings
config_groups = "TDMA MAC Parameters:1-%d|Universal PHY Parameters:%d-%d" % (
len(_confmatrix_mac), len(_confmatrix_mac) + 1, len(config_matrix))
def buildnemxmlfiles(self, e, ifc):
def post_startup(self, emane_manager):
"""
Build the necessary nem, mac, and phy XMLs in the given path.
If an individual NEM has a nonstandard config, we need to build
that file also. Otherwise the WLAN-wide nXXemane_tdmanem.xml,
nXXemane_tdmamac.xml, nXXemane_tdmaphy.xml are used.
Logic to execute after the emane manager is finished with startup.
:param core.emane.emanemanager.EmaneManager emane_manager: emane manager for the session
:return: nothing
"""
values = e.getifcconfig(self.object_id, self.name, self.getdefaultvalues(), ifc)
# get configured schedule
values = emane_manager.getconfig(self.object_id, self.name, self.getdefaultvalues())[1]
if values is None:
return
nemdoc = e.xmldoc("nem")
nem = nemdoc.getElementsByTagName("nem").pop()
nem.setAttribute("name", "TDMA NEM")
e.appendtransporttonem(nemdoc, nem, self.object_id, ifc)
mactag = nemdoc.createElement("mac")
mactag.setAttribute("definition", self.macxmlname(ifc))
nem.appendChild(mactag)
phytag = nemdoc.createElement("phy")
phytag.setAttribute("definition", self.phyxmlname(ifc))
nem.appendChild(phytag)
e.xmlwrite(nemdoc, self.nemxmlname(ifc))
schedule = self.valueof(self.schedule_name, values)
names = list(self.getnames())
macnames = names[:len(self._confmatrix_mac)]
phynames = names[len(self._confmatrix_mac):]
# make any changes to the mac/phy names here to e.g. exclude them from
# the XML output
event_device = emane_manager.event_device
macdoc = e.xmldoc("mac")
mac = macdoc.getElementsByTagName("mac").pop()
mac.setAttribute("name", "TDMA MAC")
mac.setAttribute("library", "tdmaeventschedulerradiomodel")
# append MAC options to macdoc
map(lambda n: mac.appendChild(e.xmlparam(macdoc, n, self.valueof(n, values))), macnames)
e.xmlwrite(macdoc, self.macxmlname(ifc))
phydoc = EmaneUniversalModel.getphydoc(e, self, values, phynames)
e.xmlwrite(phydoc, self.phyxmlname(ifc))
# initiate tdma schedule
logger.info("setting up tdma schedule: schedule(%s) device(%s)", schedule, event_device)
utils.check_cmd(["emaneevent-tdmaschedule", "-i", event_device, schedule])
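# For reference, the same schedule could be re-pushed later in a running session
# with the helper used above; the device name and schedule path here are examples
# only:
#
#   utils.check_cmd(["emaneevent-tdmaschedule", "-i", "ctrl0", "/tmp/schedule.xml"])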

View file

@ -1,132 +0,0 @@
"""
universal.py: EMANE Universal PHY model for CORE. Enumerates configuration items
used for the Universal PHY.
"""
from core import emane
from core.emane.emanemodel import EmaneModel
from core.enumerations import ConfigDataTypes
class EmaneUniversalModel(EmaneModel):
"""
This Univeral PHY model is meant to be imported by other models,
not instantiated.
"""
def __init__(self, session, object_id=None):
raise NotImplemented("Cannot use this class directly")
name = "emane_universal"
_xmlname = "universalphy"
_xmllibrary = "universalphylayer"
# universal PHY parameters
_confmatrix_base = [
("bandwidth", ConfigDataTypes.UINT64.value, "1M",
"", "rf bandwidth (hz)"),
("frequency", ConfigDataTypes.UINT64.value, "2.347G",
"", "frequency (Hz)"),
("frequencyofinterest", ConfigDataTypes.UINT64.value, "2.347G",
"", "frequency of interest (Hz)"),
("subid", ConfigDataTypes.UINT16.value, "1",
"", "subid"),
("systemnoisefigure", ConfigDataTypes.FLOAT.value, "4.0",
"", "system noise figure (dB)"),
("txpower", ConfigDataTypes.FLOAT.value, "0.0",
"", "transmit power (dBm)"),
]
_confmatrix_081 = [
("antennagain", ConfigDataTypes.FLOAT.value, "0.0",
"", "antenna gain (dBi)"),
("antennaazimuth", ConfigDataTypes.FLOAT.value, "0.0",
"", "antenna azimuth (deg)"),
("antennaelevation", ConfigDataTypes.FLOAT.value, "0.0",
"", "antenna elevation (deg)"),
("antennaprofileid", ConfigDataTypes.STRING.value, "1",
"", "antenna profile ID"),
("antennaprofilemanifesturi", ConfigDataTypes.STRING.value, "",
"", "antenna profile manifest URI"),
("antennaprofileenable", ConfigDataTypes.BOOL.value, "0",
"On,Off", "antenna profile mode"),
("defaultconnectivitymode", ConfigDataTypes.BOOL.value, "1",
"On,Off", "default connectivity"),
("frequencyofinterestfilterenable", ConfigDataTypes.BOOL.value, "1",
"On,Off", "frequency of interest filter enable"),
("noiseprocessingmode", ConfigDataTypes.BOOL.value, "0",
"On,Off", "enable noise processing"),
("pathlossmode", ConfigDataTypes.STRING.value, "2ray",
"pathloss,2ray,freespace", "path loss mode"),
]
_confmatrix_091 = [
("fixedantennagain", ConfigDataTypes.FLOAT.value, "0.0",
"", "antenna gain (dBi)"),
("fixedantennagainenable", ConfigDataTypes.BOOL.value, "1",
"On,Off", "enable fixed antenna gain"),
("noisemode", ConfigDataTypes.STRING.value, "none",
"none,all,outofband", "noise processing mode"),
("noisebinsize", ConfigDataTypes.UINT64.value, "20",
"", "noise bin size in microseconds"),
("propagationmodel", ConfigDataTypes.STRING.value, "2ray",
"precomputed,2ray,freespace", "path loss mode"),
]
if emane.VERSION >= emane.EMANE091:
config_matrix = _confmatrix_base + _confmatrix_091
else:
config_matrix = _confmatrix_base + _confmatrix_081
# old parameters
_confmatrix_ver074 = [
("antennaazimuthbeamwidth", ConfigDataTypes.FLOAT.value, "360.0",
"", "azimith beam width (deg)"),
("antennaelevationbeamwidth", ConfigDataTypes.FLOAT.value, "180.0",
"", "elevation beam width (deg)"),
("antennatype", ConfigDataTypes.STRING.value, "omnidirectional",
"omnidirectional,unidirectional", "antenna type"),
]
# parameters that require unit conversion for 0.7.4
_update_ver074 = ("bandwidth", "frequency", "frequencyofinterest")
# parameters that should be removed for 0.7.4
_remove_ver074 = ("antennaprofileenable", "antennaprofileid",
"antennaprofilemanifesturi",
"frequencyofinterestfilterenable")
@classmethod
def getphydoc(cls, e, mac, values, phynames):
phydoc = e.xmldoc("phy")
phy = phydoc.getElementsByTagName("phy").pop()
phy.setAttribute("name", cls._xmlname)
if emane.VERSION < emane.EMANE091:
phy.setAttribute("library", cls._xmllibrary)
# EMANE 0.7.4 suppport - to be removed when 0.7.4 support is deprecated
if emane.VERSION == emane.EMANE074:
names = mac.getnames()
values = list(values)
phynames = list(phynames)
# update units for some parameters
for p in cls._update_ver074:
i = names.index(p)
# these all happen to be KHz, so 1000 is used
values[i] = cls.emane074_fixup(values[i], 1000)
# remove new incompatible options
for p in cls._remove_ver074:
phynames.remove(p)
# insert old options with their default values
for old in cls._confmatrix_ver074:
phy.appendChild(e.xmlparam(phydoc, old[0], old[2]))
frequencies = None
if emane.VERSION >= emane.EMANE091:
name = "frequencyofinterest"
value = mac.valueof(name, values)
frequencies = cls.valuestrtoparamlist(phydoc, name, value)
if frequencies:
phynames = list(phynames)
phynames.remove("frequencyofinterest")
# append all PHY options to phydoc
map(lambda n: phy.appendChild(e.xmlparam(phydoc, n, mac.valueof(n, values))), phynames)
if frequencies:
phy.appendChild(frequencies)
return phydoc

View file

@ -0,0 +1,933 @@
import atexit
import os
import signal
import sys
import core.services
from core import logger
from core.coreobj import PyCoreNet
from core.coreobj import PyCoreNode
from core.data import NodeData
from core.emulator.emudata import LinkOptions
from core.emulator.emudata import NodeOptions
from core.enumerations import EventTypes
from core.enumerations import LinkTypes
from core.enumerations import NodeTypes
from core.misc import nodemaps
from core.misc import nodeutils
from core.session import Session
from core.xml.xmlparser import core_document_parser
from core.xml.xmlwriter import core_document_writer
def signal_handler(signal_number, _):
"""
Handle signals and force an exit with cleanup.
:param int signal_number: signal number
:param _: ignored
:return: nothing
"""
logger.info("caught signal: %s", signal_number)
sys.exit(signal_number)
signal.signal(signal.SIGHUP, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
signal.signal(signal.SIGUSR1, signal_handler)
signal.signal(signal.SIGUSR2, signal_handler)
def create_interface(node, network, interface_data):
"""
Create an interface for a node on a network using provided interface data.
:param node: node to create interface for
:param network: network to associate interface with
:param core.emulator.emudata.InterfaceData interface_data: interface data
:return: created interface
"""
node.newnetif(
network,
addrlist=interface_data.get_addresses(),
hwaddr=interface_data.mac,
ifindex=interface_data.id,
ifname=interface_data.name
)
return node.netif(interface_data.id, network)
def link_config(network, interface, link_options, devname=None, interface_two=None):
"""
Convenience method for configuring a link.
:param network: network to configure link for
:param interface: interface to configure
:param core.emulator.emudata.LinkOptions link_options: data to configure link with
:param str devname: device name, default is None
:param interface_two: other interface associated, default is None
:return: nothing
"""
config = {
"netif": interface,
"bw": link_options.bandwidth,
"delay": link_options.delay,
"loss": link_options.per,
"duplicate": link_options.dup,
"jitter": link_options.jitter,
"netif2": interface_two
}
# hacky check here, because physical and emane nodes do not conform to the same linkconfig interface
if not nodeutils.is_node(network, [NodeTypes.EMANE, NodeTypes.PHYSICAL]):
config["devname"] = devname
network.linkconfig(**config)
def is_net_node(node):
"""
Convenience method for testing if a legacy core node is considered a network node.
:param object node: object to test against
:return: True if object is an instance of a network node, False otherwise
:rtype: bool
"""
return isinstance(node, PyCoreNet)
def is_core_node(node):
"""
Convenience method for testing if a legacy core node is considered a core node.
:param object node: object to test against
:return: True if object is an instance of a core node, False otherwise
:rtype: bool
"""
return isinstance(node, PyCoreNode)
class IdGen(object):
def __init__(self, _id=0):
self.id = _id
def next(self):
self.id += 1
return self.id
class EmuSession(Session):
def __init__(self, session_id, config=None, mkdir=True):
super(EmuSession, self).__init__(session_id, config, mkdir)
# object management
self.node_id_gen = IdGen()
# set default services
self.services.defaultservices = {
"mdr": ("zebra", "OSPFv3MDR", "IPForward"),
"PC": ("DefaultRoute",),
"prouter": ("zebra", "OSPFv2", "OSPFv3", "IPForward"),
"router": ("zebra", "OSPFv2", "OSPFv3", "IPForward"),
"host": ("DefaultRoute", "SSH"),
}
def _link_nodes(self, node_one_id, node_two_id):
"""
Convenience method for retrieving nodes within link data.
:param int node_one_id: node one id
:param int node_two_id: node two id
:return: nodes, network nodes if present, and tunnel if present
:rtype: tuple
"""
logger.debug("link message between node1(%s) and node2(%s)", node_one_id, node_two_id)
# values to fill
net_one = None
net_two = None
# retrieve node one
node_one = self.get_object(node_one_id)
node_two = self.get_object(node_two_id)
# both node ids are provided
tunnel = self.broker.gettunnel(node_one_id, node_two_id)
logger.debug("tunnel between nodes: %s", tunnel)
if nodeutils.is_node(tunnel, NodeTypes.TAP_BRIDGE):
net_one = tunnel
if tunnel.remotenum == node_one_id:
node_one = None
else:
node_two = None
# physical node connected via gre tap tunnel
elif tunnel:
if tunnel.remotenum == node_one_id:
node_one = None
else:
node_two = None
if is_net_node(node_one):
if not net_one:
net_one = node_one
else:
net_two = node_one
node_one = None
if is_net_node(node_two):
if not net_one:
net_one = node_two
else:
net_two = node_two
node_two = None
logger.debug("link node types n1(%s) n2(%s) net1(%s) net2(%s) tunnel(%s)",
node_one, node_two, net_one, net_two, tunnel)
return node_one, node_two, net_one, net_two, tunnel
# TODO: this doesn't appear to ever be used, EMANE or basic wireless range
def _link_wireless(self, objects, connect):
"""
Objects to deal with when connecting/disconnecting wireless links.
:param list objects: possible objects to deal with
:param bool connect: link interfaces if True, unlink otherwise
:return: nothing
"""
objects = [x for x in objects if x]
if len(objects) < 2:
raise ValueError("wireless link failure: %s", objects)
logger.debug("handling wireless linking objects(%) connect(%s)", objects, connect)
common_networks = objects[0].commonnets(objects[1])
if not common_networks:
raise ValueError("no common network found for wireless link/unlink")
for common_network, interface_one, interface_two in common_networks:
if not nodeutils.is_node(common_network, [NodeTypes.WIRELESS_LAN, NodeTypes.EMANE]):
logger.info("skipping common network that is not wireless/emane: %s", common_network)
continue
logger.info("wireless linking connect(%s): %s - %s", connect, interface_one, interface_two)
if connect:
common_network.link(interface_one, interface_two)
else:
common_network.unlink(interface_one, interface_two)
def add_link(self, node_one_id, node_two_id, interface_one=None, interface_two=None, link_options=LinkOptions()):
"""
Add a link between nodes.
:param int node_one_id: node one id
:param int node_two_id: node two id
:param core.emulator.emudata.InterfaceData interface_one: node one interface data, defaults to none
:param core.emulator.emudata.InterfaceData interface_two: node two interface data, defaults to none
:param core.emulator.emudata.LinkOptions link_options: data for creating link, defaults to no options
:return: nothing
"""
# get node objects identified by link data
node_one, node_two, net_one, net_two, tunnel = self._link_nodes(node_one_id, node_two_id)
if node_one:
node_one.lock.acquire()
if node_two:
node_two.lock.acquire()
try:
# wireless link
if link_options.type == LinkTypes.WIRELESS:
objects = [node_one, node_two, net_one, net_two]
self._link_wireless(objects, connect=True)
# wired link
else:
# 2 nodes being linked, ptp network
if all([node_one, node_two]) and not net_one:
logger.info("adding link for peer to peer nodes: %s - %s", node_one.name, node_two.name)
ptp_class = nodeutils.get_node_class(NodeTypes.PEER_TO_PEER)
start = self.state > EventTypes.DEFINITION_STATE.value
net_one = self.add_object(cls=ptp_class, start=start)
# node to network
if node_one and net_one:
logger.info("adding link from node to network: %s - %s", node_one.name, net_one.name)
interface = create_interface(node_one, net_one, interface_one)
link_config(net_one, interface, link_options)
# network to node
if node_two and net_one:
logger.info("adding link from network to node: %s - %s", node_two.name, net_one.name)
interface = create_interface(node_two, net_one, interface_two)
if not link_options.unidirectional:
link_config(net_one, interface, link_options)
# network to network
if net_one and net_two:
logger.info("adding link from network to network: %s", net_one.name, net_two.name)
if nodeutils.is_node(net_two, NodeTypes.RJ45):
interface = net_two.linknet(net_one)
else:
interface = net_one.linknet(net_two)
link_config(net_one, interface, link_options)
if not link_options.unidirectional:
interface.swapparams("_params_up")
link_config(net_two, interface, link_options, devname=interface.name)
interface.swapparams("_params_up")
# a tunnel node was found for the nodes
addresses = []
if not node_one and all([net_one, interface_one]):
addresses.extend(interface_one.get_addresses())
if not node_two and all([net_two, interface_two]):
addresses.extend(interface_two.get_addresses())
# tunnel node logic
key = link_options.key
if key and nodeutils.is_node(net_one, NodeTypes.TUNNEL):
logger.info("setting tunnel key for: %s", net_one.name)
net_one.setkey(key)
if addresses:
net_one.addrconfig(addresses)
if key and nodeutils.is_node(net_two, NodeTypes.TUNNEL):
logger.info("setting tunnel key for: %s", net_two.name)
net_two.setkey(key)
if addresses:
net_two.addrconfig(addresses)
# physical node connected with tunnel
if not net_one and not net_two and (node_one or node_two):
if node_one and nodeutils.is_node(node_one, NodeTypes.PHYSICAL):
logger.info("adding link for physical node: %s", node_one.name)
addresses = interface_one.get_addresses()
node_one.adoptnetif(tunnel, interface_one.id, interface_one.mac, addresses)
link_config(node_one, tunnel, link_options)
elif node_two and nodeutils.is_node(node_two, NodeTypes.PHYSICAL):
logger.info("adding link for physical node: %s", node_two.name)
addresses = interface_two.get_addresses()
node_two.adoptnetif(tunnel, interface_two.id, interface_two.mac, addresses)
link_config(node_two, tunnel, link_options)
finally:
if node_one:
node_one.lock.release()
if node_two:
node_two.lock.release()
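# Hedged usage sketch for add_link(); interface_one and interface_two stand for
# core.emulator.emudata.InterfaceData objects whose construction is not shown in
# this diff, and the LinkOptions attributes are assumed to be plain writable
# fields (they are read as such by link_config above):
#
#   session = EmuSession(1)
#   node_a = session.add_node()
#   node_b = session.add_node()
#   options = LinkOptions()
#   options.bandwidth = 512000
#   options.delay = 5000
#   session.add_link(node_a.objid, node_b.objid, interface_one, interface_two, link_options=options)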
def delete_link(self, node_one_id, node_two_id, interface_one_id, interface_two_id, link_type=LinkTypes.WIRED):
"""
Delete a link between nodes.
:param int node_one_id: node one id
:param int node_two_id: node two id
:param int interface_one_id: interface id for node one
:param int interface_two_id: interface id for node two
:param core.enumerations.LinkTypes link_type: link type to delete
:return: nothing
"""
# interface data
# interface_one_data, interface_two_data = get_interfaces(link_data)
# get node objects identified by link data
node_one, node_two, net_one, net_two, tunnel = self._link_nodes(node_one_id, node_two_id)
if node_one:
node_one.lock.acquire()
if node_two:
node_two.lock.acquire()
try:
# wireless link
if link_type == LinkTypes.WIRELESS:
objects = [node_one, node_two, net_one, net_two]
self._link_wireless(objects, connect=False)
# wired link
else:
if all([node_one, node_two]):
# TODO: fix this for the case where ifindex[1,2] are not specified
# a wired unlink event, delete the connecting bridge
interface_one = node_one.netif(interface_one_id)
interface_two = node_two.netif(interface_two_id)
# get interfaces from common network, if no network node
# otherwise get interfaces between a node and network
if not interface_one and not interface_two:
common_networks = node_one.commonnets(node_two)
for network, common_interface_one, common_interface_two in common_networks:
if (net_one and network == net_one) or not net_one:
interface_one = common_interface_one
interface_two = common_interface_two
break
if all([interface_one, interface_two]) and any([interface_one.net, interface_two.net]):
if interface_one.net != interface_two.net and all([interface_one.up, interface_two.up]):
raise ValueError("no common network found")
logger.info("deleting link node(%s):interface(%s) node(%s):interface(%s)",
node_one.name, interface_one.name, node_two.name, interface_two.name)
net_one = interface_one.net
interface_one.detachnet()
interface_two.detachnet()
if net_one.numnetif() == 0:
self.delete_object(net_one.objid)
node_one.delnetif(interface_one.netindex)
node_two.delnetif(interface_two.netindex)
finally:
if node_one:
node_one.lock.release()
if node_two:
node_two.lock.release()
def update_link(self, node_one_id, node_two_id, link_options, interface_one_id=None, interface_two_id=None):
"""
Update link information between nodes.
:param int node_one_id: node one id
:param int node_two_id: node two id
:param int interface_one_id: interface id for node one
:param int interface_two_id: interface id for node two
:param core.emulator.emudata.LinkOptions link_options: data to update link with
:return: nothing
"""
# interface data
# interface_one_data, interface_two_data = get_interfaces(link_data)
# get node objects identified by link data
node_one, node_two, net_one, net_two, tunnel = self._link_nodes(node_one_id, node_two_id)
if node_one:
node_one.lock.acquire()
if node_two:
node_two.lock.acquire()
try:
# wireless link
if link_options.type == LinkTypes.WIRELESS.value:
raise ValueError("cannot update wireless link")
else:
if not node_one and not node_two:
if net_one and net_two:
# modify link between nets
interface = net_one.getlinknetif(net_two)
upstream = False
if not interface:
upstream = True
interface = net_two.getlinknetif(net_one)
if not interface:
raise ValueError("modify unknown link between nets")
if upstream:
interface.swapparams("_params_up")
link_config(net_one, interface, link_options, devname=interface.name)
interface.swapparams("_params_up")
else:
link_config(net_one, interface, link_options)
if not link_options.unidirectional:
if upstream:
link_config(net_two, interface, link_options)
else:
interface.swapparams("_params_up")
link_config(net_two, interface, link_options, devname=interface.name)
interface.swapparams("_params_up")
else:
raise ValueError("modify link for unknown nodes")
elif not node_one:
# node one is a layer 2 node, node two is a layer 3 node
interface = node_two.netif(interface_two_id, net_one)
link_config(net_one, interface, link_options)
elif not node_two:
# node two is a layer 2 node, node one is a layer 3 node
interface = node_one.netif(interface_one_id, net_one)
link_config(net_one, interface, link_options)
else:
common_networks = node_one.commonnets(node_two)
if not common_networks:
raise ValueError("no common network found")
for net_one, interface_one, interface_two in common_networks:
if interface_one_id is not None and interface_one_id != node_one.getifindex(interface_one):
continue
link_config(net_one, interface_one, link_options, interface_two=interface_two)
if not link_options.unidirectional:
link_config(net_one, interface_two, link_options, interface_two=interface_one)
finally:
if node_one:
node_one.lock.release()
if node_two:
node_two.lock.release()
def add_node(self, _type=NodeTypes.DEFAULT, _id=None, node_options=NodeOptions()):
"""
Add a node to the session, based on the provided node data.
:param core.enumerations.NodeTypes _type: type of node to create
:param int _id: id for node, defaults to None for generated id
:param core.emulator.emudata.NodeOptions node_options: data to create node with
:return: created node
"""
# retrieve node class for given node type
try:
node_class = nodeutils.get_node_class(_type)
except KeyError:
logger.error("invalid node type to create: %s", _type)
return None
# set node start based on current session state, override and check when rj45
start = self.state > EventTypes.DEFINITION_STATE.value
enable_rj45 = getattr(self.options, "enablerj45", "0") == "1"
if _type == NodeTypes.RJ45 and not enable_rj45:
start = False
# determine node id
if not _id:
while True:
_id = self.node_id_gen.next()
if _id not in self.objects:
break
# generate name if not provided
name = node_options.name
if not name:
name = "%s%s" % (node_class.__name__, _id)
# create node
logger.info("creating node(%s) id(%s) name(%s) start(%s)", node_class.__name__, _id, name, start)
node = self.add_object(cls=node_class, objid=_id, name=name, start=start)
# set node attributes
node.icon = node_options.icon
node.canvas = node_options.canvas
node.opaque = node_options.opaque
# set node position and broadcast it
self.set_node_position(node, node_options)
# add services to default and physical nodes only
if _type in [NodeTypes.DEFAULT, NodeTypes.PHYSICAL]:
node.type = node_options.model
logger.debug("set node type: %s", node.type)
services = "|".join(node_options.services) or None
self.services.addservicestonode(node, node.type, services)
# boot nodes if created after runtime; LxcNodes, Physical, and RJ45 are all PyCoreNodes
is_boot_node = isinstance(node, PyCoreNode) and not nodeutils.is_node(node, NodeTypes.RJ45)
if self.state == EventTypes.RUNTIME_STATE.value and is_boot_node:
self.write_objects()
self.add_remove_control_interface(node=node, remove=False)
# TODO: common method to both Physical and LxcNodes, but not the common PyCoreNode
node.boot()
return node
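# Hedged usage sketch for add_node(); NodeOptions() is constructed without
# arguments elsewhere in this module, and name/model/x/y are assumed to be plain
# writable attributes since they are read directly above:
#
#   options = NodeOptions()
#   options.name = "router1"
#   options.model = "router"
#   options.x = 100
#   options.y = 100
#   node = session.add_node(node_options=options)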
def update_node(self, node_id, node_options):
"""
Update node information.
:param int node_id: id of node to update
:param core.emulator.emudata.NodeOptions node_options: data to update node with
:return: True if node updated, False otherwise
:rtype: bool
"""
result = False
try:
# get node to update
node = self.get_object(node_id)
# set node position and broadcast it
self.set_node_position(node, node_options)
# update attributes
node.canvas = node_options.canvas
node.icon = node_options.icon
# set node as updated successfully
result = True
except KeyError:
logger.error("failure to update node that does not exist: %s", node_options.id)
return result
def delete_node(self, node_id):
"""
Delete a node from the session and check if the session should shut down when no nodes are left.
:param int node_id: id of node to delete
:return: True if node deleted, False otherwise
:rtype: bool
"""
# delete node and check for session shutdown if a node was removed
result = self.custom_delete_object(node_id)
if result:
self.check_shutdown()
return result
def set_node_position(self, node, node_options):
"""
Set position for a node, use lat/lon/alt if needed.
:param node: node to set position for
:param core.emulator.emudata.NodeOptions node_options: data for node
:return: nothing
"""
# extract location values
x = node_options.x
y = node_options.y
lat = node_options.lat
lon = node_options.lon
alt = node_options.alt
# check if we need to generate position from lat/lon/alt
has_empty_position = all(i is None for i in [x, y])
has_lat_lon_alt = all(i is not None for i in [lat, lon, alt])
using_lat_lon_alt = has_empty_position and has_lat_lon_alt
if using_lat_lon_alt:
x, y, _ = self.location.getxyz(lat, lon, alt)
# set position and broadcast
node.setposition(x, y, None)
# broadcast updated location when using lat/lon/alt
if using_lat_lon_alt:
self.broadcast_node_location(node)
def broadcast_node_location(self, node):
"""
Broadcast node location to all listeners.
:param core.netns.nodes.PyCoreObj node: node to broadcast location for
:return: nothing
"""
node_data = NodeData(
message_type=0,
id=node.objid,
x_position=node.position.x,
y_position=node.position.y
)
self.broadcast_node(node_data)
def start_mobility(self, node_ids=None):
"""
Start mobility for the provided node ids.
:param list[int] node_ids: nodes to start mobility for
:return: nothing
"""
self.mobility.startup(node_ids)
def shutdown(self):
"""
Shutdown session.
:return: nothing
"""
logger.info("session(%s) shutting down", self.session_id)
self.set_state(EventTypes.DATACOLLECT_STATE, send_event=True)
self.set_state(EventTypes.SHUTDOWN_STATE, send_event=True)
super(EmuSession, self).shutdown()
def custom_delete_object(self, object_id):
"""
Remove an emulation object.
:param int object_id: object id to remove
:return: True if object deleted, False otherwise
"""
result = False
with self._objects_lock:
if object_id in self.objects:
obj = self.objects.pop(object_id)
obj.shutdown()
result = True
return result
def is_active(self):
"""
Determine if this session is considered to be active. (Runtime or Data collect states)
:return: True if active, False otherwise
"""
result = self.state in {EventTypes.RUNTIME_STATE.value, EventTypes.DATACOLLECT_STATE.value}
logger.info("session(%s) checking if active: %s", self.session_id, result)
return result
def open_xml(self, file_name, start=False):
"""
Import a session from the EmulationScript XML format.
:param str file_name: xml file to load session from
:param bool start: instantiate session if true, false otherwise
:return: nothing
"""
# clear out existing session
self.clear()
# set default node class when one is not provided
node_class = nodeutils.get_node_class(NodeTypes.DEFAULT)
options = {"start": start, "nodecls": node_class}
core_document_parser(self, file_name, options)
if start:
self.name = os.path.basename(file_name)
self.file_name = file_name
self.instantiate()
def save_xml(self, file_name, version):
"""
Export a session to the EmulationScript XML format.
:param str file_name: file name to write session xml to
:param str version: xml version type
:return: nothing
"""
doc = core_document_writer(self, version)
doc.writexml(file_name)
def add_hook(self, state, file_name, source_name, data):
"""
Store a hook from a received file message.
:param int state: when to run hook
:param str file_name: file name for hook
:param str source_name: source name
:param data: hook data
:return: nothing
"""
# hack to conform with old logic until updated
state = ":%s" % state
self.set_hook(state, file_name, source_name, data)
def add_node_service_file(self, node_id, service_name, file_name, source_name, data):
"""
Add a service file for a node.
:param int node_id: node to add service file to
:param str service_name: service file to add
:param str file_name: file name to use
:param str source_name: source file
:param str data: file data to save
:return: nothing
"""
# hack to conform with old logic until updated
service_name = ":%s" % service_name
self.services.setservicefile(node_id, service_name, file_name, source_name, data)
def add_node_file(self, node_id, source_name, file_name, data):
"""
Add a file to a node.
:param int node_id: node to add file to
:param str source_name: source file name
:param str file_name: file name to add
:param str data: file data
:return: nothing
"""
node = self.get_object(node_id)
if source_name is not None:
node.addfile(source_name, file_name)
elif data is not None:
node.nodefile(file_name, data)
def clear(self):
"""
Clear all CORE session data. (objects, hooks, broker)
:return: nothing
"""
self.delete_objects()
self.del_hooks()
self.broker.reset()
def start_events(self):
"""
Start event loop.
:return: nothing
"""
self.event_loop.run()
def services_event(self, event_data):
"""
Handle a service event.
:param core.data.EventData event_data: event data to handle
:return: nothing
"""
self.services.handleevent(event_data)
def mobility_event(self, event_data):
"""
Handle a mobility event.
:param core.data.EventData event_data: event data to handle
:return: nothing
"""
self.mobility.handleevent(event_data)
def create_wireless_node(self, _id=None, node_options=NodeOptions()):
"""
Create a wireless node for use within wireless/EMANE networks.
:param int _id: int for node, defaults to None and will be generated
:param core.emulator.emudata.NodeOptions node_options: options for emane node, model will always be "mdr"
:return: new emane node
:rtype: core.netns.nodes.CoreNode
"""
node_options.model = "mdr"
return self.add_node(_type=NodeTypes.DEFAULT, _id=_id, node_options=node_options)
def create_emane_network(self, model, geo_reference, geo_scale=None, node_options=NodeOptions()):
"""
Convenience method for creating an emane network.
:param model: emane model to use for emane network
:param geo_reference: geo reference point to use for emane node locations
:param geo_scale: geo scale to use for emane node locations, defaults to 1.0
:param core.emulator.emudata.NodeOptions node_options: options for emane node being created
:return: created emane network
"""
# required to be set for emane to function properly
self.location.setrefgeo(*geo_reference)
if geo_scale:
self.location.refscale = geo_scale
# create and return network
emane_network = self.add_node(_type=NodeTypes.EMANE, node_options=node_options)
self.set_emane_model(emane_network, model)
return emane_network
def set_emane_model(self, emane_node, emane_model):
"""
Set emane model for a given emane node.
:param emane_node: emane node to set model for
:param emane_model: emane model to set
:return: nothing
"""
values = list(emane_model.getdefaultvalues())
self.emane.setconfig(emane_node.objid, emane_model.name, values)
def set_wireless_model(self, node, model):
"""
Convenience method for setting a wireless model.
:param node: node to set wireless model for
:param core.mobility.WirelessModel model: wireless model to set node to
:return: nothing
"""
values = list(model.getdefaultvalues())
node.setmodel(model, values)
def wireless_link_all(self, network, nodes):
"""
Link all nodes to the provided wireless network.
:param network: wireless network to link nodes to
:param nodes: nodes to link to wireless network
:return: nothing
"""
for node in nodes:
for common_network, interface_one, interface_two in node.commonnets(network):
common_network.link(interface_one, interface_two)
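# Illustrative usage sketch (not part of the original file): ties together the
# wireless convenience methods above. Assumes "session" is an existing
# EmuSession and "emane_model" is an EMANE model class; both names are
# placeholders for illustration only.
def _example_wireless_usage(session, emane_model):
    # create an EMANE network with a geographic reference point (lat, lon, alt)
    emane_network = session.create_emane_network(
        model=emane_model,
        geo_reference=(47.57917, -122.13232, 2.0)
    )
    # create two mdr-model wireless nodes and link them through the network
    node_one = session.create_wireless_node()
    node_two = session.create_wireless_node()
    session.wireless_link_all(emane_network, [node_one, node_two])
    return emane_network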
class CoreEmu(object):
"""
Provides logic for creating and configuring CORE sessions and the nodes within them.
"""
def __init__(self, config=None):
"""
Create a CoreEmu object.
:param dict config: configuration options
"""
# configuration
self.config = config
# session management
self.session_id_gen = IdGen(_id=59999)
self.sessions = {}
# set default nodes
node_map = nodemaps.NODES
nodeutils.set_node_map(node_map)
# load default services
core.services.load()
# catch exit event
atexit.register(self.shutdown)
def update_nodes(self, node_map):
"""
Updates node map used by core.
:param dict node_map: node map to update existing node map with
:return: nothing
"""
nodeutils.update_node_map(node_map)
def shutdown(self):
"""
Shutdown all CORE sessions.
:return: nothing
"""
logger.info("shutting down all sessions")
sessions = self.sessions.copy()
self.sessions.clear()
for session in sessions.itervalues():
session.shutdown()
def create_session(self, _id=None, master=True):
"""
Create a new CORE session, set to master if running standalone.
:param int _id: session id for new session
:param bool master: sets session to master
:return: created session
:rtype: EmuSession
"""
session_id = _id
if not session_id:
while True:
session_id = self.session_id_gen.next()
if session_id not in self.sessions:
break
session = EmuSession(session_id, config=self.config)
logger.info("created session: %s", session_id)
if master:
session.master = True
self.sessions[session_id] = session
return session
def delete_session(self, _id):
"""
Shutdown and delete a CORE session.
:param int _id: session id to delete
:return: True if deleted, False otherwise
:rtype: bool
"""
logger.info("deleting session: %s", _id)
session = self.sessions.pop(_id, None)
result = False
if session:
logger.info("shutting session down: %s", _id)
session.shutdown()
result = True
else:
logger.error("session to delete did not exist: %s", _id)
return result
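# Illustrative usage sketch (not part of the original file): the typical
# CoreEmu lifecycle using the methods above.
def _example_coreemu_lifecycle():
    coreemu = CoreEmu()
    session = coreemu.create_session()
    # ... add nodes and links to the session here ...
    coreemu.delete_session(session.session_id)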

View file

@ -0,0 +1,223 @@
from core.enumerations import LinkTypes
from core.misc.ipaddress import Ipv4Prefix
from core.misc.ipaddress import Ipv6Prefix
from core.misc.ipaddress import MacAddress
class NodeOptions(object):
"""
Options for creating and updating nodes within core.
"""
def __init__(self, name=None, model="router"):
"""
Create a NodeOptions object.
:param str name: name of node, defaults to node class name postfixed with its id
:param str model: defines services for default and physical nodes, defaults to "router"
"""
self.name = name
self.model = model
self.canvas = None
self.icon = None
self.opaque = None
self.services = []
self.x = None
self.y = None
self.lat = None
self.lon = None
self.alt = None
self.emulation_id = None
self.emulation_server = None
def set_position(self, x, y):
"""
Convenience method for setting position.
:param int x: x position
:param int y: y position
:return: nothing
"""
self.x = x
self.y = y
def set_location(self, lat, lon, alt):
"""
Convenience method for setting location.
:param float lat: latitude
:param float lon: longitude
:param float alt: altitude
:return: nothing
"""
self.lat = lat
self.lon = lon
self.alt = alt
class LinkOptions(object):
"""
Options for creating and updating links within core.
"""
def __init__(self, _type=LinkTypes.WIRED):
"""
Create a LinkOptions object.
:param core.enumerations.LinkTypes _type: type of link, defaults to wired
"""
self.type = _type
self.session = None
self.delay = None
self.bandwidth = None
self.per = None
self.dup = None
self.jitter = None
self.mer = None
self.burst = None
self.mburst = None
self.gui_attributes = None
self.unidirectional = None
self.emulation_id = None
self.network_id = None
self.key = None
self.opaque = None
class IpPrefixes(object):
"""
Convenience class to help generate IP4 and IP6 addresses for nodes within CORE.
"""
def __init__(self, ip4_prefix=None, ip6_prefix=None):
"""
Creates an IpPrefixes object.
:param str ip4_prefix: ip4 prefix to use for generation
:param str ip6_prefix: ip6 prefix to use for generation
:raises ValueError: when neither an ip4 nor an ip6 prefix has been provided
"""
if not ip4_prefix and not ip6_prefix:
raise ValueError("ip4 or ip6 must be provided")
self.ip4 = None
if ip4_prefix:
self.ip4 = Ipv4Prefix(ip4_prefix)
self.ip6 = None
if ip6_prefix:
self.ip6 = Ipv6Prefix(ip6_prefix)
def ip4_address(self, node):
"""
Convenience method to return the IP4 address for a node.
:param node: node to get IP4 address for
:return: IP4 address or None
:rtype: str
"""
if not self.ip4:
raise ValueError("ip4 prefixes have not been set")
return str(self.ip4.addr(node.objid))
def ip6_address(self, node):
"""
Convenience method to return the IP6 address for a node.
:param node: node to get IP6 address for
:return: IP6 address or None
:rtype: str
"""
if not self.ip6:
raise ValueError("ip6 prefixes have not been set")
return str(self.ip6.addr(node.objid))
def create_interface(self, node, name=None, mac=None):
"""
Creates interface data for linking nodes, using the node's unique id for generation, along with a random
mac address, unless one is provided.
:param core.coreobj.PyCoreNode node: node to create interface for
:param str name: name to set for interface, default is eth{id}
:param str mac: mac address to use for this interface, default is random generation
:return: new interface data for the provided node
:rtype: InterfaceData
"""
# interface id
interface_id = node.newifindex()
# generate ip4 data
ip4 = None
ip4_mask = None
if self.ip4:
ip4 = str(self.ip4.addr(node.objid))
ip4_mask = self.ip4.prefixlen
# generate ip6 data
ip6 = None
ip6_mask = None
if self.ip6:
ip6 = str(self.ip6.addr(node.objid))
ip6_mask = self.ip6.prefixlen
# random mac
if not mac:
mac = str(MacAddress.random())
return InterfaceData(
_id=interface_id,
name=name,
ip4=ip4,
ip4_mask=ip4_mask,
ip6=ip6,
ip6_mask=ip6_mask,
mac=mac
)
class InterfaceData(object):
"""
Convenience class for storing interface data.
"""
def __init__(self, _id, name, mac, ip4, ip4_mask, ip6, ip6_mask):
"""
Creates an InterfaceData object.
:param int _id: interface id
:param str name: interface name
:param str mac: interface mac address
:param str ip4: ip4 address
:param int ip4_mask: ip4 address mask length
:param str ip6: ip6 address
:param int ip6_mask: ip6 address mask length
"""
self.id = _id
self.name = name
self.mac = mac
self.ip4 = ip4
self.ip4_mask = ip4_mask
self.ip6 = ip6
self.ip6_mask = ip6_mask
def has_ip4(self):
return all([self.ip4, self.ip4_mask])
def has_ip6(self):
return all([self.ip6, self.ip6_mask])
def ip4_address(self):
if self.has_ip4():
return "%s/%s" % (self.ip4, self.ip4_mask)
else:
return None
def ip6_address(self):
if self.has_ip6():
return "%s/%s" % (self.ip6, self.ip6_mask)
else:
return None
def get_addresses(self):
ip4 = self.ip4_address()
ip6 = self.ip6_address()
return [i for i in [ip4, ip6] if i]
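# Illustrative usage sketch (not part of the original file): generating
# interface data for two nodes from a shared ip4 prefix. Assumes "node_one"
# and "node_two" are existing CORE nodes; the prefix is an arbitrary example.
def _example_ip_prefixes(node_one, node_two):
    prefixes = IpPrefixes(ip4_prefix="10.83.0.0/16")
    interface_one = prefixes.create_interface(node_one)
    interface_two = prefixes.create_interface(node_two)
    # each InterfaceData provides "address/prefixlen" strings for assignment
    return interface_one.get_addresses(), interface_two.get_addresses()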

View file

@ -69,7 +69,6 @@ class NodeTypes(Enum):
"""
DEFAULT = 0
PHYSICAL = 1
XEN = 2
TBD = 3
SWITCH = 4
HUB = 5

View file

@ -63,7 +63,7 @@ class CoreLocation(ConfigurableManager):
values = config_data.data_values
if values is None:
logger.info("location data missing")
logger.warn("location data missing")
return None
values = values.split('|')

View file

@ -215,7 +215,7 @@ class EventLoop(object):
"""
Add an event to the event loop.
:param int delaysec: delay in seconds for event
:param float delaysec: delay in seconds for event
:param func: event function
:param args: event arguments
:param kwds: event keyword arguments

View file

@ -8,13 +8,11 @@ from core.enumerations import NodeTypes
from core.netns import nodes
from core.netns.vnet import GreTapBridge
from core.phys import pnodes
from core.xen import xen
# legacy core nodes, that leverage linux bridges
NODES = {
NodeTypes.DEFAULT: nodes.CoreNode,
NodeTypes.PHYSICAL: pnodes.PhysicalNode,
NodeTypes.XEN: xen.XenNode,
NodeTypes.TBD: None,
NodeTypes.SWITCH: nodes.SwitchNode,
NodeTypes.HUB: nodes.HubNode,

View file

@ -2,13 +2,20 @@
Serves as a global point for storing and retrieving node types needed during simulation.
"""
import pprint
from core import logger
_NODE_MAP = None
def _log_map():
global _NODE_MAP
for key, value in _NODE_MAP.iteritems():
name = None
if value:
name = value.__name__
logger.info("node type (%s) - class (%s)", key.name, name)
def _convert_map(x, y):
"""
Convenience method to create a human readable version of the node map to log.
@ -21,6 +28,18 @@ def _convert_map(x, y):
return x
def update_node_map(node_map):
"""
Update the current node map with the provided node map values.
:param dict node_map: node map to update with
"""
global _NODE_MAP
_NODE_MAP.update(node_map)
_log_map()
def set_node_map(node_map):
"""
Set the global node map that provides a consistent way to retrieve differently configured nodes.
@ -29,9 +48,8 @@ def set_node_map(node_map):
:return: nothing
"""
global _NODE_MAP
print_map = reduce(lambda x, y: _convert_map(x, y), node_map.items(), {})
logger.info("setting node class map: \n%s", pprint.pformat(print_map, indent=4))
_NODE_MAP = node_map
_log_map()
def get_node_class(node_type):

View file

@ -136,7 +136,7 @@ router ospf6
:param routerid: router id
:param str redistribute: redistribute value
"""
ospf6ifs = utils.maketuple(ospf6ifs)
ospf6ifs = utils.make_tuple(ospf6ifs)
interfaces = "\n!\n".join(map(str, ospf6ifs))
ospfifs = "\n ".join(map(lambda x: "interface %s area %s" % (x.name(), area), ospf6ifs))
Conf.__init__(self, interfaces=interfaces, routerid=routerid, ospfifs=ospfifs, redistribute=redistribute)
@ -163,9 +163,9 @@ $forwarding
:param str logfile: log file name
:param debugs: debug options
"""
routers = "\n!\n".join(map(str, utils.maketuple(routers)))
routers = "\n!\n".join(map(str, utils.make_tuple(routers)))
if debugs:
debugs = "\n".join(utils.maketuple(debugs))
debugs = "\n".join(utils.make_tuple(debugs))
else:
debugs = "! no debugs"
forwarding = "ip forwarding\nipv6 forwarding"

View file

@ -40,7 +40,7 @@ def pack_values(clazz, packers):
value = transformer(value)
# pack and add to existing data
logger.info("packing: %s - %s", tlv_type, value)
logger.debug("packing: %s - %s", tlv_type, value)
data += clazz.pack(tlv_type.value, value)
return data

View file

@ -2,16 +2,89 @@
Miscellaneous utility functions, wrappers around some subprocess procedures.
"""
import importlib
import inspect
import os
import shlex
import subprocess
import sys
import fcntl
import resource
from core import CoreCommandError
from core import logger
DEVNULL = open(os.devnull, "wb")
def closeonexec(fd):
def _detach_init():
"""
Fork a child process and exit.
:return: nothing
"""
if os.fork():
# parent exits
os._exit(0)
os.setsid()
def _valid_module(path, file_name):
"""
Check if file is a valid python module.
:param str path: path to file
:param str file_name: file name to check
:return: True if a valid python module file, False otherwise
:rtype: bool
"""
file_path = os.path.join(path, file_name)
if not os.path.isfile(file_path):
return False
if file_name.startswith("_"):
return False
if not file_name.endswith(".py"):
return False
return True
def _is_class(module, member, clazz):
"""
Validates if a module member is a class and an instance of a CoreService.
:param module: module to validate for service
:param member: member to validate for service
:param clazz: clazz type to check for validation
:return: True if a valid service, False otherwise
:rtype: bool
"""
if not inspect.isclass(member):
return False
if not issubclass(member, clazz):
return False
if member.__module__ != module.__name__:
return False
return True
def _is_exe(file_path):
"""
Check if a given file path exists and is an executable file.
:param str file_path: file path to check
:return: True if the file is considered an executable file, False otherwise
:rtype: bool
"""
return os.path.isfile(file_path) and os.access(file_path, os.X_OK)
def close_onexec(fd):
"""
Close on execution of a shell process.
@ -31,56 +104,11 @@ def check_executables(executables):
:raises EnvironmentError: when an executable doesn't exist or is not executable
"""
for executable in executables:
if not is_exe(executable):
if not _is_exe(executable):
raise EnvironmentError("executable not found: %s" % executable)
def is_exe(file_path):
"""
Check if a given file path exists and is an executable file.
:param str file_path: file path to check
:return: True if the file is considered and executable file, False otherwise
:rtype: bool
"""
return os.path.isfile(file_path) and os.access(file_path, os.X_OK)
def which(program):
"""
From: http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python
:param str program: program to check for
:return: path if it exists, none otherwise
"""
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip("\"")
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def ensurepath(pathlist):
"""
Checks a list of paths are contained within the environment path, if not add it to the path.
:param list[str] pathlist: list of paths to check
:return: nothing
"""
searchpath = os.environ["PATH"].split(":")
for p in set(pathlist):
if p not in searchpath:
os.environ["PATH"] += ":" + p
def maketuple(obj):
def make_tuple(obj):
"""
Create a tuple from an object, or return the object itself.
@ -94,7 +122,7 @@ def maketuple(obj):
return obj,
def maketuplefromstr(s, value_type):
def make_tuple_fromstr(s, value_type):
"""
Create a tuple from a string.
@ -108,117 +136,103 @@ def maketuplefromstr(s, value_type):
return tuple(value_type(i) for i in values)
def mutecall(*args, **kwargs):
def split_args(args):
"""
Run a muted call command.
Convenience method for splitting potential string commands into a shell-like syntax list.
:param list args: arguments for the command
:param dict kwargs: keyword arguments for the command
:return: command result
:rtype: int
:param list/str args: command list or string
:return: shell-like syntax list
:rtype: list
"""
kwargs["stdout"] = open(os.devnull, "w")
kwargs["stderr"] = subprocess.STDOUT
return subprocess.call(*args, **kwargs)
if type(args) == str:
args = shlex.split(args)
return args
def mutecheck_call(*args, **kwargs):
"""
Run a muted check call command.
:param list args: arguments for the command
:param dict kwargs: keyword arguments for the command
:return: command result
:rtype: int
"""
kwargs["stdout"] = open(os.devnull, "w")
kwargs["stderr"] = subprocess.STDOUT
return subprocess.check_call(*args, **kwargs)
def spawn(*args, **kwargs):
"""
Wrapper for running a spawn command and returning the process id.
:param list args: arguments for the command
:param dict kwargs: keyword arguments for the command
:return: process id of the command
:rtype: int
"""
return subprocess.Popen(*args, **kwargs).pid
def mutespawn(*args, **kwargs):
"""
Wrapper for running a muted spawned command.
:param list args: arguments for the command
:param dict kwargs: keyword arguments for the command
:return: process id of the command
:rtype: int
"""
kwargs["stdout"] = open(os.devnull, "w")
kwargs["stderr"] = subprocess.STDOUT
return subprocess.Popen(*args, **kwargs).pid
def detachinit():
"""
Fork a child process and exit.
:return: nothing
"""
if os.fork():
# parent exits
os._exit(0)
os.setsid()
def detach(*args, **kwargs):
"""
Run a detached process by forking it.
:param list args: arguments for the command
:param dict kwargs: keyword arguments for the command
:return: process id of the command
:rtype: int
"""
kwargs["preexec_fn"] = detachinit
return subprocess.Popen(*args, **kwargs).pid
def mutedetach(*args, **kwargs):
def mute_detach(args, **kwargs):
"""
Run a muted detached process by forking it.
:param list args: arguments for the command
:param list[str]|str args: arguments for the command
:param dict kwargs: keyword arguments for the command
:return: process id of the command
:rtype: int
"""
kwargs["preexec_fn"] = detachinit
kwargs["stdout"] = open(os.devnull, "w")
args = split_args(args)
kwargs["preexec_fn"] = _detach_init
kwargs["stdout"] = DEVNULL
kwargs["stderr"] = subprocess.STDOUT
return subprocess.Popen(*args, **kwargs).pid
return subprocess.Popen(args, **kwargs).pid
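# Illustrative usage sketch (not part of the original file): both helpers above
# accept either a command string or an argument list; the command shown is an
# arbitrary example.
def _example_mute_detach():
    # string form is run through split_args() before reaching Popen
    pid = mute_detach("sleep 60")
    # list form is passed through split_args() unchanged
    args = split_args(["sleep", "60"])
    return pid, args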
def cmdresult(args):
def cmd(args, wait=True):
"""
Runs a command on the host and returns the exit status.
:param list[str]|str args: command arguments
:param bool wait: wait for command to end or not
:return: command status
:rtype: int
"""
args = split_args(args)
logger.debug("command: %s", args)
try:
p = subprocess.Popen(args)
if not wait:
return 0
return p.wait()
except OSError:
raise CoreCommandError(-1, args)
def cmd_output(args):
"""
Execute a command on the host and return a tuple containing the exit status and result string. stderr output
is folded into the stdout result string.
:param list args: command arguments
:param list[str]|str args: command arguments
:return: command status and stdout
:rtype: tuple[int, str]
:raises CoreCommandError: when the file to execute is not found
"""
cmdid = subprocess.Popen(args, stdin=open(os.devnull, "r"), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
# err will always be None
result, err = cmdid.communicate()
status = cmdid.wait()
return status, result
args = split_args(args)
logger.debug("command: %s", args)
try:
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, _ = p.communicate()
status = p.wait()
return status, stdout.strip()
except OSError:
raise CoreCommandError(-1, args)
def hexdump(s, bytes_per_word=2, words_per_line=8):
def check_cmd(args, **kwargs):
"""
Execute a command on the host and return its combined stdout and stderr output, raising
CoreCommandError when the exit status is non-zero.
:param list[str]|str args: command arguments
:param dict kwargs: keyword arguments to pass to subprocess.Popen
:return: combined stdout and stderr
:rtype: str
:raises CoreCommandError: when there is a non-zero exit status or the file to execute is not found
"""
kwargs["stdout"] = subprocess.PIPE
kwargs["stderr"] = subprocess.STDOUT
args = split_args(args)
logger.debug("command: %s", args)
try:
p = subprocess.Popen(args, **kwargs)
stdout, _ = p.communicate()
status = p.wait()
if status != 0:
raise CoreCommandError(status, args, stdout)
return stdout.strip()
except OSError:
raise CoreCommandError(-1, args)
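# Illustrative usage sketch (not part of the original file): contrasting the
# three command helpers above; the command used is an arbitrary example.
def _example_command_helpers():
    # fire and forget, returns 0 without waiting for the command to finish
    cmd("ip link show", wait=False)
    # returns (status, combined stdout/stderr) without raising on failure
    status, output = cmd_output(["ip", "link", "show"])
    # returns combined output, raising CoreCommandError on a non-zero status
    checked_output = check_cmd("ip link show")
    return status, output, checked_output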
def hex_dump(s, bytes_per_word=2, words_per_line=8):
"""
Hex dump of a string.
@ -229,10 +243,11 @@ def hexdump(s, bytes_per_word=2, words_per_line=8):
"""
dump = ""
count = 0
bytes = bytes_per_word * words_per_line
total_bytes = bytes_per_word * words_per_line
while s:
line = s[:bytes]
s = s[bytes:]
line = s[:total_bytes]
s = s[total_bytes:]
tmp = map(lambda x: ("%02x" * bytes_per_word) % x,
zip(*[iter(map(ord, line))] * bytes_per_word))
if len(line) % 2:
@ -242,7 +257,7 @@ def hexdump(s, bytes_per_word=2, words_per_line=8):
return dump[:-1]
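# Illustrative usage sketch (not part of the original file): hex_dump() renders
# a string as grouped hexadecimal words, useful when inspecting raw TLV
# payloads; the payload here is an arbitrary example.
def _example_hex_dump(payload="\x01\x02\x03\x04hello"):
    return hex_dump(payload, bytes_per_word=2, words_per_line=8)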
def filemunge(pathname, header, text):
def file_munge(pathname, header, text):
"""
Insert text at the end of a file, surrounded by header comments.
@ -252,15 +267,15 @@ def filemunge(pathname, header, text):
:return: nothing
"""
# prevent duplicates
filedemunge(pathname, header)
f = open(pathname, "a")
f.write("# BEGIN %s\n" % header)
f.write(text)
f.write("# END %s\n" % header)
f.close()
file_demunge(pathname, header)
with open(pathname, "a") as append_file:
append_file.write("# BEGIN %s\n" % header)
append_file.write(text)
append_file.write("# END %s\n" % header)
def filedemunge(pathname, header):
def file_demunge(pathname, header):
"""
Remove text that was inserted in a file surrounded by header comments.
@ -268,25 +283,27 @@ def filedemunge(pathname, header):
:param str header: header text to target for removal
:return: nothing
"""
f = open(pathname, "r")
lines = f.readlines()
f.close()
with open(pathname, "r") as read_file:
lines = read_file.readlines()
start = None
end = None
for i in range(len(lines)):
if lines[i] == "# BEGIN %s\n" % header:
start = i
elif lines[i] == "# END %s\n" % header:
end = i + 1
if start is None or end is None:
return
f = open(pathname, "w")
lines = lines[:start] + lines[end:]
f.write("".join(lines))
f.close()
with open(pathname, "w") as write_file:
lines = lines[:start] + lines[end:]
write_file.write("".join(lines))
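# Illustrative usage sketch (not part of the original file): file_munge() and
# file_demunge() bracket generated text with BEGIN/END comments so it can be
# cleanly removed later. The path, header, and text are arbitrary examples and
# the file is assumed to already exist.
def _example_file_munge(pathname="/tmp/example.conf"):
    file_munge(pathname, "core session 1", "generated line one\ngenerated line two\n")
    # ... use the modified file ...
    file_demunge(pathname, "core session 1")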
def expandcorepath(pathname, session=None, node=None):
def expand_corepath(pathname, session=None, node=None):
"""
Expand a file path given session information.
@ -301,13 +318,15 @@ def expandcorepath(pathname, session=None, node=None):
pathname = pathname.replace("%SESSION%", str(session.session_id))
pathname = pathname.replace("%SESSION_DIR%", session.session_dir)
pathname = pathname.replace("%SESSION_USER%", session.user)
if node is not None:
pathname = pathname.replace("%NODE%", str(node.objid))
pathname = pathname.replace("%NODENAME%", node.name)
return pathname
def sysctldevname(devname):
def sysctl_devname(devname):
"""
Translate a device name to the name used with sysctl.
@ -320,96 +339,7 @@ def sysctldevname(devname):
return devname.replace(".", "/")
def daemonize(rootdir="/", umask=0, close_fds=False, dontclose=(),
stdin=os.devnull, stdout=os.devnull, stderr=os.devnull,
stdoutmode=0644, stderrmode=0644, pidfilename=None,
defaultmaxfd=1024):
"""
Run the background process as a daemon.
:param str rootdir: root directory for daemon
:param int umask: umask for daemon
:param bool close_fds: flag to close file descriptors
:param dontclose: dont close options
:param stdin: stdin for daemon
:param stdout: stdout for daemon
:param stderr: stderr for daemon
:param int stdoutmode: stdout mode
:param int stderrmode: stderr mode
:param str pidfilename: pid file name
:param int defaultmaxfd: default max file descriptors
:return: nothing
"""
if not hasattr(dontclose, "__contains__"):
if not isinstance(dontclose, int):
raise TypeError("dontclose must be an integer")
dontclose = (int(dontclose),)
else:
for fd in dontclose:
if not isinstance(fd, int):
raise TypeError("dontclose must contain only integers")
# redirect stdin
if stdin:
fd = os.open(stdin, os.O_RDONLY)
os.dup2(fd, 0)
os.close(fd)
# redirect stdout
if stdout:
fd = os.open(stdout, os.O_WRONLY | os.O_CREAT | os.O_APPEND,
stdoutmode)
os.dup2(fd, 1)
if stdout == stderr:
os.dup2(1, 2)
os.close(fd)
# redirect stderr
if stderr and (stderr != stdout):
fd = os.open(stderr, os.O_WRONLY | os.O_CREAT | os.O_APPEND,
stderrmode)
os.dup2(fd, 2)
os.close(fd)
if os.fork():
# parent exits
os._exit(0)
os.setsid()
pid = os.fork()
if pid:
if pidfilename:
try:
f = open(pidfilename, "w")
f.write("%s\n" % pid)
f.close()
except IOError:
logger.exception("error writing to file: %s", pidfilename)
# parent exits
os._exit(0)
if rootdir:
os.chdir(rootdir)
os.umask(umask)
if close_fds:
try:
maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
if maxfd == resource.RLIM_INFINITY:
raise ValueError
except:
maxfd = defaultmaxfd
for fd in xrange(3, maxfd):
if fd in dontclose:
continue
try:
os.close(fd)
except IOError:
logger.exception("error closing file descriptor")
def readfileintodict(filename, d):
def load_config(filename, d):
"""
Read key=value pairs from a file, into a dict. Skip comments; strip newline characters and spacing.
@ -419,29 +349,54 @@ def readfileintodict(filename, d):
"""
with open(filename, "r") as f:
lines = f.readlines()
for l in lines:
if l[:1] == "#":
for line in lines:
if line[:1] == "#":
continue
try:
key, value = l.split("=", 1)
key, value = line.split("=", 1)
d[key] = value.strip()
except ValueError:
logger.exception("error reading file to dict: %s", filename)
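# Illustrative usage sketch (not part of the original file): load_config()
# fills an existing dict with key=value pairs, skipping comment lines. The
# file name and contents shown are arbitrary examples.
def _example_load_config(filename="/tmp/example.conf"):
    # given a file containing lines such as:
    #   # sample comment
    #   some_key = some value
    config = {}
    load_config(filename, config)
    return config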
def checkforkernelmodule(name):
def load_classes(path, clazz):
"""
Return a string if a Linux kernel module is loaded, None otherwise.
The string is the line from /proc/modules containing the module name,
memory size (bytes), number of loaded instances, dependencies, state,
and kernel memory offset.
Dynamically load classes for use within CORE.
:param str name: name of kernel module to check for
:return: kernel module line, None otherwise
:rtype: str
:param path: path to load classes from
:param clazz: class type expected to be inherited from for loading
:return: list of classes loaded
"""
with open("/proc/modules", "r") as f:
for line in f:
if line.startswith(name + " "):
return line.rstrip()
return None
# validate path exists
logger.debug("attempting to load modules from path: %s", path)
if not os.path.isdir(path):
logger.warn("invalid custom module directory specified: %s", path)
# check if path is in sys.path
parent_path = os.path.dirname(path)
if parent_path not in sys.path:
logger.debug("adding parent path to allow imports: %s", parent_path)
sys.path.append(parent_path)
# retrieve potential service modules, and filter out invalid modules
base_module = os.path.basename(path)
module_names = os.listdir(path)
module_names = filter(lambda x: _valid_module(path, x), module_names)
module_names = map(lambda x: x[:-3], module_names)
# import and add all service modules in the path
classes = []
for module_name in module_names:
import_statement = "%s.%s" % (base_module, module_name)
logger.debug("importing custom module: %s", import_statement)
try:
module = importlib.import_module(import_statement)
members = inspect.getmembers(module, lambda x: _is_class(module, x, clazz))
for member in members:
valid_class = member[1]
classes.append(valid_class)
except:
logger.exception("unexpected error during import, skipping: %s", import_statement)
return classes
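# Illustrative usage sketch (not part of the original file): load_classes() can
# discover custom service classes from a directory. The directory path is a
# placeholder and the CoreService import path is an assumption, not confirmed
# by this file.
def _example_load_custom_services():
    from core.service import CoreService
    return load_classes("/home/user/.core/myservices", CoreService)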

View file

@ -5,7 +5,6 @@ mobility.py: mobility helpers for moving nodes and calculating wireless range.
import heapq
import math
import os
import subprocess
import threading
import time
@ -21,6 +20,7 @@ from core.enumerations import MessageFlags
from core.enumerations import MessageTypes
from core.enumerations import NodeTlvs
from core.enumerations import RegisterTlvs
from core.misc import utils
from core.misc.ipaddress import IpAddress
@ -525,8 +525,8 @@ class BasicRangeModel(WirelessModel):
with self.wlan._linked_lock:
linked = self.wlan.linked(a, b)
logger.info("checking range netif1(%s) netif2(%s): linked(%s) actual(%s) > config(%s)",
a.name, b.name, linked, d, self.range)
logger.debug("checking range netif1(%s) netif2(%s): linked(%s) actual(%s) > config(%s)",
a.name, b.name, linked, d, self.range)
if d > self.range:
if linked:
logger.info("was linked, unlinking")
@ -1152,11 +1152,7 @@ class Ns2ScriptedMobility(WayPointMobility):
:rtype: int
"""
nodenum = int(nodenum)
try:
return self.nodemap[nodenum]
except KeyError:
logger.exception("error finding value in node map, ignored and returns node id")
return nodenum
return self.nodemap.get(nodenum, nodenum)
def startup(self):
"""
@ -1237,11 +1233,5 @@ class Ns2ScriptedMobility(WayPointMobility):
if filename is None or filename == '':
return
filename = self.findfile(filename)
try:
subprocess.check_call(
["/bin/sh", filename, typestr],
cwd=self.session.sessiondir,
env=self.session.get_environment()
)
except subprocess.CalledProcessError:
logger.exception("Error running script '%s' for WLAN state %s", filename, typestr)
args = ["/bin/sh", filename, typestr]
utils.check_cmd(args, cwd=self.session.sessiondir, env=self.session.get_environment())

View file

@ -4,11 +4,11 @@ implementing specific node types.
"""
import socket
import subprocess
import threading
from socket import AF_INET
from socket import AF_INET6
from core import CoreCommandError
from core import constants
from core import logger
from core.coreobj import PyCoreNetIf
@ -68,32 +68,35 @@ class CtrlNet(LxBrNet):
Startup functionality for the control network.
:return: nothing
:raises CoreCommandError: when there is a command exception
"""
if self.detectoldbridge():
return
LxBrNet.startup(self)
if self.hostid:
addr = self.prefix.addr(self.hostid)
else:
addr = self.prefix.max_addr()
msg = "Added control network bridge: %s %s" % (self.brname, self.prefix)
addrlist = ["%s/%s" % (addr, self.prefix.prefixlen)]
logger.info("added control network bridge: %s %s", self.brname, self.prefix)
if self.assign_address:
addrlist = ["%s/%s" % (addr, self.prefix.prefixlen)]
self.addrconfig(addrlist=addrlist)
msg += " address %s" % addr
logger.info(msg)
if self.updown_script is not None:
logger.info("interface %s updown script (%s startup) called",
self.brname, self.updown_script)
subprocess.check_call([self.updown_script, self.brname, "startup"])
if self.serverintf is not None:
try:
subprocess.check_call([constants.BRCTL_BIN, "addif", self.brname, self.serverintf])
subprocess.check_call([constants.IP_BIN, "link", "set", self.serverintf, "up"])
except subprocess.CalledProcessError:
logger.exception("Error joining server interface %s to controlnet bridge %s",
self.serverintf, self.brname)
logger.info("address %s", addr)
if self.updown_script:
logger.info("interface %s updown script (%s startup) called", self.brname, self.updown_script)
utils.check_cmd([self.updown_script, self.brname, "startup"])
if self.serverintf:
# sets the interface as a port of the bridge
utils.check_cmd([constants.BRCTL_BIN, "addif", self.brname, self.serverintf])
# bring interface up
utils.check_cmd([constants.IP_BIN, "link", "set", self.serverintf, "up"])
def detectoldbridge(self):
"""
@ -103,32 +106,23 @@ class CtrlNet(LxBrNet):
:return: True if an old bridge was detected, False otherwise
:rtype: bool
"""
retstat, retstr = utils.cmdresult([constants.BRCTL_BIN, "show"])
if retstat != 0:
status, output = utils.cmd_output([constants.BRCTL_BIN, "show"])
if status != 0:
logger.error("Unable to retrieve list of installed bridges")
lines = retstr.split("\n")
for line in lines[1:]:
cols = line.split("\t")
oldbr = cols[0]
flds = cols[0].split(".")
if len(flds) == 3:
if flds[0] == "b" and flds[1] == self.objid:
logger.error(
"Error: An active control net bridge (%s) found. " \
"An older session might still be running. " \
"Stop all sessions and, if needed, delete %s to continue." % \
(oldbr, oldbr)
)
return True
"""
# Do this if we want to delete the old bridge
logger.warn("Warning: Old %s bridge found: %s" % (self.objid, oldbr))
try:
check_call([BRCTL_BIN, "delbr", oldbr])
except subprocess.CalledProcessError as e:
logger.exception("Error deleting old bridge %s", oldbr, e)
logger.info("Deleted %s", oldbr)
"""
else:
lines = output.split("\n")
for line in lines[1:]:
cols = line.split("\t")
oldbr = cols[0]
flds = cols[0].split(".")
if len(flds) == 3:
if flds[0] == "b" and flds[1] == self.objid:
logger.error(
"error: An active control net bridge (%s) found. "
"An older session might still be running. "
"Stop all sessions and, if needed, delete %s to continue.", oldbr, oldbr
)
return True
return False
def shutdown(self):
@ -139,21 +133,26 @@ class CtrlNet(LxBrNet):
"""
if self.serverintf is not None:
try:
subprocess.check_call([constants.BRCTL_BIN, "delif", self.brname, self.serverintf])
except subprocess.CalledProcessError:
logger.exception("Error deleting server interface %s to controlnet bridge %s",
self.serverintf, self.brname)
utils.check_cmd([constants.BRCTL_BIN, "delif", self.brname, self.serverintf])
except CoreCommandError:
logger.exception("error deleting server interface %s from bridge %s", self.serverintf, self.brname)
if self.updown_script is not None:
logger.info("interface %s updown script (%s shutdown) called" % (self.brname, self.updown_script))
subprocess.check_call([self.updown_script, self.brname, "shutdown"])
try:
logger.info("interface %s updown script (%s shutdown) called", self.brname, self.updown_script)
utils.check_cmd([self.updown_script, self.brname, "shutdown"])
except CoreCommandError:
logger.exception("error running updown script shutdown")
LxBrNet.shutdown(self)
def all_link_data(self, flags):
"""
Do not include CtrlNet in link messages describing this session.
:return: nothing
:param flags: message flags
:return: list of link data
:rtype: list[core.data.LinkData]
"""
return []
@ -175,29 +174,36 @@ class PtpNet(LxBrNet):
"""
Attach a network interface, but limit attachment to two interfaces.
:param core.coreobj.PyCoreNetIf netif: network interface
:param core.netns.vif.VEth netif: network interface
:return: nothing
"""
if len(self._netif) >= 2:
raise ValueError("Point-to-point links support at most 2 network interfaces")
LxBrNet.attach(self, netif)
def data(self, message_type):
def data(self, message_type, lat=None, lon=None, alt=None):
"""
Do not generate a Node Message for point-to-point links. They are
built using a link message instead.
:return: nothing
:param message_type: purpose for the data object we are creating
:param float lat: latitude
:param float lon: longitude
:param float alt: altitude
:return: node data object
:rtype: core.data.NodeData
"""
pass
return None
def all_link_data(self, flags):
"""
Build CORE API TLVs for a point-to-point link. One Link message
describes this network.
:return: all link data
:rtype: list[LinkData]
:param flags: message flags
:return: list of link data
:rtype: list[core.data.LinkData]
"""
all_links = []
@ -321,10 +327,13 @@ class HubNode(LxBrNet):
:param int objid: node id
:param str name: node name
:param bool start: start flag
:raises CoreCommandError: when there is a command exception
"""
LxBrNet.__init__(self, session, objid, name, start)
# TODO: move to startup method
if start:
subprocess.check_call([constants.BRCTL_BIN, "setageing", self.brname, "0"])
utils.check_cmd([constants.BRCTL_BIN, "setageing", self.brname, "0"])
class WlanNode(LxBrNet):
@ -356,7 +365,7 @@ class WlanNode(LxBrNet):
"""
Attach a network interface.
:param core.coreobj.PyCoreNetIf netif: network interface
:param core.netns.vif.VEth netif: network interface
:return: nothing
"""
LxBrNet.attach(self, netif)
@ -367,7 +376,6 @@ class WlanNode(LxBrNet):
x, y, z = netif.node.position.get()
# invokes any netif.poshook
netif.setposition(x, y, z)
# self.model.setlinkparams()
def setmodel(self, model, config):
"""
@ -401,25 +409,28 @@ class WlanNode(LxBrNet):
logger.info("updating model %s" % model_name)
if self.model is None or self.model.name != model_name:
return
model = self.model
if model.config_type == RegisterTlvs.WIRELESS.value:
if not model.updateconfig(values):
return
if self.model.position_callback:
for netif in self.netifs():
netif.poshook = self.model.position_callback
if netif.node is not None:
(x, y, z) = netif.node.position.get()
netif.poshook(netif, x, y, z)
self.model.setlinkparams()
def all_link_data(self, flags):
"""
Retrieve all link data.
:param flags: link flags
:return: all link data
:rtype: list[LinkData]
:param flags: message flags
:return: list of link data
:rtype: list[core.data.LinkData]
"""
all_links = LxBrNet.all_link_data(self, flags)
@ -449,7 +460,6 @@ class RJ45Node(PyCoreNode, PyCoreNetIf):
:return:
"""
PyCoreNode.__init__(self, session, objid, name, start=start)
# this initializes net, params, poshook
PyCoreNetIf.__init__(self, node=self, name=name, mtu=mtu)
self.up = False
self.lock = threading.RLock()
@ -457,6 +467,9 @@ class RJ45Node(PyCoreNode, PyCoreNetIf):
# the following are PyCoreNetIf attributes
self.transport_type = "raw"
self.localname = name
self.old_up = False
self.old_addrs = []
if start:
self.startup()
@ -465,15 +478,12 @@ class RJ45Node(PyCoreNode, PyCoreNetIf):
Set the interface in the up state.
:return: nothing
:raises CoreCommandError: when there is a command exception
"""
# interface will also be marked up during net.attach()
self.savestate()
try:
subprocess.check_call([constants.IP_BIN, "link", "set", self.localname, "up"])
self.up = True
except subprocess.CalledProcessError:
logger.exception("failed to run command: %s link set %s up", constants.IP_BIN, self.localname)
utils.check_cmd([constants.IP_BIN, "link", "set", self.localname, "up"])
self.up = True
def shutdown(self):
"""
@ -484,9 +494,14 @@ class RJ45Node(PyCoreNode, PyCoreNetIf):
"""
if not self.up:
return
subprocess.check_call([constants.IP_BIN, "link", "set", self.localname, "down"])
subprocess.check_call([constants.IP_BIN, "addr", "flush", "dev", self.localname])
utils.mutecall([constants.TC_BIN, "qdisc", "del", "dev", self.localname, "root"])
try:
utils.check_cmd([constants.IP_BIN, "link", "set", self.localname, "down"])
utils.check_cmd([constants.IP_BIN, "addr", "flush", "dev", self.localname])
utils.check_cmd([constants.TC_BIN, "qdisc", "del", "dev", self.localname, "root"])
except CoreCommandError:
logger.exception("error shutting down")
self.up = False
self.restorestate()
@ -500,6 +515,7 @@ class RJ45Node(PyCoreNode, PyCoreNetIf):
"""
PyCoreNetIf.attachnet(self, net)
# TODO: issue in that both classes inherited from provide the same method with different signatures
def detachnet(self):
"""
Detach a network.
@ -519,7 +535,9 @@ class RJ45Node(PyCoreNode, PyCoreNetIf):
:param str hwaddr: hardware address
:param int ifindex: interface index
:param str ifname: interface name
:return:
:return: interface index
:rtype: int
:raises ValueError: when an interface has already been created, one max
"""
with self.lock:
if ifindex is None:
@ -537,7 +555,7 @@ class RJ45Node(PyCoreNode, PyCoreNetIf):
self.attachnet(net)
if addrlist:
for addr in utils.maketuple(addrlist):
for addr in utils.make_tuple(addrlist):
self.addaddr(addr)
return ifindex
@ -552,14 +570,12 @@ class RJ45Node(PyCoreNode, PyCoreNetIf):
if ifindex is None:
ifindex = 0
if ifindex not in self._netif:
raise ValueError, "ifindex %s does not exist" % ifindex
self._netif.pop(ifindex)
if ifindex == self.ifindex:
self.shutdown()
else:
raise ValueError, "ifindex %s does not exist" % ifindex
raise ValueError("ifindex %s does not exist" % ifindex)
def netif(self, ifindex, net=None):
"""
@ -602,9 +618,11 @@ class RJ45Node(PyCoreNode, PyCoreNetIf):
:param str addr: address to add
:return: nothing
:raises CoreCommandError: when there is a command exception
"""
if self.up:
subprocess.check_call([constants.IP_BIN, "addr", "add", str(addr), "dev", self.name])
utils.check_cmd([constants.IP_BIN, "addr", "add", str(addr), "dev", self.name])
PyCoreNetIf.addaddr(self, addr)
def deladdr(self, addr):
@ -613,9 +631,11 @@ class RJ45Node(PyCoreNode, PyCoreNetIf):
:param str addr: address to delete
:return: nothing
:raises CoreCommandError: when there is a command exception
"""
if self.up:
subprocess.check_call([constants.IP_BIN, "addr", "del", str(addr), "dev", self.name])
utils.check_cmd([constants.IP_BIN, "addr", "del", str(addr), "dev", self.name])
PyCoreNetIf.deladdr(self, addr)
def savestate(self):
@ -624,23 +644,17 @@ class RJ45Node(PyCoreNode, PyCoreNetIf):
interface for emulation purposes. TODO: save/restore the PROMISC flag
:return: nothing
:raises CoreCommandError: when there is a command exception
"""
self.old_up = False
self.old_addrs = []
cmd = [constants.IP_BIN, "addr", "show", "dev", self.localname]
try:
tmp = subprocess.Popen(cmd, stdout=subprocess.PIPE)
except OSError:
logger.exception("Failed to run %s command: %s", constants.IP_BIN, cmd)
if tmp.wait():
logger.warn("Command failed: %s", cmd)
return
lines = tmp.stdout.read()
tmp.stdout.close()
for l in lines.split("\n"):
items = l.split()
args = [constants.IP_BIN, "addr", "show", "dev", self.localname]
output = utils.check_cmd(args)
for line in output.split("\n"):
items = line.split()
if len(items) < 2:
continue
if items[1] == "%s:" % self.localname:
flags = items[2][1:-1].split(",")
if "UP" in flags:
@ -657,24 +671,71 @@ class RJ45Node(PyCoreNode, PyCoreNetIf):
Restore the addresses and other interface state after using it.
:return: nothing
:raises CoreCommandError: when there is a command exception
"""
for addr in self.old_addrs:
if addr[1] is None:
subprocess.check_call([constants.IP_BIN, "addr", "add", addr[0], "dev", self.localname])
utils.check_cmd([constants.IP_BIN, "addr", "add", addr[0], "dev", self.localname])
else:
subprocess.check_call([constants.IP_BIN, "addr", "add", addr[0], "brd", addr[1], "dev", self.localname])
utils.check_cmd([constants.IP_BIN, "addr", "add", addr[0], "brd", addr[1], "dev", self.localname])
if self.old_up:
subprocess.check_call([constants.IP_BIN, "link", "set", self.localname, "up"])
utils.check_cmd([constants.IP_BIN, "link", "set", self.localname, "up"])
def setposition(self, x=None, y=None, z=None):
"""
Use setposition() from both parent classes.
Uses setposition from both parent classes.
:return: nothing
:param float x: x position
:param float y: y position
:param float z: z position
:return: True if position changed, False otherwise
:rtype: bool
"""
PyCoreObj.setposition(self, x, y, z)
# invoke any poshook
result = PyCoreObj.setposition(self, x, y, z)
PyCoreNetIf.setposition(self, x, y, z)
return result
def check_cmd(self, args):
"""
Runs shell command on node.
:param list[str]|str args: command to run
:return: exit status and combined stdout and stderr
:rtype: tuple[int, str]
:raises CoreCommandError: when a non-zero exit status occurs
"""
raise NotImplementedError
def cmd(self, args, wait=True):
"""
Runs shell command on node, with option to not wait for a result.
:param list[str]|str args: command to run
:param bool wait: wait for command to exit, defaults to True
:return: exit status for command
:rtype: int
"""
raise NotImplementedError
def cmd_output(self, args):
"""
Runs shell command on node and get exit status and output.
:param list[str]|str args: command to run
:return: exit status and combined stdout and stderr
:rtype: tuple[int, str]
"""
raise NotImplementedError
def termcmdstring(self, sh):
"""
Create a terminal command string.
:param str sh: shell to execute command in
:return: str
"""
raise NotImplementedError
class TunnelNode(GreTapBridge):

View file

@ -3,11 +3,11 @@ TODO: probably goes away, or implement the usage of "unshare", or docker formal.
"""
import socket
import subprocess
import threading
from socket import AF_INET
from socket import AF_INET6
from core import CoreCommandError
from core import constants
from core import logger
from core.coreobj import PyCoreNet
@ -36,12 +36,9 @@ utils.check_executables([
def ebtables_commands(call, commands):
ebtables_lock.acquire()
try:
with ebtables_lock:
for command in commands:
call(command)
finally:
ebtables_lock.release()
class OvsNet(PyCoreNet):
@ -81,24 +78,23 @@ class OvsNet(PyCoreNet):
ebtables_queue.startupdateloop(self)
def startup(self):
try:
subprocess.check_call([constants.OVS_BIN, "add-br", self.bridge_name])
except subprocess.CalledProcessError:
logger.exception("error adding bridge")
"""
try:
# turn off spanning tree protocol and forwarding delay
# TODO: appears stp and rstp are off by default, make sure this always holds true
# TODO: apears ovs only supports rstp forward delay and again it"s off by default
subprocess.check_call([constants.IP_BIN, "link", "set", self.bridge_name, "up"])
:return:
:raises CoreCommandError: when there is a command exception
"""
utils.check_cmd([constants.OVS_BIN, "add-br", self.bridge_name])
# create a new ebtables chain for this bridge
ebtables_commands(subprocess.check_call, [
[constants.EBTABLES_BIN, "-N", self.bridge_name, "-P", self.policy],
[constants.EBTABLES_BIN, "-A", "FORWARD", "--logical-in", self.bridge_name, "-j", self.bridge_name]
])
except subprocess.CalledProcessError:
logger.exception("Error setting bridge parameters")
# turn off spanning tree protocol and forwarding delay
# TODO: appears stp and rstp are off by default, make sure this always holds true
# TODO: appears ovs only supports rstp forward delay and again it's off by default
utils.check_cmd([constants.IP_BIN, "link", "set", self.bridge_name, "up"])
# create a new ebtables chain for this bridge
ebtables_commands(utils.check_cmd, [
[constants.EBTABLES_BIN, "-N", self.bridge_name, "-P", self.policy],
[constants.EBTABLES_BIN, "-A", "FORWARD", "--logical-in", self.bridge_name, "-j", self.bridge_name]
])
self.up = True
@ -109,16 +105,18 @@ class OvsNet(PyCoreNet):
ebtables_queue.stopupdateloop(self)
utils.mutecall([constants.IP_BIN, "link", "set", self.bridge_name, "down"])
utils.mutecall([constants.OVS_BIN, "del-br", self.bridge_name])
ebtables_commands(utils.mutecall, [
[constants.EBTABLES_BIN, "-D", "FORWARD", "--logical-in", self.bridge_name, "-j", self.bridge_name],
[constants.EBTABLES_BIN, "-X", self.bridge_name]
])
try:
utils.check_cmd([constants.IP_BIN, "link", "set", self.bridge_name, "down"])
utils.check_cmd([constants.OVS_BIN, "del-br", self.bridge_name])
ebtables_commands(utils.check_cmd, [
[constants.EBTABLES_BIN, "-D", "FORWARD", "--logical-in", self.bridge_name, "-j", self.bridge_name],
[constants.EBTABLES_BIN, "-X", self.bridge_name]
])
except CoreCommandError:
logger.exception("error bringing bridge down and removing it")
# removes veth pairs used for bridge-to-bridge connections
for interface in self.netifs():
# removes veth pairs used for bridge-to-bridge connections
interface.shutdown()
self._netif.clear()
@ -128,22 +126,14 @@ class OvsNet(PyCoreNet):
def attach(self, interface):
if self.up:
try:
subprocess.check_call([constants.OVS_BIN, "add-port", self.bridge_name, interface.localname])
subprocess.check_call([constants.IP_BIN, "link", "set", interface.localname, "up"])
except subprocess.CalledProcessError:
logger.exception("error joining interface %s to bridge %s", interface.localname, self.bridge_name)
return
utils.check_cmd([constants.OVS_BIN, "add-port", self.bridge_name, interface.localname])
utils.check_cmd([constants.IP_BIN, "link", "set", interface.localname, "up"])
PyCoreNet.attach(self, interface)
def detach(self, interface):
if self.up:
try:
subprocess.check_call([constants.OVS_BIN, "del-port", self.bridge_name, interface.localname])
except subprocess.CalledProcessError:
logger.exception("error removing interface %s from bridge %s", interface.localname, self.bridge_name)
return
utils.check_cmd([constants.OVS_BIN, "del-port", self.bridge_name, interface.localname])
PyCoreNet.detach(self, interface)
@ -217,14 +207,14 @@ class OvsNet(PyCoreNet):
limit = 0xffff # max IP payload
tbf = ["tbf", "rate", str(bw), "burst", str(burst), "limit", str(limit)]
logger.info("linkconfig: %s" % [tc + parent + ["handle", "1:"] + tbf])
subprocess.check_call(tc + parent + ["handle", "1:"] + tbf)
utils.check_cmd(tc + parent + ["handle", "1:"] + tbf)
interface.setparam("has_tbf", True)
elif interface.getparam("has_tbf") and bw <= 0:
tcd = [] + tc
tcd[2] = "delete"
if self.up:
subprocess.check_call(tcd + parent)
utils.check_cmd(tcd + parent)
interface.setparam("has_tbf", False)
# removing the parent removes the child
@ -273,12 +263,12 @@ class OvsNet(PyCoreNet):
if self.up:
logger.info("linkconfig: %s" % ([tc + parent + ["handle", "10:"]],))
subprocess.check_call(tc + parent + ["handle", "10:"])
utils.check_cmd(tc + parent + ["handle", "10:"])
interface.setparam("has_netem", False)
elif len(netem) > 1:
if self.up:
logger.info("linkconfig: %s" % ([tc + parent + ["handle", "10:"] + netem],))
subprocess.check_call(tc + parent + ["handle", "10:"] + netem)
utils.check_cmd(tc + parent + ["handle", "10:"] + netem)
interface.setparam("has_netem", True)
def linknet(self, network):
@ -312,8 +302,8 @@ class OvsNet(PyCoreNet):
if network.up:
# this is similar to net.attach() but uses netif.name instead
# of localname
subprocess.check_call([constants.OVS_BIN, "add-port", network.bridge_name, interface.name])
subprocess.check_call([constants.IP_BIN, "link", "set", interface.name, "up"])
utils.check_cmd([constants.OVS_BIN, "add-port", network.bridge_name, interface.name])
utils.check_cmd([constants.IP_BIN, "link", "set", interface.name, "up"])
# TODO: is there a native method for this? see if this causes issues
# i = network.newifindex()
@ -346,10 +336,7 @@ class OvsNet(PyCoreNet):
return
for address in addresses:
try:
subprocess.check_call([constants.IP_BIN, "addr", "add", str(address), "dev", self.bridge_name])
except subprocess.CalledProcessError:
logger.exception("error adding IP address")
utils.check_cmd([constants.IP_BIN, "addr", "add", str(address), "dev", self.bridge_name])
class OvsCtrlNet(OvsNet):
@ -390,23 +377,19 @@ class OvsCtrlNet(OvsNet):
if self.updown_script:
logger.info("interface %s updown script %s startup called" % (self.bridge_name, self.updown_script))
subprocess.check_call([self.updown_script, self.bridge_name, "startup"])
utils.check_cmd([self.updown_script, self.bridge_name, "startup"])
if self.serverintf:
try:
subprocess.check_call([constants.OVS_BIN, "add-port", self.bridge_name, self.serverintf])
subprocess.check_call([constants.IP_BIN, "link", "set", self.serverintf, "up"])
except subprocess.CalledProcessError:
logger.exception("error joining server interface %s to controlnet bridge %s",
self.serverintf, self.bridge_name)
utils.check_cmd([constants.OVS_BIN, "add-port", self.bridge_name, self.serverintf])
utils.check_cmd([constants.IP_BIN, "link", "set", self.serverintf, "up"])
def detectoldbridge(self):
"""
Occassionally, control net bridges from previously closed sessions are not cleaned up.
Occasionally, control net bridges from previously closed sessions are not cleaned up.
Check if there are old control net bridges and delete them
"""
status, output = utils.cmdresult([constants.OVS_BIN, "list-br"])
output = utils.check_cmd([constants.OVS_BIN, "list-br"])
output = output.strip()
if output:
for line in output.split("\n"):
@ -420,14 +403,17 @@ class OvsCtrlNet(OvsNet):
def shutdown(self):
if self.serverintf:
try:
subprocess.check_call([constants.OVS_BIN, "del-port", self.bridge_name, self.serverintf])
except subprocess.CalledProcessError:
logger.exception("Error deleting server interface %s to controlnet bridge %s",
utils.check_cmd([constants.OVS_BIN, "del-port", self.bridge_name, self.serverintf])
except CoreCommandError:
logger.exception("error deleting server interface %s from controlnet bridge %s",
self.serverintf, self.bridge_name)
if self.updown_script:
logger.info("interface %s updown script (%s shutdown) called", self.bridge_name, self.updown_script)
subprocess.check_call([self.updown_script, self.bridge_name, "shutdown"])
try:
logger.info("interface %s updown script (%s shutdown) called", self.bridge_name, self.updown_script)
utils.check_cmd([self.updown_script, self.bridge_name, "shutdown"])
except CoreCommandError:
logger.exception("error during updown script shutdown")
OvsNet.shutdown(self)
@ -446,12 +432,12 @@ class OvsPtpNet(OvsNet):
raise ValueError("point-to-point links support at most 2 network interfaces")
OvsNet.attach(self, interface)
def data(self, message_type):
def data(self, message_type, lat=None, lon=None, alt=None):
"""
Do not generate a Node Message for point-to-point links. They are
built using a link message instead.
"""
pass
return None
def all_link_data(self, flags):
"""
@ -576,7 +562,7 @@ class OvsHubNode(OvsNet):
if start:
# TODO: verify that the below flow accomplishes what is desired for a "HUB"
# TODO: replace "brctl setageing 0"
subprocess.check_call([constants.OVS_FLOW_BIN, "add-flow", self.bridge_name, "action=flood"])
utils.check_cmd([constants.OVS_FLOW_BIN, "add-flow", self.bridge_name, "action=flood"])
class OvsWlanNode(OvsNet):
@ -686,8 +672,8 @@ class OvsGreTapBridge(OvsNet):
if remoteip is None:
self.gretap = None
else:
self.gretap = GreTap(node=self, name=None, session=session, remoteip=remoteip,
objid=None, localip=localip, ttl=ttl, key=self.grekey)
self.gretap = GreTap(node=self, session=session, remoteip=remoteip,
localip=localip, ttl=ttl, key=self.grekey)
if start:
self.startup()
@ -727,7 +713,7 @@ class OvsGreTapBridge(OvsNet):
if len(addresses) > 1:
localip = addresses[1].split("/")[0]
self.gretap = GreTap(session=self.session, remoteip=remoteip, objid=None, name=None,
self.gretap = GreTap(session=self.session, remoteip=remoteip,
localip=localip, ttl=self.ttl, key=self.grekey)
self.attach(self.gretap)

View file

@ -2,9 +2,9 @@
virtual ethernet classes that implement the interfaces available under Linux.
"""
import subprocess
import time
from core import CoreCommandError
from core import constants
from core import logger
from core.coreobj import PyCoreNetIf
@ -25,13 +25,13 @@ class VEth(PyCoreNetIf):
"""
Creates a VEth instance.
:param core.netns.nodes.CoreNode node: related core node
:param core.netns.vnode.SimpleLxcNode node: related core node
:param str name: interface name
:param str localname: interface local name
:param mtu: interface mtu
:param net: network
:param bool start: start flag
:return:
:raises CoreCommandError: when there is a command exception
"""
# note that net arg is ignored
PyCoreNetIf.__init__(self, node=node, name=name, mtu=mtu)
@ -45,10 +45,11 @@ class VEth(PyCoreNetIf):
Interface startup logic.
:return: nothing
:raises CoreCommandError: when there is a command exception
"""
subprocess.check_call([constants.IP_BIN, "link", "add", "name", self.localname,
"type", "veth", "peer", "name", self.name])
subprocess.check_call([constants.IP_BIN, "link", "set", self.localname, "up"])
utils.check_cmd([constants.IP_BIN, "link", "add", "name", self.localname,
"type", "veth", "peer", "name", self.name])
utils.check_cmd([constants.IP_BIN, "link", "set", self.localname, "up"])
self.up = True
def shutdown(self):
@ -59,10 +60,19 @@ class VEth(PyCoreNetIf):
"""
if not self.up:
return
if self.node:
self.node.cmd([constants.IP_BIN, "-6", "addr", "flush", "dev", self.name])
try:
self.node.check_cmd([constants.IP_BIN, "-6", "addr", "flush", "dev", self.name])
except CoreCommandError:
logger.exception("error shutting down interface")
if self.localname:
utils.mutedetach([constants.IP_BIN, "link", "delete", self.localname])
try:
utils.check_cmd([constants.IP_BIN, "link", "delete", self.localname])
except CoreCommandError:
logger.exception("error deleting link")
self.up = False
@ -76,7 +86,7 @@ class TunTap(PyCoreNetIf):
"""
Create a TunTap instance.
:param core.netns.nodes.CoreNode node: related core node
:param core.netns.vnode.SimpleLxcNode node: related core node
:param str name: interface name
:param str localname: local interface name
:param mtu: interface mtu
@ -98,10 +108,10 @@ class TunTap(PyCoreNetIf):
"""
# TODO: more sophisticated TAP creation here
# Debian does not support -p (tap) option, RedHat does.
# For now, this is disabled to allow the TAP to be created by another
# system (e.g. EMANE"s emanetransportd)
# check_call(["tunctl", "-t", self.name])
# self.install()
# For now, this is disabled to allow the TAP to be created by another
# system (e.g. EMANE"s emanetransportd)
# check_call(["tunctl", "-t", self.name])
# self.install()
self.up = True
def shutdown(self):
@ -112,9 +122,12 @@ class TunTap(PyCoreNetIf):
"""
if not self.up:
return
self.node.cmd([constants.IP_BIN, "-6", "addr", "flush", "dev", self.name])
# if self.name:
# mutedetach(["tunctl", "-d", self.localname])
try:
self.node.check_cmd([constants.IP_BIN, "-6", "addr", "flush", "dev", self.name])
except CoreCommandError:
logger.exception("error shutting down tunnel tap")
self.up = False
def waitfor(self, func, attempts=10, maxretrydelay=0.25):
@ -124,26 +137,29 @@ class TunTap(PyCoreNetIf):
:param func: function to wait for a result of zero
:param int attempts: number of attempts to wait for a zero result
:param float maxretrydelay: maximum retry delay
:return: nothing
:return: True if wait succeeded, False otherwise
:rtype: bool
"""
delay = 0.01
result = False
for i in xrange(1, attempts + 1):
r = func()
if r == 0:
return
result = True
break
msg = "attempt %s failed with nonzero exit status %s" % (i, r)
if i < attempts + 1:
msg += ", retrying..."
logger.info(msg)
time.sleep(delay)
delay = delay + delay
delay += delay
if delay > maxretrydelay:
delay = maxretrydelay
else:
msg += ", giving up"
logger.info(msg)
raise RuntimeError("command failed after %s attempts" % attempts)
return result
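As a side note, the reworked waitfor above now reports success with a boolean instead of raising; a minimal standalone sketch of that retry-with-doubling-delay pattern (hypothetical names, not the project's API) looks like this:

import time

def wait_for_zero(func, attempts=10, max_retry_delay=0.25):
    # call func until it returns 0, doubling the delay between tries up to a cap
    delay = 0.01
    for _ in range(attempts):
        if func() == 0:
            return True
        time.sleep(delay)
        delay = min(delay * 2, max_retry_delay)
    return False

# example: wait for a hypothetical check to succeed
# ok = wait_for_zero(lambda: 0)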
def waitfordevicelocal(self):
"""
@ -155,8 +171,8 @@ class TunTap(PyCoreNetIf):
"""
def localdevexists():
cmd = (constants.IP_BIN, "link", "show", self.localname)
return utils.mutecall(cmd)
args = [constants.IP_BIN, "link", "show", self.localname]
return utils.cmd(args)
self.waitfor(localdevexists)
@ -168,23 +184,25 @@ class TunTap(PyCoreNetIf):
"""
def nodedevexists():
cmd = (constants.IP_BIN, "link", "show", self.name)
return self.node.cmd(cmd)
args = [constants.IP_BIN, "link", "show", self.name]
return self.node.cmd(args)
count = 0
while True:
try:
self.waitfor(nodedevexists)
result = self.waitfor(nodedevexists)
if result:
break
except RuntimeError as e:
# check if this is an EMANE interface; if so, continue
# waiting if EMANE is still running
# TODO: remove emane code
if count < 5 and nodeutils.is_node(self.net, NodeTypes.EMANE) and \
self.node.session.emane.emanerunning(self.node):
count += 1
else:
raise e
# check if this is an EMANE interface; if so, continue
# waiting if EMANE is still running
# TODO: remove emane code
should_retry = count < 5
is_emane_node = nodeutils.is_node(self.net, NodeTypes.EMANE)
is_emane_running = self.node.session.emane.emanerunning(self.node)
if all([should_retry, is_emane_node, is_emane_running]):
count += 1
else:
raise RuntimeError("node device failed to exist")
def install(self):
"""
@ -194,20 +212,13 @@ class TunTap(PyCoreNetIf):
end of the TAP.
:return: nothing
:raises CoreCommandError: when there is a command exception
"""
self.waitfordevicelocal()
netns = str(self.node.pid)
try:
subprocess.check_call([constants.IP_BIN, "link", "set", self.localname, "netns", netns])
except subprocess.CalledProcessError:
msg = "error installing TAP interface %s, command:" % self.localname
msg += "ip link set %s netns %s" % (self.localname, netns)
logger.exception(msg)
return
self.node.cmd([constants.IP_BIN, "link", "set", self.localname, "name", self.name])
self.node.cmd([constants.IP_BIN, "link", "set", self.name, "up"])
utils.check_cmd([constants.IP_BIN, "link", "set", self.localname, "netns", netns])
self.node.check_cmd([constants.IP_BIN, "link", "set", self.localname, "name", self.name])
self.node.check_cmd([constants.IP_BIN, "link", "set", self.name, "up"])
def setaddrs(self):
"""
@ -217,7 +228,7 @@ class TunTap(PyCoreNetIf):
"""
self.waitfordevicenode()
for addr in self.addrlist:
self.node.cmd([constants.IP_BIN, "addr", "add", str(addr), "dev", self.name])
self.node.check_cmd([constants.IP_BIN, "addr", "add", str(addr), "dev", self.name])
class GreTap(PyCoreNetIf):
@ -233,7 +244,7 @@ class GreTap(PyCoreNetIf):
"""
Creates a GreTap instance.
:param core.netns.nodes.CoreNode node: related core node
:param core.netns.vnode.SimpleLxcNode node: related core node
:param str name: interface name
:param core.session.Session session: core session instance
:param mtu: interface mtu
@ -243,6 +254,7 @@ class GreTap(PyCoreNetIf):
:param ttl: ttl value
:param key: gre tap key
:param bool start: start flag
:raises CoreCommandError: when there is a command exception
"""
PyCoreNetIf.__init__(self, node=node, name=name, mtu=mtu)
self.session = session
@ -260,17 +272,17 @@ class GreTap(PyCoreNetIf):
if remoteip is None:
raise ValueError, "missing remote IP required for GRE TAP device"
cmd = ("ip", "link", "add", self.localname, "type", "gretap",
"remote", str(remoteip))
args = ["ip", "link", "add", self.localname, "type", "gretap",
"remote", str(remoteip)]
if localip:
cmd += ("local", str(localip))
args += ["local", str(localip)]
if ttl:
cmd += ("ttl", str(ttl))
args += ["ttl", str(ttl)]
if key:
cmd += ("key", str(key))
subprocess.check_call(cmd)
cmd = ("ip", "link", "set", self.localname, "up")
subprocess.check_call(cmd)
args += ["key", str(key)]
utils.check_cmd(args)
args = ["ip", "link", "set", self.localname, "up"]
utils.check_cmd(args)
self.up = True
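The argument lists assembled in startup above correspond to plain iproute2 invocations; a hedged standalone sketch using subprocess directly (device name and addresses are made-up examples):

import subprocess

def create_gretap(localname, remoteip, localip=None, ttl=255, key=None):
    # assemble the same "ip link add ... type gretap" arguments built above
    args = ["ip", "link", "add", localname, "type", "gretap", "remote", str(remoteip)]
    if localip:
        args += ["local", str(localip)]
    if ttl:
        args += ["ttl", str(ttl)]
    if key:
        args += ["key", str(key)]
    subprocess.check_call(args)
    # bring the new device up
    subprocess.check_call(["ip", "link", "set", localname, "up"])

# example (needs root and iproute2); names/addresses are illustrative:
# create_gretap("gt.example", "10.0.0.2", localip="10.0.0.1")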
def shutdown(self):
@ -280,10 +292,14 @@ class GreTap(PyCoreNetIf):
:return: nothing
"""
if self.localname:
cmd = ("ip", "link", "set", self.localname, "down")
subprocess.check_call(cmd)
cmd = ("ip", "link", "del", self.localname)
subprocess.check_call(cmd)
try:
args = ["ip", "link", "set", self.localname, "down"]
utils.check_cmd(args)
args = ["ip", "link", "del", self.localname]
utils.check_cmd(args)
except CoreCommandError:
logger.exception("error during shutdown")
self.localname = None
def data(self, message_type):

View file

@ -4,10 +4,10 @@ Linux Ethernet bridging and ebtables rules.
"""
import os
import subprocess
import threading
import time
from core import CoreCommandError
from core import constants
from core import logger
from core.coreobj import PyCoreNet
@ -59,11 +59,12 @@ class EbtablesQueue(object):
:return: nothing
"""
self.updatelock.acquire()
self.last_update_time[wlan] = time.time()
self.updatelock.release()
with self.updatelock:
self.last_update_time[wlan] = time.time()
if self.doupdateloop:
return
self.doupdateloop = True
self.updatethread = threading.Thread(target=self.updateloop)
self.updatethread.daemon = True
@ -75,15 +76,15 @@ class EbtablesQueue(object):
:return: nothing
"""
self.updatelock.acquire()
try:
del self.last_update_time[wlan]
except KeyError:
logger.exception("error deleting last update time for wlan, ignored before: %s", wlan)
with self.updatelock:
try:
del self.last_update_time[wlan]
except KeyError:
logger.exception("error deleting last update time for wlan, ignored before: %s", wlan)
self.updatelock.release()
if len(self.last_update_time) > 0:
return
self.doupdateloop = False
if self.updatethread:
self.updatethread.join()
@ -137,25 +138,26 @@ class EbtablesQueue(object):
:return: nothing
"""
while self.doupdateloop:
self.updatelock.acquire()
for wlan in self.updates:
"""
Check if wlan is from a previously closed session. Because of the
rate limiting scheme employed here, this may happen if a new session
is started soon after closing a previous session.
"""
try:
wlan.session
except:
# Just mark as updated to remove from self.updates.
self.updated(wlan)
continue
if self.lastupdate(wlan) > self.rate:
self.buildcmds(wlan)
# print "ebtables commit %d rules" % len(self.cmds)
self.ebcommit(wlan)
self.updated(wlan)
self.updatelock.release()
with self.updatelock:
for wlan in self.updates:
"""
Check if wlan is from a previously closed session. Because of the
rate limiting scheme employed here, this may happen if a new session
is started soon after closing a previous session.
"""
# TODO: if these are WlanNodes, this will never throw an exception
try:
wlan.session
except:
# Just mark as updated to remove from self.updates.
self.updated(wlan)
continue
if self.lastupdate(wlan) > self.rate:
self.buildcmds(wlan)
self.ebcommit(wlan)
self.updated(wlan)
time.sleep(self.rate)
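The hunks above swap manual acquire()/release() pairs for the lock used as a context manager; a small standalone sketch of that rate-limited update pattern, with hypothetical names:

import threading
import time

class UpdateQueue(object):
    def __init__(self, rate=1.0):
        self.rate = rate
        self.updates = []
        self.lock = threading.Lock()
        self.running = True

    def enqueue(self, item):
        # the with-statement releases the lock even if an exception is raised
        with self.lock:
            if item not in self.updates:
                self.updates.append(item)

    def loop(self):
        # drain queued items at most once per rate interval
        while self.running:
            with self.lock:
                while self.updates:
                    self.process(self.updates.pop(0))
            time.sleep(self.rate)

    def process(self, item):
        print("processing %s" % item)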
def ebcommit(self, wlan):
@ -165,30 +167,23 @@ class EbtablesQueue(object):
:return: nothing
"""
# save kernel ebtables snapshot to a file
cmd = self.ebatomiccmd(["--atomic-save", ])
try:
subprocess.check_call(cmd)
except subprocess.CalledProcessError:
logger.exception("atomic-save (%s)", cmd)
# no atomic file, exit
return
args = self.ebatomiccmd(["--atomic-save", ])
utils.check_cmd(args)
# modify the table file using queued ebtables commands
for c in self.cmds:
cmd = self.ebatomiccmd(c)
try:
subprocess.check_call(cmd)
except subprocess.CalledProcessError:
logger.exception("cmd=%s", cmd)
args = self.ebatomiccmd(c)
utils.check_cmd(args)
self.cmds = []
# commit the table file to the kernel
cmd = self.ebatomiccmd(["--atomic-commit", ])
args = self.ebatomiccmd(["--atomic-commit", ])
utils.check_cmd(args)
try:
subprocess.check_call(cmd)
os.unlink(self.atomic_file)
except OSError:
logger.exception("atomic-commit (%s)", cmd)
logger.exception("error removing atomic file: %s", self.atomic_file)
def ebchange(self, wlan):
"""
@ -197,10 +192,9 @@ class EbtablesQueue(object):
:return: nothing
"""
self.updatelock.acquire()
if wlan not in self.updates:
self.updates.append(wlan)
self.updatelock.release()
with self.updatelock:
if wlan not in self.updates:
self.updates.append(wlan)
def buildcmds(self, wlan):
"""
@ -208,23 +202,22 @@ class EbtablesQueue(object):
:return: nothing
"""
wlan._linked_lock.acquire()
# flush the chain
self.cmds.extend([["-F", wlan.brname], ])
# rebuild the chain
for netif1, v in wlan._linked.items():
for netif2, linked in v.items():
if wlan.policy == "DROP" and linked:
self.cmds.extend([["-A", wlan.brname, "-i", netif1.localname,
"-o", netif2.localname, "-j", "ACCEPT"],
["-A", wlan.brname, "-o", netif1.localname,
"-i", netif2.localname, "-j", "ACCEPT"]])
elif wlan.policy == "ACCEPT" and not linked:
self.cmds.extend([["-A", wlan.brname, "-i", netif1.localname,
"-o", netif2.localname, "-j", "DROP"],
["-A", wlan.brname, "-o", netif1.localname,
"-i", netif2.localname, "-j", "DROP"]])
wlan._linked_lock.release()
with wlan._linked_lock:
# flush the chain
self.cmds.extend([["-F", wlan.brname], ])
# rebuild the chain
for netif1, v in wlan._linked.items():
for netif2, linked in v.items():
if wlan.policy == "DROP" and linked:
self.cmds.extend([["-A", wlan.brname, "-i", netif1.localname,
"-o", netif2.localname, "-j", "ACCEPT"],
["-A", wlan.brname, "-o", netif1.localname,
"-i", netif2.localname, "-j", "ACCEPT"]])
elif wlan.policy == "ACCEPT" and not linked:
self.cmds.extend([["-A", wlan.brname, "-i", netif1.localname,
"-o", netif2.localname, "-j", "DROP"],
["-A", wlan.brname, "-o", netif1.localname,
"-i", netif2.localname, "-j", "DROP"]])
# a global object because all WLANs share the same queue
@ -241,8 +234,8 @@ def ebtablescmds(call, cmds):
:return: nothing
"""
with ebtables_lock:
for cmd in cmds:
call(cmd)
for args in cmds:
call(args)
class LxBrNet(PyCoreNet):
@ -279,28 +272,24 @@ class LxBrNet(PyCoreNet):
Linux bridge startup logic.
:return: nothing
:raises CoreCommandError: when there is a command exception
"""
try:
subprocess.check_call([constants.BRCTL_BIN, "addbr", self.brname])
except subprocess.CalledProcessError:
logger.exception("Error adding bridge")
utils.check_cmd([constants.BRCTL_BIN, "addbr", self.brname])
try:
# turn off spanning tree protocol and forwarding delay
subprocess.check_call([constants.BRCTL_BIN, "stp", self.brname, "off"])
subprocess.check_call([constants.BRCTL_BIN, "setfd", self.brname, "0"])
subprocess.check_call([constants.IP_BIN, "link", "set", self.brname, "up"])
# create a new ebtables chain for this bridge
ebtablescmds(subprocess.check_call, [
[constants.EBTABLES_BIN, "-N", self.brname, "-P", self.policy],
[constants.EBTABLES_BIN, "-A", "FORWARD", "--logical-in", self.brname, "-j", self.brname]
])
# turn off multicast snooping so mcast forwarding occurs w/o IGMP joins
snoop = "/sys/devices/virtual/net/%s/bridge/multicast_snooping" % self.brname
if os.path.exists(snoop):
open(snoop, "w").write("0")
except subprocess.CalledProcessError:
logger.exception("Error setting bridge parameters")
# turn off spanning tree protocol and forwarding delay
utils.check_cmd([constants.BRCTL_BIN, "stp", self.brname, "off"])
utils.check_cmd([constants.BRCTL_BIN, "setfd", self.brname, "0"])
utils.check_cmd([constants.IP_BIN, "link", "set", self.brname, "up"])
# create a new ebtables chain for this bridge
ebtablescmds(utils.check_cmd, [
[constants.EBTABLES_BIN, "-N", self.brname, "-P", self.policy],
[constants.EBTABLES_BIN, "-A", "FORWARD", "--logical-in", self.brname, "-j", self.brname]
])
# turn off multicast snooping so mcast forwarding occurs w/o IGMP joins
snoop = "/sys/devices/virtual/net/%s/bridge/multicast_snooping" % self.brname
if os.path.exists(snoop):
with open(snoop, "w") as snoop_file:
snoop_file.write("0")
self.up = True
@ -312,35 +301,40 @@ class LxBrNet(PyCoreNet):
"""
if not self.up:
return
ebq.stopupdateloop(self)
utils.mutecall([constants.IP_BIN, "link", "set", self.brname, "down"])
utils.mutecall([constants.BRCTL_BIN, "delbr", self.brname])
ebtablescmds(utils.mutecall, [
[constants.EBTABLES_BIN, "-D", "FORWARD",
"--logical-in", self.brname, "-j", self.brname],
[constants.EBTABLES_BIN, "-X", self.brname]])
try:
utils.check_cmd([constants.IP_BIN, "link", "set", self.brname, "down"])
utils.check_cmd([constants.BRCTL_BIN, "delbr", self.brname])
ebtablescmds(utils.check_cmd, [
[constants.EBTABLES_BIN, "-D", "FORWARD", "--logical-in", self.brname, "-j", self.brname],
[constants.EBTABLES_BIN, "-X", self.brname]
])
except CoreCommandError:
logger.exception("error during shutdown")
# removes veth pairs used for bridge-to-bridge connections
for netif in self.netifs():
# removes veth pairs used for bridge-to-bridge connections
netif.shutdown()
self._netif.clear()
self._linked.clear()
del self.session
self.up = False
# TODO: this depends on a subtype with localname defined, seems like the wrong place for this to live
def attach(self, netif):
"""
Attach a network interface.
:param core.netns.vif.VEth netif: network interface to attach
:param core.netns.vnode.VEth netif: network interface to attach
:return: nothing
"""
if self.up:
try:
subprocess.check_call([constants.BRCTL_BIN, "addif", self.brname, netif.localname])
subprocess.check_call([constants.IP_BIN, "link", "set", netif.localname, "up"])
except subprocess.CalledProcessError:
logger.exception("Error joining interface %s to bridge %s", netif.localname, self.brname)
return
utils.check_cmd([constants.BRCTL_BIN, "addif", self.brname, netif.localname])
utils.check_cmd([constants.IP_BIN, "link", "set", netif.localname, "up"])
PyCoreNet.attach(self, netif)
def detach(self, netif):
@ -351,11 +345,8 @@ class LxBrNet(PyCoreNet):
:return: nothing
"""
if self.up:
try:
subprocess.check_call([constants.BRCTL_BIN, "delif", self.brname, netif.localname])
except subprocess.CalledProcessError:
logger.exception("Error removing interface %s from bridge %s", netif.localname, self.brname)
return
utils.check_cmd([constants.BRCTL_BIN, "delif", self.brname, netif.localname])
PyCoreNet.detach(self, netif)
def linked(self, netif1, netif2):
@ -396,12 +387,11 @@ class LxBrNet(PyCoreNet):
:param core.netns.vif.Veth netif2: interface two
:return: nothing
"""
self._linked_lock.acquire()
if not self.linked(netif1, netif2):
self._linked_lock.release()
return
self._linked[netif1][netif2] = False
self._linked_lock.release()
with self._linked_lock:
if not self.linked(netif1, netif2):
return
self._linked[netif1][netif2] = False
ebq.ebchange(self)
def link(self, netif1, netif2):
@ -413,12 +403,11 @@ class LxBrNet(PyCoreNet):
:param core.netns.vif.Veth netif2: interface two
:return: nothing
"""
self._linked_lock.acquire()
if self.linked(netif1, netif2):
self._linked_lock.release()
return
self._linked[netif1][netif2] = True
self._linked_lock.release()
with self._linked_lock:
if self.linked(netif1, netif2):
return
self._linked[netif1][netif2] = True
ebq.ebchange(self)
def linkconfig(self, netif, bw=None, delay=None, loss=None, duplicate=None,
@ -452,14 +441,14 @@ class LxBrNet(PyCoreNet):
if bw > 0:
if self.up:
logger.info("linkconfig: %s" % ([tc + parent + ["handle", "1:"] + tbf],))
subprocess.check_call(tc + parent + ["handle", "1:"] + tbf)
utils.check_cmd(tc + parent + ["handle", "1:"] + tbf)
netif.setparam("has_tbf", True)
changed = True
elif netif.getparam("has_tbf") and bw <= 0:
tcd = [] + tc
tcd[2] = "delete"
if self.up:
subprocess.check_call(tcd + parent)
utils.check_cmd(tcd + parent)
netif.setparam("has_tbf", False)
# removing the parent removes the child
netif.setparam("has_netem", False)
@ -497,12 +486,12 @@ class LxBrNet(PyCoreNet):
tc[2] = "delete"
if self.up:
logger.info("linkconfig: %s" % ([tc + parent + ["handle", "10:"]],))
subprocess.check_call(tc + parent + ["handle", "10:"])
utils.check_cmd(tc + parent + ["handle", "10:"])
netif.setparam("has_netem", False)
elif len(netem) > 1:
if self.up:
logger.info("linkconfig: %s" % ([tc + parent + ["handle", "10:"] + netem],))
subprocess.check_call(tc + parent + ["handle", "10:"] + netem)
utils.check_cmd(tc + parent + ["handle", "10:"] + netem)
netif.setparam("has_netem", True)
def linknet(self, net):
@ -519,24 +508,27 @@ class LxBrNet(PyCoreNet):
self_objid = "%x" % self.objid
except TypeError:
self_objid = "%s" % self.objid
try:
net_objid = "%x" % net.objid
except TypeError:
net_objid = "%s" % net.objid
localname = "veth%s.%s.%s" % (self_objid, net_objid, sessionid)
if len(localname) >= 16:
raise ValueError("interface local name %s too long" % localname)
name = "veth%s.%s.%s" % (net_objid, self_objid, sessionid)
if len(name) >= 16:
raise ValueError("interface name %s too long" % name)
netif = VEth(node=None, name=name, localname=localname,
mtu=1500, net=self, start=self.up)
netif = VEth(node=None, name=name, localname=localname, mtu=1500, net=self, start=self.up)
self.attach(netif)
if net.up:
# this is similar to net.attach() but uses netif.name instead
# of localname
subprocess.check_call([constants.BRCTL_BIN, "addif", net.brname, netif.name])
subprocess.check_call([constants.IP_BIN, "link", "set", netif.name, "up"])
utils.check_cmd([constants.BRCTL_BIN, "addif", net.brname, netif.name])
utils.check_cmd([constants.IP_BIN, "link", "set", netif.name, "up"])
i = net.newifindex()
net._netif[i] = netif
with net._linked_lock:
@ -557,6 +549,7 @@ class LxBrNet(PyCoreNet):
for netif in self.netifs():
if hasattr(netif, "othernet") and netif.othernet == net:
return netif
return None
def addrconfig(self, addrlist):
@ -568,11 +561,9 @@ class LxBrNet(PyCoreNet):
"""
if not self.up:
return
for addr in addrlist:
try:
subprocess.check_call([constants.IP_BIN, "addr", "add", str(addr), "dev", self.brname])
except subprocess.CalledProcessError:
logger.exception("Error adding IP address")
utils.check_cmd([constants.IP_BIN, "addr", "add", str(addr), "dev", self.brname])
class GreTapBridge(LxBrNet):
@ -609,8 +600,8 @@ class GreTapBridge(LxBrNet):
if remoteip is None:
self.gretap = None
else:
self.gretap = GreTap(node=self, name=None, session=session, remoteip=remoteip,
objid=None, localip=localip, ttl=ttl, key=self.grekey)
self.gretap = GreTap(node=self, session=session, remoteip=remoteip,
localip=localip, ttl=ttl, key=self.grekey)
if start:
self.startup()
@ -652,7 +643,7 @@ class GreTapBridge(LxBrNet):
localip = None
if len(addrlist) > 1:
localip = addrlist[1].split("/")[0]
self.gretap = GreTap(session=self.session, remoteip=remoteip, objid=None, name=None,
self.gretap = GreTap(session=self.session, remoteip=remoteip,
localip=localip, ttl=self.ttl, key=self.grekey)
self.attach(self.gretap)

View file

@ -2,14 +2,15 @@
PyCoreNode and LxcNode classes that implement the network namespace virtual node.
"""
import errno
import os
import random
import shutil
import signal
import string
import subprocess
import threading
from core import CoreCommandError
from core import constants
from core import logger
from core.coreobj import PyCoreNetIf
@ -17,18 +18,29 @@ from core.coreobj import PyCoreNode
from core.enumerations import NodeTypes
from core.misc import nodeutils
from core.misc import utils
from core.misc.ipaddress import MacAddress
from core.netns import vnodeclient
from core.netns.vif import TunTap
from core.netns.vif import VEth
_DEFAULT_MTU = 1500
utils.check_executables([constants.IP_BIN])
class SimpleLxcNode(PyCoreNode):
"""
Provides simple lxc functionality for core nodes.
:var nodedir: str
:var ctrlchnlname: str
:var client: core.netns.vnodeclient.VnodeClient
:var pid: int
:var up: bool
:var lock: threading.RLock
:var _mounts: list[tuple[str, str]]
"""
valid_deladdrtype = ("inet", "inet6", "inet6link")
valid_address_types = {"inet", "inet6", "inet6link"}
def __init__(self, session, objid=None, name=None, nodedir=None, start=True):
"""
@ -43,7 +55,7 @@ class SimpleLxcNode(PyCoreNode):
PyCoreNode.__init__(self, session, objid, name, start=start)
self.nodedir = nodedir
self.ctrlchnlname = os.path.abspath(os.path.join(self.session.session_dir, self.name))
self.vnodeclient = None
self.client = None
self.pid = None
self.up = False
self.lock = threading.RLock()
@ -72,39 +84,37 @@ class SimpleLxcNode(PyCoreNode):
:return: nothing
"""
if self.up:
raise Exception("already up")
vnoded = ["%s/vnoded" % constants.CORE_SBIN_DIR, "-v", "-c", self.ctrlchnlname,
"-l", self.ctrlchnlname + ".log",
"-p", self.ctrlchnlname + ".pid"]
raise ValueError("starting a node that is already up")
# create a new namespace for this node using vnoded
vnoded = [
constants.VNODED_BIN,
"-v",
"-c", self.ctrlchnlname,
"-l", self.ctrlchnlname + ".log",
"-p", self.ctrlchnlname + ".pid"
]
if self.nodedir:
vnoded += ["-C", self.nodedir]
env = self.session.get_environment(state=False)
env["NODE_NUMBER"] = str(self.objid)
env["NODE_NAME"] = str(self.name)
try:
tmp = subprocess.Popen(vnoded, stdout=subprocess.PIPE, env=env)
except OSError:
msg = "error running vnoded command: %s" % vnoded
logger.exception("SimpleLxcNode.startup(): %s", msg)
raise Exception(msg)
output = utils.check_cmd(vnoded, env=env)
self.pid = int(output)
try:
self.pid = int(tmp.stdout.read())
tmp.stdout.close()
except ValueError:
msg = "vnoded failed to create a namespace; "
msg += "check kernel support and user priveleges"
logger.exception("SimpleLxcNode.startup(): %s", msg)
# create vnode client
self.client = vnodeclient.VnodeClient(self.name, self.ctrlchnlname)
if tmp.wait():
raise Exception("command failed: %s" % vnoded)
# bring up the loopback interface
logger.debug("bringing up loopback interface")
self.check_cmd([constants.IP_BIN, "link", "set", "lo", "up"])
self.vnodeclient = vnodeclient.VnodeClient(self.name, self.ctrlchnlname)
logger.info("bringing up loopback interface")
self.cmd([constants.IP_BIN, "link", "set", "lo", "up"])
logger.info("setting hostname: %s" % self.name)
self.cmd(["hostname", self.name])
# set hostname for node
logger.debug("setting hostname: %s", self.name)
self.check_cmd(["hostname", self.name])
# mark node as up
self.up = True
def shutdown(self):
@ -130,107 +140,71 @@ class SimpleLxcNode(PyCoreNode):
try:
os.kill(self.pid, signal.SIGTERM)
os.waitpid(self.pid, 0)
except OSError:
logger.exception("error killing process")
except OSError as e:
if e.errno != 10:
logger.exception("error killing process")
# remove node directory if present
try:
if os.path.exists(self.ctrlchnlname):
os.unlink(self.ctrlchnlname)
except OSError:
logger.exception("error removing file")
os.unlink(self.ctrlchnlname)
except OSError as e:
# no such file or directory
if e.errno != errno.ENOENT:
logger.exception("error removing node directory")
# clear interface data, close client, and mark self as not up
self._netif.clear()
self.vnodeclient.close()
self.client.close()
self.up = False
# TODO: potentially remove all these wrapper methods, just make use of object itself.
def cmd(self, args, wait=True):
"""
Wrapper around vnodeclient cmd.
:param args: arguments for ocmmand
:param wait: wait or not
:return:
"""
return self.vnodeclient.cmd(args, wait)
def cmdresult(self, args):
"""
Wrapper around vnodeclient cmdresult.
:param args: arguments for ocmmand
:return:
"""
return self.vnodeclient.cmdresult(args)
def popen(self, args):
"""
Wrapper around vnodeclient popen.
:param args: arguments for ocmmand
:return:
"""
return self.vnodeclient.popen(args)
def icmd(self, args):
"""
Wrapper around vnodeclient icmd.
:param args: arguments for ocmmand
:return:
"""
return self.vnodeclient.icmd(args)
def redircmd(self, infd, outfd, errfd, args, wait=True):
"""
Wrapper around vnodeclient redircmd.
:param infd: input file descriptor
:param outfd: output file descriptor
:param errfd: err file descriptor
:param args: command arguments
:param wait: wait or not
:return:
"""
return self.vnodeclient.redircmd(infd, outfd, errfd, args, wait)
def term(self, sh="/bin/sh"):
"""
Wrapper around vnodeclient term.
:param sh: shell to create terminal for
:return:
"""
return self.vnodeclient.term(sh=sh)
def termcmdstring(self, sh="/bin/sh"):
"""
Wrapper around vnodeclient termcmdstring.
:param sh: shell to run command in
:return:
"""
return self.vnodeclient.termcmdstring(sh=sh)
def shcmd(self, cmdstr, sh="/bin/sh"):
"""
Wrapper around vnodeclient shcmd.
:param str cmdstr: command string
:param sh: shell to run command in
:return:
"""
return self.vnodeclient.shcmd(cmdstr, sh=sh)
def boot(self):
"""
Boot logic.
:return: nothing
"""
pass
return None
def cmd(self, args, wait=True):
"""
Runs shell command on node, with option to not wait for a result.
:param list[str]|str args: command to run
:param bool wait: wait for command to exit, defaults to True
:return: exit status for command
:rtype: int
"""
return self.client.cmd(args, wait)
def cmd_output(self, args):
"""
Runs shell command on node and get exit status and output.
:param list[str]|str args: command to run
:return: exit status and combined stdout and stderr
:rtype: tuple[int, str]
"""
return self.client.cmd_output(args)
def check_cmd(self, args):
"""
Runs shell command on node.
:param list[str]|str args: command to run
:return: combined stdout and stderr
:rtype: str
:raises CoreCommandError: when a non-zero exit status occurs
"""
return self.client.check_cmd(args)
def termcmdstring(self, sh="/bin/sh"):
"""
Create a terminal command string.
:param str sh: shell to execute command in
:return: str
"""
return self.client.termcmdstring(sh)
def mount(self, source, target):
"""
@ -239,16 +213,15 @@ class SimpleLxcNode(PyCoreNode):
:param str source: source directory to mount
:param str target: target directory to create
:return: nothing
:raises CoreCommandError: when a non-zero exit status occurs
"""
source = os.path.abspath(source)
logger.info("mounting %s at %s" % (source, target))
try:
shcmd = 'mkdir -p "%s" && %s -n --bind "%s" "%s"' % (
target, constants.MOUNT_BIN, source, target)
self.shcmd(shcmd)
self._mounts.append((source, target))
except IOError:
logger.exception("mounting failed for %s at %s", source, target)
logger.info("node(%s) mounting: %s at %s", self.name, source, target)
cmd = 'mkdir -p "%s" && %s -n --bind "%s" "%s"' % (target, constants.MOUNT_BIN, source, target)
status, output = self.client.shcmd_result(cmd)
if status:
raise CoreCommandError(status, cmd, output)
self._mounts.append((source, target))
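The mount above shells out a single compound command; a standalone sketch of the same mkdir-and-bind-mount step (paths are examples, not project defaults):

import os
import subprocess

def bind_mount(source, target):
    # create the target directory and bind-mount source onto it in one shell command
    source = os.path.abspath(source)
    cmd = 'mkdir -p "%s" && mount -n --bind "%s" "%s"' % (target, source, target)
    status = subprocess.call(cmd, shell=True)
    if status:
        raise OSError("mount failed (%s): %s" % (status, cmd))

# example (needs root); paths are illustrative:
# bind_mount("/tmp/node1.var.run", "/var/run")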
def umount(self, target):
"""
@ -257,11 +230,11 @@ class SimpleLxcNode(PyCoreNode):
:param str target: target directory to unmount
:return: nothing
"""
logger.info("unmounting: %s", target)
logger.info("node(%s) unmounting: %s", self.name, target)
try:
self.cmd([constants.UMOUNT_BIN, "-n", "-l", target])
except IOError:
logger.exception("unmounting failed for %s" % target)
self.check_cmd([constants.UMOUNT_BIN, "-n", "-l", target])
except CoreCommandError:
logger.exception("error during unmount")
def newifindex(self):
"""
@ -282,8 +255,7 @@ class SimpleLxcNode(PyCoreNode):
:param net: network to associate interface with
:return: nothing
"""
self.lock.acquire()
try:
with self.lock:
if ifindex is None:
ifindex = self.newifindex()
@ -300,36 +272,38 @@ class SimpleLxcNode(PyCoreNode):
localname = "veth" + suffix
if len(localname) >= 16:
raise ValueError("interface local name (%s) too long" % localname)
name = localname + "p"
if len(name) >= 16:
raise ValueError("interface name (%s) too long" % name)
veth = VEth(node=self, name=name, localname=localname, mtu=1500, net=net, start=self.up)
veth = VEth(node=self, name=name, localname=localname, net=net, start=self.up)
if self.up:
subprocess.check_call([constants.IP_BIN, "link", "set", veth.name, "netns", str(self.pid)])
self.cmd([constants.IP_BIN, "link", "set", veth.name, "name", ifname])
utils.check_cmd([constants.IP_BIN, "link", "set", veth.name, "netns", str(self.pid)])
self.check_cmd([constants.IP_BIN, "link", "set", veth.name, "name", ifname])
veth.name = ifname
# retrieve interface information
result, output = self.cmdresult(["ip", "link", "show", veth.name])
logger.info("interface command output: %s", output)
output = output.split("\n")
veth.flow_id = int(output[0].strip().split(":")[0]) + 1
logger.info("interface flow index: %s - %s", veth.name, veth.flow_id)
veth.hwaddr = output[1].strip().split()[1]
logger.info("interface mac: %s - %s", veth.name, veth.hwaddr)
if self.up:
# TODO: potentially find better way to query interface ID
# retrieve interface information
output = self.check_cmd(["ip", "link", "show", veth.name])
logger.debug("interface command output: %s", output)
output = output.split("\n")
veth.flow_id = int(output[0].strip().split(":")[0]) + 1
logger.debug("interface flow index: %s - %s", veth.name, veth.flow_id)
veth.hwaddr = MacAddress.from_string(output[1].strip().split()[1])
logger.debug("interface mac: %s - %s", veth.name, veth.hwaddr)
try:
self.addnetif(veth, ifindex)
except:
except ValueError as e:
veth.shutdown()
del veth
raise
raise e
return ifindex
finally:
self.lock.release()
def newtuntap(self, ifindex=None, ifname=None, net=None):
"""
@ -341,27 +315,26 @@ class SimpleLxcNode(PyCoreNode):
:return: interface index
:rtype: int
"""
self.lock.acquire()
try:
with self.lock:
if ifindex is None:
ifindex = self.newifindex()
if ifname is None:
ifname = "eth%d" % ifindex
sessionid = self.session.short_session_id()
localname = "tap%s.%s.%s" % (self.objid, ifindex, sessionid)
name = ifname
ifclass = TunTap
tuntap = ifclass(node=self, name=name, localname=localname,
mtu=1500, net=net, start=self.up)
tuntap = TunTap(node=self, name=name, localname=localname, net=net, start=self.up)
try:
self.addnetif(tuntap, ifindex)
except Exception as e:
except ValueError as e:
tuntap.shutdown()
del tuntap
raise e
return ifindex
finally:
self.lock.release()
def sethwaddr(self, ifindex, addr):
"""
@ -369,14 +342,13 @@ class SimpleLxcNode(PyCoreNode):
:param int ifindex: index of interface to set hardware address for
:param core.misc.ipaddress.MacAddress addr: hardware address to set
:return: mothing
:return: nothing
:raises CoreCommandError: when a non-zero exit status occurs
"""
self._netif[ifindex].sethwaddr(addr)
if self.up:
(status, result) = self.cmdresult([constants.IP_BIN, "link", "set", "dev",
self.ifname(ifindex), "address", str(addr)])
if status:
logger.error("error setting MAC address %s", str(addr))
args = [constants.IP_BIN, "link", "set", "dev", self.ifname(ifindex), "address", str(addr)]
self.check_cmd(args)
def addaddr(self, ifindex, addr):
"""
@ -387,12 +359,14 @@ class SimpleLxcNode(PyCoreNode):
:return: nothing
"""
if self.up:
if ":" in str(addr): # check if addr is ipv6
self.cmd([constants.IP_BIN, "addr", "add", str(addr),
"dev", self.ifname(ifindex)])
# check if addr is ipv6
if ":" in str(addr):
args = [constants.IP_BIN, "addr", "add", str(addr), "dev", self.ifname(ifindex)]
self.check_cmd(args)
else:
self.cmd([constants.IP_BIN, "addr", "add", str(addr), "broadcast", "+",
"dev", self.ifname(ifindex)])
args = [constants.IP_BIN, "addr", "add", str(addr), "broadcast", "+", "dev", self.ifname(ifindex)]
self.check_cmd(args)
self._netif[ifindex].addaddr(addr)
def deladdr(self, ifindex, addr):
@ -402,6 +376,7 @@ class SimpleLxcNode(PyCoreNode):
:param int ifindex: index of interface to delete address from
:param str addr: address to delete from interface
:return: nothing
:raises CoreCommandError: when a non-zero exit status occurs
"""
try:
self._netif[ifindex].deladdr(addr)
@ -409,24 +384,28 @@ class SimpleLxcNode(PyCoreNode):
logger.exception("trying to delete unknown address: %s" % addr)
if self.up:
self.cmd([constants.IP_BIN, "addr", "del", str(addr), "dev", self.ifname(ifindex)])
self.check_cmd([constants.IP_BIN, "addr", "del", str(addr), "dev", self.ifname(ifindex)])
def delalladdr(self, ifindex, addrtypes=valid_deladdrtype):
def delalladdr(self, ifindex, address_types=valid_address_types):
"""
Delete all addresses from an interface.
:param int ifindex: index of interface to delete all addresses from
:param tuple addrtypes: address types to delete
:param int ifindex: index of interface to delete address types from
:param tuple[str] address_types: address types to delete
:return: nothing
:raises CoreCommandError: when a non-zero exit status occurs
"""
addr = self.getaddr(self.ifname(ifindex), rescan=True)
for t in addrtypes:
if t not in self.valid_deladdrtype:
raise ValueError("addr type must be in: " + " ".join(self.valid_deladdrtype))
for a in addr[t]:
self.deladdr(ifindex, a)
interface_name = self.ifname(ifindex)
addresses = self.client.getaddr(interface_name, rescan=True)
for address_type in address_types:
if address_type not in self.valid_address_types:
raise ValueError("addr type must be in: %s" % " ".join(self.valid_address_types))
for address in addresses[address_type]:
self.deladdr(ifindex, address)
# update cached information
self.getaddr(self.ifname(ifindex), rescan=True)
self.client.getaddr(interface_name, rescan=True)
def ifup(self, ifindex):
"""
@ -436,7 +415,7 @@ class SimpleLxcNode(PyCoreNode):
:return: nothing
"""
if self.up:
self.cmd([constants.IP_BIN, "link", "set", self.ifname(ifindex), "up"])
self.check_cmd([constants.IP_BIN, "link", "set", self.ifname(ifindex), "up"])
def newnetif(self, net=None, addrlist=None, hwaddr=None, ifindex=None, ifname=None):
"""
@ -450,8 +429,10 @@ class SimpleLxcNode(PyCoreNode):
:return: interface index
:rtype: int
"""
self.lock.acquire()
try:
if not addrlist:
addrlist = []
with self.lock:
# TODO: see if you can move this to emane specific code
if nodeutils.is_node(net, NodeTypes.EMANE):
ifindex = self.newtuntap(ifindex=ifindex, ifname=ifname, net=net)
@ -462,8 +443,8 @@ class SimpleLxcNode(PyCoreNode):
self.attachnet(ifindex, net)
netif = self.netif(ifindex)
netif.sethwaddr(hwaddr)
for addr in utils.maketuple(addrlist):
netif.addaddr(addr)
for address in utils.make_tuple(addrlist):
netif.addaddr(address)
return ifindex
else:
ifindex = self.newveth(ifindex=ifindex, ifname=ifname, net=net)
@ -474,14 +455,11 @@ class SimpleLxcNode(PyCoreNode):
if hwaddr:
self.sethwaddr(ifindex, hwaddr)
if addrlist:
for addr in utils.maketuple(addrlist):
self.addaddr(ifindex, addr)
for address in utils.make_tuple(addrlist):
self.addaddr(ifindex, address)
self.ifup(ifindex)
return ifindex
finally:
self.lock.release()
def connectnode(self, ifname, othernode, otherifname):
"""
@ -493,21 +471,19 @@ class SimpleLxcNode(PyCoreNode):
:return: nothing
"""
tmplen = 8
tmp1 = "tmp." + "".join([random.choice(string.ascii_lowercase)
for x in xrange(tmplen)])
tmp2 = "tmp." + "".join([random.choice(string.ascii_lowercase)
for x in xrange(tmplen)])
subprocess.check_call([constants.IP_BIN, "link", "add", "name", tmp1,
"type", "veth", "peer", "name", tmp2])
tmp1 = "tmp." + "".join([random.choice(string.ascii_lowercase) for _ in xrange(tmplen)])
tmp2 = "tmp." + "".join([random.choice(string.ascii_lowercase) for _ in xrange(tmplen)])
utils.check_cmd([constants.IP_BIN, "link", "add", "name", tmp1, "type", "veth", "peer", "name", tmp2])
subprocess.call([constants.IP_BIN, "link", "set", tmp1, "netns", str(self.pid)])
self.cmd([constants.IP_BIN, "link", "set", tmp1, "name", ifname])
self.addnetif(PyCoreNetIf(self, ifname), self.newifindex())
utils.check_cmd([constants.IP_BIN, "link", "set", tmp1, "netns", str(self.pid)])
self.check_cmd([constants.IP_BIN, "link", "set", tmp1, "name", ifname])
interface = PyCoreNetIf(node=self, name=ifname, mtu=_DEFAULT_MTU)
self.addnetif(interface, self.newifindex())
subprocess.check_call([constants.IP_BIN, "link", "set", tmp2, "netns", str(othernode.pid)])
othernode.cmd([constants.IP_BIN, "link", "set", tmp2, "name", otherifname])
othernode.addnetif(PyCoreNetIf(othernode, otherifname),
othernode.newifindex())
utils.check_cmd([constants.IP_BIN, "link", "set", tmp2, "netns", str(othernode.pid)])
othernode.check_cmd([constants.IP_BIN, "link", "set", tmp2, "name", otherifname])
other_interface = PyCoreNetIf(node=othernode, name=otherifname, mtu=_DEFAULT_MTU)
othernode.addnetif(other_interface, othernode.newifindex())
def addfile(self, srcname, filename):
"""
@ -516,28 +492,15 @@ class SimpleLxcNode(PyCoreNode):
:param str srcname: source file name
:param str filename: file name to add
:return: nothing
:raises CoreCommandError: when a non-zero exit status occurs
"""
shcmd = 'mkdir -p $(dirname "%s") && mv "%s" "%s" && sync' % (filename, srcname, filename)
self.shcmd(shcmd)
logger.info("adding file from %s to %s", srcname, filename)
directory = os.path.dirname(filename)
def getaddr(self, ifname, rescan=False):
"""
Wrapper around vnodeclient getaddr.
:param str ifname: interface name to get address for
:param bool rescan: rescan flag
:return:
"""
return self.vnodeclient.getaddr(ifname=ifname, rescan=rescan)
def netifstats(self, ifname=None):
"""
Wrapper around vnodeclient netifstate.
:param str ifname: interface name to get state for
:return:
"""
return self.vnodeclient.netifstats(ifname=ifname)
cmd = 'mkdir -p "%s" && mv "%s" "%s" && sync' % (directory, srcname, filename)
status, output = self.client.shcmd_result(cmd)
if status:
raise CoreCommandError(status, cmd, output)
class LxcNode(SimpleLxcNode):
@ -545,8 +508,7 @@ class LxcNode(SimpleLxcNode):
Provides lxc node functionality for core nodes.
"""
def __init__(self, session, objid=None, name=None,
nodedir=None, bootsh="boot.sh", start=True):
def __init__(self, session, objid=None, name=None, nodedir=None, bootsh="boot.sh", start=True):
"""
Create a LxcNode instance.
@ -557,8 +519,7 @@ class LxcNode(SimpleLxcNode):
:param bootsh: boot shell
:param bool start: start flag
"""
super(LxcNode, self).__init__(session=session, objid=objid,
name=name, nodedir=nodedir, start=start)
super(LxcNode, self).__init__(session=session, objid=objid, name=name, nodedir=nodedir, start=start)
self.bootsh = bootsh
if start:
self.startup()
@ -585,16 +546,11 @@ class LxcNode(SimpleLxcNode):
:return: nothing
"""
self.lock.acquire()
try:
with self.lock:
self.makenodedir()
super(LxcNode, self).startup()
self.privatedir("/var/run")
self.privatedir("/var/log")
except OSError:
logger.exception("error during LxcNode.startup()")
finally:
self.lock.release()
def shutdown(self):
"""
@ -604,16 +560,14 @@ class LxcNode(SimpleLxcNode):
"""
if not self.up:
return
self.lock.acquire()
# services are instead stopped when session enters datacollect state
# self.session.services.stopnodeservices(self)
try:
super(LxcNode, self).shutdown()
except:
logger.exception("error during shutdown")
finally:
self.rmnodedir()
self.lock.release()
with self.lock:
try:
super(LxcNode, self).shutdown()
except OSError:
logger.exception("error during shutdown")
finally:
self.rmnodedir()
def privatedir(self, path):
"""
@ -625,12 +579,7 @@ class LxcNode(SimpleLxcNode):
if path[0] != "/":
raise ValueError("path not fully qualified: %s" % path)
hostpath = os.path.join(self.nodedir, os.path.normpath(path).strip("/").replace("/", "."))
try:
os.mkdir(hostpath)
except OSError:
logger.exception("error creating directory: %s", hostpath)
os.mkdir(hostpath)
self.mount(hostpath, path)
def hostfilename(self, filename):
@ -642,7 +591,7 @@ class LxcNode(SimpleLxcNode):
"""
dirname, basename = os.path.split(filename)
if not basename:
raise ValueError("no basename for filename: " + filename)
raise ValueError("no basename for filename: %s" % filename)
if dirname and dirname[0] == "/":
dirname = dirname[1:]
dirname = dirname.replace("/", ".")
@ -673,11 +622,10 @@ class LxcNode(SimpleLxcNode):
:param int mode: mode for file
:return: nothing
"""
f = self.opennodefile(filename, "w")
f.write(contents)
os.chmod(f.name, mode)
f.close()
logger.info("created nodefile: %s; mode: 0%o", f.name, mode)
with self.opennodefile(filename, "w") as open_file:
open_file.write(contents)
os.chmod(open_file.name, mode)
logger.info("node(%s) added file: %s; mode: 0%o", self.name, open_file.name, mode)
def nodefilecopy(self, filename, srcfilename, mode=None):
"""
@ -693,4 +641,4 @@ class LxcNode(SimpleLxcNode):
shutil.copy2(srcfilename, hostfilename)
if mode is not None:
os.chmod(hostfilename, mode)
logger.info("copied nodefile: %s; mode: %s", hostfilename, mode)
logger.info("node(%s) copied file: %s; mode: %s", self.name, hostfilename, mode)

View file

@ -6,18 +6,13 @@ by invoking the vcmd shell command.
"""
import os
import stat
import subprocess
import vcmd
from core import CoreCommandError
from core import constants
from core import logger
USE_VCMD_MODULE = True
if USE_VCMD_MODULE:
import vcmd
VCMD = os.path.join(constants.CORE_SBIN_DIR, "vcmd")
from core.misc import utils
class VnodeClient(object):
@ -34,12 +29,19 @@ class VnodeClient(object):
"""
self.name = name
self.ctrlchnlname = ctrlchnlname
if USE_VCMD_MODULE:
self.cmdchnl = vcmd.VCmd(self.ctrlchnlname)
else:
self.cmdchnl = None
self.cmdchnl = vcmd.VCmd(self.ctrlchnlname)
self._addr = {}
def _verify_connection(self):
"""
Checks that the vcmd client is properly connected.
:return: nothing
:raises IOError: when not connected
"""
if not self.connected():
raise IOError("vcmd not connected")
def connected(self):
"""
Check if node is connected or not.
@ -47,10 +49,7 @@ class VnodeClient(object):
:return: True if connected, False otherwise
:rtype: bool
"""
if USE_VCMD_MODULE:
return self.cmdchnl.connected()
else:
return True
return self.cmdchnl.connected()
def close(self):
"""
@ -58,85 +57,82 @@ class VnodeClient(object):
:return: nothing
"""
if USE_VCMD_MODULE:
self.cmdchnl.close()
self.cmdchnl.close()
def cmd(self, args, wait=True):
"""
Execute a command on a node and return the status (return code).
:param list args: command arguments
:param list[str]|str args: command arguments
:param bool wait: wait for command to end or not
:return: command status
:rtype: int
"""
if USE_VCMD_MODULE:
if not self.cmdchnl.connected():
raise ValueError("self.cmdchnl not connected")
tmp = self.cmdchnl.qcmd(args)
if not wait:
return tmp
tmp = tmp.wait()
else:
if wait:
mode = os.P_WAIT
else:
mode = os.P_NOWAIT
tmp = os.spawnlp(mode, VCMD, VCMD, "-c", self.ctrlchnlname, "-q", "--", *args)
if not wait:
return tmp
self._verify_connection()
args = utils.split_args(args)
if tmp:
logger.warn("cmd exited with status %s: %s" % (tmp, str(args)))
# run command, return process when not waiting
p = self.cmdchnl.qcmd(args)
if not wait:
return 0
return tmp
# wait for and return exit status
return p.wait()
def cmdresult(self, args):
def cmd_output(self, args):
"""
Execute a command on a node and return a tuple containing the
exit status and result string. stderr output
is folded into the stdout result string.
:param list args: command arguments
:param list[str]|str args: command to run
:return: command status and combined stdout and stderr output
:rtype: tuple[int, str]
"""
cmdid, cmdin, cmdout, cmderr = self.popen(args)
result = cmdout.read()
result += cmderr.read()
cmdin.close()
cmdout.close()
cmderr.close()
status = cmdid.wait()
return status, result
p, stdin, stdout, stderr = self.popen(args)
stdin.close()
output = stdout.read() + stderr.read()
stdout.close()
stderr.close()
status = p.wait()
return status, output.strip()
def check_cmd(self, args):
"""
Run command and return exit status and combined stdout and stderr.
:param list[str]|str args: command to run
:return: combined stdout and stderr
:rtype: str
:raises core.CoreCommandError: when there is a non-zero exit status
"""
status, output = self.cmd_output(args)
if status != 0:
raise CoreCommandError(status, args, output)
return output.strip()
def popen(self, args):
"""
Execute a popen command against the node.
:param list args: command arguments
:param list[str]|str args: command arguments
:return: popen object, stdin, stdout, and stderr
:rtype: tuple
"""
if USE_VCMD_MODULE:
if not self.cmdchnl.connected():
raise ValueError("self.cmdchnl not connected")
return self.cmdchnl.popen(args)
else:
cmd = [VCMD, "-c", self.ctrlchnlname, "--"]
cmd.extend(args)
tmp = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return tmp, tmp.stdin, tmp.stdout, tmp.stderr
self._verify_connection()
args = utils.split_args(args)
return self.cmdchnl.popen(args)
def icmd(self, args):
"""
Execute an icmd against a node.
:param list args: command arguments
:param list[str]|str args: command arguments
:return: command result
:rtype: int
"""
return os.spawnlp(os.P_WAIT, VCMD, VCMD, "-c", self.ctrlchnlname, "--", *args)
args = utils.split_args(args)
return os.spawnlp(os.P_WAIT, constants.VCMD_BIN, constants.VCMD_BIN, "-c", self.ctrlchnlname, "--", *args)
def redircmd(self, infd, outfd, errfd, args, wait=True):
"""
@ -146,22 +142,24 @@ class VnodeClient(object):
:param infd: stdin file descriptor
:param outfd: stdout file descriptor
:param errfd: stderr file descriptor
:param list args: command arguments
:param list[str]|str args: command arguments
:param bool wait: wait flag
:return: command status
:rtype: int
"""
if not USE_VCMD_MODULE:
raise NotImplementedError
if not self.cmdchnl.connected():
raise ValueError("self.cmdchnl not connected")
tmp = self.cmdchnl.redircmd(infd, outfd, errfd, args)
self._verify_connection()
# run command, return process when not waiting
args = utils.split_args(args)
p = self.cmdchnl.redircmd(infd, outfd, errfd, args)
if not wait:
return tmp
tmp = tmp.wait()
if tmp:
logger.warn("cmd exited with status %s: %s" % (tmp, str(args)))
return tmp
return p
# wait for and return exit status
status = p.wait()
if status:
logger.warn("cmd exited with status %s: %s", status, args)
return status
def term(self, sh="/bin/sh"):
"""
@ -171,13 +169,12 @@ class VnodeClient(object):
:return: terminal command result
:rtype: int
"""
cmd = ("xterm", "-ut", "-title", self.name, "-e",
VCMD, "-c", self.ctrlchnlname, "--", sh)
args = ("xterm", "-ut", "-title", self.name, "-e", constants.VCMD_BIN, "-c", self.ctrlchnlname, "--", sh)
if "SUDO_USER" in os.environ:
cmd = ("su", "-s", "/bin/sh", "-c",
"exec " + " ".join(map(lambda x: "'%s'" % x, cmd)),
os.environ["SUDO_USER"])
return os.spawnvp(os.P_NOWAIT, cmd[0], cmd)
args = ("su", "-s", "/bin/sh", "-c",
"exec " + " ".join(map(lambda x: "'%s'" % x, args)),
os.environ["SUDO_USER"])
return os.spawnvp(os.P_NOWAIT, args[0], args)
def termcmdstring(self, sh="/bin/sh"):
"""
@ -186,18 +183,29 @@ class VnodeClient(object):
:param str sh: shell to execute command in
:return: str
"""
return "%s -c %s -- %s" % (VCMD, self.ctrlchnlname, sh)
return "%s -c %s -- %s" % (constants.VCMD_BIN, self.ctrlchnlname, sh)
def shcmd(self, cmdstr, sh="/bin/sh"):
def shcmd(self, cmd, sh="/bin/sh"):
"""
Execute a shell command.
:param str cmdstr: command string
:param str cmd: command string
:param str sh: shell to run command in
:return: command result
:rtype: int
"""
return self.cmd([sh, "-c", cmdstr])
return self.cmd([sh, "-c", cmd])
def shcmd_result(self, cmd, sh="/bin/sh"):
"""
Execute a shell command and return the exit status and combined output.
:param str cmd: shell command to run
:param str sh: shell to run command in
:return: exit status and combined output
:rtype: tuple[int, str]
"""
return self.cmd_output([sh, "-c", cmd])
def getaddr(self, ifname, rescan=False):
"""
@ -210,35 +218,36 @@ class VnodeClient(object):
"""
if ifname in self._addr and not rescan:
return self._addr[ifname]
tmp = {"ether": [], "inet": [], "inet6": [], "inet6link": []}
cmd = [constants.IP_BIN, "addr", "show", "dev", ifname]
cmdid, cmdin, cmdout, cmderr = self.popen(cmd)
cmdin.close()
for line in cmdout:
interface = {"ether": [], "inet": [], "inet6": [], "inet6link": []}
args = [constants.IP_BIN, "addr", "show", "dev", ifname]
p, stdin, stdout, stderr = self.popen(args)
stdin.close()
for line in stdout:
line = line.strip().split()
if line[0] == "link/ether":
tmp["ether"].append(line[1])
interface["ether"].append(line[1])
elif line[0] == "inet":
tmp["inet"].append(line[1])
interface["inet"].append(line[1])
elif line[0] == "inet6":
if line[3] == "global":
tmp["inet6"].append(line[1])
interface["inet6"].append(line[1])
elif line[3] == "link":
tmp["inet6link"].append(line[1])
interface["inet6link"].append(line[1])
else:
logger.warn("unknown scope: %s" % line[3])
err = cmderr.read()
cmdout.close()
cmderr.close()
status = cmdid.wait()
err = stderr.read()
stdout.close()
stderr.close()
status = p.wait()
if status:
logger.warn("nonzero exist status (%s) for cmd: %s" % (status, cmd))
logger.warn("nonzero exist status (%s) for cmd: %s", status, args)
if err:
logger.warn("error output: %s" % err)
self._addr[ifname] = tmp
return tmp
logger.warn("error output: %s", err)
self._addr[ifname] = interface
return interface
def netifstats(self, ifname=None):
"""
@ -249,16 +258,16 @@ class VnodeClient(object):
:rtype: dict
"""
stats = {}
cmd = ["cat", "/proc/net/dev"]
cmdid, cmdin, cmdout, cmderr = self.popen(cmd)
cmdin.close()
args = ["cat", "/proc/net/dev"]
p, stdin, stdout, stderr = self.popen(args)
stdin.close()
# ignore first line
cmdout.readline()
stdout.readline()
# second line has count names
tmp = cmdout.readline().strip().split("|")
tmp = stdout.readline().strip().split("|")
rxkeys = tmp[1].split()
txkeys = tmp[2].split()
for line in cmdout:
for line in stdout:
line = line.strip().split()
devname, tmp = line[0].split(":")
if tmp:
@ -271,53 +280,15 @@ class VnodeClient(object):
for count in txkeys:
stats[devname]["tx"][count] = int(line[field])
field += 1
err = cmderr.read()
cmdout.close()
cmderr.close()
status = cmdid.wait()
err = stderr.read()
stdout.close()
stderr.close()
status = p.wait()
if status:
logger.warn("nonzero exist status (%s) for cmd: %s" % (status, cmd))
logger.warn("nonzero exist status (%s) for cmd: %s", status, args)
if err:
logger.warn("error output: %s" % err)
logger.warn("error output: %s", err)
if ifname is not None:
return stats[ifname]
else:
return stats
def createclients(sessiondir, clientcls=VnodeClient, cmdchnlfilterfunc=None):
"""
Create clients
:param str sessiondir: session directory to create clients
:param class clientcls: class to create clients from
:param func cmdchnlfilterfunc: command channel filter function
:return: list of created clients
:rtype: list
"""
direntries = map(lambda x: os.path.join(sessiondir, x), os.listdir(sessiondir))
cmdchnls = filter(lambda x: stat.S_ISSOCK(os.stat(x).st_mode), direntries)
if cmdchnlfilterfunc:
cmdchnls = filter(cmdchnlfilterfunc, cmdchnls)
cmdchnls.sort()
return map(lambda x: clientcls(os.path.basename(x), x), cmdchnls)
def createremoteclients(sessiondir, clientcls=VnodeClient, filterfunc=None):
"""
Creates remote VnodeClients, for nodes emulated on other machines. The
session.Broker writes a n1.conf/server file having the server's info.
:param str sessiondir: session directory to create clients
:param class clientcls: class to create clients from
:param func filterfunc: filter function
:return: list of remove clients
:rtype: list
"""
direntries = map(lambda x: os.path.join(sessiondir, x), os.listdir(sessiondir))
nodedirs = filter(lambda x: stat.S_ISDIR(os.stat(x).st_mode), direntries)
nodedirs = filter(lambda x: os.path.exists(os.path.join(x, "server")), nodedirs)
if filterfunc:
nodedirs = filter(filterfunc, nodedirs)
nodedirs.sort()
return map(lambda x: clientcls(x), nodedirs)
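For reference, the getaddr scan above amounts to parsing the output of "ip addr show dev <ifname>" into per-family buckets; a standalone hedged sketch (the interface name in the example is an assumption):

import subprocess

def getaddr(ifname):
    # bucket addresses from "ip addr show" the same way the client above does
    interface = {"ether": [], "inet": [], "inet6": [], "inet6link": []}
    output = subprocess.check_output(["ip", "addr", "show", "dev", ifname],
                                     universal_newlines=True)
    for line in output.splitlines():
        fields = line.strip().split()
        if not fields:
            continue
        if fields[0] == "link/ether":
            interface["ether"].append(fields[1])
        elif fields[0] == "inet":
            interface["inet"].append(fields[1])
        elif fields[0] == "inet6" and len(fields) > 3:
            # the scope keyword ("global" or "link") is the fourth field
            if fields[3] == "global":
                interface["inet6"].append(fields[1])
            elif fields[3] == "link":
                interface["inet6link"].append(fields[1])
    return interface

# example; the interface name is an assumption: getaddr("eth0")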

View file

@ -6,6 +6,7 @@ import os
import subprocess
import threading
from core import CoreCommandError
from core import constants
from core import logger
from core.coreobj import PyCoreNode
@ -31,63 +32,72 @@ class PhysicalNode(PyCoreNode):
self.session.services.validatenodeservices(self)
def startup(self):
self.lock.acquire()
try:
with self.lock:
self.makenodedir()
# self.privatedir("/var/run")
# self.privatedir("/var/log")
except OSError:
logger.exception("startup error")
finally:
self.lock.release()
def shutdown(self):
if not self.up:
return
self.lock.acquire()
while self._mounts:
source, target = self._mounts.pop(-1)
self.umount(target)
for netif in self.netifs():
netif.shutdown()
self.rmnodedir()
self.lock.release()
with self.lock:
while self._mounts:
source, target = self._mounts.pop(-1)
self.umount(target)
for netif in self.netifs():
netif.shutdown()
self.rmnodedir()
def termcmdstring(self, sh="/bin/sh"):
"""
The broker will add the appropriate SSH command to open a terminal
on this physical node.
Create a terminal command string.
:param str sh: shell to execute command in
:return: str
"""
return sh
def cmd(self, args, wait=True):
"""
run a command on the physical node
"""
os.chdir(self.nodedir)
try:
if wait:
# os.spawnlp(os.P_WAIT, args)
subprocess.call(args)
else:
# os.spawnlp(os.P_NOWAIT, args)
subprocess.Popen(args)
except subprocess.CalledProcessError:
logger.exception("cmd exited with status: %s", str(args))
Runs shell command on node, with option to not wait for a result.
def cmdresult(self, args):
"""
run a command on the physical node and get the result
:param list[str]|str args: command to run
:param bool wait: wait for command to exit, defaults to True
:return: exit status for command
:rtype: int
"""
os.chdir(self.nodedir)
# in Python 2.7 we can use subprocess.check_output() here
tmp = subprocess.Popen(args, stdin=open(os.devnull, 'r'),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
# err will always be None
result, err = tmp.communicate()
status = tmp.wait()
return status, result
status = utils.cmd(args, wait)
return status
def cmd_output(self, args):
"""
Runs shell command on node and get exit status and output.
:param list[str]|str args: command to run
:return: exit status and combined stdout and stderr
:rtype: tuple[int, str]
"""
os.chdir(self.nodedir)
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, _ = p.communicate()
status = p.wait()
return status, stdout.strip()
def check_cmd(self, args):
"""
Runs shell command on node.
:param list[str]|str args: command to run
:return: combined stdout and stderr
:rtype: str
:raises CoreCommandError: when a non-zero exit status occurs
"""
status, output = self.cmd_output(args)
if status:
raise CoreCommandError(status, args, output)
return output.strip()
def shcmd(self, cmdstr, sh="/bin/sh"):
return self.cmd([sh, "-c", cmdstr])
@ -99,17 +109,14 @@ class PhysicalNode(PyCoreNode):
self._netif[ifindex].sethwaddr(addr)
ifname = self.ifname(ifindex)
if self.up:
(status, result) = self.cmdresult(
[constants.IP_BIN, "link", "set", "dev", ifname, "address", str(addr)])
if status:
logger.error("error setting MAC address %s", str(addr))
self.check_cmd([constants.IP_BIN, "link", "set", "dev", ifname, "address", str(addr)])
def addaddr(self, ifindex, addr):
"""
same as SimpleLxcNode.addaddr()
"""
if self.up:
self.cmd([constants.IP_BIN, "addr", "add", str(addr), "dev", self.ifname(ifindex)])
self.check_cmd([constants.IP_BIN, "addr", "add", str(addr), "dev", self.ifname(ifindex)])
self._netif[ifindex].addaddr(addr)
@ -123,7 +130,7 @@ class PhysicalNode(PyCoreNode):
logger.exception("trying to delete unknown address: %s", addr)
if self.up:
self.cmd([constants.IP_BIN, "addr", "del", str(addr), "dev", self.ifname(ifindex)])
self.check_cmd([constants.IP_BIN, "addr", "del", str(addr), "dev", self.ifname(ifindex)])
def adoptnetif(self, netif, ifindex, hwaddr, addrlist):
"""
@ -135,28 +142,31 @@ class PhysicalNode(PyCoreNode):
netif.name = "gt%d" % ifindex
netif.node = self
self.addnetif(netif, ifindex)
# use a more reasonable name, e.g. "gt0" instead of "gt.56286.150"
if self.up:
self.cmd([constants.IP_BIN, "link", "set", "dev", netif.localname, "down"])
self.cmd([constants.IP_BIN, "link", "set", netif.localname, "name", netif.name])
self.check_cmd([constants.IP_BIN, "link", "set", "dev", netif.localname, "down"])
self.check_cmd([constants.IP_BIN, "link", "set", netif.localname, "name", netif.name])
netif.localname = netif.name
if hwaddr:
self.sethwaddr(ifindex, hwaddr)
for addr in utils.maketuple(addrlist):
self.addaddr(ifindex, addr)
if self.up:
self.cmd([constants.IP_BIN, "link", "set", "dev", netif.localname, "up"])
def linkconfig(self, netif, bw=None, delay=None,
loss=None, duplicate=None, jitter=None, netif2=None):
for addr in utils.make_tuple(addrlist):
self.addaddr(ifindex, addr)
if self.up:
self.check_cmd([constants.IP_BIN, "link", "set", "dev", netif.localname, "up"])
def linkconfig(self, netif, bw=None, delay=None, loss=None, duplicate=None, jitter=None, netif2=None):
"""
Apply tc queuing disciplines using LxBrNet.linkconfig()
"""
# borrow the tc qdisc commands from LxBrNet.linkconfig()
linux_bridge = LxBrNet(session=self.session, start=False)
linux_bridge.up = True
linux_bridge.linkconfig(netif, bw=bw, delay=delay, loss=loss, duplicate=duplicate,
jitter=jitter, netif2=netif2)
linux_bridge.linkconfig(netif, bw=bw, delay=delay, loss=loss, duplicate=duplicate, jitter=jitter, netif2=netif2)
del linux_bridge
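# Usage sketch for linkconfig() above, assuming the node has an interface at
# index 0; the bandwidth/delay/loss/duplicate/jitter values are illustrative
# only and are ultimately applied through LxBrNet.linkconfig().
def example_linkconfig(node):
    netif = node.netif(0)
    node.linkconfig(netif, bw=512000, delay=20000, loss=1.0, duplicate=0.5, jitter=1000)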
def newifindex(self):
@ -199,51 +209,43 @@ class PhysicalNode(PyCoreNode):
def privatedir(self, path):
if path[0] != "/":
raise ValueError, "path not fully qualified: " + path
raise ValueError("path not fully qualified: %s" % path)
hostpath = os.path.join(self.nodedir, os.path.normpath(path).strip('/').replace('/', '.'))
try:
os.mkdir(hostpath)
except OSError:
logger.exception("error creating directory: %s", hostpath)
os.mkdir(hostpath)
self.mount(hostpath, path)
def mount(self, source, target):
source = os.path.abspath(source)
logger.info("mounting %s at %s" % (source, target))
try:
os.makedirs(target)
self.cmd([constants.MOUNT_BIN, "--bind", source, target])
self._mounts.append((source, target))
except OSError:
logger.exception("error making directories")
except:
logger.exception("mounting failed for %s at %s", source, target)
logger.info("mounting %s at %s", source, target)
os.makedirs(target)
self.check_cmd([constants.MOUNT_BIN, "--bind", source, target])
self._mounts.append((source, target))
def umount(self, target):
logger.info("unmounting '%s'" % target)
try:
self.cmd([constants.UMOUNT_BIN, "-l", target])
except:
self.check_cmd([constants.UMOUNT_BIN, "-l", target])
except CoreCommandError:
logger.exception("unmounting failed for %s", target)
def opennodefile(self, filename, mode="w"):
dirname, basename = os.path.split(filename)
if not basename:
raise ValueError("no basename for filename: " + filename)
if dirname and dirname[0] == "/":
dirname = dirname[1:]
dirname = dirname.replace("/", ".")
dirname = os.path.join(self.nodedir, dirname)
if not os.path.isdir(dirname):
os.makedirs(dirname, mode=0755)
hostfilename = os.path.join(dirname, basename)
return open(hostfilename, mode)
def nodefile(self, filename, contents, mode=0644):
f = self.opennodefile(filename, "w")
f.write(contents)
os.chmod(f.name, mode)
f.close()
logger.info("created nodefile: '%s'; mode: 0%o" % (f.name, mode))
with self.opennodefile(filename, "w") as node_file:
node_file.write(contents)
os.chmod(node_file.name, mode)
logger.info("created nodefile: '%s'; mode: 0%o", node_file.name, mode)

View file

@ -46,11 +46,15 @@ class Sdt(object):
DEFAULT_ALT = 2500
# TODO: read in user's nodes.conf here; below are default node types from the GUI
DEFAULT_SPRITES = [
("router", "router.gif"), ("host", "host.gif"),
("PC", "pc.gif"), ("mdr", "mdr.gif"),
("prouter", "router_green.gif"), ("xen", "xen.gif"),
("hub", "hub.gif"), ("lanswitch", "lanswitch.gif"),
("wlan", "wlan.gif"), ("rj45", "rj45.gif"),
("router", "router.gif"),
("host", "host.gif"),
("PC", "pc.gif"),
("mdr", "mdr.gif"),
("prouter", "router_green.gif"),
("hub", "hub.gif"),
("lanswitch", "lanswitch.gif"),
("wlan", "wlan.gif"),
("rj45", "rj45.gif"),
("tunnel", "tunnel.gif"),
]
@ -404,8 +408,7 @@ class Sdt(object):
net = False
if nodetype == NodeTypes.DEFAULT.value or \
nodetype == NodeTypes.PHYSICAL.value or \
nodetype == NodeTypes.XEN.value:
nodetype == NodeTypes.PHYSICAL.value:
if model is None:
model = "router"
type = model

View file

@ -6,15 +6,10 @@ The CoreServices class handles configuration messages for sending
a list of available services to the GUI and for configuring individual
services.
"""
import importlib
import inspect
import os
import shlex
import sys
import time
from itertools import repeat
from core import CoreCommandError
from core import logger
from core.conf import Configurable
from core.conf import ConfigurableManager
@ -29,49 +24,6 @@ from core.enumerations import RegisterTlvs
from core.misc import utils
def _valid_module(path, file_name):
"""
Check if file is a valid python module.
:param str path: path to file
:param str file_name: file name to check
:return: True if a valid python module file, False otherwise
:rtype: bool
"""
file_path = os.path.join(path, file_name)
if not os.path.isfile(file_path):
return False
if file_name.startswith("_"):
return False
if not file_name.endswith(".py"):
return False
return True
def _is_service(module, member):
"""
Validates if a module member is a class and an instance of a CoreService.
:param module: module to validate for service
:param member: member to validate for service
:return: True if a valid service, False otherwise
:rtype: bool
"""
if not inspect.isclass(member):
return False
if not issubclass(member, CoreService):
return False
if member.__module__ != module.__name__:
return False
return True
class ServiceManager(object):
"""
Manages services available for CORE nodes to use.
@ -92,7 +44,7 @@ class ServiceManager(object):
insert = index + 1
break
logger.info("loading service: %s - %s: %s", insert, service, service._name)
logger.info("loading service: %s", service.__name__)
cls.services.insert(insert, service)
@classmethod
@ -118,32 +70,12 @@ class ServiceManager(object):
:return: list of core services
:rtype: list
"""
# validate path exists for importing
logger.info("getting custom services from: %s", path)
parent_path = os.path.dirname(path)
if parent_path not in sys.path:
logger.info("adding parent path to allow imports: %s", parent_path)
sys.path.append(parent_path)
# retrieve potential service modules, and filter out invalid modules
base_module = os.path.basename(path)
module_names = os.listdir(path)
module_names = filter(lambda x: _valid_module(path, x), module_names)
module_names = map(lambda x: x[:-3], module_names)
# import and add all service modules in the path
for module_name in module_names:
import_statement = "%s.%s" % (base_module, module_name)
logger.info("importing custom service module: %s", import_statement)
try:
module = importlib.import_module(import_statement)
members = inspect.getmembers(module, lambda x: _is_service(module, x))
for member in members:
clazz = member[1]
clazz.on_load()
cls.add(clazz)
except:
logger.exception("unexpected error during import, skipping: %s", import_statement)
services = utils.load_classes(path, CoreService)
for service in services:
if not service._name:
continue
service.on_load()
cls.add(service)
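# Hedged sketch of the simplified service discovery above: utils.load_classes()
# finds CoreService subclasses under a directory and ServiceManager keeps the
# registry. The directory path and service name are assumptions.
from core.service import ServiceManager

def example_load_custom_services():
    ServiceManager.add_services("/home/user/.core/myservices")
    return ServiceManager.get("MyService")  # None when not registered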
class CoreServices(ConfigurableManager):
@ -157,9 +89,7 @@ class CoreServices(ConfigurableManager):
name = "services"
config_type = RegisterTlvs.UTILITY.value
_invalid_custom_names = (
'core', 'addons', 'api', 'bsd', 'emane', 'misc', 'netns', 'phys', 'services', 'xen'
)
_invalid_custom_names = ("core", "api", "emane", "misc", "netns", "phys", "services")
def __init__(self, session):
"""
@ -175,33 +105,10 @@ class CoreServices(ConfigurableManager):
# dict of tuple of service objects, key is node number
self.customservices = {}
paths = self.session.get_config_item('custom_services_dir')
if paths:
for path in paths.split(','):
path = path.strip()
self.importcustom(path)
# TODO: remove need for cyclic import
from core.services import startup
self.is_startup_service = startup.Startup.is_startup_service
def importcustom(self, path):
"""
Import services from a myservices directory.
:param str path: path to import custom services from
:return: nothing
"""
if not path or len(path) == 0:
return
if not os.path.isdir(path):
logger.warn("invalid custom service directory specified" ": %s" % path)
return
ServiceManager.add_services(path)
def reset(self):
"""
Called when config message with reset flag is received
@ -293,21 +200,21 @@ class CoreServices(ConfigurableManager):
:return: nothing
"""
if services_str is not None:
logger.info("setting node specific services: %s", services_str)
logger.info("setting custom services for node(%s)", node.name)
services = services_str.split("|")
for name in services:
s = ServiceManager.get(name)
if s is None:
logger.warn("configured service %s for node %s is unknown", name, node.name)
continue
logger.info("adding configured service %s to node %s", s._name, node.name)
logger.info("adding service to node(%s): %s", node.name, s._name)
s = self.getcustomservice(node.objid, s)
node.addservice(s)
else:
logger.info("setting default services for node (%s) type (%s)", node.objid, nodetype)
logger.info("setting default services for node(%s) type (%s)", node.name, nodetype)
services = self.getdefaultservices(nodetype)
for s in services:
logger.info("adding default service %s to node %s", s._name, node.name)
logger.info("adding service to node(%s): %s", node.name, s._name)
s = self.getcustomservice(node.objid, s)
node.addservice(s)
@ -351,8 +258,8 @@ class CoreServices(ConfigurableManager):
"""
Start all services on a node.
:param core.netns.nodes.CoreNode node: node to start services on
:return:
:param core.netns.vnode.LxcNode node: node to start services on
:return: nothing
"""
services = sorted(node.services, key=lambda service: service._startindex)
use_startup_service = any(map(self.is_startup_service, services))
@ -373,7 +280,7 @@ class CoreServices(ConfigurableManager):
Start a service on a node. Create private dirs, generate config
files, and execute startup commands.
:param core.netns.nodes.CoreNode node: node to boot services on
:param core.netns.vnode.LxcNode node: node to boot services on
:param CoreService service: service to start
:param list services: service list
:param bool use_startup_service: flag to use startup services or not
@ -383,12 +290,9 @@ class CoreServices(ConfigurableManager):
self.bootnodecustomservice(node, service, services, use_startup_service)
return
logger.info("starting service %s (%s)" % (service._name, service._startindex))
logger.info("starting node(%s) service: %s (%s)", node.name, service._name, service._startindex)
for directory in service._dirs:
try:
node.privatedir(directory)
except:
logger.exception("Error making node %s dir %s", node.name, directory)
node.privatedir(directory)
for filename in service.getconfigfilenames(node.objid, services):
cfg = service.generateconfig(node, filename, services)
@ -397,31 +301,24 @@ class CoreServices(ConfigurableManager):
if use_startup_service and not self.is_startup_service(service):
return
for cmd in service.getstartup(node, services):
try:
# NOTE: this wait=False can be problematic!
node.cmd(shlex.split(cmd), wait=False)
except:
logger.exception("error starting command %s", cmd)
for args in service.getstartup(node, services):
# TODO: this wait=False can be problematic!
node.cmd(args, wait=False)
def bootnodecustomservice(self, node, service, services, use_startup_service):
"""
Start a custom service on a node. Create private dirs, use supplied
config files, and execute supplied startup commands.
:param core.netns.nodes.CoreNode node: node to boot services on
:param core.netns.vnode.LxcNode node: node to boot services on
:param CoreService service: service to start
:param list services: service list
:param bool use_startup_service: flag to use startup services or not
:return: nothing
"""
logger.info("starting service(%s) %s (%s)(custom)",
service, service._name, service._startindex)
logger.info("starting service(%s) %s (%s)(custom)", service, service._name, service._startindex)
for directory in service._dirs:
try:
node.privatedir(directory)
except:
logger.exception("error making node %s dir %s", node.name, directory)
node.privatedir(directory)
logger.info("service configurations: %s", service._configs)
for i, filename in enumerate(service._configs):
@ -443,12 +340,9 @@ class CoreServices(ConfigurableManager):
if use_startup_service and not self.is_startup_service(service):
return
for cmd in service._startup:
try:
# NOTE: this wait=False can be problematic!
node.cmd(shlex.split(cmd), wait=False)
except:
logger.exception("error starting command %s", cmd)
for args in service._startup:
# TODO: this wait=False can be problematic!
node.cmd(args, wait=False)
def copyservicefile(self, node, filename, cfg):
"""
@ -456,7 +350,7 @@ class CoreServices(ConfigurableManager):
config references an existing file that should be copied.
Returns True for local files, False for generated.
:param core.netns.nodes.CoreNode node: node to copy service for
:param core.netns.vnode.LxcNode node: node to copy service for
:param str filename: file name for a configured service
:param str cfg: configuration string
:return: True if successful, False otherwise
@ -465,7 +359,7 @@ class CoreServices(ConfigurableManager):
if cfg[:7] == 'file://':
src = cfg[7:]
src = src.split('\n')[0]
src = utils.expandcorepath(src, node.session, node)
src = utils.expand_corepath(src, node.session, node)
# TODO: glob here
node.nodefilecopy(filename, src, mode=0644)
return True
@ -475,7 +369,7 @@ class CoreServices(ConfigurableManager):
"""
Run validation commands for all services on a node.
:param core.netns.nodes.CoreNode node: node to validate services for
:param core.netns.vnode.LxcNode node: node to validate services for
:return: nothing
"""
services = sorted(node.services, key=lambda service: service._startindex)
@ -486,14 +380,13 @@ class CoreServices(ConfigurableManager):
"""
Run the validation command(s) for a service.
:param core.netns.nodes.CoreNode node: node to validate service for
:param core.netns.vnode.LxcNode node: node to validate service for
:param CoreService service: service to validate
:param list services: services for node
:return: service validation status
:rtype: int
"""
logger.info("validating service for node (%s - %s): %s (%s)",
node.objid, node.name, service._name, service._startindex)
logger.info("validating service for node (%s): %s (%s)", node.name, service._name, service._startindex)
if service._custom:
validate_cmds = service._validate
else:
@ -501,15 +394,13 @@ class CoreServices(ConfigurableManager):
status = 0
# has validate commands
if len(validate_cmds) > 0:
for cmd in validate_cmds:
logger.info("validating service %s using: %s", service._name, cmd)
if validate_cmds:
for args in validate_cmds:
logger.info("validating service %s using: %s", service._name, args)
try:
status, result = node.cmdresult(shlex.split(cmd))
if status != 0:
raise ValueError("non-zero exit status")
except:
logger.exception("validate command failed: %s", cmd)
node.check_cmd(args)
except CoreCommandError:
logger.exception("validate command failed")
status = -1
return status
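# Usage sketch for the validation flow above, assuming a booted node whose
# services have already been assigned; a non-zero status means at least one
# validate command failed.
from core import logger

def example_validate_services(core_services, node):
    for service in node.services:
        status = core_services.validatenodeservice(node, service, node.services)
        if status != 0:
            logger.warn("service failed validation: %s", service._name)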
@ -529,23 +420,20 @@ class CoreServices(ConfigurableManager):
"""
Stop a service on a node.
:param core.netns.nodes.CoreNode node: node to stop a service on
:param core.netns.vnode.LxcNode node: node to stop a service on
:param CoreService service: service to stop
:return: status for stopping the services
:rtype: str
"""
status = ""
if len(service._shutdown) == 0:
# doesn't have a shutdown command
status += "0"
else:
for cmd in service._shutdown:
status = "0"
if service._shutdown:
for args in service._shutdown:
try:
tmp = node.cmd(shlex.split(cmd), wait=True)
status += "%s" % tmp
except:
logger.exception("error running stop command %s", cmd)
status += "-1"
node.check_cmd(args)
except CoreCommandError:
logger.exception("error running stop command %s", args)
# TODO: determine if it's ok to just return the bad exit status
status = "-1"
return status
def configure_request(self, config_data):
@ -563,7 +451,7 @@ class CoreServices(ConfigurableManager):
session_id = config_data.session
opaque = config_data.opaque
logger.info("configuration request: node(%s) session(%s) opaque(%s)", node_id, session_id, opaque)
logger.debug("configuration request: node(%s) session(%s) opaque(%s)", node_id, session_id, opaque)
# send back a list of available services
if opaque is None:
@ -656,7 +544,7 @@ class CoreServices(ConfigurableManager):
return None
key = values.pop(0)
self.defaultservices[key] = values
logger.info("default services for type %s set to %s" % (key, values))
logger.debug("default services for type %s set to %s", key, values)
else:
# store service customized config in self.customservices[]
if node_id is None:
@ -735,7 +623,7 @@ class CoreServices(ConfigurableManager):
The file data is either auto-generated or comes from an existing config.
:param list services: service list
:param core.netns.nodes.CoreNode node: node to get service file from
:param core.netns.vnode.LxcNode node: node to get service file from
:param str filename: file name to retrieve
:return: file message for node
"""
@ -752,7 +640,7 @@ class CoreServices(ConfigurableManager):
# get the file data
data = self.getservicefiledata(svc, filename)
if data is None:
data = "%s" % (svc.generateconfig(node, filename, services))
data = "%s" % svc.generateconfig(node, filename, services)
else:
data = "%s" % data
filetypestr = "service:%s" % svc._name
@ -851,15 +739,12 @@ class CoreServices(ConfigurableManager):
else:
cmds = s.getstartup(node, services)
if len(cmds) > 0:
for cmd in cmds:
for args in cmds:
try:
# node.cmd(shlex.split(cmd), wait = False)
status = node.cmd(shlex.split(cmd), wait=True)
if status != 0:
fail += "Start %s(%s)," % (s._name, cmd)
except:
logger.exception("error starting command %s", cmd)
fail += "Start %s," % s._name
node.check_cmd(args)
except CoreCommandError:
logger.exception("error starting command")
fail += "Start %s(%s)," % (s._name, args)
if event_type == EventTypes.PAUSE.value:
status = self.validatenodeservice(node, s, services)
if status != 0:
@ -877,11 +762,8 @@ class CoreServices(ConfigurableManager):
cfg = self.getservicefiledata(s, filename)
if cfg is None:
cfg = s.generateconfig(node, filename, services)
try:
node.nodefile(filename, cfg)
except:
logger.exception("error in configure file: %s", filename)
fail += "%s," % s._name
node.nodefile(filename, cfg)
fail_data = ""
if len(fail) > 0:
@ -975,7 +857,7 @@ class CoreService(object):
Return the configuration string to be written to a file or sent
to the GUI for customization.
:param core.netns.nodes.CoreNode node: node to generate config for
:param core.netns.vnode.LxcNode node: node to generate config for
:param str filename: file name to generate config for
:param list services: services for node
:return: nothing
@ -990,7 +872,7 @@ class CoreService(object):
overridden to provide node-specific commands that may be
based on other services.
:param core.netns.nodes.CoreNode node: node to get startup for
:param core.netns.vnode.LxcNode node: node to get startup for
:param list services: services for node
:return: startup commands
:rtype: tuple
@ -1005,7 +887,7 @@ class CoreService(object):
overridden to provide node-specific commands that may be
based on other services.
:param core.netns.nodes.CoreNode node: node to validate
:param core.netns.vnode.LxcNode node: node to validate
:param list services: services for node
:return: validation commands
:rtype: tuple
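# A minimal custom-service sketch using the class attributes and classmethods
# described in this file; everything named "Example*" (and the generated shell
# content) is hypothetical and only meant to show the shape of a service.
from core.service import CoreService

class ExampleService(CoreService):
    _name = "ExampleService"
    _group = "Utility"
    _configs = ("exampleservice.sh",)
    _startup = ("sh exampleservice.sh",)
    _validate = ("pidof sleep",)

    @classmethod
    def generateconfig(cls, node, filename, services):
        return "#!/bin/sh\n# auto-generated by ExampleService\nsleep 3600 &\n"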
@ -1027,10 +909,8 @@ class CoreService(object):
cls._shutdown, cls._validate, cls._meta, cls._starttime]
if not cls._custom:
# this is always reached due to classmethod
valmap[valmap.index(cls._configs)] = \
cls.getconfigfilenames(node.objid, services)
valmap[valmap.index(cls._startup)] = \
cls.getstartup(node, services)
valmap[valmap.index(cls._configs)] = cls.getconfigfilenames(node.objid, services)
valmap[valmap.index(cls._startup)] = cls.getstartup(node, services)
vals = map(lambda a, b: "%s=%s" % (a, str(b)), cls.keys, valmap)
return "|".join(vals)
@ -1067,7 +947,7 @@ class CoreService(object):
elif key == "meta":
value = str(value)
else:
value = utils.maketuplefromstr(value, str)
value = utils.make_tuple_fromstr(value, str)
if key == "dirs":
self._dirs = value

View file

@ -90,7 +90,7 @@ class BirdService(CoreService):
common to Bird's routing daemons.
"""
_name = "BirdDaemon"
_name = None
_group = "BIRD"
_depends = ("bird",)
_dirs = ()

View file

@ -13,7 +13,7 @@ class NrlService(CoreService):
Parent class for NRL services. Defines properties and methods
common to NRL's routing daemons.
"""""
_name = "Protean"
_name = None
_group = "ProtoSvc"
_depends = ()
_dirs = ()
@ -57,7 +57,7 @@ class MgenSinkService(NrlService):
def generateconfig(cls, node, filename, services):
cfg = "0.0 LISTEN UDP 5000\n"
for ifc in node.netifs():
name = utils.sysctldevname(ifc.name)
name = utils.sysctl_devname(ifc.name)
cfg += "0.0 Join 224.225.1.2 INTERFACE %s\n" % name
return cfg
@ -126,7 +126,7 @@ class NrlSmf(NrlService):
servicenames = map(lambda x: x._name, services)
netifs = filter(lambda x: not getattr(x, 'control', False), node.netifs())
if len(netifs) == 0:
return ()
return ""
if "arouted" in servicenames:
comments += "# arouted service is enabled\n"
@ -298,8 +298,7 @@ class OlsrOrg(NrlService):
#######################################
### Linux specific OLSRd extensions ###
#######################################
# these parameters are only working on linux at the moment, but might become
# useful on BSD in the future
# these parameters are only working on linux at the moment
# SrcIpRoutes tells OLSRd to set the Src flag of host routes to the originator-ip
# of the node. In addition to this an additional localhost device is created
@ -516,7 +515,7 @@ LinkQualityFishEye 0
# - /lib, followed by /usr/lib
#
# the examples in this list are for linux, so check if the plugin is
# available if you use windows/BSD.
# available if you use windows.
# each plugin should have a README file in its lib subfolder
# LoadPlugin "olsrd_txtinfo.dll"
@ -607,7 +606,7 @@ class MgenActor(NrlService):
servicenames = map(lambda x: x._name, services)
netifs = filter(lambda x: not getattr(x, 'control', False), node.netifs())
if len(netifs) == 0:
return ()
return ""
cfg += comments + cmd + " < /dev/null > /dev/null 2>&1 &\n\n"
return cfg

View file

@ -222,7 +222,7 @@ class QuaggaService(CoreService):
Parent class for Quagga services. Defines properties and methods
common to Quagga's routing daemons.
"""
_name = "QuaggaDaemon"
_name = None
_group = "Quagga"
_depends = ("zebra",)
_dirs = ()

View file

@ -11,7 +11,7 @@ class SdnService(CoreService):
"""
Parent class for SDN services.
"""
_name = "SdnProcess"
_name = None
_group = "SDN"
_depends = ()
_dirs = ()

View file

@ -3,8 +3,8 @@ utility.py: defines miscellaneous utility services.
"""
import os
import subprocess
from core import CoreCommandError
from core import constants
from core.misc import utils
from core.misc.ipaddress import Ipv4Prefix
@ -16,7 +16,7 @@ class UtilService(CoreService):
"""
Parent class for utility services.
"""
_name = "UtilityProcess"
_name = None
_group = "Utility"
_depends = ()
_dirs = ()
@ -40,10 +40,8 @@ class IPForwardService(UtilService):
def generateconfig(cls, node, filename, services):
if os.uname()[0] == "Linux":
return cls.generateconfiglinux(node, filename, services)
elif os.uname()[0] == "FreeBSD":
return cls.generateconfigbsd(node, filename, services)
else:
raise Exception, "unknown platform"
raise Exception("unknown platform")
@classmethod
def generateconfiglinux(cls, node, filename, services):
@ -60,24 +58,13 @@ class IPForwardService(UtilService):
%(sysctl)s -w net.ipv4.conf.default.rp_filter=0
""" % {'sysctl': constants.SYSCTL_BIN}
for ifc in node.netifs():
name = utils.sysctldevname(ifc.name)
name = utils.sysctl_devname(ifc.name)
cfg += "%s -w net.ipv4.conf.%s.forwarding=1\n" % (constants.SYSCTL_BIN, name)
cfg += "%s -w net.ipv4.conf.%s.send_redirects=0\n" % \
(constants.SYSCTL_BIN, name)
cfg += "%s -w net.ipv4.conf.%s.rp_filter=0\n" % (constants.SYSCTL_BIN, name)
return cfg
@classmethod
def generateconfigbsd(cls, node, filename, services):
return """\
#!/bin/sh
# auto-generated by IPForward service (utility.py)
%s -w net.inet.ip.forwarding=1
%s -w net.inet6.ip6.forwarding=1
%s -w net.inet.icmp.bmcastecho=1
%s -w net.inet.icmp.icmplim=0
""" % (constants.SYSCTL_BIN, constants.SYSCTL_BIN, constants.SYSCTL_BIN, constants.SYSCTL_BIN)
class DefaultRouteService(UtilService):
_name = "DefaultRoute"
@ -108,10 +95,8 @@ class DefaultRouteService(UtilService):
else:
if os.uname()[0] == "Linux":
rtcmd = "ip route add default via"
elif os.uname()[0] == "FreeBSD":
rtcmd = "route add -%s" % fam
else:
raise Exception, "unknown platform"
raise Exception("unknown platform")
return "%s %s" % (rtcmd, net.min_addr())
@ -132,10 +117,8 @@ class DefaultMulticastRouteService(UtilService):
continue
if os.uname()[0] == "Linux":
rtcmd = "ip route add 224.0.0.0/4 dev"
elif os.uname()[0] == "FreeBSD":
rtcmd = "route add 224.0.0.0/4 -iface"
else:
raise Exception, "unknown platform"
raise Exception("unknown platform")
cfg += "%s %s\n" % (rtcmd, ifc.name)
cfg += "\n"
break
@ -176,21 +159,15 @@ class StaticRouteService(UtilService):
else:
if os.uname()[0] == "Linux":
rtcmd = "#/sbin/ip route add %s via" % dst
elif os.uname()[0] == "FreeBSD":
rtcmd = "#/sbin/route add -%s %s" % (fam, dst)
else:
raise Exception, "unknown platform"
raise Exception("unknown platform")
return "%s %s" % (rtcmd, net.min_addr())
class SshService(UtilService):
_name = "SSH"
if os.uname()[0] == "FreeBSD":
_configs = ("startsshd.sh", "sshd_config",)
_dirs = ()
else:
_configs = ("startsshd.sh", "/etc/ssh/sshd_config",)
_dirs = ("/etc/ssh", "/var/run/sshd",)
_configs = ("startsshd.sh", "/etc/ssh/sshd_config",)
_dirs = ("/etc/ssh", "/var/run/sshd",)
_startup = ("sh startsshd.sh",)
_shutdown = ("killall sshd",)
_validate = ()
@ -201,14 +178,9 @@ class SshService(UtilService):
Use a startup script for launching sshd in order to wait for host
key generation.
"""
if os.uname()[0] == "FreeBSD":
sshcfgdir = node.nodedir
sshstatedir = node.nodedir
sshlibdir = "/usr/libexec"
else:
sshcfgdir = cls._dirs[0]
sshstatedir = cls._dirs[1]
sshlibdir = "/usr/lib/openssh"
sshcfgdir = cls._dirs[0]
sshstatedir = cls._dirs[1]
sshlibdir = "/usr/lib/openssh"
if filename == "startsshd.sh":
return """\
#!/bin/sh
@ -418,8 +390,8 @@ class HttpService(UtilService):
Detect the apache2 version using the 'a2query' command.
"""
try:
status, result = utils.cmdresult(['a2query', '-v'])
except subprocess.CalledProcessError:
status, result = utils.cmd_output(['a2query', '-v'])
except CoreCommandError:
status = -1
if status == 0 and result[:3] == '2.4':

View file

@ -74,7 +74,7 @@ class XorpService(CoreService):
Parent class for XORP services. Defines properties and methods
common to XORP's routing daemons.
"""
_name = "XorpDaemon"
_name = None
_group = "XORP"
_depends = ("xorp_rtrmgr",)
_dirs = ()

View file

@ -3,11 +3,9 @@ session.py: defines the Session class used by the core-daemon daemon program
that manages a CORE session.
"""
import atexit
import os
import pprint
import random
import shlex
import shutil
import subprocess
import tempfile
@ -45,72 +43,20 @@ from core.mobility import Ns2ScriptedMobility
from core.netns import nodes
from core.sdt import Sdt
from core.service import CoreServices
from core.xen.xenconfig import XenConfigManager
from core.xml.xmlsession import save_session_xml
class SessionManager(object):
"""
Manages currently known sessions.
"""
sessions = set()
session_lock = threading.Lock()
@classmethod
def add(cls, session):
"""
Add a session to the manager.
:param Session session: session to add
:return: nothing
"""
with cls.session_lock:
logger.info("adding session to manager: %s", session.session_id)
cls.sessions.add(session)
@classmethod
def remove(cls, session):
"""
Remove session from the manager.
:param Session session: session to remove
:return: nothing
"""
with cls.session_lock:
logger.info("removing session from manager: %s", session.session_id)
if session in cls.sessions:
cls.sessions.remove(session)
else:
logger.info("session was already removed: %s", session.session_id)
@classmethod
def on_exit(cls):
"""
Method used to shutdown all currently known sessions, in case of unexpected exit.
:return: nothing
"""
logger.info("caught program exit, shutting down all known sessions")
while cls.sessions:
with cls.session_lock:
session = cls.sessions.pop()
logger.error("WARNING: automatically shutting down non-persistent session %s - %s",
session.session_id, session.name)
session.shutdown()
class Session(object):
"""
CORE session manager.
"""
def __init__(self, session_id, config=None, persistent=False, mkdir=True):
def __init__(self, session_id, config=None, mkdir=True):
"""
Create a Session instance.
:param int session_id: session id
:param dict config: session configuration
:param bool persistent: flag is session is considered persistent
:param bool mkdir: flag to determine if a directory should be made
"""
self.session_id = session_id
@ -129,7 +75,6 @@ class Session(object):
self.file_name = None
self.thumbnail = None
self.user = None
self._state_time = time.time()
self.event_loop = EventLoop()
# dict of objects: all nodes and nets
@ -142,6 +87,7 @@ class Session(object):
# TODO: should the default state be definition?
self.state = EventTypes.NONE.value
self._state_time = time.time()
self._state_file = os.path.join(self.session_dir, "state")
self._hooks = {}
@ -149,9 +95,6 @@ class Session(object):
self.add_state_hook(state=EventTypes.RUNTIME_STATE.value, hook=self.runtime_state_hook)
if not persistent:
SessionManager.add(self)
self.master = False
# handlers for broadcasting information
@ -186,10 +129,6 @@ class Session(object):
self.emane = EmaneManager(session=self)
self.add_config_object(EmaneManager.name, EmaneManager.config_type, self.emane.configure)
# setup xen
self.xen = XenConfigManager(session=self)
self.add_config_object(XenConfigManager.name, XenConfigManager.config_type, self.xen.configure)
# setup sdt
self.sdt = Sdt(session=self)
@ -228,9 +167,6 @@ class Session(object):
for handler in self.shutdown_handlers:
handler(self)
# remove this session from the manager
SessionManager.remove(self)
def broadcast_event(self, event_data):
"""
Handle event data that should be provided to event handler.
@ -301,27 +237,27 @@ class Session(object):
"""
Set the session's current state.
:param int state: state to set to
:param core.enumerations.EventTypes state: state to set to
:param send_event: if true, generate core API event messages
:return: nothing
"""
state_name = coreapi.state_name(state)
state_value = state.value
state_name = state.name
if self.state == state:
logger.info("session is already in state: %s, skipping change", state_name)
if self.state == state_value:
logger.info("session(%s) is already in state: %s, skipping change", self.session_id, state_name)
return
self.state = state
self.state = state_value
self._state_time = time.time()
logger.info("changing session %s to state %s(%s) at %s",
self.session_id, state, state_name, self._state_time)
logger.info("changing session(%s) to state %s", self.session_id, state_name)
self.write_state(state)
self.run_hooks(state)
self.run_state_hooks(state)
self.write_state(state_value)
self.run_hooks(state_value)
self.run_state_hooks(state_value)
if send_event:
event_data = EventData(event_type=state, time="%s" % time.time())
event_data = EventData(event_type=state_value, time="%s" % time.time())
self.broadcast_event(event_data)
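# Hedged usage sketch of the new enum-based API above: set_state() now takes
# an EventTypes member instead of a raw integer value, assuming "session" is
# an existing Session instance.
from core.enumerations import EventTypes

def example_set_state(session):
    session.set_state(EventTypes.RUNTIME_STATE, send_event=True)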
def write_state(self, state):
@ -424,11 +360,11 @@ class Session(object):
# execute hook file
try:
subprocess.check_call(["/bin/sh", file_name], stdin=open(os.devnull, 'r'),
stdout=stdout, stderr=stderr, close_fds=True,
cwd=self.session_dir, env=self.get_environment())
except subprocess.CalledProcessError:
logger.exception("error running hook '%s'", file_name)
args = ["/bin/sh", file_name]
subprocess.check_call(args, stdout=stdout, stderr=stderr,
close_fds=True, cwd=self.session_dir, env=self.get_environment())
except (OSError, subprocess.CalledProcessError):
logger.exception("error running hook: %s", file_name)
def run_state_hooks(self, state):
"""
@ -515,17 +451,17 @@ class Session(object):
environment_config_file = os.path.join(constants.CORE_CONF_DIR, "environment")
try:
if os.path.isfile(environment_config_file):
utils.readfileintodict(environment_config_file, env)
utils.load_config(environment_config_file, env)
except IOError:
logger.exception("error reading environment configuration file: %s", environment_config_file)
logger.warn("environment configuration file does not exist: %s", environment_config_file)
# attempt to read and add user environment file
if self.user:
environment_user_file = os.path.join("/home", self.user, ".core", "environment")
try:
utils.readfileintodict(environment_user_file, env)
utils.load_config(environment_user_file, env)
except IOError:
logger.exception("error reading user core environment settings file: %s", environment_user_file)
logger.debug("user core environment settings file not present: %s", environment_user_file)
return env
@ -604,6 +540,7 @@ class Session(object):
:param int object_id: object id to retrieve
:return: object for the given id
:rtype: core.coreobj.PyCoreNode
"""
if object_id not in self.objects:
raise KeyError("unknown object id %s" % object_id)
@ -672,7 +609,7 @@ class Session(object):
:return: nothing
"""
register_tlv = RegisterTlvs(object_type)
logger.info("adding config object callback: %s - %s", name, register_tlv)
logger.debug("adding config object callback: %s - %s", name, register_tlv)
with self._config_objects_lock:
self.config_objects[name] = (object_type, callback)
@ -686,8 +623,9 @@ class Session(object):
:rtype: list
"""
name = config_data.object
logger.info("session(%s): handling config message(%s): \n%s",
self.session_id, name, config_data)
logger.info("session(%s) setting config(%s)", self.session_id, name)
for key, value in config_data.__dict__.iteritems():
logger.debug("%s = %s", key, value)
replies = []
@ -857,7 +795,8 @@ class Session(object):
# this is called from instantiate() after receiving an event message
# for the instantiation state, and from the broker when distributed
# nodes have been started
logger.info("checking runtime: %s", self.state)
logger.info("session(%s) checking if not in runtime state, current state: %s", self.session_id,
coreapi.state_name(self.state))
if self.state == EventTypes.RUNTIME_STATE.value:
logger.info("valid runtime state found, returning")
return
@ -868,7 +807,7 @@ class Session(object):
# start event loop and set to runtime
self.event_loop.run()
self.set_state(EventTypes.RUNTIME_STATE.value, send_event=True)
self.set_state(EventTypes.RUNTIME_STATE, send_event=True)
def data_collect(self):
"""
@ -903,12 +842,12 @@ class Session(object):
and links remain.
"""
node_count = self.get_node_count()
logger.info("checking shutdown for session %d: %d nodes remaining", self.session_id, node_count)
logger.info("session(%s) checking shutdown: %s nodes remaining", self.session_id, node_count)
shutdown = False
if node_count == 0:
shutdown = True
self.set_state(state=EventTypes.SHUTDOWN_STATE.value)
self.set_state(EventTypes.SHUTDOWN_STATE)
return shutdown
@ -928,10 +867,10 @@ class Session(object):
"""
with self._objects_lock:
for obj in self.objects.itervalues():
# TODO: PyCoreNode is not the type to check, but there are two types, due to bsd and netns
# TODO: PyCoreNode is not the type to check
if isinstance(obj, nodes.PyCoreNode) and not nodeutils.is_node(obj, NodeTypes.RJ45):
# add a control interface if configured
logger.info("booting node: %s - %s", obj.objid, obj.name)
logger.info("booting node: %s", obj.name)
self.add_remove_control_interface(node=obj, remove=False)
obj.boot()
@ -1022,6 +961,7 @@ class Session(object):
:return: control net object
:rtype: core.netns.nodes.CtrlNet
"""
logger.debug("add/remove control net: index(%s) remove(%s) conf_required(%s)", net_index, remove, conf_required)
prefix_spec_list = self.get_control_net_prefixes()
prefix_spec = prefix_spec_list[net_index]
if not prefix_spec:
@ -1031,6 +971,7 @@ class Session(object):
else:
control_net_class = nodeutils.get_node_class(NodeTypes.CONTROL_NET)
prefix_spec = control_net_class.DEFAULT_PREFIX_LIST[net_index]
logger.debug("prefix spec: %s", prefix_spec)
server_interface = self.get_control_net_server_interfaces()[net_index]
@ -1183,7 +1124,7 @@ class Session(object):
header = "CORE session %s host entries" % self.session_id
if remove:
logger.info("Removing /etc/hosts file entries.")
utils.filedemunge("/etc/hosts", header)
utils.file_demunge("/etc/hosts", header)
return
entries = []
@ -1194,7 +1135,7 @@ class Session(object):
logger.info("Adding %d /etc/hosts file entries." % len(entries))
utils.filemunge("/etc/hosts", header, "\n".join(entries) + "\n")
utils.file_munge("/etc/hosts", header, "\n".join(entries) + "\n")
def runtime(self):
"""
@ -1232,13 +1173,14 @@ class Session(object):
name = ""
logger.info("scheduled event %s at time %s data=%s", name, event_time + current_time, data)
# TODO: if data is None, this blows up, but this ties into how event functions are run, need to clean that up
def run_event(self, node_id=None, name=None, data=None):
"""
Run a scheduled event, executing commands in the data string.
:param int node_id: node id to run event
:param str name: event name
:param data: event data
:param str data: event data
:return: nothing
"""
now = self.runtime()
@ -1246,12 +1188,11 @@ class Session(object):
name = ""
logger.info("running event %s at time %s cmd=%s" % (name, now, data))
commands = shlex.split(data)
if not node_id:
utils.mutedetach(commands)
utils.mute_detach(data)
else:
node = self.get_object(node_id)
node.cmd(commands, wait=False)
node.cmd(data, wait=False)
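# Sketch of the event-execution change above: a session-wide event detaches
# the command with utils.mute_detach(), while a node-scoped event passes the
# command string to node.cmd(wait=False). The commands are assumptions.
def example_run_event(session):
    session.run_event(name="global-ping", data="ping -c 1 127.0.0.1")
    session.run_event(node_id=1, name="node-ping", data="ping -c 1 127.0.0.1")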
def send_objects(self):
"""
@ -1511,7 +1452,3 @@ class SessionMetaData(ConfigurableManager):
:return: configuration items iterator
"""
return self.configs.iteritems()
# configure the program exit function to run
atexit.register(SessionManager.on_exit)

View file

@ -1,788 +0,0 @@
"""
xen.py: implementation of the XenNode and XenVEth classes that support
generating Xen domUs based on an ISO image and persistent configuration area
"""
import base64
import os
import shutil
import string
import subprocess
import sys
import threading
import crypt
from core import constants
from core import logger
from core.coreobj import PyCoreNetIf
from core.coreobj import PyCoreNode
from core.enumerations import NodeTypes
from core.misc import nodeutils
from core.misc import utils
from core.netns.vnode import LxcNode
try:
import parted
except ImportError:
logger.error("failed to import parted for xen nodes")
try:
import fsimage
except ImportError:
# fix for fsimage under Ubuntu
sys.path.append("/usr/lib/xen-default/lib/python")
try:
import fsimage
except ImportError:
logger.error("failed to import fsimage for xen nodes")
# XXX move these out to config file
AWK_PATH = "/bin/awk"
KPARTX_PATH = "/sbin/kpartx"
LVCREATE_PATH = "/sbin/lvcreate"
LVREMOVE_PATH = "/sbin/lvremove"
LVCHANGE_PATH = "/sbin/lvchange"
MKFSEXT4_PATH = "/sbin/mkfs.ext4"
MKSWAP_PATH = "/sbin/mkswap"
TAR_PATH = "/bin/tar"
SED_PATH = "/bin/sed"
XM_PATH = "/usr/sbin/xm"
UDEVADM_PATH = "/sbin/udevadm"
class XenVEth(PyCoreNetIf):
def __init__(self, node, name, localname, mtu=1500, net=None, start=True, hwaddr=None):
# note that net arg is ignored
PyCoreNetIf.__init__(self, node=node, name=name, mtu=mtu)
self.localname = localname
self.up = False
self.hwaddr = hwaddr
if start:
self.startup()
def startup(self):
cmd = [XM_PATH, "network-attach", self.node.vmname, "vifname=%s" % self.localname, "script=vif-core"]
if self.hwaddr is not None:
cmd.append("mac=%s" % self.hwaddr)
subprocess.check_call(cmd)
subprocess.check_call([constants.IP_BIN, "link", "set", self.localname, "up"])
self.up = True
def shutdown(self):
if not self.up:
return
if self.localname:
if self.hwaddr is not None:
pass
# this should be doable, but some argument isn't a string
# check_call([XM_PATH, "network-detach", self.node.vmname,
# self.hwaddr])
self.up = False
class XenNode(PyCoreNode):
apitype = NodeTypes.XEN.value
files_to_ignore = frozenset([
# "ipforward.sh",
"quaggaboot.sh",
])
files_redirection = {
"ipforward.sh": "/core-tmp/ipforward.sh",
}
cmds_to_ignore = frozenset([
# "sh ipforward.sh",
# "sh quaggaboot.sh zebra",
# "sh quaggaboot.sh ospfd",
# "sh quaggaboot.sh ospf6d",
"killall zebra",
"killall ospfd",
"killall ospf6d",
"pidof zebra", "pidof ospfd", "pidof ospf6d",
])
def redir_cmd_ipforward(self):
sysctl_file = open(os.path.join(self.mountdir, self.etcdir, "sysctl.conf"), "a")
p1 = subprocess.Popen([AWK_PATH, "/^\/sbin\/sysctl -w/ {print $NF}",
os.path.join(self.nodedir, "core-tmp/ipforward.sh")], stdout=sysctl_file)
p1.wait()
sysctl_file.close()
def redir_cmd_zebra(self):
subprocess.check_call([SED_PATH, "-i", "-e", "s/^zebra=no/zebra=yes/",
os.path.join(self.mountdir, self.etcdir, "quagga/daemons")])
def redir_cmd_ospfd(self):
subprocess.check_call([SED_PATH, "-i", "-e", "s/^ospfd=no/ospfd=yes/",
os.path.join(self.mountdir, self.etcdir, "quagga/daemons")])
def redir_cmd_ospf6d(self):
subprocess.check_call([SED_PATH, "-i", "-e", "s/^ospf6d=no/ospf6d=yes/",
os.path.join(self.mountdir, self.etcdir, "quagga/daemons")])
cmds_redirection = {
"sh ipforward.sh": redir_cmd_ipforward,
"sh quaggaboot.sh zebra": redir_cmd_zebra,
"sh quaggaboot.sh ospfd": redir_cmd_ospfd,
"sh quaggaboot.sh ospf6d": redir_cmd_ospf6d,
}
# CoreNode: no __init__, take from LxcNode & SimpleLxcNode
def __init__(self, session, objid=None, name=None,
nodedir=None, bootsh="boot.sh", start=True, model=None,
vgname=None, ramsize=None, disksize=None,
isofile=None):
# SimpleLxcNode initialization
PyCoreNode.__init__(self, session=session, objid=objid, name=name)
self.nodedir = nodedir
self.model = model
# indicates startup() has been invoked and disk has been initialized
self.up = False
# indicates boot() has been invoked and domU is running
self.booted = False
self.ifindex = 0
self.lock = threading.RLock()
self._netif = {}
# domU name
self.vmname = "c" + str(session.session_id) + "-" + name
# LVM volume group name
self.vgname = self.getconfigitem("vg_name", vgname)
# LVM logical volume name
self.lvname = self.vmname + "-"
# LVM logical volume device path name
self.lvpath = os.path.join("/dev", self.vgname, self.lvname)
self.disksize = self.getconfigitem("disk_size", disksize)
self.ramsize = int(self.getconfigitem("ram_size", ramsize))
self.isofile = self.getconfigitem("iso_file", isofile)
# temporary mount point for paused VM persistent filesystem
self.mountdir = None
self.etcdir = self.getconfigitem("etc_path")
# TODO: remove this temporary hack
self.files_redirection["/usr/local/etc/quagga/Quagga.conf"] = os.path.join(
self.getconfigitem("mount_path"), self.etcdir, "quagga/Quagga.conf")
# LxcNode initialization
# self.makenodedir()
if self.nodedir is None:
self.nodedir = os.path.join(session.session_dir, self.name + ".conf")
self.mountdir = self.nodedir + self.getconfigitem("mount_path")
if not os.path.isdir(self.mountdir):
os.makedirs(self.mountdir)
self.tmpnodedir = True
else:
raise Exception("Xen PVM node requires a temporary nodedir")
self.bootsh = bootsh
if start:
self.startup()
def getconfigitem(self, name, default=None):
"""
Configuration items come from the xen.conf file and/or input from
the GUI, and are stored in the session using the XenConfigManager
object. self.model is used to identify particular profiles
associated with a node type in the GUI.
"""
return self.session.xen.getconfigitem(name=name, model=self.model, node=self, value=default)
# from class LxcNode (also SimpleLxcNode)
def startup(self):
logger.warn("XEN PVM startup() called: preparing disk for %s", self.name)
self.lock.acquire()
try:
if self.up:
raise Exception("already up")
self.createlogicalvolume()
self.createpartitions()
persistdev = self.createfilesystems()
subprocess.check_call([constants.MOUNT_BIN, "-t", "ext4", persistdev, self.mountdir])
self.untarpersistent(tarname=self.getconfigitem("persist_tar_iso"),
iso=True)
self.setrootpassword(pw=self.getconfigitem("root_password"))
self.sethostname(old="UBASE", new=self.name)
self.setupssh(keypath=self.getconfigitem("ssh_key_path"))
self.createvm()
self.up = True
finally:
self.lock.release()
# from class LxcNode (also SimpleLxcNode)
def boot(self):
logger.warn("XEN PVM boot() called")
self.lock.acquire()
if not self.up:
raise Exception("Can't boot VM without initialized disk")
if self.booted:
self.lock.release()
return
self.session.services.bootnodeservices(self)
tarname = self.getconfigitem("persist_tar")
if tarname:
self.untarpersistent(tarname=tarname, iso=False)
try:
subprocess.check_call([constants.UMOUNT_BIN, self.mountdir])
self.unmount_all(self.mountdir)
subprocess.check_call([UDEVADM_PATH, "settle"])
subprocess.check_call([KPARTX_PATH, "-d", self.lvpath])
# time.sleep(5)
# time.sleep(1)
# unpause VM
logger.warn("XEN PVM boot() unpause domU %s", self.vmname)
utils.mutecheck_call([XM_PATH, "unpause", self.vmname])
self.booted = True
finally:
self.lock.release()
def validate(self):
self.session.services.validatenodeservices(self)
# from class LxcNode (also SimpleLxcNode)
def shutdown(self):
logger.warn("XEN PVM shutdown() called")
if not self.up:
return
self.lock.acquire()
try:
if self.up:
# sketch from SimpleLxcNode
for netif in self.netifs():
netif.shutdown()
try:
# RJE XXX what to do here
if self.booted:
utils.mutecheck_call([XM_PATH, "destroy", self.vmname])
self.booted = False
except (OSError, subprocess.CalledProcessError):
# ignore this error too, the VM may have exited already
logger.exception("error during shutdown")
# discard LVM volume
lvm_remove_count = 0
while os.path.exists(self.lvpath):
try:
subprocess.check_call([UDEVADM_PATH, "settle"])
utils.mutecall([LVCHANGE_PATH, "-an", self.lvpath])
lvm_remove_count += 1
utils.mutecall([LVREMOVE_PATH, "-f", self.lvpath])
except OSError:
logger.exception("error during shutdown")
if lvm_remove_count > 1:
logger.warn("XEN PVM shutdown() required %d lvremove executions.", lvm_remove_count)
self._netif.clear()
del self.session
self.up = False
finally:
self.rmnodedir()
self.lock.release()
def createlogicalvolume(self):
"""
Create a logical volume for this Xen domU. Called from startup().
"""
if os.path.exists(self.lvpath):
raise Exception, "LVM volume already exists"
utils.mutecheck_call([LVCREATE_PATH, "--size", self.disksize,
"--name", self.lvname, self.vgname])
def createpartitions(self):
"""
Partition the LVM volume into persistent and swap partitions
using the parted module.
"""
dev = parted.Device(path=self.lvpath)
dev.removeFromCache()
disk = parted.freshDisk(dev, "msdos")
constraint = parted.Constraint(device=dev)
persist_size = int(0.75 * constraint.maxSize)
self.createpartition(device=dev, disk=disk, start=1,
end=persist_size - 1, type="ext4")
self.createpartition(device=dev, disk=disk, start=persist_size,
end=constraint.maxSize - 1, type="linux-swap(v1)")
disk.commit()
def createpartition(self, device, disk, start, end, type):
"""
Create a single partition of the specified type and size and add
it to the disk object, using the parted module.
"""
geo = parted.Geometry(device=device, start=start, end=end)
fs = parted.FileSystem(type=type, geometry=geo)
part = parted.Partition(disk=disk, fs=fs, type=parted.PARTITION_NORMAL, geometry=geo)
constraint = parted.Constraint(exactGeom=geo)
disk.addPartition(partition=part, constraint=constraint)
def createfilesystems(self):
"""
Make an ext4 filesystem and swap space. Return the device name for
the persistent partition so we can mount it.
"""
output = subprocess.Popen([KPARTX_PATH, "-l", self.lvpath],
stdout=subprocess.PIPE).communicate()[0]
lines = output.splitlines()
persistdev = "/dev/mapper/" + lines[0].strip().split(" ")[0].strip()
swapdev = "/dev/mapper/" + lines[1].strip().split(" ")[0].strip()
subprocess.check_call([KPARTX_PATH, "-a", self.lvpath])
utils.mutecheck_call([MKFSEXT4_PATH, "-L", "persist", persistdev])
utils.mutecheck_call([MKSWAP_PATH, "-f", "-L", "swap", swapdev])
return persistdev
def untarpersistent(self, tarname, iso):
"""
Unpack a persistent template tar file to the mounted mount dir.
Uses fsimage library to read from an ISO file.
"""
# filename may use hostname
tarname = tarname.replace("%h", self.name)
if iso:
try:
fs = fsimage.open(self.isofile, 0)
except IOError:
logger.exception("Failed to open ISO file: %s", self.isofile)
return
try:
tardata = fs.open_file(tarname).read()
except IOError:
logger.exception("Failed to open tar file: %s", tarname)
return
finally:
del fs
else:
try:
f = open(tarname)
tardata = f.read()
f.close()
except IOError:
logger.exception("Failed to open tar file: %s", tarname)
return
p = subprocess.Popen([TAR_PATH, "-C", self.mountdir, "--numeric-owner",
"-xf", "-"], stdin=subprocess.PIPE)
p.communicate(input=tardata)
p.wait()
def setrootpassword(self, pw):
"""
Set the root password by updating the shadow password file that
is on the filesystem mounted in the temporary area.
"""
saltedpw = crypt.crypt(pw, "$6$" + base64.b64encode(os.urandom(12)))
subprocess.check_call([SED_PATH, "-i", "-e",
"/^root:/s_^root:\([^:]*\):_root:" + saltedpw + ":_",
os.path.join(self.mountdir, self.etcdir, "shadow")])
def sethostname(self, old, new):
"""
Set the hostname by updating the hostname and hosts files that
reside on the filesystem mounted in the temporary area.
"""
subprocess.check_call([SED_PATH, "-i", "-e", "s/%s/%s/" % (old, new),
os.path.join(self.mountdir, self.etcdir, "hostname")])
subprocess.check_call([SED_PATH, "-i", "-e", "s/%s/%s/" % (old, new),
os.path.join(self.mountdir, self.etcdir, "hosts")])
def setupssh(self, keypath):
"""
Configure SSH access by installing host keys and a system-wide
authorized_keys file.
"""
sshdcfg = os.path.join(self.mountdir, self.etcdir, "ssh/sshd_config")
subprocess.check_call([SED_PATH, "-i", "-e", "s/PermitRootLogin no/PermitRootLogin yes/", sshdcfg])
sshdir = os.path.join(self.getconfigitem("mount_path"), self.etcdir, "ssh")
# backslash slashes for use in sed
sshdir = sshdir.replace("/", "\\/")
subprocess.check_call([SED_PATH, "-i", "-e",
"s/#AuthorizedKeysFile %h\/.ssh\/authorized_keys/" + \
"AuthorizedKeysFile " + sshdir + "\/authorized_keys/",
sshdcfg])
for f in "ssh_host_rsa_key", "ssh_host_rsa_key.pub", "authorized_keys":
src = os.path.join(keypath, f)
dst = os.path.join(self.mountdir, self.etcdir, "ssh", f)
shutil.copy(src, dst)
if f[-3:] != "pub":
os.chmod(dst, 0600)
def createvm(self):
"""
Instantiate a *paused* domU VM
Instantiate it now, so we can add network interfaces,
pause it so we can have the filesystem open for configuration.
"""
args = [XM_PATH, "create", os.devnull, "--paused"]
args.extend(["name=" + self.vmname, "memory=" + str(self.ramsize)])
args.append("disk=tap:aio:" + self.isofile + ",hda,r")
args.append("disk=phy:" + self.lvpath + ",hdb,w")
args.append("bootloader=pygrub")
bootargs = "--kernel=/isolinux/vmlinuz --ramdisk=/isolinux/initrd"
args.append("bootargs=" + bootargs)
for action in ("poweroff", "reboot", "suspend", "crash", "halt"):
args.append("on_%s=destroy" % action)
args.append("extra=" + self.getconfigitem("xm_create_extra"))
utils.mutecheck_call(args)
# from class LxcNode
def privatedir(self, path):
# self.warn("XEN PVM privatedir() called")
# Do nothing, Xen PVM nodes are fully private
pass
# from class LxcNode
def opennodefile(self, filename, mode="w"):
logger.warn("XEN PVM opennodefile() called")
raise Exception("Can't open VM file with opennodefile()")
# from class LxcNode
# open a file on a paused Xen node
def openpausednodefile(self, filename, mode="w"):
dirname, basename = os.path.split(filename)
if not basename:
raise ValueError("no basename for filename: %s" % filename)
if dirname and dirname[0] == "/":
dirname = dirname[1:]
# dirname = dirname.replace("/", ".")
dirname = os.path.join(self.nodedir, dirname)
if not os.path.isdir(dirname):
os.makedirs(dirname, mode=0755)
hostfilename = os.path.join(dirname, basename)
return open(hostfilename, mode)
# from class LxcNode
def nodefile(self, filename, contents, mode=0644):
if filename in self.files_to_ignore:
# self.warn("XEN PVM nodefile(filename=%s) ignored" % [filename])
return
if filename in self.files_redirection:
redirection_filename = self.files_redirection[filename]
logger.warn("XEN PVM nodefile(filename=%s) redirected to %s", filename, redirection_filename)
filename = redirection_filename
logger.warn("XEN PVM nodefile(filename=%s) called", filename)
self.lock.acquire()
if not self.up:
self.lock.release()
raise Exception("Can't access VM file as VM disk isn't ready")
if self.booted:
self.lock.release()
raise Exception("Can't access VM file as VM is already running")
try:
f = self.openpausednodefile(filename, "w")
f.write(contents)
os.chmod(f.name, mode)
f.close()
logger.info("created nodefile: %s; mode: 0%o", f.name, mode)
finally:
self.lock.release()
# from class SimpleLxcNode
def alive(self):
# is VM running?
return False # XXX
def cmd(self, args, wait=True):
cmd_string = string.join(args, " ")
if cmd_string in self.cmds_to_ignore:
# self.warn("XEN PVM cmd(args=[%s]) called and ignored" % cmdAsString)
return 0
if cmd_string in self.cmds_redirection:
self.cmds_redirection[cmd_string](self)
return 0
logger("XEN PVM cmd(args=[%s]) called, but not yet implemented", cmd_string)
return 0
def cmdresult(self, args):
cmd_string = string.join(args, " ")
if cmd_string in self.cmds_to_ignore:
# self.warn("XEN PVM cmd(args=[%s]) called and ignored" % cmdAsString)
return 0, ""
logger.warn("XEN PVM cmdresult(args=[%s]) called, but not yet implemented", cmd_string)
return 0, ""
def popen(self, args):
cmd_string = string.join(args, " ")
logger.warn("XEN PVM popen(args=[%s]) called, but not yet implemented", cmd_string)
return
def icmd(self, args):
cmd_string = string.join(args, " ")
logger.warn("XEN PVM icmd(args=[%s]) called, but not yet implemented", cmd_string)
return
def term(self, sh="/bin/sh"):
logger.warn("XEN PVM term() called, but not yet implemented")
return
def termcmdstring(self, sh="/bin/sh"):
"""
We may add "sudo" to the command string because the GUI runs as a
normal user. Use SSH if control interface is available, otherwise
use Xen console with a keymapping for easy login.
"""
controlifc = None
for ifc in self.netifs():
if hasattr(ifc, "control") and ifc.control is True:
controlifc = ifc
break
cmd = "xterm "
# use SSH if control interface is available
if controlifc:
controlip = controlifc.addrlist[0].split("/")[0]
cmd += "-e ssh root@%s" % controlip
return cmd
# otherwise use "xm console"
# pw = self.getconfigitem("root_password")
# cmd += "-xrm "XTerm*VT100.translations: #override <Key>F1: "
# cmd += "string(\"root\\n\") \\n <Key>F2: string(\"%s\\n\")" " % pw
cmd += "-e sudo %s console %s" % (XM_PATH, self.vmname)
return cmd
def shcmd(self, cmdstr, sh="/bin/sh"):
logger("XEN PVM shcmd(args=[%s]) called, but not yet implemented", cmdstr)
return
def mount(self, source, target):
logger.warn("XEN PVM Nodes can't bind-mount filesystems")
def umount(self, target):
logger.warn("XEN PVM Nodes can't bind-mount filesystems")
def newifindex(self):
self.lock.acquire()
try:
while self.ifindex in self._netif:
self.ifindex += 1
ifindex = self.ifindex
self.ifindex += 1
return ifindex
finally:
self.lock.release()
def getifindex(self, netif):
for ifindex in self._netif:
if self._netif[ifindex] is netif:
return ifindex
return -1
def addnetif(self, netif, ifindex):
logger.warn("XEN PVM addnetif() called")
PyCoreNode.addnetif(self, netif, ifindex)
def delnetif(self, ifindex):
logger.warn("XEN PVM delnetif() called")
PyCoreNode.delnetif(self, ifindex)
def newveth(self, ifindex=None, ifname=None, net=None, hwaddr=None):
logger.warn("XEN PVM newveth(ifindex=%s, ifname=%s) called", ifindex, ifname)
self.lock.acquire()
try:
if ifindex is None:
ifindex = self.newifindex()
if ifname is None:
ifname = "eth%d" % ifindex
sessionid = self.session.short_session_id()
name = "n%s.%s.%s" % (self.objid, ifindex, sessionid)
localname = "n%s.%s.%s" % (self.objid, ifname, sessionid)
ifclass = XenVEth
veth = ifclass(node=self, name=name, localname=localname,
mtu=1500, net=net, hwaddr=hwaddr)
veth.name = ifname
try:
self.addnetif(veth, ifindex)
except:
veth.shutdown()
del veth
raise
return ifindex
finally:
self.lock.release()
def newtuntap(self, ifindex=None, ifname=None, net=None):
logger.warn("XEN PVM newtuntap() called but not implemented")
def sethwaddr(self, ifindex, addr):
self._netif[ifindex].sethwaddr(addr)
if self.up:
pass
# self.cmd([IP_BIN, "link", "set", "dev", self.ifname(ifindex),
# "address", str(addr)])
def addaddr(self, ifindex, addr):
if self.up:
pass
# self.cmd([IP_BIN, "addr", "add", str(addr),
# "dev", self.ifname(ifindex)])
self._netif[ifindex].addaddr(addr)
def deladdr(self, ifindex, addr):
try:
self._netif[ifindex].deladdr(addr)
except ValueError:
logger.exception("trying to delete unknown address: %s", addr)
if self.up:
pass
# self.cmd([IP_BIN, "addr", "del", str(addr),
# "dev", self.ifname(ifindex)])
valid_deladdrtype = ("inet", "inet6", "inet6link")
def delalladdr(self, ifindex, addrtypes=valid_deladdrtype):
addr = self.getaddr(self.ifname(ifindex), rescan=True)
for t in addrtypes:
if t not in self.valid_deladdrtype:
raise ValueError("addr type must be in: " + " ".join(self.valid_deladdrtype))
for a in addr[t]:
self.deladdr(ifindex, a)
# update cached information
self.getaddr(self.ifname(ifindex), rescan=True)
# Xen PVM relies on boot process to bring up links
# def ifup(self, ifindex):
# if self.up:
# self.cmd([IP_BIN, "link", "set", self.ifname(ifindex), "up"])
def newnetif(self, net=None, addrlist=[], hwaddr=None, ifindex=None, ifname=None):
logger.warn("XEN PVM newnetif(ifindex=%s, ifname=%s) called", ifindex, ifname)
self.lock.acquire()
if not self.up:
self.lock.release()
raise Exception("Can't access add veth as VM disk isn't ready")
if self.booted:
self.lock.release()
raise Exception("Can't access add veth as VM is already running")
try:
if nodeutils.is_node(net, NodeTypes.EMANE):
raise Exception("Xen PVM doesn't yet support Emane nets")
# ifindex = self.newtuntap(ifindex = ifindex, ifname = ifname,
# net = net)
# # TUN/TAP is not ready for addressing yet; the device may
# # take some time to appear, and installing it into a
# # namespace after it has been bound removes addressing;
# # save addresses with the interface now
# self.attachnet(ifindex, net)
# netif = self.netif(ifindex)
# netif.sethwaddr(hwaddr)
# for addr in maketuple(addrlist):
# netif.addaddr(addr)
# return ifindex
else:
ifindex = self.newveth(ifindex=ifindex, ifname=ifname,
net=net, hwaddr=hwaddr)
if net is not None:
self.attachnet(ifindex, net)
rulefile = os.path.join(self.getconfigitem("mount_path"),
self.etcdir,
"udev/rules.d/70-persistent-net.rules")
f = self.openpausednodefile(rulefile, "a")
f.write(
"\n# Xen PVM virtual interface #%s %s with MAC address %s\n" % (ifindex, self.ifname(ifindex), hwaddr))
# Using MAC address as we"re now loading PVM net driver "early"
# OLD: Would like to use MAC address, but udev isn"t working with paravirtualized NICs. Perhaps the "set hw address" isn"t triggering a rescan.
f.write(
'SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{address}=="%s", KERNEL=="eth*", NAME="%s"\n' % (
hwaddr, self.ifname(ifindex)))
# f.write("SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", DEVPATH=="/devices/vif-%s/?*", KERNEL=="eth*", NAME="%s"\n" % (ifindex, self.ifname(ifindex)))
f.close()
if hwaddr:
self.sethwaddr(ifindex, hwaddr)
for addr in utils.maketuple(addrlist):
self.addaddr(ifindex, addr)
# self.ifup(ifindex)
return ifindex
finally:
self.lock.release()
def connectnode(self, ifname, othernode, otherifname):
logger.warn("XEN PVM connectnode() called")
# tmplen = 8
# tmp1 = "tmp." + "".join([random.choice(string.ascii_lowercase)
# for x in xrange(tmplen)])
# tmp2 = "tmp." + "".join([random.choice(string.ascii_lowercase)
# for x in xrange(tmplen)])
# check_call([IP_BIN, "link", "add", "name", tmp1,
# "type", "veth", "peer", "name", tmp2])
#
# check_call([IP_BIN, "link", "set", tmp1, "netns", str(self.pid)])
# self.cmd([IP_BIN, "link", "set", tmp1, "name", ifname])
# self.addnetif(PyCoreNetIf(self, ifname), self.newifindex())
#
# check_call([IP_BIN, "link", "set", tmp2, "netns", str(othernode.pid)])
# othernode.cmd([IP_BIN, "link", "set", tmp2, "name", otherifname])
# othernode.addnetif(PyCoreNetIf(othernode, otherifname),
# othernode.newifindex())
def addfile(self, srcname, filename, mode=0644):
self.lock.acquire()
if not self.up:
self.lock.release()
raise Exception("Can't access VM file as VM disk isn't ready")
if self.booted:
self.lock.release()
raise Exception("Can't access VM file as VM is already running")
if filename in self.files_to_ignore:
# self.warn("XEN PVM addfile(filename=%s) ignored" % [filename])
return
if filename in self.files_redirection:
redirection_filename = self.files_redirection[filename]
logger.warn("XEN PVM addfile(filename=%s) redirected to %s", filename, redirection_filename)
filename = redirection_filename
try:
fin = open(srcname, "r")
contents = fin.read()
fin.close()
fout = self.openpausednodefile(filename, "w")
fout.write(contents)
os.chmod(fout.name, mode)
fout.close()
logger.info("created nodefile: %s; mode: 0%o", fout.name, mode)
finally:
self.lock.release()
logger.warn("XEN PVM addfile(filename=%s) called", filename)
# shcmd = "mkdir -p $(dirname "%s") && mv "%s" "%s" && sync" % \
# (filename, srcname, filename)
# self.shcmd(shcmd)
def unmount_all(self, path):
"""
Namespaces inherit the host mounts, so we need to ensure that all
namespaces have unmounted our temporary mount area so that the
kpartx command will succeed.
"""
# Session.bootnodes() already has self.session._objslock
for o in self.session.objects.itervalues():
if not isinstance(o, LxcNode):
continue
o.umount(path)

View file

@ -1,301 +0,0 @@
"""
xenconfig.py: Implementation of the XenConfigManager class for managing
configurable items for XenNodes.
Configuration for a XenNode is available at these three levels:
Global config: XenConfigManager.configs[0] = (type="xen", values)
Nodes of this machine type have this config. These are the default values.
XenConfigManager.default_config comes from defaults + xen.conf
Node type config: XenConfigManager.configs[0] = (type="mytype", values)
All nodes of this type have this config.
Node-specific config: XenConfigManager.configs[nodenumber] = (type, values)
The node having this specific number has this config.
"""
import ConfigParser
import os
import string
from core import constants
from core import logger
from core.api import coreapi
from core.conf import Configurable
from core.conf import ConfigurableManager
from core.enumerations import ConfigDataTypes
from core.enumerations import ConfigFlags
from core.enumerations import ConfigTlvs
from core.enumerations import RegisterTlvs
class XenConfigManager(ConfigurableManager):
"""
Xen controller object. Lives in a Session instance and is used for
building Xen profiles.
"""
name = "xen"
config_type = RegisterTlvs.EMULATION_SERVER.value
def __init__(self, session):
"""
Creates a XenConfigManager instance.
:param core.session.Session session: session this manager is tied to
:return: nothing
"""
ConfigurableManager.__init__(self)
self.default_config = XenDefaultConfig(session, object_id=None)
self.loadconfigfile()
def setconfig(self, nodenum, conftype, values):
"""
Add configuration values for a node to a dictionary; values are
usually received from a Configuration Message, and may refer to a
node for which no object exists yet
:param int nodenum: node id to configure
:param str conftype: configuration type
:param tuple values: values to configure
:return: None
"""
# used for storing the global default config
if nodenum is None:
nodenum = 0
return ConfigurableManager.setconfig(self, nodenum, conftype, values)
def getconfig(self, nodenum, conftype, defaultvalues):
"""
Get configuration values for a node; if the values don"t exist in
our dictionary then return the default values supplied; if conftype
is None then we return a match on any conftype.
:param int nodenum: node id to configure
:param str conftype: configuration type
:param tuple defaultvalues: default values to return
:return: configuration for node and config type
:rtype: tuple
"""
# used for storing the global default config
if nodenum is None:
nodenum = 0
return ConfigurableManager.getconfig(self, nodenum, conftype, defaultvalues)
def clearconfig(self, nodenum):
"""
Remove configuration values for a node
:param int nodenum: node id to clear config
:return: nothing
"""
ConfigurableManager.clearconfig(self, nodenum)
if 0 in self.configs:
self.configs.pop(0)
def configure(self, session, config_data):
"""
Handle configuration messages for global Xen config.
:param core.conf.ConfigData config_data: configuration data for carrying out a configuration
"""
return self.default_config.configure(self, config_data)
def loadconfigfile(self, filename=None):
"""
Load defaults from the /etc/core/xen.conf file into dict object.
:param str filename: file name of configuration to load
:return: nothing
"""
if filename is None:
filename = os.path.join(constants.CORE_CONF_DIR, "xen.conf")
cfg = ConfigParser.SafeConfigParser()
if filename not in cfg.read(filename):
logger.warn("unable to read Xen config file: %s", filename)
return
section = "xen"
if not cfg.has_section(section):
logger.warn("%s is missing a xen section!", filename)
return
self.configfile = dict(cfg.items(section))
# populate default config items from config file entries
vals = list(self.default_config.getdefaultvalues())
names = self.default_config.getnames()
for i in range(len(names)):
if names[i] in self.configfile:
vals[i] = self.configfile[names[i]]
# this sets XenConfigManager.configs[0] = (type="xen", vals)
self.setconfig(None, self.default_config.name, vals)
def getconfigitem(self, name, model=None, node=None, value=None):
"""
Get a config item of the given name, first looking for node-specific
configuration, then model specific, and finally global defaults.
If a value is supplied, it will override any stored config.
:param str name: name of config item to get
:param model: model config to get
:param node: node config to get
:param value: value to override stored config, if provided
:return: nothing
"""
if value is not None:
return value
n = None
if node:
n = node.objid
(t, v) = self.getconfig(nodenum=n, conftype=model, defaultvalues=None)
if n is not None and v is None:
# get item from default config for the node type
(t, v) = self.getconfig(nodenum=None, conftype=model, defaultvalues=None)
if v is None:
# get item from default config for the machine type
(t, v) = self.getconfig(nodenum=None, conftype=self.default_config.name, defaultvalues=None)
confignames = self.default_config.getnames()
if v and name in confignames:
i = confignames.index(name)
return v[i]
else:
# name may only exist in config file
if name in self.configfile:
return self.configfile[name]
else:
# logger.warn("missing config item "%s"" % name)
return None
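# Illustrative lookup sketch for getconfigitem() above ("session.xen" follows
# the removed parser code elsewhere in this commit; the node/model names are
# assumptions for the example):
#
#   xen = session.xen
#   xen.getconfigitem("ram_size", node=node, value="512")      # explicit value wins -> "512"
#   xen.getconfigitem("mount_path", model="mytype", node=node)
#   # falls back through: node-specific config -> node-type config ->
#   # global "xen" defaults -> raw xen.conf entries -> None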
class XenConfig(Configurable):
"""
Manage Xen configuration profiles.
"""
@classmethod
def configure(cls, xen, config_data):
"""
Handle configuration messages for setting up a model.
Similar to Configurable.configure(), but considers opaque data
for indicating node types.
:param xen: xen instance to configure
:param core.conf.ConfigData config_data: configuration data for carrying out a configuration
"""
reply = None
node_id = config_data.node
object_name = config_data.object
config_type = config_data.type
opaque = config_data.opaque
values_str = config_data.data_values
nodetype = object_name
if opaque is not None:
opaque_items = opaque.split(":")
if len(opaque_items) != 2:
logger.warn("xen config: invalid opaque data in conf message")
return None
nodetype = opaque_items[1]
logger.info("received configure message for %s", nodetype)
if config_type == ConfigFlags.REQUEST.value:
logger.info("replying to configure request for %s " % nodetype)
# when object name is "all", the reply to this request may be None
# if this node has not been configured for this model; otherwise we
# reply with the defaults for this model
if object_name == "all":
typeflags = ConfigFlags.UPDATE.value
else:
typeflags = ConfigFlags.NONE.value
values = xen.getconfig(node_id, nodetype, defaultvalues=None)[1]
if values is None:
# get defaults from default "xen" config which includes
# settings from both cls._confdefaultvalues and xen.conf
defaults = cls.getdefaultvalues()
values = xen.getconfig(node_id, cls.name, defaults)[1]
if values is None:
return None
# reply with config options
if node_id is None:
node_id = 0
reply = cls.config_data(0, node_id, typeflags, nodetype, values)
elif config_type == ConfigFlags.RESET.value:
if object_name == "all":
xen.clearconfig(node_id)
# elif conftype == coreapi.CONF_TYPE_FLAGS_UPDATE:
else:
# store the configuration values for later use, when the XenNode
# object has been created
if object_name is None:
logger.info("no configuration object for node %s" % node_id)
return None
if values_str is None:
# use default or preconfigured values
defaults = cls.getdefaultvalues()
values = xen.getconfig(node_id, cls.name, defaults)[1]
else:
# use new values supplied from the conf message
values = values_str.split("|")
xen.setconfig(node_id, nodetype, values)
return reply
@classmethod
def config_data(cls, flags, node_id, type_flags, nodetype, values):
"""
Convert this class to a Config API message. Some TLVs are defined
by the class, but node number, conf type flags, and values must
be passed in.
:param int flags: configuration flags
:param int node_id: node id
:param int type_flags: type flags
:param int nodetype: node type
:param tuple values: values
:return: configuration message
"""
values_str = string.join(values, "|")
tlvdata = ""
tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.NODE.value, node_id)
tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.OBJECT.value, cls.name)
tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.TYPE.value, type_flags)
datatypes = tuple(map(lambda x: x[1], cls.config_matrix))
tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.DATA_TYPES.value, datatypes)
tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.VALUES.value, values_str)
captions = reduce(lambda a, b: a + "|" + b, map(lambda x: x[4], cls.config_matrix))
tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.CAPTIONS.value, captions)
possiblevals = reduce(lambda a, b: a + "|" + b, map(lambda x: x[3], cls.config_matrix))
tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.POSSIBLE_VALUES.value, possiblevals)
if cls.bitmap is not None:
tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.BITMAP.value, cls.bitmap)
if cls.config_groups is not None:
tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.GROUPS.value, cls.config_groups)
opaque = "%s:%s" % (cls.name, nodetype)
tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.OPAQUE.value, opaque)
msg = coreapi.CoreConfMessage.pack(flags, tlvdata)
return msg
class XenDefaultConfig(XenConfig):
"""
Global default Xen configuration options.
"""
name = "xen"
# Configuration items:
# ("name", "type", "default", "possible-value-list", "caption")
config_matrix = [
("ram_size", ConfigDataTypes.STRING.value, "256", "",
"ram size (MB)"),
("disk_size", ConfigDataTypes.STRING.value, "256M", "",
"disk size (use K/M/G suffix)"),
("iso_file", ConfigDataTypes.STRING.value, "", "",
"iso file"),
("mount_path", ConfigDataTypes.STRING.value, "", "",
"mount path"),
("etc_path", ConfigDataTypes.STRING.value, "", "",
"etc path"),
("persist_tar_iso", ConfigDataTypes.STRING.value, "", "",
"iso persist tar file"),
("persist_tar", ConfigDataTypes.STRING.value, "", "",
"persist tar file"),
("root_password", ConfigDataTypes.STRING.value, "password", "",
"root password"),
]
config_groups = "domU properties:1-%d" % len(config_matrix)

View file

@ -1,13 +1,12 @@
import os
import socket
import subprocess
from core import constants
from core import emane
from core import logger
from core.enumerations import NodeTypes
from core.misc import ipaddress
from core.misc import nodeutils
from core.misc import utils
from core.netns import nodes
from core.xml import xmlutils
@ -18,16 +17,13 @@ class CoreDeploymentWriter(object):
self.root = root
self.session = session
self.hostname = socket.gethostname()
if emane.VERSION < emane.EMANE092:
self.transport = None
self.platform = None
@staticmethod
def get_ipv4_addresses(hostname):
if hostname == 'localhost':
addr_list = []
cmd = (constants.IP_BIN, '-o', '-f', 'inet', 'addr', 'show')
output = subprocess.check_output(cmd)
args = [constants.IP_BIN, '-o', '-f', 'inet', 'addr', 'show']
output = utils.check_cmd(args)
for line in output.split(os.linesep):
split = line.split()
if not split:
@ -44,12 +40,12 @@ class CoreDeploymentWriter(object):
def get_interface_names(hostname):
"""
Uses same methodology of get_ipv4_addresses() to get
parallel list of interface names to go with ...
"""
parallel list of interface names to go with ...
"""
if hostname == 'localhost':
iface_list = []
cmd = (constants.IP_BIN, '-o', '-f', 'inet', 'addr', 'show')
output = subprocess.check_output(cmd)
args = [constants.IP_BIN, '-o', '-f', 'inet', 'addr', 'show']
output = utils.check_cmd(args)
for line in output.split(os.linesep):
split = line.split()
if not split:
@ -186,18 +182,8 @@ class CoreDeploymentWriter(object):
def add_emane_interface(self, physical_host, virtual_host, netif, platform_name='p1', transport_name='t1'):
nemid = netif.net.nemidmap[netif]
if emane.VERSION < emane.EMANE092:
if self.platform is None:
self.platform = \
self.add_platform(physical_host, name=platform_name)
platform = self.platform
if self.transport is None:
self.transport = \
self.add_transport(physical_host, name=transport_name)
transport = self.transport
else:
platform = self.add_platform(virtual_host, name=platform_name)
transport = self.add_transport(virtual_host, name=transport_name)
platform = self.add_platform(virtual_host, name=platform_name)
transport = self.add_transport(virtual_host, name=transport_name)
nem_name = 'nem%s' % nemid
nem = self.add_nem(platform, nem_name)
self.add_parameter(nem, 'nemid', str(nemid))

View file

@ -202,8 +202,6 @@ class CoreDocumentParser0(object):
mgr = None
self.parsenetem(model, obj, kvs)
elif name[:3] == "xen":
mgr = self.session.xen
# TODO: assign other config managers here
if mgr:
mgr.setconfig_keyvalues(nodenum, name, kvs)
@ -252,7 +250,7 @@ class CoreDocumentParser0(object):
geo.append(a)
location.setrefgeo(geo[0], geo[1], geo[2])
scale = origin.getAttribute("scale100")
if scale is not None:
if scale is not None and scale:
location.refscale = float(scale)
point = xmlutils.get_one_element(origin, "point")
if point is not None and point.firstChild is not None:

View file

@ -204,8 +204,6 @@ class CoreDocumentParser1(object):
mgr = self.session.mobility
elif model_name.startswith('emane'):
mgr = self.session.emane
elif model_name.startswith('xen'):
mgr = self.session.xen
else:
# TODO: any other config managers?
raise NotImplementedError
@ -685,8 +683,6 @@ class CoreDocumentParser1(object):
'host': 'host.gif',
'PC': 'pc.gif',
'mdr': 'mdr.gif',
# 'prouter': 'router_green.gif',
# 'xen': 'xen.gif'
}
icon_set = False
for child in xmlutils.iter_children_with_name(element, 'CORE:presentation'):

View file

@ -77,7 +77,7 @@ class CoreDocumentWriter1(Document):
objects from the given session.
"""
Document.__init__(self)
logger.info('Exporting to NMF XML version 1.0')
logger.debug('Exporting to NMF XML version 1.0')
with session._objects_lock:
self.scenarioPlan = ScenarioPlan(self, session)
if session.state == EventTypes.RUNTIME_STATE.value:
@ -213,7 +213,7 @@ class ScenarioPlan(XmlElement):
self.setAttribute('xmlns:CORE', 'coreSpecific')
self.setAttribute('compiled', 'true')
self.all_channel_members = dict()
self.all_channel_members = {}
self.last_network_id = 0
self.addNetworks()
self.addDevices()
@ -795,26 +795,23 @@ class InterfaceElement(NamedXmlElement):
"""
Add a reference to the channel that uses this interface
"""
try:
cm = self.scenPlan.all_channel_members[self.id]
if cm is not None:
ch = cm.base_element.parentNode
if ch is not None:
net = ch.parentNode
if net is not None:
MemberElement(self.scenPlan,
self,
referenced_type=MembType.CHANNEL,
referenced_id=ch.getAttribute("id"),
index=int(cm.getAttribute("index")))
MemberElement(self.scenPlan,
self,
referenced_type=MembType.NETWORK,
referenced_id=net.getAttribute("id"))
except KeyError:
# Not an error. This occurs when an interface belongs to a switch
# or a hub within a network and the channel is yet to be defined
logger.exception("noted as not an error, add channel reference error")
# cm is None when an interface belongs to a switch
# or a hub within a network and the channel is yet to be defined
cm = self.scenPlan.all_channel_members.get(self.id)
if cm is not None:
ch = cm.base_element.parentNode
if ch is not None:
net = ch.parentNode
if net is not None:
MemberElement(self.scenPlan,
self,
referenced_type=MembType.CHANNEL,
referenced_id=ch.getAttribute("id"),
index=int(cm.getAttribute("index")))
MemberElement(self.scenPlan,
self,
referenced_type=MembType.NETWORK,
referenced_id=net.getAttribute("id"))
def addAddresses(self, interface_object):
"""
@ -894,6 +891,10 @@ def get_endpoint(network_object, interface_object):
ep = None
l2devport = None
# skip if either are none
if not network_object or not interface_object:
return ep
# if ifcObj references an interface of a node and is part of this network
if interface_object.net.objid == network_object.objid and hasattr(interface_object,
'node') and interface_object.node:
@ -960,7 +961,7 @@ def get_endpoints(network_object):
if ep is not None:
endpoints.append(ep)
except:
logger.exception("error geting endpoints, was skipped before")
logger.debug("error geting endpoints, was skipped before")
return endpoints

View file

@ -1,6 +1,4 @@
# Configuration file for CORE (core-gui, core-daemon)
#
### GUI configuration options ###
[core-gui]
@ -8,14 +6,10 @@
### core-daemon configuration options ###
[core-daemon]
pidfile = /var/run/core-daemon.pid
logfile = /var/log/core-daemon.log
xmlfilever = 1.0
# you may want to change the listenaddr below to 0.0.0.0
listenaddr = localhost
port = 4038
numthreads = 1
verbose = False
quagga_bin_search = "/usr/local/bin /usr/bin /usr/lib/quagga"
quagga_sbin_search = "/usr/local/sbin /usr/sbin /usr/lib/quagga"
# uncomment the following line to load custom services from the specified dir
@ -57,9 +51,7 @@ emane_platform_port = 8101
emane_transform_port = 8201
emane_event_generate = True
emane_event_monitor = False
emane_models = RfPipe, Ieee80211abg, CommEffect, Bypass, Tdma
#emane_models_dir = /home/username/.core/myemane
# EMANE log level range [0,4] default: 2
#emane_log_level = 2
emane_realtime = True
#aux_request_handler = core.addons.api2handler.CoreApi2RequestHandler:12222

20
daemon/data/logging.conf Normal file
View file

@ -0,0 +1,20 @@
{
"version": 1,
"handlers": {
"console": {
"class": "logging.StreamHandler",
"formatter": "default",
"level": "DEBUG",
"stream": "ext://sys.stdout"
}
},
"formatters": {
"default": {
"format": "%(asctime)s - %(levelname)s - %(module)s:%(funcName)s - %(message)s"
}
},
"root": {
"level": "INFO",
"handlers": ["console"]
}
}
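The JSON above follows Python's dictConfig schema; a minimal loader sketch
(the install path below is an assumption, not something this commit defines)
could look like:

    import json
    import logging.config

    with open("/etc/core/logging.conf", "r") as f:
        logging.config.dictConfig(json.load(f))
    logging.getLogger("core").info("logging configured from file")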

View file

@ -1,35 +0,0 @@
# Configuration file for CORE Xen support
### Xen configuration options ###
[xen]
### The following three configuration options *must* be specified in this
### system-wide configuration file.
# LVM volume group name for creating new volumes
vg_name = domU
# directory containing an RSA SSH host key and authorized_keys file to use
# within the VM
ssh_key_path = /opt/core-xen/ssh
# extra arguments to pass via 'extra=' option to 'xm create'
xm_create_extra = console=hvc0 rtr_boot=/dev/xvda rtr_boot_fstype=iso9660 rtr_root=/boot/root.img rtr_persist=LABEL=persist rtr_swap=LABEL=swap rtr_overlay_limit=500
### The remaining configuration options *may* be specified here.
### If not specified here, they *must* be specified in the user (or scenario's)
### nodes.conf file as profile-specific configuration options.
# domU RAM memory size in MB
ram_size = 256
# domU disk size in MB
disk_size = 256M
# ISO filesystem to mount as read-only
iso_file = /opt/core-xen/iso-files/rtr.iso
# directory used temporarily as mount point for persistent area, under
# /tmp/pycore.nnnnn/nX.conf/
mount_path = /rtr/persist
# mount_path + this directory where configuration files are located on the VM
etc_path = config/etc
# name of tar file within the iso_file to unpack to mount_path
persist_tar_iso = persist-template.tar
# name of tar file in dom0 that will be unpacked to mount_path prior to boot
# the string '%h' will be replaced with the hostname (e.g. 'n3' for node 3)
persist_tar = /opt/core-xen/rtr-configs/custom-%%h.tar
# root password to set
root_password = password
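For reference, a standalone sketch of how a [xen] section like the one above
is read (it mirrors the loadconfigfile() method of the removed
XenConfigManager; /etc/core is the stock CORE_CONF_DIR and is an assumption
here):

    import ConfigParser

    cfg = ConfigParser.SafeConfigParser()
    cfg.read("/etc/core/xen.conf")
    if cfg.has_section("xen"):
        xen_defaults = dict(cfg.items("xen"))
        print xen_defaults.get("ram_size"), xen_defaults.get("mount_path")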

View file

@ -8,7 +8,7 @@
#
# extra cruft to remove
DISTCLEANFILES = conf.py Makefile.in stamp-vti *.rst
DISTCLEANFILES = conf.py Makefile Makefile.in stamp-vti *.rst
all: index.rst
@ -23,7 +23,7 @@ index.rst:
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXOPTS = -q
SPHINXBUILD = sphinx-build
PAPER =
BUILDDIR = _build

View file

@ -42,16 +42,16 @@ master_doc = 'index'
# General information about the project.
project = u'CORE Python modules'
copyright = u'2017, core-dev'
copyright = u'2005-2018, core-dev'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '@CORE_VERSION@'
version = '@PACKAGE_VERSION@'
# The full version, including alpha/beta/rc tags.
release = '@CORE_VERSION@'
release = '@PACKAGE_VERSION@'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
@ -223,7 +223,7 @@ man_pages = [
epub_title = u'CORE Python'
epub_author = u'core-dev'
epub_publisher = u'core-dev'
epub_copyright = u'2017, core-dev'
epub_copyright = u'2005-2018, core-dev'
# The language of the text. It defaults to the language option
# or en if the language is not set.

View file

@ -0,0 +1,60 @@
#!/usr/bin/python -i
#
# Example CORE Python script that attaches N nodes to an EMANE 802.11abg network.
import datetime
import parser
from core.emane.ieee80211abg import EmaneIeee80211abgModel
from core.emulator.coreemu import CoreEmu
from core.emulator.emudata import IpPrefixes
from core.enumerations import EventTypes
def example(options):
# ip generator for example
prefixes = IpPrefixes(ip4_prefix="10.83.0.0/16")
# create emulator instance for creating sessions and utility methods
coreemu = CoreEmu()
session = coreemu.create_session()
# must be in configuration state for nodes to start, when using "node_add" below
session.set_state(EventTypes.CONFIGURATION_STATE)
# create emane network node
emane_network = session.create_emane_network(
model=EmaneIeee80211abgModel,
geo_reference=(47.57917, -122.13232, 2.00000)
)
emane_network.setposition(x=80, y=50)
# create nodes
for i in xrange(options.nodes):
node = session.create_wireless_node()
node.setposition(x=150 * (i + 1), y=150)
interface = prefixes.create_interface(node)
session.add_link(node.objid, emane_network.objid, interface_one=interface)
# instantiate session
session.instantiate()
# start a shell on the first node
node = session.get_object(2)
node.client.term("bash")
# shutdown session
raw_input("press enter to exit...")
coreemu.shutdown()
def main():
options = parser.parse_options("emane80211")
start = datetime.datetime.now()
print "running emane 80211 example: nodes(%s) time(%s)" % (options.nodes, options.time)
example(options)
print "elapsed time: %s" % (datetime.datetime.now() - start)
if __name__ == "__main__" or __name__ == "__builtin__":
main()

View file

@ -0,0 +1,41 @@
import argparse
DEFAULT_NODES = 2
DEFAULT_TIME = 10
DEFAULT_STEP = 1
def parse_options(name):
parser = argparse.ArgumentParser(description="Run %s example" % name)
parser.add_argument("-n", "--nodes", type=int, default=DEFAULT_NODES,
help="number of nodes to create in this example")
parser.add_argument("-t", "--time", type=int, default=DEFAULT_TIME,
help="example iperf run time in seconds")
options = parser.parse_args()
# usagestr = "usage: %prog [-h] [options] [args]"
# parser = optparse.OptionParser(usage=usagestr)
#
# parser.add_option("-n", "--nodes", dest="nodes", type=int, default=DEFAULT_NODES,
# help="number of nodes to create in this example")
#
# parser.add_option("-t", "--time", dest="time", type=int, default=DEFAULT_TIME,
# help="example iperf run time in seconds")
# def usage(msg=None, err=0):
# print
# if msg:
# print "%s\n" % msg
# parser.print_help()
# sys.exit(err)
# parse command line options
# options, args = parser.parse_args()
if options.nodes < 2:
parser.error("invalid min number of nodes: %s" % options.nodes)
if options.time < 1:
parser.error("invalid test time: %s" % options.time)
return options

View file

@ -0,0 +1,64 @@
#!/usr/bin/python
#
# run iperf to measure the effective throughput between two nodes when
# n nodes are connected to a virtual wlan; run test for testsec
# and repeat for minnodes <= n <= maxnodes with a step size of
# nodestep
import datetime
import parser
from core.emulator.coreemu import CoreEmu
from core.emulator.emudata import IpPrefixes
from core.enumerations import NodeTypes, EventTypes
def example(options):
# ip generator for example
prefixes = IpPrefixes(ip4_prefix="10.83.0.0/16")
# create emulator instance for creating sessions and utility methods
coreemu = CoreEmu()
session = coreemu.create_session()
# must be in configuration state for nodes to start, when using "node_add" below
session.set_state(EventTypes.CONFIGURATION_STATE)
# create switch network node
switch = session.add_node(_type=NodeTypes.SWITCH)
# create nodes
for _ in xrange(options.nodes):
node = session.add_node()
interface = prefixes.create_interface(node)
session.add_link(node.objid, switch.objid, interface_one=interface)
# instantiate session
session.instantiate()
# get nodes to run example
first_node = session.get_object(2)
last_node = session.get_object(options.nodes + 1)
print "starting iperf server on node: %s" % first_node.name
first_node.cmd(["iperf", "-s", "-D"])
first_node_address = prefixes.ip4_address(first_node)
print "node %s connecting to %s" % (last_node.name, first_node_address)
last_node.client.icmd(["iperf", "-t", str(options.time), "-c", first_node_address])
first_node.cmd(["killall", "-9", "iperf"])
# shutdown session
coreemu.shutdown()
def main():
options = parser.parse_options("switch")
start = datetime.datetime.now()
print "running switch example: nodes(%s) time(%s)" % (options.nodes, options.time)
example(options)
print "elapsed time: %s" % (datetime.datetime.now() - start)
if __name__ == "__main__":
main()

View file

@ -0,0 +1,37 @@
#!/usr/bin/python
#
# run iperf to measure the effective throughput between two nodes when
# n nodes are connected to a virtual wlan; run test for testsec
# and repeat for minnodes <= n <= maxnodes with a step size of
# nodestep
from core.emulator.emudata import IpPrefixes
from core.enumerations import NodeTypes, EventTypes
def example(nodes):
# ip generator for example
prefixes = IpPrefixes("10.83.0.0/16")
# create emulator instance for creating sessions and utility methods
coreemu = globals()["coreemu"]
session = coreemu.create_session()
# must be in configuration state for nodes to start, when using "node_add" below
session.set_state(EventTypes.CONFIGURATION_STATE)
# create switch network node
switch = session.add_node(_type=NodeTypes.SWITCH)
# create nodes
for _ in xrange(nodes):
node = session.add_node()
interface = prefixes.create_interface(node)
session.add_link(node.objid, switch.objid, interface_one=interface)
# instantiate session
session.instantiate()
if __name__ in {"__main__", "__builtin__"}:
example(2)

View file

@ -0,0 +1,71 @@
#!/usr/bin/python
#
# run iperf to measure the effective throughput between two nodes when
# n nodes are connected to a virtual wlan; run test for testsec
# and repeat for minnodes <= n <= maxnodes with a step size of
# nodestep
import datetime
import parser
from core.emulator.coreemu import CoreEmu
from core.emulator.emudata import IpPrefixes
from core.enumerations import NodeTypes, EventTypes
from core.mobility import BasicRangeModel
def example(options):
# ip generator for example
prefixes = IpPrefixes("10.83.0.0/16")
# create emulator instance for creating sessions and utility methods
coreemu = CoreEmu()
session = coreemu.create_session()
# must be in configuration state for nodes to start, when using "node_add" below
session.set_state(EventTypes.CONFIGURATION_STATE)
# create wlan network node
wlan = session.add_node(_type=NodeTypes.WIRELESS_LAN)
session.set_wireless_model(wlan, BasicRangeModel)
# create nodes
wireless_nodes = []
for _ in xrange(options.nodes):
node = session.add_node()
interface = prefixes.create_interface(node)
session.add_link(node.objid, wlan.objid, interface_one=interface)
wireless_nodes.append(node)
# link all created nodes with the wireless network
session.wireless_link_all(wlan, wireless_nodes)
# instantiate session
session.instantiate()
# get nodes for example run
first_node = session.get_object(2)
last_node = session.get_object(options.nodes + 1)
print "starting iperf server on node: %s" % first_node.name
first_node.cmd(["iperf", "-s", "-D"])
address = prefixes.ip4_address(first_node)
print "node %s connecting to %s" % (last_node.name, address)
last_node.client.icmd(["iperf", "-t", str(options.time), "-c", address])
first_node.cmd(["killall", "-9", "iperf"])
# shutdown session
coreemu.shutdown()
def main():
options = parser.parse_options("wlan")
start = datetime.datetime.now()
print "running wlan example: nodes(%s) time(%s)" % (options.nodes, options.time)
example(options)
print "elapsed time: %s" % (datetime.datetime.now() - start)
if __name__ == "__main__":
main()

View file

@ -1,214 +0,0 @@
#!/usr/bin/env python
from emanesh import manifest
import os.path
import re
import textwrap
class EmaneManifest2Model(object):
class EmaneModel(object):
class EmaneModelParameter(object):
intfloat_regex = re.compile(r'^([0-9]+)\.(0*)$')
indent = ' ' * 16
def __init__(self, name, apitype, default, caption,
possible_values = ()):
self.name = name
self.apitype = apitype
self.default = self.intfloat_regex.sub(r'\1.0', default)
self.possible_values = possible_values
self.caption = caption
def __str__(self):
return '''%s('%s', %s,\n%s '%s', '%s', '%s')''' % \
(self.indent, self.name, self.apitype,
self.indent, self.default,
','.join(self.possible_values), self.caption)
def __init__(self, name):
self.name = name
self.parameters = []
def add_parameter(self, name, apitype, default, caption,
possible_values = ()):
p = self.EmaneModelParameter(name, apitype, default, caption,
possible_values)
self.parameters.append(p)
mac_xml_path = '/usr/share/emane/xml/models/mac'
# map emane parameter types to CORE api data types
core_api_type = {
'uint8': 'coreapi.CONF_DATA_TYPE_UINT8',
'uint16': 'coreapi.CONF_DATA_TYPE_UINT16',
'uint32': 'coreapi.CONF_DATA_TYPE_UINT32',
'uint64': 'coreapi.CONF_DATA_TYPE_UINT64',
'int8': 'coreapi.CONF_DATA_TYPE_INT8',
'int16': 'coreapi.CONF_DATA_TYPE_INT16',
'int32': 'coreapi.CONF_DATA_TYPE_INT32',
'int64': 'coreapi.CONF_DATA_TYPE_INT64',
'float': 'coreapi.CONF_DATA_TYPE_FLOAT',
'double': 'coreapi.CONF_DATA_TYPE_FLOAT',
'bool': 'coreapi.CONF_DATA_TYPE_BOOL',
'string': 'coreapi.CONF_DATA_TYPE_STRING',
}
parameter_regex = re.compile(r'^\^\(([\|\-\w]+)\)\$$')
@classmethod
def emane_model(cls, xmlfile):
m = manifest.Manifest(xmlfile)
model = cls.EmaneModel(m.getName())
for name in m.getAllConfiguration():
info = m.getConfigurationInfo(name)
apitype = None
for t in 'numeric', 'nonnumeric':
if t in info:
apitype = cls.core_api_type[info[t]['type']]
break
default = ''
if info['default']:
values = info['values']
if values:
default = values[0]
caption = name
possible_values = []
if apitype == 'coreapi.CONF_DATA_TYPE_BOOL':
possible_values = ['On,Off']
elif apitype == 'coreapi.CONF_DATA_TYPE_STRING':
if name == 'pcrcurveuri':
default = os.path.join(cls.mac_xml_path,
model.name, model.name + 'pcr.xml')
else:
regex = info['regex']
if regex:
match = cls.parameter_regex.match(regex)
if match:
possible_values = match.group(1).split('|')
model.add_parameter(name, apitype, default,
caption, possible_values)
model.parameters.sort(key = lambda x: x.name)
return model
@classmethod
def core_emane_model(cls, class_name, macmanifest_filename,
phymanifest_filename):
template = '''\
from core.emane.emane import EmaneModel
from core.api import coreapi
class BaseEmaneModel(EmaneModel):
def __init__(self, session, objid = None, verbose = False):
EmaneModel.__init__(self, session, objid, verbose)
def buildnemxmlfiles(self, e, ifc):
\'\'\'\\
Build the necessary nem, mac, and phy XMLs in the given path.
If an individual NEM has a nonstandard config, we need to
build that file also. Otherwise the WLAN-wide
nXXemane_*nem.xml, nXXemane_*mac.xml, nXXemane_*phy.xml are
used.
\'\'\'
values = e.getifcconfig(self.objid, self._name,
self.getdefaultvalues(), ifc)
if values is None:
return
nemdoc = e.xmldoc('nem')
nem = nemdoc.getElementsByTagName('nem').pop()
e.appendtransporttonem(nemdoc, nem, self.objid, ifc)
def append_definition(tag, name, xmlname, doc):
el = doc.createElement(name)
el.setAttribute('definition', xmlname)
tag.appendChild(el)
append_definition(nem, 'mac', self.macxmlname(ifc), nemdoc)
append_definition(nem, 'phy', self.phyxmlname(ifc), nemdoc)
e.xmlwrite(nemdoc, self.nemxmlname(ifc))
names = list(self.getnames())
def append_options(tag, optnames, doc):
for name in optnames:
value = self.valueof(name, values).strip()
if value:
tag.appendChild(e.xmlparam(doc, name, value))
macdoc = e.xmldoc('mac')
mac = macdoc.getElementsByTagName('mac').pop()
mac.setAttribute('library', '%(modelLibrary)s')
# append MAC options to macdoc
append_options(mac, names[:len(self._confmatrix_mac)], macdoc)
e.xmlwrite(macdoc, self.macxmlname(ifc))
phydoc = e.xmldoc('phy')
phy = phydoc.getElementsByTagName('phy').pop()
# append PHY options to phydoc
append_options(phy, names[len(self._confmatrix_mac):], phydoc)
e.xmlwrite(phydoc, self.phyxmlname(ifc))
class %(modelClass)s(BaseEmaneModel):
# model name
_name = 'emane_%(modelName)s'
# configuration parameters are
# ( 'name', 'type', 'default', 'possible-value-list', 'caption')
# MAC parameters
_confmatrix_mac = [\n%(confMatrixMac)s
]
# PHY parameters
_confmatrix_phy = [\n%(confMatrixPhy)s
]
_confmatrix = _confmatrix_mac + _confmatrix_phy
# value groupings
_confgroups = 'MAC Parameters:1-%%s|PHY Parameters:%%s-%%s' %% \\
(len(_confmatrix_mac), \\
len(_confmatrix_mac) + 1, len(_confmatrix))
'''
macmodel = cls.emane_model(macmanifest_filename)
phymodel = cls.emane_model(phymanifest_filename)
d = {
'modelClass': 'Emane%sModel' % (class_name),
'modelName': macmodel.name,
'confMatrixMac': ',\n'.join(map(str, macmodel.parameters)) + ',',
'confMatrixPhy': ',\n'.join(map(str, phymodel.parameters)) + ',',
'modelLibrary': macmodel.name,
}
return textwrap.dedent(template % d)
def main():
import argparse
import sys
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description = 'Create skeleton CORE bindings from ' \
'EMANE model manifest files.',
epilog = 'example:\n' \
' %(prog)s -c RadioX \\\n' \
' -m /usr/share/emane/manifest/radiox.xml \\\n' \
' -p /usr/share/emane/manifest/emanephy.xml')
parser.add_argument('-c', '--class-name', dest = 'classname',
required = True, help = 'corresponding python '
'class name: RadioX -> EmaneRadioXModel')
parser.add_argument('-m', '--mac-xmlfile', dest = 'macxmlfilename',
required = True,
help = 'MAC model manifest XML filename')
parser.add_argument('-p', '--phy-xmlfile', dest = 'phyxmlfilename',
required = True,
help = 'PHY model manifest XML filename')
args = parser.parse_args()
model = EmaneManifest2Model.core_emane_model(args.classname,
args.macxmlfilename,
args.phyxmlfilename)
sys.stdout.write(model)
if __name__ == "__main__":
main()

View file

@ -1,187 +0,0 @@
#!/usr/bin/env python
#
# CORE
# Copyright (c) 2013 the Boeing Company.
# See the LICENSE file included in this distribution.
#
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
#
'''
emanemodel2core.py: scans an EMANE model source file
(e.g. emane/models/rfpipe/maclayer/rfpipemaclayer.cc) and outputs Python
bindings that allow the model to be used in CORE.
When using this conversion utility, you should replace XYZ, Xyz, and xyz with
the actual model name. Note the capitalization convention.
'''
import os, sys, optparse
MODEL_TEMPLATE_PART1 = """
#
# CORE
# Copyright (c)2013 Company.
# See the LICENSE file included in this distribution.
#
# author: Name <email@company.com>
#
'''
xyz.py: EMANE XYZ model bindings for CORE
'''
from core.api import coreapi
from emane import EmaneModel
from universal import EmaneUniversalModel
class EmaneXyzModel(EmaneModel):
def __init__(self, session, objid = None, verbose = False):
EmaneModel.__init__(self, session, objid, verbose)
# model name
_name = "emane_xyz"
# MAC parameters
_confmatrix_mac = [
"""
MODEL_TEMPLATE_PART2 = """
]
# PHY parameters from Universal PHY
_confmatrix_phy = EmaneUniversalModel._confmatrix
_confmatrix = _confmatrix_mac + _confmatrix_phy
# value groupings
_confgroups = "XYZ MAC Parameters:1-%d|Universal PHY Parameters:%d-%d" \
% ( len(_confmatrix_mac), len(_confmatrix_mac) + 1, len(_confmatrix))
def buildnemxmlfiles(self, e, ifc):
''' Build the necessary nem, mac, and phy XMLs in the given path.
If an individual NEM has a nonstandard config, we need to build
that file also. Otherwise the WLAN-wide nXXemane_xyznem.xml,
nXXemane_xyzmac.xml, nXXemane_xyzphy.xml are used.
'''
values = e.getifcconfig(self.objid, self._name,
self.getdefaultvalues(), ifc)
if values is None:
return
nemdoc = e.xmldoc("nem")
nem = nemdoc.getElementsByTagName("nem").pop()
nem.setAttribute("name", "XYZ NEM")
mactag = nemdoc.createElement("mac")
mactag.setAttribute("definition", self.macxmlname(ifc))
nem.appendChild(mactag)
phytag = nemdoc.createElement("phy")
phytag.setAttribute("definition", self.phyxmlname(ifc))
nem.appendChild(phytag)
e.xmlwrite(nemdoc, self.nemxmlname(ifc))
names = list(self.getnames())
macnames = names[:len(self._confmatrix_mac)]
phynames = names[len(self._confmatrix_mac):]
# make any changes to the mac/phy names here to e.g. exclude them from
# the XML output
macdoc = e.xmldoc("mac")
mac = macdoc.getElementsByTagName("mac").pop()
mac.setAttribute("name", "XYZ MAC")
mac.setAttribute("library", "xyzmaclayer")
# append MAC options to macdoc
map( lambda n: mac.appendChild(e.xmlparam(macdoc, n, \
self.valueof(n, values))), macnames)
e.xmlwrite(macdoc, self.macxmlname(ifc))
phydoc = EmaneUniversalModel.getphydoc(e, self, values, phynames)
e.xmlwrite(phydoc, self.phyxmlname(ifc))
"""
def emane_model_source_to_core(infile, outfile):
do_parse_line = False
output = MODEL_TEMPLATE_PART1
with open(infile, 'r') as f:
for line in f:
# begin marker
if "EMANE::ConfigurationDefinition" in line:
do_parse_line = True
# end marker -- all done
if "{0, 0, 0, 0, 0, 0" in line:
break
if do_parse_line:
outstr = convert_line(line)
if outstr is not None:
output += outstr
continue
output += MODEL_TEMPLATE_PART2
if outfile == sys.stdout:
sys.stdout.write(output)
else:
with open(outfile, 'w') as f:
f.write(output)
def convert_line(line):
line = line.strip()
# skip comments
if line.startswith(('/*', '//')):
return None
items = line.strip('{},').split(',')
if len(items) != 7:
#print "continuning on line=", len(items), items
return None
return convert_items_to_line(items)
def convert_items_to_line(items):
fields = ('required', 'default', 'count', 'name', 'value', 'type',
'description')
getfield = lambda(x): items[fields.index(x)].strip()
output = " ("
output += "%s, " % getfield('name')
value = getfield('value')
if value == '"off"':
type = "coreapi.CONF_DATA_TYPE_BOOL"
value = "0"
defaults = '"On,Off"'
elif value == '"on"':
type = "coreapi.CONF_DATA_TYPE_BOOL"
value = '"1"'
defaults = '"On,Off"'
else:
type = "coreapi.CONF_DATA_TYPE_STRING"
defaults = '""'
output += "%s, %s, %s, " % (type, value, defaults)
output += getfield('description')
output += "),\n"
return output
def main():
usagestr = "usage: %prog [-h] [options] -- <command> ..."
parser = optparse.OptionParser(usage = usagestr)
parser.set_defaults(infile = None, outfile = sys.stdout)
parser.add_option("-i", "--infile", dest = "infile",
help = "file to read (usually '*mac.cc')")
parser.add_option("-o", "--outfile", dest = "outfile",
help = "file to write (stdout is default)")
def usage(msg = None, err = 0):
sys.stdout.write("\n")
if msg:
sys.stdout.write(msg + "\n\n")
parser.print_help()
sys.exit(err)
# parse command line options
(options, args) = parser.parse_args()
if options.infile is None:
usage("please specify input file with the '-i' option", err=1)
emane_model_source_to_core(options.infile, options.outfile)
if __name__ == "__main__":
main()

View file

@ -1,78 +0,0 @@
#!/usr/bin/env python
#
# Search for installed CORE library files and Python bindings.
#
import os, glob
pythondirs = [
"/usr/lib/python2.7/site-packages",
"/usr/lib/python2.7/dist-packages",
"/usr/lib64/python2.7/site-packages",
"/usr/lib64/python2.7/dist-packages",
"/usr/local/lib/python2.7/site-packages",
"/usr/local/lib/python2.7/dist-packages",
"/usr/local/lib64/python2.7/site-packages",
"/usr/local/lib64/python2.7/dist-packages",
"/usr/lib/python2.6/site-packages",
"/usr/lib/python2.6/dist-packages",
"/usr/lib64/python2.6/site-packages",
"/usr/lib64/python2.6/dist-packages",
"/usr/local/lib/python2.6/site-packages",
"/usr/local/lib/python2.6/dist-packages",
"/usr/local/lib64/python2.6/site-packages",
"/usr/local/lib64/python2.6/dist-packages",
]
tcldirs = [
"/usr/lib/core",
"/usr/local/lib/core",
]
def find_in_file(fn, search, column=None):
''' Find a line starting with 'search' in the file given by the filename
'fn'. Return True if found, False if not found, or the column text if
column is specified.
'''
r = False
if not os.path.exists(fn):
return r
f = open(fn, "r")
for line in f:
if line[:len(search)] != search:
continue
r = True
if column is not None:
r = line.split()[column]
break
f.close()
return r
def main():
versions = []
for d in pythondirs:
fn = "%s/core/constants.py" % d
ver = find_in_file(fn, 'COREDPY_VERSION', 2)
if ver:
ver = ver.strip('"')
versions.append((d, ver))
for e in glob.iglob("%s/core_python*egg-info" % d):
ver = find_in_file(e, 'Version:', 1)
if ver:
versions.append((e, ver))
for e in glob.iglob("%s/netns*egg-info" % d):
ver = find_in_file(e, 'Version:', 1)
if ver:
versions.append((e, ver))
for d in tcldirs:
fn = "%s/version.tcl" % d
ver = find_in_file(fn, 'set CORE_VERSION', 2)
if ver:
versions.append((d, ver))
for (d, ver) in versions:
print "%8s %s" % (ver, d)
if __name__ == "__main__":
main()

View file

@ -0,0 +1,56 @@
from core.emane import emanemanifest
from core.emane import emanemodel
## Custom EMANE Model
class ExampleModel(emanemodel.EmaneModel):
### MAC Definition
# Defines the emane model name that will show up in the GUI.
name = "emane_example"
# Defines the mac library that the model will reference.
mac_library = "rfpipemaclayer"
# Defines the mac manifest file that will be parsed to obtain configuration options, that will be displayed
# within the GUI.
mac_xml = "/usr/share/emane/manifest/rfpipemaclayer.xml"
# Allows you to override options that are maintained within the manifest file above.
mac_defaults = {
"pcrcurveuri": "/usr/share/emane/xml/models/mac/rfpipe/rfpipepcr.xml",
}
# Parses the manifest file and converts configurations into core supported formats.
mac_config = emanemanifest.parse(mac_xml, mac_defaults)
### PHY Definition
# **NOTE: phy configuration will default to the universal model as seen below and the below section does not
# have to be included.**
# Defines the phy library that the model will reference, used if you need to provide a custom phy.
phy_library = None
# Defines the phy manifest file that will be parsed to obtain configuration options, that will be displayed
# within the GUI.
phy_xml = "/usr/share/emane/manifest/emanephy.xml"
# Allows you to override options that are maintained within the manifest file above or for the default universal
# model.
phy_defaults = {
"subid": "1",
"propagationmodel": "2ray",
"noisemode": "none"
}
# Parses the manifest file and converts configurations into core supported formats.
phy_config = emanemanifest.parse(phy_xml, phy_defaults)
### Custom override options
# **NOTE: these options default to what's seen below and do not have to be included.**
# Allows you to ignore options within phy/mac, used typically if you needed to add a custom option for display
# within the gui.
config_ignore = set()
# Allows you to override how options are displayed with the GUI, using the GUI format of
# "name:1-2|othername:3-4". This will be parsed into tabs, split by "|" and account for items based on the indexed
# numbers after ":" for including values in each tab.
config_groups_override = None
# Allows you to override the default config matrix list. This value by default is the mac_config + phy_config, in
# that order.
config_matrix_override = None
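# Hypothetical subclass sketch (an assumption added for illustration, not part
# of this commit); it assumes mac_config and phy_config are plain lists, which
# is consistent with the "mac_config + phy_config" default described above.
class TweakedExampleModel(ExampleModel):
    name = "emane_example_tweaked"
    # hide a single option from the GUI
    config_ignore = {"pcrcurveuri"}
    # split options into two tabs using the documented "name:start-end|..." format
    config_groups_override = "MAC Parameters:1-%d|PHY Parameters:%d-%d" % (
        len(ExampleModel.mac_config),
        len(ExampleModel.mac_config) + 1,
        len(ExampleModel.mac_config) + len(ExampleModel.phy_config),
    )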

View file

@ -1,8 +1,3 @@
#
# CORE
# Copyright (c)2010-2012 the Boeing Company.
# See the LICENSE file included in this distribution.
#
"""
Sample user-defined service.
"""

View file

@ -1,115 +0,0 @@
#!/usr/bin/python
#
# Copyright (c)2011-2012 the Boeing Company.
# See the LICENSE file included in this distribution.
#
# Test 3D range calculation of the BasicRangeModel by adding n nodes to a WLAN
# stacked 100 units above each other (using z-axis).
#
import datetime
import optparse
import sys
import time
from core.misc import ipaddress, nodeutils
from core.misc import nodemaps
from core.mobility import BasicRangeModel
from core.netns.nodes import WlanNode
from core.netns.vnet import EbtablesQueue
from core.netns.vnode import LxcNode
from core.session import Session
# node list - global so you can play using 'python -i'
# e.g. >>> n[0].session.shutdown()
n = []
def test(options):
prefix = ipaddress.Ipv4Prefix("10.83.0.0/16")
session = Session(1, persistent=True)
if options.enablesdt:
# GUI default
session.location.setrefgeo(47.57917, -122.13232, 50.0)
session.location.refscale = 100.0
session.options.enablesdt = True
session.options.sdturl = options.sdturl
wlanid = options.numnodes + 1
net = session.add_object(
cls=WlanNode,
name="wlan%d" % wlanid,
objid=wlanid
)
values = list(BasicRangeModel.getdefaultvalues())
# values[0] = 5000000 # 5000km range
net.setmodel(BasicRangeModel, values)
for i in xrange(1, options.numnodes + 1):
node = session.add_object(cls=LxcNode, name="n%d" % i, objid=i)
address = "%s/%s" % (prefix.addr(i), prefix.prefixlen)
print "setting node address: %s - %s" % (node.objid, address)
node.newnetif(net, [address])
# set increasing Z coordinates
node.setposition(10, 10, 100 * i)
n.append(node)
# example setting node n2 to a high altitude
# n[1].setposition(10, 10, 2000000) # 2000km
# session.sdt.updatenode(n[1].objid, 0, 10, 10, 2000000)
# launches terminal for the first node
# n[0].term("bash")
n[0].icmd(["ping", "-c", "5", "127.0.0.1"])
# wait for rate seconds to allow ebtables commands to commit
time.sleep(EbtablesQueue.rate)
raw_input("press enter to exit")
session.shutdown()
def main():
usagestr = "usage: %prog [-h] [options] [args]"
parser = optparse.OptionParser(usage=usagestr)
parser.set_defaults(numnodes=2, enablesdt=False, sdturl="tcp://127.0.0.1:50000/")
parser.add_option(
"-n", "--numnodes", dest="numnodes", type=int,
help="number of nodes to test; default = %s" % parser.defaults["numnodes"]
)
parser.add_option("-s", "--sdt", dest="enablesdt", action="store_true", help="enable SDT output")
parser.add_option(
"-u", "--sdturl", dest="sdturl", type="string",
help="URL for SDT connection, default = %s" % parser.defaults["sdturl"]
)
def usage(msg=None, err=0):
sys.stdout.write("\n")
if msg:
sys.stdout.write(msg + "\n\n")
parser.print_help()
sys.exit(err)
# parse command line options
(options, args) = parser.parse_args()
if options.numnodes < 2:
usage("invalid number of nodes: %s" % options.numnodes)
for a in args:
sys.stderr.write("ignoring command line argument: '%s'\n" % a)
start = datetime.datetime.now()
test(options)
print >> sys.stderr, "elapsed time: %s" % (datetime.datetime.now() - start)
if __name__ == "__main__":
# configure nodes to use
node_map = nodemaps.NODES
nodeutils.set_node_map(node_map)
main()

View file

@ -25,7 +25,7 @@ from core.enumerations import LinkTlvs
from core.enumerations import LinkTypes
from core.enumerations import MessageFlags
from core.enumerations import MessageTypes
from core.misc import ipaddress, nodeutils, nodemaps
from core.misc import ipaddress
from core.netns import nodes
# declare classes for use with Broker
@ -105,7 +105,7 @@ def main():
start = datetime.datetime.now()
prefix = ipaddress.Ipv4Prefix("10.83.0.0/16")
session = Session(1, persistent=True)
session = Session(1)
if "server" in globals():
server.addsession(session)
@ -125,14 +125,13 @@ def main():
# Set the local session id to match the port.
# Not necessary but seems neater.
# session.sessionid = session.broker.getserver("localhost")[2].getsockname()[1]
session.broker.setupserver(daemon)
# We do not want the recvloop running as we will deal ourselves
session.broker.dorecvloop = False
# Change to configuration state on both machines
session.set_state(EventTypes.CONFIGURATION_STATE.value)
session.set_state(EventTypes.CONFIGURATION_STATE)
tlvdata = coreapi.CoreEventTlv.pack(EventTlvs.TYPE.value, EventTypes.CONFIGURATION_STATE.value)
session.broker.handlerawmsg(coreapi.CoreEventMessage.pack(0, tlvdata))
@ -187,8 +186,4 @@ def main():
if __name__ == "__main__" or __name__ == "__builtin__":
# configure nodes to use
node_map = nodemaps.NODES
nodeutils.set_node_map(node_map)
main()

View file

@ -16,7 +16,7 @@ import sys
from core import constants
from core.api import coreapi, dataconversion
from core.enumerations import CORE_API_PORT, EventTypes, EventTlvs, LinkTlvs, LinkTypes, MessageFlags
from core.misc import ipaddress, nodeutils, nodemaps
from core.misc import ipaddress
from core.netns import nodes
from core.session import Session
@ -55,7 +55,7 @@ def main():
start = datetime.datetime.now()
prefix = ipaddress.Ipv4Prefix("10.83.0.0/16")
session = Session(1, persistent=True)
session = Session(1)
if 'server' in globals():
server.addsession(session)
@ -69,7 +69,7 @@ def main():
print "connecting to slave at %s:%d" % (slave, port)
session.broker.addserver(slave, slave, port)
session.broker.setupserver(slave)
session.set_state(EventTypes.CONFIGURATION_STATE.value)
session.set_state(EventTypes.CONFIGURATION_STATE)
tlvdata = coreapi.CoreEventTlv.pack(EventTlvs.TYPE.value, EventTypes.CONFIGURATION_STATE.value)
session.broker.handlerawmsg(coreapi.CoreEventMessage.pack(0, tlvdata))
@ -116,7 +116,7 @@ def main():
session.broker.handlerawmsg(msg)
# start a shell on node 1
n[1].term("bash")
n[1].client.term("bash")
print "elapsed time: %s" % (datetime.datetime.now() - start)
print "To stop this session, use the 'core-cleanup' script on this server"
@ -124,8 +124,4 @@ def main():
if __name__ == "__main__" or __name__ == "__builtin__":
# configure nodes to use
node_map = nodemaps.NODES
nodeutils.set_node_map(node_map)
main()

View file

@ -1,107 +0,0 @@
#!/usr/bin/python -i
# Copyright (c)2010-2014 the Boeing Company.
# See the LICENSE file included in this distribution.
# Example CORE Python script that attaches N nodes to an EMANE 802.11abg
# network. One of the parameters is changed, the pathloss mode.
import datetime
import optparse
import sys
from core import constants
from core.emane.ieee80211abg import EmaneIeee80211abgModel
from core.emane.nodes import EmaneNode
from core.misc import ipaddress, nodeutils, nodemaps
from core.netns import nodes
# node list (count from 1)
from core.session import Session
n = [None]
def main():
usagestr = "usage: %prog [-h] [options] [args]"
parser = optparse.OptionParser(usage=usagestr)
parser.set_defaults(numnodes=5)
parser.add_option("-n", "--numnodes", dest="numnodes", type=int,
help="number of nodes")
def usage(msg=None, err=0):
sys.stdout.write("\n")
if msg:
sys.stdout.write(msg + "\n\n")
parser.print_help()
sys.exit(err)
# parse command line options
(options, args) = parser.parse_args()
if options.numnodes < 1:
usage("invalid number of nodes: %s" % options.numnodes)
for a in args:
sys.stderr.write("ignoring command line argument: '%s'\n" % a)
start = datetime.datetime.now()
# IP subnet
prefix = ipaddress.Ipv4Prefix("10.83.0.0/16")
# session with some EMANE initialization
cfg = {'verbose': 'false'}
session = Session(1, config=cfg, persistent=True)
session.master = True
session.location.setrefgeo(47.57917, -122.13232, 2.00000)
session.location.refscale = 150.0
session.config['emane_models'] = "RfPipe, Ieee80211abg, Bypass"
session.emane.loadmodels()
if 'server' in globals():
server.addsession(session)
# EMANE WLAN
print "creating EMANE WLAN wlan1"
wlan = session.add_object(cls=EmaneNode, name="wlan1")
wlan.setposition(x=80, y=50)
names = EmaneIeee80211abgModel.getnames()
values = list(EmaneIeee80211abgModel.getdefaultvalues())
# TODO: change any of the EMANE 802.11 parameter values here
for i in range(0, len(names)):
print "EMANE 80211 \"%s\" = \"%s\"" % (names[i], values[i])
try:
values[names.index('pathlossmode')] = '2ray'
except ValueError:
values[names.index('propagationmodel')] = '2ray'
session.emane.setconfig(wlan.objid, EmaneIeee80211abgModel.name, values)
services_str = "zebra|OSPFv3MDR|IPForward"
print "creating %d nodes with addresses from %s" % \
(options.numnodes, prefix)
for i in xrange(1, options.numnodes + 1):
tmp = session.add_object(cls=nodes.CoreNode, name="n%d" % i,
objid=i)
tmp.newnetif(wlan, ["%s/%s" % (prefix.addr(i), prefix.prefixlen)])
tmp.cmd([constants.SYSCTL_BIN, "net.ipv4.icmp_echo_ignore_broadcasts=0"])
tmp.setposition(x=150 * i, y=150)
session.services.addservicestonode(tmp, "", services_str)
n.append(tmp)
# this starts EMANE, etc.
session.node_count = str(options.numnodes + 1)
session.instantiate()
# start a shell on node 1
n[1].term("bash")
print "elapsed time: %s" % (datetime.datetime.now() - start)
if __name__ == "__main__" or __name__ == "__builtin__":
# configure nodes to use
node_map = nodemaps.NODES
nodeutils.set_node_map(node_map)
main()
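The script deleted above drove the EMANE 802.11abg model through positionally aligned parameter names and default values, overriding one entry by name. Its core reduces to the sketch below, extracted from the deleted code; session and wlan are the objects the script created, and the ValueError fallback mirrors the original, which tried 'pathlossmode' first and fell back to 'propagationmodel':
from core.emane.ieee80211abg import EmaneIeee80211abgModel

names = EmaneIeee80211abgModel.getnames()
values = list(EmaneIeee80211abgModel.getdefaultvalues())
# names and defaults are index-aligned, so a parameter can be replaced by name
try:
    values[names.index("pathlossmode")] = "2ray"
except ValueError:
    values[names.index("propagationmodel")] = "2ray"
session.emane.setconfig(wlan.objid, EmaneIeee80211abgModel.name, values)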

View file

@@ -20,7 +20,7 @@ import sys
import time
from core import constants
from core.misc import ipaddress, nodeutils, nodemaps
from core.misc import ipaddress
from core.netns import nodes
from core.session import Session
@@ -106,7 +106,7 @@ def main():
parser.print_help()
sys.exit(err)
(options, args) = parser.parse_args()
options, args = parser.parse_args()
for a in args:
sys.stderr.write("ignoring command line argument: %s\n" % a)
@@ -117,8 +117,7 @@ def main():
print "Testing how many network namespace nodes this machine can create."
print " - %s" % linuxversion()
mem = memfree()
print " - %.02f GB total memory (%.02f GB swap)" % \
(mem["total"] / GBD, mem["stotal"] / GBD)
print " - %.02f GB total memory (%.02f GB swap)" % (mem["total"] / GBD, mem["stotal"] / GBD)
print " - using IPv4 network prefix %s" % prefix
print " - using wait time of %s" % options.waittime
print " - using %d nodes per bridge" % options.bridges
@@ -135,7 +134,7 @@ def main():
lfp.write("# numnodes,%s\n" % ",".join(MEMKEYS))
lfp.flush()
session = Session(1, persistent=True)
session = Session(1)
switch = session.add_object(cls=nodes.SwitchNode)
switchlist.append(switch)
print "Added bridge %s (%d)." % (switch.brname, len(switchlist))
@@ -146,15 +145,14 @@ def main():
i += 1
# optionally add a bridge (options.bridges nodes per bridge)
try:
if options.bridges > 0 and switch.numnetif() >= options.bridges:
if 0 < options.bridges <= switch.numnetif():
switch = session.add_object(cls=nodes.SwitchNode)
switchlist.append(switch)
print "\nAdded bridge %s (%d) for node %d." % \
(switch.brname, len(switchlist), i)
print "\nAdded bridge %s (%d) for node %d." % (switch.brname, len(switchlist), i)
except Exception, e:
print "At %d bridges (%d nodes) caught exception:\n%s\n" % \
(len(switchlist), i - 1, e)
print "At %d bridges (%d nodes) caught exception:\n%s\n" % (len(switchlist), i - 1, e)
break
# create a node
try:
n = session.add_object(cls=nodes.LxcNode, name="n%d" % i)
@@ -206,8 +204,4 @@ def main():
if __name__ == "__main__":
# configure nodes to use
node_map = nodemaps.NODES
nodeutils.set_node_map(node_map)
main()
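One change above rewrites the bridge check as a chained comparison: 0 < options.bridges <= switch.numnetif() in place of options.bridges > 0 and switch.numnetif() >= options.bridges. The two forms are equivalent predicates; a self-contained check:
# Python chained comparison 0 < b <= n is the same test as (b > 0 and n >= b)
for b in range(-1, 5):
    for n in range(0, 5):
        assert (0 < b <= n) == (b > 0 and n >= b)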

View file

@@ -1,107 +0,0 @@
#!/usr/bin/python
# Copyright (c)2013 the Boeing Company.
# See the LICENSE file included in this distribution.
# This script creates a CORE session, that will connect n nodes together
# in a chain, with static routes between nodes
# number of nodes / number of hops
# 2 0
# 3 1
# 4 2
# n n - 2
#
# Use core-cleanup to clean up after this script as the session is left running.
#
import datetime
import optparse
import sys
from core import constants
from core.misc import ipaddress, nodeutils, nodemaps
from core.netns import nodes
# node list (count from 1)
from core.session import Session
n = [None]
def main():
usagestr = "usage: %prog [-h] [options] [args]"
parser = optparse.OptionParser(usage=usagestr)
parser.set_defaults(numnodes=5)
parser.add_option("-n", "--numnodes", dest="numnodes", type=int,
help="number of nodes")
def usage(msg=None, err=0):
sys.stdout.write("\n")
if msg:
sys.stdout.write(msg + "\n\n")
parser.print_help()
sys.exit(err)
# parse command line options
(options, args) = parser.parse_args()
if options.numnodes < 1:
usage("invalid number of nodes: %s" % options.numnodes)
if options.numnodes >= 255:
usage("invalid number of nodes: %s" % options.numnodes)
for a in args:
sys.stderr.write("ignoring command line argument: '%s'\n" % a)
start = datetime.datetime.now()
session = Session(1, persistent=True)
if 'server' in globals():
server.addsession(session)
print "creating %d nodes" % options.numnodes
left = None
prefix = None
for i in xrange(1, options.numnodes + 1):
tmp = session.add_object(cls=nodes.CoreNode, name="n%d" % i, objid=i)
if left:
tmp.newnetif(left, ["%s/%s" % (prefix.addr(2), prefix.prefixlen)])
# limit: i < 255
prefix = ipaddress.Ipv4Prefix("10.83.%d.0/24" % i)
right = session.add_object(cls=nodes.PtpNet)
tmp.newnetif(right, ["%s/%s" % (prefix.addr(1), prefix.prefixlen)])
tmp.cmd([constants.SYSCTL_BIN, "net.ipv4.icmp_echo_ignore_broadcasts=0"])
tmp.cmd([constants.SYSCTL_BIN, "net.ipv4.conf.all.forwarding=1"])
tmp.cmd([constants.SYSCTL_BIN, "net.ipv4.conf.default.rp_filter=0"])
tmp.setposition(x=100 * i, y=150)
n.append(tmp)
left = right
prefixes = map(lambda (x): ipaddress.Ipv4Prefix("10.83.%d.0/24" % x),
xrange(1, options.numnodes + 1))
# set up static routing in the chain
for i in xrange(1, options.numnodes + 1):
for j in xrange(1, options.numnodes + 1):
if j < i - 1:
gw = prefixes[i - 2].addr(1)
elif j > i:
if i > len(prefixes) - 1:
continue
gw = prefixes[i - 1].addr(2)
else:
continue
net = prefixes[j - 1]
n[i].cmd([constants.IP_BIN, "route", "add", str(net), "via", str(gw)])
print "elapsed time: %s" % (datetime.datetime.now() - start)
if __name__ == "__main__" or __name__ == "__builtin__":
# configure nodes to use
node_map = nodemaps.NODES
nodeutils.set_node_map(node_map)
main()
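The static-routing loop in the script deleted above is easiest to follow with a worked example. Subnet 10.83.j.0/24 joins node j to node j+1; node i reaches subnet j through its left neighbor when j < i - 1, through its right neighbor when j > i (the last node excepted), and needs no static route for its own two subnets. The sketch below prints the routes the deleted loop would install for a four-node chain:
# worked example of the removed gateway-selection logic, numnodes = 4
numnodes = 4
for i in range(1, numnodes + 1):
    for j in range(1, numnodes + 1):
        if j < i - 1:
            print("n%d: 10.83.%d.0/24 via left neighbor n%d" % (i, j, i - 1))
        elif j > i and i < numnodes:
            print("n%d: 10.83.%d.0/24 via right neighbor n%d" % (i, j, i + 1))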

View file

@@ -1,290 +0,0 @@
#!/bin/sh -e
#
# iperf-performance.sh
#
# (c)2013 the Boeing Company
# authors: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
#
# Utility script to automate several iperf runs.
#
# number of iperf runs per test
NUMRUNS=10
# number of seconds per run (10s is iperf default)
RUNTIME=10
# logging
LOG=/tmp/${0}.log
STAMP=`date +%Y%m%d%H%M%S`
#
# client---(loopback)---server
#
loopbacktest () {
killall iperf 2> /dev/null || true
echo ">> loopback iperf test"
echo "loopback" > ${LOG}
# start an iperf server in the background
# -s = server
# -y c = CSV output
echo "starting local iperf server"
iperf -s -y c >> ${LOG} &
# run an iperf client NUMRUNS times
i=1
while [ $i -le $NUMRUNS ]; do
echo "run $i/$NUMRUNS:"
iperf -t ${RUNTIME} -c localhost
sleep 0.3
i=$((i+1))
done
sleep 1
echo "stopping local iperf server"
killall -v iperf
}
#
# lxc1( client )---veth-pair---lxc2( server )
#
lxcvethtest () {
SERVERIP=10.0.0.1
CLIENTIP=10.0.0.2
SERVER=/tmp/${0}-server
CLIENT=/tmp/${0}-client
echo ">> lxc veth iperf test"
echo "lxcveth" >> ${LOG}
echo "starting lxc iperf server"
vnoded -l $SERVER.log -p $SERVER.pid -c $SERVER
ip link add name veth0.1 type veth peer name veth0
ip link set veth0 netns `cat $SERVER.pid` up
vcmd -c $SERVER -- ip link set lo up
vcmd -c $SERVER -- ip addr add $SERVERIP/24 dev veth0
vcmd -c $SERVER -- iperf -s -y c >> ${LOG} &
echo "starting lxc iperf client"
vnoded -l $CLIENT.log -p $CLIENT.pid -c $CLIENT
ip link set veth0.1 netns `cat $CLIENT.pid` up
vcmd -c $CLIENT -- ip link set lo up
vcmd -c $CLIENT -- ip addr add $CLIENTIP/24 dev veth0.1
i=1
while [ $i -le $NUMRUNS ]; do
echo "run $i/$NUMRUNS:"
vcmd -c $CLIENT -- iperf -t ${RUNTIME} -c ${SERVERIP}
sleep 0.3
i=$((i+1))
done
sleep 1
echo "stopping lxc iperf server"
vcmd -c $SERVER -- killall -v iperf
echo "stopping containers"
kill -9 `cat $SERVER.pid`
kill -9 `cat $CLIENT.pid`
echo "cleaning up"
rm -f ${SERVER}*
rm -f ${CLIENT}*
}
#
# lxc1( client veth:):veth---bridge---veth:(:veth server )lxc2
#
lxcbrtest () {
SERVERIP=10.0.0.1
CLIENTIP=10.0.0.2
SERVER=/tmp/${0}-server
CLIENT=/tmp/${0}-client
BRIDGE="lxcbrtest"
echo ">> lxc bridge iperf test"
echo "lxcbr" >> ${LOG}
echo "building bridge"
brctl addbr $BRIDGE
brctl stp $BRIDGE off # disable spanning tree protocol
brctl setfd $BRIDGE 0 # disable forwarding delay
ip link set $BRIDGE up
echo "starting lxc iperf server"
vnoded -l $SERVER.log -p $SERVER.pid -c $SERVER
ip link add name veth0.1 type veth peer name veth0
ip link set veth0 netns `cat $SERVER.pid` up
vcmd -c $SERVER -- ip link set lo up
vcmd -c $SERVER -- ip addr add $SERVERIP/24 dev veth0
brctl addif $BRIDGE veth0.1
ip link set veth0.1 up
vcmd -c $SERVER -- iperf -s -y c >> ${LOG} &
echo "starting lxc iperf client"
vnoded -l $CLIENT.log -p $CLIENT.pid -c $CLIENT
ip link add name veth1.1 type veth peer name veth1
ip link set veth1 netns `cat $CLIENT.pid` up
vcmd -c $CLIENT -- ip link set lo up
vcmd -c $CLIENT -- ip addr add $CLIENTIP/24 dev veth1
brctl addif $BRIDGE veth1.1
ip link set veth1.1 up
i=1
while [ $i -le $NUMRUNS ]; do
echo "run $i/$NUMRUNS:"
vcmd -c $CLIENT -- iperf -t ${RUNTIME} -c ${SERVERIP}
sleep 0.3
i=$((i+1))
done
sleep 1
echo "stopping lxc iperf server"
vcmd -c $SERVER -- killall -v iperf
echo "stopping containers"
kill -9 `cat $SERVER.pid`
kill -9 `cat $CLIENT.pid`
echo "cleaning up"
ip link set $BRIDGE down
brctl delbr $BRIDGE
rm -f ${SERVER}*
rm -f ${CLIENT}*
}
#
# n1---n2---n3--- ... ---nN
# N nodes (N-2 hops) in chain with static routing
#
chaintest () {
NUMNODES=$1
SERVERIP=10.83.$NUMNODES.1
if [ -d /tmp/pycore.* ]; then
echo "/tmp/pycore.* already exists, skipping chaintest $NUMNODES"
return
fi
echo ">> n=$NUMNODES node chain iperf test"
echo "chain$NUMNODES" >> ${LOG}
echo "running external chain CORE script with '-n $NUMNODES'"
python iperf-performance-chain.py -n $NUMNODES
echo "starting lxc iperf server on node $NUMNODES"
vcmd -c /tmp/pycore.*/n$NUMNODES -- iperf -s -y c >> ${LOG} &
echo "starting lxc iperf client"
i=1
while [ $i -le $NUMRUNS ]; do
echo "run $i/$NUMRUNS:"
vcmd -c /tmp/pycore.*/n1 -- iperf -t ${RUNTIME} -c ${SERVERIP}
sleep 0.3
i=$((i+1))
done
sleep 1
echo "stopping lxc iperf server"
vcmd -c /tmp/pycore.*/n$NUMNODES -- killall -v iperf
echo "cleaning up"
core-cleanup
}
if [ "z$1" != "z" ]; then
echo "This script takes no parameters and must be run as root."
exit 1
fi
if [ `id -u` != 0 ]; then
echo "This script must be run as root."
exit 1
fi
#
# N lxc clients >---bridge---veth:(:veth server )
#
clientstest () {
NUMCLIENTS=$1
SERVERIP=10.0.0.1
SERVER=/tmp/${0}-server
BRIDGE="lxcbrtest"
echo ">> n=$NUMCLIENTS clients iperf test"
echo "clients$NUMCLIENTS" >> ${LOG}
echo "building bridge"
brctl addbr $BRIDGE
brctl stp $BRIDGE off # disable spanning tree protocol
brctl setfd $BRIDGE 0 # disable forwarding delay
ip link set $BRIDGE up
echo "starting lxc iperf server"
vnoded -l $SERVER.log -p $SERVER.pid -c $SERVER
ip link add name veth0.1 type veth peer name veth0
ip link set veth0 netns `cat $SERVER.pid` up
vcmd -c $SERVER -- ip link set lo up
vcmd -c $SERVER -- ip addr add $SERVERIP/24 dev veth0
brctl addif $BRIDGE veth0.1
ip link set veth0.1 up
vcmd -c $SERVER -- iperf -s -y c >> ${LOG} &
i=1
CLIENTS=""
while [ $i -le $NUMCLIENTS ]; do
echo "starting lxc iperf client $i/$NUMCLIENTS"
CLIENT=/tmp/${0}-client$i
CLIENTIP=10.0.0.1$i
vnoded -l $CLIENT.log -p $CLIENT.pid -c $CLIENT
ip link add name veth1.$i type veth peer name veth1
ip link set veth1 netns `cat $CLIENT.pid` up
vcmd -c $CLIENT -- ip link set lo up
vcmd -c $CLIENT -- ip addr add $CLIENTIP/24 dev veth1
brctl addif $BRIDGE veth1.$i
ip link set veth1.$i up
i=$((i+1))
CLIENTS="$CLIENTS $CLIENT"
done
j=1
while [ $j -le $NUMRUNS ]; do
echo "run $j/$NUMRUNS iperf:"
for CLIENT in $CLIENTS; do
vcmd -c $CLIENT -- iperf -t ${RUNTIME} -c ${SERVERIP} &
done
sleep ${RUNTIME} 1
j=$((j+1))
done
sleep 1
echo "stopping lxc iperf server"
vcmd -c $SERVER -- killall -v iperf
echo "stopping containers"
kill -9 `cat $SERVER.pid`
for CLIENT in $CLIENTS; do
kill -9 `cat $CLIENT.pid`
done
# time needed for processes/containers to shut down
sleep 2
echo "cleaning up"
ip link set $BRIDGE down
brctl delbr $BRIDGE
rm -f ${SERVER}*
rm -f /tmp/${0}-client*
# time needed for bridge clean-up
sleep 1
}
#
# run all tests
#
loopbacktest
lxcvethtest
lxcbrtest
chaintest 5
chaintest 10
clientstest 5
clientstest 10
clientstest 15
mv ${LOG} ${PWD}/${0}-${STAMP}.log
echo "===> results in ${PWD}/${0}-${STAMP}.log"
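The log this script leaves behind interleaves bare section markers (loopback, lxcveth, lxcbr, chainN, clientsN) with the CSV rows the iperf servers append via -y c. A hypothetical post-processing helper, not part of the repository, assuming the usual iperf CSV layout with bits per second in the final column:
from collections import defaultdict

def summarize(path):
    """Average the server-side iperf bandwidth per test section."""
    results, section = defaultdict(list), None
    with open(path) as log:
        for line in log:
            line = line.strip()
            if not line:
                continue
            if "," not in line:
                section = line                       # marker line, e.g. "chain5"
            else:
                results[section].append(float(line.split(",")[-1]))
    for name, rates in sorted(results.items()):
        avg_mbps = sum(rates) / len(rates) / 1e6
        print("%s: %.1f Mbit/s average over %d runs" % (name, avg_mbps, len(rates)))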

View file

@@ -16,8 +16,9 @@ import time
from string import Template
from core.constants import QUAGGA_STATE_DIR
from core.misc import ipaddress, nodeutils, nodemaps
from core.misc.utils import mutecall
from core.misc import ipaddress
from core.misc.utils import check_cmd
from core.netns import nodes
# this is the /etc/core/core.conf default
@@ -32,8 +33,7 @@ try:
if os.path.exists(os.path.join(p, "zebra")):
quagga_path = p
break
mutecall([os.path.join(quagga_path, "zebra"),
"-u", "root", "-g", "root", "-v"])
check_cmd([os.path.join(quagga_path, "zebra"), "-u", "root", "-g", "root", "-v"])
except OSError:
sys.stderr.write("ERROR: running zebra failed\n")
sys.exit(1)
@@ -385,8 +385,7 @@ class Cmd:
def open(self):
""" Exceute call to node.popen(). """
self.id, self.stdin, self.out, self.err = \
self.node.popen(self.args)
self.id, self.stdin, self.out, self.err = self.node.client.popen(self.args)
def parse(self):
""" This method is overloaded by child classes and should return some
@@ -409,7 +408,7 @@ class VtyshCmd(Cmd):
def open(self):
args = ("vtysh", "-c", self.args)
self.id, self.stdin, self.out, self.err = self.node.popen(args)
self.id, self.stdin, self.out, self.err = self.node.client.popen(args)
class Ospf6NeighState(VtyshCmd):
@@ -602,8 +601,4 @@ def main():
if __name__ == "__main__":
# configure nodes to use
node_map = nodemaps.NODES
nodeutils.set_node_map(node_map)
me = main()

View file

@@ -1,80 +0,0 @@
#!/usr/bin/python -i
# Copyright (c)2010-2013 the Boeing Company.
# See the LICENSE file included in this distribution.
# connect n nodes to a virtual switch/hub
import datetime
import optparse
import sys
from core import constants
from core.misc import ipaddress, nodeutils, nodemaps
from core.netns import nodes
# node list (count from 1)
from core.session import Session
n = [None]
def main():
usagestr = "usage: %prog [-h] [options] [args]"
parser = optparse.OptionParser(usage=usagestr)
parser.set_defaults(numnodes=5)
parser.add_option("-n", "--numnodes", dest="numnodes", type=int,
help="number of nodes")
def usage(msg=None, err=0):
sys.stdout.write("\n")
if msg:
sys.stdout.write(msg + "\n\n")
parser.print_help()
sys.exit(err)
# parse command line options
(options, args) = parser.parse_args()
if options.numnodes < 1:
usage("invalid number of nodes: %s" % options.numnodes)
for a in args:
sys.stderr.write("ignoring command line argument: '%s'\n" % a)
start = datetime.datetime.now()
# IP subnet
prefix = ipaddress.Ipv4Prefix("10.83.0.0/16")
session = Session(1, persistent=True)
if 'server' in globals():
server.addsession(session)
# emulated Ethernet switch
switch = session.add_object(cls=nodes.SwitchNode, name="switch")
switch.setposition(x=80, y=50)
print "creating %d nodes with addresses from %s" % (options.numnodes, prefix)
for i in xrange(1, options.numnodes + 1):
tmp = session.add_object(cls=nodes.CoreNode, name="n%d" % i, objid=i)
tmp.newnetif(switch, ["%s/%s" % (prefix.addr(i), prefix.prefixlen)])
tmp.cmd([constants.SYSCTL_BIN, "net.ipv4.icmp_echo_ignore_broadcasts=0"])
tmp.setposition(x=150 * i, y=150)
n.append(tmp)
session.node_count = str(options.numnodes + 1)
session.instantiate()
print "elapsed time: %s" % (datetime.datetime.now() - start)
# start a shell on node 1
n[1].term("bash")
raw_input("press enter to exit")
session.shutdown()
if __name__ == "__main__" or __name__ == "__builtin__":
# configure nodes to use
node_map = nodemaps.NODES
nodeutils.set_node_map(node_map)
main()

View file

@@ -1,108 +0,0 @@
#!/usr/bin/python
# Copyright (c)2010-2012 the Boeing Company.
# See the LICENSE file included in this distribution.
# run iperf to measure the effective throughput between two nodes when
# n nodes are connected to a virtual hub/switch; run test for testsec
# and repeat for minnodes <= n <= maxnodes with a step size of
# nodestep
import datetime
import optparse
import sys
from core.misc import ipaddress, nodeutils, nodemaps
from core.misc.utils import mutecall
from core.netns import nodes
from core.session import Session
try:
mutecall(["iperf", "-v"])
except OSError:
sys.stderr.write("ERROR: running iperf failed\n")
sys.exit(1)
def test(numnodes, testsec):
# node list
n = []
# IP subnet
prefix = ipaddress.Ipv4Prefix("10.83.0.0/16")
session = Session(1)
# emulated network
net = session.add_object(cls=nodes.SwitchNode)
for i in xrange(1, numnodes + 1):
tmp = session.add_object(cls=nodes.LxcNode, name="n%d" % i)
tmp.newnetif(net, ["%s/%s" % (prefix.addr(i), prefix.prefixlen)])
n.append(tmp)
n[0].cmd(["iperf", "-s", "-D"])
n[-1].icmd(["iperf", "-t", str(int(testsec)), "-c", str(prefix.addr(1))])
n[0].cmd(["killall", "-9", "iperf"])
raw_input("press enter to exit")
session.shutdown()
def main():
usagestr = "usage: %prog [-h] [options] [args]"
parser = optparse.OptionParser(usage=usagestr)
parser.set_defaults(minnodes=2)
parser.add_option("-m", "--minnodes", dest="minnodes", type=int,
help="min number of nodes to test; default = %s" %
parser.defaults["minnodes"])
parser.set_defaults(maxnodes=2)
parser.add_option("-n", "--maxnodes", dest="maxnodes", type=int,
help="max number of nodes to test; default = %s" %
parser.defaults["maxnodes"])
parser.set_defaults(testsec=10)
parser.add_option("-t", "--testsec", dest="testsec", type=int,
help="test time in seconds; default = %s" %
parser.defaults["testsec"])
parser.set_defaults(nodestep=1)
parser.add_option("-s", "--nodestep", dest="nodestep", type=int,
help="number of nodes step size; default = %s" %
parser.defaults["nodestep"])
def usage(msg=None, err=0):
sys.stdout.write("\n")
if msg:
sys.stdout.write(msg + "\n\n")
parser.print_help()
sys.exit(err)
# parse command line options
(options, args) = parser.parse_args()
if options.minnodes < 2:
usage("invalid min number of nodes: %s" % options.minnodes)
if options.maxnodes < options.minnodes:
usage("invalid max number of nodes: %s" % options.maxnodes)
if options.testsec < 1:
usage("invalid test time: %s" % options.testsec)
if options.nodestep < 1:
usage("invalid node step: %s" % options.nodestep)
for a in args:
sys.stderr.write("ignoring command line argument: '%s'\n" % a)
start = datetime.datetime.now()
for i in xrange(options.minnodes, options.maxnodes + 1, options.nodestep):
print >> sys.stderr, "%s node test:" % i
test(i, options.testsec)
print >> sys.stderr, ""
print >> sys.stderr, "elapsed time: %s" % (datetime.datetime.now() - start)
if __name__ == "__main__":
# configure nodes to use
node_map = nodemaps.NODES
nodeutils.set_node_map(node_map)
main()

View file

@@ -1,33 +0,0 @@
#!/bin/sh -e
# Below is a transcript of creating two emulated nodes and connecting them
# together with a wired link. You can run the core-cleanup script to clean
# up after this script.
# create node 1 namespace container
vnoded -c /tmp/n1.ctl -l /tmp/n1.log -p /tmp/n1.pid
# create a virtual Ethernet (veth) pair, installing one end into node 1
ip link add name n1.0.1 type veth peer name n1.0
ip link set n1.0 netns `cat /tmp/n1.pid`
vcmd -c /tmp/n1.ctl -- /bin/sh -e -c \
"ip link set lo up && ip link set n1.0 name eth0 up && ip addr add 10.0.0.1/24 dev eth0"
# create node 2 namespace container
vnoded -c /tmp/n2.ctl -l /tmp/n2.log -p /tmp/n2.pid
# create a virtual Ethernet (veth) pair, installing one end into node 2
ip link add name n2.0.1 type veth peer name n2.0
ip link set n2.0 netns `cat /tmp/n2.pid`
vcmd -c /tmp/n2.ctl -- /bin/sh -e -c \
"ip link set lo up && ip link set n2.0 name eth0 up && ip addr add 10.0.0.2/24 dev eth0"
# bridge together nodes 1 and 2 using the other end of each veth pair
brctl addbr b.1.1
brctl setfd b.1.1 0
brctl addif b.1.1 n1.0.1
brctl addif b.1.1 n2.0.1
ip link set n1.0.1 up
ip link set n2.0.1 up
ip link set b.1.1 up
# display connectivity and ping from node 1 to node 2
brctl show
vcmd -c /tmp/n1.ctl -- ping 10.0.0.2

View file

@@ -41,7 +41,7 @@ from core import emane
from core.emane.bypass import EmaneBypassModel
from core.emane.nodes import EmaneNode
from core.emane.rfpipe import EmaneRfPipeModel
from core.misc import ipaddress, nodemaps, nodeutils
from core.misc import ipaddress
from core.netns import nodes
from core.session import Session
@@ -131,7 +131,7 @@ class Cmd(object):
def open(self):
""" Exceute call to node.popen(). """
self.id, self.stdin, self.out, self.err = self.node.popen(self.args)
self.id, self.stdin, self.out, self.err = self.node.client.popen(self.args)
def parse(self):
""" This method is overloaded by child classes and should return some
@@ -166,7 +166,7 @@ class ClientServerCmd(Cmd):
self.client_open() # client
status = self.client_id.wait()
# stop the server
self.node.cmdresult(["killall", self.args[0]])
self.node.cmd_output(["killall", self.args[0]])
r = self.parse()
self.cleanup()
return r
@@ -174,7 +174,7 @@ class ClientServerCmd(Cmd):
def client_open(self):
""" Exceute call to client_node.popen(). """
self.client_id, self.client_stdin, self.client_out, self.client_err = \
self.client_node.popen(self.client_args)
self.client_node.client.popen(self.client_args)
def parse(self):
""" This method is overloaded by child classes and should return some
@@ -207,7 +207,7 @@ class PingCmd(Cmd):
def run(self):
if self.verbose:
self.info("%s initial test ping (max 1 second)..." % self.node.name)
(status, result) = self.node.cmdresult(["ping", "-q", "-c", "1", "-w", "1", self.addr])
(status, result) = self.node.cmd_output(["ping", "-q", "-c", "1", "-w", "1", self.addr])
if status != 0:
self.warn("initial ping from %s to %s failed! result:\n%s" %
(self.node.name, self.addr, result))
@@ -226,7 +226,7 @@ class PingCmd(Cmd):
stats = stats_str.split("/")
avg_latency = float(stats[1])
mdev = float(stats[3].split(" ")[0])
except Exception, e:
except:
self.warn("ping parsing exception: %s" % e)
return avg_latency, mdev
@@ -487,13 +487,13 @@ class Experiment(object):
if i > 1:
neigh_left = "%s" % prefix.addr(i - 1)
cmd = routecmd + [neigh_left, "dev", node.netif(0).name]
(status, result) = node.cmdresult(cmd)
(status, result) = node.cmd_output(cmd)
if status != 0:
self.warn("failed to add interface route: %s" % cmd)
if i < numnodes:
neigh_right = "%s" % prefix.addr(i + 1)
cmd = routecmd + [neigh_right, "dev", node.netif(0).name]
(status, result) = node.cmdresult(cmd)
(status, result) = node.cmd_output(cmd)
if status != 0:
self.warn("failed to add interface route: %s" % cmd)
@@ -507,7 +507,7 @@ class Experiment(object):
else:
gw = neigh_right
cmd = routecmd + [addr, "via", gw]
(status, result) = node.cmdresult(cmd)
(status, result) = node.cmd_output(cmd)
if status != 0:
self.warn("failed to add route: %s" % cmd)
@@ -635,8 +635,8 @@ class Experiment(object):
if self.verbose:
self.info("%s initial test ping (max 1 second)..." % \
self.firstnode.name)
(status, result) = self.firstnode.cmdresult(["ping", "-q", "-c", "1",
"-w", "1", self.lastaddr])
(status, result) = self.firstnode.cmd_output(["ping", "-q", "-c", "1",
"-w", "1", self.lastaddr])
if status != 0:
self.warn("initial ping from %s to %s failed! result:\n%s" % \
(self.firstnode.name, self.lastaddr, result))
@@ -706,11 +706,6 @@ def main():
exp = Experiment(opt=opt, start=starttime)
exp.info("Starting wlanemanetests.py tests %s" % starttime.ctime())
# system sanity checks here
emanever, emaneverstr = emane.VERSION, emane.VERSIONSTR
if opt.verbose:
exp.info("Detected EMANE version %s" % (emaneverstr,))
# bridged
exp.info("setting up bridged tests 1/2 no link effects")
exp.info("creating topology: numnodes = %s" % (opt.numnodes,))
@@ -851,8 +846,4 @@ def main():
if __name__ == "__main__":
# configure nodes to use
node_map = nodemaps.NODES
nodeutils.set_node_map(node_map)
main()
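The recurring edit in this file renames node.cmdresult to node.cmd_output and routes popen through node.client. A hedged sketch of that pattern as a stand-alone helper taking an already-created CORE node; only the two renamed calls visible in the hunks above are assumed to exist, everything else is illustrative:
def ping_and_query(node, addr, vtysh_command):
    # one-shot command: cmd_output (formerly cmdresult) returns (status, output)
    status, output = node.cmd_output(["ping", "-q", "-c", "1", "-w", "1", addr])
    if status != 0:
        print("ping from %s to %s failed:\n%s" % (node.name, addr, output))
    # longer-running command: handles now come back from the node's client
    cmd_id, stdin, out, err = node.client.popen(("vtysh", "-c", vtysh_command))
    return status, cmd_id, out, err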

View file

@@ -1,106 +0,0 @@
#!/usr/bin/python
# Copyright (c)2010-2012 the Boeing Company.
# See the LICENSE file included in this distribution.
# run iperf to measure the effective throughput between two nodes when
# n nodes are connected to a virtual wlan; run test for testsec
# and repeat for minnodes <= n <= maxnodes with a step size of
# nodestep
import datetime
import optparse
import sys
from core.misc import ipaddress, nodeutils, nodemaps
from core.misc.utils import mutecall
from core.netns import nodes
from core.session import Session
try:
mutecall(["iperf", "-v"])
except OSError:
sys.stderr.write("ERROR: running iperf failed\n")
sys.exit(1)
def test(numnodes, testsec):
# node list
n = []
# IP subnet
prefix = ipaddress.Ipv4Prefix("10.83.0.0/16")
session = Session(1)
# emulated network
net = session.add_object(cls=nodes.WlanNode)
for i in xrange(1, numnodes + 1):
tmp = session.add_object(cls=nodes.LxcNode, objid="%d" % i, name="n%d" % i)
tmp.newnetif(net, ["%s/%s" % (prefix.addr(i), prefix.prefixlen)])
n.append(tmp)
net.link(n[0].netif(0), n[-1].netif(0))
n[0].cmd(["iperf", "-s", "-D"])
n[-1].icmd(["iperf", "-t", str(int(testsec)), "-c", str(prefix.addr(1))])
n[0].cmd(["killall", "-9", "iperf"])
session.shutdown()
def main():
usagestr = "usage: %prog [-h] [options] [args]"
parser = optparse.OptionParser(usage=usagestr)
parser.set_defaults(minnodes=2)
parser.add_option("-m", "--minnodes", dest="minnodes", type=int,
help="min number of nodes to test; default = %s" % parser.defaults["minnodes"])
parser.set_defaults(maxnodes=2)
parser.add_option("-n", "--maxnodes", dest="maxnodes", type=int,
help="max number of nodes to test; default = %s" %
parser.defaults["maxnodes"])
parser.set_defaults(testsec=10)
parser.add_option("-t", "--testsec", dest="testsec", type=int,
help="test time in seconds; default = %s" %
parser.defaults["testsec"])
parser.set_defaults(nodestep=1)
parser.add_option("-s", "--nodestep", dest="nodestep", type=int,
help="number of nodes step size; default = %s" %
parser.defaults["nodestep"])
def usage(msg=None, err=0):
sys.stdout.write("\n")
if msg:
sys.stdout.write(msg + "\n\n")
parser.print_help()
sys.exit(err)
# parse command line options
(options, args) = parser.parse_args()
if options.minnodes < 2:
usage("invalid min number of nodes: %s" % options.minnodes)
if options.maxnodes < options.minnodes:
usage("invalid max number of nodes: %s" % options.maxnodes)
if options.testsec < 1:
usage("invalid test time: %s" % options.testsec)
if options.nodestep < 1:
usage("invalid node step: %s" % options.nodestep)
for a in args:
sys.stderr.write("ignoring command line argument: '%s'\n" % a)
start = datetime.datetime.now()
for i in xrange(options.minnodes, options.maxnodes + 1, options.nodestep):
print >> sys.stderr, "%s node test:" % i
test(i, options.testsec)
print >> sys.stderr, ""
print >> sys.stderr, "elapsed time: %s" % (datetime.datetime.now() - start)
if __name__ == "__main__":
# configure nodes to use
node_map = nodemaps.NODES
nodeutils.set_node_map(node_map)
main()

Some files were not shown because too many files have changed in this diff.