From 174bae6de1136ef4bdb272a27f7e694d13600dc0 Mon Sep 17 00:00:00 2001 From: SaintYomar Date: Thu, 6 Jun 2019 14:59:35 -0400 Subject: [PATCH 01/51] Update usage.md Added icons for the toolbar buttons. --- docs/usage.md | 54 +++++++++++++++++++++++++-------------------------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/docs/usage.md b/docs/usage.md index fa029e2c..744c33a2 100644 --- a/docs/usage.md +++ b/docs/usage.md @@ -69,51 +69,51 @@ The toolbar is a row of buttons that runs vertically along the left side of the When CORE is in Edit mode (the default), the vertical Editing Toolbar exists on the left side of the CORE window. Below are brief descriptions for each toolbar item, starting from the top. Most of the tools are grouped into related sub-menus, which appear when you click on their group icon. -* |select| *Selection Tool* - default tool for selecting, moving, configuring nodes -* |start| *Start button* - starts Execute mode, instantiates the emulation -* |link| *Link* - the Link Tool allows network links to be drawn between two nodes by clicking and dragging the mouse -* |router| *Network-layer virtual nodes* - * |router| *Router* - runs Quagga OSPFv2 and OSPFv3 routing to forward packets - * |host| *Host* - emulated server machine having a default route, runs SSH server - * |pc| *PC* - basic emulated machine having a default route, runs no processes by default - * |mdr| *MDR* - runs Quagga OSPFv3 MDR routing for MANET-optimized routing - * |router_green| *PRouter* - physical router represents a real testbed machine - * |document_properties| *Edit* - edit node types button invokes the CORE Node Types dialog. New types of nodes may be created having different icons and names. The default services that are started with each node type can be changed here. 
-* |hub| *Link-layer nodes* - * |hub| *Hub* - the Ethernet hub forwards incoming packets to every connected node - * |lanswitch| *Switch* - the Ethernet switch intelligently forwards incoming packets to attached hosts using an Ethernet address hash table - * |wlan| *Wireless LAN* - when routers are connected to this WLAN node, they join a wireless network and an antenna is drawn instead of a connecting line; the WLAN node typically controls connectivity between attached wireless nodes based on the distance between them - * |rj45| *RJ45* - with the RJ45 Physical Interface Tool, emulated nodes can be linked to real physical interfaces; using this tool, real networks and devices can be physically connected to the live-running emulation - * |tunnel| *Tunnel* - the Tunnel Tool allows connecting together more than one CORE emulation using GRE tunnels +* ![alt text](https://github.com/coreemu/core/blob/master/gui/icons/tiny/select.gif) *Selection Tool* - default tool for selecting, moving, configuring nodes +* ![alt text](https://github.com/coreemu/core/blob/master/gui/icons/tiny/start.gif) *Start button* - starts Execute mode, instantiates the emulation +* ![alt text](https://github.com/coreemu/core/blob/master/gui/icons/tiny/link.gif) *Link* - the Link Tool allows network links to be drawn between two nodes by clicking and dragging the mouse +* ![alt text](https://github.com/coreemu/core/blob/master/gui/icons/tiny/router.gif) *Network-layer virtual nodes* + * ![alt text](https://github.com/coreemu/core/blob/master/gui/icons/tiny/router.gif) *Router* - runs Quagga OSPFv2 and OSPFv3 routing to forward packets + * ![alt text](https://github.com/coreemu/core/blob/master/gui/icons/tiny/host.gif) *Host* - emulated server machine having a default route, runs SSH server + * ![alt text](https://github.com/coreemu/core/blob/master/gui/icons/tiny/pc.gif) *PC* - basic emulated machine having a default route, runs no processes by default + * ![alt 
text](https://github.com/coreemu/core/blob/master/gui/icons/tiny/mdr.gif) *MDR* - runs Quagga OSPFv3 MDR routing for MANET-optimized routing + * ![alt text](https://github.com/coreemu/core/blob/master/gui/icons/tiny/router_green.gif) *PRouter* - physical router represents a real testbed machine + * ![alt text](https://github.com/coreemu/core/blob/master/gui/icons/tiny/document-properties.gif) *Edit* - edit node types button invokes the CORE Node Types dialog. New types of nodes may be created having different icons and names. The default services that are started with each node type can be changed here. +* ![alt text](https://github.com/coreemu/core/blob/master/gui/icons/tiny/hub.gif) *Link-layer nodes* + * ![alt text](https://github.com/coreemu/core/blob/master/gui/icons/tiny/hub.gif) *Hub* - the Ethernet hub forwards incoming packets to every connected node + * ![alt text](https://github.com/coreemu/core/blob/master/gui/icons/tiny/lanswitch.gif) *Switch* - the Ethernet switch intelligently forwards incoming packets to attached hosts using an Ethernet address hash table + * ![alt text](https://github.com/coreemu/core/blob/master/gui/icons/tiny/wlan.gif) *Wireless LAN* - when routers are connected to this WLAN node, they join a wireless network and an antenna is drawn instead of a connecting line; the WLAN node typically controls connectivity between attached wireless nodes based on the distance between them + * ![alt text](https://github.com/coreemu/core/blob/master/gui/icons/tiny/rj45.gif) *RJ45* - with the RJ45 Physical Interface Tool, emulated nodes can be linked to real physical interfaces; using this tool, real networks and devices can be physically connected to the live-running emulation + * ![alt text](https://github.com/coreemu/core/blob/master/gui/icons/tiny/tunnel.gif) *Tunnel* - the Tunnel Tool allows connecting together more than one CORE emulation using GRE tunnels * *Annotation Tools* - * |marker| *Marker* - for drawing marks on the canvas - * |oval| 
*Oval* - for drawing circles on the canvas that appear in the background - * |rectangle| *Rectangle* - for drawing rectangles on the canvas that appear in the background - * |text| *Text* - for placing text captions on the canvas + * ![alt text](https://github.com/coreemu/core/blob/master/gui/icons/tiny/marker.gif) *Marker* - for drawing marks on the canvas + * ![alt text](https://github.com/coreemu/core/blob/master/gui/icons/tiny/oval.gif) *Oval* - for drawing circles on the canvas that appear in the background + * ![alt text](https://github.com/coreemu/core/blob/master/gui/icons/tiny/rectangle.gif) *Rectangle* - for drawing rectangles on the canvas that appear in the background + * ![alt text](https://github.com/coreemu/core/blob/master/gui/icons/tiny/text.gif) *Text* - for placing text captions on the canvas ### Execution Toolbar When the Start button is pressed, CORE switches to Execute mode, and the Edit toolbar on the left of the CORE window is replaced with the Execution toolbar Below are the items on this toolbar, starting from the top. -* |select| *Selection Tool* - in Execute mode, the Selection Tool can be used for moving nodes around the canvas, and double-clicking on a node will open a shell window for that node; right-clicking on a node invokes a pop-up menu of run-time options for that node -* |stop| *Stop button* - stops Execute mode, terminates the emulation, returns CORE to edit mode. 
-* |observe| *Observer Widgets Tool* - clicking on this magnifying glass icon +* ![alt text](https://github.com/coreemu/core/blob/master/gui/icons/tiny/select.gif) *Selection Tool* - in Execute mode, the Selection Tool can be used for moving nodes around the canvas, and double-clicking on a node will open a shell window for that node; right-clicking on a node invokes a pop-up menu of run-time options for that node +* ![alt text](https://github.com/coreemu/core/blob/master/gui/icons/tiny/stop.gif) *Stop button* - stops Execute mode, terminates the emulation, returns CORE to edit mode. +* ![alt text](https://github.com/coreemu/core/blob/master/gui/icons/tiny/observe.gif) *Observer Widgets Tool* - clicking on this magnifying glass icon invokes a menu for easily selecting an Observer Widget. The icon has a darker gray background when an Observer Widget is active, during which time moving the mouse over a node will pop up an information display for that node. -* |plot| *Plot Tool* - with this tool enabled, clicking on any link will +* ![alt text](https://github.com/coreemu/core/blob/master/gui/icons/tiny/plot.gif) *Plot Tool* - with this tool enabled, clicking on any link will activate the Throughput Widget and draw a small, scrolling throughput plot on the canvas. The plot shows the real-time kbps traffic for that link. The plots may be dragged around the canvas; right-click on a plot to remove it. 
-* |marker| *Marker* - for drawing freehand lines on the canvas, useful during +* ![alt text](https://github.com/coreemu/core/blob/master/gui/icons/tiny/marker.gif) *Marker* - for drawing freehand lines on the canvas, useful during demonstrations; markings are not saved -* |twonode| *Two-node Tool* - click to choose a starting and ending node, and +* ![alt text](https://github.com/coreemu/core/blob/master/gui/icons/tiny/twonode.gif) *Two-node Tool* - click to choose a starting and ending node, and run a one-time *traceroute* between those nodes or a continuous *ping -R* between nodes. The output is displayed in real time in a results box, while the IP addresses are parsed and the complete network path is highlighted on the CORE display. -* |run| *Run Tool* - this tool allows easily running a command on all or a +* ![alt text](https://github.com/coreemu/core/blob/master/gui/icons/tiny/run.gif) *Run Tool* - this tool allows easily running a command on all or a subset of all nodes. A list box allows selecting any of the nodes. A text entry box allows entering any command. The command should return immediately, otherwise the display will block awaiting response. The *ping* command, for From 4535cef89ef4268e80184dbeff81902db9d55f56 Mon Sep 17 00:00:00 2001 From: SaintYomar Date: Mon, 10 Jun 2019 07:51:00 -0500 Subject: [PATCH 02/51] Updated usage.md Deleted services division --- docs/usage.md | 149 -------------------------------------------------- 1 file changed, 149 deletions(-) diff --git a/docs/usage.md b/docs/usage.md index 744c33a2..09f549b3 100644 --- a/docs/usage.md +++ b/docs/usage.md @@ -834,155 +834,6 @@ to the Linux bridging and ebtables rules that are used. The basic range wireless model does not support distributed emulation, but EMANE does. -## Services - -CORE uses the concept of services to specify what processes or scripts run on a -node when it is started. Layer-3 nodes such as routers and PCs are defined by -the services that they run. 
- -Services may be customized for each node, or new custom services can be -created. New node types can be created each having a different name, icon, and -set of default services. Each service defines the per-node directories, -configuration files, startup index, starting commands, validation commands, -shutdown commands, and meta-data associated with a node. - -**NOTE:** - Network namespace nodes do not undergo the normal Linux boot process - using the **init**, **upstart**, or **systemd** frameworks. These - lightweight nodes use configured CORE *services*. - -### Default Services and Node Types - -Here are the default node types and their services: - -* *router* - zebra, OSFPv2, OSPFv3, and IPForward services for IGP - link-state routing. -* *host* - DefaultRoute and SSH services, representing an SSH server having a - default route when connected directly to a router. -* *PC* - DefaultRoute service for having a default route when connected - directly to a router. -* *mdr* - zebra, OSPFv3MDR, and IPForward services for - wireless-optimized MANET Designated Router routing. -* *prouter* - a physical router, having the same default services as the - *router* node type; for incorporating Linux testbed machines into an - emulation. - -Configuration files can be automatically generated by each service. For -example, CORE automatically generates routing protocol configuration for the -router nodes in order to simplify the creation of virtual networks. - -To change the services associated with a node, double-click on the node to -invoke its configuration dialog and click on the *Services...* button, -or right-click a node a choose *Services...* from the menu. -Services are enabled or disabled by clicking on their names. The button next to -each service name allows you to customize all aspects of this service for this -node. For example, special route redistribution commands could be inserted in -to the Quagga routing configuration associated with the zebra service. 
- -To change the default services associated with a node type, use the Node Types -dialog available from the *Edit* button at the end of the Layer-3 nodes -toolbar, or choose *Node types...* from the *Session* menu. Note that -any new services selected are not applied to existing nodes if the nodes have -been customized. - -The node types are saved in a **~/.core/nodes.conf** file, not with the -**.imn** file. Keep this in mind when changing the default services for -existing node types; it may be better to simply create a new node type. It is -recommended that you do not change the default built-in node types. The -**nodes.conf** file can be copied between CORE machines to save your custom -types. - -### Customizing a Service - -A service can be fully customized for a particular node. From the node's -configuration dialog, click on the button next to the service name to invoke -the service customization dialog for that service. -The dialog has three tabs for configuring the different aspects of the service: -files, directories, and startup/shutdown. - -**NOTE:** - A **yellow** customize icon next to a service indicates that service - requires customization (e.g. the *Firewall* service). - A **green** customize icon indicates that a custom configuration exists. - Click the *Defaults* button when customizing a service to remove any - customizations. - -The Files tab is used to display or edit the configuration files or scripts that -are used for this service. Files can be selected from a drop-down list, and -their contents are displayed in a text entry below. The file contents are -generated by the CORE daemon based on the network topology that exists at -the time the customization dialog is invoked. - -The Directories tab shows the per-node directories for this service. For the -default types, CORE nodes share the same filesystem tree, except for these -per-node directories that are defined by the services. 
For example, the -**/var/run/quagga** directory needs to be unique for each node running -the Zebra service, because Quagga running on each node needs to write separate -PID files to that directory. - -**NOTE:** - The **/var/log** and **/var/run** directories are - mounted uniquely per-node by default. - Per-node mount targets can be found in **/tmp/pycore.nnnnn/nN.conf/** - (where *nnnnn* is the session number and *N* is the node number.) - -The Startup/shutdown tab lists commands that are used to start and stop this -service. The startup index allows configuring when this service starts relative -to the other services enabled for this node; a service with a lower startup -index value is started before those with higher values. Because shell scripts -generated by the Files tab will not have execute permissions set, the startup -commands should include the shell name, with -something like ```sh script.sh```. - -Shutdown commands optionally terminate the process(es) associated with this -service. Generally they send a kill signal to the running process using the -*kill* or *killall* commands. If the service does not terminate -the running processes using a shutdown command, the processes will be killed -when the *vnoded* daemon is terminated (with *kill -9*) and -the namespace destroyed. It is a good practice to -specify shutdown commands, which will allow for proper process termination, and -for run-time control of stopping and restarting services. - -Validate commands are executed following the startup commands. A validate -command can execute a process or script that should return zero if the service -has started successfully, and have a non-zero return value for services that -have had a problem starting. For example, the *pidof* command will check -if a process is running and return zero when found. When a validate command -produces a non-zero return value, an exception is generated, which will cause -an error to be displayed in the Check Emulation Light. 
- -**TIP:** - To start, stop, and restart services during run-time, right-click a - node and use the *Services...* menu. - -### Creating new Services - -Services can save time required to configure nodes, especially if a number -of nodes require similar configuration procedures. New services can be -introduced to automate tasks. - -The easiest way to capture the configuration of a new process into a service -is by using the **UserDefined** service. This is a blank service where any -aspect may be customized. The UserDefined service is convenient for testing -ideas for a service before adding a new service type. - -To introduce new service types, a **myservices/** directory exists in the -user's CORE configuration directory, at **~/.core/myservices/**. A detailed -**README.txt** file exists in that directory to outline the steps necessary -for adding a new service. First, you need to create a small Python file that -defines the service; then the **custom_services_dir** entry must be set -in the **/etc/core/core.conf** configuration file. A sample is provided in -the **myservices/** directory. - -**NOTE:** - The directory name used in **custom_services_dir** should be unique and - should not correspond to - any existing Python module name. For example, don't use the name **subprocess** - or **services**. - -If you have created a new service type that may be useful to others, please -consider contributing it to the CORE project. 
- ## Check Emulation Light The |cel| Check Emulation Light, or CEL, is located in the bottom right-hand corner From 4c5686fdea33cd2618dd318957b72a44d21a380e Mon Sep 17 00:00:00 2001 From: SaintYomar Date: Mon, 10 Jun 2019 08:19:28 -0500 Subject: [PATCH 03/51] Updated services.md Added the information regarding services removed from the usage.md document --- docs/services.md | 151 +++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 147 insertions(+), 4 deletions(-) diff --git a/docs/services.md b/docs/services.md index 793e6f99..1e67a543 100644 --- a/docs/services.md +++ b/docs/services.md @@ -3,11 +3,154 @@ * Table of Contents {:toc} -## Custom Services +## Services -CORE supports custom developed services by way of dynamically loading user created python files. -Custom services should be placed within the path defined by **custom_services_dir** in the CORE -configuration file. This path cannot end in **/services**. +CORE uses the concept of services to specify what processes or scripts run on a +node when it is started. Layer-3 nodes such as routers and PCs are defined by +the services that they run. + +Services may be customized for each node, or new custom services can be +created. New node types can be created each having a different name, icon, and +set of default services. Each service defines the per-node directories, +configuration files, startup index, starting commands, validation commands, +shutdown commands, and meta-data associated with a node. + +**NOTE:** + Network namespace nodes do not undergo the normal Linux boot process + using the **init**, **upstart**, or **systemd** frameworks. These + lightweight nodes use configured CORE *services*. + +### Default Services and Node Types + +Here are the default node types and their services: + +* *router* - zebra, OSPFv2, OSPFv3, and IPForward services for IGP + link-state routing.
+* *host* - DefaultRoute and SSH services, representing an SSH server having a + default route when connected directly to a router. +* *PC* - DefaultRoute service for having a default route when connected + directly to a router. +* *mdr* - zebra, OSPFv3MDR, and IPForward services for + wireless-optimized MANET Designated Router routing. +* *prouter* - a physical router, having the same default services as the + *router* node type; for incorporating Linux testbed machines into an + emulation. + +Configuration files can be automatically generated by each service. For +example, CORE automatically generates routing protocol configuration for the +router nodes in order to simplify the creation of virtual networks. + +To change the services associated with a node, double-click on the node to +invoke its configuration dialog and click on the *Services...* button, +or right-click a node and choose *Services...* from the menu. +Services are enabled or disabled by clicking on their names. The button next to +each service name allows you to customize all aspects of this service for this +node. For example, special route redistribution commands could be inserted in +to the Quagga routing configuration associated with the zebra service. + +To change the default services associated with a node type, use the Node Types +dialog available from the *Edit* button at the end of the Layer-3 nodes +toolbar, or choose *Node types...* from the *Session* menu. Note that +any new services selected are not applied to existing nodes if the nodes have +been customized. + +The node types are saved in a **~/.core/nodes.conf** file, not with the +**.imn** file. Keep this in mind when changing the default services for +existing node types; it may be better to simply create a new node type. It is +recommended that you do not change the default built-in node types. The +**nodes.conf** file can be copied between CORE machines to save your custom +types.
+ +### Customizing a Service + +A service can be fully customized for a particular node. From the node's +configuration dialog, click on the button next to the service name to invoke +the service customization dialog for that service. +The dialog has three tabs for configuring the different aspects of the service: +files, directories, and startup/shutdown. + +**NOTE:** + A **yellow** customize icon next to a service indicates that service + requires customization (e.g. the *Firewall* service). + A **green** customize icon indicates that a custom configuration exists. + Click the *Defaults* button when customizing a service to remove any + customizations. + +The Files tab is used to display or edit the configuration files or scripts that +are used for this service. Files can be selected from a drop-down list, and +their contents are displayed in a text entry below. The file contents are +generated by the CORE daemon based on the network topology that exists at +the time the customization dialog is invoked. + +The Directories tab shows the per-node directories for this service. For the +default types, CORE nodes share the same filesystem tree, except for these +per-node directories that are defined by the services. For example, the +**/var/run/quagga** directory needs to be unique for each node running +the Zebra service, because Quagga running on each node needs to write separate +PID files to that directory. + +**NOTE:** + The **/var/log** and **/var/run** directories are + mounted uniquely per-node by default. + Per-node mount targets can be found in **/tmp/pycore.nnnnn/nN.conf/** + (where *nnnnn* is the session number and *N* is the node number.) + +The Startup/shutdown tab lists commands that are used to start and stop this +service. The startup index allows configuring when this service starts relative +to the other services enabled for this node; a service with a lower startup +index value is started before those with higher values. 
Because shell scripts +generated by the Files tab will not have execute permissions set, the startup +commands should include the shell name, with +something like ```sh script.sh```. + +Shutdown commands optionally terminate the process(es) associated with this +service. Generally they send a kill signal to the running process using the +*kill* or *killall* commands. If the service does not terminate +the running processes using a shutdown command, the processes will be killed +when the *vnoded* daemon is terminated (with *kill -9*) and +the namespace destroyed. It is a good practice to +specify shutdown commands, which will allow for proper process termination, and +for run-time control of stopping and restarting services. + +Validate commands are executed following the startup commands. A validate +command can execute a process or script that should return zero if the service +has started successfully, and have a non-zero return value for services that +have had a problem starting. For example, the *pidof* command will check +if a process is running and return zero when found. When a validate command +produces a non-zero return value, an exception is generated, which will cause +an error to be displayed in the Check Emulation Light. + +**TIP:** + To start, stop, and restart services during run-time, right-click a + node and use the *Services...* menu. + +### Creating new Services + +Services can save time required to configure nodes, especially if a number +of nodes require similar configuration procedures. New services can be +introduced to automate tasks. + +The easiest way to capture the configuration of a new process into a service +is by using the **UserDefined** service. This is a blank service where any +aspect may be customized. The UserDefined service is convenient for testing +ideas for a service before adding a new service type. 
+ +To introduce new service types, a **myservices/** directory exists in the +user's CORE configuration directory, at **~/.core/myservices/**. A detailed +**README.txt** file exists in that directory to outline the steps necessary +for adding a new service. First, you need to create a small Python file that +defines the service; then the **custom_services_dir** entry must be set +in the **/etc/core/core.conf** configuration file. A sample is provided in +the **myservices/** directory. + +**NOTE:** + The directory name used in **custom_services_dir** should be unique and + should not correspond to + any existing Python module name. For example, don't use the name **subprocess** + or **services**. + +If you have created a new service type that may be useful to others, please +consider contributing it to the CORE project. Here is an example service with documentation describing functionality: [Example Service](exampleservice.html) From 4485d9b3c5a08b4dd2959a30a3d7c33a433d3459 Mon Sep 17 00:00:00 2001 From: SaintYomar Date: Mon, 10 Jun 2019 14:41:30 -0500 Subject: [PATCH 04/51] Formated install from source instructions on install.md --- docs/install.md | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/docs/install.md b/docs/install.md index fb161f78..40208d21 100644 --- a/docs/install.md +++ b/docs/install.md @@ -170,27 +170,35 @@ This option is listed here for developers and advanced users who are comfortable To build CORE from source on Ubuntu, first install these development packages. These packages are not required for normal binary package installs. -#### Ubuntu 18.04 pre-reqs +You can obtain the CORE source from the [CORE GitHub](https://github.com/coreemu/core) page. Choose either a stable release version or the development snapshot available in the *nightly_snapshots* directory. 
+ +#### Install Requirements + +##### Ubuntu 18.04 Requirements ```shell sudo apt install automake pkg-config gcc libev-dev bridge-utils ebtables python-dev python-sphinx python-setuptools python-lxml python-enum34 tk libtk-img ``` -#### Ubuntu 16.04 Requirements +##### Ubuntu 16.04 Requirements ```shell sudo apt-get install automake bridge-utils ebtables python-dev libev-dev python-sphinx python-setuptools python-enum34 python-lxml libtk-img ``` -#### CentOS 7 with Gnome Desktop Requirements +##### CentOS 7 with Gnome Desktop Requirements ```shell sudo yum -y install automake gcc python-devel libev-devel python-sphinx tk python-lxml python-enum34 ``` -You can obtain the CORE source from the [CORE GitHub](https://github.com/coreemu/core) page. Choose either a stable release version or the development snapshot available in the *nightly_snapshots* directory. +#### Download and Extract Source Code +##### Download +You can obtain the CORE source code from the [CORE GitHub](https://github.com/coreemu/core) page. 
+ +##### Extract ```shell tar xzf core-*.tar.gz cd core-* From 777e224a87285c67adc576edf44932a4e9f17fd7 Mon Sep 17 00:00:00 2001 From: SaintYomar Date: Mon, 10 Jun 2019 14:47:15 -0500 Subject: [PATCH 05/51] Fixed misspelled word --- docs/install.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/install.md b/docs/install.md index 40208d21..6966c6f7 100644 --- a/docs/install.md +++ b/docs/install.md @@ -204,7 +204,7 @@ tar xzf core-*.tar.gz cd core-* ``` -#### Tradional Autotools Build +#### Traditional Autotools Build ```shell ./bootstrap.sh ./configure From 6b4b82d2f656c128c4be8bb22e903f60aed35903 Mon Sep 17 00:00:00 2001 From: SaintYomar Date: Mon, 10 Jun 2019 15:28:52 -0500 Subject: [PATCH 06/51] Fixed misspelled word in update.md --- docs/usage.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/usage.md b/docs/usage.md index 09f549b3..c844be81 100644 --- a/docs/usage.md +++ b/docs/usage.md @@ -23,7 +23,7 @@ __Note: The CORE GUI is currently in a state of transition. The replacement can ## Prerequisites -Beyond instaling CORE, you must have the CORE daemon running. This is done on the command line with either Systemd or SysV +Beyond installing CORE, you must have the CORE daemon running. This is done on the command line with either Systemd or SysV ```shell # systed sudo systemctl daemon-reload From 20dc78dfc4f34c2a46bba07ff8308dfdf092d35c Mon Sep 17 00:00:00 2001 From: SaintYomar Date: Tue, 11 Jun 2019 14:43:34 -0500 Subject: [PATCH 07/51] Update services.md --- docs/services.md | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/docs/services.md b/docs/services.md index 1e67a543..5aaf3593 100644 --- a/docs/services.md +++ b/docs/services.md @@ -154,3 +154,29 @@ consider contributing it to the CORE project. 
Here is an example service with documentation describing functionality: [Example Service](exampleservice.html) + +### Available Services + +#### BIRD Internet Routing Daemon +The [BIRD Internet Routing Daemon](https://bird.network.cz/) is a routing daemon; i.e., a software responsible for managing kernel packet forwarding tables. It aims to develop a dynamic IP routing daemon with full support of all modern routing protocols, easy to use configuration interface and powerful route filtering language, primarily targeted on (but not limited to) Linux and other UNIX-like systems and distributed under the GNU General Public License. BIRD has a free implementation of several well known and common routing and router-supplemental protocols, namely RIP, RIPng, OSPFv2, OSPFv3, BGP, BFD, and NDP/RA. BIRD supports IPv4 and IPv6 address families, Linux kernel and several BSD variants (tested on FreeBSD, NetBSD and OpenBSD). BIRD consists of bird daemon and birdc interactive CLI client used for supervision. + +In order to be able to use the BIRD Internet Routing Protocol, you must first install project on your machine. + + +##### BIRD Package Install +```shell +sudo apt-get install bird +``` + +##### BIRD Source Code Install +You can download BIRD source code from it's [official repository.](https://gitlab.labs.nic.cz/labs/bird/) +```shell +./configure +make +su +make install +vi /usr/local/etc/bird.conf +``` +The installation will place the bird directory inside */etc* where you will also find its config file. + +In order to be able to do use the Bird Internet Routing Protocol, you must modify *bird.conf* due to the fact that the given configuration file is not configured beyond allowing the bird daemon to start, which means that nothing else will happen if you run it. 
Keeran Marquis has a very detailed example on [Configuring BGP using Bird on Ubuntu](https://blog.marquis.co/configuring-bgp-using-bird-on-ubuntu-14-04lts/) which can be used as a building block to implement your custom routing daemon. From c1113cc19293085a7512c17f95c6bf3f6a997cb2 Mon Sep 17 00:00:00 2001 From: SaintYomar Date: Wed, 12 Jun 2019 13:40:51 -0500 Subject: [PATCH 08/51] Added FRRouting documentation --- docs/services.md | 64 ++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 62 insertions(+), 2 deletions(-) diff --git a/docs/services.md b/docs/services.md index 5aaf3593..f3300679 100644 --- a/docs/services.md +++ b/docs/services.md @@ -160,7 +160,7 @@ Here is an example service with documentation describing functionality: #### BIRD Internet Routing Daemon The [BIRD Internet Routing Daemon](https://bird.network.cz/) is a routing daemon; i.e., a software responsible for managing kernel packet forwarding tables. It aims to develop a dynamic IP routing daemon with full support of all modern routing protocols, easy to use configuration interface and powerful route filtering language, primarily targeted on (but not limited to) Linux and other UNIX-like systems and distributed under the GNU General Public License. BIRD has a free implementation of several well known and common routing and router-supplemental protocols, namely RIP, RIPng, OSPFv2, OSPFv3, BGP, BFD, and NDP/RA. BIRD supports IPv4 and IPv6 address families, Linux kernel and several BSD variants (tested on FreeBSD, NetBSD and OpenBSD). BIRD consists of bird daemon and birdc interactive CLI client used for supervision. -In order to be able to use the BIRD Internet Routing Protocol, you must first install project on your machine. +In order to be able to use the BIRD Internet Routing Protocol, you must first install the project on your machine. 
##### BIRD Package Install @@ -179,4 +179,64 @@ vi /usr/local/etc/bird.conf ``` The installation will place the bird directory inside */etc* where you will also find its config file. -In order to be able to do use the Bird Internet Routing Protocol, you must modify *bird.conf* due to the fact that the given configuration file is not configured beyond allowing the bird daemon to start, which means that nothing else will happen if you run it. Keeran Marquis has a very detailed example on [Configuring BGP using Bird on Ubuntu](https://blog.marquis.co/configuring-bgp-using-bird-on-ubuntu-14-04lts/) which can be used as a building block to implement your custom routing daemon. +In order to be able to do use the Bird Internet Routing Protocol, you must modify *bird.conf* due to the fact that the given configuration file is not configured beyond allowing the bird daemon to start, which means that nothing else will happen if you run it. Keeran Marquis has a very detailed example on [Configuring BGP using Bird on Ubuntu](https://blog.marquis.co/configuring-bgp-using-bird-on-ubuntu-14-04lts/) which can be used as a building block to implement your custom routing daemon. + + +#### FRRouting +FRRouting is a routing software package that provides TCP/IP based routing services with routing protocols support such as BGP, RIP, OSPF, IS-IS and more. FRR also supports special BGP Route Reflector and Route Server behavior. In addition to traditional IPv4 routing protocols, FRR also supports IPv6 routing protocols. With an SNMP daemon that supports the AgentX protocol, FRR provides routing protocol MIB read-only access (SNMP Support). 
+ +FRR currently supports the following protocols: +* BGP +* OSPFv2 +* OSPFv3 +* RIPv1 +* RIPv2 +* RIPng +* IS-IS +* PIM-SM/MSDP +* LDP +* BFD +* Babel +* PBR +* OpenFabric +* EIGRP (alpha) +* NHRP (alpha) + +##### FRRouting Package Install +```shell +sudo apt install curl +curl -s https://deb.frrouting.org/frr/keys.asc | sudo apt-key add - +FRRVER="frr-stable" +echo deb https://deb.frrouting.org/frr $(lsb_release -s -c) $FRRVER | sudo tee -a /etc/apt/sources.list.d/frr.list +sudo apt update && sudo apt install frr frr-pythontools +``` + +##### FRRouting Source Code Install +Building FRR from source is the best way to ensure you have the latest features and bug fixes. Details for each supported platform, including dependency package listings, permissions, and other gotchas, are in the developer’s documentation. + +FRR’s source is available on the project [GitHub page](https://github.com/FRRouting/frr). +```shell +git clone https://github.com/FRRouting/frr.git +``` + +Change into your FRR source directory and issue: +```shell +./bootstrap.sh +``` +Then, choose the configuration options that you wish to use for the installation. You can find these options on FRR's [official webpage](http://docs.frrouting.org/en/latest/installation.html). Once you have chosen your configure options, run the configure script and pass the options you chose: +```shell +./configure \ + --prefix=/usr \ + --enable-exampledir=/usr/share/doc/frr/examples/ \ + --localstatedir=/var/run/frr \ + --sbindir=/usr/lib/frr \ + --sysconfdir=/etc/frr \ + --enable-pimd \ + --enable-watchfrr \ + ... +``` +After configuring the software, you are ready to build and install it in your system. +```shell +make && sudo make install +``` +If everything finishes successfully, FRR should be installed. 
From 2a1f6388c51f6b61ffd26113fcbc6c89e2c925d9 Mon Sep 17 00:00:00 2001 From: SaintYomar Date: Thu, 13 Jun 2019 13:42:50 -0500 Subject: [PATCH 09/51] Added docker services documentation --- docs/services.md | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/docs/services.md b/docs/services.md index f3300679..14092492 100644 --- a/docs/services.md +++ b/docs/services.md @@ -240,3 +240,40 @@ After configuring the software, you are ready to build and install it in your sy make && sudo make install ``` If everything finishes successfully, FRR should be installed. + +#### Docker +Docker service allows running docker containers within CORE nodes. +The running of Docker within a CORE node allows for additional extensibility to +the CORE services. This allows network applications and protocols to be easily +packaged and run on any node. + +This service will add a new group to the services list. This will have a service called Docker which will just start the docker service within the node but not run anything. It will also scan all docker images on the host machine. If any are tagged with 'core' then they will be added as a service to the Docker group. The image will then be auto run if that service is selected. + +This requires a recent version of Docker. This was tested using a PPA on Ubuntu with version 1.2.0. The version in the standard Ubuntu repo is to old for this purpose (we need --net host). + +##### Docker Installation +To use Docker services, you must first install the Docker python image. This is used to interface with Docker from the python service. + +```shell +sudo apt-get install docker.io +sudo apt-get install python-pip +pip install docker-py +``` +Once everything runs successfully, a Docker group under services will appear. An example use case is to pull an image from [Docker](https://hub.docker.com/). 
A test image has been uploaded for this purpose: +```shell +sudo docker pull stuartmarsden/multicastping +``` +This downloads an image which is based on Ubuntu 14.04 with python and twisted. It runs a simple program that sends a multicast ping and listens and records any it receives. In order for this to appear as a docker service it must be tagged with core. +Find out the id by running 'sudo docker images'. You should see all installed images and the one you want looks like this: +```shell +stuartmarsden/multicastping latest 4833487e66d2 20 hours +ago 487 MB +``` +The id will be different on your machine so use it in the following command: +```shell +sudo docker tag 4833487e66d2 stuartmarsden/multicastping:core +``` +This image will be listed in the services after we restart the core-daemon: +```shell +sudo service core-daemon restart +``` From a715a77c0d3e0d63c3633cbdef44fff22a720053 Mon Sep 17 00:00:00 2001 From: SaintYomar Date: Thu, 13 Jun 2019 14:05:13 -0500 Subject: [PATCH 10/51] Fixed bird config path --- docs/services.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/services.md b/docs/services.md index 14092492..6f1f4940 100644 --- a/docs/services.md +++ b/docs/services.md @@ -175,7 +175,7 @@ You can download BIRD source code from it's [official repository.](https://gitla make su make install -vi /usr/local/etc/bird.conf +vi /etc/bird/bird.conf ``` The installation will place the bird directory inside */etc* where you will also find its config file. 
From 0661f5d601cd65c87cbed2d15ef1d5a0793d8f12 Mon Sep 17 00:00:00 2001 From: bharnden Date: Fri, 14 Jun 2019 23:04:18 -0700 Subject: [PATCH 11/51] corefx - updated proto link --- corefx/src/main/proto/core.proto | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/corefx/src/main/proto/core.proto b/corefx/src/main/proto/core.proto index a00ebb81..cec58963 120000 --- a/corefx/src/main/proto/core.proto +++ b/corefx/src/main/proto/core.proto @@ -1 +1 @@ -../../../../daemon/proto/core.proto \ No newline at end of file +../../../../daemon/proto/core/api/grpc/core.proto \ No newline at end of file From 338439b42959b2e8452b0b4b9f2704a797857f17 Mon Sep 17 00:00:00 2001 From: bharnden Date: Sat, 15 Jun 2019 00:43:17 -0700 Subject: [PATCH 12/51] corefx - changes to reuse deleted subnets --- .../java/com/core/graph/CoreAddresses.java | 23 +++++++++++-- .../java/com/core/graph/NetworkGraph.java | 33 +++++++++++++++++++ 2 files changed, 54 insertions(+), 2 deletions(-) diff --git a/corefx/src/main/java/com/core/graph/CoreAddresses.java b/corefx/src/main/java/com/core/graph/CoreAddresses.java index dc1dcc5d..fdd872c4 100644 --- a/corefx/src/main/java/com/core/graph/CoreAddresses.java +++ b/corefx/src/main/java/com/core/graph/CoreAddresses.java @@ -7,18 +7,31 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import java.util.Comparator; +import java.util.Queue; import java.util.Set; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.atomic.AtomicBoolean; public class CoreAddresses { private static final Logger logger = LogManager.getLogger(); - private IPAddress currentSubnet = new IPAddressString("10.0.0.0/24").getAddress(); + private IPAddress currentSubnet = new IPAddressString("10.0.0.0/24").getAddress().toPrefixBlock(); private AtomicBoolean firstSubnet = new AtomicBoolean(true); + private Queue deleted = new LinkedBlockingQueue<>(); + + public void 
reuseSubnet(IPAddress subnet) { + deleted.add(subnet); + } public IPAddress nextSubnet() { logger.info("getting next subnet: {}", currentSubnet); if (!firstSubnet.getAndSet(false)) { - currentSubnet = currentSubnet.incrementBoundary(1).toPrefixBlock(); + IPAddress deletedSubnet = deleted.poll(); + if (deletedSubnet != null) { + currentSubnet = deletedSubnet; + } else { + currentSubnet = currentSubnet.incrementBoundary(1).toPrefixBlock(); + } } logger.info("getting updated boundary: {}", currentSubnet); return currentSubnet; @@ -43,6 +56,12 @@ public class CoreAddresses { .orElseGet(() -> currentSubnet); } + public void reset() { + deleted.clear(); + firstSubnet.set(true); + currentSubnet = new IPAddressString("10.0.0.0/24").getAddress().toPrefixBlock(); + } + public static void main(String... args) { IPAddress addresses = new IPAddressString("10.0.0.0/16").getAddress(); System.out.println(String.format("address: %s", addresses.increment(257))); diff --git a/corefx/src/main/java/com/core/graph/NetworkGraph.java b/corefx/src/main/java/com/core/graph/NetworkGraph.java index 5e7f7259..8dfe4e84 100644 --- a/corefx/src/main/java/com/core/graph/NetworkGraph.java +++ b/corefx/src/main/java/com/core/graph/NetworkGraph.java @@ -248,6 +248,7 @@ public class NetworkGraph { } nodeMap.clear(); graphViewer.repaint(); + coreAddresses.reset(); } public void updatePositions() { @@ -417,6 +418,38 @@ public class NetworkGraph { private void handleEdgeRemoved(GraphEvent.Edge edgeEvent) { CoreLink link = edgeEvent.getEdge(); logger.info("removed edge: {}", link); + CoreNode nodeOne = getVertex(link.getNodeOne()); + CoreInterface interfaceOne = link.getInterfaceOne(); + CoreNode nodeTwo = getVertex(link.getNodeTwo()); + CoreInterface interfaceTwo = link.getInterfaceTwo(); + boolean nodeOneIsDefault = isNode(nodeOne); + boolean nodeTwoIsDefault = isNode(nodeTwo); + + // check what we are unlinking + Set interfaces; + IPAddress subnet = null; + if (nodeOneIsDefault && nodeTwoIsDefault) { + 
subnet = interfaceOne.getIp4().toPrefixBlock(); + logger.info("unlinking node to node reuse subnet: {}", subnet); + } else if (nodeOneIsDefault) { + interfaces = getNetworkInterfaces(nodeTwo, new HashSet<>()); + if (interfaces.isEmpty()) { + subnet = interfaceOne.getIp4().toPrefixBlock(); + logger.info("unlinking node one from network reuse subnet: {}", subnet); + } + } else if (nodeTwoIsDefault) { + interfaces = getNetworkInterfaces(nodeOne, new HashSet<>()); + if (interfaces.isEmpty()) { + subnet = interfaceTwo.getIp4().toPrefixBlock(); + logger.info("unlinking node two from network reuse subnet: {}", subnet); + } + } else { + logger.info("nothing to do when unlinking networks"); + } + + if (subnet != null) { + coreAddresses.reuseSubnet(subnet); + } } private void handleVertexAdded(GraphEvent.Vertex vertexEvent) { From 0af3629ac66c72757e384e47636990bf12f96f7a Mon Sep 17 00:00:00 2001 From: bharnden Date: Sat, 15 Jun 2019 10:42:55 -0700 Subject: [PATCH 13/51] corefx - fixed some issues loading xml due to grpc change, updated subnet logic to help handle loaded scenarios. 
grpc - fixed issue when loading network links --- .../src/main/java/com/core/data/NodeType.java | 6 ++-- .../java/com/core/graph/CoreAddresses.java | 32 ++++++++++++------- .../java/com/core/graph/NetworkGraph.java | 9 +++++- daemon/core/api/grpc/server.py | 15 ++++++--- 4 files changed, 42 insertions(+), 20 deletions(-) diff --git a/corefx/src/main/java/com/core/data/NodeType.java b/corefx/src/main/java/com/core/data/NodeType.java index 4af7dc3d..3b070799 100644 --- a/corefx/src/main/java/com/core/data/NodeType.java +++ b/corefx/src/main/java/com/core/data/NodeType.java @@ -73,11 +73,9 @@ public class NodeType { return ID_LOOKUP.values().stream() .filter(nodeType -> { boolean sameType = nodeType.getValue() == type; - boolean sameModel; - if (model != null) { + boolean sameModel = true; + if (!model.isEmpty()) { sameModel = model.equals(nodeType.getModel()); - } else { - sameModel = nodeType.getModel() == null; } return sameType && sameModel; }) diff --git a/corefx/src/main/java/com/core/graph/CoreAddresses.java b/corefx/src/main/java/com/core/graph/CoreAddresses.java index fdd872c4..70ab5e06 100644 --- a/corefx/src/main/java/com/core/graph/CoreAddresses.java +++ b/corefx/src/main/java/com/core/graph/CoreAddresses.java @@ -6,7 +6,9 @@ import inet.ipaddr.IPAddressString; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import java.beans.IndexedPropertyDescriptor; import java.util.Comparator; +import java.util.HashSet; import java.util.Queue; import java.util.Set; import java.util.concurrent.ArrayBlockingQueue; @@ -16,8 +18,14 @@ import java.util.concurrent.atomic.AtomicBoolean; public class CoreAddresses { private static final Logger logger = LogManager.getLogger(); private IPAddress currentSubnet = new IPAddressString("10.0.0.0/24").getAddress().toPrefixBlock(); - private AtomicBoolean firstSubnet = new AtomicBoolean(true); private Queue deleted = new LinkedBlockingQueue<>(); + private Set usedSubnets = new HashSet<>(); + + 
public void usedAddress(IPAddress address) { + logger.info("adding used address: {} - {}", address, address.toPrefixBlock()); + usedSubnets.add(address.toPrefixBlock()); + logger.info("used subnets: {}", usedSubnets); + } public void reuseSubnet(IPAddress subnet) { deleted.add(subnet); @@ -25,16 +33,18 @@ public class CoreAddresses { public IPAddress nextSubnet() { logger.info("getting next subnet: {}", currentSubnet); - if (!firstSubnet.getAndSet(false)) { - IPAddress deletedSubnet = deleted.poll(); - if (deletedSubnet != null) { - currentSubnet = deletedSubnet; - } else { - currentSubnet = currentSubnet.incrementBoundary(1).toPrefixBlock(); - } + // skip existing subnets, when loaded from file + while (usedSubnets.contains(currentSubnet)) { + currentSubnet = currentSubnet.incrementBoundary(1).toPrefixBlock(); } - logger.info("getting updated boundary: {}", currentSubnet); - return currentSubnet; + + // re-use any deleted subnets + IPAddress next = deleted.poll(); + if (next == null) { + next = currentSubnet; + currentSubnet = currentSubnet.incrementBoundary(1).toPrefixBlock(); + } + return next; } public IPAddress findSubnet(Set interfaces) { @@ -58,7 +68,7 @@ public class CoreAddresses { public void reset() { deleted.clear(); - firstSubnet.set(true); + usedSubnets.clear(); currentSubnet = new IPAddressString("10.0.0.0/24").getAddress().toPrefixBlock(); } diff --git a/corefx/src/main/java/com/core/graph/NetworkGraph.java b/corefx/src/main/java/com/core/graph/NetworkGraph.java index 8dfe4e84..da3389ef 100644 --- a/corefx/src/main/java/com/core/graph/NetworkGraph.java +++ b/corefx/src/main/java/com/core/graph/NetworkGraph.java @@ -286,6 +286,13 @@ public class NetworkGraph { private void handleEdgeAdded(GraphEvent.Edge edgeEvent) { CoreLink link = edgeEvent.getEdge(); if (link.isLoaded()) { + // load addresses to avoid duplication + if (link.getInterfaceOne().getIp4() != null) { + coreAddresses.usedAddress(link.getInterfaceOne().getIp4()); + } + if 
(link.getInterfaceTwo().getIp4() != null) { + coreAddresses.usedAddress(link.getInterfaceTwo().getIp4()); + } return; } Pair endpoints = graph.getEndpoints(link); @@ -508,7 +515,7 @@ public class NetworkGraph { } private boolean isWirelessNode(CoreNode node) { - return node.getType() == NodeType.EMANE || node.getType() == NodeType.WLAN; + return node != null && (node.getType() == NodeType.EMANE || node.getType() == NodeType.WLAN); } private boolean checkForWirelessNode(CoreNode nodeOne, CoreNode nodeTwo) { diff --git a/daemon/core/api/grpc/server.py b/daemon/core/api/grpc/server.py index 72e390ff..b2c26f42 100644 --- a/daemon/core/api/grpc/server.py +++ b/daemon/core/api/grpc/server.py @@ -17,6 +17,7 @@ from core.emulator.emudata import NodeOptions, InterfaceData, LinkOptions from core.emulator.enumerations import NodeTypes, EventTypes, LinkTypes from core.location.mobility import BasicRangeModel, Ns2ScriptedMobility from core.nodes import nodeutils +from core.nodes.base import CoreNetworkBase from core.nodes.ipaddress import MacAddress from core.services.coreservices import ServiceManager @@ -73,18 +74,24 @@ def convert_link(session, link_data): interface_one = None if link_data.interface1_id is not None: node = session.get_node(link_data.node1_id) - interface = node.netif(link_data.interface1_id) + interface_name = None + if not isinstance(node, CoreNetworkBase): + interface = node.netif(link_data.interface1_id) + interface_name = interface.name interface_one = core_pb2.Interface( - id=link_data.interface1_id, name=interface.name, mac=convert_value(link_data.interface1_mac), + id=link_data.interface1_id, name=interface_name, mac=convert_value(link_data.interface1_mac), ip4=convert_value(link_data.interface1_ip4), ip4mask=link_data.interface1_ip4_mask, ip6=convert_value(link_data.interface1_ip6), ip6mask=link_data.interface1_ip6_mask) interface_two = None if link_data.interface2_id is not None: node = session.get_node(link_data.node2_id) - interface = 
node.netif(link_data.interface2_id) + interface_name = None + if not isinstance(node, CoreNetworkBase): + interface = node.netif(link_data.interface2_id) + interface_name = interface.name interface_two = core_pb2.Interface( - id=link_data.interface2_id, name=interface.name, mac=convert_value(link_data.interface2_mac), + id=link_data.interface2_id, name=interface_name, mac=convert_value(link_data.interface2_mac), ip4=convert_value(link_data.interface2_ip4), ip4mask=link_data.interface2_ip4_mask, ip6=convert_value(link_data.interface2_ip6), ip6mask=link_data.interface2_ip6_mask) From d8505418a6704cee2792ba1c6c6b46d5a2b9ac68 Mon Sep 17 00:00:00 2001 From: SaintYomar Date: Mon, 17 Jun 2019 08:23:03 -0500 Subject: [PATCH 14/51] Updated services.md with quagga documentation --- docs/services.md | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/docs/services.md b/docs/services.md index 6f1f4940..fc5f9491 100644 --- a/docs/services.md +++ b/docs/services.md @@ -277,3 +277,24 @@ This image will be listed in the services after we restart the core-daemon: ```shell sudo service core-daemon restart ``` + +#### Quagga Routing Suite + Quagga is a routing software suite, providing implementations of OSPFv2, OSPFv3, RIP v1 and v2, RIPng and BGP-4 for Unix platforms, particularly FreeBSD, Linux, Solaris and NetBSD. Quagga is a fork of GNU Zebra which was developed by Kunihiro Ishiguro. +The Quagga architecture consists of a core daemon, zebra, which acts as an abstraction layer to the underlying Unix kernel and presents the Zserv API over a Unix or TCP stream to Quagga clients. It is these Zserv clients which typically implement a routing protocol and communicate routing updates to the zebra daemon. + +##### Quagga Package Install +```shell +sudo apt-get install quagga +``` + +##### Quagga Source Install +First, download the source code from their [official webpage](https://www.quagga.net/). 
+```shell +sudo apt-get install gawk +``` +Extract the tarball, go to the directory of your currently extracted code and issue the following commands. +```shell +./configure +make +sudo make install +``` From 9ff327b933d7e4008d23c5477b1bdff23e8e9849 Mon Sep 17 00:00:00 2001 From: SaintYomar Date: Mon, 17 Jun 2019 10:49:05 -0500 Subject: [PATCH 15/51] Updated services with security documentation --- docs/services.md | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/docs/services.md b/docs/services.md index fc5f9491..39d6b9bc 100644 --- a/docs/services.md +++ b/docs/services.md @@ -241,6 +241,7 @@ make && sudo make install ``` If everything finishes successfully, FRR should be installed. + #### Docker Docker service allows running docker containers within CORE nodes. The running of Docker within a CORE node allows for additional extensibility to @@ -278,6 +279,7 @@ This image will be listed in the services after we restart the core-daemon: sudo service core-daemon restart ``` + #### Quagga Routing Suite Quagga is a routing software suite, providing implementations of OSPFv2, OSPFv3, RIP v1 and v2, RIPng and BGP-4 for Unix platforms, particularly FreeBSD, Linux, Solaris and NetBSD. Quagga is a fork of GNU Zebra which was developed by Kunihiro Ishiguro. The Quagga architecture consists of a core daemon, zebra, which acts as an abstraction layer to the underlying Unix kernel and presents the Zserv API over a Unix or TCP stream to Quagga clients. It is these Zserv clients which typically implement a routing protocol and communicate routing updates to the zebra daemon. @@ -298,3 +300,33 @@ Extract the tarball, go to the directory of your currently extracted code and is make sudo make install ``` + + +#### Software Defined Networking +Ryu is a component-based software defined networking framework. 
Ryu provides software components with well defined API that make it easy for developers to create new network management and control applications. Ryu supports various protocols for managing network devices, such as OpenFlow, Netconf, OF-config, etc. About OpenFlow, Ryu supports fully 1.0, 1.2, 1.3, 1.4, 1.5 and Nicira Extensions. All of the code is freely available under the Apache 2.0 license. +```shell +``` + +##### Installation +###### Prerequisites +```shell +sudo apt-get install gcc python-dev libffi-dev libssl-dev libxml2-dev libxslt1-dev zlib1g-dev +``` +###### Ryu Package Install +```shell +pip install ryu +``` +###### Ryu Source Install +```shell +git clone git://github.com/osrg/ryu.git +cd ryu; pip install . +``` + + +#### Security Services +The security services offer a wide variety of protocols capable of satisfying the most use cases available. Security services such as IP security protocols, for providing security at the IP layer, as well as the suite of protocols designed to provide that security, through authentication and encryption of IP network packets. Virtual Private Networks (VPNs) and Firewalls are also available for use to the user. + +##### Installation +```shell +sudo apt-get install ipsec-tools racoon openvpn +``` From 3dcaa5e963166aadd1f29adc534fadc13640125d Mon Sep 17 00:00:00 2001 From: SaintYomar Date: Mon, 17 Jun 2019 12:25:00 -0500 Subject: [PATCH 16/51] Updates services.md, added ucarp documentation --- docs/services.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/docs/services.md b/docs/services.md index 39d6b9bc..be2b4564 100644 --- a/docs/services.md +++ b/docs/services.md @@ -330,3 +330,13 @@ The security services offer a wide variety of protocols capable of satisfying th ```shell sudo apt-get install ipsec-tools racoon openvpn ``` + +#### UCARP +UCARP allows a couple of hosts to share common virtual IP addresses in order to provide automatic failover. 
It is a portable userland implementation of the secure and patent-free Common Address Redundancy Protocol (CARP, OpenBSD's alternative to the patents-bloated VRRP). + +Strong points of the CARP protocol are: very low overhead, cryptographically signed messages, interoperability between different operating systems and no need for any dedicated extra network link between redundant hosts. + +##### Installation +```shell +sudo apt-get install ucarp +``` From dc92719934758ae092975d056fd02f34ed5bd2c4 Mon Sep 17 00:00:00 2001 From: SaintYomar Date: Mon, 17 Jun 2019 13:56:52 -0500 Subject: [PATCH 17/51] Updated services.md, added utility services documentation --- docs/services.md | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/docs/services.md b/docs/services.md index be2b4564..93cdeb42 100644 --- a/docs/services.md +++ b/docs/services.md @@ -340,3 +340,24 @@ Strong points of the CARP protocol are: very low overhead, cryptographically sig ```shell sudo apt-get install ucarp ``` + + +#### Utilities Services +The following services are provided as utilities: +* Default Routing +* Default Muticast Routing +* Static Routing +* SSH +* DHCP +* DHCP Client +* FTP +* HTTP +* PCAP +* RADVD +* ATD + +##### Installation +To install the functionality of the previously metioned services you can run the following command: +```shell +sudo apt-get install isc-dhcp-server apache2 libpcap-dev radvd at +``` From 828ab7c16c4b79922d0f585c944696db95f24aa9 Mon Sep 17 00:00:00 2001 From: SaintYomar Date: Mon, 17 Jun 2019 15:05:12 -0500 Subject: [PATCH 18/51] Updated services.md, added xorp documentation --- docs/services.md | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/docs/services.md b/docs/services.md index 93cdeb42..4e2810a2 100644 --- a/docs/services.md +++ b/docs/services.md @@ -361,3 +361,39 @@ To install the functionality of the previously metioned services you can run the ```shell sudo apt-get install isc-dhcp-server 
apache2 libpcap-dev radvd at ``` + +#### XORP routing suite +XORP is an open networking platform that supports OSPF, RIP, BGP, OLSR, VRRP, PIM, IGMP (Multicast) and other routing protocols. Most protocols support IPv4 and IPv6 where applicable. It is known to work on various Linux distributions and flavors of BSD. + +XORP started life as a project at the ICSI Center for Open Networking (ICON) at the International Computer Science Institute in Berkeley, California, USA, and spent some time with the team at XORP, Inc. It is now maintained and improved on a volunteer basis by a core of long-term XORP developers and some newer contributors. + +XORP's primary goal is to be an open platform for networking protocol implementations and an alternative to proprietary and closed networking products in the marketplace today. It is the only open source platform to offer integrated multicast capability. + +XORP design philosophy is: + * modularity + * extensibility + * performance + * robustness +This is achieved by carefully separating functionalities into independent modules, and by providing an API for each module. + +XORP divides into two subsystems. The higher-level ("user-level") subsystem consists of the routing protocols. The lower-level ("kernel") manages the forwarding path, and provides APIs for the higher-level to access. + +User-level XORP uses multi-process architecture with one process per routing protocol, and a novel inter-process communication mechanism called XRL (XORP Resource Locator). + +The lower-level subsystem can use traditional UNIX kernel forwarding, or Click modular router. The modularity and independency of the lower-level from the user-level subsystem allows for its easily replacement with other solutions including high-end hardware-based forwarding engines. + +##### Installation +In order to be able to install the XORP Routing Suite, you must first install scons in order to compile it. 
+```shell +sudo apt-get install scons +``` +Then, download XORP from its official [release web page](http://www.xorp.org/releases/current/). +```shell +http://www.xorp.org/releases/current/ +cd xorp +sudo apt-get install libssl-dev ncurses-dev +scons +scons install +``` + + From 8b4f8c2c5bb7577f5484f8a67f0e0f6493925889 Mon Sep 17 00:00:00 2001 From: bharnden Date: Mon, 17 Jun 2019 19:59:44 -0700 Subject: [PATCH 19/51] changes to fix some issues when building fpm packages --- Makefile.am | 13 +++++++++---- scripts/core-daemon.in | 2 +- scripts/core-daemon.service.in | 2 +- 3 files changed, 11 insertions(+), 6 deletions(-) diff --git a/Makefile.am b/Makefile.am index 6bf34253..23fdd956 100644 --- a/Makefile.am +++ b/Makefile.am @@ -46,9 +46,11 @@ MAINTAINERCLEANFILES = .version \ if PYTHON3 -PYTHON_DEP = python3 >= 3.0 +PYTHON_DEB_DEP = python3 >= 3.0 +PYTHON_RPM_DEP = python3 >= 3.0 else -PYTHON_DEP = python >= 2.7, python < 3.0 +PYTHON_DEB_DEP = python (>= 2.7), python (<< 3.0) +PYTHON_RPM_DEP = python >= 2.7, python < 3.0 endif define fpm-rpm = @@ -61,6 +63,7 @@ fpm -s dir -t rpm -n core \ -p core_$(PYTHON)_VERSION_ARCH.rpm \ -v $(PACKAGE_VERSION) \ --rpm-init scripts/core-daemon \ + --config-files "/etc/core" \ -d "tcl" \ -d "tk" \ -d "procps-ng" \ @@ -70,7 +73,7 @@ fpm -s dir -t rpm -n core \ -d "iproute" \ -d "libev" \ -d "net-tools" \ - -d "$(PYTHON_DEP)" \ + -d "$(PYTHON_RPM_DEP)" \ -C $(DESTDIR) endef @@ -84,6 +87,8 @@ fpm -s dir -t deb -n core \ -p core_$(PYTHON)_VERSION_ARCH.deb \ -v $(PACKAGE_VERSION) \ --deb-systemd scripts/core-daemon.service \ + --deb-no-default-config-files \ + --config-files "/etc/core" \ -d "tcl" \ -d "tk" \ -d "libtk-img" \ @@ -94,7 +99,7 @@ fpm -s dir -t deb -n core \ -d "ebtables" \ -d "iproute2" \ -d "libev4" \ - -d "$(PYTHON_DEP)" \ + -d "$(PYTHON_DEB_DEP)" \ -C $(DESTDIR) endef diff --git a/scripts/core-daemon.in b/scripts/core-daemon.in index a865fcef..263d980d 100644 --- a/scripts/core-daemon.in +++ b/scripts/core-daemon.in 
@@ -20,7 +20,7 @@ NAME=`basename $0` PIDFILE="@CORE_STATE_DIR@/run/$NAME.pid" LOG="@CORE_STATE_DIR@/log/$NAME.log" -CMD="PYTHONPATH=@pythondir@ @PYTHON@ @bindir@/$NAME" +CMD="@bindir@/$NAME" get_pid() { cat "$PIDFILE" diff --git a/scripts/core-daemon.service.in b/scripts/core-daemon.service.in index 210de4d5..cd53cfad 100644 --- a/scripts/core-daemon.service.in +++ b/scripts/core-daemon.service.in @@ -4,7 +4,7 @@ After=network.target [Service] Type=simple -ExecStart=@PYTHON@ @bindir@/core-daemon +ExecStart=@bindir@/core-daemon TasksMax=infinity [Install] From 74ea163e724c7979b4ed61506abab9dd8b8edbad Mon Sep 17 00:00:00 2001 From: bharnden <32446120+bharnden@users.noreply.github.com> Date: Mon, 17 Jun 2019 20:21:43 -0700 Subject: [PATCH 20/51] Update install.md --- docs/install.md | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/docs/install.md b/docs/install.md index 08cd797d..86b93c34 100644 --- a/docs/install.md +++ b/docs/install.md @@ -46,10 +46,13 @@ Install Path | Description The newly added gRPC API which depends on python library grpcio is not commonly found within system repos. To account for this it would be recommended to install the python dependencies using the **requirements.txt** found in -the latest release. +the latest [CORE Release](https://github.com/coreemu/core/releases). ```shell -sudo pip install -r requirements.txt +# for python 2 +sudo python -m pip install -r requirements.txt +# for python 3 +sudo python3 -m pip install -r requirements.txt ``` ## Ubuntu 19.04 @@ -121,9 +124,9 @@ Ubuntu package defaults to using systemd for running as a service. 
```shell # python2 -sudo apt ./core_python_$VERSION_amd64.deb +sudo apt install ./core_python_$VERSION_amd64.deb # python3 -sudo apt ./core_python3_$VERSION_amd64.deb +sudo apt install ./core_python3_$VERSION_amd64.deb ``` Run the CORE GUI as a normal user: From 0f1105e81d23c36b2d3a77d2c7f454cc58146362 Mon Sep 17 00:00:00 2001 From: SaintYomar Date: Tue, 18 Jun 2019 11:30:38 -0500 Subject: [PATCH 21/51] Updated services.md, added nrl services documentation --- docs/services.md | 55 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 55 insertions(+) diff --git a/docs/services.md b/docs/services.md index 4e2810a2..2826550e 100644 --- a/docs/services.md +++ b/docs/services.md @@ -280,6 +280,61 @@ sudo service core-daemon restart ``` +#### NRL Services +The Protean Protocol Prototyping Library (ProtoLib) is a cross-platform library that allows applications to be built while supporting a variety of platforms including Linux, Windows, WinCE/PocketPC, MacOS, FreeBSD, Solaris, etc as well as the simulation environments of NS2 and Opnet. The goal of the Protolib is to provide a set of simple, cross-platform C++ classes that allow development of network protocols and applications that can run on different platforms and in network simulation environments. While Protolib provides an overall framework for developing working protocol implementations, applications, and simulation modules, the individual classes are designed for use as stand-alone components when possible. Although Protolib is principally for research purposes, the code has been constructed to provide robust, efficient performance and adaptability to real applications. In some cases, the code consists of data structures, etc useful in protocol implementations and, in other cases, provides common, cross-platform interfaces to system services and functions (e.g., sockets, timers, routing tables, etc). 
+ +Currently the Naval Research Laboratory uses this library to develop a wide variety of protocols. The NRL Protolib currently supports the following protocols: +* MGEN_Sink +* NHDP +* SMF +* OLSR +* OLSRv2 +* OLSRORG +* MgenActor +* arouted + +#### NRL Installation +In order to be able to use the different protocols that NRL offers, you must first download the support library itself. You can get the source code from their [official nightly snapshots website](https://downloads.pf.itd.nrl.navy.mil/protolib/nightly_snapshots/). + +##### Multi-Generator (MGEN) +Download MGEN from the [NRL MGEN nightly snapshots](https://downloads.pf.itd.nrl.navy.mil/mgen/nightly_snapshots/), unpack it and copy the protolib library into the main folder *mgen*. Execute the following commands to build the protocol. +```shell +cd mgen/makefiles +make -f Makefile.{os} mgen +``` + +##### Neighborhood Discovery Protocol (NHDP) +Download NHDP from the [NRL NHDP nightly snapshots](https://downloads.pf.itd.nrl.navy.mil/nhdp/nightly_snapshots/). +```shell +sudo apt-get install libpcap-dev libboost-all-dev +wget https://github.com/protocolbuffers/protobuf/releases/download/v3.8.0/protoc-3.8.0-linux-x86_64.zip +unzip protoc-3.8.0-linux-x86_64.zip +``` +Then place the binaries in your $PATH. To know your paths you can issue the following command +```shell +echo $PATH +``` +Go to the downloaded *NHDP* tarball, unpack it and place the protolib library inside the NHDP main folder. Now, compile the NHDP Protocol. +```shell +cd nhdp/unix +make -f Makefile.{os} +``` + +##### Simplified Multicast Forwarding (SMF) +Download SMF from the [NRL SMF nightly snapshot](https://downloads.pf.itd.nrl.navy.mil/smf/nightly_snapshots/), unpack it and place the protolib library inside the *smf* main folder. 
+```shell +cd smf/makefiles +make -f Makefile.{os} +``` + +##### Optimized Link State Routing Protocol (OLSR) +To install the OLSR protocol, download their source code from their [nightly snapshots](https://downloads.pf.itd.nrl.navy.mil/olsr/nightly_snapshots/nrlolsr-svnsnap.tgz). Unpack it and place the previously downloaded protolib library inside the *nrlolsr* main directory. Then execute the following commands: +```shell +cd ./unix +make -f Makefile.{os} +``` + + #### Quagga Routing Suite Quagga is a routing software suite, providing implementations of OSPFv2, OSPFv3, RIP v1 and v2, RIPng and BGP-4 for Unix platforms, particularly FreeBSD, Linux, Solaris and NetBSD. Quagga is a fork of GNU Zebra which was developed by Kunihiro Ishiguro. The Quagga architecture consists of a core daemon, zebra, which acts as an abstraction layer to the underlying Unix kernel and presents the Zserv API over a Unix or TCP stream to Quagga clients. It is these Zserv clients which typically implement a routing protocol and communicate routing updates to the zebra daemon. 
From 948b1126baac2faae819152926d0ba1fa42bf08f Mon Sep 17 00:00:00 2001 From: Blake Harnden Date: Tue, 18 Jun 2019 10:33:16 -0700 Subject: [PATCH 22/51] changes to support not modifying controlnet configuration messages and avoid issues with setting the master meane config when dealing with distributed emane --- daemon/core/api/tlv/broker.py | 60 ----------------------------- daemon/core/api/tlv/corehandlers.py | 3 +- daemon/core/emane/emanemanager.py | 29 ++++++++------ daemon/core/emane/nodes.py | 8 ++-- daemon/core/emulator/session.py | 2 +- daemon/core/xml/emanexml.py | 3 +- 6 files changed, 25 insertions(+), 80 deletions(-) diff --git a/daemon/core/api/tlv/broker.py b/daemon/core/api/tlv/broker.py index d63a8ca2..2021bb4c 100644 --- a/daemon/core/api/tlv/broker.py +++ b/daemon/core/api/tlv/broker.py @@ -121,7 +121,6 @@ class CoreBroker(object): self.physical_nodes = set() # allows for other message handlers to process API messages (e.g. EMANE) self.handlers = set() - self.handlers.add(self.handle_distributed) # dict with tunnel key to tunnel device mapping self.tunnels = {} self.dorecvloop = False @@ -1049,62 +1048,3 @@ class CoreBroker(object): if not server.instantiation_complete: return False return True - - def handle_distributed(self, message): - """ - Handle the session options config message as it has reached the - broker. Options requiring modification for distributed operation should - be handled here. 
- - :param message: message to handle - :return: nothing - """ - if not self.session.master: - return - - if message.message_type != MessageTypes.CONFIG.value or message.get_tlv(ConfigTlvs.OBJECT.value) != "session": - return - - values_str = message.get_tlv(ConfigTlvs.VALUES.value) - if values_str is None: - return - - value_strings = values_str.split("|") - for value_string in value_strings: - key, _value = value_string.split("=", 1) - if key == "controlnet": - self.handle_distributed_control_net(message, value_strings, value_strings.index(value_string)) - - def handle_distributed_control_net(self, message, values, index): - """ - Modify Config Message if multiple control network prefixes are - defined. Map server names to prefixes and repack the message before - it is forwarded to slave servers. - - :param message: message to handle - :param list values: values to handle - :param int index: index ti get key value from - :return: nothing - """ - key_value = values[index] - _key, value = key_value.split("=", 1) - control_nets = value.split() - - if len(control_nets) < 2: - logging.warning("multiple controlnet prefixes do not exist") - return - - servers = self.session.broker.getservernames() - if len(servers) < 2: - logging.warning("not distributed") - return - - servers.remove("localhost") - # master always gets first prefix - servers.insert(0, "localhost") - # create list of "server1:ctrlnet1 server2:ctrlnet2 ..." 
- control_nets = map(lambda x: "%s:%s" % (x[0], x[1]), zip(servers, control_nets)) - values[index] = "controlnet=%s" % (" ".join(control_nets)) - values_str = "|".join(values) - message.tlv_data[ConfigTlvs.VALUES.value] = values_str - message.repack() diff --git a/daemon/core/api/tlv/corehandlers.py b/daemon/core/api/tlv/corehandlers.py index 40ac9e94..e57494d6 100644 --- a/daemon/core/api/tlv/corehandlers.py +++ b/daemon/core/api/tlv/corehandlers.py @@ -510,7 +510,6 @@ class CoreHandler(socketserver.BaseRequestHandler): :param message: message for replies :return: nothing """ - logging.debug("dispatching replies: %s", replies) for reply in replies: message_type, message_flags, message_length = coreapi.CoreMessage.unpack_header(reply) try: @@ -524,7 +523,7 @@ class CoreHandler(socketserver.BaseRequestHandler): reply_message = "CoreMessage (type %d flags %d length %d)" % ( message_type, message_flags, message_length) - logging.debug("dispatch reply:\n%s", reply_message) + logging.debug("sending reply:\n%s", reply_message) try: self.sendall(reply) diff --git a/daemon/core/emane/emanemanager.py b/daemon/core/emane/emanemanager.py index aec34e2f..1856d946 100644 --- a/daemon/core/emane/emanemanager.py +++ b/daemon/core/emane/emanemanager.py @@ -2,6 +2,7 @@ emane.py: definition of an Emane class for implementing configuration control of an EMANE emulation. 
""" +import copy import logging import os import threading @@ -443,10 +444,13 @@ class EmaneManager(ModelManager): continue platformid += 1 + + # create temporary config for updating distributed nodes typeflags = ConfigFlags.UPDATE.value - self.set_config("platform_id_start", str(platformid)) - self.set_config("nem_id_start", str(nemid)) - config_data = ConfigShim.config_data(0, None, typeflags, self.emane_config, self.get_configs()) + config = copy.deepcopy(self.get_configs()) + config["platform_id_start"] = str(platformid) + config["nem_id_start"] = str(nemid) + config_data = ConfigShim.config_data(0, None, typeflags, self.emane_config, config) message = dataconversion.convert_config(config_data) server.sock.send(message) # increment nemid for next server by number of interfaces @@ -477,26 +481,26 @@ class EmaneManager(ModelManager): be configured. This generates configuration for slave control nets using the default list of prefixes. """ - session = self.session # slave server + session = self.session if not session.master: return - servers = session.broker.getservernames() # not distributed + servers = session.broker.getservernames() if len(servers) < 2: return - prefix = session.options.get_config("controlnet") - prefixes = prefix.split() # normal Config messaging will distribute controlnets - if len(prefixes) >= len(servers): - return + prefix = session.options.get_config("controlnet", default="") + prefixes = prefix.split() + if len(prefixes) < len(servers): + logging.info("setting up default controlnet prefixes for distributed (%d configured)", len(prefixes)) + prefix = ctrlnet.DEFAULT_PREFIX_LIST[0] # this generates a config message having controlnet prefix assignments - logging.info("Setting up default controlnet prefixes for distributed (%d configured)" % len(prefixes)) - prefixes = ctrlnet.DEFAULT_PREFIX_LIST[0] - vals = 'controlnet="%s"' % prefixes + logging.info("setting up controlnet prefixes for distributed: %s", prefix) + vals = "controlnet=%s" % 
prefix tlvdata = b"" tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.OBJECT.value, "session") tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.TYPE.value, 0) @@ -504,6 +508,7 @@ class EmaneManager(ModelManager): rawmsg = coreapi.CoreConfMessage.pack(0, tlvdata) msghdr = rawmsg[:coreapi.CoreMessage.header_len] msg = coreapi.CoreConfMessage(flags=0, hdr=msghdr, data=rawmsg[coreapi.CoreMessage.header_len:]) + logging.debug("sending controlnet message:\n%s", msg) self.session.broker.handle_message(msg) def check_node_models(self): diff --git a/daemon/core/emane/nodes.py b/daemon/core/emane/nodes.py index cbbec693..256351b4 100644 --- a/daemon/core/emane/nodes.py +++ b/daemon/core/emane/nodes.py @@ -165,16 +165,16 @@ class EmaneNode(EmaneNet): nemid = self.getnemid(netif) ifname = netif.localname if nemid is None: - logging.info("nemid for %s is unknown" % ifname) + logging.info("nemid for %s is unknown", ifname) return - lat, long, alt = self.session.location.getgeo(x, y, z) - logging.info("setnemposition %s (%s) x,y,z=(%d,%d,%s)(%.6f,%.6f,%.6f)", ifname, nemid, x, y, z, lat, long, alt) + lat, lon, alt = self.session.location.getgeo(x, y, z) + logging.info("setnemposition %s (%s) x,y,z=(%d,%d,%s)(%.6f,%.6f,%.6f)", ifname, nemid, x, y, z, lat, lon, alt) event = LocationEvent() # altitude must be an integer or warning is printed # unused: yaw, pitch, roll, azimuth, elevation, velocity alt = int(round(alt)) - event.append(nemid, latitude=lat, longitude=long, altitude=alt) + event.append(nemid, latitude=lat, longitude=lon, altitude=alt) self.session.emane.service.publish(0, event) def setnempositions(self, moved_netifs): diff --git a/daemon/core/emulator/session.py b/daemon/core/emulator/session.py index de2a38c3..17e96a80 100644 --- a/daemon/core/emulator/session.py +++ b/daemon/core/emulator/session.py @@ -1498,7 +1498,7 @@ class Session(object): break if not prefix: - logging.error("Control network prefix not found for server '%s'" % servers[0]) + 
logging.error("control network prefix not found for server: %s", servers[0]) assign_address = False try: prefix = prefixes[0].split(':', 1)[1] diff --git a/daemon/core/xml/emanexml.py b/daemon/core/xml/emanexml.py index a4833e24..14bff5ba 100644 --- a/daemon/core/xml/emanexml.py +++ b/daemon/core/xml/emanexml.py @@ -108,7 +108,7 @@ def build_node_platform_xml(emane_manager, control_net, node, nem_id, platform_x :return: the next nem id that can be used for creating platform xml files :rtype: int """ - logging.debug("building emane platform xml for node(%s): %s", node, node.name) + logging.debug("building emane platform xml for node(%s) nem_id(%s): %s", node, nem_id, node.name) nem_entries = {} if node.model is None: @@ -116,6 +116,7 @@ def build_node_platform_xml(emane_manager, control_net, node, nem_id, platform_x return nem_entries for netif in node.netifs(): + logging.debug("building platform xml for interface(%s) nem_id(%s)", netif.name, nem_id) # build nem xml nem_definition = nem_file_name(node.model, netif) nem_element = etree.Element("nem", id=str(nem_id), name=netif.localname, definition=nem_definition) From 14fc16832dd7798ff57fa4cd50ce6748598bd450 Mon Sep 17 00:00:00 2001 From: Blake Harnden Date: Wed, 19 Jun 2019 08:22:28 -0700 Subject: [PATCH 23/51] updated long to use lon in emane code to avoid name conflicts in 2.7 --- daemon/core/emane/nodes.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/daemon/core/emane/nodes.py b/daemon/core/emane/nodes.py index 256351b4..8280e6f5 100644 --- a/daemon/core/emane/nodes.py +++ b/daemon/core/emane/nodes.py @@ -199,12 +199,12 @@ class EmaneNode(EmaneNet): logging.info("nemid for %s is unknown" % ifname) continue x, y, z = netif.node.getposition() - lat, long, alt = self.session.location.getgeo(x, y, z) + lat, lon, alt = self.session.location.getgeo(x, y, z) logging.info("setnempositions %d %s (%s) x,y,z=(%d,%d,%s)(%.6f,%.6f,%.6f)", - i, ifname, nemid, x, y, z, lat, long, alt) + i, ifname, 
nemid, x, y, z, lat, lon, alt) # altitude must be an integer or warning is printed alt = int(round(alt)) - event.append(nemid, latitude=lat, longitude=long, altitude=alt) + event.append(nemid, latitude=lat, longitude=lon, altitude=alt) i += 1 self.session.emane.service.publish(0, event) From 3dac7f096cf9928fff637e473844b0ea7496fc77 Mon Sep 17 00:00:00 2001 From: "Blake J. Harnden" Date: Wed, 19 Jun 2019 10:31:34 -0700 Subject: [PATCH 24/51] docs - added an updated take on running distributed and isolated it to its own higher level page --- docs/distributed.md | 172 ++++++++++++++++++++++++++++++++++++++++++++ docs/index.md | 1 + docs/usage.md | 106 --------------------------- 3 files changed, 173 insertions(+), 106 deletions(-) create mode 100644 docs/distributed.md diff --git a/docs/distributed.md b/docs/distributed.md new file mode 100644 index 00000000..526cf041 --- /dev/null +++ b/docs/distributed.md @@ -0,0 +1,172 @@ +# CORE - Distributed Emulation + +* Table of Contents +{:toc} + +## Overview + +A large emulation scenario can be deployed on multiple emulation servers and +controlled by a single GUI. The GUI, representing the entire topology, can be +run on one of the emulation servers or on a separate machine. + +Each machine that will act as an emulation server would ideally have the + same version of CORE installed. It is not important to have the GUI component + but the CORE Python daemon **core-daemon** needs to be installed. + +**NOTE: The server that the GUI connects with is referred to as +the master server.** + +## Configuring Listen Address + +First we need to configure the **core-daemon** on all servers to listen on an +interface over the network. The simplest way would be updating the core +configuration file to listen on all interfaces. Alternatively, configure it to +listen to the specific interface you desire by supplying the correct address. 
+ +The **listenaddr** configuration should be set to the address of the interface +that should receive CORE API control commands from the other servers; +setting **listenaddr = 0.0.0.0** causes the Python daemon to listen on all +interfaces. CORE uses TCP port **4038** by default to communicate from the +controlling machine (with GUI) to the emulation servers. Make sure that +firewall rules are configured as necessary to allow this traffic. + +```shell +# open configuration file +vi /etc/core/core.conf + +# within core.conf +[core-daemon] +listenaddr = 0.0.0.0 +``` + +## Enabling Remote SSH Shells + +### Update GUI Terminal Program + +**Edit -> Preferences... -> Terminal program:** + +Currently recommend setting this to **xterm -e** as the default +**gnome-terminal** will not work. + +May need to install xterm, if not already installed. + +```shell +sudo apt install xterm +``` + +### Setup SSH + +In order to easily open shells on the emulation servers, the servers should be +running an SSH server, and public key login should be enabled. This is +accomplished by generating an SSH key for your user on all servers being used +for distributed emulation, if you do not already have one. Then copying your +master server public key to the authorized_keys file on all other servers that +will be used to help drive the distributed emulation. When double-clicking on a +node during runtime, instead of opening a local shell, the GUI will attempt to +SSH to the emulation server to run an interactive shell. + +You need to have the same user defined on each server, since the user used +for these remote shells is the same user that is running the CORE GUI. 
+ +```shell +# install openssh-server +sudo apt install openssh-server + +# generate ssh if needed +ssh-keygen -o -t rsa -b 4096 + +# copy public key to authorized_keys file +ssh-copy-id user@server +# or +scp ~/.ssh/id_rsa.pub username@server:~/.ssh/authorized_keys +``` + +## Add Emulation Servers in GUI + +Within the core-gui navigate to menu option: + +**Session -> Emulation servers...** + +Within the dialog box presented, add or modify an existing server if present +to use the name, address, and port for a server you plan to use. + +Server configurations are loaded and written to in a configuration file for +the GUI. + +**~/.core/servers.conf** +```conf +# name address port +server2 192.168.0.2 4038 +``` + +## Assigning Nodes + +The user needs to assign nodes to emulation servers in the scenario. Making no +assignment means the node will be emulated on the master server. +In the configuration window of every node, a drop-down box located between +the *Node name* and the *Image* button will select the name of the emulation +server. By default, this menu shows *(none)*, indicating that the node will +be emulated locally on the master. When entering Execute mode, the CORE GUI +will deploy the node on its assigned emulation server. + +Another way to assign emulation servers is to select one or more nodes using +the select tool (shift-click to select multiple), and right-click one of the +nodes and choose *Assign to...*. + +The **CORE emulation servers** dialog box may also be used to assign nodes to +servers. The assigned server name appears in parenthesis next to the node name. +To assign all nodes to one of the servers, click on the server name and then +the **all nodes** button. Servers that have assigned nodes are shown in blue in +the server list. Another option is to first select a subset of nodes, then open +the **CORE emulation servers** box and use the **selected nodes** button. 
+ +**IMPORTANT: Leave the nodes unassigned if they are to be run on the master +server. Do not explicitly assign the nodes to the master server.** + +## GUI Visualization + +If there is a link between two nodes residing on different servers, the GUI +will draw the link with a dashed line. + +## Concerns and Limitations + +Wireless nodes, i.e. those connected to a WLAN node, can be assigned to +different emulation servers and participate in the same wireless network +only if an EMANE model is used for the WLAN. The basic range model does +not work across multiple servers due to the Linux bridging and ebtables +rules that are used. + +**NOTE: The basic range wireless model does not support distributed emulation, +but EMANE does.** + +When nodes are linked across servers **core-daemons** will automatically +create necessary tunnels between the nodes when executed. Care should be taken +to arrange the topology such that the number of tunnels is minimized. The +tunnels carry data between servers to connect nodes as specified in the topology. +These tunnels are created using GRE tunneling, similar to the Tunnel Tool. + +### EMANE Issues + +EMANE appears to require location events for nodes to be sync'ed across +all EMANE instances for nodes to find each other. Using an EMANE eel file +for your scenario can help clear this up, which might be desired anyway. + +* https://github.com/adjacentlink/emane/wiki/EEL-Generator + +You can also move nodes within the GUI to help trigger location events from +CORE when the **core.conf** settings below is used. Assuming the nodes +did not find each other by default and you are not using an eel file. + +```shell +emane_event_generate = True +``` + +## Distributed Checklist + +1. Install the same version of the CORE daemon on all servers. +1. Set **listenaddr** configuration in all of the server's core.conf files, +then start (or restart) the daemon. +1. 
Install and configure public-key SSH access on all servers (if you want to use +double-click shells or Widgets.) +1. Assign nodes to desired servers, empty for master server +1. Press the **Start** button to launch the distributed emulation. diff --git a/docs/index.md b/docs/index.md index 6d0a0477..fb640347 100644 --- a/docs/index.md +++ b/docs/index.md @@ -23,6 +23,7 @@ networking scenarios, security studies, and increasing the size of physical test |[Architecture](architecture.md)|Overview of the architecture| |[Installation](install.md)|Installing from source, packages, & other dependencies| |[Using the GUI](usage.md)|Details on the different node types and options in the GUI| +|[Distributed](distributed.md)|Overview and details for running CORE across multiple servers| |[Python Scripting](scripting.md)|How to write python scripts for creating a CORE session| |[gRPC API](grpc.md)|How to enable and use the gRPC API| |[Node Types](machine.md)|Overview of node types supported within CORE| diff --git a/docs/usage.md b/docs/usage.md index fa029e2c..f36e6019 100644 --- a/docs/usage.md +++ b/docs/usage.md @@ -728,112 +728,6 @@ pseudo-link will be drawn, representing the link between the two nodes on different canvases. Double-clicking on the label at the end of the arrow will jump to the canvas that it links. -Distributed Emulation --------------------- - -A large emulation scenario can be deployed on multiple emulation servers and -controlled by a single GUI. The GUI, representing the entire topology, can be -run on one of the emulation servers or on a separate machine. Emulations can be -distributed on Linux. - -Each machine that will act as an emulation server needs to have CORE installed. -It is not important to have the GUI component but the CORE Python daemon -**core-daemon** needs to be installed. 
Set the **listenaddr** line in the -**/etc/core/core.conf** configuration file so that the CORE Python -daemon will respond to commands from other servers: - -```shell -### core-daemon configuration options ### -[core-daemon] -pidfile = /var/run/core-daemon.pid -logfile = /var/log/core-daemon.log -listenaddr = 0.0.0.0 -``` - - -The **listenaddr** should be set to the address of the interface that should -receive CORE API control commands from the other servers; setting **listenaddr -= 0.0.0.0** causes the Python daemon to listen on all interfaces. CORE uses TCP -port 4038 by default to communicate from the controlling machine (with GUI) to -the emulation servers. Make sure that firewall rules are configured as -necessary to allow this traffic. - -In order to easily open shells on the emulation servers, the servers should be -running an SSH server, and public key login should be enabled. This is -accomplished by generating an SSH key for your user if you do not already have -one (use **ssh-keygen -t rsa**), and then copying your public key to the -authorized_keys file on the server (for example, **ssh-copy-id user@server** or -**scp ~/.ssh/id_rsa.pub server:.ssh/authorized_keys**.) When double-clicking on -a node during runtime, instead of opening a local shell, the GUI will attempt -to SSH to the emulation server to run an interactive shell. The user name used -for these remote shells is the same user that is running the CORE GUI. - -**HINT: Here is a quick distributed emulation checklist.** - -1. Install the CORE daemon on all servers. -2. Configure public-key SSH access to all servers (if you want to use -double-click shells or Widgets.) -3. Set **listenaddr=0.0.0.0** in all of the server's core.conf files, -then start (or restart) the daemon. -4. Select nodes, right-click them, and choose *Assign to* to assign -the servers (add servers through *Session*, *Emulation Servers...*) -5. Press the *Start* button to launch the distributed emulation. 
- -Servers are configured by choosing *Emulation servers...* from the *Session* -menu. Servers parameters are configured in the list below and stored in a -*servers.conf* file for use in different scenarios. The IP address and port of -the server must be specified. The name of each server will be saved in the -topology file as each node's location. - -**NOTE:** - The server that the GUI connects with - is referred to as the master server. - -The user needs to assign nodes to emulation servers in the scenario. Making no -assignment means the node will be emulated on the master server -In the configuration window of every node, a drop-down box located between -the *Node name* and the *Image* button will select the name of the emulation -server. By default, this menu shows *(none)*, indicating that the node will -be emulated locally on the master. When entering Execute mode, the CORE GUI -will deploy the node on its assigned emulation server. - -Another way to assign emulation servers is to select one or more nodes using -the select tool (shift-click to select multiple), and right-click one of the -nodes and choose *Assign to...*. - -The *CORE emulation servers* dialog box may also be used to assign nodes to -servers. The assigned server name appears in parenthesis next to the node name. -To assign all nodes to one of the servers, click on the server name and then -the *all nodes* button. Servers that have assigned nodes are shown in blue in -the server list. Another option is to first select a subset of nodes, then open -the *CORE emulation servers* box and use the *selected nodes* button. - -**IMPORTANT:** - Leave the nodes unassigned if they are to be run on the master server. - Do not explicitly assign the nodes to the master server. - -The emulation server machines should be reachable on the specified port and via -SSH. SSH is used when double-clicking a node to open a shell, the GUI will open -an SSH prompt to that node's emulation server. 
Public-key authentication should -be configured so that SSH passwords are not needed. - -If there is a link between two nodes residing on different servers, the GUI -will draw the link with a dashed line, and automatically create necessary -tunnels between the nodes when executed. Care should be taken to arrange the -topology such that the number of tunnels is minimized. The tunnels carry data -between servers to connect nodes as specified in the topology. -These tunnels are created using GRE tunneling, similar to the Tunnel Tool. - -Wireless nodes, i.e. those connected to a WLAN node, can be assigned to -different emulation servers and participate in the same wireless network -only if an -EMANE model is used for the WLAN. The basic range model does not work across multiple servers due -to the Linux bridging and ebtables rules that are used. - -**NOTE:** - The basic range wireless model does not support distributed emulation, - but EMANE does. - ## Services CORE uses the concept of services to specify what processes or scripts run on a From 6270fcbc755c784419b8aa22d0376adc3789cf0e Mon Sep 17 00:00:00 2001 From: Blake Harnden Date: Wed, 19 Jun 2019 10:58:49 -0700 Subject: [PATCH 25/51] shifted session startup order to help ensure broker tunnels get setup before emane starts --- daemon/core/emulator/session.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/daemon/core/emulator/session.py b/daemon/core/emulator/session.py index 17e96a80..34df079d 100644 --- a/daemon/core/emulator/session.py +++ b/daemon/core/emulator/session.py @@ -1214,19 +1214,19 @@ class Session(object): # write current nodes out to session directory file self.write_nodes() - # controlnet may be needed by some EMANE models + # create control net interfaces and broker network tunnels + # which need to exist for emane to sync on location events + # in distributed scenarios self.add_remove_control_interface(node=None, remove=False) + self.broker.startup() # instantiate 
will be invoked again upon Emane configure if self.emane.startup() == self.emane.NOT_READY: return - # start feature helpers - self.broker.startup() - self.mobility.startup() - - # boot the services on each node + # boot node services and then start mobility self.boot_nodes() + self.mobility.startup() # set broker local instantiation to complete self.broker.local_instantiation_complete() From 407cfa5fe1029397678d2860107799cb4fe6089b Mon Sep 17 00:00:00 2001 From: Blake Harnden Date: Wed, 19 Jun 2019 13:52:51 -0700 Subject: [PATCH 26/51] updated emanemanager to add server prefixes to control nets, when control nets are not configured --- daemon/core/emane/emanemanager.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/daemon/core/emane/emanemanager.py b/daemon/core/emane/emanemanager.py index 1856d946..7d1cab9e 100644 --- a/daemon/core/emane/emanemanager.py +++ b/daemon/core/emane/emanemanager.py @@ -497,6 +497,10 @@ class EmaneManager(ModelManager): if len(prefixes) < len(servers): logging.info("setting up default controlnet prefixes for distributed (%d configured)", len(prefixes)) prefix = ctrlnet.DEFAULT_PREFIX_LIST[0] + prefixes = prefix.split() + servers.remove("localhost") + servers.insert(0, "localhost") + prefix = " ".join("%s:%s" % (s, prefixes[i]) for i, s in enumerate(servers)) # this generates a config message having controlnet prefix assignments logging.info("setting up controlnet prefixes for distributed: %s", prefix) From ee6b420c9e846bf799d5c2d14869e0619aee985f Mon Sep 17 00:00:00 2001 From: bharnden <32446120+bharnden@users.noreply.github.com> Date: Wed, 19 Jun 2019 14:07:04 -0700 Subject: [PATCH 27/51] Update distributed.md --- docs/distributed.md | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/docs/distributed.md b/docs/distributed.md index 526cf041..b251c4ca 100644 --- a/docs/distributed.md +++ b/docs/distributed.md @@ -145,7 +145,15 @@ to arrange the topology such that the number of tunnels is minimized. 
The tunnels carry data between servers to connect nodes as specified in the topology. These tunnels are created using GRE tunneling, similar to the Tunnel Tool. -### EMANE Issues +### EMANE Configuration and Issues + +EMANE needs to have controlnet configured in **core.conf** in order to startup correctly. +The names before the addresses need to match the servers configured in +**~/.core/servers.conf** previously. + +```shell +controlnet = core1:172.16.1.0/24 core2:172.16.2.0/24 core3:172.16.3.0/24 core4:172.16.4.0/24 core5:172.16.5.0/24 +``` EMANE appears to require location events for nodes to be sync'ed across all EMANE instances for nodes to find each other. Using an EMANE eel file From b5acdf0c3d1b4a302e0a26e8b37e33d837f954cc Mon Sep 17 00:00:00 2001 From: Blake Harnden Date: Wed, 19 Jun 2019 17:34:33 -0700 Subject: [PATCH 28/51] updated tests for running distributed to working order and adding simple emane case --- daemon/core/api/tlv/corehandlers.py | 2 +- daemon/tests/conftest.py | 128 +--------- daemon/tests/distributed/test_distributed.py | 241 ++++++++++++++++--- 3 files changed, 220 insertions(+), 151 deletions(-) diff --git a/daemon/core/api/tlv/corehandlers.py b/daemon/core/api/tlv/corehandlers.py index e57494d6..7ccd15c0 100644 --- a/daemon/core/api/tlv/corehandlers.py +++ b/daemon/core/api/tlv/corehandlers.py @@ -628,7 +628,7 @@ class CoreHandler(socketserver.BaseRequestHandler): """ Node Message handler - :param core.api.coreapi.CoreNodeMessage message: node message + :param core.api.tlv.coreapi.CoreNodeMessage message: node message :return: replies to node message """ replies = [] diff --git a/daemon/tests/conftest.py b/daemon/tests/conftest.py index 0893e7f7..cf82993e 100644 --- a/daemon/tests/conftest.py +++ b/daemon/tests/conftest.py @@ -9,134 +9,22 @@ import time import pytest from mock.mock import MagicMock -from core.api.tlv.coreapi import CoreConfMessage -from core.api.tlv.coreapi import CoreEventMessage -from core.api.tlv.coreapi import 
CoreExecMessage -from core.api.tlv.coreapi import CoreLinkMessage -from core.api.tlv.coreapi import CoreNodeMessage +from core.api.grpc.client import InterfaceHelper +from core.api.grpc.server import CoreGrpcServer +from core.api.tlv.coreapi import CoreConfMessage, CoreEventMessage from core.api.tlv.corehandlers import CoreHandler from core.api.tlv.coreserver import CoreServer from core.emulator.coreemu import CoreEmu from core.emulator.emudata import IpPrefixes -from core.emulator.enumerations import CORE_API_PORT +from core.emulator.enumerations import CORE_API_PORT, EventTlvs from core.emulator.enumerations import ConfigTlvs -from core.emulator.enumerations import EventTlvs from core.emulator.enumerations import EventTypes -from core.emulator.enumerations import ExecuteTlvs -from core.emulator.enumerations import LinkTlvs -from core.emulator.enumerations import LinkTypes -from core.emulator.enumerations import MessageFlags -from core.emulator.enumerations import NodeTlvs -from core.emulator.enumerations import NodeTypes -from core.api.grpc.client import InterfaceHelper -from core.api.grpc.server import CoreGrpcServer from core.nodes import ipaddress -from core.nodes.ipaddress import MacAddress from core.services.coreservices import ServiceManager EMANE_SERVICES = "zebra|OSPFv3MDR|IPForward" -def node_message(_id, name, emulation_server=None, node_type=NodeTypes.DEFAULT, model=None): - """ - Convenience method for creating a node TLV messages. 
- - :param int _id: node id - :param str name: node name - :param str emulation_server: distributed server name, if desired - :param core.enumerations.NodeTypes node_type: node type - :param str model: model for node - :return: tlv message - :rtype: core.api.coreapi.CoreNodeMessage - """ - values = [ - (NodeTlvs.NUMBER, _id), - (NodeTlvs.TYPE, node_type.value), - (NodeTlvs.NAME, name), - (NodeTlvs.EMULATION_SERVER, emulation_server), - ] - - if model: - values.append((NodeTlvs.MODEL, model)) - - return CoreNodeMessage.create(MessageFlags.ADD.value, values) - - -def link_message(n1, n2, intf_one=None, address_one=None, intf_two=None, address_two=None, key=None): - """ - Convenience method for creating link TLV messages. - - :param int n1: node one id - :param int n2: node two id - :param int intf_one: node one interface id - :param core.misc.ipaddress.IpAddress address_one: node one ip4 address - :param int intf_two: node two interface id - :param core.misc.ipaddress.IpAddress address_two: node two ip4 address - :param int key: tunnel key for link if needed - :return: tlv mesage - :rtype: core.api.coreapi.CoreLinkMessage - """ - mac_one, mac_two = None, None - if address_one: - mac_one = MacAddress.random() - if address_two: - mac_two = MacAddress.random() - - values = [ - (LinkTlvs.N1_NUMBER, n1), - (LinkTlvs.N2_NUMBER, n2), - (LinkTlvs.DELAY, 0), - (LinkTlvs.BANDWIDTH, 0), - (LinkTlvs.PER, "0"), - (LinkTlvs.DUP, "0"), - (LinkTlvs.JITTER, 0), - (LinkTlvs.TYPE, LinkTypes.WIRED.value), - (LinkTlvs.INTERFACE1_NUMBER, intf_one), - (LinkTlvs.INTERFACE1_IP4, address_one), - (LinkTlvs.INTERFACE1_IP4_MASK, 24), - (LinkTlvs.INTERFACE1_MAC, mac_one), - (LinkTlvs.INTERFACE2_NUMBER, intf_two), - (LinkTlvs.INTERFACE2_IP4, address_two), - (LinkTlvs.INTERFACE2_IP4_MASK, 24), - (LinkTlvs.INTERFACE2_MAC, mac_two), - ] - - if key: - values.append((LinkTlvs.KEY, key)) - - return CoreLinkMessage.create(MessageFlags.ADD.value, values) - - -def command_message(node, command): - """ - 
Create an execute command TLV message. - - :param node: node to execute command for - :param command: command to execute - :return: tlv message - :rtype: core.api.coreapi.CoreExecMessage - """ - flags = MessageFlags.STRING.value | MessageFlags.TEXT.value - return CoreExecMessage.create(flags, [ - (ExecuteTlvs.NODE, node.id), - (ExecuteTlvs.NUMBER, 1), - (ExecuteTlvs.COMMAND, command) - ]) - - -def state_message(state): - """ - Create a event TLV message for a new state. - - :param core.enumerations.EventTypes state: state to create message for - :return: tlv message - :rtype: core.api.coreapi.CoreEventMessage - """ - return CoreEventMessage.create(0, [ - (EventTlvs.TYPE, state.value) - ]) - - class CoreServerTest(object): def __init__(self, port=CORE_API_PORT): self.host = "localhost" @@ -152,13 +40,12 @@ class CoreServerTest(object): self.session = None self.request_handler = None - def setup(self, distributed_address, port): + def setup(self, distributed_address): # validate address assert distributed_address, "distributed server address was not provided" # create session self.session = self.server.coreemu.create_session(1) - self.session.master = True # create request handler request_mock = MagicMock() @@ -170,11 +57,11 @@ class CoreServerTest(object): # have broker handle a configuration state change self.session.set_state(EventTypes.DEFINITION_STATE) - message = state_message(EventTypes.CONFIGURATION_STATE) + message = CoreEventMessage.create(0, [(EventTlvs.TYPE, EventTypes.CONFIGURATION_STATE.value)]) self.request_handler.handle_message(message) # add broker server for distributed core - distributed = "%s:%s:%s" % (self.distributed_server, distributed_address, port) + distributed = "%s:%s:%s" % (self.distributed_server, distributed_address, self.port) message = CoreConfMessage.create(0, [ (ConfigTlvs.OBJECT, "broker"), (ConfigTlvs.TYPE, 0), @@ -204,7 +91,6 @@ class CoreServerTest(object): def shutdown(self): self.server.coreemu.shutdown() - 
self.server.shutdown() self.server.server_close() diff --git a/daemon/tests/distributed/test_distributed.py b/daemon/tests/distributed/test_distributed.py index 925c4b7f..c183843e 100644 --- a/daemon/tests/distributed/test_distributed.py +++ b/daemon/tests/distributed/test_distributed.py @@ -1,15 +1,125 @@ """ Unit tests for testing CORE with distributed networks. """ +from core.emane.ieee80211abg import EmaneIeee80211abgModel -import conftest - -from core.api.tlv.coreapi import CoreExecMessage -from core.emulator.enumerations import EventTypes +from core.api.tlv.coreapi import CoreExecMessage, CoreNodeMessage, CoreLinkMessage, CoreEventMessage, CoreConfMessage +from core.emulator.enumerations import EventTypes, NodeTlvs, LinkTlvs, LinkTypes, EventTlvs, ConfigTlvs, ConfigFlags from core.emulator.enumerations import ExecuteTlvs from core.emulator.enumerations import MessageFlags from core.emulator.enumerations import NodeTypes -from core.nodes.ipaddress import IpAddress +from core.nodes.ipaddress import IpAddress, MacAddress, Ipv4Prefix + + +def set_emane_model(node_id, model): + return CoreConfMessage.create(0, [ + (ConfigTlvs.NODE, node_id), + (ConfigTlvs.OBJECT, model), + (ConfigTlvs.TYPE, ConfigFlags.UPDATE.value), + ]) + + +def node_message(_id, name, emulation_server=None, node_type=NodeTypes.DEFAULT, model=None): + """ + Convenience method for creating a node TLV messages. 
+ + :param int _id: node id + :param str name: node name + :param str emulation_server: distributed server name, if desired + :param core.emulator.enumerations.NodeTypes node_type: node type + :param str model: model for node + :return: tlv message + :rtype: core.api.tlv.coreapi.CoreNodeMessage + """ + values = [ + (NodeTlvs.NUMBER, _id), + (NodeTlvs.TYPE, node_type.value), + (NodeTlvs.NAME, name), + (NodeTlvs.EMULATION_SERVER, emulation_server), + (NodeTlvs.X_POSITION, 0), + (NodeTlvs.Y_POSITION, 0), + ] + + if model: + values.append((NodeTlvs.MODEL, model)) + + return CoreNodeMessage.create(MessageFlags.ADD.value, values) + + +def link_message(n1, n2, intf_one=None, address_one=None, intf_two=None, address_two=None, key=None, mask=24): + """ + Convenience method for creating link TLV messages. + + :param int n1: node one id + :param int n2: node two id + :param int intf_one: node one interface id + :param core.nodes.ipaddress.IpAddress address_one: node one ip4 address + :param int intf_two: node two interface id + :param core.nodes.ipaddress.IpAddress address_two: node two ip4 address + :param int key: tunnel key for link if needed + :param int mask: ip4 mask to use for link + :return: tlv mesage + :rtype: core.api.tlv.coreapi.CoreLinkMessage + """ + mac_one, mac_two = None, None + if address_one: + mac_one = MacAddress.random() + if address_two: + mac_two = MacAddress.random() + + values = [ + (LinkTlvs.N1_NUMBER, n1), + (LinkTlvs.N2_NUMBER, n2), + (LinkTlvs.DELAY, 0), + (LinkTlvs.BANDWIDTH, 0), + (LinkTlvs.PER, "0"), + (LinkTlvs.DUP, "0"), + (LinkTlvs.JITTER, 0), + (LinkTlvs.TYPE, LinkTypes.WIRED.value), + (LinkTlvs.INTERFACE1_NUMBER, intf_one), + (LinkTlvs.INTERFACE1_IP4, address_one), + (LinkTlvs.INTERFACE1_IP4_MASK, mask), + (LinkTlvs.INTERFACE1_MAC, mac_one), + (LinkTlvs.INTERFACE2_NUMBER, intf_two), + (LinkTlvs.INTERFACE2_IP4, address_two), + (LinkTlvs.INTERFACE2_IP4_MASK, mask), + (LinkTlvs.INTERFACE2_MAC, mac_two), + ] + + if key: + 
values.append((LinkTlvs.KEY, key)) + + return CoreLinkMessage.create(MessageFlags.ADD.value, values) + + +def command_message(node, command): + """ + Create an execute command TLV message. + + :param node: node to execute command for + :param command: command to execute + :return: tlv message + :rtype: core.api.tlv.coreapi.CoreExecMessage + """ + flags = MessageFlags.STRING.value | MessageFlags.TEXT.value + return CoreExecMessage.create(flags, [ + (ExecuteTlvs.NODE, node.id), + (ExecuteTlvs.NUMBER, 1), + (ExecuteTlvs.COMMAND, command) + ]) + + +def state_message(state): + """ + Create a event TLV message for a new state. + + :param core.enumerations.EventTypes state: state to create message for + :return: tlv message + :rtype: core.api.tlv.coreapi.CoreEventMessage + """ + return CoreEventMessage.create(0, [ + (EventTlvs.TYPE, state.value) + ]) def validate_response(replies, _): @@ -28,18 +138,18 @@ def validate_response(replies, _): class TestDistributed: - def test_distributed(self, cored, distributed_address): + def test_switch(self, cored, distributed_address): """ - Test creating a distributed network. + Test creating a distributed switch network. 
- :param core.coreserver.CoreServer conftest.Core cored: core daemon server to test with + :param core.api.tlv.coreserver.CoreServer conftest.Core cored: core daemon server to test with :param str distributed_address: distributed server to test against """ # initialize server for testing cored.setup(distributed_address) # create local node - message = conftest.node_message( + message = node_message( _id=1, name="n1", model="host" @@ -47,7 +157,7 @@ class TestDistributed: cored.request_handler.handle_message(message) # create distributed node and assign to distributed server - message = conftest.node_message( + message = node_message( _id=2, name="n2", emulation_server=cored.distributed_server, @@ -56,17 +166,16 @@ class TestDistributed: cored.request_handler.handle_message(message) # create distributed switch and assign to distributed server - message = conftest.node_message( + message = node_message( _id=3, name="n3", - emulation_server=cored.distributed_server, node_type=NodeTypes.SWITCH ) cored.request_handler.handle_message(message) # link message one ip4_address = cored.prefix.addr(1) - message = conftest.link_message( + message = link_message( n1=1, n2=3, intf_one=0, @@ -76,7 +185,7 @@ class TestDistributed: # link message two ip4_address = cored.prefix.addr(2) - message = conftest.link_message( + message = link_message( n1=3, n2=2, intf_two=0, @@ -85,12 +194,86 @@ class TestDistributed: cored.request_handler.handle_message(message) # change session to instantiation state - message = conftest.state_message(EventTypes.INSTANTIATION_STATE) + message = state_message(EventTypes.INSTANTIATION_STATE) cored.request_handler.handle_message(message) # test a ping command node_one = cored.session.get_node(1) - message = conftest.command_message(node_one, "ping -c 5 %s" % ip4_address) + message = command_message(node_one, "ping -c 5 %s" % ip4_address) + cored.request_handler.dispatch_replies = validate_response + cored.request_handler.handle_message(message) + + def 
test_emane(self, cored, distributed_address): + """ + Test creating a distributed emane network. + + :param core.api.tlv.coreserver.CoreServer conftest.Core cored: core daemon server to test with + :param str distributed_address: distributed server to test against + """ + # initialize server for testing + cored.setup(distributed_address) + + # configure required controlnet + cored.session.options.set_config("controlnet", "core1:172.16.1.0/24 core2:172.16.2.0/24") + + # create local node + message = node_message( + _id=1, + name="n1", + model="mdr" + ) + cored.request_handler.handle_message(message) + + # create distributed node and assign to distributed server + message = node_message( + _id=2, + name="n2", + emulation_server=cored.distributed_server, + model="mdr" + ) + cored.request_handler.handle_message(message) + + # create distributed switch and assign to distributed server + message = node_message( + _id=3, + name="n3", + node_type=NodeTypes.EMANE + ) + cored.request_handler.handle_message(message) + + # set emane model + message = set_emane_model(3, EmaneIeee80211abgModel.name) + cored.request_handler.handle_message(message) + + # link message one + ip4_address = cored.prefix.addr(1) + message = link_message( + n1=1, + n2=3, + intf_one=0, + address_one=ip4_address, + mask=32 + ) + cored.request_handler.handle_message(message) + + # link message two + ip4_address = cored.prefix.addr(2) + message = link_message( + n1=2, + n2=3, + intf_one=0, + address_one=ip4_address, + mask=32 + ) + cored.request_handler.handle_message(message) + + # change session to instantiation state + message = state_message(EventTypes.INSTANTIATION_STATE) + cored.request_handler.handle_message(message) + + # test a ping command + node_one = cored.session.get_node(1) + message = command_message(node_one, "ping -c 5 %s" % ip4_address) cored.request_handler.dispatch_replies = validate_response cored.request_handler.handle_message(message) @@ -98,14 +281,14 @@ class TestDistributed: """ 
Test creating a distributed prouter node. - :param core.coreserver.CoreServer conftest.Core cored: core daemon server to test with + :param core.coreserver.CoreServer Core cored: core daemon server to test with :param str distributed_address: distributed server to test against """ # initialize server for testing cored.setup(distributed_address) # create local node - message = conftest.node_message( + message = node_message( _id=1, name="n1", model="host" @@ -113,7 +296,7 @@ class TestDistributed: cored.request_handler.handle_message(message) # create distributed node and assign to distributed server - message = conftest.node_message( + message = node_message( _id=2, name="n2", emulation_server=cored.distributed_server, @@ -123,7 +306,7 @@ class TestDistributed: cored.request_handler.handle_message(message) # create distributed switch and assign to distributed server - message = conftest.node_message( + message = node_message( _id=3, name="n3", node_type=NodeTypes.SWITCH @@ -132,7 +315,7 @@ class TestDistributed: # link message one ip4_address = cored.prefix.addr(1) - message = conftest.link_message( + message = link_message( n1=1, n2=3, intf_one=0, @@ -142,7 +325,7 @@ class TestDistributed: # link message two ip4_address = cored.prefix.addr(2) - message = conftest.link_message( + message = link_message( n1=3, n2=2, intf_two=0, @@ -151,12 +334,12 @@ class TestDistributed: cored.request_handler.handle_message(message) # change session to instantiation state - message = conftest.state_message(EventTypes.INSTANTIATION_STATE) + message = state_message(EventTypes.INSTANTIATION_STATE) cored.request_handler.handle_message(message) # test a ping command node_one = cored.session.get_node(1) - message = conftest.command_message(node_one, "ping -c 5 %s" % ip4_address) + message = command_message(node_one, "ping -c 5 %s" % ip4_address) cored.request_handler.dispatch_replies = validate_response cored.request_handler.handle_message(message) 
cored.request_handler.handle_message(message) @@ -165,14 +348,14 @@ class TestDistributed: """ Test session broker creation. - :param core.coreserver.CoreServer conftest.Core cored: core daemon server to test with + :param core.coreserver.CoreServer Core cored: core daemon server to test with :param str distributed_address: distributed server to test against """ # initialize server for testing cored.setup(distributed_address) # create local node - message = conftest.node_message( + message = node_message( _id=1, name="n1", model="host" @@ -180,7 +363,7 @@ class TestDistributed: cored.request_handler.handle_message(message) # create distributed node and assign to distributed server - message = conftest.node_message( + message = node_message( _id=2, name=distributed_address, emulation_server=cored.distributed_server, @@ -191,7 +374,7 @@ class TestDistributed: # link message one ip4_address = cored.prefix.addr(1) address_two = IpAddress.from_string(distributed_address) - message = conftest.link_message( + message = link_message( n1=1, n2=2, intf_one=0, @@ -203,5 +386,5 @@ class TestDistributed: cored.request_handler.handle_message(message) # change session to instantiation state - message = conftest.state_message(EventTypes.INSTANTIATION_STATE) + message = state_message(EventTypes.INSTANTIATION_STATE) cored.request_handler.handle_message(message) From 338c3a1fa19303233de49da49c1db1cd88d3705f Mon Sep 17 00:00:00 2001 From: Blake Harnden Date: Thu, 20 Jun 2019 10:49:07 -0700 Subject: [PATCH 29/51] fix to avoid sorting issue for comparinga str to an int in python3 --- daemon/core/emulator/session.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/daemon/core/emulator/session.py b/daemon/core/emulator/session.py index 34df079d..dffaff04 100644 --- a/daemon/core/emulator/session.py +++ b/daemon/core/emulator/session.py @@ -22,10 +22,10 @@ from core.api.tlv.broker import CoreBroker from core.emane.emanemanager import EmaneManager from 
core.emulator.data import EventData, NodeData from core.emulator.data import ExceptionData -from core.emulator.emudata import LinkOptions, NodeOptions from core.emulator.emudata import IdGen -from core.emulator.emudata import is_net_node +from core.emulator.emudata import LinkOptions, NodeOptions from core.emulator.emudata import create_interface +from core.emulator.emudata import is_net_node from core.emulator.emudata import link_config from core.emulator.enumerations import EventTypes, LinkTypes from core.emulator.enumerations import ExceptionLevels @@ -1168,7 +1168,7 @@ class Session(object): with self._nodes_lock: file_path = os.path.join(self.session_dir, "nodes") with open(file_path, "w") as f: - for _id in sorted(self.nodes.keys()): + for _id in self.nodes.keys(): node = self.nodes[_id] f.write("%s %s %s %s\n" % (_id, node.name, node.apitype, type(node))) except IOError: From a5f26e664a9498afa988c15153336a97f8c963cf Mon Sep 17 00:00:00 2001 From: Blake Harnden Date: Thu, 20 Jun 2019 13:22:20 -0700 Subject: [PATCH 30/51] updated logic for creating tunnel keys to use a consistent hashing method, since the builtin hash is not guaranteed in python3 as it was before in python2 --- daemon/core/api/tlv/broker.py | 4 +++- daemon/core/utils.py | 17 +++++++++++++++++ 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/daemon/core/api/tlv/broker.py b/daemon/core/api/tlv/broker.py index 2021bb4c..b7b24746 100644 --- a/daemon/core/api/tlv/broker.py +++ b/daemon/core/api/tlv/broker.py @@ -10,6 +10,7 @@ import select import socket import threading +from core import utils from core.api.tlv import coreapi from core.nodes.base import CoreNodeBase, CoreNetworkBase from core.emulator.enumerations import ConfigDataTypes @@ -387,12 +388,13 @@ class CoreBroker(object): :return: tunnel key for the node pair :rtype: int """ + logging.debug("creating tunnel key for: %s, %s", n1num, n2num) sid = self.session_id_master if sid is None: # this is the master session sid = 
self.session.id - key = (sid << 16) ^ hash(n1num) ^ (hash(n2num) << 8) + key = (sid << 16) ^ utils.hash(n1num) ^ (utils.hash(n2num) << 8) return key & 0xFFFFFFFF def addtunnel(self, remoteip, n1num, n2num, localnum): diff --git a/daemon/core/utils.py b/daemon/core/utils.py index ac76e693..89c8aa71 100644 --- a/daemon/core/utils.py +++ b/daemon/core/utils.py @@ -3,6 +3,7 @@ Miscellaneous utility functions, wrappers around some subprocess procedures. """ import fcntl +import hashlib import importlib import inspect import logging @@ -17,6 +18,22 @@ from core import CoreCommandError DEVNULL = open(os.devnull, "wb") +def hash(value): + """ + Provide a consistent hash that can be used in place + of the builtin hash, that no longer behaves consistently + in python3. + + :param str/int value: value to hash + :return: hash value + :rtype: int + """ + if isinstance(value, int): + value = str(value) + value = value.encode("utf-8") + return int(hashlib.sha256(value).hexdigest(), 16) + + def _detach_init(): """ Fork a child process and exit. 
From c57089284f8f1b4ccb3e18d88ae01b379359414e Mon Sep 17 00:00:00 2001 From: Blake Harnden Date: Fri, 21 Jun 2019 08:51:56 -0700 Subject: [PATCH 31/51] updates to rename utils.hash to utils.hashkey to avoid builtin conflict --- daemon/core/api/tlv/broker.py | 2 +- daemon/core/utils.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/daemon/core/api/tlv/broker.py b/daemon/core/api/tlv/broker.py index b7b24746..98c258b0 100644 --- a/daemon/core/api/tlv/broker.py +++ b/daemon/core/api/tlv/broker.py @@ -394,7 +394,7 @@ class CoreBroker(object): # this is the master session sid = self.session.id - key = (sid << 16) ^ utils.hash(n1num) ^ (utils.hash(n2num) << 8) + key = (sid << 16) ^ utils.hashkey(n1num) ^ (utils.hashkey(n2num) << 8) return key & 0xFFFFFFFF def addtunnel(self, remoteip, n1num, n2num, localnum): diff --git a/daemon/core/utils.py b/daemon/core/utils.py index 89c8aa71..dbbdc321 100644 --- a/daemon/core/utils.py +++ b/daemon/core/utils.py @@ -18,7 +18,7 @@ from core import CoreCommandError DEVNULL = open(os.devnull, "wb") -def hash(value): +def hashkey(value): """ Provide a consistent hash that can be used in place of the builtin hash, that no longer behaves consistently From 05c6233908d5c1ffbeac05f405d8e9c5000ecdeb Mon Sep 17 00:00:00 2001 From: Blake Harnden Date: Fri, 21 Jun 2019 09:29:19 -0700 Subject: [PATCH 32/51] added utility method to replace execfile for python2/3 support --- daemon/core/api/tlv/corehandlers.py | 2 +- daemon/core/utils.py | 21 +++++++++++++++++++++ 2 files changed, 22 insertions(+), 1 deletion(-) diff --git a/daemon/core/api/tlv/corehandlers.py b/daemon/core/api/tlv/corehandlers.py index 7ccd15c0..64d952ed 100644 --- a/daemon/core/api/tlv/corehandlers.py +++ b/daemon/core/api/tlv/corehandlers.py @@ -859,7 +859,7 @@ class CoreHandler(socketserver.BaseRequestHandler): raise else: thread = threading.Thread( - target=execfile, + target=utils.execute_file, args=(file_name, {"__file__": file_name, "coreemu": 
self.coreemu}) ) thread.daemon = True diff --git a/daemon/core/utils.py b/daemon/core/utils.py index dbbdc321..98d7a9bf 100644 --- a/daemon/core/utils.py +++ b/daemon/core/utils.py @@ -18,6 +18,27 @@ from core import CoreCommandError DEVNULL = open(os.devnull, "wb") +def execute_file(path, exec_globals=None, exec_locals=None): + """ + Provides an alternative way to run execfile to be compatible for + both python2/3. + + :param str path: path of file to execute + :param dict exec_globals: globals values to pass to execution + :param dict exec_locals: local values to pass to execution + :return: nothing + """ + if exec_globals is None: + exec_globals = {} + exec_globals.update({ + "__file__": path, + "__name__": "__main__" + }) + with open(path, "rb") as f: + data = compile(f.read(), path, "exec") + exec(data, exec_globals, exec_locals) + + def hashkey(value): """ Provide a consistent hash that can be used in place From f9304b0875596c9e15355966430075daa4f61792 Mon Sep 17 00:00:00 2001 From: bharnden <32446120+bharnden@users.noreply.github.com> Date: Fri, 21 Jun 2019 12:51:20 -0700 Subject: [PATCH 33/51] Update install.md fixed missing command to build ospf msdr and added possible dependencies --- docs/install.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/install.md b/docs/install.md index 86b93c34..ac1362d2 100644 --- a/docs/install.md +++ b/docs/install.md @@ -89,9 +89,13 @@ sudo dpkg -i quagga-mr_0.99.21mr2.2_amd64.deb Requires building from source, from the latest nightly snapshot. 
```shell +# packages needed beyond what's normally required to build core on ubuntu +sudo apt install libtool libreadline-dev + wget https://downloads.pf.itd.nrl.navy.mil/ospf-manet/nightly_snapshots/quagga-svnsnap.tgz tar xzf quagga-svnsnap.tgz cd quagga +./bootstrap.sh ./configure --enable-user=root --enable-group=root --with-cflags=-ggdb \ --sysconfdir=/usr/local/etc/quagga --enable-vtysh \ --localstatedir=/var/run/quagga From e11ec020ebbd8df0d1c78d4be249de3c87190587 Mon Sep 17 00:00:00 2001 From: bharnden <32446120+bharnden@users.noreply.github.com> Date: Fri, 21 Jun 2019 12:57:32 -0700 Subject: [PATCH 34/51] Update install.md avoid building docs for ospf mdr --- docs/install.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/install.md b/docs/install.md index ac1362d2..ae3146af 100644 --- a/docs/install.md +++ b/docs/install.md @@ -96,7 +96,7 @@ wget https://downloads.pf.itd.nrl.navy.mil/ospf-manet/nightly_snapshots/quagga-s tar xzf quagga-svnsnap.tgz cd quagga ./bootstrap.sh -./configure --enable-user=root --enable-group=root --with-cflags=-ggdb \ +./configure --disable-doc --enable-user=root --enable-group=root --with-cflags=-ggdb \ --sysconfdir=/usr/local/etc/quagga --enable-vtysh \ --localstatedir=/var/run/quagga make From 588a0932d3b1360a497053ba0aa1d218dbedf219 Mon Sep 17 00:00:00 2001 From: bharnden Date: Fri, 21 Jun 2019 23:12:18 -0700 Subject: [PATCH 35/51] beginning to add some tests to help verify handling different tlv messages, added delete link logic for net to node and node to net --- daemon/core/emulator/session.py | 12 ++ daemon/tests/conftest.py | 22 +++ daemon/tests/test_gui.py | 259 ++++++++++++++++++++++++-------- 3 files changed, 232 insertions(+), 61 deletions(-) diff --git a/daemon/core/emulator/session.py b/daemon/core/emulator/session.py index dffaff04..4118bd78 100644 --- a/daemon/core/emulator/session.py +++ b/daemon/core/emulator/session.py @@ -361,6 +361,18 @@ class Session(object): 
self.delete_node(net_one.id) node_one.delnetif(interface_one.netindex) node_two.delnetif(interface_two.netindex) + elif node_one and net_one: + interface = node_one.netif(interface_one_id) + logging.info("deleting link node(%s):interface(%s) node(%s)", + node_one.name, interface.name, net_one.name) + interface.detachnet() + node_one.delnetif(interface.netindex) + elif node_two and net_one: + interface = node_two.netif(interface_two_id) + logging.info("deleting link node(%s):interface(%s) node(%s)", + node_two.name, interface.name, net_one.name) + interface.detachnet() + node_two.delnetif(interface.netindex) finally: if node_one: node_one.lock.release() diff --git a/daemon/tests/conftest.py b/daemon/tests/conftest.py index cf82993e..76be943a 100644 --- a/daemon/tests/conftest.py +++ b/daemon/tests/conftest.py @@ -40,6 +40,14 @@ class CoreServerTest(object): self.session = None self.request_handler = None + def setup_handler(self): + self.session = self.server.coreemu.create_session(1) + request_mock = MagicMock() + request_mock.fileno = MagicMock(return_value=1) + self.request_handler = CoreHandler(request_mock, "", self.server) + self.request_handler.session = self.session + self.request_handler.add_session_handlers() + def setup(self, distributed_address): # validate address assert distributed_address, "distributed server address was not provided" @@ -154,6 +162,20 @@ def cored(): ServiceManager.services.clear() +@pytest.fixture() +def coreserver(): + # create and return server + server = CoreServerTest() + server.setup_handler() + yield server + + # cleanup + server.shutdown() + + # cleanup services + ServiceManager.services.clear() + + def ping(from_node, to_node, ip_prefixes, count=3): address = ip_prefixes.ip4_address(to_node) return from_node.cmd(["ping", "-c", str(count), address]) diff --git a/daemon/tests/test_gui.py b/daemon/tests/test_gui.py index 3083ea2d..ab8169c0 100644 --- a/daemon/tests/test_gui.py +++ b/daemon/tests/test_gui.py @@ -2,19 +2,18 @@ 
Unit tests for testing with a CORE switch. """ -import threading +import pytest -from core.api.tlv import coreapi, dataconversion +from core.api.tlv import coreapi from core.api.tlv.coreapi import CoreExecuteTlv -from core.emulator.enumerations import CORE_API_PORT, NodeTypes from core.emulator.enumerations import EventTlvs -from core.emulator.enumerations import EventTypes from core.emulator.enumerations import ExecuteTlvs from core.emulator.enumerations import LinkTlvs from core.emulator.enumerations import LinkTypes from core.emulator.enumerations import MessageFlags from core.emulator.enumerations import MessageTypes -from core.nodes import ipaddress +from core.emulator.enumerations import NodeTypes, NodeTlvs +from core.nodes.ipaddress import Ipv4Prefix def command_message(node, command): @@ -101,74 +100,212 @@ def run_cmd(node, exec_cmd): class TestGui: - def test_broker(self, cored): - """ - Test session broker creation. + @pytest.mark.parametrize("node_type, model", [ + (NodeTypes.DEFAULT, "PC"), + (NodeTypes.EMANE, None), + (NodeTypes.HUB, None), + (NodeTypes.SWITCH, None), + (NodeTypes.WIRELESS_LAN, None), + (NodeTypes.TUNNEL, None), + (NodeTypes.RJ45, None), + ]) + def test_node_add(self, coreserver, node_type, model): + node_id = 1 + message = coreapi.CoreNodeMessage.create(MessageFlags.ADD.value, [ + (NodeTlvs.NUMBER, node_id), + (NodeTlvs.TYPE, node_type.value), + (NodeTlvs.NAME, "n1"), + (NodeTlvs.X_POSITION, 0), + (NodeTlvs.Y_POSITION, 0), + (NodeTlvs.MODEL, model), + ]) - :param core.emulator.coreemu.EmuSession session: session for test - :param cored: cored daemon server to test with - """ + coreserver.request_handler.handle_message(message) - # set core daemon to run in the background - thread = threading.Thread(target=cored.server.serve_forever) - thread.daemon = True - thread.start() + assert coreserver.session.get_node(node_id) is not None - # ip prefix for nodes - prefix = ipaddress.Ipv4Prefix("10.83.0.0/16") - daemon = "localhost" + def 
test_node_update(self, coreserver): + node_id = 1 + coreserver.session.add_node(_id=node_id) + x = 50 + y = 100 + message = coreapi.CoreNodeMessage.create(0, [ + (NodeTlvs.NUMBER, node_id), + (NodeTlvs.X_POSITION, x), + (NodeTlvs.Y_POSITION, y), + ]) - # add server - session = cored.server.coreemu.create_session() - session.broker.addserver(daemon, "127.0.0.1", CORE_API_PORT) + coreserver.request_handler.handle_message(message) - # setup server - session.broker.setupserver(daemon) + node = coreserver.session.get_node(node_id) + assert node is not None + assert node.position.x == x + assert node.position.y == y - # do not want the recvloop running as we will deal ourselves - session.broker.dorecvloop = False + def test_node_delete(self, coreserver): + node_id = 1 + coreserver.session.add_node(_id=node_id) + message = coreapi.CoreNodeMessage.create(MessageFlags.DELETE.value, [ + (NodeTlvs.NUMBER, node_id), + ]) - # have broker handle a configuration state change - session.set_state(EventTypes.CONFIGURATION_STATE) - event_message = state_message(EventTypes.CONFIGURATION_STATE) - session.broker.handlerawmsg(event_message) + coreserver.request_handler.handle_message(message) - # create a switch node - switch = session.add_node(_type=NodeTypes.SWITCH) - switch.setposition(x=80, y=50) - switch.server = daemon + with pytest.raises(KeyError): + coreserver.session.get_node(node_id) - # retrieve switch data representation, create a switch message for broker to handle - switch_data = switch.data(MessageFlags.ADD.value) - switch_message = dataconversion.convert_node(switch_data) - session.broker.handlerawmsg(switch_message) + def test_link_add(self, coreserver): + node_one = 1 + coreserver.session.add_node(_id=node_one) + switch = 2 + coreserver.session.add_node(_id=switch, _type=NodeTypes.SWITCH) + ip_prefix = Ipv4Prefix("10.0.0.0/24") + interface_one = ip_prefix.addr(node_one) + coreserver.session.add_link(node_one, switch, interface_one) + message = 
coreapi.CoreLinkMessage.create(MessageFlags.ADD.value, [ + (LinkTlvs.N1_NUMBER, node_one), + (LinkTlvs.N2_NUMBER, switch), + (LinkTlvs.INTERFACE1_NUMBER, 0), + (LinkTlvs.INTERFACE1_IP4, interface_one), + (LinkTlvs.INTERFACE1_IP4_MASK, 24), + ]) - # create node one - node_one = session.add_node() - node_one.server = daemon + coreserver.request_handler.handle_message(message) - # create node two - node_two = session.add_node() - node_two.server = daemon + switch_node = coreserver.session.get_node(switch) + all_links = switch_node.all_link_data(0) + assert len(all_links) == 1 - # create node messages for the broker to handle - for node in [node_one, node_two]: - node_data = node.data(MessageFlags.ADD.value) - node_message = dataconversion.convert_node(node_data) - session.broker.handlerawmsg(node_message) + def test_link_update(self, coreserver): + node_one = 1 + coreserver.session.add_node(_id=node_one) + switch = 2 + coreserver.session.add_node(_id=switch, _type=NodeTypes.SWITCH) + ip_prefix = Ipv4Prefix("10.0.0.0/24") + interface_one = ip_prefix.addr(node_one) + message = coreapi.CoreLinkMessage.create(MessageFlags.ADD.value, [ + (LinkTlvs.N1_NUMBER, node_one), + (LinkTlvs.N2_NUMBER, switch), + (LinkTlvs.INTERFACE1_NUMBER, 0), + (LinkTlvs.INTERFACE1_IP4, interface_one), + (LinkTlvs.INTERFACE1_IP4_MASK, 24), + ]) + coreserver.request_handler.handle_message(message) + switch_node = coreserver.session.get_node(switch) + all_links = switch_node.all_link_data(0) + assert len(all_links) == 1 + link = all_links[0] + assert link.bandwidth is None - # create links to switch from nodes for broker to handle - for index, node in enumerate([node_one, node_two], start=1): - ip4_address = prefix.addr(index) - link_message = switch_link_message(switch, node, ip4_address, prefix.prefixlen) - session.broker.handlerawmsg(link_message) + bandwidth = 50000 + message = coreapi.CoreLinkMessage.create(0, [ + (LinkTlvs.N1_NUMBER, node_one), + (LinkTlvs.N2_NUMBER, switch), + 
(LinkTlvs.INTERFACE1_NUMBER, 0), + (LinkTlvs.BANDWIDTH, bandwidth), + ]) + coreserver.request_handler.handle_message(message) - # change session to instantiation state - event_message = state_message(EventTypes.INSTANTIATION_STATE) - session.broker.handlerawmsg(event_message) + switch_node = coreserver.session.get_node(switch) + all_links = switch_node.all_link_data(0) + assert len(all_links) == 1 + link = all_links[0] + assert link.bandwidth == bandwidth - # Get the ip or last node and ping it from the first - output, status = run_cmd(node_one, "ip -4 -o addr show dev eth0") - pingip = output.split()[3].split("/")[0] - output, status = run_cmd(node_two, "ping -c 5 " + pingip) - assert not status + def test_link_delete_node_to_node(self, coreserver): + node_one = 1 + coreserver.session.add_node(_id=node_one) + node_two = 2 + coreserver.session.add_node(_id=node_two) + ip_prefix = Ipv4Prefix("10.0.0.0/24") + interface_one = ip_prefix.addr(node_one) + interface_two = ip_prefix.addr(node_two) + message = coreapi.CoreLinkMessage.create(MessageFlags.ADD.value, [ + (LinkTlvs.N1_NUMBER, node_one), + (LinkTlvs.N2_NUMBER, node_two), + (LinkTlvs.INTERFACE1_NUMBER, 0), + (LinkTlvs.INTERFACE1_IP4, interface_one), + (LinkTlvs.INTERFACE1_IP4_MASK, 24), + (LinkTlvs.INTERFACE2_IP4, interface_two), + (LinkTlvs.INTERFACE2_IP4_MASK, 24), + ]) + coreserver.request_handler.handle_message(message) + all_links = [] + for node_id in coreserver.session.nodes: + node = coreserver.session.nodes[node_id] + all_links += node.all_link_data(0) + assert len(all_links) == 1 + + message = coreapi.CoreLinkMessage.create(MessageFlags.DELETE.value, [ + (LinkTlvs.N1_NUMBER, node_one), + (LinkTlvs.N2_NUMBER, node_two), + (LinkTlvs.INTERFACE1_NUMBER, 0), + (LinkTlvs.INTERFACE2_NUMBER, 0), + ]) + coreserver.request_handler.handle_message(message) + + all_links = [] + for node_id in coreserver.session.nodes: + node = coreserver.session.nodes[node_id] + all_links += node.all_link_data(0) + assert 
len(all_links) == 0 + + def test_link_delete_node_to_net(self, coreserver): + node_one = 1 + coreserver.session.add_node(_id=node_one) + switch = 2 + coreserver.session.add_node(_id=switch, _type=NodeTypes.SWITCH) + ip_prefix = Ipv4Prefix("10.0.0.0/24") + interface_one = ip_prefix.addr(node_one) + message = coreapi.CoreLinkMessage.create(MessageFlags.ADD.value, [ + (LinkTlvs.N1_NUMBER, node_one), + (LinkTlvs.N2_NUMBER, switch), + (LinkTlvs.INTERFACE1_NUMBER, 0), + (LinkTlvs.INTERFACE1_IP4, interface_one), + (LinkTlvs.INTERFACE1_IP4_MASK, 24), + ]) + coreserver.request_handler.handle_message(message) + switch_node = coreserver.session.get_node(switch) + all_links = switch_node.all_link_data(0) + assert len(all_links) == 1 + + message = coreapi.CoreLinkMessage.create(MessageFlags.DELETE.value, [ + (LinkTlvs.N1_NUMBER, node_one), + (LinkTlvs.N2_NUMBER, switch), + (LinkTlvs.INTERFACE1_NUMBER, 0), + ]) + coreserver.request_handler.handle_message(message) + + switch_node = coreserver.session.get_node(switch) + all_links = switch_node.all_link_data(0) + assert len(all_links) == 0 + + def test_link_delete_net_to_node(self, coreserver): + node_one = 1 + coreserver.session.add_node(_id=node_one) + switch = 2 + coreserver.session.add_node(_id=switch, _type=NodeTypes.SWITCH) + ip_prefix = Ipv4Prefix("10.0.0.0/24") + interface_one = ip_prefix.addr(node_one) + message = coreapi.CoreLinkMessage.create(MessageFlags.ADD.value, [ + (LinkTlvs.N1_NUMBER, node_one), + (LinkTlvs.N2_NUMBER, switch), + (LinkTlvs.INTERFACE1_NUMBER, 0), + (LinkTlvs.INTERFACE1_IP4, interface_one), + (LinkTlvs.INTERFACE1_IP4_MASK, 24), + ]) + coreserver.request_handler.handle_message(message) + switch_node = coreserver.session.get_node(switch) + all_links = switch_node.all_link_data(0) + assert len(all_links) == 1 + + message = coreapi.CoreLinkMessage.create(MessageFlags.DELETE.value, [ + (LinkTlvs.N1_NUMBER, switch), + (LinkTlvs.N2_NUMBER, node_one), + (LinkTlvs.INTERFACE2_NUMBER, 0), + ]) + 
coreserver.request_handler.handle_message(message) + + switch_node = coreserver.session.get_node(switch) + all_links = switch_node.all_link_data(0) + assert len(all_links) == 0 From 4f4605163f5f96628f35a94df2c5f6a510429e6a Mon Sep 17 00:00:00 2001 From: Blake Harnden Date: Mon, 24 Jun 2019 15:49:12 -0700 Subject: [PATCH 36/51] fixed issues executing xml from gui, added more tests for testing message handling --- daemon/core/api/tlv/corehandlers.py | 4 +- daemon/core/emulator/session.py | 3 + daemon/tests/test_gui.py | 429 ++++++++++++++++++++++------ 3 files changed, 344 insertions(+), 92 deletions(-) diff --git a/daemon/core/api/tlv/corehandlers.py b/daemon/core/api/tlv/corehandlers.py index 64d952ed..682978d6 100644 --- a/daemon/core/api/tlv/corehandlers.py +++ b/daemon/core/api/tlv/corehandlers.py @@ -1396,7 +1396,7 @@ class CoreHandler(socketserver.BaseRequestHandler): open_file.write(data) return () - self.session.node_add_file(node_num, source_name, file_name, data) + self.session.add_node_file(node_num, source_name, file_name, data) else: raise NotImplementedError @@ -1639,10 +1639,10 @@ class CoreHandler(socketserver.BaseRequestHandler): logging.info("request to connect to session %s", session_id) # remove client from session broker and shutdown if needed - self.remove_session_handlers() self.session.broker.session_clients.remove(self) if not self.session.broker.session_clients and not self.session.is_active(): self.coreemu.delete_session(self.session.id) + self.remove_session_handlers() # set session to join self.session = session diff --git a/daemon/core/emulator/session.py b/daemon/core/emulator/session.py index 4118bd78..246f0c0a 100644 --- a/daemon/core/emulator/session.py +++ b/daemon/core/emulator/session.py @@ -628,6 +628,9 @@ class Session(object): # clear out existing session self.clear() + if start: + self.set_state(EventTypes.CONFIGURATION_STATE) + # write out xml file CoreXmlReader(self).read(file_name) diff --git a/daemon/tests/test_gui.py 
b/daemon/tests/test_gui.py index ab8169c0..fd9f6bed 100644 --- a/daemon/tests/test_gui.py +++ b/daemon/tests/test_gui.py @@ -1,104 +1,21 @@ """ -Unit tests for testing with a CORE switch. +Tests for testing tlv message handling. """ +import os +import time +import mock import pytest from core.api.tlv import coreapi -from core.api.tlv.coreapi import CoreExecuteTlv -from core.emulator.enumerations import EventTlvs +from core.emulator.enumerations import EventTlvs, SessionTlvs, EventTypes, FileTlvs, RegisterTlvs from core.emulator.enumerations import ExecuteTlvs from core.emulator.enumerations import LinkTlvs -from core.emulator.enumerations import LinkTypes from core.emulator.enumerations import MessageFlags -from core.emulator.enumerations import MessageTypes from core.emulator.enumerations import NodeTypes, NodeTlvs from core.nodes.ipaddress import Ipv4Prefix -def command_message(node, command): - """ - Create an execute command TLV message. - - :param node: node to execute command for - :param command: command to execute - :return: packed execute message - """ - tlv_data = CoreExecuteTlv.pack(ExecuteTlvs.NODE.value, node.id) - tlv_data += CoreExecuteTlv.pack(ExecuteTlvs.NUMBER.value, 1) - tlv_data += CoreExecuteTlv.pack(ExecuteTlvs.COMMAND.value, command) - return coreapi.CoreExecMessage.pack(MessageFlags.STRING.value | MessageFlags.TEXT.value, tlv_data) - - -def state_message(state): - """ - Create a event TLV message for a new state. - - :param core.enumerations.EventTypes state: state to create message for - :return: packed event message - """ - tlv_data = coreapi.CoreEventTlv.pack(EventTlvs.TYPE.value, state.value) - return coreapi.CoreEventMessage.pack(0, tlv_data) - - -def switch_link_message(switch, node, address, prefix_len): - """ - Create a link TLV message for node to a switch, with the provided address and prefix length. 
- - :param switch: switch for link - :param node: node for link - :param address: address node on link - :param prefix_len: prefix length of address - :return: packed link message - """ - tlv_data = coreapi.CoreLinkTlv.pack(LinkTlvs.N1_NUMBER.value, switch.id) - tlv_data += coreapi.CoreLinkTlv.pack(LinkTlvs.N2_NUMBER.value, node.id) - tlv_data += coreapi.CoreLinkTlv.pack(LinkTlvs.TYPE.value, LinkTypes.WIRED.value) - tlv_data += coreapi.CoreLinkTlv.pack(LinkTlvs.INTERFACE2_NUMBER.value, 0) - tlv_data += coreapi.CoreLinkTlv.pack(LinkTlvs.INTERFACE2_IP4.value, address) - tlv_data += coreapi.CoreLinkTlv.pack(LinkTlvs.INTERFACE2_IP4_MASK.value, prefix_len) - return coreapi.CoreLinkMessage.pack(MessageFlags.ADD.value, tlv_data) - - -def run_cmd(node, exec_cmd): - """ - Convenience method for sending commands to a node using the legacy API. - - :param node: The node the command should be issued too - :param exec_cmd: A string with the command to be run - :return: Returns the result of the command - """ - # Set up the command api message - # tlv_data = CoreExecuteTlv.pack(ExecuteTlvs.NODE.value, node.id) - # tlv_data += CoreExecuteTlv.pack(ExecuteTlvs.NUMBER.value, 1) - # tlv_data += CoreExecuteTlv.pack(ExecuteTlvs.COMMAND.value, exec_cmd) - # message = coreapi.CoreExecMessage.pack(MessageFlags.STRING.value | MessageFlags.TEXT.value, tlv_data) - message = command_message(node, exec_cmd) - node.session.broker.handlerawmsg(message) - - # Now wait for the response - server = node.session.broker.servers["localhost"] - server.sock.settimeout(50.0) - - # receive messages until we get our execute response - result = None - status = False - while True: - message_header = server.sock.recv(coreapi.CoreMessage.header_len) - message_type, message_flags, message_length = coreapi.CoreMessage.unpack_header(message_header) - message_data = server.sock.recv(message_length) - - # If we get the right response return the results - print("received response message: %s" % message_type) - if 
message_type == MessageTypes.EXECUTE.value: - message = coreapi.CoreExecMessage(message_flags, message_header, message_data) - result = message.get_tlv(ExecuteTlvs.RESULT.value) - status = message.get_tlv(ExecuteTlvs.STATUS.value) - break - - return result, status - - class TestGui: @pytest.mark.parametrize("node_type, model", [ (NodeTypes.DEFAULT, "PC"), @@ -154,14 +71,13 @@ class TestGui: with pytest.raises(KeyError): coreserver.session.get_node(node_id) - def test_link_add(self, coreserver): + def test_link_add_node_to_net(self, coreserver): node_one = 1 coreserver.session.add_node(_id=node_one) switch = 2 coreserver.session.add_node(_id=switch, _type=NodeTypes.SWITCH) ip_prefix = Ipv4Prefix("10.0.0.0/24") interface_one = ip_prefix.addr(node_one) - coreserver.session.add_link(node_one, switch, interface_one) message = coreapi.CoreLinkMessage.create(MessageFlags.ADD.value, [ (LinkTlvs.N1_NUMBER, node_one), (LinkTlvs.N2_NUMBER, switch), @@ -176,6 +92,54 @@ class TestGui: all_links = switch_node.all_link_data(0) assert len(all_links) == 1 + def test_link_add_net_to_node(self, coreserver): + node_one = 1 + coreserver.session.add_node(_id=node_one) + switch = 2 + coreserver.session.add_node(_id=switch, _type=NodeTypes.SWITCH) + ip_prefix = Ipv4Prefix("10.0.0.0/24") + interface_one = ip_prefix.addr(node_one) + message = coreapi.CoreLinkMessage.create(MessageFlags.ADD.value, [ + (LinkTlvs.N1_NUMBER, switch), + (LinkTlvs.N2_NUMBER, node_one), + (LinkTlvs.INTERFACE2_NUMBER, 0), + (LinkTlvs.INTERFACE2_IP4, interface_one), + (LinkTlvs.INTERFACE2_IP4_MASK, 24), + ]) + + coreserver.request_handler.handle_message(message) + + switch_node = coreserver.session.get_node(switch) + all_links = switch_node.all_link_data(0) + assert len(all_links) == 1 + + def test_link_add_node_to_node(self, coreserver): + node_one = 1 + coreserver.session.add_node(_id=node_one) + node_two = 2 + coreserver.session.add_node(_id=node_two) + ip_prefix = Ipv4Prefix("10.0.0.0/24") + interface_one = 
ip_prefix.addr(node_one) + interface_two = ip_prefix.addr(node_two) + message = coreapi.CoreLinkMessage.create(MessageFlags.ADD.value, [ + (LinkTlvs.N1_NUMBER, node_one), + (LinkTlvs.N2_NUMBER, node_two), + (LinkTlvs.INTERFACE1_NUMBER, 0), + (LinkTlvs.INTERFACE1_IP4, interface_one), + (LinkTlvs.INTERFACE1_IP4_MASK, 24), + (LinkTlvs.INTERFACE2_NUMBER, 0), + (LinkTlvs.INTERFACE2_IP4, interface_two), + (LinkTlvs.INTERFACE2_IP4_MASK, 24), + ]) + + coreserver.request_handler.handle_message(message) + + all_links = [] + for node_id in coreserver.session.nodes: + node = coreserver.session.nodes[node_id] + all_links += node.all_link_data(0) + assert len(all_links) == 1 + def test_link_update(self, coreserver): node_one = 1 coreserver.session.add_node(_id=node_one) @@ -309,3 +273,288 @@ class TestGui: switch_node = coreserver.session.get_node(switch) all_links = switch_node.all_link_data(0) assert len(all_links) == 0 + + def test_session_update(self, coreserver): + session_id = coreserver.session.id + name = "test" + message = coreapi.CoreSessionMessage.create(0, [ + (SessionTlvs.NUMBER, str(session_id)), + (SessionTlvs.NAME, name), + ]) + + coreserver.request_handler.handle_message(message) + + assert coreserver.session.name == name + + def test_session_query(self, coreserver): + coreserver.request_handler.dispatch_replies = mock.MagicMock() + message = coreapi.CoreSessionMessage.create(MessageFlags.STRING.value, []) + + coreserver.request_handler.handle_message(message) + + args, _ = coreserver.request_handler.dispatch_replies.call_args + replies = args[0] + assert len(replies) == 1 + + def test_session_join(self, coreserver): + coreserver.request_handler.dispatch_replies = mock.MagicMock() + session_id = coreserver.session.id + message = coreapi.CoreSessionMessage.create(MessageFlags.ADD.value, [ + (SessionTlvs.NUMBER, str(session_id)), + ]) + + coreserver.request_handler.handle_message(message) + + assert coreserver.request_handler.session.id == session_id + + def 
test_session_delete(self, coreserver): + assert len(coreserver.server.coreemu.sessions) == 1 + session_id = coreserver.session.id + message = coreapi.CoreSessionMessage.create(MessageFlags.DELETE.value, [ + (SessionTlvs.NUMBER, str(session_id)), + ]) + + coreserver.request_handler.handle_message(message) + + assert len(coreserver.server.coreemu.sessions) == 0 + + def test_file_hook_add(self, coreserver): + state = EventTypes.DATACOLLECT_STATE.value + assert coreserver.session._hooks.get(state) is None + file_name = "test.sh" + file_data = "echo hello" + message = coreapi.CoreFileMessage.create(MessageFlags.ADD.value, [ + (FileTlvs.TYPE, "hook:%s" % state), + (FileTlvs.NAME, file_name), + (FileTlvs.DATA, file_data), + ]) + + coreserver.request_handler.handle_message(message) + + hooks = coreserver.session._hooks.get(state) + assert len(hooks) == 1 + name, data = hooks[0] + assert file_name == name + assert file_data == data + + def test_file_service_file_set(self, coreserver): + node = coreserver.session.add_node() + service = "DefaultRoute" + file_name = "defaultroute.sh" + file_data = "echo hello" + message = coreapi.CoreFileMessage.create(MessageFlags.ADD.value, [ + (FileTlvs.NODE, node.id), + (FileTlvs.TYPE, "service:%s" % service), + (FileTlvs.NAME, file_name), + (FileTlvs.DATA, file_data), + ]) + + coreserver.request_handler.handle_message(message) + + service_file = coreserver.session.services.get_service_file(node, service, file_name) + assert file_data == service_file.data + + def test_file_node_file_copy(self, coreserver): + file_name = "/var/log/test/node.log" + node = coreserver.session.add_node() + node.makenodedir() + file_data = "echo hello" + message = coreapi.CoreFileMessage.create(MessageFlags.ADD.value, [ + (FileTlvs.NODE, node.id), + (FileTlvs.NAME, file_name), + (FileTlvs.DATA, file_data), + ]) + + coreserver.request_handler.handle_message(message) + + directory, basename = os.path.split(file_name) + created_directory = 
directory[1:].replace("/", ".") + create_path = os.path.join(node.nodedir, created_directory, basename) + assert os.path.exists(create_path) + + def test_exec_node_tty(self, coreserver): + coreserver.request_handler.dispatch_replies = mock.MagicMock() + node = coreserver.session.add_node() + node.startup() + message = coreapi.CoreExecMessage.create(MessageFlags.TTY.value, [ + (ExecuteTlvs.NODE, node.id), + (ExecuteTlvs.NUMBER, 1), + (ExecuteTlvs.COMMAND, "bash") + ]) + + coreserver.request_handler.handle_message(message) + + args, _ = coreserver.request_handler.dispatch_replies.call_args + replies = args[0] + assert len(replies) == 1 + + def test_exec_local_command(self, coreserver): + coreserver.request_handler.dispatch_replies = mock.MagicMock() + node = coreserver.session.add_node() + node.startup() + message = coreapi.CoreExecMessage.create( + MessageFlags.TEXT.value | MessageFlags.LOCAL.value, [ + (ExecuteTlvs.NODE, node.id), + (ExecuteTlvs.NUMBER, 1), + (ExecuteTlvs.COMMAND, "echo hello") + ]) + + coreserver.request_handler.handle_message(message) + + args, _ = coreserver.request_handler.dispatch_replies.call_args + replies = args[0] + assert len(replies) == 1 + + def test_exec_node_command(self, coreserver): + coreserver.request_handler.dispatch_replies = mock.MagicMock() + node = coreserver.session.add_node() + node.startup() + message = coreapi.CoreExecMessage.create( + MessageFlags.TEXT.value, [ + (ExecuteTlvs.NODE, node.id), + (ExecuteTlvs.NUMBER, 1), + (ExecuteTlvs.COMMAND, "echo hello") + ]) + + coreserver.request_handler.handle_message(message) + + args, _ = coreserver.request_handler.dispatch_replies.call_args + replies = args[0] + assert len(replies) == 1 + + @pytest.mark.parametrize("state", [ + EventTypes.SHUTDOWN_STATE, + EventTypes.RUNTIME_STATE, + EventTypes.DATACOLLECT_STATE, + EventTypes.CONFIGURATION_STATE, + EventTypes.DEFINITION_STATE + ]) + def test_event_state(self, coreserver, state): + message = coreapi.CoreEventMessage.create(0, [ + 
(EventTlvs.TYPE, state.value), + ]) + + coreserver.request_handler.handle_message(message) + + assert coreserver.session.state == state.value + + def test_event_schedule(self, coreserver): + coreserver.session.add_event = mock.MagicMock() + node = coreserver.session.add_node() + message = coreapi.CoreEventMessage.create(MessageFlags.ADD.value, [ + (EventTlvs.TYPE, EventTypes.SCHEDULED.value), + (EventTlvs.TIME, str(time.time() + 100)), + (EventTlvs.NODE, node.id), + (EventTlvs.NAME, "event"), + (EventTlvs.DATA, "data"), + ]) + + coreserver.request_handler.handle_message(message) + + coreserver.session.add_event.assert_called_once() + + def test_event_save_xml(self, coreserver, tmpdir): + xml_file = tmpdir.join("session.xml") + file_path = xml_file.strpath + coreserver.session.add_node() + message = coreapi.CoreEventMessage.create(0, [ + (EventTlvs.TYPE, EventTypes.FILE_SAVE.value), + (EventTlvs.NAME, file_path), + ]) + + coreserver.request_handler.handle_message(message) + + assert os.path.exists(file_path) + + def test_event_open_xml(self, coreserver, tmpdir): + xml_file = tmpdir.join("session.xml") + file_path = xml_file.strpath + node = coreserver.session.add_node() + coreserver.session.save_xml(file_path) + coreserver.session.delete_node(node.id) + message = coreapi.CoreEventMessage.create(0, [ + (EventTlvs.TYPE, EventTypes.FILE_OPEN.value), + (EventTlvs.NAME, file_path), + ]) + + coreserver.request_handler.handle_message(message) + + assert coreserver.session.get_node(node.id) + + @pytest.mark.parametrize("state", [ + EventTypes.START, + EventTypes.STOP, + EventTypes.RESTART, + EventTypes.PAUSE, + EventTypes.RECONFIGURE + ]) + def test_event_service(self, coreserver, state): + coreserver.session.broadcast_event = mock.MagicMock() + node = coreserver.session.add_node() + node.startup() + message = coreapi.CoreEventMessage.create(0, [ + (EventTlvs.TYPE, state.value), + (EventTlvs.NODE, node.id), + (EventTlvs.NAME, "service:DefaultRoute"), + ]) + + 
coreserver.request_handler.handle_message(message) + + coreserver.session.broadcast_event.assert_called_once() + + @pytest.mark.parametrize("state", [ + EventTypes.START, + EventTypes.STOP, + EventTypes.RESTART, + EventTypes.PAUSE, + EventTypes.RECONFIGURE + ]) + def test_event_mobility(self, coreserver, state): + coreserver.session.broadcast_event = mock.MagicMock() + message = coreapi.CoreEventMessage.create(0, [ + (EventTlvs.TYPE, state.value), + (EventTlvs.NAME, "mobility:ns2script"), + ]) + + coreserver.request_handler.handle_message(message) + + def test_register_gui(self, coreserver): + coreserver.request_handler.master = False + message = coreapi.CoreRegMessage.create(0, [ + (RegisterTlvs.GUI, "gui"), + ]) + + coreserver.request_handler.handle_message(message) + + assert coreserver.request_handler.master is True + + def test_register_xml(self, coreserver, tmpdir): + xml_file = tmpdir.join("session.xml") + file_path = xml_file.strpath + node = coreserver.session.add_node() + coreserver.session.save_xml(file_path) + coreserver.session.delete_node(node.id) + message = coreapi.CoreRegMessage.create(0, [ + (RegisterTlvs.EXECUTE_SERVER, file_path), + ]) + coreserver.session.instantiate() + + coreserver.request_handler.handle_message(message) + + assert coreserver.server.coreemu.sessions[2].get_node(node.id) + + def test_register_python(self, coreserver, tmpdir): + xml_file = tmpdir.join("test.py") + file_path = xml_file.strpath + with open(file_path, "w") as f: + f.write("coreemu = globals()['coreemu']\n") + f.write("session = coreemu.sessions[1]\n") + f.write("session.add_node()\n") + message = coreapi.CoreRegMessage.create(0, [ + (RegisterTlvs.EXECUTE_SERVER, file_path), + ]) + coreserver.session.instantiate() + + coreserver.request_handler.handle_message(message) + + assert len(coreserver.session.nodes) == 1 From ca10ef4e9e250c1242f82e2eebf2d5ccfd411f16 Mon Sep 17 00:00:00 2001 From: Blake Harnden Date: Tue, 25 Jun 2019 12:29:46 -0700 Subject: [PATCH 37/51] 
finished initial pass on adding tests to help run through and validate tlv message handling --- daemon/core/api/tlv/corehandlers.py | 4 +- daemon/core/location/mobility.py | 8 + daemon/tests/test_gui.py | 295 +++++++++++++++++++++++++++- 3 files changed, 304 insertions(+), 3 deletions(-) diff --git a/daemon/core/api/tlv/corehandlers.py b/daemon/core/api/tlv/corehandlers.py index 682978d6..de0c62f7 100644 --- a/daemon/core/api/tlv/corehandlers.py +++ b/daemon/core/api/tlv/corehandlers.py @@ -1035,8 +1035,10 @@ class CoreHandler(socketserver.BaseRequestHandler): if message_type == ConfigFlags.REQUEST: node_id = config_data.node metadata_configs = self.session.metadata.get_configs() + if metadata_configs is None: + metadata_configs = {} data_values = "|".join(["%s=%s" % (x, metadata_configs[x]) for x in metadata_configs]) - data_types = tuple(ConfigDataTypes.STRING.value for _ in self.session.metadata.get_configs()) + data_types = tuple(ConfigDataTypes.STRING.value for _ in metadata_configs) config_response = ConfigData( message_type=0, node=node_id, diff --git a/daemon/core/location/mobility.py b/daemon/core/location/mobility.py index a63dce65..c35cd1ba 100644 --- a/daemon/core/location/mobility.py +++ b/daemon/core/location/mobility.py @@ -53,6 +53,14 @@ class MobilityManager(ModelManager): self.physnets = {} self.session.broker.handlers.add(self.physnodehandlelink) + def reset(self): + """ + Clear out all current configurations. + + :return: nothing + """ + self.config_reset() + def startup(self, node_ids=None): """ Session is transitioning from instantiation to runtime state. 
diff --git a/daemon/tests/test_gui.py b/daemon/tests/test_gui.py index fd9f6bed..eeec6e3e 100644 --- a/daemon/tests/test_gui.py +++ b/daemon/tests/test_gui.py @@ -8,14 +8,21 @@ import mock import pytest from core.api.tlv import coreapi -from core.emulator.enumerations import EventTlvs, SessionTlvs, EventTypes, FileTlvs, RegisterTlvs +from core.emane.ieee80211abg import EmaneIeee80211abgModel +from core.emulator.enumerations import EventTlvs, SessionTlvs, EventTypes, FileTlvs, RegisterTlvs, ConfigTlvs, \ + ConfigFlags from core.emulator.enumerations import ExecuteTlvs from core.emulator.enumerations import LinkTlvs from core.emulator.enumerations import MessageFlags from core.emulator.enumerations import NodeTypes, NodeTlvs +from core.location.mobility import BasicRangeModel from core.nodes.ipaddress import Ipv4Prefix +def dict_to_str(values): + return "|".join("%s=%s" % (x, values[x]) for x in values) + + class TestGui: @pytest.mark.parametrize("node_type, model", [ (NodeTypes.DEFAULT, "PC"), @@ -510,7 +517,6 @@ class TestGui: EventTypes.RECONFIGURE ]) def test_event_mobility(self, coreserver, state): - coreserver.session.broadcast_event = mock.MagicMock() message = coreapi.CoreEventMessage.create(0, [ (EventTlvs.TYPE, state.value), (EventTlvs.NAME, "mobility:ns2script"), @@ -558,3 +564,288 @@ class TestGui: coreserver.request_handler.handle_message(message) assert len(coreserver.session.nodes) == 1 + + def test_config_all(self, coreserver): + node = coreserver.session.add_node() + message = coreapi.CoreConfMessage.create(MessageFlags.ADD.value, [ + (ConfigTlvs.OBJECT, "all"), + (ConfigTlvs.NODE, node.id), + (ConfigTlvs.TYPE, ConfigFlags.RESET.value), + ]) + coreserver.session.location.reset = mock.MagicMock() + + coreserver.request_handler.handle_message(message) + + coreserver.session.location.reset.assert_called_once() + + def test_config_options_request(self, coreserver): + message = coreapi.CoreConfMessage.create(0, [ + (ConfigTlvs.OBJECT, "session"), + 
(ConfigTlvs.TYPE, ConfigFlags.REQUEST.value), + ]) + coreserver.request_handler.handle_broadcast_config = mock.MagicMock() + + coreserver.request_handler.handle_message(message) + + coreserver.request_handler.handle_broadcast_config.assert_called_once() + + def test_config_options_update(self, coreserver): + test_key = "test" + test_value = "test" + values = { + test_key: test_value + } + message = coreapi.CoreConfMessage.create(0, [ + (ConfigTlvs.OBJECT, "session"), + (ConfigTlvs.TYPE, ConfigFlags.UPDATE.value), + (ConfigTlvs.VALUES, dict_to_str(values)), + ]) + + coreserver.request_handler.handle_message(message) + + assert coreserver.session.options.get_config(test_key) == test_value + + def test_config_location_reset(self, coreserver): + message = coreapi.CoreConfMessage.create(0, [ + (ConfigTlvs.OBJECT, "location"), + (ConfigTlvs.TYPE, ConfigFlags.RESET.value), + ]) + coreserver.session.location.refxyz = (10, 10, 10) + + coreserver.request_handler.handle_message(message) + + assert coreserver.session.location.refxyz == (0, 0, 0) + + def test_config_location_update(self, coreserver): + message = coreapi.CoreConfMessage.create(0, [ + (ConfigTlvs.OBJECT, "location"), + (ConfigTlvs.TYPE, ConfigFlags.UPDATE.value), + (ConfigTlvs.VALUES, "10|10|70|50|0|0.5"), + ]) + + coreserver.request_handler.handle_message(message) + + assert coreserver.session.location.refxyz == (10, 10, 0.0) + assert coreserver.session.location.refgeo == (70, 50, 0) + assert coreserver.session.location.refscale == 0.5 + + def test_config_metadata_request(self, coreserver): + message = coreapi.CoreConfMessage.create(0, [ + (ConfigTlvs.OBJECT, "metadata"), + (ConfigTlvs.TYPE, ConfigFlags.REQUEST.value), + ]) + coreserver.request_handler.handle_broadcast_config = mock.MagicMock() + + coreserver.request_handler.handle_message(message) + + coreserver.request_handler.handle_broadcast_config.assert_called_once() + + def test_config_metadata_update(self, coreserver): + test_key = "test" + test_value = 
"test" + values = { + test_key: test_value + } + message = coreapi.CoreConfMessage.create(0, [ + (ConfigTlvs.OBJECT, "metadata"), + (ConfigTlvs.TYPE, ConfigFlags.UPDATE.value), + (ConfigTlvs.VALUES, dict_to_str(values)), + ]) + + coreserver.request_handler.handle_message(message) + + assert coreserver.session.metadata.get_config(test_key) == test_value + + def test_config_broker_request(self, coreserver): + server = "test" + host = "10.0.0.1" + port = 50000 + message = coreapi.CoreConfMessage.create(0, [ + (ConfigTlvs.OBJECT, "broker"), + (ConfigTlvs.TYPE, ConfigFlags.UPDATE.value), + (ConfigTlvs.VALUES, "%s:%s:%s" % (server, host, port)), + ]) + coreserver.session.broker.addserver = mock.MagicMock() + coreserver.session.broker.setupserver = mock.MagicMock() + + coreserver.request_handler.handle_message(message) + + coreserver.session.broker.addserver.assert_called_once_with(server, host, port) + coreserver.session.broker.setupserver.assert_called_once_with(server) + + def test_config_services_request_all(self, coreserver): + message = coreapi.CoreConfMessage.create(0, [ + (ConfigTlvs.OBJECT, "services"), + (ConfigTlvs.TYPE, ConfigFlags.REQUEST.value), + ]) + coreserver.request_handler.handle_broadcast_config = mock.MagicMock() + + coreserver.request_handler.handle_message(message) + + coreserver.request_handler.handle_broadcast_config.assert_called_once() + + def test_config_services_request_specific(self, coreserver): + node = coreserver.session.add_node() + message = coreapi.CoreConfMessage.create(0, [ + (ConfigTlvs.NODE, node.id), + (ConfigTlvs.OBJECT, "services"), + (ConfigTlvs.TYPE, ConfigFlags.REQUEST.value), + (ConfigTlvs.OPAQUE, "service:DefaultRoute"), + ]) + coreserver.request_handler.handle_broadcast_config = mock.MagicMock() + + coreserver.request_handler.handle_message(message) + + coreserver.request_handler.handle_broadcast_config.assert_called_once() + + def test_config_services_request_specific_file(self, coreserver): + node = 
coreserver.session.add_node() + message = coreapi.CoreConfMessage.create(0, [ + (ConfigTlvs.NODE, node.id), + (ConfigTlvs.OBJECT, "services"), + (ConfigTlvs.TYPE, ConfigFlags.REQUEST.value), + (ConfigTlvs.OPAQUE, "service:DefaultRoute:defaultroute.sh"), + ]) + coreserver.session.broadcast_file = mock.MagicMock() + + coreserver.request_handler.handle_message(message) + + coreserver.session.broadcast_file.assert_called_once() + + def test_config_services_reset(self, coreserver): + node = coreserver.session.add_node() + service = "DefaultRoute" + coreserver.session.services.set_service(node.id, service) + message = coreapi.CoreConfMessage.create(0, [ + (ConfigTlvs.OBJECT, "services"), + (ConfigTlvs.TYPE, ConfigFlags.RESET.value), + ]) + assert coreserver.session.services.get_service(node.id, service) is not None + + coreserver.request_handler.handle_message(message) + + assert coreserver.session.services.get_service(node.id, service) is None + + def test_config_services_set(self, coreserver): + node = coreserver.session.add_node() + service = "DefaultRoute" + values = { + "meta": "metadata" + } + message = coreapi.CoreConfMessage.create(0, [ + (ConfigTlvs.NODE, node.id), + (ConfigTlvs.OBJECT, "services"), + (ConfigTlvs.TYPE, ConfigFlags.UPDATE.value), + (ConfigTlvs.OPAQUE, "service:%s" % service), + (ConfigTlvs.VALUES, dict_to_str(values)), + ]) + assert coreserver.session.services.get_service(node.id, service) is None + + coreserver.request_handler.handle_message(message) + + assert coreserver.session.services.get_service(node.id, service) is not None + + def test_config_mobility_reset(self, coreserver): + wlan = coreserver.session.add_node(_type=NodeTypes.WIRELESS_LAN) + message = coreapi.CoreConfMessage.create(0, [ + (ConfigTlvs.OBJECT, "MobilityManager"), + (ConfigTlvs.TYPE, ConfigFlags.RESET.value), + ]) + coreserver.session.mobility.set_model_config(wlan.id, BasicRangeModel.name, {}) + assert len(coreserver.session.mobility.node_configurations) == 1 + + 
coreserver.request_handler.handle_message(message) + + assert len(coreserver.session.mobility.node_configurations) == 0 + + def test_config_mobility_model_request(self, coreserver): + wlan = coreserver.session.add_node(_type=NodeTypes.WIRELESS_LAN) + message = coreapi.CoreConfMessage.create(0, [ + (ConfigTlvs.NODE, wlan.id), + (ConfigTlvs.OBJECT, BasicRangeModel.name), + (ConfigTlvs.TYPE, ConfigFlags.REQUEST.value), + ]) + coreserver.request_handler.handle_broadcast_config = mock.MagicMock() + + coreserver.request_handler.handle_message(message) + + coreserver.request_handler.handle_broadcast_config.assert_called_once() + + def test_config_mobility_model_update(self, coreserver): + wlan = coreserver.session.add_node(_type=NodeTypes.WIRELESS_LAN) + config_key = "range" + config_value = "1000" + values = { + config_key: config_value + } + message = coreapi.CoreConfMessage.create(0, [ + (ConfigTlvs.NODE, wlan.id), + (ConfigTlvs.OBJECT, BasicRangeModel.name), + (ConfigTlvs.TYPE, ConfigFlags.UPDATE.value), + (ConfigTlvs.VALUES, dict_to_str(values)), + ]) + + coreserver.request_handler.handle_message(message) + + config = coreserver.session.mobility.get_model_config(wlan.id, BasicRangeModel.name) + assert config[config_key] == config_value + + def test_config_emane_model_request(self, coreserver): + wlan = coreserver.session.add_node(_type=NodeTypes.WIRELESS_LAN) + message = coreapi.CoreConfMessage.create(0, [ + (ConfigTlvs.NODE, wlan.id), + (ConfigTlvs.OBJECT, EmaneIeee80211abgModel.name), + (ConfigTlvs.TYPE, ConfigFlags.REQUEST.value), + ]) + coreserver.request_handler.handle_broadcast_config = mock.MagicMock() + + coreserver.request_handler.handle_message(message) + + coreserver.request_handler.handle_broadcast_config.assert_called_once() + + def test_config_emane_model_update(self, coreserver): + wlan = coreserver.session.add_node(_type=NodeTypes.WIRELESS_LAN) + config_key = "distance" + config_value = "50051" + values = { + config_key: config_value + } + message = 
coreapi.CoreConfMessage.create(0, [ + (ConfigTlvs.NODE, wlan.id), + (ConfigTlvs.OBJECT, EmaneIeee80211abgModel.name), + (ConfigTlvs.TYPE, ConfigFlags.UPDATE.value), + (ConfigTlvs.VALUES, dict_to_str(values)), + ]) + + coreserver.request_handler.handle_message(message) + + config = coreserver.session.emane.get_model_config(wlan.id, EmaneIeee80211abgModel.name) + assert config[config_key] == config_value + + def test_config_emane_request(self, coreserver): + message = coreapi.CoreConfMessage.create(0, [ + (ConfigTlvs.OBJECT, "emane"), + (ConfigTlvs.TYPE, ConfigFlags.REQUEST.value), + ]) + coreserver.request_handler.handle_broadcast_config = mock.MagicMock() + + coreserver.request_handler.handle_message(message) + + coreserver.request_handler.handle_broadcast_config.assert_called_once() + + def test_config_emane_update(self, coreserver): + config_key = "eventservicedevice" + config_value = "eth4" + values = { + config_key: config_value + } + message = coreapi.CoreConfMessage.create(0, [ + (ConfigTlvs.OBJECT, "emane"), + (ConfigTlvs.TYPE, ConfigFlags.UPDATE.value), + (ConfigTlvs.VALUES, dict_to_str(values)), + ]) + + coreserver.request_handler.handle_message(message) + + config = coreserver.session.emane.get_configs() + assert config[config_key] == config_value From f6af078e7ed66a03a38b8e1d066c96716f5be466 Mon Sep 17 00:00:00 2001 From: Blake Harnden Date: Tue, 25 Jun 2019 16:03:37 -0700 Subject: [PATCH 38/51] pass at cleaning up custom service documentation and removing the need for pycco for this case --- daemon/examples/myservices/README.txt | 26 -- daemon/examples/myservices/sample.py | 95 ++++--- docs/exampleservice.html | 344 -------------------------- docs/services.md | 24 +- 4 files changed, 82 insertions(+), 407 deletions(-) delete mode 100644 daemon/examples/myservices/README.txt delete mode 100644 docs/exampleservice.html diff --git a/daemon/examples/myservices/README.txt b/daemon/examples/myservices/README.txt deleted file mode 100644 index 
0f92f698..00000000 --- a/daemon/examples/myservices/README.txt +++ /dev/null @@ -1,26 +0,0 @@ -This directory contains a sample custom service that you can use as a template -for creating your own services. - -Follow these steps to add your own services: - -1. Modify the sample service MyService to do what you want. It could generate - config/script files, mount per-node directories, start processes/scripts, - etc. sample.py is a Python file that defines one or more classes to be - imported. You can create multiple Python files that will be imported. - Add any new filenames to the __init__.py file. - -2. Put these files in a directory such as /home/username/.core/myservices - Note that the last component of this directory name 'myservices' should not - be named something like 'services' which conflicts with an existing Python - name (the syntax 'from myservices import *' is used). - -3. Add a 'custom_services_dir = /home/username/.core/myservices' entry to the - /etc/core/core.conf file. - -4. Restart the CORE daemon (core-daemon). Any import errors (Python syntax) - should be displayed in the /var/log/core-daemon.log log file (or on screen). - -5. Start using your custom service on your nodes. You can create a new node - type that uses your service, or change the default services for an existing - node type, or change individual nodes. - diff --git a/daemon/examples/myservices/sample.py b/daemon/examples/myservices/sample.py index 3a58345d..84069c80 100644 --- a/daemon/examples/myservices/sample.py +++ b/daemon/examples/myservices/sample.py @@ -1,64 +1,81 @@ """ -Sample user-defined service. +Simple example for a user-defined service. """ from core.services.coreservices import CoreService from core.services.coreservices import ServiceMode -## Custom CORE Service class MyService(CoreService): - ### Service Attributes + """ + Custom CORE Service - # Name used as a unique ID for this service and is required, no spaces. 
+ :var str name: name used as a unique ID for this service and is required, no spaces + :var str group: allows you to group services within the GUI under a common name + :var tuple executables: executables this service depends on to function, if executable is + not on the path, service will not be loaded + :var tuple dependencies: services that this service depends on for startup, tuple of service names + :var tuple dirs: directories that this service will create within a node + :var tuple configs: files that this service will generate, without a full path this file goes in + the node's directory e.g. /tmp/pycore.12345/n1.conf/myfile + :var tuple startup: commands used to start this service, any non-zero exit code will cause a failure + :var tuple validate: commands used to validate that a service was started, any non-zero exit code + will cause a failure + :var ServiceMode validation_mode: validation mode, used to determine startup success. + NON_BLOCKING - runs startup commands, and validates success with validation commands + BLOCKING - runs startup commands, and validates success with the startup commands themselves + TIMER - runs startup commands, and validates success by waiting for "validation_timer" alone + :var int validation_timer: time in seconds for a service to wait for validation, before determining + success in TIMER/NON_BLOCKING modes. + :var float validation_validation_period: period in seconds to wait before retrying validation, + only used in NON_BLOCKING mode + :var tuple shutdown: shutdown commands to stop this service + """ name = "MyService" - # Allows you to group services within the GUI under a common name. group = "Utility" - # Executables this service depends on to function, if executable is not on the path, service will not be loaded. executables = () - # Services that this service depends on for startup, tuple of service names. dependencies = () - # Directories that this service will create within a node. 
dirs = () - # Files that this service will generate, without a full path this file goes in the node's directory. - # e.g. /tmp/pycore.12345/n1.conf/myfile configs = ("myservice1.sh", "myservice2.sh") - # Commands used to start this service, any non-zero exit code will cause a failure. startup = ("sh %s" % configs[0], "sh %s" % configs[1]) - # Commands used to validate that a service was started, any non-zero exit code will cause a failure. validate = () - # Validation mode, used to determine startup success. - # - # * NON_BLOCKING - runs startup commands, and validates success with validation commands - # * BLOCKING - runs startup commands, and validates success with the startup commands themselves - # * TIMER - runs startup commands, and validates success by waiting for "validation_timer" alone validation_mode = ServiceMode.NON_BLOCKING - # Time in seconds for a service to wait for validation, before determining success in TIMER/NON_BLOCKING modes. validation_timer = 5 - # Period in seconds to wait before retrying validation, only used in NON_BLOCKING mode. validation_period = 0.5 - # Shutdown commands to stop this service. shutdown = () - ### On Load @classmethod def on_load(cls): - # Provides a way to run some arbitrary logic when the service is loaded, possibly to help facilitate - # dynamic settings for the environment. + """ + Provides a way to run some arbitrary logic when the service is loaded, possibly to help facilitate + dynamic settings for the environment. + + :return: nothing + """ pass - ### Get Configs @classmethod def get_configs(cls, node): - # Provides a way to dynamically generate the config files from the node a service will run. - # Defaults to the class definition and can be left out entirely if not needed. + """ + Provides a way to dynamically generate the config files from the node a service will run. + Defaults to the class definition and can be left out entirely if not needed. 
+ + :param node: core node that the service is being ran on + :return: tuple of config files to create + """ return cls.configs - ### Generate Config @classmethod def generate_config(cls, node, filename): - # Returns a string representation for a file, given the node the service is starting on the config filename - # that this information will be used for. This must be defined, if "configs" are defined. + """ + Returns a string representation for a file, given the node the service is starting on the config filename + that this information will be used for. This must be defined, if "configs" are defined. + + :param node: core node that the service is being ran on + :param str filename: configuration file to generate + :return: configuration file content + :rtype: str + """ cfg = "#!/bin/sh\n" if filename == cls.configs[0]: @@ -70,16 +87,24 @@ class MyService(CoreService): return cfg - ### Get Startup @classmethod def get_startup(cls, node): - # Provides a way to dynamically generate the startup commands from the node a service will run. - # Defaults to the class definition and can be left out entirely if not needed. + """ + Provides a way to dynamically generate the startup commands from the node a service will run. + Defaults to the class definition and can be left out entirely if not needed. + + :param node: core node that the service is being ran on + :return: tuple of startup commands to run + """ return cls.startup - ### Get Validate @classmethod def get_validate(cls, node): - # Provides a way to dynamically generate the validate commands from the node a service will run. - # Defaults to the class definition and can be left out entirely if not needed. + """ + Provides a way to dynamically generate the validate commands from the node a service will run. + Defaults to the class definition and can be left out entirely if not needed. 
+ + :param node: core node that the service is being ran on + :return: tuple of commands to validate service startup with + """ return cls.validate diff --git a/docs/exampleservice.html b/docs/exampleservice.html deleted file mode 100644 index cddb18d4..00000000 --- a/docs/exampleservice.html +++ /dev/null @@ -1,344 +0,0 @@ - - - - - sample.py - - - -
-
-
-

sample.py

-
-
-
-
-
- # -
-

Sample user-defined service.

-
-
-
from core.service import CoreService
-from core.service import ServiceMode
-
-
-
-
-
-
- # -
-

Custom CORE Service

-
-
-
class MyService(CoreService):
-
-
-
-
-
-
- # -
-

Service Attributes

-
-
-
-
-
-
-
-
-
- # -
-

Name used as a unique ID for this service and is required, no spaces.

-
-
-
    name = "MyService"
-
-
-
-
-
-
- # -
-

Allows you to group services within the GUI under a common name.

-
-
-
    group = "Utility"
-
-
-
-
-
-
- # -
-

Executables this service depends on to function, if executable is not on the path, service will not be loaded.

-
-
-
    executables = ()
-
-
-
-
-
-
- # -
-

Services that this service depends on for startup, tuple of service names.

-
-
-
    dependencies = ()
-
-
-
-
-
-
- # -
-

Directories that this service will create within a node.

-
-
-
    dirs = ()
-
-
-
-
-
-
- # -
-

Files that this service will generate, without a full path this file goes in the node’s directory. -e.g. /tmp/pycore.12345/n1.conf/myfile

-
-
-
    configs = ("myservice1.sh", "myservice2.sh")
-
-
-
-
-
-
- # -
-

Commands used to start this service, any non-zero exit code will cause a failure.

-
-
-
    startup = ("sh %s" % configs[0], "sh %s" % configs[1])
-
-
-
-
-
-
- # -
-

Commands used to validate that a service was started, any non-zero exit code will cause a failure.

-
-
-
    validate = ()
-
-
-
-
-
-
- # -
-

Validation mode, used to determine startup success.

-
    -
  • NON_BLOCKING - runs startup commands, and validates success with validation commands
  • -
  • BLOCKING - runs startup commands, and validates success with the startup commands themselves
  • -
  • TIMER - runs startup commands, and validates success by waiting for “validation_timer” alone
  • -
-
-
-
    validation_mode = ServiceMode.NON_BLOCKING
-
-
-
-
-
-
- # -
-

Time in seconds for a service to wait for validation, before determining success in TIMER/NON_BLOCKING modes.

-
-
-
    validation_timer = 5
-
-
-
-
-
-
- # -
-

Period in seconds to wait before retrying validation, only used in NON_BLOCKING mode.

-
-
-
    validation_period = 0.5
-
-
-
-
-
-
- # -
-

Shutdown commands to stop this service.

-
-
-
    shutdown = ()
-
-
-
-
-
-
- # -
-

On Load

-
-
-
    @classmethod
-    def on_load(cls):
-
-
-
-
-
-
- # -
-

Provides a way to run some arbitrary logic when the service is loaded, possibly to help facilitate -dynamic settings for the environment.

-
-
-
        pass
-
-
-
-
-
-
- # -
-

Get Configs

-
-
-
    @classmethod
-    def get_configs(cls, node):
-
-
-
-
-
-
- # -
-

Provides a way to dynamically generate the config files from the node a service will run. -Defaults to the class definition and can be left out entirely if not needed.

-
-
-
        return cls.configs
-
-
-
-
-
-
- # -
-

Generate Config

-
-
-
    @classmethod
-    def generate_config(cls, node, filename):
-
-
-
-
-
-
- # -
-

Returns a string representation for a file, given the node the service is starting on the config filename -that this information will be used for. This must be defined, if “configs” are defined.

-
-
-
        cfg = "#!/bin/sh\n"
-
-        if filename == cls.configs[0]:
-            cfg += "# auto-generated by MyService (sample.py)\n"
-            for ifc in node.netifs():
-                cfg += 'echo "Node %s has interface %s"\n' % (node.name, ifc.name)
-        elif filename == cls.configs[1]:
-            cfg += "echo hello"
-
-        return cfg
-
-
-
-
-
-
- # -
-

Get Startup

-
-
-
    @classmethod
-    def get_startup(cls, node):
-
-
-
-
-
-
- # -
-

Provides a way to dynamically generate the startup commands from the node a service will run. -Defaults to the class definition and can be left out entirely if not needed.

-
-
-
        return cls.startup
-
-
-
-
-
-
- # -
-

Get Validate

-
-
-
    @classmethod
-    def get_validate(cls, node):
-
-
-
-
-
-
- # -
-

Provides a way to dynamically generate the validate commands from the node a service will run. -Defaults to the class definition and can be left out entirely if not needed.

-
-
-
        return cls.validate
-
-
-
-
-
-
- diff --git a/docs/services.md b/docs/services.md index 793e6f99..32d98f86 100644 --- a/docs/services.md +++ b/docs/services.md @@ -9,5 +9,25 @@ CORE supports custom developed services by way of dynamically loading user creat Custom services should be placed within the path defined by **custom_services_dir** in the CORE configuration file. This path cannot end in **/services**. -Here is an example service with documentation describing functionality: -[Example Service](exampleservice.html) +Follow these steps to add your own services: + +1. Modify the [Example Service File](/daemon/examples/myservices/sample.py) + to do what you want. It could generate config/script files, mount per-node + directories, start processes/scripts, etc. sample.py is a Python file that + defines one or more classes to be imported. You can create multiple Python + files that will be imported. Add any new filenames to the __init__.py file. + +2. Put these files in a directory such as /home/username/.core/myservices + Note that the last component of this directory name **myservices** should not + be named something like **services** which conflicts with an existing Python + name (the syntax 'from myservices import *' is used). + +3. Add a **custom_services_dir = /home/username/.core/myservices** entry to the + /etc/core/core.conf file. + +4. Restart the CORE daemon (core-daemon). Any import errors (Python syntax) + should be displayed in the /var/log/core-daemon.log log file (or on screen). + +5. Start using your custom service on your nodes. You can create a new node + type that uses your service, or change the default services for an existing + node type, or change individual nodes. 
From 2b5e3666b21f1ac71b5a08b7c3dcf434a64f36f9 Mon Sep 17 00:00:00 2001 From: SaintYomar Date: Wed, 26 Jun 2019 10:24:07 -0500 Subject: [PATCH 39/51] Removed LXC references in architecture documentation --- docs/architecture.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/architecture.md b/docs/architecture.md index 8599afba..724bca79 100644 --- a/docs/architecture.md +++ b/docs/architecture.md @@ -26,7 +26,7 @@ A CORE node is a lightweight virtual machine. The CORE framework runs on Linux. ### Linux -Linux network namespaces (also known as netns, LXC, or [Linux containers](http://lxc.sourceforge.net/)) is the primary virtualization technique used by CORE. LXC has been part of the mainline Linux kernel since 2.6.24. Most recent Linux distributions have namespaces-enabled kernels out of the box. A namespace is created using the ```clone()``` system call. Each namespace has its own process environment and private network stack. Network namespaces share the same filesystem in CORE. +Linux network namespaces (also known as netns) is the primary virtualization technique used by CORE. Most recent Linux distributions have namespaces-enabled kernels out of the box. A namespace is created using the ```clone()``` system call. Each namespace has its own process environment and private network stack. Network namespaces share the same filesystem in CORE. CORE combines these namespaces with Linux Ethernet bridging to form networks. Link characteristics are applied using Linux Netem queuing disciplines. Ebtables is Ethernet frame filtering on Linux bridges. Wireless networks are emulated by controlling which interfaces can send and receive with ebtables rules. 
From 67595485c62c9faad6eb831d58735593d8690440 Mon Sep 17 00:00:00 2001 From: Blake Harnden Date: Wed, 26 Jun 2019 09:38:54 -0700 Subject: [PATCH 40/51] updated example custom emane model to get away from needing pycco altogether for documentation --- daemon/examples/myemane/examplemodel.py | 58 +++--- docs/emane.md | 4 +- docs/examplemodel.html | 239 ------------------------ docs/pycco.css | 190 ------------------- 4 files changed, 33 insertions(+), 458 deletions(-) delete mode 100644 docs/examplemodel.html delete mode 100644 docs/pycco.css diff --git a/daemon/examples/myemane/examplemodel.py b/daemon/examples/myemane/examplemodel.py index 308b6f1d..186de8e7 100644 --- a/daemon/examples/myemane/examplemodel.py +++ b/daemon/examples/myemane/examplemodel.py @@ -1,48 +1,54 @@ +""" +Example custom emane model. +""" + from core.emane import emanemanifest from core.emane import emanemodel -## Custom EMANE Model class ExampleModel(emanemodel.EmaneModel): - ### MAC Definition + """ + Custom emane model. 
+ + :var str name: defines the emane model name that will show up in the GUI + + Mac Definition: + :var str mac_library: defines that mac library that the model will reference + :var str mac_xml: defines the mac manifest file that will be parsed to obtain configuration options, + that will be displayed within the GUI + :var dict mac_mac_defaults: allows you to override options that are maintained within the manifest file above + :var list mac_mac_config: parses the manifest file and converts configurations into core supported formats + + Phy Definition: + NOTE: phy configuration will default to the universal model as seen below and the below section does not + have to be included + :var str phy_library: defines that phy library that the model will reference, used if you need to + provide a custom phy + :var str phy_xml: defines the phy manifest file that will be parsed to obtain configuration options, + that will be displayed within the GUI + :var dict phy_defaults: allows you to override options that are maintained within the manifest file above + or for the default universal model + :var list phy_config: parses the manifest file and converts configurations into core supported formats + + Custom Override Options: + NOTE: these options default to what's seen below and do not have to be included + :var set config_ignore: allows you to ignore options within phy/mac, used typically if you needed to add + a custom option for display within the gui + """ - # Defines the emane model name that will show up in the GUI. name = "emane_example" - - # Defines that mac library that the model will reference. mac_library = "rfpipemaclayer" - # Defines the mac manifest file that will be parsed to obtain configuration options, that will be displayed - # within the GUI. mac_xml = "/usr/share/emane/manifest/rfpipemaclayer.xml" - # Allows you to override options that are maintained within the manifest file above. 
mac_defaults = { "pcrcurveuri": "/usr/share/emane/xml/models/mac/rfpipe/rfpipepcr.xml", } - # Parses the manifest file and converts configurations into core supported formats. mac_config = emanemanifest.parse(mac_xml, mac_defaults) - - ### PHY Definition - # **NOTE: phy configuration will default to the universal model as seen below and the below section does not - # have to be included.** - - # Defines that phy library that the model will reference, used if you need to provide a custom phy. phy_library = None - # Defines the phy manifest file that will be parsed to obtain configuration options, that will be displayed - # within the GUI. phy_xml = "/usr/share/emane/manifest/emanephy.xml" - # Allows you to override options that are maintained within the manifest file above or for the default universal - # model. phy_defaults = { "subid": "1", "propagationmodel": "2ray", "noisemode": "none" } - # Parses the manifest file and converts configurations into core supported formats. phy_config = emanemanifest.parse(phy_xml, phy_defaults) - - ### Custom override options - # **NOTE: these options default to what's seen below and do not have to be included.** - - # Allows you to ignore options within phy/mac, used typically if you needed to add a custom option for display - # within the gui. config_ignore = set() diff --git a/docs/emane.md b/docs/emane.md index 2182bed1..90c5ce2c 100644 --- a/docs/emane.md +++ b/docs/emane.md @@ -68,9 +68,7 @@ sudo ln -s /usr/local/share/emane /usr/share/emane CORE supports custom developed EMANE models by way of dynamically loading user created python files that represent the model. Custom EMANE models should be placed within the path defined by **emane_models_dir** in the CORE configuration file. This path cannot end in **/emane**. 
Here is an example model with documentation describing functionality: -[Example Model](examplemodel.html) - - +[Example Model](/daemon/examples/myemane/examplemodel.py) ## Single PC with EMANE diff --git a/docs/examplemodel.html b/docs/examplemodel.html deleted file mode 100644 index 35613c45..00000000 --- a/docs/examplemodel.html +++ /dev/null @@ -1,239 +0,0 @@ - - - - - examplemodel.py - - - -
-
-
-

examplemodel.py

-
-
-
-
-
- # -
- -
-
-
from core.emane import emanemanifest
-from core.emane import emanemodel
-
-
-
-
-
-
- # -
-

Custom EMANE Model

-
-
-
class ExampleModel(emanemodel.EmaneModel):
-
-
-
-
-
-
- # -
-

MAC Definition

-
-
-
-
-
-
-
-
-
- # -
-

Defines the emane model name that will show up in the GUI.

-
-
-
    name = "emane_example"
-
-
-
-
-
-
- # -
-

Defines that mac library that the model will reference.

-
-
-
    mac_library = "rfpipemaclayer"
-
-
-
-
-
-
- # -
-

Defines the mac manifest file that will be parsed to obtain configuration options, that will be displayed -within the GUI.

-
-
-
    mac_xml = "/usr/share/emane/manifest/rfpipemaclayer.xml"
-
-
-
-
-
-
- # -
-

Allows you to override options that are maintained within the manifest file above.

-
-
-
    mac_defaults = {
-        "pcrcurveuri": "/usr/share/emane/xml/models/mac/rfpipe/rfpipepcr.xml",
-    }
-
-
-
-
-
-
- # -
-

Parses the manifest file and converts configurations into core supported formats.

-
-
-
    mac_config = emanemanifest.parse(mac_xml, mac_defaults)
-
-
-
-
-
-
- # -
-

PHY Definition

-

NOTE: phy configuration will default to the universal model as seen below and the below section does not -have to be included.

-
-
-
-
-
-
-
-
-
- # -
-

Defines that phy library that the model will reference, used if you need to provide a custom phy.

-
-
-
    phy_library = None
-
-
-
-
-
-
- # -
-

Defines the phy manifest file that will be parsed to obtain configuration options, that will be displayed -within the GUI.

-
-
-
    phy_xml = "/usr/share/emane/manifest/emanephy.xml"
-
-
-
-
-
-
- # -
-

Allows you to override options that are maintained within the manifest file above or for the default universal -model.

-
-
-
    phy_defaults = {
-        "subid": "1",
-        "propagationmodel": "2ray",
-        "noisemode": "none"
-    }
-
-
-
-
-
-
- # -
-

Parses the manifest file and converts configurations into core supported formats.

-
-
-
    phy_config = emanemanifest.parse(phy_xml, phy_defaults)
-
-
-
-
-
-
- # -
-

Custom override options

-

NOTE: these options default to what's seen below and do not have to be included.

-
-
-
-
-
-
-
-
-
- # -
-

Allows you to ignore options within phy/mac, used typically if you needed to add a custom option for display -within the gui.

-
-
-
    config_ignore = set()
-
-
-
-
-
-
- # -
-

Allows you to override how options are displayed with the GUI, using the GUI format of -"name:1-2|othername:3-4". This will be parsed into tabs, split by "|" and account for items based on the indexed -numbers after ":" for including values in each tab.

-
-
-
    config_groups_override = None
-
-
-
-
-
-
- # -
-

Allows you to override the default config matrix list. This value by default is the mac_config + phy_config, in -that order.

-
-
-
    config_matrix_override = None
-
-
-
-
-
-
- diff --git a/docs/pycco.css b/docs/pycco.css deleted file mode 100644 index aef571a5..00000000 --- a/docs/pycco.css +++ /dev/null @@ -1,190 +0,0 @@ -/*--------------------- Layout and Typography ----------------------------*/ -body { - font-family: 'Palatino Linotype', 'Book Antiqua', Palatino, FreeSerif, serif; - font-size: 16px; - line-height: 24px; - color: #252519; - margin: 0; padding: 0; - background: #f5f5ff; -} -a { - color: #261a3b; -} - a:visited { - color: #261a3b; - } -p { - margin: 0 0 15px 0; -} -h1, h2, h3, h4, h5, h6 { - margin: 40px 0 15px 0; -} -h2, h3, h4, h5, h6 { - margin-top: 0; - } -#container { - background: white; - } -#container, div.section { - position: relative; -} -#background { - position: absolute; - top: 0; left: 580px; right: 0; bottom: 0; - background: #f5f5ff; - border-left: 1px solid #e5e5ee; - z-index: 0; -} -#jump_to, #jump_page { - background: white; - -webkit-box-shadow: 0 0 25px #777; -moz-box-shadow: 0 0 25px #777; - -webkit-border-bottom-left-radius: 5px; -moz-border-radius-bottomleft: 5px; - font: 10px Arial; - text-transform: uppercase; - cursor: pointer; - text-align: right; -} -#jump_to, #jump_wrapper { - position: fixed; - right: 0; top: 0; - padding: 5px 10px; -} - #jump_wrapper { - padding: 0; - display: none; - } - #jump_to:hover #jump_wrapper { - display: block; - } - #jump_page { - padding: 5px 0 3px; - margin: 0 0 25px 25px; - } - #jump_page .source { - display: block; - padding: 5px 10px; - text-decoration: none; - border-top: 1px solid #eee; - } - #jump_page .source:hover { - background: #f5f5ff; - } - #jump_page .source:first-child { - } -div.docs { - float: left; - max-width: 500px; - min-width: 500px; - min-height: 5px; - padding: 10px 25px 1px 50px; - vertical-align: top; - text-align: left; -} - .docs pre { - margin: 15px 0 15px; - padding-left: 15px; - } - .docs p tt, .docs p code { - background: #f8f8ff; - border: 1px solid #dedede; - font-size: 12px; - padding: 0 0.2em; - } - .octowrap { - position: 
relative; - } - .octothorpe { - font: 12px Arial; - text-decoration: none; - color: #454545; - position: absolute; - top: 3px; left: -20px; - padding: 1px 2px; - opacity: 0; - -webkit-transition: opacity 0.2s linear; - } - div.docs:hover .octothorpe { - opacity: 1; - } -div.code { - margin-left: 580px; - padding: 14px 15px 16px 50px; - vertical-align: top; -} - .code pre, .docs p code { - font-size: 12px; - } - pre, tt, code { - line-height: 18px; - font-family: Monaco, Consolas, "Lucida Console", monospace; - margin: 0; padding: 0; - } -div.clearall { - clear: both; -} - - -/*---------------------- Syntax Highlighting -----------------------------*/ -td.linenos { background-color: #f0f0f0; padding-right: 10px; } -span.lineno { background-color: #f0f0f0; padding: 0 5px 0 5px; } -body .hll { background-color: #ffffcc } -body .c { color: #408080; font-style: italic } /* Comment */ -body .err { border: 1px solid #FF0000 } /* Error */ -body .k { color: #954121 } /* Keyword */ -body .o { color: #666666 } /* Operator */ -body .cm { color: #408080; font-style: italic } /* Comment.Multiline */ -body .cp { color: #BC7A00 } /* Comment.Preproc */ -body .c1 { color: #408080; font-style: italic } /* Comment.Single */ -body .cs { color: #408080; font-style: italic } /* Comment.Special */ -body .gd { color: #A00000 } /* Generic.Deleted */ -body .ge { font-style: italic } /* Generic.Emph */ -body .gr { color: #FF0000 } /* Generic.Error */ -body .gh { color: #000080; font-weight: bold } /* Generic.Heading */ -body .gi { color: #00A000 } /* Generic.Inserted */ -body .go { color: #808080 } /* Generic.Output */ -body .gp { color: #000080; font-weight: bold } /* Generic.Prompt */ -body .gs { font-weight: bold } /* Generic.Strong */ -body .gu { color: #800080; font-weight: bold } /* Generic.Subheading */ -body .gt { color: #0040D0 } /* Generic.Traceback */ -body .kc { color: #954121 } /* Keyword.Constant */ -body .kd { color: #954121; font-weight: bold } /* Keyword.Declaration */ -body 
.kn { color: #954121; font-weight: bold } /* Keyword.Namespace */ -body .kp { color: #954121 } /* Keyword.Pseudo */ -body .kr { color: #954121; font-weight: bold } /* Keyword.Reserved */ -body .kt { color: #B00040 } /* Keyword.Type */ -body .m { color: #666666 } /* Literal.Number */ -body .s { color: #219161 } /* Literal.String */ -body .na { color: #7D9029 } /* Name.Attribute */ -body .nb { color: #954121 } /* Name.Builtin */ -body .nc { color: #0000FF; font-weight: bold } /* Name.Class */ -body .no { color: #880000 } /* Name.Constant */ -body .nd { color: #AA22FF } /* Name.Decorator */ -body .ni { color: #999999; font-weight: bold } /* Name.Entity */ -body .ne { color: #D2413A; font-weight: bold } /* Name.Exception */ -body .nf { color: #0000FF } /* Name.Function */ -body .nl { color: #A0A000 } /* Name.Label */ -body .nn { color: #0000FF; font-weight: bold } /* Name.Namespace */ -body .nt { color: #954121; font-weight: bold } /* Name.Tag */ -body .nv { color: #19469D } /* Name.Variable */ -body .ow { color: #AA22FF; font-weight: bold } /* Operator.Word */ -body .w { color: #bbbbbb } /* Text.Whitespace */ -body .mf { color: #666666 } /* Literal.Number.Float */ -body .mh { color: #666666 } /* Literal.Number.Hex */ -body .mi { color: #666666 } /* Literal.Number.Integer */ -body .mo { color: #666666 } /* Literal.Number.Oct */ -body .sb { color: #219161 } /* Literal.String.Backtick */ -body .sc { color: #219161 } /* Literal.String.Char */ -body .sd { color: #219161; font-style: italic } /* Literal.String.Doc */ -body .s2 { color: #219161 } /* Literal.String.Double */ -body .se { color: #BB6622; font-weight: bold } /* Literal.String.Escape */ -body .sh { color: #219161 } /* Literal.String.Heredoc */ -body .si { color: #BB6688; font-weight: bold } /* Literal.String.Interpol */ -body .sx { color: #954121 } /* Literal.String.Other */ -body .sr { color: #BB6688 } /* Literal.String.Regex */ -body .s1 { color: #219161 } /* Literal.String.Single */ -body .ss { color: #19469D 
} /* Literal.String.Symbol */ -body .bp { color: #954121 } /* Name.Builtin.Pseudo */ -body .vc { color: #19469D } /* Name.Variable.Class */ -body .vg { color: #19469D } /* Name.Variable.Global */ -body .vi { color: #19469D } /* Name.Variable.Instance */ -body .il { color: #666666 } /* Literal.Number.Integer.Long */ From 9825706e034faa22252af8202859781be49f3335 Mon Sep 17 00:00:00 2001 From: Blake Harnden Date: Thu, 27 Jun 2019 15:45:35 -0700 Subject: [PATCH 41/51] initial commit with some docker nodes integrating with core at a basic level --- daemon/core/emulator/enumerations.py | 1 + daemon/core/nodes/docker.py | 549 ++++++++++++++++++++++++ daemon/core/nodes/nodemaps.py | 4 +- daemon/examples/docker/README.md | 30 ++ daemon/examples/docker/daemon.json | 5 + daemon/examples/docker/docker2core.py | 31 ++ daemon/examples/docker/docker2docker.py | 31 ++ daemon/examples/docker/switch.py | 36 ++ 8 files changed, 686 insertions(+), 1 deletion(-) create mode 100644 daemon/core/nodes/docker.py create mode 100644 daemon/examples/docker/README.md create mode 100644 daemon/examples/docker/daemon.json create mode 100644 daemon/examples/docker/docker2core.py create mode 100644 daemon/examples/docker/docker2docker.py create mode 100644 daemon/examples/docker/switch.py diff --git a/daemon/core/emulator/enumerations.py b/daemon/core/emulator/enumerations.py index ac287272..fe59891f 100644 --- a/daemon/core/emulator/enumerations.py +++ b/daemon/core/emulator/enumerations.py @@ -81,6 +81,7 @@ class NodeTypes(Enum): PEER_TO_PEER = 12 CONTROL_NET = 13 EMANE_NET = 14 + DOCKER = 15 class Rj45Models(Enum): diff --git a/daemon/core/nodes/docker.py b/daemon/core/nodes/docker.py new file mode 100644 index 00000000..a01db2e9 --- /dev/null +++ b/daemon/core/nodes/docker.py @@ -0,0 +1,549 @@ +import logging +import os +import random +import shutil +import string +import threading + +from core import utils, CoreCommandError, constants +from core.emulator.enumerations import NodeTypes +from 
core.nodes.base import CoreNodeBase +from core.nodes.interface import Veth, TunTap, CoreInterface + +_DEFAULT_MTU = 1500 + + +class DockerClient(object): + def __init__(self, name): + self.name = name + self.pid = None + self._addr = {} + + def create_container(self, image): + utils.check_cmd("docker run -td --net=none --hostname {name} --name {name} {image} /bin/bash".format( + name=self.name, + image=image + )) + + def is_container_alive(self): + status, output = utils.cmd_output("docker containers ls -f name={name}".format( + name=self.name + )) + return not status and len(output.split("\n")) == 2 + + def stop_container(self): + utils.check_cmd("docker rm -f {name}".format( + name=self.name + )) + + def run_cmd(self, cmd): + if isinstance(cmd, list): + cmd = " ".join(cmd) + return utils.cmd_output("docker exec -it {name} {cmd}".format( + name=self.name, + cmd=cmd + )) + + def ns_cmd(self, cmd): + if isinstance(cmd, list): + cmd = " ".join(cmd) + status, output = utils.cmd_output("nsenter -t {pid} -m -u -i -p -n {cmd}".format( + pid=self.pid, + cmd=cmd + )) + if status: + raise CoreCommandError(status, output) + return output + + def get_pid(self): + status, output = utils.cmd_output("docker inspect -f '{{{{.State.Pid}}}}' {name}".format(name=self.name)) + if status: + raise CoreCommandError(status, output) + self.pid = output + logging.debug("node(%s) pid: %s", self.name, self.pid) + return output + + def getaddr(self, ifname, rescan=False): + """ + Get address for interface on node. 
+ + :param str ifname: interface name to get address for + :param bool rescan: rescan flag + :return: interface information + :rtype: dict + """ + if ifname in self._addr and not rescan: + return self._addr[ifname] + + interface = {"ether": [], "inet": [], "inet6": [], "inet6link": []} + args = ["ip", "addr", "show", "dev", ifname] + status, output = self.ns_cmd(args) + for line in output: + line = line.strip().split() + if line[0] == "link/ether": + interface["ether"].append(line[1]) + elif line[0] == "inet": + interface["inet"].append(line[1]) + elif line[0] == "inet6": + if line[3] == "global": + interface["inet6"].append(line[1]) + elif line[3] == "link": + interface["inet6link"].append(line[1]) + else: + logging.warning("unknown scope: %s" % line[3]) + + if status: + logging.warning("nonzero exist status (%s) for cmd: %s", status, args) + self._addr[ifname] = interface + return interface + + +class DockerNode(CoreNodeBase): + apitype = NodeTypes.DOCKER.value + valid_address_types = {"inet", "inet6", "inet6link"} + + def __init__(self, session, _id=None, name=None, nodedir=None, bootsh="boot.sh", start=True): + """ + Create a CoreNode instance. + + :param core.emulator.session.Session session: core session instance + :param int _id: object id + :param str name: object name + :param str nodedir: node directory + :param str bootsh: boot shell to use + :param bool start: start flag + """ + super(CoreNodeBase, self).__init__(session, _id, name, start=start) + self.nodedir = nodedir + self.ctrlchnlname = os.path.abspath(os.path.join(self.session.session_dir, self.name)) + self.client = DockerClient(self.name) + self.pid = None + self.up = False + self.lock = threading.RLock() + self._mounts = [] + self.bootsh = bootsh + if start: + self.startup() + + def alive(self): + """ + Check if the node is alive. 
+ + :return: True if node is alive, False otherwise + :rtype: bool + """ + return self.client.is_container_alive() + + def startup(self): + """ + Start a new namespace node by invoking the vnoded process that + allocates a new namespace. Bring up the loopback device and set + the hostname. + + :return: nothing + """ + with self.lock: + if self.up: + raise ValueError("starting a node that is already up") + self.client.create_container("ubuntu:ifconfig") + self.pid = self.client.get_pid() + self.up = True + + def shutdown(self): + """ + Shutdown logic. + + :return: nothing + """ + # nothing to do if node is not up + if not self.up: + return + + with self.lock: + self._netif.clear() + self.client.stop_container() + self.up = False + + def cmd(self, args, wait=True): + """ + Runs shell command on node, with option to not wait for a result. + + :param list[str]|str args: command to run + :param bool wait: wait for command to exit, defaults to True + :return: exit status for command + :rtype: int + """ + status, _ = self.client.run_cmd(args) + return status + + def cmd_output(self, args): + """ + Runs shell command on node and get exit status and output. + + :param list[str]|str args: command to run + :return: exit status and combined stdout and stderr + :rtype: tuple[int, str] + """ + return self.client.run_cmd(args) + + def check_cmd(self, args): + """ + Runs shell command on node. + + :param list[str]|str args: command to run + :return: combined stdout and stderr + :rtype: str + :raises CoreCommandError: when a non-zero exit status occurs + """ + status, output = self.client.run_cmd(args) + if status: + raise CoreCommandError(status, output) + return output + + def termcmdstring(self, sh="/bin/sh"): + """ + Create a terminal command string. + + :param str sh: shell to execute command in + :return: str + """ + return "" + + def privatedir(self, path): + """ + Create a private directory. 
+ + :param str path: path to create + :return: nothing + """ + pass + + def mount(self, source, target): + """ + Create and mount a directory. + + :param str source: source directory to mount + :param str target: target directory to create + :return: nothing + :raises CoreCommandError: when a non-zero exit status occurs + """ + pass + + def newifindex(self): + """ + Retrieve a new interface index. + + :return: new interface index + :rtype: int + """ + with self.lock: + return super(DockerNode, self).newifindex() + + def newveth(self, ifindex=None, ifname=None, net=None): + """ + Create a new interface. + + :param int ifindex: index for the new interface + :param str ifname: name for the new interface + :param core.nodes.base.CoreNetworkBase net: network to associate interface with + :return: nothing + """ + with self.lock: + if ifindex is None: + ifindex = self.newifindex() + + if ifname is None: + ifname = "eth%d" % ifindex + + sessionid = self.session.short_session_id() + + try: + suffix = "%x.%s.%s" % (self.id, ifindex, sessionid) + except TypeError: + suffix = "%s.%s.%s" % (self.id, ifindex, sessionid) + + localname = "veth" + suffix + if len(localname) >= 16: + raise ValueError("interface local name (%s) too long" % localname) + + name = localname + "p" + if len(name) >= 16: + raise ValueError("interface name (%s) too long" % name) + + veth = Veth(node=self, name=name, localname=localname, net=net, start=self.up) + + if self.up: + utils.check_cmd([constants.IP_BIN, "link", "set", veth.name, "netns", str(self.pid)]) + self.client.ns_cmd(["ip", "link", "set", veth.name, "name", ifname]) + self.client.ns_cmd(["ethtool", "-K", ifname, "rx", "off", "tx", "off"]) + + veth.name = ifname + + if self.up: + # TODO: potentially find better way to query interface ID + # retrieve interface information + output = self.client.ns_cmd([constants.IP_BIN, "link", "show", veth.name]) + logging.debug("interface command output: %s", output) + output = output.split("\n") + 
veth.flow_id = int(output[0].strip().split(":")[0]) + 1 + logging.debug("interface flow index: %s - %s", veth.name, veth.flow_id) + # TODO: mimic packed hwaddr + # veth.hwaddr = MacAddress.from_string(output[1].strip().split()[1]) + logging.debug("interface mac: %s - %s", veth.name, veth.hwaddr) + + try: + self.addnetif(veth, ifindex) + except ValueError as e: + veth.shutdown() + del veth + raise e + + return ifindex + + def newtuntap(self, ifindex=None, ifname=None, net=None): + """ + Create a new tunnel tap. + + :param int ifindex: interface index + :param str ifname: interface name + :param net: network to associate with + :return: interface index + :rtype: int + """ + with self.lock: + if ifindex is None: + ifindex = self.newifindex() + + if ifname is None: + ifname = "eth%d" % ifindex + + sessionid = self.session.short_session_id() + localname = "tap%s.%s.%s" % (self.id, ifindex, sessionid) + name = ifname + tuntap = TunTap(node=self, name=name, localname=localname, net=net, start=self.up) + + try: + self.addnetif(tuntap, ifindex) + except ValueError as e: + tuntap.shutdown() + del tuntap + raise e + + return ifindex + + def sethwaddr(self, ifindex, addr): + """ + Set hardware addres for an interface. + + :param int ifindex: index of interface to set hardware address for + :param core.nodes.ipaddress.MacAddress addr: hardware address to set + :return: nothing + :raises CoreCommandError: when a non-zero exit status occurs + """ + self._netif[ifindex].sethwaddr(addr) + if self.up: + args = ["ip", "link", "set", "dev", self.ifname(ifindex), "address", str(addr)] + self.client.ns_cmd(args) + + def addaddr(self, ifindex, addr): + """ + Add interface address. 
+ + :param int ifindex: index of interface to add address to + :param str addr: address to add to interface + :return: nothing + """ + if self.up: + # check if addr is ipv6 + if ":" in str(addr): + args = ["ip", "addr", "add", str(addr), "dev", self.ifname(ifindex)] + self.client.ns_cmd(args) + else: + args = ["ip", "addr", "add", str(addr), "broadcast", "+", "dev", self.ifname(ifindex)] + self.client.ns_cmd(args) + + self._netif[ifindex].addaddr(addr) + + def deladdr(self, ifindex, addr): + """ + Delete address from an interface. + + :param int ifindex: index of interface to delete address from + :param str addr: address to delete from interface + :return: nothing + :raises CoreCommandError: when a non-zero exit status occurs + """ + try: + self._netif[ifindex].deladdr(addr) + except ValueError: + logging.exception("trying to delete unknown address: %s" % addr) + + if self.up: + self.check_cmd(["ip", "addr", "del", str(addr), "dev", self.ifname(ifindex)]) + + def delalladdr(self, ifindex, address_types=None): + """ + Delete all addresses from an interface. + + :param int ifindex: index of interface to delete address types from + :param tuple[str] address_types: address types to delete + :return: nothing + :raises CoreCommandError: when a non-zero exit status occurs + """ + if not address_types: + address_types = self.valid_address_types + + interface_name = self.ifname(ifindex) + addresses = self.client.getaddr(interface_name, rescan=True) + + for address_type in address_types: + if address_type not in self.valid_address_types: + raise ValueError("addr type must be in: %s" % " ".join(self.valid_address_types)) + for address in addresses[address_type]: + self.deladdr(ifindex, address) + + # update cached information + self.client.getaddr(interface_name, rescan=True) + + def ifup(self, ifindex): + """ + Bring an interface up. 
+ + :param int ifindex: index of interface to bring up + :return: nothing + """ + if self.up: + # self.check_cmd(["ip", "link", "set", self.ifname(ifindex), "up"]) + self.client.ns_cmd(["ip", "link", "set", self.ifname(ifindex), "up"]) + + def newnetif(self, net=None, addrlist=None, hwaddr=None, ifindex=None, ifname=None): + """ + Create a new network interface. + + :param core.nodes.base.CoreNetworkBase net: network to associate with + :param list addrlist: addresses to add on the interface + :param core.nodes.ipaddress.MacAddress hwaddr: hardware address to set for interface + :param int ifindex: index of interface to create + :param str ifname: name for interface + :return: interface index + :rtype: int + """ + if not addrlist: + addrlist = [] + + with self.lock: + ifindex = self.newveth(ifindex=ifindex, ifname=ifname, net=net) + + if net is not None: + self.attachnet(ifindex, net) + + if hwaddr: + self.sethwaddr(ifindex, hwaddr) + + for address in utils.make_tuple(addrlist): + self.addaddr(ifindex, address) + + self.ifup(ifindex) + return ifindex + + def connectnode(self, ifname, othernode, otherifname): + """ + Connect a node. + + :param str ifname: name of interface to connect + :param core.nodes.CoreNodeBase othernode: node to connect to + :param str otherifname: interface name to connect to + :return: nothing + """ + tmplen = 8 + tmp1 = "tmp." + "".join([random.choice(string.ascii_lowercase) for _ in range(tmplen)]) + tmp2 = "tmp." 
+ "".join([random.choice(string.ascii_lowercase) for _ in range(tmplen)]) + utils.check_cmd([constants.IP_BIN, "link", "add", "name", tmp1, "type", "veth", "peer", "name", tmp2]) + + utils.check_cmd([constants.IP_BIN, "link", "set", tmp1, "netns", str(self.pid)]) + self.check_cmd(["ip", "link", "set", tmp1, "name", ifname]) + interface = CoreInterface(node=self, name=ifname, mtu=_DEFAULT_MTU) + self.addnetif(interface, self.newifindex()) + + utils.check_cmd([constants.IP_BIN, "link", "set", tmp2, "netns", str(othernode.pid)]) + othernode.check_cmd([constants.IP_BIN, "link", "set", tmp2, "name", otherifname]) + other_interface = CoreInterface(node=othernode, name=otherifname, mtu=_DEFAULT_MTU) + othernode.addnetif(other_interface, othernode.newifindex()) + + def addfile(self, srcname, filename): + """ + Add a file. + + :param str srcname: source file name + :param str filename: file name to add + :return: nothing + :raises CoreCommandError: when a non-zero exit status occurs + """ + logging.info("adding file from %s to %s", srcname, filename) + directory = os.path.dirname(filename) + + cmd = 'mkdir -p "%s" && mv "%s" "%s" && sync' % (directory, srcname, filename) + status, output = self.client.run_cmd(cmd) + if status: + raise CoreCommandError(status, cmd, output) + + def hostfilename(self, filename): + """ + Return the name of a node"s file on the host filesystem. + + :param str filename: host file name + :return: path to file + """ + dirname, basename = os.path.split(filename) + if not basename: + raise ValueError("no basename for filename: %s" % filename) + if dirname and dirname[0] == "/": + dirname = dirname[1:] + dirname = dirname.replace("/", ".") + dirname = os.path.join(self.nodedir, dirname) + return os.path.join(dirname, basename) + + def opennodefile(self, filename, mode="w"): + """ + Open a node file, within it"s directory. 
+ + :param str filename: file name to open + :param str mode: mode to open file in + :return: open file + :rtype: file + """ + hostfilename = self.hostfilename(filename) + dirname, _basename = os.path.split(hostfilename) + if not os.path.isdir(dirname): + os.makedirs(dirname, mode=0o755) + return open(hostfilename, mode) + + def nodefile(self, filename, contents, mode=0o644): + """ + Create a node file with a given mode. + + :param str filename: name of file to create + :param contents: contents of file + :param int mode: mode for file + :return: nothing + """ + with self.opennodefile(filename, "w") as open_file: + open_file.write(contents) + os.chmod(open_file.name, mode) + logging.info("node(%s) added file: %s; mode: 0%o", self.name, open_file.name, mode) + + def nodefilecopy(self, filename, srcfilename, mode=None): + """ + Copy a file to a node, following symlinks and preserving metadata. + Change file mode if specified. + + :param str filename: file name to copy file to + :param str srcfilename: file to copy + :param int mode: mode to copy to + :return: nothing + """ + hostfilename = self.hostfilename(filename) + shutil.copy2(srcfilename, hostfilename) + if mode is not None: + os.chmod(hostfilename, mode) + logging.info("node(%s) copied file: %s; mode: %s", self.name, hostfilename, mode) diff --git a/daemon/core/nodes/nodemaps.py b/daemon/core/nodes/nodemaps.py index 525ab43d..0c1bb1bc 100644 --- a/daemon/core/nodes/nodemaps.py +++ b/daemon/core/nodes/nodemaps.py @@ -2,6 +2,7 @@ Provides default node maps that can be used to run core with. 
""" import core.nodes.base +import core.nodes.docker import core.nodes.network import core.nodes.physical from core.emane.nodes import EmaneNet @@ -25,5 +26,6 @@ NODES = { NodeTypes.EMANE_NET: EmaneNet, NodeTypes.TAP_BRIDGE: GreTapBridge, NodeTypes.PEER_TO_PEER: core.nodes.network.PtpNet, - NodeTypes.CONTROL_NET: core.nodes.network.CtrlNet + NodeTypes.CONTROL_NET: core.nodes.network.CtrlNet, + NodeTypes.DOCKER: core.nodes.docker.DockerNode } diff --git a/daemon/examples/docker/README.md b/daemon/examples/docker/README.md new file mode 100644 index 00000000..fac8d846 --- /dev/null +++ b/daemon/examples/docker/README.md @@ -0,0 +1,30 @@ +# Docker Support + +Information on how Docker can be leveraged and included to create +nodes based on Docker containers and images to interface with +existing CORE nodes, when needed. + +# Installation + +```shell +sudo apt install docker.io +``` + +# Configuration + +Custom configuration required to avoid iptable rules being added and removing +the need for the default docker network, since core will be orchestrating +connections between nodes. + +Place the file below in **/etc/docker/** +* daemon.json + +# Tools and Versions Tested With + +* Docker version 18.09.5, build e8ff056 +* nsenter from util-linux 2.31.1 + +# Examples + +This directory provides a few small examples creating Docker nodes +and linking them to themselves or with standard CORE nodes. 
diff --git a/daemon/examples/docker/daemon.json b/daemon/examples/docker/daemon.json new file mode 100644 index 00000000..8fefb9ab --- /dev/null +++ b/daemon/examples/docker/daemon.json @@ -0,0 +1,5 @@ +{ + "bridge": "none", + "iptables": false + +} diff --git a/daemon/examples/docker/docker2core.py b/daemon/examples/docker/docker2core.py new file mode 100644 index 00000000..ba328c0f --- /dev/null +++ b/daemon/examples/docker/docker2core.py @@ -0,0 +1,31 @@ +import logging + +from core.emulator.coreemu import CoreEmu +from core.emulator.emudata import IpPrefixes +from core.emulator.enumerations import NodeTypes, EventTypes + + +if __name__ == "__main__": + logging.basicConfig(level=logging.DEBUG) + + coreemu = CoreEmu() + session = coreemu.create_session() + session.set_state(EventTypes.CONFIGURATION_STATE) + + # create nodes and interfaces + try: + prefixes = IpPrefixes(ip4_prefix="10.83.0.0/16") + node_one = session.add_node(_type=NodeTypes.DOCKER) + node_two = session.add_node() + interface_one = prefixes.create_interface(node_one) + interface_two = prefixes.create_interface(node_two) + + # add link + input("press key to continue") + session.add_link(node_one.id, node_two.id, interface_one, interface_two) + print(node_one.cmd_output("ifconfig")) + print(node_two.cmd_output("ifconfig")) + input("press key to continue") + finally: + input("continue to shutdown") + coreemu.shutdown() diff --git a/daemon/examples/docker/docker2docker.py b/daemon/examples/docker/docker2docker.py new file mode 100644 index 00000000..e1001da9 --- /dev/null +++ b/daemon/examples/docker/docker2docker.py @@ -0,0 +1,31 @@ +import logging + +from core.emulator.coreemu import CoreEmu +from core.emulator.emudata import IpPrefixes +from core.emulator.enumerations import NodeTypes, EventTypes + + +if __name__ == "__main__": + logging.basicConfig(level=logging.DEBUG) + + coreemu = CoreEmu() + session = coreemu.create_session() + session.set_state(EventTypes.CONFIGURATION_STATE) + + # create 
nodes and interfaces + try: + prefixes = IpPrefixes(ip4_prefix="10.83.0.0/16") + node_one = session.add_node(_type=NodeTypes.DOCKER) + node_two = session.add_node(_type=NodeTypes.DOCKER) + interface_one = prefixes.create_interface(node_one) + interface_two = prefixes.create_interface(node_two) + + # add link + input("press key to continue") + session.add_link(node_one.id, node_two.id, interface_one, interface_two) + print(node_one.cmd_output("ifconfig")) + print(node_two.cmd_output("ifconfig")) + input("press key to continue") + finally: + input("continue to shutdown") + coreemu.shutdown() diff --git a/daemon/examples/docker/switch.py b/daemon/examples/docker/switch.py new file mode 100644 index 00000000..dbcc8466 --- /dev/null +++ b/daemon/examples/docker/switch.py @@ -0,0 +1,36 @@ +import logging + +from core.emulator.coreemu import CoreEmu +from core.emulator.emudata import IpPrefixes +from core.emulator.enumerations import NodeTypes, EventTypes + + +if __name__ == "__main__": + logging.basicConfig(level=logging.DEBUG) + + coreemu = CoreEmu() + session = coreemu.create_session() + session.set_state(EventTypes.CONFIGURATION_STATE) + + # create nodes and interfaces + try: + prefixes = IpPrefixes(ip4_prefix="10.83.0.0/16") + switch = session.add_node(_type=NodeTypes.SWITCH) + node_one = session.add_node(_type=NodeTypes.DOCKER) + node_two = session.add_node(_type=NodeTypes.DOCKER) + node_three = session.add_node() + interface_one = prefixes.create_interface(node_one) + interface_two = prefixes.create_interface(node_two) + interface_three = prefixes.create_interface(node_three) + + # add link + input("press key to continue") + session.add_link(node_one.id, switch.id, interface_one) + session.add_link(node_two.id, switch.id, interface_two) + session.add_link(node_three.id, switch.id, interface_three) + print(node_one.cmd_output("ifconfig")) + print(node_two.cmd_output("ifconfig")) + input("press key to continue") + finally: + input("continue to shutdown") + 
coreemu.shutdown() From 597195052371d67f009321dfddcc5de8577fff3a Mon Sep 17 00:00:00 2001 From: Blake Harnden Date: Fri, 28 Jun 2019 08:17:11 -0700 Subject: [PATCH 42/51] changes to have DockerNode based off a CoreNode instead, elminating the need for a lot of boiler plate --- daemon/core/nodes/docker.py | 354 +----------------------- daemon/examples/docker/docker2core.py | 1 + daemon/examples/docker/docker2docker.py | 1 + daemon/examples/docker/switch.py | 1 + 4 files changed, 17 insertions(+), 340 deletions(-) diff --git a/daemon/core/nodes/docker.py b/daemon/core/nodes/docker.py index a01db2e9..74b9ea01 100644 --- a/daemon/core/nodes/docker.py +++ b/daemon/core/nodes/docker.py @@ -1,16 +1,10 @@ import logging import os -import random -import shutil -import string import threading -from core import utils, CoreCommandError, constants +from core import utils, CoreCommandError from core.emulator.enumerations import NodeTypes -from core.nodes.base import CoreNodeBase -from core.nodes.interface import Veth, TunTap, CoreInterface - -_DEFAULT_MTU = 1500 +from core.nodes.base import CoreNode class DockerClient(object): @@ -24,8 +18,10 @@ class DockerClient(object): name=self.name, image=image )) + self.pid = self.get_pid() + return self.pid - def is_container_alive(self): + def is_alive(self): status, output = utils.cmd_output("docker containers ls -f name={name}".format( name=self.name )) @@ -47,13 +43,10 @@ class DockerClient(object): def ns_cmd(self, cmd): if isinstance(cmd, list): cmd = " ".join(cmd) - status, output = utils.cmd_output("nsenter -t {pid} -m -u -i -p -n {cmd}".format( + return utils.cmd_output("nsenter -t {pid} -m -u -i -p -n {cmd}".format( pid=self.pid, cmd=cmd )) - if status: - raise CoreCommandError(status, output) - return output def get_pid(self): status, output = utils.cmd_output("docker inspect -f '{{{{.State.Pid}}}}' {name}".format(name=self.name)) @@ -98,7 +91,7 @@ class DockerClient(object): return interface -class DockerNode(CoreNodeBase): 
+class DockerNode(CoreNode): apitype = NodeTypes.DOCKER.value valid_address_types = {"inet", "inet6", "inet6link"} @@ -113,7 +106,7 @@ class DockerNode(CoreNodeBase): :param str bootsh: boot shell to use :param bool start: start flag """ - super(CoreNodeBase, self).__init__(session, _id, name, start=start) + super(CoreNode, self).__init__(session, _id, name, start=start) self.nodedir = nodedir self.ctrlchnlname = os.path.abspath(os.path.join(self.session.session_dir, self.name)) self.client = DockerClient(self.name) @@ -122,6 +115,7 @@ class DockerNode(CoreNodeBase): self.lock = threading.RLock() self._mounts = [] self.bootsh = bootsh + logging.debug("docker services: %s", self.services) if start: self.startup() @@ -132,7 +126,7 @@ class DockerNode(CoreNodeBase): :return: True if node is alive, False otherwise :rtype: bool """ - return self.client.is_container_alive() + return self.client.is_alive() def startup(self): """ @@ -145,8 +139,7 @@ class DockerNode(CoreNodeBase): with self.lock: if self.up: raise ValueError("starting a node that is already up") - self.client.create_container("ubuntu:ifconfig") - self.pid = self.client.get_pid() + self.pid = self.client.create_container("ubuntu:ifconfig") self.up = True def shutdown(self): @@ -173,7 +166,7 @@ class DockerNode(CoreNodeBase): :return: exit status for command :rtype: int """ - status, _ = self.client.run_cmd(args) + status, _ = self.client.ns_cmd(args) return status def cmd_output(self, args): @@ -184,7 +177,7 @@ class DockerNode(CoreNodeBase): :return: exit status and combined stdout and stderr :rtype: tuple[int, str] """ - return self.client.run_cmd(args) + return self.client.ns_cmd(args) def check_cmd(self, args): """ @@ -195,7 +188,7 @@ class DockerNode(CoreNodeBase): :rtype: str :raises CoreCommandError: when a non-zero exit status occurs """ - status, output = self.client.run_cmd(args) + status, output = self.client.ns_cmd(args) if status: raise CoreCommandError(status, output) return output @@ -228,322 
+221,3 @@ class DockerNode(CoreNodeBase): :raises CoreCommandError: when a non-zero exit status occurs """ pass - - def newifindex(self): - """ - Retrieve a new interface index. - - :return: new interface index - :rtype: int - """ - with self.lock: - return super(DockerNode, self).newifindex() - - def newveth(self, ifindex=None, ifname=None, net=None): - """ - Create a new interface. - - :param int ifindex: index for the new interface - :param str ifname: name for the new interface - :param core.nodes.base.CoreNetworkBase net: network to associate interface with - :return: nothing - """ - with self.lock: - if ifindex is None: - ifindex = self.newifindex() - - if ifname is None: - ifname = "eth%d" % ifindex - - sessionid = self.session.short_session_id() - - try: - suffix = "%x.%s.%s" % (self.id, ifindex, sessionid) - except TypeError: - suffix = "%s.%s.%s" % (self.id, ifindex, sessionid) - - localname = "veth" + suffix - if len(localname) >= 16: - raise ValueError("interface local name (%s) too long" % localname) - - name = localname + "p" - if len(name) >= 16: - raise ValueError("interface name (%s) too long" % name) - - veth = Veth(node=self, name=name, localname=localname, net=net, start=self.up) - - if self.up: - utils.check_cmd([constants.IP_BIN, "link", "set", veth.name, "netns", str(self.pid)]) - self.client.ns_cmd(["ip", "link", "set", veth.name, "name", ifname]) - self.client.ns_cmd(["ethtool", "-K", ifname, "rx", "off", "tx", "off"]) - - veth.name = ifname - - if self.up: - # TODO: potentially find better way to query interface ID - # retrieve interface information - output = self.client.ns_cmd([constants.IP_BIN, "link", "show", veth.name]) - logging.debug("interface command output: %s", output) - output = output.split("\n") - veth.flow_id = int(output[0].strip().split(":")[0]) + 1 - logging.debug("interface flow index: %s - %s", veth.name, veth.flow_id) - # TODO: mimic packed hwaddr - # veth.hwaddr = MacAddress.from_string(output[1].strip().split()[1]) - 
logging.debug("interface mac: %s - %s", veth.name, veth.hwaddr) - - try: - self.addnetif(veth, ifindex) - except ValueError as e: - veth.shutdown() - del veth - raise e - - return ifindex - - def newtuntap(self, ifindex=None, ifname=None, net=None): - """ - Create a new tunnel tap. - - :param int ifindex: interface index - :param str ifname: interface name - :param net: network to associate with - :return: interface index - :rtype: int - """ - with self.lock: - if ifindex is None: - ifindex = self.newifindex() - - if ifname is None: - ifname = "eth%d" % ifindex - - sessionid = self.session.short_session_id() - localname = "tap%s.%s.%s" % (self.id, ifindex, sessionid) - name = ifname - tuntap = TunTap(node=self, name=name, localname=localname, net=net, start=self.up) - - try: - self.addnetif(tuntap, ifindex) - except ValueError as e: - tuntap.shutdown() - del tuntap - raise e - - return ifindex - - def sethwaddr(self, ifindex, addr): - """ - Set hardware addres for an interface. - - :param int ifindex: index of interface to set hardware address for - :param core.nodes.ipaddress.MacAddress addr: hardware address to set - :return: nothing - :raises CoreCommandError: when a non-zero exit status occurs - """ - self._netif[ifindex].sethwaddr(addr) - if self.up: - args = ["ip", "link", "set", "dev", self.ifname(ifindex), "address", str(addr)] - self.client.ns_cmd(args) - - def addaddr(self, ifindex, addr): - """ - Add interface address. - - :param int ifindex: index of interface to add address to - :param str addr: address to add to interface - :return: nothing - """ - if self.up: - # check if addr is ipv6 - if ":" in str(addr): - args = ["ip", "addr", "add", str(addr), "dev", self.ifname(ifindex)] - self.client.ns_cmd(args) - else: - args = ["ip", "addr", "add", str(addr), "broadcast", "+", "dev", self.ifname(ifindex)] - self.client.ns_cmd(args) - - self._netif[ifindex].addaddr(addr) - - def deladdr(self, ifindex, addr): - """ - Delete address from an interface. 
- - :param int ifindex: index of interface to delete address from - :param str addr: address to delete from interface - :return: nothing - :raises CoreCommandError: when a non-zero exit status occurs - """ - try: - self._netif[ifindex].deladdr(addr) - except ValueError: - logging.exception("trying to delete unknown address: %s" % addr) - - if self.up: - self.check_cmd(["ip", "addr", "del", str(addr), "dev", self.ifname(ifindex)]) - - def delalladdr(self, ifindex, address_types=None): - """ - Delete all addresses from an interface. - - :param int ifindex: index of interface to delete address types from - :param tuple[str] address_types: address types to delete - :return: nothing - :raises CoreCommandError: when a non-zero exit status occurs - """ - if not address_types: - address_types = self.valid_address_types - - interface_name = self.ifname(ifindex) - addresses = self.client.getaddr(interface_name, rescan=True) - - for address_type in address_types: - if address_type not in self.valid_address_types: - raise ValueError("addr type must be in: %s" % " ".join(self.valid_address_types)) - for address in addresses[address_type]: - self.deladdr(ifindex, address) - - # update cached information - self.client.getaddr(interface_name, rescan=True) - - def ifup(self, ifindex): - """ - Bring an interface up. - - :param int ifindex: index of interface to bring up - :return: nothing - """ - if self.up: - # self.check_cmd(["ip", "link", "set", self.ifname(ifindex), "up"]) - self.client.ns_cmd(["ip", "link", "set", self.ifname(ifindex), "up"]) - - def newnetif(self, net=None, addrlist=None, hwaddr=None, ifindex=None, ifname=None): - """ - Create a new network interface. 
- - :param core.nodes.base.CoreNetworkBase net: network to associate with - :param list addrlist: addresses to add on the interface - :param core.nodes.ipaddress.MacAddress hwaddr: hardware address to set for interface - :param int ifindex: index of interface to create - :param str ifname: name for interface - :return: interface index - :rtype: int - """ - if not addrlist: - addrlist = [] - - with self.lock: - ifindex = self.newveth(ifindex=ifindex, ifname=ifname, net=net) - - if net is not None: - self.attachnet(ifindex, net) - - if hwaddr: - self.sethwaddr(ifindex, hwaddr) - - for address in utils.make_tuple(addrlist): - self.addaddr(ifindex, address) - - self.ifup(ifindex) - return ifindex - - def connectnode(self, ifname, othernode, otherifname): - """ - Connect a node. - - :param str ifname: name of interface to connect - :param core.nodes.CoreNodeBase othernode: node to connect to - :param str otherifname: interface name to connect to - :return: nothing - """ - tmplen = 8 - tmp1 = "tmp." + "".join([random.choice(string.ascii_lowercase) for _ in range(tmplen)]) - tmp2 = "tmp." + "".join([random.choice(string.ascii_lowercase) for _ in range(tmplen)]) - utils.check_cmd([constants.IP_BIN, "link", "add", "name", tmp1, "type", "veth", "peer", "name", tmp2]) - - utils.check_cmd([constants.IP_BIN, "link", "set", tmp1, "netns", str(self.pid)]) - self.check_cmd(["ip", "link", "set", tmp1, "name", ifname]) - interface = CoreInterface(node=self, name=ifname, mtu=_DEFAULT_MTU) - self.addnetif(interface, self.newifindex()) - - utils.check_cmd([constants.IP_BIN, "link", "set", tmp2, "netns", str(othernode.pid)]) - othernode.check_cmd([constants.IP_BIN, "link", "set", tmp2, "name", otherifname]) - other_interface = CoreInterface(node=othernode, name=otherifname, mtu=_DEFAULT_MTU) - othernode.addnetif(other_interface, othernode.newifindex()) - - def addfile(self, srcname, filename): - """ - Add a file. 
- - :param str srcname: source file name - :param str filename: file name to add - :return: nothing - :raises CoreCommandError: when a non-zero exit status occurs - """ - logging.info("adding file from %s to %s", srcname, filename) - directory = os.path.dirname(filename) - - cmd = 'mkdir -p "%s" && mv "%s" "%s" && sync' % (directory, srcname, filename) - status, output = self.client.run_cmd(cmd) - if status: - raise CoreCommandError(status, cmd, output) - - def hostfilename(self, filename): - """ - Return the name of a node"s file on the host filesystem. - - :param str filename: host file name - :return: path to file - """ - dirname, basename = os.path.split(filename) - if not basename: - raise ValueError("no basename for filename: %s" % filename) - if dirname and dirname[0] == "/": - dirname = dirname[1:] - dirname = dirname.replace("/", ".") - dirname = os.path.join(self.nodedir, dirname) - return os.path.join(dirname, basename) - - def opennodefile(self, filename, mode="w"): - """ - Open a node file, within it"s directory. - - :param str filename: file name to open - :param str mode: mode to open file in - :return: open file - :rtype: file - """ - hostfilename = self.hostfilename(filename) - dirname, _basename = os.path.split(hostfilename) - if not os.path.isdir(dirname): - os.makedirs(dirname, mode=0o755) - return open(hostfilename, mode) - - def nodefile(self, filename, contents, mode=0o644): - """ - Create a node file with a given mode. - - :param str filename: name of file to create - :param contents: contents of file - :param int mode: mode for file - :return: nothing - """ - with self.opennodefile(filename, "w") as open_file: - open_file.write(contents) - os.chmod(open_file.name, mode) - logging.info("node(%s) added file: %s; mode: 0%o", self.name, open_file.name, mode) - - def nodefilecopy(self, filename, srcfilename, mode=None): - """ - Copy a file to a node, following symlinks and preserving metadata. - Change file mode if specified. 
- - :param str filename: file name to copy file to - :param str srcfilename: file to copy - :param int mode: mode to copy to - :return: nothing - """ - hostfilename = self.hostfilename(filename) - shutil.copy2(srcfilename, hostfilename) - if mode is not None: - os.chmod(hostfilename, mode) - logging.info("node(%s) copied file: %s; mode: %s", self.name, hostfilename, mode) diff --git a/daemon/examples/docker/docker2core.py b/daemon/examples/docker/docker2core.py index ba328c0f..1b27039b 100644 --- a/daemon/examples/docker/docker2core.py +++ b/daemon/examples/docker/docker2core.py @@ -26,6 +26,7 @@ if __name__ == "__main__": print(node_one.cmd_output("ifconfig")) print(node_two.cmd_output("ifconfig")) input("press key to continue") + session.instantiate() finally: input("continue to shutdown") coreemu.shutdown() diff --git a/daemon/examples/docker/docker2docker.py b/daemon/examples/docker/docker2docker.py index e1001da9..60c63c53 100644 --- a/daemon/examples/docker/docker2docker.py +++ b/daemon/examples/docker/docker2docker.py @@ -26,6 +26,7 @@ if __name__ == "__main__": print(node_one.cmd_output("ifconfig")) print(node_two.cmd_output("ifconfig")) input("press key to continue") + session.instantiate() finally: input("continue to shutdown") coreemu.shutdown() diff --git a/daemon/examples/docker/switch.py b/daemon/examples/docker/switch.py index dbcc8466..70ba634f 100644 --- a/daemon/examples/docker/switch.py +++ b/daemon/examples/docker/switch.py @@ -31,6 +31,7 @@ if __name__ == "__main__": print(node_one.cmd_output("ifconfig")) print(node_two.cmd_output("ifconfig")) input("press key to continue") + session.instantiate() finally: input("continue to shutdown") coreemu.shutdown() From 6ab8368f1cca304d64ec2fc5b00bde78dd3fbbeb Mon Sep 17 00:00:00 2001 From: Blake Harnden Date: Fri, 28 Jun 2019 12:58:08 -0700 Subject: [PATCH 43/51] updates to attempt to support trying to run traditional services in some manner within the context of a docker node --- 
daemon/core/emulator/session.py | 2 +- daemon/core/nodes/docker.py | 63 ++++++++++++++++++++++++--- daemon/examples/docker/docker2core.py | 17 +++++--- 3 files changed, 68 insertions(+), 14 deletions(-) diff --git a/daemon/core/emulator/session.py b/daemon/core/emulator/session.py index 246f0c0a..ef79dcf5 100644 --- a/daemon/core/emulator/session.py +++ b/daemon/core/emulator/session.py @@ -1364,7 +1364,7 @@ class Session(object): # TODO: PyCoreNode is not the type to check if isinstance(node, CoreNodeBase) and not nodeutils.is_node(node, NodeTypes.RJ45): # add a control interface if configured - logging.info("booting node: %s", node.name) + logging.info("booting node(%s): %s", node.name, node.services) self.add_remove_control_interface(node=node, remove=False) result = pool.apply_async(self.services.boot_services, (node,)) results.append(result) diff --git a/daemon/core/nodes/docker.py b/daemon/core/nodes/docker.py index 74b9ea01..007c76fc 100644 --- a/daemon/core/nodes/docker.py +++ b/daemon/core/nodes/docker.py @@ -35,6 +35,7 @@ class DockerClient(object): def run_cmd(self, cmd): if isinstance(cmd, list): cmd = " ".join(cmd) + logging.info("docker cmd: %s", cmd) return utils.cmd_output("docker exec -it {name} {cmd}".format( name=self.name, cmd=cmd @@ -43,19 +44,31 @@ class DockerClient(object): def ns_cmd(self, cmd): if isinstance(cmd, list): cmd = " ".join(cmd) + logging.info("ns cmd: %s", cmd) return utils.cmd_output("nsenter -t {pid} -m -u -i -p -n {cmd}".format( pid=self.pid, cmd=cmd )) def get_pid(self): - status, output = utils.cmd_output("docker inspect -f '{{{{.State.Pid}}}}' {name}".format(name=self.name)) + args = "docker inspect -f '{{{{.State.Pid}}}}' {name}".format(name=self.name) + status, output = utils.cmd_output(args) if status: - raise CoreCommandError(status, output) + raise CoreCommandError(status, args, output) self.pid = output logging.debug("node(%s) pid: %s", self.name, self.pid) return output + def copy_file(self, source, destination): + 
args = "docker cp {source} {name}:{destination}".format( + source=source, + name=self.name, + destination=destination + ) + status, output = utils.cmd_output(args) + if status: + raise CoreCommandError(status, args, output) + def getaddr(self, ifname, rescan=False): """ Get address for interface on node. @@ -139,6 +152,7 @@ class DockerNode(CoreNode): with self.lock: if self.up: raise ValueError("starting a node that is already up") + self.makenodedir() self.pid = self.client.create_container("ubuntu:ifconfig") self.up = True @@ -166,7 +180,9 @@ class DockerNode(CoreNode): :return: exit status for command :rtype: int """ - status, _ = self.client.ns_cmd(args) + status, output = self.client.ns_cmd(args) + if status: + raise CoreCommandError(status, args, output) return status def cmd_output(self, args): @@ -190,7 +206,7 @@ class DockerNode(CoreNode): """ status, output = self.client.ns_cmd(args) if status: - raise CoreCommandError(status, output) + raise CoreCommandError(status, args, output) return output def termcmdstring(self, sh="/bin/sh"): @@ -209,7 +225,11 @@ class DockerNode(CoreNode): :param str path: path to create :return: nothing """ - pass + logging.info("creating node dir: %s", path) + args = "mkdir -p {path}".format(path=path) + status, output = self.client.run_cmd(args) + if status: + raise CoreCommandError(status, args, output) def mount(self, source, target): """ @@ -220,4 +240,35 @@ class DockerNode(CoreNode): :return: nothing :raises CoreCommandError: when a non-zero exit status occurs """ - pass + logging.info("mounting source(%s) target(%s)", source, target) + raise Exception("you found a docker node") + + def nodefile(self, filename, contents, mode=0o644): + """ + Create a node file with a given mode. 
+ + :param str filename: name of file to create + :param contents: contents of file + :param int mode: mode for file + :return: nothing + """ + logging.info("node dir(%s) ctrlchannel(%s)", self.nodedir, self.ctrlchnlname) + logging.info("nodefile filename(%s) mode(%s)", filename, mode) + file_path = os.path.join(self.nodedir, filename) + with open(file_path, "w") as f: + os.chmod(f.name, mode) + f.write(contents) + self.client.copy_file(file_path, filename) + + def nodefilecopy(self, filename, srcfilename, mode=None): + """ + Copy a file to a node, following symlinks and preserving metadata. + Change file mode if specified. + + :param str filename: file name to copy file to + :param str srcfilename: file to copy + :param int mode: mode to copy to + :return: nothing + """ + logging.info("node file copy file(%s) source(%s) mode(%s)", filename, srcfilename, mode) + raise Exception("you found a docker node") diff --git a/daemon/examples/docker/docker2core.py b/daemon/examples/docker/docker2core.py index 1b27039b..39491214 100644 --- a/daemon/examples/docker/docker2core.py +++ b/daemon/examples/docker/docker2core.py @@ -7,25 +7,28 @@ from core.emulator.enumerations import NodeTypes, EventTypes if __name__ == "__main__": logging.basicConfig(level=logging.DEBUG) - coreemu = CoreEmu() session = coreemu.create_session() session.set_state(EventTypes.CONFIGURATION_STATE) - # create nodes and interfaces try: + # create nodes one prefixes = IpPrefixes(ip4_prefix="10.83.0.0/16") node_one = session.add_node(_type=NodeTypes.DOCKER) - node_two = session.add_node() + session.services.add_services(node_one, node_one.type, ["SSH"]) + logging.info("docker node(%s): %s", node_one.name, node_one.services) interface_one = prefixes.create_interface(node_one) + + # create nodes two + node_two = session.add_node() interface_two = prefixes.create_interface(node_two) # add link - input("press key to continue") session.add_link(node_one.id, node_two.id, interface_one, interface_two) - 
print(node_one.cmd_output("ifconfig")) - print(node_two.cmd_output("ifconfig")) - input("press key to continue") + + # instantiate + logging.info("INSTANTIATE") + logging.info("docker node(%s): %s", node_one.name, node_one.services) session.instantiate() finally: input("continue to shutdown") From 0e7464d419d04f9fe5631887fdf295dfcacdbba5 Mon Sep 17 00:00:00 2001 From: Blake Harnden Date: Fri, 28 Jun 2019 14:24:36 -0700 Subject: [PATCH 44/51] fixed cmd to leverage wait similar to standard nodes and avoid throwing its own exceptions --- daemon/core/nodes/docker.py | 34 +++++++++++++++++++++------------- 1 file changed, 21 insertions(+), 13 deletions(-) diff --git a/daemon/core/nodes/docker.py b/daemon/core/nodes/docker.py index 007c76fc..7f612d48 100644 --- a/daemon/core/nodes/docker.py +++ b/daemon/core/nodes/docker.py @@ -41,14 +41,25 @@ class DockerClient(object): cmd=cmd )) - def ns_cmd(self, cmd): - if isinstance(cmd, list): - cmd = " ".join(cmd) - logging.info("ns cmd: %s", cmd) - return utils.cmd_output("nsenter -t {pid} -m -u -i -p -n {cmd}".format( + def _ns_args(self, cmd): + return "nsenter -t {pid} -m -u -i -p -n {cmd}".format( pid=self.pid, cmd=cmd - )) + ) + + def ns_cmd_output(self, cmd): + if isinstance(cmd, list): + cmd = " ".join(cmd) + args = self._ns_args(cmd) + logging.info("ns cmd: %s", args) + return utils.cmd_output(args) + + def ns_cmd(self, cmd, wait=True): + if isinstance(cmd, list): + cmd = " ".join(cmd) + args = self._ns_args(cmd) + logging.info("ns cmd: %s", args) + return utils.cmd(args, wait) def get_pid(self): args = "docker inspect -f '{{{{.State.Pid}}}}' {name}".format(name=self.name) @@ -83,7 +94,7 @@ class DockerClient(object): interface = {"ether": [], "inet": [], "inet6": [], "inet6link": []} args = ["ip", "addr", "show", "dev", ifname] - status, output = self.ns_cmd(args) + status, output = self.ns_cmd_output(args) for line in output: line = line.strip().split() if line[0] == "link/ether": @@ -180,10 +191,7 @@ class 
DockerNode(CoreNode): :return: exit status for command :rtype: int """ - status, output = self.client.ns_cmd(args) - if status: - raise CoreCommandError(status, args, output) - return status + return self.client.ns_cmd(args, wait) def cmd_output(self, args): """ @@ -193,7 +201,7 @@ class DockerNode(CoreNode): :return: exit status and combined stdout and stderr :rtype: tuple[int, str] """ - return self.client.ns_cmd(args) + return self.client.ns_cmd_output(args) def check_cmd(self, args): """ @@ -204,7 +212,7 @@ class DockerNode(CoreNode): :rtype: str :raises CoreCommandError: when a non-zero exit status occurs """ - status, output = self.client.ns_cmd(args) + status, output = self.client.ns_cmd_output(args) if status: raise CoreCommandError(status, args, output) return output From 2397cd58eead17566337740bf54717f60c61340a Mon Sep 17 00:00:00 2001 From: Blake Harnden Date: Fri, 28 Jun 2019 15:41:55 -0700 Subject: [PATCH 45/51] integrated creation into the standard session.add_node call, currently requires an extra check due to the need for being able to specify an image --- daemon/core/emulator/emudata.py | 4 +++- daemon/core/emulator/session.py | 7 +++++-- daemon/core/nodes/docker.py | 15 ++++++++------ daemon/examples/docker/docker2core.py | 14 ++++++------- daemon/examples/docker/docker2docker.py | 17 +++++++++------- daemon/examples/docker/switch.py | 26 +++++++++++++++---------- 6 files changed, 49 insertions(+), 34 deletions(-) diff --git a/daemon/core/emulator/emudata.py b/daemon/core/emulator/emudata.py index ed54e20a..2dc96570 100644 --- a/daemon/core/emulator/emudata.py +++ b/daemon/core/emulator/emudata.py @@ -79,12 +79,13 @@ class NodeOptions(object): Options for creating and updating nodes within core. """ - def __init__(self, name=None, model="PC"): + def __init__(self, name=None, model="PC", image=None): """ Create a NodeOptions object. 
:param str name: name of node, defaults to node class name postfix with its id :param str model: defines services for default and physical nodes, defaults to "router" + :param str image: image to use for docker nodes """ self.name = name self.model = model @@ -99,6 +100,7 @@ class NodeOptions(object): self.alt = None self.emulation_id = None self.emulation_server = None + self.image = image def set_position(self, x, y): """ diff --git a/daemon/core/emulator/session.py b/daemon/core/emulator/session.py index ef79dcf5..7d2365e7 100644 --- a/daemon/core/emulator/session.py +++ b/daemon/core/emulator/session.py @@ -500,7 +500,10 @@ class Session(object): # create node logging.info("creating node(%s) id(%s) name(%s) start(%s)", node_class.__name__, _id, name, start) - node = self.create_node(cls=node_class, _id=_id, name=name, start=start) + if _type == NodeTypes.DOCKER: + node = self.create_node(cls=node_class, _id=_id, name=name, start=start, image=node_options.image) + else: + node = self.create_node(cls=node_class, _id=_id, name=name, start=start) # set node attributes node.icon = node_options.icon @@ -511,7 +514,7 @@ class Session(object): self.set_node_position(node, node_options) # add services to default and physical nodes only - if _type in [NodeTypes.DEFAULT, NodeTypes.PHYSICAL]: + if _type in [NodeTypes.DEFAULT, NodeTypes.PHYSICAL, NodeTypes.DOCKER]: node.type = node_options.model logging.debug("set node type: %s", node.type) self.services.add_services(node, node.type, node_options.services) diff --git a/daemon/core/nodes/docker.py b/daemon/core/nodes/docker.py index 7f612d48..909909f9 100644 --- a/daemon/core/nodes/docker.py +++ b/daemon/core/nodes/docker.py @@ -8,15 +8,16 @@ from core.nodes.base import CoreNode class DockerClient(object): - def __init__(self, name): + def __init__(self, name, image): self.name = name + self.image = image self.pid = None self._addr = {} - def create_container(self, image): + def create_container(self): 
utils.check_cmd("docker run -td --net=none --hostname {name} --name {name} {image} /bin/bash".format( name=self.name, - image=image + image=self.image )) self.pid = self.get_pid() return self.pid @@ -119,7 +120,7 @@ class DockerNode(CoreNode): apitype = NodeTypes.DOCKER.value valid_address_types = {"inet", "inet6", "inet6link"} - def __init__(self, session, _id=None, name=None, nodedir=None, bootsh="boot.sh", start=True): + def __init__(self, session, _id=None, name=None, nodedir=None, bootsh="boot.sh", start=True, image=None): """ Create a CoreNode instance. @@ -133,7 +134,9 @@ class DockerNode(CoreNode): super(CoreNode, self).__init__(session, _id, name, start=start) self.nodedir = nodedir self.ctrlchnlname = os.path.abspath(os.path.join(self.session.session_dir, self.name)) - self.client = DockerClient(self.name) + if image is None: + image = "ubuntu" + self.client = DockerClient(self.name, image) self.pid = None self.up = False self.lock = threading.RLock() @@ -164,7 +167,7 @@ class DockerNode(CoreNode): if self.up: raise ValueError("starting a node that is already up") self.makenodedir() - self.pid = self.client.create_container("ubuntu:ifconfig") + self.pid = self.client.create_container() self.up = True def shutdown(self): diff --git a/daemon/examples/docker/docker2core.py b/daemon/examples/docker/docker2core.py index 39491214..e7a626ec 100644 --- a/daemon/examples/docker/docker2core.py +++ b/daemon/examples/docker/docker2core.py @@ -1,7 +1,7 @@ import logging from core.emulator.coreemu import CoreEmu -from core.emulator.emudata import IpPrefixes +from core.emulator.emudata import IpPrefixes, NodeOptions from core.emulator.enumerations import NodeTypes, EventTypes @@ -12,14 +12,14 @@ if __name__ == "__main__": session.set_state(EventTypes.CONFIGURATION_STATE) try: - # create nodes one prefixes = IpPrefixes(ip4_prefix="10.83.0.0/16") - node_one = session.add_node(_type=NodeTypes.DOCKER) - session.services.add_services(node_one, node_one.type, ["SSH"]) - 
logging.info("docker node(%s): %s", node_one.name, node_one.services) + options = NodeOptions(image="ubuntu:ifconfig") + + # create node one + node_one = session.add_node(_type=NodeTypes.DOCKER, node_options=options) interface_one = prefixes.create_interface(node_one) - # create nodes two + # create node two node_two = session.add_node() interface_two = prefixes.create_interface(node_two) @@ -27,8 +27,6 @@ if __name__ == "__main__": session.add_link(node_one.id, node_two.id, interface_one, interface_two) # instantiate - logging.info("INSTANTIATE") - logging.info("docker node(%s): %s", node_one.name, node_one.services) session.instantiate() finally: input("continue to shutdown") diff --git a/daemon/examples/docker/docker2docker.py b/daemon/examples/docker/docker2docker.py index 60c63c53..52bca1ce 100644 --- a/daemon/examples/docker/docker2docker.py +++ b/daemon/examples/docker/docker2docker.py @@ -1,7 +1,7 @@ import logging from core.emulator.coreemu import CoreEmu -from core.emulator.emudata import IpPrefixes +from core.emulator.emudata import IpPrefixes, NodeOptions from core.emulator.enumerations import NodeTypes, EventTypes @@ -15,17 +15,20 @@ if __name__ == "__main__": # create nodes and interfaces try: prefixes = IpPrefixes(ip4_prefix="10.83.0.0/16") - node_one = session.add_node(_type=NodeTypes.DOCKER) - node_two = session.add_node(_type=NodeTypes.DOCKER) + options = NodeOptions(image="ubuntu:ifconfig") + + # create node one + node_one = session.add_node(_type=NodeTypes.DOCKER, node_options=options) interface_one = prefixes.create_interface(node_one) + + # create node two + node_two = session.add_node(_type=NodeTypes.DOCKER, node_options=options) interface_two = prefixes.create_interface(node_two) # add link - input("press key to continue") session.add_link(node_one.id, node_two.id, interface_one, interface_two) - print(node_one.cmd_output("ifconfig")) - print(node_two.cmd_output("ifconfig")) - input("press key to continue") + + # instantiate 
session.instantiate() finally: input("continue to shutdown") diff --git a/daemon/examples/docker/switch.py b/daemon/examples/docker/switch.py index 70ba634f..6204a4cb 100644 --- a/daemon/examples/docker/switch.py +++ b/daemon/examples/docker/switch.py @@ -1,7 +1,7 @@ import logging from core.emulator.coreemu import CoreEmu -from core.emulator.emudata import IpPrefixes +from core.emulator.emudata import IpPrefixes, NodeOptions from core.emulator.enumerations import NodeTypes, EventTypes @@ -12,25 +12,31 @@ if __name__ == "__main__": session = coreemu.create_session() session.set_state(EventTypes.CONFIGURATION_STATE) - # create nodes and interfaces try: prefixes = IpPrefixes(ip4_prefix="10.83.0.0/16") + options = NodeOptions(image="ubuntu:ifconfig") + + # create switch switch = session.add_node(_type=NodeTypes.SWITCH) - node_one = session.add_node(_type=NodeTypes.DOCKER) - node_two = session.add_node(_type=NodeTypes.DOCKER) - node_three = session.add_node() + + # node one + node_one = session.add_node(_type=NodeTypes.DOCKER, node_options=options) interface_one = prefixes.create_interface(node_one) + + # node two + node_two = session.add_node(_type=NodeTypes.DOCKER, node_options=options) interface_two = prefixes.create_interface(node_two) + + # node three + node_three = session.add_node() interface_three = prefixes.create_interface(node_three) - # add link - input("press key to continue") + # add links session.add_link(node_one.id, switch.id, interface_one) session.add_link(node_two.id, switch.id, interface_two) session.add_link(node_three.id, switch.id, interface_three) - print(node_one.cmd_output("ifconfig")) - print(node_two.cmd_output("ifconfig")) - input("press key to continue") + + # instantiate session.instantiate() finally: input("continue to shutdown") From e83b38d96af60eac52359441e3e55fd6d5fe9086 Mon Sep 17 00:00:00 2001 From: Blake Harnden Date: Tue, 2 Jul 2019 07:48:43 -0700 Subject: [PATCH 46/51] initial lxd based node working from simple scripts --- 
daemon/core/emulator/enumerations.py | 1 + daemon/core/emulator/session.py | 4 +- daemon/core/nodes/lxd.py | 336 +++++++++++++++++++++++++++ daemon/core/nodes/nodemaps.py | 4 +- daemon/examples/lxd/README.md | 29 +++ daemon/examples/lxd/lxd2core.py | 34 +++ daemon/examples/lxd/lxd2lxd.py | 35 +++ daemon/examples/lxd/switch.py | 43 ++++ 8 files changed, 483 insertions(+), 3 deletions(-) create mode 100644 daemon/core/nodes/lxd.py create mode 100644 daemon/examples/lxd/README.md create mode 100644 daemon/examples/lxd/lxd2core.py create mode 100644 daemon/examples/lxd/lxd2lxd.py create mode 100644 daemon/examples/lxd/switch.py diff --git a/daemon/core/emulator/enumerations.py b/daemon/core/emulator/enumerations.py index fe59891f..654b2091 100644 --- a/daemon/core/emulator/enumerations.py +++ b/daemon/core/emulator/enumerations.py @@ -82,6 +82,7 @@ class NodeTypes(Enum): CONTROL_NET = 13 EMANE_NET = 14 DOCKER = 15 + LXC = 16 class Rj45Models(Enum): diff --git a/daemon/core/emulator/session.py b/daemon/core/emulator/session.py index 7d2365e7..7e63425d 100644 --- a/daemon/core/emulator/session.py +++ b/daemon/core/emulator/session.py @@ -500,7 +500,7 @@ class Session(object): # create node logging.info("creating node(%s) id(%s) name(%s) start(%s)", node_class.__name__, _id, name, start) - if _type == NodeTypes.DOCKER: + if _type in [NodeTypes.DOCKER, NodeTypes.LXC]: node = self.create_node(cls=node_class, _id=_id, name=name, start=start, image=node_options.image) else: node = self.create_node(cls=node_class, _id=_id, name=name, start=start) @@ -514,7 +514,7 @@ class Session(object): self.set_node_position(node, node_options) # add services to default and physical nodes only - if _type in [NodeTypes.DEFAULT, NodeTypes.PHYSICAL, NodeTypes.DOCKER]: + if _type in [NodeTypes.DEFAULT, NodeTypes.PHYSICAL, NodeTypes.DOCKER, NodeTypes.LXC]: node.type = node_options.model logging.debug("set node type: %s", node.type) self.services.add_services(node, node.type, 
node_options.services) diff --git a/daemon/core/nodes/lxd.py b/daemon/core/nodes/lxd.py new file mode 100644 index 00000000..5a36a1d9 --- /dev/null +++ b/daemon/core/nodes/lxd.py @@ -0,0 +1,336 @@ +import json +import logging +import os +import threading +import time + +from core import utils, CoreCommandError +from core.emulator.enumerations import NodeTypes +from core.nodes.base import CoreNode + + +class LxdClient(object): + def __init__(self, name, image): + self.name = name + self.image = image + self.pid = None + self._addr = {} + + def create_container(self): + utils.check_cmd("lxc launch {image} {name}".format( + name=self.name, + image=self.image + )) + data = self._get_data()[0] + self.pid = data["state"]["pid"] + return self.pid + + def _get_data(self): + args = "lxc list {name} --format json".format(name=self.name) + status, output = utils.cmd_output(args) + if status: + raise CoreCommandError(status, args, output) + return json.loads(output) + + def _cmd_args(self, cmd): + return "lxc exec {name} -- {cmd}".format( + name=self.name, + cmd=cmd + ) + + def is_alive(self): + data = self._get_data() + if not data: + return False + data = data[0] + return data["state"]["status"] == "Running" + + def stop_container(self): + utils.check_cmd("lxc delete --force {name}".format( + name=self.name + )) + + def run_cmd_output(self, cmd): + if isinstance(cmd, list): + cmd = " ".join(cmd) + args = self._cmd_args(cmd) + logging.info("lxc cmd output: %s", args) + return utils.cmd_output(args) + + def run_cmd(self, cmd, wait=True): + if isinstance(cmd, list): + cmd = " ".join(cmd) + args = self._cmd_args(cmd) + logging.info("lxc cmd: %s", args) + return utils.cmd(args, wait) + + def _ns_args(self, cmd): + return "nsenter -t {pid} -m -u -i -p -n {cmd}".format( + pid=self.pid, + cmd=cmd + ) + + def ns_cmd_output(self, cmd): + if isinstance(cmd, list): + cmd = " ".join(cmd) + args = self._ns_args(cmd) + logging.info("ns cmd: %s", args) + return utils.cmd_output(args) + + 
def ns_cmd(self, cmd, wait=True): + if isinstance(cmd, list): + cmd = " ".join(cmd) + args = self._ns_args(cmd) + logging.info("ns cmd: %s", args) + return utils.cmd(args, wait) + + def copy_file(self, source, destination): + if destination[0] != "/": + destination = os.path.join("/root/", destination) + + args = "lxc file push {source} {name}/{destination}".format( + source=source, + name=self.name, + destination=destination + ) + status, output = utils.cmd_output(args) + if status: + raise CoreCommandError(status, args, output) + + def getaddr(self, ifname, rescan=False): + """ + Get address for interface on node. + + :param str ifname: interface name to get address for + :param bool rescan: rescan flag + :return: interface information + :rtype: dict + """ + if ifname in self._addr and not rescan: + return self._addr[ifname] + + interface = {"ether": [], "inet": [], "inet6": [], "inet6link": []} + args = ["ip", "addr", "show", "dev", ifname] + status, output = self.ns_cmd_output(args) + for line in output: + line = line.strip().split() + if line[0] == "link/ether": + interface["ether"].append(line[1]) + elif line[0] == "inet": + interface["inet"].append(line[1]) + elif line[0] == "inet6": + if line[3] == "global": + interface["inet6"].append(line[1]) + elif line[3] == "link": + interface["inet6link"].append(line[1]) + else: + logging.warning("unknown scope: %s" % line[3]) + + if status: + logging.warning("nonzero exist status (%s) for cmd: %s", status, args) + self._addr[ifname] = interface + return interface + + +class LxcNode(CoreNode): + apitype = NodeTypes.LXC.value + valid_address_types = {"inet", "inet6", "inet6link"} + + def __init__(self, session, _id=None, name=None, nodedir=None, bootsh="boot.sh", start=True, image=None): + """ + Create a CoreNode instance. 
+ + :param core.emulator.session.Session session: core session instance + :param int _id: object id + :param str name: object name + :param str nodedir: node directory + :param str bootsh: boot shell to use + :param bool start: start flag + """ + super(CoreNode, self).__init__(session, _id, name, start=start) + self.nodedir = nodedir + self.ctrlchnlname = os.path.abspath(os.path.join(self.session.session_dir, self.name)) + if image is None: + image = "ubuntu" + self.client = LxdClient(self.name, image) + self.pid = None + self.up = False + self.lock = threading.RLock() + self._mounts = [] + self.bootsh = bootsh + if start: + self.startup() + + def alive(self): + """ + Check if the node is alive. + + :return: True if node is alive, False otherwise + :rtype: bool + """ + return self.client.is_alive() + + def startup(self): + """ + Start a new namespace node by invoking the vnoded process that + allocates a new namespace. Bring up the loopback device and set + the hostname. + + :return: nothing + """ + with self.lock: + if self.up: + raise ValueError("starting a node that is already up") + self.makenodedir() + self.pid = self.client.create_container() + self.up = True + + def shutdown(self): + """ + Shutdown logic. + + :return: nothing + """ + # nothing to do if node is not up + if not self.up: + return + + with self.lock: + self._netif.clear() + self.client.stop_container() + self.up = False + + def cmd(self, args, wait=True): + """ + Runs shell command on node, with option to not wait for a result. + + :param list[str]|str args: command to run + :param bool wait: wait for command to exit, defaults to True + :return: exit status for command + :rtype: int + """ + # return self.client.ns_cmd(args, wait) + return self.client.run_cmd(args, wait) + + def cmd_output(self, args): + """ + Runs shell command on node and get exit status and output. 
+ + :param list[str]|str args: command to run + :return: exit status and combined stdout and stderr + :rtype: tuple[int, str] + """ + # return self.client.ns_cmd_output(args) + return self.client.run_cmd_output(args) + + def check_cmd(self, args): + """ + Runs shell command on node. + + :param list[str]|str args: command to run + :return: combined stdout and stderr + :rtype: str + :raises CoreCommandError: when a non-zero exit status occurs + """ + # status, output = self.client.ns_cmd_output(args) + status, output = self.client.run_cmd_output(args) + if status: + raise CoreCommandError(status, args, output) + return output + + def termcmdstring(self, sh="/bin/sh"): + """ + Create a terminal command string. + + :param str sh: shell to execute command in + :return: str + """ + return "" + + def privatedir(self, path): + """ + Create a private directory. + + :param str path: path to create + :return: nothing + """ + logging.info("creating node dir: %s", path) + args = "mkdir -p {path}".format(path=path) + status, output = self.client.run_cmd_output(args) + if status: + raise CoreCommandError(status, args, output) + + def mount(self, source, target): + """ + Create and mount a directory. + + :param str source: source directory to mount + :param str target: target directory to create + :return: nothing + :raises CoreCommandError: when a non-zero exit status occurs + """ + logging.info("mounting source(%s) target(%s)", source, target) + raise Exception("you found a lxc node") + + def nodefile(self, filename, contents, mode=0o644): + """ + Create a node file with a given mode. 
+ + :param str filename: name of file to create + :param contents: contents of file + :param int mode: mode for file + :return: nothing + """ + logging.info("node dir(%s) ctrlchannel(%s)", self.nodedir, self.ctrlchnlname) + logging.info("nodefile filename(%s) mode(%s)", filename, mode) + file_path = os.path.join(self.nodedir, filename) + with open(file_path, "w") as f: + os.chmod(f.name, mode) + f.write(contents) + self.client.copy_file(file_path, filename) + + def nodefilecopy(self, filename, srcfilename, mode=None): + """ + Copy a file to a node, following symlinks and preserving metadata. + Change file mode if specified. + + :param str filename: file name to copy file to + :param str srcfilename: file to copy + :param int mode: mode to copy to + :return: nothing + """ + logging.info("node file copy file(%s) source(%s) mode(%s)", filename, srcfilename, mode) + raise Exception("you found a lxc node") + + def newnetif(self, net=None, addrlist=None, hwaddr=None, ifindex=None, ifname=None): + """ + Create a new network interface. 
+ + :param core.nodes.base.CoreNetworkBase net: network to associate with + :param list addrlist: addresses to add on the interface + :param core.nodes.ipaddress.MacAddress hwaddr: hardware address to set for interface + :param int ifindex: index of interface to create + :param str ifname: name for interface + :return: interface index + :rtype: int + """ + if not addrlist: + addrlist = [] + + with self.lock: + ifindex = self.newveth(ifindex=ifindex, ifname=ifname, net=net) + + if net is not None: + self.attachnet(ifindex, net) + + if hwaddr: + self.sethwaddr(ifindex, hwaddr) + + # delay required for lxc nodes + time.sleep(0.5) + + for address in utils.make_tuple(addrlist): + self.addaddr(ifindex, address) + + self.ifup(ifindex) + return ifindex diff --git a/daemon/core/nodes/nodemaps.py b/daemon/core/nodes/nodemaps.py index 0c1bb1bc..206d80d4 100644 --- a/daemon/core/nodes/nodemaps.py +++ b/daemon/core/nodes/nodemaps.py @@ -3,6 +3,7 @@ Provides default node maps that can be used to run core with. """ import core.nodes.base import core.nodes.docker +import core.nodes.lxd import core.nodes.network import core.nodes.physical from core.emane.nodes import EmaneNet @@ -27,5 +28,6 @@ NODES = { NodeTypes.TAP_BRIDGE: GreTapBridge, NodeTypes.PEER_TO_PEER: core.nodes.network.PtpNet, NodeTypes.CONTROL_NET: core.nodes.network.CtrlNet, - NodeTypes.DOCKER: core.nodes.docker.DockerNode + NodeTypes.DOCKER: core.nodes.docker.DockerNode, + NodeTypes.LXC: core.nodes.lxd.LxcNode } diff --git a/daemon/examples/lxd/README.md b/daemon/examples/lxd/README.md new file mode 100644 index 00000000..25d91ecd --- /dev/null +++ b/daemon/examples/lxd/README.md @@ -0,0 +1,29 @@ +# LXD Support + +Information on how LXD can be leveraged and included to create +nodes based on LXC containers and images to interface with +existing CORE nodes, when needed. + +# Installation + +```shell +sudo snap install lxd +``` + +# Configuration + +Initialize LXD and say no to adding a default bridge. 
+ +```shell +sudo lxd init +``` + +# Tools and Versions Tested With + +* LXD 3.14 +* nsenter from util-linux 2.31.1 + +# Examples + +This directory provides a few small examples creating LXC nodes +using LXD and linking them to themselves or with standard CORE nodes. diff --git a/daemon/examples/lxd/lxd2core.py b/daemon/examples/lxd/lxd2core.py new file mode 100644 index 00000000..8a5c9990 --- /dev/null +++ b/daemon/examples/lxd/lxd2core.py @@ -0,0 +1,34 @@ +import logging + +from core.emulator.coreemu import CoreEmu +from core.emulator.emudata import IpPrefixes, NodeOptions +from core.emulator.enumerations import NodeTypes, EventTypes + + +if __name__ == "__main__": + logging.basicConfig(level=logging.DEBUG) + coreemu = CoreEmu() + session = coreemu.create_session() + session.set_state(EventTypes.CONFIGURATION_STATE) + + try: + prefixes = IpPrefixes(ip4_prefix="10.83.0.0/16") + options = NodeOptions(image="ubuntu") + options.services = ["SSH"] + + # create node one + node_one = session.add_node(_type=NodeTypes.LXC, node_options=options) + interface_one = prefixes.create_interface(node_one) + + # create node two + node_two = session.add_node() + interface_two = prefixes.create_interface(node_two) + + # add link + session.add_link(node_one.id, node_two.id, interface_one, interface_two) + + # instantiate + session.instantiate() + finally: + input("continue to shutdown") + coreemu.shutdown() diff --git a/daemon/examples/lxd/lxd2lxd.py b/daemon/examples/lxd/lxd2lxd.py new file mode 100644 index 00000000..73f11d66 --- /dev/null +++ b/daemon/examples/lxd/lxd2lxd.py @@ -0,0 +1,35 @@ +import logging + +from core.emulator.coreemu import CoreEmu +from core.emulator.emudata import IpPrefixes, NodeOptions +from core.emulator.enumerations import NodeTypes, EventTypes + + +if __name__ == "__main__": + logging.basicConfig(level=logging.DEBUG) + + coreemu = CoreEmu() + session = coreemu.create_session() + session.set_state(EventTypes.CONFIGURATION_STATE) + + # create nodes and 
interfaces + try: + prefixes = IpPrefixes(ip4_prefix="10.83.0.0/16") + options = NodeOptions(image="ubuntu") + + # create node one + node_one = session.add_node(_type=NodeTypes.LXC, node_options=options) + interface_one = prefixes.create_interface(node_one) + + # create node two + node_two = session.add_node(_type=NodeTypes.LXC, node_options=options) + interface_two = prefixes.create_interface(node_two) + + # add link + session.add_link(node_one.id, node_two.id, interface_one, interface_two) + + # instantiate + session.instantiate() + finally: + input("continue to shutdown") + coreemu.shutdown() diff --git a/daemon/examples/lxd/switch.py b/daemon/examples/lxd/switch.py new file mode 100644 index 00000000..ac58d5e1 --- /dev/null +++ b/daemon/examples/lxd/switch.py @@ -0,0 +1,43 @@ +import logging + +from core.emulator.coreemu import CoreEmu +from core.emulator.emudata import IpPrefixes, NodeOptions +from core.emulator.enumerations import NodeTypes, EventTypes + + +if __name__ == "__main__": + logging.basicConfig(level=logging.DEBUG) + + coreemu = CoreEmu() + session = coreemu.create_session() + session.set_state(EventTypes.CONFIGURATION_STATE) + + try: + prefixes = IpPrefixes(ip4_prefix="10.83.0.0/16") + options = NodeOptions(image="ubuntu") + + # create switch + switch = session.add_node(_type=NodeTypes.SWITCH) + + # node one + node_one = session.add_node(_type=NodeTypes.LXC, node_options=options) + interface_one = prefixes.create_interface(node_one) + + # node two + node_two = session.add_node(_type=NodeTypes.LXC, node_options=options) + interface_two = prefixes.create_interface(node_two) + + # node three + node_three = session.add_node() + interface_three = prefixes.create_interface(node_three) + + # add links + session.add_link(node_one.id, switch.id, interface_one) + session.add_link(node_two.id, switch.id, interface_two) + session.add_link(node_three.id, switch.id, interface_three) + + # instantiate + session.instantiate() + finally: + input("continue to 
shutdown") + coreemu.shutdown() From 9e273c2d8fc52178ddf33efb197b0ea8a440a979 Mon Sep 17 00:00:00 2001 From: Blake Harnden Date: Tue, 2 Jul 2019 09:51:56 -0700 Subject: [PATCH 47/51] small tweaks to cleanup lxd from commented out code --- daemon/core/nodes/lxd.py | 7 +------ daemon/examples/lxd/lxd2core.py | 1 - 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/daemon/core/nodes/lxd.py b/daemon/core/nodes/lxd.py index 5a36a1d9..f8ca377e 100644 --- a/daemon/core/nodes/lxd.py +++ b/daemon/core/nodes/lxd.py @@ -172,9 +172,7 @@ class LxcNode(CoreNode): def startup(self): """ - Start a new namespace node by invoking the vnoded process that - allocates a new namespace. Bring up the loopback device and set - the hostname. + Startup logic. :return: nothing """ @@ -209,7 +207,6 @@ class LxcNode(CoreNode): :return: exit status for command :rtype: int """ - # return self.client.ns_cmd(args, wait) return self.client.run_cmd(args, wait) def cmd_output(self, args): @@ -220,7 +217,6 @@ class LxcNode(CoreNode): :return: exit status and combined stdout and stderr :rtype: tuple[int, str] """ - # return self.client.ns_cmd_output(args) return self.client.run_cmd_output(args) def check_cmd(self, args): @@ -232,7 +228,6 @@ class LxcNode(CoreNode): :rtype: str :raises CoreCommandError: when a non-zero exit status occurs """ - # status, output = self.client.ns_cmd_output(args) status, output = self.client.run_cmd_output(args) if status: raise CoreCommandError(status, args, output) diff --git a/daemon/examples/lxd/lxd2core.py b/daemon/examples/lxd/lxd2core.py index 8a5c9990..69d4e1de 100644 --- a/daemon/examples/lxd/lxd2core.py +++ b/daemon/examples/lxd/lxd2core.py @@ -14,7 +14,6 @@ if __name__ == "__main__": try: prefixes = IpPrefixes(ip4_prefix="10.83.0.0/16") options = NodeOptions(image="ubuntu") - options.services = ["SSH"] # create node one node_one = session.add_node(_type=NodeTypes.LXC, node_options=options) From 913ca51b1c4c570dc705479000f8dd4eb54f4d68 Mon Sep 17 
00:00:00 2001 From: Blake Harnden Date: Tue, 2 Jul 2019 16:05:45 -0700 Subject: [PATCH 48/51] cleanup for docker/lxd code being used, tweaks to running container commands --- daemon/core/emane/emanemanager.py | 4 +- daemon/core/nodes/base.py | 35 +++++--- daemon/core/nodes/docker.py | 112 +++++++++++++----------- daemon/core/nodes/interface.py | 18 ++-- daemon/core/nodes/lxd.py | 110 +++++++++-------------- daemon/examples/docker/docker2core.py | 2 +- daemon/examples/docker/docker2docker.py | 2 +- daemon/examples/docker/switch.py | 2 +- 8 files changed, 141 insertions(+), 144 deletions(-) diff --git a/daemon/core/emane/emanemanager.py b/daemon/core/emane/emanemanager.py index 7d1cab9e..451e3a11 100644 --- a/daemon/core/emane/emanemanager.py +++ b/daemon/core/emane/emanemanager.py @@ -669,12 +669,12 @@ class EmaneManager(ModelManager): # multicast route is needed for OTA data args = [constants.IP_BIN, "route", "add", otagroup, "dev", otadev] - node.check_cmd(args) + node.network_cmd(args) # multicast route is also needed for event data if on control network if eventservicenetidx >= 0 and eventgroup != otagroup: args = [constants.IP_BIN, "route", "add", eventgroup, "dev", eventdev] - node.check_cmd(args) + node.network_cmd(args) # start emane args = emanecmd + ["-f", os.path.join(path, "emane%d.log" % n), os.path.join(path, "platform%d.xml" % n)] diff --git a/daemon/core/nodes/base.py b/daemon/core/nodes/base.py index 555eda78..1ce21aca 100644 --- a/daemon/core/nodes/base.py +++ b/daemon/core/nodes/base.py @@ -493,11 +493,11 @@ class CoreNode(CoreNodeBase): # bring up the loopback interface logging.debug("bringing up loopback interface") - self.check_cmd([constants.IP_BIN, "link", "set", "lo", "up"]) + self.network_cmd([constants.IP_BIN, "link", "set", "lo", "up"]) # set hostname for node logging.debug("setting hostname: %s", self.name) - self.check_cmd(["hostname", self.name]) + self.network_cmd(["hostname", self.name]) # mark node as up self.up = True @@ -572,6 
+572,17 @@ class CoreNode(CoreNodeBase): """ return self.client.cmd_output(args) + def network_cmd(self, args): + """ + Runs a command for a node that is used to configure and setup network interfaces. + + :param list[str]|str args: command to run + :return: combined stdout and stderr + :rtype: str + :raises CoreCommandError: when a non-zero exit status occurs + """ + return self.check_cmd(args) + def check_cmd(self, args): """ Runs shell command on node. @@ -667,15 +678,15 @@ class CoreNode(CoreNodeBase): if self.up: utils.check_cmd([constants.IP_BIN, "link", "set", veth.name, "netns", str(self.pid)]) - self.check_cmd([constants.IP_BIN, "link", "set", veth.name, "name", ifname]) - self.check_cmd([constants.ETHTOOL_BIN, "-K", ifname, "rx", "off", "tx", "off"]) + self.network_cmd([constants.IP_BIN, "link", "set", veth.name, "name", ifname]) + self.network_cmd([constants.ETHTOOL_BIN, "-K", ifname, "rx", "off", "tx", "off"]) veth.name = ifname if self.up: # TODO: potentially find better way to query interface ID # retrieve interface information - output = self.check_cmd(["ip", "link", "show", veth.name]) + output = self.network_cmd([constants.IP_BIN, "link", "show", veth.name]) logging.debug("interface command output: %s", output) output = output.split("\n") veth.flow_id = int(output[0].strip().split(":")[0]) + 1 @@ -736,7 +747,7 @@ class CoreNode(CoreNodeBase): self._netif[ifindex].sethwaddr(addr) if self.up: args = [constants.IP_BIN, "link", "set", "dev", self.ifname(ifindex), "address", str(addr)] - self.check_cmd(args) + self.network_cmd(args) def addaddr(self, ifindex, addr): """ @@ -750,10 +761,10 @@ class CoreNode(CoreNodeBase): # check if addr is ipv6 if ":" in str(addr): args = [constants.IP_BIN, "addr", "add", str(addr), "dev", self.ifname(ifindex)] - self.check_cmd(args) + self.network_cmd(args) else: args = [constants.IP_BIN, "addr", "add", str(addr), "broadcast", "+", "dev", self.ifname(ifindex)] - self.check_cmd(args) + self.network_cmd(args) 
self._netif[ifindex].addaddr(addr) @@ -772,7 +783,7 @@ class CoreNode(CoreNodeBase): logging.exception("trying to delete unknown address: %s" % addr) if self.up: - self.check_cmd([constants.IP_BIN, "addr", "del", str(addr), "dev", self.ifname(ifindex)]) + self.network_cmd([constants.IP_BIN, "addr", "del", str(addr), "dev", self.ifname(ifindex)]) def delalladdr(self, ifindex, address_types=None): """ @@ -806,7 +817,7 @@ class CoreNode(CoreNodeBase): :return: nothing """ if self.up: - self.check_cmd([constants.IP_BIN, "link", "set", self.ifname(ifindex), "up"]) + self.network_cmd([constants.IP_BIN, "link", "set", self.ifname(ifindex), "up"]) def newnetif(self, net=None, addrlist=None, hwaddr=None, ifindex=None, ifname=None): """ @@ -867,12 +878,12 @@ class CoreNode(CoreNodeBase): utils.check_cmd([constants.IP_BIN, "link", "add", "name", tmp1, "type", "veth", "peer", "name", tmp2]) utils.check_cmd([constants.IP_BIN, "link", "set", tmp1, "netns", str(self.pid)]) - self.check_cmd([constants.IP_BIN, "link", "set", tmp1, "name", ifname]) + self.network_cmd([constants.IP_BIN, "link", "set", tmp1, "name", ifname]) interface = CoreInterface(node=self, name=ifname, mtu=_DEFAULT_MTU) self.addnetif(interface, self.newifindex()) utils.check_cmd([constants.IP_BIN, "link", "set", tmp2, "netns", str(othernode.pid)]) - othernode.check_cmd([constants.IP_BIN, "link", "set", tmp2, "name", otherifname]) + othernode.network_cmd([constants.IP_BIN, "link", "set", tmp2, "name", otherifname]) other_interface = CoreInterface(node=othernode, name=otherifname, mtu=_DEFAULT_MTU) othernode.addnetif(other_interface, othernode.newifindex()) diff --git a/daemon/core/nodes/docker.py b/daemon/core/nodes/docker.py index 909909f9..f14ceaa9 100644 --- a/daemon/core/nodes/docker.py +++ b/daemon/core/nodes/docker.py @@ -1,6 +1,6 @@ +import json import logging import os -import threading from core import utils, CoreCommandError from core.emulator.enumerations import NodeTypes @@ -15,53 +15,66 @@ class 
DockerClient(object): self._addr = {} def create_container(self): - utils.check_cmd("docker run -td --net=none --hostname {name} --name {name} {image} /bin/bash".format( - name=self.name, - image=self.image - )) + utils.check_cmd( + "docker run -td --init --net=none --hostname {name} --name {name} " + "--sysctl net.ipv6.conf.all.disable_ipv6=0 " + "{image} /bin/bash".format( + name=self.name, + image=self.image + )) self.pid = self.get_pid() return self.pid + def get_info(self): + args = "docker inspect {name}".format(name=self.name) + status, output = utils.cmd_output(args) + if status: + raise CoreCommandError(status, args, output) + data = json.loads(output) + if not data: + raise CoreCommandError(status, args, "docker({name}) not present".format(name=self.name)) + return data[0] + def is_alive(self): - status, output = utils.cmd_output("docker containers ls -f name={name}".format( - name=self.name - )) - return not status and len(output.split("\n")) == 2 + try: + data = self.get_info() + return data["State"]["Running"] + except CoreCommandError: + return False def stop_container(self): utils.check_cmd("docker rm -f {name}".format( name=self.name )) - def run_cmd(self, cmd): + def cmd(self, cmd, wait=True): if isinstance(cmd, list): cmd = " ".join(cmd) - logging.info("docker cmd: %s", cmd) - return utils.cmd_output("docker exec -it {name} {cmd}".format( + logging.info("docker cmd wait(%s): %s", wait, cmd) + return utils.cmd("docker exec {name} {cmd}".format( + name=self.name, + cmd=cmd + ), wait) + + def cmd_output(self, cmd): + if isinstance(cmd, list): + cmd = " ".join(cmd) + logging.info("docker cmd output: %s", cmd) + return utils.cmd_output("docker exec {name} {cmd}".format( name=self.name, cmd=cmd )) - def _ns_args(self, cmd): - return "nsenter -t {pid} -m -u -i -p -n {cmd}".format( + def ns_cmd(self, cmd): + if isinstance(cmd, list): + cmd = " ".join(cmd) + args = "nsenter -t {pid} -u -i -p -n {cmd}".format( pid=self.pid, cmd=cmd ) - - def 
ns_cmd_output(self, cmd): - if isinstance(cmd, list): - cmd = " ".join(cmd) - args = self._ns_args(cmd) logging.info("ns cmd: %s", args) return utils.cmd_output(args) - def ns_cmd(self, cmd, wait=True): - if isinstance(cmd, list): - cmd = " ".join(cmd) - args = self._ns_args(cmd) - logging.info("ns cmd: %s", args) - return utils.cmd(args, wait) - def get_pid(self): args = "docker inspect -f '{{{{.State.Pid}}}}' {name}".format(name=self.name) status, output = utils.cmd_output(args) @@ -95,7 +108,7 @@ class DockerClient(object): interface = {"ether": [], "inet": [], "inet6": [], "inet6link": []} args = ["ip", "addr", "show", "dev", ifname] - status, output = self.ns_cmd_output(args) + status, output = self.ns_cmd(args) for line in output: line = line.strip().split() if line[0] == "link/ether": @@ -118,11 +131,10 @@ class DockerClient(object): class DockerNode(CoreNode): apitype = NodeTypes.DOCKER.value - valid_address_types = {"inet", "inet6", "inet6link"} def __init__(self, session, _id=None, name=None, nodedir=None, bootsh="boot.sh", start=True, image=None): """ - Create a CoreNode instance. + Create a DockerNode instance. 
:param core.emulator.session.Session session: core session instance :param int _id: object id @@ -130,21 +142,12 @@ class DockerNode(CoreNode): :param str nodedir: node directory :param str bootsh: boot shell to use :param bool start: start flag + :param str image: image to start container with """ - super(CoreNode, self).__init__(session, _id, name, start=start) - self.nodedir = nodedir - self.ctrlchnlname = os.path.abspath(os.path.join(self.session.session_dir, self.name)) if image is None: image = "ubuntu" - self.client = DockerClient(self.name, image) - self.pid = None - self.up = False - self.lock = threading.RLock() - self._mounts = [] - self.bootsh = bootsh - logging.debug("docker services: %s", self.services) - if start: - self.startup() + self.image = image + super(DockerNode, self).__init__(session, _id, name, nodedir, bootsh, start) def alive(self): """ @@ -167,6 +170,7 @@ class DockerNode(CoreNode): if self.up: raise ValueError("starting a node that is already up") self.makenodedir() + self.client = DockerClient(self.name, self.image) self.pid = self.client.create_container() self.up = True @@ -194,7 +198,7 @@ class DockerNode(CoreNode): :return: exit status for command :rtype: int """ - return self.client.ns_cmd(args, wait) + return self.client.cmd(args, wait) def cmd_output(self, args): """ @@ -204,7 +208,7 @@ class DockerNode(CoreNode): :return: exit status and combined stdout and stderr :rtype: tuple[int, str] """ - return self.client.ns_cmd_output(args) + return self.client.cmd_output(args) def check_cmd(self, args): """ @@ -215,7 +219,17 @@ class DockerNode(CoreNode): :rtype: str :raises CoreCommandError: when a non-zero exit status occurs """ - status, output = self.client.ns_cmd_output(args) + status, output = self.client.cmd_output(args) + if status: + raise CoreCommandError(status, args, output) + return output + + def network_cmd(self, args): + if not self.up: + logging.debug("node down, not running network command: %s", args) + return 0 + + 
status, output = self.client.ns_cmd(args) if status: raise CoreCommandError(status, args, output) return output @@ -227,7 +241,7 @@ class DockerNode(CoreNode): :param str sh: shell to execute command in :return: str """ - return "" + return "docker exec -it {name} bash".format(name=self.name) def privatedir(self, path): """ @@ -238,9 +252,7 @@ class DockerNode(CoreNode): """ logging.info("creating node dir: %s", path) args = "mkdir -p {path}".format(path=path) - status, output = self.client.run_cmd(args) - if status: - raise CoreCommandError(status, args, output) + self.check_cmd(args) def mount(self, source, target): """ @@ -252,7 +264,7 @@ class DockerNode(CoreNode): :raises CoreCommandError: when a non-zero exit status occurs """ logging.info("mounting source(%s) target(%s)", source, target) - raise Exception("you found a docker node") + raise Exception("not supported") def nodefile(self, filename, contents, mode=0o644): """ @@ -282,4 +294,4 @@ class DockerNode(CoreNode): :return: nothing """ logging.info("node file copy file(%s) source(%s) mode(%s)", filename, srcfilename, mode) - raise Exception("you found a docker node") + raise Exception("not supported") diff --git a/daemon/core/nodes/interface.py b/daemon/core/nodes/interface.py index 3d8b6dc5..e9e4af6f 100644 --- a/daemon/core/nodes/interface.py +++ b/daemon/core/nodes/interface.py @@ -237,7 +237,7 @@ class Veth(CoreInterface): if self.node: try: - self.node.check_cmd([constants.IP_BIN, "-6", "addr", "flush", "dev", self.name]) + self.node.network_cmd([constants.IP_BIN, "-6", "addr", "flush", "dev", self.name]) except CoreCommandError: logging.exception("error shutting down interface") @@ -245,7 +245,7 @@ class Veth(CoreInterface): try: utils.check_cmd([constants.IP_BIN, "link", "delete", self.localname]) except CoreCommandError: - logging.exception("error deleting link") + logging.info("link already removed: %s", self.localname) self.up = False @@ -298,7 +298,7 @@ class TunTap(CoreInterface): return try: 
- self.node.check_cmd([constants.IP_BIN, "-6", "addr", "flush", "dev", self.name]) + self.node.network_cmd([constants.IP_BIN, "-6", "addr", "flush", "dev", self.name]) except CoreCommandError: logging.exception("error shutting down tunnel tap") @@ -361,7 +361,11 @@ class TunTap(CoreInterface): def nodedevexists(): args = [constants.IP_BIN, "link", "show", self.name] - return self.node.cmd(args) + try: + self.node.network_cmd(args) + return 0 + except CoreCommandError: + return 1 count = 0 while True: @@ -393,8 +397,8 @@ class TunTap(CoreInterface): self.waitfordevicelocal() netns = str(self.node.pid) utils.check_cmd([constants.IP_BIN, "link", "set", self.localname, "netns", netns]) - self.node.check_cmd([constants.IP_BIN, "link", "set", self.localname, "name", self.name]) - self.node.check_cmd([constants.IP_BIN, "link", "set", self.name, "up"]) + self.node.network_cmd([constants.IP_BIN, "link", "set", self.localname, "name", self.name]) + self.node.network_cmd([constants.IP_BIN, "link", "set", self.name, "up"]) def setaddrs(self): """ @@ -404,7 +408,7 @@ class TunTap(CoreInterface): """ self.waitfordevicenode() for addr in self.addrlist: - self.node.check_cmd([constants.IP_BIN, "addr", "add", str(addr), "dev", self.name]) + self.node.network_cmd([constants.IP_BIN, "addr", "add", str(addr), "dev", self.name]) class GreTap(CoreInterface): diff --git a/daemon/core/nodes/lxd.py b/daemon/core/nodes/lxd.py index f8ca377e..1437cac1 100644 --- a/daemon/core/nodes/lxd.py +++ b/daemon/core/nodes/lxd.py @@ -1,7 +1,6 @@ import json import logging import os -import threading import time from core import utils, CoreCommandError @@ -21,43 +20,46 @@ class LxdClient(object): name=self.name, image=self.image )) - data = self._get_data()[0] + data = self.get_info() self.pid = data["state"]["pid"] return self.pid - def _get_data(self): + def get_info(self): args = "lxc list {name} --format json".format(name=self.name) status, output = utils.cmd_output(args) if status: raise 
CoreCommandError(status, args, output) - return json.loads(output) - - def _cmd_args(self, cmd): - return "lxc exec {name} -- {cmd}".format( - name=self.name, - cmd=cmd - ) + data = json.loads(output) + if not data: + raise CoreCommandError(status, args, "LXC({name}) not present".format(name=self.name)) + return data[0] def is_alive(self): - data = self._get_data() - if not data: + try: + data = self.get_info() + return data["state"]["status"] == "Running" + except CoreCommandError: return False - data = data[0] - return data["state"]["status"] == "Running" def stop_container(self): utils.check_cmd("lxc delete --force {name}".format( name=self.name )) - def run_cmd_output(self, cmd): + def _cmd_args(self, cmd): + return "lxc exec -nT {name} -- {cmd}".format( + name=self.name, + cmd=cmd + ) + + def cmd_output(self, cmd): if isinstance(cmd, list): cmd = " ".join(cmd) args = self._cmd_args(cmd) logging.info("lxc cmd output: %s", args) return utils.cmd_output(args) - def run_cmd(self, cmd, wait=True): + def cmd(self, cmd, wait=True): if isinstance(cmd, list): cmd = " ".join(cmd) args = self._cmd_args(cmd) @@ -134,11 +136,10 @@ class LxdClient(object): class LxcNode(CoreNode): apitype = NodeTypes.LXC.value - valid_address_types = {"inet", "inet6", "inet6link"} def __init__(self, session, _id=None, name=None, nodedir=None, bootsh="boot.sh", start=True, image=None): """ - Create a CoreNode instance. + Create a LxcNode instance. 
:param core.emulator.session.Session session: core session instance :param int _id: object id @@ -146,20 +147,12 @@ class LxcNode(CoreNode): :param str nodedir: node directory :param str bootsh: boot shell to use :param bool start: start flag + :param str image: image to start container with """ - super(CoreNode, self).__init__(session, _id, name, start=start) - self.nodedir = nodedir - self.ctrlchnlname = os.path.abspath(os.path.join(self.session.session_dir, self.name)) if image is None: image = "ubuntu" - self.client = LxdClient(self.name, image) - self.pid = None - self.up = False - self.lock = threading.RLock() - self._mounts = [] - self.bootsh = bootsh - if start: - self.startup() + self.image = image + super(LxcNode, self).__init__(session, _id, name, nodedir, bootsh, start) def alive(self): """ @@ -180,6 +173,7 @@ class LxcNode(CoreNode): if self.up: raise ValueError("starting a node that is already up") self.makenodedir() + self.client = LxdClient(self.name, self.image) self.pid = self.client.create_container() self.up = True @@ -207,7 +201,7 @@ class LxcNode(CoreNode): :return: exit status for command :rtype: int """ - return self.client.run_cmd(args, wait) + return self.client.cmd(args, wait) def cmd_output(self, args): """ @@ -217,7 +211,7 @@ class LxcNode(CoreNode): :return: exit status and combined stdout and stderr :rtype: tuple[int, str] """ - return self.client.run_cmd_output(args) + return self.client.cmd_output(args) def check_cmd(self, args): """ @@ -228,11 +222,17 @@ class LxcNode(CoreNode): :rtype: str :raises CoreCommandError: when a non-zero exit status occurs """ - status, output = self.client.run_cmd_output(args) + status, output = self.client.cmd_output(args) if status: raise CoreCommandError(status, args, output) return output + def network_cmd(self, args): + if not self.up: + logging.debug("node down, not running network command: %s", args) + return 0 + return self.check_cmd(args) + def termcmdstring(self, sh="/bin/sh"): """ Create a 
terminal command string. @@ -240,7 +240,7 @@ class LxcNode(CoreNode): :param str sh: shell to execute command in :return: str """ - return "" + return "lxc exec {name} -- bash".format(name=self.name) def privatedir(self, path): """ @@ -251,9 +251,7 @@ class LxcNode(CoreNode): """ logging.info("creating node dir: %s", path) args = "mkdir -p {path}".format(path=path) - status, output = self.client.run_cmd_output(args) - if status: - raise CoreCommandError(status, args, output) + self.check_cmd(args) def mount(self, source, target): """ @@ -265,7 +263,7 @@ class LxcNode(CoreNode): :raises CoreCommandError: when a non-zero exit status occurs """ logging.info("mounting source(%s) target(%s)", source, target) - raise Exception("you found a lxc node") + raise Exception("not supported") def nodefile(self, filename, contents, mode=0o644): """ @@ -295,37 +293,9 @@ class LxcNode(CoreNode): :return: nothing """ logging.info("node file copy file(%s) source(%s) mode(%s)", filename, srcfilename, mode) - raise Exception("you found a lxc node") + raise Exception("not supported") - def newnetif(self, net=None, addrlist=None, hwaddr=None, ifindex=None, ifname=None): - """ - Create a new network interface. 
- - :param core.nodes.base.CoreNetworkBase net: network to associate with - :param list addrlist: addresses to add on the interface - :param core.nodes.ipaddress.MacAddress hwaddr: hardware address to set for interface - :param int ifindex: index of interface to create - :param str ifname: name for interface - :return: interface index - :rtype: int - """ - if not addrlist: - addrlist = [] - - with self.lock: - ifindex = self.newveth(ifindex=ifindex, ifname=ifname, net=net) - - if net is not None: - self.attachnet(ifindex, net) - - if hwaddr: - self.sethwaddr(ifindex, hwaddr) - - # delay required for lxc nodes - time.sleep(0.5) - - for address in utils.make_tuple(addrlist): - self.addaddr(ifindex, address) - - self.ifup(ifindex) - return ifindex + def addnetif(self, netif, ifindex): + super(LxcNode, self).addnetif(netif, ifindex) + # adding small delay to allow time for adding addresses to work correctly + time.sleep(0.5) diff --git a/daemon/examples/docker/docker2core.py b/daemon/examples/docker/docker2core.py index e7a626ec..15d1bbe7 100644 --- a/daemon/examples/docker/docker2core.py +++ b/daemon/examples/docker/docker2core.py @@ -13,7 +13,7 @@ if __name__ == "__main__": try: prefixes = IpPrefixes(ip4_prefix="10.83.0.0/16") - options = NodeOptions(image="ubuntu:ifconfig") + options = NodeOptions(model=None, image="ubuntu") # create node one node_one = session.add_node(_type=NodeTypes.DOCKER, node_options=options) diff --git a/daemon/examples/docker/docker2docker.py b/daemon/examples/docker/docker2docker.py index 52bca1ce..32fcc6d6 100644 --- a/daemon/examples/docker/docker2docker.py +++ b/daemon/examples/docker/docker2docker.py @@ -15,7 +15,7 @@ if __name__ == "__main__": # create nodes and interfaces try: prefixes = IpPrefixes(ip4_prefix="10.83.0.0/16") - options = NodeOptions(image="ubuntu:ifconfig") + options = NodeOptions(model=None, image="ubuntu") # create node one node_one = session.add_node(_type=NodeTypes.DOCKER, node_options=options) diff --git 
a/daemon/examples/docker/switch.py b/daemon/examples/docker/switch.py index 6204a4cb..a4615d4a 100644 --- a/daemon/examples/docker/switch.py +++ b/daemon/examples/docker/switch.py @@ -14,7 +14,7 @@ if __name__ == "__main__": try: prefixes = IpPrefixes(ip4_prefix="10.83.0.0/16") - options = NodeOptions(image="ubuntu:ifconfig") + options = NodeOptions(model=None, image="ubuntu") # create switch switch = session.add_node(_type=NodeTypes.SWITCH) From ae5d71873772fd288e8f05760b0ae1a0f7822b2f Mon Sep 17 00:00:00 2001 From: bharnden Date: Wed, 3 Jul 2019 23:09:55 -0700 Subject: [PATCH 49/51] revert loss pack to using floats in all related places --- daemon/core/nodes/network.py | 2 +- daemon/core/nodes/openvswitch.py | 2 +- daemon/core/xml/corexml.py | 2 +- daemon/proto/core/api/grpc/core.proto | 2 +- daemon/tests/test_xml.py | 6 +++--- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/daemon/core/nodes/network.py b/daemon/core/nodes/network.py index bb5b269d..2359ac41 100644 --- a/daemon/core/nodes/network.py +++ b/daemon/core/nodes/network.py @@ -459,7 +459,7 @@ class CoreNetwork(CoreNetworkBase): netem = ["netem"] changed = max(changed, netif.setparam("delay", delay)) if loss is not None: - loss = int(loss) + loss = float(loss) changed = max(changed, netif.setparam("loss", loss)) if duplicate is not None: duplicate = int(duplicate) diff --git a/daemon/core/nodes/openvswitch.py b/daemon/core/nodes/openvswitch.py index 33bea7f3..9e3bbd16 100644 --- a/daemon/core/nodes/openvswitch.py +++ b/daemon/core/nodes/openvswitch.py @@ -225,7 +225,7 @@ class OvsNet(CoreNetworkBase): delay_changed = netif.setparam("delay", delay) if loss is not None: - loss = int(loss) + loss = float(loss) loss_changed = netif.setparam("loss", loss) if duplicate is not None: diff --git a/daemon/core/xml/corexml.py b/daemon/core/xml/corexml.py index fb99e061..6b78f44e 100644 --- a/daemon/core/xml/corexml.py +++ b/daemon/core/xml/corexml.py @@ -782,7 +782,7 @@ class CoreXmlReader(object): 
link_options.mburst = get_int(options_element, "mburst") link_options.jitter = get_int(options_element, "jitter") link_options.key = get_int(options_element, "key") - link_options.per = get_int(options_element, "per") + link_options.per = get_float(options_element, "per") link_options.unidirectional = get_int(options_element, "unidirectional") link_options.session = options_element.get("session") link_options.emulation_id = get_int(options_element, "emulation_id") diff --git a/daemon/proto/core/api/grpc/core.proto b/daemon/proto/core/api/grpc/core.proto index 6f34070f..26c4bf59 100644 --- a/daemon/proto/core/api/grpc/core.proto +++ b/daemon/proto/core/api/grpc/core.proto @@ -791,7 +791,7 @@ message LinkOptions { int32 key = 3; int32 mburst = 4; int32 mer = 5; - int32 per = 6; + float per = 6; int64 bandwidth = 7; int32 burst = 8; int64 delay = 9; diff --git a/daemon/tests/test_xml.py b/daemon/tests/test_xml.py index 7e8dabd1..6106383e 100644 --- a/daemon/tests/test_xml.py +++ b/daemon/tests/test_xml.py @@ -350,7 +350,7 @@ class TestXml: # create link link_options = LinkOptions() - link_options.per = 20 + link_options.per = 10.5 link_options.bandwidth = 50000 link_options.jitter = 10 link_options.delay = 30 @@ -415,7 +415,7 @@ class TestXml: # create link link_options = LinkOptions() - link_options.per = 20 + link_options.per = 10.5 link_options.bandwidth = 50000 link_options.jitter = 10 link_options.delay = 30 @@ -483,7 +483,7 @@ class TestXml: link_options_one.unidirectional = 1 link_options_one.bandwidth = 5000 link_options_one.delay = 10 - link_options_one.per = 5 + link_options_one.per = 10.5 link_options_one.dup = 5 link_options_one.jitter = 5 session.add_link(node_one.id, node_two.id, interface_one, interface_two, link_options_one) From 836133c97d7c8baea3bd615a8d74ef1f16d9011f Mon Sep 17 00:00:00 2001 From: bharnden Date: Fri, 5 Jul 2019 09:00:14 -0700 Subject: [PATCH 50/51] reverting a change that caused switching sessions within gui to break --- 
daemon/core/api/tlv/corehandlers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/daemon/core/api/tlv/corehandlers.py b/daemon/core/api/tlv/corehandlers.py index de0c62f7..4e51b901 100644 --- a/daemon/core/api/tlv/corehandlers.py +++ b/daemon/core/api/tlv/corehandlers.py @@ -1641,10 +1641,10 @@ class CoreHandler(socketserver.BaseRequestHandler): logging.info("request to connect to session %s", session_id) # remove client from session broker and shutdown if needed + self.remove_session_handlers() self.session.broker.session_clients.remove(self) if not self.session.broker.session_clients and not self.session.is_active(): self.coreemu.delete_session(self.session.id) - self.remove_session_handlers() # set session to join self.session = session From b8cbf6b5f6724744c26e20819592f67af274713a Mon Sep 17 00:00:00 2001 From: bharnden Date: Fri, 5 Jul 2019 16:12:26 -0700 Subject: [PATCH 51/51] bumped version for release --- configure.ac | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/configure.ac b/configure.ac index d8a01a2c..547389cd 100644 --- a/configure.ac +++ b/configure.ac @@ -2,7 +2,7 @@ # Process this file with autoconf to produce a configure script. # this defines the CORE version number, must be static for AC_INIT -AC_INIT(core, 5.3.0) +AC_INIT(core, 5.3.1) # autoconf and automake initialization AC_CONFIG_SRCDIR([netns/version.h.in])