diff --git a/.editorconfig b/.editorconfig index f1b09263..d0466b2f 100644 --- a/.editorconfig +++ b/.editorconfig @@ -11,7 +11,6 @@ insert_final_newline = true [*.py] indent_style = space indent_size = 4 -max_line_length = 88 [*.am] indent_style = tab diff --git a/.github/workflows/daemon-checks.yml b/.github/workflows/daemon-checks.yml deleted file mode 100644 index dc169dcf..00000000 --- a/.github/workflows/daemon-checks.yml +++ /dev/null @@ -1,41 +0,0 @@ -name: Daemon Checks - -on: [push] - -jobs: - build: - runs-on: ubuntu-22.04 - steps: - - uses: actions/checkout@v1 - - name: Set up Python 3.9 - uses: actions/setup-python@v1 - with: - python-version: 3.9 - - name: install poetry - run: | - python -m pip install --upgrade pip - pip install poetry - cd daemon - cp core/constants.py.in core/constants.py - sed -i 's/required=True/required=False/g' core/emulator/coreemu.py - poetry install - - name: isort - run: | - cd daemon - poetry run isort -c -df - - name: black - run: | - cd daemon - poetry run black --check . - - name: flake8 - run: | - cd daemon - poetry run flake8 - - name: grpc - run: | - cd daemon/proto - poetry run python -m grpc_tools.protoc -I . --python_out=.. --grpc_python_out=.. core/api/grpc/*.proto - - name: test - run: | - cd daemon - poetry run pytest --mock tests diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml deleted file mode 100644 index abbadab3..00000000 --- a/.github/workflows/documentation.yml +++ /dev/null @@ -1,21 +0,0 @@ -name: documentation -on: - push: - branches: - - master -permissions: - contents: write -jobs: - deploy: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - uses: actions/setup-python@v4 - with: - python-version: 3.x - - uses: actions/cache@v2 - with: - key: ${{ github.ref }} - path: .cache - - run: pip install mkdocs-material - - run: mkdocs gh-deploy --force diff --git a/.gitignore b/.gitignore index ca4c07dd..dbf9e4c3 100644 --- a/.gitignore +++ b/.gitignore @@ -8,22 +8,18 @@ Makefile Makefile.in aclocal.m4 autom4te.cache -/config +config config.h config.h.in config.log config.status configure -configure~ debian stamp-h1 -# python virtual environments -venv - # generated protobuf files -*_pb2.py -*_pb2_grpc.py +daemon/core/grpc/core_pb2.py +daemon/core/grpc/core_pb2_grpc.py # python build directory dist @@ -43,7 +39,6 @@ coverage.xml # python files *.egg-info -*.pyc # ignore package files *.rpm @@ -59,9 +54,4 @@ coverage.xml # ignore built input files netns/setup.py daemon/setup.py - -# python -__pycache__ - -# ignore core player files -*.core +ns3/setup.py diff --git a/CHANGELOG.md b/CHANGELOG.md deleted file mode 100644 index 425f2ae0..00000000 --- a/CHANGELOG.md +++ /dev/null @@ -1,977 +0,0 @@ -## 2023-08-01 CORE 9.0.3 - -* Installation - * updated various dependencies -* Documentation - * improved GUI docs to include node interaction and note xhost usage - * \#780 - fixed gRPC examples - * \#787 - complete documentation revamp to leverage mkdocs material - * \#790 - fixed custom emane model example -* core-daemon - * update type hinting to avoid deprecated imports - * updated commands ran within docker based nodes to have proper environment variables - * fixed issue improperly setting session options over gRPC - * \#668 - add fedora sbin path to frr service - * \#774 - fixed pcap configservice - * \#805 - fixed radvd configservice template error -* core-gui - * update type hinting to avoid deprecated imports - * fixed issue allowing duplicate named hook scripts - * fixed issue 
joining sessions with RJ45 nodes
-* utility scripts
-  * fixed issue in core-cleanup for removing devices
-
-## 2023-03-02 CORE 9.0.2
-
-* Installation
-  * updated python dependencies, including invoke to resolve python 3.10+ issues
-  * improved example dockerfiles to use less space for built images
-* Documentation
-  * updated emane install instructions
-  * added Docker related issues to install instructions
-* core-daemon
-  * fixed issue using invalid device name in sysctl commands
-  * updated PTP nodes to properly disable mac learning for their linux bridge
-  * fixed issue for LXC nodes to properly use a configured image name and write it to XML
-  * \#742 - fixed issue with bad wlan node id being used
-  * \#744 - fixed issue not properly setting broadcast address
-* core-gui
-  * fixed sample1.xml to remove SSH service
-  * fixed emane demo examples
-  * fixed issue displaying emane configs generally configured for a node
-
-## 2022-11-28 CORE 9.0.1
-
-* Installation
-  * updated protobuf and grpcio-tools versions in pyproject.toml to account for bad version mix
-
-## 2022-11-18 CORE 9.0.0
-
-* Breaking Changes
-  * removed session nodes file
-  * removed session state file
-  * emane now runs in one process per nem with unique control ports
-  * grpc client has been refactored and updated
-  * removed tcl/legacy gui, imn file support and the tlv api
-  * link configuration is now different, but consistent, for wired links
-* Installation
-  * added packaging for single file distribution
-  * python3.9 is now the minimum required version
-  * updated Dockerfile examples
-  * updated various python dependencies
-  * virtual environment is now installed to /opt/core/venv
-* Documentation
-  * updated emane invoke task examples
-  * revamped install documentation
-  * added wireless node notes
-* core-gui
-  * updated config services to display rendered templates and allow editing
-  * fixed node icon issue when updating preferences
-  * \#89 - throughput widget now works for hubs/switches
-  * \#691 - fixed custom nodes to properly use config services
-* gRPC API
-  * added linked call to support linking and unlinking interfaces without destroying them
-  * fixed issue during start session clearing out session options
-  * added call to get rendered config service files
-  * removed get_node_links from links from client
-  * nem id and nem port have been added to GetNode and AddLink calls
-* core-daemon
-  * wired links always create two veth pairs joined by a bridge
-  * node interfaces are now configured within the container to apply to outgoing traffic
-  * session.add_node now uses NodeOptions, allowing for node specific options
-  * fixed issue with xml reading node canvas values
-  * removed Session.add_node_file
-  * fixed get requirements logic
-  * fixed docker/lxd node support for terminal commands on remote servers
-  * improved docker node command execution time using nsenter
-  * new wireless node type added to support dynamic loss based on distance
-  * \#513 - adding and deleting distributed links during runtime is now supported
-  * \#703 - fixed issue not starting emane event listening service
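The core-daemon entry above changes how scripted scenarios create nodes: `session.add_node` now takes the node class together with `NodeOptions` for node-specific settings. A minimal sketch of that flow follows; the `add_node`/`NodeOptions` names come from the entry itself, but the import paths, option fields, and surrounding calls are assumptions that may differ between releases, so treat it as illustrative rather than the project's reference example.

```python
# Illustrative sketch only: add_node/NodeOptions are named in the changelog
# entry above; import paths and option fields are assumptions for this sketch.
from core.emulator.coreemu import CoreEmu
from core.emulator.data import NodeOptions
from core.nodes.base import CoreNode

coreemu = CoreEmu()
session = coreemu.create_session()

# node-specific options are passed alongside the node class
options = NodeOptions(name="n1", x=100, y=100)
node = session.add_node(CoreNode, options=options)

session.instantiate()  # start the emulated scenario
coreemu.shutdown()
```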
documentation - * improved table markdown - -## 2022-02-18 CORE 8.1.0 - -* Installation - * updated dependency versions to account for known vulnerabilities -* GUI - * fixed issue drawing asymmetric link configurations when joining a session -* daemon - * fixed issue getting templates and creating files for config services - * added by directional support for network to network links - * \#647 - fixed issue when creating RJ45 nodes - * \#646 - fixed issue when creating files for Docker nodes - * \#645 - improved wlan change updates to account for all updates with no delay -* services - * fixed file generation for OSPFv2 config service - -## 2022-01-12 CORE 8.0.0 - -*Breaking Changes - * heavily refactored gRPC client, removing some calls, adding others, all using type hinted classes representing their protobuf counterparts - * emane adjustments to run each nem in its own process, includes adjustments to configuration, which may cause issues - * internal daemon cleanup and refactoring, in a script directly driving a scenario is used -* Installation - * added options to allow installation without ospf mdr - * removed tasks that are no longer needed - * updates to properly install/remove example files - * pipx/poetry/invoke versions are now locked to help avoid update related issues - * install.sh is now setup.sh and is a convenience to get tool setup to run invoke -* Documentation - * formally added notes for Docker and LXD based node types - * added config services - * Updated README to have quick notes for installation - * \#563 - update to note how to enable core service -* Examples - * \#598 - update to fix sample1.imn to working order -* core-daemon - * emane global configuration is now configurable per nem - * fixed wlan loss to support float values - * improved default service loading to use full core path - * improved emane model loading to occur one time - * fixed handling rj45 link edits from tlv api - * fixed wlan config getting a default value for the promiscuous setting when not provided - * ebtables usage has now been replaced with nftables - * \#564 - logging is now using module named loggers - * \#573 - emane processes are not created 1 to 1 with nems - * \#608 - update lxml version - * \#609 - update pyyaml version - * \#623 - fixed issue with ovs mode and mac learning -* core-gui - * config services are now the default service type - * legacy services are marked as deprecated - * fix to properly load session options - * logging is now using module named loggers - * save as will not update the current session file name as expected - * fix to properly clear out removed customized services - * adding directories to a service that do not exist, is now valid - * added flag to exit after creating gui directory from command line - * added new options to enable/disable ip4/ip6 assignment - * improved canvas draw order, when joining sessions - * improved node copy/paste to avoid issues when pasting text into service config dialogs - * each canvas will not correctly save and load their size from xml -* gRPC API - * session options are now returned for GetSession - * fixed issue not properly creating the session directory during start session definition state - * updates to separate editing a node and moving a node, new MoveNode call added, EditNode is now used for editing icons -* Services - * fixed default route config service - * config services now have options for shadowing directories, including per node customization - -## 2021-09-17 CORE 7.5.2 - -* Installation - * \#596 - 
fixes issue related to installing poetry by pinning version to 1.1.7 - * updates pipx installation to pinned version 0.16.4 -* core-daemon - * \#600 - fixes known vulnerability for pillow dependency by updating version - -## 2021-04-15 CORE 7.5.1 - -* core-pygui - * fixed issues creating and drawing custom nodes - -## 2021-03-11 CORE 7.5.0 - -* core-daemon - * fixed issue setting mobility loop value properly - * fixed issue that some states would not properly remove session directories - * \#560 - fixed issues with sdt integration for mobility movement and layer creation -* core-pygui - * added multiple canvas support - * added support to hide nodes and restore them visually - * update to assign full netmasks to wireless connected nodes by default - * update to display services and action controls for nodes during runtime - * fixed issues with custom nodes - * fixed issue auto assigning macs, avoiding duplication - * fixed issue joining session with different netmasks - * fixed issues when deleting a session from the sessions dialog - * \#550 - fixed issue not sending all service customization data -* core-cli - * added delete session command - -## 2021-01-11 CORE 7.4.0 - -* Installation - * fixed issue for automated install assuming ID_LIKE is always present in /etc/os-release -* gRPC API - * fixed issue stopping session and not properly going to data collect state - * fixed issue to have start session properly create a directory before configuration state -* core-pygui - * fixed issue handling deletion of wired link to a switch - * avoid saving edge metadata to xml when values are default - * fixed issue editing node mac addresses - * added support for configuring interface names - * fixed issue with potential node names to allow hyphens and remove under bars - * \#531 - fixed issue changing distributed nodes back to local -* core-daemon - * fixed issue to properly handle deleting links from a network to network node - * updated xml to support writing and reading link buffer configurations - * reverted change and removed mac learning from wlan, due to promiscuous like behavior - * fixed issue creating control interfaces when starting services - * fixed deadlock issue when clearing a session using sdt - * \#116 - fixed issue for wlans handling multiple mobility scripts at once - * \#539 - fixed issue in udp tlv api - -## 2020-12-02 CORE 7.3.0 - -* core-daemon - * fixed issue where emane global configuration was not being sent to core-gui - * updated controlnet names on host to be prefixed with ctrl - * fixed RJ45 link shutdown from core-gui causing an error - * fixed emane external transport xml generation - * \#517 - update to account for radvd required directory - * \#514 - support added for session specific environment files - * \#529 - updated to configure netem limit based on delay or user specified, requires kernel 3.3+ -* core-pygui - * fixed issue drawing wlan/emane link options when it should not have - * edge labels are now placed a set distance from nodes like original gui - * link color/width are now saved to xml files - * added support to configure buffer size for links - * \#525 - added support for multiple wired links between the same nodes - * \#526 - added option to hide/show links with 100% loss -* Documentation - * \#527 - typo in service documentation - * \#515 - added examples to docs for using EMANE features within a CORE context - -## 2020-09-29 CORE 7.2.1 - -* core-daemon - * fixed issue where shutting down sessions may not have removed session directories - * 
fixed issue with multiple emane interfaces on the same node not getting the right configuration -* Installation - * updated automated install to be a bit more robust for alternative distros - * added force install type to try and leverage a redhat/debian like install - * locked ospf mdr version installed to older commit to avoid issues with multiple interfaces on same node - -## 2020-09-15 CORE 7.2.0 - -* Installation - * locked down version of ospf-mdr installed in automated install - * locked down version of emane to v1.2.5 in automated emane install - * added option to install locally using the -l option -* core-daemon - * improve error when retrieving services that do not exist, or failed to load - * fixed issue with writing/reading emane node interface configurations to xml - * fixed issue with not setting the emane model when creating a node - * added common utility method for getting a emane node interface config id in core.utils - * fixed issue running emane on more than one interface for a node - * fixed issue validating paths when creating emane transport xml for a node - * fixed issue avoiding multiple calls to shutdown, if already in shutdown state -* core-pygui - * fixed issue configuring emane for a node interface -* gRPC API - * added wrapper client that can provide type hinting and a simpler interface at core.api.grpc.clientw - * fixed issue creating sessions that default to having a very large reference scale - * fixed issue with GetSession returning control net nodes - -## 2020-08-21 CORE 7.1.0 - -* Installation - * added core-python script that gets installed to help globally reference the virtual environment -* gRPC API - * GetSession will now return all configuration information for a session and the file it was opened from, if applicable - * node update events will now include icon information - * fixed issue with getting session throughputs for sessions with a high id -* core-daemon - * \#503 - EMANE networks will now work with mobility again - * \#506 - fixed service dependency resolution issue - * fixed issue sending hooks to core-gui when joining session -* core-pygui - * fixed issues editing hooks - * fixed issue with cpu usage when joining a session - * fixed mac field not being disabled during runtime when configuring a node - * removed unlimited button from link config dialog - * fixed issue with copy/paste links and their options - * fixed issue with adding nodes/links and editing links during runtime - * updated open file dialog in config dialogs to open to ~/.coregui home directory - * fixed issue double clicking sessions dialog in invalid areas - * added display of asymmetric link options on links - * fixed emane config dialog display - * fixed issue saving backgrounds in xml files - * added view toggle for wired/wireless links - * node events will now update icons - -## 2020-07-28 CORE 7.0.1 - -* Bugfixes - * \#500 - fixed issue running node commands with shell=True - * fixed issue for poetry based install not properly vetting requirements for dataclasses dependency - -## 2020-07-23 CORE 7.0.0 - -* Breaking Changes - * core.emudata and core.data combined and cleaned up into core.data - * updates to consistently use mac instead of hwaddr/mac - * \#468 - code related to adding/editing/deleting links cleaned up - * \#469 - usages of per all changed to loss to be consistent - * \#470 - variables with numbered names now use numbers directly - * \#471 - node startup is no longer embedded within its constructor - * \#472 - code updated to refer to interfaces 
consistently as iface - * \#475 - code updates changing how ip addresses are stored on interfaces - * \#476 - executables to check for moved into own module core.executables - * \#486 - core will now install into its own python virtual environment managed by poetry -* core-daemon - * updates to properly save/load distributed servers to xml - * \#474 - added type hinting to all service files - * \#478 - fixed typo in config service directory - * \#479 - opening an xml file will now cycle through states like a normal session - * \#480 - ovs configuration will now save/load from xml and display in guis - * \#484 - changes to support adding emane links during runtime -* core-pygui - * fixed issue not displaying services for the default group in service dialogs - * fixed issue starting a session when the daemon is not present - * fixed issue attempting to open terminals for invalid nodes - * fixed issue syncing session location - * fixed issue joining a session with mobility, not in runtime - * added cpu usage monitor to status bar - * emane configurations can now be seen during runtime - * rj45 nodes can only have one link - * disabling throughputs will clear labels - * improvements to custom service copy - * link options will now be drawn on as a label - * updates to handle runtime link events - * \#477 - added optional details pane for a quick view of node/link details - * \#485 - pygui fixed observer widget for invalid nodes - * \#496 - improved alert handling -* core-gui - * \#493 - increased frame size to show all emane configuration options -* gRPC API - * added set session user rpc - * added cpu usage stream - * interface objects returned from get_node will now provide node_id, net_id, and net2_id data - * peer to peer nodes will not be included in get_session calls - * pathloss events will now throw an error when nem id not found - * \#481 - link rpc calls will broadcast out - * \#496 - added alert rpc call -* Services - * fixed issue reading files in security services - * \#494 - add staticd to daemons list for frr services - -## 2020-06-11 CORE 6.5.0 -* Breaking Changes - * CoreNode.newnetif - both parameters are required and now takes an InterfaceData object as its second parameter - * CoreNetworkBase.linkconfig - now takes a LinkOptions parameter instead of a subset of some of the options (ie bandwidth, delay, etc) - * \#453 - Session.add_node and Session.get_node now requires the node class you expect to create/retrieve - * \#458 - rj45 cleanup to only inherit from one class -* Enhancements - * fixed issues with handling bad commands for TLV execute messages - * removed unused boot.sh from CoreNode types - * added linkconfig to CoreNetworkBase and cleaned up function signature - * emane position hook now saves geo position to node - * emane pathloss support - * core.emulator.emudata leveraged dataclass and type hinting - * \#459 - updated transport type usage to an enum - * \#460 - updated network policy type usage to an enum -* Python GUI Enhancements - * fixed throughput events do not work for joined sessions - * fixed exiting app with a toolbar picker showing - * fixed issue with creating interfaces and reusing subnets after deletion - * fixed issue with moving text shapes - * fixed scaling with custom node selected - * fixed toolbar state switching issues - * enable/disable toolbar when running stop/start - * marker config integrated into toolbar - * improved color picker layout - * shapes can now be moved while drawing shapes - * added observers to toolbar in run mode -* gRPC 
API - * node events will now have geo positional data - * node geo data is now returned in get_session and get_node calls - * \#451 - added wlan link api to allow direct linking/unlinking of wireless links between nodes - * \#462 - added streaming call for sending node position/geo changes - * \#463 - added streaming call for emane pathloss events -* Bugfixes - * \#454 - fixed issue creating docker nodes, but containers are now required to have networking tools - * \#466 - fixed issue in python gui when xml file is loading nodes with no ip4 addresses - -## 2020-05-11 CORE 6.4.0 -* Enhancements - * updates to core-route-monitor, allow specific session, configurable settings, and properly - listen on all interfaces - * install.sh now has a "-r" option to help with reinstalling from current branch and installing - current python dependencies - * \#202 - enable OSPFv2 fast convergence - * \#178 - added comments to OVS service -* Python GUI Enhancements - * added initial documentation to help support usage - * supports drawing multiple links for wireless connections - * supports differentiating wireless networks with different colored links - * implemented unlink in node context menu to delete links to other nodes - * implemented node run tool dialog - * implemented find node dialog - * implemented address configuration dialog - * implemented mac configuration dialog - * updated link address creation to more closely mimic prior behavior - * updated configuration to use yaml class based configs - * implemented auto grid layout for nodes - * fixed drawn wlan ranges during configuration -* Bugfixes - * no longer writes link option data for WLAN/EMANE links in XML - * avoid configuring links for WLAN/EMANE link options in XML, due to them being written to XML prior - * updates to allow building python docs again - * \#431 - peer to peer node uplink link data was not using an enum properly due to code changes - * \#432 - loading XML was not setting EMANE nodes model - * \#435 - loading XML was not maintaining existing session options - * \#448 - fixed issue sorting hooks being saved to XML - -## 2020-04-13 CORE 6.3.0 -* Features - * \#424 - added FRR IS-IS service -* Enhancements - * \#414 - update GUI OSPFv2 adjacency widget to work with FRR - * \#416 - EMANE links can now be drawn for 80211 and RF Pipe models - * \#418 #409 - code cleanup - * \#425 - added route monitor script for SDT3D integration - * a formal error will now be thrown when EMANE binding are not installed, but attempted to be used - * node positions will now default to 0,0 to avoid GUI errors, when one is not provided - * improved SDT3D integration, multiple link support and usage of custom layers -* Python GUI Enhancements - * enabled edit menu delete - * cleaned up node context menu and enabled delete -* Bugfixes - * \#427 - fixed issue in default route service - * \#426 - fixed issue reading ipsec template file - * \#420 - fixed issue with TLV API udp handler - * \#411 - allow wlan to be configured with 0 values - * \#415 - general EMANE configuration was not being saved/loaded from XML - -## 2020-03-16 CORE 6.2.0 -* gRPC API - * Added call to execute python script -* Enhancements - * \#371 - improved coretk gui scaling - * \#374 - display range visually for wlan in coretk gui, when configuring - * \#377 - improved coretk error dialogs - * \#379 - fixed issues with core converting between x,y and lon,lat for values that would cross utm zones - * \#384 - sdt integration moved internally to core code allowing it to work for 
coretk gui as well - * \#387 - coretk gui will now auto detect potential valid terminal and command to use for interacting with nodes during runtime - * \#389 - coretk gui will now attempt to reconnect to daemon without need to restart - * \#395 - coretk gui now has "save" and "save as" menu options - * \#402 - coretk will now allow terminal preference to be directly edited -* Bugfixes - * \#375 - fixed issues with emane event monitor handling data - * \#381 - executing a python script will now wait until completion before looking to join a new session - * \#391 - fixed configuring node ip addresses in coretk gui - * \#392 - fixed coretk link display when addresses are cleared out - * \#393 - coretk gui will properly clear marker annotations when switching sessions - * \#396 - Docker and LXC nodes will now properly save to XML - * \#406- WLAN bridge initialization was not ran when all nodes are disconnected - -## 2020-02-20 CORE 6.1.0 -* New - * config services - these services leverage a proper template engine and have configurable parameters, given enough time may replace existing services - * core-imn-to-xml - IMN to XML utility script - * replaced internal code for determining ip/mac address with netaddr library -* Enhancements - * added distributed package for built packages - * made use of python type hinting for functions and their return values - * updated Quagga zebra service to remove deprecated warning -* Removed - * removed stale ns3 code -* CORETK GUI - * added logging - * improved error dialog - * properly use global ipv6 addresses for nodes - * disable proxy usage by default, flag available to enable -* gRPC API - * add_link - now returns created interface information - * set_node_service - can now set files and directories to properly replicate previous usage - * get_emane_event_channel - return information related to the currently used emane event channel -* Bugfixes - * fixed session SDT functionality back to working order, due to python3 changes - * avoid shutting down services for nodes that are not up - * EMANE bypass model options will now display properly in GUIs - * XML scenarios will now properly read in custom node icons - * \#372 - fixed mobility waypoint comparisons - * \#370 - fixed radvd service - * \#368 - updated frr services to properly start staticd when needed - * \#358 - fixed systemd service install path - * \#350 - fixed frr babel wireless configuration - * \#354 - updated frr to reset interfaces to properly take configurations - -## 2020-01-01 CORE 6.0.0 -* New - * beta release of the python based tk GUI, use **coretk-gui** to try it out, plan will be to eventually sunset the old GUI once this is good enough - * this GUI will allow us to provide enhancements and a consistent python dev environment for developers -* Major Changes - * python3.6+ support only, due to python2 EOL https://pyfound.blogspot.com/2019/12/python-2-sunset.html - * distributed sessions now leverages the fabric library for sending remote SSH commands -* Enhancements - * changed usage of bridge-utils to using ip based bridge commands due to deprecation - * installation.sh script to help automate a standard make install or dev install - * when sessions are created without an id they will now always start from 1 and return the next unused id - * gRPC is now running by default -* Session API - * removed **create_emane_network** and **create_wlan_network** to help force using **add_node** for all cases - * removed **session.master** as it was only used for previous distributed sessions 
-  * updated **add_node** to allow providing a custom class for node creation
-* gRPC API
-  * added get all services configurations
-  * added get all wlan configurations
-  * added start/stop session calls, provides more freedom for startup and shutdown logic
-  * session events now have a session id to help differentiate which session they are coming from
-  * throughput events now require a session id and responses include session id for differentiating data
-  * session events can now be subscribed to with a subset of events or all
-  * emane model config data now include interface ids properly
-  * sessions returned from get sessions call may include file names when created from xml
-  * when opening an xml the session can now be started or not
-  * edit node will now broadcast the edit for others to listen to
-  * all config responses will now be in the form of a mapped value of key to ConfigOption, or a list of these when retrieving all, sometimes the config response may be wrapped in a different message to include other metadata
-* Bugfixes
-  * \#311 - initialize ebtables chains for wlan networks only
-  * \#312 - removed sudo from init script
-  * \#313 - check if interface exists before flushing, previously would log an exception that didn't matter
-  * \#314 - node locations stored as floats instead of ints to avoid mobility calculations due to loss of precision
-  * \#321 - python installation path will be based on distribution/python building it
-  * emane options xml parsing didn't properly take into account the **emane_prefix** configuration
-  * updated services that checked for ipv4/ipv6 addresses to not fail for valid ipv6 addresses with a decimal
-* Documentation
-  * updated NRL links to new GitHub locations
-  * updates for distributed session
-  * updates to dev guide
-  * updates to examples LXD/Docker setup
-  * updates to FRR service documentation
-  * gRPC get node service file will not throw an exception when node doesn't exist
-
-## 2019-10-12 CORE 5.5.2
-* gRPC
-  * Added emane_link API for linking/unlinking EMANE nodes within the GUI
-* Bugfixes
-  * Fixed python3 issues when configuring WLAN nodes
-  * Fixed issue due to refactoring when running distributed
-  * Fixed issue when running python script from GUI
-
-## 2019-10-09 CORE 5.5.1
-* Bugfix
-  * Fixed issue with 5.5.0 refactoring causing issues in python2.
-  * Fixed python3 issues with NRL services
-
-## 2019-10-03 CORE 5.5.0
-* Documentation
-  * updated dependencies for building OSPF MDR on installation page
-  * added python/pip instruction on installation page
-  * added ethtool dependency for CORE
-* GUI
-  * removed experimental OVS node to avoid confusion and issues related to using it
-* Daemon
-  * fixed core-daemon --ovs flag back to working order for running CORE using OVS bridges instead of Linux bridges
-  * updated requirements.txt to refer to configparser 4.0.2, due to 4.0.1 removal by developers
-  * update to fail fast for dependent executables that are not found within PATH
-  * update to not load services that fail during service.on_load and move on
-* Build
-  * fixed issue with configure script when using option flags
-  * python install path will use the native install path for AM_PATH_PYTHON, instead of coercing to python3
-* Issues
-  * \#271 - OVS node error in GUI
-  * \#291 - configparser 4.0.1 issue
-  * \#290 - python3 path issue when building
-
-## 2019-09-23 CORE 5.4.0
-* Documentation
-  * Updates to documentation dev guide
-* Improvements
-  * Added support for Pipenv for development
-  * Added configuration to leverage pre-commit during development
-  * Added configuration to leverage isort, black, and flake8 during development
-  * Added Github Actions to help verify pull requests in the same way as pre-commit
-* Issues
-  * \#279 - WLAN configuration does not get set by default
-  * \#272 - error installing python package futures==3.2.0
-* Pull Requests
-  * \#275 - Disable MAC learning on WLAN
-  * \#281 - Bumped jackson version on corefx
-
-## 2019-07-05 CORE 5.3.1
-* Documentation
-  * Updates to provide more information regarding several of the included services
-* Issues
-  * \#252 - fixed changing wlan configurations during runtime
-  * \#256 - fixed mobility waypoint comparison for python3
-  * \#174 - turn tx/rx checksums off by default as they will never be valid for virtual interfaces
-  * \#259 - fixes for distributed EMANE
-  * \#260 - fixed issue with how execfile was being used due to it not existing within python3
-
-## 2019-06-10 CORE 5.3.0
-* Enhancements
-  * python 2 / 3 support
-  * added new API using [gRPC](https://grpc.io/)
-  * --grpc --grpc-port --grpc-address flags added to core-daemon
-  * core.api.grpc.client.CoreGrpcClient, provides a convenience wrapper for leveraging the API
-* Docs
-  * Updates to installation instructions for latest changes
-* Services
-  * Added FRR service
-* EMANE
-  * Added EMANE prefix configuration when looking for emane model manifest files
-  * requires configuring **emane_prefix** in /etc/core/core.conf
-* Cleanup
-  * Refactoring of the core python package structure, trying to help provide better organization and logical groupings
-* Issues
-  * \#246 - Fixed network to network link handling when reading xml files
-  * \#236 - Fixed storing/reading of link configuration values within xml files
-  * \#170 - FRR Service
-  * \#155 - EMANE path configuration
-  * \#233 - Python 3 support
-  * \#245 - Fixed bidirectional link configurations when reading from xml files
-  * \#208 - gRPC API
-  * Fixed link configuration dup handling when loaded from xml files
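The 5.3.0 entry above introduces the gRPC API and the `core.api.grpc.client.CoreGrpcClient` convenience wrapper. A minimal usage sketch follows; it assumes core-daemon was started with the `--grpc` flag mentioned above, and the response field names are assumptions that may differ between releases.

```python
# Minimal sketch of the CoreGrpcClient wrapper named above; the response
# fields (session_id, sessions, id, state) are assumptions, not verified API.
from core.api.grpc import client

core = client.CoreGrpcClient()  # defaults to the local core-daemon gRPC endpoint
core.connect()

response = core.create_session()
print("created session:", response.session_id)

# list known sessions on the daemon
for session in core.get_sessions().sessions:
    print(session.id, session.state)
```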
-
-## 2019-06-07 CORE 5.2.2
-* Enhancements:
-  * adds back in core-daemon udp support for coresendmsg, people may have depended on previously for certain scenarios
-* Bug Fixes:
-  * fixes issue in GUI that would prevent moving nodes during mobility scenarios
-
-## 2019-03-25 CORE 5.2.1
-* Packaging:
-  * documentation no longer builds by 
default, must use configure flag - * added configure flag to allow only building vcmd - * sphinx will no long be required when not building documentation -* Services: - * Added source NAT service - * Fixed DHCP service for Ubuntu 18.04 -* BUGFIXES: - * \#188 - properly remove session on delete TLV API call - * \#192 - updated default gnome terminal command for nodes to be Ubuntu 18.04 compatible - * \#193 - updates to service validation, will retry on failure and better exception logging - * \#195 - TLV link message data fix - * \#196 - fix to avoid clearing out default services - * \#197 - removed wireless_link_all API from EmuSession - * \#216 - updated default WLAN bandwidth to 54Mbps - * \#223 - fix to saving RJ45 to session XML files - -## 2018-05-22 CORE 5.1 -* DAEMON: - * removed and cleared out code that is either legacy or no longer supported (Xen, BSD, Kernel patching, RPM/DEB - specific files) - * default nodes are now set in the node map - * moved ns3 and netns directories to the top of the repo - * changes to make use of fpm as the tool for building packages - * removed usage of logzero to avoid dependency issues for built packages - * removed daemon addons directory - * added CoreEmu to core.emulator.coreemu to help begin serving as the basis for a more formal API for scripting - and creating new external APIs out of - * cleaned up logging, moved more logging to DEBUG from INFO, tried to mold INFO message to be more simple and - informative - * EMANE 1.0.1-1.21 supported - * updates to leverage EMANE python bindings for dynamically parsing phy/mac manifest files - * example custom EMANE model lives under /usr/share/core/examples/myemane/examplemodel.py - * EMANE TDMA model now supports an option to start a TDMA schedule when running - * fixed issues with coresendmsg script due to code refactoring - * added make target for generating documentation "make doc" - * Python 2.7+ is now required - * ns3 is no longer bundled by default, but will be produced as a separate package for installation -* GUI: - * updated broken help links in GUI Help->About -* Packaging: - * fixed PYTHON_PATH to PYTHONPATH in sysv script - * added make command to leverage FPM as the tool for creating deb/rpm packages going forward, there is documentation - within README.md to try it out -* TEST: - * fixed some broken tests - * new test cases based on CoreEmu usage -* BUGFIXES: - * \#142 - duplication of custom services - * \#136 - sphinx-apidoc command not found - * \#137 - make command fails when using distclean - -## 2017-09-01 CORE 5.0 -* DEVELOPMENT: - * support for editorconfig to help standardize development across IDEs, from the defined configuration file - * support for sonarqube analysis, from the defined configuration file -* DAEMON: - * code cleanup and improvements to adhere to coding standards (SonarQube) - * leverage "logzero" module to make easy usage of the standard logging module - * improvements to documentation across the code base - * initial work to separate the dependence on TCP API messaging from the core library (easier core scripting) - * beta support for running core in Open vSwitch mode, leveraging Open vSwitch bridges, instead of Linux bridges -* SERVICES: - * added Ryu SDN controller service - * added Open vSwitch service -* TEST: - * added unit/integration tests to support validating changes going forward -* BUGFIXES: - * merged pull requests for: #115, #110, #109, #107, #106, #105, #103, #102, #101, #96 - -## 2015-06-05 CORE 4.8 -* EMANE: - * support for EMANE 0.9.2 - * run 
emane in each container when using EMANE 0.9.2 - * support using separate control networks for EMANE OTA and event traffic -* GUI: - * fixed an issue where the adjacency widget lines pointed to old node positions - * fixed an issue where not all EMANE 0.9.x IEEE 802.11 MAC parameter were configurable - * fixed an issue related to running python scripts from the GUI when using tcl/tk version 8.6 - * improved batch mode execution to display the check emulation light status - * improved managing multiple sessions - * improved support for using multiple canvases - * added a reload option to the file menu to revert back to a saved scenario -* DAEMON: - * support exporting scenarios in NRL Network Modeling Framework 1.0 XML format - * support importing scenarios in NRL Network Modeling Framework 1.0 XML format - * support exporting the deployed scenario state in NRL NMF XML 1.0 format - * improved EMANE post-startup processing to better synchronize distributed emulations - * improved how addresses are assigned to tun/tap devices - * added support for python state-change callbacks -* SERVICES: - * added mgen sink and mgen actor services - * added oslrv2 and olsr.org services - * added a docker service -* BUILD: - * improved the install/uninstall process - * improved debian and rpm packaging -* BUGFIXES: - * updated the http service for ubuntu 14.04 - * improved included examples - * shortened the length of network interface names - * improved how the core system service manages running the core daemon - * fixed an issues related to applying session configuration setting - * improved detecting when a distributed emulation is already running - * improved documentation - -## 2014-08-06 CORE 4.7 -* EMANE: - * support for EMANE 0.9.1 - * fix error when using Comm Effect model with loss/duplicate string values - * enable flow control in virtual transport if enabled in the MAC model - * fix bug #150 where EMANE event service/address port were not used -* GUI: - * support Tcl/Tk 8.6 when available - * added --(a)ddress and --(p)ort arguments to core-gui command-line - * added File > Execute XML or Python script... option - * added File > Execute Python script with options... 
menu item - * when executing Python script from GUI, run in background thread, wait for - RUNTIME state - * enter RUNTIME state when start button pressed with empty canvas - * added support for asymmetric link effects - * support link delays up to 274 seconds (netem maximum) - * allow runtime changes of WLAN link effects -* DAEMON: - * set NODE_NAME, NODE_NUMBER, SESSION_SHORT in default vnoded environment - * changed host device naming to use veth, tap prefixes; b.n.SS for bridges - * allow parsing XML files into live running session - * enable link effects between hub/switch and hub/switch connections - * update MDR service to use broadcast interfaces for non-WLAN links - * allow node class to be specified when initializing XML parser - * save and parse canvas origin (reference point) and scale in MP XML - * up/down control script session option - * fix hash calculation used to determine GRE tunnel keys - * use shell script to detach SMF on startup - * added NRL services for mgen sink and nrlolsrv2 - * use SDT URL session option - * added core-manage tool for addons to add/remove/check services, models, - and custom node types -* API: - * implement local flag in Execute Message for running host commands - * jitter changed to 64-bit value to align with delay in Link Message - * added unidirectional link flag TLV to Link Message - * added reconfigure event type for re-generating service config files - * return errors in API with failed services -* BUGFIXES: - * fix HTTP service running under Ubuntu - * fixed the following bugs: #150, 169, 188, 220, 225, 230, 231, 242, 244, - 247, 248, 250, 251 - -## 2013-09-25 CORE 4.6 -* NOTE: cored is now core-daemon, and core is now core-gui (for Debian acceptance) -* NOTE: /etc/init.d/core is now /etc/init.d/core-daemon (for insserv compatibility) -* EMANE: - * don't start EMANE locally if no local NEMs - * EMANE poststartup() to re-transmit location events during initialization - * added debug port to EMANE options - * added a basic EMANE 802.11 CORE Python script example - * expose transport XML block generation to EmaneModels - * expose NEM entry to the EmaneModel so it can be overridden by a model - * add the control interface bridge prior to starting EMANE, as some models may - * depend on the controlnet functionality - * added EMANE model to CORE converter - * parse lat/long/alt from node messages, for moving nodes using command-line - * fix bug #196 incorrect distance when traversing UTM zones -* GUI: - * added Cut, Copy, and Paste options to the Edit menu - * paste will copy selected services and take care of node and interface - * renumbering - * implement Edit > Find dialog for searching nodes and links - * when copying existing file for a service, perform string replacement of: - * "~", "%SESSION%", "%SESSION_DIR%", "%SESSION_USER%", "%NODE%", "%NODENAME%" - * use CORE_DATA_DIR insteadof LIBDIR - * fix Adjacency Widget to work with OSPFv2 only networks -* BUILD: - * build/packaging improvements for inclusion on Debian - * fix error when running scenario with a mobility script in batch mode - * include Linux kernel patches for 3.8 - * renamed core-cleanup.sh to core-cleanup for Debian conformance - * don't always generate man pages from Makefile; new manpages for - coresendmsg and core-daemon -* BUGFIXES: - * don't auto-assign IPv4/IPv6 addresses when none received in Link Messages (session reconnect) - * fixed lock view - * fix GUI spinbox errors for Tk 8.5.8 (RHEL/CentOS 6.2) - * fix broker node count for distributed session entering the 
RUNTIME state when - * (non-EMANE) WLANs or GreTapBridges are involved; - * fix "file exists" error message when distributed session number is re-used - * and servers file is written - * fix bug #194 configuration dialog too long, make dialog scrollable/resizable - * allow float values for loss and duplicates percent - * fix the following bugs: 166, 172, 177, 178, 192, 194, 196, 201, 202, - 205, 206, 210, 212, 213, 214, 221 - -## 2013-04-13 CORE 4.5 -* GUI: - * improved behavior when starting GUI without daemon, or using File New after connection with daemon is lost - * fix various GUI issues when reconnecting to a session - * support 3D GUI via output to SDT3D - * added "Execute Python script..." entry to the File Menu - * support user-defined terminal program instead of hard-coded xterm - * added session options for "enable RJ45s", "preserve session dir" - * added buttons to the IP Addresses dialog for removing all/selected IPv4/IPv6 - * allow sessions with multiple canvases to enter RUNTIME state - * added "--addons" startup mode to pass control to code included from addons dir - * added "Locked" entry to View menu to prevent moving items - * use currently selected node type when invoking a topology generator - * updated throughput plots with resizing, color picker, plot labels, locked scales, and save/load plot - configuration with imn file - * improved session dialog -* EMANE: - * EMANE 0.8.1 support with backwards-compatibility for 0.7.4 - * extend CommEffect model to generate CommEffect events upon receipt of Link Messages having link effects -* Services: - * updated FTP service with root directory for anonymous users - * added HTTP, PCAP, BIRD, RADVD, and Babel services - * support copying existing files instead of always generating them - * added "Services..." entry to node right-click menu - * added "View" button for side-by-side comparison when copying customized config files - * updated Quagga daemons to wait for zebra.vty VTY file before starting -* General: - * XML import and export - * renamed "cored.py" to "cored", "coresendmsg.py" to "coresendmsg" - * code reorganization and clean-up - * updated XML export to write NetworkPlan, MotionPlan, and ServicePlan within a Scenario tag, added new - "Save As XML..." File menu entry - * added script_start/pause/stop options to Ns2ScriptedMobility - * "python" source sub-directory renamed to "daemon" - * added "cored -e" option to execute a Python script, adding its session to the active sessions list, allowing for - GUI connection - * support comma-separated list for custom_services_dir in core.conf file - * updated kernel patches for Linux kernel 3.5 - * support RFC 6164-style IPv6 /127 addressing -* ns-3: - * integrate ns-3 node location between CORE and ns-3 simulation - * added ns-3 random walk mobility example - * updated ns-3 Wifi example to allow GUI connection and moving of nodes -* fixed the following bugs: 54, 103, 111, 136, 145, 153, 157, 160, 161, 162, 164, 165, 168, 170, 171, 173, 174, 176, -184, 190, 193 - -## 2012-09-25 CORE 4.4 -* GUI: - * real-time bandwidth plotting tool - * added Wireshark and tshark right-click menu items - * X,Y coordinates shown in the status bar - * updated GUI attribute option to link messages for changing color/width/dash - * added sample IPsec and VPN scenarios, how many nodes script - * added jitter parameter to WLANs - * renamed Experiment menu to Session menu, added session options - * use 'key=value' configuration for services, EMANE models, WLAN models, etc. 
- * save only service values that have been customized - * copy service parameters from one customized service to another - * right-click menu to start/stop/restart each service -* EMANE: - * EMANE 0.7.4 support - * added support for EMANE CommEffect model and Comm Effect controller GUI - * added support for EMANE Raw Transport when using RJ45 devices -* Services: - * improved service customization; allow a service to define custom Tcl tab - * added vtysh.conf for Quagga service to support 'write mem' - * support scheduled events and services that start N seconds after runtime - * added UCARP service -* Documentation: - * converted the CORE manual to reStructuredText using Sphinx; added Python docs -* General: - * Python code reorganization - * improved cored.py thread locking - * merged xen branch into trunk - * added an event queue to a session with notion of time zero - * added UDP support to cored.py - * use UDP by default in coresendmsg.py; added '-H' option to print examples - * enter a bash shell by default when running vcmd with no arguments - * fixes to distributed emulation entering runtime state - * write 'nodes' file upon session startup - * make session number and other attributes available in environment - * support /etc/core/environment and ~/.core/environment files - * added Ns2ScriptedMobility model to Python, removed from the GUI - * namespace nodes mount a private /sys - * fixed the following bugs: 80, 81, 84, 99, 104, 109, 110, 122, 124, 131, 133, 134, 135, 137, 140, 143, 144, 146, - 147, 151, 154, 155 - -## 2012-03-07 CORE 4.3 -* EMANE 0.7.2 and 0.7.3 support -* hook scripts: customize actions at any of six different session states -* Check Emulation Light (CEL) exception feedback system -* added FTP and XORP services, and service validate commands -* services can flag when customization is required -* Python classes to support ns-3 simulation experiments -* write state, node X,Y position, and servers to pycore session dir -* removed over 9,000 lines of unused GUI code -* performance monitoring script -* batch mode improvements and --closebatch option -* export session to EmulationScript XML files -* basic range model moved from GUI to Python, supports 3D coordinates -* improved WLAN dialog with tabs -* added PhysicalNode class for joining real nodes with emulated networks -* fixed the following bugs: 50, 75, 76, 79, 82, 83, 85, 86, 89, 90, 92, 94, 96, 98, 100, 112, 113, 116, 119, 120 - -## 2011-08-19 CORE 4.2 -* EMANE 0.7.1 support - * support for Bypass model, Universal PHY, logging, realtime -* configurable MAC addresses -* control interfaces (backchannel between node and host) -* service customization dialog improved (tabbed) -* new testing scripts for MDR and EMANE performance testing -* improved upgrading of old imn files -* new coresendmsg.py utility (deprecates libcoreapi and coreapisend) -* new security services, custom service becomes UserDefined -* new services and Python scripting chapters in manual -* fixes to distributed emulation, linking tunnels/RJ45s with WLANs/hubs/switches -* fixed the following bugs: 18, 32, 34, 38, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 52, 53, 55, 57, 58, 60, 62, 64, -65, 66, 68, 71, 72, 74 - -## 2011-01-05 CORE 4.1 -* new icons for toolbars and nodes -* node services introduced, node models deprecated -* customizable node types -* traffic flow editor with MGEN support -* user configs moved from /etc/core/`*` to ~/.core/ -* allocate addresses from custom IPv4/IPv6 prefixes -* distributed emulation using GRE tunnels -* FreeBSD 8.1 
now uses cored.py -* EMANE 0.6.4 support -* numerous bugfixes - -## 2010-08-17 CORE 4.0 -* Python framework with Linux network namespace (netns) support (Linux netns is now the primary supported platform) -* ability to close the GUI and later reconnect to a running session (netns only) -* EMANE integration (netns only) -* new topology generators, host file generator -* user-editable Observer Widgets -* use of /etc/core instead of /usr/local/etc/core -* various bugfixes - -## 2009-09-15 CORE 3.5 - -## 2009-06-23 CORE 3.4 - -## 2009-03-11 CORE 3.3 diff --git a/Changelog b/Changelog new file mode 100644 index 00000000..49d2d0d0 --- /dev/null +++ b/Changelog @@ -0,0 +1,342 @@ +2019-03-25 CORE 5.2.1 + * Packaging: + - documentation no longer builds by default, must use configure flag + - added configure flag to allow only building vcmd + - sphinx will no long be required when not building documentation + * Services: + - Added source NAT service + - Fixed DHCP service for Ubuntu 18.04 + * BUGFIXES: + - #188 - properly remove session on delete TLV API call + - #192 - updated default gnome terminal command for nodes to be Ubuntu 18.04 compatible + - #193 - updates to service validation, will retry on failure and better exception logging + - #195 - TLV link message data fix + - #196 - fix to avoid clearing out default services + - #197 - removed wireless_link_all API from EmuSession + - #216 - updated default WLAN bandwidth to 54Mbps + - #223 - fix to saving RJ45 to session XML files + +2018-05-22 CORE 5.1 + * DAEMON: + - removed and cleared out code that is either legacy or no longer supported (Xen, BSD, Kernel patching, RPM/DEB specific files) + - default nodes are now set in the node map + - moved ns3 and netns directories to the top of the repo + - changes to make use of fpm as the tool for building packages + - removed usage of logzero to avoid dependency issues for built packages + - removed daemon addons directory + - added CoreEmu to core.emulator.coreemu to help begin serving as the basis for a more formal API for scripting and creating new external APIs out of + - cleaned up logging, moved more logging to DEBUG from INFO, tried to mold INFO message to be more simple and informative + - EMANE 1.0.1-1.21 supported + - updates to leverage EMANE python bindings for dynamically parsing phy/mac manifest files + - example custom EMANE model lives under /usr/share/core/examples/myemane/examplemodel.py + - EMANE TDMA model now supports an option to start a TDMA schedule when running + - fixed issues with coresendmsg script due to code refactoring + - added make target for generating documentation "make doc" + - Python 2.7+ is now required + - ns3 is no longer bundled by default, but will be produced as a separate package for installation + * GUI: + - updated broken help links in GUI Help->About + * Packaging: + - fixed PYTHON_PATH to PYTHONPATH in sysv script + - added make command to leverage FPM as the tool for creating deb/rpm packages going forward, there is documentation within README.md to try it out + * TEST: + - fixed some broken tests + - new test cases based on CoreEmu usage + * BUGFIXES: + - #142 - duplication of custom services + - #136 - sphinx-apidoc command not found + - #137 - make command fails when using distclean + +2017-09-01 CORE 5.0 + * DEVELOPMENT: + - support for editorconfig to help standardize development across IDEs, from the defined configuration file + - support for sonarqube analysis, from the defined configuration file + * DAEMON: + - code cleanup and improvements to 
adhere to coding standards (SonarQube) + - leverage "logzero" module to make easy usage of the standard logging module + - improvements to documentation across the code base + - initial work to separate the dependence on TCP API messaging from the core library (easier core scripting) + - beta support for running core in Open vSwitch mode, leveraging Open vSwitch bridges, instead of Linux bridges + * SERVICES: + - added Ryu SDN controller service + - added Open vSwitch service + * TEST: + - added unit/integration tests to support validating changes going forward + * BUGFIXES: + - merged pull requests for: #115, #110, #109, #107, #106, #105, #103, #102, #101, #96 + +2015-06-05 CORE 4.8 + * EMANE: + - support for EMANE 0.9.2 + - run emane in each container when using EMANE 0.9.2 + - support using separate control networks for EMANE OTA and event traffic + * GUI: + - fixed an issue where the adjacency widget lines pointed to old node positions + - fixed an issue where not all EMANE 0.9.x IEEE 802.11 MAC parameter were configurable + - fixed an issue related to running python scripts from the GUI when using tcl/tk version 8.6 + - improved batch mode execution to display the check emulation light status + - improved managing multiple sessions + - improved support for using multiple canvases + - added a reload option to the file menu to revert back to a saved scenario + * DAEMON: + - support exporting scenarios in NRL Network Modeling Framework 1.0 XML format + - support importing scenarios in NRL Network Modeling Framework 1.0 XML format + - support exporting the deployed scenario state in NRL NMF XML 1.0 format + - improved EMANE post-startup processing to better synchronize distributed emulations + - improved how addresses are assigned to tun/tap devices + - added support for python state-change callbacks + * SERVICES: + - added mgen sink and mgen actor services + - added oslrv2 and olsr.org services + - added a docker service + * BUILD: + - improved the install/uninstall process + - improved debian and rpm packaging + * BUGFIXES: + - updated the http service for ubuntu 14.04 + - improved included examples + - shortened the length of network interface names + - improved how the core system service manages running the core daemon + - fixed an issues related to applying session configuration setting + - improved detecting when a distributed emulation is already running + - improved documentation + +2014-08-06 CORE 4.7 + + * EMANE: + - support for EMANE 0.9.1 + - fix error when using Comm Effect model with loss/duplicate string values + - enable flow control in virtual transport if enabled in the MAC model + - fix bug #150 where EMANE event service/address port were not used + * GUI: + - support Tcl/Tk 8.6 when available + - added --(a)ddress and --(p)ort arguments to core-gui command-line + - added File > Execute XML or Python script... option + - added File > Execute Python script with options... 
menu item + - when executing Python script from GUI, run in background thread, wait for + RUNTIME state + - enter RUNTIME state when start button pressed with empty canvas + - added support for asymmetric link effects + - support link delays up to 274 seconds (netem maximum) + - allow runtime changes of WLAN link effects + * DAEMON: + - set NODE_NAME, NODE_NUMBER, SESSION_SHORT in default vnoded environment + - changed host device naming to use veth, tap prefixes; b.n.SS for bridges + - allow parsing XML files into live running session + - enable link effects between hub/switch and hub/switch connections + - update MDR service to use broadcast interfaces for non-WLAN links + - allow node class to be specified when initializing XML parser + - save and parse canvas origin (reference point) and scale in MP XML + - up/down control script session option + - fix hash calculation used to determine GRE tunnel keys + - use shell script to detach SMF on startup + - added NRL services for mgen sink and nrlolsrv2 + - use SDT URL session option + - added core-manage tool for addons to add/remove/check services, models, + and custom node types + * API: + - implement local flag in Execute Message for running host commands + - jitter changed to 64-bit value to align with delay in Link Message + - added unidirectional link flag TLV to Link Message + - added reconfigure event type for re-generating service config files + - return errors in API with failed services + * BUGFIXES: + - fix HTTP service running under Ubuntu + - fixed the following bugs: #150, 169, 188, 220, 225, 230, 231, 242, 244, + 247, 248, 250, 251 + +2013-09-25 CORE 4.6 + + * NOTE: cored is now core-daemon, and core is now core-gui (for Debian + acceptance) + * NOTE: /etc/init.d/core is now /etc/init.d/core-daemon (for insserv + compatibility) + * EMANE: + - don't start EMANE locally if no local NEMs + - EMANE poststartup() to re-transmit location events during initialization + - added debug port to EMANE options + - added a basic EMANE 802.11 CORE Python script example + - expose transport XML block generation to EmaneModels + - expose NEM entry to the EmaneModel so it can be overridden by a model + - add the control interface bridge prior to starting EMANE, as some models may + - depend on the controlnet functionality + - added EMANE model to CORE converter + - parse lat/long/alt from node messages, for moving nodes using command-line + - fix bug #196 incorrect distance when traversing UTM zones + + * GUI: + - added Cut, Copy, and Paste options to the Edit menu + - paste will copy selected services and take care of node and interface + - renumbering + - implement Edit > Find dialog for searching nodes and links + - when copying existing file for a service, perform string replacement of: + - "~", "%SESSION%", "%SESSION_DIR%", "%SESSION_USER%", "%NODE%", "%NODENAME%" + - use CORE_DATA_DIR insteadof LIBDIR + - fix Adjacency Widget to work with OSPFv2 only networks + + * BUILD: + - build/packaging improvements for inclusion on Debian + - fix error when running scenario with a mobility script in batch mode + - include Linux kernel patches for 3.8 + - renamed core-cleanup.sh to core-cleanup for Debian conformance + - don't always generate man pages from Makefile; new manpages for + coresendmsg and core-daemon + + * BUGFIXES: + - don't auto-assign IPv4/IPv6 addresses when none received in Link Messages (session reconnect) + - fixed lock view + - fix GUI spinbox errors for Tk 8.5.8 (RHEL/CentOS 6.2) + - fix broker node count for distributed 
session entering the RUNTIME state when + - (non-EMANE) WLANs or GreTapBridges are involved; + - fix "file exists" error message when distributed session number is re-used + - and servers file is written + - fix bug #194 configuration dialog too long, make dialog scrollable/resizable + - allow float values for loss and duplicates percent + - fix the following bugs: 166, 172, 177, 178, 192, 194, 196, 201, 202, + 205, 206, 210, 212, 213, 214, 221 + +2013-04-13 CORE 4.5 + + * GUI: + - improved behavior when starting GUI without daemon, or using File New after connection with daemon is lost + - fix various GUI issues when reconnecting to a session + - support 3D GUI via output to SDT3D + - added "Execute Python script..." entry to the File Menu + - support user-defined terminal program instead of hard-coded xterm + - added session options for "enable RJ45s", "preserve session dir" + - added buttons to the IP Addresses dialog for removing all/selected IPv4/IPv6 + - allow sessions with multiple canvases to enter RUNTIME state + - added "--addons" startup mode to pass control to code included from addons dir + - added "Locked" entry to View menu to prevent moving items + - use currently selected node type when invoking a topology generator + - updated throughput plots with resizing, color picker, plot labels, locked scales, and save/load plot configuration with imn file + - improved session dialog + * EMANE: + - EMANE 0.8.1 support with backwards-compatibility for 0.7.4 + - extend CommEffect model to generate CommEffect events upon receipt of Link Messages having link effects + * Services: + - updated FTP service with root directory for anonymous users + - added HTTP, PCAP, BIRD, RADVD, and Babel services + - support copying existing files instead of always generating them + - added "Services..." entry to node right-click menu + - added "View" button for side-by-side comparison when copying customized config files + - updated Quagga daemons to wait for zebra.vty VTY file before starting + * General: + - XML import and export + - renamed "cored.py" to "cored", "coresendmsg.py" to "coresendmsg" + - code reorganization and clean-up + - updated XML export to write NetworkPlan, MotionPlan, and ServicePlan within a Scenario tag, added new "Save As XML..." File menu entry + - added script_start/pause/stop options to Ns2ScriptedMobility + - "python" source sub-directory renamed to "daemon" + - added "cored -e" option to execute a Python script, adding its session to the active sessions list, allowing for GUI connection + - support comma-separated list for custom_services_dir in core.conf file + - updated kernel patches for Linux kernel 3.5 + - support RFC 6164-style IPv6 /127 addressing + * ns-3: + - integrate ns-3 node location between CORE and ns-3 simulation + - added ns-3 random walk mobility example + - updated ns-3 Wifi example to allow GUI connection and moving of nodes + * fixed the following bugs: 54, 103, 111, 136, 145, 153, 157, 160, 161, 162, 164, 165, 168, 170, 171, 173, 174, 176, 184, 190, 193 + +2012-09-25 CORE 4.4 + + * GUI: + - real-time bandwidth plotting tool + - added Wireshark and tshark right-click menu items + - X,Y coordinates shown in the status bar + - updated GUI attribute option to link messages for changing color/width/dash + - added sample IPsec and VPN scenarios, how many nodes script + - added jitter parameter to WLANs + - renamed Experiment menu to Session menu, added session options + - use 'key=value' configuration for services, EMANE models, WLAN models, etc. 
+ - save only service values that have been customized + - copy service parameters from one customized service to another + - right-click menu to start/stop/restart each service + * EMANE: + - EMANE 0.7.4 support + - added support for EMANE CommEffect model and Comm Effect controller GUI + - added support for EMANE Raw Transport when using RJ45 devices + * Services: + - improved service customization; allow a service to define custom Tcl tab + - added vtysh.conf for Quagga service to support 'write mem' + - support scheduled events and services that start N seconds after runtime + - added UCARP service + * Documentation: + - converted the CORE manual to reStructuredText using Sphinx; added Python docs + * General: + - Python code reorganization + - improved cored.py thread locking + - merged xen branch into trunk + - added an event queue to a session with notion of time zero + - added UDP support to cored.py + - use UDP by default in coresendmsg.py; added '-H' option to print examples + - enter a bash shell by default when running vcmd with no arguments + - fixes to distributed emulation entering runtime state + - write 'nodes' file upon session startup + - make session number and other attributes available in environment + - support /etc/core/environment and ~/.core/environment files + - added Ns2ScriptedMobility model to Python, removed from the GUI + - namespace nodes mount a private /sys + + - fixed the following bugs: 80, 81, 84, 99, 104, 109, 110, 122, 124, 131, 133, 134, 135, 137, 140, 143, 144, 146, 147, 151, 154, 155 + +2012-03-07 CORE 4.3 + + * EMANE 0.7.2 and 0.7.3 support + * hook scripts: customize actions at any of six different session states + * Check Emulation Light (CEL) exception feedback system + * added FTP and XORP services, and service validate commands + * services can flag when customization is required + * Python classes to support ns-3 simulation experiments + * write state, node X,Y position, and servers to pycore session dir + * removed over 9,000 lines of unused GUI code + * performance monitoring script + * batch mode improvements and --closebatch option + * export session to EmulationScript XML files + * basic range model moved from GUI to Python, supports 3D coordinates + * improved WLAN dialog with tabs + * added PhysicalNode class for joining real nodes with emulated networks + * fixed the following bugs: 50, 75, 76, 79, 82, 83, 85, 86, 89, 90, 92, 94, 96, 98, 100, 112, 113, 116, 119, 120 + +2011-08-19 CORE 4.2 + + * EMANE 0.7.1 support + - support for Bypass model, Universal PHY, logging, realtime + * configurable MAC addresses + * control interfaces (backchannel between node and host) + * service customization dialog improved (tabbed) + * new testing scripts for MDR and EMANE performance testing + * improved upgrading of old imn files + * new coresendmsg.py utility (deprecates libcoreapi and coreapisend) + * new security services, custom service becomes UserDefined + * new services and Python scripting chapters in manual + * fixes to distributed emulation, linking tunnels/RJ45s with WLANs/hubs/switches + * fixed the following bugs: 18, 32, 34, 38, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 52, 53, 55, 57, 58, 60, 62, 64, 65, 66, 68, 71, 72, 74 + +2011-01-05 CORE 4.1 + * new icons for toolbars and nodes + * node services introduced, node models deprecated + * customizable node types + * traffic flow editor with MGEN support + * user configs moved from /etc/core/`*` to ~/.core/ + * allocate addresses from custom IPv4/IPv6 prefixes + * distributed emulation 
using GRE tunnels + * FreeBSD 8.1 now uses cored.py + * EMANE 0.6.4 support + * numerous bugfixes + +2010-08-17 CORE 4.0 + * Python framework with Linux network namespace (netns) support (Linux netns is now the primary supported platform) + * ability to close the GUI and later reconnect to a running session (netns only) + * EMANE integration (netns only) + * new topology generators, host file generator + * user-editable Observer Widgets + * use of /etc/core instead of /usr/local/etc/core + * various bugfixes + +2009-09-15 CORE 3.5 + +2009-06-23 CORE 3.4 + +2009-03-11 CORE 3.3 + diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index 155cacc0..00000000 --- a/Dockerfile +++ /dev/null @@ -1,126 +0,0 @@ -# syntax=docker/dockerfile:1 -FROM ubuntu:22.04 -LABEL Description="CORE Docker Ubuntu Image" - -ARG PREFIX=/usr/local -ARG BRANCH=master -ARG PROTOC_VERSION=3.19.6 -ARG VENV_PATH=/opt/core/venv -ENV DEBIAN_FRONTEND=noninteractive -ENV PATH="$PATH:${VENV_PATH}/bin" -WORKDIR /opt - -# install system dependencies - -RUN apt-get update -y && \ - apt-get install -y software-properties-common - -RUN add-apt-repository "deb http://archive.ubuntu.com/ubuntu jammy universe" - -RUN apt-get update -y && \ - apt-get install -y --no-install-recommends \ - automake \ - bash \ - ca-certificates \ - ethtool \ - gawk \ - gcc \ - g++ \ - iproute2 \ - iputils-ping \ - libc-dev \ - libev-dev \ - libreadline-dev \ - libtool \ - nftables \ - python3 \ - python3-pip \ - python3-tk \ - pkg-config \ - tk \ - xauth \ - xterm \ - wireshark \ - vim \ - build-essential \ - nano \ - firefox \ - net-tools \ - rsync \ - openssh-server \ - openssh-client \ - vsftpd \ - atftpd \ - atftp \ - mini-httpd \ - lynx \ - tcpdump \ - iperf \ - iperf3 \ - tshark \ - openssh-sftp-server \ - bind9 \ - bind9-utils \ - openvpn \ - isc-dhcp-server \ - isc-dhcp-client \ - whois \ - ipcalc \ - socat \ - hping3 \ - libgtk-3-0 \ - librest-0.7-0 \ - libgtk-3-common \ - dconf-gsettings-backend \ - libsoup-gnome2.4-1 \ - libsoup2.4-1 \ - dconf-service \ - x11-xserver-utils \ - ftp \ - git \ - sudo \ - wget \ - tzdata \ - libpcap-dev \ - libpcre3-dev \ - libprotobuf-dev \ - libxml2-dev \ - protobuf-compiler \ - unzip \ - uuid-dev \ - iproute2 \ - vlc \ - iputils-ping && \ - apt-get autoremove -y - -# install core -RUN git clone https://github.com/coreemu/core && \ - cd core && \ - git checkout ${BRANCH} && \ - ./setup.sh && \ - PATH=/root/.local/bin:$PATH inv install -v -p ${PREFIX} && \ - cd /opt && \ - rm -rf ospf-mdr - -# install emane -RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOC_VERSION}/protoc-${PROTOC_VERSION}-linux-x86_64.zip && \ - mkdir protoc && \ - unzip protoc-${PROTOC_VERSION}-linux-x86_64.zip -d protoc && \ - git clone https://github.com/adjacentlink/emane.git && \ - cd emane && \ - ./autogen.sh && \ - ./configure --prefix=/usr && \ - make -j$(nproc) && \ - make install && \ - cd src/python && \ - make clean && \ - PATH=/opt/protoc/bin:$PATH make && \ - ${VENV_PATH}/bin/python -m pip install . 
&& \ - cd /opt && \ - rm -rf protoc && \ - rm -rf emane && \ - rm -f protoc-${PROTOC_VERSION}-linux-x86_64.zip - -WORKDIR /root - -CMD /opt/core/venv/bin/core-daemon diff --git a/Jenkinsfile b/Jenkinsfile new file mode 100644 index 00000000..e70e3dc6 --- /dev/null +++ b/Jenkinsfile @@ -0,0 +1,20 @@ +pipeline { + agent any + stages { + stage('build core') { + steps { + sh './bootstrap.sh' + sh './configure' + sh 'make' + sh 'sudo make install' + } + } + stage('test core') { + steps { + sh 'pytest daemon/tests/test_core.py' + sh 'pytest daemon/tests/test_gui.py' + sh 'pytest daemon/tests/test_emane.py' + } + } + } +} \ No newline at end of file diff --git a/Makefile.am b/Makefile.am index 2b5f29e2..42dd4af6 100644 --- a/Makefile.am +++ b/Makefile.am @@ -6,26 +6,29 @@ if WANT_DOCS DOCS = docs man endif +if WANT_GUI + GUI = gui +endif + if WANT_DAEMON - DAEMON = daemon + DAEMON = scripts daemon endif if WANT_NETNS - NETNS = netns + NETNS = netns ns3 endif # keep docs last due to dependencies on binaries -SUBDIRS = $(DAEMON) $(NETNS) $(DOCS) +SUBDIRS = $(GUI) $(DAEMON) $(NETNS) $(DOCS) ACLOCAL_AMFLAGS = -I config # extra files to include with distribution tarball EXTRA_DIST = bootstrap.sh \ - package \ LICENSE \ README.md \ ASSIGNMENT_OF_COPYRIGHT.pdf \ - CHANGELOG.md \ + Changelog \ .version \ .version.date @@ -41,117 +44,80 @@ DISTCLEANFILES = aclocal.m4 \ MAINTAINERCLEANFILES = .version \ .version.date -define fpm-distributed-deb = -fpm -s dir -t deb -n core-distributed \ +define fpm-python = +fpm -s python -t $1 \ -m "$(PACKAGE_MAINTAINERS)" \ - --license "BSD" \ - --description "Common Open Research Emulator Distributed Package" \ - --url https://github.com/coreemu/core \ --vendor "$(PACKAGE_VENDOR)" \ - -p core-distributed_VERSION_ARCH.deb \ - -v $(PACKAGE_VERSION) \ - -d "ethtool" \ - -d "procps" \ - -d "libc6 >= 2.14" \ - -d "bash >= 3.0" \ - -d "nftables" \ - -d "iproute2" \ - -d "libev4" \ - -d "openssh-server" \ - -d "xterm" \ - netns/vnoded=/usr/bin/ \ - netns/vcmd=/usr/bin/ + $2 endef -define fpm-distributed-rpm = -fpm -s dir -t rpm -n core-distributed \ +define fpm-gui = +fpm -s dir -t $1 -n core-gui \ -m "$(PACKAGE_MAINTAINERS)" \ --license "BSD" \ - --description "Common Open Research Emulator Distributed Package" \ + --description "Common Open Research Emulator GUI front-end" \ --url https://github.com/coreemu/core \ --vendor "$(PACKAGE_VENDOR)" \ - -p core-distributed_VERSION_ARCH.rpm \ + -p core-gui_VERSION_ARCH.$1 \ -v $(PACKAGE_VERSION) \ - -d "ethtool" \ - -d "procps-ng" \ - -d "bash >= 3.0" \ - -d "nftables" \ - -d "iproute" \ - -d "libev" \ - -d "net-tools" \ - -d "openssh-server" \ - -d "xterm" \ - netns/vnoded=/usr/bin/ \ - netns/vcmd=/usr/bin/ -endef - -define fpm-rpm = -fpm -s dir -t rpm -n core \ - -m "$(PACKAGE_MAINTAINERS)" \ - --license "BSD" \ - --description "core vnoded/vcmd and system dependencies" \ - --url https://github.com/coreemu/core \ - --vendor "$(PACKAGE_VENDOR)" \ - -p core_VERSION_ARCH.rpm \ - -v $(PACKAGE_VERSION) \ - --rpm-init package/core-daemon \ - --after-install package/after-install.sh \ - --after-remove package/after-remove.sh \ - -d "ethtool" \ + -d "bash" \ + -d "tcl" \ -d "tk" \ + $2 \ + -C $(DESTDIR) +endef + +define fpm-daemon-rpm = +fpm -s python -t rpm \ + -p NAME_sysv_VERSION_ARCH.rpm \ + --rpm-init scripts/core-daemon \ + --python-install-bin $(bindir) \ + --python-install-data $(prefix) \ + --python-install-lib $(pythondir) \ + -m "$(PACKAGE_MAINTAINERS)" \ + --vendor "$(PACKAGE_VENDOR)" \ -d "procps-ng" \ -d "bash >= 3.0" \ + -d 
"bridge-utils" \ -d "ebtables" \ -d "iproute" \ -d "libev" \ -d "net-tools" \ - -d "nftables" \ - netns/vnoded=/usr/bin/ \ - netns/vcmd=/usr/bin/ \ - package/etc/core.conf=/etc/core/ \ - package/etc/logging.conf=/etc/core/ \ - package/examples=/opt/core/ \ - daemon/dist/core-$(PACKAGE_VERSION)-py3-none-any.whl=/opt/core/ + -d "python >= 2.7, python < 3.0" \ + netns/setup.py daemon/setup.py endef -define fpm-deb = -fpm -s dir -t deb -n core \ +define fpm-daemon-deb = +fpm -s python -t deb \ + -p NAME_$1_VERSION_ARCH.deb \ + --python-install-bin $(bindir) \ + --python-install-data $(prefix) \ + --python-install-lib $(pythondir) \ + $2 $3 \ -m "$(PACKAGE_MAINTAINERS)" \ - --license "BSD" \ - --description "core vnoded/vcmd and system dependencies" \ - --url https://github.com/coreemu/core \ --vendor "$(PACKAGE_VENDOR)" \ - -p core_VERSION_ARCH.deb \ - -v $(PACKAGE_VERSION) \ - --deb-systemd package/core-daemon.service \ - --deb-no-default-config-files \ - --after-install package/after-install.sh \ - --after-remove package/after-remove.sh \ - -d "ethtool" \ - -d "tk" \ - -d "libtk-img" \ -d "procps" \ -d "libc6 >= 2.14" \ -d "bash >= 3.0" \ + -d "bridge-utils" \ -d "ebtables" \ -d "iproute2" \ -d "libev4" \ - -d "nftables" \ - netns/vnoded=/usr/bin/ \ - netns/vcmd=/usr/bin/ \ - package/etc/core.conf=/etc/core/ \ - package/etc/logging.conf=/etc/core/ \ - package/examples=/opt/core/ \ - daemon/dist/core-$(PACKAGE_VERSION)-py3-none-any.whl=/opt/core/ + -d "python (>= 2.7), python (<< 3.0)" \ + --deb-recommends quagga \ + netns/setup.py daemon/setup.py endef .PHONY: fpm fpm: clean-local-fpm - cd daemon && poetry build -f wheel - $(call fpm-deb) - $(call fpm-rpm) - $(call fpm-distributed-deb) - $(call fpm-distributed-rpm) + $(MAKE) -C gui install DESTDIR=$(DESTDIR) + $(call fpm-gui,rpm) + $(call fpm-gui,deb,-d "libtk-img") + $(call fpm-python,rpm,ns3/setup.py) + $(call fpm-python,deb,ns3/setup.py) + $(call fpm-daemon-rpm) + $(call fpm-daemon-deb,sysv,--deb-init,scripts/core-daemon) + $(call fpm-daemon-deb,systemd,--deb-systemd,scripts/core-daemon.service) .PHONY: clean-local-fpm clean-local-fpm: @@ -170,12 +136,24 @@ define change-files = $(info creating file $1 from $1.in) @$(SED) -e 's,[@]sbindir[@],$(sbindir),g' \ -e 's,[@]bindir[@],$(bindir),g' \ + -e 's,[@]pythondir[@],$(pythondir),g' \ + -e 's,[@]PYTHON[@],$(PYTHON),g' \ -e 's,[@]PACKAGE_VERSION[@],$(PACKAGE_VERSION),g' \ -e 's,[@]PACKAGE_DATE[@],$(PACKAGE_DATE),g' \ -e 's,[@]CORE_LIB_DIR[@],$(CORE_LIB_DIR),g' \ -e 's,[@]CORE_STATE_DIR[@],$(CORE_STATE_DIR),g' \ -e 's,[@]CORE_DATA_DIR[@],$(CORE_DATA_DIR),g' \ -e 's,[@]CORE_CONF_DIR[@],$(CORE_CONF_DIR),g' \ + -e 's,[@]CORE_GUI_CONF_DIR[@],$(CORE_GUI_CONF_DIR),g' \ + -e 's,[@]brctl_path[@],$(brctl_path),g' \ + -e 's,[@]sysctl_path[@],$(sysctl_path),g' \ + -e 's,[@]ip_path[@],$(ip_path),g' \ + -e 's,[@]tc_path[@],$(tc_path),g' \ + -e 's,[@]ebtables_path[@],$(ebtables_path),g' \ + -e 's,[@]mount_path[@],$(mount_path),g' \ + -e 's,[@]umount_path[@],$(umount_path),g' \ + -e 's,[@]ovs_vs_path[@],$(ovs_vs_path),g' \ + -e 's,[@]ovs_of_path[@],$(ovs_of_path),g' \ < $1.in > $1 endef @@ -183,8 +161,13 @@ all: change-files .PHONY: change-files change-files: + $(call change-files,gui/core-gui) + $(call change-files,scripts/core-daemon.service) + $(call change-files,scripts/core-daemon) $(call change-files,daemon/core/constants.py) + $(call change-files,ns3/setup.py) $(call change-files,netns/setup.py) + $(call change-files,daemon/setup.py) CORE_DOC_SRC = core-python-$(PACKAGE_VERSION) .PHONY: doc diff --git 
a/README.md b/README.md index efab2e70..c5f7e182 100644 --- a/README.md +++ b/README.md @@ -1,107 +1,41 @@ -# Index -- CORE -- Docker Setup - - Precompiled container image - - Build container image from source - - Adding extra packages - -- Useful commands -- License - -# CORE +# CORE [![Codacy Badge](https://api.codacy.com/project/badge/Grade/d94eb0244ade4510a106b4af76077a92)](https://www.codacy.com/app/blakeharnden/core?utm_source=github.com&utm_medium=referral&utm_content=coreemu/core&utm_campaign=Badge_Grade) CORE: Common Open Research Emulator -Copyright (c)2005-2022 the Boeing Company. +Copyright (c)2005-2018 the Boeing Company. See the LICENSE file included in this distribution. -# Docker Setup +## About -Here you have 2 choices +The Common Open Research Emulator (CORE) is a tool for emulating +networks on one or more machines. You can connect these emulated +networks to live networks. CORE consists of a GUI for drawing +topologies of lightweight virtual machines, and Python modules for +scripting network emulation. -## Precompiled container image +## Documentation and Examples -```bash +* Documentation hosted on GitHub + * +* Basic Script Examples + * [Examples](daemon/examples/api) +* Custom Service Example + * [sample.py](daemon/examples/myservices/sample.py) +* Custom Emane Model Example + * [examplemodel.py](daemon/examples/myemane/examplemodel.py) -# Start container -sudo docker run -itd --name core -e DISPLAY -v /tmp/.X11-unix:/tmp/.X11-unix:rw --privileged --restart unless-stopped git.olympuslab.net/afonso/core-extra:latest +## Support -``` -## Build container image from source +We are leveraging Discord for persistent chat rooms, voice chat, and +GitHub integration. This allows for more dynamic conversations and the +capability to respond faster. Feel free to join us at the link below. + -```bash -# Clone the repo -git clone https://gitea.olympuslab.net/afonso/core-extra.git +## Building CORE -# cd into the directory -cd core-extra +See [CORE Installation](http://coreemu.github.io/core/install.html) for detailed build instructions. -# build the docker image -sudo docker build -t core-extra . +### Running CORE -# start container -sudo docker run -itd --name core -e DISPLAY -v /tmp/.X11-unix:/tmp/.X11-unix:rw --privileged --restart unless-stopped core-extra - -``` - -### Adding extra packages - -To add extra packages you must modify the Dockerfile and then compile the docker image. -If you install it after starting the container it will, by docker nature, be reverted on the next boot of the container. - -# Useful commands - -I have the following functions on my fish shell -to help me better use core - -THIS ONLY WORKS ON FISH, MODIFY FOR BASH OR ZSH - -```fish - -# RUN CORE GUI -function core - xhost +local:root - sudo docker exec -it core core-gui -end - -# RUN BASH INSIDE THE CONTAINER -function core-bash - sudo docker exec -it core /bin/bash -end - - -# LAUNCH NODE BASH ON THE HOST MACHINE -function launch-term --argument nodename - sudo docker exec -it core xterm -bg black -fg white -fa 'DejaVu Sans Mono' -fs 16 -e vcmd -c /tmp/pycore.1/$nodename -- /bin/bash -end - -#TO RUN ANY OTHER COMMAND -sudo docker exec -it core COMAND_GOES_HERE - -``` - -## LICENSE - -Copyright (c) 2005-2018, the Boeing Company. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. 
Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF -THE POSSIBILITY OF SUCH DAMAGE. +See [Using the CORE GUI](http://coreemu.github.io/core/usage.html) for more details on running CORE. diff --git a/bootstrap.sh b/bootstrap.sh index 25fdecfd..ab3d741c 100755 --- a/bootstrap.sh +++ b/bootstrap.sh @@ -1,5 +1,9 @@ #!/bin/sh # +# (c)2010-2012 the Boeing Company +# +# author: Jeff Ahrenholz +# # Bootstrap the autoconf system. # diff --git a/configure.ac b/configure.ac index 4e56507a..79c5ecc0 100644 --- a/configure.ac +++ b/configure.ac @@ -2,7 +2,7 @@ # Process this file with autoconf to produce a configure script. # this defines the CORE version number, must be static for AC_INIT -AC_INIT(core, 9.0.3) +AC_INIT(core, 5.2.1, core-dev@nrl.navy.mil) # autoconf and automake initialization AC_CONFIG_SRCDIR([netns/version.h.in]) @@ -14,7 +14,7 @@ AM_INIT_AUTOMAKE([tar-ustar]) # define variables used for packaging and date display PACKAGE_DATE=m4_esyscmd_s([date +%Y%m%d]) PACKAGE_VENDOR="CORE Developers" -PACKAGE_MAINTAINERS="$PACKAGE_VENDOR" +PACKAGE_MAINTAINERS="$PACKAGE_VENDOR <$PACKAGE_BUGREPORT>" # core specific variables CORE_LIB_DIR="\${prefix}/lib/core" @@ -30,14 +30,20 @@ AC_SUBST(CORE_CONF_DIR) AC_SUBST(CORE_DATA_DIR) AC_SUBST(CORE_STATE_DIR) -# documentation option -AC_ARG_ENABLE([docs], - [AS_HELP_STRING([--enable-docs[=ARG]], - [build python documentation (default is no)])], - [], [enable_docs=no]) -AC_SUBST(enable_docs) +# CORE GUI configuration files and preferences in CORE_GUI_CONF_DIR +# scenario files in ~/.core/configs/ +AC_ARG_WITH([guiconfdir], + [AS_HELP_STRING([--with-guiconfdir=dir], + [specify GUI configuration directory])], + [CORE_GUI_CONF_DIR="$with_guiconfdir"], + [CORE_GUI_CONF_DIR="\$\${HOME}/.core"]) +AC_SUBST(CORE_GUI_CONF_DIR) +AC_ARG_ENABLE([gui], + [AS_HELP_STRING([--enable-gui[=ARG]], + [build and install the GUI (default is yes)])], + [], [enable_gui=yes]) +AC_SUBST(enable_gui) -# python option AC_ARG_ENABLE([python], [AS_HELP_STRING([--enable-python[=ARG]], [build and install the python bindings (default is yes)])], @@ -48,7 +54,6 @@ if test "x$enable_python" = "xyes" ; then else want_python=no fi - AC_ARG_ENABLE([daemon], [AS_HELP_STRING([--enable-daemon[=ARG]], [build and install the daemon with Python modules @@ -83,62 +88,8 @@ if test "x$enable_daemon" = "xyes"; then want_python=yes want_linux_netns=yes - AM_PATH_PYTHON(3.9) - AS_IF([$PYTHON -m grpc_tools.protoc -h &> /dev/null], [], [AC_MSG_ERROR([please install python grpcio-tools])]) - - 
AC_CHECK_PROG(sysctl_path, sysctl, $as_dir, no, $SEARCHPATH) - if test "x$sysctl_path" = "xno" ; then - AC_MSG_ERROR([Could not locate sysctl (from procps package).]) - fi - - AC_CHECK_PROG(nftables_path, nft, $as_dir, no, $SEARCHPATH) - if test "x$nftables_path" = "xno" ; then - AC_MSG_ERROR([Could not locate nftables (from nftables package).]) - fi - - AC_CHECK_PROG(ip_path, ip, $as_dir, no, $SEARCHPATH) - if test "x$ip_path" = "xno" ; then - AC_MSG_ERROR([Could not locate ip (from iproute package).]) - fi - - AC_CHECK_PROG(tc_path, tc, $as_dir, no, $SEARCHPATH) - if test "x$tc_path" = "xno" ; then - AC_MSG_ERROR([Could not locate tc (from iproute package).]) - fi - - AC_CHECK_PROG(ethtool_path, ethtool, $as_dir, no, $SEARCHPATH) - if test "x$ethtool_path" = "xno" ; then - AC_MSG_ERROR([Could not locate ethtool (from package ethtool)]) - fi - - AC_CHECK_PROG(mount_path, mount, $as_dir, no, $SEARCHPATH) - if test "x$mount_path" = "xno" ; then - AC_MSG_ERROR([Could not locate mount (from package mount)]) - fi - - AC_CHECK_PROG(umount_path, umount, $as_dir, no, $SEARCHPATH) - if test "x$umount_path" = "xno" ; then - AC_MSG_ERROR([Could not locate umount (from package mount)]) - fi - - AC_CHECK_PROG(convert, convert, yes, no, $SEARCHPATH) - if test "x$convert" = "xno" ; then - AC_MSG_WARN([Could not locate ImageMagick convert.]) - fi - - AC_CHECK_PROG(ovs_vs_path, ovs-vsctl, $as_dir, no, $SEARCHPATH) - if test "x$ovs_vs_path" = "xno" ; then - AC_MSG_WARN([Could not locate ovs-vsctl cannot use OVS mode]) - fi - - AC_CHECK_PROG(ovs_of_path, ovs-ofctl, $as_dir, no, $SEARCHPATH) - if test "x$ovs_of_path" = "xno" ; then - AC_MSG_WARN([Could not locate ovs-ofctl cannot use OVS mode]) - fi -fi - -if [ test "x$enable_daemon" = "xyes" || test "x$enable_vnodedonly" = "xyes" ] ; then - want_linux_netns=yes + # Checks for libraries. + AC_CHECK_LIB([netgraph], [NgMkSockNode]) # Checks for header files. 
AC_CHECK_HEADERS([arpa/inet.h fcntl.h limits.h stdint.h stdlib.h string.h sys/ioctl.h sys/mount.h sys/socket.h sys/time.h termios.h unistd.h]) @@ -158,6 +109,54 @@ if [ test "x$enable_daemon" = "xyes" || test "x$enable_vnodedonly" = "xyes" ] ; AC_FUNC_REALLOC AC_CHECK_FUNCS([atexit dup2 gettimeofday memset socket strerror uname]) + AM_PATH_PYTHON(2.7) + + AC_CHECK_PROG(brctl_path, brctl, $as_dir, no, $SEARCHPATH) + if test "x$brctl_path" = "xno" ; then + AC_MSG_ERROR([Could not locate brctl (from bridge-utils package).]) + fi + AC_CHECK_PROG(sysctl_path, sysctl, $as_dir, no, $SEARCHPATH) + AC_CHECK_PROG(ebtables_path, ebtables, $as_dir, no, $SEARCHPATH) + if test "x$ebtables_path" = "xno" ; then + AC_MSG_ERROR([Could not locate ebtables (from ebtables package).]) + fi + AC_CHECK_PROG(ip_path, ip, $as_dir, no, $SEARCHPATH) + if test "x$ip_path" = "xno" ; then + AC_MSG_ERROR([Could not locate ip (from iproute package).]) + fi + AC_CHECK_PROG(tc_path, tc, $as_dir, no, $SEARCHPATH) + if test "x$tc_path" = "xno" ; then + AC_MSG_ERROR([Could not locate tc (from iproute package).]) + fi + AC_CHECK_PROG(mount_path, mount, $as_dir, no, $SEARCHPATH) + AC_CHECK_PROG(umount_path, umount, $as_dir, no, $SEARCHPATH) + AC_CHECK_PROG(convert, convert, yes, no, $SEARCHPATH) + if test "x$convert" = "xno" ; then + AC_MSG_WARN([Could not locate ImageMagick convert.]) + fi + AC_CHECK_PROG(ovs_vs_path, ovs-vsctl, $as_dir, no, $SEARCHPATH) + if test "x$ovs_vs_path" = "xno" ; then + AC_MSG_WARN([Could not locate ovs-vsctl cannot use OVS nodes]) + fi + AC_CHECK_PROG(ovs_of_path, ovs-ofctl, $as_dir, no, $SEARCHPATH) + if test "x$ovs_of_path" = "xno" ; then + AC_MSG_WARN([Could not locate ovs-ofctl cannot use OVS nodes]) + fi + + CFLAGS_save=$CFLAGS + CPPFLAGS_save=$CPPFLAGS + if test "x$PYTHON_INCLUDE_DIR" = "x"; then + PYTHON_INCLUDE_DIR=`$PYTHON -c "import distutils.sysconfig; print distutils.sysconfig.get_python_inc()"` + fi + CFLAGS="-I$PYTHON_INCLUDE_DIR" + CPPFLAGS="-I$PYTHON_INCLUDE_DIR" + AC_CHECK_HEADERS([Python.h], [], + AC_MSG_ERROR([Python bindings require Python development headers (try installing your 'python-devel' or 'python-dev' package)])) + CFLAGS=$CFLAGS_save + CPPFLAGS=$CPPFLAGS_save +fi +if [ test "x$enable_daemon" = "xyes" || test "x$enable_vnodedonly" = "xyes" ] ; then + want_linux_netns=yes PKG_CHECK_MODULES(libev, libev, AC_MSG_RESULT([found libev using pkgconfig OK]) AC_SUBST(libev_CFLAGS) @@ -171,7 +170,8 @@ if [ test "x$enable_daemon" = "xyes" || test "x$enable_vnodedonly" = "xyes" ] ; fi want_docs=no -if [test "x$want_python" = "xyes" && test "x$enable_docs" = "xyes"] ; then +if test "x$enable_docs" = "xyes" ; then + AC_CHECK_PROG(help2man, help2man, yes, no, $SEARCHPATH) if test "x$help2man" = "xno" ; then @@ -189,17 +189,37 @@ if [test "x$want_python" = "xyes" && test "x$enable_docs" = "xyes"] ; then # check for sphinx required during make AC_CHECK_PROG(sphinxapi_path, sphinx-apidoc, $as_dir, no, $SEARCHPATH) if test "x$sphinxapi_path" = "xno" ; then - AC_MSG_ERROR(["Could not locate sphinx-apidoc, install python3 -m pip install sphinx"]) + AC_MSG_ERROR(["Could not location sphinx-apidoc, from the python-sphinx package"]) want_docs=no fi - AS_IF([$PYTHON -c "import sphinx_rtd_theme" &> /dev/null], [], [AC_MSG_ERROR([doc dependency missing, please install python3 -m pip install sphinx-rtd-theme])]) fi +#AC_PATH_PROGS(tcl_path, [tclsh tclsh8.5 tclsh8.4], no) +#if test "x$tcl_path" = "xno" ; then +# AC_MSG_ERROR([Could not locate tclsh. 
Please install Tcl/Tk.]) +#fi + +#AC_PATH_PROGS(wish_path, [wish wish8.5 wish8.4], no) +#if test "x$wish_path" = "xno" ; then +# AC_MSG_ERROR([Could not locate wish. Please install Tcl/Tk.]) +#fi + +AC_ARG_WITH([startup], + [AS_HELP_STRING([--with-startup=option], + [option=systemd,suse,none to install systemd/SUSE init scripts])], + [with_startup=$with_startup], + [with_startup=initd]) +AC_SUBST(with_startup) +AC_MSG_RESULT([using startup option $with_startup]) + # Variable substitutions +AM_CONDITIONAL(WANT_GUI, test x$enable_gui = xyes) AM_CONDITIONAL(WANT_DAEMON, test x$enable_daemon = xyes) AM_CONDITIONAL(WANT_DOCS, test x$want_docs = xyes) AM_CONDITIONAL(WANT_PYTHON, test x$want_python = xyes) AM_CONDITIONAL(WANT_NETNS, test x$want_linux_netns = xyes) +AM_CONDITIONAL(WANT_INITD, test x$with_startup = xinitd) +AM_CONDITIONAL(WANT_SYSTEMD, test x$with_startup = xsystemd) AM_CONDITIONAL(WANT_VNODEDONLY, test x$enable_vnodedonly = xyes) if test $cross_compiling = no; then @@ -210,6 +230,11 @@ fi # Output files AC_CONFIG_FILES([Makefile + gui/version.tcl + gui/Makefile + gui/icons/Makefile + scripts/Makefile + scripts/perf/Makefile man/Makefile docs/Makefile daemon/Makefile @@ -217,7 +242,8 @@ AC_CONFIG_FILES([Makefile daemon/doc/conf.py daemon/proto/Makefile netns/Makefile - netns/version.h],) + netns/version.h + ns3/Makefile],) AC_OUTPUT # Summary text @@ -231,12 +257,20 @@ Build: Prefix: ${prefix} Exec Prefix: ${exec_prefix} +GUI: + GUI path: ${CORE_LIB_DIR} + GUI config: ${CORE_GUI_CONF_DIR} + Daemon: Daemon path: ${bindir} Daemon config: ${CORE_CONF_DIR} - Python: ${PYTHON} + Python modules: ${pythondir} + Logs: ${CORE_STATE_DIR}/log + +Startup: ${with_startup} Features to build: + Build GUI: ${enable_gui} Build Daemon: ${enable_daemon} Documentation: ${want_docs} diff --git a/daemon/.gitignore b/daemon/.gitignore new file mode 100644 index 00000000..27ffc2f1 --- /dev/null +++ b/daemon/.gitignore @@ -0,0 +1,2 @@ +*.pyc +build diff --git a/daemon/.pre-commit-config.yaml b/daemon/.pre-commit-config.yaml deleted file mode 100644 index bc9ead08..00000000 --- a/daemon/.pre-commit-config.yaml +++ /dev/null @@ -1,23 +0,0 @@ -repos: -- repo: local - hooks: - - id: isort - name: isort - stages: [commit] - language: system - entry: bash -c 'cd daemon && poetry run isort --atomic -y' - types: [python] - - - id: black - name: black - stages: [commit] - language: system - entry: bash -c 'cd daemon && poetry run black .' - types: [python] - - - id: flake8 - name: flake8 - stages: [commit] - language: system - entry: bash -c 'cd daemon && poetry run flake8' - types: [python] diff --git a/daemon/Makefile.am b/daemon/Makefile.am index 2585ea1a..769098bb 100644 --- a/daemon/Makefile.am +++ b/daemon/Makefile.am @@ -1,14 +1,50 @@ # CORE +# (c)2010-2012 the Boeing Company. +# See the LICENSE file included in this distribution. +# +# author: Jeff Ahrenholz # # Makefile for building netns components. 
# +SETUPPY = setup.py +SETUPPYFLAGS = -v + if WANT_DOCS -DOCS = doc + DOCS = doc endif SUBDIRS = proto $(DOCS) +SCRIPT_FILES := $(notdir $(wildcard scripts/*)) +MAN_FILES := $(notdir $(wildcard ../man/*.1)) + +# Python package build +noinst_SCRIPTS = build +build: + $(PYTHON) $(SETUPPY) $(SETUPPYFLAGS) build + +# Python package install +install-exec-hook: + $(PYTHON) $(SETUPPY) $(SETUPPYFLAGS) install \ + --root=/$(DESTDIR) \ + --prefix=$(prefix) \ + --install-lib=$(pythondir) \ + --single-version-externally-managed + +# Python package uninstall +uninstall-hook: + rm -rf $(DESTDIR)/etc/core + rm -rf $(DESTDIR)/$(datadir)/core + rm -f $(addprefix $(DESTDIR)/$(datarootdir)/man/man1/, $(MAN_FILES)) + rm -f $(addprefix $(DESTDIR)/$(bindir)/,$(SCRIPT_FILES)) + rm -rf $(DESTDIR)/$(pythondir)/core-$(PACKAGE_VERSION)-py$(PYTHON_VERSION).egg-info + rm -rf $(DESTDIR)/$(pythondir)/core + +# Python package cleanup +clean-local: + -rm -rf build + # because we include entire directories with EXTRA_DIST, we need to clean up # the source control files dist-hook: @@ -17,12 +53,17 @@ dist-hook: distclean-local: -rm -rf core.egg-info + DISTCLEANFILES = Makefile.in # files to include with distribution tarball -EXTRA_DIST = core \ +EXTRA_DIST = $(SETUPPY) \ + core \ + data \ doc/conf.py.in \ + examples \ + scripts \ tests \ + test.py \ setup.cfg \ - poetry.lock \ - pyproject.toml + requirements.txt diff --git a/daemon/core/__init__.py b/daemon/core/__init__.py index c847c8dc..65b380c1 100644 --- a/daemon/core/__init__.py +++ b/daemon/core/__init__.py @@ -1,4 +1,30 @@ +import json import logging.config +import os +import subprocess + +from core import constants # setup default null handler logging.getLogger(__name__).addHandler(logging.NullHandler()) + + +def load_logging_config(): + """ + Load CORE logging configuration file. + + :return: nothing + """ + log_config_path = os.path.join(constants.CORE_CONF_DIR, "logging.conf") + with open(log_config_path, "r") as log_config_file: + log_config = json.load(log_config_file) + logging.config.dictConfig(log_config) + + +class CoreCommandError(subprocess.CalledProcessError): + """ + Used when encountering internal CORE command errors. + """ + + def __str__(self): + return "Command(%s), Status(%s):\n%s" % (self.cmd, self.returncode, self.output) diff --git a/daemon/core/api/__init__.py b/daemon/core/api/__init__.py index e69de29b..c2e3c613 100644 --- a/daemon/core/api/__init__.py +++ b/daemon/core/api/__init__.py @@ -0,0 +1,3 @@ +""" +Contains code specific to the legacy TCP API for interacting with the TCL based GUI. +""" diff --git a/daemon/core/api/coreapi.py b/daemon/core/api/coreapi.py new file mode 100644 index 00000000..cbdf8d97 --- /dev/null +++ b/daemon/core/api/coreapi.py @@ -0,0 +1,1003 @@ +""" +Uses coreapi_data for message and TLV types, and defines TLV data +types and objects used for parsing and building CORE API messages. + +CORE API messaging is leveraged for communication with the GUI. 
+""" + +import socket +import struct + +from enum import Enum + +from core.enumerations import ConfigTlvs +from core.enumerations import EventTlvs +from core.enumerations import EventTypes +from core.enumerations import ExceptionTlvs +from core.enumerations import ExecuteTlvs +from core.enumerations import FileTlvs +from core.enumerations import InterfaceTlvs +from core.enumerations import LinkTlvs +from core.enumerations import MessageFlags +from core.enumerations import MessageTypes +from core.enumerations import NodeTlvs +from core.enumerations import RegisterTlvs +from core.enumerations import SessionTlvs +from core.misc import structutils +from core.misc.ipaddress import IpAddress +from core.misc.ipaddress import MacAddress + + +class CoreTlvData(object): + """ + Helper base class used for packing and unpacking values using struct. + """ + + # format string for packing data + data_format = None + # python data type for the data + data_type = None + # pad length for data after packing + pad_len = None + + @classmethod + def pack(cls, value): + """ + Convenience method for packing data using the struct module. + + :param value: value to pack + :return: length of data and the packed data itself + :rtype: tuple + """ + data = struct.pack(cls.data_format, value) + length = len(data) - cls.pad_len + return length, data + + @classmethod + def unpack(cls, data): + """ + Convenience method for unpacking data using the struct module. + + :param data: data to unpack + :return: the value of the unpacked data + """ + return struct.unpack(cls.data_format, data)[0] + + @classmethod + def pack_string(cls, value): + """ + Convenience method for packing data from a string representation. + + :param str value: value to pack + :return: length of data and the packed data itself + :rtype: tuple + """ + return cls.pack(cls.from_string(value)) + + @classmethod + def from_string(cls, value): + """ + Retrieve the value type from a string representation. + + :param str value: value to get a data type from + :return: value parse from string representation + """ + return cls.data_type(value) + + +class CoreTlvDataObj(CoreTlvData): + """ + Helper class for packing custom object data. + """ + + @classmethod + def pack(cls, value): + """ + Convenience method for packing custom object data. + + :param obj: custom object to pack + :return: length of data and the packed data itself + :rtype: tuple + """ + value = cls.get_value(value) + return super(CoreTlvDataObj, cls).pack(value) + + @classmethod + def unpack(cls, data): + """ + Convenience method for unpacking custom object data. + + :param data: data to unpack custom object from + :return: unpacked custom object + """ + data = super(CoreTlvDataObj, cls).unpack(data) + return cls.new_obj(data) + + @staticmethod + def get_value(obj): + """ + Method that will be used to retrieve the data to pack from a custom object. + + :param obj: custom object to get data to pack + :return: data value to pack + """ + raise NotImplementedError + + @staticmethod + def new_obj(obj): + """ + Method for retrieving data to unpack from an object. + + :param obj: object to get unpack data from + :return: value of unpacked data + """ + raise NotImplementedError + + +class CoreTlvDataUint16(CoreTlvData): + """ + Helper class for packing uint16 data. + """ + data_format = "!H" + data_type = int + pad_len = 0 + + +class CoreTlvDataUint32(CoreTlvData): + """ + Helper class for packing uint32 data. 
+ """ + data_format = "!2xI" + data_type = int + pad_len = 2 + + +class CoreTlvDataUint64(CoreTlvData): + """ + Helper class for packing uint64 data. + """ + data_format = "!2xQ" + data_type = long + pad_len = 2 + + +class CoreTlvDataString(CoreTlvData): + """ + Helper class for packing string data. + """ + data_type = str + + @classmethod + def pack(cls, value): + """ + Convenience method for packing string data. + + :param str value: string to pack + :return: length of data packed and the packed data + :rtype: tuple + """ + if not isinstance(value, str): + raise ValueError("value not a string: %s" % value) + + if len(value) < 256: + header_len = CoreTlv.header_len + else: + header_len = CoreTlv.long_header_len + + pad_len = -(header_len + len(value)) % 4 + return len(value), value + "\0" * pad_len + + @classmethod + def unpack(cls, data): + """ + Convenience method for unpacking string data. + + :param str data: unpack string data + :return: unpacked string data + """ + return data.rstrip("\0") + + +class CoreTlvDataUint16List(CoreTlvData): + """ + List of unsigned 16-bit values. + """ + data_type = tuple + data_format = "!H" + + @classmethod + def pack(cls, values): + """ + Convenience method for packing a uint 16 list. + + :param list values: unint 16 list to pack + :return: length of data packed and the packed data + :rtype: tuple + """ + if not isinstance(values, tuple): + raise ValueError("value not a tuple: %s" % values) + + data = "" + for value in values: + data += struct.pack(cls.data_format, value) + + pad_len = -(CoreTlv.header_len + len(data)) % 4 + return len(data), data + "\0" * pad_len + + @classmethod + def unpack(cls, data): + """ + Convenience method for unpacking a uint 16 list. + + :param data: data to unpack + :return: unpacked data + """ + data_format = "!%dH" % (len(data) / 2) + return struct.unpack(data_format, data) + + @classmethod + def from_string(cls, value): + """ + Retrieves a unint 16 list from a string + + :param str value: string representation of a uint 16 list + :return: unint 16 list + :rtype: list + """ + return tuple(int(x) for x in value.split()) + + +class CoreTlvDataIpv4Addr(CoreTlvDataObj): + """ + Utility class for packing/unpacking Ipv4 addresses. + """ + data_type = IpAddress.from_string + data_format = "!2x4s" + pad_len = 2 + + @staticmethod + def get_value(obj): + """ + Retrieve Ipv4 address value from object. + + :param core.misc.ipaddress.IpAddress obj: ip address to get value from + :return: + """ + return obj.addr + + @staticmethod + def new_obj(obj): + """ + Retrieve Ipv4 address from a string representation. + + :param str value: value to get Ipv4 address from + :return: Ipv4 address + :rtype: core.misc.ipaddress.IpAddress + """ + return IpAddress(af=socket.AF_INET, address=obj) + + +class CoreTlvDataIPv6Addr(CoreTlvDataObj): + """ + Utility class for packing/unpacking Ipv6 addresses. + """ + data_format = "!16s2x" + data_type = IpAddress.from_string + pad_len = 2 + + @staticmethod + def get_value(obj): + """ + Retrieve Ipv6 address value from object. + + :param core.misc.ipaddress.IpAddress obj: ip address to get value from + :return: + """ + return obj.addr + + @staticmethod + def new_obj(value): + """ + Retrieve Ipv6 address from a string representation. + + :param str value: value to get Ipv4 address from + :return: Ipv4 address + :rtype: core.misc.ipaddress.IpAddress + """ + return IpAddress(af=socket.AF_INET6, address=value) + + +class CoreTlvDataMacAddr(CoreTlvDataObj): + """ + Utility class for packing/unpacking mac addresses. 
+ """ + data_format = "!2x8s" + data_type = MacAddress.from_string + pad_len = 2 + + @staticmethod + def get_value(obj): + """ + Retrieve Ipv6 address value from object. + + :param core.misc.ipaddress.MacAddress obj: mac address to get value from + :return: + """ + # extend to 64 bits + return "\0\0" + obj.addr + + @staticmethod + def new_obj(value): + """ + Retrieve mac address from a string representation. + + :param str value: value to get Ipv4 address from + :return: Ipv4 address + :rtype: core.misc.ipaddress.MacAddress + """ + # only use 48 bits + return MacAddress(address=value[2:]) + + +class CoreTlv(object): + """ + Base class for representing CORE TLVs. + """ + header_format = "!BB" + header_len = struct.calcsize(header_format) + + long_header_format = "!BBH" + long_header_len = struct.calcsize(long_header_format) + + tlv_type_map = Enum + tlv_data_class_map = {} + + def __init__(self, tlv_type, tlv_data): + """ + Create a CoreTlv instance. + + :param int tlv_type: tlv type + :param tlv_data: data to unpack + :return: unpacked data + """ + self.tlv_type = tlv_type + if tlv_data: + try: + self.value = self.tlv_data_class_map[self.tlv_type].unpack(tlv_data) + except KeyError: + self.value = tlv_data + else: + self.value = None + + @classmethod + def unpack(cls, data): + """ + Parse data and return unpacked class. + + :param data: data to unpack + :return: unpacked data class + """ + tlv_type, tlv_len = struct.unpack(cls.header_format, data[:cls.header_len]) + header_len = cls.header_len + if tlv_len == 0: + tlv_type, _zero, tlv_len = struct.unpack(cls.long_header_format, data[:cls.long_header_len]) + header_len = cls.long_header_len + tlv_size = header_len + tlv_len + # for 32-bit alignment + tlv_size += -tlv_size % 4 + return cls(tlv_type, data[header_len:tlv_size]), data[tlv_size:] + + @classmethod + def pack(cls, tlv_type, value): + """ + Pack a TLV value, based on type. + + :param int tlv_type: type of data to pack + :param value: data to pack + :return: header and packed data + """ + tlv_len, tlv_data = cls.tlv_data_class_map[tlv_type].pack(value) + + if tlv_len < 256: + hdr = struct.pack(cls.header_format, tlv_type, tlv_len) + else: + hdr = struct.pack(cls.long_header_format, tlv_type, 0, tlv_len) + + return hdr + tlv_data + + @classmethod + def pack_string(cls, tlv_type, value): + """ + Pack data type from a string representation + + :param int tlv_type: type of data to pack + :param str value: string representation of data + :return: header and packed data + """ + return cls.pack(tlv_type, cls.tlv_data_class_map[tlv_type].from_string(value)) + + def type_str(self): + """ + Retrieve type string for this data type. + + :return: data type name + :rtype: str + """ + try: + return self.tlv_type_map(self.tlv_type).name + except ValueError: + return "unknown tlv type: %s" % str(self.tlv_type) + + def __str__(self): + """ + String representation of this data type. + + :return: string representation + :rtype: str + """ + return "%s " % (self.__class__.__name__, self.type_str(), self.value) + + +class CoreNodeTlv(CoreTlv): + """ + Class for representing CORE Node TLVs. 
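Editor's note: the TLV codecs above lean entirely on struct format strings, which can be hard to picture from the docstrings alone. The following standalone Python 3 sketch is an editorial illustration only (not part of this patch); it mirrors how CoreTlvDataUint32 ("!2xI", pad_len 2) and the short CoreTlv header ("!BB") combine into one aligned TLV. The type value 0x01 is an arbitrary placeholder.

```python
import struct

# Pack a uint32 value the way CoreTlvDataUint32 does: "!2xI" emits two pad bytes
# plus the 4-byte value, and pad_len=2 is subtracted so the reported length is 4.
value_data = struct.pack("!2xI", 42)      # 6 bytes on the wire
tlv_len = len(value_data) - 2             # reported TLV length: 4

# The short header "!BB" carries a one-byte type and one-byte length; the long
# header "!BBH" is only used when the length does not fit in a single byte.
tlv = struct.pack("!BB", 0x01, tlv_len) + value_data
assert len(tlv) == 8                      # header + padded value, 32-bit aligned

# Unpacking reverses the steps: read the header, then decode the padded payload.
tlv_type, length = struct.unpack("!BB", tlv[:2])
value = struct.unpack("!2xI", tlv[2:])[0]
assert (tlv_type, length, value) == (0x01, 4, 42)
```

The same alignment rule appears in CoreTlv.unpack(), which rounds the consumed size up to the next 32-bit boundary before returning the remaining data.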
+ """ + + tlv_type_map = NodeTlvs + tlv_data_class_map = { + NodeTlvs.NUMBER.value: CoreTlvDataUint32, + NodeTlvs.TYPE.value: CoreTlvDataUint32, + NodeTlvs.NAME.value: CoreTlvDataString, + NodeTlvs.IP_ADDRESS.value: CoreTlvDataIpv4Addr, + NodeTlvs.MAC_ADDRESS.value: CoreTlvDataMacAddr, + NodeTlvs.IP6_ADDRESS.value: CoreTlvDataIPv6Addr, + NodeTlvs.MODEL.value: CoreTlvDataString, + NodeTlvs.EMULATION_SERVER.value: CoreTlvDataString, + NodeTlvs.SESSION.value: CoreTlvDataString, + NodeTlvs.X_POSITION.value: CoreTlvDataUint16, + NodeTlvs.Y_POSITION.value: CoreTlvDataUint16, + NodeTlvs.CANVAS.value: CoreTlvDataUint16, + NodeTlvs.EMULATION_ID.value: CoreTlvDataUint32, + NodeTlvs.NETWORK_ID.value: CoreTlvDataUint32, + NodeTlvs.SERVICES.value: CoreTlvDataString, + NodeTlvs.LATITUDE.value: CoreTlvDataString, + NodeTlvs.LONGITUDE.value: CoreTlvDataString, + NodeTlvs.ALTITUDE.value: CoreTlvDataString, + NodeTlvs.ICON.value: CoreTlvDataString, + NodeTlvs.OPAQUE.value: CoreTlvDataString, + } + + +class CoreLinkTlv(CoreTlv): + """ + Class for representing CORE link TLVs. + """ + + tlv_type_map = LinkTlvs + tlv_data_class_map = { + LinkTlvs.N1_NUMBER.value: CoreTlvDataUint32, + LinkTlvs.N2_NUMBER.value: CoreTlvDataUint32, + LinkTlvs.DELAY.value: CoreTlvDataUint64, + LinkTlvs.BANDWIDTH.value: CoreTlvDataUint64, + LinkTlvs.PER.value: CoreTlvDataString, + LinkTlvs.DUP.value: CoreTlvDataString, + LinkTlvs.JITTER.value: CoreTlvDataUint64, + LinkTlvs.MER.value: CoreTlvDataUint16, + LinkTlvs.BURST.value: CoreTlvDataUint16, + LinkTlvs.SESSION.value: CoreTlvDataString, + LinkTlvs.MBURST.value: CoreTlvDataUint16, + LinkTlvs.TYPE.value: CoreTlvDataUint32, + LinkTlvs.GUI_ATTRIBUTES.value: CoreTlvDataString, + LinkTlvs.UNIDIRECTIONAL.value: CoreTlvDataUint16, + LinkTlvs.EMULATION_ID.value: CoreTlvDataUint32, + LinkTlvs.NETWORK_ID.value: CoreTlvDataUint32, + LinkTlvs.KEY.value: CoreTlvDataUint32, + LinkTlvs.INTERFACE1_NUMBER.value: CoreTlvDataUint16, + LinkTlvs.INTERFACE1_IP4.value: CoreTlvDataIpv4Addr, + LinkTlvs.INTERFACE1_IP4_MASK.value: CoreTlvDataUint16, + LinkTlvs.INTERFACE1_MAC.value: CoreTlvDataMacAddr, + LinkTlvs.INTERFACE1_IP6.value: CoreTlvDataIPv6Addr, + LinkTlvs.INTERFACE1_IP6_MASK.value: CoreTlvDataUint16, + LinkTlvs.INTERFACE2_NUMBER.value: CoreTlvDataUint16, + LinkTlvs.INTERFACE2_IP4.value: CoreTlvDataIpv4Addr, + LinkTlvs.INTERFACE2_IP4_MASK.value: CoreTlvDataUint16, + LinkTlvs.INTERFACE2_MAC.value: CoreTlvDataMacAddr, + LinkTlvs.INTERFACE2_IP6.value: CoreTlvDataIPv6Addr, + LinkTlvs.INTERFACE2_IP6_MASK.value: CoreTlvDataUint16, + LinkTlvs.INTERFACE1_NAME.value: CoreTlvDataString, + LinkTlvs.INTERFACE2_NAME.value: CoreTlvDataString, + LinkTlvs.OPAQUE.value: CoreTlvDataString, + } + + +class CoreExecuteTlv(CoreTlv): + """ + Class for representing CORE execute TLVs. + """ + + tlv_type_map = ExecuteTlvs + tlv_data_class_map = { + ExecuteTlvs.NODE.value: CoreTlvDataUint32, + ExecuteTlvs.NUMBER.value: CoreTlvDataUint32, + ExecuteTlvs.TIME.value: CoreTlvDataUint32, + ExecuteTlvs.COMMAND.value: CoreTlvDataString, + ExecuteTlvs.RESULT.value: CoreTlvDataString, + ExecuteTlvs.STATUS.value: CoreTlvDataUint32, + ExecuteTlvs.SESSION.value: CoreTlvDataString, + } + + +class CoreRegisterTlv(CoreTlv): + """ + Class for representing CORE register TLVs. 
+ """ + + tlv_type_map = RegisterTlvs + tlv_data_class_map = { + RegisterTlvs.WIRELESS.value: CoreTlvDataString, + RegisterTlvs.MOBILITY.value: CoreTlvDataString, + RegisterTlvs.UTILITY.value: CoreTlvDataString, + RegisterTlvs.EXECUTE_SERVER.value: CoreTlvDataString, + RegisterTlvs.GUI.value: CoreTlvDataString, + RegisterTlvs.EMULATION_SERVER.value: CoreTlvDataString, + RegisterTlvs.SESSION.value: CoreTlvDataString, + } + + +class CoreConfigTlv(CoreTlv): + """ + Class for representing CORE configuration TLVs. + """ + + tlv_type_map = ConfigTlvs + tlv_data_class_map = { + ConfigTlvs.NODE.value: CoreTlvDataUint32, + ConfigTlvs.OBJECT.value: CoreTlvDataString, + ConfigTlvs.TYPE.value: CoreTlvDataUint16, + ConfigTlvs.DATA_TYPES.value: CoreTlvDataUint16List, + ConfigTlvs.VALUES.value: CoreTlvDataString, + ConfigTlvs.CAPTIONS.value: CoreTlvDataString, + ConfigTlvs.BITMAP.value: CoreTlvDataString, + ConfigTlvs.POSSIBLE_VALUES.value: CoreTlvDataString, + ConfigTlvs.GROUPS.value: CoreTlvDataString, + ConfigTlvs.SESSION.value: CoreTlvDataString, + ConfigTlvs.INTERFACE_NUMBER.value: CoreTlvDataUint16, + ConfigTlvs.NETWORK_ID.value: CoreTlvDataUint32, + ConfigTlvs.OPAQUE.value: CoreTlvDataString, + } + + +class CoreFileTlv(CoreTlv): + """ + Class for representing CORE file TLVs. + """ + + tlv_type_map = FileTlvs + tlv_data_class_map = { + FileTlvs.NODE.value: CoreTlvDataUint32, + FileTlvs.NAME.value: CoreTlvDataString, + FileTlvs.MODE.value: CoreTlvDataString, + FileTlvs.NUMBER.value: CoreTlvDataUint16, + FileTlvs.TYPE.value: CoreTlvDataString, + FileTlvs.SOURCE_NAME.value: CoreTlvDataString, + FileTlvs.SESSION.value: CoreTlvDataString, + FileTlvs.DATA.value: CoreTlvDataString, + FileTlvs.COMPRESSED_DATA.value: CoreTlvDataString, + } + + +class CoreInterfaceTlv(CoreTlv): + """ + Class for representing CORE interface TLVs. + """ + + tlv_type_map = InterfaceTlvs + tlv_data_class_map = { + InterfaceTlvs.NODE.value: CoreTlvDataUint32, + InterfaceTlvs.NUMBER.value: CoreTlvDataUint16, + InterfaceTlvs.NAME.value: CoreTlvDataString, + InterfaceTlvs.IP_ADDRESS.value: CoreTlvDataIpv4Addr, + InterfaceTlvs.MASK.value: CoreTlvDataUint16, + InterfaceTlvs.MAC_ADDRESS.value: CoreTlvDataMacAddr, + InterfaceTlvs.IP6_ADDRESS.value: CoreTlvDataIPv6Addr, + InterfaceTlvs.IP6_MASK.value: CoreTlvDataUint16, + InterfaceTlvs.TYPE.value: CoreTlvDataUint16, + InterfaceTlvs.SESSION.value: CoreTlvDataString, + InterfaceTlvs.STATE.value: CoreTlvDataUint16, + InterfaceTlvs.EMULATION_ID.value: CoreTlvDataUint32, + InterfaceTlvs.NETWORK_ID.value: CoreTlvDataUint32, + } + + +class CoreEventTlv(CoreTlv): + """ + Class for representing CORE event TLVs. + """ + + tlv_type_map = EventTlvs + tlv_data_class_map = { + EventTlvs.NODE.value: CoreTlvDataUint32, + EventTlvs.TYPE.value: CoreTlvDataUint32, + EventTlvs.NAME.value: CoreTlvDataString, + EventTlvs.DATA.value: CoreTlvDataString, + EventTlvs.TIME.value: CoreTlvDataString, + EventTlvs.SESSION.value: CoreTlvDataString, + } + + +class CoreSessionTlv(CoreTlv): + """ + Class for representing CORE session TLVs. 
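Editor's note: one of the less obvious codecs is CoreTlvDataUint16List, which backs the DATA_TYPES entry of the config TLV map above. As a hedged Python 3 sketch (editorial illustration only; the example values are arbitrary), its packing boils down to concatenating big-endian 16-bit values and padding so the 2-byte header plus data ends on a 32-bit boundary:

```python
import struct

values = (1, 2, 3)                               # arbitrary example data-type codes
data = b"".join(struct.pack("!H", v) for v in values)
data += b"\0" * (-(2 + len(data)) % 4)           # pad to a 32-bit boundary (0 here)

# unpack() recovers the tuple by deriving the element count from the byte length,
# and from_string() parses the space-separated form, e.g. "1 2 3" -> (1, 2, 3).
assert struct.unpack("!%dH" % (len(data) // 2), data) == (1, 2, 3)
```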
+ """ + + tlv_type_map = SessionTlvs + tlv_data_class_map = { + SessionTlvs.NUMBER.value: CoreTlvDataString, + SessionTlvs.NAME.value: CoreTlvDataString, + SessionTlvs.FILE.value: CoreTlvDataString, + SessionTlvs.NODE_COUNT.value: CoreTlvDataString, + SessionTlvs.DATE.value: CoreTlvDataString, + SessionTlvs.THUMB.value: CoreTlvDataString, + SessionTlvs.USER.value: CoreTlvDataString, + SessionTlvs.OPAQUE.value: CoreTlvDataString, + } + + +class CoreExceptionTlv(CoreTlv): + """ + Class for representing CORE exception TLVs. + """ + + tlv_type_map = ExceptionTlvs + tlv_data_class_map = { + ExceptionTlvs.NODE.value: CoreTlvDataUint32, + ExceptionTlvs.SESSION.value: CoreTlvDataString, + ExceptionTlvs.LEVEL.value: CoreTlvDataUint16, + ExceptionTlvs.SOURCE.value: CoreTlvDataString, + ExceptionTlvs.DATE.value: CoreTlvDataString, + ExceptionTlvs.TEXT.value: CoreTlvDataString, + ExceptionTlvs.OPAQUE.value: CoreTlvDataString, + } + + +class CoreMessage(object): + """ + Base class for representing CORE messages. + """ + + header_format = "!BBH" + header_len = struct.calcsize(header_format) + message_type = None + flag_map = MessageFlags + tlv_class = CoreTlv + + def __init__(self, flags, hdr, data): + self.raw_message = hdr + data + self.flags = flags + self.tlv_data = {} + self.parse_data(data) + + @classmethod + def unpack_header(cls, data): + """ + parse data and return (message_type, message_flags, message_len). + + :param str data: data to parse + :return: unpacked tuple + :rtype: tuple + """ + message_type, message_flags, message_len = struct.unpack(cls.header_format, data[:cls.header_len]) + return message_type, message_flags, message_len + + @classmethod + def create(cls, flags, values): + tlv_data = structutils.pack_values(cls.tlv_class, values) + packed = cls.pack(flags, tlv_data) + header_data = packed[:cls.header_len] + return cls(flags, header_data, tlv_data) + + @classmethod + def pack(cls, message_flags, tlv_data): + """ + Pack CORE message data. + + :param message_flags: message flags to pack with data + :param tlv_data: data to get length from for packing + :return: combined header and tlv data + """ + header = struct.pack(cls.header_format, cls.message_type, message_flags, len(tlv_data)) + return header + tlv_data + + def add_tlv_data(self, key, value): + """ + Add TLV data into the data map. + + :param int key: key to store TLV data + :param value: data to associate with key + :return: nothing + """ + if key in self.tlv_data: + raise KeyError("key already exists: %s (val=%s)" % (key, value)) + + self.tlv_data[key] = value + + def get_tlv(self, tlv_type): + """ + Retrieve TLV data from data map. + + :param int tlv_type: type of data to retrieve + :return: TLV type data + """ + return self.tlv_data.get(tlv_type) + + def parse_data(self, data): + """ + Parse data while possible and adding TLV data to the data map. + + :param data: data to parse for TLV data + :return: nothing + """ + while data: + tlv, data = self.tlv_class.unpack(data) + self.add_tlv_data(tlv.tlv_type, tlv.value) + + def pack_tlv_data(self): + """ + Opposite of parse_data(). Return packed TLV data using self.tlv_data dict. Used by repack(). + + :return: packed data + :rtype: str + """ + tlv_data = "" + keys = sorted(self.tlv_data.keys()) + + for key in keys: + value = self.tlv_data[key] + tlv_data += self.tlv_class.pack(key, value) + + return tlv_data + + def repack(self): + """ + Invoke after updating self.tlv_data[] to rebuild self.raw_message. 
+        Useful for modifying a message that has been parsed, before
+        sending the raw data again.
+
+        :return: nothing
+        """
+        tlv_data = self.pack_tlv_data()
+        self.raw_message = self.pack(self.flags, tlv_data)
+
+    def type_str(self):
+        """
+        Retrieve the name of the message type.
+
+        :return: name of message type
+        :rtype: str
+        """
+        try:
+            return MessageTypes(self.message_type).name
+        except ValueError:
+            return "unknown message type: %s" % str(self.message_type)
+
+    def flag_str(self):
+        """
+        Retrieve message flag string.
+
+        :return: message flag string
+        :rtype: str
+        """
+        message_flags = []
+        flag = 1L
+
+        while True:
+            if self.flags & flag:
+                try:
+                    message_flags.append(self.flag_map(flag).name)
+                except ValueError:
+                    message_flags.append("0x%x" % flag)
+            flag <<= 1
+            if not (self.flags & ~(flag - 1)):
+                break
+
+        return "0x%x <%s>" % (self.flags, " | ".join(message_flags))
+
+    def __str__(self):
+        """
+        Retrieve string representation of the message.
+
+        :return: string representation
+        :rtype: str
+        """
+        result = "%s <msgtype = %s, flags = %s>" % (self.__class__.__name__, self.type_str(), self.flag_str())
+
+        for key, value in self.tlv_data.iteritems():
+            try:
+                tlv_type = self.tlv_class.tlv_type_map(key).name
+            except ValueError:
+                tlv_type = "tlv type %s" % key
+
+            result += "\n  %s: %s" % (tlv_type, value)
+
+        return result
+
+    def node_numbers(self):
+        """
+        Return a list of node numbers included in this message.
+        """
+        number1 = None
+        number2 = None
+
+        # not all messages have node numbers
+        if self.message_type == MessageTypes.NODE.value:
+            number1 = self.get_tlv(NodeTlvs.NUMBER.value)
+        elif self.message_type == MessageTypes.LINK.value:
+            number1 = self.get_tlv(LinkTlvs.N1_NUMBER.value)
+            number2 = self.get_tlv(LinkTlvs.N2_NUMBER.value)
+        elif self.message_type == MessageTypes.EXECUTE.value:
+            number1 = self.get_tlv(ExecuteTlvs.NODE.value)
+        elif self.message_type == MessageTypes.CONFIG.value:
+            number1 = self.get_tlv(ConfigTlvs.NODE.value)
+        elif self.message_type == MessageTypes.FILE.value:
+            number1 = self.get_tlv(FileTlvs.NODE.value)
+        elif self.message_type == MessageTypes.INTERFACE.value:
+            number1 = self.get_tlv(InterfaceTlvs.NODE.value)
+        elif self.message_type == MessageTypes.EVENT.value:
+            number1 = self.get_tlv(EventTlvs.NODE.value)
+
+        result = []
+
+        if number1:
+            result.append(number1)
+
+        if number2:
+            result.append(number2)
+
+        return result
+
+    def session_numbers(self):
+        """
+        Return a list of session numbers included in this message.
+        """
+        result = []
+
+        if self.message_type == MessageTypes.SESSION.value:
+            sessions = self.get_tlv(SessionTlvs.NUMBER.value)
+        elif self.message_type == MessageTypes.EXCEPTION.value:
+            sessions = self.get_tlv(ExceptionTlvs.SESSION.value)
+        else:
+            # All other messages share TLV number 0xA for the session number(s).
+            sessions = self.get_tlv(NodeTlvs.SESSION.value)
+
+        if sessions:
+            for session_id in sessions.split("|"):
+                result.append(int(session_id))
+
+        return result
+
+
+class CoreNodeMessage(CoreMessage):
+    """
+    CORE node message class.
+    """
+    message_type = MessageTypes.NODE.value
+    tlv_class = CoreNodeTlv
+
+
+class CoreLinkMessage(CoreMessage):
+    """
+    CORE link message class.
+    """
+    message_type = MessageTypes.LINK.value
+    tlv_class = CoreLinkTlv
+
+
+class CoreExecMessage(CoreMessage):
+    """
+    CORE execute message class.
+    """
+    message_type = MessageTypes.EXECUTE.value
+    tlv_class = CoreExecuteTlv
+
+
+class CoreRegMessage(CoreMessage):
+    """
+    CORE register message class.
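# A minimal sketch of how the TLV message classes above can be exercised; the
# enumeration imports (MessageFlags, NodeTlvs from core.enumerations) and the
# tuple-of-values form passed to create() are assumptions based on how they are
# used elsewhere in this changeset, not a verbatim example from the codebase.
from core.api.coreapi import CoreNodeMessage
from core.enumerations import MessageFlags, NodeTlvs

# build a packed node message carrying a node number and name
message = CoreNodeMessage.create(
    MessageFlags.ADD.value,
    [(NodeTlvs.NUMBER, 1), (NodeTlvs.NAME, "n1")],
)
print(message)                 # uses CoreMessage.__str__ above
print(message.node_numbers())  # expected: [1]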
+ """ + message_type = MessageTypes.REGISTER.value + tlv_class = CoreRegisterTlv + + +class CoreConfMessage(CoreMessage): + """ + CORE configuration message class. + """ + message_type = MessageTypes.CONFIG.value + tlv_class = CoreConfigTlv + + +class CoreFileMessage(CoreMessage): + """ + CORE file message class. + """ + message_type = MessageTypes.FILE.value + tlv_class = CoreFileTlv + + +class CoreIfaceMessage(CoreMessage): + """ + CORE interface message class. + """ + message_type = MessageTypes.INTERFACE.value + tlv_class = CoreInterfaceTlv + + +class CoreEventMessage(CoreMessage): + """ + CORE event message class. + """ + message_type = MessageTypes.EVENT.value + tlv_class = CoreEventTlv + + +class CoreSessionMessage(CoreMessage): + """ + CORE session message class. + """ + message_type = MessageTypes.SESSION.value + tlv_class = CoreSessionTlv + + +class CoreExceptionMessage(CoreMessage): + """ + CORE exception message class. + """ + message_type = MessageTypes.EXCEPTION.value + tlv_class = CoreExceptionTlv + + +# map used to translate enumerated message type values to message class objects +CLASS_MAP = { + MessageTypes.NODE.value: CoreNodeMessage, + MessageTypes.LINK.value: CoreLinkMessage, + MessageTypes.EXECUTE.value: CoreExecMessage, + MessageTypes.REGISTER.value: CoreRegMessage, + MessageTypes.CONFIG.value: CoreConfMessage, + MessageTypes.FILE.value: CoreFileMessage, + MessageTypes.INTERFACE.value: CoreIfaceMessage, + MessageTypes.EVENT.value: CoreEventMessage, + MessageTypes.SESSION.value: CoreSessionMessage, + MessageTypes.EXCEPTION.value: CoreExceptionMessage, +} + + +def str_to_list(value): + """ + Helper to convert pipe-delimited string ("a|b|c") into a list (a, b, c). + + :param str value: string to convert + :return: converted list + :rtype: list + """ + + if value is None: + return None + + return value.split("|") + + +def state_name(value): + """ + Helper to convert state number into state name using event types. + + :param int value: state value to derive name from + :return: state name + :rtype: str + """ + + try: + value = EventTypes(value).name + except ValueError: + value = "unknown" + + return value diff --git a/daemon/core/api/dataconversion.py b/daemon/core/api/dataconversion.py new file mode 100644 index 00000000..5bc33fa9 --- /dev/null +++ b/daemon/core/api/dataconversion.py @@ -0,0 +1,65 @@ +""" +Converts CORE data objects into legacy API messages. +""" + +from core.api import coreapi +from core.enumerations import ConfigTlvs +from core.enumerations import NodeTlvs +from core.misc import structutils + + +def convert_node(node_data): + """ + Convenience method for converting NodeData to a packed TLV message. 
+ + :param core.data.NodeData node_data: node data to convert + :return: packed node message + """ + tlv_data = structutils.pack_values(coreapi.CoreNodeTlv, [ + (NodeTlvs.NUMBER, node_data.id), + (NodeTlvs.TYPE, node_data.node_type), + (NodeTlvs.NAME, node_data.name), + (NodeTlvs.IP_ADDRESS, node_data.ip_address), + (NodeTlvs.MAC_ADDRESS, node_data.mac_address), + (NodeTlvs.IP6_ADDRESS, node_data.ip6_address), + (NodeTlvs.MODEL, node_data.model), + (NodeTlvs.EMULATION_ID, node_data.emulation_id), + (NodeTlvs.EMULATION_SERVER, node_data.emulation_server), + (NodeTlvs.SESSION, node_data.session), + (NodeTlvs.X_POSITION, node_data.x_position), + (NodeTlvs.Y_POSITION, node_data.y_position), + (NodeTlvs.CANVAS, node_data.canvas), + (NodeTlvs.NETWORK_ID, node_data.network_id), + (NodeTlvs.SERVICES, node_data.services), + (NodeTlvs.LATITUDE, node_data.latitude), + (NodeTlvs.LONGITUDE, node_data.longitude), + (NodeTlvs.ALTITUDE, node_data.altitude), + (NodeTlvs.ICON, node_data.icon), + (NodeTlvs.OPAQUE, node_data.opaque) + ]) + return coreapi.CoreNodeMessage.pack(node_data.message_type, tlv_data) + + +def convert_config(config_data): + """ + Convenience method for converting ConfigData to a packed TLV message. + + :param core.data.ConfigData config_data: config data to convert + :return: packed message + """ + tlv_data = structutils.pack_values(coreapi.CoreConfigTlv, [ + (ConfigTlvs.NODE, config_data.node), + (ConfigTlvs.OBJECT, config_data.object), + (ConfigTlvs.TYPE, config_data.type), + (ConfigTlvs.DATA_TYPES, config_data.data_types), + (ConfigTlvs.VALUES, config_data.data_values), + (ConfigTlvs.CAPTIONS, config_data.captions), + (ConfigTlvs.BITMAP, config_data.bitmap), + (ConfigTlvs.POSSIBLE_VALUES, config_data.possible_values), + (ConfigTlvs.GROUPS, config_data.groups), + (ConfigTlvs.SESSION, config_data.session), + (ConfigTlvs.INTERFACE_NUMBER, config_data.interface_number), + (ConfigTlvs.NETWORK_ID, config_data.network_id), + (ConfigTlvs.OPAQUE, config_data.opaque), + ]) + return coreapi.CoreConfMessage.pack(config_data.message_type, tlv_data) diff --git a/daemon/core/api/grpc/client.py b/daemon/core/api/grpc/client.py deleted file mode 100644 index 2a5a1d44..00000000 --- a/daemon/core/api/grpc/client.py +++ /dev/null @@ -1,1177 +0,0 @@ -""" -gRpc client for interfacing with CORE. 
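# A rough sketch of calling convert_node() with a stand-in object; the real
# core.data.NodeData provides the attributes read above, but its construction
# is not shown in this changeset, so a fake is used here and unset attributes
# are left as None, which is expected to skip their TLVs during packing.
from core.api.dataconversion import convert_node
from core.enumerations import MessageFlags, NodeTypes


class FakeNodeData(object):
    message_type = MessageFlags.ADD.value
    id = 1
    node_type = NodeTypes.DEFAULT.value
    name = "n1"
    model = "PC"
    x_position = 100
    y_position = 150
    ip_address = mac_address = ip6_address = None
    emulation_id = emulation_server = session = None
    canvas = network_id = services = None
    latitude = longitude = altitude = None
    icon = opaque = None


raw_message = convert_node(FakeNodeData())  # packed TLV node message bytes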
-""" - -import logging -import threading -from collections.abc import Callable, Generator, Iterable -from contextlib import contextmanager -from pathlib import Path -from queue import Queue -from typing import Any, Optional - -import grpc - -from core.api.grpc import core_pb2, core_pb2_grpc, emane_pb2, wrappers -from core.api.grpc.configservices_pb2 import ( - GetConfigServiceDefaultsRequest, - GetConfigServiceRenderedRequest, - GetNodeConfigServiceRequest, -) -from core.api.grpc.core_pb2 import ( - ExecuteScriptRequest, - GetConfigRequest, - GetWirelessConfigRequest, - LinkedRequest, - WirelessConfigRequest, - WirelessLinkedRequest, -) -from core.api.grpc.emane_pb2 import ( - EmaneLinkRequest, - GetEmaneEventChannelRequest, - GetEmaneModelConfigRequest, - SetEmaneModelConfigRequest, -) -from core.api.grpc.mobility_pb2 import ( - GetMobilityConfigRequest, - MobilityActionRequest, - MobilityConfig, - SetMobilityConfigRequest, -) -from core.api.grpc.services_pb2 import ( - GetNodeServiceFileRequest, - GetNodeServiceRequest, - GetServiceDefaultsRequest, - ServiceActionRequest, - ServiceDefaults, - SetServiceDefaultsRequest, -) -from core.api.grpc.wlan_pb2 import ( - GetWlanConfigRequest, - SetWlanConfigRequest, - WlanConfig, - WlanLinkRequest, -) -from core.api.grpc.wrappers import LinkOptions -from core.emulator.data import IpPrefixes -from core.errors import CoreError -from core.utils import SetQueue - -logger = logging.getLogger(__name__) - - -class MoveNodesStreamer: - def __init__(self, session_id: int, source: str = None) -> None: - self.session_id: int = session_id - self.source: Optional[str] = source - self.queue: SetQueue = SetQueue() - - def send_position(self, node_id: int, x: float, y: float) -> None: - position = wrappers.Position(x=x, y=y) - request = wrappers.MoveNodesRequest( - session_id=self.session_id, - node_id=node_id, - source=self.source, - position=position, - ) - self.send(request) - - def send_geo(self, node_id: int, lon: float, lat: float, alt: float) -> None: - geo = wrappers.Geo(lon=lon, lat=lat, alt=alt) - request = wrappers.MoveNodesRequest( - session_id=self.session_id, node_id=node_id, source=self.source, geo=geo - ) - self.send(request) - - def send(self, request: wrappers.MoveNodesRequest) -> None: - self.queue.put(request) - - def stop(self) -> None: - self.queue.put(None) - - def next(self) -> Optional[core_pb2.MoveNodesRequest]: - request: Optional[wrappers.MoveNodesRequest] = self.queue.get() - if request: - return request.to_proto() - else: - return request - - def iter(self) -> Iterable: - return iter(self.next, None) - - -class EmanePathlossesStreamer: - def __init__(self) -> None: - self.queue: Queue = Queue() - - def send(self, request: Optional[wrappers.EmanePathlossesRequest]) -> None: - self.queue.put(request) - - def next(self) -> Optional[emane_pb2.EmanePathlossesRequest]: - request: Optional[wrappers.EmanePathlossesRequest] = self.queue.get() - if request: - return request.to_proto() - else: - return request - - def iter(self): - return iter(self.next, None) - - -class InterfaceHelper: - """ - Convenience class to help generate IP4 and IP6 addresses for gRPC clients. - """ - - def __init__(self, ip4_prefix: str = None, ip6_prefix: str = None) -> None: - """ - Creates an InterfaceHelper object. 
- - :param ip4_prefix: ip4 prefix to use for generation - :param ip6_prefix: ip6 prefix to use for generation - :raises ValueError: when both ip4 and ip6 prefixes have not been provided - """ - self.prefixes: IpPrefixes = IpPrefixes(ip4_prefix, ip6_prefix) - - def create_iface( - self, node_id: int, iface_id: int, name: str = None, mac: str = None - ) -> wrappers.Interface: - """ - Create an interface protobuf object. - - :param node_id: node id to create interface for - :param iface_id: interface id - :param name: name of interface - :param mac: mac address for interface - :return: interface protobuf - """ - iface_data = self.prefixes.gen_iface(node_id, name, mac) - return wrappers.Interface( - id=iface_id, - name=iface_data.name, - ip4=iface_data.ip4, - ip4_mask=iface_data.ip4_mask, - ip6=iface_data.ip6, - ip6_mask=iface_data.ip6_mask, - mac=iface_data.mac, - ) - - -def throughput_listener( - stream: Any, handler: Callable[[wrappers.ThroughputsEvent], None] -) -> None: - """ - Listen for throughput events and provide them to the handler. - - :param stream: grpc stream that will provide events - :param handler: function that handles an event - :return: nothing - """ - try: - for event_proto in stream: - event = wrappers.ThroughputsEvent.from_proto(event_proto) - handler(event) - except grpc.RpcError as e: - if e.code() == grpc.StatusCode.CANCELLED: - logger.debug("throughput stream closed") - else: - logger.exception("throughput stream error") - - -def cpu_listener( - stream: Any, handler: Callable[[wrappers.CpuUsageEvent], None] -) -> None: - """ - Listen for cpu events and provide them to the handler. - - :param stream: grpc stream that will provide events - :param handler: function that handles an event - :return: nothing - """ - try: - for event_proto in stream: - event = wrappers.CpuUsageEvent.from_proto(event_proto) - handler(event) - except grpc.RpcError as e: - if e.code() == grpc.StatusCode.CANCELLED: - logger.debug("cpu stream closed") - else: - logger.exception("cpu stream error") - - -def event_listener(stream: Any, handler: Callable[[wrappers.Event], None]) -> None: - """ - Listen for session events and provide them to the handler. - - :param stream: grpc stream that will provide events - :param handler: function that handles an event - :return: nothing - """ - try: - for event_proto in stream: - event = wrappers.Event.from_proto(event_proto) - handler(event) - except grpc.RpcError as e: - if e.code() == grpc.StatusCode.CANCELLED: - logger.debug("session stream closed") - else: - logger.exception("session stream error") - - -class CoreGrpcClient: - """ - Provides convenience methods for interfacing with the CORE grpc server. - """ - - def __init__(self, address: str = "localhost:50051", proxy: bool = False) -> None: - """ - Creates a CoreGrpcClient instance. - - :param address: grpc server address to connect to - """ - self.address: str = address - self.stub: Optional[core_pb2_grpc.CoreApiStub] = None - self.channel: Optional[grpc.Channel] = None - self.proxy: bool = proxy - - def start_session( - self, session: wrappers.Session, definition: bool = False - ) -> tuple[bool, list[str]]: - """ - Start a session. 
- - :param session: session to start - :param definition: True to only define session data, False to start session - :return: tuple of result and exception strings - """ - request = core_pb2.StartSessionRequest( - session=session.to_proto(), definition=definition - ) - response = self.stub.StartSession(request) - return response.result, list(response.exceptions) - - def stop_session(self, session_id: int) -> bool: - """ - Stop a running session. - - :param session_id: id of session - :return: True for success, False otherwise - :raises grpc.RpcError: when session doesn't exist - """ - request = core_pb2.StopSessionRequest(session_id=session_id) - response = self.stub.StopSession(request) - return response.result - - def create_session(self, session_id: int = None) -> wrappers.Session: - """ - Create a session. - - :param session_id: id for session, default is None and one will be created - for you - :return: session id - """ - request = core_pb2.CreateSessionRequest(session_id=session_id) - response = self.stub.CreateSession(request) - return wrappers.Session.from_proto(response.session) - - def delete_session(self, session_id: int) -> bool: - """ - Delete a session. - - :param session_id: id of session - :return: True for success, False otherwise - :raises grpc.RpcError: when session doesn't exist - """ - request = core_pb2.DeleteSessionRequest(session_id=session_id) - response = self.stub.DeleteSession(request) - return response.result - - def get_sessions(self) -> list[wrappers.SessionSummary]: - """ - Retrieves all currently known sessions. - - :return: response with a list of currently known session, their state and - number of nodes - """ - response = self.stub.GetSessions(core_pb2.GetSessionsRequest()) - sessions = [] - for session_proto in response.sessions: - session = wrappers.SessionSummary.from_proto(session_proto) - sessions.append(session) - return sessions - - def check_session(self, session_id: int) -> bool: - """ - Check if a session exists. - - :param session_id: id of session to check for - :return: True if exists, False otherwise - """ - request = core_pb2.CheckSessionRequest(session_id=session_id) - response = self.stub.CheckSession(request) - return response.result - - def get_session(self, session_id: int) -> wrappers.Session: - """ - Retrieve a session. - - :param session_id: id of session - :return: session - :raises grpc.RpcError: when session doesn't exist - """ - request = core_pb2.GetSessionRequest(session_id=session_id) - response = self.stub.GetSession(request) - return wrappers.Session.from_proto(response.session) - - def alert( - self, - session_id: int, - level: wrappers.ExceptionLevel, - source: str, - text: str, - node_id: int = None, - ) -> bool: - """ - Initiate an alert to be broadcast out to all listeners. - - :param session_id: id of session - :param level: alert level - :param source: source of alert - :param text: alert text - :param node_id: node associated with alert - :return: True for success, False otherwise - """ - request = core_pb2.SessionAlertRequest( - session_id=session_id, - level=level.value, - source=source, - text=text, - node_id=node_id, - ) - response = self.stub.SessionAlert(request) - return response.result - - def events( - self, - session_id: int, - handler: Callable[[wrappers.Event], None], - events: list[wrappers.EventType] = None, - ) -> grpc.Future: - """ - Listen for session events. 
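# A minimal session-lifecycle sketch using the calls documented above; the
# address is the constructor default, and the .id attribute on the returned
# wrapper objects is an assumption.
from core.api.grpc.client import CoreGrpcClient

client = CoreGrpcClient("localhost:50051")
client.connect()
session = client.create_session()
result, exceptions = client.start_session(session)
if not result:
    print("failed to start session: %s" % exceptions)
print([s.id for s in client.get_sessions()])
client.stop_session(session.id)
client.delete_session(session.id)
client.close()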
- - :param session_id: id of session - :param handler: handler for received events - :param events: events to listen to, defaults to all - :return: stream processing events, can be used to cancel stream - :raises grpc.RpcError: when session doesn't exist - """ - request = core_pb2.EventsRequest(session_id=session_id, events=events) - stream = self.stub.Events(request) - thread = threading.Thread( - target=event_listener, args=(stream, handler), daemon=True - ) - thread.start() - return stream - - def throughputs( - self, session_id: int, handler: Callable[[wrappers.ThroughputsEvent], None] - ) -> grpc.Future: - """ - Listen for throughput events with information for interfaces and bridges. - - :param session_id: session id - :param handler: handler for every event - :return: stream processing events, can be used to cancel stream - :raises grpc.RpcError: when session doesn't exist - """ - request = core_pb2.ThroughputsRequest(session_id=session_id) - stream = self.stub.Throughputs(request) - thread = threading.Thread( - target=throughput_listener, args=(stream, handler), daemon=True - ) - thread.start() - return stream - - def cpu_usage( - self, delay: int, handler: Callable[[wrappers.CpuUsageEvent], None] - ) -> grpc.Future: - """ - Listen for cpu usage events with the given repeat delay. - - :param delay: delay between receiving events - :param handler: handler for every event - :return: stream processing events, can be used to cancel stream - """ - request = core_pb2.CpuUsageRequest(delay=delay) - stream = self.stub.CpuUsage(request) - thread = threading.Thread( - target=cpu_listener, args=(stream, handler), daemon=True - ) - thread.start() - return stream - - def add_node(self, session_id: int, node: wrappers.Node, source: str = None) -> int: - """ - Add node to session. - - :param session_id: session id - :param node: node to add - :param source: source application - :return: id of added node - :raises grpc.RpcError: when session doesn't exist - """ - request = core_pb2.AddNodeRequest( - session_id=session_id, node=node.to_proto(), source=source - ) - response = self.stub.AddNode(request) - return response.node_id - - def get_node( - self, session_id: int, node_id: int - ) -> tuple[wrappers.Node, list[wrappers.Interface], list[wrappers.Link]]: - """ - Get node details. - - :param session_id: session id - :param node_id: node id - :return: tuple of node and its interfaces - :raises grpc.RpcError: when session or node doesn't exist - """ - request = core_pb2.GetNodeRequest(session_id=session_id, node_id=node_id) - response = self.stub.GetNode(request) - node = wrappers.Node.from_proto(response.node) - ifaces = [] - for iface_proto in response.ifaces: - iface = wrappers.Interface.from_proto(iface_proto) - ifaces.append(iface) - links = [] - for link_proto in response.links: - link = wrappers.Link.from_proto(link_proto) - links.append(link) - return node, ifaces, links - - def edit_node( - self, session_id: int, node_id: int, icon: str = None, source: str = None - ) -> bool: - """ - Edit a node's icon and/or location, can only use position(x,y) or - geo(lon, lat, alt), not both. 
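# A sketch of the event-streaming pattern documented above; the handler only
# prints, and cancelling the returned stream is assumed to stop the background
# listener thread as the docstrings describe.
import time

from core.api.grpc.client import CoreGrpcClient


def log_event(event):
    print(event)


client = CoreGrpcClient()
client.connect()
session = client.create_session()
stream = client.events(session.id, log_event)
time.sleep(5)    # let any events arrive
stream.cancel()  # stop streaming
client.close()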
- - :param session_id: session id - :param node_id: node id - :param icon: path to icon for gui to use for node - :param source: application source - :return: True for success, False otherwise - :raises grpc.RpcError: when session or node doesn't exist - """ - request = core_pb2.EditNodeRequest( - session_id=session_id, node_id=node_id, icon=icon, source=source - ) - response = self.stub.EditNode(request) - return response.result - - def move_node( - self, - session_id: int, - node_id: int, - position: wrappers.Position = None, - geo: wrappers.Geo = None, - source: str = None, - ) -> bool: - """ - Move node using provided position or geo location. - - :param session_id: session id - :param node_id: node id - :param position: x,y position to move to - :param geo: geospatial position to move to - :param source: source generating motion - :return: nothing - :raises grpc.RpcError: when session or nodes do not exist - """ - if not position and not geo: - raise CoreError("must provide position or geo to move node") - position = position.to_proto() if position else None - geo = geo.to_proto() if geo else None - request = core_pb2.MoveNodeRequest( - session_id=session_id, - node_id=node_id, - position=position, - geo=geo, - source=source, - ) - response = self.stub.MoveNode(request) - return response.result - - def move_nodes(self, streamer: MoveNodesStreamer) -> None: - """ - Stream node movements using the provided iterator. - - :param streamer: move nodes streamer - :return: nothing - :raises grpc.RpcError: when session or nodes do not exist - """ - self.stub.MoveNodes(streamer.iter()) - - def delete_node(self, session_id: int, node_id: int, source: str = None) -> bool: - """ - Delete node from session. - - :param session_id: session id - :param node_id: node id - :param source: application source - :return: True for success, False otherwise - :raises grpc.RpcError: when session doesn't exist - """ - request = core_pb2.DeleteNodeRequest( - session_id=session_id, node_id=node_id, source=source - ) - response = self.stub.DeleteNode(request) - return response.result - - def node_command( - self, - session_id: int, - node_id: int, - command: str, - wait: bool = True, - shell: bool = False, - ) -> tuple[int, str]: - """ - Send command to a node and get the output. - - :param session_id: session id - :param node_id: node id - :param command: command to run on node - :param wait: wait for command to complete - :param shell: send shell command - :return: returns tuple of return code and output - :raises grpc.RpcError: when session or node doesn't exist - """ - request = core_pb2.NodeCommandRequest( - session_id=session_id, - node_id=node_id, - command=command, - wait=wait, - shell=shell, - ) - response = self.stub.NodeCommand(request) - return response.return_code, response.output - - def get_node_terminal(self, session_id: int, node_id: int) -> str: - """ - Retrieve terminal command string for launching a local terminal. - - :param session_id: session id - :param node_id: node id - :return: node terminal - :raises grpc.RpcError: when session or node doesn't exist - """ - request = core_pb2.GetNodeTerminalRequest( - session_id=session_id, node_id=node_id - ) - response = self.stub.GetNodeTerminal(request) - return response.terminal - - def add_link( - self, session_id: int, link: wrappers.Link, source: str = None - ) -> tuple[bool, wrappers.Interface, wrappers.Interface]: - """ - Add a link between nodes. 
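# A sketch of the per-node helpers documented above, wrapped in a function so
# the connected client and session/node ids can come from elsewhere; the
# command string and target position are arbitrary.
from core.api.grpc import wrappers
from core.api.grpc.client import CoreGrpcClient


def poke_node(client: CoreGrpcClient, session_id: int, node_id: int) -> None:
    # run a command inside the node and report its output
    return_code, output = client.node_command(session_id, node_id, "ip addr show")
    print("return code %s:\n%s" % (return_code, output))
    # then nudge the node to a new canvas position
    client.move_node(session_id, node_id, position=wrappers.Position(x=200.0, y=150.0))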
- - :param session_id: session id - :param link: link to add - :param source: application source - :return: tuple of result and finalized interface values - :raises grpc.RpcError: when session or one of the nodes don't exist - """ - request = core_pb2.AddLinkRequest( - session_id=session_id, link=link.to_proto(), source=source - ) - response = self.stub.AddLink(request) - iface1 = wrappers.Interface.from_proto(response.iface1) - iface2 = wrappers.Interface.from_proto(response.iface2) - return response.result, iface1, iface2 - - def edit_link( - self, session_id: int, link: wrappers.Link, source: str = None - ) -> bool: - """ - Edit a link between nodes. - - :param session_id: session id - :param link: link to edit - :param source: application source - :return: response with result of success or failure - :raises grpc.RpcError: when session or one of the nodes don't exist - """ - iface1_id = link.iface1.id if link.iface1 else None - iface2_id = link.iface2.id if link.iface2 else None - request = core_pb2.EditLinkRequest( - session_id=session_id, - node1_id=link.node1_id, - node2_id=link.node2_id, - options=link.options.to_proto(), - iface1_id=iface1_id, - iface2_id=iface2_id, - source=source, - ) - response = self.stub.EditLink(request) - return response.result - - def delete_link( - self, session_id: int, link: wrappers.Link, source: str = None - ) -> bool: - """ - Delete a link between nodes. - - :param session_id: session id - :param link: link to delete - :param source: application source - :return: response with result of success or failure - :raises grpc.RpcError: when session doesn't exist - """ - iface1_id = link.iface1.id if link.iface1 else None - iface2_id = link.iface2.id if link.iface2 else None - request = core_pb2.DeleteLinkRequest( - session_id=session_id, - node1_id=link.node1_id, - node2_id=link.node2_id, - iface1_id=iface1_id, - iface2_id=iface2_id, - source=source, - ) - response = self.stub.DeleteLink(request) - return response.result - - def get_mobility_config( - self, session_id: int, node_id: int - ) -> dict[str, wrappers.ConfigOption]: - """ - Get mobility configuration for a node. - - :param session_id: session id - :param node_id: node id - :return: dict of config name to options - :raises grpc.RpcError: when session or node doesn't exist - """ - request = GetMobilityConfigRequest(session_id=session_id, node_id=node_id) - response = self.stub.GetMobilityConfig(request) - return wrappers.ConfigOption.from_dict(response.config) - - def set_mobility_config( - self, session_id: int, node_id: int, config: dict[str, str] - ) -> bool: - """ - Set mobility configuration for a node. - - :param session_id: session id - :param node_id: node id - :param config: mobility configuration - :return: True for success, False otherwise - :raises grpc.RpcError: when session or node doesn't exist - """ - mobility_config = MobilityConfig(node_id=node_id, config=config) - request = SetMobilityConfigRequest( - session_id=session_id, mobility_config=mobility_config - ) - response = self.stub.SetMobilityConfig(request) - return response.result - - def mobility_action( - self, session_id: int, node_id: int, action: wrappers.MobilityAction - ) -> bool: - """ - Send a mobility action for a node. 
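# A sketch of creating a wired link with add_link(), using the InterfaceHelper
# from earlier in this file; the wrappers.Link field names are inferred from
# how edit_link()/delete_link() read them above and should be treated as an
# assumption.
from core.api.grpc import wrappers
from core.api.grpc.client import CoreGrpcClient, InterfaceHelper


def link_nodes(client: CoreGrpcClient, session_id: int, node1_id: int, node2_id: int) -> None:
    iface_helper = InterfaceHelper(ip4_prefix="10.0.0.0/24", ip6_prefix="2001::/64")
    iface1 = iface_helper.create_iface(node1_id, 0)
    iface2 = iface_helper.create_iface(node2_id, 0)
    link = wrappers.Link(node1_id=node1_id, node2_id=node2_id, iface1=iface1, iface2=iface2)
    result, iface1, iface2 = client.add_link(session_id, link)
    print("link created: %s" % result)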
- - :param session_id: session id - :param node_id: node id - :param action: action to take - :return: True for success, False otherwise - :raises grpc.RpcError: when session or node doesn't exist - """ - request = MobilityActionRequest( - session_id=session_id, node_id=node_id, action=action.value - ) - response = self.stub.MobilityAction(request) - return response.result - - def get_config(self) -> wrappers.CoreConfig: - """ - Retrieve the current core configuration values. - - :return: core configuration - """ - request = GetConfigRequest() - response = self.stub.GetConfig(request) - return wrappers.CoreConfig.from_proto(response) - - def get_service_defaults(self, session_id: int) -> list[wrappers.ServiceDefault]: - """ - Get default services for different default node models. - - :param session_id: session id - :return: list of service defaults - :raises grpc.RpcError: when session doesn't exist - """ - request = GetServiceDefaultsRequest(session_id=session_id) - response = self.stub.GetServiceDefaults(request) - defaults = [] - for default_proto in response.defaults: - default = wrappers.ServiceDefault.from_proto(default_proto) - defaults.append(default) - return defaults - - def set_service_defaults( - self, session_id: int, service_defaults: dict[str, list[str]] - ) -> bool: - """ - Set default services for node models. - - :param session_id: session id - :param service_defaults: node models to lists of services - :return: True for success, False otherwise - :raises grpc.RpcError: when session doesn't exist - """ - defaults = [] - for model in service_defaults: - services = service_defaults[model] - default = ServiceDefaults(model=model, services=services) - defaults.append(default) - request = SetServiceDefaultsRequest(session_id=session_id, defaults=defaults) - response = self.stub.SetServiceDefaults(request) - return response.result - - def get_node_service( - self, session_id: int, node_id: int, service: str - ) -> wrappers.NodeServiceData: - """ - Get service data for a node. - - :param session_id: session id - :param node_id: node id - :param service: service name - :return: node service data - :raises grpc.RpcError: when session or node doesn't exist - """ - request = GetNodeServiceRequest( - session_id=session_id, node_id=node_id, service=service - ) - response = self.stub.GetNodeService(request) - return wrappers.NodeServiceData.from_proto(response.service) - - def get_node_service_file( - self, session_id: int, node_id: int, service: str, file_name: str - ) -> str: - """ - Get a service file for a node. - - :param session_id: session id - :param node_id: node id - :param service: service name - :param file_name: file name to get data for - :return: file data - :raises grpc.RpcError: when session or node doesn't exist - """ - request = GetNodeServiceFileRequest( - session_id=session_id, node_id=node_id, service=service, file=file_name - ) - response = self.stub.GetNodeServiceFile(request) - return response.data - - def service_action( - self, - session_id: int, - node_id: int, - service: str, - action: wrappers.ServiceAction, - ) -> bool: - """ - Send an action to a service for a node. 
- - :param session_id: session id - :param node_id: node id - :param service: service name - :param action: action for service (start, stop, restart, - validate) - :return: True for success, False otherwise - :raises grpc.RpcError: when session or node doesn't exist - """ - request = ServiceActionRequest( - session_id=session_id, node_id=node_id, service=service, action=action.value - ) - response = self.stub.ServiceAction(request) - return response.result - - def config_service_action( - self, - session_id: int, - node_id: int, - service: str, - action: wrappers.ServiceAction, - ) -> bool: - """ - Send an action to a config service for a node. - - :param session_id: session id - :param node_id: node id - :param service: config service name - :param action: action for service (start, stop, restart, - validate) - :return: True for success, False otherwise - :raises grpc.RpcError: when session or node doesn't exist - """ - request = ServiceActionRequest( - session_id=session_id, node_id=node_id, service=service, action=action.value - ) - response = self.stub.ConfigServiceAction(request) - return response.result - - def get_wlan_config( - self, session_id: int, node_id: int - ) -> dict[str, wrappers.ConfigOption]: - """ - Get wlan configuration for a node. - - :param session_id: session id - :param node_id: node id - :return: dict of names to options - :raises grpc.RpcError: when session doesn't exist - """ - request = GetWlanConfigRequest(session_id=session_id, node_id=node_id) - response = self.stub.GetWlanConfig(request) - return wrappers.ConfigOption.from_dict(response.config) - - def set_wlan_config( - self, session_id: int, node_id: int, config: dict[str, str] - ) -> bool: - """ - Set wlan configuration for a node. - - :param session_id: session id - :param node_id: node id - :param config: wlan configuration - :return: True for success, False otherwise - :raises grpc.RpcError: when session doesn't exist - """ - wlan_config = WlanConfig(node_id=node_id, config=config) - request = SetWlanConfigRequest(session_id=session_id, wlan_config=wlan_config) - response = self.stub.SetWlanConfig(request) - return response.result - - def get_emane_model_config( - self, session_id: int, node_id: int, model: str, iface_id: int = -1 - ) -> dict[str, wrappers.ConfigOption]: - """ - Get emane model configuration for a node or a node's interface. - - :param session_id: session id - :param node_id: node id - :param model: emane model name - :param iface_id: node interface id - :return: dict of names to options - :raises grpc.RpcError: when session doesn't exist - """ - request = GetEmaneModelConfigRequest( - session_id=session_id, node_id=node_id, model=model, iface_id=iface_id - ) - response = self.stub.GetEmaneModelConfig(request) - return wrappers.ConfigOption.from_dict(response.config) - - def set_emane_model_config( - self, session_id: int, emane_model_config: wrappers.EmaneModelConfig - ) -> bool: - """ - Set emane model configuration for a node or a node's interface. - - :param session_id: session id - :param emane_model_config: emane model config to set - :return: True for success, False otherwise - :raises grpc.RpcError: when session doesn't exist - """ - request = SetEmaneModelConfigRequest( - session_id=session_id, emane_model_config=emane_model_config.to_proto() - ) - response = self.stub.SetEmaneModelConfig(request) - return response.result - - def save_xml(self, session_id: int, file_path: str) -> None: - """ - Save the current scenario to an XML file. 
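# A sketch of pushing WLAN settings with set_wlan_config(); this changeset only
# shows that the config is a flat str-to-str mapping, so the option names here
# (basic range model keys) and the .value attribute on the returned options are
# assumptions.
from core.api.grpc.client import CoreGrpcClient


def tune_wlan(client: CoreGrpcClient, session_id: int, wlan_id: int) -> None:
    client.set_wlan_config(session_id, wlan_id, {
        "range": "280",
        "bandwidth": "55000000",
        "delay": "6000",
    })
    current = client.get_wlan_config(session_id, wlan_id)
    for name, option in current.items():
        print("%s = %s" % (name, option.value))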
- - :param session_id: session to save xml file for - :param file_path: local path to save scenario XML file to - :return: nothing - :raises grpc.RpcError: when session doesn't exist - """ - request = core_pb2.SaveXmlRequest(session_id=session_id) - response = self.stub.SaveXml(request) - with open(file_path, "w") as xml_file: - xml_file.write(response.data) - - def open_xml(self, file_path: Path, start: bool = False) -> tuple[bool, int]: - """ - Load a local scenario XML file to open as a new session. - - :param file_path: path of scenario XML file - :param start: tuple of result and session id when successful - :return: tuple of result and session id - """ - with file_path.open("r") as f: - data = f.read() - request = core_pb2.OpenXmlRequest(data=data, start=start, file=str(file_path)) - response = self.stub.OpenXml(request) - return response.result, response.session_id - - def emane_link(self, session_id: int, nem1: int, nem2: int, linked: bool) -> bool: - """ - Helps broadcast wireless link/unlink between EMANE nodes. - - :param session_id: session to emane link - :param nem1: first nem for emane link - :param nem2: second nem for emane link - :param linked: True to link, False to unlink - :return: True for success, False otherwise - :raises grpc.RpcError: when session or nodes related to nems do not exist - """ - request = EmaneLinkRequest( - session_id=session_id, nem1=nem1, nem2=nem2, linked=linked - ) - response = self.stub.EmaneLink(request) - return response.result - - def get_ifaces(self) -> list[str]: - """ - Retrieves a list of interfaces available on the host machine that are not - a part of a CORE session. - - :return: list of interfaces - """ - request = core_pb2.GetInterfacesRequest() - response = self.stub.GetInterfaces(request) - return list(response.ifaces) - - def get_config_service_defaults( - self, session_id: int, node_id: int, name: str - ) -> wrappers.ConfigServiceDefaults: - """ - Retrieves config service default values. - - :param session_id: session id to get node from - :param node_id: node id to get service data from - :param name: name of service to get defaults for - :return: config service defaults - """ - request = GetConfigServiceDefaultsRequest( - name=name, session_id=session_id, node_id=node_id - ) - response = self.stub.GetConfigServiceDefaults(request) - return wrappers.ConfigServiceDefaults.from_proto(response) - - def get_node_config_service( - self, session_id: int, node_id: int, name: str - ) -> dict[str, str]: - """ - Retrieves information for a specific config service on a node. - - :param session_id: session node belongs to - :param node_id: id of node to get service information from - :param name: name of service - :return: config dict of names to values - :raises grpc.RpcError: when session or node doesn't exist - """ - request = GetNodeConfigServiceRequest( - session_id=session_id, node_id=node_id, name=name - ) - response = self.stub.GetNodeConfigService(request) - return dict(response.config) - - def get_config_service_rendered( - self, session_id: int, node_id: int, name: str - ) -> dict[str, str]: - """ - Retrieve the rendered config service files for a node. 
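# A sketch of the XML round trip described above; the file path is arbitrary.
from pathlib import Path

from core.api.grpc.client import CoreGrpcClient


def snapshot_and_reload(client: CoreGrpcClient, session_id: int) -> int:
    xml_path = Path("/tmp/scenario.xml")
    client.save_xml(session_id, str(xml_path))
    result, new_session_id = client.open_xml(xml_path, start=True)
    return new_session_id if result else session_id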
- - :param session_id: id of session - :param node_id: id of node - :param name: name of service - :return: dict mapping names of files to rendered data - """ - request = GetConfigServiceRenderedRequest( - session_id=session_id, node_id=node_id, name=name - ) - response = self.stub.GetConfigServiceRendered(request) - return dict(response.rendered) - - def get_emane_event_channel( - self, session_id: int, nem_id: int - ) -> wrappers.EmaneEventChannel: - """ - Retrieves the current emane event channel being used for a session. - - :param session_id: session to get emane event channel for - :param nem_id: nem id for the desired event channel - :return: emane event channel - :raises grpc.RpcError: when session doesn't exist - """ - request = GetEmaneEventChannelRequest(session_id=session_id, nem_id=nem_id) - response = self.stub.GetEmaneEventChannel(request) - return wrappers.EmaneEventChannel.from_proto(response) - - def execute_script(self, script: str, args: str) -> Optional[int]: - """ - Executes a python script given context of the current CoreEmu object. - - :param script: script to execute - :param args: arguments to provide to script - :return: create session id for script executed - """ - request = ExecuteScriptRequest(script=script, args=args) - response = self.stub.ExecuteScript(request) - return response.session_id if response.session_id else None - - def wlan_link( - self, session_id: int, wlan_id: int, node1_id: int, node2_id: int, linked: bool - ) -> bool: - """ - Links/unlinks nodes on the same WLAN. - - :param session_id: session id containing wlan and nodes - :param wlan_id: wlan nodes must belong to - :param node1_id: first node of pair to link/unlink - :param node2_id: second node of pair to link/unlin - :param linked: True to link, False to unlink - :return: True for success, False otherwise - :raises grpc.RpcError: when session or one of the nodes do not exist - """ - request = WlanLinkRequest( - session_id=session_id, - wlan=wlan_id, - node1_id=node1_id, - node2_id=node2_id, - linked=linked, - ) - response = self.stub.WlanLink(request) - return response.result - - def emane_pathlosses(self, streamer: EmanePathlossesStreamer) -> None: - """ - Stream EMANE pathloss events. - - :param streamer: emane pathlosses streamer - :return: nothing - :raises grpc.RpcError: when a pathloss event session or one of the nodes do not - exist - """ - self.stub.EmanePathlosses(streamer.iter()) - - def linked( - self, - session_id: int, - node1_id: int, - node2_id: int, - iface1_id: int, - iface2_id: int, - linked: bool, - ) -> None: - """ - Link or unlink an existing core wired link. 
- - :param session_id: session containing the link - :param node1_id: first node in link - :param node2_id: second node in link - :param iface1_id: node1 interface - :param iface2_id: node2 interface - :param linked: True to connect link, False to disconnect - :return: nothing - """ - request = LinkedRequest( - session_id=session_id, - node1_id=node1_id, - node2_id=node2_id, - iface1_id=iface1_id, - iface2_id=iface2_id, - linked=linked, - ) - self.stub.Linked(request) - - def wireless_linked( - self, - session_id: int, - wireless_id: int, - node1_id: int, - node2_id: int, - linked: bool, - ) -> None: - request = WirelessLinkedRequest( - session_id=session_id, - wireless_id=wireless_id, - node1_id=node1_id, - node2_id=node2_id, - linked=linked, - ) - self.stub.WirelessLinked(request) - - def wireless_config( - self, - session_id: int, - wireless_id: int, - node1_id: int, - node2_id: int, - options1: LinkOptions, - options2: LinkOptions = None, - ) -> None: - if options2 is None: - options2 = options1 - request = WirelessConfigRequest( - session_id=session_id, - wireless_id=wireless_id, - node1_id=node1_id, - node2_id=node2_id, - options1=options1.to_proto(), - options2=options2.to_proto(), - ) - self.stub.WirelessConfig(request) - - def get_wireless_config( - self, session_id: int, node_id: int - ) -> dict[str, wrappers.ConfigOption]: - request = GetWirelessConfigRequest(session_id=session_id, node_id=node_id) - response = self.stub.GetWirelessConfig(request) - return wrappers.ConfigOption.from_dict(response.config) - - def connect(self) -> None: - """ - Open connection to server, must be closed manually. - - :return: nothing - """ - self.channel = grpc.insecure_channel( - self.address, options=[("grpc.enable_http_proxy", self.proxy)] - ) - self.stub = core_pb2_grpc.CoreApiStub(self.channel) - - def close(self) -> None: - """ - Close currently opened server channel connection. - - :return: nothing - """ - if self.channel: - self.channel.close() - self.channel = None - - @contextmanager - def context_connect(self) -> Generator[None, None, None]: - """ - Makes a context manager based connection to the server, will close after - context ends. 
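# The two connection patterns above, side by side; a core-daemon listening on
# the default address is assumed, and the fields printed from each session
# summary are an assumption about wrappers.SessionSummary.
from core.api.grpc.client import CoreGrpcClient

client = CoreGrpcClient("localhost:50051")

# explicit connect/close
client.connect()
print(len(client.get_sessions()))
client.close()

# or let the context manager handle teardown
with client.context_connect():
    for summary in client.get_sessions():
        print(summary.id, summary.state)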
- - :return: nothing - """ - try: - self.connect() - yield - finally: - self.close() diff --git a/daemon/core/api/grpc/events.py b/daemon/core/api/grpc/events.py deleted file mode 100644 index 65a20296..00000000 --- a/daemon/core/api/grpc/events.py +++ /dev/null @@ -1,219 +0,0 @@ -import logging -from collections.abc import Iterable -from queue import Empty, Queue -from typing import Optional - -from core.api.grpc import core_pb2, grpcutils -from core.api.grpc.grpcutils import convert_link_data -from core.emulator.data import ( - ConfigData, - EventData, - ExceptionData, - FileData, - LinkData, - NodeData, -) -from core.emulator.session import Session - -logger = logging.getLogger(__name__) - - -def handle_node_event(session: Session, node_data: NodeData) -> core_pb2.Event: - """ - Handle node event when there is a node event - - :param session: session node is from - :param node_data: node data - :return: node event that contains node id, name, model, position, and services - """ - node = node_data.node - emane_configs = grpcutils.get_emane_model_configs_dict(session) - node_emane_configs = emane_configs.get(node.id, []) - node_proto = grpcutils.get_node_proto(session, node, node_emane_configs) - message_type = node_data.message_type.value - node_event = core_pb2.NodeEvent(message_type=message_type, node=node_proto) - return core_pb2.Event(node_event=node_event, source=node_data.source) - - -def handle_link_event(link_data: LinkData) -> core_pb2.Event: - """ - Handle link event when there is a link event - - :param link_data: link data - :return: link event that has message type and link information - """ - link = convert_link_data(link_data) - message_type = link_data.message_type.value - link_event = core_pb2.LinkEvent(message_type=message_type, link=link) - return core_pb2.Event(link_event=link_event, source=link_data.source) - - -def handle_session_event(event_data: EventData) -> core_pb2.Event: - """ - Handle session event when there is a session event - - :param event_data: event data - :return: session event - """ - event_time = event_data.time - if event_time is not None: - event_time = float(event_time) - session_event = core_pb2.SessionEvent( - node_id=event_data.node, - event=event_data.event_type.value, - name=event_data.name, - data=event_data.data, - time=event_time, - ) - return core_pb2.Event(session_event=session_event) - - -def handle_config_event(config_data: ConfigData) -> core_pb2.Event: - """ - Handle configuration event when there is configuration event - - :param config_data: configuration data - :return: configuration event - """ - config_event = core_pb2.ConfigEvent( - message_type=config_data.message_type, - node_id=config_data.node, - object=config_data.object, - type=config_data.type, - captions=config_data.captions, - bitmap=config_data.bitmap, - data_values=config_data.data_values, - possible_values=config_data.possible_values, - groups=config_data.groups, - iface_id=config_data.iface_id, - network_id=config_data.network_id, - opaque=config_data.opaque, - data_types=config_data.data_types, - ) - return core_pb2.Event(config_event=config_event) - - -def handle_exception_event(exception_data: ExceptionData) -> core_pb2.Event: - """ - Handle exception event when there is exception event - - :param exception_data: exception data - :return: exception event - """ - exception_event = core_pb2.ExceptionEvent( - node_id=exception_data.node, - level=exception_data.level.value, - source=exception_data.source, - date=exception_data.date, - text=exception_data.text, - 
opaque=exception_data.opaque, - ) - return core_pb2.Event(exception_event=exception_event) - - -def handle_file_event(file_data: FileData) -> core_pb2.Event: - """ - Handle file event - - :param file_data: file data - :return: file event - """ - file_event = core_pb2.FileEvent( - message_type=file_data.message_type.value, - node_id=file_data.node, - name=file_data.name, - mode=file_data.mode, - number=file_data.number, - type=file_data.type, - source=file_data.source, - data=file_data.data, - compressed_data=file_data.compressed_data, - ) - return core_pb2.Event(file_event=file_event) - - -class EventStreamer: - """ - Processes session events to generate grpc events. - """ - - def __init__( - self, session: Session, event_types: Iterable[core_pb2.EventType] - ) -> None: - """ - Create a EventStreamer instance. - - :param session: session to process events for - :param event_types: types of events to process - """ - self.session: Session = session - self.event_types: Iterable[core_pb2.EventType] = event_types - self.queue: Queue = Queue() - self.add_handlers() - - def add_handlers(self) -> None: - """ - Add a session event handler for desired event types. - - :return: nothing - """ - if core_pb2.EventType.NODE in self.event_types: - self.session.node_handlers.append(self.queue.put) - if core_pb2.EventType.LINK in self.event_types: - self.session.link_handlers.append(self.queue.put) - if core_pb2.EventType.CONFIG in self.event_types: - self.session.config_handlers.append(self.queue.put) - if core_pb2.EventType.FILE in self.event_types: - self.session.file_handlers.append(self.queue.put) - if core_pb2.EventType.EXCEPTION in self.event_types: - self.session.exception_handlers.append(self.queue.put) - if core_pb2.EventType.SESSION in self.event_types: - self.session.event_handlers.append(self.queue.put) - - def process(self) -> Optional[core_pb2.Event]: - """ - Process the next event in the queue. - - :return: grpc event, or None when invalid event or queue timeout - """ - event = None - try: - data = self.queue.get(timeout=1) - if isinstance(data, NodeData): - event = handle_node_event(self.session, data) - elif isinstance(data, LinkData): - event = handle_link_event(data) - elif isinstance(data, EventData): - event = handle_session_event(data) - elif isinstance(data, ConfigData): - event = handle_config_event(data) - elif isinstance(data, ExceptionData): - event = handle_exception_event(data) - elif isinstance(data, FileData): - event = handle_file_event(data) - else: - logger.error("unknown event: %s", data) - except Empty: - pass - if event: - event.session_id = self.session.id - return event - - def remove_handlers(self) -> None: - """ - Remove session event handlers for events being watched. 
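# A hedged sketch of how the server side might drive EventStreamer within a
# streaming RPC; the servicer context handling is simplified and assumed, not
# taken from the actual gRPC server implementation.
from core.api.grpc import core_pb2
from core.api.grpc.events import EventStreamer


def stream_session_events(session, context):
    streamer = EventStreamer(session, [core_pb2.EventType.NODE, core_pb2.EventType.LINK])
    try:
        while context.is_active():
            event = streamer.process()
            if event:
                yield event
    finally:
        streamer.remove_handlers()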
- - :return: nothing - """ - if core_pb2.EventType.NODE in self.event_types: - self.session.node_handlers.remove(self.queue.put) - if core_pb2.EventType.LINK in self.event_types: - self.session.link_handlers.remove(self.queue.put) - if core_pb2.EventType.CONFIG in self.event_types: - self.session.config_handlers.remove(self.queue.put) - if core_pb2.EventType.FILE in self.event_types: - self.session.file_handlers.remove(self.queue.put) - if core_pb2.EventType.EXCEPTION in self.event_types: - self.session.exception_handlers.remove(self.queue.put) - if core_pb2.EventType.SESSION in self.event_types: - self.session.event_handlers.remove(self.queue.put) diff --git a/daemon/core/api/grpc/grpcutils.py b/daemon/core/api/grpc/grpcutils.py deleted file mode 100644 index f89144e4..00000000 --- a/daemon/core/api/grpc/grpcutils.py +++ /dev/null @@ -1,908 +0,0 @@ -import logging -import time -from pathlib import Path -from typing import Any, Optional, Union - -import grpc -from grpc import ServicerContext - -from core import utils -from core.api.grpc import common_pb2, core_pb2, wrappers -from core.api.grpc.configservices_pb2 import ConfigServiceConfig -from core.api.grpc.emane_pb2 import NodeEmaneConfig -from core.api.grpc.services_pb2 import ( - NodeServiceConfig, - NodeServiceData, - ServiceConfig, - ServiceDefaults, -) -from core.config import ConfigurableOptions -from core.emane.nodes import EmaneNet, EmaneOptions -from core.emulator.data import InterfaceData, LinkData, LinkOptions -from core.emulator.enumerations import LinkTypes, NodeTypes -from core.emulator.links import CoreLink -from core.emulator.session import Session -from core.errors import CoreError -from core.location.mobility import BasicRangeModel, Ns2ScriptedMobility -from core.nodes.base import ( - CoreNode, - CoreNodeBase, - CoreNodeOptions, - NodeBase, - NodeOptions, - Position, -) -from core.nodes.docker import DockerNode, DockerOptions -from core.nodes.interface import CoreInterface -from core.nodes.lxd import LxcNode, LxcOptions -from core.nodes.network import CoreNetwork, CtrlNet, PtpNet, WlanNode -from core.nodes.podman import PodmanNode, PodmanOptions -from core.nodes.wireless import WirelessNode -from core.services.coreservices import CoreService - -logger = logging.getLogger(__name__) -WORKERS = 10 - - -class CpuUsage: - def __init__(self) -> None: - self.stat_file: Path = Path("/proc/stat") - self.prev_idle: int = 0 - self.prev_total: int = 0 - - def run(self) -> float: - lines = self.stat_file.read_text().splitlines()[0] - values = [int(x) for x in lines.split()[1:]] - idle = sum(values[3:5]) - non_idle = sum(values[:3] + values[5:8]) - total = idle + non_idle - total_diff = total - self.prev_total - idle_diff = idle - self.prev_idle - self.prev_idle = idle - self.prev_total = total - return (total_diff - idle_diff) / total_diff - - -def add_node_data( - _class: type[NodeBase], node_proto: core_pb2.Node -) -> tuple[Position, NodeOptions]: - """ - Convert node protobuf message to data for creating a node. 
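# A small usage sketch for the CpuUsage helper above; the first call only
# primes the previous totals, since the calculation is relative to the last
# read of /proc/stat.
import time

from core.api.grpc.grpcutils import CpuUsage

cpu = CpuUsage()
cpu.run()
time.sleep(1)
print("cpu usage: %.2f%%" % (cpu.run() * 100))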
- - :param _class: node class to create options from - :param node_proto: node proto message - :return: node type, id, and options - """ - options = _class.create_options() - options.icon = node_proto.icon - options.canvas = node_proto.canvas - if isinstance(options, CoreNodeOptions): - options.model = node_proto.model - options.services = node_proto.services - options.config_services = node_proto.config_services - if isinstance(options, EmaneOptions): - options.emane_model = node_proto.emane - if isinstance(options, (DockerOptions, LxcOptions, PodmanOptions)): - options.image = node_proto.image - position = Position() - position.set(node_proto.position.x, node_proto.position.y) - if node_proto.HasField("geo"): - geo = node_proto.geo - position.set_geo(geo.lon, geo.lat, geo.alt) - return position, options - - -def link_iface(iface_proto: core_pb2.Interface) -> InterfaceData: - """ - Create interface data from interface proto. - - :param iface_proto: interface proto - :return: interface data - """ - iface_data = None - if iface_proto: - name = iface_proto.name if iface_proto.name else None - mac = iface_proto.mac if iface_proto.mac else None - ip4 = iface_proto.ip4 if iface_proto.ip4 else None - ip6 = iface_proto.ip6 if iface_proto.ip6 else None - iface_data = InterfaceData( - id=iface_proto.id, - name=name, - mac=mac, - ip4=ip4, - ip4_mask=iface_proto.ip4_mask, - ip6=ip6, - ip6_mask=iface_proto.ip6_mask, - ) - return iface_data - - -def add_link_data( - link_proto: core_pb2.Link, -) -> tuple[InterfaceData, InterfaceData, LinkOptions]: - """ - Convert link proto to link interfaces and options data. - - :param link_proto: link proto - :return: link interfaces and options - """ - iface1_data = link_iface(link_proto.iface1) - iface2_data = link_iface(link_proto.iface2) - options = LinkOptions() - options_proto = link_proto.options - if options_proto: - options.delay = options_proto.delay - options.bandwidth = options_proto.bandwidth - options.loss = options_proto.loss - options.dup = options_proto.dup - options.jitter = options_proto.jitter - options.mer = options_proto.mer - options.burst = options_proto.burst - options.mburst = options_proto.mburst - options.buffer = options_proto.buffer - options.unidirectional = options_proto.unidirectional - options.key = options_proto.key - return iface1_data, iface2_data, options - - -def create_nodes( - session: Session, node_protos: list[core_pb2.Node] -) -> tuple[list[NodeBase], list[Exception]]: - """ - Create nodes using a thread pool and wait for completion. - - :param session: session to create nodes in - :param node_protos: node proto messages - :return: results and exceptions for created nodes - """ - funcs = [] - for node_proto in node_protos: - _type = NodeTypes(node_proto.type) - _class = session.get_node_class(_type) - position, options = add_node_data(_class, node_proto) - args = ( - _class, - node_proto.id or None, - node_proto.name or None, - node_proto.server or None, - position, - options, - ) - funcs.append((session.add_node, args, {})) - start = time.monotonic() - results, exceptions = utils.threadpool(funcs) - total = time.monotonic() - start - logger.debug("grpc created nodes time: %s", total) - return results, exceptions - - -def create_links( - session: Session, link_protos: list[core_pb2.Link] -) -> tuple[list[NodeBase], list[Exception]]: - """ - Create links using a thread pool and wait for completion. 
- - :param session: session to create nodes in - :param link_protos: link proto messages - :return: results and exceptions for created links - """ - funcs = [] - for link_proto in link_protos: - node1_id = link_proto.node1_id - node2_id = link_proto.node2_id - iface1, iface2, options = add_link_data(link_proto) - args = (node1_id, node2_id, iface1, iface2, options) - funcs.append((session.add_link, args, {})) - start = time.monotonic() - results, exceptions = utils.threadpool(funcs) - total = time.monotonic() - start - logger.debug("grpc created links time: %s", total) - return results, exceptions - - -def edit_links( - session: Session, link_protos: list[core_pb2.Link] -) -> tuple[list[None], list[Exception]]: - """ - Edit links using a thread pool and wait for completion. - - :param session: session to create nodes in - :param link_protos: link proto messages - :return: results and exceptions for created links - """ - funcs = [] - for link_proto in link_protos: - node1_id = link_proto.node1_id - node2_id = link_proto.node2_id - iface1, iface2, options = add_link_data(link_proto) - args = (node1_id, node2_id, iface1.id, iface2.id, options) - funcs.append((session.update_link, args, {})) - start = time.monotonic() - results, exceptions = utils.threadpool(funcs) - total = time.monotonic() - start - logger.debug("grpc edit links time: %s", total) - return results, exceptions - - -def convert_value(value: Any) -> str: - """ - Convert value into string. - - :param value: value - :return: string conversion of the value - """ - if value is not None: - value = str(value) - return value - - -def convert_session_options(session: Session) -> dict[str, common_pb2.ConfigOption]: - config_options = {} - for option in session.options.options: - value = session.options.get(option.id) - config_option = common_pb2.ConfigOption( - label=option.label, - name=option.id, - value=value, - type=option.type.value, - select=option.options, - group="Options", - ) - config_options[option.id] = config_option - return config_options - - -def get_config_options( - config: dict[str, str], - configurable_options: Union[ConfigurableOptions, type[ConfigurableOptions]], -) -> dict[str, common_pb2.ConfigOption]: - """ - Retrieve configuration options in a form that is used by the grpc server. - - :param config: configuration - :param configurable_options: configurable options - :return: mapping of configuration ids to configuration options - """ - results = {} - for configuration in configurable_options.configurations(): - value = config.get(configuration.id, configuration.default) - config_option = common_pb2.ConfigOption( - label=configuration.label, - name=configuration.id, - value=value, - type=configuration.type.value, - select=configuration.options, - ) - results[configuration.id] = config_option - for config_group in configurable_options.config_groups(): - start = config_group.start - 1 - stop = config_group.stop - options = list(results.values())[start:stop] - for option in options: - option.group = config_group.name - return results - - -def get_node_proto( - session: Session, node: NodeBase, emane_configs: list[NodeEmaneConfig] -) -> core_pb2.Node: - """ - Convert CORE node to protobuf representation. 
- - :param session: session containing node - :param node: node to convert - :param emane_configs: emane configs related to node - :return: node proto - """ - node_type = session.get_node_type(node.__class__) - position = core_pb2.Position( - x=node.position.x, y=node.position.y, z=node.position.z - ) - geo = core_pb2.Geo( - lat=node.position.lat, lon=node.position.lon, alt=node.position.alt - ) - services = [x.name for x in node.services] - node_dir = None - config_services = [] - if isinstance(node, CoreNodeBase): - node_dir = str(node.directory) - config_services = [x for x in node.config_services] - channel = None - if isinstance(node, CoreNode): - channel = str(node.ctrlchnlname) - emane_model = None - if isinstance(node, EmaneNet): - emane_model = node.wireless_model.name - image = None - if isinstance(node, (DockerNode, LxcNode, PodmanNode)): - image = node.image - # check for wlan config - wlan_config = session.mobility.get_configs( - node.id, config_type=BasicRangeModel.name - ) - if wlan_config: - wlan_config = get_config_options(wlan_config, BasicRangeModel) - # check for wireless config - wireless_config = None - if isinstance(node, WirelessNode): - configs = node.get_config() - wireless_config = {} - for config in configs.values(): - config_option = common_pb2.ConfigOption( - label=config.label, - name=config.id, - value=config.default, - type=config.type.value, - select=config.options, - group=config.group, - ) - wireless_config[config.id] = config_option - # check for mobility config - mobility_config = session.mobility.get_configs( - node.id, config_type=Ns2ScriptedMobility.name - ) - if mobility_config: - mobility_config = get_config_options(mobility_config, Ns2ScriptedMobility) - # check for service configs - custom_services = session.services.custom_services.get(node.id) - service_configs = {} - if custom_services: - for service in custom_services.values(): - service_proto = get_service_configuration(service) - service_configs[service.name] = NodeServiceConfig( - node_id=node.id, - service=service.name, - data=service_proto, - files=service.config_data, - ) - # check for config service configs - config_service_configs = {} - if isinstance(node, CoreNode): - for service in node.config_services.values(): - if not service.custom_templates and not service.custom_config: - continue - config_service_configs[service.name] = ConfigServiceConfig( - node_id=node.id, - name=service.name, - templates=service.custom_templates, - config=service.custom_config, - ) - return core_pb2.Node( - id=node.id, - name=node.name, - emane=emane_model, - model=node.model, - type=node_type.value, - position=position, - geo=geo, - services=services, - icon=node.icon, - image=image, - config_services=config_services, - dir=node_dir, - channel=channel, - canvas=node.canvas, - wlan_config=wlan_config, - wireless_config=wireless_config, - mobility_config=mobility_config, - service_configs=service_configs, - config_service_configs=config_service_configs, - emane_configs=emane_configs, - ) - - -def get_links(session: Session, node: NodeBase) -> list[core_pb2.Link]: - """ - Retrieve a list of links for grpc to use. 
- - :param session: session to get links for node - :param node: node to get links from - :return: protobuf links - """ - link_protos = [] - for core_link in session.link_manager.node_links(node): - link_protos.extend(convert_core_link(core_link)) - if isinstance(node, (WlanNode, EmaneNet)): - for link_data in node.links(): - link_protos.append(convert_link_data(link_data)) - return link_protos - - -def convert_iface(iface: CoreInterface) -> core_pb2.Interface: - """ - Convert interface to protobuf. - - :param iface: interface to convert - :return: protobuf interface - """ - if isinstance(iface.node, CoreNetwork): - return core_pb2.Interface(id=iface.id) - else: - ip4 = iface.get_ip4() - ip4_mask = ip4.prefixlen if ip4 else None - ip4 = str(ip4.ip) if ip4 else None - ip6 = iface.get_ip6() - ip6_mask = ip6.prefixlen if ip6 else None - ip6 = str(ip6.ip) if ip6 else None - mac = str(iface.mac) if iface.mac else None - return core_pb2.Interface( - id=iface.id, - name=iface.name, - mac=mac, - ip4=ip4, - ip4_mask=ip4_mask, - ip6=ip6, - ip6_mask=ip6_mask, - ) - - -def convert_core_link(core_link: CoreLink) -> list[core_pb2.Link]: - """ - Convert core link to protobuf data. - - :param core_link: core link to convert - :return: protobuf link data - """ - links = [] - node1, iface1 = core_link.node1, core_link.iface1 - node2, iface2 = core_link.node2, core_link.iface2 - unidirectional = core_link.is_unidirectional() - link = convert_link(node1, iface1, node2, iface2, iface1.options, unidirectional) - links.append(link) - if unidirectional: - link = convert_link( - node2, iface2, node1, iface1, iface2.options, unidirectional - ) - links.append(link) - return links - - -def convert_link_data(link_data: LinkData) -> core_pb2.Link: - """ - Convert link_data into core protobuf link. - :param link_data: link to convert - :return: core protobuf Link - """ - iface1 = None - if link_data.iface1 is not None: - iface1 = convert_iface_data(link_data.iface1) - iface2 = None - if link_data.iface2 is not None: - iface2 = convert_iface_data(link_data.iface2) - options = convert_link_options(link_data.options) - return core_pb2.Link( - type=link_data.type.value, - node1_id=link_data.node1_id, - node2_id=link_data.node2_id, - iface1=iface1, - iface2=iface2, - options=options, - network_id=link_data.network_id, - label=link_data.label, - color=link_data.color, - ) - - -def convert_iface_data(iface_data: InterfaceData) -> core_pb2.Interface: - """ - Convert interface data to protobuf. - - :param iface_data: interface data to convert - :return: interface protobuf - """ - return core_pb2.Interface( - id=iface_data.id, - name=iface_data.name, - mac=iface_data.mac, - ip4=iface_data.ip4, - ip4_mask=iface_data.ip4_mask, - ip6=iface_data.ip6, - ip6_mask=iface_data.ip6_mask, - ) - - -def convert_link_options(options: LinkOptions) -> core_pb2.LinkOptions: - """ - Convert link options to protobuf. 
- - :param options: link options to convert - :return: link options protobuf - """ - return core_pb2.LinkOptions( - jitter=options.jitter, - key=options.key, - mburst=options.mburst, - mer=options.mer, - loss=options.loss, - bandwidth=options.bandwidth, - burst=options.burst, - delay=options.delay, - dup=options.dup, - buffer=options.buffer, - unidirectional=options.unidirectional, - ) - - -def convert_options_proto(options: core_pb2.LinkOptions) -> LinkOptions: - return LinkOptions( - delay=options.delay, - bandwidth=options.bandwidth, - loss=options.loss, - dup=options.dup, - jitter=options.jitter, - mer=options.mer, - burst=options.burst, - mburst=options.mburst, - buffer=options.buffer, - unidirectional=options.unidirectional, - key=options.key, - ) - - -def convert_link( - node1: NodeBase, - iface1: Optional[CoreInterface], - node2: NodeBase, - iface2: Optional[CoreInterface], - options: LinkOptions, - unidirectional: bool, -) -> core_pb2.Link: - """ - Convert link objects to link protobuf. - - :param node1: first node in link - :param iface1: node1 interface - :param node2: second node in link - :param iface2: node2 interface - :param options: link options - :param unidirectional: if this link is considered unidirectional - :return: protobuf link - """ - if iface1 is not None: - iface1 = convert_iface(iface1) - if iface2 is not None: - iface2 = convert_iface(iface2) - is_node1_wireless = isinstance(node1, (WlanNode, EmaneNet)) - is_node2_wireless = isinstance(node2, (WlanNode, EmaneNet)) - if not (is_node1_wireless or is_node2_wireless): - options = convert_link_options(options) - options.unidirectional = unidirectional - else: - options = None - return core_pb2.Link( - type=LinkTypes.WIRED.value, - node1_id=node1.id, - node2_id=node2.id, - iface1=iface1, - iface2=iface2, - options=options, - network_id=None, - label=None, - color=None, - ) - - -def parse_proc_net_dev(lines: list[str]) -> dict[str, dict[str, float]]: - """ - Parse lines of output from /proc/net/dev. - - :param lines: lines of /proc/net/dev - :return: parsed device to tx/rx values - """ - stats = {} - for line in lines[2:]: - line = line.strip() - if not line: - continue - line = line.split() - line[0] = line[0].strip(":") - stats[line[0]] = {"rx": float(line[1]), "tx": float(line[9])} - return stats - - -def get_net_stats() -> dict[str, dict[str, float]]: - """ - Retrieve status about the current interfaces in the system - - :return: send and receive status of the interfaces in the system - """ - with open("/proc/net/dev", "r") as f: - lines = f.readlines()[2:] - return parse_proc_net_dev(lines) - - -def session_location(session: Session, location: core_pb2.SessionLocation) -> None: - """ - Set session location based on location proto. - - :param session: session for location - :param location: location to set - :return: nothing - """ - session.location.refxyz = (location.x, location.y, location.z) - session.location.setrefgeo(location.lat, location.lon, location.alt) - session.location.refscale = location.scale - - -def service_configuration(session: Session, config: ServiceConfig) -> None: - """ - Convenience method for setting a node service configuration. 
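A quick illustration of the /proc/net/dev parsing above, using abbreviated counters (after the device name, received bytes are the first column and transmitted bytes the ninth, which is what line[1] and line[9] index):

```python
# Illustrative only: two header lines followed by per-device counters.
sample = [
    "Inter-|   Receive                    |  Transmit",
    " face |bytes packets errs drop fifo frame compressed multicast|bytes ...",
    "    lo: 1000 10 0 0 0 0 0 0 1000 10 0 0 0 0 0 0",
    "  eth0: 2000 20 0 0 0 0 0 0 3000 30 0 0 0 0 0 0",
]
stats = parse_proc_net_dev(sample)
# stats == {"lo": {"rx": 1000.0, "tx": 1000.0}, "eth0": {"rx": 2000.0, "tx": 3000.0}}
```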
- - :param session: session for service configuration - :param config: service configuration - :return: - """ - session.services.set_service(config.node_id, config.service) - service = session.services.get_service(config.node_id, config.service) - if config.files: - service.configs = tuple(config.files) - if config.directories: - service.dirs = tuple(config.directories) - if config.startup: - service.startup = tuple(config.startup) - if config.validate: - service.validate = tuple(config.validate) - if config.shutdown: - service.shutdown = tuple(config.shutdown) - - -def get_service_configuration(service: CoreService) -> NodeServiceData: - """ - Convenience for converting a service to service data proto. - - :param service: service to get proto data for - :return: service proto data - """ - return NodeServiceData( - executables=service.executables, - dependencies=service.dependencies, - dirs=service.dirs, - configs=service.configs, - startup=service.startup, - validate=service.validate, - validation_mode=service.validation_mode.value, - validation_timer=service.validation_timer, - shutdown=service.shutdown, - meta=service.meta, - ) - - -def iface_to_proto(session: Session, iface: CoreInterface) -> core_pb2.Interface: - """ - Convenience for converting a core interface to the protobuf representation. - - :param session: session interface belongs to - :param iface: interface to convert - :return: interface proto - """ - ip4_net = iface.get_ip4() - ip4 = str(ip4_net.ip) if ip4_net else None - ip4_mask = ip4_net.prefixlen if ip4_net else None - ip6_net = iface.get_ip6() - ip6 = str(ip6_net.ip) if ip6_net else None - ip6_mask = ip6_net.prefixlen if ip6_net else None - mac = str(iface.mac) if iface.mac else None - nem_id = None - nem_port = None - if isinstance(iface.net, EmaneNet): - nem_id = session.emane.get_nem_id(iface) - nem_port = session.emane.get_nem_port(iface) - return core_pb2.Interface( - id=iface.id, - name=iface.name, - mac=mac, - mtu=iface.mtu, - flow_id=iface.flow_id, - ip4=ip4, - ip4_mask=ip4_mask, - ip6=ip6, - ip6_mask=ip6_mask, - nem_id=nem_id, - nem_port=nem_port, - ) - - -def get_nem_id( - session: Session, node: CoreNode, iface_id: int, context: ServicerContext -) -> int: - """ - Get nem id for a given node and interface id. - - :param session: session node belongs to - :param node: node to get nem id for - :param iface_id: id of interface on node to get nem id for - :param context: request context - :return: nem id - """ - iface = node.ifaces.get(iface_id) - if not iface: - message = f"{node.name} missing interface {iface_id}" - context.abort(grpc.StatusCode.NOT_FOUND, message) - net = iface.net - if not isinstance(net, EmaneNet): - message = f"{node.name} interface {iface_id} is not an EMANE network" - context.abort(grpc.StatusCode.INVALID_ARGUMENT, message) - nem_id = session.emane.get_nem_id(iface) - if nem_id is None: - message = f"{node.name} interface {iface_id} nem id does not exist" - context.abort(grpc.StatusCode.INVALID_ARGUMENT, message) - return nem_id - - -def get_emane_model_configs_dict(session: Session) -> dict[int, list[NodeEmaneConfig]]: - """ - Get emane model configuration protobuf data. 
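A minimal sketch of driving service_configuration above, given an active Session; the service name and file values are only examples, and ServiceConfig is assumed to come from services_pb2 like the other service messages in this API:

```python
from core.api.grpc.services_pb2 import ServiceConfig  # assumed import path

config = ServiceConfig(
    node_id=1,
    service="DefaultRoute",          # example service name
    files=["defaultroute.sh"],       # example config file
    startup=["sh defaultroute.sh"],  # example startup command
)
service_configuration(session, config)
```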
- - :param session: session to get emane model configuration for - :return: dict of emane model protobuf configurations - """ - configs = {} - for _id, model_configs in session.emane.node_configs.items(): - for model_name in model_configs: - model_class = session.emane.get_model(model_name) - current_config = session.emane.get_config(_id, model_name) - config = get_config_options(current_config, model_class) - node_id, iface_id = utils.parse_iface_config_id(_id) - iface_id = iface_id if iface_id is not None else -1 - node_config = NodeEmaneConfig( - model=model_name, iface_id=iface_id, config=config - ) - node_configs = configs.setdefault(node_id, []) - node_configs.append(node_config) - return configs - - -def get_hooks(session: Session) -> list[core_pb2.Hook]: - """ - Retrieve hook protobuf data for a session. - - :param session: session to get hooks for - :return: list of hook protobufs - """ - hooks = [] - for state in session.hooks: - state_hooks = session.hooks[state] - for file_name, file_data in state_hooks: - hook = core_pb2.Hook(state=state.value, file=file_name, data=file_data) - hooks.append(hook) - return hooks - - -def get_default_services(session: Session) -> list[ServiceDefaults]: - """ - Retrieve the default service sets for a given session. - - :param session: session to get default service sets for - :return: list of default service sets - """ - default_services = [] - for model, services in session.services.default_services.items(): - default_service = ServiceDefaults(model=model, services=services) - default_services.append(default_service) - return default_services - - -def get_mobility_node( - session: Session, node_id: int, context: ServicerContext -) -> Union[WlanNode, EmaneNet]: - """ - Get mobility node. - - :param session: session to get node from - :param node_id: id of node to get - :param context: grpc context - :return: wlan or emane node - """ - try: - return session.get_node(node_id, WlanNode) - except CoreError: - try: - return session.get_node(node_id, EmaneNet) - except CoreError: - context.abort(grpc.StatusCode.NOT_FOUND, "node id is not for wlan or emane") - - -def convert_session(session: Session) -> wrappers.Session: - """ - Convert session to its wrapped version. 
- - :param session: session to convert - :return: wrapped session data - """ - emane_configs = get_emane_model_configs_dict(session) - nodes = [] - links = [] - for _id in session.nodes: - node = session.nodes[_id] - if not isinstance(node, (PtpNet, CtrlNet)): - node_emane_configs = emane_configs.get(node.id, []) - node_proto = get_node_proto(session, node, node_emane_configs) - nodes.append(node_proto) - if isinstance(node, (WlanNode, EmaneNet)): - for link_data in node.links(): - links.append(convert_link_data(link_data)) - for core_link in session.link_manager.links(): - links.extend(convert_core_link(core_link)) - default_services = get_default_services(session) - x, y, z = session.location.refxyz - lat, lon, alt = session.location.refgeo - location = core_pb2.SessionLocation( - x=x, y=y, z=z, lat=lat, lon=lon, alt=alt, scale=session.location.refscale - ) - hooks = get_hooks(session) - session_file = str(session.file_path) if session.file_path else None - options = convert_session_options(session) - servers = [ - core_pb2.Server(name=x.name, host=x.host) - for x in session.distributed.servers.values() - ] - return core_pb2.Session( - id=session.id, - state=session.state.value, - nodes=nodes, - links=links, - dir=str(session.directory), - user=session.user, - default_services=default_services, - location=location, - hooks=hooks, - metadata=session.metadata, - file=session_file, - options=options, - servers=servers, - ) - - -def configure_node( - session: Session, node: core_pb2.Node, core_node: NodeBase, context: ServicerContext -) -> None: - """ - Configure a node using all provided protobuf data. - - :param session: session for node - :param node: node protobuf data - :param core_node: session node - :param context: grpc context - :return: nothing - """ - for emane_config in node.emane_configs: - _id = utils.iface_config_id(node.id, emane_config.iface_id) - config = {k: v.value for k, v in emane_config.config.items()} - session.emane.set_config(_id, emane_config.model, config) - if node.wlan_config: - config = {k: v.value for k, v in node.wlan_config.items()} - session.mobility.set_model_config(node.id, BasicRangeModel.name, config) - if node.mobility_config: - config = {k: v.value for k, v in node.mobility_config.items()} - session.mobility.set_model_config(node.id, Ns2ScriptedMobility.name, config) - if isinstance(core_node, WirelessNode) and node.wireless_config: - config = {k: v.value for k, v in node.wireless_config.items()} - core_node.set_config(config) - for service_name, service_config in node.service_configs.items(): - data = service_config.data - config = ServiceConfig( - node_id=node.id, - service=service_name, - startup=data.startup, - validate=data.validate, - shutdown=data.shutdown, - files=data.configs, - directories=data.dirs, - ) - service_configuration(session, config) - for file_name, file_data in service_config.files.items(): - session.services.set_service_file( - node.id, service_name, file_name, file_data - ) - if node.config_service_configs: - if not isinstance(core_node, CoreNode): - context.abort( - grpc.StatusCode.INVALID_ARGUMENT, - "invalid node type with config service configs", - ) - for service_name, service_config in node.config_service_configs.items(): - service = core_node.config_services[service_name] - if service_config.config: - service.set_config(service_config.config) - for name, template in service_config.templates.items(): - service.set_template(name, template) diff --git a/daemon/core/api/grpc/server.py b/daemon/core/api/grpc/server.py 
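Rounding out the grpcutils helpers, configure_node above consumes per-node config maps straight off the Node proto. An illustrative sketch of a Node carrying a WLAN config; the option names are assumed examples of BasicRangeModel settings, and NodeTypes.WIRELESS_LAN is assumed to be the matching enum value:

```python
from core.api.grpc import common_pb2, core_pb2
from core.emulator.enumerations import NodeTypes

node = core_pb2.Node(
    id=5,
    name="wlan5",
    type=NodeTypes.WIRELESS_LAN.value,
    wlan_config={
        "range": common_pb2.ConfigOption(value="275"),
        "bandwidth": common_pb2.ConfigOption(value="54000000"),
    },
)
# configure_node(session, node, core_node, context) would then apply these
# values through session.mobility.set_model_config(...)
```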
deleted file mode 100644 index 6a86ab0a..00000000 --- a/daemon/core/api/grpc/server.py +++ /dev/null @@ -1,1436 +0,0 @@ -import logging -import os -import re -import signal -import sys -import tempfile -import time -from collections.abc import Iterable -from concurrent import futures -from pathlib import Path -from re import Pattern -from typing import Optional - -import grpc -from grpc import ServicerContext - -from core import utils -from core.api.grpc import ( - common_pb2, - configservices_pb2, - core_pb2, - core_pb2_grpc, - grpcutils, -) -from core.api.grpc.configservices_pb2 import ( - ConfigService, - GetConfigServiceDefaultsRequest, - GetConfigServiceDefaultsResponse, - GetConfigServiceRenderedRequest, - GetConfigServiceRenderedResponse, - GetNodeConfigServiceRequest, - GetNodeConfigServiceResponse, -) -from core.api.grpc.core_pb2 import ( - ExecuteScriptResponse, - GetWirelessConfigRequest, - GetWirelessConfigResponse, - LinkedRequest, - LinkedResponse, - WirelessConfigRequest, - WirelessConfigResponse, - WirelessLinkedRequest, - WirelessLinkedResponse, -) -from core.api.grpc.emane_pb2 import ( - EmaneLinkRequest, - EmaneLinkResponse, - EmanePathlossesRequest, - EmanePathlossesResponse, - GetEmaneEventChannelRequest, - GetEmaneEventChannelResponse, - GetEmaneModelConfigRequest, - GetEmaneModelConfigResponse, - SetEmaneModelConfigRequest, - SetEmaneModelConfigResponse, -) -from core.api.grpc.events import EventStreamer -from core.api.grpc.grpcutils import get_config_options, get_links, get_net_stats -from core.api.grpc.mobility_pb2 import ( - GetMobilityConfigRequest, - GetMobilityConfigResponse, - MobilityAction, - MobilityActionRequest, - MobilityActionResponse, - SetMobilityConfigRequest, - SetMobilityConfigResponse, -) -from core.api.grpc.services_pb2 import ( - GetNodeServiceFileRequest, - GetNodeServiceFileResponse, - GetNodeServiceRequest, - GetNodeServiceResponse, - GetServiceDefaultsRequest, - GetServiceDefaultsResponse, - Service, - ServiceAction, - ServiceActionRequest, - ServiceActionResponse, - SetServiceDefaultsRequest, - SetServiceDefaultsResponse, -) -from core.api.grpc.wlan_pb2 import ( - GetWlanConfigRequest, - GetWlanConfigResponse, - SetWlanConfigRequest, - SetWlanConfigResponse, - WlanLinkRequest, - WlanLinkResponse, -) -from core.configservice.base import ConfigServiceBootError -from core.emane.modelmanager import EmaneModelManager -from core.emulator.coreemu import CoreEmu -from core.emulator.data import InterfaceData, LinkData, LinkOptions -from core.emulator.enumerations import ( - EventTypes, - ExceptionLevels, - MessageFlags, - NodeTypes, -) -from core.emulator.session import NT, Session -from core.errors import CoreCommandError, CoreError -from core.location.mobility import BasicRangeModel, Ns2ScriptedMobility -from core.nodes.base import CoreNode, NodeBase -from core.nodes.network import CoreNetwork, WlanNode -from core.nodes.wireless import WirelessNode -from core.services.coreservices import ServiceManager - -logger = logging.getLogger(__name__) -_ONE_DAY_IN_SECONDS: int = 60 * 60 * 24 -_INTERFACE_REGEX: Pattern[str] = re.compile(r"beth(?P<node>[0-9a-fA-F]+)") -_MAX_WORKERS = 1000 - - -class CoreGrpcServer(core_pb2_grpc.CoreApiServicer): - """ - Create CoreGrpcServer instance - - :param coreemu: coreemu object - """ - - def __init__(self, coreemu: CoreEmu) -> None: - super().__init__() - self.coreemu: CoreEmu = coreemu - self.running: bool = True - self.server: Optional[grpc.Server] = None - # catch signals - signal.signal(signal.SIGHUP,
self._signal_handler) - signal.signal(signal.SIGINT, self._signal_handler) - signal.signal(signal.SIGTERM, self._signal_handler) - signal.signal(signal.SIGUSR1, self._signal_handler) - signal.signal(signal.SIGUSR2, self._signal_handler) - - def _signal_handler(self, signal_number: int, _) -> None: - logger.info("caught signal: %s", signal_number) - self.coreemu.shutdown() - self.running = False - if self.server: - self.server.stop(None) - sys.exit(signal_number) - - def _is_running(self, context) -> bool: - return self.running and context.is_active() - - def _cancel_stream(self, context) -> None: - context.abort(grpc.StatusCode.CANCELLED, "server stopping") - - def listen(self, address: str) -> None: - logger.info("CORE gRPC API listening on: %s", address) - self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=_MAX_WORKERS)) - core_pb2_grpc.add_CoreApiServicer_to_server(self, self.server) - self.server.add_insecure_port(address) - self.server.start() - - try: - while True: - time.sleep(_ONE_DAY_IN_SECONDS) - except KeyboardInterrupt: - self.server.stop(None) - - def get_session(self, session_id: int, context: ServicerContext) -> Session: - """ - Retrieve session given the session id - - :param session_id: session id - :param context: - :return: session object that satisfies, if session not found then raise an - exception - :raises Exception: raises grpc exception when session does not exist - """ - session = self.coreemu.sessions.get(session_id) - if not session: - context.abort(grpc.StatusCode.NOT_FOUND, f"session {session_id} not found") - return session - - def get_node( - self, session: Session, node_id: int, context: ServicerContext, _class: type[NT] - ) -> NT: - """ - Retrieve node given session and node id - - :param session: session that has the node - :param node_id: node id - :param context: request - :param _class: type of node we are expecting - :return: node object that satisfies. If node not found then raise an exception. - :raises Exception: raises grpc exception when node does not exist - """ - try: - return session.get_node(node_id, _class) - except CoreError as e: - context.abort(grpc.StatusCode.NOT_FOUND, str(e)) - - def move_node( - self, - context: ServicerContext, - session_id: int, - node_id: int, - geo: core_pb2.Geo = None, - position: core_pb2.Position = None, - source: str = None, - ): - if not geo and not position: - raise CoreError("move node must provide a geo or position to move") - session = self.get_session(session_id, context) - node = self.get_node(session, node_id, context, NodeBase) - if geo: - session.set_node_geo(node, geo.lon, geo.lat, geo.alt) - else: - session.set_node_pos(node, position.x, position.y) - source = source if source else None - session.broadcast_node(node, source=source) - - def validate_service( - self, name: str, context: ServicerContext - ) -> type[ConfigService]: - """ - Validates a configuration service is a valid known service. 
- - :param name: name of service to validate - :param context: grpc context - :return: class for service to validate - :raises Exception: raises grpc exception when service does not exist - """ - service = self.coreemu.service_manager.services.get(name) - if not service: - context.abort(grpc.StatusCode.NOT_FOUND, f"unknown service {name}") - return service - - def GetConfig( - self, request: core_pb2.GetConfigRequest, context: ServicerContext - ) -> core_pb2.GetConfigResponse: - services = [] - for name in ServiceManager.services: - service = ServiceManager.services[name] - service_proto = Service(group=service.group, name=service.name) - services.append(service_proto) - config_services = [] - for service in self.coreemu.service_manager.services.values(): - service_proto = ConfigService( - name=service.name, - group=service.group, - executables=service.executables, - dependencies=service.dependencies, - directories=service.directories, - files=service.files, - startup=service.startup, - validate=service.validate, - shutdown=service.shutdown, - validation_mode=service.validation_mode.value, - validation_timer=service.validation_timer, - validation_period=service.validation_period, - ) - config_services.append(service_proto) - emane_models = [x.name for x in EmaneModelManager.models.values()] - return core_pb2.GetConfigResponse( - services=services, - config_services=config_services, - emane_models=emane_models, - ) - - def StartSession( - self, request: core_pb2.StartSessionRequest, context: ServicerContext - ) -> core_pb2.StartSessionResponse: - """ - Start a session. - - :param request: start session request - :param context: grpc context - :return: start session response - """ - logger.debug("start session: %s", request) - session = self.get_session(request.session.id, context) - - # clear previous state and setup for creation - session.clear() - session.directory.mkdir(exist_ok=True) - if request.definition: - state = EventTypes.DEFINITION_STATE - else: - state = EventTypes.CONFIGURATION_STATE - session.set_state(state) - if request.session.user: - session.set_user(request.session.user) - - # session options - for option in request.session.options.values(): - if option.value: - session.options.set(option.name, option.value) - session.metadata = dict(request.session.metadata) - - # add servers - for server in request.session.servers: - session.distributed.add_server(server.name, server.host) - - # location - if request.session.HasField("location"): - grpcutils.session_location(session, request.session.location) - - # add all hooks - for hook in request.session.hooks: - state = EventTypes(hook.state) - session.add_hook(state, hook.file, hook.data) - - # create nodes - _, exceptions = grpcutils.create_nodes(session, request.session.nodes) - if exceptions: - exceptions = [str(x) for x in exceptions] - return core_pb2.StartSessionResponse(result=False, exceptions=exceptions) - - # check for configurations - for node in request.session.nodes: - core_node = self.get_node(session, node.id, context, NodeBase) - grpcutils.configure_node(session, node, core_node, context) - - # create links - links = [] - edit_links = [] - known_links = set() - for link in request.session.links: - iface1 = link.iface1.id if link.iface1 else None - iface2 = link.iface2.id if link.iface2 else None - if link.node1_id < link.node2_id: - link_id = (link.node1_id, iface1, link.node2_id, iface2) - else: - link_id = (link.node2_id, iface2, link.node1_id, iface1) - if link_id in known_links: - edit_links.append(link) - 
else: - known_links.add(link_id) - links.append(link) - _, exceptions = grpcutils.create_links(session, links) - if exceptions: - exceptions = [str(x) for x in exceptions] - return core_pb2.StartSessionResponse(result=False, exceptions=exceptions) - _, exceptions = grpcutils.edit_links(session, edit_links) - if exceptions: - exceptions = [str(x) for x in exceptions] - return core_pb2.StartSessionResponse(result=False, exceptions=exceptions) - - # set to instantiation and start - if not request.definition: - session.set_state(EventTypes.INSTANTIATION_STATE) - # boot services - boot_exceptions = session.instantiate() - if boot_exceptions: - exceptions = [] - for boot_exception in boot_exceptions: - for service_exception in boot_exception.args: - exceptions.append(str(service_exception)) - return core_pb2.StartSessionResponse( - result=False, exceptions=exceptions - ) - return core_pb2.StartSessionResponse(result=True) - - def StopSession( - self, request: core_pb2.StopSessionRequest, context: ServicerContext - ) -> core_pb2.StopSessionResponse: - """ - Stop a running session. - - :param request: stop session request - :param context: grpc context - :return: stop session response - """ - logger.debug("stop session: %s", request) - session = self.get_session(request.session_id, context) - session.data_collect() - session.shutdown() - return core_pb2.StopSessionResponse(result=True) - - def CreateSession( - self, request: core_pb2.CreateSessionRequest, context: ServicerContext - ) -> core_pb2.CreateSessionResponse: - """ - Create a session - - :param request: create-session request - :param context: - :return: a create-session response - """ - logger.debug("create session: %s", request) - session = self.coreemu.create_session(request.session_id) - session.set_state(EventTypes.DEFINITION_STATE) - session.location.setrefgeo(47.57917, -122.13232, 2.0) - session.location.refscale = 150.0 - session_proto = grpcutils.convert_session(session) - return core_pb2.CreateSessionResponse(session=session_proto) - - def DeleteSession( - self, request: core_pb2.DeleteSessionRequest, context: ServicerContext - ) -> core_pb2.DeleteSessionResponse: - """ - Delete the session - - :param request: delete-session request - :param context: context object - :return: a delete-session response - """ - logger.debug("delete session: %s", request) - result = self.coreemu.delete_session(request.session_id) - return core_pb2.DeleteSessionResponse(result=result) - - def GetSessions( - self, request: core_pb2.GetSessionsRequest, context: ServicerContext - ) -> core_pb2.GetSessionsResponse: - """ - Get all currently known session overviews. - - :param request: get sessions request - :param context: context object - :return: a get sessions response - """ - logger.debug("get sessions: %s", request) - sessions = [] - for session_id in self.coreemu.sessions: - session = self.coreemu.sessions[session_id] - session_file = str(session.file_path) if session.file_path else None - session_summary = core_pb2.SessionSummary( - id=session_id, - state=session.state.value, - nodes=session.get_node_count(), - file=session_file, - dir=str(session.directory), - ) - sessions.append(session_summary) - return core_pb2.GetSessionsResponse(sessions=sessions) - - def CheckSession( - self, request: core_pb2.GetSessionRequest, context: ServicerContext - ) -> core_pb2.CheckSessionResponse: - """ - Checks if a session exists. 
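Since the servicer above is registered through add_CoreApiServicer_to_server, the matching protoc-generated stub is CoreApiStub. A hypothetical client-side sketch, assuming the daemon's default listen address of localhost:50051:

```python
import grpc
from core.api.grpc import core_pb2, core_pb2_grpc

# connect and list known sessions via the GetSessions RPC above
with grpc.insecure_channel("localhost:50051") as channel:
    stub = core_pb2_grpc.CoreApiStub(channel)
    response = stub.GetSessions(core_pb2.GetSessionsRequest())
    for summary in response.sessions:
        print(summary.id, summary.state, summary.nodes, summary.file)
```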
- - :param request: check session request - :param context: context object - :return: check session response - """ - result = request.session_id in self.coreemu.sessions - return core_pb2.CheckSessionResponse(result=result) - - def GetSession( - self, request: core_pb2.GetSessionRequest, context: ServicerContext - ) -> core_pb2.GetSessionResponse: - """ - Retrieve requested session - - :param request: get-session request - :param context: context object - :return: get-session response - """ - logger.debug("get session: %s", request) - session = self.get_session(request.session_id, context) - session_proto = grpcutils.convert_session(session) - return core_pb2.GetSessionResponse(session=session_proto) - - def SessionAlert( - self, request: core_pb2.SessionAlertRequest, context: ServicerContext - ) -> core_pb2.SessionAlertResponse: - session = self.get_session(request.session_id, context) - level = ExceptionLevels(request.level) - node_id = request.node_id if request.node_id else None - session.exception(level, request.source, request.text, node_id) - return core_pb2.SessionAlertResponse(result=True) - - def Events(self, request: core_pb2.EventsRequest, context: ServicerContext) -> None: - session = self.get_session(request.session_id, context) - event_types = set(request.events) - if not event_types: - event_types = set(core_pb2.EventType.Enum.values()) - - streamer = EventStreamer(session, event_types) - while self._is_running(context): - event = streamer.process() - if event: - yield event - - streamer.remove_handlers() - self._cancel_stream(context) - - def Throughputs( - self, request: core_pb2.ThroughputsRequest, context: ServicerContext - ) -> None: - """ - Calculate average throughput after every certain amount of delay time - - :param request: throughputs request - :param context: context object - :return: nothing - """ - session = self.get_session(request.session_id, context) - delay = 3 - last_check = None - last_stats = None - - while self._is_running(context): - now = time.monotonic() - stats = get_net_stats() - # calculate average - if last_check is not None: - interval = now - last_check - throughputs_event = core_pb2.ThroughputsEvent(session_id=session.id) - for key in stats: - current_rxtx = stats[key] - previous_rxtx = last_stats.get(key) - if not previous_rxtx: - continue - rx_kbps = ( - (current_rxtx["rx"] - previous_rxtx["rx"]) * 8.0 / interval - ) - tx_kbps = ( - (current_rxtx["tx"] - previous_rxtx["tx"]) * 8.0 / interval - ) - throughput = rx_kbps + tx_kbps - if key.startswith("beth"): - key = key.split(".") - node_id = _INTERFACE_REGEX.search(key[0]).group("node") - node_id = int(node_id, base=16) - iface_id = int(key[1]) - session_id = key[2] - if session.short_session_id() != session_id: - continue - iface_throughput = throughputs_event.iface_throughputs.add() - iface_throughput.node_id = node_id - iface_throughput.iface_id = iface_id - iface_throughput.throughput = throughput - elif key.startswith("b."): - try: - key = key.split(".") - node_id = int(key[1], base=16) - session_id = key[2] - if session.short_session_id() != session_id: - continue - bridge_throughput = ( - throughputs_event.bridge_throughputs.add() - ) - bridge_throughput.node_id = node_id - bridge_throughput.throughput = throughput - except ValueError: - pass - yield throughputs_event - - last_check = now - last_stats = stats - time.sleep(delay) - - def CpuUsage( - self, request: core_pb2.CpuUsageRequest, context: ServicerContext - ) -> None: - cpu_usage = grpcutils.CpuUsage() - while 
self._is_running(context): - usage = cpu_usage.run() - yield core_pb2.CpuUsageEvent(usage=usage) - time.sleep(request.delay) - - def AddNode( - self, request: core_pb2.AddNodeRequest, context: ServicerContext - ) -> core_pb2.AddNodeResponse: - """ - Add node to requested session - - :param request: add-node request - :param context: context object - :return: add-node response - """ - logger.debug("add node: %s", request) - session = self.get_session(request.session_id, context) - _type = NodeTypes(request.node.type) - _class = session.get_node_class(_type) - position, options = grpcutils.add_node_data(_class, request.node) - node = session.add_node( - _class, - request.node.id or None, - request.node.name or None, - request.node.server or None, - position, - options, - ) - grpcutils.configure_node(session, request.node, node, context) - source = request.source if request.source else None - session.broadcast_node(node, MessageFlags.ADD, source) - return core_pb2.AddNodeResponse(node_id=node.id) - - def GetNode( - self, request: core_pb2.GetNodeRequest, context: ServicerContext - ) -> core_pb2.GetNodeResponse: - """ - Retrieve node - - :param request: get-node request - :param context: context object - :return: get-node response - """ - logger.debug("get node: %s", request) - session = self.get_session(request.session_id, context) - node = self.get_node(session, request.node_id, context, NodeBase) - ifaces = [] - for iface_id in node.ifaces: - iface = node.ifaces[iface_id] - iface_proto = grpcutils.iface_to_proto(session, iface) - ifaces.append(iface_proto) - emane_configs = grpcutils.get_emane_model_configs_dict(session) - node_emane_configs = emane_configs.get(node.id, []) - node_proto = grpcutils.get_node_proto(session, node, node_emane_configs) - links = get_links(session, node) - return core_pb2.GetNodeResponse(node=node_proto, ifaces=ifaces, links=links) - - def MoveNode( - self, request: core_pb2.MoveNodeRequest, context: ServicerContext - ) -> core_pb2.MoveNodeResponse: - """ - Move node, either by x,y position or geospatial. 
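A hypothetical client-side sketch for the MoveNode RPC introduced here, moving a node by geographic coordinates (stub setup as in the earlier GetSessions sketch; the coordinates simply mirror the CreateSession defaults above):

```python
import grpc
from core.api.grpc import core_pb2, core_pb2_grpc

stub = core_pb2_grpc.CoreApiStub(grpc.insecure_channel("localhost:50051"))
request = core_pb2.MoveNodeRequest(
    session_id=1,
    node_id=2,
    geo=core_pb2.Geo(lat=47.57917, lon=-122.13232, alt=2.0),
)
stub.MoveNode(request)
```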
- - :param request: move node request - :param context: context object - :return: move nodes response - """ - geo = request.geo if request.HasField("geo") else None - position = request.position if request.HasField("position") else None - self.move_node( - context, request.session_id, request.node_id, geo, position, request.source - ) - return core_pb2.MoveNodeResponse(result=True) - - def MoveNodes( - self, - request_iterator: Iterable[core_pb2.MoveNodesRequest], - context: ServicerContext, - ) -> core_pb2.MoveNodesResponse: - """ - Stream node movements - - :param request_iterator: move nodes request iterator - :param context: context object - :return: move nodes response - """ - for request in request_iterator: - geo = request.geo if request.HasField("geo") else None - position = request.position if request.HasField("position") else None - self.move_node( - context, - request.session_id, - request.node_id, - geo, - position, - request.source, - ) - return core_pb2.MoveNodesResponse() - - def EditNode( - self, request: core_pb2.EditNodeRequest, context: ServicerContext - ) -> core_pb2.EditNodeResponse: - """ - Edit node - - :param request: edit-node request - :param context: context object - :return: edit-node response - """ - logger.debug("edit node: %s", request) - session = self.get_session(request.session_id, context) - node = self.get_node(session, request.node_id, context, NodeBase) - node.icon = request.icon or None - source = request.source or None - session.broadcast_node(node, source=source) - return core_pb2.EditNodeResponse(result=True) - - def DeleteNode( - self, request: core_pb2.DeleteNodeRequest, context: ServicerContext - ) -> core_pb2.DeleteNodeResponse: - """ - Delete node - - :param request: delete-node request - :param context: context object - :return: core.api.grpc.core_pb2.DeleteNodeResponse - """ - logger.debug("delete node: %s", request) - session = self.get_session(request.session_id, context) - result = False - if request.node_id in session.nodes: - node = self.get_node(session, request.node_id, context, NodeBase) - result = session.delete_node(node.id) - source = request.source if request.source else None - session.broadcast_node(node, MessageFlags.DELETE, source) - return core_pb2.DeleteNodeResponse(result=result) - - def NodeCommand( - self, request: core_pb2.NodeCommandRequest, context: ServicerContext - ) -> core_pb2.NodeCommandResponse: - """ - Run command on a node - - :param request: node-command request - :param context: context object - :return: core.api.grpc.core_pb2.NodeCommandResponse - """ - logger.debug("sending node command: %s", request) - session = self.get_session(request.session_id, context) - node = self.get_node(session, request.node_id, context, CoreNode) - try: - output = node.cmd(request.command, request.wait, request.shell) - return_code = 0 - except CoreCommandError as e: - output = e.stderr - return_code = e.returncode - return core_pb2.NodeCommandResponse(output=output, return_code=return_code) - - def GetNodeTerminal( - self, request: core_pb2.GetNodeTerminalRequest, context: ServicerContext - ) -> core_pb2.GetNodeTerminalResponse: - """ - Retrieve terminal command string of a node - - :param request: get-node-terminal request - :param context: context object - :return: get-node-terminal response - """ - logger.debug("getting node terminal: %s", request) - session = self.get_session(request.session_id, context) - node = self.get_node(session, request.node_id, context, CoreNode) - terminal = node.termcmdstring("/bin/bash") - return 
core_pb2.GetNodeTerminalResponse(terminal=terminal) - - def AddLink( - self, request: core_pb2.AddLinkRequest, context: ServicerContext - ) -> core_pb2.AddLinkResponse: - """ - Add link to a session - - :param request: add-link request - :param context: context object - :return: add-link response - """ - logger.debug("add link: %s", request) - session = self.get_session(request.session_id, context) - node1_id = request.link.node1_id - node2_id = request.link.node2_id - self.get_node(session, node1_id, context, NodeBase) - self.get_node(session, node2_id, context, NodeBase) - iface1_data, iface2_data, options = grpcutils.add_link_data(request.link) - node1_iface, node2_iface = session.add_link( - node1_id, node2_id, iface1_data, iface2_data, options - ) - iface1_data = None - if node1_iface: - if isinstance(node1_iface.node, CoreNetwork): - iface1_data = InterfaceData(id=node1_iface.id) - else: - iface1_data = node1_iface.get_data() - iface2_data = None - if node2_iface: - if isinstance(node2_iface.node, CoreNetwork): - iface2_data = InterfaceData(id=node2_iface.id) - else: - iface2_data = node2_iface.get_data() - source = request.source if request.source else None - link_data = LinkData( - message_type=MessageFlags.ADD, - node1_id=node1_id, - node2_id=node2_id, - iface1=iface1_data, - iface2=iface2_data, - options=options, - source=source, - ) - session.broadcast_link(link_data) - iface1_proto = None - iface2_proto = None - if node1_iface: - iface1_proto = grpcutils.iface_to_proto(session, node1_iface) - if node2_iface: - iface2_proto = grpcutils.iface_to_proto(session, node2_iface) - return core_pb2.AddLinkResponse( - result=True, iface1=iface1_proto, iface2=iface2_proto - ) - - def EditLink( - self, request: core_pb2.EditLinkRequest, context: ServicerContext - ) -> core_pb2.EditLinkResponse: - """ - Edit a link - - :param request: edit-link request - :param context: context object - :return: edit-link response - """ - logger.debug("edit link: %s", request) - session = self.get_session(request.session_id, context) - node1_id = request.node1_id - node2_id = request.node2_id - iface1_id = request.iface1_id - iface2_id = request.iface2_id - options_proto = request.options - options = LinkOptions( - delay=options_proto.delay, - bandwidth=options_proto.bandwidth, - loss=options_proto.loss, - dup=options_proto.dup, - jitter=options_proto.jitter, - mer=options_proto.mer, - burst=options_proto.burst, - mburst=options_proto.mburst, - unidirectional=options_proto.unidirectional, - key=options_proto.key, - buffer=options_proto.buffer, - ) - session.update_link(node1_id, node2_id, iface1_id, iface2_id, options) - iface1 = InterfaceData(id=iface1_id) - iface2 = InterfaceData(id=iface2_id) - source = request.source if request.source else None - link_data = LinkData( - message_type=MessageFlags.NONE, - node1_id=node1_id, - node2_id=node2_id, - iface1=iface1, - iface2=iface2, - options=options, - source=source, - ) - session.broadcast_link(link_data) - return core_pb2.EditLinkResponse(result=True) - - def DeleteLink( - self, request: core_pb2.DeleteLinkRequest, context: ServicerContext - ) -> core_pb2.DeleteLinkResponse: - """ - Delete a link - - :param request: delete-link request - :param context: context object - :return: delete-link response - """ - logger.debug("delete link: %s", request) - session = self.get_session(request.session_id, context) - node1_id = request.node1_id - node2_id = request.node2_id - iface1_id = request.iface1_id - iface2_id = request.iface2_id - session.delete_link(node1_id, 
node2_id, iface1_id, iface2_id) - iface1 = InterfaceData(id=iface1_id) - iface2 = InterfaceData(id=iface2_id) - source = request.source if request.source else None - link_data = LinkData( - message_type=MessageFlags.DELETE, - node1_id=node1_id, - node2_id=node2_id, - iface1=iface1, - iface2=iface2, - source=source, - ) - session.broadcast_link(link_data) - return core_pb2.DeleteLinkResponse(result=True) - - def GetMobilityConfig( - self, request: GetMobilityConfigRequest, context: ServicerContext - ) -> GetMobilityConfigResponse: - """ - Retrieve mobility configuration of a node - - :param request: - get-mobility-configuration request - :param context: context object - :return: get-mobility-configuration response - """ - logger.debug("get mobility config: %s", request) - session = self.get_session(request.session_id, context) - current_config = session.mobility.get_model_config( - request.node_id, Ns2ScriptedMobility.name - ) - config = get_config_options(current_config, Ns2ScriptedMobility) - return GetMobilityConfigResponse(config=config) - - def SetMobilityConfig( - self, request: SetMobilityConfigRequest, context: ServicerContext - ) -> SetMobilityConfigResponse: - """ - Set mobility configuration of a node - - :param request: - set-mobility-configuration request - :param context: context object - :return: set-mobility-configuration response - """ - logger.debug("set mobility config: %s", request) - session = self.get_session(request.session_id, context) - mobility_config = request.mobility_config - session.mobility.set_model_config( - mobility_config.node_id, Ns2ScriptedMobility.name, mobility_config.config - ) - return SetMobilityConfigResponse(result=True) - - def MobilityAction( - self, request: MobilityActionRequest, context: ServicerContext - ) -> MobilityActionResponse: - """ - Take mobility action whether to start, pause, stop or none of those - - :param request: mobility-action - request - :param context: context object - :return: mobility-action response - """ - logger.debug("mobility action: %s", request) - session = self.get_session(request.session_id, context) - node = grpcutils.get_mobility_node(session, request.node_id, context) - if not node.mobility: - context.abort( - grpc.StatusCode.NOT_FOUND, f"node({node.name}) does not have mobility" - ) - result = True - if request.action == MobilityAction.START: - node.mobility.start() - elif request.action == MobilityAction.PAUSE: - node.mobility.pause() - elif request.action == MobilityAction.STOP: - node.mobility.stop(move_initial=True) - else: - result = False - return MobilityActionResponse(result=result) - - def GetServiceDefaults( - self, request: GetServiceDefaultsRequest, context: ServicerContext - ) -> GetServiceDefaultsResponse: - """ - Retrieve all the default services of all node types in a session - - :param request: get-default-service request - :param context: context object - :return: get-service-defaults response about all the available default services - """ - logger.debug("get service defaults: %s", request) - session = self.get_session(request.session_id, context) - defaults = grpcutils.get_default_services(session) - return GetServiceDefaultsResponse(defaults=defaults) - - def SetServiceDefaults( - self, request: SetServiceDefaultsRequest, context: ServicerContext - ) -> SetServiceDefaultsResponse: - """ - Set new default services to the session after whipping out the old ones - - :param request: set-service-defaults request - :param context: context object - :return: set-service-defaults response - """ - 
logger.debug("set service defaults: %s", request) - session = self.get_session(request.session_id, context) - session.services.default_services.clear() - for service_defaults in request.defaults: - session.services.default_services[ - service_defaults.model - ] = service_defaults.services - return SetServiceDefaultsResponse(result=True) - - def GetNodeService( - self, request: GetNodeServiceRequest, context: ServicerContext - ) -> GetNodeServiceResponse: - """ - Retrieve a requested service from a node - - :param request: get-node-service - request - :param context: context object - :return: get-node-service response about the requested service - """ - logger.debug("get node service: %s", request) - session = self.get_session(request.session_id, context) - service = session.services.get_service( - request.node_id, request.service, default_service=True - ) - service_proto = grpcutils.get_service_configuration(service) - return GetNodeServiceResponse(service=service_proto) - - def GetNodeServiceFile( - self, request: GetNodeServiceFileRequest, context: ServicerContext - ) -> GetNodeServiceFileResponse: - """ - Retrieve a requested service file from a node - - :param request: - get-node-service request - :param context: context object - :return: get-node-service response about the requested service - """ - logger.debug("get node service file: %s", request) - session = self.get_session(request.session_id, context) - node = self.get_node(session, request.node_id, context, CoreNode) - file_data = session.services.get_service_file( - node, request.service, request.file - ) - return GetNodeServiceFileResponse(data=file_data.data) - - def ServiceAction( - self, request: ServiceActionRequest, context: ServicerContext - ) -> ServiceActionResponse: - """ - Take action whether to start, stop, restart, validate the service or none of - the above. - - :param request: service-action request - :param context: context object - :return: service-action response about status of action - """ - logger.debug("service action: %s", request) - session = self.get_session(request.session_id, context) - node = self.get_node(session, request.node_id, context, CoreNode) - service = None - for current_service in node.services: - if current_service.name == request.service: - service = current_service - break - - if not service: - context.abort(grpc.StatusCode.NOT_FOUND, "service not found") - - status = -1 - if request.action == ServiceAction.START: - status = session.services.startup_service(node, service, wait=True) - elif request.action == ServiceAction.STOP: - status = session.services.stop_service(node, service) - elif request.action == ServiceAction.RESTART: - status = session.services.stop_service(node, service) - if not status: - status = session.services.startup_service(node, service, wait=True) - elif request.action == ServiceAction.VALIDATE: - status = session.services.validate_service(node, service) - - result = False - if not status: - result = True - - return ServiceActionResponse(result=result) - - def ConfigServiceAction( - self, request: ServiceActionRequest, context: ServicerContext - ) -> ServiceActionResponse: - """ - Take action whether to start, stop, restart, validate the config service or - none of the above. 
- - :param request: service action request - :param context: context object - :return: service action response about status of action - """ - logger.debug("service action: %s", request) - session = self.get_session(request.session_id, context) - node = self.get_node(session, request.node_id, context, CoreNode) - service = node.config_services.get(request.service) - if not service: - context.abort(grpc.StatusCode.NOT_FOUND, "config service not found") - result = False - if request.action == ServiceAction.START: - try: - service.start() - result = True - except ConfigServiceBootError: - pass - elif request.action == ServiceAction.STOP: - service.stop() - result = True - elif request.action == ServiceAction.RESTART: - service.stop() - try: - service.start() - result = True - except ConfigServiceBootError: - pass - elif request.action == ServiceAction.VALIDATE: - try: - service.run_validation() - result = True - except ConfigServiceBootError: - pass - return ServiceActionResponse(result=result) - - def GetWlanConfig( - self, request: GetWlanConfigRequest, context: ServicerContext - ) -> GetWlanConfigResponse: - """ - Retrieve wireless-lan configuration of a node - - :param request: get-wlan-configuration request - :param context: core.api.grpc.core_pb2.GetWlanConfigResponse - :return: get-wlan-configuration response about the wlan configuration of a node - """ - logger.debug("get wlan config: %s", request) - session = self.get_session(request.session_id, context) - current_config = session.mobility.get_model_config( - request.node_id, BasicRangeModel.name - ) - config = get_config_options(current_config, BasicRangeModel) - return GetWlanConfigResponse(config=config) - - def SetWlanConfig( - self, request: SetWlanConfigRequest, context: ServicerContext - ) -> SetWlanConfigResponse: - """ - Set configuration data for a model - - :param request: set-wlan-configuration request - :param context: context object - :return: set-wlan-configuration response - """ - logger.debug("set wlan config: %s", request) - session = self.get_session(request.session_id, context) - node_id = request.wlan_config.node_id - config = request.wlan_config.config - session.mobility.set_model_config(node_id, BasicRangeModel.name, config) - if session.is_running(): - node = self.get_node(session, node_id, context, WlanNode) - node.updatemodel(config) - return SetWlanConfigResponse(result=True) - - def GetEmaneModelConfig( - self, request: GetEmaneModelConfigRequest, context: ServicerContext - ) -> GetEmaneModelConfigResponse: - """ - Retrieve EMANE model configuration of a node - - :param request: - get-EMANE-model-configuration request - :param context: context object - :return: get-EMANE-model-configuration response - """ - logger.debug("get emane model config: %s", request) - session = self.get_session(request.session_id, context) - model = session.emane.get_model(request.model) - _id = utils.iface_config_id(request.node_id, request.iface_id) - current_config = session.emane.get_config(_id, request.model) - config = get_config_options(current_config, model) - return GetEmaneModelConfigResponse(config=config) - - def SetEmaneModelConfig( - self, request: SetEmaneModelConfigRequest, context: ServicerContext - ) -> SetEmaneModelConfigResponse: - """ - Set EMANE model configuration of a node - - :param request: - set-EMANE-model-configuration request - :param context: context object - :return: set-EMANE-model-configuration response - """ - logger.debug("set emane model config: %s", request) - session = 
self.get_session(request.session_id, context) - model_config = request.emane_model_config - _id = utils.iface_config_id(model_config.node_id, model_config.iface_id) - session.emane.set_config(_id, model_config.model, model_config.config) - return SetEmaneModelConfigResponse(result=True) - - def SaveXml( - self, request: core_pb2.SaveXmlRequest, context: ServicerContext - ) -> core_pb2.SaveXmlResponse: - """ - Export the session into the EmulationScript XML format - - :param request: save xml request - :param context: context object - :return: save-xml response - """ - logger.debug("save xml: %s", request) - session = self.get_session(request.session_id, context) - _, temp_path = tempfile.mkstemp() - session.save_xml(temp_path) - with open(temp_path, "r") as xml_file: - data = xml_file.read() - return core_pb2.SaveXmlResponse(data=data) - - def OpenXml( - self, request: core_pb2.OpenXmlRequest, context: ServicerContext - ) -> core_pb2.OpenXmlResponse: - """ - Import a session from the EmulationScript XML format - - :param request: open-xml request - :param context: context object - :return: Open-XML response or raise an exception if invalid XML file - """ - logger.debug("open xml: %s", request) - session = self.coreemu.create_session() - temp = tempfile.NamedTemporaryFile(delete=False) - temp.write(request.data.encode()) - temp.close() - temp_path = Path(temp.name) - file_path = Path(request.file) - try: - session.open_xml(temp_path, request.start) - session.name = file_path.name - session.file_path = file_path - return core_pb2.OpenXmlResponse(session_id=session.id, result=True) - except IOError: - logger.exception("error opening session file") - self.coreemu.delete_session(session.id) - context.abort(grpc.StatusCode.INVALID_ARGUMENT, "invalid xml file") - finally: - os.unlink(temp.name) - - def GetInterfaces( - self, request: core_pb2.GetInterfacesRequest, context: ServicerContext - ) -> core_pb2.GetInterfacesResponse: - """ - Retrieve all the interfaces of the system including bridges, virtual ethernet, - and loopback. - - :param request: get-interfaces request - :param context: context object - :return: get-interfaces response that has all the system's interfaces - """ - ifaces = [] - for iface in os.listdir("/sys/class/net"): - if iface.startswith("b.") or iface.startswith("veth") or iface == "lo": - continue - ifaces.append(iface) - return core_pb2.GetInterfacesResponse(ifaces=ifaces) - - def EmaneLink( - self, request: EmaneLinkRequest, context: ServicerContext - ) -> EmaneLinkResponse: - """ - Helps broadcast wireless link/unlink between EMANE nodes. - - :param request: get-interfaces request - :param context: context object - :return: emane link response with success status - """ - logger.debug("emane link: %s", request) - session = self.get_session(request.session_id, context) - flag = MessageFlags.ADD if request.linked else MessageFlags.DELETE - link = session.emane.get_nem_link(request.nem1, request.nem2, flag) - if link: - session.broadcast_link(link) - return EmaneLinkResponse(result=True) - else: - return EmaneLinkResponse(result=False) - - def GetNodeConfigService( - self, request: GetNodeConfigServiceRequest, context: ServicerContext - ) -> GetNodeConfigServiceResponse: - """ - Gets configuration, for a given configuration service, for a given node. 
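A hypothetical client-side sketch pairing the SaveXml and OpenXml RPCs above to round-trip a session through XML (stub setup as before; the file path is only an example):

```python
import grpc
from core.api.grpc import core_pb2, core_pb2_grpc

stub = core_pb2_grpc.CoreApiStub(grpc.insecure_channel("localhost:50051"))
xml_data = stub.SaveXml(core_pb2.SaveXmlRequest(session_id=1)).data
response = stub.OpenXml(
    core_pb2.OpenXmlRequest(data=xml_data, file="/tmp/session.xml", start=False)
)
print(response.session_id, response.result)
```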
- - :param request: get node config service request - :param context: grpc context - :return: get node config service response - """ - session = self.get_session(request.session_id, context) - node = self.get_node(session, request.node_id, context, CoreNode) - self.validate_service(request.name, context) - service = node.config_services.get(request.name) - if service: - config = service.render_config() - else: - service = self.coreemu.service_manager.get_service(request.name) - config = {x.id: x.default for x in service.default_configs} - return GetNodeConfigServiceResponse(config=config) - - def GetConfigServiceRendered( - self, request: GetConfigServiceRenderedRequest, context: ServicerContext - ) -> GetConfigServiceRenderedResponse: - """ - Retrieves the rendered file data for a given config service on a node. - - :param request: config service render request - :param context: grpc context - :return: rendered config service files - """ - session = self.get_session(request.session_id, context) - node = self.get_node(session, request.node_id, context, CoreNode) - self.validate_service(request.name, context) - service = node.config_services.get(request.name) - if not service: - context.abort( - grpc.StatusCode.NOT_FOUND, f"unknown node service {request.name}" - ) - rendered = service.get_rendered_templates() - return GetConfigServiceRenderedResponse(rendered=rendered) - - def GetConfigServiceDefaults( - self, request: GetConfigServiceDefaultsRequest, context: ServicerContext - ) -> GetConfigServiceDefaultsResponse: - """ - Get default values for a given configuration service. - - :param request: get config service defaults request - :param context: grpc context - :return: get config service defaults response - """ - session = self.get_session(request.session_id, context) - node = self.get_node(session, request.node_id, context, CoreNode) - service_class = self.validate_service(request.name, context) - service = service_class(node) - templates = service.get_templates() - config = {} - for configuration in service.default_configs: - config_option = common_pb2.ConfigOption( - label=configuration.label, - name=configuration.id, - value=configuration.default, - type=configuration.type.value, - select=configuration.options, - group="Settings", - ) - config[configuration.id] = config_option - modes = [] - for name, mode_config in service.modes.items(): - mode = configservices_pb2.ConfigMode(name=name, config=mode_config) - modes.append(mode) - return GetConfigServiceDefaultsResponse( - templates=templates, config=config, modes=modes - ) - - def GetEmaneEventChannel( - self, request: GetEmaneEventChannelRequest, context: ServicerContext - ) -> GetEmaneEventChannelResponse: - session = self.get_session(request.session_id, context) - service = session.emane.nem_service.get(request.nem_id) - if not service: - context.abort(grpc.StatusCode.NOT_FOUND, f"unknown nem id {request.nem_id}") - return GetEmaneEventChannelResponse( - group=service.group, port=service.port, device=service.device - ) - - def ExecuteScript(self, request, context): - existing_sessions = set(self.coreemu.sessions.keys()) - file_path = Path(request.script) - utils.execute_script(self.coreemu, file_path, request.args) - current_sessions = set(self.coreemu.sessions.keys()) - new_sessions = list(current_sessions.difference(existing_sessions)) - new_session = -1 - if new_sessions: - new_session = new_sessions[0] - return ExecuteScriptResponse(session_id=new_session) - - def WlanLink( - self, request: WlanLinkRequest, context: 
ServicerContext - ) -> WlanLinkResponse: - session = self.get_session(request.session_id, context) - wlan = self.get_node(session, request.wlan, context, WlanNode) - if not isinstance(wlan.wireless_model, BasicRangeModel): - context.abort( - grpc.StatusCode.NOT_FOUND, - f"wlan node {request.wlan} is not using BasicRangeModel", - ) - node1 = self.get_node(session, request.node1_id, context, CoreNode) - node2 = self.get_node(session, request.node2_id, context, CoreNode) - node1_iface, node2_iface = None, None - for iface in node1.get_ifaces(control=False): - if iface.net == wlan: - node1_iface = iface - break - for iface in node2.get_ifaces(control=False): - if iface.net == wlan: - node2_iface = iface - break - result = False - if node1_iface and node2_iface: - if request.linked: - wlan.link(node1_iface, node2_iface) - else: - wlan.unlink(node1_iface, node2_iface) - wlan.wireless_model.sendlinkmsg( - node1_iface, node2_iface, unlink=not request.linked - ) - result = True - return WlanLinkResponse(result=result) - - def EmanePathlosses( - self, - request_iterator: Iterable[EmanePathlossesRequest], - context: ServicerContext, - ) -> EmanePathlossesResponse: - for request in request_iterator: - session = self.get_session(request.session_id, context) - node1 = self.get_node(session, request.node1_id, context, CoreNode) - nem1 = grpcutils.get_nem_id(session, node1, request.iface1_id, context) - node2 = self.get_node(session, request.node2_id, context, CoreNode) - nem2 = grpcutils.get_nem_id(session, node2, request.iface2_id, context) - session.emane.publish_pathloss(nem1, nem2, request.rx1, request.rx2) - return EmanePathlossesResponse() - - def Linked( - self, request: LinkedRequest, context: ServicerContext - ) -> LinkedResponse: - session = self.get_session(request.session_id, context) - session.linked( - request.node1_id, - request.node2_id, - request.iface1_id, - request.iface2_id, - request.linked, - ) - return LinkedResponse() - - def WirelessLinked( - self, request: WirelessLinkedRequest, context: ServicerContext - ) -> WirelessLinkedResponse: - session = self.get_session(request.session_id, context) - wireless = self.get_node(session, request.wireless_id, context, WirelessNode) - wireless.link_control(request.node1_id, request.node2_id, request.linked) - return WirelessLinkedResponse() - - def WirelessConfig( - self, request: WirelessConfigRequest, context: ServicerContext - ) -> WirelessConfigResponse: - session = self.get_session(request.session_id, context) - wireless = self.get_node(session, request.wireless_id, context, WirelessNode) - options1 = request.options1 - options2 = options1 - if request.HasField("options2"): - options2 = request.options2 - options1 = grpcutils.convert_options_proto(options1) - options2 = grpcutils.convert_options_proto(options2) - wireless.link_config(request.node1_id, request.node2_id, options1, options2) - return WirelessConfigResponse() - - def GetWirelessConfig( - self, request: GetWirelessConfigRequest, context: ServicerContext - ) -> GetWirelessConfigResponse: - session = self.get_session(request.session_id, context) - try: - wireless = session.get_node(request.node_id, WirelessNode) - configs = wireless.get_config() - except CoreError: - configs = {x.id: x for x in WirelessNode.options} - config_options = {} - for config in configs.values(): - config_option = common_pb2.ConfigOption( - label=config.label, - name=config.id, - value=config.default, - type=config.type.value, - select=config.options, - group=config.group, - ) - 
config_options[config.id] = config_option - return GetWirelessConfigResponse(config=config_options) diff --git a/daemon/core/api/grpc/wrappers.py b/daemon/core/api/grpc/wrappers.py deleted file mode 100644 index f84e6a08..00000000 --- a/daemon/core/api/grpc/wrappers.py +++ /dev/null @@ -1,1220 +0,0 @@ -from dataclasses import dataclass, field -from enum import Enum -from pathlib import Path -from typing import Any, Optional - -from core.api.grpc import ( - common_pb2, - configservices_pb2, - core_pb2, - emane_pb2, - services_pb2, -) - - -class ConfigServiceValidationMode(Enum): - BLOCKING = 0 - NON_BLOCKING = 1 - TIMER = 2 - - -class ServiceValidationMode(Enum): - BLOCKING = 0 - NON_BLOCKING = 1 - TIMER = 2 - - -class MobilityAction(Enum): - START = 0 - PAUSE = 1 - STOP = 2 - - -class ConfigOptionType(Enum): - UINT8 = 1 - UINT16 = 2 - UINT32 = 3 - UINT64 = 4 - INT8 = 5 - INT16 = 6 - INT32 = 7 - INT64 = 8 - FLOAT = 9 - STRING = 10 - BOOL = 11 - - -class SessionState(Enum): - DEFINITION = 1 - CONFIGURATION = 2 - INSTANTIATION = 3 - RUNTIME = 4 - DATACOLLECT = 5 - SHUTDOWN = 6 - - -class NodeType(Enum): - DEFAULT = 0 - PHYSICAL = 1 - SWITCH = 4 - HUB = 5 - WIRELESS_LAN = 6 - RJ45 = 7 - TUNNEL = 8 - EMANE = 10 - TAP_BRIDGE = 11 - PEER_TO_PEER = 12 - CONTROL_NET = 13 - DOCKER = 15 - LXC = 16 - WIRELESS = 17 - PODMAN = 18 - - -class LinkType(Enum): - WIRELESS = 0 - WIRED = 1 - - -class ExceptionLevel(Enum): - DEFAULT = 0 - FATAL = 1 - ERROR = 2 - WARNING = 3 - NOTICE = 4 - - -class MessageType(Enum): - NONE = 0 - ADD = 1 - DELETE = 2 - CRI = 4 - LOCAL = 8 - STRING = 16 - TEXT = 32 - TTY = 64 - - -class ServiceAction(Enum): - START = 0 - STOP = 1 - RESTART = 2 - VALIDATE = 3 - - -class EventType: - SESSION = 0 - NODE = 1 - LINK = 2 - CONFIG = 3 - EXCEPTION = 4 - FILE = 5 - - -@dataclass -class ConfigService: - group: str - name: str - executables: list[str] - dependencies: list[str] - directories: list[str] - files: list[str] - startup: list[str] - validate: list[str] - shutdown: list[str] - validation_mode: ConfigServiceValidationMode - validation_timer: int - validation_period: float - - @classmethod - def from_proto(cls, proto: configservices_pb2.ConfigService) -> "ConfigService": - return ConfigService( - group=proto.group, - name=proto.name, - executables=proto.executables, - dependencies=proto.dependencies, - directories=proto.directories, - files=proto.files, - startup=proto.startup, - validate=proto.validate, - shutdown=proto.shutdown, - validation_mode=ConfigServiceValidationMode(proto.validation_mode), - validation_timer=proto.validation_timer, - validation_period=proto.validation_period, - ) - - -@dataclass -class ConfigServiceConfig: - node_id: int - name: str - templates: dict[str, str] - config: dict[str, str] - - @classmethod - def from_proto( - cls, proto: configservices_pb2.ConfigServiceConfig - ) -> "ConfigServiceConfig": - return ConfigServiceConfig( - node_id=proto.node_id, - name=proto.name, - templates=dict(proto.templates), - config=dict(proto.config), - ) - - -@dataclass -class ConfigServiceData: - templates: dict[str, str] = field(default_factory=dict) - config: dict[str, str] = field(default_factory=dict) - - -@dataclass -class ConfigServiceDefaults: - templates: dict[str, str] - config: dict[str, "ConfigOption"] - modes: dict[str, dict[str, str]] - - @classmethod - def from_proto( - cls, proto: configservices_pb2.GetConfigServiceDefaultsResponse - ) -> "ConfigServiceDefaults": - config = ConfigOption.from_dict(proto.config) - modes = {x.name: dict(x.config) for x in 
proto.modes} - return ConfigServiceDefaults( - templates=dict(proto.templates), config=config, modes=modes - ) - - -@dataclass -class Server: - name: str - host: str - - @classmethod - def from_proto(cls, proto: core_pb2.Server) -> "Server": - return Server(name=proto.name, host=proto.host) - - def to_proto(self) -> core_pb2.Server: - return core_pb2.Server(name=self.name, host=self.host) - - -@dataclass -class Service: - group: str - name: str - - @classmethod - def from_proto(cls, proto: services_pb2.Service) -> "Service": - return Service(group=proto.group, name=proto.name) - - -@dataclass -class ServiceDefault: - model: str - services: list[str] - - @classmethod - def from_proto(cls, proto: services_pb2.ServiceDefaults) -> "ServiceDefault": - return ServiceDefault(model=proto.model, services=list(proto.services)) - - -@dataclass -class NodeServiceData: - executables: list[str] = field(default_factory=list) - dependencies: list[str] = field(default_factory=list) - dirs: list[str] = field(default_factory=list) - configs: list[str] = field(default_factory=list) - startup: list[str] = field(default_factory=list) - validate: list[str] = field(default_factory=list) - validation_mode: ServiceValidationMode = ServiceValidationMode.NON_BLOCKING - validation_timer: int = 5 - shutdown: list[str] = field(default_factory=list) - meta: str = None - - @classmethod - def from_proto(cls, proto: services_pb2.NodeServiceData) -> "NodeServiceData": - return NodeServiceData( - executables=proto.executables, - dependencies=proto.dependencies, - dirs=proto.dirs, - configs=proto.configs, - startup=proto.startup, - validate=proto.validate, - validation_mode=ServiceValidationMode(proto.validation_mode), - validation_timer=proto.validation_timer, - shutdown=proto.shutdown, - meta=proto.meta, - ) - - def to_proto(self) -> services_pb2.NodeServiceData: - return services_pb2.NodeServiceData( - executables=self.executables, - dependencies=self.dependencies, - dirs=self.dirs, - configs=self.configs, - startup=self.startup, - validate=self.validate, - validation_mode=self.validation_mode.value, - validation_timer=self.validation_timer, - shutdown=self.shutdown, - meta=self.meta, - ) - - -@dataclass -class NodeServiceConfig: - node_id: int - service: str - data: NodeServiceData - files: dict[str, str] = field(default_factory=dict) - - @classmethod - def from_proto(cls, proto: services_pb2.NodeServiceConfig) -> "NodeServiceConfig": - return NodeServiceConfig( - node_id=proto.node_id, - service=proto.service, - data=NodeServiceData.from_proto(proto.data), - files=dict(proto.files), - ) - - -@dataclass -class ServiceConfig: - node_id: int - service: str - files: list[str] = None - directories: list[str] = None - startup: list[str] = None - validate: list[str] = None - shutdown: list[str] = None - - def to_proto(self) -> services_pb2.ServiceConfig: - return services_pb2.ServiceConfig( - node_id=self.node_id, - service=self.service, - files=self.files, - directories=self.directories, - startup=self.startup, - validate=self.validate, - shutdown=self.shutdown, - ) - - -@dataclass -class ServiceFileConfig: - node_id: int - service: str - file: str - data: str = field(repr=False) - - def to_proto(self) -> services_pb2.ServiceFileConfig: - return services_pb2.ServiceFileConfig( - node_id=self.node_id, service=self.service, file=self.file, data=self.data - ) - - -@dataclass -class BridgeThroughput: - node_id: int - throughput: float - - @classmethod - def from_proto(cls, proto: core_pb2.BridgeThroughput) -> "BridgeThroughput": - 
return BridgeThroughput(node_id=proto.node_id, throughput=proto.throughput) - - -@dataclass -class InterfaceThroughput: - node_id: int - iface_id: int - throughput: float - - @classmethod - def from_proto(cls, proto: core_pb2.InterfaceThroughput) -> "InterfaceThroughput": - return InterfaceThroughput( - node_id=proto.node_id, iface_id=proto.iface_id, throughput=proto.throughput - ) - - -@dataclass -class ThroughputsEvent: - session_id: int - bridge_throughputs: list[BridgeThroughput] - iface_throughputs: list[InterfaceThroughput] - - @classmethod - def from_proto(cls, proto: core_pb2.ThroughputsEvent) -> "ThroughputsEvent": - bridges = [BridgeThroughput.from_proto(x) for x in proto.bridge_throughputs] - ifaces = [InterfaceThroughput.from_proto(x) for x in proto.iface_throughputs] - return ThroughputsEvent( - session_id=proto.session_id, - bridge_throughputs=bridges, - iface_throughputs=ifaces, - ) - - -@dataclass -class CpuUsageEvent: - usage: float - - @classmethod - def from_proto(cls, proto: core_pb2.CpuUsageEvent) -> "CpuUsageEvent": - return CpuUsageEvent(usage=proto.usage) - - -@dataclass -class SessionLocation: - x: float - y: float - z: float - lat: float - lon: float - alt: float - scale: float - - @classmethod - def from_proto(cls, proto: core_pb2.SessionLocation) -> "SessionLocation": - return SessionLocation( - x=proto.x, - y=proto.y, - z=proto.z, - lat=proto.lat, - lon=proto.lon, - alt=proto.alt, - scale=proto.scale, - ) - - def to_proto(self) -> core_pb2.SessionLocation: - return core_pb2.SessionLocation( - x=self.x, - y=self.y, - z=self.z, - lat=self.lat, - lon=self.lon, - alt=self.alt, - scale=self.scale, - ) - - -@dataclass -class ExceptionEvent: - session_id: int - node_id: int - level: ExceptionLevel - source: str - date: str - text: str - opaque: str - - @classmethod - def from_proto( - cls, session_id: int, proto: core_pb2.ExceptionEvent - ) -> "ExceptionEvent": - return ExceptionEvent( - session_id=session_id, - node_id=proto.node_id, - level=ExceptionLevel(proto.level), - source=proto.source, - date=proto.date, - text=proto.text, - opaque=proto.opaque, - ) - - -@dataclass -class ConfigOption: - name: str - value: str - label: str = None - type: ConfigOptionType = None - group: str = None - select: list[str] = None - - @classmethod - def from_dict( - cls, config: dict[str, common_pb2.ConfigOption] - ) -> dict[str, "ConfigOption"]: - d = {} - for key, value in config.items(): - d[key] = ConfigOption.from_proto(value) - return d - - @classmethod - def to_dict(cls, config: dict[str, "ConfigOption"]) -> dict[str, str]: - return {k: v.value for k, v in config.items()} - - @classmethod - def from_proto(cls, proto: common_pb2.ConfigOption) -> "ConfigOption": - config_type = ConfigOptionType(proto.type) if proto.type is not None else None - return ConfigOption( - label=proto.label, - name=proto.name, - value=proto.value, - type=config_type, - group=proto.group, - select=proto.select, - ) - - def to_proto(self) -> common_pb2.ConfigOption: - config_type = self.type.value if self.type is not None else None - return common_pb2.ConfigOption( - label=self.label, - name=self.name, - value=self.value, - type=config_type, - select=self.select, - group=self.group, - ) - - -@dataclass -class Interface: - id: int - name: str = None - mac: str = None - ip4: str = None - ip4_mask: int = None - ip6: str = None - ip6_mask: int = None - net_id: int = None - flow_id: int = None - mtu: int = None - node_id: int = None - net2_id: int = None - nem_id: int = None - nem_port: int = None - - 
@classmethod - def from_proto(cls, proto: core_pb2.Interface) -> "Interface": - return Interface( - id=proto.id, - name=proto.name, - mac=proto.mac, - ip4=proto.ip4, - ip4_mask=proto.ip4_mask, - ip6=proto.ip6, - ip6_mask=proto.ip6_mask, - net_id=proto.net_id, - flow_id=proto.flow_id, - mtu=proto.mtu, - node_id=proto.node_id, - net2_id=proto.net2_id, - nem_id=proto.nem_id, - nem_port=proto.nem_port, - ) - - def to_proto(self) -> core_pb2.Interface: - return core_pb2.Interface( - id=self.id, - name=self.name, - mac=self.mac, - ip4=self.ip4, - ip4_mask=self.ip4_mask, - ip6=self.ip6, - ip6_mask=self.ip6_mask, - net_id=self.net_id, - flow_id=self.flow_id, - mtu=self.mtu, - node_id=self.node_id, - net2_id=self.net2_id, - ) - - -@dataclass -class LinkOptions: - jitter: int = 0 - key: int = 0 - mburst: int = 0 - mer: int = 0 - loss: float = 0.0 - bandwidth: int = 0 - burst: int = 0 - delay: int = 0 - dup: int = 0 - unidirectional: bool = False - buffer: int = 0 - - @classmethod - def from_proto(cls, proto: core_pb2.LinkOptions) -> "LinkOptions": - return LinkOptions( - jitter=proto.jitter, - key=proto.key, - mburst=proto.mburst, - mer=proto.mer, - loss=proto.loss, - bandwidth=proto.bandwidth, - burst=proto.burst, - delay=proto.delay, - dup=proto.dup, - unidirectional=proto.unidirectional, - buffer=proto.buffer, - ) - - def to_proto(self) -> core_pb2.LinkOptions: - return core_pb2.LinkOptions( - jitter=self.jitter, - key=self.key, - mburst=self.mburst, - mer=self.mer, - loss=self.loss, - bandwidth=self.bandwidth, - burst=self.burst, - delay=self.delay, - dup=self.dup, - unidirectional=self.unidirectional, - buffer=self.buffer, - ) - - -@dataclass -class Link: - node1_id: int - node2_id: int - type: LinkType = LinkType.WIRED - iface1: Interface = None - iface2: Interface = None - options: LinkOptions = None - network_id: int = None - label: str = None - color: str = None - - @classmethod - def from_proto(cls, proto: core_pb2.Link) -> "Link": - iface1 = None - if proto.HasField("iface1"): - iface1 = Interface.from_proto(proto.iface1) - iface2 = None - if proto.HasField("iface2"): - iface2 = Interface.from_proto(proto.iface2) - options = None - if proto.HasField("options"): - options = LinkOptions.from_proto(proto.options) - return Link( - type=LinkType(proto.type), - node1_id=proto.node1_id, - node2_id=proto.node2_id, - iface1=iface1, - iface2=iface2, - options=options, - network_id=proto.network_id, - label=proto.label, - color=proto.color, - ) - - def to_proto(self) -> core_pb2.Link: - iface1 = self.iface1.to_proto() if self.iface1 else None - iface2 = self.iface2.to_proto() if self.iface2 else None - options = self.options.to_proto() if self.options else None - return core_pb2.Link( - type=self.type.value, - node1_id=self.node1_id, - node2_id=self.node2_id, - iface1=iface1, - iface2=iface2, - options=options, - network_id=self.network_id, - label=self.label, - color=self.color, - ) - - def is_symmetric(self) -> bool: - result = True - if self.options: - result = self.options.unidirectional is False - return result - - -@dataclass -class SessionSummary: - id: int - state: SessionState - nodes: int - file: str - dir: str - - @classmethod - def from_proto(cls, proto: core_pb2.SessionSummary) -> "SessionSummary": - return SessionSummary( - id=proto.id, - state=SessionState(proto.state), - nodes=proto.nodes, - file=proto.file, - dir=proto.dir, - ) - - def to_proto(self) -> core_pb2.SessionSummary: - return core_pb2.SessionSummary( - id=self.id, - state=self.state.value, - nodes=self.nodes, - 
file=self.file, - dir=self.dir, - ) - - -@dataclass -class Hook: - state: SessionState - file: str - data: str - - @classmethod - def from_proto(cls, proto: core_pb2.Hook) -> "Hook": - return Hook(state=SessionState(proto.state), file=proto.file, data=proto.data) - - def to_proto(self) -> core_pb2.Hook: - return core_pb2.Hook(state=self.state.value, file=self.file, data=self.data) - - -@dataclass -class EmaneModelConfig: - node_id: int - model: str - iface_id: int = -1 - config: dict[str, ConfigOption] = None - - @classmethod - def from_proto(cls, proto: emane_pb2.GetEmaneModelConfig) -> "EmaneModelConfig": - iface_id = proto.iface_id if proto.iface_id != -1 else None - config = ConfigOption.from_dict(proto.config) - return EmaneModelConfig( - node_id=proto.node_id, iface_id=iface_id, model=proto.model, config=config - ) - - def to_proto(self) -> emane_pb2.EmaneModelConfig: - config = ConfigOption.to_dict(self.config) - return emane_pb2.EmaneModelConfig( - node_id=self.node_id, - model=self.model, - iface_id=self.iface_id, - config=config, - ) - - -@dataclass -class Position: - x: float - y: float - - @classmethod - def from_proto(cls, proto: core_pb2.Position) -> "Position": - return Position(x=proto.x, y=proto.y) - - def to_proto(self) -> core_pb2.Position: - return core_pb2.Position(x=self.x, y=self.y) - - -@dataclass -class Geo: - lat: float = None - lon: float = None - alt: float = None - - @classmethod - def from_proto(cls, proto: core_pb2.Geo) -> "Geo": - return Geo(lat=proto.lat, lon=proto.lon, alt=proto.alt) - - def to_proto(self) -> core_pb2.Geo: - return core_pb2.Geo(lat=self.lat, lon=self.lon, alt=self.alt) - - -@dataclass -class Node: - id: int = None - name: str = None - type: NodeType = NodeType.DEFAULT - model: str = None - position: Position = Position(x=0, y=0) - services: set[str] = field(default_factory=set) - config_services: set[str] = field(default_factory=set) - emane: str = None - icon: str = None - image: str = None - server: str = None - geo: Geo = None - dir: str = None - channel: str = None - canvas: int = None - - # configurations - emane_model_configs: dict[ - tuple[str, Optional[int]], dict[str, ConfigOption] - ] = field(default_factory=dict, repr=False) - wlan_config: dict[str, ConfigOption] = field(default_factory=dict, repr=False) - wireless_config: dict[str, ConfigOption] = field(default_factory=dict, repr=False) - mobility_config: dict[str, ConfigOption] = field(default_factory=dict, repr=False) - service_configs: dict[str, NodeServiceData] = field( - default_factory=dict, repr=False - ) - service_file_configs: dict[str, dict[str, str]] = field( - default_factory=dict, repr=False - ) - config_service_configs: dict[str, ConfigServiceData] = field( - default_factory=dict, repr=False - ) - - @classmethod - def from_proto(cls, proto: core_pb2.Node) -> "Node": - service_configs = {} - service_file_configs = {} - for service, node_config in proto.service_configs.items(): - service_configs[service] = NodeServiceData.from_proto(node_config.data) - service_file_configs[service] = dict(node_config.files) - emane_configs = {} - for emane_config in proto.emane_configs: - iface_id = None if emane_config.iface_id == -1 else emane_config.iface_id - model = emane_config.model - key = (model, iface_id) - emane_configs[key] = ConfigOption.from_dict(emane_config.config) - config_service_configs = {} - for service, service_config in proto.config_service_configs.items(): - config_service_configs[service] = ConfigServiceData( - templates=dict(service_config.templates), - 
config=dict(service_config.config), - ) - return Node( - id=proto.id, - name=proto.name, - type=NodeType(proto.type), - model=proto.model or None, - position=Position.from_proto(proto.position), - services=set(proto.services), - config_services=set(proto.config_services), - emane=proto.emane, - icon=proto.icon, - image=proto.image, - server=proto.server, - geo=Geo.from_proto(proto.geo), - dir=proto.dir, - channel=proto.channel, - canvas=proto.canvas, - wlan_config=ConfigOption.from_dict(proto.wlan_config), - mobility_config=ConfigOption.from_dict(proto.mobility_config), - service_configs=service_configs, - service_file_configs=service_file_configs, - config_service_configs=config_service_configs, - emane_model_configs=emane_configs, - wireless_config=ConfigOption.from_dict(proto.wireless_config), - ) - - def to_proto(self) -> core_pb2.Node: - emane_configs = [] - for key, config in self.emane_model_configs.items(): - model, iface_id = key - if iface_id is None: - iface_id = -1 - config = {k: v.to_proto() for k, v in config.items()} - emane_config = emane_pb2.NodeEmaneConfig( - iface_id=iface_id, model=model, config=config - ) - emane_configs.append(emane_config) - service_configs = {} - for service, service_data in self.service_configs.items(): - service_configs[service] = services_pb2.NodeServiceConfig( - service=service, data=service_data.to_proto() - ) - for service, file_configs in self.service_file_configs.items(): - service_config = service_configs.get(service) - if service_config: - service_config.files.update(file_configs) - else: - service_configs[service] = services_pb2.NodeServiceConfig( - service=service, files=file_configs - ) - config_service_configs = {} - for service, service_config in self.config_service_configs.items(): - config_service_configs[service] = configservices_pb2.ConfigServiceConfig( - templates=service_config.templates, config=service_config.config - ) - return core_pb2.Node( - id=self.id, - name=self.name, - type=self.type.value, - model=self.model, - position=self.position.to_proto(), - services=self.services, - config_services=self.config_services, - emane=self.emane, - icon=self.icon, - image=self.image, - server=self.server, - dir=self.dir, - channel=self.channel, - canvas=self.canvas, - wlan_config={k: v.to_proto() for k, v in self.wlan_config.items()}, - mobility_config={k: v.to_proto() for k, v in self.mobility_config.items()}, - service_configs=service_configs, - config_service_configs=config_service_configs, - emane_configs=emane_configs, - wireless_config={k: v.to_proto() for k, v in self.wireless_config.items()}, - ) - - def set_wlan(self, config: dict[str, str]) -> None: - for key, value in config.items(): - option = ConfigOption(name=key, value=value) - self.wlan_config[key] = option - - def set_mobility(self, config: dict[str, str]) -> None: - for key, value in config.items(): - option = ConfigOption(name=key, value=value) - self.mobility_config[key] = option - - def set_emane_model( - self, model: str, config: dict[str, str], iface_id: int = None - ) -> None: - key = (model, iface_id) - config_options = self.emane_model_configs.setdefault(key, {}) - for key, value in config.items(): - option = ConfigOption(name=key, value=value) - config_options[key] = option - - -@dataclass -class Session: - id: int = None - state: SessionState = SessionState.DEFINITION - nodes: dict[int, Node] = field(default_factory=dict) - links: list[Link] = field(default_factory=list) - dir: str = None - user: str = None - default_services: dict[str, set[str]] = 
field(default_factory=dict) - location: SessionLocation = SessionLocation( - x=0.0, y=0.0, z=0.0, lat=47.57917, lon=-122.13232, alt=2.0, scale=150.0 - ) - hooks: dict[str, Hook] = field(default_factory=dict) - metadata: dict[str, str] = field(default_factory=dict) - file: Path = None - options: dict[str, ConfigOption] = field(default_factory=dict) - servers: list[Server] = field(default_factory=list) - - @classmethod - def from_proto(cls, proto: core_pb2.Session) -> "Session": - nodes: dict[int, Node] = {x.id: Node.from_proto(x) for x in proto.nodes} - links = [Link.from_proto(x) for x in proto.links] - default_services = {x.model: set(x.services) for x in proto.default_services} - hooks = {x.file: Hook.from_proto(x) for x in proto.hooks} - file_path = Path(proto.file) if proto.file else None - options = ConfigOption.from_dict(proto.options) - servers = [Server.from_proto(x) for x in proto.servers] - return Session( - id=proto.id, - state=SessionState(proto.state), - nodes=nodes, - links=links, - dir=proto.dir, - user=proto.user, - default_services=default_services, - location=SessionLocation.from_proto(proto.location), - hooks=hooks, - metadata=dict(proto.metadata), - file=file_path, - options=options, - servers=servers, - ) - - def to_proto(self) -> core_pb2.Session: - nodes = [x.to_proto() for x in self.nodes.values()] - links = [x.to_proto() for x in self.links] - hooks = [x.to_proto() for x in self.hooks.values()] - options = {k: v.to_proto() for k, v in self.options.items()} - servers = [x.to_proto() for x in self.servers] - default_services = [] - for model, services in self.default_services.items(): - default_service = services_pb2.ServiceDefaults( - model=model, services=services - ) - default_services.append(default_service) - file = str(self.file) if self.file else None - return core_pb2.Session( - id=self.id, - state=self.state.value, - nodes=nodes, - links=links, - dir=self.dir, - user=self.user, - default_services=default_services, - location=self.location.to_proto(), - hooks=hooks, - metadata=self.metadata, - file=file, - options=options, - servers=servers, - ) - - def add_node( - self, - _id: int, - *, - name: str = None, - _type: NodeType = NodeType.DEFAULT, - model: str = "PC", - position: Position = None, - geo: Geo = None, - emane: str = None, - image: str = None, - server: str = None, - ) -> Node: - node = Node( - id=_id, - name=name, - type=_type, - model=model, - position=position, - geo=geo, - emane=emane, - image=image, - server=server, - ) - self.nodes[node.id] = node - return node - - def add_link( - self, - *, - node1: Node, - node2: Node, - iface1: Interface = None, - iface2: Interface = None, - options: LinkOptions = None, - ) -> Link: - link = Link( - node1_id=node1.id, - node2_id=node2.id, - iface1=iface1, - iface2=iface2, - options=options, - ) - self.links.append(link) - return link - - def set_options(self, config: dict[str, str]) -> None: - for key, value in config.items(): - option = ConfigOption(name=key, value=value) - self.options[key] = option - - -@dataclass -class CoreConfig: - services: list[Service] = field(default_factory=list) - config_services: list[ConfigService] = field(default_factory=list) - emane_models: list[str] = field(default_factory=list) - - @classmethod - def from_proto(cls, proto: core_pb2.GetConfigResponse) -> "CoreConfig": - services = [Service.from_proto(x) for x in proto.services] - config_services = [ConfigService.from_proto(x) for x in proto.config_services] - return CoreConfig( - services=services, - 
config_services=config_services, - emane_models=list(proto.emane_models), - ) - - -@dataclass -class LinkEvent: - message_type: MessageType - link: Link - - @classmethod - def from_proto(cls, proto: core_pb2.LinkEvent) -> "LinkEvent": - return LinkEvent( - message_type=MessageType(proto.message_type), - link=Link.from_proto(proto.link), - ) - - -@dataclass -class NodeEvent: - message_type: MessageType - node: Node - - @classmethod - def from_proto(cls, proto: core_pb2.NodeEvent) -> "NodeEvent": - return NodeEvent( - message_type=MessageType(proto.message_type), - node=Node.from_proto(proto.node), - ) - - -@dataclass -class SessionEvent: - node_id: int - event: int - name: str - data: str - time: float - - @classmethod - def from_proto(cls, proto: core_pb2.SessionEvent) -> "SessionEvent": - return SessionEvent( - node_id=proto.node_id, - event=proto.event, - name=proto.name, - data=proto.data, - time=proto.time, - ) - - -@dataclass -class FileEvent: - message_type: MessageType - node_id: int - name: str - mode: str - number: int - type: str - source: str - data: str - compressed_data: str - - @classmethod - def from_proto(cls, proto: core_pb2.FileEvent) -> "FileEvent": - return FileEvent( - message_type=MessageType(proto.message_type), - node_id=proto.node_id, - name=proto.name, - mode=proto.mode, - number=proto.number, - type=proto.type, - source=proto.source, - data=proto.data, - compressed_data=proto.compressed_data, - ) - - -@dataclass -class ConfigEvent: - message_type: MessageType - node_id: int - object: str - type: int - data_types: list[int] - data_values: str - captions: str - bitmap: str - possible_values: str - groups: str - iface_id: int - network_id: int - opaque: str - - @classmethod - def from_proto(cls, proto: core_pb2.ConfigEvent) -> "ConfigEvent": - return ConfigEvent( - message_type=MessageType(proto.message_type), - node_id=proto.node_id, - object=proto.object, - type=proto.type, - data_types=list(proto.data_types), - data_values=proto.data_values, - captions=proto.captions, - possible_values=proto.possible_values, - groups=proto.groups, - iface_id=proto.iface_id, - network_id=proto.network_id, - opaque=proto.opaque, - ) - - -@dataclass -class Event: - session_id: int - source: str = None - session_event: SessionEvent = None - node_event: NodeEvent = None - link_event: LinkEvent = None - config_event: Any = None - exception_event: ExceptionEvent = None - file_event: FileEvent = None - - @classmethod - def from_proto(cls, proto: core_pb2.Event) -> "Event": - source = proto.source if proto.source else None - node_event = None - link_event = None - exception_event = None - session_event = None - file_event = None - config_event = None - if proto.HasField("node_event"): - node_event = NodeEvent.from_proto(proto.node_event) - elif proto.HasField("link_event"): - link_event = LinkEvent.from_proto(proto.link_event) - elif proto.HasField("exception_event"): - exception_event = ExceptionEvent.from_proto( - proto.session_id, proto.exception_event - ) - elif proto.HasField("session_event"): - session_event = SessionEvent.from_proto(proto.session_event) - elif proto.HasField("file_event"): - file_event = FileEvent.from_proto(proto.file_event) - elif proto.HasField("config_event"): - config_event = ConfigEvent.from_proto(proto.config_event) - return Event( - session_id=proto.session_id, - source=source, - node_event=node_event, - link_event=link_event, - exception_event=exception_event, - session_event=session_event, - file_event=file_event, - config_event=config_event, - ) - - 
-@dataclass -class EmaneEventChannel: - group: str - port: int - device: str - - @classmethod - def from_proto( - cls, proto: emane_pb2.GetEmaneEventChannelResponse - ) -> "EmaneEventChannel": - return EmaneEventChannel( - group=proto.group, port=proto.port, device=proto.device - ) - - -@dataclass -class EmanePathlossesRequest: - session_id: int - node1_id: int - rx1: float - iface1_id: int - node2_id: int - rx2: float - iface2_id: int - - def to_proto(self) -> emane_pb2.EmanePathlossesRequest: - return emane_pb2.EmanePathlossesRequest( - session_id=self.session_id, - node1_id=self.node1_id, - rx1=self.rx1, - iface1_id=self.iface1_id, - node2_id=self.node2_id, - rx2=self.rx2, - iface2_id=self.iface2_id, - ) - - -@dataclass(frozen=True) -class MoveNodesRequest: - session_id: int - node_id: int - source: str = field(compare=False, default=None) - position: Position = field(compare=False, default=None) - geo: Geo = field(compare=False, default=None) - - def to_proto(self) -> core_pb2.MoveNodesRequest: - position = self.position.to_proto() if self.position else None - geo = self.geo.to_proto() if self.geo else None - return core_pb2.MoveNodesRequest( - session_id=self.session_id, - node_id=self.node_id, - source=self.source, - position=position, - geo=geo, - ) diff --git a/daemon/core/broker.py b/daemon/core/broker.py new file mode 100644 index 00000000..19d9713b --- /dev/null +++ b/daemon/core/broker.py @@ -0,0 +1,1108 @@ +""" +Broker class that is part of the session object. Handles distributing parts of the emulation out to +other emulation servers. The broker is consulted when handling messages to determine if messages +should be handled locally or forwarded on to another emulation server. +""" + +import logging +import os +import select +import socket +import threading + +from core.api import coreapi +from core.coreobj import PyCoreNet +from core.coreobj import PyCoreNode +from core.enumerations import ConfigDataTypes +from core.enumerations import ConfigFlags +from core.enumerations import ConfigTlvs +from core.enumerations import EventTlvs +from core.enumerations import EventTypes +from core.enumerations import ExecuteTlvs +from core.enumerations import FileTlvs +from core.enumerations import LinkTlvs +from core.enumerations import MessageFlags +from core.enumerations import MessageTypes +from core.enumerations import NodeTlvs +from core.enumerations import NodeTypes +from core.enumerations import RegisterTlvs +from core.misc import nodeutils +from core.misc.ipaddress import IpAddress +from core.netns.vif import GreTap +from core.netns.vnet import GreTapBridge +from core.phys.pnodes import PhysicalNode + + +class CoreDistributedServer(object): + """ + Represents CORE daemon servers for communication. + """ + + def __init__(self, name, host, port): + """ + Creates a CoreServer instance. + + :param str name: name of the CORE server + :param str host: server address + :param int port: server port + """ + self.name = name + self.host = host + self.port = port + self.sock = None + self.instantiation_complete = False + + def connect(self): + """ + Connect to CORE server and save connection. + + :return: nothing + """ + if self.sock: + raise ValueError("socket already connected") + + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + + try: + sock.connect((self.host, self.port)) + except IOError as e: + sock.close() + raise e + + self.sock = sock + + def close(self): + """ + Close connection with CORE server. 
+ + :return: nothing + """ + if self.sock is not None: + self.sock.close() + self.sock = None + + +class CoreBroker(object): + """ + Helps with brokering messages between CORE daemon servers. + """ + + # configurable manager name + name = "broker" + + # configurable manager type + config_type = RegisterTlvs.UTILITY.value + + def __init__(self, session): + """ + Creates a CoreBroker instance. + + :param core.session.Session session: session this manager is tied to + :return: nothing + """ + + # ConfigurableManager.__init__(self) + self.session = session + self.session_clients = [] + self.session_id_master = None + self.myip = None + # dict containing tuples of (host, port, sock) + self.servers = {} + self.servers_lock = threading.Lock() + self.addserver("localhost", None, None) + # dict containing node number to server name mapping + self.nodemap = {} + # this lock also protects self.nodecounts + self.nodemap_lock = threading.Lock() + # reference counts of nodes on servers + self.nodecounts = {} + # set of node numbers that are link-layer nodes (networks) + self.network_nodes = set() + # set of node numbers that are PhysicalNode nodes + self.physical_nodes = set() + # allows for other message handlers to process API messages (e.g. EMANE) + self.handlers = set() + self.handlers.add(self.handle_distributed) + # dict with tunnel key to tunnel device mapping + self.tunnels = {} + self.dorecvloop = False + self.recvthread = None + self.bootcount = 0 + + def startup(self): + """ + Build tunnels between network-layer nodes now that all node + and link information has been received; called when session + enters the instantation state. + """ + self.addnettunnels() + self.writeservers() + + def shutdown(self): + """ + Close all active sockets; called when the session enters the + data collect state + """ + self.reset() + with self.servers_lock: + while len(self.servers) > 0: + name, server = self.servers.popitem() + if server.sock is not None: + logging.info("closing connection with %s: %s:%s", name, server.host, server.port) + server.close() + self.dorecvloop = False + if self.recvthread is not None: + self.recvthread.join() + + def reset(self): + """ + Reset to initial state. + """ + logging.info("clearing state") + self.nodemap_lock.acquire() + self.nodemap.clear() + for server, count in self.nodecounts.iteritems(): + if count < 1: + self.delserver(server) + self.nodecounts.clear() + self.bootcount = 0 + self.nodemap_lock.release() + self.network_nodes.clear() + self.physical_nodes.clear() + while len(self.tunnels) > 0: + _key, gt = self.tunnels.popitem() + gt.shutdown() + + def startrecvloop(self): + """ + Spawn the receive loop for receiving messages. + """ + if self.recvthread is not None: + logging.info("server receive loop already started") + if self.recvthread.isAlive(): + return + else: + self.recvthread.join() + # start reading data from connected sockets + logging.info("starting server receive loop") + self.dorecvloop = True + self.recvthread = threading.Thread(target=self.recvloop) + self.recvthread.daemon = True + self.recvthread.start() + + def recvloop(self): + """ + Receive loop for receiving messages from server sockets. 
+ """ + self.dorecvloop = True + # note: this loop continues after emulation is stopped, + # even with 0 servers + while self.dorecvloop: + rlist = [] + with self.servers_lock: + # build a socket list for select call + for server in self.servers.itervalues(): + if server.sock is not None: + rlist.append(server.sock) + r, _w, _x = select.select(rlist, [], [], 1.0) + for sock in r: + server = self.getserverbysock(sock) + logging.info("attempting to receive from server: peer:%s remote:%s", + server.sock.getpeername(), server.sock.getsockname()) + if server is None: + # servers may have changed; loop again + continue + rcvlen = self.recv(server) + if rcvlen == 0: + logging.info("connection with server(%s) closed: %s:%s", server.name, server.host, server.port) + + def recv(self, server): + """ + Receive data on an emulation server socket and broadcast it to + all connected session handlers. Returns the length of data recevied + and forwarded. Return value of zero indicates the socket has closed + and should be removed from the self.servers dict. + + :param CoreDistributedServer server: server to receive from + :return: message length + :rtype: int + """ + msghdr = server.sock.recv(coreapi.CoreMessage.header_len) + if len(msghdr) == 0: + # server disconnected + logging.info("server disconnected, closing server") + server.close() + return 0 + + if len(msghdr) != coreapi.CoreMessage.header_len: + logging.warn("warning: broker received not enough data len=%s", len(msghdr)) + return len(msghdr) + + msgtype, msgflags, msglen = coreapi.CoreMessage.unpack_header(msghdr) + msgdata = server.sock.recv(msglen) + data = msghdr + msgdata + count = None + logging.debug("received message type: %s", MessageTypes(msgtype)) + # snoop exec response for remote interactive TTYs + if msgtype == MessageTypes.EXECUTE.value and msgflags & MessageFlags.TTY.value: + data = self.fixupremotetty(msghdr, msgdata, server.host) + logging.debug("created remote tty message: %s", data) + elif msgtype == MessageTypes.NODE.value: + # snoop node delete response to decrement node counts + if msgflags & MessageFlags.DELETE.value: + msg = coreapi.CoreNodeMessage(msgflags, msghdr, msgdata) + nodenum = msg.get_tlv(NodeTlvs.NUMBER.value) + if nodenum is not None: + count = self.delnodemap(server, nodenum) + elif msgtype == MessageTypes.LINK.value: + # this allows green link lines for remote WLANs + msg = coreapi.CoreLinkMessage(msgflags, msghdr, msgdata) + self.session.sdt.handle_distributed(msg) + elif msgtype == MessageTypes.EVENT.value: + msg = coreapi.CoreEventMessage(msgflags, msghdr, msgdata) + eventtype = msg.get_tlv(EventTlvs.TYPE.value) + if eventtype == EventTypes.INSTANTIATION_COMPLETE.value: + server.instantiation_complete = True + if self.instantiation_complete(): + self.session.check_runtime() + else: + logging.error("unknown message type received: %s", msgtype) + + try: + for session_client in self.session_clients: + session_client.sendall(data) + except IOError: + logging.exception("error sending message") + + if count is not None and count < 1: + return 0 + else: + return len(data) + + def addserver(self, name, host, port): + """ + Add a new server, and try to connect to it. If we"re already connected to this + (host, port), then leave it alone. When host,port is None, do not try to connect. 
+ + :param str name: name of server + :param str host: server address + :param int port: server port + :return: nothing + """ + with self.servers_lock: + server = self.servers.get(name) + if server is not None: + if host == server.host and port == server.port and server.sock is not None: + # leave this socket connected + return + + logging.info("closing connection with %s @ %s:%s", name, server.host, server.port) + server.close() + del self.servers[name] + + logging.info("adding broker server(%s): %s:%s", name, host, port) + server = CoreDistributedServer(name, host, port) + if host is not None and port is not None: + try: + server.connect() + except IOError: + logging.exception("error connecting to server(%s): %s:%s", name, host, port) + if server.sock is not None: + self.startrecvloop() + self.servers[name] = server + + def delserver(self, server): + """ + Remove a server and hang up any connection. + + :param CoreDistributedServer server: server to delete + :return: nothing + """ + with self.servers_lock: + try: + s = self.servers.pop(server.name) + if s != server: + raise ValueError("server removed was not the server provided") + except KeyError: + logging.exception("error deleting server") + + if server.sock is not None: + logging.info("closing connection with %s @ %s:%s", server.name, server.host, server.port) + server.close() + + def getserverbyname(self, name): + """ + Return the server object having the given name, or None. + + :param str name: name of server to retrieve + :return: server for given name + :rtype: CoreDistributedServer + """ + with self.servers_lock: + return self.servers.get(name) + + def getserverbysock(self, sock): + """ + Return the server object corresponding to the given socket, or None. + + :param sock: socket associated with a server + :return: core server associated wit the socket + :rtype: CoreDistributedServer + """ + with self.servers_lock: + for server in self.servers.itervalues(): + if server.sock == sock: + return server + return None + + def getservers(self): + """ + Return a list of servers sorted by name. + + :return: sorted server list + :rtype: list + """ + with self.servers_lock: + return sorted(self.servers.values(), key=lambda x: x.name) + + def getservernames(self): + """ + Return a sorted list of server names (keys from self.servers). + + :return: sorted server names + :rtype: list + """ + with self.servers_lock: + return sorted(self.servers.keys()) + + def tunnelkey(self, n1num, n2num): + """ + Compute a 32-bit key used to uniquely identify a GRE tunnel. + The hash(n1num), hash(n2num) values are used, so node numbers may be + None or string values (used for e.g. "ctrlnet"). + + :param int n1num: node one id + :param int n2num: node two id + :return: tunnel key for the node pair + :rtype: int + """ + sid = self.session_id_master + if sid is None: + # this is the master session + sid = self.session.id + + key = (sid << 16) ^ hash(n1num) ^ (hash(n2num) << 8) + return key & 0xFFFFFFFF + + def addtunnel(self, remoteip, n1num, n2num, localnum): + """ + Adds a new GreTapBridge between nodes on two different machines. 
+ + :param str remoteip: remote address for tunnel + :param int n1num: node one id + :param int n2num: node two id + :param int localnum: local id + :return: nothing + """ + key = self.tunnelkey(n1num, n2num) + if localnum == n2num: + remotenum = n1num + else: + remotenum = n2num + + if key in self.tunnels.keys(): + logging.warn("tunnel with key %s (%s-%s) already exists!", key, n1num, n2num) + else: + objid = key & ((1 << 16) - 1) + logging.info("adding tunnel for %s-%s to %s with key %s", n1num, n2num, remoteip, key) + if localnum in self.physical_nodes: + # no bridge is needed on physical nodes; use the GreTap directly + gt = GreTap(node=None, name=None, session=self.session, + remoteip=remoteip, key=key) + else: + gt = self.session.add_object(cls=GreTapBridge, objid=objid, + policy="ACCEPT", remoteip=remoteip, key=key) + gt.localnum = localnum + gt.remotenum = remotenum + self.tunnels[key] = gt + + def addnettunnels(self): + """ + Add GreTaps between network devices on different machines. + The GreTapBridge is not used since that would add an extra bridge. + """ + logging.debug("adding network tunnels for nodes: %s", self.network_nodes) + for n in self.network_nodes: + self.addnettunnel(n) + + def addnettunnel(self, node_id): + """ + Add network tunnel between node and broker. + + :param int node_id: node id of network to add tunnel to + :return: list of gre taps + :rtype: list + """ + try: + net = self.session.get_object(node_id) + logging.info("adding net tunnel for: id(%s) %s", node_id, net) + except KeyError: + raise KeyError("network node %s not found" % node_id) + + # add other nets here that do not require tunnels + if nodeutils.is_node(net, NodeTypes.EMANE_NET): + logging.warn("emane network does not require a tunnel") + return None + + server_interface = getattr(net, "serverintf", None) + if nodeutils.is_node(net, NodeTypes.CONTROL_NET) and server_interface is not None: + logging.warn("control networks with server interfaces do not need a tunnel") + return None + + servers = self.getserversbynode(node_id) + if len(servers) < 2: + logging.warn("not enough servers to create a tunnel: %s", servers) + return None + + hosts = [] + for server in servers: + if server.host is None: + continue + logging.info("adding server host for net tunnel: %s", server.host) + hosts.append(server.host) + + if len(hosts) == 0: + for session_client in self.session_clients: + # get IP address from API message sender (master) + if session_client.client_address != "": + address = session_client.client_address[0] + logging.info("adding session_client host: %s", address) + hosts.append(address) + + r = [] + for host in hosts: + if self.myip: + # we are the remote emulation server + myip = self.myip + else: + # we are the session master + myip = host + key = self.tunnelkey(node_id, IpAddress.to_int(myip)) + if key in self.tunnels.keys(): + logging.info("tunnel already exists, returning existing tunnel: %s", key) + gt = self.tunnels[key] + r.append(gt) + continue + logging.info("adding tunnel for net %s to %s with key %s", node_id, host, key) + gt = GreTap(node=None, name=None, session=self.session, remoteip=host, key=key) + self.tunnels[key] = gt + r.append(gt) + # attaching to net will later allow gt to be destroyed + # during net.shutdown() + net.attach(gt) + + return r + + def deltunnel(self, n1num, n2num): + """ + Delete tunnel between nodes. 
+ + :param int n1num: node one id + :param int n2num: node two id + :return: nothing + """ + key = self.tunnelkey(n1num, n2num) + try: + logging.info("deleting tunnel between %s - %s with key: %s", n1num, n2num, key) + gt = self.tunnels.pop(key) + except KeyError: + gt = None + if gt: + self.session.delete_object(gt.objid) + del gt + + def gettunnel(self, n1num, n2num): + """ + Return the GreTap between two nodes if it exists. + + :param int n1num: node one id + :param int n2num: node two id + :return: gre tap between nodes or none + """ + key = self.tunnelkey(n1num, n2num) + logging.debug("checking for tunnel(%s) in: %s", key, self.tunnels.keys()) + if key in self.tunnels.keys(): + return self.tunnels[key] + else: + return None + + def addnodemap(self, server, nodenum): + """ + Record a node number to emulation server mapping. + + :param CoreDistributedServer server: core server to associate node with + :param int nodenum: node id + :return: nothing + """ + with self.nodemap_lock: + if nodenum in self.nodemap: + if server in self.nodemap[nodenum]: + return + self.nodemap[nodenum].add(server) + else: + self.nodemap[nodenum] = {server} + + if server in self.nodecounts: + self.nodecounts[server] += 1 + else: + self.nodecounts[server] = 1 + + def delnodemap(self, server, nodenum): + """ + Remove a node number to emulation server mapping. + Return the number of nodes left on this server. + + :param CoreDistributedServer server: server to remove from node map + :param int nodenum: node id + :return: number of nodes left on server + :rtype: int + """ + count = None + with self.nodemap_lock: + if nodenum not in self.nodemap: + return count + + self.nodemap[nodenum].remove(server) + if server in self.nodecounts: + count = self.nodecounts[server] + count -= 1 + self.nodecounts[server] = count + + return count + + def getserversbynode(self, nodenum): + """ + Retrieve a set of emulation servers given a node number. + + :param int nodenum: node id + :return: core server associated with node + :rtype: set + """ + with self.nodemap_lock: + if nodenum not in self.nodemap: + return set() + return self.nodemap[nodenum] + + def addnet(self, nodenum): + """ + Add a node number to the list of link-layer nodes. + + :param int nodenum: node id to add + :return: nothing + """ + logging.info("adding net to broker: %s", nodenum) + self.network_nodes.add(nodenum) + logging.info("broker network nodes: %s", self.network_nodes) + + def addphys(self, nodenum): + """ + Add a node number to the list of physical nodes. + + :param int nodenum: node id to add + :return: nothing + """ + self.physical_nodes.add(nodenum) + + def handle_message(self, message): + """ + Handle an API message. Determine whether this needs to be handled + by the local server or forwarded on to another one. + Returns True when message does not need to be handled locally, + and performs forwarding if required. + Returning False indicates this message should be handled locally. + + :param core.api.coreapi.CoreMessage message: message to handle + :return: true or false for handling locally + :rtype: bool + """ + servers = set() + handle_locally = False + # Do not forward messages when in definition state + # (for e.g. 
configuring services) + if self.session.state == EventTypes.DEFINITION_STATE.value: + return False + + # Decide whether message should be handled locally or forwarded, or both + if message.message_type == MessageTypes.NODE.value: + handle_locally, servers = self.handlenodemsg(message) + elif message.message_type == MessageTypes.EVENT.value: + # broadcast events everywhere + servers = self.getservers() + elif message.message_type == MessageTypes.CONFIG.value: + # broadcast location and services configuration everywhere + confobj = message.get_tlv(ConfigTlvs.OBJECT.value) + if confobj == "location" or confobj == "services" or confobj == "session" or confobj == "all": + servers = self.getservers() + elif message.message_type == MessageTypes.FILE.value: + # broadcast hook scripts and custom service files everywhere + filetype = message.get_tlv(FileTlvs.TYPE.value) + if filetype is not None and (filetype[:5] == "hook:" or filetype[:8] == "service:"): + servers = self.getservers() + if message.message_type == MessageTypes.LINK.value: + # prepare a server list from two node numbers in link message + handle_locally, servers, message = self.handlelinkmsg(message) + elif len(servers) == 0: + # check for servers based on node numbers in all messages but link + nn = message.node_numbers() + if len(nn) == 0: + return False + servers = self.getserversbynode(nn[0]) + + # allow other handlers to process this message (this is used + # by e.g. EMANE to use the link add message to keep counts of + # interfaces on other servers) + for handler in self.handlers: + handler(message) + + # perform any message forwarding + handle_locally |= self.forwardmsg(message, servers) + return not handle_locally + + def setupserver(self, servername): + """ + Send the appropriate API messages for configuring the specified emulation server. + + :param str servername: name of server to configure + :return: nothing + """ + server = self.getserverbyname(servername) + if server is None: + logging.warn("ignoring unknown server: %s", servername) + return + + if server.sock is None or server.host is None or server.port is None: + logging.info("ignoring disconnected server: %s", servername) + return + + # communicate this session"s current state to the server + tlvdata = coreapi.CoreEventTlv.pack(EventTlvs.TYPE.value, self.session.state) + msg = coreapi.CoreEventMessage.pack(0, tlvdata) + server.sock.send(msg) + + # send a Configuration message for the broker object and inform the + # server of its local name + tlvdata = "" + tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.OBJECT.value, "broker") + tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.TYPE.value, ConfigFlags.UPDATE.value) + tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.DATA_TYPES.value, (ConfigDataTypes.STRING.value,)) + tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.VALUES.value, + "%s:%s:%s" % (server.name, server.host, server.port)) + tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.SESSION.value, "%s" % self.session.id) + msg = coreapi.CoreConfMessage.pack(0, tlvdata) + server.sock.send(msg) + + @staticmethod + def fixupremotetty(msghdr, msgdata, host): + """ + When an interactive TTY request comes from the GUI, snoop the reply + and add an SSH command to the appropriate remote server. 
+ + :param msghdr: message header + :param msgdata: message data + :param str host: host address + :return: packed core execute tlv data + """ + msgtype, msgflags, _msglen = coreapi.CoreMessage.unpack_header(msghdr) + msgcls = coreapi.CLASS_MAP[msgtype] + msg = msgcls(msgflags, msghdr, msgdata) + + nodenum = msg.get_tlv(ExecuteTlvs.NODE.value) + execnum = msg.get_tlv(ExecuteTlvs.NUMBER.value) + cmd = msg.get_tlv(ExecuteTlvs.COMMAND.value) + res = msg.get_tlv(ExecuteTlvs.RESULT.value) + + tlvdata = "" + tlvdata += coreapi.CoreExecuteTlv.pack(ExecuteTlvs.NODE.value, nodenum) + tlvdata += coreapi.CoreExecuteTlv.pack(ExecuteTlvs.NUMBER.value, execnum) + tlvdata += coreapi.CoreExecuteTlv.pack(ExecuteTlvs.COMMAND.value, cmd) + res = "ssh -X -f " + host + " xterm -e " + res + tlvdata += coreapi.CoreExecuteTlv.pack(ExecuteTlvs.RESULT.value, res) + + return coreapi.CoreExecMessage.pack(msgflags, tlvdata) + + def handlenodemsg(self, message): + """ + Determine and return the servers to which this node message should + be forwarded. Also keep track of link-layer nodes and the mapping of + nodes to servers. + + :param core.api.coreapi.CoreMessage message: message to handle + :return: boolean for handling locally and set of servers + :rtype: tuple + """ + servers = set() + handle_locally = False + serverfiletxt = None + + # snoop Node Message for emulation server TLV and record mapping + n = message.tlv_data[NodeTlvs.NUMBER.value] + + # replicate link-layer nodes on all servers + nodetype = message.get_tlv(NodeTlvs.TYPE.value) + if nodetype is not None: + try: + nodecls = nodeutils.get_node_class(NodeTypes(nodetype)) + except KeyError: + logging.warn("broker invalid node type %s", nodetype) + return handle_locally, servers + if nodecls is None: + logging.warn("broker unimplemented node type %s", nodetype) + return handle_locally, servers + if issubclass(nodecls, PyCoreNet) and nodetype != NodeTypes.WIRELESS_LAN.value: + # network node replicated on all servers; could be optimized + # don"t replicate WLANs, because ebtables rules won"t work + servers = self.getservers() + handle_locally = True + self.addnet(n) + for server in servers: + self.addnodemap(server, n) + # do not record server name for networks since network + # nodes are replicated across all server + return handle_locally, servers + elif issubclass(nodecls, PyCoreNode): + name = message.get_tlv(NodeTlvs.NAME.value) + if name: + serverfiletxt = "%s %s %s" % (n, name, nodecls) + if issubclass(nodecls, PhysicalNode): + # remember physical nodes + self.addphys(n) + + # emulation server TLV specifies server + servername = message.get_tlv(NodeTlvs.EMULATION_SERVER.value) + server = self.getserverbyname(servername) + if server is not None: + self.addnodemap(server, n) + if server not in servers: + servers.add(server) + if serverfiletxt and self.session.master: + self.writenodeserver(serverfiletxt, server) + + # hook to update coordinates of physical nodes + if n in self.physical_nodes: + self.session.mobility.physnodeupdateposition(message) + + return handle_locally, servers + + def handlelinkmsg(self, message): + """ + Determine and return the servers to which this link message should + be forwarded. Also build tunnels between different servers or add + opaque data to the link message before forwarding. 
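Editor's note: the TTY fix-up in fixupremotetty above is, at its core, a single string rewrite of the execute result. A minimal sketch of just that rewrite, with a made-up host and command string:

def wrap_remote_tty(result, host):
    # run the original command in an xterm on the remote server over X-forwarded ssh,
    # mirroring the rewrite performed on the RESULT TLV above
    return "ssh -X -f " + host + " xterm -e " + result

print(wrap_remote_tty("vcmd -c /tmp/pycore.12345/n1 -- bash", "10.0.0.2"))
# ssh -X -f 10.0.0.2 xterm -e vcmd -c /tmp/pycore.12345/n1 -- bash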
+ + :param core.api.coreapi.CoreMessage message: message to handle + :return: boolean to handle locally, a set of server, and message + :rtype: tuple + """ + servers = set() + handle_locally = False + + # determine link message destination using non-network nodes + nn = message.node_numbers() + logging.debug("checking link nodes (%s) with network nodes (%s)", nn, self.network_nodes) + if nn[0] in self.network_nodes: + if nn[1] in self.network_nodes: + # two network nodes linked together - prevent loops caused by + # the automatic tunnelling + handle_locally = True + else: + servers = self.getserversbynode(nn[1]) + elif nn[1] in self.network_nodes: + servers = self.getserversbynode(nn[0]) + else: + logging.debug("link nodes are not network nodes") + servers1 = self.getserversbynode(nn[0]) + logging.debug("servers for node(%s): %s", nn[0], servers1) + servers2 = self.getserversbynode(nn[1]) + logging.debug("servers for node(%s): %s", nn[1], servers2) + # nodes are on two different servers, build tunnels as needed + if servers1 != servers2: + localn = None + if len(servers1) == 0 or len(servers2) == 0: + handle_locally = True + servers = servers1.union(servers2) + host = None + # get the IP of remote server and decide which node number + # is for a local node + for server in servers: + host = server.host + if host is None: + # server is local + handle_locally = True + if server in servers1: + localn = nn[0] + else: + localn = nn[1] + if handle_locally and localn is None: + # having no local node at this point indicates local node is + # the one with the empty server set + if len(servers1) == 0: + localn = nn[0] + elif len(servers2) == 0: + localn = nn[1] + if host is None: + host = self.getlinkendpoint(message, localn == nn[0]) + + logging.debug("handle locally(%s) and local node(%s)", handle_locally, localn) + if localn is None: + message = self.addlinkendpoints(message, servers1, servers2) + elif message.flags & MessageFlags.ADD.value: + self.addtunnel(host, nn[0], nn[1], localn) + elif message.flags & MessageFlags.DELETE.value: + self.deltunnel(nn[0], nn[1]) + handle_locally = False + else: + servers = servers1.union(servers2) + + return handle_locally, servers, message + + def addlinkendpoints(self, message, servers1, servers2): + """ + For a link message that is not handled locally, inform the remote + servers of the IP addresses used as tunnel endpoints by adding + opaque data to the link message. + + :param core.api.coreapi.CoreMessage message: message to link end points + :param servers1: + :param servers2: + :return: core link message + :rtype: coreapi.CoreLinkMessage + """ + ip1 = "" + for server in servers1: + if server.host is not None: + ip1 = server.host + break + ip2 = "" + for server in servers2: + if server.host is not None: + ip2 = server.host + break + tlvdata = message.raw_message[coreapi.CoreMessage.header_len:] + tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.OPAQUE.value, "%s:%s" % (ip1, ip2)) + newraw = coreapi.CoreLinkMessage.pack(message.flags, tlvdata) + msghdr = newraw[:coreapi.CoreMessage.header_len] + return coreapi.CoreLinkMessage(message.flags, msghdr, tlvdata) + + def getlinkendpoint(self, msg, first_is_local): + """ + A link message between two different servers has been received, + and we need to determine the tunnel endpoint. First look for + opaque data in the link message, otherwise use the IP of the message + sender (the master server). 
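Editor's note: addlinkendpoints above only needs one reachable address from each server set to build the opaque "ip1:ip2" value. A sketch of that selection, with namedtuple stand-ins for the server objects and invented addresses:

from collections import namedtuple

Server = namedtuple("Server", ["name", "host"])

def pick_host(servers):
    # first server with a usable host address, else empty string
    for server in servers:
        if server.host is not None:
            return server.host
    return ""

servers1 = {Server("localhost", None)}
servers2 = {Server("core2", "10.0.0.2")}
opaque = "%s:%s" % (pick_host(servers1), pick_host(servers2))
print(opaque)  # ":10.0.0.2" -- the empty side marks the local endpoint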
+ + :param coreapi.CoreLinkMessage msg: + :param bool first_is_local: is first local + :return: host address + :rtype: str + """ + host = None + opaque = msg.get_tlv(LinkTlvs.OPAQUE.value) + if opaque is not None: + if first_is_local: + host = opaque.split(":")[1] + else: + host = opaque.split(":")[0] + if host == "": + host = None + + if host is None: + for session_client in self.session_clients: + # get IP address from API message sender (master) + if session_client.client_address != "": + host = session_client.client_address[0] + break + + return host + + def handlerawmsg(self, msg): + """ + Helper to invoke message handler, using raw (packed) message bytes. + + :param msg: raw message butes + :return: should handle locally or not + :rtype: bool + """ + hdr = msg[:coreapi.CoreMessage.header_len] + msgtype, flags, _msglen = coreapi.CoreMessage.unpack_header(hdr) + msgcls = coreapi.CLASS_MAP[msgtype] + return self.handle_message(msgcls(flags, hdr, msg[coreapi.CoreMessage.header_len:])) + + def forwardmsg(self, message, servers): + """ + Forward API message to all given servers. + + Return True if an empty host/port is encountered, indicating + the message should be handled locally. + + :param core.api.coreapi.CoreMessage message: message to forward + :param list servers: server to forward message to + :return: handle locally value + :rtype: bool + """ + handle_locally = len(servers) == 0 + for server in servers: + if server.host is None and server.port is None: + # local emulation server, handle this locally + handle_locally = True + elif server.sock is None: + logging.info("server %s @ %s:%s is disconnected", server.name, server.host, server.port) + else: + logging.info("forwarding message to server(%s): %s:%s", server.name, server.host, server.port) + logging.debug("message being forwarded:\n%s", message) + server.sock.send(message.raw_message) + return handle_locally + + def writeservers(self): + """ + Write the server list to a text file in the session directory upon + startup: /tmp/pycore.nnnnn/servers + + :return: nothing + """ + servers = self.getservers() + filename = os.path.join(self.session.session_dir, "servers") + master = self.session_id_master + if master is None: + master = self.session.id + try: + with open(filename, "w") as f: + f.write("master=%s\n" % master) + for server in servers: + if server.name == "localhost": + continue + + lhost, lport = None, None + if server.sock: + lhost, lport = server.sock.getsockname() + f.write("%s %s %s %s %s\n" % (server.name, server.host, server.port, lhost, lport)) + except IOError: + logging.exception("error writing server list to the file: %s", filename) + + def writenodeserver(self, nodestr, server): + """ + Creates a /tmp/pycore.nnnnn/nX.conf/server file having the node + and server info. This may be used by scripts for accessing nodes on + other machines, much like local nodes may be accessed via the + VnodeClient class. 
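Editor's note: on the receiving side, getlinkendpoint picks its half of that opaque value and otherwise falls back to the master's address. A sketch of the parsing, with the fallback passed in as a plain argument and made-up addresses:

def link_endpoint(opaque, first_is_local, fallback=None):
    # opaque carries "ip1:ip2"; take the *other* side's address
    host = None
    if opaque:
        ip1, ip2 = opaque.split(":")
        host = ip2 if first_is_local else ip1
        if host == "":
            host = None
    # the real code falls back to the master's client address
    return host if host is not None else fallback

print(link_endpoint(":10.0.0.2", True))           # 10.0.0.2
print(link_endpoint("10.0.0.1:", False))          # 10.0.0.1
print(link_endpoint(":", True, "192.168.1.5"))    # 192.168.1.5 (fallback)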
+ + :param str nodestr: node string + :param CoreDistributedServer server: core server + :return: nothing + """ + serverstr = "%s %s %s" % (server.name, server.host, server.port) + name = nodestr.split()[1] + dirname = os.path.join(self.session.session_dir, name + ".conf") + filename = os.path.join(dirname, "server") + try: + os.makedirs(dirname) + except OSError: + # directory may already exist from previous distributed run + logging.exception("error creating directory: %s", dirname) + + try: + with open(filename, "w") as f: + f.write("%s\n%s\n" % (serverstr, nodestr)) + except IOError: + logging.exception("error writing server file %s for node %s", filename, name) + + def local_instantiation_complete(self): + """ + Set the local server"s instantiation-complete status to True. + + :return: nothing + """ + # TODO: do we really want to allow a localhost to not exist? + with self.servers_lock: + server = self.servers.get("localhost") + if server is not None: + server.instantiation_complete = True + + # broadcast out instantiate complete + tlvdata = "" + tlvdata += coreapi.CoreEventTlv.pack(EventTlvs.TYPE.value, EventTypes.INSTANTIATION_COMPLETE.value) + message = coreapi.CoreEventMessage.pack(0, tlvdata) + for session_client in self.session_clients: + session_client.sendall(message) + + def instantiation_complete(self): + """ + Return True if all servers have completed instantiation, False + otherwise. + + :return: have all server completed instantiation + :rtype: bool + """ + with self.servers_lock: + for server in self.servers.itervalues(): + if not server.instantiation_complete: + return False + return True + + def handle_distributed(self, message): + """ + Handle the session options config message as it has reached the + broker. Options requiring modification for distributed operation should + be handled here. + + :param message: message to handle + :return: nothing + """ + if not self.session.master: + return + + if message.message_type != MessageTypes.CONFIG.value or message.get_tlv(ConfigTlvs.OBJECT.value) != "session": + return + + values_str = message.get_tlv(ConfigTlvs.VALUES.value) + if values_str is None: + return + + value_strings = values_str.split("|") + for value_string in value_strings: + key, _value = value_string.split("=", 1) + if key == "controlnet": + self.handle_distributed_control_net(message, value_strings, value_strings.index(value_string)) + + def handle_distributed_control_net(self, message, values, index): + """ + Modify Config Message if multiple control network prefixes are + defined. Map server names to prefixes and repack the message before + it is forwarded to slave servers. + + :param message: message to handle + :param list values: values to handle + :param int index: index ti get key value from + :return: nothing + """ + key_value = values[index] + _key, value = key_value.split("=", 1) + control_nets = value.split() + + if len(control_nets) < 2: + logging.warn("multiple controlnet prefixes do not exist") + return + + servers = self.session.broker.getservernames() + if len(servers) < 2: + logging.warn("not distributed") + return + + servers.remove("localhost") + # master always gets first prefix + servers.insert(0, "localhost") + # create list of "server1:ctrlnet1 server2:ctrlnet2 ..." 
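Editor's note: the lines that follow turn the controlnet option into a "server:prefix" list with the master pinned to the first prefix. A standalone sketch of that transformation, assuming two slave servers and invented prefixes:

servers = ["core2", "core3", "localhost"]
control_nets = "172.16.1.0/24 172.16.2.0/24 172.16.3.0/24".split()

# master always gets the first prefix
servers.remove("localhost")
servers.insert(0, "localhost")

pairs = ["%s:%s" % (name, prefix) for name, prefix in zip(servers, control_nets)]
print("controlnet=%s" % " ".join(pairs))
# controlnet=localhost:172.16.1.0/24 core2:172.16.2.0/24 core3:172.16.3.0/24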
+ control_nets = map(lambda x: "%s:%s" % (x[0], x[1]), zip(servers, control_nets)) + values[index] = "controlnet=%s" % (" ".join(control_nets)) + values_str = "|".join(values) + message.tlv_data[ConfigTlvs.VALUES.value] = values_str + message.repack() diff --git a/daemon/core/conf.py b/daemon/core/conf.py new file mode 100644 index 00000000..b2696b7a --- /dev/null +++ b/daemon/core/conf.py @@ -0,0 +1,398 @@ +""" +Common support for configurable CORE objects. +""" + +import logging +from collections import OrderedDict + +from core.data import ConfigData + + +class ConfigShim(object): + """ + Provides helper methods for converting newer configuration values into TLV compatible formats. + """ + + @classmethod + def str_to_dict(cls, key_values): + """ + Converts a TLV key/value string into an ordered mapping. + + :param str key_values: + :return: ordered mapping of key/value pairs + :rtype: OrderedDict + """ + key_values = key_values.split("|") + values = OrderedDict() + for key_value in key_values: + key, value = key_value.split("=", 1) + values[key] = value + return values + + @classmethod + def groups_to_str(cls, config_groups): + """ + Converts configuration groups to a TLV formatted string. + + :param list[ConfigGroup] config_groups: configuration groups to format + :return: TLV configuration group string + :rtype: str + """ + group_strings = [] + for config_group in config_groups: + group_string = "%s:%s-%s" % (config_group.name, config_group.start, config_group.stop) + group_strings.append(group_string) + return "|".join(group_strings) + + @classmethod + def config_data(cls, flags, node_id, type_flags, configurable_options, config): + """ + Convert this class to a Config API message. Some TLVs are defined + by the class, but node number, conf type flags, and values must + be passed in. + + :param int flags: message flags + :param int node_id: node id + :param int type_flags: type flags + :param ConfigurableOptions configurable_options: options to create config data for + :param dict config: configuration values for options + :return: configuration data object + :rtype: ConfigData + """ + key_values = None + captions = None + data_types = [] + possible_values = [] + logging.debug("configurable: %s", configurable_options) + logging.debug("configuration options: %s", configurable_options.configurations) + logging.debug("configuration data: %s", config) + for configuration in configurable_options.configurations(): + if not captions: + captions = configuration.label + else: + captions += "|%s" % configuration.label + + data_types.append(configuration.type.value) + + options = ",".join(configuration.options) + possible_values.append(options) + + _id = configuration.id + config_value = config.get(_id, configuration.default) + key_value = "%s=%s" % (_id, config_value) + if not key_values: + key_values = key_value + else: + key_values += "|%s" % key_value + + groups_str = cls.groups_to_str(configurable_options.config_groups()) + return ConfigData( + message_type=flags, + node=node_id, + object=configurable_options.name, + type=type_flags, + data_types=tuple(data_types), + data_values=key_values, + captions=captions, + possible_values="|".join(possible_values), + bitmap=configurable_options.bitmap, + groups=groups_str + ) + + +class Configuration(object): + """ + Represents a configuration options. + """ + + def __init__(self, _id, _type, label=None, default="", options=None): + """ + Creates a Configuration object. 
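Editor's note: ConfigShim converts between the TLV "key=value|key=value" string form and ordered dictionaries. A quick round-trip sketch of that format; the keys and values are arbitrary examples, not any particular model's options:

from collections import OrderedDict

def str_to_dict(key_values):
    # "a=1|b=2" -> OrderedDict([("a", "1"), ("b", "2")])
    values = OrderedDict()
    for key_value in key_values.split("|"):
        key, value = key_value.split("=", 1)
        values[key] = value
    return values

def dict_to_str(values):
    return "|".join("%s=%s" % (k, v) for k, v in values.items())

config = str_to_dict("range=275|bandwidth=54000000|jitter=0")
print(config["bandwidth"])   # 54000000
print(dict_to_str(config))   # range=275|bandwidth=54000000|jitter=0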
+ + :param str _id: unique name for configuration + :param core.enumerations.ConfigDataTypes _type: configuration data type + :param str label: configuration label for display + :param str default: default value for configuration + :param list options: list options if this is a configuration with a combobox + """ + self.id = _id + self.type = _type + self.default = default + if not options: + options = [] + self.options = options + if not label: + label = _id + self.label = label + + def __str__(self): + return "%s(id=%s, type=%s, default=%s, options=%s)" % ( + self.__class__.__name__, self.id, self.type, self.default, self.options) + + +class ConfigurableManager(object): + """ + Provides convenience methods for storing and retrieving configuration options for nodes. + """ + _default_node = -1 + _default_type = _default_node + + def __init__(self): + """ + Creates a ConfigurableManager object. + """ + self.node_configurations = {} + + def nodes(self): + """ + Retrieves the ids of all node configurations known by this manager. + + :return: list of node ids + :rtype: list + """ + return [node_id for node_id in self.node_configurations.iterkeys() if node_id != self._default_node] + + def config_reset(self, node_id=None): + """ + Clears all configurations or configuration for a specific node. + + :param int node_id: node id to clear configurations for, default is None and clears all configurations + :return: nothing + """ + logging.debug("resetting all configurations: %s", self.__class__.__name__) + if not node_id: + self.node_configurations.clear() + elif node_id in self.node_configurations: + self.node_configurations.pop(node_id) + + def set_config(self, _id, value, node_id=_default_node, config_type=_default_type): + """ + Set a specific configuration value for a node and configuration type. + + :param str _id: configuration key + :param str value: configuration value + :param int node_id: node id to store configuration for + :param str config_type: configuration type to store configuration for + :return: nothing + """ + logging.debug("setting config for node(%s) type(%s): %s=%s", node_id, config_type, _id, value) + node_configs = self.node_configurations.setdefault(node_id, OrderedDict()) + node_type_configs = node_configs.setdefault(config_type, OrderedDict()) + node_type_configs[_id] = value + + def set_configs(self, config, node_id=_default_node, config_type=_default_type): + """ + Set configurations for a node and configuration type. + + :param dict config: configurations to set + :param int node_id: node id to store configuration for + :param str config_type: configuration type to store configuration for + :return: nothing + """ + logging.debug("setting config for node(%s) type(%s): %s", node_id, config_type, config) + node_configs = self.node_configurations.setdefault(node_id, OrderedDict()) + node_configs[config_type] = config + + def get_config(self, _id, node_id=_default_node, config_type=_default_type, default=None): + """ + Retrieves a specific configuration for a node and configuration type. 
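Editor's note: set_config and set_configs above store values in a two-level mapping keyed first by node id and then by configuration type. A small usage sketch of that layout with made-up node id, type name, and keys:

from collections import OrderedDict

node_configurations = {}

def set_config(_id, value, node_id=-1, config_type=-1):
    # same nested setdefault layout as ConfigurableManager.set_config
    node_configs = node_configurations.setdefault(node_id, OrderedDict())
    node_configs.setdefault(config_type, OrderedDict())[_id] = value

set_config("delay", "5000", node_id=4, config_type="basic_range")
set_config("loss", "12.5", node_id=4, config_type="basic_range")
print(node_configurations[4]["basic_range"])
# OrderedDict([('delay', '5000'), ('loss', '12.5')])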
+ + :param str _id: specific configuration to retrieve + :param int node_id: node id to store configuration for + :param str config_type: configuration type to store configuration for + :param default: default value to return when value is not found + :return: configuration value + :rtype str + """ + logging.debug("getting config for node(%s) type(%s): %s", node_id, config_type, _id) + result = default + node_type_configs = self.get_configs(node_id, config_type) + if node_type_configs: + result = node_type_configs.get(_id, default) + return result + + def get_configs(self, node_id=_default_node, config_type=_default_type): + """ + Retrieve configurations for a node and configuration type. + + :param int node_id: node id to store configuration for + :param str config_type: configuration type to store configuration for + :return: configurations + :rtype: dict + """ + logging.debug("getting configs for node(%s) type(%s)", node_id, config_type) + result = None + node_configs = self.node_configurations.get(node_id) + if node_configs: + result = node_configs.get(config_type) + return result + + def get_all_configs(self, node_id=_default_node): + """ + Retrieve all current configuration types for a node. + + :param int node_id: node id to retrieve configurations for + :return: all configuration types for a node + :rtype: dict + """ + logging.debug("getting all configs for node(%s)", node_id) + return self.node_configurations.get(node_id) + + +class ConfigGroup(object): + """ + Defines configuration group tabs used for display by ConfigurationOptions. + """ + + def __init__(self, name, start, stop): + """ + Creates a ConfigGroup object. + + :param str name: configuration group display name + :param int start: configurations start index for this group + :param int stop: configurations stop index for this group + """ + self.name = name + self.start = start + self.stop = stop + + +class ConfigurableOptions(object): + """ + Provides a base for defining configuration options within CORE. + """ + name = None + bitmap = None + options = [] + + @classmethod + def configurations(cls): + """ + Provides the configurations for this class. + + :return: configurations + :rtype: list[Configuration] + """ + return cls.options + + @classmethod + def config_groups(cls): + """ + Defines how configurations are grouped. + + :return: configuration group definition + :rtype: list[ConfigGroup] + """ + return [ + ConfigGroup("Options", 1, len(cls.configurations())) + ] + + @classmethod + def default_values(cls): + """ + Provides an ordered mapping of configuration keys to default values. + + :return: ordered configuration mapping default values + :rtype: OrderedDict + """ + return OrderedDict([(config.id, config.default) for config in cls.configurations()]) + + +class ModelManager(ConfigurableManager): + """ + Helps handle setting models for nodes and managing their model configurations. + """ + + def __init__(self): + """ + Creates a ModelManager object. + """ + super(ModelManager, self).__init__() + self.models = {} + self.node_models = {} + + def set_model_config(self, node_id, model_name, config=None): + """ + Set configuration data for a model. 
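Editor's note: default_values above simply walks the declared configuration list in order. A sketch with a hypothetical two-option class and a namedtuple standing in for Configuration:

from collections import OrderedDict, namedtuple

Option = namedtuple("Option", ["id", "default"])  # stand-in for Configuration

class ExampleOptions:
    options = [Option("range", "275"), Option("bandwidth", "54000000")]

    @classmethod
    def configurations(cls):
        return cls.options

    @classmethod
    def default_values(cls):
        return OrderedDict((c.id, c.default) for c in cls.configurations())

print(ExampleOptions.default_values())
# OrderedDict([('range', '275'), ('bandwidth', '54000000')])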
+ + :param int node_id: node id to set model configuration for + :param str model_name: model to set configuration for + :param dict config: configuration data to set for model + :return: nothing + """ + # get model class to configure + model_class = self.models.get(model_name) + if not model_class: + raise ValueError("%s is an invalid model" % model_name) + + # retrieve default values + model_config = self.get_model_config(node_id, model_name) + if not config: + config = {} + for key, value in config.iteritems(): + model_config[key] = value + + # set as node model for startup + self.node_models[node_id] = model_name + + # set configuration + self.set_configs(model_config, node_id=node_id, config_type=model_name) + + def get_model_config(self, node_id, model_name): + """ + Set configuration data for a model. + + :param int node_id: node id to set model configuration for + :param str model_name: model to set configuration for + :return: current model configuration for node + :rtype: dict + """ + # get model class to configure + model_class = self.models.get(model_name) + if not model_class: + raise ValueError("%s is an invalid model" % model_name) + + config = self.get_configs(node_id=node_id, config_type=model_name) + if not config: + # set default values, when not already set + config = model_class.default_values() + self.set_configs(config, node_id=node_id, config_type=model_name) + + return config + + def set_model(self, node, model_class, config=None): + """ + Set model and model configuration for node. + + :param node: node to set model for + :param model_class: model class to set for node + :param dict config: model configuration, None for default configuration + :return: nothing + """ + logging.info("setting mobility model(%s) for node(%s): %s", model_class.name, node.objid, config) + self.set_model_config(node.objid, model_class.name, config) + config = self.get_model_config(node.objid, model_class.name) + node.setmodel(model_class, config) + + def get_models(self, node): + """ + Return a list of model classes and values for a net if one has been + configured. This is invoked when exporting a session to XML. + + :param node: network node to get models for + :return: list of model and values tuples for the network node + :rtype: list + """ + all_configs = self.get_all_configs(node.objid) + if not all_configs: + all_configs = {} + + models = [] + for model_name, config in all_configs.iteritems(): + if model_name == ModelManager._default_node: + continue + model_class = self.models[model_name] + models.append((model_class, config)) + + logging.debug("models for node(%s): %s", node.objid, models) + return models diff --git a/daemon/core/config.py b/daemon/core/config.py deleted file mode 100644 index 7a6ffa49..00000000 --- a/daemon/core/config.py +++ /dev/null @@ -1,385 +0,0 @@ -""" -Common support for configurable CORE objects. -""" - -import logging -from collections import OrderedDict -from dataclasses import dataclass, field -from typing import TYPE_CHECKING, Any, Optional, Union - -from core.emane.nodes import EmaneNet -from core.emulator.enumerations import ConfigDataTypes -from core.errors import CoreConfigError -from core.nodes.network import WlanNode - -logger = logging.getLogger(__name__) - -if TYPE_CHECKING: - from core.location.mobility import WirelessModel - - WirelessModelType = type[WirelessModel] - -_BOOL_OPTIONS: set[str] = {"0", "1"} - - -@dataclass -class ConfigGroup: - """ - Defines configuration group tabs used for display by ConfigurationOptions. 
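Editor's note: set_model_config above overlays caller-supplied values on the model's defaults before storing them. A reduced sketch of just that merge, with illustrative option names and values:

defaults = {"range": "275", "bandwidth": "54000000", "jitter": "0"}
overrides = {"range": "500"}

model_config = dict(defaults)    # start from the model's default_values()
model_config.update(overrides)   # caller-supplied configuration wins
print(model_config)
# {'range': '500', 'bandwidth': '54000000', 'jitter': '0'}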
- """ - - name: str - start: int - stop: int - - -@dataclass -class Configuration: - """ - Represents a configuration option. - """ - - id: str - type: ConfigDataTypes - label: str = None - default: str = "" - options: list[str] = field(default_factory=list) - group: str = "Configuration" - - def __post_init__(self) -> None: - self.label = self.label if self.label else self.id - if self.type == ConfigDataTypes.BOOL: - if self.default and self.default not in _BOOL_OPTIONS: - raise CoreConfigError( - f"{self.id} bool value must be one of: {_BOOL_OPTIONS}: " - f"{self.default}" - ) - elif self.type == ConfigDataTypes.FLOAT: - if self.default: - try: - float(self.default) - except ValueError: - raise CoreConfigError( - f"{self.id} is not a valid float: {self.default}" - ) - elif self.type != ConfigDataTypes.STRING: - if self.default: - try: - int(self.default) - except ValueError: - raise CoreConfigError( - f"{self.id} is not a valid int: {self.default}" - ) - - -@dataclass -class ConfigBool(Configuration): - """ - Represents a boolean configuration option. - """ - - type: ConfigDataTypes = ConfigDataTypes.BOOL - value: bool = False - - -@dataclass -class ConfigFloat(Configuration): - """ - Represents a float configuration option. - """ - - type: ConfigDataTypes = ConfigDataTypes.FLOAT - value: float = 0.0 - - -@dataclass -class ConfigInt(Configuration): - """ - Represents an integer configuration option. - """ - - type: ConfigDataTypes = ConfigDataTypes.INT32 - value: int = 0 - - -@dataclass -class ConfigString(Configuration): - """ - Represents a string configuration option. - """ - - type: ConfigDataTypes = ConfigDataTypes.STRING - value: str = "" - - -class ConfigurableOptions: - """ - Provides a base for defining configuration options within CORE. - """ - - name: Optional[str] = None - options: list[Configuration] = [] - - @classmethod - def configurations(cls) -> list[Configuration]: - """ - Provides the configurations for this class. - - :return: configurations - """ - return cls.options - - @classmethod - def config_groups(cls) -> list[ConfigGroup]: - """ - Defines how configurations are grouped. - - :return: configuration group definition - """ - return [ConfigGroup("Options", 1, len(cls.configurations()))] - - @classmethod - def default_values(cls) -> dict[str, str]: - """ - Provides an ordered mapping of configuration keys to default values. - - :return: ordered configuration mapping default values - """ - return OrderedDict( - [(config.id, config.default) for config in cls.configurations()] - ) - - -class ConfigurableManager: - """ - Provides convenience methods for storing and retrieving configuration options for - nodes. - """ - - _default_node: int = -1 - _default_type: int = _default_node - - def __init__(self) -> None: - """ - Creates a ConfigurableManager object. - """ - self.node_configurations = {} - - def nodes(self) -> list[int]: - """ - Retrieves the ids of all node configurations known by this manager. - - :return: list of node ids - """ - return [x for x in self.node_configurations if x != self._default_node] - - def config_reset(self, node_id: int = None) -> None: - """ - Clears all configurations or configuration for a specific node. 
- - :param node_id: node id to clear configurations for, default is None and clears - all configurations - :return: nothing - """ - if not node_id: - self.node_configurations.clear() - elif node_id in self.node_configurations: - self.node_configurations.pop(node_id) - - def set_config( - self, - _id: str, - value: str, - node_id: int = _default_node, - config_type: str = _default_type, - ) -> None: - """ - Set a specific configuration value for a node and configuration type. - - :param _id: configuration key - :param value: configuration value - :param node_id: node id to store configuration for - :param config_type: configuration type to store configuration for - :return: nothing - """ - node_configs = self.node_configurations.setdefault(node_id, OrderedDict()) - node_type_configs = node_configs.setdefault(config_type, OrderedDict()) - node_type_configs[_id] = value - - def set_configs( - self, - config: dict[str, str], - node_id: int = _default_node, - config_type: str = _default_type, - ) -> None: - """ - Set configurations for a node and configuration type. - - :param config: configurations to set - :param node_id: node id to store configuration for - :param config_type: configuration type to store configuration for - :return: nothing - """ - logger.debug( - "setting config for node(%s) type(%s): %s", node_id, config_type, config - ) - node_configs = self.node_configurations.setdefault(node_id, OrderedDict()) - node_configs[config_type] = config - - def get_config( - self, - _id: str, - node_id: int = _default_node, - config_type: str = _default_type, - default: str = None, - ) -> str: - """ - Retrieves a specific configuration for a node and configuration type. - - :param _id: specific configuration to retrieve - :param node_id: node id to store configuration for - :param config_type: configuration type to store configuration for - :param default: default value to return when value is not found - :return: configuration value - """ - result = default - node_type_configs = self.get_configs(node_id, config_type) - if node_type_configs: - result = node_type_configs.get(_id, default) - return result - - def get_configs( - self, node_id: int = _default_node, config_type: str = _default_type - ) -> Optional[dict[str, str]]: - """ - Retrieve configurations for a node and configuration type. - - :param node_id: node id to store configuration for - :param config_type: configuration type to store configuration for - :return: configurations - """ - result = None - node_configs = self.node_configurations.get(node_id) - if node_configs: - result = node_configs.get(config_type) - return result - - def get_all_configs(self, node_id: int = _default_node) -> dict[str, Any]: - """ - Retrieve all current configuration types for a node. - - :param node_id: node id to retrieve configurations for - :return: all configuration types for a node - """ - return self.node_configurations.get(node_id) - - -class ModelManager(ConfigurableManager): - """ - Helps handle setting models for nodes and managing their model configurations. - """ - - def __init__(self) -> None: - """ - Creates a ModelManager object. - """ - super().__init__() - self.models: dict[str, Any] = {} - self.node_models: dict[int, str] = {} - - def set_model_config( - self, node_id: int, model_name: str, config: dict[str, str] = None - ) -> None: - """ - Set configuration data for a model. 
- - :param node_id: node id to set model configuration for - :param model_name: model to set configuration for - :param config: configuration data to set for model - :return: nothing - """ - # get model class to configure - model_class = self.models.get(model_name) - if not model_class: - raise ValueError(f"{model_name} is an invalid model") - - # retrieve default values - model_config = self.get_model_config(node_id, model_name) - if not config: - config = {} - for key in config: - value = config[key] - model_config[key] = value - - # set as node model for startup - self.node_models[node_id] = model_name - - # set configuration - self.set_configs(model_config, node_id=node_id, config_type=model_name) - - def get_model_config(self, node_id: int, model_name: str) -> dict[str, str]: - """ - Retrieve configuration data for a model. - - :param node_id: node id to set model configuration for - :param model_name: model to set configuration for - :return: current model configuration for node - """ - # get model class to configure - model_class = self.models.get(model_name) - if not model_class: - raise ValueError(f"{model_name} is an invalid model") - - config = self.get_configs(node_id=node_id, config_type=model_name) - if not config: - # set default values, when not already set - config = model_class.default_values() - self.set_configs(config, node_id=node_id, config_type=model_name) - - return config - - def set_model( - self, - node: Union[WlanNode, EmaneNet], - model_class: "WirelessModelType", - config: dict[str, str] = None, - ) -> None: - """ - Set model and model configuration for node. - - :param node: node to set model for - :param model_class: model class to set for node - :param config: model configuration, None for default configuration - :return: nothing - """ - logger.debug( - "setting model(%s) for node(%s): %s", model_class.name, node.id, config - ) - self.set_model_config(node.id, model_class.name, config) - config = self.get_model_config(node.id, model_class.name) - node.setmodel(model_class, config) - - def get_models( - self, node: Union[WlanNode, EmaneNet] - ) -> list[tuple[type, dict[str, str]]]: - """ - Return a list of model classes and values for a net if one has been - configured. This is invoked when exporting a session to XML. 
- - :param node: network node to get models for - :return: list of model and values tuples for the network node - """ - all_configs = self.get_all_configs(node.id) - if not all_configs: - all_configs = {} - - models = [] - for model_name in all_configs: - config = all_configs[model_name] - if model_name == ModelManager._default_node: - continue - model_class = self.models[model_name] - models.append((model_class, config)) - - logger.debug("models for node(%s): %s", node.id, models) - return models diff --git a/daemon/core/configservice/base.py b/daemon/core/configservice/base.py deleted file mode 100644 index e15260eb..00000000 --- a/daemon/core/configservice/base.py +++ /dev/null @@ -1,510 +0,0 @@ -import abc -import enum -import inspect -import logging -import time -from dataclasses import dataclass -from pathlib import Path -from typing import Any, Optional - -from mako import exceptions -from mako.lookup import TemplateLookup -from mako.template import Template - -from core.config import Configuration -from core.errors import CoreCommandError, CoreError -from core.nodes.base import CoreNode - -logger = logging.getLogger(__name__) -TEMPLATES_DIR: str = "templates" - - -def get_template_path(file_path: Path) -> str: - """ - Utility to convert a given file path to a valid template path format. - - :param file_path: file path to convert - :return: template path - """ - if file_path.is_absolute(): - template_path = str(file_path.relative_to("/")) - else: - template_path = str(file_path) - return template_path - - -class ConfigServiceMode(enum.Enum): - BLOCKING = 0 - NON_BLOCKING = 1 - TIMER = 2 - - -class ConfigServiceBootError(Exception): - pass - - -class ConfigServiceTemplateError(Exception): - pass - - -@dataclass -class ShadowDir: - path: str - src: Optional[str] = None - templates: bool = False - has_node_paths: bool = False - - -class ConfigService(abc.ABC): - """ - Base class for creating configurable services. - """ - - # validation period in seconds, how frequent validation is attempted - validation_period: float = 0.5 - - # time to wait in seconds for determining if service started successfully - validation_timer: int = 5 - - # directories to shadow and copy files from - shadow_directories: list[ShadowDir] = [] - - def __init__(self, node: CoreNode) -> None: - """ - Create ConfigService instance. - - :param node: node this service is assigned to - """ - self.node: CoreNode = node - class_file = inspect.getfile(self.__class__) - templates_path = Path(class_file).parent.joinpath(TEMPLATES_DIR) - self.templates: TemplateLookup = TemplateLookup(directories=templates_path) - self.config: dict[str, Configuration] = {} - self.custom_templates: dict[str, str] = {} - self.custom_config: dict[str, str] = {} - configs = self.default_configs[:] - self._define_config(configs) - - @staticmethod - def clean_text(text: str) -> str: - """ - Returns space stripped text for string literals, while keeping space - indentations. 
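Editor's note: get_template_path above strips the leading slash so an absolute service file name can double as a template lookup key. A quick sketch using file names that appear in the FRR service later in this diff:

from pathlib import Path

def get_template_path(file_path):
    # absolute file names are looked up relative to the templates directory
    if file_path.is_absolute():
        return str(file_path.relative_to("/"))
    return str(file_path)

print(get_template_path(Path("/usr/local/etc/frr/frr.conf")))  # usr/local/etc/frr/frr.conf
print(get_template_path(Path("frrboot.sh")))                   # frrboot.sh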
- - :param text: text to clean - :return: cleaned text - """ - return inspect.cleandoc(text) - - @property - @abc.abstractmethod - def name(self) -> str: - raise NotImplementedError - - @property - @abc.abstractmethod - def group(self) -> str: - raise NotImplementedError - - @property - @abc.abstractmethod - def directories(self) -> list[str]: - raise NotImplementedError - - @property - @abc.abstractmethod - def files(self) -> list[str]: - raise NotImplementedError - - @property - @abc.abstractmethod - def default_configs(self) -> list[Configuration]: - raise NotImplementedError - - @property - @abc.abstractmethod - def modes(self) -> dict[str, dict[str, str]]: - raise NotImplementedError - - @property - @abc.abstractmethod - def executables(self) -> list[str]: - raise NotImplementedError - - @property - @abc.abstractmethod - def dependencies(self) -> list[str]: - raise NotImplementedError - - @property - @abc.abstractmethod - def startup(self) -> list[str]: - raise NotImplementedError - - @property - @abc.abstractmethod - def validate(self) -> list[str]: - raise NotImplementedError - - @property - @abc.abstractmethod - def shutdown(self) -> list[str]: - raise NotImplementedError - - @property - @abc.abstractmethod - def validation_mode(self) -> ConfigServiceMode: - raise NotImplementedError - - def start(self) -> None: - """ - Creates services files/directories, runs startup, and validates based on - validation mode. - - :return: nothing - :raises ConfigServiceBootError: when there is an error starting service - """ - logger.info("node(%s) service(%s) starting...", self.node.name, self.name) - self.create_shadow_dirs() - self.create_dirs() - self.create_files() - wait = self.validation_mode == ConfigServiceMode.BLOCKING - self.run_startup(wait) - if not wait: - if self.validation_mode == ConfigServiceMode.TIMER: - self.wait_validation() - else: - self.run_validation() - - def stop(self) -> None: - """ - Stop service using shutdown commands. - - :return: nothing - """ - for cmd in self.shutdown: - try: - self.node.cmd(cmd) - except CoreCommandError: - logger.exception( - f"node({self.node.name}) service({self.name}) " - f"failed shutdown: {cmd}" - ) - - def restart(self) -> None: - """ - Restarts service by running stop and then start. - - :return: nothing - """ - self.stop() - self.start() - - def create_shadow_dirs(self) -> None: - """ - Creates a shadow of a host system directory recursively - to be mapped and live within a node. 
- - :return: nothing - :raises CoreError: when there is a failure creating a directory or file - """ - for shadow_dir in self.shadow_directories: - # setup shadow and src paths, using node unique paths when configured - shadow_path = Path(shadow_dir.path) - if shadow_dir.src is None: - src_path = shadow_path - else: - src_path = Path(shadow_dir.src) - if shadow_dir.has_node_paths: - src_path = src_path / self.node.name - # validate shadow and src paths - if not shadow_path.is_absolute(): - raise CoreError(f"shadow dir({shadow_path}) is not absolute") - if not src_path.is_absolute(): - raise CoreError(f"shadow source dir({src_path}) is not absolute") - if not src_path.is_dir(): - raise CoreError(f"shadow source dir({src_path}) does not exist") - # create root of the shadow path within node - logger.info( - "node(%s) creating shadow directory(%s) src(%s) node paths(%s) " - "templates(%s)", - self.node.name, - shadow_path, - src_path, - shadow_dir.has_node_paths, - shadow_dir.templates, - ) - self.node.create_dir(shadow_path) - # find all directories and files to create - dir_paths = [] - file_paths = [] - for path in src_path.rglob("*"): - shadow_src_path = shadow_path / path.relative_to(src_path) - if path.is_dir(): - dir_paths.append(shadow_src_path) - else: - file_paths.append((path, shadow_src_path)) - # create all directories within node - for path in dir_paths: - self.node.create_dir(path) - # create all files within node, from templates when configured - data = self.data() - templates = TemplateLookup(directories=src_path) - for path, dst_path in file_paths: - if shadow_dir.templates: - template = templates.get_template(path.name) - rendered = self._render(template, data) - self.node.create_file(dst_path, rendered) - else: - self.node.copy_file(path, dst_path) - - def create_dirs(self) -> None: - """ - Creates directories for service. - - :return: nothing - :raises CoreError: when there is a failure creating a directory - """ - logger.debug("creating config service directories") - for directory in sorted(self.directories): - dir_path = Path(directory) - try: - self.node.create_dir(dir_path) - except (CoreCommandError, CoreError): - raise CoreError( - f"node({self.node.name}) service({self.name}) " - f"failure to create service directory: {directory}" - ) - - def data(self) -> dict[str, Any]: - """ - Returns key/value data, used when rendering file templates. - - :return: key/value template data - """ - return {} - - def set_template(self, name: str, template: str) -> None: - """ - Store custom template to render for a given file. - - :param name: file to store custom template for - :param template: custom template to render - :return: nothing - """ - self.custom_templates[name] = template - - def get_text_template(self, name: str) -> str: - """ - Retrieves text based template for files that do not have a file based template. - - :param name: name of file to get template for - :return: template to render - """ - raise CoreError(f"service({self.name}) unknown template({name})") - - def get_templates(self) -> dict[str, str]: - """ - Retrieves mapping of file names to templates for all cases, which - includes custom templates, file templates, and text templates. 
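Editor's note: create_shadow_dirs above walks the source tree once, splitting it into directories to recreate and files to copy or render inside the node. A filesystem-only sketch of that walk, using a temporary directory in place of a real shadowed path, an arbitrary shadow root, and prints instead of node calls:

import tempfile
from pathlib import Path

def plan_shadow(src_path, shadow_root):
    # mirror of the rglob split in create_shadow_dirs, minus any node interaction
    dir_paths, file_paths = [], []
    for path in Path(src_path).rglob("*"):
        shadow_path = Path(shadow_root) / path.relative_to(src_path)
        if path.is_dir():
            dir_paths.append(shadow_path)
        else:
            file_paths.append((path, shadow_path))
    return dir_paths, file_paths

with tempfile.TemporaryDirectory() as src:
    (Path(src) / "conf.d").mkdir()
    (Path(src) / "conf.d" / "daemon.conf").write_text("log syslog\n")
    dirs, files = plan_shadow(src, "/var/run/example")
    print(dirs)   # [PosixPath('/var/run/example/conf.d')]
    print(files)  # [(<src>/conf.d/daemon.conf, PosixPath('/var/run/example/conf.d/daemon.conf'))]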
- - :return: mapping of files to templates - """ - templates = {} - for file in self.files: - file_path = Path(file) - template_path = get_template_path(file_path) - if file in self.custom_templates: - template = self.custom_templates[file] - template = self.clean_text(template) - elif self.templates.has_template(template_path): - template = self.templates.get_template(template_path).source - else: - try: - template = self.get_text_template(file) - except Exception as e: - raise ConfigServiceTemplateError( - f"node({self.node.name}) service({self.name}) file({file}) " - f"failure getting template: {e}" - ) - template = self.clean_text(template) - templates[file] = template - return templates - - def get_rendered_templates(self) -> dict[str, str]: - templates = {} - data = self.data() - for file in sorted(self.files): - rendered = self._get_rendered_template(file, data) - templates[file] = rendered - return templates - - def _get_rendered_template(self, file: str, data: dict[str, Any]) -> str: - file_path = Path(file) - template_path = get_template_path(file_path) - if file in self.custom_templates: - text = self.custom_templates[file] - rendered = self.render_text(text, data) - elif self.templates.has_template(template_path): - rendered = self.render_template(template_path, data) - else: - try: - text = self.get_text_template(file) - except Exception as e: - raise ConfigServiceTemplateError( - f"node({self.node.name}) service({self.name}) file({file}) " - f"failure getting template: {e}" - ) - rendered = self.render_text(text, data) - return rendered - - def create_files(self) -> None: - """ - Creates service files inside associated node. - - :return: nothing - """ - data = self.data() - for file in sorted(self.files): - logger.debug( - "node(%s) service(%s) template(%s)", self.node.name, self.name, file - ) - rendered = self._get_rendered_template(file, data) - file_path = Path(file) - self.node.create_file(file_path, rendered) - - def run_startup(self, wait: bool) -> None: - """ - Run startup commands for service on node. - - :param wait: wait successful command exit status when True, ignore status - otherwise - :return: nothing - :raises ConfigServiceBootError: when a command that waits fails - """ - for cmd in self.startup: - try: - self.node.cmd(cmd, wait=wait) - except CoreCommandError as e: - raise ConfigServiceBootError( - f"node({self.node.name}) service({self.name}) failed startup: {e}" - ) - - def wait_validation(self) -> None: - """ - Waits for a period of time to consider service started successfully. - - :return: nothing - """ - time.sleep(self.validation_timer) - - def run_validation(self) -> None: - """ - Runs validation commands for service on node. - - :return: nothing - :raises ConfigServiceBootError: if there is a validation failure - """ - start = time.monotonic() - cmds = self.validate[:] - index = 0 - while cmds: - cmd = cmds[index] - try: - self.node.cmd(cmd) - del cmds[index] - index += 1 - except CoreCommandError: - logger.debug( - f"node({self.node.name}) service({self.name}) " - f"validate command failed: {cmd}" - ) - time.sleep(self.validation_period) - - if cmds and time.monotonic() - start > self.validation_timer: - raise ConfigServiceBootError( - f"node({self.node.name}) service({self.name}) failed to validate" - ) - - def _render(self, template: Template, data: dict[str, Any] = None) -> str: - """ - Renders template providing all associated data to template. 
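Editor's note: run_validation above keeps retrying the validate commands until they all pass or the validation timer expires. A simplified sketch of that loop, with a plain callable in place of node.cmd and RuntimeError in place of the CORE-specific errors:

import time

def run_validation(commands, run, period=0.5, timer=5.0):
    # retry failing validate commands until all pass or the timer expires
    start = time.monotonic()
    pending = list(commands)
    while pending:
        cmd = pending[0]
        try:
            run(cmd)            # stands in for node.cmd(cmd)
            pending.pop(0)
        except RuntimeError:
            time.sleep(period)
        if pending and time.monotonic() - start > timer:
            raise RuntimeError("service failed to validate: %s" % pending)

attempts = {"count": 0}
def fake_cmd(cmd):
    attempts["count"] += 1
    if attempts["count"] < 3:   # fail twice, then succeed
        raise RuntimeError("not running yet")

run_validation(["pidof zebra"], fake_cmd, period=0.1)
print("validated after", attempts["count"], "attempts")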
- - :param template: template to render - :param data: service specific defined data for template - :return: rendered template - """ - if data is None: - data = {} - return template.render_unicode( - node=self.node, config=self.render_config(), **data - ) - - def render_text(self, text: str, data: dict[str, Any] = None) -> str: - """ - Renders text based template providing all associated data to template. - - :param text: text to render - :param data: service specific defined data for template - :return: rendered template - """ - text = self.clean_text(text) - try: - template = Template(text) - return self._render(template, data) - except Exception: - raise CoreError( - f"node({self.node.name}) service({self.name}) " - f"{exceptions.text_error_template().render_unicode()}" - ) - - def render_template(self, template_path: str, data: dict[str, Any] = None) -> str: - """ - Renders file based template providing all associated data to template. - - :param template_path: path of file to render - :param data: service specific defined data for template - :return: rendered template - """ - try: - template = self.templates.get_template(template_path) - return self._render(template, data) - except Exception: - raise CoreError( - f"node({self.node.name}) service({self.name}) file({template_path})" - f"{exceptions.text_error_template().render_unicode()}" - ) - - def _define_config(self, configs: list[Configuration]) -> None: - """ - Initializes default configuration data. - - :param configs: configs to initialize - :return: nothing - """ - for config in configs: - self.config[config.id] = config - - def render_config(self) -> dict[str, str]: - """ - Returns configuration data key/value pairs for rendering a template. - - :return: nothing - """ - if self.custom_config: - return self.custom_config - else: - return {k: v.default for k, v in self.config.items()} - - def set_config(self, data: dict[str, str]) -> None: - """ - Set configuration data from key/value pairs. - - :param data: configuration key/values to set - :return: nothing - :raise CoreError: when an unknown configuration value is given - """ - for key, value in data.items(): - if key not in self.config: - raise CoreError(f"unknown config: {key}") - self.custom_config[key] = value diff --git a/daemon/core/configservice/dependencies.py b/daemon/core/configservice/dependencies.py deleted file mode 100644 index 1fbc4e48..00000000 --- a/daemon/core/configservice/dependencies.py +++ /dev/null @@ -1,125 +0,0 @@ -import logging -from typing import TYPE_CHECKING - -logger = logging.getLogger(__name__) - -if TYPE_CHECKING: - from core.configservice.base import ConfigService - - -class ConfigServiceDependencies: - """ - Generates sets of services to start in order of their dependencies. - """ - - def __init__(self, services: dict[str, "ConfigService"]) -> None: - """ - Create a ConfigServiceDependencies instance. 
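Editor's note: render_text above hands cleaned-up text to a Mako Template along with the node and config data. A minimal Mako example with made-up template variables; mako is already a dependency of this code:

import inspect
from mako.template import Template

text = inspect.cleandoc("""
    interface ${ifname}
     ip address ${addr}
    """)
# clean_text() uses the same cleandoc call to strip docstring-style indentation
print(Template(text).render_unicode(ifname="eth0", addr="10.0.0.1/24"))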
- - :param services: services for determining dependency sets - """ - # helpers to check validity - self.dependents: dict[str, set[str]] = {} - self.started: set[str] = set() - self.node_services: dict[str, "ConfigService"] = {} - for service in services.values(): - self.node_services[service.name] = service - for dependency in service.dependencies: - dependents = self.dependents.setdefault(dependency, set()) - dependents.add(service.name) - - # used to find paths - self.path: list["ConfigService"] = [] - self.visited: set[str] = set() - self.visiting: set[str] = set() - - def startup_paths(self) -> list[list["ConfigService"]]: - """ - Find startup path sets based on service dependencies. - - :return: lists of lists of services that can be started in parallel - """ - paths = [] - for name in self.node_services: - service = self.node_services[name] - if service.name in self.started: - logger.debug( - "skipping service that will already be started: %s", service.name - ) - continue - - path = self._start(service) - if path: - paths.append(path) - - if self.started != set(self.node_services): - raise ValueError( - f"failure to start all services: {self.started} != " - f"{self.node_services.keys()}" - ) - - return paths - - def _reset(self) -> None: - """ - Clear out metadata used for finding service dependency sets. - - :return: nothing - """ - self.path = [] - self.visited.clear() - self.visiting.clear() - - def _start(self, service: "ConfigService") -> list["ConfigService"]: - """ - Starts a oath for checking dependencies for a given service. - - :param service: service to check dependencies for - :return: list of config services to start in order - """ - logger.debug("starting service dependency check: %s", service.name) - self._reset() - return self._visit(service) - - def _visit(self, current_service: "ConfigService") -> list["ConfigService"]: - """ - Visits a service when discovering dependency chains for service. 
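Editor's note: startup_paths together with the _visit traversal that follows amount to a depth-first walk of the dependency graph with cycle detection, emitting dependencies before their dependents. A compact standalone sketch of that ordering; the service names are only examples:

def startup_order(dependencies):
    # dependencies: service name -> list of services it depends on
    order, visiting, visited = [], set(), set()

    def visit(name):
        if name in visiting:
            raise ValueError("cyclic dependency at service: %s" % name)
        if name in visited:
            return
        visiting.add(name)
        for dep in dependencies.get(name, []):
            visit(dep)
        visiting.discard(name)
        visited.add(name)
        order.append(name)   # dependencies land before dependents

    for name in dependencies:
        visit(name)
    return order

print(startup_order({"FRROSPFv2": ["FRRzebra"], "FRRzebra": [], "IPForward": []}))
# ['FRRzebra', 'FRROSPFv2', 'IPForward']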
- - :param current_service: service being visited - :return: list of dependent services for a visited service - """ - logger.debug("visiting service(%s): %s", current_service.name, self.path) - self.visited.add(current_service.name) - self.visiting.add(current_service.name) - - # dive down - for service_name in current_service.dependencies: - if service_name not in self.node_services: - raise ValueError( - "required dependency was not included in node " - f"services: {service_name}" - ) - - if service_name in self.visiting: - raise ValueError( - f"cyclic dependency at service({current_service.name}): " - f"{service_name}" - ) - - if service_name not in self.visited: - service = self.node_services[service_name] - self._visit(service) - - # add service when bottom is found - logger.debug("adding service to startup path: %s", current_service.name) - self.started.add(current_service.name) - self.path.append(current_service) - self.visiting.remove(current_service.name) - - # rise back up - for service_name in self.dependents.get(current_service.name, []): - if service_name not in self.visited: - service = self.node_services[service_name] - self._visit(service) - - return self.path diff --git a/daemon/core/configservice/manager.py b/daemon/core/configservice/manager.py deleted file mode 100644 index 542f3cc5..00000000 --- a/daemon/core/configservice/manager.py +++ /dev/null @@ -1,103 +0,0 @@ -import logging -import pathlib -import pkgutil -from pathlib import Path - -from core import configservices, utils -from core.configservice.base import ConfigService -from core.errors import CoreError - -logger = logging.getLogger(__name__) - - -class ConfigServiceManager: - """ - Manager for configurable services. - """ - - def __init__(self): - """ - Create a ConfigServiceManager instance. - """ - self.services: dict[str, type[ConfigService]] = {} - - def get_service(self, name: str) -> type[ConfigService]: - """ - Retrieve a service by name. - - :param name: name of service - :return: service class - :raises CoreError: when service is not found - """ - service_class = self.services.get(name) - if service_class is None: - raise CoreError(f"service does not exist {name}") - return service_class - - def add(self, service: type[ConfigService]) -> None: - """ - Add service to manager, checking service requirements have been met. - - :param service: service to add to manager - :return: nothing - :raises CoreError: when service is a duplicate or has unmet executables - """ - name = service.name - logger.debug( - "loading service: class(%s) name(%s)", service.__class__.__name__, name - ) - - # avoid duplicate services - if name in self.services: - raise CoreError(f"duplicate service being added: {name}") - - # validate dependent executables are present - for executable in service.executables: - try: - utils.which(executable, required=True) - except CoreError as e: - raise CoreError(f"config service({service.name}): {e}") - - # make service available - self.services[name] = service - - def load_locals(self) -> list[str]: - """ - Search and add config service from local core module. - - :return: list of errors when loading services - """ - errors = [] - for module_info in pkgutil.walk_packages( - configservices.__path__, f"{configservices.__name__}." 
- ): - services = utils.load_module(module_info.name, ConfigService) - for service in services: - try: - self.add(service) - except CoreError as e: - errors.append(service.name) - logger.debug("not loading config service(%s): %s", service.name, e) - return errors - - def load(self, path: Path) -> list[str]: - """ - Search path provided for config services and add them for being managed. - - :param path: path to search configurable services - :return: list errors when loading services - """ - path = pathlib.Path(path) - subdirs = [x for x in path.iterdir() if x.is_dir()] - subdirs.append(path) - service_errors = [] - for subdir in subdirs: - logger.debug("loading config services from: %s", subdir) - services = utils.load_classes(subdir, ConfigService) - for service in services: - try: - self.add(service) - except CoreError as e: - service_errors.append(service.name) - logger.debug("not loading service(%s): %s", service.name, e) - return service_errors diff --git a/daemon/core/configservices/frrservices/services.py b/daemon/core/configservices/frrservices/services.py deleted file mode 100644 index 378d42f8..00000000 --- a/daemon/core/configservices/frrservices/services.py +++ /dev/null @@ -1,420 +0,0 @@ -import abc -from typing import Any - -from core.config import Configuration -from core.configservice.base import ConfigService, ConfigServiceMode -from core.emane.nodes import EmaneNet -from core.nodes.base import CoreNodeBase, NodeBase -from core.nodes.interface import DEFAULT_MTU, CoreInterface -from core.nodes.network import PtpNet, WlanNode -from core.nodes.physical import Rj45Node -from core.nodes.wireless import WirelessNode - -GROUP: str = "FRR" -FRR_STATE_DIR: str = "/var/run/frr" - - -def is_wireless(node: NodeBase) -> bool: - """ - Check if the node is a wireless type node. - - :param node: node to check type for - :return: True if wireless type, False otherwise - """ - return isinstance(node, (WlanNode, EmaneNet, WirelessNode)) - - -def has_mtu_mismatch(iface: CoreInterface) -> bool: - """ - Helper to detect MTU mismatch and add the appropriate FRR - mtu-ignore command. This is needed when e.g. a node is linked via a - GreTap device. - """ - if iface.mtu != DEFAULT_MTU: - return True - if not iface.net: - return False - for iface in iface.net.get_ifaces(): - if iface.mtu != iface.mtu: - return True - return False - - -def get_min_mtu(iface: CoreInterface) -> int: - """ - Helper to discover the minimum MTU of interfaces linked with the - given interface. - """ - mtu = iface.mtu - if not iface.net: - return mtu - for iface in iface.net.get_ifaces(): - if iface.mtu < mtu: - mtu = iface.mtu - return mtu - - -def get_router_id(node: CoreNodeBase) -> str: - """ - Helper to return the first IPv4 address of a node as its router ID. - """ - for iface in node.get_ifaces(control=False): - ip4 = iface.get_ip4() - if ip4: - return str(ip4.ip) - return "0.0.0.0" - - -def rj45_check(iface: CoreInterface) -> bool: - """ - Helper to detect whether interface is connected an external RJ45 - link. 
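Editor's note: get_router_id above takes the node's first non-control IPv4 address and falls back to 0.0.0.0. A sketch of that selection using plain address strings in place of interface objects:

def get_router_id(ip4_addresses):
    # the first usable IPv4 address becomes the FRR router-id
    for addr in ip4_addresses:
        if addr:
            return addr.split("/")[0]
    return "0.0.0.0"

print(get_router_id(["10.0.0.1/24", "10.0.1.1/24"]))  # 10.0.0.1
print(get_router_id([]))                              # 0.0.0.0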
- """ - if iface.net: - for peer_iface in iface.net.get_ifaces(): - if peer_iface == iface: - continue - if isinstance(peer_iface.node, Rj45Node): - return True - return False - - -class FRRZebra(ConfigService): - name: str = "FRRzebra" - group: str = GROUP - directories: list[str] = ["/usr/local/etc/frr", "/var/run/frr", "/var/log/frr"] - files: list[str] = [ - "/usr/local/etc/frr/frr.conf", - "frrboot.sh", - "/usr/local/etc/frr/vtysh.conf", - "/usr/local/etc/frr/daemons", - ] - executables: list[str] = ["zebra"] - dependencies: list[str] = [] - startup: list[str] = ["bash frrboot.sh zebra"] - validate: list[str] = ["pidof zebra"] - shutdown: list[str] = ["killall zebra"] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [] - modes: dict[str, dict[str, str]] = {} - - def data(self) -> dict[str, Any]: - frr_conf = self.files[0] - frr_bin_search = self.node.session.options.get( - "frr_bin_search", default="/usr/local/bin /usr/bin /usr/lib/frr" - ).strip('"') - frr_sbin_search = self.node.session.options.get( - "frr_sbin_search", - default="/usr/local/sbin /usr/sbin /usr/lib/frr /usr/libexec/frr", - ).strip('"') - - services = [] - want_ip4 = False - want_ip6 = False - for service in self.node.config_services.values(): - if self.name not in service.dependencies: - continue - if not isinstance(service, FrrService): - continue - if service.ipv4_routing: - want_ip4 = True - if service.ipv6_routing: - want_ip6 = True - services.append(service) - - ifaces = [] - for iface in self.node.get_ifaces(): - ip4s = [] - ip6s = [] - for ip4 in iface.ip4s: - ip4s.append(str(ip4.ip)) - for ip6 in iface.ip6s: - ip6s.append(str(ip6.ip)) - ifaces.append((iface, ip4s, ip6s, iface.control)) - - return dict( - frr_conf=frr_conf, - frr_sbin_search=frr_sbin_search, - frr_bin_search=frr_bin_search, - frr_state_dir=FRR_STATE_DIR, - ifaces=ifaces, - want_ip4=want_ip4, - want_ip6=want_ip6, - services=services, - ) - - -class FrrService(abc.ABC): - group: str = GROUP - directories: list[str] = [] - files: list[str] = [] - executables: list[str] = [] - dependencies: list[str] = ["FRRzebra"] - startup: list[str] = [] - validate: list[str] = [] - shutdown: list[str] = [] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [] - modes: dict[str, dict[str, str]] = {} - ipv4_routing: bool = False - ipv6_routing: bool = False - - @abc.abstractmethod - def frr_iface_config(self, iface: CoreInterface) -> str: - raise NotImplementedError - - @abc.abstractmethod - def frr_config(self) -> str: - raise NotImplementedError - - -class FRROspfv2(FrrService, ConfigService): - """ - The OSPFv2 service provides IPv4 routing for wired networks. It does - not build its own configuration file but has hooks for adding to the - unified frr.conf file. - """ - - name: str = "FRROSPFv2" - shutdown: list[str] = ["killall ospfd"] - validate: list[str] = ["pidof ospfd"] - ipv4_routing: bool = True - - def frr_config(self) -> str: - router_id = get_router_id(self.node) - addresses = [] - for iface in self.node.get_ifaces(control=False): - for ip4 in iface.ip4s: - addresses.append(str(ip4)) - data = dict(router_id=router_id, addresses=addresses) - text = """ - router ospf - router-id ${router_id} - % for addr in addresses: - network ${addr} area 0 - % endfor - ospf opaque-lsa - ! 
- """ - return self.render_text(text, data) - - def frr_iface_config(self, iface: CoreInterface) -> str: - has_mtu = has_mtu_mismatch(iface) - has_rj45 = rj45_check(iface) - is_ptp = isinstance(iface.net, PtpNet) - data = dict(has_mtu=has_mtu, is_ptp=is_ptp, has_rj45=has_rj45) - text = """ - % if has_mtu: - ip ospf mtu-ignore - % endif - % if has_rj45: - <% return STOP_RENDERING %> - % endif - % if is_ptp: - ip ospf network point-to-point - % endif - ip ospf hello-interval 2 - ip ospf dead-interval 6 - ip ospf retransmit-interval 5 - """ - return self.render_text(text, data) - - -class FRROspfv3(FrrService, ConfigService): - """ - The OSPFv3 service provides IPv6 routing for wired networks. It does - not build its own configuration file but has hooks for adding to the - unified frr.conf file. - """ - - name: str = "FRROSPFv3" - shutdown: list[str] = ["killall ospf6d"] - validate: list[str] = ["pidof ospf6d"] - ipv4_routing: bool = True - ipv6_routing: bool = True - - def frr_config(self) -> str: - router_id = get_router_id(self.node) - ifnames = [] - for iface in self.node.get_ifaces(control=False): - ifnames.append(iface.name) - data = dict(router_id=router_id, ifnames=ifnames) - text = """ - router ospf6 - router-id ${router_id} - % for ifname in ifnames: - interface ${ifname} area 0.0.0.0 - % endfor - ! - """ - return self.render_text(text, data) - - def frr_iface_config(self, iface: CoreInterface) -> str: - mtu = get_min_mtu(iface) - if mtu < iface.mtu: - return f"ipv6 ospf6 ifmtu {mtu}" - else: - return "" - - -class FRRBgp(FrrService, ConfigService): - """ - The BGP service provides interdomain routing. - Peers must be manually configured, with a full mesh for those - having the same AS number. - """ - - name: str = "FRRBGP" - shutdown: list[str] = ["killall bgpd"] - validate: list[str] = ["pidof bgpd"] - custom_needed: bool = True - ipv4_routing: bool = True - ipv6_routing: bool = True - - def frr_config(self) -> str: - router_id = get_router_id(self.node) - text = f""" - ! BGP configuration - ! You should configure the AS number below - ! along with this router's peers. - router bgp {self.node.id} - bgp router-id {router_id} - redistribute connected - !neighbor 1.2.3.4 remote-as 555 - ! - """ - return self.clean_text(text) - - def frr_iface_config(self, iface: CoreInterface) -> str: - return "" - - -class FRRRip(FrrService, ConfigService): - """ - The RIP service provides IPv4 routing for wired networks. - """ - - name: str = "FRRRIP" - shutdown: list[str] = ["killall ripd"] - validate: list[str] = ["pidof ripd"] - ipv4_routing: bool = True - - def frr_config(self) -> str: - text = """ - router rip - redistribute static - redistribute connected - redistribute ospf - network 0.0.0.0/0 - ! - """ - return self.clean_text(text) - - def frr_iface_config(self, iface: CoreInterface) -> str: - return "" - - -class FRRRipng(FrrService, ConfigService): - """ - The RIP NG service provides IPv6 routing for wired networks. - """ - - name: str = "FRRRIPNG" - shutdown: list[str] = ["killall ripngd"] - validate: list[str] = ["pidof ripngd"] - ipv6_routing: bool = True - - def frr_config(self) -> str: - text = """ - router ripng - redistribute static - redistribute connected - redistribute ospf6 - network ::/0 - ! 
- """ - return self.clean_text(text) - - def frr_iface_config(self, iface: CoreInterface) -> str: - return "" - - -class FRRBabel(FrrService, ConfigService): - """ - The Babel service provides a loop-avoiding distance-vector routing - protocol for IPv6 and IPv4 with fast convergence properties. - """ - - name: str = "FRRBabel" - shutdown: list[str] = ["killall babeld"] - validate: list[str] = ["pidof babeld"] - ipv6_routing: bool = True - - def frr_config(self) -> str: - ifnames = [] - for iface in self.node.get_ifaces(control=False): - ifnames.append(iface.name) - text = """ - router babel - % for ifname in ifnames: - network ${ifname} - % endfor - redistribute static - redistribute ipv4 connected - ! - """ - data = dict(ifnames=ifnames) - return self.render_text(text, data) - - def frr_iface_config(self, iface: CoreInterface) -> str: - if is_wireless(iface.net): - text = """ - babel wireless - no babel split-horizon - """ - else: - text = """ - babel wired - babel split-horizon - """ - return self.clean_text(text) - - -class FRRpimd(FrrService, ConfigService): - """ - PIM multicast routing based on XORP. - """ - - name: str = "FRRpimd" - shutdown: list[str] = ["killall pimd"] - validate: list[str] = ["pidof pimd"] - ipv4_routing: bool = True - - def frr_config(self) -> str: - ifname = "eth0" - for iface in self.node.get_ifaces(): - if iface.name != "lo": - ifname = iface.name - break - - text = f""" - router mfea - ! - router igmp - ! - router pim - !ip pim rp-address 10.0.0.1 - ip pim bsr-candidate {ifname} - ip pim rp-candidate {ifname} - !ip pim spt-threshold interval 10 bytes 80000 - ! - """ - return self.clean_text(text) - - def frr_iface_config(self, iface: CoreInterface) -> str: - text = """ - ip mfea - ip igmp - ip pim - """ - return self.clean_text(text) diff --git a/daemon/core/configservices/frrservices/templates/frrboot.sh b/daemon/core/configservices/frrservices/templates/frrboot.sh deleted file mode 100644 index c1c11d28..00000000 --- a/daemon/core/configservices/frrservices/templates/frrboot.sh +++ /dev/null @@ -1,109 +0,0 @@ -#!/bin/sh -# auto-generated by zebra service (frr.py) -FRR_CONF="${frr_conf}" -FRR_SBIN_SEARCH="${frr_sbin_search}" -FRR_BIN_SEARCH="${frr_bin_search}" -FRR_STATE_DIR="${frr_state_dir}" - -searchforprog() -{ - prog=$1 - searchpath=$@ - ret= - for p in $searchpath; do - if [ -x $p/$prog ]; then - ret=$p - break - fi - done - echo $ret -} - -confcheck() -{ - CONF_DIR=`dirname $FRR_CONF` - # if /etc/frr exists, point /etc/frr/frr.conf -> CONF_DIR - if [ "$CONF_DIR" != "/etc/frr" ] && [ -d /etc/frr ] && [ ! -e /etc/frr/frr.conf ]; then - ln -s $CONF_DIR/frr.conf /etc/frr/frr.conf - fi - # if /etc/frr exists, point /etc/frr/vtysh.conf -> CONF_DIR - if [ "$CONF_DIR" != "/etc/frr" ] && [ -d /etc/frr ] && [ ! -e /etc/frr/vtysh.conf ]; then - ln -s $CONF_DIR/vtysh.conf /etc/frr/vtysh.conf - fi -} - -bootdaemon() -{ - FRR_SBIN_DIR=$(searchforprog $1 $FRR_SBIN_SEARCH) - if [ "z$FRR_SBIN_DIR" = "z" ]; then - echo "ERROR: FRR's '$1' daemon not found in search path:" - echo " $FRR_SBIN_SEARCH" - return 1 - fi - - flags="" - - if [ "$1" = "pimd" ] && \\ - grep -E -q '^[[:space:]]*router[[:space:]]+pim6[[:space:]]*$' $FRR_CONF; then - flags="$flags -6" - fi - - if [ "$1" = "ospfd" ]; then - flags="$flags --apiserver" - fi - - #force FRR to use CORE generated conf file - flags="$flags -d -f $FRR_CONF" - $FRR_SBIN_DIR/$1 $flags - - if [ "$?" 
!= "0" ]; then - echo "ERROR: FRR's '$1' daemon failed to start!:" - return 1 - fi -} - -bootfrr() -{ - FRR_BIN_DIR=$(searchforprog 'vtysh' $FRR_BIN_SEARCH) - if [ "z$FRR_BIN_DIR" = "z" ]; then - echo "ERROR: FRR's 'vtysh' program not found in search path:" - echo " $FRR_BIN_SEARCH" - return 1 - fi - - # fix /var/run/frr permissions - id -u frr 2>/dev/null >/dev/null - if [ "$?" = "0" ]; then - chown frr $FRR_STATE_DIR - fi - - bootdaemon "zebra" - if grep -q "^ip route " $FRR_CONF; then - bootdaemon "staticd" - fi - for r in rip ripng ospf6 ospf bgp babel; do - if grep -q "^router \\<$${}{r}\\>" $FRR_CONF; then - bootdaemon "$${}{r}d" - fi - done - - if grep -E -q '^[[:space:]]*router[[:space:]]+pim6?[[:space:]]*$' $FRR_CONF; then - bootdaemon "pimd" - fi - - $FRR_BIN_DIR/vtysh -b -} - -if [ "$1" != "zebra" ]; then - echo "WARNING: '$1': all FRR daemons are launched by the 'zebra' service!" - exit 1 -fi -confcheck -bootfrr - -# reset interfaces -% for iface, _, _ , _ in ifaces: -ip link set dev ${iface.name} down -sleep 1 -ip link set dev ${iface.name} up -% endfor diff --git a/daemon/core/configservices/frrservices/templates/usr/local/etc/frr/daemons b/daemon/core/configservices/frrservices/templates/usr/local/etc/frr/daemons deleted file mode 100644 index dbd42108..00000000 --- a/daemon/core/configservices/frrservices/templates/usr/local/etc/frr/daemons +++ /dev/null @@ -1,60 +0,0 @@ -# -# When activation a daemon at the first time, a config file, even if it is -# empty, has to be present *and* be owned by the user and group "frr", else -# the daemon will not be started by /etc/init.d/frr. The permissions should -# be u=rw,g=r,o=. -# When using "vtysh" such a config file is also needed. It should be owned by -# group "frrvty" and set to ug=rw,o= though. Check /etc/pam.d/frr, too. -# -# The watchfrr and zebra daemons are always started. -# -bgpd=yes -ospfd=yes -ospf6d=yes -ripd=yes -ripngd=yes -isisd=yes -pimd=yes -ldpd=yes -nhrpd=yes -eigrpd=yes -babeld=yes -sharpd=yes -staticd=yes -pbrd=yes -bfdd=yes -fabricd=yes - -# -# If this option is set the /etc/init.d/frr script automatically loads -# the config via "vtysh -b" when the servers are started. -# Check /etc/pam.d/frr if you intend to use "vtysh"! -# -vtysh_enable=yes -zebra_options=" -A 127.0.0.1 -s 90000000" -bgpd_options=" -A 127.0.0.1" -ospfd_options=" -A 127.0.0.1" -ospf6d_options=" -A ::1" -ripd_options=" -A 127.0.0.1" -ripngd_options=" -A ::1" -isisd_options=" -A 127.0.0.1" -pimd_options=" -A 127.0.0.1" -ldpd_options=" -A 127.0.0.1" -nhrpd_options=" -A 127.0.0.1" -eigrpd_options=" -A 127.0.0.1" -babeld_options=" -A 127.0.0.1" -sharpd_options=" -A 127.0.0.1" -pbrd_options=" -A 127.0.0.1" -staticd_options="-A 127.0.0.1" -bfdd_options=" -A 127.0.0.1" -fabricd_options="-A 127.0.0.1" - -# The list of daemons to watch is automatically generated by the init script. -#watchfrr_options="" - -# for debugging purposes, you can specify a "wrap" command to start instead -# of starting the daemon directly, e.g. to use valgrind on ospfd: -# ospfd_wrap="/usr/bin/valgrind" -# or you can use "all_wrap" for all daemons, e.g. to use perf record: -# all_wrap="/usr/bin/perf record --call-graph -" -# the normal daemon command is added to this at the end. 
diff --git a/daemon/core/configservices/frrservices/templates/usr/local/etc/frr/frr.conf b/daemon/core/configservices/frrservices/templates/usr/local/etc/frr/frr.conf deleted file mode 100644 index 8e036136..00000000 --- a/daemon/core/configservices/frrservices/templates/usr/local/etc/frr/frr.conf +++ /dev/null @@ -1,25 +0,0 @@ -% for iface, ip4s, ip6s, is_control in ifaces: -interface ${iface.name} - % if want_ip4: - % for addr in ip4s: - ip address ${addr} - % endfor - % endif - % if want_ip6: - % for addr in ip6s: - ipv6 address ${addr} - % endfor - % endif - % if not is_control: - % for service in services: - % for line in service.frr_iface_config(iface).split("\n"): - ${line} - % endfor - % endfor - % endif -! -% endfor - -% for service in services: -${service.frr_config()} -% endfor diff --git a/daemon/core/configservices/frrservices/templates/usr/local/etc/frr/vtysh.conf b/daemon/core/configservices/frrservices/templates/usr/local/etc/frr/vtysh.conf deleted file mode 100644 index e0ab9cb6..00000000 --- a/daemon/core/configservices/frrservices/templates/usr/local/etc/frr/vtysh.conf +++ /dev/null @@ -1 +0,0 @@ -service integrated-vtysh-config diff --git a/daemon/core/configservices/nrlservices/services.py b/daemon/core/configservices/nrlservices/services.py deleted file mode 100644 index 3002cd94..00000000 --- a/daemon/core/configservices/nrlservices/services.py +++ /dev/null @@ -1,164 +0,0 @@ -from typing import Any - -from core import utils -from core.config import Configuration -from core.configservice.base import ConfigService, ConfigServiceMode - -GROUP: str = "ProtoSvc" - - -class MgenSinkService(ConfigService): - name: str = "MGEN_Sink" - group: str = GROUP - directories: list[str] = [] - files: list[str] = ["mgensink.sh", "sink.mgen"] - executables: list[str] = ["mgen"] - dependencies: list[str] = [] - startup: list[str] = ["bash mgensink.sh"] - validate: list[str] = ["pidof mgen"] - shutdown: list[str] = ["killall mgen"] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [] - modes: dict[str, dict[str, str]] = {} - - def data(self) -> dict[str, Any]: - ifnames = [] - for iface in self.node.get_ifaces(): - name = utils.sysctl_devname(iface.name) - ifnames.append(name) - return dict(ifnames=ifnames) - - -class NrlNhdp(ConfigService): - name: str = "NHDP" - group: str = GROUP - directories: list[str] = [] - files: list[str] = ["nrlnhdp.sh"] - executables: list[str] = ["nrlnhdp"] - dependencies: list[str] = [] - startup: list[str] = ["bash nrlnhdp.sh"] - validate: list[str] = ["pidof nrlnhdp"] - shutdown: list[str] = ["killall nrlnhdp"] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [] - modes: dict[str, dict[str, str]] = {} - - def data(self) -> dict[str, Any]: - has_smf = "SMF" in self.node.config_services - ifnames = [] - for iface in self.node.get_ifaces(control=False): - ifnames.append(iface.name) - return dict(has_smf=has_smf, ifnames=ifnames) - - -class NrlSmf(ConfigService): - name: str = "SMF" - group: str = GROUP - directories: list[str] = [] - files: list[str] = ["startsmf.sh"] - executables: list[str] = ["nrlsmf", "killall"] - dependencies: list[str] = [] - startup: list[str] = ["bash startsmf.sh"] - validate: list[str] = ["pidof nrlsmf"] - shutdown: list[str] = ["killall nrlsmf"] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [] - modes: dict[str, dict[str, str]] = {} - - def data(self) -> 
dict[str, Any]: - has_nhdp = "NHDP" in self.node.config_services - has_olsr = "OLSR" in self.node.config_services - ifnames = [] - ip4_prefix = None - for iface in self.node.get_ifaces(control=False): - ifnames.append(iface.name) - ip4 = iface.get_ip4() - if ip4: - ip4_prefix = f"{ip4.ip}/{24}" - break - return dict( - has_nhdp=has_nhdp, has_olsr=has_olsr, ifnames=ifnames, ip4_prefix=ip4_prefix - ) - - -class NrlOlsr(ConfigService): - name: str = "OLSR" - group: str = GROUP - directories: list[str] = [] - files: list[str] = ["nrlolsrd.sh"] - executables: list[str] = ["nrlolsrd"] - dependencies: list[str] = [] - startup: list[str] = ["bash nrlolsrd.sh"] - validate: list[str] = ["pidof nrlolsrd"] - shutdown: list[str] = ["killall nrlolsrd"] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [] - modes: dict[str, dict[str, str]] = {} - - def data(self) -> dict[str, Any]: - has_smf = "SMF" in self.node.config_services - has_zebra = "zebra" in self.node.config_services - ifname = None - for iface in self.node.get_ifaces(control=False): - ifname = iface.name - break - return dict(has_smf=has_smf, has_zebra=has_zebra, ifname=ifname) - - -class NrlOlsrv2(ConfigService): - name: str = "OLSRv2" - group: str = GROUP - directories: list[str] = [] - files: list[str] = ["nrlolsrv2.sh"] - executables: list[str] = ["nrlolsrv2"] - dependencies: list[str] = [] - startup: list[str] = ["bash nrlolsrv2.sh"] - validate: list[str] = ["pidof nrlolsrv2"] - shutdown: list[str] = ["killall nrlolsrv2"] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [] - modes: dict[str, dict[str, str]] = {} - - def data(self) -> dict[str, Any]: - has_smf = "SMF" in self.node.config_services - ifnames = [] - for iface in self.node.get_ifaces(control=False): - ifnames.append(iface.name) - return dict(has_smf=has_smf, ifnames=ifnames) - - -class OlsrOrg(ConfigService): - name: str = "OLSRORG" - group: str = GROUP - directories: list[str] = ["/etc/olsrd"] - files: list[str] = ["olsrd.sh", "/etc/olsrd/olsrd.conf"] - executables: list[str] = ["olsrd"] - dependencies: list[str] = [] - startup: list[str] = ["bash olsrd.sh"] - validate: list[str] = ["pidof olsrd"] - shutdown: list[str] = ["killall olsrd"] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [] - modes: dict[str, dict[str, str]] = {} - - def data(self) -> dict[str, Any]: - has_smf = "SMF" in self.node.config_services - ifnames = [] - for iface in self.node.get_ifaces(control=False): - ifnames.append(iface.name) - return dict(has_smf=has_smf, ifnames=ifnames) - - -class MgenActor(ConfigService): - name: str = "MgenActor" - group: str = GROUP - directories: list[str] = [] - files: list[str] = ["start_mgen_actor.sh"] - executables: list[str] = ["mgen"] - dependencies: list[str] = [] - startup: list[str] = ["bash start_mgen_actor.sh"] - validate: list[str] = ["pidof mgen"] - shutdown: list[str] = ["killall mgen"] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [] - modes: dict[str, dict[str, str]] = {} diff --git a/daemon/core/configservices/nrlservices/templates/etc/olsrd/olsrd.conf b/daemon/core/configservices/nrlservices/templates/etc/olsrd/olsrd.conf deleted file mode 100644 index a5716a98..00000000 --- a/daemon/core/configservices/nrlservices/templates/etc/olsrd/olsrd.conf +++ /dev/null @@ -1,312 +0,0 @@ -# -# OLSR.org routing daemon config file -# 
This file contains the usual options for an ETX based -# stationary network without fisheye -# (for other options see olsrd.conf.default.full) -# -# Lines starting with a # are discarded -# - -#### ATTENTION for IPv6 users #### -# Because of limitations in the parser IPv6 addresses must NOT -# begin with a ":", so please add a "0" as a prefix. - -########################### -### Basic configuration ### -########################### -# keep this settings at the beginning of your first configuration file - -# Debug level (0-9) -# If set to 0 the daemon runs in the background, unless "NoFork" is set to true -# (Default is 1) - -# DebugLevel 1 - -# IP version to use (4 or 6) -# (Default is 4) - -# IpVersion 4 - -################################# -### OLSRd agent configuration ### -################################# -# this parameters control the settings of the routing agent which are not -# related to the OLSR protocol and it's extensions - -# FIBMetric controls the metric value of the host-routes OLSRd sets. -# - "flat" means that the metric value is always 2. This is the preferred value -# because it helps the linux kernel routing to clean up older routes -# - "correct" use the hopcount as the metric value. -# - "approx" use the hopcount as the metric value too, but does only update the -# hopcount if the nexthop changes too -# (Default is "flat") - -# FIBMetric "flat" - -####################################### -### Linux specific OLSRd extensions ### -####################################### -# these parameters are only working on linux at the moment - -# SrcIpRoutes tells OLSRd to set the Src flag of host routes to the originator-ip -# of the node. In addition to this an additional localhost device is created -# to make sure the returning traffic can be received. -# (Default is "no") - -# SrcIpRoutes no - -# Specify the proto tag to be used for routes olsr inserts into kernel -# currently only implemented for linux -# valid values under linux are 1 .. 254 -# 1 gets remapped by olsrd to 0 UNSPECIFIED (1 is reserved for ICMP redirects) -# 2 KERNEL routes (not very wise to use) -# 3 BOOT (should in fact not be used by routing daemons) -# 4 STATIC -# 8 .. 15 various routing daemons (gated, zebra, bird, & co) -# (defaults to 0 which gets replaced by an OS-specific default value -# under linux 3 (BOOT) (for backward compatibility) - -# RtProto 0 - -# Activates (in IPv6 mode) the automatic use of NIIT -# (see README-Olsr-Extensions) -# (default is "yes") - -# UseNiit yes - -# Activates the smartgateway ipip tunnel feature. -# See README-Olsr-Extensions for a description of smartgateways. -# (default is "no") - -# SmartGateway no - -# Signals that the server tunnel must always be removed on shutdown, -# irrespective of the interface up/down state during startup. -# (default is "no") - -# SmartGatewayAlwaysRemoveServerTunnel no - -# Determines the maximum number of gateways that can be in use at any given -# time. This setting is used to mitigate the effects of breaking connections -# (due to the selection of a new gateway) on a dynamic network. -# (default is 1) - -# SmartGatewayUseCount 1 - -# Determines the take-down percentage for a non-current smart gateway tunnel. -# If the cost of the current smart gateway tunnel is less than this percentage -# of the cost of the non-current smart gateway tunnel, then the non-current smart -# gateway tunnel is taken down because it is then presumed to be 'too expensive'. 
-# This setting is only relevant when SmartGatewayUseCount is larger than 1; -# a value of 0 will result in the tunnels not being taken down proactively. -# (default is 0) - -# SmartGatewayTakeDownPercentage 0 - -# Determines the policy routing script that is executed during startup and -# shutdown of olsrd. The script is only executed when SmartGatewayUseCount -# is set to a value larger than 1. The script must setup policy routing -# rules such that multi-gateway mode works. A sample script is included. -# (default is not set) - -# SmartGatewayPolicyRoutingScript "" - -# Determines the egress interfaces that are part of the multi-gateway setup and -# therefore only relevant when SmartGatewayUseCount is larger than 1 (in which -# case it must be explicitly set). -# (default is not set) - -# SmartGatewayEgressInterfaces "" - -# Determines the routing tables offset for multi-gateway policy routing tables -# See the policy routing script for an explanation. -# (default is 90) - -# SmartGatewayTablesOffset 90 - -# Determines the policy routing rules offset for multi-gateway policy routing -# rules. See the policy routing script for an explanation. -# (default is 0, which indicates that the rules and tables should be aligned and -# puts this value at SmartGatewayTablesOffset - # egress interfaces - -# # olsr interfaces) - -# SmartGatewayRulesOffset 87 - -# Allows the selection of a smartgateway with NAT (only for IPv4) -# (default is "yes") - -# SmartGatewayAllowNAT yes - -# Determines the period (in milliseconds) on which a new smart gateway -# selection is performed. -# (default is 10000 milliseconds) - -# SmartGatewayPeriod 10000 - -# Determines the number of times the link state database must be stable -# before a new smart gateway is selected. -# (default is 6) - -# SmartGatewayStableCount 6 - -# When another gateway than the current one has a cost of less than the cost -# of the current gateway multiplied by SmartGatewayThreshold then the smart -# gateway is switched to the other gateway. The unit is percentage. -# (defaults to 0) - -# SmartGatewayThreshold 0 - -# The weighing factor for the gateway uplink bandwidth (exit link, uplink). -# See README-Olsr-Extensions for a description of smart gateways. -# (default is 1) - -# SmartGatewayWeightExitLinkUp 1 - -# The weighing factor for the gateway downlink bandwidth (exit link, downlink). -# See README-Olsr-Extensions for a description of smart gateways. -# (default is 1) - -# SmartGatewayWeightExitLinkDown 1 - -# The weighing factor for the ETX costs. -# See README-Olsr-Extensions for a description of smart gateways. -# (default is 1) - -# SmartGatewayWeightEtx 1 - -# The divider for the ETX costs. -# See README-Olsr-Extensions for a description of smart gateways. -# (default is 0) - -# SmartGatewayDividerEtx 0 - -# Defines what kind of Uplink this node will publish as a -# smartgateway. The existence of the uplink is detected by -# a route to 0.0.0.0/0, ::ffff:0:0/96 and/or 2000::/3. -# possible values are "none", "ipv4", "ipv6", "both" -# (default is "both") - -# SmartGatewayUplink "both" - -# Specifies if the local ipv4 uplink use NAT -# (default is "yes") - -# SmartGatewayUplinkNAT yes - -# Specifies the speed of the uplink in kilobit/s. -# First parameter is upstream, second parameter is downstream -# (default is 128/1024) - -# SmartGatewaySpeed 128 1024 - -# Specifies the EXTERNAL ipv6 prefix of the uplink. A prefix -# length of more than 64 is not allowed. 
-# (default is 0::/0 - -# SmartGatewayPrefix 0::/0 - -############################## -### OLSR protocol settings ### -############################## - -# HNA (Host network association) allows the OLSR to announce -# additional IPs or IP subnets to the net that are reachable -# through this node. -# Syntax for HNA4 is "network-address network-mask" -# Syntax for HNA6 is "network-address prefix-length" -# (default is no HNA) -Hna4 -{ -# Internet gateway -# 0.0.0.0 0.0.0.0 -# specific small networks reachable through this node -# 15.15.0.0 255.255.255.0 -} -Hna6 -{ -# Internet gateway -# 0:: 0 -# specific small networks reachable through this node -# fec0:2200:106:0:0:0:0:0 48 -} - -################################ -### OLSR protocol extensions ### -################################ - -# Link quality algorithm (only for lq level 2) -# (see README-Olsr-Extensions) -# - "etx_float", a floating point ETX with exponential aging -# - "etx_fpm", same as ext_float, but with integer arithmetic -# - "etx_ff" (ETX freifunk), an etx variant which use all OLSR -# traffic (instead of only hellos) for ETX calculation -# - "etx_ffeth", an incompatible variant of etx_ff that allows -# ethernet links with ETX 0.1. -# (defaults to "etx_ff") - -# LinkQualityAlgorithm "etx_ff" - -# Fisheye mechanism for TCs (0 meansoff, 1 means on) -# (default is 1) - -LinkQualityFishEye 0 - -##################################### -### Example plugin configurations ### -##################################### -# Olsrd plugins to load -# This must be the absolute path to the file -# or the loader will use the following scheme: -# - Try the paths in the LD_LIBRARY_PATH -# environment variable. -# - The list of libraries cached in /etc/ld.so.cache -# - /lib, followed by /usr/lib -# -# the examples in this list are for linux, so check if the plugin is -# available if you use windows. -# each plugin should have a README file in it's lib subfolder - -# LoadPlugin "olsrd_txtinfo.dll" -#LoadPlugin "olsrd_txtinfo.so.0.1" -#{ - # the default port is 2006 but you can change it like this: - #PlParam "port" "8080" - - # You can set a "accept" single address to allow to connect to - # txtinfo. If no address is specified, then localhost (127.0.0.1) - # is allowed by default. txtinfo will only use the first "accept" - # parameter specified and will ignore the rest. - - # to allow a specific host: - #PlParam "accept" "172.29.44.23" - # if you set it to 0.0.0.0, it will accept all connections - #PlParam "accept" "0.0.0.0" -#} - -############################################# -### OLSRD default interface configuration ### -############################################# -# the default interface section can have the same values as the following -# interface configuration. It will allow you so set common options for all -# interfaces. - -InterfaceDefaults { - Ip4Broadcast 255.255.255.255 -} - -###################################### -### OLSRd Interfaces configuration ### -###################################### -# multiple interfaces can be specified for a single configuration block -# multiple configuration blocks can be specified - -# WARNING, don't forget to insert your interface names here ! 
-#Interface "" "" -#{ - # Interface Mode is used to prevent unnecessary - # packet forwarding on switched ethernet interfaces - # valid Modes are "mesh" and "ether" - # (default is "mesh") - - # Mode "mesh" -#} diff --git a/daemon/core/configservices/nrlservices/templates/mgensink.sh b/daemon/core/configservices/nrlservices/templates/mgensink.sh deleted file mode 100644 index bdbd0a8d..00000000 --- a/daemon/core/configservices/nrlservices/templates/mgensink.sh +++ /dev/null @@ -1 +0,0 @@ -mgen input sink.mgen output mgen_${node.name}.log diff --git a/daemon/core/configservices/nrlservices/templates/nrlnhdp.sh b/daemon/core/configservices/nrlservices/templates/nrlnhdp.sh deleted file mode 100644 index 4513dfe9..00000000 --- a/daemon/core/configservices/nrlservices/templates/nrlnhdp.sh +++ /dev/null @@ -1,7 +0,0 @@ -<% - ifaces = "-i " + " -i ".join(ifnames) - smf = "" - if has_smf: - smf = "-flooding ecds -smfClient %s_smf" % node.name -%> -nrlnhdp -l /var/log/nrlnhdp.log -rpipe ${node.name}_nhdp ${smf} ${ifaces} diff --git a/daemon/core/configservices/nrlservices/templates/nrlolsrd.sh b/daemon/core/configservices/nrlservices/templates/nrlolsrd.sh deleted file mode 100644 index 4072d189..00000000 --- a/daemon/core/configservices/nrlservices/templates/nrlolsrd.sh +++ /dev/null @@ -1,9 +0,0 @@ -<% - smf = "" - if has_smf: - smf = "-flooding s-mpr -smfClient %s_smf" % node.name - zebra = "" - if has_zebra: - zebra = "-z" -%> -nrlolsrd -i ${ifname} -l /var/log/nrlolsrd.log -rpipe ${node.name}_olsr ${smf} ${zebra} diff --git a/daemon/core/configservices/nrlservices/templates/nrlolsrv2.sh b/daemon/core/configservices/nrlservices/templates/nrlolsrv2.sh deleted file mode 100644 index 81196e26..00000000 --- a/daemon/core/configservices/nrlservices/templates/nrlolsrv2.sh +++ /dev/null @@ -1,7 +0,0 @@ -<% - ifaces = "-i " + " -i ".join(ifnames) - smf = "" - if has_smf: - smf = "-flooding ecds -smfClient %s_smf" % node.name -%> -nrlolsrv2 -l /var/log/nrlolsrv2.log -rpipe ${node.name}_olsrv2 -p olsr ${smf} ${ifaces} diff --git a/daemon/core/configservices/nrlservices/templates/olsrd.sh b/daemon/core/configservices/nrlservices/templates/olsrd.sh deleted file mode 100644 index 3040ca6b..00000000 --- a/daemon/core/configservices/nrlservices/templates/olsrd.sh +++ /dev/null @@ -1,4 +0,0 @@ -<% - ifaces = "-i " + " -i ".join(ifnames) -%> -olsrd ${ifaces} diff --git a/daemon/core/configservices/nrlservices/templates/sink.mgen b/daemon/core/configservices/nrlservices/templates/sink.mgen deleted file mode 100644 index 21d4fde6..00000000 --- a/daemon/core/configservices/nrlservices/templates/sink.mgen +++ /dev/null @@ -1,4 +0,0 @@ -0.0 LISTEN UDP 5000 -% for ifname in ifnames: -0.0 Join 224.225.1.2 INTERFACE ${ifname} -% endfor diff --git a/daemon/core/configservices/nrlservices/templates/start_mgen_actor.sh b/daemon/core/configservices/nrlservices/templates/start_mgen_actor.sh deleted file mode 100644 index 12630442..00000000 --- a/daemon/core/configservices/nrlservices/templates/start_mgen_actor.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/sh -# auto-generated by MgenActor service -mgenBasicActor.py -n ${node.name} -a 0.0.0.0 < /dev/null > /dev/null 2>&1 & diff --git a/daemon/core/configservices/nrlservices/templates/startsmf.sh b/daemon/core/configservices/nrlservices/templates/startsmf.sh deleted file mode 100644 index 458b3ee9..00000000 --- a/daemon/core/configservices/nrlservices/templates/startsmf.sh +++ /dev/null @@ -1,12 +0,0 @@ -<% - ifaces = ",".join(ifnames) - if has_nhdp: - flood = "ecds" - elif 
has_olsr: - flood = "smpr" - else: - flood = "cf" -%> -#!/bin/sh -# auto-generated by NrlSmf service -nrlsmf instance ${node.name}_smf ${flood} ${ifaces} hash MD5 log /var/log/nrlsmf.log < /dev/null > /dev/null 2>&1 & diff --git a/daemon/core/configservices/quaggaservices/services.py b/daemon/core/configservices/quaggaservices/services.py deleted file mode 100644 index 8b4d4909..00000000 --- a/daemon/core/configservices/quaggaservices/services.py +++ /dev/null @@ -1,453 +0,0 @@ -import abc -import logging -from typing import Any - -from core.config import Configuration -from core.configservice.base import ConfigService, ConfigServiceMode -from core.emane.nodes import EmaneNet -from core.nodes.base import CoreNodeBase, NodeBase -from core.nodes.interface import DEFAULT_MTU, CoreInterface -from core.nodes.network import PtpNet, WlanNode -from core.nodes.physical import Rj45Node -from core.nodes.wireless import WirelessNode - -logger = logging.getLogger(__name__) -GROUP: str = "Quagga" -QUAGGA_STATE_DIR: str = "/var/run/quagga" - - -def is_wireless(node: NodeBase) -> bool: - """ - Check if the node is a wireless type node. - - :param node: node to check type for - :return: True if wireless type, False otherwise - """ - return isinstance(node, (WlanNode, EmaneNet, WirelessNode)) - - -def has_mtu_mismatch(iface: CoreInterface) -> bool: - """ - Helper to detect MTU mismatch and add the appropriate OSPF - mtu-ignore command. This is needed when e.g. a node is linked via a - GreTap device. - """ - if iface.mtu != DEFAULT_MTU: - return True - if not iface.net: - return False - for iface in iface.net.get_ifaces(): - if iface.mtu != iface.mtu: - return True - return False - - -def get_min_mtu(iface: CoreInterface): - """ - Helper to discover the minimum MTU of interfaces linked with the - given interface. - """ - mtu = iface.mtu - if not iface.net: - return mtu - for iface in iface.net.get_ifaces(): - if iface.mtu < mtu: - mtu = iface.mtu - return mtu - - -def get_router_id(node: CoreNodeBase) -> str: - """ - Helper to return the first IPv4 address of a node as its router ID. - """ - for iface in node.get_ifaces(control=False): - ip4 = iface.get_ip4() - if ip4: - return str(ip4.ip) - return "0.0.0.0" - - -def rj45_check(iface: CoreInterface) -> bool: - """ - Helper to detect whether interface is connected an external RJ45 - link. 
- """ - if iface.net: - for peer_iface in iface.net.get_ifaces(): - if peer_iface == iface: - continue - if isinstance(peer_iface.node, Rj45Node): - return True - return False - - -class Zebra(ConfigService): - name: str = "zebra" - group: str = GROUP - directories: list[str] = ["/usr/local/etc/quagga", "/var/run/quagga"] - files: list[str] = [ - "/usr/local/etc/quagga/Quagga.conf", - "quaggaboot.sh", - "/usr/local/etc/quagga/vtysh.conf", - ] - executables: list[str] = ["zebra"] - dependencies: list[str] = [] - startup: list[str] = ["bash quaggaboot.sh zebra"] - validate: list[str] = ["pidof zebra"] - shutdown: list[str] = ["killall zebra"] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [] - modes: dict[str, dict[str, str]] = {} - - def data(self) -> dict[str, Any]: - quagga_bin_search = self.node.session.options.get( - "quagga_bin_search", default="/usr/local/bin /usr/bin /usr/lib/quagga" - ).strip('"') - quagga_sbin_search = self.node.session.options.get( - "quagga_sbin_search", default="/usr/local/sbin /usr/sbin /usr/lib/quagga" - ).strip('"') - quagga_state_dir = QUAGGA_STATE_DIR - quagga_conf = self.files[0] - - services = [] - want_ip4 = False - want_ip6 = False - for service in self.node.config_services.values(): - if self.name not in service.dependencies: - continue - if not isinstance(service, QuaggaService): - continue - if service.ipv4_routing: - want_ip4 = True - if service.ipv6_routing: - want_ip6 = True - services.append(service) - - ifaces = [] - for iface in self.node.get_ifaces(): - ip4s = [] - ip6s = [] - for ip4 in iface.ip4s: - ip4s.append(str(ip4)) - for ip6 in iface.ip6s: - ip6s.append(str(ip6)) - configs = [] - if not iface.control: - for service in services: - config = service.quagga_iface_config(iface) - if config: - configs.append(config.split("\n")) - ifaces.append((iface, ip4s, ip6s, configs)) - - return dict( - quagga_bin_search=quagga_bin_search, - quagga_sbin_search=quagga_sbin_search, - quagga_state_dir=quagga_state_dir, - quagga_conf=quagga_conf, - ifaces=ifaces, - want_ip4=want_ip4, - want_ip6=want_ip6, - services=services, - ) - - -class QuaggaService(abc.ABC): - group: str = GROUP - directories: list[str] = [] - files: list[str] = [] - executables: list[str] = [] - dependencies: list[str] = ["zebra"] - startup: list[str] = [] - validate: list[str] = [] - shutdown: list[str] = [] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [] - modes: dict[str, dict[str, str]] = {} - ipv4_routing: bool = False - ipv6_routing: bool = False - - @abc.abstractmethod - def quagga_iface_config(self, iface: CoreInterface) -> str: - raise NotImplementedError - - @abc.abstractmethod - def quagga_config(self) -> str: - raise NotImplementedError - - -class Ospfv2(QuaggaService, ConfigService): - """ - The OSPFv2 service provides IPv4 routing for wired networks. It does - not build its own configuration file but has hooks for adding to the - unified Quagga.conf file. 
- """ - - name: str = "OSPFv2" - validate: list[str] = ["pidof ospfd"] - shutdown: list[str] = ["killall ospfd"] - ipv4_routing: bool = True - - def quagga_iface_config(self, iface: CoreInterface) -> str: - has_mtu = has_mtu_mismatch(iface) - has_rj45 = rj45_check(iface) - is_ptp = isinstance(iface.net, PtpNet) - data = dict(has_mtu=has_mtu, is_ptp=is_ptp, has_rj45=has_rj45) - text = """ - % if has_mtu: - ip ospf mtu-ignore - % endif - % if has_rj45: - <% return STOP_RENDERING %> - % endif - % if is_ptp: - ip ospf network point-to-point - % endif - ip ospf hello-interval 2 - ip ospf dead-interval 6 - ip ospf retransmit-interval 5 - """ - return self.render_text(text, data) - - def quagga_config(self) -> str: - router_id = get_router_id(self.node) - addresses = [] - for iface in self.node.get_ifaces(control=False): - for ip4 in iface.ip4s: - addresses.append(str(ip4)) - data = dict(router_id=router_id, addresses=addresses) - text = """ - router ospf - router-id ${router_id} - % for addr in addresses: - network ${addr} area 0 - % endfor - ! - """ - return self.render_text(text, data) - - -class Ospfv3(QuaggaService, ConfigService): - """ - The OSPFv3 service provides IPv6 routing for wired networks. It does - not build its own configuration file but has hooks for adding to the - unified Quagga.conf file. - """ - - name: str = "OSPFv3" - shutdown: list[str] = ["killall ospf6d"] - validate: list[str] = ["pidof ospf6d"] - ipv4_routing: bool = True - ipv6_routing: bool = True - - def quagga_iface_config(self, iface: CoreInterface) -> str: - mtu = get_min_mtu(iface) - if mtu < iface.mtu: - return f"ipv6 ospf6 ifmtu {mtu}" - else: - return "" - - def quagga_config(self) -> str: - router_id = get_router_id(self.node) - ifnames = [] - for iface in self.node.get_ifaces(control=False): - ifnames.append(iface.name) - data = dict(router_id=router_id, ifnames=ifnames) - text = """ - router ospf6 - instance-id 65 - router-id ${router_id} - % for ifname in ifnames: - interface ${ifname} area 0.0.0.0 - % endfor - ! - """ - return self.render_text(text, data) - - -class Ospfv3mdr(Ospfv3): - """ - The OSPFv3 MANET Designated Router (MDR) service provides IPv6 - routing for wireless networks. It does not build its own - configuration file but has hooks for adding to the - unified Quagga.conf file. - """ - - name: str = "OSPFv3MDR" - - def quagga_iface_config(self, iface: CoreInterface) -> str: - config = super().quagga_iface_config(iface) - if is_wireless(iface.net): - config = self.clean_text( - f""" - {config} - ipv6 ospf6 hello-interval 2 - ipv6 ospf6 dead-interval 6 - ipv6 ospf6 retransmit-interval 5 - ipv6 ospf6 network manet-designated-router - ipv6 ospf6 twohoprefresh 3 - ipv6 ospf6 adjacencyconnectivity uniconnected - ipv6 ospf6 lsafullness mincostlsa - """ - ) - return config - - -class Bgp(QuaggaService, ConfigService): - """ - The BGP service provides interdomain routing. - Peers must be manually configured, with a full mesh for those - having the same AS number. - """ - - name: str = "BGP" - shutdown: list[str] = ["killall bgpd"] - validate: list[str] = ["pidof bgpd"] - ipv4_routing: bool = True - ipv6_routing: bool = True - - def quagga_config(self) -> str: - router_id = get_router_id(self.node) - text = f""" - ! BGP configuration - ! You should configure the AS number below - ! along with this router's peers. - router bgp {self.node.id} - bgp router-id {router_id} - redistribute connected - !neighbor 1.2.3.4 remote-as 555 - ! 
- """ - return self.clean_text(text) - - def quagga_iface_config(self, iface: CoreInterface) -> str: - return "" - - -class Rip(QuaggaService, ConfigService): - """ - The RIP service provides IPv4 routing for wired networks. - """ - - name: str = "RIP" - shutdown: list[str] = ["killall ripd"] - validate: list[str] = ["pidof ripd"] - ipv4_routing: bool = True - - def quagga_config(self) -> str: - text = """ - router rip - redistribute static - redistribute connected - redistribute ospf - network 0.0.0.0/0 - ! - """ - return self.clean_text(text) - - def quagga_iface_config(self, iface: CoreInterface) -> str: - return "" - - -class Ripng(QuaggaService, ConfigService): - """ - The RIP NG service provides IPv6 routing for wired networks. - """ - - name: str = "RIPNG" - shutdown: list[str] = ["killall ripngd"] - validate: list[str] = ["pidof ripngd"] - ipv6_routing: bool = True - - def quagga_config(self) -> str: - text = """ - router ripng - redistribute static - redistribute connected - redistribute ospf6 - network ::/0 - ! - """ - return self.clean_text(text) - - def quagga_iface_config(self, iface: CoreInterface) -> str: - return "" - - -class Babel(QuaggaService, ConfigService): - """ - The Babel service provides a loop-avoiding distance-vector routing - protocol for IPv6 and IPv4 with fast convergence properties. - """ - - name: str = "Babel" - shutdown: list[str] = ["killall babeld"] - validate: list[str] = ["pidof babeld"] - ipv6_routing: bool = True - - def quagga_config(self) -> str: - ifnames = [] - for iface in self.node.get_ifaces(control=False): - ifnames.append(iface.name) - text = """ - router babel - % for ifname in ifnames: - network ${ifname} - % endfor - redistribute static - redistribute connected - ! - """ - data = dict(ifnames=ifnames) - return self.render_text(text, data) - - def quagga_iface_config(self, iface: CoreInterface) -> str: - if is_wireless(iface.net): - text = """ - babel wireless - no babel split-horizon - """ - else: - text = """ - babel wired - babel split-horizon - """ - return self.clean_text(text) - - -class Xpimd(QuaggaService, ConfigService): - """ - PIM multicast routing based on XORP. - """ - - name: str = "Xpimd" - shutdown: list[str] = ["killall xpimd"] - validate: list[str] = ["pidof xpimd"] - ipv4_routing: bool = True - - def quagga_config(self) -> str: - ifname = "eth0" - for iface in self.node.get_ifaces(): - if iface.name != "lo": - ifname = iface.name - break - - text = f""" - router mfea - ! - router igmp - ! - router pim - !ip pim rp-address 10.0.0.1 - ip pim bsr-candidate {ifname} - ip pim rp-candidate {ifname} - !ip pim spt-threshold interval 10 bytes 80000 - ! 
- """ - return self.clean_text(text) - - def quagga_iface_config(self, iface: CoreInterface) -> str: - text = """ - ip mfea - ip pim - """ - return self.clean_text(text) diff --git a/daemon/core/configservices/quaggaservices/templates/quaggaboot.sh b/daemon/core/configservices/quaggaservices/templates/quaggaboot.sh deleted file mode 100644 index c22fdd5f..00000000 --- a/daemon/core/configservices/quaggaservices/templates/quaggaboot.sh +++ /dev/null @@ -1,92 +0,0 @@ -#!/bin/sh -# auto-generated by zebra service (quagga.py) -QUAGGA_CONF="${quagga_conf}" -QUAGGA_SBIN_SEARCH="${quagga_sbin_search}" -QUAGGA_BIN_SEARCH="${quagga_bin_search}" -QUAGGA_STATE_DIR="${quagga_state_dir}" - -searchforprog() -{ - prog=$1 - searchpath=$@ - ret= - for p in $searchpath; do - if [ -x $p/$prog ]; then - ret=$p - break - fi - done - echo $ret -} - -confcheck() -{ - CONF_DIR=`dirname $QUAGGA_CONF` - # if /etc/quagga exists, point /etc/quagga/Quagga.conf -> CONF_DIR - if [ "$CONF_DIR" != "/etc/quagga" ] && [ -d /etc/quagga ] && [ ! -e /etc/quagga/Quagga.conf ]; then - ln -s $CONF_DIR/Quagga.conf /etc/quagga/Quagga.conf - fi - # if /etc/quagga exists, point /etc/quagga/vtysh.conf -> CONF_DIR - if [ "$CONF_DIR" != "/etc/quagga" ] && [ -d /etc/quagga ] && [ ! -e /etc/quagga/vtysh.conf ]; then - ln -s $CONF_DIR/vtysh.conf /etc/quagga/vtysh.conf - fi -} - -bootdaemon() -{ - QUAGGA_SBIN_DIR=$(searchforprog $1 $QUAGGA_SBIN_SEARCH) - if [ "z$QUAGGA_SBIN_DIR" = "z" ]; then - echo "ERROR: Quagga's '$1' daemon not found in search path:" - echo " $QUAGGA_SBIN_SEARCH" - return 1 - fi - - flags="" - - if [ "$1" = "xpimd" ] && \\ - grep -E -q '^[[:space:]]*router[[:space:]]+pim6[[:space:]]*$' $QUAGGA_CONF; then - flags="$flags -6" - fi - - $QUAGGA_SBIN_DIR/$1 $flags -d - if [ "$?" != "0" ]; then - echo "ERROR: Quagga's '$1' daemon failed to start!:" - return 1 - fi -} - -bootquagga() -{ - QUAGGA_BIN_DIR=$(searchforprog 'vtysh' $QUAGGA_BIN_SEARCH) - if [ "z$QUAGGA_BIN_DIR" = "z" ]; then - echo "ERROR: Quagga's 'vtysh' program not found in search path:" - echo " $QUAGGA_BIN_SEARCH" - return 1 - fi - - # fix /var/run/quagga permissions - id -u quagga 2>/dev/null >/dev/null - if [ "$?" = "0" ]; then - chown quagga $QUAGGA_STATE_DIR - fi - - bootdaemon "zebra" - for r in rip ripng ospf6 ospf bgp babel; do - if grep -q "^router \\<$${}{r}\\>" $QUAGGA_CONF; then - bootdaemon "$${}{r}d" - fi - done - - if grep -E -q '^[[:space:]]*router[[:space:]]+pim6?[[:space:]]*$' $QUAGGA_CONF; then - bootdaemon "xpimd" - fi - - $QUAGGA_BIN_DIR/vtysh -b -} - -if [ "$1" != "zebra" ]; then - echo "WARNING: '$1': all Quagga daemons are launched by the 'zebra' service!" - exit 1 -fi -confcheck -bootquagga diff --git a/daemon/core/configservices/quaggaservices/templates/usr/local/etc/quagga/Quagga.conf b/daemon/core/configservices/quaggaservices/templates/usr/local/etc/quagga/Quagga.conf deleted file mode 100644 index b7916f96..00000000 --- a/daemon/core/configservices/quaggaservices/templates/usr/local/etc/quagga/Quagga.conf +++ /dev/null @@ -1,23 +0,0 @@ -% for iface, ip4s, ip6s, configs in ifaces: -interface ${iface.name} - % if want_ip4: - % for addr in ip4s: - ip address ${addr} - % endfor - % endif - % if want_ip6: - % for addr in ip6s: - ipv6 address ${addr} - % endfor - % endif - % for config in configs: - % for line in config: - ${line} - % endfor - % endfor -! 
-% endfor - -% for service in services: -${service.quagga_config()} -% endfor diff --git a/daemon/core/configservices/quaggaservices/templates/usr/local/etc/quagga/vtysh.conf b/daemon/core/configservices/quaggaservices/templates/usr/local/etc/quagga/vtysh.conf deleted file mode 100644 index e0ab9cb6..00000000 --- a/daemon/core/configservices/quaggaservices/templates/usr/local/etc/quagga/vtysh.conf +++ /dev/null @@ -1 +0,0 @@ -service integrated-vtysh-config diff --git a/daemon/core/configservices/securityservices/__init__.py b/daemon/core/configservices/securityservices/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/daemon/core/configservices/securityservices/services.py b/daemon/core/configservices/securityservices/services.py deleted file mode 100644 index e6243b2c..00000000 --- a/daemon/core/configservices/securityservices/services.py +++ /dev/null @@ -1,104 +0,0 @@ -from typing import Any - -from core.config import ConfigString, Configuration -from core.configservice.base import ConfigService, ConfigServiceMode - -GROUP_NAME: str = "Security" - - -class VpnClient(ConfigService): - name: str = "VPNClient" - group: str = GROUP_NAME - directories: list[str] = [] - files: list[str] = ["vpnclient.sh"] - executables: list[str] = ["openvpn", "ip", "killall"] - dependencies: list[str] = [] - startup: list[str] = ["bash vpnclient.sh"] - validate: list[str] = ["pidof openvpn"] - shutdown: list[str] = ["killall openvpn"] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [ - ConfigString(id="keydir", label="Key Dir", default="/etc/core/keys"), - ConfigString(id="keyname", label="Key Name", default="client1"), - ConfigString(id="server", label="Server", default="10.0.2.10"), - ] - modes: dict[str, dict[str, str]] = {} - - -class VpnServer(ConfigService): - name: str = "VPNServer" - group: str = GROUP_NAME - directories: list[str] = [] - files: list[str] = ["vpnserver.sh"] - executables: list[str] = ["openvpn", "ip", "killall"] - dependencies: list[str] = [] - startup: list[str] = ["bash vpnserver.sh"] - validate: list[str] = ["pidof openvpn"] - shutdown: list[str] = ["killall openvpn"] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [ - ConfigString(id="keydir", label="Key Dir", default="/etc/core/keys"), - ConfigString(id="keyname", label="Key Name", default="server"), - ConfigString(id="subnet", label="Subnet", default="10.0.200.0"), - ] - modes: dict[str, dict[str, str]] = {} - - def data(self) -> dict[str, Any]: - address = None - for iface in self.node.get_ifaces(control=False): - ip4 = iface.get_ip4() - if ip4: - address = str(ip4.ip) - break - return dict(address=address) - - -class IPsec(ConfigService): - name: str = "IPsec" - group: str = GROUP_NAME - directories: list[str] = [] - files: list[str] = ["ipsec.sh"] - executables: list[str] = ["racoon", "ip", "setkey", "killall"] - dependencies: list[str] = [] - startup: list[str] = ["bash ipsec.sh"] - validate: list[str] = ["pidof racoon"] - shutdown: list[str] = ["killall racoon"] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [] - modes: dict[str, dict[str, str]] = {} - - -class Firewall(ConfigService): - name: str = "Firewall" - group: str = GROUP_NAME - directories: list[str] = [] - files: list[str] = ["firewall.sh"] - executables: list[str] = ["iptables"] - dependencies: list[str] = [] - startup: list[str] = ["bash firewall.sh"] - 
validate: list[str] = [] - shutdown: list[str] = [] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [] - modes: dict[str, dict[str, str]] = {} - - -class Nat(ConfigService): - name: str = "NAT" - group: str = GROUP_NAME - directories: list[str] = [] - files: list[str] = ["nat.sh"] - executables: list[str] = ["iptables"] - dependencies: list[str] = [] - startup: list[str] = ["bash nat.sh"] - validate: list[str] = [] - shutdown: list[str] = [] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [] - modes: dict[str, dict[str, str]] = {} - - def data(self) -> dict[str, Any]: - ifnames = [] - for iface in self.node.get_ifaces(control=False): - ifnames.append(iface.name) - return dict(ifnames=ifnames) diff --git a/daemon/core/configservices/securityservices/templates/firewall.sh b/daemon/core/configservices/securityservices/templates/firewall.sh deleted file mode 100644 index a445d133..00000000 --- a/daemon/core/configservices/securityservices/templates/firewall.sh +++ /dev/null @@ -1,30 +0,0 @@ -# -------- CUSTOMIZATION REQUIRED -------- -# -# Below are sample iptables firewall rules that you can uncomment and edit. -# You can also use ip6tables rules for IPv6. -# - -# start by flushing all firewall rules (so this script may be re-run) -#iptables -F - -# allow traffic related to established connections -#iptables -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT - -# allow TCP packets from any source destined for 192.168.1.1 -#iptables -A INPUT -s 0/0 -i eth0 -d 192.168.1.1 -p TCP -j ACCEPT - -# allow OpenVPN server traffic from eth0 -#iptables -A INPUT -p udp --dport 1194 -j ACCEPT -#iptables -A INPUT -i eth0 -j DROP -#iptables -A OUTPUT -p udp --sport 1194 -j ACCEPT -#iptables -A OUTPUT -o eth0 -j DROP - -# allow ICMP ping traffic -#iptables -A OUTPUT -p icmp --icmp-type echo-request -j ACCEPT -#iptables -A INPUT -p icmp --icmp-type echo-reply -j ACCEPT - -# allow SSH traffic -#iptables -A -p tcp -m state --state NEW -m tcp --dport 22 -j ACCEPT - -# drop all other traffic coming in eth0 -#iptables -A INPUT -i eth0 -j DROP diff --git a/daemon/core/configservices/securityservices/templates/ipsec.sh b/daemon/core/configservices/securityservices/templates/ipsec.sh deleted file mode 100644 index e8fde77e..00000000 --- a/daemon/core/configservices/securityservices/templates/ipsec.sh +++ /dev/null @@ -1,114 +0,0 @@ -# -------- CUSTOMIZATION REQUIRED -------- -# -# The IPsec service builds ESP tunnels between the specified peers using the -# racoon IKEv2 keying daemon. You need to provide keys and the addresses of -# peers, along with subnets to tunnel. - -# directory containing the certificate and key described below -keydir=/etc/core/keys - -# the name used for the "$certname.pem" x509 certificate and -# "$certname.key" RSA private key, which can be generated using openssl -certname=ipsec1 - -# list the public-facing IP addresses, starting with the localhost and followed -# by each tunnel peer, separated with a single space -tunnelhosts="172.16.0.1AND172.16.0.2 172.16.0.1AND172.16.2.1" - -# Define T where i is the index for each tunnel peer host from -# the tunnel_hosts list above (0 is localhost). 
-# T is a list of IPsec tunnels with peer i, with a local subnet address -# followed by the remote subnet address: -# T="AND AND" -# For example, 172.16.0.0/24 is a local network (behind this node) to be -# tunneled and 172.16.2.0/24 is a remote network (behind peer 1) -T1="172.16.3.0/24AND172.16.5.0/24" -T2="172.16.4.0/24AND172.16.5.0/24 172.16.4.0/24AND172.16.6.0/24" - -# -------- END CUSTOMIZATION -------- - -echo "building config $PWD/ipsec.conf..." -echo "building config $PWD/ipsec.conf..." > $PWD/ipsec.log - -checkip=0 -if [ "$(dpkg -l | grep " sipcalc ")" = "" ]; then - echo "WARNING: ip validation disabled because package sipcalc not installed - " >> $PWD/ipsec.log - checkip=1 -fi - -echo "#!/usr/sbin/setkey -f - # Flush the SAD and SPD - flush; - spdflush; - - # Security policies " > $PWD/ipsec.conf -i=0 -for hostpair in $tunnelhosts; do - i=`expr $i + 1` - # parse tunnel host IP - thishost=$${}{hostpair%%AND*} - peerhost=$${}{hostpair##*AND} - if [ $checkip = "0" ] && - [ "$(sipcalc "$thishost" "$peerhost" | grep ERR)" != "" ]; then - echo "ERROR: invalid host address $thishost or $peerhost " >> $PWD/ipsec.log - fi - # parse each tunnel addresses - tunnel_list_var_name=T$i - eval tunnels="$"$tunnel_list_var_name"" - for ttunnel in $tunnels; do - lclnet=$${}{ttunnel%%AND*} - rmtnet=$${}{ttunnel##*AND} - if [ $checkip = "0" ] && - [ "$(sipcalc "$lclnet" "$rmtnet"| grep ERR)" != "" ]; then - echo "ERROR: invalid tunnel address $lclnet and $rmtnet " >> $PWD/ipsec.log - fi - # add tunnel policies - echo " - spdadd $lclnet $rmtnet any -P out ipsec - esp/tunnel/$thishost-$peerhost/require; - spdadd $rmtnet $lclnet any -P in ipsec - esp/tunnel/$peerhost-$thishost/require; " >> $PWD/ipsec.conf - done -done - -echo "building config $PWD/racoon.conf..." -if [ ! -e $keydir\/$certname.key ] || [ ! -e $keydir\/$certname.pem ]; then - echo "ERROR: missing certification files under $keydir $certname.key or $certname.pem " >> $PWD/ipsec.log -fi -echo " - path certificate \"$keydir\"; - listen { - adminsock disabled; - } - remote anonymous - { - exchange_mode main; - certificate_type x509 \"$certname.pem\" \"$certname.key\"; - ca_type x509 \"ca-cert.pem\"; - my_identifier asn1dn; - peers_identifier asn1dn; - - proposal { - encryption_algorithm 3des ; - hash_algorithm sha1; - authentication_method rsasig ; - dh_group modp768; - } - } - sainfo anonymous - { - pfs_group modp768; - lifetime time 1 hour ; - encryption_algorithm 3des, blowfish 448, rijndael ; - authentication_algorithm hmac_sha1, hmac_md5 ; - compression_algorithm deflate ; - } - " > $PWD/racoon.conf - -# the setkey program is required from the ipsec-tools package -echo "running setkey -f $PWD/ipsec.conf..." -setkey -f $PWD/ipsec.conf - -echo "running racoon -d -f $PWD/racoon.conf..." 
-racoon -d -f $PWD/racoon.conf -l racoon.log diff --git a/daemon/core/configservices/securityservices/templates/nat.sh b/daemon/core/configservices/securityservices/templates/nat.sh deleted file mode 100644 index 80b96a08..00000000 --- a/daemon/core/configservices/securityservices/templates/nat.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/sh -# generated by security.py -# NAT out the first interface by default -% for index, ifname in enumerate(ifnames): -% if index == 0: -iptables -t nat -A POSTROUTING -o ${ifname} -j MASQUERADE -iptables -A FORWARD -i ${ifname} -m state --state RELATED,ESTABLISHED -j ACCEPT -iptables -A FORWARD -i ${ifname} -j DROP -% else: -# iptables -t nat -A POSTROUTING -o ${ifname} -j MASQUERADE -# iptables -A FORWARD -i ${ifname} -m state --state RELATED,ESTABLISHED -j ACCEPT -# iptables -A FORWARD -i ${ifname} -j DROP -% endif -% endfor diff --git a/daemon/core/configservices/securityservices/templates/vpnclient.sh b/daemon/core/configservices/securityservices/templates/vpnclient.sh deleted file mode 100644 index 5cbf7ad1..00000000 --- a/daemon/core/configservices/securityservices/templates/vpnclient.sh +++ /dev/null @@ -1,61 +0,0 @@ -# -------- CUSTOMIZATION REQUIRED -------- -# -# The VPNClient service builds a VPN tunnel to the specified VPN server using -# OpenVPN software and a virtual TUN/TAP device. - -# directory containing the certificate and key described below -keydir=${config["keydir"]} - -# the name used for a "$keyname.crt" certificate and "$keyname.key" private key. -keyname=${config["keyname"]} - -# the public IP address of the VPN server this client should connect with -vpnserver=${config["server"]} - -# optional next hop for adding a static route to reach the VPN server -#nexthop="10.0.1.1" - -# --------- END CUSTOMIZATION -------- - -# validate addresses -if [ "$(dpkg -l | grep " sipcalc ")" = "" ]; then - echo "WARNING: ip validation disabled because package sipcalc not installed - " > $PWD/vpnclient.log -else - if [ "$(sipcalc "$vpnserver" "$nexthop" | grep ERR)" != "" ]; then - echo "ERROR: invalide address $vpnserver or $nexthop " > $PWD/vpnclient.log - fi -fi - -# validate key and certification files -if [ ! -e $keydir\/$keyname.key ] || [ ! -e $keydir\/$keyname.crt ] \ - || [ ! -e $keydir\/ca.crt ] || [ ! -e $keydir\/dh1024.pem ]; then - echo "ERROR: missing certification or key files under $keydir $keyname.key or $keyname.crt or ca.crt or dh1024.pem" >> $PWD/vpnclient.log -fi - -# if necessary, add a static route for reaching the VPN server IP via the IF -vpnservernet=$${}{vpnserver%.*}.0/24 -if [ "$nexthop" != "" ]; then - ip route add $vpnservernet via $nexthop -fi - -# create openvpn client.conf -( -cat << EOF -client -dev tun -proto udp -remote $vpnserver 1194 -nobind -ca $keydir/ca.crt -cert $keydir/$keyname.crt -key $keydir/$keyname.key -dh $keydir/dh1024.pem -cipher AES-256-CBC -log $PWD/openvpn-client.log -verb 4 -daemon -EOF -) > client.conf - -openvpn --config client.conf diff --git a/daemon/core/configservices/securityservices/templates/vpnserver.sh b/daemon/core/configservices/securityservices/templates/vpnserver.sh deleted file mode 100644 index 7a580ac7..00000000 --- a/daemon/core/configservices/securityservices/templates/vpnserver.sh +++ /dev/null @@ -1,147 +0,0 @@ -# -------- CUSTOMIZATION REQUIRED -------- -# -# The VPNServer service sets up the OpenVPN server for building VPN tunnels -# that allow access via TUN/TAP device to private networks. 
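The nat.sh template above is a Mako template; its ifnames variable comes from the data() method of the Nat config service shown earlier in this diff. A minimal sketch of that rendering step, assuming the mako package is installed and using an abbreviated copy of the template text:

# illustrative only: render an abbreviated nat.sh-style Mako template
from mako.template import Template

NAT_TEMPLATE = """\
#!/bin/sh
# NAT out the first interface by default
% for index, ifname in enumerate(ifnames):
% if index == 0:
iptables -t nat -A POSTROUTING -o ${ifname} -j MASQUERADE
% else:
# iptables -t nat -A POSTROUTING -o ${ifname} -j MASQUERADE
% endif
% endfor
"""

# stand-in for Nat.data(), which collects the non-control interface names
data = {"ifnames": ["eth0", "eth1"]}
print(Template(NAT_TEMPLATE).render(**data))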
-# -# note that the IPForward and DefaultRoute services should be enabled - -# directory containing the certificate and key described below, in addition to -# a CA certificate and DH key -keydir=${config["keydir"]} - -# the name used for a "$keyname.crt" certificate and "$keyname.key" private key. -keyname=${config["keyname"]} - -# the VPN subnet address from which the client VPN IP (for the TUN/TAP) -# will be allocated -vpnsubnet=${config["subnet"]} - -# public IP address of this vpn server (same as VPNClient vpnserver= setting) -vpnserver=${address} - -# optional list of private subnets reachable behind this VPN server -# each subnet and next hop is separated by a space -# ", , ..." -#privatenets="10.0.11.0,10.0.10.1 10.0.12.0,10.0.10.1" - -# optional list of VPN clients, for statically assigning IP addresses to -# clients; also, an optional client subnet can be specified for adding static -# routes via the client -# Note: VPN addresses x.x.x.0-3 are reserved -# ",, ,, ..." -#vpnclients="client1KeyFilename,10.0.200.5,10.0.0.0 client2KeyFilename,," - -# NOTE: you may need to enable the StaticRoutes service on nodes within the -# private subnet, in order to have routes back to the client. -# /sbin/ip ro add /24 via -# /sbin/ip ro add /24 via - -# -------- END CUSTOMIZATION -------- - -echo > $PWD/vpnserver.log -rm -f -r $PWD/ccd - -# validate key and certification files -if [ ! -e $keydir\/$keyname.key ] || [ ! -e $keydir\/$keyname.crt ] \ - || [ ! -e $keydir\/ca.crt ] || [ ! -e $keydir\/dh1024.pem ]; then - echo "ERROR: missing certification or key files under $keydir \ -$keyname.key or $keyname.crt or ca.crt or dh1024.pem" >> $PWD/vpnserver.log -fi - -# validate configuration IP addresses -checkip=0 -if [ "$(dpkg -l | grep " sipcalc ")" = "" ]; then - echo "WARNING: ip validation disabled because package sipcalc not installed\ - " >> $PWD/vpnserver.log - checkip=1 -else - if [ "$(sipcalc "$vpnsubnet" "$vpnserver" | grep ERR)" != "" ]; then - echo "ERROR: invalid vpn subnet or server address \ -$vpnsubnet or $vpnserver " >> $PWD/vpnserver.log - fi -fi - -# create client vpn ip pool file -( -cat << EOF -EOF -)> $PWD/ippool.txt - -# create server.conf file -( -cat << EOF -# openvpn server config -local $vpnserver -server $vpnsubnet 255.255.255.0 -push "redirect-gateway def1" -EOF -)> $PWD/server.conf - -# add routes to VPN server private subnets, and push these routes to clients -for privatenet in $privatenets; do - if [ $privatenet != "" ]; then - net=$${}{privatenet%%,*} - nexthop=$${}{privatenet##*,} - if [ $checkip = "0" ] && - [ "$(sipcalc "$net" "$nexthop" | grep ERR)" != "" ]; then - echo "ERROR: invalid vpn server private net address \ -$net or $nexthop " >> $PWD/vpnserver.log - fi - echo push route $net 255.255.255.0 >> $PWD/server.conf - ip ro add $net/24 via $nexthop - ip ro add $vpnsubnet/24 via $nexthop - fi -done - -# allow subnet through this VPN, one route for each client subnet -for client in $vpnclients; do - if [ $client != "" ]; then - cSubnetIP=$${}{client##*,} - cVpnIP=$${}{client#*,} - cVpnIP=$${}{cVpnIP%%,*} - cKeyFilename=$${}{client%%,*} - if [ "$cSubnetIP" != "" ]; then - if [ $checkip = "0" ] && - [ "$(sipcalc "$cSubnetIP" "$cVpnIP" | grep ERR)" != "" ]; then - echo "ERROR: invalid vpn client and subnet address \ -$cSubnetIP or $cVpnIP " >> $PWD/vpnserver.log - fi - echo route $cSubnetIP 255.255.255.0 >> $PWD/server.conf - if ! 
test -d $PWD/ccd; then - mkdir -p $PWD/ccd - echo client-config-dir $PWD/ccd >> $PWD/server.conf - fi - if test -e $PWD/ccd/$cKeyFilename; then - echo iroute $cSubnetIP 255.255.255.0 >> $PWD/ccd/$cKeyFilename - else - echo iroute $cSubnetIP 255.255.255.0 > $PWD/ccd/$cKeyFilename - fi - fi - if [ "$cVpnIP" != "" ]; then - echo $cKeyFilename,$cVpnIP >> $PWD/ippool.txt - fi - fi -done - -( -cat << EOF -keepalive 10 120 -ca $keydir/ca.crt -cert $keydir/$keyname.crt -key $keydir/$keyname.key -dh $keydir/dh1024.pem -cipher AES-256-CBC -status /var/log/openvpn-status.log -log /var/log/openvpn-server.log -ifconfig-pool-linear -ifconfig-pool-persist $PWD/ippool.txt -port 1194 -proto udp -dev tun -verb 4 -daemon -EOF -)>> $PWD/server.conf - -# start vpn server -openvpn --config server.conf diff --git a/daemon/core/configservices/utilservices/__init__.py b/daemon/core/configservices/utilservices/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/daemon/core/configservices/utilservices/services.py b/daemon/core/configservices/utilservices/services.py deleted file mode 100644 index 73d72060..00000000 --- a/daemon/core/configservices/utilservices/services.py +++ /dev/null @@ -1,291 +0,0 @@ -from typing import Any - -import netaddr - -from core import utils -from core.config import Configuration -from core.configservice.base import ConfigService, ConfigServiceMode - -GROUP_NAME = "Utility" - - -class DefaultRouteService(ConfigService): - name: str = "DefaultRoute" - group: str = GROUP_NAME - directories: list[str] = [] - files: list[str] = ["defaultroute.sh"] - executables: list[str] = ["ip"] - dependencies: list[str] = [] - startup: list[str] = ["bash defaultroute.sh"] - validate: list[str] = [] - shutdown: list[str] = [] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [] - modes: dict[str, dict[str, str]] = {} - - def data(self) -> dict[str, Any]: - # only add default routes for linked routing nodes - routes = [] - ifaces = self.node.get_ifaces() - if ifaces: - iface = ifaces[0] - for ip in iface.ips(): - net = ip.cidr - if net.size > 1: - router = net[1] - routes.append(str(router)) - return dict(routes=routes) - - -class DefaultMulticastRouteService(ConfigService): - name: str = "DefaultMulticastRoute" - group: str = GROUP_NAME - directories: list[str] = [] - files: list[str] = ["defaultmroute.sh"] - executables: list[str] = [] - dependencies: list[str] = [] - startup: list[str] = ["bash defaultmroute.sh"] - validate: list[str] = [] - shutdown: list[str] = [] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [] - modes: dict[str, dict[str, str]] = {} - - def data(self) -> dict[str, Any]: - ifname = None - for iface in self.node.get_ifaces(control=False): - ifname = iface.name - break - return dict(ifname=ifname) - - -class StaticRouteService(ConfigService): - name: str = "StaticRoute" - group: str = GROUP_NAME - directories: list[str] = [] - files: list[str] = ["staticroute.sh"] - executables: list[str] = [] - dependencies: list[str] = [] - startup: list[str] = ["bash staticroute.sh"] - validate: list[str] = [] - shutdown: list[str] = [] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [] - modes: dict[str, dict[str, str]] = {} - - def data(self) -> dict[str, Any]: - routes = [] - for iface in self.node.get_ifaces(control=False): - for ip in iface.ips(): - address = str(ip.ip) - if 
netaddr.valid_ipv6(address): - dst = "3ffe:4::/64" - else: - dst = "10.9.8.0/24" - if ip[-2] != ip[1]: - routes.append((dst, ip[1])) - return dict(routes=routes) - - -class IpForwardService(ConfigService): - name: str = "IPForward" - group: str = GROUP_NAME - directories: list[str] = [] - files: list[str] = ["ipforward.sh"] - executables: list[str] = ["sysctl"] - dependencies: list[str] = [] - startup: list[str] = ["bash ipforward.sh"] - validate: list[str] = [] - shutdown: list[str] = [] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [] - modes: dict[str, dict[str, str]] = {} - - def data(self) -> dict[str, Any]: - devnames = [] - for iface in self.node.get_ifaces(): - devname = utils.sysctl_devname(iface.name) - devnames.append(devname) - return dict(devnames=devnames) - - -class SshService(ConfigService): - name: str = "SSH" - group: str = GROUP_NAME - directories: list[str] = ["/etc/ssh", "/var/run/sshd"] - files: list[str] = ["startsshd.sh", "/etc/ssh/sshd_config"] - executables: list[str] = ["sshd"] - dependencies: list[str] = [] - startup: list[str] = ["bash startsshd.sh"] - validate: list[str] = [] - shutdown: list[str] = ["killall sshd"] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [] - modes: dict[str, dict[str, str]] = {} - - def data(self) -> dict[str, Any]: - return dict( - sshcfgdir=self.directories[0], - sshstatedir=self.directories[1], - sshlibdir="/usr/lib/openssh", - ) - - -class DhcpService(ConfigService): - name: str = "DHCP" - group: str = GROUP_NAME - directories: list[str] = ["/etc/dhcp", "/var/lib/dhcp"] - files: list[str] = ["/etc/dhcp/dhcpd.conf"] - executables: list[str] = ["dhcpd"] - dependencies: list[str] = [] - startup: list[str] = ["touch /var/lib/dhcp/dhcpd.leases", "dhcpd"] - validate: list[str] = ["pidof dhcpd"] - shutdown: list[str] = ["killall dhcpd"] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [] - modes: dict[str, dict[str, str]] = {} - - def data(self) -> dict[str, Any]: - subnets = [] - for iface in self.node.get_ifaces(control=False): - for ip4 in iface.ip4s: - if ip4.size == 1: - continue - # divide the address space in half - index = (ip4.size - 2) / 2 - rangelow = ip4[index] - rangehigh = ip4[-2] - subnets.append((ip4.cidr.ip, ip4.netmask, rangelow, rangehigh, ip4.ip)) - return dict(subnets=subnets) - - -class DhcpClientService(ConfigService): - name: str = "DHCPClient" - group: str = GROUP_NAME - directories: list[str] = [] - files: list[str] = ["startdhcpclient.sh"] - executables: list[str] = ["dhclient"] - dependencies: list[str] = [] - startup: list[str] = ["bash startdhcpclient.sh"] - validate: list[str] = ["pidof dhclient"] - shutdown: list[str] = ["killall dhclient"] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [] - modes: dict[str, dict[str, str]] = {} - - def data(self) -> dict[str, Any]: - ifnames = [] - for iface in self.node.get_ifaces(control=False): - ifnames.append(iface.name) - return dict(ifnames=ifnames) - - -class FtpService(ConfigService): - name: str = "FTP" - group: str = GROUP_NAME - directories: list[str] = ["/var/run/vsftpd/empty", "/var/ftp"] - files: list[str] = ["vsftpd.conf"] - executables: list[str] = ["vsftpd"] - dependencies: list[str] = [] - startup: list[str] = ["vsftpd ./vsftpd.conf"] - validate: list[str] = ["pidof vsftpd"] - shutdown: list[str] = ["killall vsftpd"] - 
validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [] - modes: dict[str, dict[str, str]] = {} - - -class PcapService(ConfigService): - name: str = "pcap" - group: str = GROUP_NAME - directories: list[str] = [] - files: list[str] = ["pcap.sh"] - executables: list[str] = ["tcpdump"] - dependencies: list[str] = [] - startup: list[str] = ["bash pcap.sh start"] - validate: list[str] = ["pidof tcpdump"] - shutdown: list[str] = ["bash pcap.sh stop"] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [] - modes: dict[str, dict[str, str]] = {} - - def data(self) -> dict[str, Any]: - ifnames = [] - for iface in self.node.get_ifaces(control=False): - ifnames.append(iface.name) - return dict(ifnames=ifnames) - - -class RadvdService(ConfigService): - name: str = "radvd" - group: str = GROUP_NAME - directories: list[str] = ["/etc/radvd", "/var/run/radvd"] - files: list[str] = ["/etc/radvd/radvd.conf"] - executables: list[str] = ["radvd"] - dependencies: list[str] = [] - startup: list[str] = [ - "radvd -C /etc/radvd/radvd.conf -m logfile -l /var/log/radvd.log" - ] - validate: list[str] = ["pidof radvd"] - shutdown: list[str] = ["pkill radvd"] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [] - modes: dict[str, dict[str, str]] = {} - - def data(self) -> dict[str, Any]: - ifaces = [] - for iface in self.node.get_ifaces(control=False): - prefixes = [] - for ip6 in iface.ip6s: - prefixes.append(str(ip6)) - if not prefixes: - continue - ifaces.append((iface.name, prefixes)) - return dict(ifaces=ifaces) - - -class AtdService(ConfigService): - name: str = "atd" - group: str = GROUP_NAME - directories: list[str] = ["/var/spool/cron/atjobs", "/var/spool/cron/atspool"] - files: list[str] = ["startatd.sh"] - executables: list[str] = ["atd"] - dependencies: list[str] = [] - startup: list[str] = ["bash startatd.sh"] - validate: list[str] = ["pidof atd"] - shutdown: list[str] = ["pkill atd"] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [] - modes: dict[str, dict[str, str]] = {} - - -class HttpService(ConfigService): - name: str = "HTTP" - group: str = GROUP_NAME - directories: list[str] = [ - "/etc/apache2", - "/var/run/apache2", - "/var/log/apache2", - "/run/lock", - "/var/lock/apache2", - "/var/www", - ] - files: list[str] = [ - "/etc/apache2/apache2.conf", - "/etc/apache2/envvars", - "/var/www/index.html", - ] - executables: list[str] = ["apache2ctl"] - dependencies: list[str] = [] - startup: list[str] = ["chown www-data /var/lock/apache2", "apache2ctl start"] - validate: list[str] = ["pidof apache2"] - shutdown: list[str] = ["apache2ctl stop"] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: list[Configuration] = [] - modes: dict[str, dict[str, str]] = {} - - def data(self) -> dict[str, Any]: - ifaces = [] - for iface in self.node.get_ifaces(control=False): - ifaces.append(iface) - return dict(ifaces=ifaces) diff --git a/daemon/core/configservices/utilservices/templates/defaultmroute.sh b/daemon/core/configservices/utilservices/templates/defaultmroute.sh deleted file mode 100644 index 4a8d9403..00000000 --- a/daemon/core/configservices/utilservices/templates/defaultmroute.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/sh -# auto-generated by DefaultMulticastRoute service (utility.py) -# the first interface is chosen below; please change it as needed -ip 
route add 224.0.0.0/4 dev ${ifname} diff --git a/daemon/core/configservices/utilservices/templates/defaultroute.sh b/daemon/core/configservices/utilservices/templates/defaultroute.sh deleted file mode 100644 index d5cdfd78..00000000 --- a/daemon/core/configservices/utilservices/templates/defaultroute.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/sh -# auto-generated by DefaultRoute service -% for route in routes: -ip route add default via ${route} -% endfor diff --git a/daemon/core/configservices/utilservices/templates/etc/apache2/apache2.conf b/daemon/core/configservices/utilservices/templates/etc/apache2/apache2.conf deleted file mode 100644 index c53e48af..00000000 --- a/daemon/core/configservices/utilservices/templates/etc/apache2/apache2.conf +++ /dev/null @@ -1,102 +0,0 @@ -# apache2.conf generated by utility.py:HttpService -Mutex file:$APACHE_LOCK_DIR default - -PidFile $APACHE_PID_FILE -Timeout 300 -KeepAlive On -MaxKeepAliveRequests 100 -KeepAliveTimeout 5 - -LoadModule mpm_worker_module /usr/lib/apache2/modules/mod_mpm_worker.so - - - StartServers 5 - MinSpareServers 5 - MaxSpareServers 10 - MaxClients 150 - MaxRequestsPerChild 0 - - - - StartServers 2 - MinSpareThreads 25 - MaxSpareThreads 75 - ThreadLimit 64 - ThreadsPerChild 25 - MaxClients 150 - MaxRequestsPerChild 0 - - - - StartServers 2 - MinSpareThreads 25 - MaxSpareThreads 75 - ThreadLimit 64 - ThreadsPerChild 25 - MaxClients 150 - MaxRequestsPerChild 0 - - -User $APACHE_RUN_USER -Group $APACHE_RUN_GROUP - -AccessFileName .htaccess - - - Require all denied - - -DefaultType None - -HostnameLookups Off - -ErrorLog $APACHE_LOG_DIR/error.log -LogLevel warn - -#Include mods-enabled/*.load -#Include mods-enabled/*.conf -LoadModule alias_module /usr/lib/apache2/modules/mod_alias.so -LoadModule auth_basic_module /usr/lib/apache2/modules/mod_auth_basic.so -LoadModule authz_core_module /usr/lib/apache2/modules/mod_authz_core.so -LoadModule authz_host_module /usr/lib/apache2/modules/mod_authz_host.so -LoadModule authz_user_module /usr/lib/apache2/modules/mod_authz_user.so -LoadModule autoindex_module /usr/lib/apache2/modules/mod_autoindex.so -LoadModule dir_module /usr/lib/apache2/modules/mod_dir.so -LoadModule env_module /usr/lib/apache2/modules/mod_env.so - -NameVirtualHost *:80 -Listen 80 - - - Listen 443 - - - Listen 443 - - -LogFormat "%v:%p %h %l %u %t \\"%r\\" %>s %O \\"%{Referer}i\\" \\"%{User-Agent}i\\"" vhost_combined -LogFormat "%h %l %u %t \\"%r\\" %>s %O \\"%{Referer}i\\" \\"%{User-Agent}i\\"" combined -LogFormat "%h %l %u %t \\"%r\\" %>s %O" common -LogFormat "%{Referer}i -> %U" referer -LogFormat "%{User-agent}i" agent - -ServerTokens OS -ServerSignature On -TraceEnable Off - - - ServerAdmin webmaster@localhost - DocumentRoot /var/www - - Options FollowSymLinks - AllowOverride None - - - Options Indexes FollowSymLinks MultiViews - AllowOverride None - Require all granted - - ErrorLog $APACHE_LOG_DIR/error.log - LogLevel warn - CustomLog $APACHE_LOG_DIR/access.log combined - diff --git a/daemon/core/configservices/utilservices/templates/etc/apache2/envvars b/daemon/core/configservices/utilservices/templates/etc/apache2/envvars deleted file mode 100644 index fcfc4d9e..00000000 --- a/daemon/core/configservices/utilservices/templates/etc/apache2/envvars +++ /dev/null @@ -1,10 +0,0 @@ -# this file is used by apache2ctl - generated by utility.py:HttpService -# these settings come from a default Ubuntu apache2 installation -export APACHE_RUN_USER=www-data -export APACHE_RUN_GROUP=www-data -export 
APACHE_PID_FILE=/var/run/apache2.pid -export APACHE_RUN_DIR=/var/run/apache2 -export APACHE_LOCK_DIR=/var/lock/apache2 -export APACHE_LOG_DIR=/var/log/apache2 -export LANG=C -export LANG diff --git a/daemon/core/configservices/utilservices/templates/etc/dhcp/dhcpd.conf b/daemon/core/configservices/utilservices/templates/etc/dhcp/dhcpd.conf deleted file mode 100644 index 7be7f4e8..00000000 --- a/daemon/core/configservices/utilservices/templates/etc/dhcp/dhcpd.conf +++ /dev/null @@ -1,22 +0,0 @@ -# auto-generated by DHCP service (utility.py) -# NOTE: move these option lines into the desired pool { } block(s) below -#option domain-name "test.com"; -#option domain-name-servers 10.0.0.1; -#option routers 10.0.0.1; - -log-facility local6; - -default-lease-time 600; -max-lease-time 7200; - -ddns-update-style none; - -% for subnet, netmask, rangelow, rangehigh, addr in subnets: -subnet ${subnet} netmask ${netmask} { - pool { - range ${rangelow} ${rangehigh}; - default-lease-time 600; - option routers ${addr}; - } -} -% endfor diff --git a/daemon/core/configservices/utilservices/templates/etc/radvd/radvd.conf b/daemon/core/configservices/utilservices/templates/etc/radvd/radvd.conf deleted file mode 100644 index d003b4b1..00000000 --- a/daemon/core/configservices/utilservices/templates/etc/radvd/radvd.conf +++ /dev/null @@ -1,19 +0,0 @@ -# auto-generated by RADVD service (utility.py) -% for ifname, prefixes in ifaces: -interface ${ifname} -{ - AdvSendAdvert on; - MinRtrAdvInterval 3; - MaxRtrAdvInterval 10; - AdvDefaultPreference low; - AdvHomeAgentFlag off; -% for prefix in prefixes: - prefix ${prefix} - { - AdvOnLink on; - AdvAutonomous on; - AdvRouterAddr on; - }; -% endfor -}; -% endfor diff --git a/daemon/core/configservices/utilservices/templates/etc/ssh/sshd_config b/daemon/core/configservices/utilservices/templates/etc/ssh/sshd_config deleted file mode 100644 index 826dd098..00000000 --- a/daemon/core/configservices/utilservices/templates/etc/ssh/sshd_config +++ /dev/null @@ -1,37 +0,0 @@ -# auto-generated by SSH service (utility.py) -Port 22 -Protocol 2 -HostKey ${sshcfgdir}/ssh_host_rsa_key -UsePrivilegeSeparation yes -PidFile ${sshstatedir}/sshd.pid - -KeyRegenerationInterval 3600 -ServerKeyBits 768 - -SyslogFacility AUTH -LogLevel INFO - -LoginGraceTime 120 -PermitRootLogin yes -StrictModes yes - -RSAAuthentication yes -PubkeyAuthentication yes - -IgnoreRhosts yes -RhostsRSAAuthentication no -HostbasedAuthentication no - -PermitEmptyPasswords no -ChallengeResponseAuthentication no - -X11Forwarding yes -X11DisplayOffset 10 -PrintMotd no -PrintLastLog yes -TCPKeepAlive yes - -AcceptEnv LANG LC_* -Subsystem sftp ${sshlibdir}/sftp-server -UsePAM yes -UseDNS no diff --git a/daemon/core/configservices/utilservices/templates/ipforward.sh b/daemon/core/configservices/utilservices/templates/ipforward.sh deleted file mode 100644 index 75717ecf..00000000 --- a/daemon/core/configservices/utilservices/templates/ipforward.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/sh -# auto-generated by IPForward service (utility.py) -sysctl -w net.ipv4.conf.all.forwarding=1 -sysctl -w net.ipv4.conf.default.forwarding=1 -sysctl -w net.ipv6.conf.all.forwarding=1 -sysctl -w net.ipv6.conf.default.forwarding=1 -sysctl -w net.ipv4.conf.all.send_redirects=0 -sysctl -w net.ipv4.conf.default.send_redirects=0 -sysctl -w net.ipv4.conf.all.rp_filter=0 -sysctl -w net.ipv4.conf.default.rp_filter=0 -# setup forwarding for node interfaces -% for devname in devnames: -sysctl -w net.ipv4.conf.${devname}.forwarding=1 -sysctl -w 
net.ipv4.conf.${devname}.send_redirects=0 -sysctl -w net.ipv4.conf.${devname}.rp_filter=0 -sysctl -w net.ipv6.conf.${devname}.forwarding=1 -% endfor diff --git a/daemon/core/configservices/utilservices/templates/pcap.sh b/daemon/core/configservices/utilservices/templates/pcap.sh deleted file mode 100644 index d4a0ea9f..00000000 --- a/daemon/core/configservices/utilservices/templates/pcap.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/sh -# set tcpdump options here (see 'man tcpdump' for help) -# (-s snap length, -C limit pcap file length, -n disable name resolution) -if [ "x$1" = "xstart" ]; then -% for ifname in ifnames: - tcpdump -s 12288 -C 10 -n -w ${node.name}.${ifname}.pcap -i ${ifname} > /dev/null 2>&1 & -% endfor -elif [ "x$1" = "xstop" ]; then - mkdir -p $SESSION_DIR/pcap - mv *.pcap $SESSION_DIR/pcap -fi; diff --git a/daemon/core/configservices/utilservices/templates/startatd.sh b/daemon/core/configservices/utilservices/templates/startatd.sh deleted file mode 100644 index 6d9d2949..00000000 --- a/daemon/core/configservices/utilservices/templates/startatd.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/sh -echo 00001 > /var/spool/cron/atjobs/.SEQ -chown -R daemon /var/spool/cron/* -chmod -R 700 /var/spool/cron/* -atd diff --git a/daemon/core/configservices/utilservices/templates/startdhcpclient.sh b/daemon/core/configservices/utilservices/templates/startdhcpclient.sh deleted file mode 100644 index 061e66d7..00000000 --- a/daemon/core/configservices/utilservices/templates/startdhcpclient.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/sh -# auto-generated by DHCPClient service (utility.py) -# uncomment this mkdir line and symlink line to enable client-side DNS\n# resolution based on the DHCP server response. -#mkdir -p /var/run/resolvconf/interface -% for ifname in ifnames: -#ln -s /var/run/resolvconf/interface/${ifname}.dhclient /var/run/resolvconf/resolv.conf -dhclient -nw -pf /var/run/dhclient-${ifname}.pid -lf /var/run/dhclient-${ifname}.lease ${ifname} -% endfor diff --git a/daemon/core/configservices/utilservices/templates/startsshd.sh b/daemon/core/configservices/utilservices/templates/startsshd.sh deleted file mode 100644 index b35fdb07..00000000 --- a/daemon/core/configservices/utilservices/templates/startsshd.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/sh -# auto-generated by SSH service (utility.py) -ssh-keygen -q -t rsa -N "" -f ${sshcfgdir}/ssh_host_rsa_key -chmod 655 ${sshstatedir} -# wait until RSA host key has been generated to launch sshd -$(which sshd) -f ${sshcfgdir}/sshd_config diff --git a/daemon/core/configservices/utilservices/templates/staticroute.sh b/daemon/core/configservices/utilservices/templates/staticroute.sh deleted file mode 100644 index c47c09fd..00000000 --- a/daemon/core/configservices/utilservices/templates/staticroute.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/sh -# auto-generated by StaticRoute service (utility.py) -# NOTE: this service must be customized to be of any use -# Below are samples that you can uncomment and edit. -% for dest, addr in routes: -#ip route add ${dest} via ${addr} -% endfor diff --git a/daemon/core/configservices/utilservices/templates/var/www/index.html b/daemon/core/configservices/utilservices/templates/var/www/index.html deleted file mode 100644 index bed270ae..00000000 --- a/daemon/core/configservices/utilservices/templates/var/www/index.html +++ /dev/null @@ -1,13 +0,0 @@ - - - -

-<h1>${node.name} web server</h1>
-
-<p>This is the default web page for this server.</p>
-
-<p>The web server software is running but no content has been added, yet.</p>
-
-<ul>
-% for iface in ifaces:
-<li>${iface.name} - ${iface.addrlist}</li>
-% endfor
-</ul>
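The utility services deleted in this diff all follow the same declarative shape: directories to create, files to render from the templates above, and startup/validate/shutdown commands with a validation mode. As a rough, illustrative driver only (not the actual core.configservice implementation, which renders per node and runs everything inside the node's namespace), consuming those fields could look like this:

# illustrative only: a simplified driver for the declarative service fields
import subprocess
from pathlib import Path


def start_service(service, render_file, root=Path("/tmp/demo-node")):
    # create required directories
    for directory in service.directories:
        (root / directory.lstrip("/")).mkdir(parents=True, exist_ok=True)
    # render and write each declared file
    for name in service.files:
        path = root / name.lstrip("/")
        path.parent.mkdir(parents=True, exist_ok=True)
        path.write_text(render_file(service, name))
    # run startup commands, then the validate commands used for BLOCKING mode
    for cmd in service.startup + service.validate:
        subprocess.run(cmd, shell=True, check=False)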
- - diff --git a/daemon/core/configservices/utilservices/templates/vsftpd.conf b/daemon/core/configservices/utilservices/templates/vsftpd.conf deleted file mode 100644 index 988b8727..00000000 --- a/daemon/core/configservices/utilservices/templates/vsftpd.conf +++ /dev/null @@ -1,12 +0,0 @@ -# vsftpd.conf auto-generated by FTP service (utility.py) -listen=YES -anonymous_enable=YES -local_enable=YES -dirmessage_enable=YES -use_localtime=YES -xferlog_enable=YES -connect_from_port_20=YES -xferlog_file=/var/log/vsftpd.log -ftpd_banner=Welcome to the CORE FTP service -secure_chroot_dir=/var/run/vsftpd/empty -anon_root=/var/ftp diff --git a/daemon/core/constants.py.in b/daemon/core/constants.py.in index 1ade8287..0cb3750e 100644 --- a/daemon/core/constants.py.in +++ b/daemon/core/constants.py.in @@ -1,5 +1,28 @@ -from pathlib import Path +import os -COREDPY_VERSION: str = "@PACKAGE_VERSION@" -CORE_CONF_DIR: Path = Path("@CORE_CONF_DIR@") -CORE_DATA_DIR: Path = Path("@CORE_DATA_DIR@") +COREDPY_VERSION = "@PACKAGE_VERSION@" +CORE_STATE_DIR = "@CORE_STATE_DIR@" +CORE_CONF_DIR = "@CORE_CONF_DIR@" +CORE_DATA_DIR = "@CORE_DATA_DIR@" +QUAGGA_STATE_DIR = "@CORE_STATE_DIR@/run/quagga" +FRR_STATE_DIR = "@CORE_STATE_DIR@/run/frr" + + +def which(command): + for path in os.environ["PATH"].split(os.pathsep): + command_path = os.path.join(path, command) + if os.path.isfile(command_path) and os.access(command_path, os.X_OK): + return command_path + + +VNODED_BIN = which("vnoded") +VCMD_BIN = which("vcmd") +BRCTL_BIN = which("brctl") +SYSCTL_BIN = which("sysctl") +IP_BIN = which("ip") +TC_BIN = which("tc") +EBTABLES_BIN = which("ebtables") +MOUNT_BIN = which("mount") +UMOUNT_BIN = which("umount") +OVS_BIN = which("ovs-vsctl") +OVS_FLOW_BIN = which("ovs-ofctl") diff --git a/daemon/core/corehandlers.py b/daemon/core/corehandlers.py new file mode 100644 index 00000000..7c4a377c --- /dev/null +++ b/daemon/core/corehandlers.py @@ -0,0 +1,1776 @@ +""" +socket server request handlers leveraged by core servers. +""" + +import Queue +import SocketServer +import logging +import os +import shlex +import shutil +import sys +import threading +import time +from itertools import repeat + +from core.api import coreapi +from core.api import dataconversion +from core.conf import ConfigShim +from core.data import ConfigData, ExceptionData +from core.data import EventData +from core.data import FileData +from core.emulator.emudata import InterfaceData +from core.emulator.emudata import LinkOptions +from core.emulator.emudata import NodeOptions +from core.enumerations import ConfigDataTypes +from core.enumerations import ConfigFlags +from core.enumerations import ConfigTlvs +from core.enumerations import EventTlvs +from core.enumerations import EventTypes +from core.enumerations import ExceptionTlvs +from core.enumerations import ExecuteTlvs +from core.enumerations import FileTlvs +from core.enumerations import LinkTlvs +from core.enumerations import LinkTypes +from core.enumerations import MessageFlags +from core.enumerations import MessageTypes +from core.enumerations import NodeTlvs +from core.enumerations import NodeTypes +from core.enumerations import RegisterTlvs +from core.enumerations import SessionTlvs +from core.misc import nodeutils +from core.misc import structutils +from core.misc import utils +from core.service import ServiceManager +from core.service import ServiceShim + + +class CoreHandler(SocketServer.BaseRequestHandler): + """ + The SocketServer class uses the RequestHandler class for servicing requests. 
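The constants.py.in change above swaps the pathlib-based constants for a hand-rolled which() that scans PATH for each required binary. The same lookup can be sanity-checked against the Python 3 standard library (the module in this diff targets Python 2, which lacks shutil.which):

# illustrative only: compare the hand-rolled which() with the stdlib lookup
import os
import shutil


def which(command):
    # same logic as constants.py.in: scan PATH for an executable file
    for path in os.environ["PATH"].split(os.pathsep):
        command_path = os.path.join(path, command)
        if os.path.isfile(command_path) and os.access(command_path, os.X_OK):
            return command_path
    return None


print(which("ip"))
print(shutil.which("ip"))  # Python 3.3+ equivalent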
+ """ + + def __init__(self, request, client_address, server): + """ + Create a CoreRequestHandler instance. + + :param request: request object + :param str client_address: client address + :param CoreServer server: core server instance + """ + self.done = False + self.message_handlers = { + MessageTypes.NODE.value: self.handle_node_message, + MessageTypes.LINK.value: self.handle_link_message, + MessageTypes.EXECUTE.value: self.handle_execute_message, + MessageTypes.REGISTER.value: self.handle_register_message, + MessageTypes.CONFIG.value: self.handle_config_message, + MessageTypes.FILE.value: self.handle_file_message, + MessageTypes.INTERFACE.value: self.handle_interface_message, + MessageTypes.EVENT.value: self.handle_event_message, + MessageTypes.SESSION.value: self.handle_session_message, + } + self.message_queue = Queue.Queue() + self.node_status_request = {} + self._shutdown_lock = threading.Lock() + self._sessions_lock = threading.Lock() + + self.handler_threads = [] + num_threads = int(server.config["numthreads"]) + if num_threads < 1: + raise ValueError("invalid number of threads: %s" % num_threads) + + logging.debug("launching core server handler threads: %s", num_threads) + for _ in xrange(num_threads): + thread = threading.Thread(target=self.handler_thread) + self.handler_threads.append(thread) + thread.start() + + self.master = False + self.session = None + + # core emulator + self.coreemu = server.coreemu + + utils.close_onexec(request.fileno()) + SocketServer.BaseRequestHandler.__init__(self, request, client_address, server) + + def setup(self): + """ + Client has connected, set up a new connection. + + :return: nothing + """ + logging.debug("new TCP connection: %s", self.client_address) + + def finish(self): + """ + Client has disconnected, end this request handler and disconnect + from the session. Shutdown sessions that are not running. + + :return: nothing + """ + logging.debug("finishing request handler") + logging.debug("remaining message queue size: %s", self.message_queue.qsize()) + + # give some time for message queue to deplete + timeout = 10 + wait = 0 + while not self.message_queue.empty(): + logging.debug("waiting for message queue to empty: %s seconds", wait) + time.sleep(1) + wait += 1 + if wait == timeout: + logging.warn("queue failed to be empty, finishing request handler") + break + + logging.info("client disconnected: notifying threads") + self.done = True + for thread in self.handler_threads: + logging.info("waiting for thread: %s", thread.getName()) + thread.join(timeout) + if thread.isAlive(): + logging.warn("joining %s failed: still alive after %s sec", thread.getName(), timeout) + + logging.info("connection closed: %s", self.client_address) + if self.session: + # remove client from session broker and shutdown if there are no clients + self.remove_session_handlers() + self.session.broker.session_clients.remove(self) + if not self.session.broker.session_clients and not self.session.is_active(): + logging.info("no session clients left and not active, initiating shutdown") + self.coreemu.delete_session(self.session.id) + + return SocketServer.BaseRequestHandler.finish(self) + + def session_message(self, flags=0): + """ + Build CORE API Sessions message based on current session info. 
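CoreHandler above spawns a pool of handler threads that drain a shared message queue, and finish() waits for that queue to empty before joining them. A minimal sketch of the same pattern using Python 3 module names (the file in this diff is Python 2 and uses the Queue and SocketServer names):

# illustrative only: worker threads draining a shared message queue
import queue
import threading
import time

message_queue = queue.Queue()
done = False


def handler_thread():
    while not done:
        try:
            message = message_queue.get(timeout=1)
        except queue.Empty:
            continue
        print("handling", message)


threads = [threading.Thread(target=handler_thread) for _ in range(2)]
for thread in threads:
    thread.start()

for n in range(5):
    message_queue.put("message-%d" % n)

# like finish(): give the queue time to deplete, then stop the workers
while not message_queue.empty():
    time.sleep(0.1)
done = True
for thread in threads:
    thread.join()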
+ + :param int flags: message flags + :return: session message + """ + id_list = [] + name_list = [] + file_list = [] + node_count_list = [] + date_list = [] + thumb_list = [] + num_sessions = 0 + + with self._sessions_lock: + for _id, session in self.coreemu.sessions.iteritems(): + num_sessions += 1 + id_list.append(str(_id)) + + name = session.name + if not name: + name = "" + name_list.append(name) + + file = session.file_name + if not file: + file = "" + file_list.append(file) + + node_count_list.append(str(session.get_node_count())) + + date_list.append(time.ctime(session._state_time)) + + thumb = session.thumbnail + if not thumb: + thumb = "" + thumb_list.append(thumb) + + session_ids = "|".join(id_list) + names = "|".join(name_list) + files = "|".join(file_list) + node_counts = "|".join(node_count_list) + dates = "|".join(date_list) + thumbs = "|".join(thumb_list) + + if num_sessions > 0: + tlv_data = "" + if len(session_ids) > 0: + tlv_data += coreapi.CoreSessionTlv.pack(SessionTlvs.NUMBER.value, session_ids) + if len(names) > 0: + tlv_data += coreapi.CoreSessionTlv.pack(SessionTlvs.NAME.value, names) + if len(files) > 0: + tlv_data += coreapi.CoreSessionTlv.pack(SessionTlvs.FILE.value, files) + if len(node_counts) > 0: + tlv_data += coreapi.CoreSessionTlv.pack(SessionTlvs.NODE_COUNT.value, node_counts) + if len(dates) > 0: + tlv_data += coreapi.CoreSessionTlv.pack(SessionTlvs.DATE.value, dates) + if len(thumbs) > 0: + tlv_data += coreapi.CoreSessionTlv.pack(SessionTlvs.THUMB.value, thumbs) + message = coreapi.CoreSessionMessage.pack(flags, tlv_data) + else: + message = None + + return message + + def handle_broadcast_event(self, event_data): + """ + Callback to handle an event broadcast out from a session. + + :param core.data.EventData event_data: event data to handle + :return: nothing + """ + logging.debug("handling broadcast event: %s", event_data) + + tlv_data = structutils.pack_values(coreapi.CoreEventTlv, [ + (EventTlvs.NODE, event_data.node), + (EventTlvs.TYPE, event_data.event_type), + (EventTlvs.NAME, event_data.name), + (EventTlvs.DATA, event_data.data), + (EventTlvs.TIME, event_data.time), + (EventTlvs.TIME, event_data.session) + ]) + message = coreapi.CoreEventMessage.pack(0, tlv_data) + + try: + self.sendall(message) + except IOError: + logging.exception("error sending event message") + + def handle_broadcast_file(self, file_data): + """ + Callback to handle a file broadcast out from a session. + + :param core.data.FileData file_data: file data to handle + :return: nothing + """ + logging.debug("handling broadcast file: %s", file_data) + + tlv_data = structutils.pack_values(coreapi.CoreFileTlv, [ + (FileTlvs.NODE, file_data.node), + (FileTlvs.NAME, file_data.name), + (FileTlvs.MODE, file_data.mode), + (FileTlvs.NUMBER, file_data.number), + (FileTlvs.TYPE, file_data.type), + (FileTlvs.SOURCE_NAME, file_data.source), + (FileTlvs.SESSION, file_data.session), + (FileTlvs.DATA, file_data.data), + (FileTlvs.COMPRESSED_DATA, file_data.compressed_data), + ]) + message = coreapi.CoreFileMessage.pack(file_data.message_type, tlv_data) + + try: + self.sendall(message) + except IOError: + logging.exception("error sending file message") + + def handle_broadcast_config(self, config_data): + """ + Callback to handle a config broadcast out from a session. 
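session_message() above flattens per-session fields into parallel "|"-separated strings before packing them, one TLV per column. A small illustration of that encoding and how a client lines the columns back up:

# illustrative only: parallel "|"-joined columns, as built by session_message()
sessions = [
    {"id": 1, "name": "demo", "nodes": 3},
    {"id": 2, "name": "", "nodes": 0},
]

session_ids = "|".join(str(s["id"]) for s in sessions)
names = "|".join(s["name"] for s in sessions)
node_counts = "|".join(str(s["nodes"]) for s in sessions)

# a client reverses the flattening column by column
for sid, name, count in zip(session_ids.split("|"), names.split("|"), node_counts.split("|")):
    print(sid, repr(name), count)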
+ + :param core.data.ConfigData config_data: config data to handle + :return: nothing + """ + logging.debug("handling broadcast config: %s", config_data) + message = dataconversion.convert_config(config_data) + try: + self.sendall(message) + except IOError: + logging.exception("error sending config message") + + def handle_broadcast_exception(self, exception_data): + """ + Callback to handle an exception broadcast out from a session. + + :param core.data.ExceptionData exception_data: exception data to handle + :return: nothing + """ + logging.debug("handling broadcast exception: %s", exception_data) + tlv_data = structutils.pack_values(coreapi.CoreExceptionTlv, [ + (ExceptionTlvs.NODE, exception_data.node), + (ExceptionTlvs.SESSION, exception_data.session), + (ExceptionTlvs.LEVEL, exception_data.level), + (ExceptionTlvs.SOURCE, exception_data.source), + (ExceptionTlvs.DATE, exception_data.date), + (ExceptionTlvs.TEXT, exception_data.text) + ]) + message = coreapi.CoreExceptionMessage.pack(0, tlv_data) + + try: + self.sendall(message) + except IOError: + logging.exception("error sending exception message") + + def handle_broadcast_node(self, node_data): + """ + Callback to handle an node broadcast out from a session. + + :param core.data.NodeData node_data: node data to handle + :return: nothing + """ + logging.debug("handling broadcast node: %s", node_data) + message = dataconversion.convert_node(node_data) + try: + self.sendall(message) + except IOError: + logging.exception("error sending node message") + + def handle_broadcast_link(self, link_data): + """ + Callback to handle an link broadcast out from a session. + + :param core.data.LinkData link_data: link data to handle + :return: nothing + """ + logging.debug("handling broadcast link: %s", link_data) + per = "" + if link_data.per is not None: + per = str(link_data.per) + + tlv_data = structutils.pack_values(coreapi.CoreLinkTlv, [ + (LinkTlvs.N1_NUMBER, link_data.node1_id), + (LinkTlvs.N2_NUMBER, link_data.node2_id), + (LinkTlvs.DELAY, link_data.delay), + (LinkTlvs.BANDWIDTH, link_data.bandwidth), + (LinkTlvs.PER, per), + (LinkTlvs.DUP, link_data.dup), + (LinkTlvs.JITTER, link_data.jitter), + (LinkTlvs.MER, link_data.mer), + (LinkTlvs.BURST, link_data.burst), + (LinkTlvs.SESSION, link_data.session), + (LinkTlvs.MBURST, link_data.mburst), + (LinkTlvs.TYPE, link_data.link_type), + (LinkTlvs.GUI_ATTRIBUTES, link_data.gui_attributes), + (LinkTlvs.UNIDIRECTIONAL, link_data.unidirectional), + (LinkTlvs.EMULATION_ID, link_data.emulation_id), + (LinkTlvs.NETWORK_ID, link_data.network_id), + (LinkTlvs.KEY, link_data.key), + (LinkTlvs.INTERFACE1_NUMBER, link_data.interface1_id), + (LinkTlvs.INTERFACE1_NAME, link_data.interface1_name), + (LinkTlvs.INTERFACE1_IP4, link_data.interface1_ip4), + (LinkTlvs.INTERFACE1_IP4_MASK, link_data.interface1_ip4_mask), + (LinkTlvs.INTERFACE1_MAC, link_data.interface1_mac), + (LinkTlvs.INTERFACE1_IP6, link_data.interface1_ip6), + (LinkTlvs.INTERFACE1_IP6_MASK, link_data.interface1_ip6_mask), + (LinkTlvs.INTERFACE2_NUMBER, link_data.interface2_id), + (LinkTlvs.INTERFACE2_NAME, link_data.interface2_name), + (LinkTlvs.INTERFACE2_IP4, link_data.interface2_ip4), + (LinkTlvs.INTERFACE2_IP4_MASK, link_data.interface2_ip4_mask), + (LinkTlvs.INTERFACE2_MAC, link_data.interface2_mac), + (LinkTlvs.INTERFACE2_IP6, link_data.interface2_ip6), + (LinkTlvs.INTERFACE2_IP6_MASK, link_data.interface2_ip6_mask), + (LinkTlvs.OPAQUE, link_data.opaque) + ]) + + message = coreapi.CoreLinkMessage.pack(link_data.message_type, tlv_data) + 
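The broadcast handlers above all follow the same recipe: build a list of (TLV type, value) pairs, pack them with structutils.pack_values, and send the resulting message. Below is only a toy sketch of that idea, meant to show why unset values can simply be listed and skipped; the real CORE TLV wire format in core.api.coreapi uses different header sizes and padding.

# illustrative only: a toy type-length-value encoder that skips unset values
import struct


def pack_tlv(tlv_type, value):
    data = value.encode() if isinstance(value, str) else struct.pack("!I", value)
    return struct.pack("!BB", tlv_type, len(data)) + data


def pack_values(pairs):
    blob = b""
    for tlv_type, value in pairs:
        if value is None or value == "":
            continue  # unset fields are omitted, as in the handlers above
        blob += pack_tlv(tlv_type, value)
    return blob


print(pack_values([(1, 10), (2, None), (3, "eth0")]).hex())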
+ try: + self.sendall(message) + except IOError: + logging.exception("error sending Event Message") + + def register(self): + """ + Return a Register Message + + :return: register message data + """ + logging.info("GUI has connected to session %d at %s", self.session.id, time.ctime()) + + tlv_data = "" + tlv_data += coreapi.CoreRegisterTlv.pack(RegisterTlvs.EXECUTE_SERVER.value, "core-daemon") + tlv_data += coreapi.CoreRegisterTlv.pack(RegisterTlvs.EMULATION_SERVER.value, "core-daemon") + tlv_data += coreapi.CoreRegisterTlv.pack(self.session.broker.config_type, self.session.broker.name) + tlv_data += coreapi.CoreRegisterTlv.pack(self.session.location.config_type, self.session.location.name) + tlv_data += coreapi.CoreRegisterTlv.pack(self.session.mobility.config_type, self.session.mobility.name) + for model_class in self.session.mobility.models.itervalues(): + tlv_data += coreapi.CoreRegisterTlv.pack(model_class.config_type, model_class.name) + tlv_data += coreapi.CoreRegisterTlv.pack(self.session.services.config_type, self.session.services.name) + tlv_data += coreapi.CoreRegisterTlv.pack(self.session.emane.config_type, self.session.emane.name) + for model_class in self.session.emane.models.itervalues(): + tlv_data += coreapi.CoreRegisterTlv.pack(model_class.config_type, model_class.name) + tlv_data += coreapi.CoreRegisterTlv.pack(self.session.options.config_type, self.session.options.name) + tlv_data += coreapi.CoreRegisterTlv.pack(self.session.metadata.config_type, self.session.metadata.name) + + return coreapi.CoreRegMessage.pack(MessageFlags.ADD.value, tlv_data) + + def sendall(self, data): + """ + Send raw data to the other end of this TCP connection + using socket"s sendall(). + + :param data: data to send over request socket + :return: data sent + """ + return self.request.sendall(data) + + def receive_message(self): + """ + Receive data and return a CORE API message object. + + :return: received message + :rtype: coreapi.CoreMessage + """ + try: + header = self.request.recv(coreapi.CoreMessage.header_len) + except IOError as e: + raise IOError("error receiving header (%s)" % e) + + if len(header) != coreapi.CoreMessage.header_len: + if len(header) == 0: + raise EOFError("client disconnected") + else: + raise IOError("invalid message header size") + + message_type, message_flags, message_len = coreapi.CoreMessage.unpack_header(header) + if message_len == 0: + logging.warn("received message with no data") + + data = "" + while len(data) < message_len: + data += self.request.recv(message_len - len(data)) + if len(data) > message_len: + error_message = "received message length does not match received data (%s != %s)" % ( + len(data), message_len) + logging.error(error_message) + raise IOError(error_message) + + try: + message_class = coreapi.CLASS_MAP[message_type] + message = message_class(message_flags, header, data) + except KeyError: + message = coreapi.CoreMessage(message_flags, header, data) + message.message_type = message_type + logging.exception("unimplemented core message type: %s", message.type_str()) + + return message + + def queue_message(self, message): + """ + Queue an API message for later processing. 
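receive_message() above reads a fixed-size header, unpacks the declared length, and then loops until the full body has arrived: standard length-prefixed framing. A generic version over any socket-like object looks roughly like this (the header layout here is an assumption for the sketch, not the actual CoreMessage layout):

# illustrative only: length-prefixed message framing over a socket-like object
import struct

HEADER_LEN = 4  # assumed 1-byte type, 1-byte flags, 2-byte length


def recv_exact(sock, count):
    data = b""
    while len(data) < count:
        chunk = sock.recv(count - len(data))
        if not chunk:
            raise EOFError("peer disconnected")
        data += chunk
    return data


def receive_message(sock):
    header = recv_exact(sock, HEADER_LEN)
    message_type, message_flags, message_len = struct.unpack("!BBH", header)
    return message_type, message_flags, recv_exact(sock, message_len)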
+ + :param message: message to queue + :return: nothing + """ + logging.debug("queueing msg (queuedtimes = %s): type %s", message.queuedtimes, MessageTypes( + message.message_type)) + self.message_queue.put(message) + + def handler_thread(self): + """ + CORE API message handling loop that is spawned for each server + thread; get CORE API messages from the incoming message queue, + and call handlemsg() for processing. + + :return: nothing + """ + while not self.done: + try: + message = self.message_queue.get(timeout=1) + self.handle_message(message) + except Queue.Empty: + pass + + def handle_message(self, message): + """ + Handle an incoming message; dispatch based on message type, + optionally sending replies. + + :param message: message to handle + :return: nothing + """ + if self.session and self.session.broker.handle_message(message): + logging.debug("message not being handled locally") + return + + logging.debug("%s handling message:\n%s", threading.currentThread().getName(), message) + + if message.message_type not in self.message_handlers: + logging.error("no handler for message type: %s", message.type_str()) + return + + message_handler = self.message_handlers[message.message_type] + + try: + # TODO: this needs to be removed, make use of the broadcast message methods + replies = message_handler(message) + self.dispatch_replies(replies, message) + except: + logging.exception("%s: exception while handling message: %s", threading.currentThread().getName(), message) + + def dispatch_replies(self, replies, message): + """ + Dispatch replies by CORE to message msg previously received from the client. + + :param list replies: reply messages to dispatch + :param message: message for replies + :return: nothing + """ + logging.debug("dispatching replies") + for reply in replies: + message_type, message_flags, message_length = coreapi.CoreMessage.unpack_header(reply) + try: + reply_message = coreapi.CLASS_MAP[message_type]( + message_flags, + reply[:coreapi.CoreMessage.header_len], + reply[coreapi.CoreMessage.header_len:] + ) + except KeyError: + # multiple TLVs of same type cause KeyError exception + reply_message = "CoreMessage (type %d flags %d length %d)" % ( + message_type, message_flags, message_length) + + logging.debug("dispatch reply:\n%s", reply_message) + + try: + self.sendall(reply) + except IOError: + logging.exception("error dispatching reply") + + def handle(self): + """ + Handle a new connection request from a client. Dispatch to the + recvmsg() method for receiving data into CORE API messages, and + add them to an incoming message queue. 
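handle_message() above dispatches each queued message through the message_handlers table built in __init__, logging an error when no handler is registered. The pattern in isolation (the enum values here are stand-ins, not the real core.enumerations values):

# illustrative only: dispatch-table message handling
import enum


class MessageTypes(enum.Enum):
    NODE = 1  # stand-in values
    LINK = 2


class Handler:
    def __init__(self):
        self.message_handlers = {
            MessageTypes.NODE.value: self.handle_node_message,
            MessageTypes.LINK.value: self.handle_link_message,
        }

    def handle_message(self, message_type, message):
        handler = self.message_handlers.get(message_type)
        if handler is None:
            print("no handler for message type:", message_type)
            return
        handler(message)

    def handle_node_message(self, message):
        print("node message:", message)

    def handle_link_message(self, message):
        print("link message:", message)


Handler().handle_message(MessageTypes.NODE.value, {"name": "n1"})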
+ + :return: nothing + """ + # use port as session id + port = self.request.getpeername()[1] + + # TODO: add shutdown handler for session + self.session = self.coreemu.create_session(port, master=False) + # self.session.shutdown_handlers.append(self.session_shutdown) + logging.debug("created new session for client: %s", self.session.id) + + # TODO: hack to associate this handler with this sessions broker for broadcasting + # TODO: broker needs to be pulled out of session to the server/handler level + if self.master: + logging.debug("session set to master") + self.session.master = True + self.session.broker.session_clients.append(self) + + # add handlers for various data + self.add_session_handlers() + + # set initial session state + self.session.set_state(EventTypes.DEFINITION_STATE) + + while True: + try: + message = self.receive_message() + except EOFError: + logging.info("client disconnected") + break + except IOError: + logging.exception("error receiving message") + break + + message.queuedtimes = 0 + self.queue_message(message) + + # delay is required for brief connections, allow session joining + if message.message_type == MessageTypes.SESSION.value: + time.sleep(0.125) + + # broadcast node/link messages to other connected clients + if message.message_type not in [MessageTypes.NODE.value, MessageTypes.LINK.value]: + continue + + for client in self.session.broker.session_clients: + if client == self: + continue + + logging.debug("BROADCAST TO OTHER CLIENT: %s", client) + client.sendall(message.raw_message) + + def send_exception(self, level, source, text, node=None): + """ + Sends an exception for display within the GUI. + + :param core.enumerations.ExceptionLevel level: level for exception + :param str source: source where exception came from + :param str text: details about exception + :param int node: node id, if related to a specific node + :return: + """ + exception_data = ExceptionData( + session=str(self.session.id), + node=node, + date=time.ctime(), + level=level.value, + source=source, + text=text + ) + self.handle_broadcast_exception(exception_data) + + def add_session_handlers(self): + logging.debug("adding session broadcast handlers") + self.session.event_handlers.append(self.handle_broadcast_event) + self.session.exception_handlers.append(self.handle_broadcast_exception) + self.session.node_handlers.append(self.handle_broadcast_node) + self.session.link_handlers.append(self.handle_broadcast_link) + self.session.file_handlers.append(self.handle_broadcast_file) + self.session.config_handlers.append(self.handle_broadcast_config) + + def remove_session_handlers(self): + logging.debug("removing session broadcast handlers") + self.session.event_handlers.remove(self.handle_broadcast_event) + self.session.exception_handlers.remove(self.handle_broadcast_exception) + self.session.node_handlers.remove(self.handle_broadcast_node) + self.session.link_handlers.remove(self.handle_broadcast_link) + self.session.file_handlers.remove(self.handle_broadcast_file) + self.session.config_handlers.remove(self.handle_broadcast_config) + + def handle_node_message(self, message): + """ + Node Message handler + + :param coreapi.CoreNodeMessage message: node message + :return: replies to node message + """ + replies = [] + if message.flags & MessageFlags.ADD.value and message.flags & MessageFlags.DELETE.value: + logging.warn("ignoring invalid message: add and delete flag both set") + return () + + node_type = None + node_type_value = message.get_tlv(NodeTlvs.TYPE.value) + if node_type_value is not 
None: + node_type = NodeTypes(node_type_value) + + node_id = message.get_tlv(NodeTlvs.NUMBER.value) + + node_options = NodeOptions( + name=message.get_tlv(NodeTlvs.NAME.value), + model=message.get_tlv(NodeTlvs.MODEL.value) + ) + + node_options.set_position( + x=message.get_tlv(NodeTlvs.X_POSITION.value), + y=message.get_tlv(NodeTlvs.Y_POSITION.value) + ) + + lat = message.get_tlv(NodeTlvs.LATITUDE.value) + if lat is not None: + lat = float(lat) + lon = message.get_tlv(NodeTlvs.LONGITUDE.value) + if lon is not None: + lon = float(lon) + alt = message.get_tlv(NodeTlvs.ALTITUDE.value) + if alt is not None: + alt = float(alt) + node_options.set_location(lat=lat, lon=lon, alt=alt) + + node_options.icon = message.get_tlv(NodeTlvs.ICON.value) + node_options.canvas = message.get_tlv(NodeTlvs.CANVAS.value) + node_options.opaque = message.get_tlv(NodeTlvs.OPAQUE.value) + + services = message.get_tlv(NodeTlvs.SERVICES.value) + if services: + node_options.services = services.split("|") + + if message.flags & MessageFlags.ADD.value: + node = self.session.add_node(node_type, node_id, node_options) + if node: + if message.flags & MessageFlags.STRING.value: + self.node_status_request[node.objid] = True + + if self.session.state == EventTypes.RUNTIME_STATE.value: + self.send_node_emulation_id(node.objid) + elif message.flags & MessageFlags.DELETE.value: + with self._shutdown_lock: + result = self.session.delete_node(node_id) + + # if we deleted a node broadcast out its removal + if result and message.flags & MessageFlags.STRING.value: + tlvdata = "" + tlvdata += coreapi.CoreNodeTlv.pack(NodeTlvs.NUMBER.value, node_id) + flags = MessageFlags.DELETE.value | MessageFlags.LOCAL.value + replies.append(coreapi.CoreNodeMessage.pack(flags, tlvdata)) + # node update + else: + self.session.update_node(node_id, node_options) + + return replies + + def handle_link_message(self, message): + """ + Link Message handler + + :param coreapi.CoreLinkMessage message: link message to handle + :return: link message replies + """ + node_one_id = message.get_tlv(LinkTlvs.N1_NUMBER.value) + node_two_id = message.get_tlv(LinkTlvs.N2_NUMBER.value) + + interface_one = InterfaceData( + _id=message.get_tlv(LinkTlvs.INTERFACE1_NUMBER.value), + name=message.get_tlv(LinkTlvs.INTERFACE1_NAME.value), + mac=message.get_tlv(LinkTlvs.INTERFACE1_MAC.value), + ip4=message.get_tlv(LinkTlvs.INTERFACE1_IP4.value), + ip4_mask=message.get_tlv(LinkTlvs.INTERFACE1_IP4_MASK.value), + ip6=message.get_tlv(LinkTlvs.INTERFACE1_IP6.value), + ip6_mask=message.get_tlv(LinkTlvs.INTERFACE1_IP6_MASK.value), + ) + interface_two = InterfaceData( + _id=message.get_tlv(LinkTlvs.INTERFACE2_NUMBER.value), + name=message.get_tlv(LinkTlvs.INTERFACE2_NAME.value), + mac=message.get_tlv(LinkTlvs.INTERFACE2_MAC.value), + ip4=message.get_tlv(LinkTlvs.INTERFACE2_IP4.value), + ip4_mask=message.get_tlv(LinkTlvs.INTERFACE2_IP4_MASK.value), + ip6=message.get_tlv(LinkTlvs.INTERFACE2_IP6.value), + ip6_mask=message.get_tlv(LinkTlvs.INTERFACE2_IP6_MASK.value), + ) + + link_type = None + link_type_value = message.get_tlv(LinkTlvs.TYPE.value) + if link_type_value is not None: + link_type = LinkTypes(link_type_value) + + link_options = LinkOptions(_type=link_type) + link_options.delay = message.get_tlv(LinkTlvs.DELAY.value) + link_options.bandwidth = message.get_tlv(LinkTlvs.BANDWIDTH.value) + link_options.session = message.get_tlv(LinkTlvs.SESSION.value) + link_options.per = message.get_tlv(LinkTlvs.PER.value) + link_options.dup = message.get_tlv(LinkTlvs.DUP.value) + 
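handle_node_message() above repeats the same fetch-then-convert step for latitude, longitude, and altitude because any TLV may be absent. A small helper capturing that pattern, with a stub standing in for the real message class:

# illustrative only: convert optional TLV values, leaving missing ones as None
class StubMessage:
    def __init__(self, tlvs):
        self.tlvs = tlvs

    def get_tlv(self, tlv_type):
        return self.tlvs.get(tlv_type)


def get_float_tlv(message, tlv_type):
    value = message.get_tlv(tlv_type)
    return float(value) if value is not None else None


message = StubMessage({"latitude": "40.02", "longitude": "-74.31"})
lat = get_float_tlv(message, "latitude")
lon = get_float_tlv(message, "longitude")
alt = get_float_tlv(message, "altitude")  # absent TLV stays None
print(lat, lon, alt)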
link_options.jitter = message.get_tlv(LinkTlvs.JITTER.value) + link_options.mer = message.get_tlv(LinkTlvs.MER.value) + link_options.burst = message.get_tlv(LinkTlvs.BURST.value) + link_options.mburst = message.get_tlv(LinkTlvs.MBURST.value) + link_options.gui_attributes = message.get_tlv(LinkTlvs.GUI_ATTRIBUTES.value) + link_options.unidirectional = message.get_tlv(LinkTlvs.UNIDIRECTIONAL.value) + link_options.emulation_id = message.get_tlv(LinkTlvs.EMULATION_ID.value) + link_options.network_id = message.get_tlv(LinkTlvs.NETWORK_ID.value) + link_options.key = message.get_tlv(LinkTlvs.KEY.value) + link_options.opaque = message.get_tlv(LinkTlvs.OPAQUE.value) + + if message.flags & MessageFlags.ADD.value: + self.session.add_link(node_one_id, node_two_id, interface_one, interface_two, link_options) + elif message.flags & MessageFlags.DELETE.value: + self.session.delete_link(node_one_id, node_two_id, interface_one.id, interface_two.id) + else: + self.session.update_link(node_one_id, node_two_id, interface_one.id, interface_two.id, link_options) + + return () + + def handle_execute_message(self, message): + """ + Execute Message handler + + :param coreapi.CoreExecMessage message: execute message to handle + :return: reply messages + """ + node_num = message.get_tlv(ExecuteTlvs.NODE.value) + execute_num = message.get_tlv(ExecuteTlvs.NUMBER.value) + execute_time = message.get_tlv(ExecuteTlvs.TIME.value) + command = message.get_tlv(ExecuteTlvs.COMMAND.value) + + # local flag indicates command executed locally, not on a node + if node_num is None and not message.flags & MessageFlags.LOCAL.value: + raise ValueError("Execute Message is missing node number.") + + if execute_num is None: + raise ValueError("Execute Message is missing execution number.") + + if execute_time is not None: + self.session.add_event(execute_time, node=node_num, name=None, data=command) + return () + + try: + node = self.session.get_object(node_num) + + # build common TLV items for reply + tlv_data = "" + if node_num is not None: + tlv_data += coreapi.CoreExecuteTlv.pack(ExecuteTlvs.NODE.value, node_num) + tlv_data += coreapi.CoreExecuteTlv.pack(ExecuteTlvs.NUMBER.value, execute_num) + tlv_data += coreapi.CoreExecuteTlv.pack(ExecuteTlvs.COMMAND.value, command) + + if message.flags & MessageFlags.TTY.value: + if node_num is None: + raise NotImplementedError + # echo back exec message with cmd for spawning interactive terminal + if command == "bash": + command = "/bin/bash" + res = node.termcmdstring(command) + tlv_data += coreapi.CoreExecuteTlv.pack(ExecuteTlvs.RESULT.value, res) + reply = coreapi.CoreExecMessage.pack(MessageFlags.TTY.value, tlv_data) + return reply, + else: + logging.info("execute message with cmd=%s", command) + # execute command and send a response + if message.flags & MessageFlags.STRING.value or message.flags & MessageFlags.TEXT.value: + # shlex.split() handles quotes within the string + if message.flags & MessageFlags.LOCAL.value: + status, res = utils.cmd_output(command) + else: + status, res = node.cmd_output(command) + logging.info("done exec cmd=%s with status=%d res=(%d bytes)", command, status, len(res)) + if message.flags & MessageFlags.TEXT.value: + tlv_data += coreapi.CoreExecuteTlv.pack(ExecuteTlvs.RESULT.value, res) + if message.flags & MessageFlags.STRING.value: + tlv_data += coreapi.CoreExecuteTlv.pack(ExecuteTlvs.STATUS.value, status) + reply = coreapi.CoreExecMessage.pack(0, tlv_data) + return reply, + # execute the command with no response + else: + if message.flags & 
MessageFlags.LOCAL.value: + utils.mute_detach(command) + else: + node.cmd(command, wait=False) + except KeyError: + logging.exception("error getting object: %s", node_num) + # XXX wait and queue this message to try again later + # XXX maybe this should be done differently + if not message.flags & MessageFlags.LOCAL.value: + time.sleep(0.125) + self.queue_message(message) + + return () + + def handle_register_message(self, message): + """ + Register Message Handler + + :param coreapi.CoreRegMessage message: register message to handle + :return: reply messages + """ + replies = [] + + # execute a Python script or XML file + execute_server = message.get_tlv(RegisterTlvs.EXECUTE_SERVER.value) + if execute_server: + try: + logging.info("executing: %s", execute_server) + if message.flags & MessageFlags.STRING.value: + old_session_ids = set(self.coreemu.sessions.keys()) + sys.argv = shlex.split(execute_server) + file_name = sys.argv[0] + + if os.path.splitext(file_name)[1].lower() == ".xml": + session = self.coreemu.create_session(master=False) + try: + session.open_xml(file_name, start=True) + except: + self.coreemu.delete_session(session.id) + raise + else: + thread = threading.Thread( + target=execfile, + args=(file_name, {"__file__": file_name, "coreemu": self.coreemu}) + ) + thread.daemon = True + thread.start() + # allow time for session creation + time.sleep(0.25) + + if message.flags & MessageFlags.STRING.value: + new_session_ids = set(self.coreemu.sessions.keys()) + new_sid = new_session_ids.difference(old_session_ids) + try: + sid = new_sid.pop() + logging.info("executed: %s as session %d", execute_server, sid) + except KeyError: + logging.info("executed %s with unknown session ID", execute_server) + return replies + + logging.debug("checking session %d for RUNTIME state", sid) + session = self.coreemu.sessions.get(sid) + retries = 10 + # wait for session to enter RUNTIME state, to prevent GUI from + # connecting while nodes are still being instantiated + while session.state != EventTypes.RUNTIME_STATE.value: + logging.debug("waiting for session %d to enter RUNTIME state", sid) + time.sleep(1) + retries -= 1 + if retries <= 0: + logging.debug("session %d did not enter RUNTIME state", sid) + return replies + + tlv_data = coreapi.CoreRegisterTlv.pack(RegisterTlvs.EXECUTE_SERVER.value, execute_server) + tlv_data += coreapi.CoreRegisterTlv.pack(RegisterTlvs.SESSION.value, "%s" % sid) + message = coreapi.CoreRegMessage.pack(0, tlv_data) + replies.append(message) + except Exception as e: + logging.exception("error executing: %s", execute_server) + tlv_data = coreapi.CoreExceptionTlv.pack(ExceptionTlvs.LEVEL.value, 2) + tlv_data += coreapi.CoreExceptionTlv.pack(ExceptionTlvs.TEXT.value, str(e)) + message = coreapi.CoreExceptionMessage.pack(0, tlv_data) + replies.append(message) + + return replies + + gui = message.get_tlv(RegisterTlvs.GUI.value) + if gui is None: + logging.debug("ignoring Register message") + else: + # register capabilities with the GUI + self.master = True + + # find the session containing this client and set the session to master + for session in self.coreemu.sessions.itervalues(): + if self in session.broker.session_clients: + logging.debug("setting session to master: %s", session.id) + session.master = True + break + + replies.append(self.register()) + replies.append(self.session_message()) + + return replies + + def handle_config_message(self, message): + """ + Configuration Message handler + + :param coreapi.CoreConfMessage message: configuration message to handle + 
:return: reply messages + """ + # convert config message to standard config data object + config_data = ConfigData( + node=message.get_tlv(ConfigTlvs.NODE.value), + object=message.get_tlv(ConfigTlvs.OBJECT.value), + type=message.get_tlv(ConfigTlvs.TYPE.value), + data_types=message.get_tlv(ConfigTlvs.DATA_TYPES.value), + data_values=message.get_tlv(ConfigTlvs.VALUES.value), + captions=message.get_tlv(ConfigTlvs.CAPTIONS.value), + bitmap=message.get_tlv(ConfigTlvs.BITMAP.value), + possible_values=message.get_tlv(ConfigTlvs.POSSIBLE_VALUES.value), + groups=message.get_tlv(ConfigTlvs.GROUPS.value), + session=message.get_tlv(ConfigTlvs.SESSION.value), + interface_number=message.get_tlv(ConfigTlvs.INTERFACE_NUMBER.value), + network_id=message.get_tlv(ConfigTlvs.NETWORK_ID.value), + opaque=message.get_tlv(ConfigTlvs.OPAQUE.value) + ) + logging.debug("configuration message for %s node %s", config_data.object, config_data.node) + message_type = ConfigFlags(config_data.type) + + replies = [] + + # handle session configuration + if config_data.object == "all": + replies = self.handle_config_all(message_type, config_data) + elif config_data.object == self.session.options.name: + replies = self.handle_config_session(message_type, config_data) + elif config_data.object == self.session.location.name: + self.handle_config_location(message_type, config_data) + elif config_data.object == self.session.metadata.name: + replies = self.handle_config_metadata(message_type, config_data) + elif config_data.object == self.session.broker.name: + self.handle_config_broker(message_type, config_data) + elif config_data.object == self.session.services.name: + replies = self.handle_config_services(message_type, config_data) + elif config_data.object == self.session.mobility.name: + self.handle_config_mobility(message_type, config_data) + elif config_data.object in self.session.mobility.models: + replies = self.handle_config_mobility_models(message_type, config_data) + elif config_data.object == self.session.emane.name: + replies = self.handle_config_emane(message_type, config_data) + elif config_data.object in self.session.emane.models: + replies = self.handle_config_emane_models(message_type, config_data) + else: + raise Exception("no handler for configuration: %s", config_data.object) + + for reply in replies: + self.handle_broadcast_config(reply) + + return [] + + def handle_config_all(self, message_type, config_data): + replies = [] + + if message_type == ConfigFlags.RESET: + node_id = config_data.node + self.session.location.reset() + self.session.services.reset() + self.session.mobility.config_reset(node_id) + self.session.emane.config_reset(node_id) + else: + raise Exception("cant handle config all: %s" % message_type) + + return replies + + def handle_config_session(self, message_type, config_data): + replies = [] + if message_type == ConfigFlags.REQUEST: + type_flags = ConfigFlags.NONE.value + config = self.session.options.get_configs() + config_response = ConfigShim.config_data(0, None, type_flags, self.session.options, config) + replies.append(config_response) + elif message_type != ConfigFlags.RESET and config_data.data_values: + values = ConfigShim.str_to_dict(config_data.data_values) + for key, value in values.iteritems(): + self.session.options.set_config(key, value) + return replies + + def handle_config_location(self, message_type, config_data): + if message_type == ConfigFlags.RESET: + self.session.location.reset() + else: + if not config_data.data_values: + logging.warn("location data missing") + else: 
+ values = [float(x) for x in config_data.data_values.split("|")] + + # Cartesian coordinate reference point + refx, refy = values[0], values[1] + refz = 0.0 + lat, lon, alt = values[2], values[3], values[4] + # xyz point + self.session.location.refxyz = (refx, refy, refz) + # geographic reference point + self.session.location.setrefgeo(lat, lon, alt) + self.session.location.refscale = values[5] + logging.info("location configured: %s = %s scale=%s", self.session.location.refxyz, + self.session.location.refgeo, self.session.location.refscale) + logging.info("location configured: UTM%s", self.session.location.refutm) + + def handle_config_metadata(self, message_type, config_data): + replies = [] + if message_type == ConfigFlags.REQUEST: + node_id = config_data.node + data_values = "|".join(["%s=%s" % item for item in self.session.metadata.get_configs().iteritems()]) + data_types = tuple(ConfigDataTypes.STRING.value for _ in self.session.metadata.get_configs()) + config_response = ConfigData( + message_type=0, + node=node_id, + object=self.session.metadata.name, + type=ConfigFlags.NONE.value, + data_types=data_types, + data_values=data_values + ) + replies.append(config_response) + elif message_type != ConfigFlags.RESET and config_data.data_values: + values = ConfigShim.str_to_dict(config_data.data_values) + for key, value in values.iteritems(): + self.session.metadata.set_config(key, value) + return replies + + def handle_config_broker(self, message_type, config_data): + if message_type not in [ConfigFlags.REQUEST, ConfigFlags.RESET]: + session_id = config_data.session + if not config_data.data_values: + logging.info("emulation server data missing") + else: + values = config_data.data_values.split("|") + + # string of "server:ip:port,server:ip:port,..." 
+ server_strings = values[0] + server_list = server_strings.split(",") + + for server in server_list: + server_items = server.split(":") + name, host, port = server_items[:3] + + if host == "": + host = None + + if port == "": + port = None + else: + port = int(port) + + if session_id is not None: + # receive session ID and my IP from master + self.session.broker.session_id_master = int(session_id.split("|")[0]) + self.session.broker.myip = host + host = None + port = None + + # this connects to the server immediately; maybe we should wait + # or spin off a new "client" thread here + self.session.broker.addserver(name, host, port) + self.session.broker.setupserver(name) + + def handle_config_services(self, message_type, config_data): + replies = [] + node_id = config_data.node + opaque = config_data.opaque + + if message_type == ConfigFlags.REQUEST: + session_id = config_data.session + opaque = config_data.opaque + + logging.debug("configuration request: node(%s) session(%s) opaque(%s)", node_id, session_id, opaque) + + # send back a list of available services + if opaque is None: + type_flag = ConfigFlags.NONE.value + data_types = tuple(repeat(ConfigDataTypes.BOOL.value, len(ServiceManager.services))) + + # sort groups by name and map services to groups + groups = set() + group_map = {} + for service_name in ServiceManager.services.itervalues(): + group = service_name.group + groups.add(group) + group_map.setdefault(group, []).append(service_name) + groups = sorted(groups, key=lambda x: x.lower()) + + # define tlv values in proper order + captions = [] + possible_values = [] + values = [] + group_strings = [] + start_index = 1 + logging.info("sorted groups: %s", groups) + for group in groups: + services = sorted(group_map[group], key=lambda x: x.name.lower()) + logging.info("sorted services for group(%s): %s", group, services) + end_index = start_index + len(services) - 1 + group_strings.append("%s:%s-%s" % (group, start_index, end_index)) + start_index += len(services) + for service_name in services: + captions.append(service_name.name) + values.append("0") + if service_name.custom_needed: + possible_values.append("1") + else: + possible_values.append("") + + # format for tlv + captions = "|".join(captions) + possible_values = "|".join(possible_values) + values = "|".join(values) + groups = "|".join(group_strings) + # send back the properties for this service + else: + if not node_id: + return replies + + node = self.session.get_object(node_id) + if node is None: + logging.warn("request to configure service for unknown node %s", node_id) + return replies + + services = ServiceShim.servicesfromopaque(opaque) + if not services: + return replies + + servicesstring = opaque.split(":") + if len(servicesstring) == 3: + # a file request: e.g. 
"service:zebra:quagga.conf" + file_name = servicesstring[2] + service_name = services[0] + file_data = self.session.services.get_service_file(node, service_name, file_name) + self.session.broadcast_file(file_data) + # short circuit this request early to avoid returning response below + return replies + + # the first service in the list is the one being configured + service_name = services[0] + # send back: + # dirs, configs, startindex, startup, shutdown, metadata, config + type_flag = ConfigFlags.UPDATE.value + data_types = tuple(repeat(ConfigDataTypes.STRING.value, len(ServiceShim.keys))) + service = self.session.services.get_service(node_id, service_name, default_service=True) + values = ServiceShim.tovaluelist(node, service) + captions = None + possible_values = None + groups = None + + config_response = ConfigData( + message_type=0, + node=node_id, + object=self.session.services.name, + type=type_flag, + data_types=data_types, + data_values=values, + captions=captions, + possible_values=possible_values, + groups=groups, + session=session_id, + opaque=opaque + ) + replies.append(config_response) + elif message_type == ConfigFlags.RESET: + self.session.services.reset() + else: + data_types = config_data.data_types + values = config_data.data_values + + error_message = "services config message that I don't know how to handle" + if values is None: + logging.error(error_message) + else: + if opaque is None: + values = values.split("|") + # store default services for a node type in self.defaultservices[] + if data_types is None or data_types[0] != ConfigDataTypes.STRING.value: + logging.info(error_message) + return None + key = values.pop(0) + self.session.services.default_services[key] = values + logging.debug("default services for type %s set to %s", key, values) + elif node_id: + services = ServiceShim.servicesfromopaque(opaque) + if services: + service_name = services[0] + + # set custom service for node + self.session.services.set_service(node_id, service_name) + + # set custom values for custom service + service = self.session.services.get_service(node_id, service_name) + if not service: + raise ValueError("custom service(%s) for node(%s) does not exist", service_name, node_id) + + values = ConfigShim.str_to_dict(values) + for name, value in values.iteritems(): + ServiceShim.setvalue(service, name, value) + + return replies + + def handle_config_mobility(self, message_type, _): + if message_type == ConfigFlags.RESET: + self.session.mobility.reset() + + def handle_config_mobility_models(self, message_type, config_data): + replies = [] + node_id = config_data.node + object_name = config_data.object + interface_id = config_data.interface_number + values_str = config_data.data_values + + if interface_id is not None: + node_id = node_id * 1000 + interface_id + + logging.debug("received configure message for %s nodenum: %s", object_name, node_id) + if message_type == ConfigFlags.REQUEST: + logging.info("replying to configure request for model: %s", object_name) + typeflags = ConfigFlags.NONE.value + + model_class = self.session.mobility.models.get(object_name) + if not model_class: + logging.warn("model class does not exist: %s", object_name) + return [] + + config = self.session.mobility.get_model_config(node_id, object_name) + config_response = ConfigShim.config_data(0, node_id, typeflags, model_class, config) + replies.append(config_response) + elif message_type != ConfigFlags.RESET: + # store the configuration values for later use, when the node + if not object_name: + logging.warn("no 
configuration object for node: %s", node_id) + return [] + + parsed_config = {} + if values_str: + parsed_config = ConfigShim.str_to_dict(values_str) + + self.session.mobility.set_model_config(node_id, object_name, parsed_config) + + return replies + + def handle_config_emane(self, message_type, config_data): + replies = [] + node_id = config_data.node + object_name = config_data.object + interface_id = config_data.interface_number + values_str = config_data.data_values + + if interface_id is not None: + node_id = node_id * 1000 + interface_id + + logging.debug("received configure message for %s nodenum: %s", object_name, node_id) + if message_type == ConfigFlags.REQUEST: + logging.info("replying to configure request for %s model", object_name) + typeflags = ConfigFlags.NONE.value + config = self.session.emane.get_configs() + config_response = ConfigShim.config_data(0, node_id, typeflags, self.session.emane.emane_config, config) + replies.append(config_response) + elif message_type != ConfigFlags.RESET: + if not object_name: + logging.info("no configuration object for node %s", node_id) + return [] + + if values_str: + config = ConfigShim.str_to_dict(values_str) + self.session.emane.set_configs(config) + + # extra logic to start slave Emane object after nemid has been configured from the master + if message_type == ConfigFlags.UPDATE and self.session.master is False: + # instantiation was previously delayed by setup returning Emane.NOT_READY + self.session.instantiate() + + return replies + + def handle_config_emane_models(self, message_type, config_data): + replies = [] + node_id = config_data.node + object_name = config_data.object + interface_id = config_data.interface_number + values_str = config_data.data_values + + if interface_id is not None: + node_id = node_id * 1000 + interface_id + + logging.debug("received configure message for %s nodenum: %s", object_name, node_id) + if message_type == ConfigFlags.REQUEST: + logging.info("replying to configure request for model: %s", object_name) + typeflags = ConfigFlags.NONE.value + + model_class = self.session.emane.models.get(object_name) + if not model_class: + logging.warn("model class does not exist: %s", object_name) + return [] + + config = self.session.emane.get_model_config(node_id, object_name) + config_response = ConfigShim.config_data(0, node_id, typeflags, model_class, config) + replies.append(config_response) + elif message_type != ConfigFlags.RESET: + # store the configuration values for later use, when the node + if not object_name: + logging.warn("no configuration object for node: %s", node_id) + return [] + + parsed_config = {} + if values_str: + parsed_config = ConfigShim.str_to_dict(values_str) + + self.session.emane.set_model_config(node_id, object_name, parsed_config) + + return replies + + def handle_file_message(self, message): + """ + File Message handler + + :param coreapi.CoreFileMessage message: file message to handle + :return: reply messages + """ + if message.flags & MessageFlags.ADD.value: + node_num = message.get_tlv(FileTlvs.NODE.value) + file_name = message.get_tlv(FileTlvs.NAME.value) + file_type = message.get_tlv(FileTlvs.TYPE.value) + source_name = message.get_tlv(FileTlvs.SOURCE_NAME.value) + data = message.get_tlv(FileTlvs.DATA.value) + compressed_data = message.get_tlv(FileTlvs.COMPRESSED_DATA.value) + + if compressed_data: + logging.warn("Compressed file data not implemented for File message.") + return () + + if source_name and data: + logging.warn("ignoring invalid File message: source and data 
TLVs are both present") + return () + + # some File Messages store custom files in services, + # prior to node creation + if file_type is not None: + if file_type.startswith("service:"): + _, service_name = file_type.split(':')[:2] + self.session.services.set_service_file(node_num, service_name, file_name, data) + return () + elif file_type.startswith("hook:"): + _, state = file_type.split(':')[:2] + if not state.isdigit(): + logging.error("error setting hook having state '%s'", state) + return () + state = int(state) + self.session.add_hook(state, file_name, source_name, data) + return () + + # writing a file to the host + if node_num is None: + if source_name is not None: + shutil.copy2(source_name, file_name) + else: + with open(file_name, "w") as open_file: + open_file.write(data) + return () + + self.session.node_add_file(node_num, source_name, file_name, data) + else: + raise NotImplementedError + + return () + + def handle_interface_message(self, message): + """ + Interface Message handler. + + :param message: interface message to handle + :return: reply messages + """ + logging.info("ignoring Interface message") + return () + + def handle_event_message(self, message): + """ + Event Message handler + + :param coreapi.CoreEventMessage message: event message to handle + :return: reply messages + """ + event_data = EventData( + node=message.get_tlv(EventTlvs.NODE.value), + event_type=message.get_tlv(EventTlvs.TYPE.value), + name=message.get_tlv(EventTlvs.NAME.value), + data=message.get_tlv(EventTlvs.DATA.value), + time=message.get_tlv(EventTlvs.TIME.value), + session=message.get_tlv(EventTlvs.SESSION.value) + ) + + if event_data.event_type is None: + raise NotImplementedError("Event message missing event type") + event_type = EventTypes(event_data.event_type) + node_id = event_data.node + + logging.debug("handling event %s at %s", event_type.name, time.ctime()) + if event_type.value <= EventTypes.SHUTDOWN_STATE.value: + if node_id is not None: + try: + node = self.session.get_object(node_id) + except KeyError: + raise KeyError("Event message for unknown node %d" % node_id) + + # configure mobility models for WLAN added during runtime + if event_type == EventTypes.INSTANTIATION_STATE and nodeutils.is_node(node, NodeTypes.WIRELESS_LAN): + self.session.start_mobility(node_ids=(node.objid,)) + return () + + logging.warn("dropping unhandled Event message with node number") + return () + self.session.set_state(event_type) + + if event_type == EventTypes.DEFINITION_STATE: + # clear all session objects in order to receive new definitions + self.session.clear() + elif event_type == EventTypes.INSTANTIATION_STATE: + if len(self.handler_threads) > 1: + # TODO: sync handler threads here before continuing + time.sleep(2.0) # XXX + # done receiving node/link configuration, ready to instantiate + self.session.instantiate() + + # after booting nodes attempt to send emulation id for nodes waiting on status + for obj in self.session.objects.itervalues(): + self.send_node_emulation_id(obj.objid) + elif event_type == EventTypes.RUNTIME_STATE: + if self.session.master: + logging.warn("Unexpected event message: RUNTIME state received at session master") + else: + # master event queue is started in session.checkruntime() + self.session.start_events() + elif event_type == EventTypes.DATACOLLECT_STATE: + self.session.data_collect() + elif event_type == EventTypes.SHUTDOWN_STATE: + if self.session.master: + logging.warn("Unexpected event message: SHUTDOWN state received at session master") + elif event_type in 
{EventTypes.START, EventTypes.STOP, EventTypes.RESTART, EventTypes.PAUSE, + EventTypes.RECONFIGURE}: + handled = False + name = event_data.name + if name: + # TODO: register system for event message handlers, + # like confobjs + if name.startswith("service:"): + self.handle_service_event(event_data) + handled = True + elif name.startswith("mobility:"): + self.session.mobility_event(event_data) + handled = True + if not handled: + logging.warn("Unhandled event message: event type %s ", event_type.name) + elif event_type == EventTypes.FILE_OPEN: + filename = event_data.name + self.session.open_xml(filename, start=False) + self.send_objects() + return () + elif event_type == EventTypes.FILE_SAVE: + filename = event_data.name + self.session.save_xml(filename) + elif event_type == EventTypes.SCHEDULED: + etime = event_data.time + node = event_data.node + name = event_data.name + data = event_data.data + if etime is None: + logging.warn("Event message scheduled event missing start time") + return () + if message.flags & MessageFlags.ADD.value: + self.session.add_event(float(etime), node=node, name=name, data=data) + else: + raise NotImplementedError + else: + logging.warn("unhandled event message: event type %s", event_type) + + return () + + def handle_service_event(self, event_data): + """ + Handle an Event Message used to start, stop, restart, or validate + a service on a given node. + + :param EventData event_data: event data to handle + :return: nothing + """ + event_type = event_data.event_type + node_id = event_data.node + name = event_data.name + + try: + node = self.session.get_object(node_id) + except KeyError: + logging.warn("ignoring event for service '%s', unknown node '%s'", name, node_id) + return + + fail = "" + unknown = [] + services = ServiceShim.servicesfromopaque(name) + for service_name in services: + service = self.session.services.get_service(node_id, service_name, default_service=True) + if not service: + unknown.append(service_name) + continue + + if event_type == EventTypes.STOP.value or event_type == EventTypes.RESTART.value: + status = self.session.services.stop_service(node, service) + if status: + fail += "Stop %s," % service.name + if event_type == EventTypes.START.value or event_type == EventTypes.RESTART.value: + status = self.session.services.startup_service(node, service) + if status: + fail += "Start %s(%s)," % service.name + if event_type == EventTypes.PAUSE.value: + status = self.session.services.validate_service(node, service) + if status: + fail += "%s," % service.name + if event_type == EventTypes.RECONFIGURE.value: + self.session.services.service_reconfigure(node, service) + + fail_data = "" + if len(fail) > 0: + fail_data += "Fail:" + fail + unknown_data = "" + num = len(unknown) + if num > 0: + for u in unknown: + unknown_data += u + if num > 1: + unknown_data += ", " + num -= 1 + logging.warn("Event requested for unknown service(s): %s", unknown_data) + unknown_data = "Unknown:" + unknown_data + + event_data = EventData( + node=node_id, + event_type=event_type, + name=name, + data=fail_data + ";" + unknown_data, + time="%s" % time.time() + ) + + self.session.broadcast_event(event_data) + + def handle_session_message(self, message): + """ + Session Message handler + + :param coreapi.CoreSessionMessage message: session message to handle + :return: reply messages + """ + session_id_str = message.get_tlv(SessionTlvs.NUMBER.value) + session_ids = coreapi.str_to_list(session_id_str) + name_str = message.get_tlv(SessionTlvs.NAME.value) + names = 
coreapi.str_to_list(name_str) + file_str = message.get_tlv(SessionTlvs.FILE.value) + files = coreapi.str_to_list(file_str) + thumb = message.get_tlv(SessionTlvs.THUMB.value) + user = message.get_tlv(SessionTlvs.USER.value) + logging.debug("SESSION message flags=0x%x sessions=%s" % (message.flags, session_id_str)) + + if message.flags == 0: + for index, session_id in enumerate(session_ids): + session_id = int(session_id) + if session_id == 0: + session = self.session + else: + session = self.coreemu.sessions.get(session_id) + + if session is None: + logging.warn("session %s not found", session_id) + continue + + logging.info("request to modify to session: %s", session.id) + if names is not None: + session.name = names[index] + + if files is not None: + session.file_name = files[index] + + if thumb: + session.set_thumbnail(thumb) + + if user: + session.set_user(user) + elif message.flags & MessageFlags.STRING.value and not message.flags & MessageFlags.ADD.value: + # status request flag: send list of sessions + return self.session_message(), + else: + # handle ADD or DEL flags + for session_id in session_ids: + session_id = int(session_id) + session = self.coreemu.sessions.get(session_id) + + if session is None: + logging.info("session %s not found (flags=0x%x)", session_id, message.flags) + continue + + if message.flags & MessageFlags.ADD.value: + # connect to the first session that exists + logging.info("request to connect to session %s", session_id) + + # remove client from session broker and shutdown if needed + self.remove_session_handlers() + self.session.broker.session_clients.remove(self) + if not self.session.broker.session_clients and not self.session.is_active(): + self.coreemu.delete_session(self.session.id) + + # set session to join + self.session = session + + # add client to session broker and set master if needed + if self.master: + self.session.master = True + self.session.broker.session_clients.append(self) + + # add broadcast handlers + logging.info("adding session broadcast handlers") + self.add_session_handlers() + + if user: + self.session.set_user(user) + + if message.flags & MessageFlags.STRING.value: + self.send_objects() + elif message.flags & MessageFlags.DELETE.value: + # shut down the specified session(s) + logging.info("request to terminate session %s", session_id) + self.coreemu.delete_session(session_id) + else: + logging.warn("unhandled session flags for session %s", session_id) + + return () + + def send_node_emulation_id(self, node_id): + """ + Node emulation id to send. + + :param int node_id: node id to send + :return: nothing + """ + if node_id in self.node_status_request: + tlv_data = "" + tlv_data += coreapi.CoreNodeTlv.pack(NodeTlvs.NUMBER.value, node_id) + tlv_data += coreapi.CoreNodeTlv.pack(NodeTlvs.EMULATION_ID.value, node_id) + reply = coreapi.CoreNodeMessage.pack(MessageFlags.ADD.value | MessageFlags.LOCAL.value, tlv_data) + + try: + self.sendall(reply) + except IOError: + logging.exception("error sending node emulation id message: %s", node_id) + + del self.node_status_request[node_id] + + def send_objects(self): + """ + Return API messages that describe the current session. 
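The configuration handlers above and send_objects below repeatedly move between Python dicts and the pipe-delimited "key=value" strings carried in config TLVs (ConfigShim.str_to_dict on the way in, "|".join("%s=%s" % item) on the way out). A minimal standalone sketch of that convention; encode_config/decode_config are hypothetical helper names, not CORE API, and the option values are examples only.

# Hypothetical helpers illustrating the pipe-delimited "key=value" TLV value
# convention; not part of the CORE API.
def encode_config(values):
    return "|".join("%s=%s" % (key, value) for key, value in sorted(values.items()))


def decode_config(text):
    if not text:
        return {}
    return dict(item.split("=", 1) for item in text.split("|"))


options = {"controlnet": "172.16.0.0/24", "preservedir": "1"}
assert decode_config(encode_config(options)) == options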
+ """ + # find all nodes and links + + nodes_data = [] + links_data = [] + with self.session._objects_lock: + for obj in self.session.objects.itervalues(): + node_data = obj.data(message_type=MessageFlags.ADD.value) + if node_data: + nodes_data.append(node_data) + + node_links = obj.all_link_data(flags=MessageFlags.ADD.value) + for link_data in node_links: + links_data.append(link_data) + + # send all nodes first, so that they will exist for any links + for node_data in nodes_data: + self.session.broadcast_node(node_data) + + for link_data in links_data: + self.session.broadcast_link(link_data) + + # send mobility model info + for node_id in self.session.mobility.nodes(): + for model_name, config in self.session.mobility.get_all_configs(node_id).iteritems(): + model_class = self.session.mobility.models[model_name] + logging.debug("mobility config: node(%s) class(%s) values(%s)", node_id, model_class, config) + config_data = ConfigShim.config_data(0, node_id, ConfigFlags.UPDATE.value, model_class, config) + self.session.broadcast_config(config_data) + + # send emane model info + for node_id in self.session.emane.nodes(): + for model_name, config in self.session.emane.get_all_configs(node_id).iteritems(): + model_class = self.session.emane.models[model_name] + logging.debug("emane config: node(%s) class(%s) values(%s)", node_id, model_class, config) + config_data = ConfigShim.config_data(0, node_id, ConfigFlags.UPDATE.value, model_class, config) + self.session.broadcast_config(config_data) + + # service customizations + service_configs = self.session.services.all_configs() + for node_id, service in service_configs: + opaque = "service:%s" % service.name + data_types = tuple(repeat(ConfigDataTypes.STRING.value, len(ServiceShim.keys))) + node = self.session.get_object(node_id) + values = ServiceShim.tovaluelist(node, service) + config_data = ConfigData( + message_type=0, + node=node_id, + object=self.session.services.name, + type=ConfigFlags.UPDATE.value, + data_types=data_types, + data_values=values, + session=str(self.session.id), + opaque=opaque + ) + self.session.broadcast_config(config_data) + + for file_name, config_data in self.session.services.all_files(service): + file_data = FileData( + message_type=MessageFlags.ADD.value, + node=node_id, + name=str(file_name), + type=opaque, + data=str(config_data) + ) + self.session.broadcast_file(file_data) + + # TODO: send location info + + # send hook scripts + for state in sorted(self.session._hooks.keys()): + for file_name, config_data in self.session._hooks[state]: + file_data = FileData( + message_type=MessageFlags.ADD.value, + name=str(file_name), + type="hook:%s" % state, + data=str(config_data) + ) + self.session.broadcast_file(file_data) + + # send session configuration + session_config = self.session.options.get_configs() + config_data = ConfigShim.config_data(0, None, ConfigFlags.UPDATE.value, self.session.options, session_config) + self.session.broadcast_config(config_data) + + # send session metadata + metadata_configs = self.session.metadata.get_configs() + if metadata_configs: + data_values = "|".join(["%s=%s" % item for item in metadata_configs.iteritems()]) + data_types = tuple(ConfigDataTypes.STRING.value for _ in self.session.metadata.get_configs()) + config_data = ConfigData( + message_type=0, + object=self.session.metadata.name, + type=ConfigFlags.NONE.value, + data_types=data_types, + data_values=data_values + ) + self.session.broadcast_config(config_data) + + logging.info("informed GUI about %d nodes and %d links", 
len(nodes_data), len(links_data)) diff --git a/daemon/core/coreobj.py b/daemon/core/coreobj.py new file mode 100644 index 00000000..daf5d9da --- /dev/null +++ b/daemon/core/coreobj.py @@ -0,0 +1,758 @@ +""" +Defines the basic objects for CORE emulation: the PyCoreObj base class, along with PyCoreNode, +PyCoreNet, and PyCoreNetIf. +""" + +import os +import shutil +import socket +import threading +from socket import AF_INET +from socket import AF_INET6 + +from core.data import NodeData, LinkData +from core.enumerations import LinkTypes +from core.misc import ipaddress + + +class Position(object): + """ + Helper class for Cartesian coordinate position + """ + + def __init__(self, x=None, y=None, z=None): + """ + Creates a Position instance. + + :param x: x position + :param y: y position + :param z: z position + :return: + """ + self.x = x + self.y = y + self.z = z + + def set(self, x=None, y=None, z=None): + """ + Returns True if the position has actually changed. + + :param float x: x position + :param float y: y position + :param float z: z position + :return: True if position changed, False otherwise + :rtype: bool + """ + if self.x == x and self.y == y and self.z == z: + return False + self.x = x + self.y = y + self.z = z + return True + + def get(self): + """ + Retrieve x,y,z position. + + :return: x,y,z position tuple + :rtype: tuple + """ + return self.x, self.y, self.z + + +class PyCoreObj(object): + """ + Base class for CORE objects (nodes and networks) + """ + apitype = None + + # TODO: appears start has no usage, verify and remove + def __init__(self, session, objid=None, name=None, start=True): + """ + Creates a PyCoreObj instance. + + :param core.session.Session session: CORE session object + :param int objid: object id + :param str name: object name + :param bool start: start value + :return: + """ + + self.session = session + if objid is None: + objid = session.get_object_id() + self.objid = objid + if name is None: + name = "o%s" % self.objid + self.name = name + self.type = None + self.server = None + self.services = None + # ifindex is key, PyCoreNetIf instance is value + self._netif = {} + self.ifindex = 0 + self.canvas = None + self.icon = None + self.opaque = None + self.position = Position() + + def startup(self): + """ + Each object implements its own startup method. + + :return: nothing + """ + raise NotImplementedError + + def shutdown(self): + """ + Each object implements its own shutdown method. + + :return: nothing + """ + raise NotImplementedError + + def setposition(self, x=None, y=None, z=None): + """ + Set the (x,y,z) position of the object. + + :param float x: x position + :param float y: y position + :param float z: z position + :return: True if position changed, False otherwise + :rtype: bool + """ + return self.position.set(x=x, y=y, z=z) + + def getposition(self): + """ + Return an (x,y,z) tuple representing this object's position. + + :return: x,y,z position tuple + :rtype: tuple + """ + return self.position.get() + + def ifname(self, ifindex): + """ + Retrieve interface name for index. + + :param int ifindex: interface index + :return: interface name + :rtype: str + """ + return self._netif[ifindex].name + + def netifs(self, sort=False): + """ + Retrieve network interfaces, sorted if desired. 
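Position.set above deliberately reports whether the coordinates actually changed; PyCoreNode.setposition further below uses that return value to decide whether to push the new position down to its interfaces. A standalone restatement of that change check:

# Standalone restatement of the change detection in Position.set: setting
# the same coordinates again is reported as "no change".
class SimplePosition(object):
    def __init__(self):
        self.x = self.y = self.z = None

    def set(self, x=None, y=None, z=None):
        if (self.x, self.y, self.z) == (x, y, z):
            return False
        self.x, self.y, self.z = x, y, z
        return True


position = SimplePosition()
assert position.set(1.0, 2.0, 0.0) is True
assert position.set(1.0, 2.0, 0.0) is False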
+ + :param bool sort: boolean used to determine if interfaces should be sorted + :return: network interfaces + :rtype: list + """ + if sort: + return map(lambda k: self._netif[k], sorted(self._netif.keys())) + else: + return self._netif.itervalues() + + def numnetif(self): + """ + Return the attached interface count. + + :return: number of network interfaces + :rtype: int + """ + return len(self._netif) + + def getifindex(self, netif): + """ + Retrieve index for an interface. + + :param PyCoreNetIf netif: interface to get index for + :return: interface index if found, -1 otherwise + :rtype: int + """ + + for ifindex in self._netif: + if self._netif[ifindex] is netif: + return ifindex + + return -1 + + def newifindex(self): + """ + Create a new interface index. + + :return: interface index + :rtype: int + """ + while self.ifindex in self._netif: + self.ifindex += 1 + ifindex = self.ifindex + self.ifindex += 1 + return ifindex + + def data(self, message_type, lat=None, lon=None, alt=None): + """ + Build a data object for this node. + + :param message_type: purpose for the data object we are creating + :param str lat: latitude + :param str lon: longitude + :param str alt: altitude + :return: node data object + :rtype: core.data.NodeData + """ + if self.apitype is None: + return None + + x, y, _ = self.getposition() + model = self.type + emulation_server = self.server + + services = self.services + if services is not None: + services = "|".join([service.name for service in services]) + + node_data = NodeData( + message_type=message_type, + id=self.objid, + node_type=self.apitype, + name=self.name, + emulation_id=self.objid, + canvas=self.canvas, + icon=self.icon, + opaque=self.opaque, + x_position=x, + y_position=y, + latitude=lat, + longitude=lon, + altitude=alt, + model=model, + emulation_server=emulation_server, + services=services + ) + + return node_data + + def all_link_data(self, flags): + """ + Build CORE Link data for this object. There is no default + method for PyCoreObjs as PyCoreNodes do not implement this but + PyCoreNets do. + + :param flags: message flags + :return: list of link data + :rtype: core.data.LinkData + """ + return [] + + +class PyCoreNode(PyCoreObj): + """ + Base class for CORE nodes. + """ + + def __init__(self, session, objid=None, name=None, start=True): + """ + Create a PyCoreNode instance. + + :param core.session.Session session: CORE session object + :param int objid: object id + :param str name: object name + :param bool start: boolean for starting + """ + super(PyCoreNode, self).__init__(session, objid, name, start=start) + self.services = [] + self.nodedir = None + self.tmpnodedir = False + + def addservice(self, service): + """ + Add a services to the service list. + + :param core.service.CoreService service: service to add + :return: nothing + """ + if service is not None: + self.services.append(service) + + def makenodedir(self): + """ + Create the node directory. + + :return: nothing + """ + if self.nodedir is None: + self.nodedir = os.path.join(self.session.session_dir, self.name + ".conf") + os.makedirs(self.nodedir) + self.tmpnodedir = True + else: + self.tmpnodedir = False + + def rmnodedir(self): + """ + Remove the node directory, unless preserve directory has been set. 
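makenodedir above and rmnodedir just below give each node a private "<name>.conf" working directory under the session directory, removed at shutdown unless preservation was requested. An illustrative sketch only, not the CORE implementation (in CORE the preserve decision comes from the "preservedir" session option):

# Illustrative sketch of the per-node directory lifecycle; paths and the
# preserve flag are examples, not CORE internals.
import os
import shutil
import tempfile


def make_node_dir(session_dir, name):
    node_dir = os.path.join(session_dir, name + ".conf")
    os.makedirs(node_dir)
    return node_dir


def remove_node_dir(node_dir, preserve=False):
    if preserve:
        return
    shutil.rmtree(node_dir, ignore_errors=True)


session_dir = tempfile.mkdtemp()
node_dir = make_node_dir(session_dir, "n1")
remove_node_dir(node_dir)
assert not os.path.isdir(node_dir)
shutil.rmtree(session_dir, ignore_errors=True)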
+ + :return: nothing + """ + preserve = self.session.options.get_config("preservedir") == "1" + if preserve: + return + + if self.tmpnodedir: + shutil.rmtree(self.nodedir, ignore_errors=True) + + def addnetif(self, netif, ifindex): + """ + Add network interface to node and set the network interface index if successful. + + :param PyCoreNetIf netif: network interface to add + :param int ifindex: interface index + :return: nothing + """ + if ifindex in self._netif: + raise ValueError("ifindex %s already exists" % ifindex) + self._netif[ifindex] = netif + # TODO: this should have probably been set ahead, seems bad to me, check for failure and fix + netif.netindex = ifindex + + def delnetif(self, ifindex): + """ + Delete a network interface + + :param int ifindex: interface index to delete + :return: nothing + """ + if ifindex not in self._netif: + raise ValueError("ifindex %s does not exist" % ifindex) + netif = self._netif.pop(ifindex) + netif.shutdown() + del netif + + # TODO: net parameter is not used, remove + def netif(self, ifindex, net=None): + """ + Retrieve network interface. + + :param int ifindex: index of interface to retrieve + :param PyCoreNetIf net: network node + :return: network interface, or None if not found + :rtype: PyCoreNetIf + """ + if ifindex in self._netif: + return self._netif[ifindex] + else: + return None + + def attachnet(self, ifindex, net): + """ + Attach a network. + + :param int ifindex: interface of index to attach + :param PyCoreNetIf net: network to attach + :return: + """ + if ifindex not in self._netif: + raise ValueError("ifindex %s does not exist" % ifindex) + self._netif[ifindex].attachnet(net) + + def detachnet(self, ifindex): + """ + Detach network interface. + + :param int ifindex: interface index to detach + :return: nothing + """ + if ifindex not in self._netif: + raise ValueError("ifindex %s does not exist" % ifindex) + self._netif[ifindex].detachnet() + + def setposition(self, x=None, y=None, z=None): + """ + Set position. + + :param x: x position + :param y: y position + :param z: z position + :return: nothing + """ + changed = super(PyCoreNode, self).setposition(x, y, z) + if changed: + for netif in self.netifs(sort=True): + netif.setposition(x, y, z) + + def commonnets(self, obj, want_ctrl=False): + """ + Given another node or net object, return common networks between + this node and that object. A list of tuples is returned, with each tuple + consisting of (network, interface1, interface2). + + :param obj: object to get common network with + :param want_ctrl: flag set to determine if control network are wanted + :return: tuples of common networks + :rtype: list + """ + common = [] + for netif1 in self.netifs(): + if not want_ctrl and hasattr(netif1, "control"): + continue + for netif2 in obj.netifs(): + if netif1.net == netif2.net: + common.append((netif1.net, netif1, netif2)) + + return common + + def check_cmd(self, args): + """ + Runs shell command on node. + + :param list[str]|str args: command to run + :return: combined stdout and stderr + :rtype: str + :raises CoreCommandError: when a non-zero exit status occurs + """ + raise NotImplementedError + + def cmd(self, args, wait=True): + """ + Runs shell command on node, with option to not wait for a result. 
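check_cmd, cmd and cmd_output around this point define the command contract that concrete node classes must implement. A hedged sketch of one possible local implementation built on subprocess; this is illustrative only and not how CORE's node classes do it (for one, CORE raises CoreCommandError rather than RuntimeError):

# Hedged sketch: a purely local stand-in that satisfies the same
# check_cmd/cmd/cmd_output shape using subprocess.
import subprocess


class LocalShellNode(object):
    def cmd(self, args, wait=True):
        proc = subprocess.Popen(args, shell=isinstance(args, str))
        return proc.wait() if wait else 0

    def cmd_output(self, args):
        proc = subprocess.Popen(
            args,
            shell=isinstance(args, str),
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
        )
        output, _ = proc.communicate()
        return proc.returncode, output.decode()

    def check_cmd(self, args):
        status, output = self.cmd_output(args)
        if status != 0:
            raise RuntimeError("command failed (%s): %s" % (status, output))
        return output


status, output = LocalShellNode().cmd_output("echo hello")
assert status == 0 and output.strip() == "hello"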
+ + :param list[str]|str args: command to run + :param bool wait: wait for command to exit, defaults to True + :return: exit status for command + :rtype: int + """ + raise NotImplementedError + + def cmd_output(self, args): + """ + Runs shell command on node and get exit status and output. + + :param list[str]|str args: command to run + :return: exit status and combined stdout and stderr + :rtype: tuple[int, str] + """ + raise NotImplementedError + + def termcmdstring(self, sh): + """ + Create a terminal command string. + + :param str sh: shell to execute command in + :return: str + """ + raise NotImplementedError + + +class PyCoreNet(PyCoreObj): + """ + Base class for networks + """ + linktype = LinkTypes.WIRED.value + + def __init__(self, session, objid, name, start=True): + """ + Create a PyCoreNet instance. + + :param core.session.Session session: CORE session object + :param int objid: object id + :param str name: object name + :param bool start: should object start + """ + super(PyCoreNet, self).__init__(session, objid, name, start=start) + self._linked = {} + self._linked_lock = threading.Lock() + + def startup(self): + """ + Each object implements its own startup method. + + :return: nothing + """ + raise NotImplementedError + + def shutdown(self): + """ + Each object implements its own shutdown method. + + :return: nothing + """ + raise NotImplementedError + + def attach(self, netif): + """ + Attach network interface. + + :param PyCoreNetIf netif: network interface to attach + :return: nothing + """ + i = self.newifindex() + self._netif[i] = netif + netif.netifi = i + with self._linked_lock: + self._linked[netif] = {} + + def detach(self, netif): + """ + Detach network interface. + + :param PyCoreNetIf netif: network interface to detach + :return: nothing + """ + del self._netif[netif.netifi] + netif.netifi = None + with self._linked_lock: + del self._linked[netif] + + def all_link_data(self, flags): + """ + Build link data objects for this network. Each link object describes a link + between this network and a node. 
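all_link_data below emits a pair of one-way LinkData entries whenever the parameters recorded for the upstream direction (the swapparams "_params_up" set) differ from the downstream ones, and a single bidirectional entry otherwise. A standalone sketch of that decision, with plain dicts standing in for LinkData:

# Standalone sketch of the unidirectional-link decision; dicts stand in for
# the LinkData objects built by all_link_data.
def link_reports(downstream, upstream):
    if downstream == upstream:
        return [dict(downstream, unidirectional=0)]
    return [
        dict(downstream, unidirectional=1),
        dict(upstream, unidirectional=1),
    ]


symmetric = link_reports({"delay": 5000}, {"delay": 5000})
asymmetric = link_reports({"delay": 5000}, {"delay": 20000})
assert len(symmetric) == 1 and symmetric[0]["unidirectional"] == 0
assert len(asymmetric) == 2 and all(link["unidirectional"] == 1 for link in asymmetric)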
+ """ + all_links = [] + + # build a link message from this network node to each node having a + # connected interface + for netif in self.netifs(sort=True): + if not hasattr(netif, "node"): + continue + otherobj = netif.node + uni = False + if otherobj is None: + # two layer-2 switches/hubs linked together via linknet() + if not hasattr(netif, "othernet"): + continue + otherobj = netif.othernet + if otherobj.objid == self.objid: + continue + netif.swapparams('_params_up') + upstream_params = netif.getparams() + netif.swapparams('_params_up') + if netif.getparams() != upstream_params: + uni = True + + unidirectional = 0 + if uni: + unidirectional = 1 + + interface2_ip4 = None + interface2_ip4_mask = None + interface2_ip6 = None + interface2_ip6_mask = None + for address in netif.addrlist: + ip, _sep, mask = address.partition("/") + mask = int(mask) + if ipaddress.is_ipv4_address(ip): + family = AF_INET + ipl = socket.inet_pton(family, ip) + interface2_ip4 = ipaddress.IpAddress(af=family, address=ipl) + interface2_ip4_mask = mask + else: + family = AF_INET6 + ipl = socket.inet_pton(family, ip) + interface2_ip6 = ipaddress.IpAddress(af=family, address=ipl) + interface2_ip6_mask = mask + + link_data = LinkData( + message_type=flags, + node1_id=self.objid, + node2_id=otherobj.objid, + link_type=self.linktype, + unidirectional=unidirectional, + interface2_id=otherobj.getifindex(netif), + interface2_mac=netif.hwaddr, + interface2_ip4=interface2_ip4, + interface2_ip4_mask=interface2_ip4_mask, + interface2_ip6=interface2_ip6, + interface2_ip6_mask=interface2_ip6_mask, + delay=netif.getparam("delay"), + bandwidth=netif.getparam("bw"), + dup=netif.getparam("duplicate"), + jitter=netif.getparam("jitter"), + per=netif.getparam("loss") + ) + + all_links.append(link_data) + + if not uni: + continue + + netif.swapparams('_params_up') + link_data = LinkData( + message_type=0, + node1_id=otherobj.objid, + node2_id=self.objid, + unidirectional=1, + delay=netif.getparam("delay"), + bandwidth=netif.getparam("bw"), + dup=netif.getparam("duplicate"), + jitter=netif.getparam("jitter"), + per=netif.getparam("loss") + ) + netif.swapparams('_params_up') + + all_links.append(link_data) + + return all_links + + +class PyCoreNetIf(object): + """ + Base class for network interfaces. + """ + + def __init__(self, node, name, mtu): + """ + Creates a PyCoreNetIf instance. + + :param core.coreobj.PyCoreNode node: node for interface + :param str name: interface name + :param mtu: mtu value + """ + + self.node = node + self.name = name + if not isinstance(mtu, (int, long)): + raise ValueError + self.mtu = mtu + self.net = None + self._params = {} + self.addrlist = [] + self.hwaddr = None + # placeholder position hook + self.poshook = lambda a, b, c, d: None + # used with EMANE + self.transport_type = None + # interface index on the network + self.netindex = None + # index used to find flow data + self.flow_id = None + + def startup(self): + """ + Startup method for the interface. + + :return: nothing + """ + pass + + def shutdown(self): + """ + Shutdown method for the interface. + + :return: nothing + """ + pass + + def attachnet(self, net): + """ + Attach network. + + :param core.coreobj.PyCoreNet net: network to attach + :return: nothing + """ + if self.net: + self.detachnet() + self.net = None + + net.attach(self) + self.net = net + + def detachnet(self): + """ + Detach from a network. + + :return: nothing + """ + if self.net is not None: + self.net.detach(self) + + def addaddr(self, addr): + """ + Add address. 
+ + :param str addr: address to add + :return: nothing + """ + + self.addrlist.append(addr) + + def deladdr(self, addr): + """ + Delete address. + + :param str addr: address to delete + :return: nothing + """ + self.addrlist.remove(addr) + + def sethwaddr(self, addr): + """ + Set hardware address. + + :param core.misc.ipaddress.MacAddress addr: hardware address to set to. + :return: nothing + """ + self.hwaddr = addr + + def getparam(self, key): + """ + Retrieve a parameter from the, or None if the parameter does not exist. + + :param key: parameter to get value for + :return: parameter value + """ + return self._params.get(key) + + def getparams(self): + """ + Return (key, value) pairs for parameters. + """ + parameters = [] + for k in sorted(self._params.keys()): + parameters.append((k, self._params[k])) + return parameters + + def setparam(self, key, value): + """ + Set a parameter value, returns True if the parameter has changed. + + :param key: parameter name to set + :param value: parameter value + :return: True if parameter changed, False otherwise + """ + # treat None and 0 as unchanged values + current_value = self._params.get(key) + if current_value == value or current_value <= 0 and value <= 0: + return False + + self._params[key] = value + return True + + def swapparams(self, name): + """ + Swap out parameters dict for name. If name does not exist, + intialize it. This is for supporting separate upstream/downstream + parameters when two layer-2 nodes are linked together. + + :param str name: name of parameter to swap + :return: nothing + """ + tmp = self._params + if not hasattr(self, name): + setattr(self, name, {}) + self._params = getattr(self, name) + setattr(self, name, tmp) + + def setposition(self, x, y, z): + """ + Dispatch position hook handler. + + :param x: x position + :param y: y position + :param z: z position + :return: nothing + """ + self.poshook(self, x, y, z) diff --git a/daemon/core/coreserver.py b/daemon/core/coreserver.py new file mode 100644 index 00000000..2c052b05 --- /dev/null +++ b/daemon/core/coreserver.py @@ -0,0 +1,30 @@ +""" +Defines core server for handling TCP connections. +""" + +import SocketServer + +from core.emulator.coreemu import CoreEmu + + +class CoreServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer): + """ + TCP server class, manages sessions and spawns request handlers for + incoming connections. + """ + daemon_threads = True + allow_reuse_address = True + + def __init__(self, server_address, handler_class, config=None): + """ + Server class initialization takes configuration data and calls + the SocketServer constructor + + :param tuple[str, int] server_address: server host and port to use + :param class handler_class: request handler + :param dict config: configuration setting + :return: + """ + self.coreemu = CoreEmu(config) + self.config = config + SocketServer.TCPServer.__init__(self, server_address, handler_class) diff --git a/daemon/core/data.py b/daemon/core/data.py new file mode 100644 index 00000000..715eb5d5 --- /dev/null +++ b/daemon/core/data.py @@ -0,0 +1,120 @@ +""" +CORE data objects. 
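Every data object defined below follows the same pattern: a namedtuple whose fields all default to None, so message handlers only pass the TLVs they actually received. A small standalone illustration of that defaulting trick; ExampleData and its fields are made up, not a CORE type.

# Illustration of the all-fields-default-to-None namedtuple trick used by
# ConfigData, NodeData, LinkData and the other data objects below.
import collections

ExampleData = collections.namedtuple("ExampleData", ["message_type", "node", "name"])
ExampleData.__new__.__defaults__ = (None,) * len(ExampleData._fields)

partial = ExampleData(node=7)
assert partial.message_type is None
assert partial.node == 7
assert partial.name is None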
+""" + +import collections + +ConfigData = collections.namedtuple("ConfigData", [ + "message_type", + "node", + "object", + "type", + "data_types", + "data_values", + "captions", + "bitmap", + "possible_values", + "groups", + "session", + "interface_number", + "network_id", + "opaque" +]) +ConfigData.__new__.__defaults__ = (None,) * len(ConfigData._fields) + +EventData = collections.namedtuple("EventData", [ + "node", + "event_type", + "name", + "data", + "time", + "session" +]) +EventData.__new__.__defaults__ = (None,) * len(EventData._fields) + +ExceptionData = collections.namedtuple("ExceptionData", [ + "node", + "session", + "level", + "source", + "date", + "text", + "opaque" +]) +ExceptionData.__new__.__defaults__ = (None,) * len(ExceptionData._fields) + +FileData = collections.namedtuple("FileData", [ + "message_type", + "node", + "name", + "mode", + "number", + "type", + "source", + "session", + "data", + "compressed_data" +]) +FileData.__new__.__defaults__ = (None,) * len(FileData._fields) + +NodeData = collections.namedtuple("NodeData", [ + "message_type", + "id", + "node_type", + "name", + "ip_address", + "mac_address", + "ip6_address", + "model", + "emulation_id", + "emulation_server", + "session", + "x_position", + "y_position", + "canvas", + "network_id", + "services", + "latitude", + "longitude", + "altitude", + "icon", + "opaque" +]) +NodeData.__new__.__defaults__ = (None,) * len(NodeData._fields) + +LinkData = collections.namedtuple("LinkData", [ + "message_type", + "node1_id", + "node2_id", + "delay", + "bandwidth", + "per", + "dup", + "jitter", + "mer", + "burst", + "session", + "mburst", + "link_type", + "gui_attributes", + "unidirectional", + "emulation_id", + "network_id", + "key", + "interface1_id", + "interface1_name", + "interface1_ip4", + "interface1_ip4_mask", + "interface1_mac", + "interface1_ip6", + "interface1_ip6_mask", + "interface2_id", + "interface2_name", + "interface2_ip4", + "interface2_ip4_mask", + "interface2_mac", + "interface2_ip6", + "interface2_ip6_mask", + "opaque" +]) +LinkData.__new__.__defaults__ = (None,) * len(LinkData._fields) diff --git a/daemon/core/emane/bypass.py b/daemon/core/emane/bypass.py new file mode 100644 index 00000000..91a01b37 --- /dev/null +++ b/daemon/core/emane/bypass.py @@ -0,0 +1,42 @@ +""" +EMANE Bypass model for CORE +""" +from core.conf import ConfigGroup +from core.conf import Configuration +from core.emane import emanemodel +from core.enumerations import ConfigDataTypes + + +class EmaneBypassModel(emanemodel.EmaneModel): + name = "emane_bypass" + + # values to ignore, when writing xml files + config_ignore = {"none"} + + # mac definitions + mac_library = "bypassmaclayer" + mac_config = [ + Configuration( + _id="none", + _type=ConfigDataTypes.BOOL, + default="0", + options=["True", "False"], + label="There are no parameters for the bypass model." 
+ ) + ] + + # phy definitions + phy_library = "bypassphylayer" + phy_config = [] + + @classmethod + def load(cls, emane_prefix): + # ignore default logic + pass + + # override config groups + @classmethod + def config_groups(cls): + return [ + ConfigGroup("Bypass Parameters", 1, 1), + ] diff --git a/daemon/core/emane/commeffect.py b/daemon/core/emane/commeffect.py new file mode 100644 index 00000000..62676c16 --- /dev/null +++ b/daemon/core/emane/commeffect.py @@ -0,0 +1,142 @@ +""" +commeffect.py: EMANE CommEffect model for CORE +""" + +import logging +import os + +from lxml import etree + +from core.conf import ConfigGroup +from core.emane import emanemanifest +from core.emane import emanemodel +from core.xml import emanexml + +try: + from emane.events.commeffectevent import CommEffectEvent +except ImportError: + try: + from emanesh.events.commeffectevent import CommEffectEvent + except ImportError: + logging.debug("compatible emane python bindings not installed") + + +def convert_none(x): + """ + Helper to use 0 for None values. + """ + if isinstance(x, basestring): + x = float(x) + if x is None: + return 0 + else: + return int(x) + + +class EmaneCommEffectModel(emanemodel.EmaneModel): + name = "emane_commeffect" + + shim_library = "commeffectshim" + shim_xml = "commeffectshim.xml" + shim_defaults = {} + config_shim = [] + + # comm effect does not need the default phy and external configurations + phy_config = [] + external_config = [] + + @classmethod + def load(cls, emane_prefix): + shim_xml_path = os.path.join(emane_prefix, "share/emane/manifest", cls.shim_xml) + cls.config_shim = emanemanifest.parse(shim_xml_path, cls.shim_defaults) + + @classmethod + def configurations(cls): + return cls.config_shim + + @classmethod + def config_groups(cls): + return [ + ConfigGroup("CommEffect SHIM Parameters", 1, len(cls.configurations())) + ] + + def build_xml_files(self, config, interface=None): + """ + Build the necessary nem and commeffect XMLs in the given path. + If an individual NEM has a nonstandard config, we need to build + that file also. Otherwise the WLAN-wide + nXXemane_commeffectnem.xml, nXXemane_commeffectshim.xml are used. 
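convert_none above normalizes link parameters before they are packed into a CommEffectEvent: unset values (None) become 0 and numeric strings become integers. A standalone restatement with a couple of sanity checks, written against str rather than the Python 2 basestring used in the diff:

# Standalone restatement of convert_none: None -> 0, numeric strings and
# floats -> int (str here instead of Python 2's basestring).
def convert_none(value):
    if isinstance(value, str):
        value = float(value)
    if value is None:
        return 0
    return int(value)


assert convert_none(None) == 0
assert convert_none("54000000.0") == 54000000
assert convert_none(250.7) == 250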
+ + :param dict config: emane model configuration for the node and interface + :param interface: interface for the emane node + :return: nothing + """ + # retrieve xml names + nem_name = emanexml.nem_file_name(self, interface) + shim_name = emanexml.shim_file_name(self, interface) + + # create and write nem document + nem_element = etree.Element("nem", name="%s NEM" % self.name, type="unstructured") + transport_type = "virtual" + if interface and interface.transport_type == "raw": + transport_type = "raw" + transport_file = emanexml.transport_file_name(self.object_id, transport_type) + etree.SubElement(nem_element, "transport", definition=transport_file) + + # set shim configuration + etree.SubElement(nem_element, "shim", definition=shim_name) + + nem_file = os.path.join(self.session.session_dir, nem_name) + emanexml.create_file(nem_element, "nem", nem_file) + + # create and write shim document + shim_element = etree.Element("shim", name="%s SHIM" % self.name, library=self.shim_library) + + # append all shim options (except filterfile) to shimdoc + for configuration in self.config_shim: + name = configuration.id + if name == "filterfile": + continue + value = config[name] + emanexml.add_param(shim_element, name, value) + + # empty filterfile is not allowed + ff = config["filterfile"] + if ff.strip() != "": + emanexml.add_param(shim_element, "filterfile", ff) + + shim_file = os.path.join(self.session.session_dir, shim_name) + emanexml.create_file(shim_element, "shim", shim_file) + + def linkconfig(self, netif, bw=None, delay=None, loss=None, duplicate=None, jitter=None, netif2=None): + """ + Generate CommEffect events when a Link Message is received having + link parameters. + """ + service = self.session.emane.service + if service is None: + logging.warn("%s: EMANE event service unavailable", self.name) + return + + if netif is None or netif2 is None: + logging.warn("%s: missing NEM information", self.name) + return + + # TODO: batch these into multiple events per transmission + # TODO: may want to split out seconds portion of delay and jitter + event = CommEffectEvent() + emane_node = self.session.get_object(self.object_id) + nemid = emane_node.getnemid(netif) + nemid2 = emane_node.getnemid(netif2) + mbw = bw + logging.info("sending comm effect event") + event.append( + nemid, + latency=convert_none(delay), + jitter=convert_none(jitter), + loss=convert_none(loss), + duplicate=convert_none(duplicate), + unicast=long(convert_none(bw)), + broadcast=long(convert_none(mbw)) + ) + service.publish(nemid2, event) diff --git a/daemon/core/emane/emanemanager.py b/daemon/core/emane/emanemanager.py index c02570c9..a7c9b121 100644 --- a/daemon/core/emane/emanemanager.py +++ b/daemon/core/emane/emanemanager.py @@ -1,652 +1,803 @@ """ -Implements configuration and control of an EMANE emulation. +emane.py: definition of an Emane class for implementing configuration control of an EMANE emulation. 
""" import logging import os import threading -from enum import Enum -from typing import TYPE_CHECKING, Optional, Union -from core import utils +from core import CoreCommandError +from core import constants +from core.api import coreapi +from core.api import dataconversion +from core.conf import ConfigGroup +from core.conf import ConfigShim +from core.conf import Configuration +from core.conf import ModelManager +from core.emane import emanemanifest +from core.emane.bypass import EmaneBypassModel +from core.emane.commeffect import EmaneCommEffectModel from core.emane.emanemodel import EmaneModel -from core.emane.linkmonitor import EmaneLinkMonitor -from core.emane.modelmanager import EmaneModelManager -from core.emane.nodes import EmaneNet, TunTap -from core.emulator.data import LinkData -from core.emulator.enumerations import LinkTypes, MessageFlags, RegisterTlvs -from core.errors import CoreCommandError, CoreError -from core.nodes.base import CoreNode, NodeBase -from core.nodes.interface import CoreInterface +from core.emane.ieee80211abg import EmaneIeee80211abgModel +from core.emane.rfpipe import EmaneRfPipeModel +from core.emane.tdma import EmaneTdmaModel +from core.enumerations import ConfigDataTypes +from core.enumerations import ConfigFlags +from core.enumerations import ConfigTlvs +from core.enumerations import MessageFlags +from core.enumerations import MessageTypes +from core.enumerations import NodeTypes +from core.enumerations import RegisterTlvs +from core.misc import nodeutils +from core.misc import utils from core.xml import emanexml -logger = logging.getLogger(__name__) - -if TYPE_CHECKING: - from core.emulator.session import Session - try: - from emane.events import EventService, PathlossEvent, CommEffectEvent, LocationEvent + from emane.events import EventService + from emane.events import LocationEvent from emane.events.eventserviceexception import EventServiceException except ImportError: try: - from emanesh.events import ( - EventService, - PathlossEvent, - CommEffectEvent, - LocationEvent, - ) + from emanesh.events import EventService + from emanesh.events import LocationEvent from emanesh.events.eventserviceexception import EventServiceException except ImportError: - CommEffectEvent = None - EventService = None - LocationEvent = None - PathlossEvent = None - EventServiceException = None - logger.debug("compatible emane python bindings not installed") + logging.debug("compatible emane python bindings not installed") -DEFAULT_LOG_LEVEL: int = 3 +EMANE_MODELS = [ + EmaneRfPipeModel, + EmaneIeee80211abgModel, + EmaneCommEffectModel, + EmaneBypassModel, + EmaneTdmaModel +] +DEFAULT_EMANE_PREFIX = "/usr" -class EmaneState(Enum): - SUCCESS = 0 - NOT_NEEDED = 1 - NOT_READY = 2 - - -class EmaneEventService: - def __init__( - self, manager: "EmaneManager", device: str, group: str, port: int - ) -> None: - self.manager: "EmaneManager" = manager - self.device: str = device - self.group: str = group - self.port: int = port - self.running: bool = False - self.thread: Optional[threading.Thread] = None - logger.info("starting emane event service %s %s:%s", device, group, port) - self.events: EventService = EventService( - eventchannel=(group, port, device), otachannel=None - ) - - def start(self) -> None: - self.running = True - self.thread = threading.Thread(target=self.run, daemon=True) - self.thread.start() - - def run(self) -> None: - """ - Run and monitor events. 
- """ - logger.info("subscribing to emane location events") - while self.running: - _uuid, _seq, events = self.events.nextEvent() - # this occurs with 0.9.1 event service - if not self.running: - break - for event in events: - nem, eid, data = event - if eid == LocationEvent.IDENTIFIER: - self.manager.handlelocationevent(nem, eid, data) - logger.info("unsubscribing from emane location events") - - def stop(self) -> None: - """ - Stop service and monitoring events. - """ - self.events.breakloop() - self.running = False - if self.thread: - self.thread.join() - self.thread = None - for fd in self.events._readFd, self.events._writeFd: - if fd >= 0: - os.close(fd) - for f in self.events._socket, self.events._socketOTA: - if f: - f.close() - - -class EmaneManager: +class EmaneManager(ModelManager): """ EMANE controller object. Lives in a Session instance and is used for - building EMANE config files for all EMANE networks in this emulation, and for - controlling the EMANE daemons. + building EMANE config files from all of the EmaneNode objects in this + emulation, and for controlling the EMANE daemons. """ + name = "emane" + config_type = RegisterTlvs.EMULATION_SERVER.value + SUCCESS, NOT_NEEDED, NOT_READY = (0, 1, 2) + EVENTCFGVAR = "LIBEMANEEVENTSERVICECONFIG" + DEFAULT_LOG_LEVEL = 3 - name: str = "emane" - config_type: RegisterTlvs = RegisterTlvs.EMULATION_SERVER - - def __init__(self, session: "Session") -> None: + def __init__(self, session): """ Creates a Emane instance. - :param session: session this manager is tied to + :param core.session.Session session: session this manager is tied to :return: nothing """ - super().__init__() - self.session: "Session" = session - self.nems_to_ifaces: dict[int, CoreInterface] = {} - self.ifaces_to_nems: dict[CoreInterface, int] = {} - self._emane_nets: dict[int, EmaneNet] = {} - self._emane_node_lock: threading.Lock = threading.Lock() + super(EmaneManager, self).__init__() + self.session = session + self._emane_nodes = {} + self._emane_node_lock = threading.Lock() + self._ifccounts = {} + self._ifccountslock = threading.Lock() # port numbers are allocated from these counters - self.platformport: int = self.session.options.get_int( - "emane_platform_port", 8100 - ) - self.transformport: int = self.session.options.get_int( - "emane_transform_port", 8200 - ) - self.doeventloop: bool = False - self.eventmonthread: Optional[threading.Thread] = None + self.platformport = self.session.options.get_config_int("emane_platform_port", 8100) + self.transformport = self.session.options.get_config_int("emane_transform_port", 8200) + self.doeventloop = False + self.eventmonthread = None # model for global EMANE configuration options - self.node_configs: dict[int, dict[str, dict[str, str]]] = {} - self.node_models: dict[int, str] = {} + self.emane_config = EmaneGlobalModel(session) + self.set_configs(self.emane_config.default_values()) - # link monitor - self.link_monitor: EmaneLinkMonitor = EmaneLinkMonitor(self) - # emane event monitoring - self.services: dict[str, EmaneEventService] = {} - self.nem_service: dict[int, EmaneEventService] = {} + session.broker.handlers.add(self.handledistributed) + self.service = None + self.event_device = None + self.emane_check() - def next_nem_id(self, iface: CoreInterface) -> int: - nem_id = self.session.options.get_int("nem_id_start") - while nem_id in self.nems_to_ifaces: - nem_id += 1 - self.nems_to_ifaces[nem_id] = iface - self.ifaces_to_nems[iface] = nem_id - self.write_nem(iface, nem_id) - return nem_id - - def get_config( - 
self, key: int, model: str, default: bool = True - ) -> Optional[dict[str, str]]: + def getifcconfig(self, node_id, interface, model_name): """ - Get the current or default configuration for an emane model. + Retrieve interface configuration or node configuration if not provided. - :param key: key to get configuration for - :param model: emane model to get configuration for - :param default: True to return default configuration when none exists, False - otherwise - :return: emane model configuration - :raises CoreError: when model does not exist + :param int node_id: node id + :param interface: node interface + :param str model_name: model to get configuration for + :return: node/interface model configuration + :rtype: dict """ - model_class = self.get_model(model) - model_configs = self.node_configs.get(key) - config = None - if model_configs: - config = model_configs.get(model) - if config is None and default: - config = model_class.default_values() - return config - - def set_config(self, key: int, model: str, config: dict[str, str] = None) -> None: - """ - Sets and update the provided configuration against the default model - or currently set emane model configuration. - - :param key: configuration key to set - :param model: model to set configuration for - :param config: configuration to update current configuration with - :return: nothing - :raises CoreError: when model does not exist - """ - self.get_model(model) - model_config = self.get_config(key, model) - config = config if config else {} - model_config.update(config) - model_configs = self.node_configs.setdefault(key, {}) - model_configs[model] = model_config - - def get_model(self, model_name: str) -> type[EmaneModel]: - """ - Convenience method for getting globally loaded emane models. - - :param model_name: name of model to retrieve - :return: emane model class - :raises CoreError: when model does not exist - """ - return EmaneModelManager.get(model_name) - - def get_iface_config( - self, emane_net: EmaneNet, iface: CoreInterface - ) -> dict[str, str]: - """ - Retrieve configuration for a given interface, first checking for interface - specific config, node specific config, network specific config, and finally - falling back to the default configuration settings. - - :param emane_net: emane network the interface is connected to - :param iface: interface running emane - :return: net, node, or interface model configuration - """ - model_name = emane_net.wireless_model.name - # try to retrieve interface specific configuration - key = utils.iface_config_id(iface.node.id, iface.id) - config = self.get_config(key, model_name, default=False) - # attempt to retrieve node specific config, when iface config is not present - if not config: - config = self.get_config(iface.node.id, model_name, default=False) - # attempt to get emane net specific config, when node config is not present - if not config: - # with EMANE 0.9.2+, we need an extra NEM XML from - # model.buildnemxmlfiles(), so defaults are returned here - config = self.get_config(emane_net.id, model_name, default=False) - # return default config values, when a config is not present - if not config: - config = emane_net.wireless_model.default_values() - return config - - def config_reset(self, node_id: int = None) -> None: - if node_id is None: - self.node_configs.clear() - self.node_models.clear() + # use the network-wide config values or interface(NEM)-specific values? 
+ if interface is None: + return self.get_configs(node_id=node_id, config_type=model_name) else: - self.node_configs.get(node_id, {}).clear() - self.node_models.pop(node_id, None) + # don"t use default values when interface config is the same as net + # note here that using ifc.node.objid as key allows for only one type + # of each model per node; + # TODO: use both node and interface as key - def add_node(self, emane_net: EmaneNet) -> None: + # Adamson change: first check for iface config keyed by "node:ifc.name" + # (so that nodes w/ multiple interfaces of same conftype can have + # different configs for each separate interface) + key = 1000 * interface.node.objid + if interface.netindex is not None: + key += interface.netindex + + # try retrieve interface specific configuration, avoid getting defaults + config = self.get_configs(node_id=key, config_type=model_name) + + # otherwise retrieve the interfaces node configuration, avoid using defaults + if not config: + config = self.get_configs(node_id=interface.node.objid, config_type=model_name) + + # get non interface config, when none found + if not config: + # with EMANE 0.9.2+, we need an extra NEM XML from + # model.buildnemxmlfiles(), so defaults are returned here + config = self.get_configs(node_id=node_id, config_type=model_name) + + return config + + def config_reset(self, node_id=None): + super(EmaneManager, self).config_reset(node_id) + self.set_configs(self.emane_config.default_values()) + + def emane_check(self): """ - Add EMANE network object to this manager. + Check if emane is installed and load models. - :param emane_net: emane node to add + :return: nothing + """ + try: + # check for emane + emane_version = utils.check_cmd(["emane", "--version"]) + logging.info("using EMANE: %s", emane_version) + + # load default emane models + self.load_models(EMANE_MODELS) + + # load custom models + custom_models_path = self.session.options.get_config("emane_models_dir") + if custom_models_path: + emane_models = utils.load_classes(custom_models_path, EmaneModel) + self.load_models(emane_models) + except CoreCommandError: + logging.info("emane is not installed") + + def deleteeventservice(self): + if self.service: + for fd in self.service._readFd, self.service._writeFd: + if fd >= 0: + os.close(fd) + for f in self.service._socket, self.service._socketOTA: + if f: + f.close() + self.service = None + self.event_device = None + + def initeventservice(self, filename=None, shutdown=False): + """ + Re-initialize the EMANE Event service. + The multicast group and/or port may be configured. + """ + self.deleteeventservice() + + if shutdown: + return + + # Get the control network to be used for events + group, port = self.get_config("eventservicegroup").split(":") + self.event_device = self.get_config("eventservicedevice") + eventnetidx = self.session.get_control_net_index(self.event_device) + if eventnetidx < 0: + logging.error("invalid emane event service device provided: %s", self.event_device) + return False + + # make sure the event control network is in place + eventnet = self.session.add_remove_control_net(net_index=eventnetidx, remove=False, conf_required=False) + if eventnet is not None: + # direct EMANE events towards control net bridge + self.event_device = eventnet.brname + eventchannel = (group, int(port), self.event_device) + + # disabled otachannel for event service + # only needed for e.g. 
antennaprofile events xmit by models + logging.info("using %s for event service traffic", self.event_device) + try: + self.service = EventService(eventchannel=eventchannel, otachannel=None) + except EventServiceException: + logging.exception("error instantiating emane EventService") + + return True + + def load_models(self, emane_models): + """ + Load EMANE models and make them available. + """ + for emane_model in emane_models: + logging.info("loading emane model: %s", emane_model.__name__) + emane_prefix = self.session.options.get_config("emane_prefix", default=DEFAULT_EMANE_PREFIX) + emane_model.load(emane_prefix) + self.models[emane_model.name] = emane_model + + def add_node(self, emane_node): + """ + Add a new EmaneNode object to this Emane controller object + + :param core.emane.nodes.EmaneNode emane_node: emane node to add :return: nothing """ with self._emane_node_lock: - if emane_net.id in self._emane_nets: - raise CoreError( - f"duplicate emane network({emane_net.id}): {emane_net.name}" - ) - self._emane_nets[emane_net.id] = emane_net + if emane_node.objid in self._emane_nodes: + raise KeyError("non-unique EMANE object id %s for %s" % (emane_node.objid, emane_node)) + self._emane_nodes[emane_node.objid] = emane_node - def getnodes(self) -> set[CoreNode]: + def getnodes(self): """ - Return a set of CoreNodes that are linked to an EMANE network, + Return a set of CoreNodes that are linked to an EmaneNode, e.g. containers having one or more radio interfaces. """ + # assumes self._objslock already held nodes = set() - for emane_net in self._emane_nets.values(): - for iface in emane_net.get_ifaces(): - if isinstance(iface.node, CoreNode): - nodes.add(iface.node) + for emane_node in self._emane_nodes.values(): + for netif in emane_node.netifs(): + nodes.add(netif.node) return nodes - def setup(self) -> EmaneState: + def setup(self): """ - Setup duties for EMANE manager. + Populate self._objs with EmaneNodes; perform distributed setup; + associate models with EmaneNodes from self.config. Returns + Emane.(SUCCESS, NOT_NEEDED, NOT_READY) in order to delay session + instantiation. 
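# A small sketch of how the "group:port" option strings used above
# (eventservicegroup / otamanagergroup) are split into the (group, port, device)
# event-channel tuple handed to EventService; the device name is illustrative.
def parse_group_option(value, device):
    group, port = value.split(":")
    return group, int(port), device

assert parse_group_option("224.1.2.8:45703", "ctrl0") == ("224.1.2.8", 45703, "ctrl0")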
+ """ + logging.debug("emane setup") - :return: SUCCESS, NOT_NEEDED, NOT_READY in order to delay session - instantiation - """ - logger.debug("emane setup") - with self.session.nodes_lock: - for node_id in self.session.nodes: - node = self.session.nodes[node_id] - if isinstance(node, EmaneNet): - logger.debug( - "adding emane node: id(%s) name(%s)", node.id, node.name - ) + # TODO: drive this from the session object + with self.session._objects_lock: + for node in self.session.objects.itervalues(): + if nodeutils.is_node(node, NodeTypes.EMANE): + logging.debug("adding emane node: id(%s) name(%s)", node.objid, node.name) self.add_node(node) - if not self._emane_nets: - logger.debug("no emane nodes in session") - return EmaneState.NOT_NEEDED - # check if bindings were installed - if EventService is None: - raise CoreError("EMANE python bindings are not installed") + + if not self._emane_nodes: + logging.debug("no emane nodes in session") + return EmaneManager.NOT_NEEDED + + # control network bridge required for EMANE 0.9.2 + # - needs to be configured before checkdistributed() for distributed + # - needs to exist when eventservice binds to it (initeventservice) + if self.session.master: + otadev = self.get_config("otamanagerdevice") + netidx = self.session.get_control_net_index(otadev) + logging.debug("emane ota manager device: index(%s) otadev(%s)", netidx, otadev) + if netidx < 0: + logging.error("EMANE cannot start, check core config. invalid OTA device provided: %s", otadev) + return EmaneManager.NOT_READY + + ctrlnet = self.session.add_remove_control_net(net_index=netidx, remove=False, conf_required=False) + self.distributedctrlnet(ctrlnet) + eventdev = self.get_config("eventservicedevice") + logging.debug("emane event service device: eventdev(%s)", eventdev) + if eventdev != otadev: + netidx = self.session.get_control_net_index(eventdev) + logging.debug("emane event service device index: %s", netidx) + if netidx < 0: + logging.error("EMANE cannot start, check core config. invalid event service device: %s", eventdev) + return EmaneManager.NOT_READY + + ctrlnet = self.session.add_remove_control_net(net_index=netidx, remove=False, conf_required=False) + self.distributedctrlnet(ctrlnet) + + if self.checkdistributed(): + # we are slave, but haven't received a platformid yet + platform_id_start = "platform_id_start" + default_values = self.emane_config.default_values() + value = self.get_config(platform_id_start) + if value == default_values[platform_id_start]: + return EmaneManager.NOT_READY + self.check_node_models() - return EmaneState.SUCCESS + return EmaneManager.SUCCESS - def startup(self) -> EmaneState: + def startup(self): """ - After all the EMANE networks have been added, build XML files - and start the daemons. - - :return: SUCCESS, NOT_NEEDED, NOT_READY in order to delay session - instantiation + After all the EmaneNode objects have been added, build XML files + and start the daemons. Returns Emane.(SUCCESS, NOT_NEEDED, or + NOT_READY) which is used to delay session instantiation. 
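# A sketch of using the SUCCESS / NOT_NEEDED / NOT_READY return values described
# above to delay instantiation; the retry loop itself is illustrative and not
# part of CORE.
import time

def start_emane(manager, retries=5, delay=1.0):
    for _ in range(retries):
        status = manager.startup()
        if status != manager.NOT_READY:
            return status
        time.sleep(delay)
    return manager.NOT_READY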
""" self.reset() - status = self.setup() - if status != EmaneState.SUCCESS: - return status - self.startup_nodes() - if self.links_enabled(): - self.link_monitor.start() - return EmaneState.SUCCESS + r = self.setup() - def startup_nodes(self) -> None: + # NOT_NEEDED or NOT_READY + if r != EmaneManager.SUCCESS: + return r + + nems = [] with self._emane_node_lock: - logger.info("emane building xmls...") - for emane_net, iface in self.get_ifaces(): - self.start_iface(emane_net, iface) + self.buildxml() + self.initeventservice() + self.starteventmonitor() - def start_iface(self, emane_net: EmaneNet, iface: TunTap) -> None: - nem_id = self.next_nem_id(iface) - nem_port = self.get_nem_port(iface) - logger.info( - "starting emane for node(%s) iface(%s) nem(%s)", - iface.node.name, - iface.name, - nem_id, - ) - config = self.get_iface_config(emane_net, iface) - self.setup_control_channels(nem_id, iface, config) - emanexml.build_platform_xml(nem_id, nem_port, emane_net, iface, config) - self.start_daemon(iface) - self.install_iface(iface, config) + if self.numnems() > 0: + self.startdaemons() + self.installnetifs() - def get_ifaces(self) -> list[tuple[EmaneNet, TunTap]]: - ifaces = [] - for emane_net in self._emane_nets.values(): - if not emane_net.wireless_model: - logger.error("emane net(%s) has no model", emane_net.name) - continue - for iface in emane_net.get_ifaces(): - if not iface.node: - logger.error( - "emane net(%s) connected interface(%s) missing node", - emane_net.name, - iface.name, - ) - continue - if isinstance(iface, TunTap): - ifaces.append((emane_net, iface)) - return sorted(ifaces, key=lambda x: (x[1].node.id, x[1].id)) + for emane_node in self._emane_nodes.itervalues(): + for netif in emane_node.netifs(): + nems.append((netif.node.name, netif.name, emane_node.getnemid(netif))) - def setup_control_channels( - self, nem_id: int, iface: CoreInterface, config: dict[str, str] - ) -> None: - node = iface.node - # setup ota device - otagroup, _otaport = config["otamanagergroup"].split(":") - otadev = config["otamanagerdevice"] - ota_index = self.session.get_control_net_index(otadev) - self.session.add_remove_control_net(ota_index, conf_required=False) - if isinstance(node, CoreNode): - self.session.add_remove_control_iface(node, ota_index, conf_required=False) - # setup event device - eventgroup, eventport = config["eventservicegroup"].split(":") - eventdev = config["eventservicedevice"] - event_index = self.session.get_control_net_index(eventdev) - event_net = self.session.add_remove_control_net( - event_index, conf_required=False - ) - if isinstance(node, CoreNode): - self.session.add_remove_control_iface( - node, event_index, conf_required=False - ) - # initialize emane event services - service = self.services.get(event_net.brname) - if not service: + if nems: + emane_nems_filename = os.path.join(self.session.session_dir, "emane_nems") try: - service = EmaneEventService( - self, event_net.brname, eventgroup, int(eventport) - ) - if self.doeventmonitor(): - service.start() - self.services[event_net.brname] = service - self.nem_service[nem_id] = service - except EventServiceException: - raise CoreError( - "failed to start emane event services " - f"{event_net.brname} {eventgroup}:{eventport}" - ) - else: - self.nem_service[nem_id] = service - # setup multicast routes as needed - logger.info( - "node(%s) interface(%s) ota(%s:%s) event(%s:%s)", - node.name, - iface.name, - otagroup, - otadev, - eventgroup, - eventdev, - ) - node.node_net_client.create_route(otagroup, otadev) - if 
eventgroup != otagroup: - node.node_net_client.create_route(eventgroup, eventdev) + with open(emane_nems_filename, "w") as f: + for nodename, ifname, nemid in nems: + f.write("%s %s %s\n" % (nodename, ifname, nemid)) + except IOError: + logging.exception("Error writing EMANE NEMs file: %s") - def get_iface(self, nem_id: int) -> Optional[CoreInterface]: - return self.nems_to_ifaces.get(nem_id) + return EmaneManager.SUCCESS - def get_nem_id(self, iface: CoreInterface) -> Optional[int]: - return self.ifaces_to_nems.get(iface) - - def get_nem_port(self, iface: CoreInterface) -> int: - nem_id = self.get_nem_id(iface) - return int(f"47{nem_id:03}") - - def get_nem_position( - self, iface: CoreInterface - ) -> Optional[tuple[int, float, float, int]]: - """ - Retrieves nem position for a given interface. - - :param iface: interface to get nem emane position for - :return: nem position tuple, None otherwise - """ - nem_id = self.get_nem_id(iface) - if nem_id is None: - logger.info("nem for %s is unknown", iface.localname) - return - node = iface.node - x, y, z = node.getposition() - lat, lon, alt = self.session.location.getgeo(x, y, z) - if node.position.alt is not None: - alt = node.position.alt - node.position.set_geo(lon, lat, alt) - # altitude must be an integer or warning is printed - alt = int(round(alt)) - return nem_id, lon, lat, alt - - def set_nem_position(self, iface: CoreInterface) -> None: - """ - Publish a NEM location change event using the EMANE event service. - - :param iface: interface to set nem position for - """ - position = self.get_nem_position(iface) - if position: - nemid, lon, lat, alt = position - event = LocationEvent() - event.append(nemid, latitude=lat, longitude=lon, altitude=alt) - self.publish_event(nemid, event, send_all=True) - - def set_nem_positions(self, moved_ifaces: list[CoreInterface]) -> None: - """ - Several NEMs have moved, from e.g. a WaypointMobilityModel - calculation. Generate an EMANE Location Event having several - entries for each interface that has moved. - """ - if not moved_ifaces: - return - services = {} - for iface in moved_ifaces: - position = self.get_nem_position(iface) - if not position: - continue - nem_id, lon, lat, alt = position - service = self.nem_service.get(nem_id) - if not service: - continue - event = services.setdefault(service, LocationEvent()) - event.append(nem_id, latitude=lat, longitude=lon, altitude=alt) - for service, event in services.items(): - service.events.publish(0, event) - - def write_nem(self, iface: CoreInterface, nem_id: int) -> None: - path = self.session.directory / "emane_nems" - try: - with path.open("a") as f: - f.write(f"{iface.node.name} {iface.name} {nem_id}\n") - except OSError: - logger.exception("error writing to emane nem file") - - def links_enabled(self) -> bool: - return self.session.options.get_int("link_enabled") == 1 - - def poststartup(self) -> None: + def poststartup(self): """ Retransmit location events now that all NEMs are active. 
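# A sketch of the location-event publication behind the retransmission mentioned
# above (see setnemposition / set_nem_positions in this file); it assumes the
# optional emane python bindings are installed.
from emane.events import LocationEvent

def publish_position(service, nemid, lat, lon, alt):
    event = LocationEvent()
    # altitude must be an integer or a warning is printed
    event.append(nemid, latitude=lat, longitude=lon, altitude=int(round(alt)))
    # NEM id 0 is used when the event should reach all NEMs, as in the
    # bulk position-update path in this file
    service.publish(0, event)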
""" - events_enabled = self.genlocationevents() - with self._emane_node_lock: - for node_id in sorted(self._emane_nets): - emane_net = self._emane_nets[node_id] - logger.debug( - "post startup for emane node: %s - %s", emane_net.id, emane_net.name - ) - for iface in emane_net.get_ifaces(): - emane_net.wireless_model.post_startup(iface) - if events_enabled: - iface.setposition() + if not self.genlocationevents(): + return - def reset(self) -> None: + with self._emane_node_lock: + for key in sorted(self._emane_nodes.keys()): + emane_node = self._emane_nodes[key] + logging.debug("post startup for emane node: %s - %s", emane_node.objid, emane_node.name) + emane_node.model.post_startup() + for netif in emane_node.netifs(): + x, y, z = netif.node.position.get() + emane_node.setnemposition(netif, x, y, z) + + def reset(self): """ - Remove all EMANE networks from the dictionary, reset port numbers and - nem id counters + remove all EmaneNode objects from the dictionary, + reset port numbers and nem id counters """ with self._emane_node_lock: - self._emane_nets.clear() - self.nems_to_ifaces.clear() - self.ifaces_to_nems.clear() - self.nems_to_ifaces.clear() - self.services.clear() + self._emane_nodes.clear() - def shutdown(self) -> None: + # don't clear self._ifccounts here; NEM counts are needed for buildxml + self.platformport = self.session.options.get_config_int("emane_platform_port", 8100) + self.transformport = self.session.options.get_config_int("emane_transform_port", 8200) + + def shutdown(self): """ stop all EMANE daemons """ - with self._emane_node_lock: - if not self._emane_nets: - return - logger.info("stopping EMANE daemons") - if self.links_enabled(): - self.link_monitor.stop() - # shutdown interfaces - for _, iface in self.get_ifaces(): - node = iface.node - if not node.up: - continue - kill_cmd = f'pkill -f "emane.+{iface.name}"' - if isinstance(node, CoreNode): - iface.shutdown() - node.cmd(kill_cmd, wait=False) - else: - node.host_cmd(kill_cmd, wait=False) - iface.poshook = None - # stop emane event services - while self.services: - _, service = self.services.popitem() - service.stop() - self.nem_service.clear() + with self._ifccountslock: + self._ifccounts.clear() - def check_node_models(self) -> None: + with self._emane_node_lock: + if not self._emane_nodes: + return + logging.info("stopping EMANE daemons.") + self.deinstallnetifs() + self.stopdaemons() + self.stopeventmonitor() + + def handledistributed(self, message): """ - Associate EMANE model classes with EMANE network nodes. + Broker handler for processing CORE API messages as they are + received. This is used to snoop the Link add messages to get NEM + counts of NEMs that exist on other servers. 
""" - for node_id in self._emane_nets: - emane_net = self._emane_nets[node_id] - logger.debug("checking emane model for node: %s", node_id) - # skip nodes that already have a model set - if emane_net.wireless_model: - logger.debug( - "node(%s) already has model(%s)", - emane_net.id, - emane_net.wireless_model.name, - ) + if message.message_type == MessageTypes.LINK.value and message.flags & MessageFlags.ADD.value: + nn = message.node_numbers() + # first node is always link layer node in Link add message + if nn[0] in self.session.broker.network_nodes: + serverlist = self.session.broker.getserversbynode(nn[1]) + for server in serverlist: + with self._ifccountslock: + if server not in self._ifccounts: + self._ifccounts[server] = 1 + else: + self._ifccounts[server] += 1 + + def checkdistributed(self): + """ + Check for EMANE nodes that exist on multiple emulation servers and + coordinate the NEM id and port number space. + If we are the master EMANE node, return False so initialization will + proceed as normal; otherwise slaves return True here and + initialization is deferred. + """ + # check with the session if we are the "master" Emane object? + master = False + + with self._emane_node_lock: + if self._emane_nodes: + master = self.session.master + logging.info("emane check distributed as master: %s.", master) + + # we are not the master Emane object, wait for nem id and ports + if not master: + return True + + nemcount = 0 + with self._emane_node_lock: + for key in self._emane_nodes: + emane_node = self._emane_nodes[key] + nemcount += emane_node.numnetif() + + nemid = int(self.get_config("nem_id_start")) + nemid += nemcount + + platformid = int(self.get_config("platform_id_start")) + + # build an ordered list of servers so platform ID is deterministic + servers = [] + for key in sorted(self._emane_nodes): + for server in self.session.broker.getserversbynode(key): + if server not in servers: + servers.append(server) + + servers.sort(key=lambda x: x.name) + for server in servers: + if server.name == "localhost": continue - # set model configured for node, due to legacy messaging configuration - # before nodes exist + + if server.sock is None: + continue + + platformid += 1 + typeflags = ConfigFlags.UPDATE.value + self.set_config("platform_id_start", str(platformid)) + self.set_config("nem_id_start", str(nemid)) + config_data = ConfigShim.config_data(0, None, typeflags, self.emane_config, self.get_configs()) + message = dataconversion.convert_config(config_data) + server.sock.send(message) + # increment nemid for next server by number of interfaces + with self._ifccountslock: + if server in self._ifccounts: + nemid += self._ifccounts[server] + + return False + + def buildxml(self): + """ + Build XML files required to run EMANE on each node. + NEMs run inside containers using the control network for passing + events and data. + """ + # assume self._objslock is already held here + logging.info("emane building xml...") + # on master, control network bridge added earlier in startup() + ctrlnet = self.session.add_remove_control_net(net_index=0, remove=False, conf_required=False) + self.buildplatformxml(ctrlnet) + self.buildnemxml() + self.buildeventservicexml() + + # TODO: remove need for tlv messaging + def distributedctrlnet(self, ctrlnet): + """ + Distributed EMANE requires multiple control network prefixes to + be configured. This generates configuration for slave control nets + using the default list of prefixes. 
+ """ + session = self.session + # slave server + if not session.master: + return + + servers = session.broker.getservernames() + # not distributed + if len(servers) < 2: + return + + prefix = session.options.get_config("controlnet") + prefixes = prefix.split() + # normal Config messaging will distribute controlnets + if len(prefixes) >= len(servers): + return + + # this generates a config message having controlnet prefix assignments + logging.info("Setting up default controlnet prefixes for distributed (%d configured)" % len(prefixes)) + prefixes = ctrlnet.DEFAULT_PREFIX_LIST[0] + vals = 'controlnet="%s"' % prefixes + tlvdata = "" + tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.OBJECT.value, "session") + tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.TYPE.value, 0) + tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.VALUES.value, vals) + rawmsg = coreapi.CoreConfMessage.pack(0, tlvdata) + msghdr = rawmsg[:coreapi.CoreMessage.header_len] + msg = coreapi.CoreConfMessage(flags=0, hdr=msghdr, data=rawmsg[coreapi.CoreMessage.header_len:]) + self.session.broker.handle_message(msg) + + def check_node_models(self): + """ + Associate EmaneModel classes with EmaneNode nodes. The model + configurations are stored in self.configs. + """ + for node_id in self._emane_nodes: + emane_node = self._emane_nodes[node_id] + logging.debug("checking emane model for node: %s", node_id) + + # skip nodes that already have a model set + if emane_node.model: + logging.debug("node(%s) already has model(%s)", emane_node.objid, emane_node.model.name) + continue + + # set model configured for node, due to legacy messaging configuration before nodes exist model_name = self.node_models.get(node_id) if not model_name: - logger.error("emane node(%s) has no node model", node_id) + logging.error("emane node(%s) has no node model", node_id) raise ValueError("emane node has no model set") - config = self.get_config(node_id, model_name) - logger.debug("setting emane model(%s) config(%s)", model_name, config) - model_class = self.get_model(model_name) - emane_net.setmodel(model_class, config) + config = self.get_model_config(node_id=node_id, model_name=model_name) + logging.debug("setting emane model(%s) config(%s)", model_name, config) + model_class = self.models[model_name] + emane_node.setmodel(model_class, config) - def get_nem_link( - self, nem1: int, nem2: int, flags: MessageFlags = MessageFlags.NONE - ) -> Optional[LinkData]: - iface1 = self.get_iface(nem1) - if not iface1: - logger.error("invalid nem: %s", nem1) - return None - node1 = iface1.node - iface2 = self.get_iface(nem2) - if not iface2: - logger.error("invalid nem: %s", nem2) - return None - node2 = iface2.node - if iface1.net != iface2.net: - return None - emane_net = iface1.net - color = self.session.get_link_color(emane_net.id) - return LinkData( - message_type=flags, - type=LinkTypes.WIRELESS, - node1_id=node1.id, - node2_id=node2.id, - network_id=emane_net.id, - color=color, - ) - - def start_daemon(self, iface: CoreInterface) -> None: + def nemlookup(self, nemid): """ - Start emane daemon for a given nem/interface. - - :param iface: interface to start emane daemon for - :return: nothing + Look for the given numerical NEM ID and return the first matching + EmaneNode and NEM interface. 
""" - node = iface.node - loglevel = str(DEFAULT_LOG_LEVEL) - cfgloglevel = self.session.options.get_int("emane_log_level", 2) - realtime = self.session.options.get_bool("emane_realtime", True) + emane_node = None + netif = None + + for node_id in self._emane_nodes: + emane_node = self._emane_nodes[node_id] + netif = emane_node.getnemnetif(nemid) + if netif is not None: + break + else: + emane_node = None + + return emane_node, netif + + def numnems(self): + """ + Return the number of NEMs emulated locally. + """ + count = 0 + for emane_node in self._emane_nodes.itervalues(): + count += len(emane_node.netifs()) + return count + + def buildplatformxml(self, ctrlnet): + """ + Build a platform.xml file now that all nodes are configured. + """ + nemid = int(self.get_config("nem_id_start")) + platform_xmls = {} + + # assume self._objslock is already held here + for key in sorted(self._emane_nodes.keys()): + emane_node = self._emane_nodes[key] + nemid = emanexml.build_node_platform_xml(self, ctrlnet, emane_node, nemid, platform_xmls) + + def buildnemxml(self): + """ + Builds the xxxnem.xml, xxxmac.xml, and xxxphy.xml files which + are defined on a per-EmaneNode basis. + """ + for key in sorted(self._emane_nodes.keys()): + emane_node = self._emane_nodes[key] + emanexml.build_xml_files(self, emane_node) + + def buildtransportxml(self): + """ + Calls emanegentransportxml using a platform.xml file to build the transportdaemon*.xml. + """ + utils.check_cmd(["emanegentransportxml", "platform.xml"], cwd=self.session.session_dir) + + def buildeventservicexml(self): + """ + Build the libemaneeventservice.xml file if event service options + were changed in the global config. + """ + need_xml = False + default_values = self.emane_config.default_values() + for name in ["eventservicegroup", "eventservicedevice"]: + a = default_values[name] + b = self.get_config(name) + if a != b: + need_xml = True + + if not need_xml: + # reset to using default config + self.initeventservice() + return + + try: + group, port = self.get_config("eventservicegroup").split(":") + except ValueError: + logging.exception("invalid eventservicegroup in EMANE config") + return + + dev = self.get_config("eventservicedevice") + + emanexml.create_event_service_xml(group, port, dev, self.session.session_dir) + + def startdaemons(self): + """ + Start one EMANE daemon per node having a radio. + Add a control network even if the user has not configured one. 
+ """ + logging.info("starting emane daemons...") + loglevel = str(EmaneManager.DEFAULT_LOG_LEVEL) + cfgloglevel = self.session.options.get_config_int("emane_log_level") + realtime = self.session.options.get_config_bool("emane_realtime", default=True) if cfgloglevel: - logger.info("setting user-defined emane log level: %d", cfgloglevel) + logging.info("setting user-defined EMANE log level: %d", cfgloglevel) loglevel = str(cfgloglevel) - emanecmd = f"emane -d -l {loglevel}" + + emanecmd = ["emane", "-d", "-l", loglevel] if realtime: - emanecmd += " -r" - if isinstance(node, CoreNode): + emanecmd += "-r", + + otagroup, _otaport = self.get_config("otamanagergroup").split(":") + otadev = self.get_config("otamanagerdevice") + otanetidx = self.session.get_control_net_index(otadev) + + eventgroup, _eventport = self.get_config("eventservicegroup").split(":") + eventdev = self.get_config("eventservicedevice") + eventservicenetidx = self.session.get_control_net_index(eventdev) + + run_emane_on_host = False + for node in self.getnodes(): + if hasattr(node, "transport_type") and node.transport_type == "raw": + run_emane_on_host = True + continue + path = self.session.session_dir + n = node.objid + + # control network not yet started here + self.session.add_remove_control_interface(node, 0, remove=False, conf_required=False) + + if otanetidx > 0: + logging.info("adding ota device ctrl%d", otanetidx) + self.session.add_remove_control_interface(node, otanetidx, remove=False, conf_required=False) + + if eventservicenetidx >= 0: + logging.info("adding event service device ctrl%d", eventservicenetidx) + self.session.add_remove_control_interface(node, eventservicenetidx, remove=False, conf_required=False) + + # multicast route is needed for OTA data + args = [constants.IP_BIN, "route", "add", otagroup, "dev", otadev] + node.check_cmd(args) + + # multicast route is also needed for event data if on control network + if eventservicenetidx >= 0 and eventgroup != otagroup: + args = [constants.IP_BIN, "route", "add", eventgroup, "dev", eventdev] + node.check_cmd(args) + # start emane - log_file = node.directory / f"{iface.name}-emane.log" - platform_xml = node.directory / emanexml.platform_file_name(iface) - args = f"{emanecmd} -f {log_file} {platform_xml}" - node.cmd(args) - else: - log_file = self.session.directory / f"{iface.name}-emane.log" - platform_xml = self.session.directory / emanexml.platform_file_name(iface) - args = f"{emanecmd} -f {log_file} {platform_xml}" - node.host_cmd(args, cwd=self.session.directory) + args = emanecmd + ["-f", os.path.join(path, "emane%d.log" % n), os.path.join(path, "platform%d.xml" % n)] + output = node.check_cmd(args) + logging.info("node(%s) emane daemon running: %s", node.name, args) + logging.info("node(%s) emane daemon output: %s", node.name, output) - def install_iface(self, iface: TunTap, config: dict[str, str]) -> None: - external = config.get("external", "0") - if external == "0": - iface.set_ips() - # at this point we register location handlers for generating - # EMANE location events - if self.genlocationevents(): - iface.poshook = self.set_nem_position - iface.setposition() + if not run_emane_on_host: + return - def doeventmonitor(self) -> bool: + path = self.session.session_dir + emanecmd += ["-f", os.path.join(path, "emane.log")] + args = emanecmd + [os.path.join(path, "platform.xml")] + utils.check_cmd(args, cwd=path) + logging.info("host emane daemon running: %s", args) + + def stopdaemons(self): + """ + Kill the appropriate EMANE daemons. 
+ """ + # TODO: we may want to improve this if we had the PIDs from the specific EMANE daemons that we"ve started + args = ["killall", "-q", "emane"] + stop_emane_on_host = False + for node in self.getnodes(): + if hasattr(node, "transport_type") and node.transport_type == "raw": + stop_emane_on_host = True + continue + + if node.up: + node.cmd(args, wait=False) + # TODO: RJ45 node + + if stop_emane_on_host: + try: + utils.check_cmd(args) + utils.check_cmd(["killall", "-q", "emanetransportd"]) + except CoreCommandError: + logging.exception("error shutting down emane daemons") + + def installnetifs(self): + """ + Install TUN/TAP virtual interfaces into their proper namespaces + now that the EMANE daemons are running. + """ + for key in sorted(self._emane_nodes.keys()): + emane_node = self._emane_nodes[key] + logging.info("emane install netifs for node: %d", key) + emane_node.installnetifs() + + def deinstallnetifs(self): + """ + Uninstall TUN/TAP virtual interfaces. + """ + for key in sorted(self._emane_nodes.keys()): + emane_node = self._emane_nodes[key] + emane_node.deinstallnetifs() + + def doeventmonitor(self): """ Returns boolean whether or not EMANE events will be monitored. """ - return self.session.options.get_bool("emane_event_monitor", False) + # this support must be explicitly turned on; by default, CORE will + # generate the EMANE events when nodes are moved + return self.session.options.get_config_bool("emane_event_monitor") - def genlocationevents(self) -> bool: + def genlocationevents(self): """ Returns boolean whether or not EMANE events will be generated. """ - return self.session.options.get_bool("emane_event_generate", True) + # By default, CORE generates EMANE location events when nodes + # are moved; this can be explicitly disabled in core.conf + tmp = self.session.options.get_config_bool("emane_event_generate") + if tmp is None: + tmp = not self.doeventmonitor() + return tmp - def handlelocationevent(self, rxnemid: int, eid: int, data: str) -> None: + def starteventmonitor(self): + """ + Start monitoring EMANE location events if configured to do so. + """ + logging.info("emane start event monitor") + if not self.doeventmonitor(): + return + + if self.service is None: + logging.error("Warning: EMANE events will not be generated " + "because the emaneeventservice\n binding was " + "unable to load " + "(install the python-emaneeventservice bindings)") + return + self.doeventloop = True + self.eventmonthread = threading.Thread(target=self.eventmonitorloop) + self.eventmonthread.daemon = True + self.eventmonthread.start() + + def stopeventmonitor(self): + """ + Stop monitoring EMANE location events. + """ + self.doeventloop = False + if self.service is not None: + self.service.breakloop() + # reset the service, otherwise nextEvent won"t work + self.initeventservice(shutdown=True) + + if self.eventmonthread is not None: + # TODO: fix this + self.eventmonthread._Thread__stop() + self.eventmonthread.join() + self.eventmonthread = None + + def eventmonitorloop(self): + """ + Thread target that monitors EMANE location events. + """ + if self.service is None: + return + logging.info("subscribing to EMANE location events. 
(%s)", threading.currentThread().getName()) + while self.doeventloop is True: + _uuid, _seq, events = self.service.nextEvent() + + # this occurs with 0.9.1 event service + if not self.doeventloop: + break + + for event in events: + nem, eid, data = event + if eid == LocationEvent.IDENTIFIER: + self.handlelocationevent(nem, eid, data) + + logging.info("unsubscribing from EMANE location events. (%s)", threading.currentThread().getName()) + + def handlelocationevent(self, rxnemid, eid, data): """ Handle an EMANE location event. """ @@ -654,117 +805,109 @@ class EmaneManager: events.restore(data) for event in events: txnemid, attrs = event - if ( - "latitude" not in attrs - or "longitude" not in attrs - or "altitude" not in attrs - ): - logger.warning("dropped invalid location event") + if "latitude" not in attrs or "longitude" not in attrs or "altitude" not in attrs: + logging.warn("dropped invalid location event") continue + # yaw,pitch,roll,azimuth,elevation,velocity are unhandled lat = attrs["latitude"] lon = attrs["longitude"] alt = attrs["altitude"] - logger.debug("emane location event: %s,%s,%s", lat, lon, alt) + logging.debug("emane location event: %s,%s,%s", lat, lon, alt) self.handlelocationeventtoxyz(txnemid, lat, lon, alt) - def handlelocationeventtoxyz( - self, nemid: int, lat: float, lon: float, alt: float - ) -> bool: + def handlelocationeventtoxyz(self, nemid, lat, lon, alt): """ Convert the (NEM ID, lat, long, alt) from a received location event into a node and x,y,z coordinate values, sending a Node Message. Returns True if successfully parsed and a Node Message was sent. """ # convert nemid to node number - iface = self.get_iface(nemid) - if iface is None: - logger.info("location event for unknown NEM %s", nemid) + _emanenode, netif = self.nemlookup(nemid) + if netif is None: + logging.info("location event for unknown NEM %s", nemid) return False - n = iface.node.id + n = netif.node.objid # convert from lat/long/alt to x,y,z coordinates x, y, z = self.session.location.getxyz(lat, lon, alt) x = int(x) y = int(y) z = int(z) - logger.debug( - "location event NEM %s (%s, %s, %s) -> (%s, %s, %s)", - nemid, - lat, - lon, - alt, - x, - y, - z, - ) + logging.info("location event NEM %s (%s, %s, %s) -> (%s, %s, %s)", nemid, lat, lon, alt, x, y, z) xbit_check = x.bit_length() > 16 or x < 0 ybit_check = y.bit_length() > 16 or y < 0 zbit_check = z.bit_length() > 16 or z < 0 if any([xbit_check, ybit_check, zbit_check]): - logger.error( - "Unable to build node location message, received lat/long/alt " - "exceeds coordinate space: NEM %s (%d, %d, %d)", - nemid, - x, - y, - z, - ) + logging.error("Unable to build node location message, received lat/long/alt exceeds coordinate " + "space: NEM %s (%d, %d, %d)", nemid, x, y, z) return False # generate a node message for this location update try: - node = self.session.get_node(n, NodeBase) - except CoreError: - logger.exception( - "location event NEM %s has no corresponding node %s", nemid, n - ) + node = self.session.get_object(n) + except KeyError: + logging.exception("location event NEM %s has no corresponding node %s" % (nemid, n)) return False # don"t use node.setposition(x,y,z) which generates an event node.position.set(x, y, z) - node.position.set_geo(lon, lat, alt) - self.session.broadcast_node(node) + node_data = node.data(message_type=0, lat=str(lat), lon=str(lon), alt=str(alt)) + self.session.broadcast_node(node_data) return True - def emanerunning(self, node: CoreNode) -> bool: + def emanerunning(self, node): """ - Return True if an 
EMANE process associated with the given node is running, - False otherwise. + Return True if an EMANE process associated with the given node is running, False otherwise. """ - args = "pkill -0 -x emane" - try: - node.cmd(args) - result = True - except CoreCommandError: - result = False - return result + args = ["pkill", "-0", "-x", "emane"] + status = node.cmd(args) + return status == 0 - def publish_pathloss(self, nem1: int, nem2: int, rx1: float, rx2: float) -> None: - """ - Publish pathloss events between provided nems, using provided rx power. - :param nem1: interface one for pathloss - :param nem2: interface two for pathloss - :param rx1: received power from nem2 to nem1 - :param rx2: received power from nem1 to nem2 - :return: nothing - """ - event = PathlossEvent() - event.append(nem1, forward=rx1) - event.append(nem2, forward=rx2) - self.publish_event(nem1, event) - self.publish_event(nem2, event) - def publish_event( - self, - nem_id: int, - event: Union[PathlossEvent, CommEffectEvent, LocationEvent], - send_all: bool = False, - ) -> None: - service = self.nem_service.get(nem_id) - if not service: - logger.error("no service to publish event nem(%s)", nem_id) - return - if send_all: - nem_id = 0 - service.events.publish(nem_id, event) +class EmaneGlobalModel(EmaneModel): + """ + Global EMANE configuration options. + """ + + _DEFAULT_DEV = "ctrl0" + + name = "emane" + + emulator_xml = "/usr/share/emane/manifest/nemmanager.xml" + emulator_defaults = { + "eventservicedevice": _DEFAULT_DEV, + "eventservicegroup": "224.1.2.8:45703", + "otamanagerdevice": _DEFAULT_DEV, + "otamanagergroup": "224.1.2.8:45702" + } + emulator_config = emanemanifest.parse(emulator_xml, emulator_defaults) + emulator_config.insert( + 0, + Configuration(_id="platform_id_start", _type=ConfigDataTypes.INT32, default="1", + label="Starting Platform ID (core)") + ) + + nem_config = [ + Configuration(_id="nem_id_start", _type=ConfigDataTypes.INT32, default="1", + label="Starting NEM ID (core)") + ] + + @classmethod + def configurations(cls): + return cls.emulator_config + cls.nem_config + + @classmethod + def config_groups(cls): + emulator_len = len(cls.emulator_config) + config_len = len(cls.configurations()) + return [ + ConfigGroup("Platform Attributes", 1, emulator_len), + ConfigGroup("NEM Parameters", emulator_len + 1, config_len) + ] + + def __init__(self, session, object_id=None): + super(EmaneGlobalModel, self).__init__(session, object_id) + + def build_xml_files(self, config, interface=None): + raise NotImplementedError diff --git a/daemon/core/emane/emanemanifest.py b/daemon/core/emane/emanemanifest.py index ea2b05fd..b07833c9 100644 --- a/daemon/core/emane/emanemanifest.py +++ b/daemon/core/emane/emanemanifest.py @@ -1,10 +1,7 @@ import logging -from pathlib import Path -from core.config import Configuration -from core.emulator.enumerations import ConfigDataTypes - -logger = logging.getLogger(__name__) +from core.conf import Configuration +from core.enumerations import ConfigDataTypes manifest = None try: @@ -13,16 +10,15 @@ except ImportError: try: from emanesh import manifest except ImportError: - manifest = None - logger.debug("compatible emane python bindings not installed") + logging.debug("compatible emane python bindings not installed") -def _type_value(config_type: str) -> ConfigDataTypes: +def _type_value(config_type): """ Convert emane configuration type to core configuration value. 
- :param config_type: emane configuration type - :return: core config type + :param str config_type: emane configuration type + :return: """ config_type = config_type.upper() if config_type == "DOUBLE": @@ -32,13 +28,14 @@ def _type_value(config_type: str) -> ConfigDataTypes: return ConfigDataTypes[config_type] -def _get_possible(config_type: str, config_regex: str) -> list[str]: +def _get_possible(config_type, config_regex): """ Retrieve possible config value options based on emane regexes. - :param config_type: emane configuration type - :param config_regex: emane configuration regex + :param str config_type: emane configuration type + :param str config_regex: emane configuration regex :return: a string listing comma delimited values, if needed, empty string otherwise + :rtype: list """ if config_type == "bool": return ["On", "Off"] @@ -50,14 +47,16 @@ def _get_possible(config_type: str, config_regex: str) -> list[str]: return [] -def _get_default(config_type_name: str, config_value: list[str]) -> str: +def _get_default(config_type_name, config_value): """ Convert default configuration values to one used by core. - :param config_type_name: emane configuration type name - :param config_value: emane configuration value list + :param str config_type_name: emane configuration type name + :param list config_value: emane configuration value list :return: default core config value + :rtype: str """ + config_default = "" if config_type_name == "bool": @@ -73,14 +72,14 @@ def _get_default(config_type_name: str, config_value: list[str]) -> str: return config_default -def parse(manifest_path: Path, defaults: dict[str, str]) -> list[Configuration]: +def parse(manifest_path, defaults): """ - Parses a valid emane manifest file and converts the provided configuration values - into ones used by core. + Parses a valid emane manifest file and converts the provided configuration values into ones used by core. - :param manifest_path: absolute manifest file path - :param defaults: used to override default values for configurations + :param str manifest_path: absolute manifest file path + :param dict defaults: used to override default values for configurations :return: list of core configuration values + :rtype: list """ # no results when emane bindings are not present @@ -88,7 +87,7 @@ def parse(manifest_path: Path, defaults: dict[str, str]) -> list[Configuration]: return [] # load configuration file - manifest_file = manifest.Manifest(str(manifest_path)) + manifest_file = manifest.Manifest(manifest_path) manifest_configurations = manifest_file.getAllConfiguration() configurations = [] @@ -116,14 +115,14 @@ def parse(manifest_path: Path, defaults: dict[str, str]) -> list[Configuration]: # define description and account for gui quirks config_descriptions = config_name if config_name.endswith("uri"): - config_descriptions = f"{config_descriptions} file" + config_descriptions = "%s file" % config_descriptions configuration = Configuration( - id=config_name, - type=config_type_value, + _id=config_name, + _type=config_type_value, default=config_default, options=possible, - label=config_descriptions, + label=config_descriptions ) configurations.append(configuration) diff --git a/daemon/core/emane/emanemodel.py b/daemon/core/emane/emanemodel.py index 4e31d632..01bf1835 100644 --- a/daemon/core/emane/emanemodel.py +++ b/daemon/core/emane/emanemodel.py @@ -2,21 +2,15 @@ Defines Emane Models used within CORE. 
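# A sketch of the option handling described in _get_possible() earlier in this
# patch: bool parameters expose On/Off, while enumerated string parameters
# derive their choices from a manifest regex such as "^(one|two)$". The regex
# parsing here is illustrative, not the exact CORE implementation.
def possible_values(config_type, config_regex):
    if config_type == "bool":
        return ["On", "Off"]
    if config_type == "string" and config_regex:
        return config_regex.strip("^$()").split("|")
    return []

assert possible_values("bool", "") == ["On", "Off"]
assert possible_values("string", "^(one|two)$") == ["one", "two"]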
""" import logging -from pathlib import Path -from typing import Optional +import os -from core.config import ConfigBool, ConfigGroup, ConfigString, Configuration +from core.conf import ConfigGroup +from core.conf import Configuration from core.emane import emanemanifest -from core.emulator.data import LinkOptions -from core.errors import CoreError -from core.location.mobility import WirelessModel -from core.nodes.interface import CoreInterface +from core.enumerations import ConfigDataTypes +from core.mobility import WirelessModel from core.xml import emanexml -logger = logging.getLogger(__name__) -DEFAULT_DEV: str = "ctrl0" -MANIFEST_PATH: str = "share/emane/manifest" - class EmaneModel(WirelessModel): """ @@ -24,151 +18,142 @@ class EmaneModel(WirelessModel): handling configuration messages based on the list of configurable parameters. Helper functions also live here. """ - - # default platform configuration settings - platform_controlport: str = "controlportendpoint" - platform_xml: str = "nemmanager.xml" - platform_defaults: dict[str, str] = { - "eventservicedevice": DEFAULT_DEV, - "eventservicegroup": "224.1.2.8:45703", - "otamanagerdevice": DEFAULT_DEV, - "otamanagergroup": "224.1.2.8:45702", - } - platform_config: list[Configuration] = [] - # default mac configuration settings - mac_library: Optional[str] = None - mac_xml: Optional[str] = None - mac_defaults: dict[str, str] = {} - mac_config: list[Configuration] = [] + mac_library = None + mac_xml = None + mac_defaults = {} + mac_config = [] # default phy configuration settings, using the universal model - phy_library: Optional[str] = None - phy_xml: str = "emanephy.xml" - phy_defaults: dict[str, str] = { + phy_library = None + phy_xml = "emanephy.xml" + phy_defaults = { "subid": "1", "propagationmodel": "2ray", - "noisemode": "none", + "noisemode": "none" } - phy_config: list[Configuration] = [] + phy_config = [] # support for external configurations - external_config: list[Configuration] = [ - ConfigBool(id="external", default="0"), - ConfigString(id="platformendpoint", default="127.0.0.1:40001"), - ConfigString(id="transportendpoint", default="127.0.0.1:50002"), + external_config = [ + Configuration("external", ConfigDataTypes.BOOL, default="0"), + Configuration("platformendpoint", ConfigDataTypes.STRING, default="127.0.0.1:40001"), + Configuration("transportendpoint", ConfigDataTypes.STRING, default="127.0.0.1:50002") ] - config_ignore: set[str] = set() + config_ignore = set() @classmethod - def load(cls, emane_prefix: Path) -> None: + def load(cls, emane_prefix): """ - Called after being loaded within the EmaneManager. Provides configured - emane_prefix for parsing xml files. + Called after being loaded within the EmaneManager. Provides configured emane_prefix for + parsing xml files. 
- :param emane_prefix: configured emane prefix path + :param str emane_prefix: configured emane prefix path :return: nothing """ - cls._load_platform_config(emane_prefix) + manifest_path = "share/emane/manifest" # load mac configuration - mac_xml_path = emane_prefix / MANIFEST_PATH / cls.mac_xml + mac_xml_path = os.path.join(emane_prefix, manifest_path, cls.mac_xml) cls.mac_config = emanemanifest.parse(mac_xml_path, cls.mac_defaults) + # load phy configuration - phy_xml_path = emane_prefix / MANIFEST_PATH / cls.phy_xml + phy_xml_path = os.path.join(emane_prefix, manifest_path, cls.phy_xml) cls.phy_config = emanemanifest.parse(phy_xml_path, cls.phy_defaults) @classmethod - def _load_platform_config(cls, emane_prefix: Path) -> None: - platform_xml_path = emane_prefix / MANIFEST_PATH / cls.platform_xml - cls.platform_config = emanemanifest.parse( - platform_xml_path, cls.platform_defaults - ) - # remove controlport configuration, since core will set this directly - controlport_index = None - for index, configuration in enumerate(cls.platform_config): - if configuration.id == cls.platform_controlport: - controlport_index = index - break - if controlport_index is not None: - cls.platform_config.pop(controlport_index) - - @classmethod - def configurations(cls) -> list[Configuration]: + def configurations(cls): """ Returns the combination all all configurations (mac, phy, and external). :return: all configurations + :rtype: list[Configuration] """ - return ( - cls.platform_config + cls.mac_config + cls.phy_config + cls.external_config - ) + return cls.mac_config + cls.phy_config + cls.external_config @classmethod - def config_groups(cls) -> list[ConfigGroup]: + def config_groups(cls): """ Returns the defined configuration groups. :return: list of configuration groups. + :rtype: list[ConfigGroup] """ - platform_len = len(cls.platform_config) - mac_len = len(cls.mac_config) + platform_len + mac_len = len(cls.mac_config) phy_len = len(cls.phy_config) + mac_len config_len = len(cls.configurations()) return [ - ConfigGroup("Platform Parameters", 1, platform_len), - ConfigGroup("MAC Parameters", platform_len + 1, mac_len), + ConfigGroup("MAC Parameters", 1, mac_len), ConfigGroup("PHY Parameters", mac_len + 1, phy_len), - ConfigGroup("External Parameters", phy_len + 1, config_len), + ConfigGroup("External Parameters", phy_len + 1, config_len) ] - def build_xml_files(self, config: dict[str, str], iface: CoreInterface) -> None: + def build_xml_files(self, config, interface=None): """ - Builds xml files for this emane model. Creates a nem.xml file that points to - both mac.xml and phy.xml definitions. + Builds xml files for this emane model. Creates a nem.xml file that points to both mac.xml and phy.xml + definitions. 
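The index arithmetic in config_groups() above produces 1-based, inclusive ranges over the combined configuration list. A worked sketch, assuming 2 MAC, 3 PHY and 3 external parameters:

from core.conf import ConfigGroup

mac_len = 2                      # len(cls.mac_config)
phy_len = 3 + mac_len            # len(cls.phy_config) + mac_len -> 5
config_len = 2 + 3 + 3           # len(cls.configurations()) -> 8
groups = [
    ConfigGroup("MAC Parameters", 1, mac_len),                    # parameters 1-2
    ConfigGroup("PHY Parameters", mac_len + 1, phy_len),          # parameters 3-5
    ConfigGroup("External Parameters", phy_len + 1, config_len),  # parameters 6-8
]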
- :param config: emane model configuration for the node and interface - :param iface: interface to run emane for + :param dict config: emane model configuration for the node and interface + :param interface: interface for the emane node :return: nothing """ - # create nem, mac, and phy xml files - emanexml.create_nem_xml(self, iface, config) - emanexml.create_mac_xml(self, iface, config) - emanexml.create_phy_xml(self, iface, config) - emanexml.create_transport_xml(iface, config) + nem_name = emanexml.nem_file_name(self, interface) + mac_name = emanexml.mac_file_name(self, interface) + phy_name = emanexml.phy_file_name(self, interface) - def post_startup(self, iface: CoreInterface) -> None: + # check if this is external + transport_type = "virtual" + if interface and interface.transport_type == "raw": + transport_type = "raw" + transport_name = emanexml.transport_file_name(self.object_id, transport_type) + + # create nem xml file + nem_file = os.path.join(self.session.session_dir, nem_name) + emanexml.create_nem_xml(self, config, nem_file, transport_name, mac_name, phy_name) + + # create mac xml file + mac_file = os.path.join(self.session.session_dir, mac_name) + emanexml.create_mac_xml(self, config, mac_file) + + # create phy xml file + phy_file = os.path.join(self.session.session_dir, phy_name) + emanexml.create_phy_xml(self, config, phy_file) + + def post_startup(self): """ Logic to execute after the emane manager is finished with startup. - :param iface: interface for post startup :return: nothing """ - logger.debug("emane model(%s) has no post setup tasks", self.name) + logging.info("emane model(%s) has no post setup tasks", self.name) - def update(self, moved_ifaces: list[CoreInterface]) -> None: + def update(self, moved, moved_netifs): """ Invoked from MobilityModel when nodes are moved; this causes emane location events to be generated for the nodes in the moved list, making EmaneModels compatible with Ns2ScriptedMobility. - :param moved_ifaces: interfaces that were moved - :return: nothing + :param bool moved: were nodes moved + :param list moved_netifs: interfaces that were moved + :return: """ try: - self.session.emane.set_nem_positions(moved_ifaces) - except CoreError: - logger.exception("error during update") + wlan = self.session.get_object(self.object_id) + wlan.setnempositions(moved_netifs) + except KeyError: + logging.exception("error during update") - def linkconfig( - self, iface: CoreInterface, options: LinkOptions, iface2: CoreInterface = None - ) -> None: + def linkconfig(self, netif, bw=None, delay=None, loss=None, duplicate=None, jitter=None, netif2=None): """ Invoked when a Link Message is received. Default is unimplemented. 
- :param iface: interface one - :param options: options for configuring link - :param iface2: interface two + :param core.netns.vif.Veth netif: interface one + :param bw: bandwidth to set to + :param delay: packet delay to set to + :param loss: packet loss to set to + :param duplicate: duplicate percentage to set to + :param jitter: jitter to set to + :param core.netns.vif.Veth netif2: interface two :return: nothing """ - logger.warning("emane model(%s) does not support link config", self.name) + logging.warn("emane model(%s) does not support link configuration", self.name) diff --git a/daemon/core/emane/ieee80211abg.py b/daemon/core/emane/ieee80211abg.py new file mode 100644 index 00000000..e99ebe3b --- /dev/null +++ b/daemon/core/emane/ieee80211abg.py @@ -0,0 +1,23 @@ +""" +ieee80211abg.py: EMANE IEEE 802.11abg model for CORE +""" +import os + +from core.emane import emanemodel + + +class EmaneIeee80211abgModel(emanemodel.EmaneModel): + # model name + name = "emane_ieee80211abg" + + # mac configuration + mac_library = "ieee80211abgmaclayer" + mac_xml = "ieee80211abgmaclayer.xml" + + @classmethod + def load(cls, emane_prefix): + cls.mac_defaults["pcrcurveuri"] = os.path.join( + emane_prefix, + "share/emane/xml/models/mac/ieee80211abg/ieee80211pcr.xml" + ) + super(EmaneIeee80211abgModel, cls).load(emane_prefix) diff --git a/daemon/core/emane/linkmonitor.py b/daemon/core/emane/linkmonitor.py deleted file mode 100644 index 1997e9f8..00000000 --- a/daemon/core/emane/linkmonitor.py +++ /dev/null @@ -1,328 +0,0 @@ -import logging -import sched -import threading -import time -from typing import TYPE_CHECKING, Optional - -from lxml import etree - -from core.emane.nodes import EmaneNet -from core.emulator.data import LinkData -from core.emulator.enumerations import LinkTypes, MessageFlags -from core.nodes.network import CtrlNet - -logger = logging.getLogger(__name__) - -try: - from emane import shell -except ImportError: - try: - from emanesh import shell - except ImportError: - shell = None - logger.debug("compatible emane python bindings not installed") - -if TYPE_CHECKING: - from core.emane.emanemanager import EmaneManager - -MAC_COMPONENT_INDEX: int = 1 -EMANE_RFPIPE: str = "rfpipemaclayer" -EMANE_80211: str = "ieee80211abgmaclayer" -EMANE_TDMA: str = "tdmaeventschedulerradiomodel" -SINR_TABLE: str = "NeighborStatusTable" -NEM_SELF: int = 65535 - - -class LossTable: - def __init__(self, losses: dict[float, float]) -> None: - self.losses: dict[float, float] = losses - self.sinrs: list[float] = sorted(self.losses.keys()) - self.loss_lookup: dict[int, float] = {} - for index, value in enumerate(self.sinrs): - self.loss_lookup[index] = self.losses[value] - self.mac_id: Optional[str] = None - - def get_loss(self, sinr: float) -> float: - index = self._get_index(sinr) - loss = 100.0 - self.loss_lookup[index] - return loss - - def _get_index(self, current_sinr: float) -> int: - for index, sinr in enumerate(self.sinrs): - if current_sinr <= sinr: - return index - return len(self.sinrs) - 1 - - -class EmaneLink: - def __init__(self, from_nem: int, to_nem: int, sinr: float) -> None: - self.from_nem: int = from_nem - self.to_nem: int = to_nem - self.sinr: float = sinr - self.last_seen: Optional[float] = None - self.updated: bool = False - self.touch() - - def update(self, sinr: float) -> None: - self.updated = self.sinr != sinr - self.sinr = sinr - self.touch() - - def touch(self) -> None: - self.last_seen = time.monotonic() - - def is_dead(self, timeout: int) -> bool: - return (time.monotonic() - 
self.last_seen) >= timeout - - def __repr__(self) -> str: - return f"EmaneLink({self.from_nem}, {self.to_nem}, {self.sinr})" - - -class EmaneClient: - def __init__(self, address: str, port: int) -> None: - self.address: str = address - self.client: shell.ControlPortClient = shell.ControlPortClient( - self.address, port - ) - self.nems: dict[int, LossTable] = {} - self.setup() - - def setup(self) -> None: - manifest = self.client.getManifest() - for nem_id, components in manifest.items(): - # get mac config - mac_id, _, emane_model = components[MAC_COMPONENT_INDEX] - mac_config = self.client.getConfiguration(mac_id) - logger.debug( - "address(%s) nem(%s) emane(%s)", self.address, nem_id, emane_model - ) - - # create loss table based on current configuration - if emane_model == EMANE_80211: - loss_table = self.handle_80211(mac_config) - elif emane_model == EMANE_RFPIPE: - loss_table = self.handle_rfpipe(mac_config) - else: - logger.warning("unknown emane link model: %s", emane_model) - continue - logger.info("monitoring links nem(%s) model(%s)", nem_id, emane_model) - loss_table.mac_id = mac_id - self.nems[nem_id] = loss_table - - def check_links( - self, links: dict[tuple[int, int], EmaneLink], loss_threshold: int - ) -> None: - for from_nem, loss_table in self.nems.items(): - tables = self.client.getStatisticTable(loss_table.mac_id, (SINR_TABLE,)) - table = tables[SINR_TABLE][1:][0] - for row in table: - row = row - to_nem = row[0][0] - sinr = row[5][0] - age = row[-1][0] - - # exclude invalid links - is_self = to_nem == NEM_SELF - has_valid_age = 0 <= age <= 1 - if is_self or not has_valid_age: - continue - - # check if valid link loss - link_key = (from_nem, to_nem) - loss = loss_table.get_loss(sinr) - if loss < loss_threshold: - link = links.get(link_key) - if link: - link.update(sinr) - else: - link = EmaneLink(from_nem, to_nem, sinr) - links[link_key] = link - - def handle_tdma(self, config: dict[str, tuple]): - pcr = config["pcrcurveuri"][0][0] - logger.debug("tdma pcr: %s", pcr) - - def handle_80211(self, config: dict[str, tuple]) -> LossTable: - unicastrate = config["unicastrate"][0][0] - pcr = config["pcrcurveuri"][0][0] - logger.debug("80211 pcr: %s", pcr) - tree = etree.parse(pcr) - root = tree.getroot() - table = root.find("table") - losses = {} - for rate in table.iter("datarate"): - index = int(rate.get("index")) - if index == unicastrate: - for row in rate.iter("row"): - sinr = float(row.get("sinr")) - por = float(row.get("por")) - losses[sinr] = por - return LossTable(losses) - - def handle_rfpipe(self, config: dict[str, tuple]) -> LossTable: - pcr = config["pcrcurveuri"][0][0] - logger.debug("rfpipe pcr: %s", pcr) - tree = etree.parse(pcr) - root = tree.getroot() - table = root.find("table") - losses = {} - for row in table.iter("row"): - sinr = float(row.get("sinr")) - por = float(row.get("por")) - losses[sinr] = por - return LossTable(losses) - - def stop(self) -> None: - self.client.stop() - - -class EmaneLinkMonitor: - def __init__(self, emane_manager: "EmaneManager") -> None: - self.emane_manager: "EmaneManager" = emane_manager - self.clients: list[EmaneClient] = [] - self.links: dict[tuple[int, int], EmaneLink] = {} - self.complete_links: set[tuple[int, int]] = set() - self.loss_threshold: Optional[int] = None - self.link_interval: Optional[int] = None - self.link_timeout: Optional[int] = None - self.scheduler: Optional[sched.scheduler] = None - self.running: bool = False - - def start(self) -> None: - options = self.emane_manager.session.options - 
self.loss_threshold = options.get_int("loss_threshold") - self.link_interval = options.get_int("link_interval") - self.link_timeout = options.get_int("link_timeout") - self.initialize() - if not self.clients: - logger.info("no valid emane models to monitor links") - return - self.scheduler = sched.scheduler() - self.scheduler.enter(0, 0, self.check_links) - self.running = True - thread = threading.Thread(target=self.scheduler.run, daemon=True) - thread.start() - - def initialize(self) -> None: - addresses = self.get_addresses() - for address, port in addresses: - client = EmaneClient(address, port) - if client.nems: - self.clients.append(client) - - def get_addresses(self) -> list[tuple[str, int]]: - addresses = [] - nodes = self.emane_manager.getnodes() - for node in nodes: - control = None - ports = [] - for iface in node.get_ifaces(): - if isinstance(iface.net, CtrlNet): - ip4 = iface.get_ip4() - if ip4: - control = str(ip4.ip) - if isinstance(iface.net, EmaneNet): - port = self.emane_manager.get_nem_port(iface) - ports.append(port) - if control: - for port in ports: - addresses.append((control, port)) - return addresses - - def check_links(self) -> None: - # check for new links - previous_links = set(self.links.keys()) - for client in self.clients: - try: - client.check_links(self.links, self.loss_threshold) - except shell.ControlPortException: - if self.running: - logger.exception("link monitor error") - - # find new links - current_links = set(self.links.keys()) - new_links = current_links - previous_links - - # find updated and dead links - dead_links = [] - for link_id, link in self.links.items(): - complete_id = self.get_complete_id(link_id) - if link.is_dead(self.link_timeout): - dead_links.append(link_id) - elif link.updated and complete_id in self.complete_links: - link.updated = False - self.send_link(MessageFlags.NONE, complete_id) - - # announce dead links - for link_id in dead_links: - complete_id = self.get_complete_id(link_id) - if complete_id in self.complete_links: - self.complete_links.remove(complete_id) - self.send_link(MessageFlags.DELETE, complete_id) - del self.links[link_id] - - # announce new links - for link_id in new_links: - complete_id = self.get_complete_id(link_id) - if complete_id in self.complete_links: - continue - if self.is_complete_link(link_id): - self.complete_links.add(complete_id) - self.send_link(MessageFlags.ADD, complete_id) - - if self.running: - self.scheduler.enter(self.link_interval, 0, self.check_links) - - def get_complete_id(self, link_id: tuple[int, int]) -> tuple[int, int]: - value1, value2 = link_id - if value1 < value2: - return value1, value2 - else: - return value2, value1 - - def is_complete_link(self, link_id: tuple[int, int]) -> bool: - reverse_id = link_id[1], link_id[0] - return link_id in self.links and reverse_id in self.links - - def get_link_label(self, link_id: tuple[int, int]) -> str: - source_id = tuple(sorted(link_id)) - source_link = self.links[source_id] - dest_id = link_id[::-1] - dest_link = self.links[dest_id] - return f"{source_link.sinr:.1f} / {dest_link.sinr:.1f}" - - def send_link(self, message_type: MessageFlags, link_id: tuple[int, int]) -> None: - nem1, nem2 = link_id - link = self.emane_manager.get_nem_link(nem1, nem2, message_type) - if link: - label = self.get_link_label(link_id) - link.label = label - self.emane_manager.session.broadcast_link(link) - - def send_message( - self, - message_type: MessageFlags, - label: str, - node1: int, - node2: int, - emane_id: int, - ) -> None: - color = 
self.emane_manager.session.get_link_color(emane_id) - link_data = LinkData( - message_type=message_type, - type=LinkTypes.WIRELESS, - label=label, - node1_id=node1, - node2_id=node2, - network_id=emane_id, - color=color, - ) - self.emane_manager.session.broadcast_link(link_data) - - def stop(self) -> None: - self.running = False - for client in self.clients: - client.stop() - self.clients.clear() - self.links.clear() - self.complete_links.clear() diff --git a/daemon/core/emane/modelmanager.py b/daemon/core/emane/modelmanager.py deleted file mode 100644 index 92dd5b8e..00000000 --- a/daemon/core/emane/modelmanager.py +++ /dev/null @@ -1,69 +0,0 @@ -import logging -import pkgutil -from pathlib import Path - -from core import utils -from core.emane import models as emane_models -from core.emane.emanemodel import EmaneModel -from core.errors import CoreError - -logger = logging.getLogger(__name__) - - -class EmaneModelManager: - models: dict[str, type[EmaneModel]] = {} - - @classmethod - def load_locals(cls, emane_prefix: Path) -> list[str]: - """ - Load local core emane models and make them available. - - :param emane_prefix: installed emane prefix - :return: list of errors encountered loading emane models - """ - errors = [] - for module_info in pkgutil.walk_packages( - emane_models.__path__, f"{emane_models.__name__}." - ): - models = utils.load_module(module_info.name, EmaneModel) - for model in models: - logger.debug("loading emane model: %s", model.name) - try: - model.load(emane_prefix) - cls.models[model.name] = model - except CoreError as e: - errors.append(model.name) - logger.debug("not loading emane model(%s): %s", model.name, e) - return errors - - @classmethod - def load(cls, path: Path, emane_prefix: Path) -> list[str]: - """ - Search and load custom emane models and make them available. 
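A usage sketch for the EmaneModelManager being removed here: both the custom model directory and the installed emane prefix are Paths, and lookups go through get(), which raises CoreError for unknown names. The custom directory below is a hypothetical path for illustration.

from pathlib import Path

from core.emane.modelmanager import EmaneModelManager

emane_prefix = Path("/usr")  # assumed install prefix
errors = EmaneModelManager.load_locals(emane_prefix)
errors += EmaneModelManager.load(Path("/opt/core/custom_emane"), emane_prefix)  # hypothetical dir
model_class = EmaneModelManager.get("emane_rfpipe")  # raises CoreError if the name is unknown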
- - :param path: path to search for custom emane models - :param emane_prefix: installed emane prefix - :return: list of errors encountered loading emane models - """ - subdirs = [x for x in path.iterdir() if x.is_dir()] - subdirs.append(path) - errors = [] - for subdir in subdirs: - logger.debug("loading emane models from: %s", subdir) - models = utils.load_classes(subdir, EmaneModel) - for model in models: - logger.debug("loading emane model: %s", model.name) - try: - model.load(emane_prefix) - cls.models[model.name] = model - except CoreError as e: - errors.append(model.name) - logger.debug("not loading emane model(%s): %s", model.name, e) - return errors - - @classmethod - def get(cls, name: str) -> type[EmaneModel]: - model = cls.models.get(name) - if model is None: - raise CoreError(f"emame model does not exist {name}") - return model diff --git a/daemon/core/emane/models/__init__.py b/daemon/core/emane/models/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/daemon/core/emane/models/bypass.py b/daemon/core/emane/models/bypass.py deleted file mode 100644 index e8f2ed39..00000000 --- a/daemon/core/emane/models/bypass.py +++ /dev/null @@ -1,32 +0,0 @@ -""" -EMANE Bypass model for CORE -""" -from pathlib import Path - -from core.config import ConfigBool, Configuration -from core.emane import emanemodel - - -class EmaneBypassModel(emanemodel.EmaneModel): - name: str = "emane_bypass" - - # values to ignore, when writing xml files - config_ignore: set[str] = {"none"} - - # mac definitions - mac_library: str = "bypassmaclayer" - mac_config: list[Configuration] = [ - ConfigBool( - id="none", - default="0", - label="There are no parameters for the bypass model.", - ) - ] - - # phy definitions - phy_library: str = "bypassphylayer" - phy_config: list[Configuration] = [] - - @classmethod - def load(cls, emane_prefix: Path) -> None: - cls._load_platform_config(emane_prefix) diff --git a/daemon/core/emane/models/commeffect.py b/daemon/core/emane/models/commeffect.py deleted file mode 100644 index aa093a93..00000000 --- a/daemon/core/emane/models/commeffect.py +++ /dev/null @@ -1,142 +0,0 @@ -""" -commeffect.py: EMANE CommEffect model for CORE -""" - -import logging -from pathlib import Path - -from lxml import etree - -from core.config import ConfigGroup, Configuration -from core.emane import emanemanifest, emanemodel -from core.emulator.data import LinkOptions -from core.nodes.interface import CoreInterface -from core.xml import emanexml - -logger = logging.getLogger(__name__) - -try: - from emane.events.commeffectevent import CommEffectEvent -except ImportError: - try: - from emanesh.events.commeffectevent import CommEffectEvent - except ImportError: - CommEffectEvent = None - logger.debug("compatible emane python bindings not installed") - - -def convert_none(x: float) -> int: - """ - Helper to use 0 for None values. 
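A couple of checks on the helper's behaviour as defined just below: strings are coerced through float() before the None test, and floats are truncated rather than rounded. The import references the module shown in this hunk.

from core.emane.models.commeffect import convert_none

assert convert_none(None) == 0    # None becomes 0
assert convert_none("5.5") == 5   # strings go through float(), then int() truncation
assert convert_none(12.7) == 12   # floats are truncated, not rounded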
- """ - if isinstance(x, str): - x = float(x) - if x is None: - return 0 - else: - return int(x) - - -class EmaneCommEffectModel(emanemodel.EmaneModel): - name: str = "emane_commeffect" - shim_library: str = "commeffectshim" - shim_xml: str = "commeffectshim.xml" - shim_defaults: dict[str, str] = {} - config_shim: list[Configuration] = [] - - # comm effect does not need the default phy and external configurations - phy_config: list[Configuration] = [] - external_config: list[Configuration] = [] - - @classmethod - def load(cls, emane_prefix: Path) -> None: - cls._load_platform_config(emane_prefix) - shim_xml_path = emane_prefix / "share/emane/manifest" / cls.shim_xml - cls.config_shim = emanemanifest.parse(shim_xml_path, cls.shim_defaults) - - @classmethod - def configurations(cls) -> list[Configuration]: - return cls.platform_config + cls.config_shim - - @classmethod - def config_groups(cls) -> list[ConfigGroup]: - platform_len = len(cls.platform_config) - return [ - ConfigGroup("Platform Parameters", 1, platform_len), - ConfigGroup( - "CommEffect SHIM Parameters", - platform_len + 1, - len(cls.configurations()), - ), - ] - - def build_xml_files(self, config: dict[str, str], iface: CoreInterface) -> None: - """ - Build the necessary nem and commeffect XMLs in the given path. - If an individual NEM has a nonstandard config, we need to build - that file also. Otherwise the WLAN-wide - nXXemane_commeffectnem.xml, nXXemane_commeffectshim.xml are used. - - :param config: emane model configuration for the node and interface - :param iface: interface for the emane node - :return: nothing - """ - # create and write nem document - nem_element = etree.Element("nem", name=f"{self.name} NEM", type="unstructured") - transport_name = emanexml.transport_file_name(iface) - etree.SubElement(nem_element, "transport", definition=transport_name) - - # set shim configuration - nem_name = emanexml.nem_file_name(iface) - shim_name = emanexml.shim_file_name(iface) - etree.SubElement(nem_element, "shim", definition=shim_name) - emanexml.create_node_file(iface.node, nem_element, "nem", nem_name) - - # create and write shim document - shim_element = etree.Element( - "shim", name=f"{self.name} SHIM", library=self.shim_library - ) - - # append all shim options (except filterfile) to shimdoc - for configuration in self.config_shim: - name = configuration.id - if name == "filterfile": - continue - value = config[name] - emanexml.add_param(shim_element, name, value) - - # empty filterfile is not allowed - ff = config["filterfile"] - if ff.strip() != "": - emanexml.add_param(shim_element, "filterfile", ff) - emanexml.create_node_file(iface.node, shim_element, "shim", shim_name) - - # create transport xml - emanexml.create_transport_xml(iface, config) - - def linkconfig( - self, iface: CoreInterface, options: LinkOptions, iface2: CoreInterface = None - ) -> None: - """ - Generate CommEffect events when a Link Message is received having - link parameters. 
- """ - if iface is None or iface2 is None: - logger.warning("%s: missing NEM information", self.name) - return - # TODO: batch these into multiple events per transmission - # TODO: may want to split out seconds portion of delay and jitter - event = CommEffectEvent() - nem1 = self.session.emane.get_nem_id(iface) - nem2 = self.session.emane.get_nem_id(iface2) - logger.info("sending comm effect event") - event.append( - nem1, - latency=convert_none(options.delay), - jitter=convert_none(options.jitter), - loss=convert_none(options.loss), - duplicate=convert_none(options.dup), - unicast=int(convert_none(options.bandwidth)), - broadcast=int(convert_none(options.bandwidth)), - ) - self.session.emane.publish_event(nem2, event) diff --git a/daemon/core/emane/models/ieee80211abg.py b/daemon/core/emane/models/ieee80211abg.py deleted file mode 100644 index f6b32264..00000000 --- a/daemon/core/emane/models/ieee80211abg.py +++ /dev/null @@ -1,22 +0,0 @@ -""" -ieee80211abg.py: EMANE IEEE 802.11abg model for CORE -""" -from pathlib import Path - -from core.emane import emanemodel - - -class EmaneIeee80211abgModel(emanemodel.EmaneModel): - # model name - name: str = "emane_ieee80211abg" - - # mac configuration - mac_library: str = "ieee80211abgmaclayer" - mac_xml: str = "ieee80211abgmaclayer.xml" - - @classmethod - def load(cls, emane_prefix: Path) -> None: - cls.mac_defaults["pcrcurveuri"] = str( - emane_prefix / "share/emane/xml/models/mac/ieee80211abg/ieee80211pcr.xml" - ) - super().load(emane_prefix) diff --git a/daemon/core/emane/models/rfpipe.py b/daemon/core/emane/models/rfpipe.py deleted file mode 100644 index 7dace8c7..00000000 --- a/daemon/core/emane/models/rfpipe.py +++ /dev/null @@ -1,22 +0,0 @@ -""" -rfpipe.py: EMANE RF-PIPE model for CORE -""" -from pathlib import Path - -from core.emane import emanemodel - - -class EmaneRfPipeModel(emanemodel.EmaneModel): - # model name - name: str = "emane_rfpipe" - - # mac configuration - mac_library: str = "rfpipemaclayer" - mac_xml: str = "rfpipemaclayer.xml" - - @classmethod - def load(cls, emane_prefix: Path) -> None: - cls.mac_defaults["pcrcurveuri"] = str( - emane_prefix / "share/emane/xml/models/mac/rfpipe/rfpipepcr.xml" - ) - super().load(emane_prefix) diff --git a/daemon/core/emane/models/tdma.py b/daemon/core/emane/models/tdma.py deleted file mode 100644 index 100e960d..00000000 --- a/daemon/core/emane/models/tdma.py +++ /dev/null @@ -1,65 +0,0 @@ -""" -tdma.py: EMANE TDMA model bindings for CORE -""" - -import logging -from pathlib import Path - -from core import constants, utils -from core.config import ConfigString -from core.emane import emanemodel -from core.emane.nodes import EmaneNet -from core.nodes.interface import CoreInterface - -logger = logging.getLogger(__name__) - - -class EmaneTdmaModel(emanemodel.EmaneModel): - # model name - name: str = "emane_tdma" - - # mac configuration - mac_library: str = "tdmaeventschedulerradiomodel" - mac_xml: str = "tdmaeventschedulerradiomodel.xml" - - # add custom schedule options and ignore it when writing emane xml - schedule_name: str = "schedule" - default_schedule: Path = ( - constants.CORE_DATA_DIR / "examples" / "tdma" / "schedule.xml" - ) - config_ignore: set[str] = {schedule_name} - - @classmethod - def load(cls, emane_prefix: Path) -> None: - cls.mac_defaults["pcrcurveuri"] = str( - emane_prefix - / "share/emane/xml/models/mac/tdmaeventscheduler/tdmabasemodelpcr.xml" - ) - super().load(emane_prefix) - config_item = ConfigString( - id=cls.schedule_name, - default=str(cls.default_schedule), - 
label="TDMA schedule file (core)", - ) - cls.mac_config.insert(0, config_item) - - def post_startup(self, iface: CoreInterface) -> None: - # get configured schedule - emane_net = self.session.get_node(self.id, EmaneNet) - config = self.session.emane.get_iface_config(emane_net, iface) - schedule = Path(config[self.schedule_name]) - if not schedule.is_file(): - logger.error("ignoring invalid tdma schedule: %s", schedule) - return - # initiate tdma schedule - nem_id = self.session.emane.get_nem_id(iface) - if not nem_id: - logger.error("could not find nem for interface") - return - service = self.session.emane.nem_service.get(nem_id) - if service: - device = service.device - logger.info( - "setting up tdma schedule: schedule(%s) device(%s)", schedule, device - ) - utils.cmd(f"emaneevent-tdmaschedule -i {device} {schedule}") diff --git a/daemon/core/emane/nodes.py b/daemon/core/emane/nodes.py index ecf684d7..b599d177 100644 --- a/daemon/core/emane/nodes.py +++ b/daemon/core/emane/nodes.py @@ -1,26 +1,15 @@ """ -Provides an EMANE network node class, which has several attached NEMs that +nodes.py: definition of an EmaneNode class for implementing configuration +control of an EMANE emulation. An EmaneNode has several attached NEMs that share the same MAC+PHY model. """ import logging -import time -from dataclasses import dataclass -from typing import TYPE_CHECKING, Callable, Optional, Union -from core.emulator.data import InterfaceData, LinkData, LinkOptions -from core.emulator.distributed import DistributedServer -from core.emulator.enumerations import MessageFlags, RegisterTlvs -from core.errors import CoreCommandError, CoreError -from core.nodes.base import CoreNetworkBase, CoreNode, NodeOptions -from core.nodes.interface import CoreInterface - -logger = logging.getLogger(__name__) - -if TYPE_CHECKING: - from core.emane.emanemodel import EmaneModel - from core.emulator.session import Session - from core.location.mobility import WayPointMobility +from core.coreobj import PyCoreNet +from core.enumerations import LinkTypes +from core.enumerations import NodeTypes +from core.enumerations import RegisterTlvs try: from emane.events import LocationEvent @@ -28,263 +17,194 @@ except ImportError: try: from emanesh.events import LocationEvent except ImportError: - LocationEvent = None - logger.debug("compatible emane python bindings not installed") + logging.debug("compatible emane python bindings not installed") -class TunTap(CoreInterface): +class EmaneNet(PyCoreNet): """ - TUN/TAP virtual device in TAP mode + EMANE network base class. """ - - def __init__( - self, - _id: int, - name: str, - localname: str, - use_ovs: bool, - node: CoreNode = None, - server: "DistributedServer" = None, - ) -> None: - super().__init__(_id, name, localname, use_ovs, node=node, server=server) - self.node: CoreNode = node - - def startup(self) -> None: - """ - Startup logic for a tunnel tap. - - :return: nothing - """ - self.up = True - - def shutdown(self) -> None: - """ - Shutdown functionality for a tunnel tap. - - :return: nothing - """ - if not self.up: - return - self.up = False - - def waitfor( - self, func: Callable[[], int], attempts: int = 10, maxretrydelay: float = 0.25 - ) -> bool: - """ - Wait for func() to return zero with exponential backoff. 
- - :param func: function to wait for a result of zero - :param attempts: number of attempts to wait for a zero result - :param maxretrydelay: maximum retry delay - :return: True if wait succeeded, False otherwise - """ - delay = 0.01 - result = False - for i in range(1, attempts + 1): - r = func() - if r == 0: - result = True - break - msg = f"attempt {i} failed with nonzero exit status {r}" - if i < attempts + 1: - msg += ", retrying..." - logger.info(msg) - time.sleep(delay) - delay += delay - if delay > maxretrydelay: - delay = maxretrydelay - else: - msg += ", giving up" - logger.info(msg) - return result - - def nodedevexists(self) -> int: - """ - Checks if device exists. - - :return: 0 if device exists, 1 otherwise - """ - try: - self.node.node_net_client.device_show(self.name) - return 0 - except CoreCommandError: - return 1 - - def waitfordevicenode(self) -> None: - """ - Check for presence of a node device - tap device may not appear right away waits. - - :return: nothing - """ - logger.debug("waiting for device node: %s", self.name) - count = 0 - while True: - result = self.waitfor(self.nodedevexists) - if result: - break - should_retry = count < 5 - is_emane_running = self.node.session.emane.emanerunning(self.node) - if all([should_retry, is_emane_running]): - count += 1 - else: - raise RuntimeError("node device failed to exist") - - def set_ips(self) -> None: - """ - Set interface ip addresses. - - :return: nothing - """ - self.waitfordevicenode() - for ip in self.ips(): - self.node.node_net_client.create_address(self.name, str(ip)) + apitype = NodeTypes.EMANE.value + linktype = LinkTypes.WIRELESS.value + # icon used + type = "wlan" -@dataclass -class EmaneOptions(NodeOptions): - emane_model: str = None - """name of emane model to associate an emane network to""" - - -class EmaneNet(CoreNetworkBase): +class EmaneNode(EmaneNet): """ EMANE node contains NEM configuration and causes connected nodes to have TAP interfaces (instead of VEth). These are managed by the Emane controller object that exists in a session. """ - def __init__( - self, - session: "Session", - _id: int = None, - name: str = None, - server: DistributedServer = None, - options: EmaneOptions = None, - ) -> None: - options = options or EmaneOptions() - super().__init__(session, _id, name, server, options) - self.conf: str = "" - self.mobility: Optional[WayPointMobility] = None - model_class = self.session.emane.get_model(options.emane_model) - self.wireless_model: Optional["EmaneModel"] = model_class(self.session, self.id) - if self.session.is_running(): - self.session.emane.add_node(self) + def __init__(self, session, objid=None, name=None, start=True): + super(EmaneNode, self).__init__(session, objid, name, start) + self.conf = "" + self.up = False + self.nemidmap = {} + self.model = None + self.mobility = None - @classmethod - def create_options(cls) -> EmaneOptions: - return EmaneOptions() - - def linkconfig( - self, iface: CoreInterface, options: LinkOptions, iface2: CoreInterface = None - ) -> None: + def linkconfig(self, netif, bw=None, delay=None, loss=None, duplicate=None, jitter=None, netif2=None): """ The CommEffect model supports link configuration. 
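For the options-based construction removed above, the EmaneOptions dataclass ties a node to its emane model at creation time. A minimal sketch, assuming an existing Session object named session (the node id and name are illustrative):

from core.emane.nodes import EmaneNet

options = EmaneNet.create_options()    # returns an EmaneOptions instance
options.emane_model = "emane_rfpipe"   # resolved through the session's emane manager
# normally done through the session's node creation API; shown directly for brevity
emane_net = EmaneNet(session, _id=100, name="wlan100", options=options)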
""" - if not self.wireless_model: + if not self.model: return - self.wireless_model.linkconfig(iface, options, iface2) + return self.model.linkconfig(netif=netif, bw=bw, delay=delay, loss=loss, + duplicate=duplicate, jitter=jitter, netif2=netif2) - def startup(self) -> None: - self.up = True + def config(self, conf): + self.conf = conf - def shutdown(self) -> None: - self.up = False - - def link(self, iface1: CoreInterface, iface2: CoreInterface) -> None: + def shutdown(self): pass - def unlink(self, iface1: CoreInterface, iface2: CoreInterface) -> None: + def link(self, netif1, netif2): pass - def updatemodel(self, config: dict[str, str]) -> None: - """ - Update configuration for the current model. + def unlink(self, netif1, netif2): + pass - :param config: configuration to update model with - :return: nothing - """ - if not self.wireless_model: - raise CoreError(f"no model set to update for node({self.name})") - logger.info( - "node(%s) updating model(%s): %s", self.id, self.wireless_model.name, config - ) - self.wireless_model.update_config(config) + def updatemodel(self, config): + if not self.model: + raise ValueError("no model set to update for node(%s)", self.objid) + logging.info("node(%s) updating model(%s): %s", self.objid, self.model.name, config) + self.model.set_configs(config, node_id=self.objid) - def setmodel( - self, - model: Union[type["EmaneModel"], type["WayPointMobility"]], - config: dict[str, str], - ) -> None: + def setmodel(self, model, config): """ set the EmaneModel associated with this node """ - if model.config_type == RegisterTlvs.WIRELESS: - self.wireless_model = model(session=self.session, _id=self.id) - self.wireless_model.update_config(config) - elif model.config_type == RegisterTlvs.MOBILITY: - self.mobility = model(session=self.session, _id=self.id) + logging.info("adding model: %s", model.name) + if model.config_type == RegisterTlvs.WIRELESS.value: + # EmaneModel really uses values from ConfigurableManager + # when buildnemxml() is called, not during init() + self.model = model(session=self.session, object_id=self.objid) + self.model.update_config(config) + elif model.config_type == RegisterTlvs.MOBILITY.value: + self.mobility = model(session=self.session, object_id=self.objid) self.mobility.update_config(config) - def links(self, flags: MessageFlags = MessageFlags.NONE) -> list[LinkData]: - links = [] - emane_manager = self.session.emane - # gather current emane links - nem_ids = set() - for iface in self.get_ifaces(): - nem_id = emane_manager.get_nem_id(iface) - nem_ids.add(nem_id) - emane_links = emane_manager.link_monitor.links - considered = set() - for link_key in emane_links: - considered_key = tuple(sorted(link_key)) - if considered_key in considered: - continue - considered.add(considered_key) - nem1, nem2 = considered_key - # ignore links not related to this node - if nem1 not in nem_ids and nem2 not in nem_ids: - continue - # ignore incomplete links - if (nem2, nem1) not in emane_links: - continue - link = emane_manager.get_nem_link(nem1, nem2, flags) - if link: - links.append(link) - return links - - def create_tuntap(self, node: CoreNode, iface_data: InterfaceData) -> CoreInterface: + def setnemid(self, netif, nemid): """ - Create a tuntap interface for the provided node. - - :param node: node to create tuntap interface for - :param iface_data: interface data to create interface with - :return: created tuntap interface + Record an interface to numerical ID mapping. The Emane controller + object manages and assigns these IDs for all NEMs. 
""" - with node.lock: - if iface_data.id is not None and iface_data.id in node.ifaces: - raise CoreError( - f"node({self.id}) interface({iface_data.id}) already exists" - ) - iface_id = ( - iface_data.id if iface_data.id is not None else node.next_iface_id() - ) - name = iface_data.name if iface_data.name is not None else f"eth{iface_id}" - session_id = self.session.short_session_id() - localname = f"tap{node.id}.{iface_id}.{session_id}" - iface = TunTap(iface_id, name, localname, self.session.use_ovs(), node=node) - if iface_data.mac: - iface.set_mac(iface_data.mac) - for ip in iface_data.get_ips(): - iface.add_ip(ip) - node.ifaces[iface_id] = iface - self.attach(iface) - if self.up: - iface.startup() - if self.session.is_running(): - self.session.emane.start_iface(self, iface) - return iface + self.nemidmap[netif] = nemid - def adopt_iface(self, iface: CoreInterface, name: str) -> None: - raise CoreError( - f"emane network({self.name}) do not support adopting interfaces" - ) + def getnemid(self, netif): + """ + Given an interface, return its numerical ID. + """ + if netif not in self.nemidmap: + return None + else: + return self.nemidmap[netif] + + def getnemnetif(self, nemid): + """ + Given a numerical NEM ID, return its interface. This returns the + first interface that matches the given NEM ID. + """ + for netif in self.nemidmap: + if self.nemidmap[netif] == nemid: + return netif + return None + + def netifs(self, sort=True): + """ + Retrieve list of linked interfaces sorted by node number. + """ + return sorted(self._netif.values(), key=lambda ifc: ifc.node.objid) + + def installnetifs(self): + """ + Install TAP devices into their namespaces. This is done after + EMANE daemons have been started, because that is their only chance + to bind to the TAPs. + """ + if self.session.emane.genlocationevents() and self.session.emane.service is None: + warntxt = "unable to publish EMANE events because the eventservice " + warntxt += "Python bindings failed to load" + logging.error(warntxt) + + for netif in self.netifs(): + external = self.session.emane.get_config("external", self.objid, self.model.name) + if external == "0": + netif.setaddrs() + + if not self.session.emane.genlocationevents(): + netif.poshook = None + continue + + # at this point we register location handlers for generating + # EMANE location events + netif.poshook = self.setnemposition + x, y, z = netif.node.position.get() + self.setnemposition(netif, x, y, z) + + def deinstallnetifs(self): + """ + Uninstall TAP devices. This invokes their shutdown method for + any required cleanup; the device may be actually removed when + emanetransportd terminates. + """ + for netif in self.netifs(): + if "virtual" in netif.transport_type.lower(): + netif.shutdown() + netif.poshook = None + + def setnemposition(self, netif, x, y, z): + """ + Publish a NEM location change event using the EMANE event service. 
+ """ + if self.session.emane.service is None: + logging.info("position service not available") + return + nemid = self.getnemid(netif) + ifname = netif.localname + if nemid is None: + logging.info("nemid for %s is unknown" % ifname) + return + lat, long, alt = self.session.location.getgeo(x, y, z) + logging.info("setnemposition %s (%s) x,y,z=(%d,%d,%s)(%.6f,%.6f,%.6f)", ifname, nemid, x, y, z, lat, long, alt) + event = LocationEvent() + + # altitude must be an integer or warning is printed + # unused: yaw, pitch, roll, azimuth, elevation, velocity + alt = int(round(alt)) + event.append(nemid, latitude=lat, longitude=long, altitude=alt) + self.session.emane.service.publish(0, event) + + def setnempositions(self, moved_netifs): + """ + Several NEMs have moved, from e.g. a WaypointMobilityModel + calculation. Generate an EMANE Location Event having several + entries for each netif that has moved. + """ + if len(moved_netifs) == 0: + return + + if self.session.emane.service is None: + logging.info("position service not available") + return + + event = LocationEvent() + i = 0 + for netif in moved_netifs: + nemid = self.getnemid(netif) + ifname = netif.localname + if nemid is None: + logging.info("nemid for %s is unknown" % ifname) + continue + x, y, z = netif.node.getposition() + lat, long, alt = self.session.location.getgeo(x, y, z) + logging.info("setnempositions %d %s (%s) x,y,z=(%d,%d,%s)(%.6f,%.6f,%.6f)", + i, ifname, nemid, x, y, z, lat, long, alt) + # altitude must be an integer or warning is printed + alt = int(round(alt)) + event.append(nemid, latitude=lat, longitude=long, altitude=alt) + i += 1 + + self.session.emane.service.publish(0, event) diff --git a/daemon/core/emane/rfpipe.py b/daemon/core/emane/rfpipe.py new file mode 100644 index 00000000..4942d89e --- /dev/null +++ b/daemon/core/emane/rfpipe.py @@ -0,0 +1,23 @@ +""" +rfpipe.py: EMANE RF-PIPE model for CORE +""" +import os + +from core.emane import emanemodel + + +class EmaneRfPipeModel(emanemodel.EmaneModel): + # model name + name = "emane_rfpipe" + + # mac configuration + mac_library = "rfpipemaclayer" + mac_xml = "rfpipemaclayer.xml" + + @classmethod + def load(cls, emane_prefix): + cls.mac_defaults["pcrcurveuri"] = os.path.join( + emane_prefix, + "share/emane/xml/models/mac/rfpipe/rfpipepcr.xml" + ) + super(EmaneRfPipeModel, cls).load(emane_prefix) diff --git a/daemon/core/emane/tdma.py b/daemon/core/emane/tdma.py new file mode 100644 index 00000000..b599638d --- /dev/null +++ b/daemon/core/emane/tdma.py @@ -0,0 +1,62 @@ +""" +tdma.py: EMANE TDMA model bindings for CORE +""" + +import logging +import os + +from core import constants +from core.conf import Configuration +from core.emane import emanemodel +from core.enumerations import ConfigDataTypes +from core.misc import utils + + +class EmaneTdmaModel(emanemodel.EmaneModel): + # model name + name = "emane_tdma" + + # mac configuration + mac_library = "tdmaeventschedulerradiomodel" + mac_xml = "tdmaeventschedulerradiomodel.xml" + + # add custom schedule options and ignore it when writing emane xml + schedule_name = "schedule" + default_schedule = os.path.join(constants.CORE_DATA_DIR, "examples", "tdma", "schedule.xml") + config_ignore = {schedule_name} + + @classmethod + def load(cls, emane_prefix): + cls.mac_defaults["pcrcurveuri"] = os.path.join( + emane_prefix, + "share/emane/xml/models/mac/tdmaeventscheduler/tdmabasemodelpcr.xml" + ) + super(EmaneTdmaModel, cls).load(emane_prefix) + cls.mac_config.insert( + 0, + Configuration( + _id=cls.schedule_name, + 
_type=ConfigDataTypes.STRING, + default=cls.default_schedule, + label="TDMA schedule file (core)" + ) + ) + + def post_startup(self): + """ + Logic to execute after the emane manager is finished with startup. + + :return: nothing + """ + # get configured schedule + config = self.session.emane.get_configs(node_id=self.object_id, config_type=self.name) + if not config: + return + schedule = config[self.schedule_name] + + # get the set event device + event_device = self.session.emane.event_device + + # initiate tdma schedule + logging.info("setting up tdma schedule: schedule(%s) device(%s)", schedule, event_device) + utils.check_cmd(["emaneevent-tdmaschedule", "-i", event_device, schedule]) diff --git a/daemon/core/emulator/broadcast.py b/daemon/core/emulator/broadcast.py deleted file mode 100644 index bf56f99d..00000000 --- a/daemon/core/emulator/broadcast.py +++ /dev/null @@ -1,67 +0,0 @@ -from collections.abc import Callable -from typing import TypeVar, Union - -from core.emulator.data import ( - ConfigData, - EventData, - ExceptionData, - FileData, - LinkData, - NodeData, -) -from core.errors import CoreError - -T = TypeVar( - "T", bound=Union[EventData, ExceptionData, NodeData, LinkData, FileData, ConfigData] -) - - -class BroadcastManager: - def __init__(self) -> None: - """ - Creates a BroadcastManager instance. - """ - self.handlers: dict[type[T], set[Callable[[T], None]]] = {} - - def send(self, data: T) -> None: - """ - Retrieve handlers for data, and run all current handlers. - - :param data: data to provide to handlers - :return: nothing - """ - handlers = self.handlers.get(type(data), set()) - for handler in handlers: - handler(data) - - def add_handler(self, data_type: type[T], handler: Callable[[T], None]) -> None: - """ - Add a handler for a given data type. - - :param data_type: type of data to add handler for - :param handler: handler to add - :return: nothing - """ - handlers = self.handlers.setdefault(data_type, set()) - if handler in handlers: - raise CoreError( - f"cannot add data({data_type}) handler({repr(handler)}), " - f"already exists" - ) - handlers.add(handler) - - def remove_handler(self, data_type: type[T], handler: Callable[[T], None]) -> None: - """ - Remove a handler for a given data type. - - :param data_type: type of data to remove handler for - :param handler: handler to remove - :return: nothing - """ - handlers = self.handlers.get(data_type, set()) - if handler not in handlers: - raise CoreError( - f"cannot remove data({data_type}) handler({repr(handler)}), " - f"does not exist" - ) - handlers.remove(handler) diff --git a/daemon/core/emulator/controlnets.py b/daemon/core/emulator/controlnets.py deleted file mode 100644 index 27b00367..00000000 --- a/daemon/core/emulator/controlnets.py +++ /dev/null @@ -1,239 +0,0 @@ -import logging -from typing import TYPE_CHECKING, Optional - -from core import utils -from core.emulator.data import InterfaceData -from core.errors import CoreError -from core.nodes.base import CoreNode -from core.nodes.interface import DEFAULT_MTU -from core.nodes.network import CtrlNet - -logger = logging.getLogger(__name__) - -if TYPE_CHECKING: - from core.emulator.session import Session - -CTRL_NET_ID: int = 9001 -ETC_HOSTS_PATH: str = "/etc/hosts" - - -class ControlNetManager: - def __init__(self, session: "Session") -> None: - self.session: "Session" = session - self.etc_hosts_header: str = f"CORE session {self.session.id} host entries" - - def _etc_hosts_enabled(self) -> bool: - """ - Determines if /etc/hosts should be configured. 
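For the BroadcastManager removed above: handlers are registered per data type and every handler for that type runs on send(). A minimal sketch using the LinkData fields that appear elsewhere in this diff:

from core.emulator.broadcast import BroadcastManager
from core.emulator.data import LinkData
from core.emulator.enumerations import LinkTypes, MessageFlags

def handle_link(link_data: LinkData) -> None:
    print("link update:", link_data.node1_id, link_data.node2_id)

manager = BroadcastManager()
manager.add_handler(LinkData, handle_link)  # adding the same handler twice raises CoreError
manager.send(LinkData(message_type=MessageFlags.ADD, type=LinkTypes.WIRELESS,
                      node1_id=1, node2_id=2))
manager.remove_handler(LinkData, handle_link)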
- - :return: True if /etc/hosts should be configured, False otherwise - """ - return self.session.options.get_bool("update_etc_hosts", False) - - def _get_server_ifaces( - self, - ) -> tuple[None, Optional[str], Optional[str], Optional[str]]: - """ - Retrieve control net server interfaces. - - :return: control net server interfaces - """ - d0 = self.session.options.get("controlnetif0") - if d0: - logger.error("controlnet0 cannot be assigned with a host interface") - d1 = self.session.options.get("controlnetif1") - d2 = self.session.options.get("controlnetif2") - d3 = self.session.options.get("controlnetif3") - return None, d1, d2, d3 - - def _get_prefixes( - self, - ) -> tuple[Optional[str], Optional[str], Optional[str], Optional[str]]: - """ - Retrieve control net prefixes. - - :return: control net prefixes - """ - p = self.session.options.get("controlnet") - p0 = self.session.options.get("controlnet0") - p1 = self.session.options.get("controlnet1") - p2 = self.session.options.get("controlnet2") - p3 = self.session.options.get("controlnet3") - if not p0 and p: - p0 = p - return p0, p1, p2, p3 - - def update_etc_hosts(self) -> None: - """ - Add the IP addresses of control interfaces to the /etc/hosts file. - - :return: nothing - """ - if not self._etc_hosts_enabled(): - return - control_net = self.get_control_net(0) - entries = "" - for iface in control_net.get_ifaces(): - name = iface.node.name - for ip in iface.ips(): - entries += f"{ip.ip} {name}\n" - logger.info("adding entries to /etc/hosts") - utils.file_munge(ETC_HOSTS_PATH, self.etc_hosts_header, entries) - - def clear_etc_hosts(self) -> None: - """ - Clear IP addresses of control interfaces from the /etc/hosts file. - - :return: nothing - """ - if not self._etc_hosts_enabled(): - return - logger.info("removing /etc/hosts file entries") - utils.file_demunge(ETC_HOSTS_PATH, self.etc_hosts_header) - - def get_control_net_index(self, dev: str) -> int: - """ - Retrieve control net index. - - :param dev: device to get control net index for - :return: control net index, -1 otherwise - """ - if dev[0:4] == "ctrl" and int(dev[4]) in (0, 1, 2, 3): - index = int(dev[4]) - if index == 0: - return index - if index < 4 and self._get_prefixes()[index] is not None: - return index - return -1 - - def get_control_net(self, index: int) -> Optional[CtrlNet]: - """ - Retrieve a control net based on index. - - :param index: control net index - :return: control net when available, None otherwise - """ - try: - return self.session.get_node(CTRL_NET_ID + index, CtrlNet) - except CoreError: - return None - - def add_control_net( - self, index: int, conf_required: bool = True - ) -> Optional[CtrlNet]: - """ - Create a control network bridge as necessary. The conf_reqd flag, - when False, causes a control network bridge to be added even if - one has not been configured. 
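The device-name lookup in get_control_net_index() above resolves as sketched here, assuming a ControlNetManager instance named manager; indices 1 through 3 only resolve when a matching controlnetN prefix is configured:

device_names = ["ctrl0", "ctrl2", "eth0"]
# with only controlnet0 configured, the expected results are 0, -1 and -1;
# with a controlnet2 prefix also set, "ctrl2" resolves to 2 instead
indices = [manager.get_control_net_index(dev) for dev in device_names]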
- - :param index: network index to add - :param conf_required: flag to check if conf is required - :return: control net node - """ - logger.info( - "checking to add control net index(%s) conf_required(%s)", - index, - conf_required, - ) - # check for valid index - if not (0 <= index <= 3): - raise CoreError(f"invalid control net index({index})") - # return any existing control net bridge - control_net = self.get_control_net(index) - if control_net: - logger.info("control net index(%s) already exists", index) - return control_net - # retrieve prefix for current index - index_prefix = self._get_prefixes()[index] - if not index_prefix: - if conf_required: - return None - else: - index_prefix = CtrlNet.DEFAULT_PREFIX_LIST[index] - # retrieve valid prefix from old style values - prefixes = index_prefix.split() - if len(prefixes) > 1: - # a list of per-host prefixes is provided - try: - prefix = prefixes[0].split(":", 1)[1] - except IndexError: - prefix = prefixes[0] - else: - prefix = prefixes[0] - # use the updown script for control net 0 only - updown_script = None - if index == 0: - updown_script = self.session.options.get("controlnet_updown_script") - # build a new controlnet bridge - _id = CTRL_NET_ID + index - server_iface = self._get_server_ifaces()[index] - logger.info( - "adding controlnet(%s) prefix(%s) updown(%s) server interface(%s)", - _id, - prefix, - updown_script, - server_iface, - ) - options = CtrlNet.create_options() - options.prefix = prefix - options.updown_script = updown_script - options.serverintf = server_iface - control_net = self.session.create_node(CtrlNet, False, _id, options=options) - control_net.brname = f"ctrl{index}.{self.session.short_session_id()}" - control_net.startup() - return control_net - - def remove_control_net(self, index: int) -> None: - """ - Removes control net. - - :param index: index of control net to remove - :return: nothing - """ - control_net = self.get_control_net(index) - if control_net: - logger.info("removing control net index(%s)", index) - self.session.delete_node(control_net.id) - - def add_control_iface(self, node: CoreNode, index: int) -> None: - """ - Adds a control net interface to a node. 
- - :param node: node to add control net interface to - :param index: index of control net to add interface to - :return: nothing - :raises CoreError: if control net doesn't exist, interface already exists, - or there is an error creating the interface - """ - control_net = self.get_control_net(index) - if not control_net: - raise CoreError(f"control net index({index}) does not exist") - iface_id = control_net.CTRLIF_IDX_BASE + index - if node.ifaces.get(iface_id): - raise CoreError(f"control iface({iface_id}) already exists") - try: - logger.info( - "node(%s) adding control net index(%s) interface(%s)", - node.name, - index, - iface_id, - ) - ip4 = control_net.prefix[node.id] - ip4_mask = control_net.prefix.prefixlen - iface_data = InterfaceData( - id=iface_id, - name=f"ctrl{index}", - mac=utils.random_mac(), - ip4=ip4, - ip4_mask=ip4_mask, - mtu=DEFAULT_MTU, - ) - iface = node.create_iface(iface_data) - control_net.attach(iface) - iface.control = True - except ValueError: - raise CoreError( - f"error adding control net interface to node({node.id}), " - f"invalid control net prefix({control_net.prefix}), " - "a longer prefix length may be required" - ) diff --git a/daemon/core/emulator/coreemu.py b/daemon/core/emulator/coreemu.py index 574002e6..9f7e128a 100644 --- a/daemon/core/emulator/coreemu.py +++ b/daemon/core/emulator/coreemu.py @@ -1,156 +1,888 @@ +import atexit import logging import os -from pathlib import Path +import signal +import sys -from core import utils -from core.configservice.manager import ConfigServiceManager -from core.emane.modelmanager import EmaneModelManager -from core.emulator.session import Session -from core.executables import get_requirements -from core.services.coreservices import ServiceManager - -logger = logging.getLogger(__name__) - -DEFAULT_EMANE_PREFIX: str = "/usr" +import core.services +from core.coreobj import PyCoreNet +from core.coreobj import PyCoreNode +from core.data import NodeData +from core.emulator.emudata import LinkOptions +from core.emulator.emudata import NodeOptions +from core.enumerations import EventTypes +from core.enumerations import LinkTypes +from core.enumerations import NodeTypes +from core.misc import nodemaps +from core.misc import nodeutils +from core.service import ServiceManager +from core.session import Session +from core.xml.corexml import CoreXmlReader, CoreXmlWriter -class CoreEmu: +def signal_handler(signal_number, _): + """ + Handle signals and force an exit with cleanup. + + :param int signal_number: signal number + :param _: ignored + :return: nothing + """ + logging.info("caught signal: %s", signal_number) + sys.exit(signal_number) + + +signal.signal(signal.SIGHUP, signal_handler) +signal.signal(signal.SIGINT, signal_handler) +signal.signal(signal.SIGTERM, signal_handler) +signal.signal(signal.SIGUSR1, signal_handler) +signal.signal(signal.SIGUSR2, signal_handler) + + +def create_interface(node, network, interface_data): + """ + Create an interface for a node on a network using provided interface data. 
+ + :param node: node to create interface for + :param network: network to associate interface with + :param core.emulator.emudata.InterfaceData interface_data: interface data + :return: created interface + """ + node.newnetif( + network, + addrlist=interface_data.get_addresses(), + hwaddr=interface_data.mac, + ifindex=interface_data.id, + ifname=interface_data.name + ) + return node.netif(interface_data.id, network) + + +def link_config(network, interface, link_options, devname=None, interface_two=None): + """ + Convenience method for configuring a link, + + :param network: network to configure link for + :param interface: interface to configure + :param core.emulator.emudata.LinkOptions link_options: data to configure link with + :param str devname: device name, default is None + :param interface_two: other interface associated, default is None + :return: nothing + """ + config = { + "netif": interface, + "bw": link_options.bandwidth, + "delay": link_options.delay, + "loss": link_options.per, + "duplicate": link_options.dup, + "jitter": link_options.jitter, + "netif2": interface_two + } + + # hacky check here, because physical and emane nodes do not conform to the same linkconfig interface + if not nodeutils.is_node(network, [NodeTypes.EMANE, NodeTypes.PHYSICAL]): + config["devname"] = devname + + network.linkconfig(**config) + + +def is_net_node(node): + """ + Convenience method for testing if a legacy core node is considered a network node. + + :param object node: object to test against + :return: True if object is an instance of a network node, False otherwise + :rtype: bool + """ + return isinstance(node, PyCoreNet) + + +def is_core_node(node): + """ + Convenience method for testing if a legacy core node is considered a core node. + + :param object node: object to test against + :return: True if object is an instance of a core node, False otherwise + :rtype: bool + """ + return isinstance(node, PyCoreNode) + + +class IdGen(object): + def __init__(self, _id=0): + self.id = _id + + def next(self): + self.id += 1 + return self.id + + +class EmuSession(Session): + def __init__(self, _id, config=None, mkdir=True): + super(EmuSession, self).__init__(_id, config, mkdir) + + # object management + self.node_id_gen = IdGen() + + # set default services + self.services.default_services = { + "mdr": ("zebra", "OSPFv3MDR", "IPForward"), + "PC": ("DefaultRoute",), + "prouter": ("zebra", "OSPFv2", "OSPFv3", "IPForward"), + "router": ("zebra", "OSPFv2", "OSPFv3", "IPForward"), + "host": ("DefaultRoute", "SSH"), + } + + def _link_nodes(self, node_one_id, node_two_id): + """ + Convenience method for retrieving nodes within link data. 
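The link_config() helper above is where the LinkOptions naming quirk lives: the options object uses per and dup while the underlying linkconfig() call expects loss and duplicate. A small sketch, assuming network and interface objects already exist and using the helper defined above:

from core.emulator.emudata import LinkOptions

link_options = LinkOptions()
link_options.bandwidth = 54000000  # passed through as bw
link_options.delay = 20            # passed through as delay
link_options.per = 1.0             # passed through as loss
link_options.dup = 0.0             # passed through as duplicate
link_options.jitter = 0            # passed through as jitter
link_config(network, interface, link_options)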
+ + :param int node_one_id: node one id + :param int node_two_id: node two id + :return: nodes, network nodes if present, and tunnel if present + :rtype: tuple + """ + logging.debug("link message between node1(%s) and node2(%s)", node_one_id, node_two_id) + + # values to fill + net_one = None + net_two = None + + # retrieve node one + node_one = self.get_object(node_one_id) + node_two = self.get_object(node_two_id) + + # both node ids are provided + tunnel = self.broker.gettunnel(node_one_id, node_two_id) + logging.debug("tunnel between nodes: %s", tunnel) + if nodeutils.is_node(tunnel, NodeTypes.TAP_BRIDGE): + net_one = tunnel + if tunnel.remotenum == node_one_id: + node_one = None + else: + node_two = None + # physical node connected via gre tap tunnel + elif tunnel: + if tunnel.remotenum == node_one_id: + node_one = None + else: + node_two = None + + if is_net_node(node_one): + if not net_one: + net_one = node_one + else: + net_two = node_one + node_one = None + + if is_net_node(node_two): + if not net_one: + net_one = node_two + else: + net_two = node_two + node_two = None + + logging.debug("link node types n1(%s) n2(%s) net1(%s) net2(%s) tunnel(%s)", + node_one, node_two, net_one, net_two, tunnel) + return node_one, node_two, net_one, net_two, tunnel + + # TODO: this doesn't appear to ever be used, EMANE or basic wireless range + def _link_wireless(self, objects, connect): + """ + Objects to deal with when connecting/disconnecting wireless links. + + :param list objects: possible objects to deal with + :param bool connect: link interfaces if True, unlink otherwise + :return: nothing + """ + objects = [x for x in objects if x] + if len(objects) < 2: + raise ValueError("wireless link failure: %s", objects) + logging.debug("handling wireless linking objects(%s) connect(%s)", objects, connect) + common_networks = objects[0].commonnets(objects[1]) + if not common_networks: + raise ValueError("no common network found for wireless link/unlink") + + for common_network, interface_one, interface_two in common_networks: + if not nodeutils.is_node(common_network, [NodeTypes.WIRELESS_LAN, NodeTypes.EMANE]): + logging.info("skipping common network that is not wireless/emane: %s", common_network) + continue + + logging.info("wireless linking connect(%s): %s - %s", connect, interface_one, interface_two) + if connect: + common_network.link(interface_one, interface_two) + else: + common_network.unlink(interface_one, interface_two) + + def add_link(self, node_one_id, node_two_id, interface_one=None, interface_two=None, link_options=LinkOptions()): + """ + Add a link between nodes. 
+ + :param int node_one_id: node one id + :param int node_two_id: node two id + :param core.emulator.emudata.InterfaceData interface_one: node one interface data, defaults to none + :param core.emulator.emudata.InterfaceData interface_two: node two interface data, defaults to none + :param core.emulator.emudata.LinkOptions link_options: data for creating link, defaults to no options + :return: + """ + # get node objects identified by link data + node_one, node_two, net_one, net_two, tunnel = self._link_nodes(node_one_id, node_two_id) + + if node_one: + node_one.lock.acquire() + if node_two: + node_two.lock.acquire() + + try: + # wireless link + if link_options.type == LinkTypes.WIRELESS: + objects = [node_one, node_two, net_one, net_two] + self._link_wireless(objects, connect=True) + # wired link + else: + # 2 nodes being linked, ptp network + if all([node_one, node_two]) and not net_one: + logging.info("adding link for peer to peer nodes: %s - %s", node_one.name, node_two.name) + ptp_class = nodeutils.get_node_class(NodeTypes.PEER_TO_PEER) + start = self.state > EventTypes.DEFINITION_STATE.value + net_one = self.add_object(cls=ptp_class, start=start) + + # node to network + if node_one and net_one: + logging.info("adding link from node to network: %s - %s", node_one.name, net_one.name) + interface = create_interface(node_one, net_one, interface_one) + link_config(net_one, interface, link_options) + + # network to node + if node_two and net_one: + logging.info("adding link from network to node: %s - %s", node_two.name, net_one.name) + interface = create_interface(node_two, net_one, interface_two) + if not link_options.unidirectional: + link_config(net_one, interface, link_options) + + # network to network + if net_one and net_two: + logging.info("adding link from network to network: %s - %s", net_one.name, net_two.name) + if nodeutils.is_node(net_two, NodeTypes.RJ45): + interface = net_two.linknet(net_one) + else: + interface = net_one.linknet(net_two) + + link_config(net_one, interface, link_options) + + if not link_options.unidirectional: + interface.swapparams("_params_up") + link_config(net_two, interface, link_options, devname=interface.name) + interface.swapparams("_params_up") + + # a tunnel node was found for the nodes + addresses = [] + if not node_one and all([net_one, interface_one]): + addresses.extend(interface_one.get_addresses()) + + if not node_two and all([net_two, interface_two]): + addresses.extend(interface_two.get_addresses()) + + # tunnel node logic + key = link_options.key + if key and nodeutils.is_node(net_one, NodeTypes.TUNNEL): + logging.info("setting tunnel key for: %s", net_one.name) + net_one.setkey(key) + if addresses: + net_one.addrconfig(addresses) + if key and nodeutils.is_node(net_two, NodeTypes.TUNNEL): + logging.info("setting tunnel key for: %s", net_two.name) + net_two.setkey(key) + if addresses: + net_two.addrconfig(addresses) + + # physical node connected with tunnel + if not net_one and not net_two and (node_one or node_two): + if node_one and nodeutils.is_node(node_one, NodeTypes.PHYSICAL): + logging.info("adding link for physical node: %s", node_one.name) + addresses = interface_one.get_addresses() + node_one.adoptnetif(tunnel, interface_one.id, interface_one.mac, addresses) + link_config(node_one, tunnel, link_options) + elif node_two and nodeutils.is_node(node_two, NodeTypes.PHYSICAL): + logging.info("adding link for physical node: %s", node_two.name) + addresses = interface_two.get_addresses() + node_two.adoptnetif(tunnel, interface_two.id, 
interface_two.mac, addresses) + link_config(node_two, tunnel, link_options) + finally: + if node_one: + node_one.lock.release() + if node_two: + node_two.lock.release() + + def delete_link(self, node_one_id, node_two_id, interface_one_id, interface_two_id, link_type=LinkTypes.WIRED): + """ + Delete a link between nodes. + + :param int node_one_id: node one id + :param int node_two_id: node two id + :param int interface_one_id: interface id for node one + :param int interface_two_id: interface id for node two + :param core.enumerations.LinkTypes link_type: link type to delete + :return: nothing + """ + # get node objects identified by link data + node_one, node_two, net_one, net_two, _tunnel = self._link_nodes(node_one_id, node_two_id) + + if node_one: + node_one.lock.acquire() + if node_two: + node_two.lock.acquire() + + try: + # wireless link + if link_type == LinkTypes.WIRELESS: + objects = [node_one, node_two, net_one, net_two] + self._link_wireless(objects, connect=False) + # wired link + else: + if all([node_one, node_two]): + # TODO: fix this for the case where ifindex[1,2] are not specified + # a wired unlink event, delete the connecting bridge + interface_one = node_one.netif(interface_one_id) + interface_two = node_two.netif(interface_two_id) + + # get interfaces from common network, if no network node + # otherwise get interfaces between a node and network + if not interface_one and not interface_two: + common_networks = node_one.commonnets(node_two) + for network, common_interface_one, common_interface_two in common_networks: + if (net_one and network == net_one) or not net_one: + interface_one = common_interface_one + interface_two = common_interface_two + break + + if all([interface_one, interface_two]) and any([interface_one.net, interface_two.net]): + if interface_one.net != interface_two.net and all([interface_one.up, interface_two.up]): + raise ValueError("no common network found") + + logging.info("deleting link node(%s):interface(%s) node(%s):interface(%s)", + node_one.name, interface_one.name, node_two.name, interface_two.name) + net_one = interface_one.net + interface_one.detachnet() + interface_two.detachnet() + if net_one.numnetif() == 0: + self.delete_object(net_one.objid) + node_one.delnetif(interface_one.netindex) + node_two.delnetif(interface_two.netindex) + finally: + if node_one: + node_one.lock.release() + if node_two: + node_two.lock.release() + + def update_link(self, node_one_id, node_two_id, interface_one_id=None, interface_two_id=None, + link_options=LinkOptions()): + """ + Update link information between nodes. 
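# ---------------------------------------------------------------------------
# Illustrative aside (not part of the diff): driving the legacy update_link()
# above to retune an existing wired link at runtime. LinkOptions comes from
# emudata.py later in this diff; note that its "per" field is what
# link_config() passes to linkconfig() as "loss". The session and node objects
# are assumed to exist, and the numeric values (units assumed: delay in
# microseconds, loss in percent) are hypothetical.
from core.emulator.emudata import LinkOptions

options = LinkOptions()
options.delay = 20000
options.per = 12.5
session.update_link(node_one.objid, node_two.objid, link_options=options)
# ---------------------------------------------------------------------------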
+ + :param int node_one_id: node one id + :param int node_two_id: node two id + :param int interface_one_id: interface id for node one + :param int interface_two_id: interface id for node two + :param core.emulator.emudata.LinkOptions link_options: data to update link with + :return: nothing + """ + # get node objects identified by link data + node_one, node_two, net_one, net_two, _tunnel = self._link_nodes(node_one_id, node_two_id) + + if node_one: + node_one.lock.acquire() + if node_two: + node_two.lock.acquire() + + try: + # wireless link + if link_options.type == LinkTypes.WIRELESS.value: + raise ValueError("cannot update wireless link") + else: + if not node_one and not node_two: + if net_one and net_two: + # modify link between nets + interface = net_one.getlinknetif(net_two) + upstream = False + + if not interface: + upstream = True + interface = net_two.getlinknetif(net_one) + + if not interface: + raise ValueError("modify unknown link between nets") + + if upstream: + interface.swapparams("_params_up") + link_config(net_one, interface, link_options, devname=interface.name) + interface.swapparams("_params_up") + else: + link_config(net_one, interface, link_options) + + if not link_options.unidirectional: + if upstream: + link_config(net_two, interface, link_options) + else: + interface.swapparams("_params_up") + link_config(net_two, interface, link_options, devname=interface.name) + interface.swapparams("_params_up") + else: + raise ValueError("modify link for unknown nodes") + elif not node_one: + # node1 = layer 2node, node2 = layer3 node + interface = node_two.netif(interface_two_id, net_one) + link_config(net_one, interface, link_options) + elif not node_two: + # node2 = layer 2node, node1 = layer3 node + interface = node_one.netif(interface_one_id, net_one) + link_config(net_one, interface, link_options) + else: + common_networks = node_one.commonnets(node_two) + if not common_networks: + raise ValueError("no common network found") + + for net_one, interface_one, interface_two in common_networks: + if interface_one_id is not None and interface_one_id != node_one.getifindex(interface_one): + continue + + link_config(net_one, interface_one, link_options, interface_two=interface_two) + if not link_options.unidirectional: + link_config(net_one, interface_two, link_options, interface_two=interface_one) + + finally: + if node_one: + node_one.lock.release() + if node_two: + node_two.lock.release() + + def add_node(self, _type=NodeTypes.DEFAULT, _id=None, node_options=NodeOptions()): + """ + Add a node to the session, based on the provided node data. 
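# ---------------------------------------------------------------------------
# Illustrative aside (not part of the diff): creating nodes through the
# add_node() method above using NodeOptions from emudata.py further down.
# A node can be placed with canvas coordinates, or, as handled by
# set_node_position() below, with lat/lon/alt. The session object, names,
# coordinates and models are hypothetical.
from core.emulator.emudata import NodeOptions
from core.enumerations import NodeTypes

options = NodeOptions(name="n1", model="router")
options.set_position(100, 150)
router = session.add_node(_type=NodeTypes.DEFAULT, node_options=options)

geo_options = NodeOptions(name="n2", model="PC")
geo_options.set_location(47.57917, -122.13232, 2.0)
pc = session.add_node(node_options=geo_options)
# ---------------------------------------------------------------------------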
+ + :param core.enumerations.NodeTypes _type: type of node to create + :param int _id: id for node, defaults to None for generated id + :param core.emulator.emudata.NodeOptions node_options: data to create node with + :return: created node + """ + + # retrieve node class for given node type + try: + node_class = nodeutils.get_node_class(_type) + except KeyError: + logging.error("invalid node type to create: %s", _type) + return None + + # set node start based on current session state, override and check when rj45 + start = self.state > EventTypes.DEFINITION_STATE.value + enable_rj45 = self.options.get_config("enablerj45") == "1" + if _type == NodeTypes.RJ45 and not enable_rj45: + start = False + + # determine node id + if not _id: + while True: + _id = self.node_id_gen.next() + if _id not in self.objects: + break + + # generate name if not provided + name = node_options.name + if not name: + name = "%s%s" % (node_class.__name__, _id) + + # create node + logging.info("creating node(%s) id(%s) name(%s) start(%s)", node_class.__name__, _id, name, start) + node = self.add_object(cls=node_class, objid=_id, name=name, start=start) + + # set node attributes + node.icon = node_options.icon + node.canvas = node_options.canvas + node.opaque = node_options.opaque + + # set node position and broadcast it + self.set_node_position(node, node_options) + + # add services to default and physical nodes only + if _type in [NodeTypes.DEFAULT, NodeTypes.PHYSICAL]: + node.type = node_options.model + logging.debug("set node type: %s", node.type) + self.services.add_services(node, node.type, node_options.services) + + # boot nodes if created after runtime, LcxNodes, Physical, and RJ45 are all PyCoreNodes + is_boot_node = isinstance(node, PyCoreNode) and not nodeutils.is_node(node, NodeTypes.RJ45) + if self.state == EventTypes.RUNTIME_STATE.value and is_boot_node: + self.write_objects() + self.add_remove_control_interface(node=node, remove=False) + self.services.boot_services(node) + + return node + + def update_node(self, node_id, node_options): + """ + Update node information. + + :param int node_id: id of node to update + :param core.emulator.emudata.NodeOptions node_options: data to update node with + :return: True if node updated, False otherwise + :rtype: bool + """ + result = False + try: + # get node to update + node = self.get_object(node_id) + + # set node position and broadcast it + self.set_node_position(node, node_options) + + # update attributes + node.canvas = node_options.canvas + node.icon = node_options.icon + + # set node as updated successfully + result = True + except KeyError: + logging.error("failure to update node that does not exist: %s", node_id) + + return result + + def delete_node(self, node_id): + """ + Delete a node from the session and check if session should shutdown, if no nodes are left. + + :param int node_id: id of node to delete + :return: True if node deleted, False otherwise + :rtype: bool + """ + # delete node and check for session shutdown if a node was removed + result = self.custom_delete_object(node_id) + if result: + self.check_shutdown() + return result + + def set_node_position(self, node, node_options): + """ + Set position for a node, use lat/lon/alt if needed. 
+ + :param node: node to set position for + :param core.emulator.emudata.NodeOptions node_options: data for node + :return: nothing + """ + # extract location values + x = node_options.x + y = node_options.y + lat = node_options.lat + lon = node_options.lon + alt = node_options.alt + + # check if we need to generate position from lat/lon/alt + has_empty_position = all(i is None for i in [x, y]) + has_lat_lon_alt = all(i is not None for i in [lat, lon, alt]) + using_lat_lon_alt = has_empty_position and has_lat_lon_alt + if using_lat_lon_alt: + x, y, _ = self.location.getxyz(lat, lon, alt) + + # set position and broadcast + if None not in [x, y]: + node.setposition(x, y, None) + + # broadcast updated location when using lat/lon/alt + if using_lat_lon_alt: + self.broadcast_node_location(node) + + def broadcast_node_location(self, node): + """ + Broadcast node location to all listeners. + + :param core.netns.nodes.PyCoreObj node: node to broadcast location for + :return: nothing + """ + node_data = NodeData( + message_type=0, + id=node.objid, + x_position=node.position.x, + y_position=node.position.y + ) + self.broadcast_node(node_data) + + def start_mobility(self, node_ids=None): + """ + Start mobility for the provided node ids. + + :param list[int] node_ids: nodes to start mobility for + :return: nothing + """ + self.mobility.startup(node_ids) + + def shutdown(self): + """ + Shutdown session. + + :return: nothing + """ + logging.info("session(%s) shutting down", self.id) + self.set_state(EventTypes.DATACOLLECT_STATE, send_event=True) + self.set_state(EventTypes.SHUTDOWN_STATE, send_event=True) + super(EmuSession, self).shutdown() + + def custom_delete_object(self, object_id): + """ + Remove an emulation object. + + :param int object_id: object id to remove + :return: True if object deleted, False otherwise + """ + result = False + with self._objects_lock: + if object_id in self.objects: + obj = self.objects.pop(object_id) + obj.shutdown() + result = True + return result + + def is_active(self): + """ + Determine if this session is considered to be active. (Runtime or Data collect states) + + :return: True if active, False otherwise + """ + result = self.state in {EventTypes.RUNTIME_STATE.value, EventTypes.DATACOLLECT_STATE.value} + logging.info("session(%s) checking if active: %s", self.id, result) + return result + + def open_xml(self, file_name, start=False): + """ + Import a session from the EmulationScript XML format. + + :param str file_name: xml file to load session from + :param bool start: instantiate session if true, false otherwise + :return: nothing + """ + # clear out existing session + self.clear() + + # write out xml file + CoreXmlReader(self).read(file_name) + + # start session if needed + if start: + self.name = os.path.basename(file_name) + self.file_name = file_name + self.instantiate() + + def save_xml(self, file_name): + """ + Export a session to the EmulationScript XML format. + + :param str file_name: file name to write session xml to + :return: nothing + """ + CoreXmlWriter(self).write(file_name) + + def add_hook(self, state, file_name, source_name, data): + """ + Store a hook from a received file message. 
+ + :param int state: when to run hook + :param str file_name: file name for hook + :param str source_name: source name + :param data: hook data + :return: nothing + """ + # hack to conform with old logic until updated + state = ":%s" % state + self.set_hook(state, file_name, source_name, data) + + def add_node_file(self, node_id, source_name, file_name, data): + """ + Add a file to a node. + + :param int node_id: node to add file to + :param str source_name: source file name + :param str file_name: file name to add + :param str data: file data + :return: nothing + """ + + node = self.get_object(node_id) + + if source_name is not None: + node.addfile(source_name, file_name) + elif data is not None: + node.nodefile(file_name, data) + + def clear(self): + """ + Clear all CORE session data. (objects, hooks, broker) + + :return: nothing + """ + self.delete_objects() + self.del_hooks() + self.broker.reset() + self.emane.reset() + + def start_events(self): + """ + Start event loop. + + :return: nothing + """ + self.event_loop.run() + + def mobility_event(self, event_data): + """ + Handle a mobility event. + + :param core.data.EventData event_data: event data to handle + :return: nothing + """ + self.mobility.handleevent(event_data) + + def create_wireless_node(self, _id=None, node_options=NodeOptions()): + """ + Create a wireless node for use within an wireless/EMANE networks. + + :param int _id: int for node, defaults to None and will be generated + :param core.emulator.emudata.NodeOptions node_options: options for emane node, model will always be "mdr" + :return: new emane node + :rtype: core.netns.nodes.CoreNode + """ + node_options.model = "mdr" + return self.add_node(_type=NodeTypes.DEFAULT, _id=_id, node_options=node_options) + + def create_emane_network(self, model, geo_reference, geo_scale=None, node_options=NodeOptions(), config=None): + """ + Convenience method for creating an emane network. + + :param model: emane model to use for emane network + :param geo_reference: geo reference point to use for emane node locations + :param geo_scale: geo scale to use for emane node locations, defaults to 1.0 + :param core.emulator.emudata.NodeOptions node_options: options for emane node being created + :param dict config: emane model configuration + :return: create emane network + """ + # required to be set for emane to function properly + self.location.setrefgeo(*geo_reference) + if geo_scale: + self.location.refscale = geo_scale + + # create and return network + emane_network = self.add_node(_type=NodeTypes.EMANE, node_options=node_options) + self.emane.set_model(emane_network, model, config) + return emane_network + + +class CoreEmu(object): """ Provides logic for creating and configuring CORE sessions and the nodes within them. """ - def __init__(self, config: dict[str, str] = None) -> None: + def __init__(self, config=None): """ Create a CoreEmu object. 
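# ---------------------------------------------------------------------------
# Illustrative aside (not part of the diff): using create_emane_network() and
# create_wireless_node() above. The geo_reference tuple is unpacked into
# location.setrefgeo(), so it is (lat, lon, alt). The EMANE model import path
# is an assumption based on typical CORE layouts and may differ; the session
# object and coordinates are hypothetical.
from core.emane.ieee80211abg import EmaneIeee80211abgModel

emane_net = session.create_emane_network(
    model=EmaneIeee80211abgModel,
    geo_reference=(47.57917, -122.13232, 2.0),
)
node = session.create_wireless_node()
# ---------------------------------------------------------------------------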
- :param config: configuration options + :param dict config: configuration options """ # set umask 0 os.umask(0) # configuration - config = config if config else {} - self.config: dict[str, str] = config + if not config: + config = {} + self.config = config # session management - self.sessions: dict[int, Session] = {} + self.session_id_gen = IdGen(_id=59999) + self.sessions = {} + + # set default nodes + node_map = nodemaps.NODES + nodeutils.set_node_map(node_map) # load services - self.service_errors: list[str] = [] - self.service_manager: ConfigServiceManager = ConfigServiceManager() - self._load_services() + self.service_errors = [] + self.load_services() - # check and load emane - self.has_emane: bool = False - self._load_emane() + # catch exit event + atexit.register(self.shutdown) - # check executables exist on path - self._validate_env() - - def _validate_env(self) -> None: - """ - Validates executables CORE depends on exist on path. - - :return: nothing - :raises core.errors.CoreError: when an executable does not exist on path - """ - use_ovs = self.config.get("ovs") == "1" - for requirement in get_requirements(use_ovs): - utils.which(requirement, required=True) - - def _load_services(self) -> None: - """ - Loads default and custom services for use within CORE. - - :return: nothing - """ + def load_services(self): # load default services - self.service_errors = ServiceManager.load_locals() + self.service_errors = core.services.load() + # load custom services service_paths = self.config.get("custom_services_dir") - logger.debug("custom service paths: %s", service_paths) - if service_paths is not None: - for service_path in service_paths.split(","): - service_path = Path(service_path.strip()) + logging.debug("custom service paths: %s", service_paths) + if service_paths: + for service_path in service_paths.split(','): + service_path = service_path.strip() custom_service_errors = ServiceManager.add_services(service_path) self.service_errors.extend(custom_service_errors) - # load default config services - self.service_manager.load_locals() - # load custom config services - custom_dir = self.config.get("custom_config_services_dir") - if custom_dir is not None: - custom_dir = Path(custom_dir) - self.service_manager.load(custom_dir) - def _load_emane(self) -> None: + def update_nodes(self, node_map): """ - Check if emane is installed and load models. + Updates node map used by core. + :param dict node_map: node map to update existing node map with :return: nothing """ - # check for emane - path = utils.which("emane", required=False) - self.has_emane = path is not None - if not self.has_emane: - logger.info("emane is not installed, emane functionality disabled") - return - # get version - emane_version = utils.cmd("emane --version") - logger.info("using emane: %s", emane_version) - emane_prefix = self.config.get("emane_prefix", DEFAULT_EMANE_PREFIX) - emane_prefix = Path(emane_prefix) - EmaneModelManager.load_locals(emane_prefix) - # load custom models - custom_path = self.config.get("emane_models_dir") - if custom_path is not None: - logger.info("loading custom emane models: %s", custom_path) - custom_path = Path(custom_path) - EmaneModelManager.load(custom_path, emane_prefix) + nodeutils.update_node_map(node_map) - def shutdown(self) -> None: + def shutdown(self): """ Shutdown all CORE session. 
:return: nothing """ - logger.info("shutting down all sessions") - while self.sessions: - _, session = self.sessions.popitem() + logging.info("shutting down all sessions") + sessions = self.sessions.copy() + self.sessions.clear() + for session in sessions.itervalues(): session.shutdown() - def create_session(self, _id: int = None, _cls: type[Session] = Session) -> Session: + def create_session(self, _id=None, master=True, _cls=EmuSession): """ - Create a new CORE session. + Create a new CORE session, set to master if running standalone. - :param _id: session id for new session - :param _cls: Session class to use + :param int _id: session id for new session + :param bool master: sets session to master + :param class _cls: EmuSession class to use :return: created session + :rtype: EmuSession """ - if not _id: - _id = 1 - while _id in self.sessions: - _id += 1 - session = _cls(_id, config=self.config) - session.service_manager = self.service_manager - logger.info("created session: %s", _id) - self.sessions[_id] = session + + session_id = _id + if not session_id: + while True: + session_id = self.session_id_gen.next() + if session_id not in self.sessions: + break + + session = _cls(session_id, config=self.config) + logging.info("created session: %s", session_id) + if master: + session.master = True + + self.sessions[session_id] = session return session - def delete_session(self, _id: int) -> bool: + def delete_session(self, _id): """ Shutdown and delete a CORE session. - :param _id: session id to delete + :param int _id: session id to delete :return: True if deleted, False otherwise + :rtype: bool """ - logger.info("deleting session: %s", _id) + logging.info("deleting session: %s", _id) session = self.sessions.pop(_id, None) result = False if session: - logger.info("shutting session down: %s", _id) - session.data_collect() + logging.info("shutting session down: %s", _id) session.shutdown() result = True else: - logger.error("session to delete did not exist: %s", _id) + logging.error("session to delete did not exist: %s", _id) + return result diff --git a/daemon/core/emulator/data.py b/daemon/core/emulator/data.py deleted file mode 100644 index 7d3dc8dc..00000000 --- a/daemon/core/emulator/data.py +++ /dev/null @@ -1,357 +0,0 @@ -""" -CORE data objects. 
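# ---------------------------------------------------------------------------
# Illustrative aside (not part of the diff): a minimal end-to-end sketch of
# the legacy CoreEmu/EmuSession API shown above, in the style of typical CORE
# example scripts. State names come from EventTypes in enumerations.py; the
# prefix, model and node count are hypothetical.
from core.emulator.coreemu import CoreEmu
from core.emulator.emudata import IpPrefixes, NodeOptions
from core.enumerations import EventTypes

coreemu = CoreEmu()
session = coreemu.create_session()
session.set_state(EventTypes.CONFIGURATION_STATE)

prefixes = IpPrefixes(ip4_prefix="10.0.0.0/24")
options = NodeOptions(model="router")
node_one = session.add_node(node_options=options)
node_two = session.add_node(node_options=options)

interface_one = prefixes.create_interface(node_one)
interface_two = prefixes.create_interface(node_two)
session.add_link(node_one.objid, node_two.objid, interface_one, interface_two)

session.instantiate()
# ... run traffic, inspect nodes, etc. ...
coreemu.shutdown()
# ---------------------------------------------------------------------------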
-""" -from dataclasses import dataclass, field -from typing import TYPE_CHECKING, Any, Optional - -import netaddr - -from core import utils -from core.emulator.enumerations import ( - EventTypes, - ExceptionLevels, - LinkTypes, - MessageFlags, -) - -if TYPE_CHECKING: - from core.nodes.base import CoreNode, NodeBase - - -@dataclass -class ConfigData: - message_type: int = None - node: int = None - object: str = None - type: int = None - data_types: tuple[int] = None - data_values: str = None - captions: str = None - bitmap: str = None - possible_values: str = None - groups: str = None - session: int = None - iface_id: int = None - network_id: int = None - opaque: str = None - - -@dataclass -class EventData: - node: int = None - event_type: EventTypes = None - name: str = None - data: str = None - time: str = None - session: int = None - - -@dataclass -class ExceptionData: - node: int = None - session: int = None - level: ExceptionLevels = None - source: str = None - date: str = None - text: str = None - opaque: str = None - - -@dataclass -class FileData: - message_type: MessageFlags = None - node: int = None - name: str = None - mode: str = None - number: int = None - type: str = None - source: str = None - session: int = None - data: str = None - compressed_data: str = None - - -@dataclass -class NodeOptions: - """ - Options for creating and updating nodes within core. - """ - - name: str = None - model: Optional[str] = "PC" - canvas: int = None - icon: str = None - services: list[str] = field(default_factory=list) - config_services: list[str] = field(default_factory=list) - x: float = None - y: float = None - lat: float = None - lon: float = None - alt: float = None - server: str = None - image: str = None - emane: str = None - legacy: bool = False - # src, dst - binds: list[tuple[str, str]] = field(default_factory=list) - # src, dst, unique, delete - volumes: list[tuple[str, str, bool, bool]] = field(default_factory=list) - - def set_position(self, x: float, y: float) -> None: - """ - Convenience method for setting position. - - :param x: x position - :param y: y position - :return: nothing - """ - self.x = x - self.y = y - - def set_location(self, lat: float, lon: float, alt: float) -> None: - """ - Convenience method for setting location. - - :param lat: latitude - :param lon: longitude - :param alt: altitude - :return: nothing - """ - self.lat = lat - self.lon = lon - self.alt = alt - - -@dataclass -class NodeData: - """ - Node to broadcast. - """ - - node: "NodeBase" - message_type: MessageFlags = None - source: str = None - - -@dataclass -class InterfaceData: - """ - Convenience class for storing interface data. - """ - - id: int = None - name: str = None - mac: str = None - ip4: str = None - ip4_mask: int = None - ip6: str = None - ip6_mask: int = None - mtu: int = None - - def get_ips(self) -> list[str]: - """ - Returns a list of ip4 and ip6 addresses when present. - - :return: list of ip addresses - """ - ips = [] - if self.ip4 and self.ip4_mask: - ips.append(f"{self.ip4}/{self.ip4_mask}") - if self.ip6 and self.ip6_mask: - ips.append(f"{self.ip6}/{self.ip6_mask}") - return ips - - -@dataclass -class LinkOptions: - """ - Options for creating and updating links within core. 
- """ - - delay: int = None - bandwidth: int = None - loss: float = None - dup: int = None - jitter: int = None - mer: int = None - burst: int = None - mburst: int = None - unidirectional: int = None - key: int = None - buffer: int = None - - def update(self, options: "LinkOptions") -> bool: - """ - Updates current options with values from other options. - - :param options: options to update with - :return: True if any value has changed, False otherwise - """ - changed = False - if options.delay is not None and 0 <= options.delay != self.delay: - self.delay = options.delay - changed = True - if options.bandwidth is not None and 0 <= options.bandwidth != self.bandwidth: - self.bandwidth = options.bandwidth - changed = True - if options.loss is not None and 0 <= options.loss != self.loss: - self.loss = options.loss - changed = True - if options.dup is not None and 0 <= options.dup != self.dup: - self.dup = options.dup - changed = True - if options.jitter is not None and 0 <= options.jitter != self.jitter: - self.jitter = options.jitter - changed = True - if options.buffer is not None and 0 <= options.buffer != self.buffer: - self.buffer = options.buffer - changed = True - return changed - - def is_clear(self) -> bool: - """ - Checks if the current option values represent a clear state. - - :return: True if the current values should clear, False otherwise - """ - clear = self.delay is None or self.delay <= 0 - clear &= self.jitter is None or self.jitter <= 0 - clear &= self.loss is None or self.loss <= 0 - clear &= self.dup is None or self.dup <= 0 - clear &= self.bandwidth is None or self.bandwidth <= 0 - clear &= self.buffer is None or self.buffer <= 0 - return clear - - def __eq__(self, other: Any) -> bool: - """ - Custom logic to check if this link options is equivalent to another. - - :param other: other object to check - :return: True if they are both link options with the same values, - False otherwise - """ - if not isinstance(other, LinkOptions): - return False - return ( - self.delay == other.delay - and self.jitter == other.jitter - and self.loss == other.loss - and self.dup == other.dup - and self.bandwidth == other.bandwidth - and self.buffer == other.buffer - ) - - -@dataclass -class LinkData: - """ - Represents all data associated with a link. - """ - - message_type: MessageFlags = None - type: LinkTypes = LinkTypes.WIRED - label: str = None - node1_id: int = None - node2_id: int = None - network_id: int = None - iface1: InterfaceData = None - iface2: InterfaceData = None - options: LinkOptions = LinkOptions() - color: str = None - source: str = None - - -class IpPrefixes: - """ - Convenience class to help generate IP4 and IP6 addresses for nodes within CORE. - """ - - def __init__(self, ip4_prefix: str = None, ip6_prefix: str = None) -> None: - """ - Creates an IpPrefixes object. - - :param ip4_prefix: ip4 prefix to use for generation - :param ip6_prefix: ip6 prefix to use for generation - :raises ValueError: when both ip4 and ip6 prefixes have not been provided - """ - if not ip4_prefix and not ip6_prefix: - raise ValueError("ip4 or ip6 must be provided") - - self.ip4 = None - if ip4_prefix: - self.ip4 = netaddr.IPNetwork(ip4_prefix) - self.ip6 = None - if ip6_prefix: - self.ip6 = netaddr.IPNetwork(ip6_prefix) - - def ip4_address(self, node_id: int) -> str: - """ - Convenience method to return the IP4 address for a node. 
- - :param node_id: node id to get IP4 address for - :return: IP4 address or None - """ - if not self.ip4: - raise ValueError("ip4 prefixes have not been set") - return str(self.ip4[node_id]) - - def ip6_address(self, node_id: int) -> str: - """ - Convenience method to return the IP6 address for a node. - - :param node_id: node id to get IP6 address for - :return: IP4 address or None - """ - if not self.ip6: - raise ValueError("ip6 prefixes have not been set") - return str(self.ip6[node_id]) - - def gen_iface(self, node_id: int, name: str = None, mac: str = None): - """ - Creates interface data for linking nodes, using the nodes unique id for - generation, along with a random mac address, unless provided. - - :param node_id: node id to create an interface for - :param name: name to set for interface, default is eth{id} - :param mac: mac address to use for this interface, default is random - generation - :return: new interface data for the provided node - """ - # generate ip4 data - ip4 = None - ip4_mask = None - if self.ip4: - ip4 = self.ip4_address(node_id) - ip4_mask = self.ip4.prefixlen - - # generate ip6 data - ip6 = None - ip6_mask = None - if self.ip6: - ip6 = self.ip6_address(node_id) - ip6_mask = self.ip6.prefixlen - - # random mac - if not mac: - mac = utils.random_mac() - - return InterfaceData( - name=name, ip4=ip4, ip4_mask=ip4_mask, ip6=ip6, ip6_mask=ip6_mask, mac=mac - ) - - def create_iface( - self, node: "CoreNode", name: str = None, mac: str = None - ) -> InterfaceData: - """ - Creates interface data for linking nodes, using the nodes unique id for - generation, along with a random mac address, unless provided. - - :param node: node to create interface for - :param name: name to set for interface, default is eth{id} - :param mac: mac address to use for this interface, default is random - generation - :return: new interface data for the provided node - """ - iface_data = self.gen_iface(node.id, name, mac) - iface_data.id = node.next_iface_id() - return iface_data diff --git a/daemon/core/emulator/distributed.py b/daemon/core/emulator/distributed.py deleted file mode 100644 index 1c0d3c92..00000000 --- a/daemon/core/emulator/distributed.py +++ /dev/null @@ -1,266 +0,0 @@ -""" -Defines distributed server functionality. -""" - -import logging -import os -import threading -from collections import OrderedDict -from pathlib import Path -from tempfile import NamedTemporaryFile -from typing import TYPE_CHECKING, Callable - -import netaddr -from fabric import Connection -from invoke import UnexpectedExit - -from core import utils -from core.emulator.links import CoreLink -from core.errors import CoreCommandError, CoreError -from core.executables import get_requirements -from core.nodes.interface import GreTap -from core.nodes.network import CoreNetwork, CtrlNet - -logger = logging.getLogger(__name__) - -if TYPE_CHECKING: - from core.emulator.session import Session - -LOCK = threading.Lock() -CMD_HIDE = True - - -class DistributedServer: - """ - Provides distributed server interactions. - """ - - def __init__(self, name: str, host: str) -> None: - """ - Create a DistributedServer instance. 
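# ---------------------------------------------------------------------------
# Illustrative aside (not part of the diff): the 9.x-style IpPrefixes above
# derives addresses from a plain node id (unlike the legacy emudata version,
# which takes a node object). The prefixes and node id are hypothetical.
from core.emulator.data import IpPrefixes

prefixes = IpPrefixes(ip4_prefix="10.0.0.0/24", ip6_prefix="2001::/64")
iface_data = prefixes.gen_iface(node_id=3, name="eth0")
print(iface_data.get_ips())   # ['10.0.0.3/24', '2001::3/64']
# ---------------------------------------------------------------------------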
- - :param name: convenience name to associate with host - :param host: host to connect to - """ - self.name: str = name - self.host: str = host - self.conn: Connection = Connection(host, user="root") - self.lock: threading.Lock = threading.Lock() - - def remote_cmd( - self, cmd: str, env: dict[str, str] = None, cwd: str = None, wait: bool = True - ) -> str: - """ - Run command remotely using server connection. - - :param cmd: command to run - :param env: environment for remote command, default is None - :param cwd: directory to run command in, defaults to None, which is the - user's home directory - :param wait: True to wait for status, False to background process - :return: stdout when success - :raises CoreCommandError: when a non-zero exit status occurs - """ - - replace_env = env is not None - if not wait: - cmd += " &" - logger.debug( - "remote cmd server(%s) cwd(%s) wait(%s): %s", self.host, cwd, wait, cmd - ) - try: - if cwd is None: - result = self.conn.run( - cmd, hide=CMD_HIDE, env=env, replace_env=replace_env - ) - else: - with self.conn.cd(cwd): - result = self.conn.run( - cmd, hide=CMD_HIDE, env=env, replace_env=replace_env - ) - return result.stdout.strip() - except UnexpectedExit as e: - stdout, stderr = e.streams_for_display() - raise CoreCommandError(e.result.exited, cmd, stdout, stderr) - - def remote_put(self, src_path: Path, dst_path: Path) -> None: - """ - Push file to remote server. - - :param src_path: source file to push - :param dst_path: destination file location - :return: nothing - """ - with self.lock: - self.conn.put(str(src_path), str(dst_path)) - - def remote_put_temp(self, dst_path: Path, data: str) -> None: - """ - Remote push file contents to a remote server, using a temp file as an - intermediate step. - - :param dst_path: file destination for data - :param data: data to store in remote file - :return: nothing - """ - with self.lock: - temp = NamedTemporaryFile(delete=False) - temp.write(data.encode()) - temp.close() - self.conn.put(temp.name, str(dst_path)) - os.unlink(temp.name) - - -class DistributedController: - """ - Provides logic for dealing with remote tunnels and distributed servers. - """ - - def __init__(self, session: "Session") -> None: - """ - Create - - :param session: session - """ - self.session: "Session" = session - self.servers: dict[str, DistributedServer] = OrderedDict() - self.tunnels: dict[int, tuple[GreTap, GreTap]] = {} - self.address: str = self.session.options.get("distributed_address") - - def add_server(self, name: str, host: str) -> None: - """ - Add distributed server configuration. - - :param name: distributed server name - :param host: distributed server host address - :return: nothing - :raises CoreError: when there is an error validating server - """ - server = DistributedServer(name, host) - for requirement in get_requirements(self.session.use_ovs()): - try: - server.remote_cmd(f"which {requirement}") - except CoreCommandError: - raise CoreError( - f"server({server.name}) failed validation for " - f"command({requirement})" - ) - self.servers[name] = server - cmd = f"mkdir -p {self.session.directory}" - server.remote_cmd(cmd) - - def execute(self, func: Callable[[DistributedServer], None]) -> None: - """ - Convenience for executing logic against all distributed servers. 
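# ---------------------------------------------------------------------------
# Illustrative aside (not part of the diff): direct use of the
# DistributedServer wrapper above. It opens a Fabric SSH connection as root,
# so this sketch assumes passwordless root SSH to the (hypothetical) host.
from pathlib import Path

from core.emulator.distributed import DistributedServer

server = DistributedServer("core2", "192.168.0.2")
hostname = server.remote_cmd("hostname")
server.remote_put_temp(Path("/tmp/hello.txt"), "hello from the controller\n")
# ---------------------------------------------------------------------------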
- - :param func: function to run, that takes a DistributedServer as a parameter - :return: nothing - """ - for name in self.servers: - server = self.servers[name] - func(server) - - def shutdown(self) -> None: - """ - Shutdown logic for dealing with distributed tunnels and server session - directories. - - :return: nothing - """ - # shutdown all tunnels - for key in self.tunnels: - tunnels = self.tunnels[key] - for tunnel in tunnels: - tunnel.shutdown() - # remove all remote session directories - for name in self.servers: - server = self.servers[name] - cmd = f"rm -rf {self.session.directory}" - server.remote_cmd(cmd) - # clear tunnels - self.tunnels.clear() - - def start(self) -> None: - """ - Start distributed network tunnels for control networks. - - :return: nothing - """ - mtu = self.session.options.get_int("mtu") - for node in self.session.nodes.values(): - if not isinstance(node, CtrlNet) or node.serverintf is not None: - continue - for name in self.servers: - server = self.servers[name] - self.create_gre_tunnel(node, server, mtu, True) - - def create_gre_tunnels(self, core_link: CoreLink) -> None: - """ - Creates gre tunnels for a core link with a ptp network connection. - - :param core_link: core link to create gre tunnel for - :return: nothing - """ - if not self.servers: - return - if not core_link.ptp: - raise CoreError( - "attempted to create gre tunnel for core link without a ptp network" - ) - mtu = self.session.options.get_int("mtu") - for server in self.servers.values(): - self.create_gre_tunnel(core_link.ptp, server, mtu, True) - - def create_gre_tunnel( - self, node: CoreNetwork, server: DistributedServer, mtu: int, start: bool - ) -> tuple[GreTap, GreTap]: - """ - Create gre tunnel using a pair of gre taps between the local and remote server. - - :param node: node to create gre tunnel for - :param server: server to create tunnel for - :param mtu: mtu for gre taps - :param start: True to start gre taps, False otherwise - :return: local and remote gre taps created for tunnel - """ - host = server.host - key = self.tunnel_key(node.id, netaddr.IPAddress(host).value) - tunnel = self.tunnels.get(key) - if tunnel is not None: - return tunnel - # local to server - logger.info("local tunnel node(%s) to remote(%s) key(%s)", node.name, host, key) - local_tap = GreTap(self.session, host, key=key, mtu=mtu) - if start: - local_tap.startup() - local_tap.net_client.set_iface_master(node.brname, local_tap.localname) - # server to local - logger.info( - "remote tunnel node(%s) to local(%s) key(%s)", node.name, self.address, key - ) - remote_tap = GreTap(self.session, self.address, key=key, server=server, mtu=mtu) - if start: - remote_tap.startup() - remote_tap.net_client.set_iface_master(node.brname, remote_tap.localname) - # save tunnels for shutdown - tunnel = (local_tap, remote_tap) - self.tunnels[key] = tunnel - return tunnel - - def tunnel_key(self, node1_id: int, node2_id: int) -> int: - """ - Compute a 32-bit key used to uniquely identify a GRE tunnel. - The hash(n1num), hash(n2num) values are used, so node numbers may be - None or string values (used for e.g. "ctrlnet"). 
- - :param node1_id: node one id - :param node2_id: node two id - :return: tunnel key for the node pair - """ - logger.debug("creating tunnel key for: %s, %s", node1_id, node2_id) - key = ( - (self.session.id << 16) - ^ utils.hashkey(node1_id) - ^ (utils.hashkey(node2_id) << 8) - ) - return key & 0xFFFFFFFF diff --git a/daemon/core/emulator/emudata.py b/daemon/core/emulator/emudata.py new file mode 100644 index 00000000..2d70d367 --- /dev/null +++ b/daemon/core/emulator/emudata.py @@ -0,0 +1,223 @@ +from core.enumerations import LinkTypes +from core.misc.ipaddress import Ipv4Prefix +from core.misc.ipaddress import Ipv6Prefix +from core.misc.ipaddress import MacAddress + + +class NodeOptions(object): + """ + Options for creating and updating nodes within core. + """ + + def __init__(self, name=None, model="router"): + """ + Create a NodeOptions object. + + :param str name: name of node, defaults to node class name postfix with its id + :param str model: defines services for default and physical nodes, defaults to "router" + """ + self.name = name + self.model = model + self.canvas = None + self.icon = None + self.opaque = None + self.services = [] + self.x = None + self.y = None + self.lat = None + self.lon = None + self.alt = None + self.emulation_id = None + self.emulation_server = None + + def set_position(self, x, y): + """ + Convenience method for setting position. + + :param float x: x position + :param float y: y position + :return: nothing + """ + self.x = x + self.y = y + + def set_location(self, lat, lon, alt): + """ + Convenience method for setting location. + + :param float lat: latitude + :param float lon: longitude + :param float alt: altitude + :return: nothing + """ + self.lat = lat + self.lon = lon + self.alt = alt + + +class LinkOptions(object): + """ + Options for creating and updating links within core. + """ + + def __init__(self, _type=LinkTypes.WIRED): + """ + Create a LinkOptions object. + + :param core.enumerations.LinkTypes _type: type of link, defaults to wired + """ + self.type = _type + self.session = None + self.delay = None + self.bandwidth = None + self.per = None + self.dup = None + self.jitter = None + self.mer = None + self.burst = None + self.mburst = None + self.gui_attributes = None + self.unidirectional = None + self.emulation_id = None + self.network_id = None + self.key = None + self.opaque = None + + +class IpPrefixes(object): + """ + Convenience class to help generate IP4 and IP6 addresses for nodes within CORE. + """ + + def __init__(self, ip4_prefix=None, ip6_prefix=None): + """ + Creates an IpPrefixes object. + + :param str ip4_prefix: ip4 prefix to use for generation + :param str ip6_prefix: ip6 prefix to use for generation + :raises ValueError: when both ip4 and ip6 prefixes have not been provided + """ + if not ip4_prefix and not ip6_prefix: + raise ValueError("ip4 or ip6 must be provided") + + self.ip4 = None + if ip4_prefix: + self.ip4 = Ipv4Prefix(ip4_prefix) + self.ip6 = None + if ip6_prefix: + self.ip6 = Ipv6Prefix(ip6_prefix) + + def ip4_address(self, node): + """ + Convenience method to return the IP4 address for a node. + + :param node: node to get IP4 address for + :return: IP4 address or None + :rtype: str + """ + if not self.ip4: + raise ValueError("ip4 prefixes have not been set") + return str(self.ip4.addr(node.objid)) + + def ip6_address(self, node): + """ + Convenience method to return the IP6 address for a node. 
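# ---------------------------------------------------------------------------
# Illustrative aside (not part of the diff): a stand-alone re-creation of the
# 32-bit GRE tunnel key computed by DistributedController.tunnel_key() above,
# with Python's built-in hash() standing in for utils.hashkey() purely for
# demonstration (the real helper accepts ints or strings such as "ctrlnet").
def demo_tunnel_key(session_id, node1_id, node2_id, hashkey=hash):
    key = (session_id << 16) ^ hashkey(node1_id) ^ (hashkey(node2_id) << 8)
    return key & 0xFFFFFFFF

print(hex(demo_tunnel_key(1, 2, 9001)))
# ---------------------------------------------------------------------------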
+ + :param node: node to get IP6 address for + :return: IP4 address or None + :rtype: str + """ + if not self.ip6: + raise ValueError("ip6 prefixes have not been set") + return str(self.ip6.addr(node.objid)) + + def create_interface(self, node, name=None, mac=None): + """ + Creates interface data for linking nodes, using the nodes unique id for generation, along with a random + mac address, unless provided. + + :param core.coreobj.PyCoreNode node: node to create interface for + :param str name: name to set for interface, default is eth{id} + :param str mac: mac address to use for this interface, default is random generation + :return: new interface data for the provided node + :rtype: InterfaceData + """ + # interface id + inteface_id = node.newifindex() + + # generate ip4 data + ip4 = None + ip4_mask = None + if self.ip4: + ip4 = str(self.ip4.addr(node.objid)) + ip4_mask = self.ip4.prefixlen + + # generate ip6 data + ip6 = None + ip6_mask = None + if self.ip6: + ip6 = str(self.ip6.addr(node.objid)) + ip6_mask = self.ip6.prefixlen + + # random mac + if not mac: + mac = MacAddress.random() + + return InterfaceData( + _id=inteface_id, + name=name, + ip4=ip4, + ip4_mask=ip4_mask, + ip6=ip6, + ip6_mask=ip6_mask, + mac=mac + ) + + +class InterfaceData(object): + """ + Convenience class for storing interface data. + """ + + def __init__(self, _id, name, mac, ip4, ip4_mask, ip6, ip6_mask): + """ + Creates an InterfaceData object. + + :param int _id: interface id + :param str name: name for interface + :param core.misc.ipaddress.MacAddress mac: mac address + :param str ip4: ipv4 address + :param int ip4_mask: ipv4 bit mask + :param str ip6: ipv6 address + :param int ip6_mask: ipv6 bit mask + """ + self.id = _id + self.name = name + self.mac = mac + self.ip4 = ip4 + self.ip4_mask = ip4_mask + self.ip6 = ip6 + self.ip6_mask = ip6_mask + + def has_ip4(self): + return all([self.ip4, self.ip4_mask]) + + def has_ip6(self): + return all([self.ip6, self.ip6_mask]) + + def ip4_address(self): + if self.has_ip4(): + return "%s/%s" % (self.ip4, self.ip4_mask) + else: + return None + + def ip6_address(self): + if self.has_ip6(): + return "%s/%s" % (self.ip6, self.ip6_mask) + else: + return None + + def get_addresses(self): + ip4 = self.ip4_address() + ip6 = self.ip6_address() + return [i for i in [ip4, ip6] if i] diff --git a/daemon/core/emulator/enumerations.py b/daemon/core/emulator/enumerations.py deleted file mode 100644 index 96fb919b..00000000 --- a/daemon/core/emulator/enumerations.py +++ /dev/null @@ -1,145 +0,0 @@ -""" -Common enumerations used within CORE. -""" - -from enum import Enum - - -class MessageFlags(Enum): - """ - CORE message flags. - """ - - NONE = 0x00 - ADD = 0x01 - DELETE = 0x02 - CRI = 0x04 - LOCAL = 0x08 - STRING = 0x10 - TEXT = 0x20 - TTY = 0x40 - - -class ConfigFlags(Enum): - """ - Configuration flags. - """ - - NONE = 0x00 - REQUEST = 0x01 - UPDATE = 0x02 - RESET = 0x03 - - -class NodeTypes(Enum): - """ - Node types. - """ - - DEFAULT = 0 - PHYSICAL = 1 - SWITCH = 4 - HUB = 5 - WIRELESS_LAN = 6 - RJ45 = 7 - TUNNEL = 8 - EMANE = 10 - TAP_BRIDGE = 11 - PEER_TO_PEER = 12 - CONTROL_NET = 13 - DOCKER = 15 - LXC = 16 - WIRELESS = 17 - PODMAN = 18 - - -class LinkTypes(Enum): - """ - Link types. - """ - - WIRELESS = 0 - WIRED = 1 - - -class RegisterTlvs(Enum): - """ - Register type, length, value enumerations. 
- """ - - WIRELESS = 0x01 - MOBILITY = 0x02 - UTILITY = 0x03 - EXECUTE_SERVER = 0x04 - GUI = 0x05 - EMULATION_SERVER = 0x06 - SESSION = 0x0A - - -class ConfigDataTypes(Enum): - """ - Configuration data types. - """ - - UINT8 = 0x01 - UINT16 = 0x02 - UINT32 = 0x03 - UINT64 = 0x04 - INT8 = 0x05 - INT16 = 0x06 - INT32 = 0x07 - INT64 = 0x08 - FLOAT = 0x09 - STRING = 0x0A - BOOL = 0x0B - - -class EventTypes(Enum): - """ - Event types. - """ - - NONE = 0 - DEFINITION_STATE = 1 - CONFIGURATION_STATE = 2 - INSTANTIATION_STATE = 3 - RUNTIME_STATE = 4 - DATACOLLECT_STATE = 5 - SHUTDOWN_STATE = 6 - START = 7 - STOP = 8 - PAUSE = 9 - RESTART = 10 - FILE_OPEN = 11 - FILE_SAVE = 12 - SCHEDULED = 13 - RECONFIGURE = 14 - INSTANTIATION_COMPLETE = 15 - - def should_start(self) -> bool: - return self.value > self.DEFINITION_STATE.value - - def already_collected(self) -> bool: - return self.value >= self.DATACOLLECT_STATE.value - - -class ExceptionLevels(Enum): - """ - Exception levels. - """ - - NONE = 0 - FATAL = 1 - ERROR = 2 - WARNING = 3 - NOTICE = 4 - - -class NetworkPolicy(Enum): - ACCEPT = "ACCEPT" - DROP = "DROP" - - -class TransportType(Enum): - RAW = "raw" - VIRTUAL = "virtual" diff --git a/daemon/core/emulator/hooks.py b/daemon/core/emulator/hooks.py deleted file mode 100644 index ffeeafeb..00000000 --- a/daemon/core/emulator/hooks.py +++ /dev/null @@ -1,145 +0,0 @@ -import logging -import subprocess -from collections.abc import Callable -from pathlib import Path - -from core.emulator.enumerations import EventTypes -from core.errors import CoreError - -logger = logging.getLogger(__name__) - - -class HookManager: - """ - Provides functionality for managing and running script/callback hooks. - """ - - def __init__(self) -> None: - """ - Create a HookManager instance. - """ - self.script_hooks: dict[EventTypes, dict[str, str]] = {} - self.callback_hooks: dict[EventTypes, list[Callable[[], None]]] = {} - - def reset(self) -> None: - """ - Clear all current hooks. - - :return: nothing - """ - self.script_hooks.clear() - self.callback_hooks.clear() - - def add_script_hook(self, state: EventTypes, file_name: str, data: str) -> None: - """ - Add a hook script to run for a given state. - - :param state: state to run hook on - :param file_name: hook file name - :param data: file data - :return: nothing - """ - logger.info("setting state hook: %s - %s", state, file_name) - state_hooks = self.script_hooks.setdefault(state, {}) - if file_name in state_hooks: - raise CoreError( - f"adding duplicate state({state.name}) hook script({file_name})" - ) - state_hooks[file_name] = data - - def delete_script_hook(self, state: EventTypes, file_name: str) -> None: - """ - Delete a script hook from a given state. - - :param state: state to delete script hook from - :param file_name: name of script to delete - :return: nothing - """ - state_hooks = self.script_hooks.get(state, {}) - if file_name not in state_hooks: - raise CoreError( - f"deleting state({state.name}) hook script({file_name}) " - "that does not exist" - ) - del state_hooks[file_name] - - def add_callback_hook( - self, state: EventTypes, hook: Callable[[EventTypes], None] - ) -> None: - """ - Add a hook callback to run for a state. 
- - :param state: state to add hook for - :param hook: callback to run - :return: nothing - """ - hooks = self.callback_hooks.setdefault(state, []) - if hook in hooks: - name = getattr(callable, "__name__", repr(hook)) - raise CoreError( - f"adding duplicate state({state.name}) hook callback({name})" - ) - hooks.append(hook) - - def delete_callback_hook( - self, state: EventTypes, hook: Callable[[EventTypes], None] - ) -> None: - """ - Delete a state hook. - - :param state: state to delete hook for - :param hook: hook to delete - :return: nothing - """ - hooks = self.callback_hooks.get(state, []) - if hook not in hooks: - name = getattr(callable, "__name__", repr(hook)) - raise CoreError( - f"deleting state({state.name}) hook callback({name}) " - "that does not exist" - ) - hooks.remove(hook) - - def run_hooks( - self, state: EventTypes, directory: Path, env: dict[str, str] - ) -> None: - """ - Run all hooks for the current state. - - :param state: state to run hooks for - :param directory: directory to run script hooks within - :param env: environment to run script hooks with - :return: nothing - """ - for state_hooks in self.script_hooks.get(state, {}): - for file_name, data in state_hooks.items(): - logger.info("running hook %s", file_name) - file_path = directory / file_name - log_path = directory / f"{file_name}.log" - try: - with file_path.open("w") as f: - f.write(data) - with log_path.open("w") as f: - args = ["/bin/sh", file_name] - subprocess.check_call( - args, - stdout=f, - stderr=subprocess.STDOUT, - close_fds=True, - cwd=directory, - env=env, - ) - except (OSError, subprocess.CalledProcessError) as e: - raise CoreError( - f"failure running state({state.name}) " - f"hook script({file_name}): {e}" - ) - for hook in self.callback_hooks.get(state, []): - try: - hook() - except Exception as e: - name = getattr(callable, "__name__", repr(hook)) - raise CoreError( - f"failure running state({state.name}) " - f"hook callback({name}): {e}" - ) diff --git a/daemon/core/emulator/links.py b/daemon/core/emulator/links.py deleted file mode 100644 index 5df29d90..00000000 --- a/daemon/core/emulator/links.py +++ /dev/null @@ -1,257 +0,0 @@ -""" -Provides functionality for maintaining information about known links -for a session. -""" - -import logging -from collections.abc import ValuesView -from dataclasses import dataclass -from typing import Optional - -from core.emulator.data import LinkData, LinkOptions -from core.emulator.enumerations import LinkTypes, MessageFlags -from core.errors import CoreError -from core.nodes.base import NodeBase -from core.nodes.interface import CoreInterface -from core.nodes.network import PtpNet - -logger = logging.getLogger(__name__) -LinkKeyType = tuple[int, Optional[int], int, Optional[int]] - - -def create_key( - node1: NodeBase, - iface1: Optional[CoreInterface], - node2: NodeBase, - iface2: Optional[CoreInterface], -) -> LinkKeyType: - """ - Creates a unique key for tracking links. - - :param node1: first node in link - :param iface1: node1 interface - :param node2: second node in link - :param iface2: node2 interface - :return: link key - """ - iface1_id = iface1.id if iface1 else None - iface2_id = iface2.id if iface2 else None - if node1.id < node2.id: - return node1.id, iface1_id, node2.id, iface2_id - else: - return node2.id, iface2_id, node1.id, iface1_id - - -@dataclass -class CoreLink: - """ - Provides a core link data structure. 
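# ---------------------------------------------------------------------------
# Illustrative aside (not part of the diff): create_key() above orders the
# endpoints by node id, so a link is tracked under the same key regardless of
# which side is passed first. A quick check with minimal stand-in objects
# (only the .id attribute is touched, so real CORE nodes are not required).
from types import SimpleNamespace

from core.emulator.links import create_key

n1, n2 = SimpleNamespace(id=1), SimpleNamespace(id=2)
i1, i2 = SimpleNamespace(id=0), SimpleNamespace(id=0)
assert create_key(n1, i1, n2, i2) == (1, 0, 2, 0)
assert create_key(n2, i2, n1, i1) == (1, 0, 2, 0)
# ---------------------------------------------------------------------------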
- """ - - node1: NodeBase - iface1: Optional[CoreInterface] - node2: NodeBase - iface2: Optional[CoreInterface] - ptp: PtpNet = None - label: str = None - color: str = None - - def key(self) -> LinkKeyType: - """ - Retrieve the key for this link. - - :return: link key - """ - return create_key(self.node1, self.iface1, self.node2, self.iface2) - - def is_unidirectional(self) -> bool: - """ - Checks if this link is considered unidirectional, due to current - iface configurations. - - :return: True if unidirectional, False otherwise - """ - unidirectional = False - if self.iface1 and self.iface2: - unidirectional = self.iface1.options != self.iface2.options - return unidirectional - - def options(self) -> LinkOptions: - """ - Retrieve the options for this link. - - :return: options for this link - """ - if self.is_unidirectional(): - options = self.iface1.options - else: - if self.iface1: - options = self.iface1.options - else: - options = self.iface2.options - return options - - def get_data(self, message_type: MessageFlags, source: str = None) -> LinkData: - """ - Create link data for this link. - - :param message_type: link data message type - :param source: source for this data - :return: link data - """ - iface1_data = self.iface1.get_data() if self.iface1 else None - iface2_data = self.iface2.get_data() if self.iface2 else None - return LinkData( - message_type=message_type, - type=LinkTypes.WIRED, - node1_id=self.node1.id, - node2_id=self.node2.id, - iface1=iface1_data, - iface2=iface2_data, - options=self.options(), - label=self.label, - color=self.color, - source=source, - ) - - def get_data_unidirectional(self, source: str = None) -> LinkData: - """ - Create other unidirectional link data. - - :param source: source for this data - :return: unidirectional link data - """ - iface1_data = self.iface1.get_data() if self.iface1 else None - iface2_data = self.iface2.get_data() if self.iface2 else None - return LinkData( - message_type=MessageFlags.NONE, - type=LinkTypes.WIRED, - node1_id=self.node2.id, - node2_id=self.node1.id, - iface1=iface2_data, - iface2=iface1_data, - options=self.iface2.options, - label=self.label, - color=self.color, - source=source, - ) - - -class LinkManager: - """ - Provides core link management. - """ - - def __init__(self) -> None: - """ - Create a LinkManager instance. - """ - self._links: dict[LinkKeyType, CoreLink] = {} - self._node_links: dict[int, dict[LinkKeyType, CoreLink]] = {} - - def add(self, core_link: CoreLink) -> None: - """ - Add a core link to be tracked. - - :param core_link: link to track - :return: nothing - """ - node1, iface1 = core_link.node1, core_link.iface1 - node2, iface2 = core_link.node2, core_link.iface2 - if core_link.key() in self._links: - raise CoreError( - f"node1({node1.name}) iface1({iface1.id}) " - f"node2({node2.name}) iface2({iface2.id}) link already exists" - ) - logger.info( - "adding link from node(%s:%s) to node(%s:%s)", - node1.name, - iface1.name if iface1 else None, - node2.name, - iface2.name if iface2 else None, - ) - self._links[core_link.key()] = core_link - node1_links = self._node_links.setdefault(node1.id, {}) - node1_links[core_link.key()] = core_link - node2_links = self._node_links.setdefault(node2.id, {}) - node2_links[core_link.key()] = core_link - - def delete( - self, - node1: NodeBase, - iface1: Optional[CoreInterface], - node2: NodeBase, - iface2: Optional[CoreInterface], - ) -> CoreLink: - """ - Remove a link from being tracked. 
- - :param node1: first node in link - :param iface1: node1 interface - :param node2: second node in link - :param iface2: node2 interface - :return: removed core link - """ - key = create_key(node1, iface1, node2, iface2) - if key not in self._links: - raise CoreError( - f"node1({node1.name}) iface1({iface1.id}) " - f"node2({node2.name}) iface2({iface2.id}) is not linked" - ) - logger.info( - "deleting link from node(%s:%s) to node(%s:%s)", - node1.name, - iface1.name if iface1 else None, - node2.name, - iface2.name if iface2 else None, - ) - node1_links = self._node_links[node1.id] - node1_links.pop(key) - node2_links = self._node_links[node2.id] - node2_links.pop(key) - return self._links.pop(key) - - def reset(self) -> None: - """ - Resets and clears all tracking information. - - :return: nothing - """ - self._links.clear() - self._node_links.clear() - - def get_link( - self, - node1: NodeBase, - iface1: Optional[CoreInterface], - node2: NodeBase, - iface2: Optional[CoreInterface], - ) -> Optional[CoreLink]: - """ - Retrieve a link for provided values. - - :param node1: first node in link - :param iface1: interface for node1 - :param node2: second node in link - :param iface2: interface for node2 - :return: core link if present, None otherwise - """ - key = create_key(node1, iface1, node2, iface2) - return self._links.get(key) - - def links(self) -> ValuesView[CoreLink]: - """ - Retrieve all known links - - :return: iterator for all known links - """ - return self._links.values() - - def node_links(self, node: NodeBase) -> ValuesView[CoreLink]: - """ - Retrieve all links for a given node. - - :param node: node to get links for - :return: node links - """ - return self._node_links.get(node.id, {}).values() diff --git a/daemon/core/emulator/session.py b/daemon/core/emulator/session.py deleted file mode 100644 index 5a6557ee..00000000 --- a/daemon/core/emulator/session.py +++ /dev/null @@ -1,1453 +0,0 @@ -""" -session.py: defines the Session class used by the core-daemon daemon program -that manages a CORE session. 
-""" - -import logging -import math -import os -import pwd -import shutil -import subprocess -import sys -import tempfile -import threading -import time -from pathlib import Path -from typing import Callable, Optional, TypeVar, Union - -from core import constants, utils -from core.configservice.manager import ConfigServiceManager -from core.emane.emanemanager import EmaneManager, EmaneState -from core.emane.nodes import EmaneNet -from core.emulator.data import ( - ConfigData, - EventData, - ExceptionData, - FileData, - InterfaceData, - LinkData, - LinkOptions, - NodeData, -) -from core.emulator.distributed import DistributedController -from core.emulator.enumerations import ( - EventTypes, - ExceptionLevels, - MessageFlags, - NodeTypes, -) -from core.emulator.links import CoreLink, LinkManager -from core.emulator.sessionconfig import SessionConfig -from core.errors import CoreError -from core.location.event import EventLoop -from core.location.geo import GeoLocation -from core.location.mobility import BasicRangeModel, MobilityManager -from core.nodes.base import CoreNode, CoreNodeBase, NodeBase, NodeOptions, Position -from core.nodes.docker import DockerNode -from core.nodes.interface import DEFAULT_MTU, CoreInterface -from core.nodes.lxd import LxcNode -from core.nodes.network import ( - CtrlNet, - GreTapBridge, - HubNode, - PtpNet, - SwitchNode, - TunnelNode, - WlanNode, -) -from core.nodes.physical import PhysicalNode, Rj45Node -from core.nodes.podman import PodmanNode -from core.nodes.wireless import WirelessNode -from core.plugins.sdt import Sdt -from core.services.coreservices import CoreServices -from core.xml import corexml, corexmldeployment -from core.xml.corexml import CoreXmlReader, CoreXmlWriter - -logger = logging.getLogger(__name__) - -# maps for converting from API call node type values to classes and vice versa -NODES: dict[NodeTypes, type[NodeBase]] = { - NodeTypes.DEFAULT: CoreNode, - NodeTypes.PHYSICAL: PhysicalNode, - NodeTypes.SWITCH: SwitchNode, - NodeTypes.HUB: HubNode, - NodeTypes.WIRELESS_LAN: WlanNode, - NodeTypes.RJ45: Rj45Node, - NodeTypes.TUNNEL: TunnelNode, - NodeTypes.EMANE: EmaneNet, - NodeTypes.TAP_BRIDGE: GreTapBridge, - NodeTypes.PEER_TO_PEER: PtpNet, - NodeTypes.CONTROL_NET: CtrlNet, - NodeTypes.DOCKER: DockerNode, - NodeTypes.LXC: LxcNode, - NodeTypes.WIRELESS: WirelessNode, - NodeTypes.PODMAN: PodmanNode, -} -NODES_TYPE: dict[type[NodeBase], NodeTypes] = {NODES[x]: x for x in NODES} -CTRL_NET_ID: int = 9001 -LINK_COLORS: list[str] = ["green", "blue", "orange", "purple", "turquoise"] -NT: TypeVar = TypeVar("NT", bound=NodeBase) -WIRELESS_TYPE: tuple[type[WlanNode], type[EmaneNet], type[WirelessNode]] = ( - WlanNode, - EmaneNet, - WirelessNode, -) - - -class Session: - """ - CORE session manager. - """ - - def __init__( - self, _id: int, config: dict[str, str] = None, mkdir: bool = True - ) -> None: - """ - Create a Session instance. 
- - :param _id: session id - :param config: session configuration - :param mkdir: flag to determine if a directory should be made - """ - self.id: int = _id - - # define and create session directory when desired - self.directory: Path = Path(tempfile.gettempdir()) / f"pycore.{self.id}" - if mkdir: - self.directory.mkdir() - - self.name: Optional[str] = None - self.file_path: Optional[Path] = None - self.thumbnail: Optional[Path] = None - self.user: Optional[str] = None - self.event_loop: EventLoop = EventLoop() - self.link_colors: dict[int, str] = {} - - # dict of nodes: all nodes and nets - self.nodes: dict[int, NodeBase] = {} - self.nodes_lock: threading.Lock = threading.Lock() - self.link_manager: LinkManager = LinkManager() - - # states and hooks handlers - self.state: EventTypes = EventTypes.DEFINITION_STATE - self.state_time: float = time.monotonic() - self.hooks: dict[EventTypes, list[tuple[str, str]]] = {} - self.state_hooks: dict[EventTypes, list[Callable[[EventTypes], None]]] = {} - self.add_state_hook( - state=EventTypes.RUNTIME_STATE, hook=self.runtime_state_hook - ) - - # handlers for broadcasting information - self.event_handlers: list[Callable[[EventData], None]] = [] - self.exception_handlers: list[Callable[[ExceptionData], None]] = [] - self.node_handlers: list[Callable[[NodeData], None]] = [] - self.link_handlers: list[Callable[[LinkData], None]] = [] - self.file_handlers: list[Callable[[FileData], None]] = [] - self.config_handlers: list[Callable[[ConfigData], None]] = [] - - # session options/metadata - self.options: SessionConfig = SessionConfig(config) - self.metadata: dict[str, str] = {} - - # distributed support and logic - self.distributed: DistributedController = DistributedController(self) - - # initialize session feature helpers - self.location: GeoLocation = GeoLocation() - self.mobility: MobilityManager = MobilityManager(self) - self.services: CoreServices = CoreServices(self) - self.emane: EmaneManager = EmaneManager(self) - self.sdt: Sdt = Sdt(self) - - # config services - self.service_manager: Optional[ConfigServiceManager] = None - - @classmethod - def get_node_class(cls, _type: NodeTypes) -> type[NodeBase]: - """ - Retrieve the class for a given node type. - - :param _type: node type to get class for - :return: node class - """ - node_class = NODES.get(_type) - if node_class is None: - raise CoreError(f"invalid node type: {_type}") - return node_class - - @classmethod - def get_node_type(cls, _class: type[NodeBase]) -> NodeTypes: - """ - Retrieve node type for a given node class. - - :param _class: node class to get a node type for - :return: node type - :raises CoreError: when node type does not exist - """ - node_type = NODES_TYPE.get(_class) - if node_type is None: - raise CoreError(f"invalid node class: {_class}") - return node_type - - def use_ovs(self) -> bool: - return self.options.get_int("ovs") == 1 - - def linked( - self, node1_id: int, node2_id: int, iface1_id: int, iface2_id: int, linked: bool - ) -> None: - """ - Links or unlinks wired core link interfaces from being connected to the same - bridge. 
- - :param node1_id: first node in link - :param node2_id: second node in link - :param iface1_id: node1 interface - :param iface2_id: node2 interface - :param linked: True if interfaces should be connected, False for disconnected - :return: nothing - """ - node1 = self.get_node(node1_id, NodeBase) - node2 = self.get_node(node2_id, NodeBase) - logger.info( - "link node(%s):interface(%s) node(%s):interface(%s) linked(%s)", - node1.name, - iface1_id, - node2.name, - iface2_id, - linked, - ) - iface1 = node1.get_iface(iface1_id) - iface2 = node2.get_iface(iface2_id) - core_link = self.link_manager.get_link(node1, iface1, node2, iface2) - if not core_link: - raise CoreError( - f"there is no link for node({node1.name}):interface({iface1_id}) " - f"node({node2.name}):interface({iface2_id})" - ) - if linked: - core_link.ptp.attach(iface1) - core_link.ptp.attach(iface2) - else: - core_link.ptp.detach(iface1) - core_link.ptp.detach(iface2) - - def add_link( - self, - node1_id: int, - node2_id: int, - iface1_data: InterfaceData = None, - iface2_data: InterfaceData = None, - options: LinkOptions = None, - ) -> tuple[Optional[CoreInterface], Optional[CoreInterface]]: - """ - Add a link between nodes. - - :param node1_id: node one id - :param node2_id: node two id - :param iface1_data: node one interface - data, defaults to none - :param iface2_data: node two interface - data, defaults to none - :param options: data for creating link, - defaults to no options - :return: tuple of created core interfaces, depending on link - """ - options = options if options else LinkOptions() - # set mtu - mtu = self.options.get_int("mtu") or DEFAULT_MTU - if iface1_data: - iface1_data.mtu = mtu - if iface2_data: - iface2_data.mtu = mtu - node1 = self.get_node(node1_id, NodeBase) - node2 = self.get_node(node2_id, NodeBase) - # check for invalid linking - if ( - isinstance(node1, WIRELESS_TYPE) - and isinstance(node2, WIRELESS_TYPE) - or isinstance(node1, WIRELESS_TYPE) - and not isinstance(node2, CoreNodeBase) - or not isinstance(node1, CoreNodeBase) - and isinstance(node2, WIRELESS_TYPE) - ): - raise CoreError(f"cannot link node({type(node1)}) node({type(node2)})") - # custom links - iface1 = None - iface2 = None - if isinstance(node1, (WlanNode, WirelessNode)): - iface2 = self._add_wlan_link(node2, iface2_data, node1) - elif isinstance(node2, (WlanNode, WirelessNode)): - iface1 = self._add_wlan_link(node1, iface1_data, node2) - elif isinstance(node1, EmaneNet) and isinstance(node2, CoreNode): - iface2 = self._add_emane_link(node2, iface2_data, node1) - elif isinstance(node2, EmaneNet) and isinstance(node1, CoreNode): - iface1 = self._add_emane_link(node1, iface1_data, node2) - else: - iface1, iface2 = self._add_wired_link( - node1, node2, iface1_data, iface2_data, options - ) - # configure tunnel nodes - key = options.key - if isinstance(node1, TunnelNode): - logger.info("setting tunnel key for: %s", node1.name) - node1.setkey(key, iface1_data) - if isinstance(node2, TunnelNode): - logger.info("setting tunnel key for: %s", node2.name) - node2.setkey(key, iface2_data) - self.sdt.add_link(node1_id, node2_id) - return iface1, iface2 - - def _add_wlan_link( - self, - node: NodeBase, - iface_data: InterfaceData, - net: Union[WlanNode, WirelessNode], - ) -> CoreInterface: - """ - Create a wlan link. 
- - :param node: node to link to wlan network - :param iface_data: data to create interface with - :param net: wlan network to link to - :return: interface created for node - """ - # create interface - iface = node.create_iface(iface_data) - # attach to wlan - net.attach(iface) - # track link - core_link = CoreLink(node, iface, net, None) - self.link_manager.add(core_link) - return iface - - def _add_emane_link( - self, node: CoreNode, iface_data: InterfaceData, net: EmaneNet - ) -> CoreInterface: - """ - Create am emane link. - - :param node: node to link to emane network - :param iface_data: data to create interface with - :param net: emane network to link to - :return: interface created for node - """ - # create iface tuntap - iface = net.create_tuntap(node, iface_data) - # track link - core_link = CoreLink(node, iface, net, None) - self.link_manager.add(core_link) - return iface - - def _add_wired_link( - self, - node1: NodeBase, - node2: NodeBase, - iface1_data: InterfaceData = None, - iface2_data: InterfaceData = None, - options: LinkOptions = None, - ) -> tuple[CoreInterface, CoreInterface]: - """ - Create a wired link between two nodes. - - :param node1: first node to be linked - :param node2: second node to be linked - :param iface1_data: data to create interface for node1 - :param iface2_data: data to create interface for node2 - :param options: options to configure interfaces with - :return: interfaces created for both nodes - """ - # create interfaces - iface1 = node1.create_iface(iface1_data, options) - iface2 = node2.create_iface(iface2_data, options) - # join and attach to ptp bridge - ptp = self.create_node(PtpNet, self.state.should_start()) - ptp.attach(iface1) - ptp.attach(iface2) - # track link - core_link = CoreLink(node1, iface1, node2, iface2, ptp) - self.link_manager.add(core_link) - # setup link for gre tunnels if needed - if ptp.up: - self.distributed.create_gre_tunnels(core_link) - return iface1, iface2 - - def delete_link( - self, node1_id: int, node2_id: int, iface1_id: int = None, iface2_id: int = None - ) -> None: - """ - Delete a link between nodes. - - :param node1_id: node one id - :param node2_id: node two id - :param iface1_id: interface id for node one - :param iface2_id: interface id for node two - :return: nothing - :raises core.CoreError: when no common network is found for link being deleted - """ - node1 = self.get_node(node1_id, NodeBase) - node2 = self.get_node(node2_id, NodeBase) - logger.info( - "deleting link node(%s):interface(%s) node(%s):interface(%s)", - node1.name, - iface1_id, - node2.name, - iface2_id, - ) - iface1 = None - iface2 = None - if isinstance(node1, (WlanNode, WirelessNode)): - iface2 = node2.delete_iface(iface2_id) - node1.detach(iface2) - elif isinstance(node2, (WlanNode, WirelessNode)): - iface1 = node1.delete_iface(iface1_id) - node2.detach(iface1) - elif isinstance(node1, EmaneNet): - iface2 = node2.delete_iface(iface2_id) - node1.detach(iface2) - elif isinstance(node2, EmaneNet): - iface1 = node1.delete_iface(iface1_id) - node2.detach(iface1) - else: - iface1 = node1.delete_iface(iface1_id) - iface2 = node2.delete_iface(iface2_id) - core_link = self.link_manager.delete(node1, iface1, node2, iface2) - if core_link.ptp: - self.delete_node(core_link.ptp.id) - self.sdt.delete_link(node1_id, node2_id) - - def update_link( - self, - node1_id: int, - node2_id: int, - iface1_id: int = None, - iface2_id: int = None, - options: LinkOptions = None, - ) -> None: - """ - Update link information between nodes. 
- - :param node1_id: node one id - :param node2_id: node two id - :param iface1_id: interface id for node one - :param iface2_id: interface id for node two - :param options: data to update link with - :return: nothing - :raises core.CoreError: when updating a wireless type link, when there is a - unknown link between networks - """ - if not options: - options = LinkOptions() - node1 = self.get_node(node1_id, NodeBase) - node2 = self.get_node(node2_id, NodeBase) - logger.info( - "update link node(%s):interface(%s) node(%s):interface(%s)", - node1.name, - iface1_id, - node2.name, - iface2_id, - ) - iface1 = node1.get_iface(iface1_id) if iface1_id is not None else None - iface2 = node2.get_iface(iface2_id) if iface2_id is not None else None - core_link = self.link_manager.get_link(node1, iface1, node2, iface2) - if not core_link: - raise CoreError( - f"there is no link for node({node1.name}):interface({iface1_id}) " - f"node({node2.name}):interface({iface2_id})" - ) - if iface1: - iface1.options.update(options) - iface1.set_config() - if iface2 and not options.unidirectional: - iface2.options.update(options) - iface2.set_config() - - def next_node_id(self) -> int: - """ - Find the next valid node id, starting from 1. - - :return: next node id - """ - _id = 1 - while True: - if _id not in self.nodes: - break - _id += 1 - return _id - - def add_node( - self, - _class: type[NT], - _id: int = None, - name: str = None, - server: str = None, - position: Position = None, - options: NodeOptions = None, - ) -> NT: - """ - Add a node to the session, based on the provided node data. - - :param _class: node class to create - :param _id: id for node, defaults to None for generated id - :param name: name to assign to node - :param server: distributed server for node, if desired - :param position: geo or x/y/z position to set - :param options: options to create node with - :return: created node - :raises core.CoreError: when an invalid node type is given - """ - # set node start based on current session state, override and check when rj45 - start = self.state.should_start() - enable_rj45 = self.options.get_int("enablerj45") == 1 - if _class == Rj45Node and not enable_rj45: - start = False - # generate options if not provided - options = options if options else _class.create_options() - # verify distributed server - dist_server = None - if server is not None: - dist_server = self.distributed.servers.get(server) - if not dist_server: - raise CoreError(f"invalid distributed server: {server}") - # create node - node = self.create_node(_class, start, _id, name, dist_server, options) - # set node position - position = position or Position() - if position.has_geo(): - self.set_node_geo(node, position.lon, position.lat, position.alt) - else: - self.set_node_pos(node, position.x, position.y) - # setup default wlan - if isinstance(node, WlanNode): - self.mobility.set_model_config(node.id, BasicRangeModel.name) - # boot core nodes after runtime - if self.is_running() and isinstance(node, CoreNode): - self.add_remove_control_iface(node, remove=False) - self.boot_node(node) - self.sdt.add_node(node) - return node - - def set_node_pos(self, node: NodeBase, x: float, y: float) -> None: - node.setposition(x, y, None) - self.sdt.edit_node( - node, node.position.lon, node.position.lat, node.position.alt - ) - - def set_node_geo(self, node: NodeBase, lon: float, lat: float, alt: float) -> None: - x, y, _ = self.location.getxyz(lat, lon, alt) - if math.isinf(x) or math.isinf(y): - raise CoreError( - f"invalid geo for current 
reference/scale: {lon},{lat},{alt}" - ) - node.setposition(x, y, None) - node.position.set_geo(lon, lat, alt) - self.sdt.edit_node(node, lon, lat, alt) - - def open_xml(self, file_path: Path, start: bool = False) -> None: - """ - Import a session from the EmulationScript XML format. - - :param file_path: xml file to load session from - :param start: instantiate session if true, false otherwise - :return: nothing - """ - logger.info("opening xml: %s", file_path) - # clear out existing session - self.clear() - # set state and read xml - state = EventTypes.CONFIGURATION_STATE if start else EventTypes.DEFINITION_STATE - self.set_state(state) - self.name = file_path.name - self.file_path = file_path - CoreXmlReader(self).read(file_path) - # start session if needed - if start: - self.set_state(EventTypes.INSTANTIATION_STATE) - self.instantiate() - - def save_xml(self, file_path: Path) -> None: - """ - Export a session to the EmulationScript XML format. - - :param file_path: file name to write session xml to - :return: nothing - """ - CoreXmlWriter(self).write(file_path) - - def add_hook( - self, state: EventTypes, file_name: str, data: str, src_name: str = None - ) -> None: - """ - Store a hook from a received file message. - - :param state: when to run hook - :param file_name: file name for hook - :param data: hook data - :param src_name: source name - :return: nothing - """ - logger.info( - "setting state hook: %s - %s source(%s)", state, file_name, src_name - ) - hook = file_name, data - state_hooks = self.hooks.setdefault(state, []) - state_hooks.append(hook) - - # immediately run a hook if it is in the current state - if self.state == state: - logger.info("immediately running new state hook") - self.run_hook(hook) - - def clear(self) -> None: - """ - Clear all CORE session data. (nodes, hooks, etc) - - :return: nothing - """ - self.emane.shutdown() - self.delete_nodes() - self.link_manager.reset() - self.distributed.shutdown() - self.hooks.clear() - self.emane.reset() - self.emane.config_reset() - self.location.reset() - self.services.reset() - self.mobility.config_reset() - self.link_colors.clear() - - def set_location(self, lat: float, lon: float, alt: float, scale: float) -> None: - """ - Set session geospatial location. - - :param lat: latitude - :param lon: longitude - :param alt: altitude - :param scale: reference scale - :return: nothing - """ - self.location.setrefgeo(lat, lon, alt) - self.location.refscale = scale - - def shutdown(self) -> None: - """ - Shutdown all session nodes and remove the session directory. - """ - if self.state == EventTypes.SHUTDOWN_STATE: - logger.info("session(%s) state(%s) already shutdown", self.id, self.state) - else: - logger.info("session(%s) state(%s) shutting down", self.id, self.state) - self.set_state(EventTypes.SHUTDOWN_STATE, send_event=True) - # clear out current core session - self.clear() - # shutdown sdt - self.sdt.shutdown() - # remove this sessions working directory - preserve = self.options.get_int("preservedir") == 1 - if not preserve: - shutil.rmtree(self.directory, ignore_errors=True) - - def broadcast_event(self, event_data: EventData) -> None: - """ - Handle event data that should be provided to event handler. - - :param event_data: event data to send out - :return: nothing - """ - for handler in self.event_handlers: - handler(event_data) - - def broadcast_exception(self, exception_data: ExceptionData) -> None: - """ - Handle exception data that should be provided to exception handlers. 
- - :param exception_data: exception data to send out - :return: nothing - """ - for handler in self.exception_handlers: - handler(exception_data) - - def broadcast_node( - self, - node: NodeBase, - message_type: MessageFlags = MessageFlags.NONE, - source: str = None, - ) -> None: - """ - Handle node data that should be provided to node handlers. - - :param node: node to broadcast - :param message_type: type of message to broadcast, None by default - :param source: source of broadcast, None by default - :return: nothing - """ - node_data = NodeData(node=node, message_type=message_type, source=source) - for handler in self.node_handlers: - handler(node_data) - - def broadcast_file(self, file_data: FileData) -> None: - """ - Handle file data that should be provided to file handlers. - - :param file_data: file data to send out - :return: nothing - """ - for handler in self.file_handlers: - handler(file_data) - - def broadcast_config(self, config_data: ConfigData) -> None: - """ - Handle config data that should be provided to config handlers. - - :param config_data: config data to send out - :return: nothing - """ - for handler in self.config_handlers: - handler(config_data) - - def broadcast_link(self, link_data: LinkData) -> None: - """ - Handle link data that should be provided to link handlers. - - :param link_data: link data to send out - :return: nothing - """ - for handler in self.link_handlers: - handler(link_data) - - def set_state(self, state: EventTypes, send_event: bool = False) -> None: - """ - Set the session's current state. - - :param state: state to set to - :param send_event: if true, generate core API event messages - :return: nothing - """ - if self.state == state: - return - self.state = state - self.state_time = time.monotonic() - logger.info("changing session(%s) to state %s", self.id, state.name) - self.run_hooks(state) - self.run_state_hooks(state) - if send_event: - event_data = EventData(event_type=state, time=str(time.monotonic())) - self.broadcast_event(event_data) - - def run_hooks(self, state: EventTypes) -> None: - """ - Run hook scripts upon changing states. If hooks is not specified, run all hooks - in the given state. - - :param state: state to run hooks for - :return: nothing - """ - hooks = self.hooks.get(state, []) - for hook in hooks: - self.run_hook(hook) - - def run_hook(self, hook: tuple[str, str]) -> None: - """ - Run a hook. - - :param hook: hook to run - :return: nothing - """ - file_name, data = hook - logger.info("running hook %s", file_name) - file_path = self.directory / file_name - log_path = self.directory / f"{file_name}.log" - try: - with file_path.open("w") as f: - f.write(data) - with log_path.open("w") as f: - args = ["/bin/sh", file_name] - subprocess.check_call( - args, - stdout=f, - stderr=subprocess.STDOUT, - close_fds=True, - cwd=self.directory, - env=self.get_environment(), - ) - except (OSError, subprocess.CalledProcessError): - logger.exception("error running hook: %s", file_path) - - def run_state_hooks(self, state: EventTypes) -> None: - """ - Run state hooks. 
- - :param state: state to run hooks for - :return: nothing - """ - for hook in self.state_hooks.get(state, []): - self.run_state_hook(state, hook) - - def run_state_hook(self, state: EventTypes, hook: Callable[[EventTypes], None]): - try: - hook(state) - except Exception: - message = f"exception occurred when running {state.name} state hook: {hook}" - logger.exception(message) - self.exception(ExceptionLevels.ERROR, "Session.run_state_hooks", message) - - def add_state_hook( - self, state: EventTypes, hook: Callable[[EventTypes], None] - ) -> None: - """ - Add a state hook. - - :param state: state to add hook for - :param hook: hook callback for the state - :return: nothing - """ - hooks = self.state_hooks.setdefault(state, []) - if hook in hooks: - raise CoreError("attempting to add duplicate state hook") - hooks.append(hook) - if self.state == state: - self.run_state_hook(state, hook) - - def del_state_hook( - self, state: EventTypes, hook: Callable[[EventTypes], None] - ) -> None: - """ - Delete a state hook. - - :param state: state to delete hook for - :param hook: hook to delete - :return: nothing - """ - hooks = self.state_hooks.get(state, []) - if hook in hooks: - hooks.remove(hook) - - def runtime_state_hook(self, _state: EventTypes) -> None: - """ - Runtime state hook check. - - :param _state: state to check - :return: nothing - """ - self.emane.poststartup() - # create session deployed xml - xml_writer = corexml.CoreXmlWriter(self) - corexmldeployment.CoreXmlDeployment(self, xml_writer.scenario) - xml_file_path = self.directory / "session-deployed.xml" - xml_writer.write(xml_file_path) - - def get_environment(self, state: bool = True) -> dict[str, str]: - """ - Get an environment suitable for a subprocess.Popen call. - This is the current process environment with some session-specific - variables. - - :param state: flag to determine if session state should be included - :return: environment variables - """ - env = os.environ.copy() - env["CORE_PYTHON"] = sys.executable - env["SESSION"] = str(self.id) - env["SESSION_SHORT"] = self.short_session_id() - env["SESSION_DIR"] = str(self.directory) - env["SESSION_NAME"] = str(self.name) - env["SESSION_FILENAME"] = str(self.file_path) - env["SESSION_USER"] = str(self.user) - if state: - env["SESSION_STATE"] = str(self.state) - # try reading and merging optional environments from: - # /etc/core/environment - # /home/user/.coregui/environment - # /tmp/pycore./environment - core_env_path = constants.CORE_CONF_DIR / "environment" - session_env_path = self.directory / "environment" - if self.user: - user_home_path = Path(f"~{self.user}").expanduser() - user_env = user_home_path / ".coregui" / "environment" - paths = [core_env_path, user_env, session_env_path] - else: - paths = [core_env_path, session_env_path] - for path in paths: - if path.is_file(): - try: - utils.load_config(path, env) - except OSError: - logger.exception("error reading environment file: %s", path) - return env - - def set_user(self, user: str) -> None: - """ - Set the username for this session. Update the permissions of the - session dir to allow the user write access. 
- - :param user: user to give write permissions to for the session directory - :return: nothing - """ - self.user = user - try: - uid = pwd.getpwnam(user).pw_uid - gid = self.directory.stat().st_gid - os.chown(self.directory, uid, gid) - except OSError: - logger.exception("failed to set permission on %s", self.directory) - - def create_node( - self, - _class: type[NT], - start: bool, - _id: int = None, - name: str = None, - server: str = None, - options: NodeOptions = None, - ) -> NT: - """ - Create an emulation node. - - :param _class: node class to create - :param start: True to start node, False otherwise - :param _id: id for node, defaults to None for generated id - :param name: name to assign to node - :param server: distributed server for node, if desired - :param options: options to create node with - :return: the created node instance - :raises core.CoreError: when id of the node to create already exists - """ - with self.nodes_lock: - node = _class(self, _id=_id, name=name, server=server, options=options) - if node.id in self.nodes: - node.shutdown() - raise CoreError(f"duplicate node id {node.id} for {node.name}") - self.nodes[node.id] = node - logger.info( - "created node(%s) id(%s) name(%s) start(%s)", - _class.__name__, - node.id, - node.name, - start, - ) - if start: - node.startup() - return node - - def get_node(self, _id: int, _class: type[NT]) -> NT: - """ - Get a session node. - - :param _id: node id to retrieve - :param _class: expected node class - :return: node for the given id - :raises core.CoreError: when node does not exist - """ - node = self.nodes.get(_id) - if node is None: - raise CoreError(f"unknown node id {_id}") - if not isinstance(node, _class): - actual = node.__class__.__name__ - expected = _class.__name__ - raise CoreError(f"node class({actual}) is not expected({expected})") - return node - - def delete_node(self, _id: int) -> bool: - """ - Delete a node from the session and check if session should shutdown, if no nodes - are left. - - :param _id: id of node to delete - :return: True if node deleted, False otherwise - """ - # delete node and check for session shutdown if a node was removed - node = None - with self.nodes_lock: - if _id in self.nodes: - node = self.nodes.pop(_id) - logger.info("deleted node(%s)", node.name) - if node: - node.shutdown() - self.sdt.delete_node(_id) - return node is not None - - def delete_nodes(self) -> None: - """ - Clear the nodes dictionary, and call shutdown for each node. - """ - nodes_ids = [] - with self.nodes_lock: - funcs = [] - while self.nodes: - _, node = self.nodes.popitem() - nodes_ids.append(node.id) - funcs.append((node.shutdown, [], {})) - utils.threadpool(funcs) - for node_id in nodes_ids: - self.sdt.delete_node(node_id) - - def exception( - self, level: ExceptionLevels, source: str, text: str, node_id: int = None - ) -> None: - """ - Generate and broadcast an exception event. - - :param level: exception level - :param source: source name - :param text: exception message - :param node_id: node related to exception - :return: nothing - """ - exception_data = ExceptionData( - node=node_id, - session=self.id, - level=level, - source=source, - date=time.ctime(), - text=text, - ) - self.broadcast_exception(exception_data) - - def instantiate(self) -> list[Exception]: - """ - We have entered the instantiation state, invoke startup methods - of various managers and boot the nodes. Validate nodes and check - for transition to the runtime state. 
- - :return: list of service boot errors during startup - """ - if self.is_running(): - logger.warning("ignoring instantiate, already in runtime state") - return [] - # create control net interfaces and network tunnels - # which need to exist for emane to sync on location events - # in distributed scenarios - self.add_remove_control_net(0, remove=False) - # initialize distributed tunnels - self.distributed.start() - # instantiate will be invoked again upon emane configure - if self.emane.startup() == EmaneState.NOT_READY: - return [] - # boot node services and then start mobility - exceptions = self.boot_nodes() - if not exceptions: - # complete wireless node - for node in self.nodes.values(): - if isinstance(node, WirelessNode): - node.post_startup() - self.mobility.startup() - # notify listeners that instantiation is complete - event = EventData(event_type=EventTypes.INSTANTIATION_COMPLETE) - self.broadcast_event(event) - # startup event loop - self.event_loop.run() - self.set_state(EventTypes.RUNTIME_STATE, send_event=True) - return exceptions - - def get_node_count(self) -> int: - """ - Returns the number of CoreNodes and CoreNets, except for those - that are not considered in the GUI's node count. - - :return: created node count - """ - with self.nodes_lock: - count = 0 - for node in self.nodes.values(): - is_p2p_ctrlnet = isinstance(node, (PtpNet, CtrlNet)) - is_tap = isinstance(node, GreTapBridge) and not isinstance( - node, TunnelNode - ) - if is_p2p_ctrlnet or is_tap: - continue - count += 1 - return count - - def data_collect(self) -> None: - """ - Tear down a running session. Stop the event loop and any running - nodes, and perform clean-up. - - :return: nothing - """ - if self.state.already_collected(): - logger.info( - "session(%s) state(%s) already data collected", self.id, self.state - ) - return - logger.info("session(%s) state(%s) data collection", self.id, self.state) - self.set_state(EventTypes.DATACOLLECT_STATE, send_event=True) - - # stop event loop - self.event_loop.stop() - - # stop mobility and node services - with self.nodes_lock: - funcs = [] - for node in self.nodes.values(): - if isinstance(node, CoreNodeBase) and node.up: - args = (node,) - funcs.append((self.services.stop_services, args, {})) - funcs.append((node.stop_config_services, (), {})) - utils.threadpool(funcs) - - # shutdown emane - self.emane.shutdown() - - # update control interface hosts - self.update_control_iface_hosts(remove=True) - - # remove all four possible control networks - for i in range(4): - self.add_remove_control_net(i, remove=True) - - def short_session_id(self) -> str: - """ - Return a shorter version of the session ID, appropriate for - interface names, where length may be limited. - - :return: short session id - """ - ssid = (self.id >> 8) ^ (self.id & ((1 << 8) - 1)) - return f"{ssid:x}" - - def boot_node(self, node: CoreNode) -> None: - """ - Boot node by adding a control interface when necessary and starting - node services. - - :param node: node to boot - :return: nothing - """ - logger.info( - "booting node(%s): config services(%s) services(%s)", - node.name, - ", ".join(node.config_services.keys()), - ", ".join(x.name for x in node.services), - ) - self.services.boot_services(node) - node.start_config_services() - - def boot_nodes(self) -> list[Exception]: - """ - Invoke the boot() procedure for all nodes and send back node - messages to the GUI for node messages that had the status - request flag. 
- - :return: service boot exceptions - """ - with self.nodes_lock: - funcs = [] - start = time.monotonic() - for node in self.nodes.values(): - if isinstance(node, CoreNode): - self.add_remove_control_iface(node, remove=False) - funcs.append((self.boot_node, (node,), {})) - results, exceptions = utils.threadpool(funcs) - total = time.monotonic() - start - logger.debug("boot run time: %s", total) - if not exceptions: - self.update_control_iface_hosts() - return exceptions - - def get_control_net_prefixes(self) -> list[str]: - """ - Retrieve control net prefixes. - - :return: control net prefix list - """ - p = self.options.get("controlnet") - p0 = self.options.get("controlnet0") - p1 = self.options.get("controlnet1") - p2 = self.options.get("controlnet2") - p3 = self.options.get("controlnet3") - if not p0 and p: - p0 = p - return [p0, p1, p2, p3] - - def get_control_net_server_ifaces(self) -> list[str]: - """ - Retrieve control net server interfaces. - - :return: list of control net server interfaces - """ - d0 = self.options.get("controlnetif0") - if d0: - logger.error("controlnet0 cannot be assigned with a host interface") - d1 = self.options.get("controlnetif1") - d2 = self.options.get("controlnetif2") - d3 = self.options.get("controlnetif3") - return [None, d1, d2, d3] - - def get_control_net_index(self, dev: str) -> int: - """ - Retrieve control net index. - - :param dev: device to get control net index for - :return: control net index, -1 otherwise - """ - if dev[0:4] == "ctrl" and int(dev[4]) in [0, 1, 2, 3]: - index = int(dev[4]) - if index == 0: - return index - if index < 4 and self.get_control_net_prefixes()[index] is not None: - return index - return -1 - - def get_control_net(self, net_index: int) -> CtrlNet: - """ - Retrieve a control net based on index. - - :param net_index: control net index - :return: control net - :raises CoreError: when control net is not found - """ - return self.get_node(CTRL_NET_ID + net_index, CtrlNet) - - def add_remove_control_net( - self, net_index: int, remove: bool = False, conf_required: bool = True - ) -> Optional[CtrlNet]: - """ - Create a control network bridge as necessary. - When the remove flag is True, remove the bridge that connects control - interfaces. The conf_reqd flag, when False, causes a control network - bridge to be added even if one has not been configured. - - :param net_index: network index - :param remove: flag to check if it should be removed - :param conf_required: flag to check if conf is required - :return: control net node - """ - logger.debug( - "add/remove control net: index(%s) remove(%s) conf_required(%s)", - net_index, - remove, - conf_required, - ) - prefix_spec_list = self.get_control_net_prefixes() - prefix_spec = prefix_spec_list[net_index] - if not prefix_spec: - if conf_required: - # no controlnet needed - return None - else: - prefix_spec = CtrlNet.DEFAULT_PREFIX_LIST[net_index] - logger.debug("prefix spec: %s", prefix_spec) - server_iface = self.get_control_net_server_ifaces()[net_index] - - # return any existing controlnet bridge - try: - control_net = self.get_control_net(net_index) - if remove: - self.delete_node(control_net.id) - return None - return control_net - except CoreError: - if remove: - return None - - # build a new controlnet bridge - _id = CTRL_NET_ID + net_index - - # use the updown script for control net 0 only. 
- updown_script = None - if net_index == 0: - updown_script = self.options.get("controlnet_updown_script") or None - if not updown_script: - logger.debug("controlnet updown script not configured") - - prefixes = prefix_spec.split() - if len(prefixes) > 1: - # a list of per-host prefixes is provided - try: - # split first (master) entry into server and prefix - prefix = prefixes[0].split(":", 1)[1] - except IndexError: - # no server name. possibly only one server - prefix = prefixes[0] - else: - prefix = prefixes[0] - - logger.info( - "controlnet(%s) prefix(%s) updown(%s) serverintf(%s)", - _id, - prefix, - updown_script, - server_iface, - ) - options = CtrlNet.create_options() - options.prefix = prefix - options.updown_script = updown_script - options.serverintf = server_iface - control_net = self.create_node(CtrlNet, False, _id, options=options) - control_net.brname = f"ctrl{net_index}.{self.short_session_id()}" - control_net.startup() - return control_net - - def add_remove_control_iface( - self, - node: CoreNode, - net_index: int = 0, - remove: bool = False, - conf_required: bool = True, - ) -> None: - """ - Add a control interface to a node when a 'controlnet' prefix is - listed in the config file or session options. Uses - addremovectrlnet() to build or remove the control bridge. - If conf_reqd is False, the control network may be built even - when the user has not configured one (e.g. for EMANE.) - - :param node: node to add or remove control interface - :param net_index: network index - :param remove: flag to check if it should be removed - :param conf_required: flag to check if conf is required - :return: nothing - """ - control_net = self.add_remove_control_net(net_index, remove, conf_required) - if not control_net: - return - if not node: - return - # ctrl# already exists - if node.ifaces.get(control_net.CTRLIF_IDX_BASE + net_index): - return - try: - ip4 = control_net.prefix[node.id] - ip4_mask = control_net.prefix.prefixlen - iface_data = InterfaceData( - id=control_net.CTRLIF_IDX_BASE + net_index, - name=f"ctrl{net_index}", - mac=utils.random_mac(), - ip4=ip4, - ip4_mask=ip4_mask, - mtu=DEFAULT_MTU, - ) - iface = node.create_iface(iface_data) - control_net.attach(iface) - iface.control = True - except ValueError: - msg = f"Control interface not added to node {node.id}. " - msg += f"Invalid control network prefix ({control_net.prefix}). " - msg += "A longer prefix length may be required for this many nodes." - logger.exception(msg) - - def update_control_iface_hosts( - self, net_index: int = 0, remove: bool = False - ) -> None: - """ - Add the IP addresses of control interfaces to the /etc/hosts file. 
- - :param net_index: network index to update - :param remove: flag to check if it should be removed - :return: nothing - """ - if not self.options.get_bool("update_etc_hosts", False): - return - - try: - control_net = self.get_control_net(net_index) - except CoreError: - logger.exception("error retrieving control net node") - return - - header = f"CORE session {self.id} host entries" - if remove: - logger.info("Removing /etc/hosts file entries.") - utils.file_demunge("/etc/hosts", header) - return - - entries = [] - for iface in control_net.get_ifaces(): - name = iface.node.name - for ip in iface.ips(): - entries.append(f"{ip.ip} {name}") - - logger.info("Adding %d /etc/hosts file entries.", len(entries)) - utils.file_munge("/etc/hosts", header, "\n".join(entries) + "\n") - - def runtime(self) -> float: - """ - Return the current time we have been in the runtime state, or zero - if not in runtime. - """ - if self.is_running(): - return time.monotonic() - self.state_time - else: - return 0.0 - - def add_event( - self, event_time: float, node_id: int = None, name: str = None, data: str = None - ) -> None: - """ - Add an event to the event queue, with a start time relative to the - start of the runtime state. - - :param event_time: event time - :param node_id: node to add event for - :param name: name of event - :param data: data for event - :return: nothing - """ - current_time = self.runtime() - if current_time > 0: - if event_time <= current_time: - logger.warning( - "could not schedule past event for time %s (run time is now %s)", - event_time, - current_time, - ) - return - event_time = event_time - current_time - self.event_loop.add_event( - event_time, self.run_event, node_id=node_id, name=name, data=data - ) - if not name: - name = "" - logger.info( - "scheduled event %s at time %s data=%s", - name, - event_time + current_time, - data, - ) - - def run_event( - self, node_id: int = None, name: str = None, data: str = None - ) -> None: - """ - Run a scheduled event, executing commands in the data string. - - :param node_id: node id to run event - :param name: event name - :param data: event data - :return: nothing - """ - if data is None: - logger.warning("no data for event node(%s) name(%s)", node_id, name) - return - now = self.runtime() - if not name: - name = "" - logger.info("running event %s at time %s cmd=%s", name, now, data) - if not node_id: - utils.mute_detach(data) - else: - node = self.get_node(node_id, CoreNodeBase) - node.cmd(data, wait=False) - - def get_link_color(self, network_id: int) -> str: - """ - Assign a color for links associated with a network. - - :param network_id: network to get a link color for - :return: link color - """ - color = self.link_colors.get(network_id) - if not color: - index = len(self.link_colors) % len(LINK_COLORS) - color = LINK_COLORS[index] - self.link_colors[network_id] = color - return color - - def is_running(self) -> bool: - """ - Convenience for checking if this session is in the runtime state. 
-
-        :return: True if in the runtime state, False otherwise
-        """
-        return self.state == EventTypes.RUNTIME_STATE
diff --git a/daemon/core/emulator/sessionconfig.py b/daemon/core/emulator/sessionconfig.py
deleted file mode 100644
index b6d5bcd3..00000000
--- a/daemon/core/emulator/sessionconfig.py
+++ /dev/null
@@ -1,114 +0,0 @@
-from typing import Optional
-
-from core.config import ConfigBool, ConfigInt, ConfigString, Configuration
-from core.errors import CoreError
-from core.plugins.sdt import Sdt
-
-
-class SessionConfig:
-    """
-    Provides session configuration.
-    """
-
-    options: list[Configuration] = [
-        ConfigString(id="controlnet", label="Control Network"),
-        ConfigString(id="controlnet0", label="Control Network 0"),
-        ConfigString(id="controlnet1", label="Control Network 1"),
-        ConfigString(id="controlnet2", label="Control Network 2"),
-        ConfigString(id="controlnet3", label="Control Network 3"),
-        ConfigString(id="controlnet_updown_script", label="Control Network Script"),
-        ConfigBool(id="enablerj45", default="1", label="Enable RJ45s"),
-        ConfigBool(id="preservedir", default="0", label="Preserve session dir"),
-        ConfigBool(id="enablesdt", default="0", label="Enable SDT3D output"),
-        ConfigString(id="sdturl", default=Sdt.DEFAULT_SDT_URL, label="SDT3D URL"),
-        ConfigBool(id="ovs", default="0", label="Enable OVS"),
-        ConfigInt(id="platform_id_start", default="1", label="EMANE Platform ID Start"),
-        ConfigInt(id="nem_id_start", default="1", label="EMANE NEM ID Start"),
-        ConfigBool(id="link_enabled", default="1", label="EMANE Links?"),
-        ConfigInt(
-            id="loss_threshold", default="30", label="EMANE Link Loss Threshold (%)"
-        ),
-        ConfigInt(
-            id="link_interval", default="1", label="EMANE Link Check Interval (sec)"
-        ),
-        ConfigInt(id="link_timeout", default="4", label="EMANE Link Timeout (sec)"),
-        ConfigInt(id="mtu", default="0", label="MTU for All Devices"),
-    ]
-
-    def __init__(self, config: dict[str, str] = None) -> None:
-        """
-        Create a SessionConfig instance.
-
-        :param config: configuration to initialize with
-        """
-        self._config: dict[str, str] = {x.id: x.default for x in self.options}
-        self._config.update(config or {})
-
-    def update(self, config: dict[str, str]) -> None:
-        """
-        Update current configuration with provided values.
-
-        :param config: configuration to update with
-        :return: nothing
-        """
-        self._config.update(config)
-
-    def set(self, name: str, value: str) -> None:
-        """
-        Set a configuration value.
-
-        :param name: name of configuration to set
-        :param value: value to set
-        :return: nothing
-        """
-        self._config[name] = value
-
-    def get(self, name: str, default: str = None) -> Optional[str]:
-        """
-        Retrieve configuration value.
-
-        :param name: name of configuration to get
-        :param default: value to return as default
-        :return: return found configuration value or default
-        """
-        return self._config.get(name, default)
-
-    def all(self) -> dict[str, str]:
-        """
-        Retrieve all configuration options.
-
-        :return: configuration value dict
-        """
-        return self._config
-
-    def get_bool(self, name: str, default: bool = None) -> bool:
-        """
-        Get configuration value as a boolean.
- - :param name: configuration name - :param default: default value if not found - :return: boolean for configuration value - """ - value = self._config.get(name) - if value is None and default is None: - raise CoreError(f"missing session options for {name}") - if value is None: - return default - else: - return value.lower() == "true" - - def get_int(self, name: str, default: int = None) -> int: - """ - Get configuration value as int. - - :param name: configuration name - :param default: default value if not found - :return: int for configuration value - """ - value = self._config.get(name) - if value is None and default is None: - raise CoreError(f"missing session options for {name}") - if value is None: - return default - else: - return int(value) diff --git a/daemon/core/enumerations.py b/daemon/core/enumerations.py new file mode 100644 index 00000000..ac287272 --- /dev/null +++ b/daemon/core/enumerations.py @@ -0,0 +1,317 @@ +""" +Contains all legacy enumerations for interacting with legacy CORE code. +""" + +from enum import Enum + +CORE_API_VERSION = "1.23" +CORE_API_PORT = 4038 + + +class MessageTypes(Enum): + """ + CORE message types. + """ + NODE = 0x01 + LINK = 0x02 + EXECUTE = 0x03 + REGISTER = 0x04 + CONFIG = 0x05 + FILE = 0x06 + INTERFACE = 0x07 + EVENT = 0x08 + SESSION = 0x09 + EXCEPTION = 0x0A + + +class MessageFlags(Enum): + """ + CORE message flags. + """ + ADD = 0x01 + DELETE = 0x02 + CRI = 0x04 + LOCAL = 0x08 + STRING = 0x10 + TEXT = 0x20 + TTY = 0x40 + + +class NodeTlvs(Enum): + """ + Node type, length, value enumerations. + """ + NUMBER = 0x01 + TYPE = 0x02 + NAME = 0x03 + IP_ADDRESS = 0x04 + MAC_ADDRESS = 0x05 + IP6_ADDRESS = 0x06 + MODEL = 0x07 + EMULATION_SERVER = 0x08 + SESSION = 0x0A + X_POSITION = 0x20 + Y_POSITION = 0x21 + CANVAS = 0x22 + EMULATION_ID = 0x23 + NETWORK_ID = 0x24 + SERVICES = 0x25 + LATITUDE = 0x30 + LONGITUDE = 0x31 + ALTITUDE = 0x32 + ICON = 0x42 + OPAQUE = 0x50 + + +class NodeTypes(Enum): + """ + Node types. + """ + DEFAULT = 0 + PHYSICAL = 1 + TBD = 3 + SWITCH = 4 + HUB = 5 + WIRELESS_LAN = 6 + RJ45 = 7 + TUNNEL = 8 + KTUNNEL = 9 + EMANE = 10 + TAP_BRIDGE = 11 + PEER_TO_PEER = 12 + CONTROL_NET = 13 + EMANE_NET = 14 + + +class Rj45Models(Enum): + """ + RJ45 model types. + """ + LINKED = 0 + WIRELESS = 1 + INSTALLED = 2 + + +# Link Message TLV Types +class LinkTlvs(Enum): + """ + Link type, length, value enumerations. + """ + N1_NUMBER = 0x01 + N2_NUMBER = 0x02 + DELAY = 0x03 + BANDWIDTH = 0x04 + PER = 0x05 + DUP = 0x06 + JITTER = 0x07 + MER = 0x08 + BURST = 0x09 + SESSION = 0x0A + MBURST = 0x10 + TYPE = 0x20 + GUI_ATTRIBUTES = 0x21 + UNIDIRECTIONAL = 0x22 + EMULATION_ID = 0x23 + NETWORK_ID = 0x24 + KEY = 0x25 + INTERFACE1_NUMBER = 0x30 + INTERFACE1_IP4 = 0x31 + INTERFACE1_IP4_MASK = 0x32 + INTERFACE1_MAC = 0x33 + INTERFACE1_IP6 = 0x34 + INTERFACE1_IP6_MASK = 0x35 + INTERFACE2_NUMBER = 0x36 + INTERFACE2_IP4 = 0x37 + INTERFACE2_IP4_MASK = 0x38 + INTERFACE2_MAC = 0x39 + INTERFACE2_IP6 = 0x40 + INTERFACE2_IP6_MASK = 0x41 + INTERFACE1_NAME = 0x42 + INTERFACE2_NAME = 0x43 + OPAQUE = 0x50 + + +class LinkTypes(Enum): + """ + Link types. + """ + WIRELESS = 0 + WIRED = 1 + + +class ExecuteTlvs(Enum): + """ + Execute type, length, value enumerations. + """ + NODE = 0x01 + NUMBER = 0x02 + TIME = 0x03 + COMMAND = 0x04 + RESULT = 0x05 + STATUS = 0x06 + SESSION = 0x0A + + +class RegisterTlvs(Enum): + """ + Register type, length, value enumerations. 
+ """ + WIRELESS = 0x01 + MOBILITY = 0x02 + UTILITY = 0x03 + EXECUTE_SERVER = 0x04 + GUI = 0x05 + EMULATION_SERVER = 0x06 + SESSION = 0x0A + + +class ConfigTlvs(Enum): + """ + Configuration type, length, value enumerations. + """ + NODE = 0x01 + OBJECT = 0x02 + TYPE = 0x03 + DATA_TYPES = 0x04 + VALUES = 0x05 + CAPTIONS = 0x06 + BITMAP = 0x07 + POSSIBLE_VALUES = 0x08 + GROUPS = 0x09 + SESSION = 0x0A + INTERFACE_NUMBER = 0x0B + NETWORK_ID = 0x24 + OPAQUE = 0x50 + + +class ConfigFlags(Enum): + """ + Configuration flags. + """ + NONE = 0x00 + REQUEST = 0x01 + UPDATE = 0x02 + RESET = 0x03 + + +class ConfigDataTypes(Enum): + """ + Configuration data types. + """ + UINT8 = 0x01 + UINT16 = 0x02 + UINT32 = 0x03 + UINT64 = 0x04 + INT8 = 0x05 + INT16 = 0x06 + INT32 = 0x07 + INT64 = 0x08 + FLOAT = 0x09 + STRING = 0x0A + BOOL = 0x0B + + +class FileTlvs(Enum): + """ + File type, length, value enumerations. + """ + NODE = 0x01 + NAME = 0x02 + MODE = 0x03 + NUMBER = 0x04 + TYPE = 0x05 + SOURCE_NAME = 0x06 + SESSION = 0x0A + DATA = 0x10 + COMPRESSED_DATA = 0x11 + + +class InterfaceTlvs(Enum): + """ + Interface type, length, value enumerations. + """ + NODE = 0x01 + NUMBER = 0x02 + NAME = 0x03 + IP_ADDRESS = 0x04 + MASK = 0x05 + MAC_ADDRESS = 0x06 + IP6_ADDRESS = 0x07 + IP6_MASK = 0x08 + TYPE = 0x09 + SESSION = 0x0A + STATE = 0x0B + EMULATION_ID = 0x23 + NETWORK_ID = 0x24 + + +class EventTlvs(Enum): + """ + Event type, length, value enumerations. + """ + NODE = 0x01 + TYPE = 0x02 + NAME = 0x03 + DATA = 0x04 + TIME = 0x05 + SESSION = 0x0A + + +class EventTypes(Enum): + """ + Event types. + """ + NONE = 0 + DEFINITION_STATE = 1 + CONFIGURATION_STATE = 2 + INSTANTIATION_STATE = 3 + RUNTIME_STATE = 4 + DATACOLLECT_STATE = 5 + SHUTDOWN_STATE = 6 + START = 7 + STOP = 8 + PAUSE = 9 + RESTART = 10 + FILE_OPEN = 11 + FILE_SAVE = 12 + SCHEDULED = 13 + RECONFIGURE = 14 + INSTANTIATION_COMPLETE = 15 + + +class SessionTlvs(Enum): + """ + Session type, length, value enumerations. + """ + NUMBER = 0x01 + NAME = 0x02 + FILE = 0x03 + NODE_COUNT = 0x04 + DATE = 0x05 + THUMB = 0x06 + USER = 0x07 + OPAQUE = 0x0A + + +class ExceptionTlvs(Enum): + """ + Exception type, length, value enumerations. + """ + NODE = 0x01 + SESSION = 0x02 + LEVEL = 0x03 + SOURCE = 0x04 + DATE = 0x05 + TEXT = 0x06 + OPAQUE = 0x0A + + +class ExceptionLevels(Enum): + """ + Exception levels. + """ + NONE = 0 + FATAL = 1 + ERROR = 2 + WARNING = 3 + NOTICE = 4 diff --git a/daemon/core/errors.py b/daemon/core/errors.py deleted file mode 100644 index 83d252b8..00000000 --- a/daemon/core/errors.py +++ /dev/null @@ -1,56 +0,0 @@ -""" -Provides CORE specific errors. -""" -import subprocess - - -class CoreCommandError(subprocess.CalledProcessError): - """ - Used when encountering internal CORE command errors. - """ - - def __str__(self) -> str: - return ( - f"command({self.cmd}), status({self.returncode}):\n" - f"stdout: {self.output}\nstderr: {self.stderr}" - ) - - -class CoreError(Exception): - """ - Used for errors when dealing with CoreEmu and Sessions. - """ - - pass - - -class CoreXmlError(Exception): - """ - Used when there was an error parsing a CORE xml file. - """ - - pass - - -class CoreServiceError(Exception): - """ - Used when there is an error related to accessing a service. - """ - - pass - - -class CoreServiceBootError(Exception): - """ - Used when there is an error booting a service. - """ - - pass - - -class CoreConfigError(Exception): - """ - Used when there is an error defining a configurable option. 
- """ - - pass diff --git a/daemon/core/executables.py b/daemon/core/executables.py deleted file mode 100644 index f04d88de..00000000 --- a/daemon/core/executables.py +++ /dev/null @@ -1,40 +0,0 @@ -BASH: str = "bash" -ETHTOOL: str = "ethtool" -IP: str = "ip" -MOUNT: str = "mount" -NFTABLES: str = "nft" -OVS_VSCTL: str = "ovs-vsctl" -SYSCTL: str = "sysctl" -TC: str = "tc" -TEST: str = "test" -UMOUNT: str = "umount" -VCMD: str = "vcmd" -VNODED: str = "vnoded" - -COMMON_REQUIREMENTS: list[str] = [ - BASH, - ETHTOOL, - IP, - MOUNT, - NFTABLES, - SYSCTL, - TC, - TEST, - UMOUNT, - VCMD, - VNODED, -] -OVS_REQUIREMENTS: list[str] = [OVS_VSCTL] - - -def get_requirements(use_ovs: bool) -> list[str]: - """ - Retrieve executable requirements needed to run CORE. - - :param use_ovs: True if OVS is being used, False otherwise - :return: list of executable requirements - """ - requirements = COMMON_REQUIREMENTS - if use_ovs: - requirements += OVS_REQUIREMENTS - return requirements diff --git a/daemon/core/api/grpc/__init__.py b/daemon/core/grpc/__init__.py similarity index 100% rename from daemon/core/api/grpc/__init__.py rename to daemon/core/grpc/__init__.py diff --git a/daemon/core/grpc/client.py b/daemon/core/grpc/client.py new file mode 100644 index 00000000..a6863e37 --- /dev/null +++ b/daemon/core/grpc/client.py @@ -0,0 +1,831 @@ +""" +gRpc client for interfacing with CORE, when gRPC mode is enabled. +""" + +from __future__ import print_function + +import logging +import threading +from contextlib import contextmanager + +import grpc + +from core.grpc import core_pb2 +from core.grpc import core_pb2_grpc +from core.misc.ipaddress import Ipv4Prefix, Ipv6Prefix, MacAddress + + +class InterfaceHelper(object): + """ + Convenience class to help generate IP4 and IP6 addresses for gRPC clients. + """ + + def __init__(self, ip4_prefix=None, ip6_prefix=None): + """ + Creates an InterfaceHelper object. + + :param str ip4_prefix: ip4 prefix to use for generation + :param str ip6_prefix: ip6 prefix to use for generation + :raises ValueError: when both ip4 and ip6 prefixes have not been provided + """ + if not ip4_prefix and not ip6_prefix: + raise ValueError("ip4 or ip6 must be provided") + + self.ip4 = None + if ip4_prefix: + self.ip4 = Ipv4Prefix(ip4_prefix) + self.ip6 = None + if ip6_prefix: + self.ip6 = Ipv6Prefix(ip6_prefix) + + def ip4_address(self, node_id): + """ + Convenience method to return the IP4 address for a node. + + :param int node_id: node id to get IP4 address for + :return: IP4 address or None + :rtype: str + """ + if not self.ip4: + raise ValueError("ip4 prefixes have not been set") + return str(self.ip4.addr(node_id)) + + def ip6_address(self, node_id): + """ + Convenience method to return the IP6 address for a node. + + :param int node_id: node id to get IP6 address for + :return: IP4 address or None + :rtype: str + """ + if not self.ip6: + raise ValueError("ip6 prefixes have not been set") + return str(self.ip6.addr(node_id)) + + def create_interface(self, node_id, interface_id, name=None, mac=None): + """ + Creates interface data for linking nodes, using the nodes unique id for generation, along with a random + mac address, unless provided. 
+ + :param int node_id: node id to create interface for + :param int interface_id: interface id for interface + :param str name: name to set for interface, default is eth{id} + :param str mac: mac address to use for this interface, default is random generation + :return: new interface data for the provided node + :rtype: core_pb2.Interface + """ + # generate ip4 data + ip4 = None + ip4_mask = None + if self.ip4: + ip4 = str(self.ip4.addr(node_id)) + ip4_mask = self.ip4.prefixlen + + # generate ip6 data + ip6 = None + ip6_mask = None + if self.ip6: + ip6 = str(self.ip6.addr(node_id)) + ip6_mask = self.ip6.prefixlen + + # random mac + if not mac: + mac = MacAddress.random() + + return core_pb2.Interface( + id=interface_id, + name=name, + ip4=ip4, + ip4mask=ip4_mask, + ip6=ip6, + ip6mask=ip6_mask, + mac=str(mac) + ) + + +def stream_listener(stream, handler): + """ + Listen for stream events and provide them to the handler. + + :param stream: grpc stream that will provide events + :param handler: function that handles an event + :return: nothing + """ + try: + for event in stream: + handler(event) + except grpc.RpcError as e: + if e.code() == grpc.StatusCode.CANCELLED: + logging.debug("stream closed") + else: + logging.exception("stream error") + + +def start_streamer(stream, handler): + """ + Convenience method for starting a grpc stream thread for handling streamed events. + + :param stream: grpc stream that will provide events + :param handler: function that handles an event + :return: nothing + """ + thread = threading.Thread(target=stream_listener, args=(stream, handler)) + thread.daemon = True + thread.start() + + +class CoreGrpcClient(object): + """ + Provides convenience methods for interfacing with the CORE grpc server. + """ + + def __init__(self, address="localhost:50051"): + """ + Creates a CoreGrpcClient instance. + + :param str address: grpc server address to connect to + """ + self.address = address + self.stub = None + self.channel = None + + def create_session(self, _id=None): + """ + Create a session. + + :param int _id: id for session, default is None and one will be created for you + :return: response with created session id + :rtype: core_pb2.CreateSessionResponse + """ + request = core_pb2.CreateSessionRequest(id=_id) + return self.stub.CreateSession(request) + + def delete_session(self, _id): + """ + Delete a session. + + :param int _id: id of session + :return: response with result of deletion success or failure + :rtype: core_pb2.DeleteSessionResponse + :raises grpc.RpcError: when session doesn't exist + """ + request = core_pb2.DeleteSessionRequest(id=_id) + return self.stub.DeleteSession(request) + + def get_sessions(self): + """ + Retrieves all currently known sessions. + + :return: response with a list of currently known session, their state and number of nodes + :rtype: core_pb2.GetSessionsResponse + """ + return self.stub.GetSessions(core_pb2.GetSessionsRequest()) + + def get_session(self, _id): + """ + Retrieve a session. + + :param int _id: id of session + :return: response with sessions state, nodes, and links + :rtype: core_pb2.GetSessionResponse + :raises grpc.RpcError: when session doesn't exist + """ + request = core_pb2.GetSessionRequest(id=_id) + return self.stub.GetSession(request) + + def get_session_options(self, _id): + """ + Retrieve session options. 
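Putting the pieces above together, the client is constructed with a server address, connected explicitly, and then used to create and list sessions. A minimal sketch, assuming a core-daemon with the gRPC server enabled is reachable on the default localhost:50051:

from core.grpc.client import CoreGrpcClient

client = CoreGrpcClient()  # defaults to "localhost:50051"
client.connect()
try:
    response = client.create_session()
    print("created session {} in state {}".format(response.id, response.state))
    # list every known session with its state and node count
    for summary in client.get_sessions().sessions:
        print("{} {} {}".format(summary.id, summary.state, summary.nodes))
finally:
    client.close()

connect() and close() can also be replaced by the context_connect() context manager defined later in this file.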
+ + :param int _id: id of session + :return: response with a list of configuration groups + :rtype: core_pb2.GetSessionOptionsResponse + :raises grpc.RpcError: when session doesn't exist + """ + request = core_pb2.GetSessionOptionsRequest(id=_id) + return self.stub.GetSessionOptions(request) + + def set_session_options(self, _id, config): + """ + Set options for a session. + + :param int _id: id of session + :param dict[str, str] config: configuration values to set + :return: response with result of success or failure + :rtype: core_pb2.SetSessionOptionsResponse + :raises grpc.RpcError: when session doesn't exist + """ + request = core_pb2.SetSessionOptionsRequest(id=_id, config=config) + return self.stub.SetSessionOptions(request) + + def get_session_location(self, _id): + """ + Get session location. + + :param int _id: id of session + :return: response with session position reference and scale + :rtype: core_pb2.GetSessionLocationResponse + :raises grpc.RpcError: when session doesn't exist + """ + request = core_pb2.GetSessionLocationRequest(id=_id) + return self.stub.GetSessionLocation(request) + + def set_session_location(self, _id, x=None, y=None, z=None, lat=None, lon=None, alt=None, scale=None): + """ + Set session location. + + :param int _id: id of session + :param float x: x position + :param float y: y position + :param float z: z position + :param float lat: latitude position + :param float lon: longitude position + :param float alt: altitude position + :param float scale: geo scale + :return: response with result of success or failure + :rtype: core_pb2.SetSessionLocationResponse + :raises grpc.RpcError: when session doesn't exist + """ + position = core_pb2.Position(x=x, y=y, z=z, lat=lat, lon=lon, alt=alt) + request = core_pb2.SetSessionLocationRequest(id=_id, position=position, scale=scale) + return self.stub.SetSessionLocation(request) + + def set_session_state(self, _id, state): + """ + Set session state. + + :param int _id: id of session + :param core_pb2.SessionState state: session state to transition to + :return: response with result of success or failure + :rtype: core_pb2.SetSessionStateResponse + :raises grpc.RpcError: when session doesn't exist + """ + request = core_pb2.SetSessionStateRequest(id=_id, state=state) + return self.stub.SetSessionState(request) + + def node_events(self, _id, handler): + """ + Listen for session node events. + + :param int _id: id of session + :param handler: handler for every event + :return: nothing + :raises grpc.RpcError: when session doesn't exist + """ + request = core_pb2.NodeEventsRequest(id=_id) + stream = self.stub.NodeEvents(request) + start_streamer(stream, handler) + + def link_events(self, _id, handler): + """ + Listen for session link events. + + :param int _id: id of session + :param handler: handler for every event + :return: nothing + :raises grpc.RpcError: when session doesn't exist + """ + request = core_pb2.LinkEventsRequest(id=_id) + stream = self.stub.LinkEvents(request) + start_streamer(stream, handler) + + def session_events(self, _id, handler): + """ + Listen for session events. + + :param int _id: id of session + :param handler: handler for every event + :return: nothing + :raises grpc.RpcError: when session doesn't exist + """ + request = core_pb2.SessionEventsRequest(id=_id) + stream = self.stub.SessionEvents(request) + start_streamer(stream, handler) + + def config_events(self, _id, handler): + """ + Listen for session config events. 
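The event subscriptions above hand each streamed message to a callback on a daemon thread (via start_streamer), while set_session_state drives the session through its lifecycle. A brief sketch; the state integer uses core.enumerations.EventTypes values, which is what the server converts the request back into:

import time

from core.enumerations import EventTypes
from core.grpc.client import CoreGrpcClient

def on_node_event(event):
    # event is a core_pb2.NodeEvent carrying the updated node
    print("node event: {} {}".format(event.node.id, event.node.name))

client = CoreGrpcClient()
with client.context_connect():
    session_id = client.create_session().id
    # returns immediately, the handler runs on a background thread
    client.node_events(session_id, on_node_event)
    client.set_session_state(session_id, EventTypes.CONFIGURATION_STATE.value)
    # give the stream a moment before the channel closes
    time.sleep(2)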
+ + :param int _id: id of session + :param handler: handler for every event + :return: nothing + :raises grpc.RpcError: when session doesn't exist + """ + request = core_pb2.ConfigEventsRequest(id=_id) + stream = self.stub.ConfigEvents(request) + start_streamer(stream, handler) + + def exception_events(self, _id, handler): + """ + Listen for session exception events. + + :param int _id: id of session + :param handler: handler for every event + :return: nothing + :raises grpc.RpcError: when session doesn't exist + """ + request = core_pb2.ExceptionEventsRequest(id=_id) + stream = self.stub.ExceptionEvents(request) + start_streamer(stream, handler) + + def file_events(self, _id, handler): + """ + Listen for session file events. + + :param int _id: id of session + :param handler: handler for every event + :return: nothing + :raises grpc.RpcError: when session doesn't exist + """ + request = core_pb2.FileEventsRequest(id=_id) + stream = self.stub.FileEvents(request) + start_streamer(stream, handler) + + def add_node(self, session, node): + """ + Add node to session. + + :param int session: session id + :param core_pb2.Node node: node to add + :return: response with node id + :rtype: core_pb2.AddNodeResponse + :raises grpc.RpcError: when session doesn't exist + """ + request = core_pb2.AddNodeRequest(session=session, node=node) + return self.stub.AddNode(request) + + def get_node(self, session, _id): + """ + Get node details. + + :param int session: session id + :param int _id: node id + :return: response with node details + :rtype: core_pb2.GetNodeResponse + :raises grpc.RpcError: when session or node doesn't exist + """ + request = core_pb2.GetNodeRequest(session=session, id=_id) + return self.stub.GetNode(request) + + def edit_node(self, session, _id, position): + """ + Edit a node, currently only changes position. + + :param int session: session id + :param int _id: node id + :param core_pb2.Position position: position to set node to + :return: response with result of success or failure + :rtype: core_pb2.EditNodeResponse + :raises grpc.RpcError: when session or node doesn't exist + """ + request = core_pb2.EditNodeRequest(session=session, id=_id, position=position) + return self.stub.EditNode(request) + + def delete_node(self, session, _id): + """ + Delete node from session. + + :param int session: session id + :param int _id: node id + :return: response with result of success or failure + :rtype: core_pb2.DeleteNodeResponse + :raises grpc.RpcError: when session doesn't exist + """ + request = core_pb2.DeleteNodeRequest(session=session, id=_id) + return self.stub.DeleteNode(request) + + def get_node_links(self, session, _id): + """ + Get current links for a node. + + :param int session: session id + :param int _id: node id + :return: response with a list of links + :rtype: core_pb2.GetNodeLinksResponse + :raises grpc.RpcError: when session or node doesn't exist + """ + request = core_pb2.GetNodeLinksRequest(session=session, id=_id) + return self.stub.GetNodeLinks(request) + + def add_link(self, session, node_one, node_two, interface_one=None, interface_two=None, options=None): + """ + Add a link between nodes. 
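Node management follows the same request/response pattern: build a core_pb2.Node, add it, then address it by the returned id. A minimal sketch; the "PC" model name and coordinates are assumptions for illustration:

from core.enumerations import NodeTypes
from core.grpc import core_pb2
from core.grpc.client import CoreGrpcClient

client = CoreGrpcClient()
with client.context_connect():
    session_id = client.create_session().id
    node = core_pb2.Node(
        type=NodeTypes.DEFAULT.value,
        model="PC",
        position=core_pb2.Position(x=100, y=100),
    )
    node_id = client.add_node(session_id, node).id
    # EditNode currently only updates position
    client.edit_node(session_id, node_id, core_pb2.Position(x=250, y=150))
    print(client.get_node(session_id, node_id).node.name)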
+ + :param int session: session id + :param int node_one: node one id + :param int node_two: node two id + :param core_pb2.Interface interface_one: node one interface data + :param core_pb2.Interface interface_two: node two interface data + :param core_pb2.LinkOptions options: options for link (jitter, bandwidth, etc) + :return: response with result of success or failure + :rtype: core_pb2.AddLinkResponse + :raises grpc.RpcError: when session or one of the nodes don't exist + """ + link = core_pb2.Link( + node_one=node_one, node_two=node_two, type=core_pb2.LINK_WIRED, + interface_one=interface_one, interface_two=interface_two, options=options) + request = core_pb2.AddLinkRequest(session=session, link=link) + return self.stub.AddLink(request) + + def edit_link(self, session, node_one, node_two, options, interface_one=None, interface_two=None): + """ + Edit a link between nodes. + + :param int session: session id + :param int node_one: node one id + :param int node_two: node two id + :param core_pb2.LinkOptions options: options for link (jitter, bandwidth, etc) + :param int interface_one: node one interface id + :param int interface_two: node two interface id + :return: response with result of success or failure + :rtype: core_pb2.EditLinkResponse + :raises grpc.RpcError: when session or one of the nodes don't exist + """ + request = core_pb2.EditLinkRequest( + session=session, node_one=node_one, node_two=node_two, options=options, + interface_one=interface_one, interface_two=interface_two) + return self.stub.EditLink(request) + + def delete_link(self, session, node_one, node_two, interface_one=None, interface_two=None): + """ + Delete a link between nodes. + + :param int session: session id + :param int node_one: node one id + :param int node_two: node two id + :param int interface_one: node one interface id + :param int interface_two: node two interface id + :return: response with result of success or failure + :rtype: core_pb2.DeleteLinkResponse + :raises grpc.RpcError: when session doesn't exist + """ + request = core_pb2.DeleteLinkRequest( + session=session, node_one=node_one, node_two=node_two, + interface_one=interface_one, interface_two=interface_two) + return self.stub.DeleteLink(request) + + def get_hooks(self, session): + """ + Get all hook scripts. + + :param int session: session id + :return: response with a list of hooks + :rtype: core_pb2.GetHooksResponse + :raises grpc.RpcError: when session doesn't exist + """ + request = core_pb2.GetHooksRequest(session=session) + return self.stub.GetHooks(request) + + def add_hook(self, session, state, file_name, file_data): + """ + Add hook scripts. + + :param int session: session id + :param core_pb2.SessionState state: state to trigger hook + :param str file_name: name of file for hook script + :param bytes file_data: hook script contents + :return: response with result of success or failure + :rtype: core_pb2.AddHookResponse + :raises grpc.RpcError: when session doesn't exist + """ + hook = core_pb2.Hook(state=state, file=file_name, data=file_data) + request = core_pb2.AddHookRequest(session=session, hook=hook) + return self.stub.AddHook(request) + + def get_mobility_configs(self, session): + """ + Get all mobility configurations. 
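add_link combines the interface data produced by InterfaceHelper with a LinkOptions message, and the client fills in a wired link type. A sketch wiring two freshly created nodes together; the prefix, positions and option values are illustrative assumptions:

from core.enumerations import NodeTypes
from core.grpc import core_pb2
from core.grpc.client import CoreGrpcClient, InterfaceHelper

interfaces = InterfaceHelper(ip4_prefix="10.0.0.0/24")
client = CoreGrpcClient()
with client.context_connect():
    session_id = client.create_session().id
    node = core_pb2.Node(type=NodeTypes.DEFAULT.value, position=core_pb2.Position(x=50, y=50))
    node_one = client.add_node(session_id, node).id
    node = core_pb2.Node(type=NodeTypes.DEFAULT.value, position=core_pb2.Position(x=200, y=50))
    node_two = client.add_node(session_id, node).id
    # one interface per node, addressed from the shared prefix
    interface_one = interfaces.create_interface(node_one, 0)
    interface_two = interfaces.create_interface(node_two, 0)
    options = core_pb2.LinkOptions(bandwidth=54000000, delay=5000)
    client.add_link(session_id, node_one, node_two, interface_one, interface_two, options)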
+ + :param int session: session id + :return: response with a dict of node ids to mobility configurations + :rtype: core_pb2.GetMobilityConfigsResponse + :raises grpc.RpcError: when session doesn't exist + """ + request = core_pb2.GetMobilityConfigsRequest(session=session) + return self.stub.GetMobilityConfigs(request) + + def get_mobility_config(self, session, _id): + """ + Get mobility configuration for a node. + + :param int session: session id + :param int _id: node id + :return: response with a list of configuration groups + :rtype: core_pb2.GetMobilityConfigResponse + :raises grpc.RpcError: when session or node doesn't exist + """ + request = core_pb2.GetMobilityConfigRequest(session=session, id=_id) + return self.stub.GetMobilityConfig(request) + + def set_mobility_config(self, session, _id, config): + """ + Set mobility configuration for a node. + + :param int session: session id + :param int _id: node id + :param dict[str, str] config: mobility configuration + :return: response with result of success or failure + :rtype: core_pb2.SetMobilityConfigResponse + :raises grpc.RpcError: when session or node doesn't exist + """ + request = core_pb2.SetMobilityConfigRequest(session=session, id=_id, config=config) + return self.stub.SetMobilityConfig(request) + + def mobility_action(self, session, _id, action): + """ + Send a mobility action for a node. + + :param int session: session id + :param int _id: node id + :param core_pb2.ServiceAction action: action to take + :return: response with result of success or failure + :rtype: core_pb2.MobilityActionResponse + :raises grpc.RpcError: when session or node doesn't exist + """ + request = core_pb2.MobilityActionRequest(session=session, id=_id, action=action) + return self.stub.MobilityAction(request) + + def get_services(self): + """ + Get all currently loaded services. + + :return: response with a list of services + :rtype: core_pb2.GetServicesResponse + """ + request = core_pb2.GetServicesRequest() + return self.stub.GetServices(request) + + def get_service_defaults(self, session): + """ + Get default services for different default node models. + + :param int session: session id + :return: response with a dict of node model to a list of services + :rtype: core_pb2.GetServiceDefaultsResponse + :raises grpc.RpcError: when session doesn't exist + """ + request = core_pb2.GetServiceDefaultsRequest(session=session) + return self.stub.GetServiceDefaults(request) + + def set_service_defaults(self, session, service_defaults): + """ + Set default services for node models. + + :param int session: session id + :param dict service_defaults: node models to lists of services + :return: response with result of success or failure + :rtype: core_pb2.SetServiceDefaultsResponse + :raises grpc.RpcError: when session doesn't exist + """ + defaults = [] + for node_type in service_defaults: + services = service_defaults[node_type] + default = core_pb2.ServiceDefaults(node_type=node_type, services=services) + defaults.append(default) + request = core_pb2.SetServiceDefaultsRequest(session=session, defaults=defaults) + return self.stub.SetServiceDefaults(request) + + def get_node_service(self, session, _id, service): + """ + Get service data for a node. 
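Mobility calls pair a plain string-to-string configuration with an action trigger; the keys belong to the Ns2ScriptedMobility model on the daemon side. A sketch, assuming a session already exists, contains a WLAN node with the given id, and that an ns-2 movement script is available at the given path:

from core.grpc import core_pb2
from core.grpc.client import CoreGrpcClient

client = CoreGrpcClient()
with client.context_connect():
    session_id = client.get_sessions().sessions[0].id
    wlan_id = 3  # assumed id of an existing WLAN node
    # "file" names the ns-2 movement script consumed by Ns2ScriptedMobility
    client.set_mobility_config(session_id, wlan_id, {"file": "/tmp/sample.scen"})
    client.mobility_action(session_id, wlan_id, core_pb2.MOBILITY_START)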
+ + :param int session: session id + :param int _id: node id + :param str service: service name + :return: response with node service data + :rtype: core_pb2.GetNodeServiceResponse + :raises grpc.RpcError: when session or node doesn't exist + """ + request = core_pb2.GetNodeServiceRequest(session=session, id=_id, service=service) + return self.stub.GetNodeService(request) + + def get_node_service_file(self, session, _id, service, file_name): + """ + Get a service file for a node. + + :param int session: session id + :param int _id: node id + :param str service: service name + :param str file_name: file name to get data for + :return: response with file data + :rtype: core_pb2.GetNodeServiceFileResponse + :raises grpc.RpcError: when session or node doesn't exist + """ + request = core_pb2.GetNodeServiceFileRequest(session=session, id=_id, service=service, file=file_name) + return self.stub.GetNodeServiceFile(request) + + def set_node_service(self, session, _id, service, startup, validate, shutdown): + """ + Set service data for a node. + + :param int session: session id + :param int _id: node id + :param str service: service name + :param list startup: startup commands + :param list validate: validation commands + :param list shutdown: shutdown commands + :return: response with result of success or failure + :rtype: core_pb2.SetNodeServiceResponse + :raises grpc.RpcError: when session or node doesn't exist + """ + request = core_pb2.SetNodeServiceRequest( + session=session, id=_id, service=service, startup=startup, validate=validate, shutdown=shutdown) + return self.stub.SetNodeService(request) + + def set_node_service_file(self, session, _id, service, file_name, data): + """ + Set a service file for a node. + + :param int session: session id + :param int _id: node id + :param str service: service name + :param str file_name: file name to save + :param bytes data: data to save for file + :return: response with result of success or failure + :rtype: core_pb2.SetNodeServiceFileResponse + :raises grpc.RpcError: when session or node doesn't exist + """ + request = core_pb2.SetNodeServiceFileRequest( + session=session, id=_id, service=service, file=file_name, data=data) + return self.stub.SetNodeServiceFile(request) + + def service_action(self, session, _id, service, action): + """ + Send an action to a service for a node. + + :param int session: session id + :param int _id: node id + :param str service: service name + :param core_pb2.ServiceAction action: action for service (start, stop, restart, validate) + :return: response with result of success or failure + :rtype: core_pb2.ServiceActionResponse + :raises grpc.RpcError: when session or node doesn't exist + """ + request = core_pb2.ServiceActionRequest(session=session, id=_id, service=service, action=action) + return self.stub.ServiceAction(request) + + def get_wlan_config(self, session, _id): + """ + Get wlan configuration for a node. + + :param int session: session id + :param int _id: node id + :return: response with a list of configuration groups + :rtype: core_pb2.GetWlanConfigResponse + :raises grpc.RpcError: when session doesn't exist + """ + request = core_pb2.GetWlanConfigRequest(session=session, id=_id) + return self.stub.GetWlanConfig(request) + + def set_wlan_config(self, session, _id, config): + """ + Set wlan configuration for a node. 
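Service calls let a node's startup, validate, and shutdown commands be overridden and then exercised with a service action. A sketch assuming a session exists, node 1 is present, and the "zebra" service is assigned to it (all assumptions, not values from this change):

from core.grpc import core_pb2
from core.grpc.client import CoreGrpcClient

client = CoreGrpcClient()
with client.context_connect():
    session_id = client.get_sessions().sessions[0].id
    node_id = 1
    service = "zebra"
    # inspect the current definition, then replace its commands
    print(client.get_node_service(session_id, node_id, service).service.startup)
    client.set_node_service(
        session_id, node_id, service,
        startup=["zebra -d"], validate=["pidof zebra"], shutdown=["killall zebra"])
    client.service_action(session_id, node_id, service, core_pb2.SERVICE_RESTART)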
+ + :param int session: session id + :param int _id: node id + :param dict[str, str] config: wlan configuration + :return: response with result of success or failure + :rtype: core_pb2.SetWlanConfigResponse + :raises grpc.RpcError: when session doesn't exist + """ + request = core_pb2.SetWlanConfigRequest(session=session, id=_id, config=config) + return self.stub.SetWlanConfig(request) + + def get_emane_config(self, session): + """ + Get session emane configuration. + + :param int session: session id + :return: response with a list of configuration groups + :rtype: core_pb2.GetEmaneConfigResponse + :raises grpc.RpcError: when session doesn't exist + """ + request = core_pb2.GetEmaneConfigRequest(session=session) + return self.stub.GetEmaneConfig(request) + + def set_emane_config(self, session, config): + """ + Set session emane configuration. + + :param int session: session id + :param dict[str, str] config: emane configuration + :return: response with result of success or failure + :rtype: core_pb2.SetEmaneConfigResponse + :raises grpc.RpcError: when session doesn't exist + """ + request = core_pb2.SetEmaneConfigRequest(session=session, config=config) + return self.stub.SetEmaneConfig(request) + + def get_emane_models(self, session): + """ + Get session emane models. + + :param int session: session id + :return: response with a list of emane models + :rtype: core_pb2.GetEmaneModelsResponse + :raises grpc.RpcError: when session doesn't exist + """ + request = core_pb2.GetEmaneModelsRequest(session=session) + return self.stub.GetEmaneModels(request) + + def get_emane_model_config(self, session, _id, model, interface_id=-1): + """ + Get emane model configuration for a node or a node's interface. + + :param int session: session id + :param int _id: node id + :param str model: emane model name + :param int interface_id: node interface id + :return: response with a list of configuration groups + :rtype: core_pb2.GetEmaneModelConfigResponse + :raises grpc.RpcError: when session doesn't exist + """ + request = core_pb2.GetEmaneModelConfigRequest(session=session, id=_id, model=model, interface=interface_id) + return self.stub.GetEmaneModelConfig(request) + + def set_emane_model_config(self, session, _id, model, config, interface_id=-1): + """ + Set emane model configuration for a node or a node's interface. + + :param int session: session id + :param int _id: node id + :param str model: emane model name + :param dict[str, str] config: emane model configuration + :param int interface_id: node interface id + :return: response with result of success or failure + :rtype: core_pb2.SetEmaneModelConfigResponse + :raises grpc.RpcError: when session doesn't exist + """ + request = core_pb2.SetEmaneModelConfigRequest( + session=session, id=_id, model=model, config=config, interface=interface_id) + return self.stub.SetEmaneModelConfig(request) + + def get_emane_model_configs(self, session): + """ + Get all emane model configurations for a session. + + :param int session: session id + :return: response with a dictionary of node/interface ids to configurations + :rtype: core_pb2.GetEmaneModelConfigsResponse + :raises grpc.RpcError: when session doesn't exist + """ + request = core_pb2.GetEmaneModelConfigsRequest(session=session) + return self.stub.GetEmaneModelConfigs(request) + + def save_xml(self, session, file_path): + """ + Save the current scenario to an XML file. 
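WLAN and EMANE configuration use the same string dictionaries and grouped-option responses as the session and mobility calls. A sketch for the WLAN case, assuming a session with a WLAN node exists and that "range" is one of the BasicRangeModel option keys:

from core.grpc.client import CoreGrpcClient

client = CoreGrpcClient()
with client.context_connect():
    session_id = client.get_sessions().sessions[0].id
    wlan_id = 3  # assumed id of an existing WLAN node
    client.set_wlan_config(session_id, wlan_id, {"range": "300"})
    # responses come back as ConfigGroup messages holding ConfigOption entries
    for group in client.get_wlan_config(session_id, wlan_id).groups:
        for option in group.options:
            print("{} = {}".format(option.name, option.value))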
+ + :param int session: session id + :param str file_path: local path to save scenario XML file to + :return: nothing + """ + request = core_pb2.SaveXmlRequest(session=session) + response = self.stub.SaveXml(request) + with open(file_path, "wb") as xml_file: + xml_file.write(response.data) + + def open_xml(self, file_path): + """ + Load a local scenario XML file to open as a new session. + + :param str file_path: path of scenario XML file + :return: response with opened session id + :rtype: core_pb2.OpenXmlResponse + """ + with open(file_path, "rb") as xml_file: + data = xml_file.read() + request = core_pb2.OpenXmlRequest(data=data) + return self.stub.OpenXml(request) + + def connect(self): + """ + Open connection to server, must be closed manually. + + :return: nothing + """ + self.channel = grpc.insecure_channel(self.address) + self.stub = core_pb2_grpc.CoreApiStub(self.channel) + + def close(self): + """ + Close currently opened server channel connection. + + :return: nothing + """ + if self.channel: + self.channel.close() + self.channel = None + + @contextmanager + def context_connect(self): + """ + Makes a context manager based connection to the server, will close after context ends. + + :return: nothing + """ + try: + self.connect() + yield + finally: + self.close() diff --git a/daemon/core/grpc/server.py b/daemon/core/grpc/server.py new file mode 100644 index 00000000..45c35fc8 --- /dev/null +++ b/daemon/core/grpc/server.py @@ -0,0 +1,888 @@ +import atexit +import logging +import os +import tempfile +import time +from Queue import Queue, Empty + +import grpc +from concurrent import futures + +from core.emulator.emudata import NodeOptions, InterfaceData, LinkOptions +from core.enumerations import NodeTypes, EventTypes, LinkTypes +from core.grpc import core_pb2 +from core.grpc import core_pb2_grpc +from core.misc import nodeutils +from core.misc.ipaddress import MacAddress +from core.mobility import BasicRangeModel, Ns2ScriptedMobility +from core.service import ServiceManager + +_ONE_DAY_IN_SECONDS = 60 * 60 * 24 + + +def convert_value(value): + if value is not None: + value = str(value) + return value + + +def get_config_groups(config, configurable_options): + groups = [] + config_options = [] + + for configuration in configurable_options.configurations(): + value = config[configuration.id] + config_option = core_pb2.ConfigOption() + config_option.label = configuration.label + config_option.name = configuration.id + config_option.value = value + config_option.type = configuration.type.value + config_option.select.extend(configuration.options) + config_options.append(config_option) + + for config_group in configurable_options.config_groups(): + start = config_group.start - 1 + stop = config_group.stop + options = config_options[start: stop] + config_group_proto = core_pb2.ConfigGroup(name=config_group.name, options=options) + groups.append(config_group_proto) + + return groups + + +def get_links(session, node): + links = [] + for link_data in node.all_link_data(0): + link = convert_link(session, link_data) + links.append(link) + return links + + +def get_emane_model_id(_id, interface): + if interface >= 0: + return _id * 1000 + interface + else: + return _id + + +def convert_link(session, link_data): + interface_one = None + if link_data.interface1_id is not None: + node = session.get_object(link_data.node1_id) + interface = node.netif(link_data.interface1_id) + interface_one = core_pb2.Interface( + id=link_data.interface1_id, name=interface.name, 
mac=convert_value(link_data.interface1_mac), + ip4=convert_value(link_data.interface1_ip4), ip4mask=link_data.interface1_ip4_mask, + ip6=convert_value(link_data.interface1_ip6), ip6mask=link_data.interface1_ip6_mask) + + interface_two = None + if link_data.interface2_id is not None: + node = session.get_object(link_data.node2_id) + interface = node.netif(link_data.interface2_id) + interface_two = core_pb2.Interface( + id=link_data.interface2_id, name=interface.name, mac=convert_value(link_data.interface2_mac), + ip4=convert_value(link_data.interface2_ip4), ip4mask=link_data.interface2_ip4_mask, + ip6=convert_value(link_data.interface2_ip6), ip6mask=link_data.interface2_ip6_mask) + + options = core_pb2.LinkOptions( + opaque=link_data.opaque, + jitter=link_data.jitter, + key=link_data.key, + mburst=link_data.mburst, + mer=link_data.mer, + per=link_data.per, + bandwidth=link_data.bandwidth, + burst=link_data.burst, + delay=link_data.delay, + dup=link_data.dup, + unidirectional=link_data.unidirectional + ) + + return core_pb2.Link( + type=link_data.link_type, node_one=link_data.node1_id, node_two=link_data.node2_id, + interface_one=interface_one, interface_two=interface_two, options=options + ) + + +class CoreGrpcServer(core_pb2_grpc.CoreApiServicer): + def __init__(self, coreemu): + super(CoreGrpcServer, self).__init__() + self.coreemu = coreemu + self.running = True + self.server = None + atexit.register(self._exit_handler) + + def _exit_handler(self): + logging.debug("catching exit, stop running") + self.running = False + + def _is_running(self, context): + return self.running and context.is_active() + + def _cancel_stream(self, context): + context.abort(grpc.StatusCode.CANCELLED, "server stopping") + + def listen(self, address="[::]:50051"): + logging.info("starting grpc api: %s", address) + self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) + core_pb2_grpc.add_CoreApiServicer_to_server(self, self.server) + self.server.add_insecure_port(address) + self.server.start() + + try: + while True: + time.sleep(_ONE_DAY_IN_SECONDS) + except KeyboardInterrupt: + self.server.stop(None) + + def get_session(self, _id, context): + session = self.coreemu.sessions.get(_id) + if not session: + context.abort(grpc.StatusCode.NOT_FOUND, "session {} not found".format(_id)) + return session + + def get_node(self, session, _id, context): + try: + return session.get_object(_id) + except KeyError: + context.abort(grpc.StatusCode.NOT_FOUND, "node {} not found".format(_id)) + + def CreateSession(self, request, context): + logging.debug("create session: %s", request) + session = self.coreemu.create_session(request.id) + session.set_state(EventTypes.DEFINITION_STATE) + session.location.setrefgeo(47.57917, -122.13232, 2.0) + session.location.refscale = 150000.0 + return core_pb2.CreateSessionResponse(id=session.id, state=session.state) + + def DeleteSession(self, request, context): + logging.debug("delete session: %s", request) + result = self.coreemu.delete_session(request.id) + return core_pb2.DeleteSessionResponse(result=result) + + def GetSessions(self, request, context): + logging.debug("get sessions: %s", request) + sessions = [] + for session_id in self.coreemu.sessions: + session = self.coreemu.sessions[session_id] + session_summary = core_pb2.SessionSummary( + id=session_id, state=session.state, nodes=session.get_node_count()) + sessions.append(session_summary) + return core_pb2.GetSessionsResponse(sessions=sessions) + + def GetSessionLocation(self, request, context): + logging.debug("get 
session location: %s", request) + session = self.get_session(request.id, context) + x, y, z = session.location.refxyz + lat, lon, alt = session.location.refgeo + position = core_pb2.Position(x=x, y=y, z=z, lat=lat, lon=lon, alt=alt) + return core_pb2.GetSessionLocationResponse(position=position, scale=session.location.refscale) + + def SetSessionLocation(self, request, context): + logging.debug("set session location: %s", request) + session = self.get_session(request.id, context) + session.location.refxyz = (request.position.x, request.position.y, request.position.z) + session.location.setrefgeo(request.position.lat, request.position.lon, request.position.alt) + session.location.refscale = request.scale + return core_pb2.SetSessionLocationResponse(result=True) + + def SetSessionState(self, request, context): + logging.debug("set session state: %s", request) + session = self.get_session(request.id, context) + + try: + state = EventTypes(request.state) + session.set_state(state) + + if state == EventTypes.INSTANTIATION_STATE: + if not os.path.exists(session.session_dir): + os.mkdir(session.session_dir) + session.instantiate() + elif state == EventTypes.SHUTDOWN_STATE: + session.shutdown() + elif state == EventTypes.DATACOLLECT_STATE: + session.data_collect() + elif state == EventTypes.DEFINITION_STATE: + session.clear() + + result = True + except KeyError: + result = False + + return core_pb2.SetSessionStateResponse(result=result) + + def GetSessionOptions(self, request, context): + logging.debug("get session options: %s", request) + session = self.get_session(request.id, context) + config = session.options.get_configs() + defaults = session.options.default_values() + defaults.update(config) + groups = get_config_groups(defaults, session.options) + return core_pb2.GetSessionOptionsResponse(groups=groups) + + def SetSessionOptions(self, request, context): + logging.debug("set session options: %s", request) + session = self.get_session(request.id, context) + config = session.options.get_configs() + config.update(request.config) + return core_pb2.SetSessionOptionsResponse(result=True) + + def GetSession(self, request, context): + logging.debug("get session: %s", request) + session = self.get_session(request.id, context) + + links = [] + nodes = [] + for node_id in session.objects: + node = session.objects[node_id] + if not isinstance(node.objid, int): + continue + + node_type = nodeutils.get_node_type(node.__class__).value + model = getattr(node, "type", None) + position = core_pb2.Position(x=node.position.x, y=node.position.y, z=node.position.z) + + services = getattr(node, "services", []) + if services is None: + services = [] + services = [x.name for x in services] + + emane_model = None + if nodeutils.is_node(node, NodeTypes.EMANE): + emane_model = node.model.name + + node_proto = core_pb2.Node( + id=node.objid, name=node.name, emane=emane_model, model=model, + type=node_type, position=position, services=services) + nodes.append(node_proto) + + node_links = get_links(session, node) + links.extend(node_links) + + session_proto = core_pb2.Session(state=session.state, nodes=nodes, links=links) + return core_pb2.GetSessionResponse(session=session_proto) + + def NodeEvents(self, request, context): + session = self.get_session(request.id, context) + queue = Queue() + session.node_handlers.append(queue.put) + + while self._is_running(context): + try: + node = queue.get(timeout=1) + position = core_pb2.Position(x=node.x_position, y=node.y_position) + services = node.services or "" + services = 
services.split("|") + node_proto = core_pb2.Node( + id=node.id, name=node.name, model=node.model, position=position, services=services) + node_event = core_pb2.NodeEvent(node=node_proto) + yield node_event + except Empty: + continue + + self._cancel_stream(context) + + def LinkEvents(self, request, context): + session = self.get_session(request.id, context) + queue = Queue() + session.link_handlers.append(queue.put) + + while self._is_running(context): + try: + event = queue.get(timeout=1) + interface_one = None + if event.interface1_id is not None: + interface_one = core_pb2.Interface( + id=event.interface1_id, name=event.interface1_name, mac=convert_value(event.interface1_mac), + ip4=convert_value(event.interface1_ip4), ip4mask=event.interface1_ip4_mask, + ip6=convert_value(event.interface1_ip6), ip6mask=event.interface1_ip6_mask) + + interface_two = None + if event.interface2_id is not None: + interface_two = core_pb2.Interface( + id=event.interface2_id, name=event.interface2_name, mac=convert_value(event.interface2_mac), + ip4=convert_value(event.interface2_ip4), ip4mask=event.interface2_ip4_mask, + ip6=convert_value(event.interface2_ip6), ip6mask=event.interface2_ip6_mask) + + options = core_pb2.LinkOptions( + opaque=event.opaque, + jitter=event.jitter, + key=event.key, + mburst=event.mburst, + mer=event.mer, + per=event.per, + bandwidth=event.bandwidth, + burst=event.burst, + delay=event.delay, + dup=event.dup, + unidirectional=event.unidirectional + ) + link = core_pb2.Link( + type=event.link_type, node_one=event.node1_id, node_two=event.node2_id, + interface_one=interface_one, interface_two=interface_two, options=options) + link_event = core_pb2.LinkEvent(message_type=event.message_type, link=link) + yield link_event + except Empty: + continue + + self._cancel_stream(context) + + def SessionEvents(self, request, context): + session = self.get_session(request.id, context) + queue = Queue() + session.event_handlers.append(queue.put) + + while self._is_running(context): + try: + event = queue.get(timeout=1) + event_time = event.time + if event_time is not None: + event_time = float(event_time) + session_event = core_pb2.SessionEvent( + node=event.node, + event=event.event_type, + name=event.name, + data=event.data, + time=event_time, + session=session.id + ) + yield session_event + except Empty: + continue + + self._cancel_stream(context) + + def ConfigEvents(self, request, context): + session = self.get_session(request.id, context) + queue = Queue() + session.config_handlers.append(queue.put) + + while self._is_running(context): + try: + event = queue.get(timeout=1) + config_event = core_pb2.ConfigEvent( + message_type=event.message_type, + node=event.node, + object=event.object, + type=event.type, + captions=event.captions, + bitmap=event.bitmap, + data_values=event.data_values, + possible_values=event.possible_values, + groups=event.groups, + session=event.session, + interface=event.interface_number, + network_id=event.network_id, + opaque=event.opaque, + data_types=event.data_types + ) + yield config_event + except Empty: + continue + + self._cancel_stream(context) + + def ExceptionEvents(self, request, context): + session = self.get_session(request.id, context) + queue = Queue() + session.exception_handlers.append(queue.put) + + while self._is_running(context): + try: + event = queue.get(timeout=1) + exception_event = core_pb2.ExceptionEvent( + node=event.node, + session=int(event.session), + level=event.level.value, + source=event.source, + date=event.date, + text=event.text, + 
opaque=event.opaque + ) + yield exception_event + except Empty: + continue + + self._cancel_stream(context) + + def FileEvents(self, request, context): + session = self.get_session(request.id, context) + queue = Queue() + session.file_handlers.append(queue.put) + + while self._is_running(context): + try: + event = queue.get(timeout=1) + file_event = core_pb2.FileEvent( + message_type=event.message_type, + node=event.node, + name=event.name, + mode=event.mode, + number=event.number, + type=event.type, + source=event.source, + session=event.session, + data=event.data, + compressed_data=event.compressed_data + ) + yield file_event + except Empty: + continue + + self._cancel_stream(context) + + def AddNode(self, request, context): + logging.debug("add node: %s", request) + session = self.get_session(request.session, context) + + node_proto = request.node + node_id = node_proto.id + node_type = node_proto.type + if node_type is None: + node_type = NodeTypes.DEFAULT.value + node_type = NodeTypes(node_type) + + node_options = NodeOptions(name=node_proto.name, model=node_proto.model) + node_options.icon = node_proto.icon + node_options.opaque = node_proto.opaque + node_options.services = node_proto.services + + position = node_proto.position + node_options.set_position(position.x, position.y) + node_options.set_location(position.lat, position.lon, position.alt) + node = session.add_node(_type=node_type, _id=node_id, node_options=node_options) + + # configure emane if provided + emane_model = node_proto.emane + if emane_model: + session.emane.set_model_config(node_id, emane_model) + + return core_pb2.AddNodeResponse(id=node.objid) + + def GetNode(self, request, context): + logging.debug("get node: %s", request) + session = self.get_session(request.session, context) + node = self.get_node(session, request.id, context) + + interfaces = [] + for interface_id, interface in node._netif.iteritems(): + net_id = None + if interface.net: + net_id = interface.net.objid + interface_proto = core_pb2.Interface( + id=interface_id, netid=net_id, name=interface.name, mac=str(interface.hwaddr), + mtu=interface.mtu, flowid=interface.flow_id) + interfaces.append(interface_proto) + + emane_model = None + if nodeutils.is_node(node, NodeTypes.EMANE): + emane_model = node.model.name + + services = [x.name for x in getattr(node, "services", [])] + position = core_pb2.Position(x=node.position.x, y=node.position.y, z=node.position.z) + node_type = nodeutils.get_node_type(node.__class__).value + node = core_pb2.Node( + id=node.objid, name=node.name, type=node_type, emane=emane_model, model=node.type, position=position, + services=services) + + return core_pb2.GetNodeResponse(node=node, interfaces=interfaces) + + def EditNode(self, request, context): + logging.debug("edit node: %s", request) + session = self.get_session(request.session, context) + node_id = request.id + node_options = NodeOptions() + x = request.position.x + y = request.position.y + node_options.set_position(x, y) + lat = request.position.lat + lon = request.position.lon + alt = request.position.alt + node_options.set_location(lat, lon, alt) + result = session.update_node(node_id, node_options) + return core_pb2.EditNodeResponse(result=result) + + def DeleteNode(self, request, context): + logging.debug("delete node: %s", request) + session = self.get_session(request.session, context) + result = session.delete_node(request.id) + return core_pb2.DeleteNodeResponse(result=result) + + def GetNodeLinks(self, request, context): + logging.debug("get node links: %s", 
request) + session = self.get_session(request.session, context) + node = self.get_node(session, request.id, context) + links = get_links(session, node) + return core_pb2.GetNodeLinksResponse(links=links) + + def AddLink(self, request, context): + logging.debug("add link: %s", request) + session = self.get_session(request.session, context) + + # validate node exist + self.get_node(session, request.link.node_one, context) + self.get_node(session, request.link.node_two, context) + node_one = request.link.node_one + node_two = request.link.node_two + + interface_one = None + interface_one_data = request.link.interface_one + if interface_one_data: + name = interface_one_data.name + if name == "": + name = None + mac = interface_one_data.mac + if mac == "": + mac = None + else: + mac = MacAddress.from_string(mac) + interface_one = InterfaceData( + _id=interface_one_data.id, + name=name, + mac=mac, + ip4=interface_one_data.ip4, + ip4_mask=interface_one_data.ip4mask, + ip6=interface_one_data.ip6, + ip6_mask=interface_one_data.ip6mask, + ) + + interface_two = None + interface_two_data = request.link.interface_two + if interface_two_data: + name = interface_two_data.name + if name == "": + name = None + mac = interface_two_data.mac + if mac == "": + mac = None + else: + mac = MacAddress.from_string(mac) + interface_two = InterfaceData( + _id=interface_two_data.id, + name=name, + mac=mac, + ip4=interface_two_data.ip4, + ip4_mask=interface_two_data.ip4mask, + ip6=interface_two_data.ip6, + ip6_mask=interface_two_data.ip6mask, + ) + + link_type = None + link_type_value = request.link.type + if link_type_value is not None: + link_type = LinkTypes(link_type_value) + + options_data = request.link.options + link_options = LinkOptions(_type=link_type) + if options_data: + link_options.delay = options_data.delay + link_options.bandwidth = options_data.bandwidth + link_options.per = options_data.per + link_options.dup = options_data.dup + link_options.jitter = options_data.jitter + link_options.mer = options_data.mer + link_options.burst = options_data.burst + link_options.mburst = options_data.mburst + link_options.unidirectional = options_data.unidirectional + link_options.key = options_data.key + link_options.opaque = options_data.opaque + + session.add_link(node_one, node_two, interface_one, interface_two, link_options=link_options) + return core_pb2.AddLinkResponse(result=True) + + def EditLink(self, request, context): + logging.debug("edit link: %s", request) + session = self.get_session(request.session, context) + node_one = request.node_one + node_two = request.node_two + interface_one_id = request.interface_one + interface_two_id = request.interface_two + options_data = request.options + link_options = LinkOptions() + link_options.delay = options_data.delay + link_options.bandwidth = options_data.bandwidth + link_options.per = options_data.per + link_options.dup = options_data.dup + link_options.jitter = options_data.jitter + link_options.mer = options_data.mer + link_options.burst = options_data.burst + link_options.mburst = options_data.mburst + link_options.unidirectional = options_data.unidirectional + link_options.key = options_data.key + link_options.opaque = options_data.opaque + session.update_link(node_one, node_two, interface_one_id, interface_two_id, link_options) + return core_pb2.EditLinkResponse(result=True) + + def DeleteLink(self, request, context): + logging.debug("delete link: %s", request) + session = self.get_session(request.session, context) + node_one = request.node_one + 
node_two = request.node_two + interface_one = request.interface_one + interface_two = request.interface_two + session.delete_link(node_one, node_two, interface_one, interface_two) + return core_pb2.DeleteLinkResponse(result=True) + + def GetHooks(self, request, context): + logging.debug("get hooks: %s", request) + session = self.get_session(request.session, context) + hooks = [] + for state, state_hooks in session._hooks.iteritems(): + for file_name, file_data in state_hooks: + hook = core_pb2.Hook(state=state, file=file_name, data=file_data) + hooks.append(hook) + return core_pb2.GetHooksResponse(hooks=hooks) + + def AddHook(self, request, context): + logging.debug("add hook: %s", request) + session = self.get_session(request.session, context) + hook = request.hook + session.add_hook(hook.state, hook.file, None, hook.data) + return core_pb2.AddHookResponse(result=True) + + def GetMobilityConfigs(self, request, context): + logging.debug("get mobility configs: %s", request) + session = self.get_session(request.session, context) + response = core_pb2.GetMobilityConfigsResponse() + for node_id, model_config in session.mobility.node_configurations.iteritems(): + if node_id == -1: + continue + for model_name in model_config.iterkeys(): + if model_name != Ns2ScriptedMobility.name: + continue + config = session.mobility.get_model_config(node_id, model_name) + groups = get_config_groups(config, Ns2ScriptedMobility) + response.configs[node_id].groups.extend(groups) + return response + + def GetMobilityConfig(self, request, context): + logging.debug("get mobility config: %s", request) + session = self.get_session(request.session, context) + config = session.mobility.get_model_config(request.id, Ns2ScriptedMobility.name) + groups = get_config_groups(config, Ns2ScriptedMobility) + return core_pb2.GetMobilityConfigResponse(groups=groups) + + def SetMobilityConfig(self, request, context): + logging.debug("set mobility config: %s", request) + session = self.get_session(request.session, context) + session.mobility.set_model_config(request.id, Ns2ScriptedMobility.name, request.config) + return core_pb2.SetMobilityConfigResponse(result=True) + + def MobilityAction(self, request, context): + logging.debug("mobility action: %s", request) + session = self.get_session(request.session, context) + node = self.get_node(session, request.id, context) + result = True + if request.action == core_pb2.MOBILITY_START: + node.mobility.start() + elif request.action == core_pb2.MOBILITY_PAUSE: + node.mobility.pause() + elif request.action == core_pb2.MOBILITY_STOP: + node.mobility.stop(move_initial=True) + else: + result = False + return core_pb2.MobilityActionResponse(result=result) + + def GetServices(self, request, context): + logging.debug("get services: %s", request) + services = [] + for service in ServiceManager.services.itervalues(): + service_proto = core_pb2.Service(group=service.group, name=service.name) + services.append(service_proto) + return core_pb2.GetServicesResponse(services=services) + + def GetServiceDefaults(self, request, context): + logging.debug("get service defaults: %s", request) + session = self.get_session(request.session, context) + all_service_defaults = [] + for node_type in session.services.default_services: + services = session.services.default_services[node_type] + service_defaults = core_pb2.ServiceDefaults(node_type=node_type, services=services) + all_service_defaults.append(service_defaults) + return core_pb2.GetServiceDefaultsResponse(defaults=all_service_defaults) + + def 
SetServiceDefaults(self, request, context): + logging.debug("set service defaults: %s", request) + session = self.get_session(request.session, context) + session.services.default_services.clear() + for service_defaults in request.defaults: + session.services.default_services[service_defaults.node_type] = service_defaults.services + return core_pb2.SetServiceDefaultsResponse(result=True) + + def GetNodeService(self, request, context): + logging.debug("get node service: %s", request) + session = self.get_session(request.session, context) + service = session.services.get_service(request.id, request.service, default_service=True) + service_proto = core_pb2.NodeServiceData( + executables=service.executables, + dependencies=service.dependencies, + dirs=service.dirs, + configs=service.configs, + startup=service.startup, + validate=service.validate, + validation_mode=service.validation_mode.value, + validation_timer=service.validation_timer, + shutdown=service.shutdown, + meta=service.meta + ) + return core_pb2.GetNodeServiceResponse(service=service_proto) + + def GetNodeServiceFile(self, request, context): + logging.debug("get node service file: %s", request) + session = self.get_session(request.session, context) + node = self.get_node(session, request.id, context) + service = None + for current_service in node.services: + if current_service.name == request.service: + service = current_service + break + if not service: + context.abort(grpc.StatusCode.NOT_FOUND, "service not found") + file_data = session.services.get_service_file(node, request.service, request.file) + return core_pb2.GetNodeServiceFileResponse(data=file_data.data) + + def SetNodeService(self, request, context): + logging.debug("set node service: %s", request) + session = self.get_session(request.session, context) + session.services.set_service(request.id, request.service) + service = session.services.get_service(request.id, request.service) + service.startup = tuple(request.startup) + service.validate = tuple(request.validate) + service.shutdown = tuple(request.shutdown) + return core_pb2.SetNodeServiceResponse(result=True) + + def SetNodeServiceFile(self, request, context): + logging.debug("set node service file: %s", request) + session = self.get_session(request.session, context) + session.services.set_service_file(request.id, request.service, request.file, request.data) + return core_pb2.SetNodeServiceFileResponse(result=True) + + def ServiceAction(self, request, context): + logging.debug("service action: %s", request) + session = self.get_session(request.session, context) + node = self.get_node(session, request.id, context) + service = None + for current_service in node.services: + if current_service.name == request.service: + service = current_service + break + + if not service: + context.abort(grpc.StatusCode.NOT_FOUND, "service not found") + + status = -1 + if request.action == core_pb2.SERVICE_START: + status = session.services.startup_service(node, service, wait=True) + elif request.action == core_pb2.SERVICE_STOP: + status = session.services.stop_service(node, service) + elif request.action == core_pb2.SERVICE_RESTART: + status = session.services.stop_service(node, service) + if not status: + status = session.services.startup_service(node, service, wait=True) + elif request.action == core_pb2.SERVICE_VALIDATE: + status = session.services.validate_service(node, service) + + result = False + if not status: + result = True + + return core_pb2.ServiceActionResponse(result=result) + + def GetWlanConfig(self, request, 
context): + logging.debug("get wlan config: %s", request) + session = self.get_session(request.session, context) + config = session.mobility.get_model_config(request.id, BasicRangeModel.name) + groups = get_config_groups(config, BasicRangeModel) + return core_pb2.GetWlanConfigResponse(groups=groups) + + def SetWlanConfig(self, request, context): + logging.debug("set wlan config: %s", request) + session = self.get_session(request.session, context) + session.mobility.set_model_config(request.id, BasicRangeModel.name, request.config) + return core_pb2.SetWlanConfigResponse(result=True) + + def GetEmaneConfig(self, request, context): + logging.debug("get emane config: %s", request) + session = self.get_session(request.session, context) + config = session.emane.get_configs() + groups = get_config_groups(config, session.emane.emane_config) + return core_pb2.GetEmaneConfigResponse(groups=groups) + + def SetEmaneConfig(self, request, context): + logging.debug("set emane config: %s", request) + session = self.get_session(request.session, context) + config = session.emane.get_configs() + config.update(request.config) + return core_pb2.SetEmaneConfigResponse(result=True) + + def GetEmaneModels(self, request, context): + logging.debug("get emane models: %s", request) + session = self.get_session(request.session, context) + models = [] + for model in session.emane.models.keys(): + if len(model.split("_")) != 2: + continue + models.append(model) + return core_pb2.GetEmaneModelsResponse(models=models) + + def GetEmaneModelConfig(self, request, context): + logging.debug("get emane model config: %s", request) + session = self.get_session(request.session, context) + model = session.emane.models[request.model] + _id = get_emane_model_id(request.id, request.interface) + config = session.emane.get_model_config(_id, request.model) + groups = get_config_groups(config, model) + return core_pb2.GetEmaneModelConfigResponse(groups=groups) + + def SetEmaneModelConfig(self, request, context): + logging.debug("set emane model config: %s", request) + session = self.get_session(request.session, context) + _id = get_emane_model_id(request.id, request.interface) + session.emane.set_model_config(_id, request.model, request.config) + return core_pb2.SetEmaneModelConfigResponse(result=True) + + def GetEmaneModelConfigs(self, request, context): + logging.debug("get emane model configs: %s", request) + session = self.get_session(request.session, context) + response = core_pb2.GetEmaneModelConfigsResponse() + for node_id, model_config in session.emane.node_configurations.iteritems(): + if node_id == -1: + continue + + for model_name in model_config.iterkeys(): + model = session.emane.models[model_name] + config = session.emane.get_model_config(node_id, model_name) + config_groups = get_config_groups(config, model) + node_configurations = response.configs[node_id] + node_configurations.model = model_name + node_configurations.groups.extend(config_groups) + return response + + def SaveXml(self, request, context): + logging.debug("save xml: %s", request) + session = self.get_session(request.session, context) + + _, temp_path = tempfile.mkstemp() + session.save_xml(temp_path) + + with open(temp_path, "rb") as xml_file: + data = xml_file.read() + + return core_pb2.SaveXmlResponse(data=data) + + def OpenXml(self, request, context): + logging.debug("open xml: %s", request) + session = self.coreemu.create_session() + session.set_state(EventTypes.CONFIGURATION_STATE) + + _, temp_path = tempfile.mkstemp() + with open(temp_path, "wb") as 
xml_file: + xml_file.write(request.data) + + try: + session.open_xml(temp_path, start=True) + return core_pb2.OpenXmlResponse(session=session.id, result=True) + except IOError: + logging.exception("error opening session file") + self.coreemu.delete_session(session.id) + context.abort(grpc.StatusCode.INVALID_ARGUMENT, "invalid xml file") diff --git a/daemon/core/gui/__init__.py b/daemon/core/gui/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/daemon/core/gui/app.py b/daemon/core/gui/app.py deleted file mode 100644 index 4fd1dce5..00000000 --- a/daemon/core/gui/app.py +++ /dev/null @@ -1,220 +0,0 @@ -import logging -import math -import tkinter as tk -from tkinter import PhotoImage, font, messagebox, ttk -from tkinter.ttk import Progressbar -from typing import Any, Optional - -import grpc - -from core.gui import appconfig, images -from core.gui import nodeutils as nutils -from core.gui import themes -from core.gui.appconfig import GuiConfig -from core.gui.coreclient import CoreClient -from core.gui.dialogs.error import ErrorDialog -from core.gui.frames.base import InfoFrameBase -from core.gui.frames.default import DefaultInfoFrame -from core.gui.graph.manager import CanvasManager -from core.gui.images import ImageEnum -from core.gui.menubar import Menubar -from core.gui.statusbar import StatusBar -from core.gui.themes import PADY -from core.gui.toolbar import Toolbar - -logger = logging.getLogger(__name__) -WIDTH: int = 1000 -HEIGHT: int = 800 - - -class Application(ttk.Frame): - def __init__(self, proxy: bool, session_id: int = None) -> None: - super().__init__() - # load node icons - nutils.setup() - - # widgets - self.menubar: Optional[Menubar] = None - self.toolbar: Optional[Toolbar] = None - self.right_frame: Optional[ttk.Frame] = None - self.manager: Optional[CanvasManager] = None - self.statusbar: Optional[StatusBar] = None - self.progress: Optional[Progressbar] = None - self.infobar: Optional[ttk.Frame] = None - self.info_frame: Optional[InfoFrameBase] = None - self.show_infobar: tk.BooleanVar = tk.BooleanVar(value=False) - - # fonts - self.fonts_size: dict[str, int] = {} - self.icon_text_font: Optional[font.Font] = None - self.edge_font: Optional[font.Font] = None - - # setup - self.guiconfig: GuiConfig = appconfig.read() - self.app_scale: float = self.guiconfig.scale - self.setup_scaling() - self.style: ttk.Style = ttk.Style() - self.setup_theme() - self.core: CoreClient = CoreClient(self, proxy) - self.setup_app() - self.draw() - self.core.setup(session_id) - - def setup_scaling(self) -> None: - self.fonts_size = {name: font.nametofont(name)["size"] for name in font.names()} - text_scale = self.app_scale if self.app_scale < 1 else math.sqrt(self.app_scale) - themes.scale_fonts(self.fonts_size, self.app_scale) - self.icon_text_font = font.Font(family="TkIconFont", size=int(12 * text_scale)) - self.edge_font = font.Font( - family="TkDefaultFont", size=int(8 * text_scale), weight=font.BOLD - ) - - def setup_theme(self) -> None: - themes.load(self.style) - self.master.bind_class("Menu", "<>", themes.theme_change_menu) - self.master.bind("<>", themes.theme_change) - self.style.theme_use(self.guiconfig.preferences.theme) - - def setup_app(self) -> None: - self.master.title("CORE") - self.center() - self.master.protocol("WM_DELETE_WINDOW", self.on_closing) - image = images.from_enum(ImageEnum.CORE, width=images.DIALOG_SIZE) - self.master.tk.call("wm", "iconphoto", self.master._w, image) - self.master.option_add("*tearOff", tk.FALSE) - self.setup_file_dialogs() - 
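The SaveXml and OpenXml handlers above round-trip a whole scenario through the client methods shown earlier: saving streams the XML back to the caller, while opening uploads the file contents and starts a brand new session. A sketch, assuming an existing session and a writable path:

from core.grpc.client import CoreGrpcClient

client = CoreGrpcClient()
with client.context_connect():
    session_id = client.get_sessions().sessions[0].id
    client.save_xml(session_id, "/tmp/scenario.xml")
    response = client.open_xml("/tmp/scenario.xml")
    print("opened as session {}".format(response.session))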
- def setup_file_dialogs(self) -> None: - """ - Hack code that needs to initialize a bad dialog so that we can apply, - global settings for dialogs to not show hidden files by default and display - the hidden file toggle. - - :return: nothing - """ - try: - self.master.tk.call("tk_getOpenFile", "-foobar") - except tk.TclError: - pass - self.master.tk.call("set", "::tk::dialog::file::showHiddenBtn", "1") - self.master.tk.call("set", "::tk::dialog::file::showHiddenVar", "0") - - def center(self) -> None: - screen_width = self.master.winfo_screenwidth() - screen_height = self.master.winfo_screenheight() - x = int((screen_width / 2) - (WIDTH * self.app_scale / 2)) - y = int((screen_height / 2) - (HEIGHT * self.app_scale / 2)) - self.master.geometry( - f"{int(WIDTH * self.app_scale)}x{int(HEIGHT * self.app_scale)}+{x}+{y}" - ) - - def draw(self) -> None: - self.master.rowconfigure(0, weight=1) - self.master.columnconfigure(0, weight=1) - self.rowconfigure(0, weight=1) - self.columnconfigure(1, weight=1) - self.grid(sticky=tk.NSEW) - self.toolbar = Toolbar(self) - self.toolbar.grid(sticky=tk.NS) - self.right_frame = ttk.Frame(self) - self.right_frame.columnconfigure(0, weight=1) - self.right_frame.rowconfigure(0, weight=1) - self.right_frame.grid(row=0, column=1, sticky=tk.NSEW) - self.draw_canvas() - self.draw_infobar() - self.draw_status() - self.progress = Progressbar(self.right_frame, mode="indeterminate") - self.menubar = Menubar(self) - self.master.config(menu=self.menubar) - - def draw_infobar(self) -> None: - self.infobar = ttk.Frame(self.right_frame, padding=5, relief=tk.RAISED) - self.infobar.columnconfigure(0, weight=1) - self.infobar.rowconfigure(1, weight=1) - label_font = font.Font(weight=font.BOLD, underline=tk.TRUE) - label = ttk.Label( - self.infobar, text="Details", anchor=tk.CENTER, font=label_font - ) - label.grid(sticky=tk.EW, pady=PADY) - - def draw_canvas(self) -> None: - self.manager = CanvasManager(self.right_frame, self, self.core) - self.manager.notebook.grid(sticky=tk.NSEW) - - def draw_status(self) -> None: - self.statusbar = StatusBar(self.right_frame, self) - self.statusbar.grid(sticky=tk.EW, columnspan=2) - - def display_info(self, frame_class: type[InfoFrameBase], **kwargs: Any) -> None: - if not self.show_infobar.get(): - return - self.clear_info() - self.info_frame = frame_class(self.infobar, **kwargs) - self.info_frame.draw() - self.info_frame.grid(sticky=tk.NSEW) - - def clear_info(self) -> None: - if self.info_frame: - self.info_frame.destroy() - self.info_frame = None - - def default_info(self) -> None: - self.clear_info() - self.display_info(DefaultInfoFrame, app=self) - - def show_info(self) -> None: - self.default_info() - self.infobar.grid(row=0, column=1, sticky=tk.NSEW) - - def hide_info(self) -> None: - self.infobar.grid_forget() - - def show_grpc_exception( - self, message: str, e: grpc.RpcError, blocking: bool = False - ) -> None: - logger.exception("app grpc exception", exc_info=e) - dialog = ErrorDialog(self, "GRPC Exception", message, e.details()) - if blocking: - dialog.show() - else: - self.after(0, lambda: dialog.show()) - - def show_exception(self, message: str, e: Exception) -> None: - logger.exception("app exception", exc_info=e) - self.after( - 0, lambda: ErrorDialog(self, "App Exception", message, str(e)).show() - ) - - def show_exception_data(self, title: str, message: str, details: str) -> None: - self.after(0, lambda: ErrorDialog(self, title, message, details).show()) - - def show_error(self, title: str, message: str, blocking: bool = 
False) -> None: - if blocking: - messagebox.showerror(title, message, parent=self) - else: - self.after(0, lambda: messagebox.showerror(title, message, parent=self)) - - def on_closing(self) -> None: - if self.toolbar.picker: - self.toolbar.picker.destroy() - self.menubar.prompt_save_running_session(True) - - def save_config(self) -> None: - appconfig.save(self.guiconfig) - - def joined_session_update(self) -> None: - if self.core.is_runtime(): - self.menubar.set_state(is_runtime=True) - self.toolbar.set_runtime() - else: - self.menubar.set_state(is_runtime=False) - self.toolbar.set_design() - - def get_enum_icon(self, image_enum: ImageEnum, *, width: int) -> PhotoImage: - return images.from_enum(image_enum, width=width, scale=self.app_scale) - - def get_file_icon(self, file_path: str, *, width: int) -> PhotoImage: - return images.from_file(file_path, width=width, scale=self.app_scale) - - def close(self) -> None: - self.master.destroy() diff --git a/daemon/core/gui/appconfig.py b/daemon/core/gui/appconfig.py deleted file mode 100644 index 0a5ae76b..00000000 --- a/daemon/core/gui/appconfig.py +++ /dev/null @@ -1,224 +0,0 @@ -import os -import shutil -from pathlib import Path -from typing import Optional - -import yaml - -from core.gui import themes - -HOME_PATH: Path = Path.home().joinpath(".coregui") -BACKGROUNDS_PATH: Path = HOME_PATH.joinpath("backgrounds") -CUSTOM_EMANE_PATH: Path = HOME_PATH.joinpath("custom_emane") -CUSTOM_SERVICE_PATH: Path = HOME_PATH.joinpath("custom_services") -ICONS_PATH: Path = HOME_PATH.joinpath("icons") -MOBILITY_PATH: Path = HOME_PATH.joinpath("mobility") -XMLS_PATH: Path = HOME_PATH.joinpath("xmls") -CONFIG_PATH: Path = HOME_PATH.joinpath("config.yaml") -LOG_PATH: Path = HOME_PATH.joinpath("gui.log") -SCRIPT_PATH: Path = HOME_PATH.joinpath("scripts") - -# local paths -DATA_PATH: Path = Path(__file__).parent.joinpath("data") -LOCAL_ICONS_PATH: Path = DATA_PATH.joinpath("icons").absolute() -LOCAL_BACKGROUND_PATH: Path = DATA_PATH.joinpath("backgrounds").absolute() -LOCAL_XMLS_PATH: Path = DATA_PATH.joinpath("xmls").absolute() -LOCAL_MOBILITY_PATH: Path = DATA_PATH.joinpath("mobility").absolute() - -# configuration data -TERMINALS: dict[str, str] = { - "xterm": "xterm -e", - "aterm": "aterm -e", - "eterm": "eterm -e", - "rxvt": "rxvt -e", - "konsole": "konsole -e", - "lxterminal": "lxterminal -e", - "xfce4-terminal": "xfce4-terminal -x", - "gnome-terminal": "gnome-terminal --window --", -} -EDITORS: list[str] = ["$EDITOR", "vim", "emacs", "gedit", "nano", "vi"] - - -class IndentDumper(yaml.Dumper): - def increase_indent(self, flow: bool = False, indentless: bool = False) -> None: - super().increase_indent(flow, False) - - -class CustomNode(yaml.YAMLObject): - yaml_tag: str = "!CustomNode" - yaml_loader: type[yaml.SafeLoader] = yaml.SafeLoader - - def __init__(self, name: str, image: str, services: list[str]) -> None: - self.name: str = name - self.image: str = image - self.services: list[str] = services - - -class CoreServer(yaml.YAMLObject): - yaml_tag: str = "!CoreServer" - yaml_loader: type[yaml.SafeLoader] = yaml.SafeLoader - - def __init__(self, name: str, address: str) -> None: - self.name: str = name - self.address: str = address - - -class Observer(yaml.YAMLObject): - yaml_tag: str = "!Observer" - yaml_loader: type[yaml.SafeLoader] = yaml.SafeLoader - - def __init__(self, name: str, cmd: str) -> None: - self.name: str = name - self.cmd: str = cmd - - -class PreferencesConfig(yaml.YAMLObject): - yaml_tag: str = "!PreferencesConfig" - yaml_loader: 
type[yaml.SafeLoader] = yaml.SafeLoader - - def __init__( - self, - editor: str = EDITORS[1], - terminal: str = None, - theme: str = themes.THEME_DARK, - gui3d: str = "/usr/local/bin/std3d.sh", - width: int = 1000, - height: int = 750, - ) -> None: - self.theme: str = theme - self.editor: str = editor - self.terminal: str = terminal - self.gui3d: str = gui3d - self.width: int = width - self.height: int = height - - -class LocationConfig(yaml.YAMLObject): - yaml_tag: str = "!LocationConfig" - yaml_loader: type[yaml.SafeLoader] = yaml.SafeLoader - - def __init__( - self, - x: float = 0.0, - y: float = 0.0, - z: float = 0.0, - lat: float = 47.5791667, - lon: float = -122.132322, - alt: float = 2.0, - scale: float = 150.0, - ) -> None: - self.x: float = x - self.y: float = y - self.z: float = z - self.lat: float = lat - self.lon: float = lon - self.alt: float = alt - self.scale: float = scale - - -class IpConfigs(yaml.YAMLObject): - yaml_tag: str = "!IpConfigs" - yaml_loader: type[yaml.SafeLoader] = yaml.SafeLoader - - def __init__(self, **kwargs) -> None: - self.__setstate__(kwargs) - - def __setstate__(self, kwargs): - self.ip4s: list[str] = kwargs.get( - "ip4s", ["10.0.0.0", "192.168.0.0", "172.16.0.0"] - ) - self.ip4: str = kwargs.get("ip4", self.ip4s[0]) - self.ip6s: list[str] = kwargs.get("ip6s", ["2001::", "2002::", "a::"]) - self.ip6: str = kwargs.get("ip6", self.ip6s[0]) - self.enable_ip4: bool = kwargs.get("enable_ip4", True) - self.enable_ip6: bool = kwargs.get("enable_ip6", True) - - -class GuiConfig(yaml.YAMLObject): - yaml_tag: str = "!GuiConfig" - yaml_loader: type[yaml.SafeLoader] = yaml.SafeLoader - - def __init__( - self, - preferences: PreferencesConfig = None, - location: LocationConfig = None, - servers: list[CoreServer] = None, - nodes: list[CustomNode] = None, - recentfiles: list[str] = None, - observers: list[Observer] = None, - scale: float = 1.0, - ips: IpConfigs = None, - mac: str = "00:00:00:aa:00:00", - ) -> None: - if preferences is None: - preferences = PreferencesConfig() - self.preferences: PreferencesConfig = preferences - if location is None: - location = LocationConfig() - self.location: LocationConfig = location - if servers is None: - servers = [] - self.servers: list[CoreServer] = servers - if nodes is None: - nodes = [] - self.nodes: list[CustomNode] = nodes - if recentfiles is None: - recentfiles = [] - self.recentfiles: list[str] = recentfiles - if observers is None: - observers = [] - self.observers: list[Observer] = observers - self.scale: float = scale - if ips is None: - ips = IpConfigs() - self.ips: IpConfigs = ips - self.mac: str = mac - - -def copy_files(current_path: Path, new_path: Path) -> None: - for current_file in current_path.glob("*"): - new_file = new_path.joinpath(current_file.name) - if not new_file.exists(): - shutil.copy(current_file, new_file) - - -def find_terminal() -> Optional[str]: - for term in sorted(TERMINALS): - cmd = TERMINALS[term] - if shutil.which(term): - return cmd - return None - - -def check_directory() -> None: - HOME_PATH.mkdir(exist_ok=True) - BACKGROUNDS_PATH.mkdir(exist_ok=True) - CUSTOM_EMANE_PATH.mkdir(exist_ok=True) - CUSTOM_SERVICE_PATH.mkdir(exist_ok=True) - ICONS_PATH.mkdir(exist_ok=True) - MOBILITY_PATH.mkdir(exist_ok=True) - XMLS_PATH.mkdir(exist_ok=True) - SCRIPT_PATH.mkdir(exist_ok=True) - copy_files(LOCAL_ICONS_PATH, ICONS_PATH) - copy_files(LOCAL_BACKGROUND_PATH, BACKGROUNDS_PATH) - copy_files(LOCAL_XMLS_PATH, XMLS_PATH) - copy_files(LOCAL_MOBILITY_PATH, MOBILITY_PATH) - if not CONFIG_PATH.exists(): 
- terminal = find_terminal() - if "EDITOR" in os.environ: - editor = EDITORS[0] - else: - editor = EDITORS[1] - preferences = PreferencesConfig(editor, terminal) - config = GuiConfig(preferences=preferences) - save(config) - - -def read() -> GuiConfig: - with CONFIG_PATH.open("r") as f: - return yaml.safe_load(f) - - -def save(config: GuiConfig) -> None: - with CONFIG_PATH.open("w") as f: - yaml.dump(config, f, Dumper=IndentDumper, default_flow_style=False) diff --git a/daemon/core/gui/coreclient.py b/daemon/core/gui/coreclient.py deleted file mode 100644 index da2ca6d6..00000000 --- a/daemon/core/gui/coreclient.py +++ /dev/null @@ -1,840 +0,0 @@ -""" -Incorporate grpc into python tkinter GUI -""" -import getpass -import json -import logging -import os -import tkinter as tk -from collections.abc import Iterable -from pathlib import Path -from tkinter import messagebox -from typing import TYPE_CHECKING, Optional - -import grpc - -from core.api.grpc import client, configservices_pb2, core_pb2 -from core.api.grpc.wrappers import ( - ConfigOption, - ConfigService, - ConfigServiceDefaults, - EmaneModelConfig, - Event, - ExceptionEvent, - Link, - LinkEvent, - LinkType, - MessageType, - Node, - NodeEvent, - NodeServiceData, - NodeType, - Position, - Server, - ServiceConfig, - ServiceFileConfig, - Session, - SessionLocation, - SessionState, - ThroughputsEvent, -) -from core.gui import nodeutils as nutils -from core.gui.appconfig import XMLS_PATH, CoreServer, Observer -from core.gui.dialogs.emaneinstall import EmaneInstallDialog -from core.gui.dialogs.mobilityplayer import MobilityPlayer -from core.gui.dialogs.sessions import SessionsDialog -from core.gui.graph.edges import CanvasEdge -from core.gui.graph.node import CanvasNode -from core.gui.interface import InterfaceManager -from core.gui.nodeutils import NodeDraw - -logger = logging.getLogger(__name__) - -if TYPE_CHECKING: - from core.gui.app import Application - -GUI_SOURCE = "gui" -CPU_USAGE_DELAY = 3 - - -def to_dict(config: dict[str, ConfigOption]) -> dict[str, str]: - return {x: y.value for x, y in config.items()} - - -class CoreClient: - def __init__(self, app: "Application", proxy: bool) -> None: - """ - Create a CoreGrpc instance - """ - self.app: "Application" = app - self.master: tk.Tk = app.master - self._client: client.CoreGrpcClient = client.CoreGrpcClient(proxy=proxy) - self.session: Optional[Session] = None - self.user = getpass.getuser() - - # menu options - self.show_throughputs: tk.BooleanVar = tk.BooleanVar(value=False) - - # global service settings - self.services: dict[str, set[str]] = {} - self.config_services_groups: dict[str, set[str]] = {} - self.config_services: dict[str, ConfigService] = {} - - # loaded configuration data - self.emane_models: list[str] = [] - self.servers: dict[str, CoreServer] = {} - self.custom_nodes: dict[str, NodeDraw] = {} - self.custom_observers: dict[str, Observer] = {} - self.read_config() - - # helpers - self.iface_to_edge: dict[tuple[int, ...], CanvasEdge] = {} - self.ifaces_manager: InterfaceManager = InterfaceManager(self.app) - self.observer: Optional[str] = None - - # session data - self.mobility_players: dict[int, MobilityPlayer] = {} - self.canvas_nodes: dict[int, CanvasNode] = {} - self.links: dict[str, CanvasEdge] = {} - self.handling_throughputs: Optional[grpc.Future] = None - self.handling_cpu_usage: Optional[grpc.Future] = None - self.handling_events: Optional[grpc.Future] = None - - @property - def client(self) -> client.CoreGrpcClient: - if self.session: - if not 
self._client.check_session(self.session.id): - throughputs_enabled = self.handling_throughputs is not None - self.cancel_throughputs() - self.cancel_events() - self._client.create_session(self.session.id) - self.handling_events = self._client.events( - self.session.id, self.handle_events - ) - if throughputs_enabled: - self.enable_throughputs() - self.setup_cpu_usage() - return self._client - - def set_canvas_node(self, node: Node, canvas_node: CanvasNode) -> None: - self.canvas_nodes[node.id] = canvas_node - - def get_canvas_node(self, node_id: int) -> CanvasNode: - return self.canvas_nodes[node_id] - - def reset(self) -> None: - # helpers - self.ifaces_manager.reset() - self.iface_to_edge.clear() - # session data - self.canvas_nodes.clear() - self.links.clear() - self.close_mobility_players() - self.mobility_players.clear() - # clear streams - self.cancel_throughputs() - self.cancel_events() - - def close_mobility_players(self) -> None: - for mobility_player in self.mobility_players.values(): - mobility_player.close() - - def set_observer(self, value: Optional[str]) -> None: - self.observer = value - - def read_config(self) -> None: - # read distributed servers - for server in self.app.guiconfig.servers: - self.servers[server.name] = server - # read custom nodes - for custom_node in self.app.guiconfig.nodes: - node_draw = NodeDraw.from_custom(custom_node) - self.custom_nodes[custom_node.name] = node_draw - # read observers - for observer in self.app.guiconfig.observers: - self.custom_observers[observer.name] = observer - - def handle_events(self, event: Event) -> None: - if not self.session or event.source == GUI_SOURCE: - return - if event.session_id != self.session.id: - logger.warning( - "ignoring event session(%s) current(%s)", - event.session_id, - self.session.id, - ) - return - if event.link_event: - self.app.after(0, self.handle_link_event, event.link_event) - elif event.session_event: - logger.info("session event: %s", event) - session_event = event.session_event - if session_event.event <= SessionState.SHUTDOWN.value: - self.session.state = SessionState(session_event.event) - elif session_event.event in {7, 8, 9}: - node_id = session_event.node_id - dialog = self.mobility_players.get(node_id) - if dialog: - if session_event.event == 7: - dialog.set_play() - elif session_event.event == 8: - dialog.set_stop() - else: - dialog.set_pause() - else: - logger.warning("unknown session event: %s", session_event) - elif event.node_event: - self.app.after(0, self.handle_node_event, event.node_event) - elif event.config_event: - logger.info("config event: %s", event) - elif event.exception_event: - self.handle_exception_event(event.exception_event) - else: - logger.info("unhandled event: %s", event) - - def handle_link_event(self, event: LinkEvent) -> None: - logger.debug("Link event: %s", event) - node1_id = event.link.node1_id - node2_id = event.link.node2_id - if node1_id == node2_id: - logger.warning("ignoring links with loops: %s", event) - return - canvas_node1 = self.canvas_nodes[node1_id] - canvas_node2 = self.canvas_nodes[node2_id] - if event.link.type == LinkType.WIRELESS: - if event.message_type == MessageType.ADD: - self.app.manager.add_wireless_edge( - canvas_node1, canvas_node2, event.link - ) - elif event.message_type == MessageType.DELETE: - self.app.manager.delete_wireless_edge( - canvas_node1, canvas_node2, event.link - ) - elif event.message_type == MessageType.NONE: - self.app.manager.update_wireless_edge( - canvas_node1, canvas_node2, event.link - ) - else: - 
logger.warning("unknown link event: %s", event) - else: - if event.message_type == MessageType.ADD: - self.app.manager.add_wired_edge(canvas_node1, canvas_node2, event.link) - elif event.message_type == MessageType.DELETE: - self.app.manager.delete_wired_edge(event.link) - elif event.message_type == MessageType.NONE: - self.app.manager.update_wired_edge(event.link) - else: - logger.warning("unknown link event: %s", event) - - def handle_node_event(self, event: NodeEvent) -> None: - logger.debug("node event: %s", event) - node = event.node - if event.message_type == MessageType.NONE: - canvas_node = self.canvas_nodes[node.id] - x = node.position.x - y = node.position.y - canvas_node.move(x, y) - if node.icon and node.icon != canvas_node.core_node.icon: - canvas_node.update_icon(node.icon) - elif event.message_type == MessageType.DELETE: - canvas_node = self.canvas_nodes[node.id] - canvas_node.canvas_delete() - elif event.message_type == MessageType.ADD: - if node.id in self.session.nodes: - logger.error("core node already exists: %s", node) - self.app.manager.add_core_node(node) - else: - logger.warning("unknown node event: %s", event) - - def enable_throughputs(self) -> None: - if not self.handling_throughputs: - self.handling_throughputs = self.client.throughputs( - self.session.id, self.handle_throughputs - ) - - def cancel_throughputs(self) -> None: - if self.handling_throughputs: - self.handling_throughputs.cancel() - self.handling_throughputs = None - self.app.manager.clear_throughputs() - - def cancel_events(self) -> None: - if self.handling_events: - self.handling_events.cancel() - self.handling_events = None - - def cancel_cpu_usage(self) -> None: - if self.handling_cpu_usage: - self.handling_cpu_usage.cancel() - self.handling_cpu_usage = None - - def setup_cpu_usage(self) -> None: - if self.handling_cpu_usage and self.handling_cpu_usage.running(): - return - if self.handling_cpu_usage: - self.handling_cpu_usage.cancel() - self.handling_cpu_usage = self._client.cpu_usage( - CPU_USAGE_DELAY, self.handle_cpu_event - ) - - def handle_throughputs(self, event: ThroughputsEvent) -> None: - if event.session_id != self.session.id: - logger.warning( - "ignoring throughput event session(%s) current(%s)", - event.session_id, - self.session.id, - ) - return - logger.debug("handling throughputs event: %s", event) - self.app.after(0, self.app.manager.set_throughputs, event) - - def handle_cpu_event(self, event: core_pb2.CpuUsageEvent) -> None: - self.app.after(0, self.app.statusbar.set_cpu, event.usage) - - def handle_exception_event(self, event: ExceptionEvent) -> None: - logger.info("exception event: %s", event) - self.app.statusbar.add_alert(event) - - def update_session_title(self) -> None: - title_file = self.session.file.name if self.session.file else "" - self.master.title(f"CORE Session({self.session.id}) {title_file}") - - def join_session(self, session_id: int) -> None: - logger.info("joining session(%s)", session_id) - self.reset() - try: - self.session = self.client.get_session(session_id) - self.session.user = self.user - self.update_session_title() - self.handling_events = self.client.events( - self.session.id, self.handle_events - ) - self.ifaces_manager.joined(self.session.links) - self.app.manager.join(self.session) - if self.is_runtime(): - self.show_mobility_players() - self.app.after(0, self.app.joined_session_update) - except grpc.RpcError as e: - self.app.show_grpc_exception("Join Session Error", e) - - def is_runtime(self) -> bool: - return self.session and 
self.session.state == SessionState.RUNTIME - - def create_new_session(self) -> None: - """ - Create a new session - """ - try: - session = self.client.create_session() - logger.info("created session: %s", session.id) - self.join_session(session.id) - location_config = self.app.guiconfig.location - self.session.location = SessionLocation( - x=location_config.x, - y=location_config.y, - z=location_config.z, - lat=location_config.lat, - lon=location_config.lon, - alt=location_config.alt, - scale=location_config.scale, - ) - except grpc.RpcError as e: - self.app.show_grpc_exception("New Session Error", e) - - def delete_session(self, session_id: int = None) -> None: - if session_id is None and not self.session: - return - if session_id is None: - session_id = self.session.id - try: - response = self.client.delete_session(session_id) - logger.info("deleted session(%s), Result: %s", session_id, response) - except grpc.RpcError as e: - self.app.show_grpc_exception("Delete Session Error", e) - - def setup(self, session_id: int = None) -> None: - """ - Query sessions, if there exist any, prompt whether to join one - """ - try: - self.client.connect() - # get current core configurations services/config services - core_config = self.client.get_config() - self.emane_models = sorted(core_config.emane_models) - for service in core_config.services: - group_services = self.services.setdefault(service.group, set()) - group_services.add(service.name) - for service in core_config.config_services: - self.config_services[service.name] = service - group_services = self.config_services_groups.setdefault( - service.group, set() - ) - group_services.add(service.name) - # join provided session, create new session, or show dialog to select an - # existing session - sessions = self.client.get_sessions() - if session_id: - session_ids = {x.id for x in sessions} - if session_id not in session_ids: - self.app.show_error( - "Join Session Error", - f"{session_id} does not exist", - blocking=True, - ) - self.app.close() - else: - self.join_session(session_id) - else: - if not sessions: - self.create_new_session() - else: - dialog = SessionsDialog(self.app, True) - dialog.show() - except grpc.RpcError as e: - logger.exception("core setup error") - self.app.show_grpc_exception("Setup Error", e, blocking=True) - self.app.close() - - def edit_node(self, core_node: Node) -> None: - try: - self.client.move_node( - self.session.id, core_node.id, core_node.position, source=GUI_SOURCE - ) - except grpc.RpcError as e: - self.app.show_grpc_exception("Edit Node Error", e) - - def get_links(self, definition: bool = False) -> list[Link]: - if not definition: - self.ifaces_manager.set_macs([x.link for x in self.links.values()]) - links = [] - for edge in self.links.values(): - link = edge.link - if not definition: - node1 = self.session.nodes[link.node1_id] - node2 = self.session.nodes[link.node2_id] - if nutils.is_container(node1) and link.iface1 and not link.iface1.mac: - link.iface1.mac = self.ifaces_manager.next_mac() - if nutils.is_container(node2) and link.iface2 and not link.iface2.mac: - link.iface2.mac = self.ifaces_manager.next_mac() - links.append(link) - if edge.asymmetric_link: - links.append(edge.asymmetric_link) - return links - - def start_session(self, definition: bool = False) -> tuple[bool, list[str]]: - self.session.links = self.get_links(definition) - self.session.metadata = self.get_metadata() - self.session.servers.clear() - for server in self.servers.values(): - self.session.servers.append(Server(name=server.name, 
host=server.address)) - result = False - exceptions = [] - try: - result, exceptions = self.client.start_session(self.session, definition) - logger.info( - "start session(%s) definition(%s), result: %s", - self.session.id, - definition, - result, - ) - if self.show_throughputs.get(): - self.enable_throughputs() - except grpc.RpcError as e: - self.app.show_grpc_exception("Start Session Error", e) - return result, exceptions - - def stop_session(self, session_id: int = None) -> bool: - session_id = session_id or self.session.id - self.cancel_throughputs() - result = False - try: - result = self.client.stop_session(session_id) - logger.info("stopped session(%s), result: %s", session_id, result) - except grpc.RpcError as e: - self.app.show_grpc_exception("Stop Session Error", e) - return result - - def show_mobility_players(self) -> None: - for node in self.session.nodes.values(): - if not nutils.is_mobility(node): - continue - if node.mobility_config: - mobility_player = MobilityPlayer(self.app, node) - self.mobility_players[node.id] = mobility_player - mobility_player.show() - - def get_metadata(self) -> dict[str, str]: - # create canvas data - canvas_config = self.app.manager.get_metadata() - canvas_config = json.dumps(canvas_config) - - # create shapes data - shapes = [] - for canvas in self.app.manager.all(): - for shape in canvas.shapes.values(): - shapes.append(shape.metadata()) - shapes = json.dumps(shapes) - - # create edges config - edges_config = [] - for edge in self.links.values(): - if not edge.is_customized(): - continue - edge_config = dict(token=edge.token, width=edge.width, color=edge.color) - edges_config.append(edge_config) - edges_config = json.dumps(edges_config) - - # create hidden metadata - hidden = [x.core_node.id for x in self.canvas_nodes.values() if x.hidden] - hidden = json.dumps(hidden) - - # save metadata - return dict( - canvas=canvas_config, shapes=shapes, edges=edges_config, hidden=hidden - ) - - def launch_terminal(self, node_id: int) -> None: - try: - terminal = self.app.guiconfig.preferences.terminal - if not terminal: - messagebox.showerror( - "Terminal Error", - "No terminal set, please set within the preferences menu", - parent=self.app, - ) - return - node_term = self.client.get_node_terminal(self.session.id, node_id) - cmd = f"{terminal} {node_term} &" - logger.info("launching terminal %s", cmd) - os.system(cmd) - except grpc.RpcError as e: - self.app.show_grpc_exception("Node Terminal Error", e) - - def get_xml_dir(self) -> str: - return str(self.session.file.parent) if self.session.file else str(XMLS_PATH) - - def save_xml(self, file_path: Path = None) -> bool: - """ - Save core session as to an xml file - """ - if not file_path and not self.session.file: - logger.error("trying to save xml for session with no file") - return False - if not file_path: - file_path = self.session.file - result = False - try: - if not self.is_runtime(): - logger.debug("sending session data to the daemon") - result, exceptions = self.start_session(definition=True) - if not result: - message = "\n".join(exceptions) - self.app.show_exception_data( - "Session Definition Exception", - "Failed to define session", - message, - ) - self.client.save_xml(self.session.id, str(file_path)) - if self.session.file != file_path: - self.session.file = file_path - self.update_session_title() - logger.info("saved xml file %s", file_path) - result = True - except grpc.RpcError as e: - self.app.show_grpc_exception("Save XML Error", e) - return result - - def open_xml(self, file_path: Path) 
-> None: - """ - Open core xml - """ - try: - result, session_id = self._client.open_xml(file_path) - logger.info( - "open xml file %s, result(%s) session(%s)", - file_path, - result, - session_id, - ) - self.join_session(session_id) - except grpc.RpcError as e: - self.app.show_grpc_exception("Open XML Error", e) - - def get_node_service(self, node_id: int, service_name: str) -> NodeServiceData: - node_service = self.client.get_node_service( - self.session.id, node_id, service_name - ) - logger.debug( - "get node(%s) service(%s): %s", node_id, service_name, node_service - ) - return node_service - - def get_node_service_file( - self, node_id: int, service_name: str, file_name: str - ) -> str: - data = self.client.get_node_service_file( - self.session.id, node_id, service_name, file_name - ) - logger.debug( - "get service file for node(%s), service: %s, file: %s, data: %s", - node_id, - service_name, - file_name, - data, - ) - return data - - def close(self) -> None: - """ - Clean ups when done using grpc - """ - logger.debug("close grpc") - self.client.close() - - def next_node_id(self) -> int: - """ - Get the next usable node id. - """ - i = 1 - while True: - if i not in self.session.nodes: - break - i += 1 - return i - - def create_node( - self, x: float, y: float, node_type: NodeType, model: str - ) -> Optional[Node]: - """ - Add node, with information filled in, to grpc manager - """ - node_id = self.next_node_id() - position = Position(x=x, y=y) - image = None - if nutils.has_image(node_type): - image = "ubuntu:latest" - emane = None - if node_type == NodeType.EMANE: - if not self.emane_models: - dialog = EmaneInstallDialog(self.app) - dialog.show() - return - emane = self.emane_models[0] - name = f"emane{node_id}" - elif node_type == NodeType.WIRELESS_LAN: - name = f"wlan{node_id}" - elif node_type in [NodeType.RJ45, NodeType.TUNNEL]: - name = "unassigned" - else: - name = f"n{node_id}" - node = Node( - id=node_id, - type=node_type, - name=name, - model=model, - position=position, - image=image, - emane=emane, - ) - if nutils.is_custom(node): - services = nutils.get_custom_services(self.app.guiconfig, model) - node.config_services = set(services) - # assign default services to CORE node - else: - services = self.session.default_services.get(model) - if services: - node.config_services = set(services) - logger.info( - "add node(%s) to session(%s), coordinates(%s, %s)", - node.name, - self.session.id, - x, - y, - ) - self.session.nodes[node.id] = node - return node - - def deleted_canvas_nodes(self, canvas_nodes: list[CanvasNode]) -> None: - """ - remove the nodes selected by the user and anything related to that node - such as link, configurations, interfaces - """ - for canvas_node in canvas_nodes: - node = canvas_node.core_node - del self.canvas_nodes[node.id] - del self.session.nodes[node.id] - - def deleted_canvas_edges(self, edges: Iterable[CanvasEdge]) -> None: - links = [] - for edge in edges: - del self.links[edge.token] - links.append(edge.link) - self.ifaces_manager.removed(links) - - def save_edge(self, edge: CanvasEdge) -> None: - self.links[edge.token] = edge - src_node = edge.src.core_node - dst_node = edge.dst.core_node - if edge.link.iface1: - src_iface_id = edge.link.iface1.id - self.iface_to_edge[(src_node.id, src_iface_id)] = edge - if edge.link.iface2: - dst_iface_id = edge.link.iface2.id - self.iface_to_edge[(dst_node.id, dst_iface_id)] = edge - - def get_wlan_configs(self) -> list[tuple[int, dict[str, str]]]: - configs = [] - for node in 
self.session.nodes.values(): - if node.type != NodeType.WIRELESS_LAN: - continue - if not node.wlan_config: - continue - config = ConfigOption.to_dict(node.wlan_config) - configs.append((node.id, config)) - return configs - - def get_mobility_configs(self) -> list[tuple[int, dict[str, str]]]: - configs = [] - for node in self.session.nodes.values(): - if not nutils.is_mobility(node): - continue - if not node.mobility_config: - continue - config = ConfigOption.to_dict(node.mobility_config) - configs.append((node.id, config)) - return configs - - def get_emane_model_configs(self) -> list[EmaneModelConfig]: - configs = [] - for node in self.session.nodes.values(): - for key, config in node.emane_model_configs.items(): - model, iface_id = key - # config = ConfigOption.to_dict(config) - if iface_id is None: - iface_id = -1 - config = EmaneModelConfig( - node_id=node.id, model=model, iface_id=iface_id, config=config - ) - configs.append(config) - return configs - - def get_service_configs(self) -> list[ServiceConfig]: - configs = [] - for node in self.session.nodes.values(): - if not nutils.is_container(node): - continue - if not node.service_configs: - continue - for name, config in node.service_configs.items(): - config = ServiceConfig( - node_id=node.id, - service=name, - files=config.configs, - directories=config.dirs, - startup=config.startup, - validate=config.validate, - shutdown=config.shutdown, - ) - configs.append(config) - return configs - - def get_service_file_configs(self) -> list[ServiceFileConfig]: - configs = [] - for node in self.session.nodes.values(): - if not nutils.is_container(node): - continue - if not node.service_file_configs: - continue - for service, file_configs in node.service_file_configs.items(): - for file, data in file_configs.items(): - config = ServiceFileConfig(node.id, service, file, data) - configs.append(config) - return configs - - def get_config_service_rendered(self, node_id: int, name: str) -> dict[str, str]: - return self.client.get_config_service_rendered(self.session.id, node_id, name) - - def get_config_service_defaults( - self, node_id: int, name: str - ) -> ConfigServiceDefaults: - return self.client.get_config_service_defaults(self.session.id, node_id, name) - - def get_config_service_configs_proto( - self, - ) -> list[configservices_pb2.ConfigServiceConfig]: - config_service_protos = [] - for node in self.session.nodes.values(): - if not nutils.is_container(node): - continue - if not node.config_service_configs: - continue - for name, service_config in node.config_service_configs.items(): - config_proto = configservices_pb2.ConfigServiceConfig( - node_id=node.id, - name=name, - templates=service_config.templates, - config=service_config.config, - ) - config_service_protos.append(config_proto) - return config_service_protos - - def run(self, node_id: int) -> str: - logger.info("running node(%s) cmd: %s", node_id, self.observer) - _, output = self.client.node_command(self.session.id, node_id, self.observer) - return output - - def get_wlan_config(self, node_id: int) -> dict[str, ConfigOption]: - config = self.client.get_wlan_config(self.session.id, node_id) - logger.debug( - "get wlan configuration from node %s, result configuration: %s", - node_id, - config, - ) - return config - - def get_wireless_config(self, node_id: int) -> dict[str, ConfigOption]: - return self.client.get_wireless_config(self.session.id, node_id) - - def get_mobility_config(self, node_id: int) -> dict[str, ConfigOption]: - config = 
self.client.get_mobility_config(self.session.id, node_id) - logger.debug( - "get mobility config from node %s, result configuration: %s", - node_id, - config, - ) - return config - - def get_emane_model_config( - self, node_id: int, model: str, iface_id: int = None - ) -> dict[str, ConfigOption]: - if iface_id is None: - iface_id = -1 - config = self.client.get_emane_model_config( - self.session.id, node_id, model, iface_id - ) - logger.debug( - "get emane model config: node id: %s, EMANE model: %s, " - "interface: %s, config: %s", - node_id, - model, - iface_id, - config, - ) - return config - - def execute_script(self, script: str, options: str) -> None: - session_id = self.client.execute_script(script, options) - logger.info("execute python script %s", session_id) - if session_id != -1: - self.join_session(session_id) - - def add_link(self, link: Link) -> None: - result, _, _ = self.client.add_link(self.session.id, link, source=GUI_SOURCE) - logger.debug("added link: %s", result) - if not result: - logger.error("error adding link: %s", link) - - def edit_link(self, link: Link) -> None: - result = self.client.edit_link(self.session.id, link, source=GUI_SOURCE) - if not result: - logger.error("error editing link: %s", link) diff --git a/daemon/core/gui/data/icons/alert.png b/daemon/core/gui/data/icons/alert.png deleted file mode 100644 index 718fa9f1..00000000 Binary files a/daemon/core/gui/data/icons/alert.png and /dev/null differ diff --git a/daemon/core/gui/data/icons/antenna.png b/daemon/core/gui/data/icons/antenna.png deleted file mode 100644 index 4247aa3d..00000000 Binary files a/daemon/core/gui/data/icons/antenna.png and /dev/null differ diff --git a/daemon/core/gui/data/icons/cancel.png b/daemon/core/gui/data/icons/cancel.png deleted file mode 100644 index 1d95ba0c..00000000 Binary files a/daemon/core/gui/data/icons/cancel.png and /dev/null differ diff --git a/daemon/core/gui/data/icons/delete.png b/daemon/core/gui/data/icons/delete.png deleted file mode 100644 index 01b498d4..00000000 Binary files a/daemon/core/gui/data/icons/delete.png and /dev/null differ diff --git a/daemon/core/gui/data/icons/docker.png b/daemon/core/gui/data/icons/docker.png deleted file mode 100644 index 6021c640..00000000 Binary files a/daemon/core/gui/data/icons/docker.png and /dev/null differ diff --git a/daemon/core/gui/data/icons/edit-node.png b/daemon/core/gui/data/icons/edit-node.png deleted file mode 100644 index 28490eff..00000000 Binary files a/daemon/core/gui/data/icons/edit-node.png and /dev/null differ diff --git a/daemon/core/gui/data/icons/emane.png b/daemon/core/gui/data/icons/emane.png deleted file mode 100644 index 1baa933d..00000000 Binary files a/daemon/core/gui/data/icons/emane.png and /dev/null differ diff --git a/daemon/core/gui/data/icons/error.png b/daemon/core/gui/data/icons/error.png deleted file mode 100644 index d73d1dd4..00000000 Binary files a/daemon/core/gui/data/icons/error.png and /dev/null differ diff --git a/daemon/core/gui/data/icons/host.png b/daemon/core/gui/data/icons/host.png deleted file mode 100644 index e6efda08..00000000 Binary files a/daemon/core/gui/data/icons/host.png and /dev/null differ diff --git a/daemon/core/gui/data/icons/hub.png b/daemon/core/gui/data/icons/hub.png deleted file mode 100644 index c9a2523b..00000000 Binary files a/daemon/core/gui/data/icons/hub.png and /dev/null differ diff --git a/daemon/core/gui/data/icons/lanswitch.png b/daemon/core/gui/data/icons/lanswitch.png deleted file mode 100644 index eb9ba593..00000000 Binary files 
a/daemon/core/gui/data/icons/lanswitch.png and /dev/null differ diff --git a/daemon/core/gui/data/icons/link.png b/daemon/core/gui/data/icons/link.png deleted file mode 100644 index d6b6745b..00000000 Binary files a/daemon/core/gui/data/icons/link.png and /dev/null differ diff --git a/daemon/core/gui/data/icons/lxc.png b/daemon/core/gui/data/icons/lxc.png deleted file mode 100644 index b944b231..00000000 Binary files a/daemon/core/gui/data/icons/lxc.png and /dev/null differ diff --git a/daemon/core/gui/data/icons/marker.png b/daemon/core/gui/data/icons/marker.png deleted file mode 100644 index 8c60bacb..00000000 Binary files a/daemon/core/gui/data/icons/marker.png and /dev/null differ diff --git a/daemon/core/gui/data/icons/markerclear.png b/daemon/core/gui/data/icons/markerclear.png deleted file mode 100644 index 6f58c005..00000000 Binary files a/daemon/core/gui/data/icons/markerclear.png and /dev/null differ diff --git a/daemon/core/gui/data/icons/mdr.png b/daemon/core/gui/data/icons/mdr.png deleted file mode 100644 index b0678ee7..00000000 Binary files a/daemon/core/gui/data/icons/mdr.png and /dev/null differ diff --git a/daemon/core/gui/data/icons/observe.png b/daemon/core/gui/data/icons/observe.png deleted file mode 100644 index e32005da..00000000 Binary files a/daemon/core/gui/data/icons/observe.png and /dev/null differ diff --git a/daemon/core/gui/data/icons/oval.png b/daemon/core/gui/data/icons/oval.png deleted file mode 100644 index 1babf1b7..00000000 Binary files a/daemon/core/gui/data/icons/oval.png and /dev/null differ diff --git a/daemon/core/gui/data/icons/pause.png b/daemon/core/gui/data/icons/pause.png deleted file mode 100644 index 9ac4e6ea..00000000 Binary files a/daemon/core/gui/data/icons/pause.png and /dev/null differ diff --git a/daemon/core/gui/data/icons/pc.png b/daemon/core/gui/data/icons/pc.png deleted file mode 100644 index 3f587e70..00000000 Binary files a/daemon/core/gui/data/icons/pc.png and /dev/null differ diff --git a/daemon/core/gui/data/icons/podman.png b/daemon/core/gui/data/icons/podman.png deleted file mode 100644 index 771e04a0..00000000 Binary files a/daemon/core/gui/data/icons/podman.png and /dev/null differ diff --git a/daemon/core/gui/data/icons/prouter.png b/daemon/core/gui/data/icons/prouter.png deleted file mode 100644 index b0ccf664..00000000 Binary files a/daemon/core/gui/data/icons/prouter.png and /dev/null differ diff --git a/daemon/core/gui/data/icons/rectangle.png b/daemon/core/gui/data/icons/rectangle.png deleted file mode 100644 index ca6c8c06..00000000 Binary files a/daemon/core/gui/data/icons/rectangle.png and /dev/null differ diff --git a/daemon/core/gui/data/icons/rj45.png b/daemon/core/gui/data/icons/rj45.png deleted file mode 100644 index c9d87cfd..00000000 Binary files a/daemon/core/gui/data/icons/rj45.png and /dev/null differ diff --git a/daemon/core/gui/data/icons/router.png b/daemon/core/gui/data/icons/router.png deleted file mode 100644 index 1de5014a..00000000 Binary files a/daemon/core/gui/data/icons/router.png and /dev/null differ diff --git a/daemon/core/gui/data/icons/run.png b/daemon/core/gui/data/icons/run.png deleted file mode 100644 index a39a997f..00000000 Binary files a/daemon/core/gui/data/icons/run.png and /dev/null differ diff --git a/daemon/core/gui/data/icons/select.png b/daemon/core/gui/data/icons/select.png deleted file mode 100644 index 04e18891..00000000 Binary files a/daemon/core/gui/data/icons/select.png and /dev/null differ diff --git a/daemon/core/gui/data/icons/shadow.png 
b/daemon/core/gui/data/icons/shadow.png deleted file mode 100644 index 6d6f3571..00000000 Binary files a/daemon/core/gui/data/icons/shadow.png and /dev/null differ
diff --git a/daemon/core/gui/data/icons/shutdown.png b/daemon/core/gui/data/icons/shutdown.png deleted file mode 100644 index 532f2cb9..00000000 Binary files a/daemon/core/gui/data/icons/shutdown.png and /dev/null differ
diff --git a/daemon/core/gui/data/icons/start.png b/daemon/core/gui/data/icons/start.png deleted file mode 100644 index 719f4cd9..00000000 Binary files a/daemon/core/gui/data/icons/start.png and /dev/null differ
diff --git a/daemon/core/gui/data/icons/stop.png b/daemon/core/gui/data/icons/stop.png deleted file mode 100644 index 1e87c929..00000000 Binary files a/daemon/core/gui/data/icons/stop.png and /dev/null differ
diff --git a/daemon/core/gui/data/icons/text.png b/daemon/core/gui/data/icons/text.png deleted file mode 100644 index 14a85dc0..00000000 Binary files a/daemon/core/gui/data/icons/text.png and /dev/null differ
diff --git a/daemon/core/gui/data/icons/tunnel.png b/daemon/core/gui/data/icons/tunnel.png deleted file mode 100644 index 2871b74f..00000000 Binary files a/daemon/core/gui/data/icons/tunnel.png and /dev/null differ
diff --git a/daemon/core/gui/data/icons/twonode.png b/daemon/core/gui/data/icons/twonode.png deleted file mode 100644 index 6828db8e..00000000 Binary files a/daemon/core/gui/data/icons/twonode.png and /dev/null differ
diff --git a/daemon/core/gui/data/icons/wireless.png b/daemon/core/gui/data/icons/wireless.png deleted file mode 100644 index 2b42b8dd..00000000 Binary files a/daemon/core/gui/data/icons/wireless.png and /dev/null differ
diff --git a/daemon/core/gui/data/icons/wlan.png b/daemon/core/gui/data/icons/wlan.png deleted file mode 100644 index db979a09..00000000 Binary files a/daemon/core/gui/data/icons/wlan.png and /dev/null differ
diff --git a/daemon/core/gui/data/xmls/emane-demo-antenna.xml b/daemon/core/gui/data/xmls/emane-demo-antenna.xml deleted file mode 100644 index 00616339..00000000 --- a/daemon/core/gui/data/xmls/emane-demo-antenna.xml +++ /dev/null @@ -1,476 +0,0 @@ [476 deleted lines of XML; markup lost during text extraction]
diff --git a/daemon/core/gui/data/xmls/emane-demo-eel.xml b/daemon/core/gui/data/xmls/emane-demo-eel.xml deleted file mode 100644 index 66f8f2a8..00000000 --- a/daemon/core/gui/data/xmls/emane-demo-eel.xml +++ /dev/null @@ -1,158 +0,0 @@ [158 deleted lines of XML; markup lost during text extraction]
diff --git a/daemon/core/gui/data/xmls/emane-demo-files.xml b/daemon/core/gui/data/xmls/emane-demo-files.xml deleted file mode 100644 index 9e71d58f..00000000 --- a/daemon/core/gui/data/xmls/emane-demo-files.xml +++ /dev/null @@ -1,158 +0,0 @@ [158 deleted lines of XML; markup lost during text extraction]
diff --git a/daemon/core/gui/data/xmls/emane-demo-gpsd.xml b/daemon/core/gui/data/xmls/emane-demo-gpsd.xml deleted file mode 100644 index 2dbc1294..00000000 --- a/daemon/core/gui/data/xmls/emane-demo-gpsd.xml +++ /dev/null @@ -1,158 +0,0 @@ [158 deleted lines of XML; markup lost during text extraction]
diff --git a/daemon/core/gui/data/xmls/emane-demo-precomputed.xml b/daemon/core/gui/data/xmls/emane-demo-precomputed.xml deleted file mode 100644 index d53e26ba..00000000 --- a/daemon/core/gui/data/xmls/emane-demo-precomputed.xml +++ /dev/null @@ -1,158 +0,0 @@ [158 deleted lines of XML; markup lost during text extraction]
diff --git a/daemon/core/gui/data/xmls/sample1.xml b/daemon/core/gui/data/xmls/sample1.xml deleted file mode 100644 index 64d093a8..00000000 --- a/daemon/core/gui/data/xmls/sample1.xml +++ /dev/null @@ -1,263 +0,0 @@ [263 deleted lines of XML; markup lost during text extraction]
diff --git a/daemon/core/gui/dialogs/__init__.py b/daemon/core/gui/dialogs/__init__.py deleted file mode 100644 index e69de29b..00000000
diff --git a/daemon/core/gui/dialogs/about.py b/daemon/core/gui/dialogs/about.py deleted file mode 100644 index c932807d..00000000 --- a/daemon/core/gui/dialogs/about.py +++ /dev/null @@ -1,54 +0,0 @@ -import tkinter as tk -from tkinter import ttk -from typing import TYPE_CHECKING - -from core.gui.dialogs.dialog import Dialog -from core.gui.widgets import CodeText - -if TYPE_CHECKING: - from core.gui.app import Application - -LICENSE = """\ -Copyright (c) 2005-2020, the Boeing Company. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. -2.
Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF -THE POSSIBILITY OF SUCH DAMAGE.\ -""" - - -class AboutDialog(Dialog): - def __init__(self, app: "Application") -> None: - super().__init__(app, "About CORE") - self.draw() - - def draw(self) -> None: - self.top.columnconfigure(0, weight=1) - self.top.rowconfigure(0, weight=1) - - codetext = CodeText(self.top) - codetext.text.insert("1.0", LICENSE) - codetext.text.config(state=tk.DISABLED) - codetext.grid(sticky=tk.NSEW) - - label = ttk.Label( - self.top, text="Icons from https://icons8.com", anchor=tk.CENTER - ) - label.grid(sticky=tk.EW) diff --git a/daemon/core/gui/dialogs/alerts.py b/daemon/core/gui/dialogs/alerts.py deleted file mode 100644 index b13f0797..00000000 --- a/daemon/core/gui/dialogs/alerts.py +++ /dev/null @@ -1,114 +0,0 @@ -""" -check engine light -""" -import tkinter as tk -from tkinter import ttk -from typing import TYPE_CHECKING, Optional - -from core.api.grpc.wrappers import ExceptionEvent, ExceptionLevel -from core.gui.dialogs.dialog import Dialog -from core.gui.themes import PADX, PADY -from core.gui.widgets import CodeText - -if TYPE_CHECKING: - from core.gui.app import Application - - -class AlertsDialog(Dialog): - def __init__(self, app: "Application") -> None: - super().__init__(app, "Alerts") - self.tree: Optional[ttk.Treeview] = None - self.codetext: Optional[CodeText] = None - self.alarm_map: dict[int, ExceptionEvent] = {} - self.draw() - - def draw(self) -> None: - self.top.columnconfigure(0, weight=1) - self.top.rowconfigure(0, weight=1) - self.top.rowconfigure(1, weight=1) - - frame = ttk.Frame(self.top) - frame.columnconfigure(0, weight=1) - frame.rowconfigure(0, weight=1) - frame.grid(sticky=tk.NSEW, pady=PADY) - self.tree = ttk.Treeview( - frame, - columns=("time", "level", "session_id", "node", "source"), - show="headings", - ) - self.tree.grid(row=0, column=0, sticky=tk.NSEW) - self.tree.column("time", stretch=tk.YES) - self.tree.heading("time", text="Time") - self.tree.column("level", stretch=tk.YES, width=100) - self.tree.heading("level", text="Level") - self.tree.column("session_id", stretch=tk.YES, width=100) - self.tree.heading("session_id", text="Session ID") - self.tree.column("node", stretch=tk.YES, width=100) - self.tree.heading("node", text="Node") - self.tree.column("source", stretch=tk.YES, width=100) - self.tree.heading("source", text="Source") - self.tree.bind("<>", self.click_select) - - for exception in self.app.statusbar.core_alarms: - level_name = exception.level.name - node_id = exception.node_id if exception.node_id else "" - insert_id = self.tree.insert( - "", - tk.END, - text=exception.date, - values=( - 
exception.date, - level_name, - exception.session_id, - node_id, - exception.source, - ), - tags=(level_name,), - ) - self.alarm_map[insert_id] = exception - - error_name = ExceptionLevel.ERROR.name - self.tree.tag_configure(error_name, background="#ff6666") - fatal_name = ExceptionLevel.FATAL.name - self.tree.tag_configure(fatal_name, background="#d9d9d9") - warning_name = ExceptionLevel.WARNING.name - self.tree.tag_configure(warning_name, background="#ffff99") - notice_name = ExceptionLevel.NOTICE.name - self.tree.tag_configure(notice_name, background="#85e085") - - yscrollbar = ttk.Scrollbar(frame, orient="vertical", command=self.tree.yview) - yscrollbar.grid(row=0, column=1, sticky=tk.NS) - self.tree.configure(yscrollcommand=yscrollbar.set) - - xscrollbar = ttk.Scrollbar(frame, orient="horizontal", command=self.tree.xview) - xscrollbar.grid(row=1, sticky=tk.EW) - self.tree.configure(xscrollcommand=xscrollbar.set) - - self.codetext = CodeText(self.top) - self.codetext.text.config(state=tk.DISABLED, height=11) - self.codetext.grid(sticky=tk.NSEW, pady=PADY) - - frame = ttk.Frame(self.top) - frame.grid(sticky=tk.EW) - frame.columnconfigure(0, weight=1) - frame.columnconfigure(1, weight=1) - button = ttk.Button(frame, text="Reset", command=self.reset_alerts) - button.grid(row=0, column=0, sticky=tk.EW, padx=PADX) - button = ttk.Button(frame, text="Close", command=self.destroy) - button.grid(row=0, column=1, sticky=tk.EW) - - def reset_alerts(self) -> None: - self.codetext.text.config(state=tk.NORMAL) - self.codetext.text.delete(1.0, tk.END) - self.codetext.text.config(state=tk.DISABLED) - for item in self.tree.get_children(): - self.tree.delete(item) - self.app.statusbar.clear_alerts() - - def click_select(self, event: tk.Event) -> None: - current = self.tree.selection()[0] - exception = self.alarm_map[current] - self.codetext.text.config(state=tk.NORMAL) - self.codetext.text.delete(1.0, tk.END) - self.codetext.text.insert(1.0, exception.text) - self.codetext.text.config(state=tk.DISABLED) diff --git a/daemon/core/gui/dialogs/canvassizeandscale.py b/daemon/core/gui/dialogs/canvassizeandscale.py deleted file mode 100644 index 863d1174..00000000 --- a/daemon/core/gui/dialogs/canvassizeandscale.py +++ /dev/null @@ -1,213 +0,0 @@ -""" -size and scale -""" -import tkinter as tk -from tkinter import font, ttk -from typing import TYPE_CHECKING - -from core.gui import validation -from core.gui.dialogs.dialog import Dialog -from core.gui.graph.manager import CanvasManager -from core.gui.themes import FRAME_PAD, PADX, PADY - -if TYPE_CHECKING: - from core.gui.app import Application - -PIXEL_SCALE: int = 100 - - -class SizeAndScaleDialog(Dialog): - def __init__(self, app: "Application") -> None: - """ - create an instance for size and scale object - """ - super().__init__(app, "Canvas Size and Scale") - self.manager: CanvasManager = self.app.manager - self.section_font: font.Font = font.Font(weight=font.BOLD) - width, height = self.manager.current().current_dimensions - self.pixel_width: tk.IntVar = tk.IntVar(value=width) - self.pixel_height: tk.IntVar = tk.IntVar(value=height) - location = self.app.core.session.location - self.x: tk.DoubleVar = tk.DoubleVar(value=location.x) - self.y: tk.DoubleVar = tk.DoubleVar(value=location.y) - self.lat: tk.DoubleVar = tk.DoubleVar(value=location.lat) - self.lon: tk.DoubleVar = tk.DoubleVar(value=location.lon) - self.alt: tk.DoubleVar = tk.DoubleVar(value=location.alt) - self.scale: tk.DoubleVar = tk.DoubleVar(value=location.scale) - self.meters_width: tk.IntVar 
= tk.IntVar( - value=width / PIXEL_SCALE * location.scale - ) - self.meters_height: tk.IntVar = tk.IntVar( - value=height / PIXEL_SCALE * location.scale - ) - self.save_default: tk.BooleanVar = tk.BooleanVar(value=False) - self.draw() - - def draw(self) -> None: - self.top.columnconfigure(0, weight=1) - self.draw_size() - self.draw_scale() - self.draw_reference_point() - self.draw_save_as_default() - self.draw_spacer() - self.draw_buttons() - - def draw_size(self) -> None: - label_frame = ttk.Labelframe(self.top, text="Size", padding=FRAME_PAD) - label_frame.grid(sticky=tk.EW) - label_frame.columnconfigure(0, weight=1) - - # draw size row 1 - frame = ttk.Frame(label_frame) - frame.grid(sticky=tk.EW, pady=PADY) - frame.columnconfigure(1, weight=1) - frame.columnconfigure(3, weight=1) - label = ttk.Label(frame, text="Width") - label.grid(row=0, column=0, sticky=tk.W, padx=PADX) - entry = validation.PositiveIntEntry(frame, textvariable=self.pixel_width) - entry.grid(row=0, column=1, sticky=tk.EW, padx=PADX) - entry.bind("", self.size_scale_keyup) - label = ttk.Label(frame, text="x Height") - label.grid(row=0, column=2, sticky=tk.W, padx=PADX) - entry = validation.PositiveIntEntry(frame, textvariable=self.pixel_height) - entry.grid(row=0, column=3, sticky=tk.EW, padx=PADX) - entry.bind("", self.size_scale_keyup) - label = ttk.Label(frame, text="Pixels") - label.grid(row=0, column=4, sticky=tk.W) - - # draw size row 2 - frame = ttk.Frame(label_frame) - frame.grid(sticky=tk.EW, pady=PADY) - frame.columnconfigure(1, weight=1) - frame.columnconfigure(3, weight=1) - label = ttk.Label(frame, text="Width") - label.grid(row=0, column=0, sticky=tk.W, padx=PADX) - entry = validation.PositiveFloatEntry( - frame, textvariable=self.meters_width, state=tk.DISABLED - ) - entry.grid(row=0, column=1, sticky=tk.EW, padx=PADX) - label = ttk.Label(frame, text="x Height") - label.grid(row=0, column=2, sticky=tk.W, padx=PADX) - entry = validation.PositiveFloatEntry( - frame, textvariable=self.meters_height, state=tk.DISABLED - ) - entry.grid(row=0, column=3, sticky=tk.EW, padx=PADX) - label = ttk.Label(frame, text="Meters") - label.grid(row=0, column=4, sticky=tk.W) - - def draw_scale(self) -> None: - label_frame = ttk.Labelframe(self.top, text="Scale", padding=FRAME_PAD) - label_frame.grid(sticky=tk.EW) - label_frame.columnconfigure(0, weight=1) - - frame = ttk.Frame(label_frame) - frame.grid(sticky=tk.EW) - frame.columnconfigure(1, weight=1) - label = ttk.Label(frame, text=f"{PIXEL_SCALE} Pixels =") - label.grid(row=0, column=0, sticky=tk.W, padx=PADX) - entry = validation.PositiveFloatEntry(frame, textvariable=self.scale) - entry.grid(row=0, column=1, sticky=tk.EW, padx=PADX) - entry.bind("", self.size_scale_keyup) - label = ttk.Label(frame, text="Meters") - label.grid(row=0, column=2, sticky=tk.W) - - def draw_reference_point(self) -> None: - label_frame = ttk.Labelframe( - self.top, text="Reference Point", padding=FRAME_PAD - ) - label_frame.grid(sticky=tk.EW) - label_frame.columnconfigure(0, weight=1) - - label = ttk.Label( - label_frame, text="Default is (0, 0), the upper left corner of the canvas" - ) - label.grid() - - frame = ttk.Frame(label_frame) - frame.grid(sticky=tk.EW, pady=PADY) - frame.columnconfigure(1, weight=1) - frame.columnconfigure(3, weight=1) - - label = ttk.Label(frame, text="X") - label.grid(row=0, column=0, sticky=tk.W, padx=PADX) - entry = validation.PositiveFloatEntry(frame, textvariable=self.x) - entry.grid(row=0, column=1, sticky=tk.EW, padx=PADX) - - label = ttk.Label(frame, 
text="Y") - label.grid(row=0, column=2, sticky=tk.W, padx=PADX) - entry = validation.PositiveFloatEntry(frame, textvariable=self.y) - entry.grid(row=0, column=3, sticky=tk.EW, padx=PADX) - - label = ttk.Label(label_frame, text="Translates To") - label.grid() - - frame = ttk.Frame(label_frame) - frame.grid(sticky=tk.EW, pady=PADY) - frame.columnconfigure(1, weight=1) - frame.columnconfigure(3, weight=1) - frame.columnconfigure(5, weight=1) - - label = ttk.Label(frame, text="Lat") - label.grid(row=0, column=0, sticky=tk.W, padx=PADX) - entry = validation.FloatEntry(frame, textvariable=self.lat) - entry.grid(row=0, column=1, sticky=tk.EW, padx=PADX) - - label = ttk.Label(frame, text="Lon") - label.grid(row=0, column=2, sticky=tk.W, padx=PADX) - entry = validation.FloatEntry(frame, textvariable=self.lon) - entry.grid(row=0, column=3, sticky=tk.EW, padx=PADX) - - label = ttk.Label(frame, text="Alt") - label.grid(row=0, column=4, sticky=tk.W, padx=PADX) - entry = validation.FloatEntry(frame, textvariable=self.alt) - entry.grid(row=0, column=5, sticky=tk.EW) - - def draw_save_as_default(self) -> None: - button = ttk.Checkbutton( - self.top, text="Save as default?", variable=self.save_default - ) - button.grid(sticky=tk.W, pady=PADY) - - def draw_buttons(self) -> None: - frame = ttk.Frame(self.top) - frame.columnconfigure(0, weight=1) - frame.columnconfigure(1, weight=1) - frame.grid(sticky=tk.EW) - - button = ttk.Button(frame, text="Apply", command=self.click_apply) - button.grid(row=0, column=0, sticky=tk.EW, padx=PADX) - - button = ttk.Button(frame, text="Cancel", command=self.destroy) - button.grid(row=0, column=1, sticky=tk.EW) - - def size_scale_keyup(self, _event: tk.Event) -> None: - scale = self.scale.get() - width = self.pixel_width.get() - height = self.pixel_height.get() - self.meters_width.set(width / PIXEL_SCALE * scale) - self.meters_height.set(height / PIXEL_SCALE * scale) - - def click_apply(self) -> None: - width, height = self.pixel_width.get(), self.pixel_height.get() - self.manager.redraw_canvas((width, height)) - location = self.app.core.session.location - location.x = self.x.get() - location.y = self.y.get() - location.lat = self.lat.get() - location.lon = self.lon.get() - location.alt = self.alt.get() - location.scale = self.scale.get() - if self.save_default.get(): - location_config = self.app.guiconfig.location - location_config.x = location.x - location_config.y = location.y - location_config.z = location.z - location_config.lat = location.lat - location_config.lon = location.lon - location_config.alt = location.alt - location_config.scale = location.scale - preferences = self.app.guiconfig.preferences - preferences.width = width - preferences.height = height - self.app.save_config() - self.destroy() diff --git a/daemon/core/gui/dialogs/canvaswallpaper.py b/daemon/core/gui/dialogs/canvaswallpaper.py deleted file mode 100644 index 5b0f27b3..00000000 --- a/daemon/core/gui/dialogs/canvaswallpaper.py +++ /dev/null @@ -1,173 +0,0 @@ -""" -set wallpaper -""" -import logging -import tkinter as tk -from tkinter import ttk -from typing import TYPE_CHECKING, Optional - -from core.gui import images -from core.gui.appconfig import BACKGROUNDS_PATH -from core.gui.dialogs.dialog import Dialog -from core.gui.graph.graph import CanvasGraph -from core.gui.themes import PADX, PADY -from core.gui.widgets import image_chooser - -logger = logging.getLogger(__name__) - -if TYPE_CHECKING: - from core.gui.app import Application - - -class CanvasWallpaperDialog(Dialog): - def __init__(self, app: 
"Application") -> None: - """ - create an instance of CanvasWallpaper object - """ - super().__init__(app, "Canvas Background") - self.canvas: CanvasGraph = self.app.manager.current() - self.scale_option: tk.IntVar = tk.IntVar(value=self.canvas.scale_option.get()) - self.adjust_to_dim: tk.BooleanVar = tk.BooleanVar( - value=self.canvas.adjust_to_dim.get() - ) - self.filename: tk.StringVar = tk.StringVar(value=self.canvas.wallpaper_file) - self.image_label: Optional[ttk.Label] = None - self.options: list[ttk.Radiobutton] = [] - self.draw() - - def draw(self) -> None: - self.top.columnconfigure(0, weight=1) - self.draw_image() - self.draw_image_label() - self.draw_image_selection() - self.draw_options() - self.draw_additional_options() - self.draw_spacer() - self.draw_buttons() - - def draw_image(self) -> None: - self.image_label = ttk.Label( - self.top, text="(image preview)", width=32, anchor=tk.CENTER - ) - self.image_label.grid(pady=PADY) - - def draw_image_label(self) -> None: - label = ttk.Label(self.top, text="Image filename: ") - label.grid(sticky=tk.EW) - if self.filename.get(): - self.draw_preview() - - def draw_image_selection(self) -> None: - frame = ttk.Frame(self.top) - frame.columnconfigure(0, weight=2) - frame.columnconfigure(1, weight=1) - frame.columnconfigure(2, weight=1) - frame.grid(sticky=tk.EW, pady=PADY) - - entry = ttk.Entry(frame, textvariable=self.filename) - entry.focus() - entry.grid(row=0, column=0, sticky=tk.EW, padx=PADX) - - button = ttk.Button(frame, text="...", command=self.click_open_image) - button.grid(row=0, column=1, sticky=tk.EW, padx=PADX) - - button = ttk.Button(frame, text="Clear", command=self.click_clear) - button.grid(row=0, column=2, sticky=tk.EW) - - def draw_options(self) -> None: - frame = ttk.Frame(self.top) - frame.columnconfigure(0, weight=1) - frame.columnconfigure(1, weight=1) - frame.columnconfigure(2, weight=1) - frame.columnconfigure(3, weight=1) - frame.grid(sticky=tk.EW, pady=PADY) - - button = ttk.Radiobutton( - frame, text="upper-left", value=1, variable=self.scale_option - ) - button.grid(row=0, column=0, sticky=tk.EW) - self.options.append(button) - - button = ttk.Radiobutton( - frame, text="centered", value=2, variable=self.scale_option - ) - button.grid(row=0, column=1, sticky=tk.EW) - self.options.append(button) - - button = ttk.Radiobutton( - frame, text="scaled", value=3, variable=self.scale_option - ) - button.grid(row=0, column=2, sticky=tk.EW) - self.options.append(button) - - button = ttk.Radiobutton( - frame, text="titled", value=4, variable=self.scale_option - ) - button.grid(row=0, column=3, sticky=tk.EW) - self.options.append(button) - - def draw_additional_options(self) -> None: - checkbutton = ttk.Checkbutton( - self.top, - text="Adjust canvas size to image dimensions", - variable=self.adjust_to_dim, - command=self.click_adjust_canvas, - ) - checkbutton.grid(sticky=tk.EW, padx=PADX, pady=PADY) - - def draw_buttons(self) -> None: - frame = ttk.Frame(self.top) - frame.grid(sticky=tk.EW) - frame.columnconfigure(0, weight=1) - frame.columnconfigure(1, weight=1) - - button = ttk.Button(frame, text="Apply", command=self.click_apply) - button.grid(row=0, column=0, sticky=tk.EW, padx=PADX) - - button = ttk.Button(frame, text="Cancel", command=self.destroy) - button.grid(row=0, column=1, sticky=tk.EW) - - def click_open_image(self) -> None: - filename = image_chooser(self, BACKGROUNDS_PATH) - if filename: - self.filename.set(filename) - self.draw_preview() - - def draw_preview(self) -> None: - image = 
images.from_file(self.filename.get(), width=250, height=135) - self.image_label.config(image=image) - self.image_label.image = image - - def click_clear(self) -> None: - """ - delete like shown in image link entry if there is any - """ - # delete entry - self.filename.set("") - # delete display image - self.image_label.config(image="", width=32) - self.image_label.image = None - - def click_adjust_canvas(self) -> None: - # deselect all radio buttons and grey them out - if self.adjust_to_dim.get(): - self.scale_option.set(0) - for option in self.options: - option.config(state=tk.DISABLED) - # turn back the radio button to active state so that user can choose again - else: - self.scale_option.set(1) - for option in self.options: - option.config(state=tk.NORMAL) - - def click_apply(self) -> None: - self.canvas.scale_option.set(self.scale_option.get()) - self.canvas.adjust_to_dim.set(self.adjust_to_dim.get()) - filename = self.filename.get() - if not filename: - filename = None - try: - self.canvas.set_wallpaper(filename) - except FileNotFoundError: - logger.error("invalid background: %s", filename) - self.destroy() diff --git a/daemon/core/gui/dialogs/colorpicker.py b/daemon/core/gui/dialogs/colorpicker.py deleted file mode 100644 index a27b1698..00000000 --- a/daemon/core/gui/dialogs/colorpicker.py +++ /dev/null @@ -1,223 +0,0 @@ -""" -custom color picker -""" -import tkinter as tk -from tkinter import ttk -from typing import TYPE_CHECKING, Optional - -from core.gui import validation -from core.gui.dialogs.dialog import Dialog -from core.gui.themes import PADX, PADY - -if TYPE_CHECKING: - from core.gui.app import Application - - -def get_rgb(red: int, green: int, blue: int) -> str: - """ - Convert rgb integers to an rgb hex code (#). - - :param red: red value - :param green: green value - :param blue: blue value - :return: rgb hex code - """ - return f"#{red:02x}{green:02x}{blue:02x}" - - -def get_rgb_values(hex_code: str) -> tuple[int, int, int]: - """ - Convert a valid rgb hex code (#) to rgb integers. 
- - :param hex_code: valid rgb hex code - :return: a tuple of red, blue, and green values - """ - if len(hex_code) == 4: - red = hex_code[1] - green = hex_code[2] - blue = hex_code[3] - else: - red = hex_code[1:3] - green = hex_code[3:5] - blue = hex_code[5:] - return int(red, 16), int(green, 16), int(blue, 16) - - -class ColorPickerDialog(Dialog): - def __init__( - self, master: tk.BaseWidget, app: "Application", initcolor: str = "#000000" - ): - super().__init__(app, "Color Picker", master=master) - self.red_entry: Optional[validation.RgbEntry] = None - self.blue_entry: Optional[validation.RgbEntry] = None - self.green_entry: Optional[validation.RgbEntry] = None - self.hex_entry: Optional[validation.HexEntry] = None - self.red_label: Optional[ttk.Label] = None - self.green_label: Optional[ttk.Label] = None - self.blue_label: Optional[ttk.Label] = None - self.display: Optional[tk.Frame] = None - self.color: str = initcolor - red, green, blue = get_rgb_values(initcolor) - self.red: tk.IntVar = tk.IntVar(value=red) - self.blue: tk.IntVar = tk.IntVar(value=blue) - self.green: tk.IntVar = tk.IntVar(value=green) - self.hex: tk.StringVar = tk.StringVar(value=initcolor) - self.red_scale: tk.IntVar = tk.IntVar(value=red) - self.green_scale: tk.IntVar = tk.IntVar(value=green) - self.blue_scale: tk.IntVar = tk.IntVar(value=blue) - self.draw() - self.set_bindings() - - def askcolor(self) -> str: - self.show() - return self.color - - def draw(self) -> None: - self.top.columnconfigure(0, weight=1) - self.top.rowconfigure(3, weight=1) - - # rgb frames - frame = ttk.Frame(self.top) - frame.grid(row=0, column=0, sticky=tk.EW, pady=PADY) - frame.columnconfigure(2, weight=3) - frame.columnconfigure(3, weight=1) - label = ttk.Label(frame, text="R") - label.grid(row=0, column=0, padx=PADX) - self.red_entry = validation.RgbEntry(frame, width=3, textvariable=self.red) - self.red_entry.grid(row=0, column=1, sticky=tk.EW, padx=PADX) - scale = ttk.Scale( - frame, - from_=0, - to=255, - value=0, - orient=tk.HORIZONTAL, - variable=self.red_scale, - command=lambda x: self.scale_callback(self.red_scale, self.red), - ) - scale.grid(row=0, column=2, sticky=tk.EW, padx=PADX) - self.red_label = ttk.Label( - frame, background=get_rgb(self.red.get(), 0, 0), width=5 - ) - self.red_label.grid(row=0, column=3, sticky=tk.EW) - - frame = ttk.Frame(self.top) - frame.grid(row=1, column=0, sticky=tk.EW, pady=PADY) - frame.columnconfigure(2, weight=3) - frame.columnconfigure(3, weight=1) - label = ttk.Label(frame, text="G") - label.grid(row=0, column=0, padx=PADX) - self.green_entry = validation.RgbEntry(frame, width=3, textvariable=self.green) - self.green_entry.grid(row=0, column=1, sticky=tk.EW, padx=PADX) - scale = ttk.Scale( - frame, - from_=0, - to=255, - value=0, - orient=tk.HORIZONTAL, - variable=self.green_scale, - command=lambda x: self.scale_callback(self.green_scale, self.green), - ) - scale.grid(row=0, column=2, sticky=tk.EW, padx=PADX) - self.green_label = ttk.Label( - frame, background=get_rgb(0, self.green.get(), 0), width=5 - ) - self.green_label.grid(row=0, column=3, sticky=tk.EW) - - frame = ttk.Frame(self.top) - frame.grid(row=2, column=0, sticky=tk.EW, pady=PADY) - frame.columnconfigure(2, weight=3) - frame.columnconfigure(3, weight=1) - label = ttk.Label(frame, text="B") - label.grid(row=0, column=0, padx=PADX) - self.blue_entry = validation.RgbEntry(frame, width=3, textvariable=self.blue) - self.blue_entry.grid(row=0, column=1, sticky=tk.EW, padx=PADX) - scale = ttk.Scale( - frame, - from_=0, - to=255, - 
value=0, - orient=tk.HORIZONTAL, - variable=self.blue_scale, - command=lambda x: self.scale_callback(self.blue_scale, self.blue), - ) - scale.grid(row=0, column=2, sticky=tk.EW, padx=PADX) - self.blue_label = ttk.Label( - frame, background=get_rgb(0, 0, self.blue.get()), width=5 - ) - self.blue_label.grid(row=0, column=3, sticky=tk.EW) - - # hex code and color display - frame = ttk.Frame(self.top) - frame.columnconfigure(0, weight=1) - frame.rowconfigure(1, weight=1) - self.hex_entry = validation.HexEntry(frame, textvariable=self.hex) - self.hex_entry.grid(sticky=tk.EW, pady=PADY) - self.display = tk.Frame(frame, background=self.color, width=100, height=100) - self.display.grid(sticky=tk.NSEW) - frame.grid(row=3, column=0, sticky=tk.NSEW, pady=PADY) - - # button frame - frame = ttk.Frame(self.top) - frame.grid(row=4, column=0, sticky=tk.EW) - frame.columnconfigure(0, weight=1) - frame.columnconfigure(1, weight=1) - button = ttk.Button(frame, text="OK", command=self.button_ok) - button.grid(row=0, column=0, sticky=tk.EW, padx=PADX) - button = ttk.Button(frame, text="Cancel", command=self.destroy) - button.grid(row=0, column=1, sticky=tk.EW) - - def set_bindings(self) -> None: - self.red_entry.bind("", lambda x: self.current_focus("rgb")) - self.green_entry.bind("", lambda x: self.current_focus("rgb")) - self.blue_entry.bind("", lambda x: self.current_focus("rgb")) - self.hex_entry.bind("", lambda x: self.current_focus("hex")) - self.red.trace_add("write", self.update_color) - self.green.trace_add("write", self.update_color) - self.blue.trace_add("write", self.update_color) - self.hex.trace_add("write", self.update_color) - - def button_ok(self) -> None: - self.color = self.hex.get() - self.destroy() - - def current_focus(self, focus: str) -> None: - self.focus = focus - - def update_color(self, arg1=None, arg2=None, arg3=None) -> None: - if self.focus == "rgb": - red = int(self.red_entry.get() or 0) - blue = int(self.blue_entry.get() or 0) - green = int(self.green_entry.get() or 0) - self.set_scale(red, green, blue) - hex_code = get_rgb(red, green, blue) - self.hex.set(hex_code) - self.display.config(background=hex_code) - self.set_label(red, green, blue) - elif self.focus == "hex": - hex_code = self.hex.get() - if len(hex_code) == 4 or len(hex_code) == 7: - red, green, blue = get_rgb_values(hex_code) - self.set_entry(red, green, blue) - self.set_scale(red, green, blue) - self.display.config(background=hex_code) - self.set_label(red, green, blue) - - def scale_callback(self, var: tk.IntVar, color_var: tk.IntVar) -> None: - color_var.set(var.get()) - self.focus = "rgb" - self.update_color() - - def set_scale(self, red: int, green: int, blue: int): - self.red_scale.set(red) - self.green_scale.set(green) - self.blue_scale.set(blue) - - def set_entry(self, red: int, green: int, blue: int) -> None: - self.red.set(red) - self.green.set(green) - self.blue.set(blue) - - def set_label(self, red: int, green: int, blue: int) -> None: - self.red_label.configure(background=get_rgb(red, 0, 0)) - self.green_label.configure(background=get_rgb(0, green, 0)) - self.blue_label.configure(background=get_rgb(0, 0, blue)) diff --git a/daemon/core/gui/dialogs/configserviceconfig.py b/daemon/core/gui/dialogs/configserviceconfig.py deleted file mode 100644 index 0e873a79..00000000 --- a/daemon/core/gui/dialogs/configserviceconfig.py +++ /dev/null @@ -1,414 +0,0 @@ -""" -Service configuration dialog -""" -import logging -import tkinter as tk -from tkinter import ttk -from typing import TYPE_CHECKING, Optional - 
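# Editor's note: a minimal, hedged sketch exercising the rgb/hex helpers from the
# colorpicker.py module deleted above. The function bodies are copied from the
# removed file; the sample values are illustrative only.
def get_rgb(red: int, green: int, blue: int) -> str:
    # pack three 8-bit channel values into a "#rrggbb" hex code
    return f"#{red:02x}{green:02x}{blue:02x}"


def get_rgb_values(hex_code: str) -> tuple[int, int, int]:
    # accept both "#rgb" and "#rrggbb" forms and return integer channel values
    if len(hex_code) == 4:
        red, green, blue = hex_code[1], hex_code[2], hex_code[3]
    else:
        red, green, blue = hex_code[1:3], hex_code[3:5], hex_code[5:]
    return int(red, 16), int(green, 16), int(blue, 16)


assert get_rgb(255, 102, 102) == "#ff6666"
assert get_rgb_values("#ff6666") == (255, 102, 102)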
-import grpc - -from core.api.grpc.wrappers import ( - ConfigOption, - ConfigServiceData, - Node, - ServiceValidationMode, -) -from core.gui.dialogs.dialog import Dialog -from core.gui.themes import FRAME_PAD, PADX, PADY -from core.gui.widgets import CodeText, ConfigFrame, ListboxScroll - -logger = logging.getLogger(__name__) - -if TYPE_CHECKING: - from core.gui.app import Application - from core.gui.coreclient import CoreClient - - -class ConfigServiceConfigDialog(Dialog): - def __init__( - self, master: tk.BaseWidget, app: "Application", service_name: str, node: Node - ) -> None: - title = f"{service_name} Config Service" - super().__init__(app, title, master=master) - self.core: "CoreClient" = app.core - self.node: Node = node - self.service_name: str = service_name - self.radiovar: tk.IntVar = tk.IntVar(value=2) - self.directories: list[str] = [] - self.templates: list[str] = [] - self.rendered: dict[str, str] = {} - self.dependencies: list[str] = [] - self.executables: list[str] = [] - self.startup_commands: list[str] = [] - self.validation_commands: list[str] = [] - self.shutdown_commands: list[str] = [] - self.default_startup: list[str] = [] - self.default_validate: list[str] = [] - self.default_shutdown: list[str] = [] - self.validation_mode: Optional[ServiceValidationMode] = None - self.validation_time: Optional[int] = None - self.validation_period: tk.DoubleVar = tk.DoubleVar() - self.modes: list[str] = [] - self.mode_configs: dict[str, dict[str, str]] = {} - self.notebook: Optional[ttk.Notebook] = None - self.templates_combobox: Optional[ttk.Combobox] = None - self.modes_combobox: Optional[ttk.Combobox] = None - self.startup_commands_listbox: Optional[tk.Listbox] = None - self.shutdown_commands_listbox: Optional[tk.Listbox] = None - self.validate_commands_listbox: Optional[tk.Listbox] = None - self.validation_time_entry: Optional[ttk.Entry] = None - self.validation_mode_entry: Optional[ttk.Entry] = None - self.template_text: Optional[CodeText] = None - self.rendered_text: Optional[CodeText] = None - self.validation_period_entry: Optional[ttk.Entry] = None - self.original_service_files: dict[str, str] = {} - self.temp_service_files: dict[str, str] = {} - self.modified_files: set[str] = set() - self.config_frame: Optional[ConfigFrame] = None - self.default_config: dict[str, str] = {} - self.config: dict[str, ConfigOption] = {} - self.has_error: bool = False - self.load() - if not self.has_error: - self.draw() - - def load(self) -> None: - try: - self.core.start_session(definition=True) - service = self.core.config_services[self.service_name] - self.dependencies = service.dependencies[:] - self.executables = service.executables[:] - self.directories = service.directories[:] - self.templates = service.files[:] - self.startup_commands = service.startup[:] - self.validation_commands = service.validate[:] - self.shutdown_commands = service.shutdown[:] - self.validation_mode = service.validation_mode - self.validation_time = service.validation_timer - self.validation_period.set(service.validation_period) - defaults = self.core.get_config_service_defaults( - self.node.id, self.service_name - ) - self.original_service_files = defaults.templates - self.temp_service_files = dict(self.original_service_files) - self.modes = sorted(defaults.modes) - self.mode_configs = defaults.modes - self.config = ConfigOption.from_dict(defaults.config) - self.default_config = {x.name: x.value for x in self.config.values()} - self.rendered = self.core.get_config_service_rendered( - self.node.id, 
self.service_name - ) - service_config = self.node.config_service_configs.get(self.service_name) - if service_config: - for key, value in service_config.config.items(): - self.config[key].value = value - logger.info("default config: %s", self.default_config) - for file, data in service_config.templates.items(): - self.modified_files.add(file) - self.temp_service_files[file] = data - except grpc.RpcError as e: - self.app.show_grpc_exception("Get Config Service Error", e) - self.has_error = True - - def draw(self) -> None: - self.top.columnconfigure(0, weight=1) - self.top.rowconfigure(0, weight=1) - # draw notebook - self.notebook = ttk.Notebook(self.top) - self.notebook.grid(sticky=tk.NSEW, pady=PADY) - self.draw_tab_files() - if self.config: - self.draw_tab_config() - self.draw_tab_startstop() - self.draw_tab_validation() - self.draw_buttons() - - def draw_tab_files(self) -> None: - tab = ttk.Frame(self.notebook, padding=FRAME_PAD) - tab.grid(sticky=tk.NSEW) - tab.columnconfigure(0, weight=1) - tab.rowconfigure(2, weight=1) - self.notebook.add(tab, text="Directories/Files") - - label = ttk.Label( - tab, text="Directories and templates that will be used for this service." - ) - label.grid(pady=PADY) - - frame = ttk.Frame(tab) - frame.grid(sticky=tk.EW, pady=PADY) - frame.columnconfigure(1, weight=1) - label = ttk.Label(frame, text="Directories") - label.grid(row=0, column=0, sticky=tk.W, padx=PADX) - state = "readonly" if self.directories else tk.DISABLED - directories_combobox = ttk.Combobox(frame, values=self.directories, state=state) - directories_combobox.grid(row=0, column=1, sticky=tk.EW, pady=PADY) - if self.directories: - directories_combobox.current(0) - label = ttk.Label(frame, text="Files") - label.grid(row=1, column=0, sticky=tk.W, padx=PADX) - state = "readonly" if self.templates else tk.DISABLED - self.templates_combobox = ttk.Combobox( - frame, values=self.templates, state=state - ) - self.templates_combobox.bind( - "<>", self.handle_template_changed - ) - self.templates_combobox.grid(row=1, column=1, sticky=tk.EW, pady=PADY) - # draw file template tab - notebook = ttk.Notebook(tab) - notebook.rowconfigure(0, weight=1) - notebook.columnconfigure(0, weight=1) - notebook.grid(sticky=tk.NSEW, pady=PADY) - # draw rendered file tab - rendered_tab = ttk.Frame(notebook, padding=FRAME_PAD) - rendered_tab.grid(sticky=tk.NSEW) - rendered_tab.rowconfigure(0, weight=1) - rendered_tab.columnconfigure(0, weight=1) - notebook.add(rendered_tab, text="Rendered") - self.rendered_text = CodeText(rendered_tab) - self.rendered_text.grid(sticky=tk.NSEW) - self.rendered_text.text.bind("", self.update_template_file_data) - # draw template file tab - template_tab = ttk.Frame(notebook, padding=FRAME_PAD) - template_tab.grid(sticky=tk.NSEW) - template_tab.rowconfigure(0, weight=1) - template_tab.columnconfigure(0, weight=1) - notebook.add(template_tab, text="Template") - self.template_text = CodeText(template_tab) - self.template_text.grid(sticky=tk.NSEW) - self.template_text.text.bind("", self.update_template_file_data) - if self.templates: - self.templates_combobox.current(0) - template_name = self.templates[0] - temp_data = self.temp_service_files[template_name] - self.template_text.set_text(temp_data) - rendered_data = self.rendered[template_name] - self.rendered_text.set_text(rendered_data) - else: - self.template_text.text.configure(state=tk.DISABLED) - self.rendered_text.text.configure(state=tk.DISABLED) - - def draw_tab_config(self) -> None: - tab = ttk.Frame(self.notebook, padding=FRAME_PAD) 
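# Editor's note: the bind("") and bind("<>") calls throughout these deleted
# dialogs lost their Tk event sequences when the patch text was extracted
# (anything inside angle brackets was stripped). The exact sequences cannot be
# recovered from this diff; the sketch below only illustrates the standard
# tkinter naming convention such code typically relies on.
import tkinter as tk
from tkinter import ttk

root = tk.Tk()
entry = ttk.Entry(root)
entry.grid()
# plain events use single angle brackets, e.g. react to every key release
entry.bind("<KeyRelease>", lambda _e: print("entry changed:", entry.get()))
combo = ttk.Combobox(root, values=["a", "b"], state="readonly")
combo.grid()
# ttk widgets emit double-bracketed virtual events for selection changes,
# e.g. "<<ComboboxSelected>>", "<<ListboxSelect>>", "<<TreeviewSelect>>"
combo.bind("<<ComboboxSelected>>", lambda _e: print("picked:", combo.get()))
root.mainloop()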
- tab.grid(sticky=tk.NSEW) - tab.columnconfigure(0, weight=1) - self.notebook.add(tab, text="Configuration") - - if self.modes: - frame = ttk.Frame(tab) - frame.grid(sticky=tk.EW, pady=PADY) - frame.columnconfigure(1, weight=1) - label = ttk.Label(frame, text="Modes") - label.grid(row=0, column=0, padx=PADX) - self.modes_combobox = ttk.Combobox( - frame, values=self.modes, state="readonly" - ) - self.modes_combobox.bind("<>", self.handle_mode_changed) - self.modes_combobox.grid(row=0, column=1, sticky=tk.EW, pady=PADY) - - logger.info("config service config: %s", self.config) - self.config_frame = ConfigFrame(tab, self.app, self.config) - self.config_frame.draw_config() - self.config_frame.grid(sticky=tk.NSEW, pady=PADY) - tab.rowconfigure(self.config_frame.grid_info()["row"], weight=1) - - def draw_tab_startstop(self) -> None: - tab = ttk.Frame(self.notebook, padding=FRAME_PAD) - tab.grid(sticky=tk.NSEW) - tab.columnconfigure(0, weight=1) - for i in range(3): - tab.rowconfigure(i, weight=1) - self.notebook.add(tab, text="Startup/Shutdown") - commands = [] - # tab 3 - for i in range(3): - label_frame = None - if i == 0: - label_frame = ttk.LabelFrame( - tab, text="Startup Commands", padding=FRAME_PAD - ) - commands = self.startup_commands - elif i == 1: - label_frame = ttk.LabelFrame( - tab, text="Shutdown Commands", padding=FRAME_PAD - ) - commands = self.shutdown_commands - elif i == 2: - label_frame = ttk.LabelFrame( - tab, text="Validation Commands", padding=FRAME_PAD - ) - commands = self.validation_commands - label_frame.columnconfigure(0, weight=1) - label_frame.rowconfigure(0, weight=1) - label_frame.grid(row=i, column=0, sticky=tk.NSEW, pady=PADY) - listbox_scroll = ListboxScroll(label_frame) - for command in commands: - listbox_scroll.listbox.insert("end", command) - listbox_scroll.listbox.config(height=4) - listbox_scroll.grid(sticky=tk.NSEW) - if i == 0: - self.startup_commands_listbox = listbox_scroll.listbox - elif i == 1: - self.shutdown_commands_listbox = listbox_scroll.listbox - elif i == 2: - self.validate_commands_listbox = listbox_scroll.listbox - - def draw_tab_validation(self) -> None: - tab = ttk.Frame(self.notebook, padding=FRAME_PAD) - tab.grid(sticky=tk.EW) - tab.columnconfigure(0, weight=1) - self.notebook.add(tab, text="Validation", sticky=tk.NSEW) - - frame = ttk.Frame(tab) - frame.grid(sticky=tk.EW, pady=PADY) - frame.columnconfigure(1, weight=1) - - label = ttk.Label(frame, text="Validation Time") - label.grid(row=0, column=0, sticky=tk.W, padx=PADX) - self.validation_time_entry = ttk.Entry(frame) - self.validation_time_entry.insert("end", str(self.validation_time)) - self.validation_time_entry.config(state=tk.DISABLED) - self.validation_time_entry.grid(row=0, column=1, sticky=tk.EW, pady=PADY) - - label = ttk.Label(frame, text="Validation Mode") - label.grid(row=1, column=0, sticky=tk.W, padx=PADX) - if self.validation_mode == ServiceValidationMode.BLOCKING: - mode = "BLOCKING" - elif self.validation_mode == ServiceValidationMode.NON_BLOCKING: - mode = "NON_BLOCKING" - else: - mode = "TIMER" - self.validation_mode_entry = ttk.Entry( - frame, textvariable=tk.StringVar(value=mode) - ) - self.validation_mode_entry.insert("end", mode) - self.validation_mode_entry.config(state=tk.DISABLED) - self.validation_mode_entry.grid(row=1, column=1, sticky=tk.EW, pady=PADY) - - label = ttk.Label(frame, text="Validation Period") - label.grid(row=2, column=0, sticky=tk.W, padx=PADX) - self.validation_period_entry = ttk.Entry( - frame, state=tk.DISABLED, 
textvariable=self.validation_period - ) - self.validation_period_entry.grid(row=2, column=1, sticky=tk.EW, pady=PADY) - - label_frame = ttk.LabelFrame(tab, text="Executables", padding=FRAME_PAD) - label_frame.grid(sticky=tk.NSEW, pady=PADY) - label_frame.columnconfigure(0, weight=1) - label_frame.rowconfigure(0, weight=1) - listbox_scroll = ListboxScroll(label_frame) - listbox_scroll.grid(sticky=tk.NSEW) - tab.rowconfigure(listbox_scroll.grid_info()["row"], weight=1) - for executable in self.executables: - listbox_scroll.listbox.insert("end", executable) - - label_frame = ttk.LabelFrame(tab, text="Dependencies", padding=FRAME_PAD) - label_frame.grid(sticky=tk.NSEW, pady=PADY) - label_frame.columnconfigure(0, weight=1) - label_frame.rowconfigure(0, weight=1) - listbox_scroll = ListboxScroll(label_frame) - listbox_scroll.grid(sticky=tk.NSEW) - tab.rowconfigure(listbox_scroll.grid_info()["row"], weight=1) - for dependency in self.dependencies: - listbox_scroll.listbox.insert("end", dependency) - - def draw_buttons(self) -> None: - frame = ttk.Frame(self.top) - frame.grid(sticky=tk.EW) - for i in range(4): - frame.columnconfigure(i, weight=1) - button = ttk.Button(frame, text="Apply", command=self.click_apply) - button.grid(row=0, column=0, sticky=tk.EW, padx=PADX) - button = ttk.Button(frame, text="Defaults", command=self.click_defaults) - button.grid(row=0, column=1, sticky=tk.EW, padx=PADX) - button = ttk.Button(frame, text="Copy...", command=self.click_copy) - button.grid(row=0, column=2, sticky=tk.EW, padx=PADX) - button = ttk.Button(frame, text="Cancel", command=self.destroy) - button.grid(row=0, column=3, sticky=tk.EW) - - def click_apply(self) -> None: - current_listbox = self.master.current.listbox - if not self.is_custom(): - self.node.config_service_configs.pop(self.service_name, None) - current_listbox.itemconfig(current_listbox.curselection()[0], bg="") - self.destroy() - return - service_config = self.node.config_service_configs.setdefault( - self.service_name, ConfigServiceData() - ) - if self.config_frame: - self.config_frame.parse_config() - service_config.config = {x.name: x.value for x in self.config.values()} - for file in self.modified_files: - service_config.templates[file] = self.temp_service_files[file] - all_current = current_listbox.get(0, tk.END) - current_listbox.itemconfig(all_current.index(self.service_name), bg="green") - self.destroy() - - def handle_template_changed(self, event: tk.Event) -> None: - template_name = self.templates_combobox.get() - temp_data = self.temp_service_files[template_name] - self.template_text.set_text(temp_data) - rendered = self.rendered[template_name] - self.rendered_text.set_text(rendered) - - def handle_mode_changed(self, event: tk.Event) -> None: - mode = self.modes_combobox.get() - config = self.mode_configs[mode] - logger.info("mode config: %s", config) - self.config_frame.set_values(config) - - def update_template_file_data(self, _event: tk.Event) -> None: - template = self.templates_combobox.get() - self.temp_service_files[template] = self.rendered_text.get_text() - if self.rendered[template] != self.temp_service_files[template]: - self.modified_files.add(template) - return - self.temp_service_files[template] = self.template_text.get_text() - if self.temp_service_files[template] != self.original_service_files[template]: - self.modified_files.add(template) - else: - self.modified_files.discard(template) - - def is_custom(self) -> bool: - has_custom_templates = len(self.modified_files) > 0 - has_custom_config = False - if 
self.config_frame: - current = self.config_frame.parse_config() - has_custom_config = self.default_config != current - return has_custom_templates or has_custom_config - - def click_defaults(self) -> None: - # clear all saved state data - self.modified_files.clear() - self.node.config_service_configs.pop(self.service_name, None) - self.temp_service_files = dict(self.original_service_files) - # reset session definition and retrieve default rendered templates - self.core.start_session(definition=True) - self.rendered = self.core.get_config_service_rendered( - self.node.id, self.service_name - ) - logger.info( - "cleared config service config: %s", self.node.config_service_configs - ) - # reset current selected file data and config data, if present - template_name = self.templates_combobox.get() - temp_data = self.temp_service_files[template_name] - self.template_text.set_text(temp_data) - rendered_data = self.rendered[template_name] - self.rendered_text.set_text(rendered_data) - if self.config_frame: - logger.info("resetting defaults: %s", self.default_config) - self.config_frame.set_values(self.default_config) - - def click_copy(self) -> None: - pass - - def append_commands( - self, commands: list[str], listbox: tk.Listbox, to_add: list[str] - ) -> None: - for cmd in to_add: - commands.append(cmd) - listbox.insert(tk.END, cmd) diff --git a/daemon/core/gui/dialogs/copyserviceconfig.py b/daemon/core/gui/dialogs/copyserviceconfig.py deleted file mode 100644 index 6b2f4927..00000000 --- a/daemon/core/gui/dialogs/copyserviceconfig.py +++ /dev/null @@ -1,119 +0,0 @@ -""" -copy service config dialog -""" - -import tkinter as tk -from tkinter import ttk -from typing import TYPE_CHECKING, Optional - -from core.gui.dialogs.dialog import Dialog -from core.gui.themes import PADX, PADY -from core.gui.widgets import CodeText, ListboxScroll - -if TYPE_CHECKING: - from core.gui.app import Application - from core.gui.dialogs.serviceconfig import ServiceConfigDialog - - -class CopyServiceConfigDialog(Dialog): - def __init__( - self, - app: "Application", - dialog: "ServiceConfigDialog", - name: str, - service: str, - file_name: str, - ) -> None: - super().__init__(app, f"Copy Custom File to {name}", master=dialog) - self.dialog: "ServiceConfigDialog" = dialog - self.service: str = service - self.file_name: str = file_name - self.listbox: Optional[tk.Listbox] = None - self.nodes: dict[str, int] = {} - self.draw() - - def draw(self) -> None: - self.top.columnconfigure(0, weight=1) - self.top.rowconfigure(1, weight=1) - label = ttk.Label( - self.top, text=f"{self.service} - {self.file_name}", anchor=tk.CENTER - ) - label.grid(sticky=tk.EW, pady=PADY) - - listbox_scroll = ListboxScroll(self.top) - listbox_scroll.grid(sticky=tk.NSEW, pady=PADY) - self.listbox = listbox_scroll.listbox - for node in self.app.core.session.nodes.values(): - file_configs = node.service_file_configs.get(self.service) - if not file_configs: - continue - data = file_configs.get(self.file_name) - if not data: - continue - self.nodes[node.name] = node.id - self.listbox.insert(tk.END, node.name) - - frame = ttk.Frame(self.top) - frame.grid(sticky=tk.EW) - for i in range(3): - frame.columnconfigure(i, weight=1) - button = ttk.Button(frame, text="Copy", command=self.click_copy) - button.grid(row=0, column=0, sticky=tk.EW, padx=PADX) - button = ttk.Button(frame, text="View", command=self.click_view) - button.grid(row=0, column=1, sticky=tk.EW, padx=PADX) - button = ttk.Button(frame, text="Cancel", command=self.destroy) - button.grid(row=0, 
column=2, sticky=tk.EW) - - def click_copy(self) -> None: - selection = self.listbox.curselection() - if not selection: - return - name = self.listbox.get(selection) - node_id = self.nodes[name] - node = self.app.core.session.nodes[node_id] - data = node.service_file_configs[self.service][self.file_name] - self.dialog.temp_service_files[self.file_name] = data - self.dialog.modified_files.add(self.file_name) - self.dialog.service_file_data.text.delete(1.0, tk.END) - self.dialog.service_file_data.text.insert(tk.END, data) - self.destroy() - - def click_view(self) -> None: - selection = self.listbox.curselection() - if not selection: - return - name = self.listbox.get(selection) - node_id = self.nodes[name] - node = self.app.core.session.nodes[node_id] - data = node.service_file_configs[self.service][self.file_name] - dialog = ViewConfigDialog( - self.app, self, name, self.service, self.file_name, data - ) - dialog.show() - - -class ViewConfigDialog(Dialog): - def __init__( - self, - app: "Application", - master: tk.BaseWidget, - name: str, - service: str, - file_name: str, - data: str, - ) -> None: - title = f"{name} Service({service}) File({file_name})" - super().__init__(app, title, master=master) - self.data = data - self.service_data = None - self.draw() - - def draw(self) -> None: - self.top.columnconfigure(0, weight=1) - self.top.rowconfigure(0, weight=1) - self.service_data = CodeText(self.top) - self.service_data.grid(sticky=tk.NSEW, pady=PADY) - self.service_data.text.insert(tk.END, self.data) - self.service_data.text.config(state=tk.DISABLED) - button = ttk.Button(self.top, text="Close", command=self.destroy) - button.grid(sticky=tk.EW) diff --git a/daemon/core/gui/dialogs/customnodes.py b/daemon/core/gui/dialogs/customnodes.py deleted file mode 100644 index ea4421e8..00000000 --- a/daemon/core/gui/dialogs/customnodes.py +++ /dev/null @@ -1,280 +0,0 @@ -import logging -import tkinter as tk -from pathlib import Path -from tkinter import ttk -from typing import TYPE_CHECKING, Optional - -from PIL.ImageTk import PhotoImage - -from core.gui import images -from core.gui.appconfig import ICONS_PATH, CustomNode -from core.gui.dialogs.dialog import Dialog -from core.gui.nodeutils import NodeDraw -from core.gui.themes import FRAME_PAD, PADX, PADY -from core.gui.widgets import CheckboxList, ListboxScroll, image_chooser - -logger = logging.getLogger(__name__) - -if TYPE_CHECKING: - from core.gui.app import Application - - -class ServicesSelectDialog(Dialog): - def __init__( - self, master: tk.BaseWidget, app: "Application", current_services: set[str] - ) -> None: - super().__init__(app, "Node Config Services", master=master) - self.groups: Optional[ListboxScroll] = None - self.services: Optional[CheckboxList] = None - self.current: Optional[ListboxScroll] = None - self.current_services: set[str] = current_services - self.draw() - - def draw(self) -> None: - self.top.columnconfigure(0, weight=1) - self.top.rowconfigure(0, weight=1) - - frame = ttk.LabelFrame(self.top) - frame.grid(stick=tk.NSEW, pady=PADY) - frame.rowconfigure(0, weight=1) - for i in range(3): - frame.columnconfigure(i, weight=1) - label_frame = ttk.LabelFrame(frame, text="Groups", padding=FRAME_PAD) - label_frame.grid(row=0, column=0, sticky=tk.NSEW) - label_frame.rowconfigure(0, weight=1) - label_frame.columnconfigure(0, weight=1) - self.groups = ListboxScroll(label_frame) - self.groups.grid(sticky=tk.NSEW) - for group in sorted(self.app.core.config_services_groups): - self.groups.listbox.insert(tk.END, group) - 
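# Editor's note: a hedged sketch of the per-node lookup that the copy dialog
# above performs when listing nodes carrying a custom copy of a service file.
# The attribute names mirror the deleted code; the session argument and the
# helper name itself are illustrative, not part of the original module.
def nodes_with_custom_file(session, service: str, file_name: str) -> dict[str, int]:
    nodes: dict[str, int] = {}
    for node in session.nodes.values():
        file_configs = node.service_file_configs.get(service)
        if not file_configs:
            continue
        data = file_configs.get(file_name)
        if not data:
            continue
        nodes[node.name] = node.id
    return nodes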
self.groups.listbox.bind("<>", self.handle_group_change) - self.groups.listbox.selection_set(0) - - label_frame = ttk.LabelFrame(frame, text="Services") - label_frame.grid(row=0, column=1, sticky=tk.NSEW) - label_frame.columnconfigure(0, weight=1) - label_frame.rowconfigure(0, weight=1) - self.services = CheckboxList( - label_frame, self.app, clicked=self.service_clicked, padding=FRAME_PAD - ) - self.services.grid(sticky=tk.NSEW) - - label_frame = ttk.LabelFrame(frame, text="Selected", padding=FRAME_PAD) - label_frame.grid(row=0, column=2, sticky=tk.NSEW) - label_frame.rowconfigure(0, weight=1) - label_frame.columnconfigure(0, weight=1) - self.current = ListboxScroll(label_frame) - self.current.grid(sticky=tk.NSEW) - for service in sorted(self.current_services): - self.current.listbox.insert(tk.END, service) - - frame = ttk.Frame(self.top) - frame.grid(stick=tk.EW) - for i in range(2): - frame.columnconfigure(i, weight=1) - button = ttk.Button(frame, text="Save", command=self.destroy) - button.grid(row=0, column=0, sticky=tk.EW, padx=PADX) - button = ttk.Button(frame, text="Cancel", command=self.click_cancel) - button.grid(row=0, column=1, sticky=tk.EW) - - # trigger group change - self.handle_group_change() - - def handle_group_change(self, event: tk.Event = None) -> None: - selection = self.groups.listbox.curselection() - if selection: - index = selection[0] - group = self.groups.listbox.get(index) - self.services.clear() - for name in sorted(self.app.core.config_services_groups[group]): - checked = name in self.current_services - self.services.add(name, checked) - - def service_clicked(self, name: str, var: tk.BooleanVar) -> None: - if var.get() and name not in self.current_services: - self.current_services.add(name) - elif not var.get() and name in self.current_services: - self.current_services.remove(name) - self.current.listbox.delete(0, tk.END) - for name in sorted(self.current_services): - self.current.listbox.insert(tk.END, name) - - def click_cancel(self) -> None: - self.current_services = None - self.destroy() - - -class CustomNodesDialog(Dialog): - def __init__(self, app: "Application") -> None: - super().__init__(app, "Custom Nodes") - self.edit_button: Optional[ttk.Button] = None - self.delete_button: Optional[ttk.Button] = None - self.nodes_list: Optional[ListboxScroll] = None - self.name: tk.StringVar = tk.StringVar() - self.image_button: Optional[ttk.Button] = None - self.image: Optional[PhotoImage] = None - self.image_file: Optional[str] = None - self.services: set[str] = set() - self.selected: Optional[str] = None - self.selected_index: Optional[int] = None - self.draw() - - def draw(self) -> None: - self.top.columnconfigure(0, weight=1) - self.top.rowconfigure(0, weight=1) - self.draw_node_config() - self.draw_node_buttons() - self.draw_buttons() - - def draw_node_config(self) -> None: - frame = ttk.LabelFrame(self.top, text="Nodes", padding=FRAME_PAD) - frame.grid(sticky=tk.NSEW, pady=PADY) - frame.columnconfigure(0, weight=1) - frame.rowconfigure(0, weight=1) - - self.nodes_list = ListboxScroll(frame) - self.nodes_list.grid(row=0, column=0, sticky=tk.NSEW, padx=PADX) - self.nodes_list.listbox.bind("<>", self.handle_node_select) - for name in sorted(self.app.core.custom_nodes): - self.nodes_list.listbox.insert(tk.END, name) - - frame = ttk.Frame(frame) - frame.grid(row=0, column=2, sticky=tk.NSEW) - frame.columnconfigure(0, weight=1) - entry = ttk.Entry(frame, textvariable=self.name) - entry.grid(sticky=tk.EW, pady=PADY) - self.image_button = ttk.Button( - frame, 
text="Icon", compound=tk.LEFT, command=self.click_icon - ) - self.image_button.grid(sticky=tk.EW, pady=PADY) - button = ttk.Button(frame, text="Config Services", command=self.click_services) - button.grid(sticky=tk.EW) - - def draw_node_buttons(self) -> None: - frame = ttk.Frame(self.top) - frame.grid(sticky=tk.EW, pady=PADY) - for i in range(3): - frame.columnconfigure(i, weight=1) - - button = ttk.Button(frame, text="Create", command=self.click_create) - button.grid(row=0, column=0, sticky=tk.EW, padx=PADX) - - self.edit_button = ttk.Button( - frame, text="Edit", state=tk.DISABLED, command=self.click_edit - ) - self.edit_button.grid(row=0, column=1, sticky=tk.EW, padx=PADX) - - self.delete_button = ttk.Button( - frame, text="Delete", state=tk.DISABLED, command=self.click_delete - ) - self.delete_button.grid(row=0, column=2, sticky=tk.EW) - - def draw_buttons(self) -> None: - frame = ttk.Frame(self.top) - frame.grid(sticky=tk.EW) - for i in range(2): - frame.columnconfigure(i, weight=1) - - button = ttk.Button(frame, text="Save", command=self.click_save) - button.grid(row=0, column=0, sticky=tk.EW, padx=PADX) - - button = ttk.Button(frame, text="Cancel", command=self.destroy) - button.grid(row=0, column=1, sticky=tk.EW) - - def reset_values(self) -> None: - self.name.set("") - self.image = None - self.image_file = None - self.services = set() - self.image_button.config(image="") - - def click_icon(self) -> None: - file_path = image_chooser(self, ICONS_PATH) - if file_path: - image = images.from_file(file_path, width=images.NODE_SIZE) - self.image = image - self.image_file = file_path - self.image_button.config(image=self.image) - - def click_services(self) -> None: - dialog = ServicesSelectDialog(self, self.app, set(self.services)) - dialog.show() - if dialog.current_services is not None: - self.services.clear() - self.services.update(dialog.current_services) - - def click_save(self) -> None: - self.app.guiconfig.nodes.clear() - for name in self.app.core.custom_nodes: - node_draw = self.app.core.custom_nodes[name] - custom_node = CustomNode( - name, node_draw.image_file, list(node_draw.services) - ) - self.app.guiconfig.nodes.append(custom_node) - logger.info("saving custom nodes: %s", self.app.guiconfig.nodes) - self.app.save_config() - self.destroy() - - def click_create(self) -> None: - name = self.name.get() - if name not in self.app.core.custom_nodes: - image_file = str(Path(self.image_file).absolute()) - custom_node = CustomNode(name, image_file, list(self.services)) - node_draw = NodeDraw.from_custom(custom_node) - logger.info( - "created new custom node (%s), image file (%s), services: (%s)", - name, - image_file, - self.services, - ) - self.app.core.custom_nodes[name] = node_draw - self.nodes_list.listbox.insert(tk.END, name) - self.reset_values() - - def click_edit(self) -> None: - name = self.name.get() - if self.selected: - previous_name = self.selected - self.selected = name - node_draw = self.app.core.custom_nodes.pop(previous_name) - node_draw.model = name - node_draw.image_file = str(Path(self.image_file).absolute()) - node_draw.image = self.image - node_draw.services = set(self.services) - logger.debug( - "edit custom node (%s), image: (%s), services (%s)", - node_draw.model, - node_draw.image_file, - node_draw.services, - ) - self.app.core.custom_nodes[name] = node_draw - self.nodes_list.listbox.delete(self.selected_index) - self.nodes_list.listbox.insert(self.selected_index, name) - self.nodes_list.listbox.selection_set(self.selected_index) - - def click_delete(self) -> 
None: - if self.selected and self.selected in self.app.core.custom_nodes: - self.nodes_list.listbox.delete(self.selected_index) - del self.app.core.custom_nodes[self.selected] - self.reset_values() - self.nodes_list.listbox.selection_clear(0, tk.END) - self.nodes_list.listbox.event_generate("<>") - - def handle_node_select(self, event: tk.Event) -> None: - selection = self.nodes_list.listbox.curselection() - if selection: - self.selected_index = selection[0] - self.selected = self.nodes_list.listbox.get(self.selected_index) - node_draw = self.app.core.custom_nodes[self.selected] - self.name.set(node_draw.model) - self.services = node_draw.services - self.image = node_draw.image - self.image_file = node_draw.image_file - self.image_button.config(image=self.image) - self.edit_button.config(state=tk.NORMAL) - self.delete_button.config(state=tk.NORMAL) - else: - self.selected = None - self.selected_index = None - self.edit_button.config(state=tk.DISABLED) - self.delete_button.config(state=tk.DISABLED) diff --git a/daemon/core/gui/dialogs/dialog.py b/daemon/core/gui/dialogs/dialog.py deleted file mode 100644 index 5233bb27..00000000 --- a/daemon/core/gui/dialogs/dialog.py +++ /dev/null @@ -1,50 +0,0 @@ -import tkinter as tk -from tkinter import ttk -from typing import TYPE_CHECKING - -from core.gui import images -from core.gui.images import ImageEnum -from core.gui.themes import DIALOG_PAD - -if TYPE_CHECKING: - from core.gui.app import Application - - -class Dialog(tk.Toplevel): - def __init__( - self, - app: "Application", - title: str, - modal: bool = True, - master: tk.BaseWidget = None, - ) -> None: - if master is None: - master = app - super().__init__(master) - self.withdraw() - self.app: "Application" = app - self.modal: bool = modal - self.title(title) - self.protocol("WM_DELETE_WINDOW", self.destroy) - image = images.from_enum(ImageEnum.CORE, width=images.DIALOG_SIZE) - self.tk.call("wm", "iconphoto", self._w, image) - self.columnconfigure(0, weight=1) - self.rowconfigure(0, weight=1) - self.top: ttk.Frame = ttk.Frame(self, padding=DIALOG_PAD) - self.top.grid(sticky=tk.NSEW) - - def show(self) -> None: - self.transient(self.master) - self.focus_force() - self.update() - self.deiconify() - if self.modal: - self.wait_visibility() - self.grab_set() - self.wait_window() - - def draw_spacer(self, row: int = None) -> None: - frame = ttk.Frame(self.top) - frame.grid(row=row, sticky=tk.NSEW) - frame.rowconfigure(0, weight=1) - self.top.rowconfigure(frame.grid_info()["row"], weight=1) diff --git a/daemon/core/gui/dialogs/emaneconfig.py b/daemon/core/gui/dialogs/emaneconfig.py deleted file mode 100644 index 00eda694..00000000 --- a/daemon/core/gui/dialogs/emaneconfig.py +++ /dev/null @@ -1,191 +0,0 @@ -""" -emane configuration -""" -import tkinter as tk -import webbrowser -from tkinter import ttk -from typing import TYPE_CHECKING, Optional - -import grpc - -from core.api.grpc.wrappers import ConfigOption, Node -from core.gui import images -from core.gui.dialogs.dialog import Dialog -from core.gui.images import ImageEnum -from core.gui.themes import PADX, PADY -from core.gui.widgets import ConfigFrame - -if TYPE_CHECKING: - from core.gui.app import Application - - -class EmaneModelDialog(Dialog): - def __init__( - self, - master: tk.BaseWidget, - app: "Application", - node: Node, - model: str, - iface_id: int = None, - ) -> None: - super().__init__(app, f"{node.name} {model} Configuration", master=master) - self.node: Node = node - self.model: str = f"emane_{model}" - self.iface_id: int = 
iface_id - self.config_frame: Optional[ConfigFrame] = None - self.enabled: bool = not self.app.core.is_runtime() - self.has_error: bool = False - try: - config = self.node.emane_model_configs.get((self.model, self.iface_id)) - if not config: - config = self.node.emane_model_configs.get((self.model, None)) - if not config: - config = self.app.core.get_emane_model_config( - self.node.id, self.model, self.iface_id - ) - self.config: dict[str, ConfigOption] = config - self.draw() - except grpc.RpcError as e: - self.app.show_grpc_exception("Get EMANE Config Error", e) - self.has_error: bool = True - self.destroy() - - def draw(self) -> None: - self.top.columnconfigure(0, weight=1) - self.top.rowconfigure(0, weight=1) - self.config_frame = ConfigFrame(self.top, self.app, self.config, self.enabled) - self.config_frame.draw_config() - self.config_frame.grid(sticky=tk.NSEW, pady=PADY) - self.draw_buttons() - - def draw_buttons(self) -> None: - frame = ttk.Frame(self.top) - frame.grid(sticky=tk.EW) - for i in range(2): - frame.columnconfigure(i, weight=1) - state = tk.NORMAL if self.enabled else tk.DISABLED - button = ttk.Button(frame, text="Apply", command=self.click_apply, state=state) - button.grid(row=0, column=0, sticky=tk.EW, padx=PADX) - button = ttk.Button(frame, text="Cancel", command=self.destroy) - button.grid(row=0, column=1, sticky=tk.EW) - - def click_apply(self) -> None: - self.config_frame.parse_config() - key = (self.model, self.iface_id) - self.node.emane_model_configs[key] = self.config - self.destroy() - - -class EmaneConfigDialog(Dialog): - def __init__(self, app: "Application", node: Node) -> None: - super().__init__(app, f"{node.name} EMANE Configuration") - self.node: Node = node - self.radiovar: tk.IntVar = tk.IntVar() - self.radiovar.set(1) - self.emane_models: list[str] = [ - x.split("_")[1] for x in self.app.core.emane_models - ] - model = self.node.emane.split("_")[1] - self.emane_model: tk.StringVar = tk.StringVar(value=model) - self.emane_model_button: Optional[ttk.Button] = None - self.enabled: bool = not self.app.core.is_runtime() - self.draw() - - def draw(self) -> None: - self.top.columnconfigure(0, weight=1) - self.draw_emane_configuration() - self.draw_emane_models() - self.draw_emane_buttons() - self.draw_spacer() - self.draw_apply_and_cancel() - - def draw_emane_configuration(self) -> None: - """ - draw the main frame for emane configuration - """ - label = ttk.Label( - self.top, - text="The EMANE emulation system provides more complex wireless radio " - "emulation \nusing pluggable MAC and PHY modules. 
Refer to the wiki " - "for configuration option details", - justify=tk.CENTER, - ) - label.grid(pady=PADY) - - image = images.from_enum(ImageEnum.EDITNODE, width=images.BUTTON_SIZE) - button = ttk.Button( - self.top, - image=image, - text="EMANE Wiki", - compound=tk.RIGHT, - command=lambda: webbrowser.open_new( - "https://github.com/adjacentlink/emane/wiki" - ), - ) - button.image = image - button.grid(sticky=tk.EW, pady=PADY) - - def draw_emane_models(self) -> None: - """ - create a combobox that has all the known emane models - """ - frame = ttk.Frame(self.top) - frame.grid(sticky=tk.EW, pady=PADY) - frame.columnconfigure(1, weight=1) - - label = ttk.Label(frame, text="Model") - label.grid(row=0, column=0, sticky=tk.W) - - # create combo box and its binding - state = "readonly" if self.enabled else tk.DISABLED - combobox = ttk.Combobox( - frame, textvariable=self.emane_model, values=self.emane_models, state=state - ) - combobox.grid(row=0, column=1, sticky=tk.EW) - combobox.bind("<>", self.emane_model_change) - - def draw_emane_buttons(self) -> None: - frame = ttk.Frame(self.top) - frame.grid(sticky=tk.EW, pady=PADY) - frame.columnconfigure(0, weight=1) - image = images.from_enum(ImageEnum.EDITNODE, width=images.BUTTON_SIZE) - self.emane_model_button = ttk.Button( - frame, - text=f"{self.emane_model.get()} options", - image=image, - compound=tk.RIGHT, - command=self.click_model_config, - ) - self.emane_model_button.image = image - self.emane_model_button.grid(padx=PADX, sticky=tk.EW) - - def draw_apply_and_cancel(self) -> None: - frame = ttk.Frame(self.top) - frame.grid(sticky=tk.EW) - for i in range(2): - frame.columnconfigure(i, weight=1) - state = tk.NORMAL if self.enabled else tk.DISABLED - button = ttk.Button(frame, text="Apply", command=self.click_apply, state=state) - button.grid(row=0, column=0, padx=PADX, sticky=tk.EW) - button = ttk.Button(frame, text="Cancel", command=self.destroy) - button.grid(row=0, column=1, sticky=tk.EW) - - def click_model_config(self) -> None: - """ - draw emane model configuration - """ - model_name = self.emane_model.get() - dialog = EmaneModelDialog(self, self.app, self.node, model_name) - if not dialog.has_error: - dialog.show() - - def emane_model_change(self, event: tk.Event) -> None: - """ - update emane model options button - """ - model_name = self.emane_model.get() - self.emane_model_button.config(text=f"{model_name} options") - - def click_apply(self) -> None: - self.node.emane = f"emane_{self.emane_model.get()}" - self.destroy() diff --git a/daemon/core/gui/dialogs/emaneinstall.py b/daemon/core/gui/dialogs/emaneinstall.py deleted file mode 100644 index 9f9f2f5c..00000000 --- a/daemon/core/gui/dialogs/emaneinstall.py +++ /dev/null @@ -1,26 +0,0 @@ -import tkinter as tk -import webbrowser -from tkinter import ttk - -from core.gui.dialogs.dialog import Dialog -from core.gui.themes import PADY - - -class EmaneInstallDialog(Dialog): - def __init__(self, app) -> None: - super().__init__(app, "EMANE Error") - self.draw() - - def draw(self) -> None: - self.top.columnconfigure(0, weight=1) - label = ttk.Label(self.top, text="EMANE needs to be installed!") - label.grid(sticky=tk.EW, pady=PADY) - button = ttk.Button( - self.top, text="EMANE Documentation", command=self.click_doc - ) - button.grid(sticky=tk.EW, pady=PADY) - button = ttk.Button(self.top, text="Close", command=self.destroy) - button.grid(sticky=tk.EW) - - def click_doc(self) -> None: - webbrowser.open_new("https://coreemu.github.io/core/emane.html") diff --git 
a/daemon/core/gui/dialogs/error.py b/daemon/core/gui/dialogs/error.py deleted file mode 100644 index 726f8617..00000000 --- a/daemon/core/gui/dialogs/error.py +++ /dev/null @@ -1,39 +0,0 @@ -import tkinter as tk -from tkinter import ttk -from typing import TYPE_CHECKING, Optional - -from core.gui import images -from core.gui.dialogs.dialog import Dialog -from core.gui.images import ImageEnum -from core.gui.themes import PADY -from core.gui.widgets import CodeText - -if TYPE_CHECKING: - from core.gui.app import Application - - -class ErrorDialog(Dialog): - def __init__( - self, app: "Application", title: str, message: str, details: str - ) -> None: - super().__init__(app, title) - self.message: str = message - self.details: str = details - self.error_message: Optional[CodeText] = None - self.draw() - - def draw(self) -> None: - self.top.columnconfigure(0, weight=1) - self.top.rowconfigure(1, weight=1) - image = images.from_enum(ImageEnum.ERROR, width=images.ERROR_SIZE) - label = ttk.Label( - self.top, text=self.message, image=image, compound=tk.LEFT, anchor=tk.CENTER - ) - label.image = image - label.grid(sticky=tk.W, pady=PADY) - self.error_message = CodeText(self.top) - self.error_message.text.insert("1.0", self.details) - self.error_message.text.config(state=tk.DISABLED) - self.error_message.grid(sticky=tk.EW, pady=PADY) - button = ttk.Button(self.top, text="Close", command=lambda: self.destroy()) - button.grid(sticky=tk.EW) diff --git a/daemon/core/gui/dialogs/executepython.py b/daemon/core/gui/dialogs/executepython.py deleted file mode 100644 index 8c9b31ba..00000000 --- a/daemon/core/gui/dialogs/executepython.py +++ /dev/null @@ -1,90 +0,0 @@ -import logging -import tkinter as tk -from tkinter import filedialog, ttk -from typing import TYPE_CHECKING, Optional - -from core.gui.appconfig import SCRIPT_PATH -from core.gui.dialogs.dialog import Dialog -from core.gui.themes import FRAME_PAD, PADX - -logger = logging.getLogger(__name__) - -if TYPE_CHECKING: - from core.gui.app import Application - - -class ExecutePythonDialog(Dialog): - def __init__(self, app: "Application") -> None: - super().__init__(app, "Execute Python Script") - self.with_options: tk.IntVar = tk.IntVar(value=0) - self.options: tk.StringVar = tk.StringVar(value="") - self.option_entry: Optional[ttk.Entry] = None - self.file_entry: Optional[ttk.Entry] = None - self.draw() - - def draw(self) -> None: - i = 0 - frame = ttk.Frame(self.top, padding=FRAME_PAD) - frame.columnconfigure(0, weight=1) - frame.columnconfigure(1, weight=1) - frame.grid(row=i, column=0, sticky=tk.NSEW) - i = i + 1 - var = tk.StringVar(value="") - self.file_entry = ttk.Entry(frame, textvariable=var) - self.file_entry.grid(row=0, column=0, sticky=tk.EW) - button = ttk.Button(frame, text="...", command=self.select_file) - button.grid(row=0, column=1, sticky=tk.EW) - - self.top.columnconfigure(0, weight=1) - button = ttk.Checkbutton( - self.top, - text="With Options", - variable=self.with_options, - command=self.add_options, - ) - button.grid(row=i, column=0, sticky=tk.EW) - i = i + 1 - - label = ttk.Label( - self.top, text="Any command-line options for running the Python script" - ) - label.grid(row=i, column=0, sticky=tk.EW) - i = i + 1 - self.option_entry = ttk.Entry( - self.top, textvariable=self.options, state="disabled" - ) - self.option_entry.grid(row=i, column=0, sticky=tk.EW) - i = i + 1 - - frame = ttk.Frame(self.top, padding=FRAME_PAD) - frame.columnconfigure(0, weight=1) - frame.columnconfigure(1, weight=1) - frame.grid(row=i, column=0) - 
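# Editor's note: a minimal, hedged sketch of the Dialog subclass pattern shared
# by every dialog removed in this diff (see the deleted dialog.py above):
# subclass Dialog, build widgets on self.top inside draw(), then call show().
# The MessageDialog class and its text are illustrative only; the imports point
# at modules this patch deletes.
import tkinter as tk
from tkinter import ttk
from typing import TYPE_CHECKING

from core.gui.dialogs.dialog import Dialog  # base class removed by this patch

if TYPE_CHECKING:
    from core.gui.app import Application


class MessageDialog(Dialog):
    def __init__(self, app: "Application", message: str) -> None:
        super().__init__(app, "Message")
        self.message: str = message
        self.draw()

    def draw(self) -> None:
        self.top.columnconfigure(0, weight=1)
        label = ttk.Label(self.top, text=self.message)
        label.grid(sticky=tk.EW)
        button = ttk.Button(self.top, text="Close", command=self.destroy)
        button.grid(sticky=tk.EW)


# usage, assuming a running Application instance named app:
# MessageDialog(app, "hello").show()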
button = ttk.Button(frame, text="Execute", command=self.script_execute) - button.grid(row=0, column=0, sticky=tk.EW, padx=PADX) - button = ttk.Button(frame, text="Cancel", command=self.destroy) - button.grid(row=0, column=1, sticky=tk.EW, padx=PADX) - - def add_options(self) -> None: - if self.with_options.get(): - self.option_entry.configure(state="normal") - else: - self.option_entry.configure(state="disabled") - - def select_file(self) -> None: - file = filedialog.askopenfilename( - parent=self.top, - initialdir=str(SCRIPT_PATH), - title="Open python script", - filetypes=((".py Files", "*.py"), ("All Files", "*")), - ) - if file: - self.file_entry.delete(0, "end") - self.file_entry.insert("end", file) - - def script_execute(self) -> None: - file = self.file_entry.get() - options = self.option_entry.get() - logger.info("Execute %s with options %s", file, options) - self.app.core.execute_script(file, options) - self.destroy() diff --git a/daemon/core/gui/dialogs/find.py b/daemon/core/gui/dialogs/find.py deleted file mode 100644 index 54be81b0..00000000 --- a/daemon/core/gui/dialogs/find.py +++ /dev/null @@ -1,160 +0,0 @@ -import logging -import tkinter as tk -from tkinter import ttk -from typing import TYPE_CHECKING, Optional - -from core.gui.dialogs.dialog import Dialog -from core.gui.themes import FRAME_PAD, PADX, PADY - -logger = logging.getLogger(__name__) - -if TYPE_CHECKING: - from core.gui.app import Application - - -class FindDialog(Dialog): - def __init__(self, app: "Application") -> None: - super().__init__(app, "Find", modal=False) - self.find_text: tk.StringVar = tk.StringVar(value="") - self.tree: Optional[ttk.Treeview] = None - self.draw() - self.protocol("WM_DELETE_WINDOW", self.close_dialog) - self.bind("", self.find_node) - - def draw(self) -> None: - self.top.columnconfigure(0, weight=1) - self.top.rowconfigure(1, weight=1) - - # Find node frame - frame = ttk.Frame(self.top, padding=FRAME_PAD) - frame.grid(sticky=tk.EW, pady=PADY) - frame.columnconfigure(1, weight=1) - label = ttk.Label(frame, text="Find:") - label.grid() - entry = ttk.Entry(frame, textvariable=self.find_text) - entry.grid(row=0, column=1, sticky=tk.NSEW) - - # node list frame - frame = ttk.Frame(self.top) - frame.columnconfigure(0, weight=1) - frame.rowconfigure(0, weight=1) - frame.grid(sticky=tk.NSEW, pady=PADY) - self.tree = ttk.Treeview( - frame, - columns=("nodeid", "name", "location", "detail"), - show="headings", - selectmode=tk.BROWSE, - ) - self.tree.grid(sticky=tk.NSEW, pady=PADY) - style = ttk.Style() - heading_size = int(self.app.guiconfig.scale * 10) - style.configure("Treeview.Heading", font=(None, heading_size, "bold")) - self.tree.column("nodeid", stretch=tk.YES, anchor="center") - self.tree.heading("nodeid", text="Node ID") - self.tree.column("name", stretch=tk.YES, anchor="center") - self.tree.heading("name", text="Name") - self.tree.column("location", stretch=tk.YES, anchor="center") - self.tree.heading("location", text="Location") - self.tree.column("detail", stretch=tk.YES, anchor="center") - self.tree.heading("detail", text="Detail") - self.tree.bind("<>", self.click_select) - yscrollbar = ttk.Scrollbar(frame, orient="vertical", command=self.tree.yview) - yscrollbar.grid(row=0, column=1, sticky=tk.NS) - self.tree.configure(yscrollcommand=yscrollbar.set) - xscrollbar = ttk.Scrollbar(frame, orient="horizontal", command=self.tree.xview) - xscrollbar.grid(row=1, sticky=tk.EW) - self.tree.configure(xscrollcommand=xscrollbar.set) - - # button frame - frame = ttk.Frame(self.top) - 
frame.grid(sticky=tk.EW) - frame.columnconfigure(0, weight=1) - frame.columnconfigure(1, weight=1) - button = ttk.Button(frame, text="Find", command=self.find_node) - button.grid(row=0, column=0, sticky=tk.EW, padx=PADX) - button = ttk.Button(frame, text="Cancel", command=self.close_dialog) - button.grid(row=0, column=1, sticky=tk.EW) - - def clear_treeview_items(self) -> None: - """ - clear all items in the treeview - """ - for i in list(self.tree.get_children("")): - self.tree.delete(i) - - def find_node(self, _event: tk.Event = None) -> None: - """ - Query nodes that have the same node name as our search key, - display results to tree view - """ - node_name = self.find_text.get().strip() - self.clear_treeview_items() - for node in self.app.core.session.nodes.values(): - name = node.name - if not node_name or node_name == name: - pos_x = round(node.position.x, 1) - pos_y = round(node.position.y, 1) - # TODO: I am not sure what to insert for Detail column - # leaving it blank for now - self.tree.insert( - "", - tk.END, - text=str(node.id), - values=(node.id, name, f"<{pos_x}, {pos_y}>", ""), - ) - results = self.tree.get_children("") - if results: - self.tree.selection_set(results[0]) - - def close_dialog(self) -> None: - self.clear_find() - self.destroy() - - def clear_find(self): - for canvas in self.app.manager.all(): - canvas.delete("find") - - def click_select(self, _event: tk.Event = None) -> None: - """ - find the node that matches search criteria, circle around that node - and scroll the x and y scrollbar to be able to see the node if - it is out of sight - """ - item = self.tree.selection() - if item: - self.clear_find() - node_id = int(self.tree.item(item, "text")) - canvas_node = self.app.core.get_canvas_node(node_id) - self.app.manager.select(canvas_node.canvas.id) - x0, y0, x1, y1 = canvas_node.canvas.bbox(canvas_node.id) - dist = 5 * self.app.guiconfig.scale - canvas_node.canvas.create_oval( - x0 - dist, - y0 - dist, - x1 + dist, - y1 + dist, - tags="find", - outline="red", - width=3.0 * self.app.guiconfig.scale, - ) - - _x, _y, _, _ = canvas_node.canvas.bbox(canvas_node.id) - oid = canvas_node.canvas.find_withtag("rectangle") - x0, y0, x1, y1 = canvas_node.canvas.bbox(oid[0]) - logger.debug("Dist to most left: %s", abs(x0 - _x)) - logger.debug("White canvas width: %s", abs(x0 - x1)) - - # calculate the node's location - # (as fractions of white canvas's width and height) - # and instantly scroll the x and y scrollbar to that location - xscroll_fraction = abs(x0 - _x) / abs(x0 - x1) - yscroll_fraction = abs(y0 - _y) / abs(y0 - y1) - # scroll a little more to the left or a little bit up so that the node - # doesn't always fall in the most top-left corner - for i in range(2): - if xscroll_fraction > 0.05: - xscroll_fraction = xscroll_fraction - 0.05 - if yscroll_fraction > 0.05: - yscroll_fraction = yscroll_fraction - 0.05 - canvas_node.canvas.xview_moveto(xscroll_fraction) - canvas_node.canvas.yview_moveto(yscroll_fraction) diff --git a/daemon/core/gui/dialogs/hooks.py b/daemon/core/gui/dialogs/hooks.py deleted file mode 100644 index 391df18f..00000000 --- a/daemon/core/gui/dialogs/hooks.py +++ /dev/null @@ -1,181 +0,0 @@ -import tkinter as tk -from tkinter import messagebox, ttk -from typing import TYPE_CHECKING, Optional - -from core.api.grpc.wrappers import Hook, SessionState -from core.gui.dialogs.dialog import Dialog -from core.gui.themes import PADX, PADY -from core.gui.widgets import CodeText, ListboxScroll - -if TYPE_CHECKING: - from core.gui.app import Application - 
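The deleted find.py dialog above pairs a headings-only ttk.Treeview with vertical and horizontal scrollbars and then scrolls the canvas to the selected node. A condensed, standalone sketch of just the treeview-and-scrollbar wiring, with hypothetical node rows:

import tkinter as tk
from tkinter import ttk

# Sketch: treeview cross-linked with scrollbars via yview/xview and
# yscrollcommand/xscrollcommand, as in the deleted FindDialog.
root = tk.Tk()
root.columnconfigure(0, weight=1)
root.rowconfigure(0, weight=1)
tree = ttk.Treeview(
    root, columns=("nodeid", "name"), show="headings", selectmode=tk.BROWSE
)
tree.heading("nodeid", text="Node ID")
tree.heading("name", text="Name")
tree.grid(row=0, column=0, sticky=tk.NSEW)
yscroll = ttk.Scrollbar(root, orient="vertical", command=tree.yview)
yscroll.grid(row=0, column=1, sticky=tk.NS)
xscroll = ttk.Scrollbar(root, orient="horizontal", command=tree.xview)
xscroll.grid(row=1, column=0, sticky=tk.EW)
tree.configure(yscrollcommand=yscroll.set, xscrollcommand=xscroll.set)
for node_id, name in [(1, "n1"), (2, "n2")]:
    tree.insert("", tk.END, text=str(node_id), values=(node_id, name))
# <<TreeviewSelect>> is the virtual event the dialog binds for click_select
tree.bind("<<TreeviewSelect>>", lambda _e: print(tree.selection()))
root.mainloop()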
- -class HookDialog(Dialog): - def __init__(self, master: tk.BaseWidget, app: "Application") -> None: - super().__init__(app, "Hook", master=master) - self.name: tk.StringVar = tk.StringVar() - self.codetext: Optional[CodeText] = None - self.hook: Optional[Hook] = None - self.state: tk.StringVar = tk.StringVar() - self.editing: bool = False - self.draw() - - def draw(self) -> None: - self.top.columnconfigure(0, weight=1) - self.top.rowconfigure(1, weight=1) - - # name and states - frame = ttk.Frame(self.top) - frame.grid(sticky=tk.EW, pady=PADY) - frame.columnconfigure(0, weight=2) - frame.columnconfigure(1, weight=7) - frame.columnconfigure(2, weight=1) - label = ttk.Label(frame, text="Name") - label.grid(row=0, column=0, sticky=tk.EW, padx=PADX) - entry = ttk.Entry(frame, textvariable=self.name) - entry.grid(row=0, column=1, sticky=tk.EW, padx=PADX) - values = tuple(x.name for x in SessionState) - initial_state = SessionState.RUNTIME.name - self.state.set(initial_state) - self.name.set(f"{initial_state.lower()}_hook.sh") - combobox = ttk.Combobox( - frame, textvariable=self.state, values=values, state="readonly" - ) - combobox.grid(row=0, column=2, sticky=tk.EW) - combobox.bind("<>", self.state_change) - - # data - self.codetext = CodeText(self.top) - self.codetext.text.insert( - 1.0, - ( - "#!/bin/sh\n" - "# session hook script; write commands here to execute on the host at the\n" - "# specified state\n" - ), - ) - self.codetext.grid(sticky=tk.NSEW, pady=PADY) - - # button row - frame = ttk.Frame(self.top) - frame.grid(sticky=tk.EW) - for i in range(2): - frame.columnconfigure(i, weight=1) - button = ttk.Button(frame, text="Save", command=lambda: self.save()) - button.grid(row=0, column=0, sticky=tk.EW, padx=PADX) - button = ttk.Button(frame, text="Cancel", command=lambda: self.destroy()) - button.grid(row=0, column=1, sticky=tk.EW) - - def state_change(self, event: tk.Event) -> None: - if self.editing: - return - state_name = self.state.get() - self.name.set(f"{state_name.lower()}_hook.sh") - - def set(self, hook: Hook) -> None: - self.editing = True - self.hook = hook - self.name.set(hook.file) - self.codetext.text.delete(1.0, tk.END) - self.codetext.text.insert(tk.END, hook.data) - state_name = hook.state.name - self.state.set(state_name) - - def save(self) -> None: - data = self.codetext.text.get("1.0", tk.END).strip() - state = SessionState[self.state.get()] - file_name = self.name.get() - if self.editing: - self.hook.state = state - self.hook.file = file_name - self.hook.data = data - else: - if file_name in self.app.core.session.hooks: - messagebox.showerror( - "Hook Error", - f"Hook {file_name} already exists!", - parent=self.master, - ) - return - self.hook = Hook(state=state, file=file_name, data=data) - self.destroy() - - -class HooksDialog(Dialog): - def __init__(self, app: "Application") -> None: - super().__init__(app, "Hooks") - self.listbox: Optional[tk.Listbox] = None - self.edit_button: Optional[ttk.Button] = None - self.delete_button: Optional[ttk.Button] = None - self.selected: Optional[str] = None - self.selected_index: Optional[int] = None - self.draw() - - def draw(self) -> None: - self.top.columnconfigure(0, weight=1) - self.top.rowconfigure(0, weight=1) - - listbox_scroll = ListboxScroll(self.top) - listbox_scroll.grid(sticky=tk.NSEW, pady=PADY) - self.listbox = listbox_scroll.listbox - self.listbox.bind("<>", self.select) - session = self.app.core.session - for file in session.hooks: - self.listbox.insert(tk.END, file) - - frame = ttk.Frame(self.top) - 
frame.grid(sticky=tk.EW) - for i in range(4): - frame.columnconfigure(i, weight=1) - button = ttk.Button(frame, text="Create", command=self.click_create) - button.grid(row=0, column=0, sticky=tk.EW, padx=PADX) - self.edit_button = ttk.Button( - frame, text="Edit", state=tk.DISABLED, command=self.click_edit - ) - self.edit_button.grid(row=0, column=1, sticky=tk.EW, padx=PADX) - self.delete_button = ttk.Button( - frame, text="Delete", state=tk.DISABLED, command=self.click_delete - ) - self.delete_button.grid(row=0, column=2, sticky=tk.EW, padx=PADX) - button = ttk.Button(frame, text="Cancel", command=lambda: self.destroy()) - button.grid(row=0, column=3, sticky=tk.EW) - - def click_create(self) -> None: - dialog = HookDialog(self, self.app) - dialog.show() - hook = dialog.hook - if hook: - self.app.core.session.hooks[hook.file] = hook - self.listbox.insert(tk.END, hook.file) - - def click_edit(self) -> None: - session = self.app.core.session - hook = session.hooks.pop(self.selected) - dialog = HookDialog(self, self.app) - dialog.set(hook) - dialog.show() - session.hooks[hook.file] = hook - self.selected = hook.file - self.listbox.delete(self.selected_index) - self.listbox.insert(self.selected_index, hook.file) - self.listbox.select_set(self.selected_index) - - def click_delete(self) -> None: - session = self.app.core.session - del session.hooks[self.selected] - self.listbox.delete(self.selected_index) - self.edit_button.config(state=tk.DISABLED) - self.delete_button.config(state=tk.DISABLED) - - def select(self, event: tk.Event) -> None: - if self.listbox.curselection(): - self.selected_index = self.listbox.curselection()[0] - self.selected = self.listbox.get(self.selected_index) - self.edit_button.config(state=tk.NORMAL) - self.delete_button.config(state=tk.NORMAL) - else: - self.selected = None - self.selected_index = None - self.edit_button.config(state=tk.DISABLED) - self.delete_button.config(state=tk.DISABLED) diff --git a/daemon/core/gui/dialogs/ipdialog.py b/daemon/core/gui/dialogs/ipdialog.py deleted file mode 100644 index 99388548..00000000 --- a/daemon/core/gui/dialogs/ipdialog.py +++ /dev/null @@ -1,170 +0,0 @@ -import tkinter as tk -from tkinter import messagebox, ttk -from typing import TYPE_CHECKING, Optional - -import netaddr - -from core.gui.dialogs.dialog import Dialog -from core.gui.themes import FRAME_PAD, PADX, PADY -from core.gui.widgets import ListboxScroll - -if TYPE_CHECKING: - from core.gui.app import Application - - -class IpConfigDialog(Dialog): - def __init__(self, app: "Application") -> None: - super().__init__(app, "IP Configuration") - self.ip4: str = self.app.guiconfig.ips.ip4 - self.ip6: str = self.app.guiconfig.ips.ip6 - self.ip4s: list[str] = self.app.guiconfig.ips.ip4s - self.ip6s: list[str] = self.app.guiconfig.ips.ip6s - self.ip4_entry: Optional[ttk.Entry] = None - self.ip4_listbox: Optional[ListboxScroll] = None - self.ip6_entry: Optional[ttk.Entry] = None - self.ip6_listbox: Optional[ListboxScroll] = None - self.enable_ip4 = tk.BooleanVar(value=self.app.guiconfig.ips.enable_ip4) - self.enable_ip6 = tk.BooleanVar(value=self.app.guiconfig.ips.enable_ip6) - self.draw() - - def draw(self) -> None: - self.top.columnconfigure(0, weight=1) - self.top.rowconfigure(0, weight=1) - - # draw ip4 and ip6 lists - frame = ttk.Frame(self.top) - frame.columnconfigure(0, weight=1) - frame.columnconfigure(1, weight=1) - frame.rowconfigure(0, weight=1) - frame.grid(sticky=tk.NSEW, pady=PADY) - - ip4_checkbox = ttk.Checkbutton( - frame, text="Enable IP4?", 
variable=self.enable_ip4 - ) - ip4_checkbox.grid(row=0, column=0, sticky=tk.EW) - ip6_checkbox = ttk.Checkbutton( - frame, text="Enable IP6?", variable=self.enable_ip6 - ) - ip6_checkbox.grid(row=0, column=1, sticky=tk.EW) - - ip4_frame = ttk.LabelFrame(frame, text="IPv4", padding=FRAME_PAD) - ip4_frame.columnconfigure(0, weight=1) - ip4_frame.rowconfigure(1, weight=1) - ip4_frame.grid(row=1, column=0, stick=tk.NSEW) - self.ip4_listbox = ListboxScroll(ip4_frame) - self.ip4_listbox.listbox.bind("<>", self.select_ip4) - self.ip4_listbox.grid(sticky=tk.NSEW, pady=PADY) - for index, ip4 in enumerate(self.ip4s): - self.ip4_listbox.listbox.insert(tk.END, ip4) - if self.ip4 == ip4: - self.ip4_listbox.listbox.select_set(index) - self.ip4_entry = ttk.Entry(ip4_frame) - self.ip4_entry.grid(sticky=tk.EW, pady=PADY) - ip4_button_frame = ttk.Frame(ip4_frame) - ip4_button_frame.columnconfigure(0, weight=1) - ip4_button_frame.columnconfigure(1, weight=1) - ip4_button_frame.grid(sticky=tk.EW) - ip4_add = ttk.Button(ip4_button_frame, text="Add", command=self.click_add_ip4) - ip4_add.grid(row=0, column=0, sticky=tk.EW) - ip4_del = ttk.Button( - ip4_button_frame, text="Delete", command=self.click_del_ip4 - ) - ip4_del.grid(row=0, column=1, sticky=tk.EW) - - ip6_frame = ttk.LabelFrame(frame, text="IPv6", padding=FRAME_PAD) - ip6_frame.columnconfigure(0, weight=1) - ip6_frame.rowconfigure(0, weight=1) - ip6_frame.grid(row=1, column=1, stick=tk.NSEW) - self.ip6_listbox = ListboxScroll(ip6_frame) - self.ip6_listbox.listbox.bind("<>", self.select_ip6) - self.ip6_listbox.grid(sticky=tk.NSEW, pady=PADY) - for index, ip6 in enumerate(self.ip6s): - self.ip6_listbox.listbox.insert(tk.END, ip6) - if self.ip6 == ip6: - self.ip6_listbox.listbox.select_set(index) - self.ip6_entry = ttk.Entry(ip6_frame) - self.ip6_entry.grid(sticky=tk.EW, pady=PADY) - ip6_button_frame = ttk.Frame(ip6_frame) - ip6_button_frame.columnconfigure(0, weight=1) - ip6_button_frame.columnconfigure(1, weight=1) - ip6_button_frame.grid(sticky=tk.EW) - ip6_add = ttk.Button(ip6_button_frame, text="Add", command=self.click_add_ip6) - ip6_add.grid(row=0, column=0, sticky=tk.EW) - ip6_del = ttk.Button( - ip6_button_frame, text="Delete", command=self.click_del_ip6 - ) - ip6_del.grid(row=0, column=1, sticky=tk.EW) - - # draw buttons - frame = ttk.Frame(self.top) - frame.grid(stick=tk.EW) - for i in range(2): - frame.columnconfigure(i, weight=1) - button = ttk.Button(frame, text="Save", command=self.click_save) - button.grid(row=0, column=0, sticky=tk.EW, padx=PADX) - button = ttk.Button(frame, text="Cancel", command=self.destroy) - button.grid(row=0, column=1, sticky=tk.EW) - - def click_add_ip4(self) -> None: - ip4 = self.ip4_entry.get() - if not ip4 or not netaddr.valid_ipv4(ip4): - messagebox.showerror("IPv4 Error", f"Invalid IPv4 {ip4}") - else: - self.ip4_listbox.listbox.insert(tk.END, ip4) - - def click_del_ip4(self) -> None: - if self.ip4_listbox.listbox.size() == 1: - messagebox.showerror("IPv4 Error", "Must have at least one address") - else: - selection = self.ip4_listbox.listbox.curselection() - self.ip4_listbox.listbox.delete(selection) - self.ip4_listbox.listbox.select_set(0) - - def click_add_ip6(self) -> None: - ip6 = self.ip6_entry.get() - if not ip6 or not netaddr.valid_ipv6(ip6): - messagebox.showerror("IPv6 Error", f"Invalid IPv6 {ip6}") - else: - self.ip6_listbox.listbox.insert(tk.END, ip6) - - def click_del_ip6(self) -> None: - if self.ip6_listbox.listbox.size() == 1: - messagebox.showerror("IPv6 Error", "Must have at least one 
address") - else: - selection = self.ip6_listbox.listbox.curselection() - self.ip6_listbox.listbox.delete(selection) - self.ip6_listbox.listbox.select_set(0) - - def select_ip4(self, _event: tk.Event) -> None: - selection = self.ip4_listbox.listbox.curselection() - self.ip4 = self.ip4_listbox.listbox.get(selection) - - def select_ip6(self, _event: tk.Event) -> None: - selection = self.ip6_listbox.listbox.curselection() - self.ip6 = self.ip6_listbox.listbox.get(selection) - - def click_save(self) -> None: - ip4s = [] - for index in range(self.ip4_listbox.listbox.size()): - ip4 = self.ip4_listbox.listbox.get(index) - ip4s.append(ip4) - ip6s = [] - for index in range(self.ip6_listbox.listbox.size()): - ip6 = self.ip6_listbox.listbox.get(index) - ip6s.append(ip6) - ip_config = self.app.guiconfig.ips - ip_changed = False - if ip_config.ip4 != self.ip4: - ip_config.ip4 = self.ip4 - ip_changed = True - if ip_config.ip6 != self.ip6: - ip_config.ip6 = self.ip6 - ip_changed = True - ip_config.ip4s = ip4s - ip_config.ip6s = ip6s - ip_config.enable_ip4 = self.enable_ip4.get() - ip_config.enable_ip6 = self.enable_ip6.get() - if ip_changed: - self.app.core.ifaces_manager.update_ips(self.ip4, self.ip6) - self.app.save_config() - self.destroy() diff --git a/daemon/core/gui/dialogs/linkconfig.py b/daemon/core/gui/dialogs/linkconfig.py deleted file mode 100644 index 6b27d373..00000000 --- a/daemon/core/gui/dialogs/linkconfig.py +++ /dev/null @@ -1,336 +0,0 @@ -""" -link configuration -""" -import tkinter as tk -from tkinter import ttk -from typing import TYPE_CHECKING, Optional - -from core.api.grpc.wrappers import Interface, Link, LinkOptions -from core.gui import validation -from core.gui.dialogs.colorpicker import ColorPickerDialog -from core.gui.dialogs.dialog import Dialog -from core.gui.themes import PADX, PADY - -if TYPE_CHECKING: - from core.gui.app import Application - from core.gui.graph.graph import CanvasEdge - - -def get_int(var: tk.StringVar) -> Optional[int]: - value = var.get() - if value != "": - return int(value) - else: - return 0 - - -def get_float(var: tk.StringVar) -> Optional[float]: - value = var.get() - if value != "": - return float(value) - else: - return 0.0 - - -class LinkConfigurationDialog(Dialog): - def __init__(self, app: "Application", edge: "CanvasEdge") -> None: - super().__init__(app, "Link Configuration") - self.edge: "CanvasEdge" = edge - - self.is_symmetric: bool = edge.link.is_symmetric() - if self.is_symmetric: - symmetry_var = tk.StringVar(value=">>") - else: - symmetry_var = tk.StringVar(value="<<") - self.symmetry_var: tk.StringVar = symmetry_var - - self.bandwidth: tk.StringVar = tk.StringVar() - self.delay: tk.StringVar = tk.StringVar() - self.jitter: tk.StringVar = tk.StringVar() - self.loss: tk.StringVar = tk.StringVar() - self.duplicate: tk.StringVar = tk.StringVar() - self.buffer: tk.StringVar = tk.StringVar() - - self.down_bandwidth: tk.StringVar = tk.StringVar() - self.down_delay: tk.StringVar = tk.StringVar() - self.down_jitter: tk.StringVar = tk.StringVar() - self.down_loss: tk.StringVar = tk.StringVar() - self.down_duplicate: tk.StringVar = tk.StringVar() - self.down_buffer: tk.StringVar = tk.StringVar() - - self.color: tk.StringVar = tk.StringVar(value=self.edge.color) - self.color_button: Optional[tk.Button] = None - self.width: tk.DoubleVar = tk.DoubleVar(value=self.edge.width) - - self.load_link_config() - self.symmetric_frame: Optional[ttk.Frame] = None - self.asymmetric_frame: Optional[ttk.Frame] = None - - self.draw() - - def draw(self) -> None: 
- self.top.columnconfigure(0, weight=1) - src_label = self.edge.src.core_node.name - if self.edge.link.iface1: - src_label += f":{self.edge.link.iface1.name}" - dst_label = self.edge.dst.core_node.name - if self.edge.link.iface2: - dst_label += f":{self.edge.link.iface2.name}" - label = ttk.Label( - self.top, text=f"{src_label} to {dst_label}", anchor=tk.CENTER - ) - label.grid(row=0, column=0, sticky=tk.EW, pady=PADY) - - frame = ttk.Frame(self.top) - frame.columnconfigure(0, weight=1) - frame.grid(row=1, column=0, sticky=tk.EW, pady=PADY) - if self.is_symmetric: - button = ttk.Button( - frame, textvariable=self.symmetry_var, command=self.change_symmetry - ) - else: - button = ttk.Button( - frame, textvariable=self.symmetry_var, command=self.change_symmetry - ) - button.grid(sticky=tk.EW) - - if self.is_symmetric: - self.symmetric_frame = self.get_frame() - self.symmetric_frame.grid(row=2, column=0, sticky=tk.EW, pady=PADY) - else: - self.asymmetric_frame = self.get_frame() - self.asymmetric_frame.grid(row=2, column=0, sticky=tk.EW, pady=PADY) - - self.draw_spacer(row=3) - - frame = ttk.Frame(self.top) - frame.columnconfigure(0, weight=1) - frame.columnconfigure(1, weight=1) - frame.grid(row=4, column=0, sticky=tk.EW) - button = ttk.Button(frame, text="Apply", command=self.click_apply) - button.grid(row=0, column=0, sticky=tk.EW, padx=PADX) - button = ttk.Button(frame, text="Cancel", command=self.destroy) - button.grid(row=0, column=1, sticky=tk.EW) - - def get_frame(self) -> ttk.Frame: - frame = ttk.Frame(self.top) - frame.columnconfigure(1, weight=1) - if self.is_symmetric: - label_name = "Symmetric Link Effects" - else: - label_name = "Asymmetric Effects: Downstream / Upstream " - row = 0 - label = ttk.Label(frame, text=label_name, anchor=tk.CENTER) - label.grid(row=row, column=0, columnspan=2, sticky=tk.EW, pady=PADY) - row = row + 1 - - label = ttk.Label(frame, text="Bandwidth (bps)") - label.grid(row=row, column=0, sticky=tk.EW) - entry = validation.PositiveIntEntry( - frame, empty_enabled=False, textvariable=self.bandwidth - ) - entry.grid(row=row, column=1, sticky=tk.EW, pady=PADY) - if not self.is_symmetric: - entry = validation.PositiveIntEntry( - frame, empty_enabled=False, textvariable=self.down_bandwidth - ) - entry.grid(row=row, column=2, sticky=tk.EW, pady=PADY) - row = row + 1 - - label = ttk.Label(frame, text="Delay (us)") - label.grid(row=row, column=0, sticky=tk.EW) - entry = validation.PositiveIntEntry( - frame, empty_enabled=False, textvariable=self.delay - ) - entry.grid(row=row, column=1, sticky=tk.EW, pady=PADY) - if not self.is_symmetric: - entry = validation.PositiveIntEntry( - frame, empty_enabled=False, textvariable=self.down_delay - ) - entry.grid(row=row, column=2, sticky=tk.EW, pady=PADY) - row = row + 1 - - label = ttk.Label(frame, text="Jitter (us)") - label.grid(row=row, column=0, sticky=tk.EW) - entry = validation.PositiveIntEntry( - frame, empty_enabled=False, textvariable=self.jitter - ) - entry.grid(row=row, column=1, sticky=tk.EW, pady=PADY) - if not self.is_symmetric: - entry = validation.PositiveIntEntry( - frame, empty_enabled=False, textvariable=self.down_jitter - ) - entry.grid(row=row, column=2, sticky=tk.EW, pady=PADY) - row = row + 1 - - label = ttk.Label(frame, text="Loss (%)") - label.grid(row=row, column=0, sticky=tk.EW) - entry = validation.PositiveFloatEntry( - frame, empty_enabled=False, textvariable=self.loss - ) - entry.grid(row=row, column=1, sticky=tk.EW, pady=PADY) - if not self.is_symmetric: - entry = 
validation.PositiveFloatEntry( - frame, empty_enabled=False, textvariable=self.down_loss - ) - entry.grid(row=row, column=2, sticky=tk.EW, pady=PADY) - row = row + 1 - - label = ttk.Label(frame, text="Duplicate (%)") - label.grid(row=row, column=0, sticky=tk.EW) - entry = validation.PositiveIntEntry( - frame, empty_enabled=False, textvariable=self.duplicate - ) - entry.grid(row=row, column=1, sticky=tk.EW, pady=PADY) - if not self.is_symmetric: - entry = validation.PositiveIntEntry( - frame, empty_enabled=False, textvariable=self.down_duplicate - ) - entry.grid(row=row, column=2, sticky=tk.EW, pady=PADY) - row = row + 1 - - label = ttk.Label(frame, text="Buffer (Packets)") - label.grid(row=row, column=0, sticky=tk.EW) - entry = validation.PositiveIntEntry( - frame, empty_enabled=False, textvariable=self.buffer - ) - entry.grid(row=row, column=1, sticky=tk.EW, pady=PADY) - if not self.is_symmetric: - entry = validation.PositiveIntEntry( - frame, empty_enabled=False, textvariable=self.down_buffer - ) - entry.grid(row=row, column=2, sticky=tk.EW, pady=PADY) - row = row + 1 - - label = ttk.Label(frame, text="Color") - label.grid(row=row, column=0, sticky=tk.EW) - self.color_button = tk.Button( - frame, - textvariable=self.color, - background=self.color.get(), - bd=0, - relief=tk.FLAT, - highlightthickness=0, - command=self.click_color, - ) - self.color_button.grid(row=row, column=1, sticky=tk.EW, pady=PADY) - row = row + 1 - - label = ttk.Label(frame, text="Width") - label.grid(row=row, column=0, sticky=tk.EW) - entry = validation.PositiveFloatEntry( - frame, empty_enabled=False, textvariable=self.width - ) - entry.grid(row=row, column=1, sticky=tk.EW, pady=PADY) - - return frame - - def click_color(self) -> None: - dialog = ColorPickerDialog(self, self.app, self.color.get()) - color = dialog.askcolor() - self.color.set(color) - self.color_button.config(background=color) - - def click_apply(self) -> None: - self.edge.width = self.width.get() - self.edge.color = self.color.get() - link = self.edge.link - bandwidth = get_int(self.bandwidth) - jitter = get_int(self.jitter) - delay = get_int(self.delay) - duplicate = get_int(self.duplicate) - buffer = get_int(self.buffer) - loss = get_float(self.loss) - options = LinkOptions( - bandwidth=bandwidth, - jitter=jitter, - delay=delay, - dup=duplicate, - loss=loss, - buffer=buffer, - ) - link.options = options - iface1_id = link.iface1.id if link.iface1 else None - iface2_id = link.iface2.id if link.iface2 else None - if not self.is_symmetric: - link.options.unidirectional = True - asym_iface1 = None - if iface1_id is not None: - asym_iface1 = Interface(id=iface1_id) - asym_iface2 = None - if iface2_id is not None: - asym_iface2 = Interface(id=iface2_id) - down_bandwidth = get_int(self.down_bandwidth) - down_jitter = get_int(self.down_jitter) - down_delay = get_int(self.down_delay) - down_duplicate = get_int(self.down_duplicate) - down_buffer = get_int(self.down_buffer) - down_loss = get_float(self.down_loss) - options = LinkOptions( - bandwidth=down_bandwidth, - jitter=down_jitter, - delay=down_delay, - dup=down_duplicate, - loss=down_loss, - buffer=down_buffer, - unidirectional=True, - ) - self.edge.asymmetric_link = Link( - node1_id=link.node2_id, - node2_id=link.node1_id, - iface1=asym_iface2, - iface2=asym_iface1, - options=options, - ) - else: - link.options.unidirectional = False - self.edge.asymmetric_link = None - - if self.app.core.is_runtime() and link.options: - self.app.core.edit_link(link) - if self.edge.asymmetric_link: - 
self.app.core.edit_link(self.edge.asymmetric_link) - - # update edge label - self.edge.redraw() - self.edge.check_visibility() - self.destroy() - - def change_symmetry(self) -> None: - if self.is_symmetric: - self.is_symmetric = False - self.symmetry_var.set("<<") - if not self.asymmetric_frame: - self.asymmetric_frame = self.get_frame() - self.symmetric_frame.grid_forget() - self.asymmetric_frame.grid(row=2, column=0) - else: - self.is_symmetric = True - self.symmetry_var.set(">>") - if not self.symmetric_frame: - self.symmetric_frame = self.get_frame() - self.asymmetric_frame.grid_forget() - self.symmetric_frame.grid(row=2, column=0) - - def load_link_config(self) -> None: - """ - populate link config to the table - """ - self.width.set(self.edge.width) - self.color.set(self.edge.color) - link = self.edge.link - if link.options: - self.bandwidth.set(str(link.options.bandwidth)) - self.jitter.set(str(link.options.jitter)) - self.duplicate.set(str(link.options.dup)) - self.loss.set(str(link.options.loss)) - self.delay.set(str(link.options.delay)) - self.buffer.set(str(link.options.buffer)) - if not self.is_symmetric: - asym_link = self.edge.asymmetric_link - self.down_bandwidth.set(str(asym_link.options.bandwidth)) - self.down_jitter.set(str(asym_link.options.jitter)) - self.down_duplicate.set(str(asym_link.options.dup)) - self.down_loss.set(str(asym_link.options.loss)) - self.down_delay.set(str(asym_link.options.delay)) - self.down_buffer.set(str(asym_link.options.buffer)) diff --git a/daemon/core/gui/dialogs/macdialog.py b/daemon/core/gui/dialogs/macdialog.py deleted file mode 100644 index c8cd7f45..00000000 --- a/daemon/core/gui/dialogs/macdialog.py +++ /dev/null @@ -1,61 +0,0 @@ -import tkinter as tk -from tkinter import messagebox, ttk -from typing import TYPE_CHECKING - -import netaddr - -from core.gui.dialogs.dialog import Dialog -from core.gui.themes import PADX, PADY - -if TYPE_CHECKING: - from core.gui.app import Application - - -class MacConfigDialog(Dialog): - def __init__(self, app: "Application") -> None: - super().__init__(app, "MAC Configuration") - mac = self.app.guiconfig.mac - self.mac_var: tk.StringVar = tk.StringVar(value=mac) - self.draw() - - def draw(self) -> None: - self.top.columnconfigure(0, weight=1) - self.top.rowconfigure(0, weight=1) - - # draw explanation label - text = ( - "MAC addresses will be generated for nodes starting with the\n" - "provided value below and increment by value in order." 
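The deleted linkconfig.py builds one LinkOptions for the symmetric case and, when asymmetric, a second unidirectional Link in the reverse direction. A sketch of those two shapes, assuming only the core.api.grpc.wrappers fields exercised in the dialog above; node ids and numeric values are illustrative:

from core.api.grpc.wrappers import Link, LinkOptions

# Symmetric effects applied to both directions of a wired link.
symmetric = LinkOptions(
    bandwidth=54_000_000,  # bps
    delay=5000,            # us
    jitter=0,              # us
    loss=0.0,              # percent
    dup=0,                 # percent
    buffer=50,             # packets
)

# Asymmetric case: the reverse direction gets its own unidirectional link.
reverse = Link(
    node1_id=2,
    node2_id=1,
    iface1=None,
    iface2=None,
    options=LinkOptions(bandwidth=11_000_000, delay=20_000, unidirectional=True),
)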
- ) - label = ttk.Label(self.top, text=text) - label.grid(sticky=tk.EW, pady=PADY) - - # draw input - frame = ttk.Frame(self.top) - frame.columnconfigure(0, weight=1) - frame.columnconfigure(1, weight=3) - frame.grid(stick="ew", pady=PADY) - label = ttk.Label(frame, text="Starting MAC") - label.grid(row=0, column=0, sticky=tk.EW, padx=PADX) - entry = ttk.Entry(frame, textvariable=self.mac_var) - entry.grid(row=0, column=1, sticky=tk.EW) - - # draw buttons - frame = ttk.Frame(self.top) - frame.grid(stick="ew") - for i in range(2): - frame.columnconfigure(i, weight=1) - button = ttk.Button(frame, text="Save", command=self.click_save) - button.grid(row=0, column=0, sticky=tk.EW, padx=PADX) - button = ttk.Button(frame, text="Cancel", command=self.destroy) - button.grid(row=0, column=1, sticky=tk.EW) - - def click_save(self) -> None: - mac = self.mac_var.get() - if not netaddr.valid_mac(mac): - messagebox.showerror("MAC Error", f"{mac} is an invalid mac") - else: - self.app.core.ifaces_manager.mac = netaddr.EUI(mac) - self.app.guiconfig.mac = mac - self.app.save_config() - self.destroy() diff --git a/daemon/core/gui/dialogs/mobilityconfig.py b/daemon/core/gui/dialogs/mobilityconfig.py deleted file mode 100644 index 6a2991aa..00000000 --- a/daemon/core/gui/dialogs/mobilityconfig.py +++ /dev/null @@ -1,59 +0,0 @@ -""" -mobility configuration -""" -import tkinter as tk -from tkinter import ttk -from typing import TYPE_CHECKING, Optional - -import grpc - -from core.api.grpc.wrappers import ConfigOption, Node -from core.gui.dialogs.dialog import Dialog -from core.gui.themes import PADX, PADY -from core.gui.widgets import ConfigFrame - -if TYPE_CHECKING: - from core.gui.app import Application - - -class MobilityConfigDialog(Dialog): - def __init__(self, app: "Application", node: Node) -> None: - super().__init__(app, f"{node.name} Mobility Configuration") - self.node: Node = node - self.config_frame: Optional[ConfigFrame] = None - self.has_error: bool = False - try: - config = self.node.mobility_config - if not config: - config = self.app.core.get_mobility_config(self.node.id) - self.config: dict[str, ConfigOption] = config - self.draw() - except grpc.RpcError as e: - self.app.show_grpc_exception("Get Mobility Config Error", e) - self.has_error: bool = True - self.destroy() - - def draw(self) -> None: - self.top.columnconfigure(0, weight=1) - self.top.rowconfigure(0, weight=1) - self.config_frame = ConfigFrame(self.top, self.app, self.config) - self.config_frame.draw_config() - self.config_frame.grid(sticky=tk.NSEW, pady=PADY) - self.draw_apply_buttons() - - def draw_apply_buttons(self) -> None: - frame = ttk.Frame(self.top) - frame.grid(sticky=tk.EW) - for i in range(2): - frame.columnconfigure(i, weight=1) - - button = ttk.Button(frame, text="Apply", command=self.click_apply) - button.grid(row=0, column=0, padx=PADX, sticky=tk.EW) - - button = ttk.Button(frame, text="Cancel", command=self.destroy) - button.grid(row=0, column=1, sticky=tk.EW) - - def click_apply(self) -> None: - self.config_frame.parse_config() - self.node.mobility_config = self.config - self.destroy() diff --git a/daemon/core/gui/dialogs/mobilityplayer.py b/daemon/core/gui/dialogs/mobilityplayer.py deleted file mode 100644 index 7b6c4d9f..00000000 --- a/daemon/core/gui/dialogs/mobilityplayer.py +++ /dev/null @@ -1,160 +0,0 @@ -import tkinter as tk -from tkinter import ttk -from typing import TYPE_CHECKING, Optional - -import grpc - -from core.api.grpc.wrappers import MobilityAction, Node -from core.gui.dialogs.dialog import Dialog 
-from core.gui.images import ImageEnum -from core.gui.themes import PADX, PADY - -if TYPE_CHECKING: - from core.gui.app import Application - -ICON_SIZE: int = 16 - - -class MobilityPlayer: - def __init__(self, app: "Application", node: Node) -> None: - self.app: "Application" = app - self.node: Node = node - self.dialog: Optional[MobilityPlayerDialog] = None - self.state: Optional[MobilityAction] = None - - def show(self) -> None: - if self.dialog: - self.dialog.destroy() - self.dialog = MobilityPlayerDialog(self.app, self.node) - self.dialog.protocol("WM_DELETE_WINDOW", self.close) - if self.state == MobilityAction.START: - self.set_play() - elif self.state == MobilityAction.PAUSE: - self.set_pause() - else: - self.set_stop() - self.dialog.show() - - def close(self) -> None: - if self.dialog: - self.dialog.destroy() - self.dialog = None - - def set_play(self) -> None: - self.state = MobilityAction.START - if self.dialog: - self.dialog.set_play() - - def set_pause(self) -> None: - self.state = MobilityAction.PAUSE - if self.dialog: - self.dialog.set_pause() - - def set_stop(self) -> None: - self.state = MobilityAction.STOP - if self.dialog: - self.dialog.set_stop() - - -class MobilityPlayerDialog(Dialog): - def __init__(self, app: "Application", node: Node) -> None: - super().__init__(app, f"{node.name} Mobility Player", modal=False) - self.resizable(False, False) - self.geometry("") - self.node: Node = node - self.play_button: Optional[ttk.Button] = None - self.pause_button: Optional[ttk.Button] = None - self.stop_button: Optional[ttk.Button] = None - self.progressbar: Optional[ttk.Progressbar] = None - self.draw() - - def draw(self) -> None: - config = self.node.mobility_config - self.top.columnconfigure(0, weight=1) - - file_name = config["file"].value - label = ttk.Label(self.top, text=file_name) - label.grid(sticky=tk.EW, pady=PADY) - - self.progressbar = ttk.Progressbar(self.top, mode="indeterminate") - self.progressbar.grid(sticky=tk.EW, pady=PADY) - - frame = ttk.Frame(self.top) - frame.grid(sticky=tk.EW, pady=PADY) - for i in range(3): - frame.columnconfigure(i, weight=1) - - image = self.app.get_enum_icon(ImageEnum.START, width=ICON_SIZE) - self.play_button = ttk.Button(frame, image=image, command=self.click_play) - self.play_button.image = image - self.play_button.grid(row=0, column=0, sticky=tk.EW, padx=PADX) - - image = self.app.get_enum_icon(ImageEnum.PAUSE, width=ICON_SIZE) - self.pause_button = ttk.Button(frame, image=image, command=self.click_pause) - self.pause_button.image = image - self.pause_button.grid(row=0, column=1, sticky=tk.EW, padx=PADX) - - image = self.app.get_enum_icon(ImageEnum.STOP, width=ICON_SIZE) - self.stop_button = ttk.Button(frame, image=image, command=self.click_stop) - self.stop_button.image = image - self.stop_button.grid(row=0, column=2, sticky=tk.EW, padx=PADX) - - loop = tk.IntVar(value=int(config["loop"].value == "1")) - checkbutton = ttk.Checkbutton( - frame, text="Loop?", variable=loop, state=tk.DISABLED - ) - checkbutton.grid(row=0, column=3, padx=PADX) - - rate = config["refresh_ms"].value - label = ttk.Label(frame, text=f"rate {rate} ms") - label.grid(row=0, column=4) - - def clear_buttons(self) -> None: - self.play_button.state(["!pressed"]) - self.pause_button.state(["!pressed"]) - self.stop_button.state(["!pressed"]) - - def set_play(self) -> None: - self.clear_buttons() - self.play_button.state(["pressed"]) - self.progressbar.start() - - def set_pause(self) -> None: - self.clear_buttons() - self.pause_button.state(["pressed"]) - 
self.progressbar.stop() - - def set_stop(self) -> None: - self.clear_buttons() - self.stop_button.state(["pressed"]) - self.progressbar.stop() - - def click_play(self) -> None: - self.set_play() - session_id = self.app.core.session.id - try: - self.app.core.client.mobility_action( - session_id, self.node.id, MobilityAction.START - ) - except grpc.RpcError as e: - self.app.show_grpc_exception("Mobility Error", e) - - def click_pause(self) -> None: - self.set_pause() - session_id = self.app.core.session.id - try: - self.app.core.client.mobility_action( - session_id, self.node.id, MobilityAction.PAUSE - ) - except grpc.RpcError as e: - self.app.show_grpc_exception("Mobility Error", e) - - def click_stop(self) -> None: - self.set_stop() - session_id = self.app.core.session.id - try: - self.app.core.client.mobility_action( - session_id, self.node.id, MobilityAction.STOP - ) - except grpc.RpcError as e: - self.app.show_grpc_exception("Mobility Error", e) diff --git a/daemon/core/gui/dialogs/nodeconfig.py b/daemon/core/gui/dialogs/nodeconfig.py deleted file mode 100644 index 162696d4..00000000 --- a/daemon/core/gui/dialogs/nodeconfig.py +++ /dev/null @@ -1,413 +0,0 @@ -import logging -import tkinter as tk -from functools import partial -from tkinter import messagebox, ttk -from typing import TYPE_CHECKING, Optional - -import netaddr -from PIL.ImageTk import PhotoImage - -from core.api.grpc.wrappers import Interface, Node -from core.gui import images -from core.gui import nodeutils as nutils -from core.gui import validation -from core.gui.appconfig import ICONS_PATH -from core.gui.dialogs.dialog import Dialog -from core.gui.dialogs.emaneconfig import EmaneModelDialog -from core.gui.themes import FRAME_PAD, PADX, PADY -from core.gui.widgets import ListboxScroll, image_chooser - -logger = logging.getLogger(__name__) - -if TYPE_CHECKING: - from core.gui.app import Application - from core.gui.graph.node import CanvasNode - -IFACE_NAME_LEN: int = 15 -DEFAULT_SERVER: str = "localhost" - - -def check_ip6(parent: tk.BaseWidget, name: str, value: str) -> bool: - if not value: - return True - title = f"IP6 Error for {name}" - values = value.split("/") - if len(values) != 2: - messagebox.showerror( - title, "Must be in the format address/prefix", parent=parent - ) - return False - addr, mask = values - if not netaddr.valid_ipv6(addr): - messagebox.showerror(title, "Invalid IP6 address", parent=parent) - return False - try: - mask = int(mask) - if not (0 <= mask <= 128): - messagebox.showerror(title, "Mask must be between 0-128", parent=parent) - return False - except ValueError: - messagebox.showerror(title, "Invalid Mask", parent=parent) - return False - return True - - -def check_ip4(parent: tk.BaseWidget, name: str, value: str) -> bool: - if not value: - return True - title = f"IP4 Error for {name}" - values = value.split("/") - if len(values) != 2: - messagebox.showerror( - title, "Must be in the format address/prefix", parent=parent - ) - return False - addr, mask = values - if not netaddr.valid_ipv4(addr): - messagebox.showerror(title, "Invalid IP4 address", parent=parent) - return False - try: - mask = int(mask) - if not (0 <= mask <= 32): - messagebox.showerror(title, "Mask must be between 0-32", parent=parent) - return False - except ValueError: - messagebox.showerror(title, "Invalid mask", parent=parent) - return False - return True - - -def mac_auto(is_auto: tk.BooleanVar, entry: ttk.Entry, mac: tk.StringVar) -> None: - if is_auto.get(): - mac.set("") - entry.config(state=tk.DISABLED) - else: - 
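Two small patterns carry the deleted mobilityplayer.py: every mobility_action gRPC call is wrapped in try/except grpc.RpcError so daemon failures surface as a dialog, and playback state is visualized with an indeterminate ttk.Progressbar started on play and stopped on pause/stop. A standalone sketch of the progressbar part; the window and buttons are illustrative:

import tkinter as tk
from tkinter import ttk

# Sketch: indeterminate progressbar toggled by play/stop buttons, mirroring
# set_play()/set_stop() in the deleted MobilityPlayerDialog.
root = tk.Tk()
root.columnconfigure(0, weight=1)
root.columnconfigure(1, weight=1)
bar = ttk.Progressbar(root, mode="indeterminate")
bar.grid(row=0, column=0, columnspan=2, sticky=tk.EW)
ttk.Button(root, text="Play", command=bar.start).grid(row=1, column=0, sticky=tk.EW)
ttk.Button(root, text="Stop", command=bar.stop).grid(row=1, column=1, sticky=tk.EW)
root.mainloop()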
mac.set("00:00:00:00:00:00") - entry.config(state=tk.NORMAL) - - -class InterfaceData: - def __init__( - self, - name: tk.StringVar, - is_auto: tk.BooleanVar, - mac: tk.StringVar, - ip4: tk.StringVar, - ip6: tk.StringVar, - ) -> None: - self.name: tk.StringVar = name - self.is_auto: tk.BooleanVar = is_auto - self.mac: tk.StringVar = mac - self.ip4: tk.StringVar = ip4 - self.ip6: tk.StringVar = ip6 - - def validate(self, parent: tk.BaseWidget, iface: Interface) -> bool: - valid_name = self._validate_name(parent, iface) - valid_ip4 = self._validate_ip4(parent, iface) - valid_ip6 = self._validate_ip6(parent, iface) - valid_mac = self._validate_mac(parent, iface) - return all([valid_name, valid_ip4, valid_ip6, valid_mac]) - - def _validate_name(self, parent: tk.BaseWidget, iface: Interface) -> bool: - name = self.name.get() - title = f"Interface Name Error for {iface.name}" - if not name: - messagebox.showerror(title, "Name cannot be empty", parent=parent) - return False - if len(name) > IFACE_NAME_LEN: - messagebox.showerror( - title, - f"Name cannot be greater than {IFACE_NAME_LEN} chars", - parent=parent, - ) - return False - for x in name: - if x.isspace() or x == "/": - messagebox.showerror( - title, "Name cannot contain space or /", parent=parent - ) - return False - iface.name = name - return True - - def _validate_ip4(self, parent: tk.BaseWidget, iface: Interface) -> bool: - ip4_net = self.ip4.get() - if not check_ip4(parent, iface.name, ip4_net): - return False - if ip4_net: - ip4, ip4_mask = ip4_net.split("/") - ip4_mask = int(ip4_mask) - else: - ip4, ip4_mask = "", 0 - iface.ip4 = ip4 - iface.ip4_mask = ip4_mask - return True - - def _validate_ip6(self, parent: tk.BaseWidget, iface: Interface) -> bool: - ip6_net = self.ip6.get() - if not check_ip6(parent, iface.name, ip6_net): - return False - if ip6_net: - ip6, ip6_mask = ip6_net.split("/") - ip6_mask = int(ip6_mask) - else: - ip6, ip6_mask = "", 0 - iface.ip6 = ip6 - iface.ip6_mask = ip6_mask - return True - - def _validate_mac(self, parent: tk.BaseWidget, iface: Interface) -> bool: - mac = self.mac.get() - auto_mac = self.is_auto.get() - if auto_mac: - iface.mac = None - else: - if not netaddr.valid_mac(mac): - title = f"MAC Error for {iface.name}" - messagebox.showerror(title, "Invalid MAC Address", parent=parent) - return False - else: - mac = netaddr.EUI(mac, dialect=netaddr.mac_unix_expanded) - iface.mac = str(mac) - return True - - -class NodeConfigDialog(Dialog): - def __init__(self, app: "Application", canvas_node: "CanvasNode") -> None: - """ - create an instance of node configuration - """ - super().__init__(app, f"{canvas_node.core_node.name} Configuration") - self.canvas_node: "CanvasNode" = canvas_node - self.node: Node = canvas_node.core_node - self.image: PhotoImage = canvas_node.image - self.image_file: Optional[str] = None - self.image_button: Optional[ttk.Button] = None - self.name: tk.StringVar = tk.StringVar(value=self.node.name) - self.type: tk.StringVar = tk.StringVar(value=self.node.model) - self.container_image: tk.StringVar = tk.StringVar(value=self.node.image) - server = DEFAULT_SERVER - if self.node.server: - server = self.node.server - self.server: tk.StringVar = tk.StringVar(value=server) - self.ifaces: dict[int, InterfaceData] = {} - self.draw() - - def draw(self) -> None: - self.top.columnconfigure(0, weight=1) - row = 0 - - # field states - state = tk.DISABLED if self.app.core.is_runtime() else tk.NORMAL - combo_state = tk.DISABLED if self.app.core.is_runtime() else "readonly" - - # field frame - 
frame = ttk.Frame(self.top) - frame.grid(sticky=tk.EW) - frame.columnconfigure(1, weight=1) - - # icon field - label = ttk.Label(frame, text="Icon") - label.grid(row=row, column=0, sticky=tk.EW, padx=PADX, pady=PADY) - self.image_button = ttk.Button( - frame, - text="Icon", - image=self.image, - compound=tk.NONE, - command=self.click_icon, - ) - self.image_button.grid(row=row, column=1, sticky=tk.EW) - row += 1 - - # name field - label = ttk.Label(frame, text="Name") - label.grid(row=row, column=0, sticky=tk.EW, padx=PADX, pady=PADY) - entry = validation.NodeNameEntry(frame, textvariable=self.name, state=state) - entry.grid(row=row, column=1, sticky=tk.EW) - row += 1 - - # node type field - if nutils.is_model(self.node): - label = ttk.Label(frame, text="Type") - label.grid(row=row, column=0, sticky=tk.EW, padx=PADX, pady=PADY) - entry = ttk.Entry(frame, textvariable=self.type, state=tk.DISABLED) - entry.grid(row=row, column=1, sticky=tk.EW) - row += 1 - - # container image field - if nutils.has_image(self.node.type): - label = ttk.Label(frame, text="Image") - label.grid(row=row, column=0, sticky=tk.EW, padx=PADX, pady=PADY) - entry = ttk.Entry(frame, textvariable=self.container_image, state=state) - entry.grid(row=row, column=1, sticky=tk.EW) - row += 1 - - if nutils.is_container(self.node): - # server - frame.grid(sticky=tk.EW) - frame.columnconfigure(1, weight=1) - label = ttk.Label(frame, text="Server") - label.grid(row=row, column=0, sticky=tk.EW, padx=PADX, pady=PADY) - servers = [DEFAULT_SERVER] - servers.extend(list(sorted(self.app.core.servers.keys()))) - combobox = ttk.Combobox( - frame, textvariable=self.server, values=servers, state=combo_state - ) - combobox.grid(row=row, column=1, sticky=tk.EW) - row += 1 - - if nutils.is_rj45(self.node): - ifaces = self.app.core.client.get_ifaces() - logger.debug("host machine available interfaces: %s", ifaces) - ifaces_scroll = ListboxScroll(frame) - ifaces_scroll.listbox.config(state=state) - ifaces_scroll.grid( - row=row, column=0, columnspan=2, sticky=tk.EW, padx=PADX, pady=PADY - ) - for inf in sorted(ifaces): - ifaces_scroll.listbox.insert(tk.END, inf) - row += 1 - ifaces_scroll.listbox.bind("<>", self.iface_select) - - # interfaces - if nutils.is_container(self.node): - self.draw_ifaces() - - self.draw_spacer() - self.draw_buttons() - - def draw_ifaces(self) -> None: - notebook = ttk.Notebook(self.top) - notebook.grid(sticky=tk.NSEW, pady=PADY) - self.top.rowconfigure(notebook.grid_info()["row"], weight=1) - state = tk.DISABLED if self.app.core.is_runtime() else tk.NORMAL - for iface_id in sorted(self.canvas_node.ifaces): - iface = self.canvas_node.ifaces[iface_id] - tab = ttk.Frame(notebook, padding=FRAME_PAD) - tab.grid(sticky=tk.NSEW, pady=PADY) - tab.columnconfigure(1, weight=1) - tab.columnconfigure(2, weight=1) - notebook.add(tab, text=iface.name) - - row = 0 - emane_node = self.canvas_node.has_emane_link(iface.id) - if emane_node: - emane_model = emane_node.emane.split("_")[1] - command = partial(self.click_emane_config, emane_model, iface.id) - button = ttk.Button( - tab, text=f"Configure EMANE {emane_model}", command=command - ) - button.grid(row=row, sticky=tk.EW, columnspan=3, pady=PADY) - row += 1 - - label = ttk.Label(tab, text="Name") - label.grid(row=row, column=0, padx=PADX, pady=PADY) - name = tk.StringVar(value=iface.name) - entry = ttk.Entry(tab, textvariable=name, state=state) - entry.var = name - entry.grid(row=row, column=1, columnspan=2, sticky=tk.EW) - row += 1 - - label = ttk.Label(tab, text="MAC") - 
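Each interface tab built below wires its "Auto?" checkbox to the module-level mac_auto() helper through functools.partial, so one callback serves every tab. A trimmed, standalone sketch of that toggle; the root window and placeholder MAC value are illustrative:

import tkinter as tk
from functools import partial
from tkinter import ttk

def mac_auto(is_auto: tk.BooleanVar, entry: ttk.Entry, mac: tk.StringVar) -> None:
    # Same behavior as the deleted nodeconfig.py helper: auto clears and
    # disables the entry, manual enables it with a placeholder MAC.
    if is_auto.get():
        mac.set("")
        entry.config(state=tk.DISABLED)
    else:
        mac.set("00:00:00:00:00:00")
        entry.config(state=tk.NORMAL)

root = tk.Tk()
is_auto = tk.BooleanVar(value=True)
mac = tk.StringVar()
entry = ttk.Entry(root, textvariable=mac, state=tk.DISABLED)
entry.grid(row=0, column=1, sticky=tk.EW)
check = ttk.Checkbutton(root, text="Auto?", variable=is_auto)
check.config(command=partial(mac_auto, is_auto, entry, mac))
check.grid(row=0, column=0)
root.mainloop()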
label.grid(row=row, column=0, padx=PADX, pady=PADY) - auto_set = not iface.mac - is_auto = tk.BooleanVar(value=auto_set) - mac_state = tk.DISABLED if auto_set else tk.NORMAL - if state == tk.DISABLED: - mac_state = tk.DISABLED - checkbutton = ttk.Checkbutton( - tab, text="Auto?", variable=is_auto, state=state - ) - checkbutton.var = is_auto - checkbutton.grid(row=row, column=1, padx=PADX) - mac = tk.StringVar(value=iface.mac) - entry = ttk.Entry(tab, textvariable=mac, state=mac_state) - entry.grid(row=row, column=2, sticky=tk.EW) - func = partial(mac_auto, is_auto, entry, mac) - checkbutton.config(command=func) - row += 1 - - label = ttk.Label(tab, text="IPv4") - label.grid(row=row, column=0, padx=PADX, pady=PADY) - ip4_net = "" - if iface.ip4: - ip4_net = f"{iface.ip4}/{iface.ip4_mask}" - ip4 = tk.StringVar(value=ip4_net) - entry = ttk.Entry(tab, textvariable=ip4, state=state) - entry.grid(row=row, column=1, columnspan=2, sticky=tk.EW) - row += 1 - - label = ttk.Label(tab, text="IPv6") - label.grid(row=row, column=0, padx=PADX, pady=PADY) - ip6_net = "" - if iface.ip6: - ip6_net = f"{iface.ip6}/{iface.ip6_mask}" - ip6 = tk.StringVar(value=ip6_net) - entry = ttk.Entry(tab, textvariable=ip6, state=state) - entry.grid(row=row, column=1, columnspan=2, sticky=tk.EW) - - self.ifaces[iface.id] = InterfaceData(name, is_auto, mac, ip4, ip6) - - def draw_buttons(self) -> None: - frame = ttk.Frame(self.top) - frame.grid(sticky=tk.EW) - frame.columnconfigure(0, weight=1) - frame.columnconfigure(1, weight=1) - - button = ttk.Button(frame, text="Apply", command=self.click_apply) - button.grid(row=0, column=0, padx=PADX, sticky=tk.EW) - - button = ttk.Button(frame, text="Cancel", command=self.destroy) - button.grid(row=0, column=1, sticky=tk.EW) - - def click_emane_config(self, emane_model: str, iface_id: int) -> None: - logger.info("configuring emane: %s - %s", emane_model, iface_id) - dialog = EmaneModelDialog(self, self.app, self.node, emane_model, iface_id) - dialog.show() - - def click_icon(self) -> None: - file_path = image_chooser(self, ICONS_PATH) - if file_path: - self.image = images.from_file(file_path, width=images.NODE_SIZE) - self.image_button.config(image=self.image) - self.image_file = file_path - - def click_apply(self) -> None: - error = False - - # update core node - self.node.name = self.name.get() - if nutils.has_image(self.node.type): - self.node.image = self.container_image.get() - server = self.server.get() - if nutils.is_container(self.node): - if server == DEFAULT_SERVER: - self.node.server = None - else: - self.node.server = server - - # set custom icon - if self.image_file: - self.node.icon = self.image_file - - # update canvas node - self.canvas_node.image = self.image - - # update node interface data - for iface in self.canvas_node.ifaces.values(): - data = self.ifaces[iface.id] - error = not data.validate(self, iface) - if error: - break - - # redraw - if not error: - self.canvas_node.redraw() - self.destroy() - - def iface_select(self, event: tk.Event) -> None: - listbox = event.widget - cur = listbox.curselection() - if cur: - iface = listbox.get(cur[0]) - self.name.set(iface) diff --git a/daemon/core/gui/dialogs/nodeconfigservice.py b/daemon/core/gui/dialogs/nodeconfigservice.py deleted file mode 100644 index ce718080..00000000 --- a/daemon/core/gui/dialogs/nodeconfigservice.py +++ /dev/null @@ -1,158 +0,0 @@ -""" -core node services -""" -import logging -import tkinter as tk -from tkinter import messagebox, ttk -from typing import TYPE_CHECKING, Optional - -from 
core.api.grpc.wrappers import Node -from core.gui.dialogs.configserviceconfig import ConfigServiceConfigDialog -from core.gui.dialogs.dialog import Dialog -from core.gui.themes import FRAME_PAD, PADX, PADY -from core.gui.widgets import CheckboxList, ListboxScroll - -logger = logging.getLogger(__name__) - -if TYPE_CHECKING: - from core.gui.app import Application - - -class NodeConfigServiceDialog(Dialog): - def __init__( - self, app: "Application", node: Node, services: set[str] = None - ) -> None: - title = f"{node.name} Config Services" - super().__init__(app, title) - self.node: Node = node - self.groups: Optional[ListboxScroll] = None - self.services: Optional[CheckboxList] = None - self.current: Optional[ListboxScroll] = None - if services is None: - services = set(node.config_services) - self.current_services: set[str] = services - self.protocol("WM_DELETE_WINDOW", self.click_cancel) - self.draw() - - def draw(self) -> None: - self.top.columnconfigure(0, weight=1) - self.top.rowconfigure(0, weight=1) - - frame = ttk.Frame(self.top) - frame.grid(stick="nsew", pady=PADY) - frame.rowconfigure(0, weight=1) - for i in range(3): - frame.columnconfigure(i, weight=1) - label_frame = ttk.LabelFrame(frame, text="Groups", padding=FRAME_PAD) - label_frame.grid(row=0, column=0, sticky=tk.NSEW) - label_frame.rowconfigure(0, weight=1) - label_frame.columnconfigure(0, weight=1) - self.groups = ListboxScroll(label_frame) - self.groups.grid(sticky=tk.NSEW) - for group in sorted(self.app.core.config_services_groups): - self.groups.listbox.insert(tk.END, group) - self.groups.listbox.bind("<>", self.handle_group_change) - self.groups.listbox.selection_set(0) - - label_frame = ttk.LabelFrame(frame, text="Services") - label_frame.grid(row=0, column=1, sticky=tk.NSEW) - label_frame.columnconfigure(0, weight=1) - label_frame.rowconfigure(0, weight=1) - self.services = CheckboxList( - label_frame, self.app, clicked=self.service_clicked, padding=FRAME_PAD - ) - self.services.grid(sticky=tk.NSEW) - - label_frame = ttk.LabelFrame(frame, text="Selected", padding=FRAME_PAD) - label_frame.grid(row=0, column=2, sticky=tk.NSEW) - label_frame.rowconfigure(0, weight=1) - label_frame.columnconfigure(0, weight=1) - - self.current = ListboxScroll(label_frame) - self.current.grid(sticky=tk.NSEW) - self.draw_current_services() - - frame = ttk.Frame(self.top) - frame.grid(stick="ew") - for i in range(4): - frame.columnconfigure(i, weight=1) - button = ttk.Button(frame, text="Configure", command=self.click_configure) - button.grid(row=0, column=0, sticky=tk.EW, padx=PADX) - button = ttk.Button(frame, text="Save", command=self.click_save) - button.grid(row=0, column=1, sticky=tk.EW, padx=PADX) - button = ttk.Button(frame, text="Remove", command=self.click_remove) - button.grid(row=0, column=2, sticky=tk.EW, padx=PADX) - button = ttk.Button(frame, text="Cancel", command=self.click_cancel) - button.grid(row=0, column=3, sticky=tk.EW) - - # trigger group change - self.handle_group_change() - - def handle_group_change(self, event: tk.Event = None) -> None: - selection = self.groups.listbox.curselection() - if selection: - index = selection[0] - group = self.groups.listbox.get(index) - self.services.clear() - for name in sorted(self.app.core.config_services_groups[group]): - checked = name in self.current_services - self.services.add(name, checked) - - def service_clicked(self, name: str, var: tk.IntVar) -> None: - if var.get() and name not in self.current_services: - self.current_services.add(name) - elif not var.get() and name in 
self.current_services: - self.current_services.remove(name) - self.node.config_service_configs.pop(name, None) - self.draw_current_services() - self.node.config_services = self.current_services.copy() - - def click_configure(self) -> None: - current_selection = self.current.listbox.curselection() - if len(current_selection): - dialog = ConfigServiceConfigDialog( - self, - self.app, - self.current.listbox.get(current_selection[0]), - self.node, - ) - if not dialog.has_error: - dialog.show() - self.draw_current_services() - else: - messagebox.showinfo( - "Config Service Configuration", - "Select a service to configure", - parent=self, - ) - - def draw_current_services(self) -> None: - self.current.listbox.delete(0, tk.END) - for name in sorted(self.current_services): - self.current.listbox.insert(tk.END, name) - if self.is_custom_service(name): - self.current.listbox.itemconfig(tk.END, bg="green") - - def click_save(self) -> None: - self.node.config_services = self.current_services.copy() - logger.info("saved node config services: %s", self.node.config_services) - self.destroy() - - def click_cancel(self) -> None: - self.current_services = None - self.destroy() - - def click_remove(self) -> None: - cur = self.current.listbox.curselection() - if cur: - service = self.current.listbox.get(cur[0]) - self.current.listbox.delete(cur[0]) - self.current_services.remove(service) - self.node.config_service_configs.pop(service, None) - for checkbutton in self.services.frame.winfo_children(): - if checkbutton["text"] == service: - checkbutton.invoke() - return - - def is_custom_service(self, service: str) -> bool: - return service in self.node.config_service_configs diff --git a/daemon/core/gui/dialogs/nodeservice.py b/daemon/core/gui/dialogs/nodeservice.py deleted file mode 100644 index 66e83fa4..00000000 --- a/daemon/core/gui/dialogs/nodeservice.py +++ /dev/null @@ -1,154 +0,0 @@ -""" -core node services -""" -import tkinter as tk -from tkinter import messagebox, ttk -from typing import TYPE_CHECKING, Optional - -from core.api.grpc.wrappers import Node -from core.gui.dialogs.dialog import Dialog -from core.gui.dialogs.serviceconfig import ServiceConfigDialog -from core.gui.themes import FRAME_PAD, PADX, PADY -from core.gui.widgets import CheckboxList, ListboxScroll - -if TYPE_CHECKING: - from core.gui.app import Application - - -class NodeServiceDialog(Dialog): - def __init__(self, app: "Application", node: Node) -> None: - title = f"{node.name} Services (Deprecated)" - super().__init__(app, title) - self.node: Node = node - self.groups: Optional[ListboxScroll] = None - self.services: Optional[CheckboxList] = None - self.current: Optional[ListboxScroll] = None - services = set(node.services) - self.current_services: set[str] = services - self.protocol("WM_DELETE_WINDOW", self.click_cancel) - self.draw() - - def draw(self) -> None: - self.top.columnconfigure(0, weight=1) - self.top.rowconfigure(0, weight=1) - - frame = ttk.Frame(self.top) - frame.grid(stick="nsew", pady=PADY) - frame.rowconfigure(0, weight=1) - for i in range(3): - frame.columnconfigure(i, weight=1) - label_frame = ttk.LabelFrame(frame, text="Groups", padding=FRAME_PAD) - label_frame.grid(row=0, column=0, sticky=tk.NSEW) - label_frame.rowconfigure(0, weight=1) - label_frame.columnconfigure(0, weight=1) - self.groups = ListboxScroll(label_frame) - self.groups.grid(sticky=tk.NSEW) - for group in sorted(self.app.core.services): - self.groups.listbox.insert(tk.END, group) - self.groups.listbox.bind("<>", self.handle_group_change) - 
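Both deleted service dialogs (nodeconfigservice.py and nodeservice.py) track selection the same way: each checkbox toggles a service name in a set, and the "Selected" list is redrawn from that set. A stripped-down sketch of that bookkeeping without the CORE CheckboxList widget; the service names are illustrative:

import tkinter as tk
from tkinter import ttk

# Sketch: checkbuttons maintain a set of selected names and refresh a summary,
# mirroring service_clicked() in the deleted service dialogs.
root = tk.Tk()
current_services: set[str] = {"DefaultRoute"}
summary = tk.StringVar(value=", ".join(sorted(current_services)))
ttk.Label(root, textvariable=summary).grid(sticky=tk.EW)

def service_clicked(name: str, var: tk.IntVar) -> None:
    if var.get():
        current_services.add(name)
    else:
        current_services.discard(name)
    summary.set(", ".join(sorted(current_services)))

for row, name in enumerate(("DefaultRoute", "IPForward", "SSH"), start=1):
    var = tk.IntVar(value=int(name in current_services))
    check = ttk.Checkbutton(
        root,
        text=name,
        variable=var,
        command=lambda n=name, v=var: service_clicked(n, v),
    )
    check.grid(row=row, column=0, sticky=tk.W)
root.mainloop()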
self.groups.listbox.selection_set(0) - - label_frame = ttk.LabelFrame(frame, text="Services") - label_frame.grid(row=0, column=1, sticky=tk.NSEW) - label_frame.columnconfigure(0, weight=1) - label_frame.rowconfigure(0, weight=1) - self.services = CheckboxList( - label_frame, self.app, clicked=self.service_clicked, padding=FRAME_PAD - ) - self.services.grid(sticky=tk.NSEW) - - label_frame = ttk.LabelFrame(frame, text="Selected", padding=FRAME_PAD) - label_frame.grid(row=0, column=2, sticky=tk.NSEW) - label_frame.rowconfigure(0, weight=1) - label_frame.columnconfigure(0, weight=1) - self.current = ListboxScroll(label_frame) - self.current.grid(sticky=tk.NSEW) - for service in sorted(self.current_services): - self.current.listbox.insert(tk.END, service) - if self.is_custom_service(service): - self.current.listbox.itemconfig(tk.END, bg="green") - - frame = ttk.Frame(self.top) - frame.grid(stick="ew") - for i in range(4): - frame.columnconfigure(i, weight=1) - button = ttk.Button(frame, text="Configure", command=self.click_configure) - button.grid(row=0, column=0, sticky=tk.EW, padx=PADX) - button = ttk.Button(frame, text="Save", command=self.click_save) - button.grid(row=0, column=1, sticky=tk.EW, padx=PADX) - button = ttk.Button(frame, text="Remove", command=self.click_remove) - button.grid(row=0, column=2, sticky=tk.EW, padx=PADX) - button = ttk.Button(frame, text="Cancel", command=self.click_cancel) - button.grid(row=0, column=3, sticky=tk.EW) - - # trigger group change - self.handle_group_change() - - def handle_group_change(self, event: tk.Event = None) -> None: - selection = self.groups.listbox.curselection() - if selection: - index = selection[0] - group = self.groups.listbox.get(index) - self.services.clear() - for name in sorted(self.app.core.services[group]): - checked = name in self.current_services - self.services.add(name, checked) - - def service_clicked(self, name: str, var: tk.IntVar) -> None: - if var.get() and name not in self.current_services: - self.current_services.add(name) - elif not var.get() and name in self.current_services: - self.current_services.remove(name) - self.node.service_configs.pop(name, None) - self.node.service_file_configs.pop(name, None) - self.current.listbox.delete(0, tk.END) - for name in sorted(self.current_services): - self.current.listbox.insert(tk.END, name) - if self.is_custom_service(name): - self.current.listbox.itemconfig(tk.END, bg="green") - self.node.services = self.current_services.copy() - - def click_configure(self) -> None: - current_selection = self.current.listbox.curselection() - if len(current_selection): - dialog = ServiceConfigDialog( - self, - self.app, - self.current.listbox.get(current_selection[0]), - self.node, - ) - - # if error occurred when creating ServiceConfigDialog, don't show the dialog - if not dialog.has_error: - dialog.show() - else: - dialog.destroy() - else: - messagebox.showinfo( - "Service Configuration", "Select a service to configure", parent=self - ) - - def click_cancel(self) -> None: - self.destroy() - - def click_save(self) -> None: - self.node.services = self.current_services.copy() - self.destroy() - - def click_remove(self) -> None: - cur = self.current.listbox.curselection() - if cur: - service = self.current.listbox.get(cur[0]) - self.current.listbox.delete(cur[0]) - self.current_services.remove(service) - self.node.service_configs.pop(service, None) - self.node.service_file_configs.pop(service, None) - for checkbutton in self.services.frame.winfo_children(): - if checkbutton["text"] == service: - 
checkbutton.invoke() - return - - def is_custom_service(self, service: str) -> bool: - has_service_config = service in self.node.service_configs - has_file_config = service in self.node.service_file_configs - return has_service_config or has_file_config diff --git a/daemon/core/gui/dialogs/observers.py b/daemon/core/gui/dialogs/observers.py deleted file mode 100644 index b815d45b..00000000 --- a/daemon/core/gui/dialogs/observers.py +++ /dev/null @@ -1,153 +0,0 @@ -import tkinter as tk -from tkinter import messagebox, ttk -from typing import TYPE_CHECKING, Optional - -from core.gui.appconfig import Observer -from core.gui.dialogs.dialog import Dialog -from core.gui.themes import PADX, PADY -from core.gui.widgets import ListboxScroll - -if TYPE_CHECKING: - from core.gui.app import Application - - -class ObserverDialog(Dialog): - def __init__(self, app: "Application") -> None: - super().__init__(app, "Observer Widgets") - self.observers: Optional[tk.Listbox] = None - self.save_button: Optional[ttk.Button] = None - self.delete_button: Optional[ttk.Button] = None - self.selected: Optional[str] = None - self.selected_index: Optional[int] = None - self.name: tk.StringVar = tk.StringVar() - self.cmd: tk.StringVar = tk.StringVar() - self.draw() - - def draw(self) -> None: - self.top.columnconfigure(0, weight=1) - self.top.rowconfigure(0, weight=1) - self.draw_listbox() - self.draw_form_fields() - self.draw_config_buttons() - self.draw_apply_buttons() - - def draw_listbox(self) -> None: - listbox_scroll = ListboxScroll(self.top) - listbox_scroll.grid(sticky=tk.NSEW, pady=PADY) - listbox_scroll.columnconfigure(0, weight=1) - listbox_scroll.rowconfigure(0, weight=1) - self.observers = listbox_scroll.listbox - self.observers.grid(row=0, column=0, sticky=tk.NSEW) - self.observers.bind("<>", self.handle_observer_change) - for name in sorted(self.app.core.custom_observers): - self.observers.insert(tk.END, name) - - def draw_form_fields(self) -> None: - frame = ttk.Frame(self.top) - frame.grid(sticky=tk.EW, pady=PADY) - frame.columnconfigure(1, weight=1) - - label = ttk.Label(frame, text="Name") - label.grid(row=0, column=0, sticky=tk.W, padx=PADX) - entry = ttk.Entry(frame, textvariable=self.name) - entry.grid(row=0, column=1, sticky=tk.EW) - - label = ttk.Label(frame, text="Command") - label.grid(row=1, column=0, sticky=tk.W, padx=PADX) - entry = ttk.Entry(frame, textvariable=self.cmd) - entry.grid(row=1, column=1, sticky=tk.EW) - - def draw_config_buttons(self) -> None: - frame = ttk.Frame(self.top) - frame.grid(sticky=tk.EW, pady=PADY) - for i in range(3): - frame.columnconfigure(i, weight=1) - - button = ttk.Button(frame, text="Create", command=self.click_create) - button.grid(row=0, column=0, sticky=tk.EW, padx=PADX) - - self.save_button = ttk.Button( - frame, text="Save", state=tk.DISABLED, command=self.click_save - ) - self.save_button.grid(row=0, column=1, sticky=tk.EW, padx=PADX) - - self.delete_button = ttk.Button( - frame, text="Delete", state=tk.DISABLED, command=self.click_delete - ) - self.delete_button.grid(row=0, column=2, sticky=tk.EW) - - def draw_apply_buttons(self) -> None: - frame = ttk.Frame(self.top) - frame.grid(sticky=tk.EW) - for i in range(2): - frame.columnconfigure(i, weight=1) - - button = ttk.Button(frame, text="Save", command=self.click_save_config) - button.grid(row=0, column=0, sticky=tk.EW, padx=PADX) - - button = ttk.Button(frame, text="Cancel", command=self.destroy) - button.grid(row=0, column=1, sticky=tk.EW) - - def click_save_config(self) -> None: - 
self.app.guiconfig.observers.clear() - for observer in self.app.core.custom_observers.values(): - self.app.guiconfig.observers.append(observer) - self.app.save_config() - self.destroy() - - def click_create(self) -> None: - name = self.name.get() - if name not in self.app.core.custom_observers: - cmd = self.cmd.get() - observer = Observer(name, cmd) - self.app.core.custom_observers[name] = observer - self.observers.insert(tk.END, name) - self.name.set("") - self.cmd.set("") - self.app.menubar.observers_menu.draw_custom() - self.app.toolbar.observers_menu.draw_custom() - else: - messagebox.showerror("Observer Error", f"{name} already exists") - - def click_save(self) -> None: - name = self.name.get() - if self.selected: - previous_name = self.selected - self.selected = name - observer = self.app.core.custom_observers.pop(previous_name) - observer.name = name - observer.cmd = self.cmd.get() - self.app.core.custom_observers[name] = observer - self.observers.delete(self.selected_index) - self.observers.insert(self.selected_index, name) - self.observers.selection_set(self.selected_index) - - def click_delete(self) -> None: - if self.selected: - self.observers.delete(self.selected_index) - del self.app.core.custom_observers[self.selected] - self.selected = None - self.selected_index = None - self.name.set("") - self.cmd.set("") - self.observers.selection_clear(0, tk.END) - self.save_button.config(state=tk.DISABLED) - self.delete_button.config(state=tk.DISABLED) - self.app.menubar.observers_menu.draw_custom() - self.app.toolbar.observers_menu.draw_custom() - - def handle_observer_change(self, event: tk.Event) -> None: - selection = self.observers.curselection() - if selection: - self.selected_index = selection[0] - self.selected = self.observers.get(self.selected_index) - observer = self.app.core.custom_observers[self.selected] - self.name.set(observer.name) - self.cmd.set(observer.cmd) - self.save_button.config(state=tk.NORMAL) - self.delete_button.config(state=tk.NORMAL) - else: - self.selected_index = None - self.selected = None - self.save_button.config(state=tk.DISABLED) - self.delete_button.config(state=tk.DISABLED) diff --git a/daemon/core/gui/dialogs/preferences.py b/daemon/core/gui/dialogs/preferences.py deleted file mode 100644 index 4a6a1c08..00000000 --- a/daemon/core/gui/dialogs/preferences.py +++ /dev/null @@ -1,153 +0,0 @@ -import logging -import math -import tkinter as tk -from tkinter import ttk -from typing import TYPE_CHECKING - -from core.gui import appconfig, validation -from core.gui.dialogs.dialog import Dialog -from core.gui.themes import FRAME_PAD, PADX, PADY, scale_fonts -from core.gui.validation import LARGEST_SCALE, SMALLEST_SCALE - -logger = logging.getLogger(__name__) - -if TYPE_CHECKING: - from core.gui.app import Application - -SCALE_INTERVAL: float = 0.01 - - -class PreferencesDialog(Dialog): - def __init__(self, app: "Application") -> None: - super().__init__(app, "Preferences") - self.gui_scale: tk.DoubleVar = tk.DoubleVar(value=self.app.app_scale) - preferences = self.app.guiconfig.preferences - self.editor: tk.StringVar = tk.StringVar(value=preferences.editor) - self.theme: tk.StringVar = tk.StringVar(value=preferences.theme) - self.terminal: tk.StringVar = tk.StringVar(value=preferences.terminal) - self.gui3d: tk.StringVar = tk.StringVar(value=preferences.gui3d) - self.draw() - - def draw(self) -> None: - self.top.columnconfigure(0, weight=1) - self.top.rowconfigure(0, weight=1) - self.draw_preferences() - self.draw_buttons() - - def draw_preferences(self) -> 
None: - frame = ttk.LabelFrame(self.top, text="Preferences", padding=FRAME_PAD) - frame.grid(sticky=tk.NSEW, pady=PADY) - frame.columnconfigure(1, weight=1) - - label = ttk.Label(frame, text="Theme") - label.grid(row=0, column=0, pady=PADY, padx=PADX, sticky=tk.W) - themes = self.app.style.theme_names() - combobox = ttk.Combobox( - frame, textvariable=self.theme, values=themes, state="readonly" - ) - combobox.set(self.theme.get()) - combobox.grid(row=0, column=1, sticky=tk.EW) - combobox.bind("<>", self.theme_change) - - label = ttk.Label(frame, text="Editor") - label.grid(row=1, column=0, pady=PADY, padx=PADX, sticky=tk.W) - combobox = ttk.Combobox( - frame, textvariable=self.editor, values=appconfig.EDITORS, state="readonly" - ) - combobox.grid(row=1, column=1, sticky=tk.EW) - - label = ttk.Label(frame, text="Terminal") - label.grid(row=2, column=0, pady=PADY, padx=PADX, sticky=tk.W) - terminals = sorted(appconfig.TERMINALS.values()) - combobox = ttk.Combobox(frame, textvariable=self.terminal, values=terminals) - combobox.grid(row=2, column=1, sticky=tk.EW) - - label = ttk.Label(frame, text="3D GUI") - label.grid(row=3, column=0, pady=PADY, padx=PADX, sticky=tk.W) - entry = ttk.Entry(frame, textvariable=self.gui3d) - entry.grid(row=3, column=1, sticky=tk.EW) - - label = ttk.Label(frame, text="Scaling") - label.grid(row=4, column=0, pady=PADY, padx=PADX, sticky=tk.W) - - scale_frame = ttk.Frame(frame) - scale_frame.grid(row=4, column=1, sticky=tk.EW) - scale_frame.columnconfigure(0, weight=1) - scale = ttk.Scale( - scale_frame, - from_=SMALLEST_SCALE, - to=LARGEST_SCALE, - value=1, - orient=tk.HORIZONTAL, - variable=self.gui_scale, - ) - scale.grid(row=0, column=0, sticky=tk.EW) - entry = validation.AppScaleEntry( - scale_frame, textvariable=self.gui_scale, width=4 - ) - entry.grid(row=0, column=1) - - scrollbar = ttk.Scrollbar(scale_frame, command=self.adjust_scale) - scrollbar.grid(row=0, column=2) - - def draw_buttons(self) -> None: - frame = ttk.Frame(self.top) - frame.grid(sticky=tk.EW) - for i in range(2): - frame.columnconfigure(i, weight=1) - - button = ttk.Button(frame, text="Save", command=self.click_save) - button.grid(row=0, column=0, sticky=tk.EW, padx=PADX) - - button = ttk.Button(frame, text="Cancel", command=self.destroy) - button.grid(row=0, column=1, sticky=tk.EW) - - def theme_change(self, event: tk.Event) -> None: - theme = self.theme.get() - logger.info("changing theme: %s", theme) - self.app.style.theme_use(theme) - - def click_save(self) -> None: - preferences = self.app.guiconfig.preferences - preferences.terminal = self.terminal.get() - preferences.editor = self.editor.get() - preferences.gui3d = self.gui3d.get() - preferences.theme = self.theme.get() - self.gui_scale.set(round(self.gui_scale.get(), 2)) - app_scale = self.gui_scale.get() - self.app.guiconfig.scale = app_scale - self.app.save_config() - self.scale_adjust() - self.destroy() - - def scale_adjust(self) -> None: - app_scale = self.gui_scale.get() - self.app.app_scale = app_scale - self.app.master.tk.call("tk", "scaling", app_scale) - - # scale fonts - scale_fonts(self.app.fonts_size, app_scale) - text_scale = app_scale if app_scale < 1 else math.sqrt(app_scale) - self.app.icon_text_font.config(size=int(12 * text_scale)) - self.app.edge_font.config(size=int(8 * text_scale)) - - # scale application window - self.app.center() - - # scale toolbar and canvas items - self.app.toolbar.scale() - for canvas in self.app.manager.all(): - canvas.scale_graph() - - def adjust_scale(self, arg1: str, arg2: str, arg3: 
str) -> None: - scale_value = self.gui_scale.get() - if arg2 == "-1": - if scale_value <= LARGEST_SCALE - SCALE_INTERVAL: - self.gui_scale.set(round(scale_value + SCALE_INTERVAL, 2)) - else: - self.gui_scale.set(round(LARGEST_SCALE, 2)) - elif arg2 == "1": - if scale_value >= SMALLEST_SCALE + SCALE_INTERVAL: - self.gui_scale.set(round(scale_value - SCALE_INTERVAL, 2)) - else: - self.gui_scale.set(round(SMALLEST_SCALE, 2)) diff --git a/daemon/core/gui/dialogs/runtool.py b/daemon/core/gui/dialogs/runtool.py deleted file mode 100644 index 75789893..00000000 --- a/daemon/core/gui/dialogs/runtool.py +++ /dev/null @@ -1,113 +0,0 @@ -import tkinter as tk -from tkinter import ttk -from typing import TYPE_CHECKING, Optional - -from core.gui import nodeutils as nutils -from core.gui.dialogs.dialog import Dialog -from core.gui.themes import FRAME_PAD, PADX, PADY -from core.gui.widgets import CodeText, ListboxScroll - -if TYPE_CHECKING: - from core.gui.app import Application - - -class RunToolDialog(Dialog): - def __init__(self, app: "Application") -> None: - super().__init__(app, "Run Tool") - self.cmd: tk.StringVar = tk.StringVar(value="ps ax") - self.result: Optional[CodeText] = None - self.node_list: Optional[ListboxScroll] = None - self.executable_nodes: dict[str, int] = {} - self.store_nodes() - self.draw() - - def store_nodes(self) -> None: - """ - store all CORE nodes (nodes that execute commands) from all existing nodes - """ - for node in self.app.core.session.nodes.values(): - if nutils.is_container(node): - self.executable_nodes[node.name] = node.id - - def draw(self) -> None: - self.top.rowconfigure(0, weight=1) - self.top.columnconfigure(0, weight=1) - self.draw_command_frame() - self.draw_nodes_frame() - - def draw_command_frame(self) -> None: - # the main frame - frame = ttk.Frame(self.top) - frame.grid(row=0, column=0, sticky=tk.NSEW, padx=PADX) - frame.columnconfigure(0, weight=1) - frame.rowconfigure(1, weight=1) - - labeled_frame = ttk.LabelFrame(frame, text="Command", padding=FRAME_PAD) - labeled_frame.grid(sticky=tk.EW, pady=PADY) - labeled_frame.rowconfigure(0, weight=1) - labeled_frame.columnconfigure(0, weight=1) - entry = ttk.Entry(labeled_frame, textvariable=self.cmd) - entry.grid(sticky=tk.EW) - - # results frame - labeled_frame = ttk.LabelFrame(frame, text="Output", padding=FRAME_PAD) - labeled_frame.grid(sticky=tk.NSEW, pady=PADY) - labeled_frame.columnconfigure(0, weight=1) - labeled_frame.rowconfigure(0, weight=1) - - self.result = CodeText(labeled_frame) - self.result.text.config(state=tk.DISABLED, height=15) - self.result.grid(sticky=tk.NSEW, pady=PADY) - button_frame = ttk.Frame(labeled_frame) - button_frame.grid(sticky=tk.NSEW) - button_frame.columnconfigure(0, weight=1) - button_frame.columnconfigure(1, weight=1) - button = ttk.Button(button_frame, text="Run", command=self.click_run) - button.grid(sticky=tk.EW, padx=PADX) - button = ttk.Button(button_frame, text="Close", command=self.destroy) - button.grid(row=0, column=1, sticky=tk.EW) - - def draw_nodes_frame(self) -> None: - labeled_frame = ttk.LabelFrame(self.top, text="Nodes", padding=FRAME_PAD) - labeled_frame.grid(row=0, column=1, sticky=tk.NSEW) - labeled_frame.columnconfigure(0, weight=1) - labeled_frame.rowconfigure(0, weight=1) - - self.node_list = ListboxScroll(labeled_frame) - self.node_list.listbox.config(selectmode=tk.MULTIPLE) - self.node_list.grid(sticky=tk.NSEW, pady=PADY) - for n in sorted(self.executable_nodes.keys()): - self.node_list.listbox.insert(tk.END, n) - - button_frame = 
ttk.Frame(labeled_frame, padding=FRAME_PAD) - button_frame.grid(sticky=tk.NSEW) - button_frame.columnconfigure(0, weight=1) - button_frame.columnconfigure(1, weight=1) - - button = ttk.Button(button_frame, text="All", command=self.click_all) - button.grid(sticky=tk.NSEW, padx=PADX) - button = ttk.Button(button_frame, text="None", command=self.click_none) - button.grid(row=0, column=1, sticky=tk.NSEW) - - def click_all(self) -> None: - self.node_list.listbox.selection_set(0, self.node_list.listbox.size() - 1) - - def click_none(self) -> None: - self.node_list.listbox.selection_clear(0, self.node_list.listbox.size() - 1) - - def click_run(self) -> None: - """ - Run the command on each of the selected nodes and display the output to result - text box. - """ - command = self.cmd.get().strip() - self.result.text.config(state=tk.NORMAL) - self.result.text.delete("1.0", tk.END) - for selection in self.node_list.listbox.curselection(): - node_name = self.node_list.listbox.get(selection) - node_id = self.executable_nodes[node_name] - _, output = self.app.core.client.node_command( - self.app.core.session.id, node_id, command - ) - self.result.text.insert(tk.END, f"> {node_name} > {command}:\n{output}\n") - self.result.text.config(state=tk.DISABLED) diff --git a/daemon/core/gui/dialogs/servers.py b/daemon/core/gui/dialogs/servers.py deleted file mode 100644 index 38efad22..00000000 --- a/daemon/core/gui/dialogs/servers.py +++ /dev/null @@ -1,154 +0,0 @@ -import tkinter as tk -from tkinter import ttk -from typing import TYPE_CHECKING, Optional - -from core.gui.appconfig import CoreServer -from core.gui.dialogs.dialog import Dialog -from core.gui.themes import FRAME_PAD, PADX, PADY -from core.gui.widgets import ListboxScroll - -if TYPE_CHECKING: - from core.gui.app import Application - -DEFAULT_NAME: str = "example" -DEFAULT_ADDRESS: str = "127.0.0.1" -DEFAULT_PORT: int = 50051 - - -class ServersDialog(Dialog): - def __init__(self, app: "Application") -> None: - super().__init__(app, "CORE Servers") - self.name: tk.StringVar = tk.StringVar(value=DEFAULT_NAME) - self.address: tk.StringVar = tk.StringVar(value=DEFAULT_ADDRESS) - self.servers: Optional[tk.Listbox] = None - self.selected_index: Optional[int] = None - self.selected: Optional[str] = None - self.save_button: Optional[ttk.Button] = None - self.delete_button: Optional[ttk.Button] = None - self.draw() - - def draw(self) -> None: - self.top.columnconfigure(0, weight=1) - self.top.rowconfigure(0, weight=1) - self.draw_servers() - self.draw_servers_buttons() - self.draw_server_configuration() - self.draw_apply_buttons() - - def draw_servers(self) -> None: - listbox_scroll = ListboxScroll(self.top) - listbox_scroll.grid(pady=PADY, sticky=tk.NSEW) - listbox_scroll.columnconfigure(0, weight=1) - listbox_scroll.rowconfigure(0, weight=1) - - self.servers = listbox_scroll.listbox - self.servers.grid(row=0, column=0, sticky=tk.NSEW) - self.servers.bind("<>", self.handle_server_change) - - for server in self.app.core.servers: - self.servers.insert(tk.END, server) - - def draw_server_configuration(self) -> None: - frame = ttk.LabelFrame(self.top, text="Server Configuration", padding=FRAME_PAD) - frame.grid(pady=PADY, sticky=tk.EW) - frame.columnconfigure(1, weight=1) - frame.columnconfigure(3, weight=1) - - label = ttk.Label(frame, text="Name") - label.grid(row=0, column=0, sticky=tk.W, padx=PADX) - entry = ttk.Entry(frame, textvariable=self.name) - entry.grid(row=0, column=1, sticky=tk.EW) - - label = ttk.Label(frame, text="Address") - label.grid(row=0, 
column=2, sticky=tk.W, padx=PADX) - entry = ttk.Entry(frame, textvariable=self.address) - entry.grid(row=0, column=3, sticky=tk.EW) - - def draw_servers_buttons(self) -> None: - frame = ttk.Frame(self.top) - frame.grid(pady=PADY, sticky=tk.EW) - for i in range(3): - frame.columnconfigure(i, weight=1) - - button = ttk.Button(frame, text="Create", command=self.click_create) - button.grid(row=0, column=0, sticky=tk.EW, padx=PADX) - - self.save_button = ttk.Button( - frame, text="Save", state=tk.DISABLED, command=self.click_save - ) - self.save_button.grid(row=0, column=1, sticky=tk.EW, padx=PADX) - - self.delete_button = ttk.Button( - frame, text="Delete", state=tk.DISABLED, command=self.click_delete - ) - self.delete_button.grid(row=0, column=2, sticky=tk.EW) - - def draw_apply_buttons(self) -> None: - frame = ttk.Frame(self.top) - frame.grid(sticky=tk.EW) - for i in range(2): - frame.columnconfigure(i, weight=1) - - button = ttk.Button( - frame, text="Save Configuration", command=self.click_save_configuration - ) - button.grid(row=0, column=0, sticky=tk.EW, padx=PADX) - - button = ttk.Button(frame, text="Cancel", command=self.destroy) - button.grid(row=0, column=1, sticky=tk.EW) - - def click_save_configuration(self): - self.app.guiconfig.servers.clear() - for server in self.app.core.servers.values(): - self.app.guiconfig.servers.append(server) - self.app.save_config() - self.destroy() - - def click_create(self) -> None: - name = self.name.get() - if name not in self.app.core.servers: - address = self.address.get() - server = CoreServer(name, address) - self.app.core.servers[name] = server - self.servers.insert(tk.END, name) - - def click_save(self) -> None: - name = self.name.get() - if self.selected: - previous_name = self.selected - self.selected = name - server = self.app.core.servers.pop(previous_name) - server.name = name - server.address = self.address.get() - self.app.core.servers[name] = server - self.servers.delete(self.selected_index) - self.servers.insert(self.selected_index, name) - self.servers.selection_set(self.selected_index) - - def click_delete(self) -> None: - if self.selected: - self.servers.delete(self.selected_index) - del self.app.core.servers[self.selected] - self.selected = None - self.selected_index = None - self.name.set(DEFAULT_NAME) - self.address.set(DEFAULT_ADDRESS) - self.servers.selection_clear(0, tk.END) - self.save_button.config(state=tk.DISABLED) - self.delete_button.config(state=tk.DISABLED) - - def handle_server_change(self, event: tk.Event) -> None: - selection = self.servers.curselection() - if selection: - self.selected_index = selection[0] - self.selected = self.servers.get(self.selected_index) - server = self.app.core.servers[self.selected] - self.name.set(server.name) - self.address.set(server.address) - self.save_button.config(state=tk.NORMAL) - self.delete_button.config(state=tk.NORMAL) - else: - self.selected_index = None - self.selected = None - self.save_button.config(state=tk.DISABLED) - self.delete_button.config(state=tk.DISABLED) diff --git a/daemon/core/gui/dialogs/serviceconfig.py b/daemon/core/gui/dialogs/serviceconfig.py deleted file mode 100644 index 5eec7faf..00000000 --- a/daemon/core/gui/dialogs/serviceconfig.py +++ /dev/null @@ -1,612 +0,0 @@ -import logging -import tkinter as tk -from pathlib import Path -from tkinter import filedialog, messagebox, ttk -from typing import TYPE_CHECKING, Optional - -import grpc -from PIL.ImageTk import PhotoImage - -from core.api.grpc.wrappers import Node, NodeServiceData, ServiceValidationMode 
-from core.gui import images -from core.gui.dialogs.copyserviceconfig import CopyServiceConfigDialog -from core.gui.dialogs.dialog import Dialog -from core.gui.images import ImageEnum -from core.gui.themes import FRAME_PAD, PADX, PADY -from core.gui.widgets import CodeText, ListboxScroll - -logger = logging.getLogger(__name__) - -if TYPE_CHECKING: - from core.gui.app import Application - from core.gui.coreclient import CoreClient - -ICON_SIZE: int = 16 - - -class ServiceConfigDialog(Dialog): - def __init__( - self, master: tk.BaseWidget, app: "Application", service_name: str, node: Node - ) -> None: - title = f"{service_name} Service (Deprecated)" - super().__init__(app, title, master=master) - self.core: "CoreClient" = app.core - self.node: Node = node - self.service_name: str = service_name - self.radiovar: tk.IntVar = tk.IntVar(value=2) - self.metadata: str = "" - self.filenames: list[str] = [] - self.dependencies: list[str] = [] - self.executables: list[str] = [] - self.startup_commands: list[str] = [] - self.validation_commands: list[str] = [] - self.shutdown_commands: list[str] = [] - self.default_startup: list[str] = [] - self.default_validate: list[str] = [] - self.default_shutdown: list[str] = [] - self.validation_mode: Optional[ServiceValidationMode] = None - self.validation_time: Optional[int] = None - self.validation_period: Optional[float] = None - self.directory_entry: Optional[ttk.Entry] = None - self.default_directories: list[str] = [] - self.temp_directories: list[str] = [] - self.documentnew_img: PhotoImage = self.app.get_enum_icon( - ImageEnum.DOCUMENTNEW, width=ICON_SIZE - ) - self.editdelete_img: PhotoImage = self.app.get_enum_icon( - ImageEnum.EDITDELETE, width=ICON_SIZE - ) - self.notebook: Optional[ttk.Notebook] = None - self.metadata_entry: Optional[ttk.Entry] = None - self.filename_combobox: Optional[ttk.Combobox] = None - self.dir_list: Optional[ListboxScroll] = None - self.startup_commands_listbox: Optional[tk.Listbox] = None - self.shutdown_commands_listbox: Optional[tk.Listbox] = None - self.validate_commands_listbox: Optional[tk.Listbox] = None - self.validation_time_entry: Optional[ttk.Entry] = None - self.validation_mode_entry: Optional[ttk.Entry] = None - self.service_file_data: Optional[CodeText] = None - self.validation_period_entry: Optional[ttk.Entry] = None - self.original_service_files: dict[str, str] = {} - self.default_config: Optional[NodeServiceData] = None - self.temp_service_files: dict[str, str] = {} - self.modified_files: set[str] = set() - self.has_error: bool = False - self.load() - if not self.has_error: - self.draw() - - def load(self) -> None: - try: - self.core.start_session(definition=True) - default_config = self.app.core.get_node_service( - self.node.id, self.service_name - ) - self.default_startup = default_config.startup[:] - self.default_validate = default_config.validate[:] - self.default_shutdown = default_config.shutdown[:] - self.default_directories = default_config.dirs[:] - custom_service_config = self.node.service_configs.get(self.service_name) - self.default_config = default_config - service_config = ( - custom_service_config if custom_service_config else default_config - ) - self.dependencies = service_config.dependencies[:] - self.executables = service_config.executables[:] - self.metadata = service_config.meta - self.filenames = service_config.configs[:] - self.startup_commands = service_config.startup[:] - self.validation_commands = service_config.validate[:] - self.shutdown_commands = service_config.shutdown[:] - 
self.validation_mode = service_config.validation_mode - self.validation_time = service_config.validation_timer - self.temp_directories = service_config.dirs[:] - self.original_service_files = { - x: self.app.core.get_node_service_file( - self.node.id, self.service_name, x - ) - for x in default_config.configs - } - self.temp_service_files = dict(self.original_service_files) - - file_configs = self.node.service_file_configs.get(self.service_name, {}) - for file, data in file_configs.items(): - self.temp_service_files[file] = data - except grpc.RpcError as e: - self.app.show_grpc_exception("Get Node Service Error", e) - self.has_error = True - - def draw(self) -> None: - self.top.columnconfigure(0, weight=1) - self.top.rowconfigure(1, weight=1) - - # draw metadata - frame = ttk.Frame(self.top) - frame.grid(sticky=tk.EW, pady=PADY) - frame.columnconfigure(1, weight=1) - label = ttk.Label(frame, text="Meta-data") - label.grid(row=0, column=0, sticky=tk.W, padx=PADX) - self.metadata_entry = ttk.Entry(frame, textvariable=self.metadata) - self.metadata_entry.grid(row=0, column=1, sticky=tk.EW) - - # draw notebook - self.notebook = ttk.Notebook(self.top) - self.notebook.grid(sticky=tk.NSEW, pady=PADY) - self.draw_tab_files() - self.draw_tab_directories() - self.draw_tab_startstop() - self.draw_tab_configuration() - - self.draw_buttons() - - def draw_tab_files(self) -> None: - tab = ttk.Frame(self.notebook, padding=FRAME_PAD) - tab.grid(sticky=tk.NSEW) - tab.columnconfigure(0, weight=1) - self.notebook.add(tab, text="Files") - - label = ttk.Label( - tab, text="Config files and scripts that are generated for this service." - ) - label.grid() - - frame = ttk.Frame(tab) - frame.grid(sticky=tk.EW, pady=PADY) - frame.columnconfigure(1, weight=1) - label = ttk.Label(frame, text="File Name") - label.grid(row=0, column=0, padx=PADX, sticky=tk.W) - self.filename_combobox = ttk.Combobox(frame, values=self.filenames) - self.filename_combobox.bind( - "<>", self.display_service_file_data - ) - self.filename_combobox.grid(row=0, column=1, sticky=tk.EW, padx=PADX) - button = ttk.Button( - frame, image=self.documentnew_img, command=self.add_filename - ) - button.grid(row=0, column=2, padx=PADX) - button = ttk.Button( - frame, image=self.editdelete_img, command=self.delete_filename - ) - button.grid(row=0, column=3) - - frame = ttk.Frame(tab) - frame.grid(sticky=tk.EW, pady=PADY) - frame.columnconfigure(1, weight=1) - button = ttk.Radiobutton( - frame, - variable=self.radiovar, - text="Copy Source File", - value=1, - state=tk.DISABLED, - ) - button.grid(row=0, column=0, sticky=tk.W, padx=PADX) - entry = ttk.Entry(frame, state=tk.DISABLED) - entry.grid(row=0, column=1, sticky=tk.EW, padx=PADX) - image = images.from_enum(ImageEnum.FILEOPEN, width=images.BUTTON_SIZE) - button = ttk.Button(frame, image=image) - button.image = image - button.grid(row=0, column=2) - - frame = ttk.Frame(tab) - frame.grid(sticky=tk.EW, pady=PADY) - frame.columnconfigure(0, weight=1) - button = ttk.Radiobutton( - frame, - variable=self.radiovar, - text="Use text below for file contents", - value=2, - ) - button.grid(row=0, column=0, sticky=tk.EW) - image = images.from_enum(ImageEnum.FILEOPEN, width=images.BUTTON_SIZE) - button = ttk.Button(frame, image=image) - button.image = image - button.grid(row=0, column=1) - image = images.from_enum(ImageEnum.DOCUMENTSAVE, width=images.BUTTON_SIZE) - button = ttk.Button(frame, image=image) - button.image = image - button.grid(row=0, column=2) - - self.service_file_data = CodeText(tab) - 
self.service_file_data.grid(sticky=tk.NSEW) - tab.rowconfigure(self.service_file_data.grid_info()["row"], weight=1) - if len(self.filenames) > 0: - self.filename_combobox.current(0) - self.service_file_data.text.delete(1.0, "end") - self.service_file_data.text.insert( - "end", self.temp_service_files[self.filenames[0]] - ) - self.service_file_data.text.bind( - "", self.update_temp_service_file_data - ) - - def draw_tab_directories(self) -> None: - tab = ttk.Frame(self.notebook, padding=FRAME_PAD) - tab.grid(sticky=tk.NSEW) - tab.columnconfigure(0, weight=1) - tab.rowconfigure(2, weight=1) - self.notebook.add(tab, text="Directories") - - label = ttk.Label( - tab, - text="Directories required by this service that are unique for each node.", - ) - label.grid(row=0, column=0, sticky=tk.EW) - frame = ttk.Frame(tab, padding=FRAME_PAD) - frame.columnconfigure(0, weight=1) - frame.grid(row=1, column=0, sticky=tk.NSEW) - var = tk.StringVar(value="") - self.directory_entry = ttk.Entry(frame, textvariable=var) - self.directory_entry.grid(row=0, column=0, sticky=tk.EW, padx=PADX) - button = ttk.Button(frame, text="...", command=self.find_directory_button) - button.grid(row=0, column=1, sticky=tk.EW) - self.dir_list = ListboxScroll(tab) - self.dir_list.grid(row=2, column=0, sticky=tk.NSEW, pady=PADY) - self.dir_list.listbox.bind("<>", self.directory_select) - for d in self.temp_directories: - self.dir_list.listbox.insert("end", d) - - frame = ttk.Frame(tab) - frame.grid(row=3, column=0, sticky=tk.NSEW) - frame.columnconfigure(0, weight=1) - frame.columnconfigure(1, weight=1) - button = ttk.Button(frame, text="Add", command=self.add_directory) - button.grid(row=0, column=0, sticky=tk.EW, padx=PADX) - button = ttk.Button(frame, text="Remove", command=self.remove_directory) - button.grid(row=0, column=1, sticky=tk.EW) - - def draw_tab_startstop(self) -> None: - tab = ttk.Frame(self.notebook, padding=FRAME_PAD) - tab.grid(sticky=tk.NSEW) - tab.columnconfigure(0, weight=1) - for i in range(3): - tab.rowconfigure(i, weight=1) - self.notebook.add(tab, text="Startup/Shutdown") - commands = [] - # tab 3 - for i in range(3): - label_frame = None - if i == 0: - label_frame = ttk.LabelFrame( - tab, text="Startup Commands", padding=FRAME_PAD - ) - commands = self.startup_commands - elif i == 1: - label_frame = ttk.LabelFrame( - tab, text="Shutdown Commands", padding=FRAME_PAD - ) - commands = self.shutdown_commands - elif i == 2: - label_frame = ttk.LabelFrame( - tab, text="Validation Commands", padding=FRAME_PAD - ) - commands = self.validation_commands - label_frame.columnconfigure(0, weight=1) - label_frame.rowconfigure(1, weight=1) - label_frame.grid(row=i, column=0, sticky=tk.NSEW, pady=PADY) - - frame = ttk.Frame(label_frame) - frame.grid(row=0, column=0, sticky=tk.NSEW, pady=PADY) - frame.columnconfigure(0, weight=1) - entry = ttk.Entry(frame, textvariable=tk.StringVar()) - entry.grid(row=0, column=0, stick="ew", padx=PADX) - button = ttk.Button(frame, image=self.documentnew_img) - button.bind("", self.add_command) - button.grid(row=0, column=1, sticky=tk.EW, padx=PADX) - button = ttk.Button(frame, image=self.editdelete_img) - button.grid(row=0, column=2, sticky=tk.EW) - button.bind("", self.delete_command) - listbox_scroll = ListboxScroll(label_frame) - listbox_scroll.listbox.bind("<>", self.update_entry) - for command in commands: - listbox_scroll.listbox.insert("end", command) - listbox_scroll.listbox.config(height=4) - listbox_scroll.grid(row=1, column=0, sticky=tk.NSEW) - if i == 0: - 
self.startup_commands_listbox = listbox_scroll.listbox - elif i == 1: - self.shutdown_commands_listbox = listbox_scroll.listbox - elif i == 2: - self.validate_commands_listbox = listbox_scroll.listbox - - def draw_tab_configuration(self) -> None: - tab = ttk.Frame(self.notebook, padding=FRAME_PAD) - tab.grid(sticky=tk.NSEW) - tab.columnconfigure(0, weight=1) - self.notebook.add(tab, text="Configuration", sticky=tk.NSEW) - - frame = ttk.Frame(tab) - frame.grid(sticky=tk.EW, pady=PADY) - frame.columnconfigure(1, weight=1) - - label = ttk.Label(frame, text="Validation Time") - label.grid(row=0, column=0, sticky=tk.W, padx=PADX) - self.validation_time_entry = ttk.Entry(frame) - self.validation_time_entry.insert("end", self.validation_time) - self.validation_time_entry.config(state=tk.DISABLED) - self.validation_time_entry.grid(row=0, column=1, sticky=tk.EW, pady=PADY) - - label = ttk.Label(frame, text="Validation Mode") - label.grid(row=1, column=0, sticky=tk.W, padx=PADX) - if self.validation_mode == ServiceValidationMode.BLOCKING: - mode = "BLOCKING" - elif self.validation_mode == ServiceValidationMode.NON_BLOCKING: - mode = "NON_BLOCKING" - else: - mode = "TIMER" - self.validation_mode_entry = ttk.Entry( - frame, textvariable=tk.StringVar(value=mode) - ) - self.validation_mode_entry.insert("end", mode) - self.validation_mode_entry.config(state=tk.DISABLED) - self.validation_mode_entry.grid(row=1, column=1, sticky=tk.EW, pady=PADY) - - label = ttk.Label(frame, text="Validation Period") - label.grid(row=2, column=0, sticky=tk.W, padx=PADX) - self.validation_period_entry = ttk.Entry( - frame, state=tk.DISABLED, textvariable=tk.StringVar() - ) - self.validation_period_entry.grid(row=2, column=1, sticky=tk.EW, pady=PADY) - - label_frame = ttk.LabelFrame(tab, text="Executables", padding=FRAME_PAD) - label_frame.grid(sticky=tk.NSEW, pady=PADY) - label_frame.columnconfigure(0, weight=1) - label_frame.rowconfigure(0, weight=1) - listbox_scroll = ListboxScroll(label_frame) - listbox_scroll.grid(sticky=tk.NSEW) - tab.rowconfigure(listbox_scroll.grid_info()["row"], weight=1) - for executable in self.executables: - listbox_scroll.listbox.insert("end", executable) - - label_frame = ttk.LabelFrame(tab, text="Dependencies", padding=FRAME_PAD) - label_frame.grid(sticky=tk.NSEW, pady=PADY) - label_frame.columnconfigure(0, weight=1) - label_frame.rowconfigure(0, weight=1) - listbox_scroll = ListboxScroll(label_frame) - listbox_scroll.grid(sticky=tk.NSEW) - tab.rowconfigure(listbox_scroll.grid_info()["row"], weight=1) - for dependency in self.dependencies: - listbox_scroll.listbox.insert("end", dependency) - - def draw_buttons(self) -> None: - frame = ttk.Frame(self.top) - frame.grid(sticky=tk.EW) - for i in range(4): - frame.columnconfigure(i, weight=1) - button = ttk.Button(frame, text="Apply", command=self.click_apply) - button.grid(row=0, column=0, sticky=tk.EW, padx=PADX) - button = ttk.Button(frame, text="Defaults", command=self.click_defaults) - button.grid(row=0, column=1, sticky=tk.EW, padx=PADX) - button = ttk.Button(frame, text="Copy...", command=self.click_copy) - button.grid(row=0, column=2, sticky=tk.EW, padx=PADX) - button = ttk.Button(frame, text="Cancel", command=self.destroy) - button.grid(row=0, column=3, sticky=tk.EW) - - def add_filename(self) -> None: - filename = self.filename_combobox.get() - if filename not in self.filename_combobox["values"]: - self.filename_combobox["values"] += (filename,) - self.filename_combobox.set(filename) - self.temp_service_files[filename] = 
self.service_file_data.text.get( - 1.0, "end" - ) - else: - logger.debug("file already existed") - - def delete_filename(self) -> None: - cbb = self.filename_combobox - filename = cbb.get() - if filename in cbb["values"]: - cbb["values"] = tuple([x for x in cbb["values"] if x != filename]) - cbb.set("") - self.service_file_data.text.delete(1.0, "end") - self.temp_service_files.pop(filename, None) - if filename in self.modified_files: - self.modified_files.remove(filename) - - @classmethod - def add_command(cls, event: tk.Event) -> None: - frame_contains_button = event.widget.master - listbox = frame_contains_button.master.grid_slaves(row=1, column=0)[0].listbox - command_to_add = frame_contains_button.grid_slaves(row=0, column=0)[0].get() - if command_to_add == "": - return - for cmd in listbox.get(0, tk.END): - if cmd == command_to_add: - return - listbox.insert(tk.END, command_to_add) - - @classmethod - def update_entry(cls, event: tk.Event) -> None: - listbox = event.widget - current_selection = listbox.curselection() - if len(current_selection) > 0: - cmd = listbox.get(current_selection[0]) - entry = listbox.master.master.grid_slaves(row=0, column=0)[0].grid_slaves( - row=0, column=0 - )[0] - entry.delete(0, "end") - entry.insert(0, cmd) - - @classmethod - def delete_command(cls, event: tk.Event) -> None: - button = event.widget - frame_contains_button = button.master - listbox = frame_contains_button.master.grid_slaves(row=1, column=0)[0].listbox - current_selection = listbox.curselection() - if len(current_selection) > 0: - listbox.delete(current_selection[0]) - entry = frame_contains_button.grid_slaves(row=0, column=0)[0] - entry.delete(0, tk.END) - - def click_apply(self) -> None: - if ( - not self.is_custom_command() - and not self.is_custom_service_file() - and not self.has_new_files() - and not self.is_custom_directory() - ): - self.node.service_configs.pop(self.service_name, None) - self.current_service_color("") - self.destroy() - return - files = set(self.filenames) - if ( - self.is_custom_command() - or self.has_new_files() - or self.is_custom_directory() - ): - startup, validate, shutdown = self.get_commands() - files = set(self.filename_combobox["values"]) - service_data = NodeServiceData( - configs=list(files), - dirs=self.temp_directories, - startup=startup, - validate=validate, - shutdown=shutdown, - ) - logger.info("setting service data: %s", service_data) - self.node.service_configs[self.service_name] = service_data - for file in self.modified_files: - if file not in files: - continue - file_configs = self.node.service_file_configs.setdefault( - self.service_name, {} - ) - file_configs[file] = self.temp_service_files[file] - self.current_service_color("green") - self.destroy() - - def display_service_file_data(self, event: tk.Event) -> None: - filename = self.filename_combobox.get() - self.service_file_data.text.delete(1.0, "end") - self.service_file_data.text.insert("end", self.temp_service_files[filename]) - - def update_temp_service_file_data(self, event: tk.Event) -> None: - filename = self.filename_combobox.get() - self.temp_service_files[filename] = self.service_file_data.text.get(1.0, "end") - if self.temp_service_files[filename] != self.original_service_files.get( - filename, "" - ): - self.modified_files.add(filename) - else: - self.modified_files.discard(filename) - - def is_custom_command(self) -> bool: - startup, validate, shutdown = self.get_commands() - return ( - set(self.default_startup) != set(startup) - or set(self.default_validate) != set(validate) 
- or set(self.default_shutdown) != set(shutdown) - ) - - def has_new_files(self) -> bool: - return set(self.filenames) != set(self.filename_combobox["values"]) - - def is_custom_service_file(self) -> bool: - return len(self.modified_files) > 0 - - def is_custom_directory(self) -> bool: - return set(self.default_directories) != set(self.dir_list.listbox.get(0, "end")) - - def click_defaults(self) -> None: - """ - clears out any custom configuration permanently - """ - # clear coreclient data - self.node.service_configs.pop(self.service_name, None) - file_configs = self.node.service_file_configs.pop(self.service_name, {}) - file_configs.pop(self.service_name, None) - self.temp_service_files = dict(self.original_service_files) - self.modified_files.clear() - - # reset files tab - files = list(self.default_config.configs[:]) - self.filenames = files - self.filename_combobox.config(values=files) - self.service_file_data.text.delete(1.0, "end") - if len(files) > 0: - filename = files[0] - self.filename_combobox.set(filename) - self.service_file_data.text.insert("end", self.temp_service_files[filename]) - - # reset commands - self.startup_commands_listbox.delete(0, tk.END) - self.validate_commands_listbox.delete(0, tk.END) - self.shutdown_commands_listbox.delete(0, tk.END) - for cmd in self.default_startup: - self.startup_commands_listbox.insert(tk.END, cmd) - for cmd in self.default_validate: - self.validate_commands_listbox.insert(tk.END, cmd) - for cmd in self.default_shutdown: - self.shutdown_commands_listbox.insert(tk.END, cmd) - - # reset directories - self.directory_entry.delete(0, "end") - self.dir_list.listbox.delete(0, "end") - self.temp_directories = list(self.default_directories) - for d in self.default_directories: - self.dir_list.listbox.insert("end", d) - - self.current_service_color("") - - def click_copy(self) -> None: - file_name = self.filename_combobox.get() - dialog = CopyServiceConfigDialog( - self.app, self, self.node.name, self.service_name, file_name - ) - dialog.show() - - @classmethod - def append_commands( - cls, commands: list[str], listbox: tk.Listbox, to_add: list[str] - ) -> None: - for cmd in to_add: - commands.append(cmd) - listbox.insert(tk.END, cmd) - - def get_commands(self) -> tuple[list[str], list[str], list[str]]: - startup = self.startup_commands_listbox.get(0, "end") - shutdown = self.shutdown_commands_listbox.get(0, "end") - validate = self.validate_commands_listbox.get(0, "end") - return startup, validate, shutdown - - def find_directory_button(self) -> None: - d = filedialog.askdirectory(initialdir="/") - self.directory_entry.delete(0, "end") - self.directory_entry.insert("end", d) - - def add_directory(self) -> None: - directory = Path(self.directory_entry.get()) - if directory.is_absolute(): - if str(directory) not in self.temp_directories: - self.dir_list.listbox.insert("end", directory) - self.temp_directories.append(str(directory)) - else: - messagebox.showerror("Add Directory", "Path must be absolute!", parent=self) - - def remove_directory(self) -> None: - d = self.directory_entry.get() - dirs = self.dir_list.listbox.get(0, "end") - if d and d in self.temp_directories: - self.temp_directories.remove(d) - try: - i = dirs.index(d) - self.dir_list.listbox.delete(i) - except ValueError: - logger.debug("directory is not in the list") - self.directory_entry.delete(0, "end") - - def directory_select(self, event) -> None: - i = self.dir_list.listbox.curselection() - if i: - d = self.dir_list.listbox.get(i) - self.directory_entry.delete(0, "end") - 
self.directory_entry.insert("end", d) - - def current_service_color(self, color="") -> None: - """ - change the current service label color - """ - listbox = self.master.current.listbox - services = listbox.get(0, tk.END) - listbox.itemconfig(services.index(self.service_name), bg=color) diff --git a/daemon/core/gui/dialogs/sessionoptions.py b/daemon/core/gui/dialogs/sessionoptions.py deleted file mode 100644 index 28d780dc..00000000 --- a/daemon/core/gui/dialogs/sessionoptions.py +++ /dev/null @@ -1,47 +0,0 @@ -import logging -import tkinter as tk -from tkinter import ttk -from typing import TYPE_CHECKING, Optional - -from core.gui.dialogs.dialog import Dialog -from core.gui.themes import PADX, PADY -from core.gui.widgets import ConfigFrame - -logger = logging.getLogger(__name__) - -if TYPE_CHECKING: - from core.gui.app import Application - - -class SessionOptionsDialog(Dialog): - def __init__(self, app: "Application") -> None: - super().__init__(app, "Session Options") - self.config_frame: Optional[ConfigFrame] = None - self.has_error: bool = False - self.enabled: bool = not self.app.core.is_runtime() - if not self.has_error: - self.draw() - - def draw(self) -> None: - self.top.columnconfigure(0, weight=1) - self.top.rowconfigure(0, weight=1) - options = self.app.core.session.options - self.config_frame = ConfigFrame(self.top, self.app, options, self.enabled) - self.config_frame.draw_config() - self.config_frame.grid(sticky=tk.NSEW, pady=PADY) - - frame = ttk.Frame(self.top) - frame.grid(sticky=tk.EW) - for i in range(2): - frame.columnconfigure(i, weight=1) - state = tk.NORMAL if self.enabled else tk.DISABLED - button = ttk.Button(frame, text="Save", command=self.save, state=state) - button.grid(row=0, column=0, padx=PADX, sticky=tk.EW) - button = ttk.Button(frame, text="Cancel", command=self.destroy) - button.grid(row=0, column=1, sticky=tk.EW) - - def save(self) -> None: - config = self.config_frame.parse_config() - for key, value in config.items(): - self.app.core.session.options[key].value = value - self.destroy() diff --git a/daemon/core/gui/dialogs/sessions.py b/daemon/core/gui/dialogs/sessions.py deleted file mode 100644 index 3ca4fa63..00000000 --- a/daemon/core/gui/dialogs/sessions.py +++ /dev/null @@ -1,229 +0,0 @@ -import logging -import tkinter as tk -from tkinter import messagebox, ttk -from typing import TYPE_CHECKING, Optional - -import grpc - -from core.api.grpc.wrappers import SessionState, SessionSummary -from core.gui import images -from core.gui.dialogs.dialog import Dialog -from core.gui.images import ImageEnum -from core.gui.task import ProgressTask -from core.gui.themes import PADX, PADY - -logger = logging.getLogger(__name__) - -if TYPE_CHECKING: - from core.gui.app import Application - - -class SessionsDialog(Dialog): - def __init__(self, app: "Application", is_start_app: bool = False) -> None: - super().__init__(app, "Sessions") - self.is_start_app: bool = is_start_app - self.selected_session: Optional[int] = None - self.selected_id: Optional[int] = None - self.tree: Optional[ttk.Treeview] = None - self.connect_button: Optional[ttk.Button] = None - self.delete_button: Optional[ttk.Button] = None - self.protocol("WM_DELETE_WINDOW", self.on_closing) - self.draw() - - def get_sessions(self) -> list[SessionSummary]: - try: - sessions = self.app.core.client.get_sessions() - logger.info("sessions: %s", sessions) - return sorted(sessions, key=lambda x: x.id) - except grpc.RpcError as e: - self.app.show_grpc_exception("Get Sessions Error", e) - self.destroy() - - def 
draw(self) -> None: - self.top.columnconfigure(0, weight=1) - self.top.rowconfigure(1, weight=1) - self.draw_description() - self.draw_tree() - self.draw_buttons() - - def draw_description(self) -> None: - """ - write a short description - """ - label = ttk.Label( - self.top, - text="Below is a list of active CORE sessions. Double-click to \n" - "connect to an existing session. Usually, only sessions in \n" - "the RUNTIME state persist in the daemon, except for the \n" - "one you might be concurrently editting.", - justify=tk.CENTER, - ) - label.grid(pady=PADY) - - def draw_tree(self) -> None: - frame = ttk.Frame(self.top) - frame.columnconfigure(0, weight=1) - frame.rowconfigure(0, weight=1) - frame.grid(sticky=tk.NSEW, pady=PADY) - self.tree = ttk.Treeview( - frame, - columns=("id", "state", "nodes"), - show="headings", - selectmode=tk.BROWSE, - ) - style = ttk.Style() - heading_size = int(self.app.guiconfig.scale * 10) - style.configure("Treeview.Heading", font=(None, heading_size, "bold")) - self.tree.grid(sticky=tk.NSEW) - self.tree.column("id", stretch=tk.YES, anchor="center") - self.tree.heading("id", text="ID") - self.tree.column("state", stretch=tk.YES, anchor="center") - self.tree.heading("state", text="State") - self.tree.column("nodes", stretch=tk.YES, anchor="center") - self.tree.heading("nodes", text="Node Count") - self.draw_sessions() - self.tree.bind("", self.double_click_join) - self.tree.bind("<>", self.click_select) - - yscrollbar = ttk.Scrollbar(frame, orient="vertical", command=self.tree.yview) - yscrollbar.grid(row=0, column=1, sticky=tk.NS) - self.tree.configure(yscrollcommand=yscrollbar.set) - - xscrollbar = ttk.Scrollbar(frame, orient="horizontal", command=self.tree.xview) - xscrollbar.grid(row=1, sticky=tk.EW) - self.tree.configure(xscrollcommand=xscrollbar.set) - - def draw_sessions(self) -> None: - self.tree.delete(*self.tree.get_children()) - for index, session in enumerate(self.get_sessions()): - state_name = SessionState(session.state).name - self.tree.insert( - "", - tk.END, - text=str(session.id), - values=(session.id, state_name, session.nodes), - ) - - def draw_buttons(self) -> None: - frame = ttk.Frame(self.top) - for i in range(4): - frame.columnconfigure(i, weight=1) - frame.grid(sticky=tk.EW) - - image = images.from_enum(ImageEnum.DOCUMENTNEW, width=images.BUTTON_SIZE) - b = ttk.Button( - frame, image=image, text="New", compound=tk.LEFT, command=self.click_new - ) - b.image = image - b.grid(row=0, padx=PADX, sticky=tk.EW) - - image = images.from_enum(ImageEnum.FILEOPEN, width=images.BUTTON_SIZE) - self.connect_button = ttk.Button( - frame, - image=image, - text="Connect", - compound=tk.LEFT, - command=self.click_connect, - state=tk.DISABLED, - ) - self.connect_button.image = image - self.connect_button.grid(row=0, column=1, padx=PADX, sticky=tk.EW) - - image = images.from_enum(ImageEnum.DELETE, width=images.BUTTON_SIZE) - self.delete_button = ttk.Button( - frame, - image=image, - text="Delete", - compound=tk.LEFT, - command=self.click_delete, - state=tk.DISABLED, - ) - self.delete_button.image = image - self.delete_button.grid(row=0, column=2, padx=PADX, sticky=tk.EW) - - image = images.from_enum(ImageEnum.CANCEL, width=images.BUTTON_SIZE) - if self.is_start_app: - b = ttk.Button( - frame, - image=image, - text="Exit", - compound=tk.LEFT, - command=self.click_exit, - ) - else: - b = ttk.Button( - frame, - image=image, - text="Cancel", - compound=tk.LEFT, - command=self.destroy, - ) - b.image = image - b.grid(row=0, column=3, sticky=tk.EW) - - def 
click_new(self) -> None: - self.app.core.create_new_session() - self.destroy() - - def click_select(self, _event: tk.Event = None) -> None: - item = self.tree.selection() - if item: - self.selected_session = int(self.tree.item(item, "text")) - self.selected_id = item - self.delete_button.config(state=tk.NORMAL) - self.connect_button.config(state=tk.NORMAL) - else: - self.selected_session = None - self.selected_id = None - self.delete_button.config(state=tk.DISABLED) - self.connect_button.config(state=tk.DISABLED) - logger.debug("selected session: %s", self.selected_session) - - def click_connect(self) -> None: - if not self.selected_session: - return - self.join_session(self.selected_session) - - def join_session(self, session_id: int) -> None: - self.destroy() - task = ProgressTask( - self.app, "Join", self.app.core.join_session, args=(session_id,) - ) - task.start() - - def double_click_join(self, _event: tk.Event) -> None: - item = self.tree.selection() - if not item: - return - session_id = int(self.tree.item(item, "text")) - self.join_session(session_id) - - def click_delete(self) -> None: - if not self.selected_session: - return - logger.info("click delete session: %s", self.selected_session) - self.tree.delete(self.selected_id) - self.app.core.delete_session(self.selected_session) - session_id = None - if self.app.core.session: - session_id = self.app.core.session.id - if self.selected_session == session_id: - self.app.core.session = None - sessions = self.get_sessions() - if not sessions: - self.app.core.create_new_session() - self.draw_sessions() - else: - session_id = sessions[0].id - self.app.core.join_session(session_id) - self.click_select() - - def click_exit(self) -> None: - self.destroy() - self.app.close() - - def on_closing(self) -> None: - if self.is_start_app and messagebox.askokcancel("Exit", "Quit?", parent=self): - self.click_exit() - if not self.is_start_app: - self.destroy() diff --git a/daemon/core/gui/dialogs/shapemod.py b/daemon/core/gui/dialogs/shapemod.py deleted file mode 100644 index db19ff1a..00000000 --- a/daemon/core/gui/dialogs/shapemod.py +++ /dev/null @@ -1,250 +0,0 @@ -""" -shape input dialog -""" -import tkinter as tk -from tkinter import font, ttk -from typing import TYPE_CHECKING, Optional, Union - -from core.gui.dialogs.colorpicker import ColorPickerDialog -from core.gui.dialogs.dialog import Dialog -from core.gui.graph import tags -from core.gui.graph.shapeutils import is_draw_shape, is_shape_text -from core.gui.themes import FRAME_PAD, PADX, PADY - -if TYPE_CHECKING: - from core.gui.app import Application - from core.gui.graph.graph import CanvasGraph - from core.gui.graph.shape import Shape - -FONT_SIZES: list[int] = [8, 9, 10, 11, 12, 14, 16, 18, 20, 22, 24, 26, 28, 36, 48, 72] -BORDER_WIDTH: list[int] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] - - -class ShapeDialog(Dialog): - def __init__(self, app: "Application", shape: "Shape") -> None: - if is_draw_shape(shape.shape_type): - title = "Add Shape" - else: - title = "Add Text" - super().__init__(app, title) - self.canvas: "CanvasGraph" = app.manager.current() - self.fill: Optional[ttk.Label] = None - self.border: Optional[ttk.Label] = None - self.shape: "Shape" = shape - data = shape.shape_data - self.shape_text: tk.StringVar = tk.StringVar(value=data.text) - self.font: tk.StringVar = tk.StringVar(value=data.font) - self.font_size: tk.IntVar = tk.IntVar(value=data.font_size) - self.text_color: str = data.text_color - fill_color = data.fill_color - if not fill_color: - fill_color = "#CFCFFF" - 
self.fill_color: str = fill_color - self.border_color: str = data.border_color - self.border_width: tk.IntVar = tk.IntVar(value=0) - self.bold: tk.BooleanVar = tk.BooleanVar(value=data.bold) - self.italic: tk.BooleanVar = tk.BooleanVar(value=data.italic) - self.underline: tk.BooleanVar = tk.BooleanVar(value=data.underline) - self.draw() - - def draw(self) -> None: - self.top.columnconfigure(0, weight=1) - self.draw_label_options() - if is_draw_shape(self.shape.shape_type): - self.draw_shape_options() - self.draw_spacer() - self.draw_buttons() - - def draw_label_options(self) -> None: - label_frame = ttk.LabelFrame(self.top, text="Label", padding=FRAME_PAD) - label_frame.grid(sticky=tk.EW) - label_frame.columnconfigure(0, weight=1) - - entry = ttk.Entry(label_frame, textvariable=self.shape_text) - entry.grid(sticky=tk.EW, pady=PADY) - - # font options - frame = ttk.Frame(label_frame) - frame.grid(sticky=tk.NSEW, pady=PADY) - frame.columnconfigure(0, weight=1) - frame.columnconfigure(1, weight=1) - frame.columnconfigure(2, weight=1) - combobox = ttk.Combobox( - frame, - textvariable=self.font, - values=sorted(font.families()), - state="readonly", - ) - combobox.grid(row=0, column=0, sticky=tk.NSEW) - combobox = ttk.Combobox( - frame, textvariable=self.font_size, values=FONT_SIZES, state="readonly" - ) - combobox.grid(row=0, column=1, padx=PADX, sticky=tk.NSEW) - button = ttk.Button(frame, text="Color", command=self.choose_text_color) - button.grid(row=0, column=2, sticky=tk.NSEW) - - # style options - frame = ttk.Frame(label_frame) - frame.grid(sticky=tk.EW) - for i in range(3): - frame.columnconfigure(i, weight=1) - button = ttk.Checkbutton(frame, variable=self.bold, text="Bold") - button.grid(row=0, column=0, sticky=tk.EW) - button = ttk.Checkbutton(frame, variable=self.italic, text="Italic") - button.grid(row=0, column=1, padx=PADX, sticky=tk.EW) - button = ttk.Checkbutton(frame, variable=self.underline, text="Underline") - button.grid(row=0, column=2, sticky=tk.EW) - - def draw_shape_options(self) -> None: - label_frame = ttk.LabelFrame(self.top, text="Shape", padding=FRAME_PAD) - label_frame.grid(sticky=tk.EW, pady=PADY) - label_frame.columnconfigure(0, weight=1) - - frame = ttk.Frame(label_frame) - frame.grid(sticky=tk.EW) - for i in range(1, 3): - frame.columnconfigure(i, weight=1) - label = ttk.Label(frame, text="Fill Color") - label.grid(row=0, column=0, padx=PADX, sticky=tk.W) - self.fill = ttk.Label(frame, text=self.fill_color, background=self.fill_color) - self.fill.grid(row=0, column=1, sticky=tk.EW, padx=PADX) - button = ttk.Button(frame, text="Color", command=self.choose_fill_color) - button.grid(row=0, column=2, sticky=tk.EW) - - label = ttk.Label(frame, text="Border Color") - label.grid(row=1, column=0, sticky=tk.W, padx=PADX) - self.border = ttk.Label( - frame, text=self.border_color, background=self.border_color - ) - self.border.grid(row=1, column=1, sticky=tk.EW, padx=PADX) - button = ttk.Button(frame, text="Color", command=self.choose_border_color) - button.grid(row=1, column=2, sticky=tk.EW) - - frame = ttk.Frame(label_frame) - frame.grid(sticky=tk.EW, pady=PADY) - frame.columnconfigure(1, weight=1) - label = ttk.Label(frame, text="Border Width") - label.grid(row=0, column=0, sticky=tk.W, padx=PADX) - combobox = ttk.Combobox( - frame, textvariable=self.border_width, values=BORDER_WIDTH, state="readonly" - ) - combobox.grid(row=0, column=1, sticky=tk.NSEW) - - def draw_buttons(self) -> None: - frame = ttk.Frame(self.top) - frame.grid(sticky=tk.NSEW) - 
frame.columnconfigure(0, weight=1) - frame.columnconfigure(1, weight=1) - button = ttk.Button(frame, text="Add shape", command=self.click_add) - button.grid(row=0, column=0, sticky=tk.EW, padx=PADX) - button = ttk.Button(frame, text="Cancel", command=self.cancel) - button.grid(row=0, column=1, sticky=tk.EW) - - def choose_text_color(self) -> None: - color_picker = ColorPickerDialog(self, self.app, self.text_color) - self.text_color = color_picker.askcolor() - - def choose_fill_color(self) -> None: - color_picker = ColorPickerDialog(self, self.app, self.fill_color) - color = color_picker.askcolor() - self.fill_color = color - self.fill.config(background=color, text=color) - - def choose_border_color(self) -> None: - color_picker = ColorPickerDialog(self, self.app, self.border_color) - color = color_picker.askcolor() - self.border_color = color - self.border.config(background=color, text=color) - - def cancel(self) -> None: - self.shape.delete() - self.canvas.shapes.pop(self.shape.id) - self.destroy() - - def click_add(self) -> None: - if is_draw_shape(self.shape.shape_type): - self.add_shape() - elif is_shape_text(self.shape.shape_type): - self.add_text() - self.destroy() - - def make_font(self) -> list[Union[int, str]]: - """ - create font for text or shape label - """ - size = int(self.font_size.get()) - text_font = [self.font.get(), size] - if self.bold.get(): - text_font.append("bold") - if self.italic.get(): - text_font.append("italic") - if self.underline.get(): - text_font.append("underline") - return text_font - - def save_text(self) -> None: - """ - save info related to text or shape label - """ - data = self.shape.shape_data - data.text = self.shape_text.get() - data.font = self.font.get() - data.font_size = int(self.font_size.get()) - data.text_color = self.text_color - data.bold = self.bold.get() - data.italic = self.italic.get() - data.underline = self.underline.get() - - def save_shape(self) -> None: - """ - save info related to shape - """ - data = self.shape.shape_data - data.fill_color = self.fill_color - data.border_color = self.border_color - data.border_width = int(self.border_width.get()) - - def add_text(self) -> None: - """ - add text to canvas - """ - text = self.shape_text.get() - text_font = self.make_font() - self.canvas.itemconfig( - self.shape.id, text=text, fill=self.text_color, font=text_font - ) - self.save_text() - - def add_shape(self) -> None: - self.canvas.itemconfig( - self.shape.id, - fill=self.fill_color, - dash="", - outline=self.border_color, - width=int(self.border_width.get()), - ) - shape_text = self.shape_text.get() - size = int(self.font_size.get()) - x0, y0, x1, y1 = self.canvas.bbox(self.shape.id) - _y = y0 + 1.5 * size - _x = (x0 + x1) / 2 - text_font = self.make_font() - if self.shape.text_id is None: - self.shape.text_id = self.canvas.create_text( - _x, - _y, - text=shape_text, - fill=self.text_color, - font=text_font, - tags=(tags.SHAPE_TEXT, tags.ANNOTATION), - ) - self.shape.created = True - else: - self.canvas.itemconfig( - self.shape.text_id, - text=shape_text, - fill=self.text_color, - font=text_font, - ) - self.save_text() - self.save_shape() diff --git a/daemon/core/gui/dialogs/throughput.py b/daemon/core/gui/dialogs/throughput.py deleted file mode 100644 index 493d4da4..00000000 --- a/daemon/core/gui/dialogs/throughput.py +++ /dev/null @@ -1,112 +0,0 @@ -""" -throughput dialog -""" -import tkinter as tk -from tkinter import ttk -from typing import TYPE_CHECKING, Optional - -from core.gui.dialogs.colorpicker import ColorPickerDialog 
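Aside on the ShapeDialog removed in the hunk above: it assembles a Tk font specification as a plain list (family, size, then optional style keywords) and applies it to an existing canvas text item with itemconfig. The following is a minimal standalone sketch of that pattern, not the project's code; widget names are illustrative and a display is assumed to be available.

import tkinter as tk

def make_font(family: str, size: int, bold: bool = False,
              italic: bool = False, underline: bool = False) -> list:
    # Tk accepts a font as a (family, size, *styles) sequence
    spec = [family, size]
    if bold:
        spec.append("bold")
    if italic:
        spec.append("italic")
    if underline:
        spec.append("underline")
    return spec

root = tk.Tk()
canvas = tk.Canvas(root, width=200, height=100)
canvas.pack()
text_id = canvas.create_text(100, 50, text="label")
# restyle an existing canvas text item, as the shape dialog does when adding a shape label
canvas.itemconfig(text_id, font=make_font("Helvetica", 12, bold=True), fill="#000000")
root.mainloop()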
-from core.gui.dialogs.dialog import Dialog -from core.gui.graph.manager import CanvasManager -from core.gui.themes import FRAME_PAD, PADX, PADY - -if TYPE_CHECKING: - from core.gui.app import Application - - -class ThroughputDialog(Dialog): - def __init__(self, app: "Application") -> None: - super().__init__(app, "Throughput Config") - self.manager: CanvasManager = app.manager - self.show_throughput: tk.IntVar = tk.IntVar(value=1) - self.exponential_weight: tk.IntVar = tk.IntVar(value=1) - self.transmission: tk.IntVar = tk.IntVar(value=1) - self.reception: tk.IntVar = tk.IntVar(value=1) - self.threshold: tk.DoubleVar = tk.DoubleVar( - value=self.manager.throughput_threshold - ) - self.width: tk.IntVar = tk.IntVar(value=self.manager.throughput_width) - self.color: str = self.manager.throughput_color - self.color_button: Optional[tk.Button] = None - self.top.columnconfigure(0, weight=1) - self.draw() - - def draw(self) -> None: - button = ttk.Checkbutton( - self.top, - variable=self.show_throughput, - text="Show Throughput Level On Every Link", - ) - button.grid(sticky=tk.EW) - button = ttk.Checkbutton( - self.top, - variable=self.exponential_weight, - text="Use Exponential Weighted Moving Average", - ) - button.grid(sticky=tk.EW) - button = ttk.Checkbutton( - self.top, variable=self.transmission, text="Include Transmissions" - ) - button.grid(sticky=tk.EW) - button = ttk.Checkbutton( - self.top, variable=self.reception, text="Include Receptions" - ) - button.grid(sticky=tk.EW) - - label_frame = ttk.LabelFrame(self.top, text="Link Highlight", padding=FRAME_PAD) - label_frame.columnconfigure(0, weight=1) - label_frame.grid(sticky=tk.EW) - - scale = ttk.Scale( - label_frame, - from_=0, - to=1000, - value=0, - orient=tk.HORIZONTAL, - variable=self.threshold, - ) - scale.grid(sticky=tk.EW, pady=PADY) - - frame = ttk.Frame(label_frame) - frame.grid(sticky=tk.EW) - frame.columnconfigure(1, weight=1) - label = ttk.Label(frame, text="Threshold Kbps (0 disabled)") - label.grid(row=0, column=0, sticky=tk.EW, padx=PADX) - entry = ttk.Entry(frame, textvariable=self.threshold) - entry.grid(row=0, column=1, sticky=tk.EW, pady=PADY) - label = ttk.Label(frame, text="Width") - label.grid(row=1, column=0, sticky=tk.EW, padx=PADX) - entry = ttk.Entry(frame, textvariable=self.width) - entry.grid(row=1, column=1, sticky=tk.EW, pady=PADY) - label = ttk.Label(frame, text="Color") - label.grid(row=2, column=0, sticky=tk.EW, padx=PADX) - self.color_button = tk.Button( - frame, - text=self.color, - command=self.click_color, - bg=self.color, - highlightthickness=0, - ) - self.color_button.grid(row=2, column=1, sticky=tk.EW) - - self.draw_spacer() - - frame = ttk.Frame(self.top) - frame.grid(sticky=tk.EW) - for i in range(2): - frame.columnconfigure(i, weight=1) - button = ttk.Button(frame, text="Save", command=self.click_save) - button.grid(row=0, column=0, sticky=tk.EW, padx=PADX) - button = ttk.Button(frame, text="Cancel", command=self.destroy) - button.grid(row=0, column=1, sticky=tk.EW) - - def click_color(self) -> None: - color_picker = ColorPickerDialog(self, self.app, self.color) - self.color = color_picker.askcolor() - self.color_button.config(bg=self.color, text=self.color, bd=0) - - def click_save(self) -> None: - self.manager.throughput_threshold = self.threshold.get() - self.manager.throughput_width = self.width.get() - self.manager.throughput_color = self.color - self.destroy() diff --git a/daemon/core/gui/dialogs/wirelessconfig.py b/daemon/core/gui/dialogs/wirelessconfig.py deleted file mode 100644 
index b04fbd2c..00000000 --- a/daemon/core/gui/dialogs/wirelessconfig.py +++ /dev/null @@ -1,55 +0,0 @@ -import tkinter as tk -from tkinter import ttk -from typing import TYPE_CHECKING, Optional - -import grpc - -from core.api.grpc.wrappers import ConfigOption, Node -from core.gui.dialogs.dialog import Dialog -from core.gui.themes import PADX, PADY -from core.gui.widgets import ConfigFrame - -if TYPE_CHECKING: - from core.gui.app import Application - from core.gui.graph.node import CanvasNode - - -class WirelessConfigDialog(Dialog): - def __init__(self, app: "Application", canvas_node: "CanvasNode"): - super().__init__(app, f"Wireless Configuration - {canvas_node.core_node.name}") - self.node: Node = canvas_node.core_node - self.config_frame: Optional[ConfigFrame] = None - self.config: dict[str, ConfigOption] = {} - try: - config = self.node.wireless_config - if not config: - config = self.app.core.get_wireless_config(self.node.id) - self.config: dict[str, ConfigOption] = config - self.draw() - except grpc.RpcError as e: - self.app.show_grpc_exception("Wireless Config Error", e) - self.has_error: bool = True - self.destroy() - - def draw(self) -> None: - self.top.columnconfigure(0, weight=1) - self.top.rowconfigure(0, weight=1) - self.config_frame = ConfigFrame(self.top, self.app, self.config) - self.config_frame.draw_config() - self.config_frame.grid(sticky=tk.NSEW, pady=PADY) - self.draw_buttons() - - def draw_buttons(self) -> None: - frame = ttk.Frame(self.top) - frame.grid(sticky=tk.EW) - for i in range(2): - frame.columnconfigure(i, weight=1) - button = ttk.Button(frame, text="Apply", command=self.click_apply) - button.grid(row=0, column=0, padx=PADX, sticky=tk.EW) - button = ttk.Button(frame, text="Cancel", command=self.destroy) - button.grid(row=0, column=1, sticky=tk.EW) - - def click_apply(self) -> None: - self.config_frame.parse_config() - self.node.wireless_config = self.config - self.destroy() diff --git a/daemon/core/gui/dialogs/wlanconfig.py b/daemon/core/gui/dialogs/wlanconfig.py deleted file mode 100644 index c382d3c8..00000000 --- a/daemon/core/gui/dialogs/wlanconfig.py +++ /dev/null @@ -1,122 +0,0 @@ -import tkinter as tk -from tkinter import ttk -from typing import TYPE_CHECKING, Optional - -import grpc - -from core.api.grpc.wrappers import ConfigOption, Node -from core.gui.dialogs.dialog import Dialog -from core.gui.themes import PADX, PADY -from core.gui.widgets import ConfigFrame - -if TYPE_CHECKING: - from core.gui.app import Application - from core.gui.graph.node import CanvasNode - from core.gui.graph.graph import CanvasGraph - -RANGE_COLOR: str = "#009933" -RANGE_WIDTH: int = 3 - - -class WlanConfigDialog(Dialog): - def __init__(self, app: "Application", canvas_node: "CanvasNode") -> None: - super().__init__(app, f"{canvas_node.core_node.name} WLAN Configuration") - self.canvas: "CanvasGraph" = app.manager.current() - self.canvas_node: "CanvasNode" = canvas_node - self.node: Node = canvas_node.core_node - self.config_frame: Optional[ConfigFrame] = None - self.range_entry: Optional[ttk.Entry] = None - self.has_error: bool = False - self.ranges: dict[int, int] = {} - self.positive_int: int = self.app.master.register(self.validate_and_update) - try: - config = self.node.wlan_config - if not config: - config = self.app.core.get_wlan_config(self.node.id) - self.config: dict[str, ConfigOption] = config - self.init_draw_range() - self.draw() - except grpc.RpcError as e: - self.app.show_grpc_exception("WLAN Config Error", e) - self.has_error: bool = True - self.destroy() 
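The wireless and WLAN configuration dialogs deleted here share one defensive pattern: fetch the node's configuration over gRPC and, if the call fails, record the error and tear the dialog down instead of drawing it. A rough sketch of that control flow follows; fetch_config and on_error are placeholders, not CORE APIs.

import grpc

class ConfigLoader:
    def __init__(self, fetch_config, on_error) -> None:
        # fetch_config: callable returning a dict of config options (placeholder)
        # on_error: callback used to surface the gRPC failure to the user (placeholder)
        self.has_error = False
        self.config = {}
        try:
            self.config = fetch_config()
        except grpc.RpcError as e:
            # mirror the dialogs: flag the failure and bail out before drawing anything
            self.has_error = True
            on_error("Config Error", e)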
- - def init_draw_range(self) -> None: - if self.canvas_node.id in self.canvas.wireless_network: - for cid in self.canvas.wireless_network[self.canvas_node.id]: - x, y = self.canvas.coords(cid) - range_id = self.canvas.create_oval( - x, y, x, y, width=RANGE_WIDTH, outline=RANGE_COLOR, tags="range" - ) - self.ranges[cid] = range_id - - def draw(self) -> None: - self.top.columnconfigure(0, weight=1) - self.top.rowconfigure(0, weight=1) - self.config_frame = ConfigFrame(self.top, self.app, self.config) - self.config_frame.draw_config() - self.config_frame.grid(sticky=tk.NSEW, pady=PADY) - self.draw_apply_buttons() - self.top.bind("", self.remove_ranges) - - def draw_apply_buttons(self) -> None: - """ - create node configuration options - """ - frame = ttk.Frame(self.top) - frame.grid(sticky=tk.EW) - for i in range(2): - frame.columnconfigure(i, weight=1) - - self.range_entry = self.config_frame.winfo_children()[0].frame.winfo_children()[ - -1 - ] - self.range_entry.config(validatecommand=(self.positive_int, "%P")) - - button = ttk.Button(frame, text="Apply", command=self.click_apply) - button.grid(row=0, column=0, padx=PADX, sticky=tk.EW) - - button = ttk.Button(frame, text="Cancel", command=self.destroy) - button.grid(row=0, column=1, sticky=tk.EW) - - def click_apply(self) -> None: - """ - retrieve user's wlan configuration and store the new configuration values - """ - config = self.config_frame.parse_config() - self.node.wlan_config = self.config - if self.app.core.is_runtime(): - session_id = self.app.core.session.id - self.app.core.client.set_wlan_config(session_id, self.node.id, config) - self.remove_ranges() - self.destroy() - - def remove_ranges(self, event=None) -> None: - for cid in self.canvas.find_withtag("range"): - self.canvas.delete(cid) - self.ranges.clear() - - def validate_and_update(self, s: str) -> bool: - """ - custom validation to also redraw the mdr ranges when the range value changes - """ - if len(s) == 0: - return True - try: - int_value = int(s) / 2 - if int_value >= 0: - net_range = int_value * self.canvas.ratio - if self.canvas_node.id in self.canvas.wireless_network: - for cid in self.canvas.wireless_network[self.canvas_node.id]: - x, y = self.canvas.coords(cid) - self.canvas.coords( - self.ranges[cid], - x - net_range, - y - net_range, - x + net_range, - y + net_range, - ) - return True - return False - except ValueError: - return False diff --git a/daemon/core/gui/frames/__init__.py b/daemon/core/gui/frames/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/daemon/core/gui/frames/base.py b/daemon/core/gui/frames/base.py deleted file mode 100644 index 8db952f1..00000000 --- a/daemon/core/gui/frames/base.py +++ /dev/null @@ -1,36 +0,0 @@ -import tkinter as tk -from tkinter import ttk -from typing import TYPE_CHECKING - -from core.gui.themes import FRAME_PAD, PADX, PADY - -if TYPE_CHECKING: - from core.gui.app import Application - - -class InfoFrameBase(ttk.Frame): - def __init__(self, master: tk.BaseWidget, app: "Application") -> None: - super().__init__(master, padding=FRAME_PAD) - self.app: "Application" = app - - def draw(self) -> None: - raise NotImplementedError - - -class DetailsFrame(ttk.Frame): - def __init__(self, master: tk.BaseWidget) -> None: - super().__init__(master) - self.columnconfigure(1, weight=1) - self.row = 0 - - def add_detail(self, label: str, value: str) -> None: - label = ttk.Label(self, text=label, anchor=tk.W) - label.grid(row=self.row, sticky=tk.EW, column=0, padx=PADX) - label = ttk.Label(self, text=value, 
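The WlanConfigDialog deleted just above wires Tk's validation mechanism to a Python callback via master.register(...) and validatecommand=(cmd, "%P") so the range entry is checked on every keystroke. A self-contained sketch of that wiring, independent of CORE and assuming only that the entry should accept blank input or a non-negative integer:

import tkinter as tk
from tkinter import ttk

def is_blank_or_nonneg_int(value: str) -> bool:
    # "%P" passes the prospective contents of the entry; allow empty while typing
    if value == "":
        return True
    try:
        return int(value) >= 0
    except ValueError:
        return False

root = tk.Tk()
# register() wraps the Python callable so Tcl can invoke it during validation
check = root.register(is_blank_or_nonneg_int)
entry = ttk.Entry(root, validate="key", validatecommand=(check, "%P"))
entry.pack(padx=10, pady=10)
root.mainloop()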
anchor=tk.W, state=tk.DISABLED) - label.grid(row=self.row, sticky=tk.EW, column=1) - self.row += 1 - - def add_separator(self) -> None: - separator = ttk.Separator(self) - separator.grid(row=self.row, sticky=tk.EW, columnspan=2, pady=PADY) - self.row += 1 diff --git a/daemon/core/gui/frames/default.py b/daemon/core/gui/frames/default.py deleted file mode 100644 index e84edb87..00000000 --- a/daemon/core/gui/frames/default.py +++ /dev/null @@ -1,19 +0,0 @@ -import tkinter as tk -from tkinter import ttk -from typing import TYPE_CHECKING - -from core.gui.frames.base import InfoFrameBase - -if TYPE_CHECKING: - from core.gui.app import Application - - -class DefaultInfoFrame(InfoFrameBase): - def __init__(self, master: tk.BaseWidget, app: "Application") -> None: - super().__init__(master, app) - - def draw(self) -> None: - label = ttk.Label(self, text="Click a Node/Link", anchor=tk.CENTER) - label.grid(sticky=tk.EW) - label = ttk.Label(self, text="to see details", anchor=tk.CENTER) - label.grid(sticky=tk.EW) diff --git a/daemon/core/gui/frames/link.py b/daemon/core/gui/frames/link.py deleted file mode 100644 index bde0aec8..00000000 --- a/daemon/core/gui/frames/link.py +++ /dev/null @@ -1,109 +0,0 @@ -import tkinter as tk -from typing import TYPE_CHECKING, Optional - -from core.api.grpc.wrappers import Interface -from core.gui.frames.base import DetailsFrame, InfoFrameBase -from core.gui.utils import bandwidth_text - -if TYPE_CHECKING: - from core.gui.app import Application - from core.gui.graph.edges import CanvasEdge - from core.gui.graph.node import CanvasNode - from core.gui.graph.edges import CanvasWirelessEdge - - -def get_iface(canvas_node: "CanvasNode", net_id: int) -> Optional[Interface]: - iface = None - for edge in canvas_node.edges: - link = edge.link - if link.node1_id == net_id: - iface = link.iface2 - elif link.node2_id == net_id: - iface = link.iface1 - return iface - - -class EdgeInfoFrame(InfoFrameBase): - def __init__( - self, master: tk.BaseWidget, app: "Application", edge: "CanvasEdge" - ) -> None: - super().__init__(master, app) - self.edge: "CanvasEdge" = edge - - def draw(self) -> None: - self.columnconfigure(0, weight=1) - link = self.edge.link - options = link.options - src_node = self.app.core.session.nodes[link.node1_id] - dst_node = self.app.core.session.nodes[link.node2_id] - - frame = DetailsFrame(self) - frame.grid(sticky=tk.EW) - frame.add_detail("Source", src_node.name) - iface1 = link.iface1 - if iface1: - mac = iface1.mac if iface1.mac else "auto" - frame.add_detail("MAC", mac) - ip4 = f"{iface1.ip4}/{iface1.ip4_mask}" if iface1.ip4 else "" - frame.add_detail("IP4", ip4) - ip6 = f"{iface1.ip6}/{iface1.ip6_mask}" if iface1.ip6 else "" - frame.add_detail("IP6", ip6) - - frame.add_separator() - frame.add_detail("Destination", dst_node.name) - iface2 = link.iface2 - if iface2: - mac = iface2.mac if iface2.mac else "auto" - frame.add_detail("MAC", mac) - ip4 = f"{iface2.ip4}/{iface2.ip4_mask}" if iface2.ip4 else "" - frame.add_detail("IP4", ip4) - ip6 = f"{iface2.ip6}/{iface2.ip6_mask}" if iface2.ip6 else "" - frame.add_detail("IP6", ip6) - - if link.options: - frame.add_separator() - bandwidth = bandwidth_text(options.bandwidth) - frame.add_detail("Bandwidth", bandwidth) - frame.add_detail("Delay", f"{options.delay} us") - frame.add_detail("Jitter", f"\u00B1{options.jitter} us") - frame.add_detail("Loss", f"{options.loss}%") - frame.add_detail("Duplicate", f"{options.dup}%") - - -class WirelessEdgeInfoFrame(InfoFrameBase): - def __init__( - self, master: 
tk.BaseWidget, app: "Application", edge: "CanvasWirelessEdge" - ) -> None: - super().__init__(master, app) - self.edge: "CanvasWirelessEdge" = edge - - def draw(self) -> None: - link = self.edge.link - src_node = self.edge.src.core_node - dst_node = self.edge.dst.core_node - - # find interface for each node connected to network - net_id = link.network_id - iface1 = get_iface(self.edge.src, net_id) - iface2 = get_iface(self.edge.dst, net_id) - - frame = DetailsFrame(self) - frame.grid(sticky=tk.EW) - frame.add_detail("Source", src_node.name) - if iface1: - mac = iface1.mac if iface1.mac else "auto" - frame.add_detail("MAC", mac) - ip4 = f"{iface1.ip4}/{iface1.ip4_mask}" if iface1.ip4 else "" - frame.add_detail("IP4", ip4) - ip6 = f"{iface1.ip6}/{iface1.ip6_mask}" if iface1.ip6 else "" - frame.add_detail("IP6", ip6) - - frame.add_separator() - frame.add_detail("Destination", dst_node.name) - if iface2: - mac = iface2.mac if iface2.mac else "auto" - frame.add_detail("MAC", mac) - ip4 = f"{iface2.ip4}/{iface2.ip4_mask}" if iface2.ip4 else "" - frame.add_detail("IP4", ip4) - ip6 = f"{iface2.ip6}/{iface2.ip6_mask}" if iface2.ip6 else "" - frame.add_detail("IP6", ip6) diff --git a/daemon/core/gui/frames/node.py b/daemon/core/gui/frames/node.py deleted file mode 100644 index afd03735..00000000 --- a/daemon/core/gui/frames/node.py +++ /dev/null @@ -1,40 +0,0 @@ -import tkinter as tk -from typing import TYPE_CHECKING - -from core.api.grpc.wrappers import NodeType -from core.gui import nodeutils as nutils -from core.gui.frames.base import DetailsFrame, InfoFrameBase - -if TYPE_CHECKING: - from core.gui.app import Application - from core.gui.graph.node import CanvasNode - - -class NodeInfoFrame(InfoFrameBase): - def __init__(self, master, app: "Application", canvas_node: "CanvasNode") -> None: - super().__init__(master, app) - self.canvas_node: "CanvasNode" = canvas_node - - def draw(self) -> None: - self.columnconfigure(0, weight=1) - node = self.canvas_node.core_node - frame = DetailsFrame(self) - frame.grid(sticky=tk.EW) - frame.add_detail("ID", str(node.id)) - frame.add_detail("Name", node.name) - if nutils.is_model(node): - frame.add_detail("Type", node.model) - if nutils.is_container(node): - for index, service in enumerate(sorted(node.services)): - if index == 0: - frame.add_detail("Services", service) - else: - frame.add_detail("", service) - if node.type == NodeType.EMANE: - emane = "".join(node.emane.split("_")[1:]) - frame.add_detail("EMANE", emane) - if nutils.has_image(node.type): - frame.add_detail("Image", node.image) - if nutils.is_container(node): - server = node.server if node.server else "localhost" - frame.add_detail("Server", server) diff --git a/daemon/core/gui/graph/__init__.py b/daemon/core/gui/graph/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/daemon/core/gui/graph/edges.py b/daemon/core/gui/graph/edges.py deleted file mode 100644 index e5a4c97b..00000000 --- a/daemon/core/gui/graph/edges.py +++ /dev/null @@ -1,764 +0,0 @@ -import functools -import logging -import math -import tkinter as tk -from typing import TYPE_CHECKING, Optional, Union - -from core.api.grpc.wrappers import Interface, Link -from core.gui import nodeutils, themes -from core.gui.dialogs.linkconfig import LinkConfigurationDialog -from core.gui.frames.link import EdgeInfoFrame, WirelessEdgeInfoFrame -from core.gui.graph import tags -from core.gui.utils import bandwidth_text, delay_jitter_text - -logger = logging.getLogger(__name__) - -if TYPE_CHECKING: - from core.gui.app import 
Application - from core.gui.graph.graph import CanvasGraph - from core.gui.graph.manager import CanvasManager - from core.gui.graph.node import CanvasNode, ShadowNode - -TEXT_DISTANCE: int = 60 -EDGE_WIDTH: int = 3 -EDGE_COLOR: str = "#ff0000" -EDGE_LOSS: float = 100.0 -WIRELESS_WIDTH: float = 3 -WIRELESS_COLOR: str = "#009933" -ARC_DISTANCE: int = 50 - - -def create_wireless_token(src: int, dst: int, network: int) -> str: - if src < dst: - node1, node2 = src, dst - else: - node1, node2 = dst, src - return f"{node1}-{node2}-{network}" - - -def create_edge_token(link: Link) -> str: - iface1_id = link.iface1.id if link.iface1 else 0 - iface2_id = link.iface2.id if link.iface2 else 0 - if link.node1_id < link.node2_id: - node1 = link.node1_id - node1_iface = iface1_id - node2 = link.node2_id - node2_iface = iface2_id - else: - node1 = link.node2_id - node1_iface = iface2_id - node2 = link.node1_id - node2_iface = iface1_id - return f"{node1}-{node1_iface}-{node2}-{node2_iface}" - - -def node_label_positions( - src_x: int, src_y: int, dst_x: int, dst_y: int -) -> tuple[tuple[float, float], tuple[float, float]]: - v_x, v_y = dst_x - src_x, dst_y - src_y - v_len = math.sqrt(v_x**2 + v_y**2) - if v_len == 0: - u_x, u_y = 0.0, 0.0 - else: - u_x, u_y = v_x / v_len, v_y / v_len - offset_x, offset_y = TEXT_DISTANCE * u_x, TEXT_DISTANCE * u_y - return (src_x + offset_x, src_y + offset_y), (dst_x - offset_x, dst_y - offset_y) - - -def arc_edges(edges) -> None: - if not edges: - return - mid_index = len(edges) // 2 - if mid_index == 0: - arc_step = ARC_DISTANCE - else: - arc_step = ARC_DISTANCE / mid_index - # below edges - arc = 0 - for edge in edges[:mid_index]: - arc -= arc_step - edge.arc = arc - edge.redraw() - # mid edge - if len(edges) % 2 != 0: - arc = 0 - edge = edges[mid_index] - edge.arc = arc - edge.redraw() - mid_index += 1 - # above edges - arc = 0 - for edge in edges[mid_index:]: - arc += arc_step - edge.arc = arc - edge.redraw() - - -class Edge: - tag: str = tags.EDGE - - def __init__( - self, app: "Application", src: "CanvasNode", dst: "CanvasNode" = None - ) -> None: - self.app: "Application" = app - self.manager: CanvasManager = app.manager - self.id: Optional[int] = None - self.id2: Optional[int] = None - self.src: "CanvasNode" = src - self.src_shadow: Optional[ShadowNode] = None - self.dst: Optional["CanvasNode"] = dst - self.dst_shadow: Optional[ShadowNode] = None - self.link: Optional[Link] = None - self.arc: int = 0 - self.token: Optional[str] = None - self.src_label: Optional[int] = None - self.src_label2: Optional[int] = None - self.middle_label: Optional[int] = None - self.middle_label2: Optional[int] = None - self.dst_label: Optional[int] = None - self.dst_label2: Optional[int] = None - self.color: str = EDGE_COLOR - self.width: int = EDGE_WIDTH - self.linked_wireless: bool = False - self.hidden: bool = False - if self.dst: - self.linked_wireless = self.src.is_wireless() or self.dst.is_wireless() - - def scaled_width(self) -> float: - return self.width * self.app.app_scale - - def _get_arcpoint( - self, src_pos: tuple[float, float], dst_pos: tuple[float, float] - ) -> tuple[float, float]: - src_x, src_y = src_pos - dst_x, dst_y = dst_pos - mp_x = (src_x + dst_x) / 2 - mp_y = (src_y + dst_y) / 2 - slope_denominator = src_x - dst_x - slope_numerator = src_y - dst_y - # vertical line - if slope_denominator == 0: - return mp_x + self.arc, mp_y - # horizontal line - if slope_numerator == 0: - return mp_x, mp_y + self.arc - # everything else - m = slope_numerator / 
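Two small pure functions in the deleted edges.py are easy to reason about in isolation: the edge token orders the endpoint ids so either direction of a link yields the same key, and the endpoint labels are offset a fixed distance along the edge's unit vector. A stripped-down sketch of the same math, with simplified signatures rather than the module's exact API:

import math

def edge_token(node1_id: int, node2_id: int) -> str:
    # order the ids so (a, b) and (b, a) map to the same token
    low, high = sorted((node1_id, node2_id))
    return f"{low}-{high}"

def label_positions(src_x, src_y, dst_x, dst_y, distance=60.0):
    # unit vector from src to dst; each label sits `distance` pixels in from its end
    vx, vy = dst_x - src_x, dst_y - src_y
    length = math.hypot(vx, vy)
    if length == 0:
        return (src_x, src_y), (dst_x, dst_y)
    ux, uy = vx / length, vy / length
    return (src_x + distance * ux, src_y + distance * uy), \
           (dst_x - distance * ux, dst_y - distance * uy)

print(edge_token(5, 2))               # "2-5"
print(label_positions(0, 0, 100, 0))  # ((60.0, 0.0), (40.0, 0.0))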
slope_denominator - perp_m = -1 / m - b = mp_y - (perp_m * mp_x) - # get arc x and y - offset = math.sqrt(self.arc**2 / (1 + (1 / m**2))) - arc_x = mp_x - if self.arc >= 0: - arc_x += offset - else: - arc_x -= offset - arc_y = (perp_m * arc_x) + b - return arc_x, arc_y - - def arc_common_edges(self) -> None: - common_edges = list(self.src.edges & self.dst.edges) - common_edges += list(self.src.wireless_edges & self.dst.wireless_edges) - arc_edges(common_edges) - - def has_shadows(self) -> bool: - # still drawing - if not self.dst: - return False - return self.src.canvas != self.dst.canvas - - def draw(self, state: str) -> None: - if not self.has_shadows(): - dst = self.dst if self.dst else self.src - self.id = self.draw_edge(self.src.canvas, self.src, dst, state) - elif self.linked_wireless: - if self.src.is_wireless(): - self.src_shadow = self.dst.canvas.get_shadow(self.src) - self.id2 = self.draw_edge( - self.dst.canvas, self.src_shadow, self.dst, state - ) - if self.dst.is_wireless(): - self.dst_shadow = self.src.canvas.get_shadow(self.dst) - self.id = self.draw_edge( - self.src.canvas, self.src, self.dst_shadow, state - ) - else: - # draw shadow nodes and 2 lines - self.src_shadow = self.dst.canvas.get_shadow(self.src) - self.dst_shadow = self.src.canvas.get_shadow(self.dst) - self.id = self.draw_edge(self.src.canvas, self.src, self.dst_shadow, state) - self.id2 = self.draw_edge(self.dst.canvas, self.src_shadow, self.dst, state) - self.src.canvas.organize() - if self.has_shadows(): - self.dst.canvas.organize() - - def draw_edge( - self, - canvas: "CanvasGraph", - src: Union["CanvasNode", "ShadowNode"], - dst: Union["CanvasNode", "ShadowNode"], - state: str, - ) -> int: - src_pos = src.position() - dst_pos = dst.position() - arc_pos = self._get_arcpoint(src_pos, dst_pos) - return canvas.create_line( - *src_pos, - *arc_pos, - *dst_pos, - smooth=True, - tags=self.tag, - width=self.scaled_width(), - fill=self.color, - state=state, - ) - - def redraw(self) -> None: - self.src.canvas.itemconfig(self.id, width=self.scaled_width(), fill=self.color) - self.move_src() - if self.id2: - self.dst.canvas.itemconfig( - self.id2, width=self.scaled_width(), fill=self.color - ) - self.move_dst() - - def middle_label_text(self, text: str) -> None: - if self.middle_label is None: - _, _, x, y, _, _ = self.src.canvas.coords(self.id) - self.middle_label = self.src.canvas.create_text( - x, - y, - font=self.app.edge_font, - text=text, - tags=tags.LINK_LABEL, - justify=tk.CENTER, - state=self.manager.show_link_labels.state(), - ) - if self.id2: - _, _, x, y, _, _ = self.dst.canvas.coords(self.id2) - self.middle_label2 = self.dst.canvas.create_text( - x, - y, - font=self.app.edge_font, - text=text, - tags=tags.LINK_LABEL, - justify=tk.CENTER, - state=self.manager.show_link_labels.state(), - ) - else: - self.src.canvas.itemconfig(self.middle_label, text=text) - if self.middle_label2: - self.dst.canvas.itemconfig(self.middle_label2, text=text) - - def clear_middle_label(self) -> None: - self.src.canvas.delete(self.middle_label) - self.middle_label = None - if self.middle_label2: - self.dst.canvas.delete(self.middle_label2) - self.middle_label2 = None - - def src_label_text(self, text: str) -> None: - if self.src_label is None and self.src_label2 is None: - if self.id: - src_x, src_y, _, _, dst_x, dst_y = self.src.canvas.coords(self.id) - src_pos, _ = node_label_positions(src_x, src_y, dst_x, dst_y) - self.src_label = self.src.canvas.create_text( - *src_pos, - text=text, - justify=tk.CENTER, - 
font=self.app.edge_font, - tags=tags.LINK_LABEL, - state=self.manager.show_link_labels.state(), - ) - if self.id2: - src_x, src_y, _, _, dst_x, dst_y = self.dst.canvas.coords(self.id2) - src_pos, _ = node_label_positions(src_x, src_y, dst_x, dst_y) - self.src_label2 = self.dst.canvas.create_text( - *src_pos, - text=text, - justify=tk.CENTER, - font=self.app.edge_font, - tags=tags.LINK_LABEL, - state=self.manager.show_link_labels.state(), - ) - else: - if self.src_label: - self.src.canvas.itemconfig(self.src_label, text=text) - if self.src_label2: - self.dst.canvas.itemconfig(self.src_label2, text=text) - - def dst_label_text(self, text: str) -> None: - if self.dst_label is None and self.dst_label2 is None: - if self.id: - src_x, src_y, _, _, dst_x, dst_y = self.src.canvas.coords(self.id) - _, dst_pos = node_label_positions(src_x, src_y, dst_x, dst_y) - self.dst_label = self.src.canvas.create_text( - *dst_pos, - text=text, - justify=tk.CENTER, - font=self.app.edge_font, - tags=tags.LINK_LABEL, - state=self.manager.show_link_labels.state(), - ) - if self.id2: - src_x, src_y, _, _, dst_x, dst_y = self.dst.canvas.coords(self.id2) - _, dst_pos = node_label_positions(src_x, src_y, dst_x, dst_y) - self.dst_label2 = self.dst.canvas.create_text( - *dst_pos, - text=text, - justify=tk.CENTER, - font=self.app.edge_font, - tags=tags.LINK_LABEL, - state=self.manager.show_link_labels.state(), - ) - else: - if self.dst_label: - self.src.canvas.itemconfig(self.dst_label, text=text) - if self.dst_label2: - self.dst.canvas.itemconfig(self.dst_label2, text=text) - - def drawing(self, pos: tuple[float, float]) -> None: - src_x, src_y, _, _, _, _ = self.src.canvas.coords(self.id) - src_pos = src_x, src_y - self.moved(src_pos, pos) - - def move_node(self, node: "CanvasNode") -> None: - if self.src == node: - self.move_src() - else: - self.move_dst() - - def move_shadow(self, node: "ShadowNode") -> None: - if self.src_shadow == node: - self.move_src_shadow() - elif self.dst_shadow == node: - self.move_dst_shadow() - - def move_src_shadow(self) -> None: - if not self.id2: - return - _, _, _, _, dst_x, dst_y = self.dst.canvas.coords(self.id2) - dst_pos = dst_x, dst_y - self.moved2(self.src_shadow.position(), dst_pos) - - def move_dst_shadow(self) -> None: - if not self.id: - return - src_x, src_y, _, _, _, _ = self.src.canvas.coords(self.id) - src_pos = src_x, src_y - self.moved(src_pos, self.dst_shadow.position()) - - def move_dst(self) -> None: - if self.dst.is_wireless() and self.has_shadows(): - return - dst_pos = self.dst.position() - if self.id2: - src_x, src_y, _, _, _, _ = self.dst.canvas.coords(self.id2) - src_pos = src_x, src_y - self.moved2(src_pos, dst_pos) - elif self.id: - src_x, src_y, _, _, _, _ = self.dst.canvas.coords(self.id) - src_pos = src_x, src_y - self.moved(src_pos, dst_pos) - - def move_src(self) -> None: - if not self.id: - return - _, _, _, _, dst_x, dst_y = self.src.canvas.coords(self.id) - dst_pos = dst_x, dst_y - self.moved(self.src.position(), dst_pos) - - def moved(self, src_pos: tuple[float, float], dst_pos: tuple[float, float]) -> None: - arc_pos = self._get_arcpoint(src_pos, dst_pos) - self.src.canvas.coords(self.id, *src_pos, *arc_pos, *dst_pos) - if self.middle_label: - self.src.canvas.coords(self.middle_label, *arc_pos) - src_x, src_y, _, _, dst_x, dst_y = self.src.canvas.coords(self.id) - src_pos, dst_pos = node_label_positions(src_x, src_y, dst_x, dst_y) - if self.src_label: - self.src.canvas.coords(self.src_label, *src_pos) - if self.dst_label: - 
self.src.canvas.coords(self.dst_label, *dst_pos) - - def moved2( - self, src_pos: tuple[float, float], dst_pos: tuple[float, float] - ) -> None: - arc_pos = self._get_arcpoint(src_pos, dst_pos) - self.dst.canvas.coords(self.id2, *src_pos, *arc_pos, *dst_pos) - if self.middle_label2: - self.dst.canvas.coords(self.middle_label2, *arc_pos) - src_x, src_y, _, _, dst_x, dst_y = self.dst.canvas.coords(self.id2) - src_pos, dst_pos = node_label_positions(src_x, src_y, dst_x, dst_y) - if self.src_label2: - self.dst.canvas.coords(self.src_label2, *src_pos) - if self.dst_label2: - self.dst.canvas.coords(self.dst_label2, *dst_pos) - - def delete(self) -> None: - logger.debug("deleting canvas edge, id: %s", self.id) - self.src.canvas.delete(self.id) - self.src.canvas.delete(self.src_label) - self.src.canvas.delete(self.dst_label) - if self.dst: - self.dst.canvas.delete(self.id2) - self.dst.canvas.delete(self.src_label2) - self.dst.canvas.delete(self.dst_label2) - if self.src_shadow and self.src_shadow.should_delete(): - self.src_shadow.delete() - self.src_shadow = None - if self.dst_shadow and self.dst_shadow.should_delete(): - self.dst_shadow.delete() - self.dst_shadow = None - self.clear_middle_label() - self.id = None - self.id2 = None - self.src_label = None - self.src_label2 = None - self.dst_label = None - self.dst_label2 = None - if self.dst: - self.arc_common_edges() - - def hide(self) -> None: - self.hidden = True - if self.src_shadow: - self.src_shadow.hide() - if self.dst_shadow: - self.dst_shadow.hide() - self.src.canvas.itemconfigure(self.id, state=tk.HIDDEN) - self.src.canvas.itemconfigure(self.src_label, state=tk.HIDDEN) - self.src.canvas.itemconfigure(self.dst_label, state=tk.HIDDEN) - self.src.canvas.itemconfigure(self.middle_label, state=tk.HIDDEN) - if self.id2: - self.dst.canvas.itemconfigure(self.id2, state=tk.HIDDEN) - self.dst.canvas.itemconfigure(self.src_label2, state=tk.HIDDEN) - self.dst.canvas.itemconfigure(self.dst_label2, state=tk.HIDDEN) - self.dst.canvas.itemconfigure(self.middle_label2, state=tk.HIDDEN) - - def show(self) -> None: - self.hidden = False - if self.src_shadow: - self.src_shadow.show() - if self.dst_shadow: - self.dst_shadow.show() - self.src.canvas.itemconfigure(self.id, state=tk.NORMAL) - state = self.manager.show_link_labels.state() - self.set_labels(state) - - def set_labels(self, state: str) -> None: - self.src.canvas.itemconfigure(self.src_label, state=state) - self.src.canvas.itemconfigure(self.dst_label, state=state) - self.src.canvas.itemconfigure(self.middle_label, state=state) - if self.id2: - self.dst.canvas.itemconfigure(self.id2, state=state) - self.dst.canvas.itemconfigure(self.src_label2, state=state) - self.dst.canvas.itemconfigure(self.dst_label2, state=state) - self.dst.canvas.itemconfigure(self.middle_label2, state=state) - - def other_node(self, node: "CanvasNode") -> "CanvasNode": - if self.src == node: - return self.dst - elif self.dst == node: - return self.src - else: - raise ValueError(f"node({node.core_node.name}) does not belong to edge") - - def other_iface(self, node: "CanvasNode") -> Optional[Interface]: - if self.src == node: - return self.link.iface2 if self.link else None - elif self.dst == node: - return self.link.iface1 if self.link else None - else: - raise ValueError(f"node({node.core_node.name}) does not belong to edge") - - def iface(self, node: "CanvasNode") -> Optional[Interface]: - if self.src == node: - return self.link.iface1 if self.link else None - elif self.dst == node: - return self.link.iface2 if self.link 
else None - else: - raise ValueError(f"node({node.core_node.name}) does not belong to edge") - - -class CanvasWirelessEdge(Edge): - tag = tags.WIRELESS_EDGE - - def __init__( - self, - app: "Application", - src: "CanvasNode", - dst: "CanvasNode", - network_id: int, - token: str, - link: Link, - ) -> None: - logger.debug("drawing wireless link from node %s to node %s", src, dst) - super().__init__(app, src, dst) - self.src.wireless_edges.add(self) - self.dst.wireless_edges.add(self) - self.network_id: int = network_id - self.link: Link = link - self.token: str = token - self.width: float = WIRELESS_WIDTH - color = link.color if link.color else WIRELESS_COLOR - self.color: str = color - state = self.manager.show_wireless.state() - self.draw(state) - if link.label: - self.middle_label_text(link.label) - if self.src.hidden or self.dst.hidden: - self.hide() - self.set_binding() - self.arc_common_edges() - - def set_binding(self) -> None: - self.src.canvas.tag_bind(self.id, "", self.show_info) - if self.id2 is not None: - self.dst.canvas.tag_bind(self.id2, "", self.show_info) - - def show_info(self, _event: tk.Event) -> None: - self.app.display_info(WirelessEdgeInfoFrame, app=self.app, edge=self) - - def delete(self) -> None: - self.src.wireless_edges.discard(self) - self.dst.wireless_edges.remove(self) - super().delete() - - -class CanvasEdge(Edge): - """ - Canvas edge class - """ - - def __init__( - self, app: "Application", src: "CanvasNode", dst: "CanvasNode" = None - ) -> None: - """ - Create an instance of canvas edge object - """ - super().__init__(app, src, dst) - self.text_src: Optional[int] = None - self.text_dst: Optional[int] = None - self.asymmetric_link: Optional[Link] = None - self.throughput: Optional[float] = None - self.draw(tk.NORMAL) - - def is_customized(self) -> bool: - return self.width != EDGE_WIDTH or self.color != EDGE_COLOR - - def set_bindings(self) -> None: - if self.id: - show_context = functools.partial(self.show_context, self.src.canvas) - self.src.canvas.tag_bind(self.id, "", show_context) - self.src.canvas.tag_bind(self.id, "", self.show_info) - if self.id2: - show_context = functools.partial(self.show_context, self.dst.canvas) - self.dst.canvas.tag_bind(self.id2, "", show_context) - self.dst.canvas.tag_bind(self.id2, "", self.show_info) - - def iface_label(self, iface: Interface) -> str: - label = "" - if iface.name and self.manager.show_iface_names.get(): - label = f"{iface.name}" - if iface.ip4 and self.manager.show_ip4s.get(): - label = f"{label}\n" if label else "" - label += f"{iface.ip4}/{iface.ip4_mask}" - if iface.ip6 and self.manager.show_ip6s.get(): - label = f"{label}\n" if label else "" - label += f"{iface.ip6}/{iface.ip6_mask}" - return label - - def create_node_labels(self) -> tuple[str, str]: - label1 = None - if self.link.iface1: - label1 = self.iface_label(self.link.iface1) - label2 = None - if self.link.iface2: - label2 = self.iface_label(self.link.iface2) - return label1, label2 - - def draw_labels(self) -> None: - src_text, dst_text = self.create_node_labels() - self.src_label_text(src_text) - self.dst_label_text(dst_text) - if not self.linked_wireless: - self.draw_link_options() - - def redraw(self) -> None: - super().redraw() - self.draw_labels() - - def show(self) -> None: - super().show() - self.check_visibility() - - def check_visibility(self) -> None: - state = tk.NORMAL - hide_links = self.manager.show_links.state() == tk.HIDDEN - if self.linked_wireless or hide_links: - state = tk.HIDDEN - elif self.link.options: - hide_loss = 
self.manager.show_loss_links.state() == tk.HIDDEN - should_hide = self.link.options.loss >= EDGE_LOSS - if hide_loss and should_hide: - state = tk.HIDDEN - if self.id: - self.src.canvas.itemconfigure(self.id, state=state) - if self.id2: - self.dst.canvas.itemconfigure(self.id2, state=state) - - def set_throughput(self, throughput: float) -> None: - throughput = 0.001 * throughput - text = f"{throughput:.3f} kbps" - self.middle_label_text(text) - if throughput > self.manager.throughput_threshold: - color = self.manager.throughput_color - width = self.manager.throughput_width - else: - color = self.color - width = self.scaled_width() - self.src.canvas.itemconfig(self.id, fill=color, width=width) - if self.id2: - self.dst.canvas.itemconfig(self.id2, fill=color, width=width) - - def clear_throughput(self) -> None: - self.clear_middle_label() - if not self.linked_wireless: - self.draw_link_options() - - def complete(self, dst: "CanvasNode", link: Link = None) -> None: - logger.debug( - "completing wired link from node(%s) to node(%s)", - self.src.core_node.name, - dst.core_node.name, - ) - self.dst = dst - self.linked_wireless = self.src.is_wireless() or self.dst.is_wireless() - self.set_bindings() - self.check_wireless() - if link is None: - link = self.app.core.ifaces_manager.create_link(self) - if link.iface1 and not nodeutils.is_rj45(self.src.core_node): - iface1 = link.iface1 - self.src.ifaces[iface1.id] = iface1 - if link.iface2 and not nodeutils.is_rj45(self.dst.core_node): - iface2 = link.iface2 - self.dst.ifaces[iface2.id] = iface2 - self.token = create_edge_token(link) - self.link = link - self.src.edges.add(self) - self.dst.edges.add(self) - if not self.linked_wireless: - self.arc_common_edges() - self.draw_labels() - self.check_visibility() - self.app.core.save_edge(self) - self.src.canvas.organize() - if self.has_shadows(): - self.dst.canvas.organize() - self.manager.edges[self.token] = self - - def check_wireless(self) -> None: - if not self.linked_wireless: - return - if self.id: - self.src.canvas.itemconfig(self.id, state=tk.HIDDEN) - self.src.canvas.dtag(self.id, tags.EDGE) - if self.id2: - self.dst.canvas.itemconfig(self.id2, state=tk.HIDDEN) - self.dst.canvas.dtag(self.id2, tags.EDGE) - # add antenna to node - if self.src.is_wireless() and not self.dst.is_wireless(): - self.dst.add_antenna() - elif not self.src.is_wireless() and self.dst.is_wireless(): - self.src.add_antenna() - else: - self.src.add_antenna() - - def reset(self) -> None: - if self.middle_label: - self.src.canvas.delete(self.middle_label) - self.middle_label = None - if self.middle_label2: - self.dst.canvas.delete(self.middle_label2) - self.middle_label2 = None - if self.id: - self.src.canvas.itemconfig( - self.id, fill=self.color, width=self.scaled_width() - ) - if self.id2: - self.dst.canvas.itemconfig( - self.id2, fill=self.color, width=self.scaled_width() - ) - - def show_info(self, _event: tk.Event) -> None: - self.app.display_info(EdgeInfoFrame, app=self.app, edge=self) - - def show_context(self, canvas: "CanvasGraph", event: tk.Event) -> None: - context: tk.Menu = tk.Menu(canvas) - themes.style_menu(context) - context.add_command(label="Configure", command=self.click_configure) - context.add_command(label="Delete", command=self.click_delete) - state = tk.DISABLED if self.app.core.is_runtime() else tk.NORMAL - context.entryconfigure(1, state=state) - context.tk_popup(event.x_root, event.y_root) - - def click_delete(self) -> None: - self.delete() - - def click_configure(self) -> None: - dialog = 
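The set_throughput handler in the deleted CanvasEdge converts the raw value to kbps for the middle label and only switches to the highlight colour and width once the configured threshold is exceeded. A small sketch of that decision with plain arguments standing in for the manager and edge state; the function name and defaults are illustrative, not CORE's API.

def throughput_style(throughput_bps: float, threshold_kbps: float,
                     normal_color: str, normal_width: float,
                     highlight_color: str = "#FF0000",
                     highlight_width: float = 10.0):
    # label shows kbps with three decimals, matching the deleted GUI code
    kbps = throughput_bps * 0.001
    label = f"{kbps:.3f} kbps"
    if kbps > threshold_kbps:
        return label, highlight_color, highlight_width
    return label, normal_color, normal_width

print(throughput_style(250_000, 100.0, "#ff0000", 3))  # above threshold: highlighted
print(throughput_style(50_000, 100.0, "#ff0000", 3))   # below threshold: normal styling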
LinkConfigurationDialog(self.app, self) - dialog.show() - - def draw_link_options(self): - if not self.link.options: - return - options = self.link.options - asym_options = None - if self.asymmetric_link and self.asymmetric_link.options: - asym_options = self.asymmetric_link.options - lines = [] - # bandwidth - if options.bandwidth > 0: - bandwidth_line = bandwidth_text(options.bandwidth) - if asym_options and asym_options.bandwidth > 0: - bandwidth_line += f" / {bandwidth_text(asym_options.bandwidth)}" - lines.append(bandwidth_line) - # delay/jitter - dj_line = delay_jitter_text(options.delay, options.jitter) - if dj_line and asym_options: - asym_dj_line = delay_jitter_text(asym_options.delay, asym_options.jitter) - if asym_dj_line: - dj_line += f" / {asym_dj_line}" - if dj_line: - lines.append(dj_line) - # loss - if options.loss > 0: - loss_line = f"loss={options.loss}%" - if asym_options and asym_options.loss > 0: - loss_line += f" / loss={asym_options.loss}%" - lines.append(loss_line) - # duplicate - if options.dup > 0: - dup_line = f"dup={options.dup}%" - if asym_options and asym_options.dup > 0: - dup_line += f" / dup={asym_options.dup}%" - lines.append(dup_line) - label = "\n".join(lines) - self.middle_label_text(label) - - def delete(self) -> None: - self.src.edges.discard(self) - if self.dst: - self.dst.edges.discard(self) - if self.link.iface1 and not nodeutils.is_rj45(self.src.core_node): - del self.src.ifaces[self.link.iface1.id] - if self.link.iface2 and not nodeutils.is_rj45(self.dst.core_node): - del self.dst.ifaces[self.link.iface2.id] - if self.src.is_wireless(): - self.dst.delete_antenna() - if self.dst.is_wireless(): - self.src.delete_antenna() - self.app.core.deleted_canvas_edges([self]) - super().delete() - self.manager.edges.pop(self.token, None) diff --git a/daemon/core/gui/graph/enums.py b/daemon/core/gui/graph/enums.py deleted file mode 100644 index b292938f..00000000 --- a/daemon/core/gui/graph/enums.py +++ /dev/null @@ -1,18 +0,0 @@ -import enum - - -class GraphMode(enum.Enum): - SELECT = 0 - EDGE = 1 - PICKNODE = 2 - NODE = 3 - ANNOTATION = 4 - OTHER = 5 - - -class ScaleOption(enum.Enum): - NONE = 0 - UPPER_LEFT = 1 - CENTERED = 2 - SCALED = 3 - TILED = 4 diff --git a/daemon/core/gui/graph/graph.py b/daemon/core/gui/graph/graph.py deleted file mode 100644 index 1a701239..00000000 --- a/daemon/core/gui/graph/graph.py +++ /dev/null @@ -1,852 +0,0 @@ -import logging -import tkinter as tk -from copy import deepcopy -from pathlib import Path -from typing import TYPE_CHECKING, Any, Optional - -from PIL import Image -from PIL.ImageTk import PhotoImage - -from core.api.grpc.wrappers import Interface, Link -from core.gui import appconfig -from core.gui import nodeutils as nutils -from core.gui.dialogs.shapemod import ShapeDialog -from core.gui.graph import tags -from core.gui.graph.edges import EDGE_WIDTH, CanvasEdge -from core.gui.graph.enums import GraphMode, ScaleOption -from core.gui.graph.node import CanvasNode, ShadowNode -from core.gui.graph.shape import Shape -from core.gui.graph.shapeutils import ShapeType, is_draw_shape, is_marker - -logger = logging.getLogger(__name__) - -if TYPE_CHECKING: - from core.gui.app import Application - from core.gui.graph.manager import CanvasManager - from core.gui.coreclient import CoreClient - -ZOOM_IN: float = 1.1 -ZOOM_OUT: float = 0.9 -MOVE_NODE_MODES: set[GraphMode] = {GraphMode.NODE, GraphMode.SELECT} -MOVE_SHAPE_MODES: set[GraphMode] = {GraphMode.ANNOTATION, GraphMode.SELECT} -BACKGROUND_COLOR: str = "#cccccc" - - -class 
CanvasGraph(tk.Canvas): - def __init__( - self, - master: tk.BaseWidget, - app: "Application", - manager: "CanvasManager", - core: "CoreClient", - _id: int, - dimensions: tuple[int, int], - ) -> None: - super().__init__(master, highlightthickness=0, background=BACKGROUND_COLOR) - self.id: int = _id - self.app: "Application" = app - self.manager: "CanvasManager" = manager - self.core: "CoreClient" = core - self.selection: dict[int, int] = {} - self.select_box: Optional[Shape] = None - self.selected: Optional[int] = None - self.nodes: dict[int, CanvasNode] = {} - self.shadow_nodes: dict[int, ShadowNode] = {} - self.shapes: dict[int, Shape] = {} - self.shadow_core_nodes: dict[int, ShadowNode] = {} - - # map wireless/EMANE node to the set of MDRs connected to that node - self.wireless_network: dict[int, set[int]] = {} - - self.drawing_edge: Optional[CanvasEdge] = None - self.rect: Optional[int] = None - self.shape_drawing: bool = False - self.current_dimensions: tuple[int, int] = dimensions - self.ratio: float = 1.0 - self.offset: tuple[int, int] = (0, 0) - self.cursor: tuple[int, int] = (0, 0) - self.to_copy: list[CanvasNode] = [] - - # background related - self.wallpaper_id: Optional[int] = None - self.wallpaper: Optional[Image.Image] = None - self.wallpaper_drawn: Optional[PhotoImage] = None - self.wallpaper_file: str = "" - self.scale_option: tk.IntVar = tk.IntVar(value=1) - self.adjust_to_dim: tk.BooleanVar = tk.BooleanVar(value=False) - - # bindings - self.setup_bindings() - - # draw base canvas - self.draw_canvas() - self.draw_grid() - - def draw_canvas(self, dimensions: tuple[int, int] = None) -> None: - if self.rect is not None: - self.delete(self.rect) - if not dimensions: - dimensions = self.manager.default_dimensions - self.current_dimensions = dimensions - self.rect = self.create_rectangle( - 0, - 0, - *dimensions, - outline="#000000", - fill="#ffffff", - width=1, - tags="rectangle", - ) - self.configure(scrollregion=self.bbox(tk.ALL)) - - def setup_bindings(self) -> None: - """ - Bind any mouse events or hot keys to the matching action - """ - self.bind("", self.copy_selected) - self.bind("", self.paste_selected) - self.bind("", self.cut_selected) - self.bind("", self.delete_selected) - self.bind("", self.hide_selected) - self.bind("", self.click_press) - self.bind("", self.click_release) - self.bind("", self.click_motion) - self.bind("", self.delete_selected) - self.bind("", self.ctrl_click) - self.bind("", self.double_click) - self.bind("", self.zoom) - self.bind("", lambda e: self.zoom(e, ZOOM_IN)) - self.bind("", lambda e: self.zoom(e, ZOOM_OUT)) - self.bind("", lambda e: self.scan_mark(e.x, e.y)) - self.bind("", lambda e: self.scan_dragto(e.x, e.y, gain=1)) - - def get_shadow(self, node: CanvasNode) -> ShadowNode: - shadow_node = self.shadow_core_nodes.get(node.core_node.id) - if not shadow_node: - shadow_node = ShadowNode(self.app, self, node) - return shadow_node - - def get_actual_coords(self, x: float, y: float) -> tuple[float, float]: - actual_x = (x - self.offset[0]) / self.ratio - actual_y = (y - self.offset[1]) / self.ratio - return actual_x, actual_y - - def get_scaled_coords(self, x: float, y: float) -> tuple[float, float]: - scaled_x = (x * self.ratio) + self.offset[0] - scaled_y = (y * self.ratio) + self.offset[1] - return scaled_x, scaled_y - - def inside_canvas(self, x: float, y: float) -> tuple[bool, bool]: - x1, y1, x2, y2 = self.bbox(self.rect) - valid_x = x1 <= x <= x2 - valid_y = y1 <= y <= y2 - return valid_x and valid_y - - def valid_position(self, x1: 
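CanvasGraph above keeps a zoom ratio and a pan offset and converts between on-screen canvas coordinates and the "actual" coordinates stored with each node; the two conversions are inverses of each other. A tiny standalone sketch using plain functions instead of canvas methods:

def to_actual(x: float, y: float, ratio: float, offset: tuple) -> tuple:
    # screen -> stored coordinates: undo the pan, then the zoom
    return (x - offset[0]) / ratio, (y - offset[1]) / ratio

def to_scaled(x: float, y: float, ratio: float, offset: tuple) -> tuple:
    # stored -> screen coordinates: apply the zoom, then the pan
    return x * ratio + offset[0], y * ratio + offset[1]

ratio, offset = 2.0, (25.0, -10.0)
screen = to_scaled(200.0, 300.0, ratio, offset)
assert to_actual(*screen, ratio, offset) == (200.0, 300.0)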
int, y1: int, x2: int, y2: int) -> tuple[bool, bool]: - valid_topleft = self.inside_canvas(x1, y1) - valid_bottomright = self.inside_canvas(x2, y2) - return valid_topleft and valid_bottomright - - def draw_grid(self) -> None: - """ - Create grid. - """ - width, height = self.width_and_height() - width = int(width) - height = int(height) - for i in range(0, width, 27): - self.create_line(i, 0, i, height, dash=(2, 4), tags=tags.GRIDLINE) - for i in range(0, height, 27): - self.create_line(0, i, width, i, dash=(2, 4), tags=tags.GRIDLINE) - self.tag_lower(tags.GRIDLINE) - self.tag_lower(self.rect) - - def canvas_xy(self, event: tk.Event) -> tuple[float, float]: - """ - Convert window coordinate to canvas coordinate - """ - x = self.canvasx(event.x) - y = self.canvasy(event.y) - return x, y - - def get_selected(self, event: tk.Event) -> int: - """ - Retrieve the item id that is on the mouse position - """ - x, y = self.canvas_xy(event) - overlapping = self.find_overlapping(x, y, x, y) - selected = None - for _id in overlapping: - if self.drawing_edge and self.drawing_edge.id == _id: - continue - elif _id in self.nodes: - selected = _id - elif _id in self.shapes: - selected = _id - elif _id in self.shadow_nodes: - selected = _id - return selected - - def click_release(self, event: tk.Event) -> None: - """ - Draw a node or finish drawing an edge according to the current graph mode - """ - logger.debug("click release") - x, y = self.canvas_xy(event) - if not self.inside_canvas(x, y): - return - if self.manager.mode == GraphMode.ANNOTATION: - self.focus_set() - if self.shape_drawing: - shape = self.shapes[self.selected] - shape.shape_complete(x, y) - self.shape_drawing = False - elif self.manager.mode == GraphMode.SELECT: - self.focus_set() - if self.select_box: - x0, y0, x1, y1 = self.coords(self.select_box.id) - inside = [ - x - for x in self.find_enclosed(x0, y0, x1, y1) - if "node" in self.gettags(x) or "shape" in self.gettags(x) - ] - for i in inside: - self.select_object(i, True) - self.select_box.disappear() - self.select_box = None - else: - self.focus_set() - self.selected = self.get_selected(event) - logger.debug( - "click release selected(%s) mode(%s)", self.selected, self.manager.mode - ) - if self.manager.mode == GraphMode.EDGE: - self.handle_edge_release(event) - elif self.manager.mode == GraphMode.NODE: - self.add_node(x, y) - elif self.manager.mode == GraphMode.PICKNODE: - self.manager.mode = GraphMode.NODE - self.selected = None - - def handle_edge_release(self, _event: tk.Event) -> None: - # not drawing edge return - if not self.drawing_edge: - return - edge = self.drawing_edge - self.drawing_edge = None - # edge dst must be a node - logger.debug("current selected: %s", self.selected) - dst_node = self.nodes.get(self.selected) - if not dst_node: - edge.delete() - return - # check if node can be linked - if not edge.src.is_linkable(dst_node): - edge.delete() - return - # finalize edge creation - edge.drawing(dst_node.position()) - edge.complete(dst_node) - - def select_object(self, object_id: int, choose_multiple: bool = False) -> None: - """ - create a bounding box when a node is selected - """ - if not choose_multiple: - self.clear_selection() - - # draw a bounding box if node hasn't been selected yet - if object_id not in self.selection: - x0, y0, x1, y1 = self.bbox(object_id) - selection_id = self.create_rectangle( - (x0 - 6, y0 - 6, x1 + 6, y1 + 6), - activedash=True, - dash="-", - tags=tags.SELECTION, - ) - self.selection[object_id] = selection_id - else: - selection_id = 
self.selection.pop(object_id) - self.delete(selection_id) - - def clear_selection(self) -> None: - """ - Clear current selection boxes. - """ - for _id in self.selection.values(): - self.delete(_id) - self.selection.clear() - - def move_selection(self, object_id: int, x_offset: float, y_offset: float) -> None: - select_id = self.selection.get(object_id) - if select_id is not None: - self.move(select_id, x_offset, y_offset) - - def delete_selected_objects(self, _event: tk.Event = None) -> None: - edges = set() - nodes = [] - for object_id in self.selection: - # delete selection box - selection_id = self.selection[object_id] - self.delete(selection_id) - - # delete node and related edges - if object_id in self.nodes: - canvas_node = self.nodes.pop(object_id) - # delete related edges - while canvas_node.edges: - edge = canvas_node.edges.pop() - if edge in edges: - continue - edges.add(edge) - edge.delete() - # delete node - canvas_node.delete() - nodes.append(canvas_node) - - # delete shape - if object_id in self.shapes: - shape = self.shapes.pop(object_id) - shape.delete() - - self.selection.clear() - self.core.deleted_canvas_nodes(nodes) - - def hide_selected(self, _event: tk.Event = None) -> None: - for object_id in self.selection: - # delete selection box - selection_id = self.selection[object_id] - self.delete(selection_id) - # hide node and related edges - if object_id in self.nodes: - canvas_node = self.nodes[object_id] - canvas_node.hide() - - def show_hidden(self) -> None: - for node in self.nodes.values(): - if node.hidden: - node.show() - - def zoom(self, event: tk.Event, factor: float = None) -> None: - if not factor: - factor = ZOOM_IN if event.delta > 0 else ZOOM_OUT - event.x, event.y = self.canvasx(event.x), self.canvasy(event.y) - self.scale(tk.ALL, event.x, event.y, factor, factor) - self.configure(scrollregion=self.bbox(tk.ALL)) - self.ratio *= float(factor) - self.offset = ( - self.offset[0] * factor + event.x * (1 - factor), - self.offset[1] * factor + event.y * (1 - factor), - ) - logger.debug("ratio: %s", self.ratio) - logger.debug("offset: %s", self.offset) - self.app.statusbar.set_zoom(self.ratio) - if self.wallpaper: - self.redraw_wallpaper() - - def click_press(self, event: tk.Event) -> None: - """ - Start drawing an edge if mouse click is on a node - """ - x, y = self.canvas_xy(event) - if not self.inside_canvas(x, y): - return - - self.cursor = x, y - selected = self.get_selected(event) - logger.debug("click press(%s): %s", self.cursor, selected) - x_check = self.cursor[0] - self.offset[0] - y_check = self.cursor[1] - self.offset[1] - logger.debug("click press offset(%s, %s)", x_check, y_check) - is_node = selected in self.nodes - if self.manager.mode == GraphMode.EDGE and is_node: - node = self.nodes[selected] - self.drawing_edge = CanvasEdge(self.app, node) - self.organize() - - if self.manager.mode == GraphMode.ANNOTATION: - if is_marker(self.manager.annotation_type): - r = self.app.toolbar.marker_frame.size.get() - self.create_oval( - x - r, - y - r, - x + r, - y + r, - fill=self.app.toolbar.marker_frame.color, - outline="", - tags=(tags.MARKER, tags.ANNOTATION), - state=self.manager.show_annotations.state(), - ) - return - if selected is None: - shape = Shape(self.app, self, self.manager.annotation_type, x, y) - self.selected = shape.id - self.shape_drawing = True - self.shapes[shape.id] = shape - - if selected is not None: - if selected not in self.selection: - if selected in self.shapes: - shape = self.shapes[selected] - self.select_object(shape.id) - 
self.selected = selected - elif selected in self.nodes: - node = self.nodes[selected] - self.select_object(node.id) - self.selected = selected - logger.debug( - "selected node(%s), coords: (%s, %s)", - node.core_node.name, - node.core_node.position.x, - node.core_node.position.y, - ) - elif selected in self.shadow_nodes: - shadow_node = self.shadow_nodes[selected] - self.select_object(shadow_node.id) - self.selected = selected - logger.debug( - "selected shadow node(%s), coords: (%s, %s)", - shadow_node.node.core_node.name, - shadow_node.node.core_node.position.x, - shadow_node.node.core_node.position.y, - ) - else: - if self.manager.mode == GraphMode.SELECT: - shape = Shape(self.app, self, ShapeType.RECTANGLE, x, y) - self.select_box = shape - self.clear_selection() - - def ctrl_click(self, event: tk.Event) -> None: - # update cursor location - x, y = self.canvas_xy(event) - if not self.inside_canvas(x, y): - return - - self.cursor = x, y - - # handle multiple selections - logger.debug("control left click: %s", event) - selected = self.get_selected(event) - if ( - selected not in self.selection - and selected in self.shapes - or selected in self.nodes - ): - self.select_object(selected, choose_multiple=True) - - def click_motion(self, event: tk.Event) -> None: - x, y = self.canvas_xy(event) - if not self.inside_canvas(x, y): - if self.select_box: - self.select_box.delete() - self.select_box = None - if is_draw_shape(self.manager.annotation_type) and self.shape_drawing: - shape = self.shapes.pop(self.selected) - shape.delete() - self.shape_drawing = False - return - - x_offset = x - self.cursor[0] - y_offset = y - self.cursor[1] - self.cursor = x, y - - if self.manager.mode == GraphMode.EDGE and self.drawing_edge is not None: - self.drawing_edge.drawing(self.cursor) - if self.manager.mode == GraphMode.ANNOTATION: - if is_draw_shape(self.manager.annotation_type) and self.shape_drawing: - shape = self.shapes[self.selected] - shape.shape_motion(x, y) - return - elif is_marker(self.manager.annotation_type): - r = self.app.toolbar.marker_frame.size.get() - self.create_oval( - x - r, - y - r, - x + r, - y + r, - fill=self.app.toolbar.marker_frame.color, - outline="", - tags=(tags.MARKER, tags.ANNOTATION), - ) - return - - if self.manager.mode == GraphMode.EDGE: - return - - # move selected objects - if self.selection: - for selected_id in self.selection: - if self.manager.mode in MOVE_SHAPE_MODES and selected_id in self.shapes: - shape = self.shapes[selected_id] - shape.motion(x_offset, y_offset) - elif self.manager.mode in MOVE_NODE_MODES and selected_id in self.nodes: - node = self.nodes[selected_id] - node.motion(x_offset, y_offset, update=self.core.is_runtime()) - elif ( - self.manager.mode in MOVE_NODE_MODES - and selected_id in self.shadow_nodes - ): - shadow_node = self.shadow_nodes[selected_id] - shadow_node.motion(x_offset, y_offset) - else: - if self.select_box and self.manager.mode == GraphMode.SELECT: - self.select_box.shape_motion(x, y) - - def double_click(self, event: tk.Event) -> None: - selected = self.get_selected(event) - if selected is not None and selected in self.shapes: - shape = self.shapes[selected] - dialog = ShapeDialog(self.app, shape) - dialog.show() - - def add_node(self, x: float, y: float) -> None: - if self.selected is not None and self.selected not in self.shapes: - return - actual_x, actual_y = self.get_actual_coords(x, y) - core_node = self.core.create_node( - actual_x, - actual_y, - self.manager.node_draw.node_type, - self.manager.node_draw.model, - ) - if 
not core_node: - return - core_node.canvas = self.id - node = CanvasNode(self.app, self, x, y, core_node, self.manager.node_draw.image) - self.nodes[node.id] = node - self.core.set_canvas_node(core_node, node) - - def width_and_height(self) -> tuple[int, int]: - """ - retrieve canvas width and height in pixels - """ - x0, y0, x1, y1 = self.coords(self.rect) - canvas_w = abs(x0 - x1) - canvas_h = abs(y0 - y1) - return canvas_w, canvas_h - - def get_wallpaper_image(self) -> Image.Image: - width = int(self.wallpaper.width * self.ratio) - height = int(self.wallpaper.height * self.ratio) - image = self.wallpaper.resize((width, height), Image.ANTIALIAS) - return image - - def draw_wallpaper( - self, image: PhotoImage, x: float = None, y: float = None - ) -> None: - if x is None and y is None: - x1, y1, x2, y2 = self.bbox(self.rect) - x = (x1 + x2) / 2 - y = (y1 + y2) / 2 - self.wallpaper_id = self.create_image((x, y), image=image, tags=tags.WALLPAPER) - self.wallpaper_drawn = image - - def wallpaper_upper_left(self) -> None: - self.delete(self.wallpaper_id) - - # create new scaled image, cropped if needed - width, height = self.width_and_height() - image = self.get_wallpaper_image() - cropx = image.width - cropy = image.height - if image.width > width: - cropx = image.width - if image.height > height: - cropy = image.height - cropped = image.crop((0, 0, cropx, cropy)) - image = PhotoImage(cropped) - - # draw on canvas - x1, y1, _, _ = self.bbox(self.rect) - x = (cropx / 2) + x1 - y = (cropy / 2) + y1 - self.draw_wallpaper(image, x, y) - - def wallpaper_center(self) -> None: - """ - place the image at the center of canvas - """ - self.delete(self.wallpaper_id) - - # dimension of the cropped image - width, height = self.width_and_height() - image = self.get_wallpaper_image() - cropx = 0 - if image.width > width: - cropx = (image.width - width) / 2 - cropy = 0 - if image.height > height: - cropy = (image.height - height) / 2 - x1 = 0 + cropx - y1 = 0 + cropy - x2 = image.width - cropx - y2 = image.height - cropy - cropped = image.crop((x1, y1, x2, y2)) - image = PhotoImage(cropped) - self.draw_wallpaper(image) - - def wallpaper_scaled(self) -> None: - """ - scale image based on canvas dimension - """ - self.delete(self.wallpaper_id) - canvas_w, canvas_h = self.width_and_height() - image = self.wallpaper.resize((int(canvas_w), int(canvas_h)), Image.ANTIALIAS) - image = PhotoImage(image) - self.draw_wallpaper(image) - - def resize_to_wallpaper(self) -> None: - self.delete(self.wallpaper_id) - image = PhotoImage(self.wallpaper) - self.redraw_canvas((image.width(), image.height())) - self.draw_wallpaper(image) - - def redraw_canvas(self, dimensions: tuple[int, int] = None) -> None: - logger.debug("redrawing canvas to dimensions: %s", dimensions) - - # reset scale and move back to original position - logger.debug("resetting scaling: %s %s", self.ratio, self.offset) - factor = 1 / self.ratio - self.scale(tk.ALL, self.offset[0], self.offset[1], factor, factor) - self.move(tk.ALL, -self.offset[0], -self.offset[1]) - - # reset ratio and offset - self.ratio = 1.0 - self.offset = (0, 0) - - # redraw canvas rectangle - self.draw_canvas(dimensions) - - # redraw gridlines to new canvas size - self.delete(tags.GRIDLINE) - self.draw_grid() - self.app.manager.show_grid.click_handler() - - def redraw_wallpaper(self) -> None: - if self.adjust_to_dim.get(): - logger.debug("drawing wallpaper to canvas dimensions") - self.resize_to_wallpaper() - else: - option = ScaleOption(self.scale_option.get()) - 
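# The configured ScaleOption decides how the wallpaper is drawn when it is not
# resized to the canvas: anchored at the upper left, centered with cropping,
# stretched to the canvas dimensions, or tiled (tiling is logged below as not
# yet implemented).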
logger.debug("drawing canvas using scaling option: %s", option) - if option == ScaleOption.UPPER_LEFT: - self.wallpaper_upper_left() - elif option == ScaleOption.CENTERED: - self.wallpaper_center() - elif option == ScaleOption.SCALED: - self.wallpaper_scaled() - elif option == ScaleOption.TILED: - logger.warning("tiled background not implemented yet") - self.organize() - - def organize(self) -> None: - for tag in tags.ORGANIZE_TAGS: - self.tag_raise(tag) - - def set_wallpaper(self, filename: Optional[str]) -> None: - logger.info("setting canvas(%s) background: %s", self.id, filename) - if filename: - img = Image.open(filename) - self.wallpaper = img - self.wallpaper_file = filename - self.redraw_wallpaper() - else: - if self.wallpaper_id is not None: - self.delete(self.wallpaper_id) - self.wallpaper = None - self.wallpaper_file = None - - def is_selection_mode(self) -> bool: - return self.manager.mode == GraphMode.SELECT - - def create_edge(self, src: CanvasNode, dst: CanvasNode) -> CanvasEdge: - """ - create an edge between source node and destination node - """ - edge = CanvasEdge(self.app, src) - edge.complete(dst) - return edge - - def copy_selected(self, _event: tk.Event = None) -> None: - if self.core.is_runtime(): - logger.debug("copy is disabled during runtime state") - return - if self.selection: - logger.debug("to copy nodes: %s", self.selection) - self.to_copy.clear() - for node_id in self.selection.keys(): - canvas_node = self.nodes[node_id] - self.to_copy.append(canvas_node) - - def cut_selected(self, _event: tk.Event = None) -> None: - if self.core.is_runtime(): - logger.debug("cut is disabled during runtime state") - return - self.copy_selected() - self.delete_selected() - - def delete_selected(self, _event: tk.Event = None) -> None: - """ - delete selected nodes and any data that relates to it - """ - logger.debug("press delete key") - if self.core.is_runtime(): - logger.debug("node deletion is disabled during runtime state") - return - self.delete_selected_objects() - self.app.default_info() - - def paste_selected(self, _event: tk.Event = None) -> None: - if self.core.is_runtime(): - logger.debug("paste is disabled during runtime state") - return - # maps original node canvas id to copy node canvas id - copy_map = {} - # the edges that will be copy over - to_copy_edges = set() - to_copy_ids = {x.id for x in self.to_copy} - for canvas_node in self.to_copy: - core_node = canvas_node.core_node - actual_x = core_node.position.x + 50 - actual_y = core_node.position.y + 50 - scaled_x, scaled_y = self.get_scaled_coords(actual_x, actual_y) - copy = self.core.create_node( - actual_x, actual_y, core_node.type, core_node.model - ) - if not copy: - continue - node = CanvasNode( - self.app, self, scaled_x, scaled_y, copy, canvas_node.image - ) - # copy configurations and services - node.core_node.services = core_node.services.copy() - node.core_node.config_services = core_node.config_services.copy() - node.core_node.emane_model_configs = deepcopy(core_node.emane_model_configs) - node.core_node.wlan_config = deepcopy(core_node.wlan_config) - node.core_node.mobility_config = deepcopy(core_node.mobility_config) - node.core_node.service_configs = deepcopy(core_node.service_configs) - node.core_node.service_file_configs = deepcopy( - core_node.service_file_configs - ) - node.core_node.config_service_configs = deepcopy( - core_node.config_service_configs - ) - - copy_map[canvas_node.id] = node.id - self.nodes[node.id] = node - self.core.set_canvas_node(copy, node) - for edge in 
canvas_node.edges: - if edge.src not in to_copy_ids or edge.dst not in to_copy_ids: - if canvas_node.id == edge.src: - dst_node = self.nodes[edge.dst] - copy_edge = self.create_edge(node, dst_node) - elif canvas_node.id == edge.dst: - src_node = self.nodes[edge.src] - copy_edge = self.create_edge(src_node, node) - else: - continue - copy_link = copy_edge.link - iface1_id = copy_link.iface1.id if copy_link.iface1 else None - iface2_id = copy_link.iface2.id if copy_link.iface2 else None - options = edge.link.options - if options: - copy_edge.link.options = deepcopy(options) - if options and options.unidirectional: - asym_iface1 = None - if iface1_id is not None: - asym_iface1 = Interface(id=iface1_id) - asym_iface2 = None - if iface2_id is not None: - asym_iface2 = Interface(id=iface2_id) - copy_edge.asymmetric_link = Link( - node1_id=copy_link.node2_id, - node2_id=copy_link.node1_id, - iface1=asym_iface2, - iface2=asym_iface1, - options=deepcopy(edge.asymmetric_link.options), - ) - copy_edge.redraw() - else: - to_copy_edges.add(edge) - - # copy link and link config - for edge in to_copy_edges: - src_node_id = copy_map[edge.src] - dst_node_id = copy_map[edge.dst] - src_node_copy = self.nodes[src_node_id] - dst_node_copy = self.nodes[dst_node_id] - copy_edge = self.create_edge(src_node_copy, dst_node_copy) - copy_link = copy_edge.link - iface1_id = copy_link.iface1.id if copy_link.iface1 else None - iface2_id = copy_link.iface2.id if copy_link.iface2 else None - options = edge.link.options - if options: - copy_link.options = deepcopy(options) - if options and options.unidirectional: - asym_iface1 = None - if iface1_id is not None: - asym_iface1 = Interface(id=iface1_id) - asym_iface2 = None - if iface2_id is not None: - asym_iface2 = Interface(id=iface2_id) - copy_edge.asymmetric_link = Link( - node1_id=copy_link.node2_id, - node2_id=copy_link.node1_id, - iface1=asym_iface2, - iface2=asym_iface1, - options=deepcopy(edge.asymmetric_link.options), - ) - copy_edge.redraw() - self.itemconfig( - copy_edge.id, - width=self.itemcget(edge.id, "width"), - fill=self.itemcget(edge.id, "fill"), - ) - self.tag_raise(tags.NODE) - - def scale_graph(self) -> None: - for node_id, canvas_node in self.nodes.items(): - image = nutils.get_icon(canvas_node.core_node, self.app) - self.itemconfig(node_id, image=image) - canvas_node.image = image - canvas_node.scale_text() - canvas_node.scale_antennas() - for edge_id in self.find_withtag(tags.EDGE): - self.itemconfig(edge_id, width=int(EDGE_WIDTH * self.app.app_scale)) - - def get_metadata(self) -> dict[str, Any]: - wallpaper_path = None - if self.wallpaper_file: - wallpaper = Path(self.wallpaper_file) - if appconfig.BACKGROUNDS_PATH == wallpaper.parent: - wallpaper_path = wallpaper.name - else: - wallpaper_path = str(wallpaper) - return dict( - id=self.id, - wallpaper=wallpaper_path, - wallpaper_style=self.scale_option.get(), - fit_image=self.adjust_to_dim.get(), - dimensions=self.current_dimensions, - ) - - def parse_metadata(self, config: dict[str, Any]) -> None: - fit_image = config.get("fit_image", False) - self.adjust_to_dim.set(fit_image) - wallpaper_style = config.get("wallpaper_style", 1) - self.scale_option.set(wallpaper_style) - dimensions = config.get("dimensions") - if dimensions: - self.redraw_canvas(dimensions) - wallpaper = config.get("wallpaper") - if wallpaper: - wallpaper = Path(wallpaper) - if not wallpaper.is_file(): - wallpaper = appconfig.BACKGROUNDS_PATH.joinpath(wallpaper) - logger.info("canvas(%s), wallpaper: %s", self.id, wallpaper) - if 
wallpaper.is_file(): - self.set_wallpaper(str(wallpaper)) - else: - self.app.show_error( - "Background Error", f"background file not found: {wallpaper}" - ) diff --git a/daemon/core/gui/graph/manager.py b/daemon/core/gui/graph/manager.py deleted file mode 100644 index b2745f5c..00000000 --- a/daemon/core/gui/graph/manager.py +++ /dev/null @@ -1,434 +0,0 @@ -import json -import logging -import tkinter as tk -from collections.abc import ValuesView -from copy import deepcopy -from tkinter import BooleanVar, messagebox, ttk -from typing import TYPE_CHECKING, Any, Literal, Optional - -from core.api.grpc.wrappers import Link, LinkType, Node, Session, ThroughputsEvent -from core.gui import nodeutils as nutils -from core.gui.graph import tags -from core.gui.graph.edges import ( - CanvasEdge, - CanvasWirelessEdge, - create_edge_token, - create_wireless_token, -) -from core.gui.graph.enums import GraphMode -from core.gui.graph.graph import CanvasGraph -from core.gui.graph.node import CanvasNode -from core.gui.graph.shape import Shape -from core.gui.graph.shapeutils import ShapeType -from core.gui.nodeutils import NodeDraw - -logger = logging.getLogger(__name__) - -if TYPE_CHECKING: - from core.gui.app import Application - from core.gui.coreclient import CoreClient - - -class ShowVar(BooleanVar): - def __init__(self, manager: "CanvasManager", tag: str, value: bool) -> None: - super().__init__(value=value) - self.manager: "CanvasManager" = manager - self.tag: str = tag - - def state(self) -> Literal["normal", "hidden"]: - return tk.NORMAL if self.get() else tk.HIDDEN - - def click_handler(self) -> None: - for canvas in self.manager.all(): - canvas.itemconfigure(self.tag, state=self.state()) - - -class ShowNodeLabels(ShowVar): - def click_handler(self) -> None: - state = self.state() - for canvas in self.manager.all(): - for node in canvas.nodes.values(): - if not node.hidden: - node.set_label(state) - - -class ShowLinks(ShowVar): - def click_handler(self) -> None: - for edge in self.manager.edges.values(): - if not edge.hidden: - edge.check_visibility() - - -class ShowLinkLabels(ShowVar): - def click_handler(self) -> None: - state = self.state() - for edge in self.manager.edges.values(): - if not edge.hidden: - edge.set_labels(state) - - -class CanvasManager: - def __init__( - self, master: tk.BaseWidget, app: "Application", core: "CoreClient" - ) -> None: - self.master: tk.BaseWidget = master - self.app: "Application" = app - self.core: "CoreClient" = core - - # canvas interactions - self.mode: GraphMode = GraphMode.SELECT - self.annotation_type: Optional[ShapeType] = None - self.node_draw: Optional[NodeDraw] = None - self.canvases: dict[int, CanvasGraph] = {} - - # global edge management - self.edges: dict[str, CanvasEdge] = {} - self.wireless_edges: dict[str, CanvasWirelessEdge] = {} - - # global canvas settings - self.default_dimensions: tuple[int, int] = ( - self.app.guiconfig.preferences.width, - self.app.guiconfig.preferences.height, - ) - self.show_node_labels: ShowVar = ShowNodeLabels( - self, tags.NODE_LABEL, value=True - ) - self.show_link_labels: ShowVar = ShowLinkLabels( - self, tags.LINK_LABEL, value=True - ) - self.show_links: ShowVar = ShowLinks(self, tags.EDGE, value=True) - self.show_wireless: ShowVar = ShowVar(self, tags.WIRELESS_EDGE, value=True) - self.show_grid: ShowVar = ShowVar(self, tags.GRIDLINE, value=True) - self.show_annotations: ShowVar = ShowVar(self, tags.ANNOTATION, value=True) - self.show_loss_links: ShowVar = ShowLinks(self, tags.LOSS_EDGES, value=True) - 
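# Each ShowVar above backs a visibility toggle (presumably wired to menu
# checkbuttons elsewhere in the GUI): flipping one invokes its click_handler,
# which updates every canvas item carrying the matching tag. The plain
# BooleanVars below appear to control label content rather than canvas item
# visibility.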
self.show_iface_names: BooleanVar = BooleanVar(value=False) - self.show_ip4s: BooleanVar = BooleanVar(value=True) - self.show_ip6s: BooleanVar = BooleanVar(value=True) - - # throughput settings - self.throughput_threshold: float = 250.0 - self.throughput_width: int = 10 - self.throughput_color: str = "#FF0000" - - # widget - self.notebook: Optional[ttk.Notebook] = None - self.canvas_ids: dict[str, int] = {} - self.unique_ids: dict[int, str] = {} - self.draw() - - self.setup_bindings() - # start with a single tab by default - self.add_canvas() - - def setup_bindings(self) -> None: - self.notebook.bind("<>", self.tab_change) - - def tab_change(self, _event: tk.Event) -> None: - # ignore tab change events before tab data has been setup - unique_id = self.notebook.select() - if not unique_id or unique_id not in self.canvas_ids: - return - canvas = self.current() - self.app.statusbar.set_zoom(canvas.ratio) - - def select(self, tab_id: int): - unique_id = self.unique_ids.get(tab_id) - self.notebook.select(unique_id) - - def draw(self) -> None: - self.notebook = ttk.Notebook(self.master) - self.notebook.grid(sticky=tk.NSEW, pady=1) - - def _next_id(self) -> int: - _id = 1 - canvas_ids = set(self.canvas_ids.values()) - while _id in canvas_ids: - _id += 1 - return _id - - def current(self) -> CanvasGraph: - unique_id = self.notebook.select() - canvas_id = self.canvas_ids[unique_id] - return self.get(canvas_id) - - def all(self) -> ValuesView[CanvasGraph]: - return self.canvases.values() - - def get(self, canvas_id: int) -> CanvasGraph: - canvas = self.canvases.get(canvas_id) - if not canvas: - canvas = self.add_canvas(canvas_id) - return canvas - - def add_canvas(self, canvas_id: int = None) -> CanvasGraph: - # create tab frame - tab = ttk.Frame(self.notebook, padding=0) - tab.grid(sticky=tk.NSEW) - tab.columnconfigure(0, weight=1) - tab.rowconfigure(0, weight=1) - if canvas_id is None: - canvas_id = self._next_id() - self.notebook.add(tab, text=f"Canvas {canvas_id}") - unique_id = self.notebook.tabs()[-1] - logger.info("creating canvas(%s)", canvas_id) - self.canvas_ids[unique_id] = canvas_id - self.unique_ids[canvas_id] = unique_id - - # create canvas - canvas = CanvasGraph( - tab, self.app, self, self.core, canvas_id, self.default_dimensions - ) - canvas.grid(sticky=tk.NSEW) - self.canvases[canvas_id] = canvas - - # add scrollbars - scroll_y = ttk.Scrollbar(tab, command=canvas.yview) - scroll_y.grid(row=0, column=1, sticky=tk.NS) - scroll_x = ttk.Scrollbar(tab, orient=tk.HORIZONTAL, command=canvas.xview) - scroll_x.grid(row=1, column=0, sticky=tk.EW) - canvas.configure(xscrollcommand=scroll_x.set) - canvas.configure(yscrollcommand=scroll_y.set) - return canvas - - def delete_canvas(self) -> None: - if len(self.notebook.tabs()) == 1: - messagebox.showinfo("Canvas", "Cannot delete last canvas", parent=self.app) - return - unique_id = self.notebook.select() - self.notebook.forget(unique_id) - canvas_id = self.canvas_ids.pop(unique_id) - canvas = self.canvases.pop(canvas_id) - edges = set() - for node in canvas.nodes.values(): - node.delete() - while node.edges: - edge = node.edges.pop() - if edge in edges: - continue - edges.add(edge) - edge.delete() - - def join(self, session: Session) -> None: - # clear out all canvases - for canvas_id in self.notebook.tabs(): - self.notebook.forget(canvas_id) - self.canvases.clear() - self.canvas_ids.clear() - self.unique_ids.clear() - self.edges.clear() - self.wireless_edges.clear() - logger.info("cleared canvases") - - # reset settings - 
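# Joining a session restores every display toggle to its default before the
# session is redrawn, so state left over from a previously joined session does
# not leak into the new one.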
self.show_node_labels.set(True) - self.show_link_labels.set(True) - self.show_grid.set(True) - self.show_annotations.set(True) - self.show_iface_names.set(False) - self.show_ip4s.set(True) - self.show_ip6s.set(True) - self.show_loss_links.set(True) - self.mode = GraphMode.SELECT - self.annotation_type = None - self.node_draw = None - - # draw session - self.draw_session(session) - - def draw_session(self, session: Session) -> None: - # draw canvas configurations and shapes - self.parse_metadata_canvas(session.metadata) - self.parse_metadata_shapes(session.metadata) - - # create session nodes - for core_node in session.nodes.values(): - # add node, avoiding ignored nodes - if nutils.should_ignore(core_node): - continue - self.add_core_node(core_node) - - # organize canvas tabs - canvas_ids = sorted(self.canvases) - for index, canvas_id in enumerate(canvas_ids): - canvas = self.canvases[canvas_id] - self.notebook.insert(index, canvas.master) - - # draw existing links - for link in session.links: - node1 = self.core.get_canvas_node(link.node1_id) - node2 = self.core.get_canvas_node(link.node2_id) - if link.type == LinkType.WIRELESS: - self.add_wireless_edge(node1, node2, link) - else: - self.add_wired_edge(node1, node2, link) - - # organize canvas order - for canvas in self.canvases.values(): - canvas.organize() - - # parse metada for edge configs and hidden nodes - self.parse_metadata_edges(session.metadata) - self.parse_metadata_hidden(session.metadata) - - # create a default canvas if none were created prior - if not self.canvases: - self.add_canvas() - - def redraw_canvas(self, dimensions: tuple[int, int]) -> None: - canvas = self.current() - canvas.redraw_canvas(dimensions) - if canvas.wallpaper: - canvas.redraw_wallpaper() - - def get_metadata(self) -> dict[str, Any]: - canvases = [x.get_metadata() for x in self.all()] - return dict(gridlines=self.show_grid.get(), canvases=canvases) - - def parse_metadata_canvas(self, metadata: dict[str, Any]) -> None: - # canvas setting - canvas_config = metadata.get("canvas") - logger.debug("canvas metadata: %s", canvas_config) - if not canvas_config: - return - canvas_config = json.loads(canvas_config) - # get configured dimensions and gridlines option - gridlines = canvas_config.get("gridlines", True) - self.show_grid.set(gridlines) - - # get background configurations - for canvas_config in canvas_config.get("canvases", []): - canvas_id = canvas_config.get("id") - if canvas_id is None: - logger.error("canvas config id not provided") - continue - canvas = self.get(canvas_id) - canvas.parse_metadata(canvas_config) - - def parse_metadata_shapes(self, metadata: dict[str, Any]) -> None: - # load saved shapes - shapes_config = metadata.get("shapes") - if not shapes_config: - return - shapes_config = json.loads(shapes_config) - for shape_config in shapes_config: - logger.debug("loading shape: %s", shape_config) - Shape.from_metadata(self.app, shape_config) - - def parse_metadata_edges(self, metadata: dict[str, Any]) -> None: - # load edges config - edges_config = metadata.get("edges") - if not edges_config: - return - edges_config = json.loads(edges_config) - logger.info("edges config: %s", edges_config) - for edge_config in edges_config: - edge_token = edge_config["token"] - edge = self.core.links.get(edge_token) - if edge: - edge.width = edge_config["width"] - edge.color = edge_config["color"] - edge.redraw() - else: - logger.warning("invalid edge token to configure: %s", edge_token) - - def parse_metadata_hidden(self, metadata: dict[str, Any]) -> None: 
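# Hidden nodes are stored in session metadata as a JSON-encoded list of node
# ids; each id is resolved back to its canvas node and hidden, with unknown
# ids logged and skipped.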
- # read hidden nodes - hidden_config = metadata.get("hidden") - if not hidden_config: - return - hidden_config = json.loads(hidden_config) - for node_id in hidden_config: - canvas_node = self.core.canvas_nodes.get(node_id) - if canvas_node: - canvas_node.hide() - else: - logger.warning("invalid node to hide: %s", node_id) - - def add_core_node(self, core_node: Node) -> None: - # get canvas tab for node - canvas_id = core_node.canvas if core_node.canvas > 0 else 1 - logger.info("adding core node canvas(%s): %s", core_node.name, canvas_id) - canvas = self.get(canvas_id) - image = nutils.get_icon(core_node, self.app) - x = core_node.position.x - y = core_node.position.y - node = CanvasNode(self.app, canvas, x, y, core_node, image) - canvas.nodes[node.id] = node - self.core.set_canvas_node(core_node, node) - - def set_throughputs(self, throughputs_event: ThroughputsEvent): - for iface_throughput in throughputs_event.iface_throughputs: - node_id = iface_throughput.node_id - iface_id = iface_throughput.iface_id - throughput = iface_throughput.throughput - iface_to_edge_id = (node_id, iface_id) - edge = self.core.iface_to_edge.get(iface_to_edge_id) - if edge: - edge.set_throughput(throughput) - - def clear_throughputs(self) -> None: - for edge in self.edges.values(): - edge.clear_throughput() - - def stopped_session(self) -> None: - # clear wireless edges - for edge in self.wireless_edges.values(): - edge.delete() - self.wireless_edges.clear() - self.clear_throughputs() - - def update_wired_edge(self, link: Link) -> None: - token = create_edge_token(link) - edge = self.edges.get(token) - if edge: - edge.link.options = deepcopy(link.options) - edge.draw_link_options() - edge.check_visibility() - - def delete_wired_edge(self, link: Link) -> None: - token = create_edge_token(link) - edge = self.edges.get(token) - if edge: - edge.delete() - - def add_wired_edge(self, src: CanvasNode, dst: CanvasNode, link: Link) -> None: - token = create_edge_token(link) - if token in self.edges and link.options.unidirectional: - edge = self.edges[token] - edge.asymmetric_link = link - edge.redraw() - elif token not in self.edges: - edge = CanvasEdge(self.app, src, dst) - edge.complete(dst, link) - - def add_wireless_edge(self, src: CanvasNode, dst: CanvasNode, link: Link) -> None: - network_id = link.network_id if link.network_id else None - token = create_wireless_token(src.id, dst.id, network_id) - if token in self.wireless_edges: - logger.warning("ignoring link that already exists: %s", link) - return - edge = CanvasWirelessEdge(self.app, src, dst, network_id, token, link) - self.wireless_edges[token] = edge - - def delete_wireless_edge( - self, src: CanvasNode, dst: CanvasNode, link: Link - ) -> None: - network_id = link.network_id if link.network_id else None - token = create_wireless_token(src.id, dst.id, network_id) - if token not in self.wireless_edges: - return - edge = self.wireless_edges.pop(token) - edge.delete() - - def update_wireless_edge( - self, src: CanvasNode, dst: CanvasNode, link: Link - ) -> None: - if not link.label: - return - network_id = link.network_id if link.network_id else None - token = create_wireless_token(src.id, dst.id, network_id) - if token not in self.wireless_edges: - self.add_wireless_edge(src, dst, link) - else: - edge = self.wireless_edges[token] - edge.middle_label_text(link.label) diff --git a/daemon/core/gui/graph/node.py b/daemon/core/gui/graph/node.py deleted file mode 100644 index 0cfbf2e9..00000000 --- a/daemon/core/gui/graph/node.py +++ /dev/null @@ -1,588 +0,0 @@ 
-import functools -import logging -import tkinter as tk -from pathlib import Path -from typing import TYPE_CHECKING, Optional - -import grpc -from PIL.ImageTk import PhotoImage - -from core.api.grpc.wrappers import Interface, Node, NodeType, ServiceAction -from core.gui import images -from core.gui import nodeutils as nutils -from core.gui import themes -from core.gui.dialogs.emaneconfig import EmaneConfigDialog -from core.gui.dialogs.mobilityconfig import MobilityConfigDialog -from core.gui.dialogs.nodeconfig import NodeConfigDialog -from core.gui.dialogs.nodeconfigservice import NodeConfigServiceDialog -from core.gui.dialogs.nodeservice import NodeServiceDialog -from core.gui.dialogs.wirelessconfig import WirelessConfigDialog -from core.gui.dialogs.wlanconfig import WlanConfigDialog -from core.gui.frames.node import NodeInfoFrame -from core.gui.graph import tags -from core.gui.graph.edges import CanvasEdge, CanvasWirelessEdge -from core.gui.graph.tooltip import CanvasTooltip -from core.gui.images import ImageEnum - -logger = logging.getLogger(__name__) - -if TYPE_CHECKING: - from core.gui.app import Application - from core.gui.graph.graph import CanvasGraph - -NODE_TEXT_OFFSET: int = 5 - - -class CanvasNode: - def __init__( - self, - app: "Application", - canvas: "CanvasGraph", - x: float, - y: float, - core_node: Node, - image: PhotoImage, - ): - self.app: "Application" = app - self.canvas: "CanvasGraph" = canvas - self.image: PhotoImage = image - self.core_node: Node = core_node - self.id: int = self.canvas.create_image( - x, y, anchor=tk.CENTER, image=self.image, tags=tags.NODE - ) - label_y = self._get_label_y() - label = self.get_label() - self.text_id: int = self.canvas.create_text( - x, - label_y, - text=label, - tags=tags.NODE_LABEL, - font=self.app.icon_text_font, - fill="#0000CD", - state=self.app.manager.show_node_labels.state(), - ) - self.tooltip: CanvasTooltip = CanvasTooltip(self.canvas) - self.edges: set[CanvasEdge] = set() - self.ifaces: dict[int, Interface] = {} - self.wireless_edges: set[CanvasWirelessEdge] = set() - self.antennas: list[int] = [] - self.antenna_images: dict[int, PhotoImage] = {} - self.hidden: bool = False - self.setup_bindings() - self.context: tk.Menu = tk.Menu(self.canvas) - themes.style_menu(self.context) - - def position(self) -> tuple[int, int]: - return self.canvas.coords(self.id) - - def next_iface_id(self) -> int: - i = 0 - while i in self.ifaces: - i += 1 - return i - - def setup_bindings(self) -> None: - self.canvas.tag_bind(self.id, "", self.double_click) - self.canvas.tag_bind(self.id, "", self.on_enter) - self.canvas.tag_bind(self.id, "", self.on_leave) - self.canvas.tag_bind(self.id, "", self.show_context) - self.canvas.tag_bind(self.id, "", self.show_info) - - def delete(self) -> None: - logger.debug("Delete canvas node for %s", self.core_node) - self.canvas.delete(self.id) - self.canvas.delete(self.text_id) - self.delete_antennas() - - def add_antenna(self) -> None: - x, y = self.position() - offset = len(self.antennas) * 8 * self.app.app_scale - img = self.app.get_enum_icon(ImageEnum.ANTENNA, width=images.ANTENNA_SIZE) - antenna_id = self.canvas.create_image( - x - 16 + offset, - y - int(23 * self.app.app_scale), - anchor=tk.CENTER, - image=img, - tags=tags.ANTENNA, - ) - self.antennas.append(antenna_id) - self.antenna_images[antenna_id] = img - - def delete_antenna(self) -> None: - """ - delete one antenna - """ - logger.debug("Delete an antenna on %s", self.core_node.name) - if self.antennas: - antenna_id = self.antennas.pop() - 
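# Remove the most recently added antenna: pop its canvas item id, delete the
# image from the canvas, and drop the cached PhotoImage reference so it can be
# garbage collected.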
self.canvas.delete(antenna_id) - self.antenna_images.pop(antenna_id, None) - - def delete_antennas(self) -> None: - """ - delete all antennas - """ - logger.debug("Remove all antennas for %s", self.core_node.name) - for antenna_id in self.antennas: - self.canvas.delete(antenna_id) - self.antennas.clear() - self.antenna_images.clear() - - def get_label(self) -> str: - label = self.core_node.name - if self.core_node.server: - label = f"{self.core_node.name}({self.core_node.server})" - return label - - def redraw(self) -> None: - self.canvas.itemconfig(self.id, image=self.image) - label = self.get_label() - self.canvas.itemconfig(self.text_id, text=label) - for edge in self.edges: - edge.redraw() - - def _get_label_y(self) -> int: - image_box = self.canvas.bbox(self.id) - return image_box[3] + NODE_TEXT_OFFSET - - def scale_text(self) -> None: - text_bound = self.canvas.bbox(self.text_id) - prev_y = (text_bound[3] + text_bound[1]) / 2 - new_y = self._get_label_y() - self.canvas.move(self.text_id, 0, new_y - prev_y) - - def move(self, x: float, y: float) -> None: - x, y = self.canvas.get_scaled_coords(x, y) - current_x, current_y = self.position() - x_offset = x - current_x - y_offset = y - current_y - self.motion(x_offset, y_offset, update=False) - - def motion(self, x_offset: float, y_offset: float, update: bool = True) -> None: - original_position = self.position() - self.canvas.move(self.id, x_offset, y_offset) - - # check new position - bbox = self.canvas.bbox(self.id) - if not self.canvas.valid_position(*bbox): - self.canvas.coords(self.id, original_position) - return - - # move test and selection box - self.canvas.move(self.text_id, x_offset, y_offset) - self.canvas.move_selection(self.id, x_offset, y_offset) - - # move antennae - for antenna_id in self.antennas: - self.canvas.move(antenna_id, x_offset, y_offset) - - # move edges - for edge in self.edges: - edge.move_node(self) - for edge in self.wireless_edges: - edge.move_node(self) - - # set actual coords for node and update core is running - pos = self.position() - real_x, real_y = self.canvas.get_actual_coords(*pos) - self.core_node.position.x = real_x - self.core_node.position.y = real_y - if self.app.core.is_runtime() and update: - self.app.core.edit_node(self.core_node) - - def on_enter(self, event: tk.Event) -> None: - is_runtime = self.app.core.is_runtime() - has_observer = self.app.core.observer is not None - is_container = nutils.is_container(self.core_node) - if is_runtime and has_observer and is_container: - self.tooltip.text.set("waiting...") - self.tooltip.on_enter(event) - try: - output = self.app.core.run(self.core_node.id) - self.tooltip.text.set(output) - except grpc.RpcError as e: - self.app.show_grpc_exception("Observer Error", e) - - def on_leave(self, event: tk.Event) -> None: - self.tooltip.on_leave(event) - - def double_click(self, event: tk.Event) -> None: - if self.app.core.is_runtime(): - if nutils.is_container(self.core_node): - self.canvas.core.launch_terminal(self.core_node.id) - else: - self.show_config() - - def show_info(self, _event: tk.Event) -> None: - self.app.display_info(NodeInfoFrame, app=self.app, canvas_node=self) - - def show_context(self, event: tk.Event) -> None: - # clear existing menu - self.context.delete(0, tk.END) - is_wlan = self.core_node.type == NodeType.WIRELESS_LAN - is_wireless = self.core_node.type == NodeType.WIRELESS - is_emane = self.core_node.type == NodeType.EMANE - is_mobility = is_wlan or is_emane - if self.app.core.is_runtime(): - 
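# At runtime the context menu is limited to configuration viewers, the
# mobility player, and per-service Start/Stop/Restart/Validate actions; the
# else branch further below builds the full edit-time menu with link, unlink,
# and edit entries instead.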
self.context.add_command(label="Configure", command=self.show_config) - if is_emane: - self.context.add_command( - label="EMANE Config", command=self.show_emane_config - ) - if is_wlan: - self.context.add_command( - label="WLAN Config", command=self.show_wlan_config - ) - if is_wireless: - self.context.add_command( - label="Wireless Config", command=self.show_wireless_config - ) - if is_mobility and self.core_node.id in self.app.core.mobility_players: - self.context.add_command( - label="Mobility Player", command=self.show_mobility_player - ) - if nutils.is_container(self.core_node): - services_menu = tk.Menu(self.context) - for service in sorted(self.core_node.config_services): - service_menu = tk.Menu(services_menu) - themes.style_menu(service_menu) - start_func = functools.partial(self.start_service, service) - service_menu.add_command(label="Start", command=start_func) - stop_func = functools.partial(self.stop_service, service) - service_menu.add_command(label="Stop", command=stop_func) - restart_func = functools.partial(self.restart_service, service) - service_menu.add_command(label="Restart", command=restart_func) - validate_func = functools.partial(self.validate_service, service) - service_menu.add_command(label="Validate", command=validate_func) - services_menu.add_cascade(label=service, menu=service_menu) - themes.style_menu(services_menu) - self.context.add_cascade(label="Services", menu=services_menu) - else: - self.context.add_command(label="Configure", command=self.show_config) - if nutils.is_container(self.core_node): - self.context.add_command( - label="Config Services", command=self.show_config_services - ) - self.context.add_command( - label="Services (Deprecated)", command=self.show_services - ) - if is_emane: - self.context.add_command( - label="EMANE Config", command=self.show_emane_config - ) - if is_wlan: - self.context.add_command( - label="WLAN Config", command=self.show_wlan_config - ) - if is_wireless: - self.context.add_command( - label="Wireless Config", command=self.show_wireless_config - ) - if is_mobility: - self.context.add_command( - label="Mobility Config", command=self.show_mobility_config - ) - if nutils.is_wireless(self.core_node): - self.context.add_command( - label="Link To Selected", command=self.wireless_link_selected - ) - - link_menu = tk.Menu(self.context) - for canvas in self.app.manager.all(): - canvas_menu = tk.Menu(link_menu) - themes.style_menu(canvas_menu) - for node in canvas.nodes.values(): - if not self.is_linkable(node): - continue - func_link = functools.partial(self.click_link, node) - canvas_menu.add_command( - label=node.core_node.name, command=func_link - ) - link_menu.add_cascade(label=f"Canvas {canvas.id}", menu=canvas_menu) - themes.style_menu(link_menu) - self.context.add_cascade(label="Link", menu=link_menu) - - unlink_menu = tk.Menu(self.context) - for edge in self.edges: - other_node = edge.other_node(self) - other_iface = edge.other_iface(self) - label = other_node.core_node.name - if other_iface: - iface_label = other_iface.id - if other_iface.name: - iface_label = other_iface.name - label = f"{label}:{iface_label}" - func_unlink = functools.partial(self.click_unlink, edge) - unlink_menu.add_command(label=label, command=func_unlink) - themes.style_menu(unlink_menu) - self.context.add_cascade(label="Unlink", menu=unlink_menu) - - edit_menu = tk.Menu(self.context) - themes.style_menu(edit_menu) - edit_menu.add_command(label="Cut", command=self.click_cut) - edit_menu.add_command(label="Copy", command=self.canvas_copy) - 
edit_menu.add_command(label="Delete", command=self.canvas_delete) - edit_menu.add_command(label="Hide", command=self.click_hide) - self.context.add_cascade(label="Edit", menu=edit_menu) - self.context.tk_popup(event.x_root, event.y_root) - - def click_cut(self) -> None: - self.canvas_copy() - self.canvas_delete() - - def click_hide(self) -> None: - self.canvas.clear_selection() - self.hide() - - def click_unlink(self, edge: CanvasEdge) -> None: - edge.delete() - self.app.default_info() - - def click_link(self, node: "CanvasNode") -> None: - edge = CanvasEdge(self.app, self, node) - edge.complete(node) - - def canvas_delete(self) -> None: - self.canvas.clear_selection() - self.canvas.select_object(self.id) - self.canvas.delete_selected_objects() - - def canvas_copy(self) -> None: - self.canvas.clear_selection() - self.canvas.select_object(self.id) - self.canvas.copy_selected() - - def show_config(self) -> None: - dialog = NodeConfigDialog(self.app, self) - dialog.show() - - def show_wireless_config(self) -> None: - dialog = WirelessConfigDialog(self.app, self) - dialog.show() - - def show_wlan_config(self) -> None: - dialog = WlanConfigDialog(self.app, self) - if not dialog.has_error: - dialog.show() - - def show_mobility_config(self) -> None: - dialog = MobilityConfigDialog(self.app, self.core_node) - if not dialog.has_error: - dialog.show() - - def show_mobility_player(self) -> None: - mobility_player = self.app.core.mobility_players[self.core_node.id] - mobility_player.show() - - def show_emane_config(self) -> None: - dialog = EmaneConfigDialog(self.app, self.core_node) - dialog.show() - - def show_services(self) -> None: - dialog = NodeServiceDialog(self.app, self.core_node) - dialog.show() - - def show_config_services(self) -> None: - dialog = NodeConfigServiceDialog(self.app, self.core_node) - dialog.show() - - def has_emane_link(self, iface_id: int) -> Node: - result = None - for edge in self.edges: - other_node = edge.other_node(self) - iface = edge.iface(self) - edge_iface_id = iface.id if iface else None - if edge_iface_id != iface_id: - continue - if other_node.core_node.type == NodeType.EMANE: - result = other_node.core_node - break - return result - - def wireless_link_selected(self) -> None: - nodes = [x for x in self.canvas.selection if x in self.canvas.nodes] - for node_id in nodes: - canvas_node = self.canvas.nodes[node_id] - self.canvas.create_edge(self, canvas_node) - self.canvas.clear_selection() - - def scale_antennas(self) -> None: - for i in range(len(self.antennas)): - antenna_id = self.antennas[i] - image = self.app.get_enum_icon(ImageEnum.ANTENNA, width=images.ANTENNA_SIZE) - self.canvas.itemconfig(antenna_id, image=image) - self.antenna_images[antenna_id] = image - node_x, node_y = self.canvas.coords(self.id) - x, y = self.canvas.coords(antenna_id) - dx = node_x - 16 + (i * 8 * self.app.app_scale) - x - dy = node_y - int(23 * self.app.app_scale) - y - self.canvas.move(antenna_id, dx, dy) - - def update_icon(self, icon_path: str) -> None: - if not Path(icon_path).exists(): - logger.error(f"node icon does not exist: {icon_path}") - return - self.core_node.icon = icon_path - self.image = images.from_file(icon_path, width=images.NODE_SIZE) - self.canvas.itemconfig(self.id, image=self.image) - - def is_linkable(self, node: "CanvasNode") -> bool: - # cannot link to self - if self == node: - return False - # rj45 nodes can only support one link - if nutils.is_rj45(self.core_node) and self.edges: - return False - if nutils.is_rj45(node.core_node) and node.edges: - return 
False - # only 1 link between bridge based nodes - is_src_bridge = nutils.is_bridge(self.core_node) - is_dst_bridge = nutils.is_bridge(node.core_node) - common_links = self.edges & node.edges - if all([is_src_bridge, is_dst_bridge, common_links]): - return False - # valid link - return True - - def hide(self) -> None: - self.hidden = True - self.canvas.itemconfig(self.id, state=tk.HIDDEN) - self.canvas.itemconfig(self.text_id, state=tk.HIDDEN) - for antenna in self.antennas: - self.canvas.itemconfig(antenna, state=tk.HIDDEN) - for edge in self.edges: - if not edge.hidden: - edge.hide() - for edge in self.wireless_edges: - if not edge.hidden: - edge.hide() - - def show(self) -> None: - self.hidden = False - self.canvas.itemconfig(self.id, state=tk.NORMAL) - state = self.app.manager.show_node_labels.state() - self.set_label(state) - for antenna in self.antennas: - self.canvas.itemconfig(antenna, state=tk.NORMAL) - for edge in self.edges: - other_node = edge.other_node(self) - if edge.hidden and not other_node.hidden: - edge.show() - for edge in self.wireless_edges: - other_node = edge.other_node(self) - if edge.hidden and not other_node.hidden: - edge.show() - - def set_label(self, state: str) -> None: - self.canvas.itemconfig(self.text_id, state=state) - - def _service_action(self, service: str, action: ServiceAction) -> None: - session_id = self.app.core.session.id - try: - result = self.app.core.client.config_service_action( - session_id, self.core_node.id, service, action - ) - if not result: - self.app.show_error("Service Action Error", "Action Failed!") - except grpc.RpcError as e: - self.app.show_grpc_exception("Service Error", e) - - def start_service(self, service: str) -> None: - self._service_action(service, ServiceAction.START) - - def stop_service(self, service: str) -> None: - self._service_action(service, ServiceAction.STOP) - - def restart_service(self, service: str) -> None: - self._service_action(service, ServiceAction.RESTART) - - def validate_service(self, service: str) -> None: - self._service_action(service, ServiceAction.VALIDATE) - - def is_wireless(self) -> bool: - return nutils.is_wireless(self.core_node) - - -class ShadowNode: - def __init__( - self, app: "Application", canvas: "CanvasGraph", node: "CanvasNode" - ) -> None: - self.app: "Application" = app - self.canvas: "CanvasGraph" = canvas - self.node: "CanvasNode" = node - self.id: Optional[int] = None - self.text_id: Optional[int] = None - self.image: PhotoImage = self.app.get_enum_icon( - ImageEnum.SHADOW, width=images.NODE_SIZE - ) - self.draw() - self.setup_bindings() - - def setup_bindings(self) -> None: - self.canvas.tag_bind(self.id, "", self.node.double_click) - self.canvas.tag_bind(self.id, "", self.node.on_enter) - self.canvas.tag_bind(self.id, "", self.node.on_leave) - self.canvas.tag_bind(self.id, "", self.node.show_context) - self.canvas.tag_bind(self.id, "", self.node.show_info) - - def draw(self) -> None: - x, y = self.node.position() - self.id: int = self.canvas.create_image( - x, y, anchor=tk.CENTER, image=self.image, tags=tags.NODE - ) - self.text_id = self.canvas.create_text( - x, - y + 20, - text=f"{self.node.get_label()} [{self.node.canvas.id}]", - tags=tags.NODE_LABEL, - font=self.app.icon_text_font, - fill="#0000CD", - state=self.app.manager.show_node_labels.state(), - justify=tk.CENTER, - ) - self.canvas.shadow_nodes[self.id] = self - self.canvas.shadow_core_nodes[self.node.core_node.id] = self - - def position(self) -> tuple[int, int]: - return self.canvas.coords(self.id) - - def 
should_delete(self) -> bool: - for edge in self.node.edges: - other_node = edge.other_node(self.node) - if not other_node.is_wireless() and other_node.canvas == self.canvas: - return False - return True - - def motion(self, x_offset, y_offset) -> None: - original_position = self.position() - self.canvas.move(self.id, x_offset, y_offset) - - # check new position - bbox = self.canvas.bbox(self.id) - if not self.canvas.valid_position(*bbox): - self.canvas.coords(self.id, original_position) - return - - # move text and selection box - self.canvas.move(self.text_id, x_offset, y_offset) - self.canvas.move_selection(self.id, x_offset, y_offset) - - # move edges - for edge in self.node.edges: - edge.move_shadow(self) - for edge in self.node.wireless_edges: - edge.move_shadow(self) - - def delete(self): - self.canvas.shadow_nodes.pop(self.id, None) - self.canvas.shadow_core_nodes.pop(self.node.core_node.id, None) - self.canvas.delete(self.id) - self.canvas.delete(self.text_id) - - def hide(self) -> None: - self.canvas.itemconfig(self.id, state=tk.HIDDEN) - self.canvas.itemconfig(self.text_id, state=tk.HIDDEN) - - def show(self) -> None: - self.canvas.itemconfig(self.id, state=tk.NORMAL) - self.canvas.itemconfig(self.text_id, state=tk.NORMAL) diff --git a/daemon/core/gui/graph/shape.py b/daemon/core/gui/graph/shape.py deleted file mode 100644 index 5f243fdf..00000000 --- a/daemon/core/gui/graph/shape.py +++ /dev/null @@ -1,227 +0,0 @@ -import logging -from typing import TYPE_CHECKING, Any, Optional, Union - -from core.gui.dialogs.shapemod import ShapeDialog -from core.gui.graph import tags -from core.gui.graph.shapeutils import ShapeType - -logger = logging.getLogger(__name__) - -if TYPE_CHECKING: - from core.gui.app import Application - from core.gui.graph.graph import CanvasGraph - - -class AnnotationData: - def __init__( - self, - text: str = "", - font: str = "Arial", - font_size: int = 12, - text_color: str = "#000000", - fill_color: str = "", - border_color: str = "#000000", - border_width: int = 1, - bold: bool = False, - italic: bool = False, - underline: bool = False, - ) -> None: - self.text: str = text - self.font: str = font - self.font_size: int = font_size - self.text_color: str = text_color - self.fill_color: str = fill_color - self.border_color: str = border_color - self.border_width: int = border_width - self.bold: bool = bold - self.italic: bool = italic - self.underline: bool = underline - - -class Shape: - def __init__( - self, - app: "Application", - canvas: "CanvasGraph", - shape_type: ShapeType, - x1: float, - y1: float, - x2: float = None, - y2: float = None, - data: AnnotationData = None, - ) -> None: - self.app: "Application" = app - self.canvas: "CanvasGraph" = canvas - self.shape_type: ShapeType = shape_type - self.id: Optional[int] = None - self.text_id: Optional[int] = None - self.x1: float = x1 - self.y1: float = y1 - if x2 is None: - x2 = x1 - self.x2: float = x2 - if y2 is None: - y2 = y1 - self.y2: float = y2 - if data is None: - self.created: bool = False - self.shape_data: AnnotationData = AnnotationData() - else: - self.created: bool = True - self.shape_data = data - self.draw() - - @classmethod - def from_metadata(cls, app: "Application", config: dict[str, Any]) -> None: - shape_type = config["type"] - try: - shape_type = ShapeType(shape_type) - coords = config["iconcoords"] - data = AnnotationData( - config["label"], - config["fontfamily"], - config["fontsize"], - config["labelcolor"], - config["color"], - config["border"], - config["width"], - config["bold"], 
- config["italic"], - config["underline"], - ) - canvas_id = config.get("canvas", 1) - canvas = app.manager.get(canvas_id) - shape = Shape(app, canvas, shape_type, *coords, data=data) - canvas.shapes[shape.id] = shape - except ValueError: - logger.exception("unknown shape: %s", shape_type) - - def draw(self) -> None: - if self.created: - dash = None - else: - dash = "-" - if self.shape_type == ShapeType.OVAL: - self.id = self.canvas.create_oval( - self.x1, - self.y1, - self.x2, - self.y2, - tags=(tags.SHAPE, tags.ANNOTATION), - dash=dash, - fill=self.shape_data.fill_color, - outline=self.shape_data.border_color, - width=self.shape_data.border_width, - state=self.app.manager.show_annotations.state(), - ) - self.draw_shape_text() - elif self.shape_type == ShapeType.RECTANGLE: - self.id = self.canvas.create_rectangle( - self.x1, - self.y1, - self.x2, - self.y2, - tags=(tags.SHAPE, tags.ANNOTATION), - dash=dash, - fill=self.shape_data.fill_color, - outline=self.shape_data.border_color, - width=self.shape_data.border_width, - state=self.app.manager.show_annotations.state(), - ) - self.draw_shape_text() - elif self.shape_type == ShapeType.TEXT: - font = self.get_font() - self.id = self.canvas.create_text( - self.x1, - self.y1, - tags=(tags.SHAPE_TEXT, tags.ANNOTATION), - text=self.shape_data.text, - fill=self.shape_data.text_color, - font=font, - state=self.app.manager.show_annotations.state(), - ) - else: - logger.error("unknown shape type: %s", self.shape_type) - self.created = True - - def get_font(self) -> list[Union[int, str]]: - font = [self.shape_data.font, self.shape_data.font_size] - if self.shape_data.bold: - font.append("bold") - if self.shape_data.italic: - font.append("italic") - if self.shape_data.underline: - font.append("underline") - return font - - def draw_shape_text(self) -> None: - if self.shape_data.text: - x = (self.x1 + self.x2) / 2 - y = self.y1 + 1.5 * self.shape_data.font_size - font = self.get_font() - self.text_id = self.canvas.create_text( - x, - y, - tags=(tags.SHAPE_TEXT, tags.ANNOTATION), - text=self.shape_data.text, - fill=self.shape_data.text_color, - font=font, - state=self.app.manager.show_annotations.state(), - ) - - def shape_motion(self, x1: float, y1: float) -> None: - self.canvas.coords(self.id, self.x1, self.y1, x1, y1) - - def shape_complete(self, x: float, y: float) -> None: - self.canvas.organize() - s = ShapeDialog(self.app, self) - s.show() - - def disappear(self) -> None: - self.canvas.delete(self.id) - - def motion(self, x_offset: float, y_offset: float) -> None: - original_position = self.canvas.coords(self.id) - self.canvas.move(self.id, x_offset, y_offset) - coords = self.canvas.coords(self.id) - if self.shape_type == ShapeType.TEXT: - coords = coords * 2 - if not self.canvas.valid_position(*coords): - self.canvas.coords(self.id, original_position) - return - self.canvas.move_selection(self.id, x_offset, y_offset) - if self.text_id is not None: - self.canvas.move(self.text_id, x_offset, y_offset) - - def delete(self) -> None: - logger.debug("Delete shape, id(%s)", self.id) - self.canvas.delete(self.id) - self.canvas.delete(self.text_id) - - def metadata(self) -> dict[str, Union[str, int, bool]]: - coords = self.canvas.coords(self.id) - # update coords to actual positions - if len(coords) == 4: - x1, y1, x2, y2 = coords - x1, y1 = self.canvas.get_actual_coords(x1, y1) - x2, y2 = self.canvas.get_actual_coords(x2, y2) - coords = (x1, y1, x2, y2) - else: - x1, y1 = coords - x1, y1 = self.canvas.get_actual_coords(x1, y1) - coords = (x1, y1) - 
return { - "canvas": self.canvas.id, - "type": self.shape_type.value, - "iconcoords": coords, - "label": self.shape_data.text, - "fontfamily": self.shape_data.font, - "fontsize": self.shape_data.font_size, - "labelcolor": self.shape_data.text_color, - "color": self.shape_data.fill_color, - "border": self.shape_data.border_color, - "width": self.shape_data.border_width, - "bold": self.shape_data.bold, - "italic": self.shape_data.italic, - "underline": self.shape_data.underline, - } diff --git a/daemon/core/gui/graph/shapeutils.py b/daemon/core/gui/graph/shapeutils.py deleted file mode 100644 index ab82ef76..00000000 --- a/daemon/core/gui/graph/shapeutils.py +++ /dev/null @@ -1,23 +0,0 @@ -import enum - - -class ShapeType(enum.Enum): - MARKER = "marker" - OVAL = "oval" - RECTANGLE = "rectangle" - TEXT = "text" - - -SHAPES: set[ShapeType] = {ShapeType.OVAL, ShapeType.RECTANGLE} - - -def is_draw_shape(shape_type: ShapeType) -> bool: - return shape_type in SHAPES - - -def is_shape_text(shape_type: ShapeType) -> bool: - return shape_type == ShapeType.TEXT - - -def is_marker(shape_type: ShapeType) -> bool: - return shape_type == ShapeType.MARKER diff --git a/daemon/core/gui/graph/tags.py b/daemon/core/gui/graph/tags.py deleted file mode 100644 index cb1ffc15..00000000 --- a/daemon/core/gui/graph/tags.py +++ /dev/null @@ -1,42 +0,0 @@ -ANNOTATION: str = "annotation" -GRIDLINE: str = "gridline" -SHAPE: str = "shape" -SHAPE_TEXT: str = "shapetext" -EDGE: str = "edge" -LOSS_EDGES: str = "loss-edge" -LINK_LABEL: str = "linklabel" -WIRELESS_EDGE: str = "wireless" -ANTENNA: str = "antenna" -NODE_LABEL: str = "nodename" -NODE: str = "node" -WALLPAPER: str = "wallpaper" -SELECTION: str = "selectednodes" -MARKER: str = "marker" -HIDDEN: str = "hidden" -ORGANIZE_TAGS: list[str] = [ - WALLPAPER, - GRIDLINE, - SHAPE, - SHAPE_TEXT, - EDGE, - WIRELESS_EDGE, - LINK_LABEL, - ANTENNA, - NODE, - NODE_LABEL, - SELECTION, - MARKER, -] -RESET_TAGS: list[str] = [ - EDGE, - NODE, - NODE_LABEL, - WALLPAPER, - LINK_LABEL, - ANTENNA, - WIRELESS_EDGE, - SELECTION, - SHAPE, - SHAPE_TEXT, - MARKER, -] diff --git a/daemon/core/gui/graph/tooltip.py b/daemon/core/gui/graph/tooltip.py deleted file mode 100644 index b820abec..00000000 --- a/daemon/core/gui/graph/tooltip.py +++ /dev/null @@ -1,120 +0,0 @@ -import tkinter as tk -from tkinter import ttk -from typing import TYPE_CHECKING, Optional - -from core.gui.themes import Styles - -if TYPE_CHECKING: - from core.gui.graph.graph import CanvasGraph - - -class CanvasTooltip: - """ - It creates a tooltip for a given canvas tag or id as the mouse is - above it. - - This class has been derived from the original Tooltip class updated - and posted back to StackOverflow at the following link: - - https://stackoverflow.com/questions/3221956/ - what-is-the-simplest-way-to-make-tooltips-in-tkinter/ - 41079350#41079350 - - Alberto Vassena on 2016.12.10. 
- """ - - def __init__( - self, - canvas: "CanvasGraph", - *, - pad: tuple[int, int, int, int] = (5, 3, 5, 3), - waittime: int = 400, - wraplength: int = 600, - ) -> None: - # in miliseconds, originally 500 - self.waittime: int = waittime - # in pixels, originally 180 - self.wraplength: int = wraplength - self.canvas: "CanvasGraph" = canvas - self.text: tk.StringVar = tk.StringVar() - self.pad: tuple[int, int, int, int] = pad - self.id: Optional[str] = None - self.tw: Optional[tk.Toplevel] = None - - def on_enter(self, event: tk.Event = None) -> None: - self.schedule() - - def on_leave(self, event: tk.Event = None) -> None: - self.unschedule() - self.hide() - - def schedule(self) -> None: - self.unschedule() - self.id = self.canvas.after(self.waittime, self.show) - - def unschedule(self) -> None: - id_ = self.id - self.id = None - if id_: - self.canvas.after_cancel(id_) - - def show(self, event: tk.Event = None) -> None: - def tip_pos_calculator( - canvas: "CanvasGraph", - label: ttk.Label, - *, - tip_delta: tuple[int, int] = (10, 5), - pad: tuple[int, int, int, int] = (5, 3, 5, 3), - ): - c = canvas - s_width, s_height = c.winfo_screenwidth(), c.winfo_screenheight() - width, height = ( - pad[0] + label.winfo_reqwidth() + pad[2], - pad[1] + label.winfo_reqheight() + pad[3], - ) - mouse_x, mouse_y = c.winfo_pointerxy() - x1, y1 = mouse_x + tip_delta[0], mouse_y + tip_delta[1] - x2, y2 = x1 + width, y1 + height - - x_delta = x2 - s_width - if x_delta < 0: - x_delta = 0 - y_delta = y2 - s_height - if y_delta < 0: - y_delta = 0 - - offscreen = (x_delta, y_delta) != (0, 0) - if offscreen: - if x_delta: - x1 = mouse_x - tip_delta[0] - width - if y_delta: - y1 = mouse_y - tip_delta[1] - height - offscreen_again = y1 < 0 # out on the top - if offscreen_again: - y1 = 0 - return x1, y1 - - pad = self.pad - canvas = self.canvas - - # creates a toplevel window - self.tw = tk.Toplevel(canvas.master) - - # Leaves only the label and removes the app window - self.tw.wm_overrideredirect(True) - win = ttk.Frame(self.tw, style=Styles.tooltip_frame, padding=3) - win.grid() - label = ttk.Label( - win, - textvariable=self.text, - wraplength=self.wraplength, - style=Styles.tooltip, - ) - label.grid(padx=(pad[0], pad[2]), pady=(pad[1], pad[3]), sticky=tk.NSEW) - x, y = tip_pos_calculator(canvas, label, pad=pad) - self.tw.wm_geometry(f"+{x:d}+{y:d}") - - def hide(self) -> None: - if self.tw: - self.tw.destroy() - self.tw = None diff --git a/daemon/core/gui/images.py b/daemon/core/gui/images.py deleted file mode 100644 index 070137fb..00000000 --- a/daemon/core/gui/images.py +++ /dev/null @@ -1,115 +0,0 @@ -from enum import Enum -from typing import Optional - -from PIL import Image -from PIL.ImageTk import PhotoImage - -from core.api.grpc.wrappers import Node, NodeType -from core.gui.appconfig import LOCAL_ICONS_PATH - -NODE_SIZE: int = 48 -ANTENNA_SIZE: int = 32 -BUTTON_SIZE: int = 16 -ERROR_SIZE: int = 24 -DIALOG_SIZE: int = 16 -IMAGES: dict[str, str] = {} - - -def load_all() -> None: - for image in LOCAL_ICONS_PATH.glob("*"): - try: - ImageEnum(image.stem) - IMAGES[image.stem] = str(image) - except ValueError: - pass - - -def from_file( - file_path: str, *, width: int, height: int = None, scale: float = 1.0 -) -> PhotoImage: - if height is None: - height = width - width = int(width * scale) - height = int(height * scale) - image = Image.open(file_path) - image = image.resize((width, height), Image.ANTIALIAS) - return PhotoImage(image) - - -def from_enum( - image_enum: "ImageEnum", *, width: int, height: int = 
None, scale: float = 1.0 -) -> PhotoImage: - file_path = IMAGES[image_enum.value] - return from_file(file_path, width=width, height=height, scale=scale) - - -class ImageEnum(Enum): - SWITCH = "lanswitch" - CORE = "core-icon" - START = "start" - MARKER = "marker" - ROUTER = "router" - SELECT = "select" - LINK = "link" - HUB = "hub" - WLAN = "wlan" - WIRELESS = "wireless" - EMANE = "emane" - RJ45 = "rj45" - TUNNEL = "tunnel" - OVAL = "oval" - RECTANGLE = "rectangle" - TEXT = "text" - HOST = "host" - PC = "pc" - MDR = "mdr" - PROUTER = "prouter" - OVS = "OVS" - EDITNODE = "edit-node" - PLOT = "plot" - TWONODE = "twonode" - PAUSE = "pause" - STOP = "stop" - OBSERVE = "observe" - RUN = "run" - DOCUMENTNEW = "document-new" - DOCUMENTSAVE = "document-save" - FILEOPEN = "fileopen" - EDITDELETE = "edit-delete" - ANTENNA = "antenna" - DOCKER = "docker" - PODMAN = "podman" - LXC = "lxc" - ALERT = "alert" - DELETE = "delete" - SHUTDOWN = "shutdown" - CANCEL = "cancel" - ERROR = "error" - SHADOW = "shadow" - - -TYPE_MAP: dict[tuple[NodeType, str], ImageEnum] = { - (NodeType.DEFAULT, "router"): ImageEnum.ROUTER, - (NodeType.DEFAULT, "PC"): ImageEnum.PC, - (NodeType.DEFAULT, "host"): ImageEnum.HOST, - (NodeType.DEFAULT, "mdr"): ImageEnum.MDR, - (NodeType.DEFAULT, "prouter"): ImageEnum.PROUTER, - (NodeType.HUB, None): ImageEnum.HUB, - (NodeType.SWITCH, None): ImageEnum.SWITCH, - (NodeType.WIRELESS_LAN, None): ImageEnum.WLAN, - (NodeType.WIRELESS, None): ImageEnum.WIRELESS, - (NodeType.EMANE, None): ImageEnum.EMANE, - (NodeType.RJ45, None): ImageEnum.RJ45, - (NodeType.TUNNEL, None): ImageEnum.TUNNEL, - (NodeType.DOCKER, None): ImageEnum.DOCKER, - (NodeType.PODMAN, None): ImageEnum.PODMAN, - (NodeType.LXC, None): ImageEnum.LXC, -} - - -def from_node(node: Node, *, scale: float) -> Optional[PhotoImage]: - image = None - image_enum = TYPE_MAP.get((node.type, node.model)) - if image_enum: - image = from_enum(image_enum, width=NODE_SIZE, scale=scale) - return image diff --git a/daemon/core/gui/interface.py b/daemon/core/gui/interface.py deleted file mode 100644 index 9ebea3c1..00000000 --- a/daemon/core/gui/interface.py +++ /dev/null @@ -1,283 +0,0 @@ -import logging -from typing import TYPE_CHECKING, Any, Optional - -import netaddr -from netaddr import EUI, IPNetwork - -from core.api.grpc.wrappers import Interface, Link, LinkType, Node -from core.gui import nodeutils as nutils -from core.gui.graph.edges import CanvasEdge -from core.gui.graph.node import CanvasNode - -logger = logging.getLogger(__name__) - -if TYPE_CHECKING: - from core.gui.app import Application - -IP4_MASK: int = 24 -IP6_MASK: int = 64 -WIRELESS_IP4_MASK: int = 32 -WIRELESS_IP6_MASK: int = 128 - - -def get_index(iface: Interface) -> Optional[int]: - if not iface.ip4: - return None - net = netaddr.IPNetwork(f"{iface.ip4}/{iface.ip4_mask}") - ip_value = net.value - cidr_value = net.cidr.value - return ip_value - cidr_value - - -class Subnets: - def __init__(self, ip4: IPNetwork, ip6: IPNetwork) -> None: - self.ip4 = ip4 - self.ip6 = ip6 - self.used_indexes = set() - - def __eq__(self, other: Any) -> bool: - if not isinstance(other, Subnets): - return False - return self.key() == other.key() - - def __hash__(self) -> int: - return hash(self.key()) - - def key(self) -> tuple[IPNetwork, IPNetwork]: - return self.ip4, self.ip6 - - def next(self) -> "Subnets": - return Subnets(self.ip4.next(), self.ip6.next()) - - -class InterfaceManager: - def __init__(self, app: "Application") -> None: - self.app: "Application" = app - ip4 = 
self.app.guiconfig.ips.ip4 - ip6 = self.app.guiconfig.ips.ip6 - self.ip4_subnets: IPNetwork = IPNetwork(f"{ip4}/{IP4_MASK}") - self.ip6_subnets: IPNetwork = IPNetwork(f"{ip6}/{IP6_MASK}") - mac = self.app.guiconfig.mac - self.mac: EUI = EUI(mac, dialect=netaddr.mac_unix_expanded) - self.current_mac: Optional[EUI] = None - self.current_subnets: Optional[Subnets] = None - self.used_subnets: dict[tuple[IPNetwork, IPNetwork], Subnets] = {} - self.used_macs: set[str] = set() - - def update_ips(self, ip4: str, ip6: str) -> None: - self.reset() - self.ip4_subnets = IPNetwork(f"{ip4}/{IP4_MASK}") - self.ip6_subnets = IPNetwork(f"{ip6}/{IP6_MASK}") - - def next_mac(self) -> str: - while str(self.current_mac) in self.used_macs: - value = self.current_mac.value + 1 - self.current_mac = EUI(value, dialect=netaddr.mac_unix_expanded) - mac = str(self.current_mac) - value = self.current_mac.value + 1 - self.current_mac = EUI(value, dialect=netaddr.mac_unix_expanded) - return mac - - def next_subnets(self) -> Subnets: - subnets = self.current_subnets - if subnets is None: - subnets = Subnets(self.ip4_subnets, self.ip6_subnets) - while subnets.key() in self.used_subnets: - subnets = subnets.next() - self.used_subnets[subnets.key()] = subnets - return subnets - - def reset(self) -> None: - self.current_subnets = None - self.used_subnets.clear() - - def removed(self, links: list[Link]) -> None: - # get remaining subnets - remaining_subnets = set() - for edge in self.app.core.links.values(): - link = edge.link - if link.iface1: - subnets = self.get_subnets(link.iface1) - remaining_subnets.add(subnets) - if link.iface2: - subnets = self.get_subnets(link.iface2) - remaining_subnets.add(subnets) - - # remove all subnets from used subnets when no longer present - # or remove used indexes from subnet - ifaces = [] - for link in links: - if link.iface1: - ifaces.append(link.iface1) - if link.iface2: - ifaces.append(link.iface2) - for iface in ifaces: - subnets = self.get_subnets(iface) - if subnets not in remaining_subnets: - self.used_subnets.pop(subnets.key(), None) - else: - index = get_index(iface) - if index is not None: - subnets.used_indexes.discard(index) - self.current_subnets = None - - def set_macs(self, links: list[Link]) -> None: - self.current_mac = self.mac - self.used_macs.clear() - for link in links: - if link.iface1: - self.used_macs.add(link.iface1.mac) - if link.iface2: - self.used_macs.add(link.iface2.mac) - - def joined(self, links: list[Link]) -> None: - ifaces = [] - for link in links: - if link.iface1: - ifaces.append(link.iface1) - if link.iface2: - ifaces.append(link.iface2) - - # add to used subnets and mark used indexes - for iface in ifaces: - subnets = self.get_subnets(iface) - index = get_index(iface) - if index is None: - continue - subnets.used_indexes.add(index) - if subnets.key() not in self.used_subnets: - self.used_subnets[subnets.key()] = subnets - - def next_index(self, node: Node) -> int: - if nutils.is_router(node): - index = 1 - else: - index = 20 - while True: - if index not in self.current_subnets.used_indexes: - self.current_subnets.used_indexes.add(index) - break - index += 1 - return index - - def get_ips(self, node: Node) -> [Optional[str], Optional[str]]: - enable_ip4 = self.app.guiconfig.ips.enable_ip4 - enable_ip6 = self.app.guiconfig.ips.enable_ip6 - ip4, ip6 = None, None - if not enable_ip4 and not enable_ip6: - return ip4, ip6 - index = self.next_index(node) - if enable_ip4: - ip4 = str(self.current_subnets.ip4[index]) - if enable_ip6: - ip6 = 
str(self.current_subnets.ip6[index]) - return ip4, ip6 - - def get_subnets(self, iface: Interface) -> Subnets: - ip4_subnet = self.ip4_subnets - if iface.ip4: - ip4_subnet = IPNetwork(f"{iface.ip4}/{IP4_MASK}").cidr - ip6_subnet = self.ip6_subnets - if iface.ip6: - ip6_subnet = IPNetwork(f"{iface.ip6}/{IP6_MASK}").cidr - subnets = Subnets(ip4_subnet, ip6_subnet) - return self.used_subnets.get(subnets.key(), subnets) - - def determine_subnets( - self, canvas_src_node: CanvasNode, canvas_dst_node: CanvasNode - ) -> None: - src_node = canvas_src_node.core_node - dst_node = canvas_dst_node.core_node - is_src_container = nutils.is_container(src_node) - is_dst_container = nutils.is_container(dst_node) - if is_src_container and is_dst_container: - self.current_subnets = self.next_subnets() - elif is_src_container and not is_dst_container: - subnets = self.find_subnets(canvas_dst_node, visited={src_node.id}) - if subnets: - self.current_subnets = subnets - else: - self.current_subnets = self.next_subnets() - elif not is_src_container and is_dst_container: - subnets = self.find_subnets(canvas_src_node, visited={dst_node.id}) - if subnets: - self.current_subnets = subnets - else: - self.current_subnets = self.next_subnets() - else: - logger.info("ignoring subnet change for link between network nodes") - - def find_subnets( - self, canvas_node: CanvasNode, visited: set[int] = None - ) -> Optional[IPNetwork]: - logger.info("finding subnet for node: %s", canvas_node.core_node.name) - subnets = None - if not visited: - visited = set() - visited.add(canvas_node.core_node.id) - for edge in canvas_node.edges: - iface = edge.link.iface1 - check_node = edge.src - if edge.src == canvas_node: - iface = edge.link.iface2 - check_node = edge.dst - if check_node.core_node.id in visited: - continue - visited.add(check_node.core_node.id) - if iface: - subnets = self.get_subnets(iface) - else: - subnets = self.find_subnets(check_node, visited) - if subnets: - logger.info("found subnets: %s", subnets) - break - return subnets - - def create_link(self, edge: CanvasEdge) -> Link: - """ - Create core link for a given edge based on src/dst nodes. 
- """ - src_node = edge.src.core_node - dst_node = edge.dst.core_node - self.determine_subnets(edge.src, edge.dst) - src_iface = None - if nutils.is_iface_node(src_node): - src_iface = self.create_iface(edge.src, edge.linked_wireless) - dst_iface = None - if nutils.is_iface_node(dst_node): - dst_iface = self.create_iface(edge.dst, edge.linked_wireless) - link = Link( - type=LinkType.WIRED, - node1_id=src_node.id, - node2_id=dst_node.id, - iface1=src_iface, - iface2=dst_iface, - ) - logger.info("added link between %s and %s", src_node.name, dst_node.name) - return link - - def create_iface(self, canvas_node: CanvasNode, wireless_link: bool) -> Interface: - node = canvas_node.core_node - if nutils.is_bridge(node): - iface_id = canvas_node.next_iface_id() - iface = Interface(id=iface_id) - else: - ip4, ip6 = self.get_ips(node) - if wireless_link: - ip4_mask = WIRELESS_IP4_MASK - ip6_mask = WIRELESS_IP6_MASK - else: - ip4_mask = IP4_MASK - ip6_mask = IP6_MASK - iface_id = canvas_node.next_iface_id() - name = f"eth{iface_id}" - iface = Interface( - id=iface_id, - name=name, - ip4=ip4, - ip4_mask=ip4_mask, - ip6=ip6, - ip6_mask=ip6_mask, - ) - logger.info("create node(%s) interface(%s)", node.name, iface) - return iface diff --git a/daemon/core/gui/menubar.py b/daemon/core/gui/menubar.py deleted file mode 100644 index 16e57cb6..00000000 --- a/daemon/core/gui/menubar.py +++ /dev/null @@ -1,495 +0,0 @@ -import logging -import tkinter as tk -import webbrowser -from functools import partial -from pathlib import Path -from tkinter import filedialog, messagebox -from typing import TYPE_CHECKING, Optional - -from core.gui import images -from core.gui.coreclient import CoreClient -from core.gui.dialogs.about import AboutDialog -from core.gui.dialogs.canvassizeandscale import SizeAndScaleDialog -from core.gui.dialogs.canvaswallpaper import CanvasWallpaperDialog -from core.gui.dialogs.customnodes import CustomNodesDialog -from core.gui.dialogs.executepython import ExecutePythonDialog -from core.gui.dialogs.find import FindDialog -from core.gui.dialogs.hooks import HooksDialog -from core.gui.dialogs.ipdialog import IpConfigDialog -from core.gui.dialogs.macdialog import MacConfigDialog -from core.gui.dialogs.observers import ObserverDialog -from core.gui.dialogs.preferences import PreferencesDialog -from core.gui.dialogs.servers import ServersDialog -from core.gui.dialogs.sessionoptions import SessionOptionsDialog -from core.gui.dialogs.sessions import SessionsDialog -from core.gui.dialogs.throughput import ThroughputDialog -from core.gui.graph.manager import CanvasManager -from core.gui.observers import ObserversMenu -from core.gui.task import ProgressTask - -logger = logging.getLogger(__name__) - -if TYPE_CHECKING: - from core.gui.app import Application - -MAX_FILES: int = 3 - - -class Menubar(tk.Menu): - """ - Core menubar - """ - - def __init__(self, app: "Application") -> None: - """ - Create a CoreMenubar instance - """ - super().__init__(app) - self.app: "Application" = app - self.core: CoreClient = app.core - self.manager: CanvasManager = app.manager - self.recent_menu: Optional[tk.Menu] = None - self.edit_menu: Optional[tk.Menu] = None - self.canvas_menu: Optional[tk.Menu] = None - self.observers_menu: Optional[ObserversMenu] = None - self.draw() - - def draw(self) -> None: - """ - Create core menubar and bind the hot keys to their matching command - """ - self.draw_file_menu() - self.draw_edit_menu() - self.draw_canvas_menu() - self.draw_view_menu() - self.draw_tools_menu() - 
self.draw_widgets_menu() - self.draw_session_menu() - self.draw_help_menu() - - def draw_file_menu(self) -> None: - """ - Create file menu - """ - menu = tk.Menu(self) - menu.add_command( - label="New Session", accelerator="Ctrl+N", command=self.click_new - ) - self.app.bind_all("", lambda e: self.click_new()) - menu.add_command(label="Save", accelerator="Ctrl+S", command=self.click_save) - self.app.bind_all("", self.click_save) - menu.add_command(label="Save As...", command=self.click_save_as) - menu.add_command( - label="Open...", command=self.click_open_xml, accelerator="Ctrl+O" - ) - self.app.bind_all("", self.click_open_xml) - self.recent_menu = tk.Menu(menu) - for i in self.app.guiconfig.recentfiles: - self.recent_menu.add_command( - label=i, command=partial(self.open_recent_files, Path(i)) - ) - menu.add_cascade(label="Recent Files", menu=self.recent_menu) - menu.add_separator() - menu.add_command(label="Execute Python Script...", command=self.execute_python) - menu.add_separator() - menu.add_command( - label="Quit", - accelerator="Ctrl+Q", - command=lambda: self.prompt_save_running_session(True), - ) - self.app.bind_all( - "", lambda _: self.prompt_save_running_session(True) - ) - self.add_cascade(label="File", menu=menu) - - def draw_edit_menu(self) -> None: - """ - Create edit menu - """ - menu = tk.Menu(self) - menu.add_command(label="Preferences", command=self.click_preferences) - menu.add_command(label="Custom Nodes", command=self.click_custom_nodes) - menu.add_command(label="Show Hidden Nodes", command=self.click_show_hidden) - menu.add_separator() - menu.add_command(label="Undo", accelerator="Ctrl+Z", state=tk.DISABLED) - menu.add_command(label="Redo", accelerator="Ctrl+Y", state=tk.DISABLED) - menu.add_separator() - menu.add_command(label="Cut", accelerator="Ctrl+X", command=self.click_cut) - menu.add_command(label="Copy", accelerator="Ctrl+C", command=self.click_copy) - menu.add_command(label="Paste", accelerator="Ctrl+V", command=self.click_paste) - menu.add_command( - label="Delete", accelerator="Ctrl+D", command=self.click_delete - ) - menu.add_command(label="Hide", accelerator="Ctrl+H", command=self.click_hide) - self.add_cascade(label="Edit", menu=menu) - self.edit_menu = menu - - def draw_canvas_menu(self) -> None: - """ - Create canvas menu - """ - menu = tk.Menu(self) - menu.add_command(label="New", command=self.click_canvas_add) - menu.add_command(label="Size / Scale", command=self.click_canvas_size_and_scale) - menu.add_separator() - menu.add_command(label="Delete", command=self.click_canvas_delete) - menu.add_command(label="Wallpaper", command=self.click_canvas_wallpaper) - self.add_cascade(label="Canvas", menu=menu) - self.canvas_menu = menu - - def draw_view_menu(self) -> None: - """ - Create view menu - """ - menu = tk.Menu(self) - menu.add_checkbutton( - label="Details Panel", - command=self.click_infobar_change, - variable=self.app.show_infobar, - ) - menu.add_checkbutton( - label="Interface Names", - command=self.click_edge_label_change, - variable=self.manager.show_iface_names, - ) - menu.add_checkbutton( - label="IPv4 Addresses", - command=self.click_edge_label_change, - variable=self.manager.show_ip4s, - ) - menu.add_checkbutton( - label="IPv6 Addresses", - command=self.click_edge_label_change, - variable=self.manager.show_ip6s, - ) - menu.add_checkbutton( - label="Node Labels", - command=self.manager.show_node_labels.click_handler, - variable=self.manager.show_node_labels, - ) - menu.add_checkbutton( - label="Link Labels", - 
command=self.manager.show_link_labels.click_handler, - variable=self.manager.show_link_labels, - ) - menu.add_checkbutton( - label="Links", - command=self.manager.show_links.click_handler, - variable=self.manager.show_links, - ) - menu.add_checkbutton( - label="Loss Links", - command=self.manager.show_loss_links.click_handler, - variable=self.manager.show_loss_links, - ) - menu.add_checkbutton( - label="Wireless Links", - command=self.manager.show_wireless.click_handler, - variable=self.manager.show_wireless, - ) - menu.add_checkbutton( - label="Annotations", - command=self.manager.show_annotations.click_handler, - variable=self.manager.show_annotations, - ) - menu.add_checkbutton( - label="Canvas Grid", - command=self.manager.show_grid.click_handler, - variable=self.manager.show_grid, - ) - self.add_cascade(label="View", menu=menu) - - def draw_tools_menu(self) -> None: - """ - Create tools menu - """ - menu = tk.Menu(self) - menu.add_command(label="Find", accelerator="Ctrl+F", command=self.click_find) - self.app.master.bind_all("", self.click_find) - menu.add_command(label="Auto Grid", command=self.click_autogrid) - menu.add_command(label="IP Addresses", command=self.click_ip_config) - menu.add_command(label="MAC Addresses", command=self.click_mac_config) - self.add_cascade(label="Tools", menu=menu) - - def create_observer_widgets_menu(self, widget_menu: tk.Menu) -> None: - """ - Create observer widget menu item and create the sub menu items inside - """ - self.observers_menu = ObserversMenu(widget_menu, self.app) - widget_menu.add_cascade(label="Observer Widgets", menu=self.observers_menu) - - def create_adjacency_menu(self, widget_menu: tk.Menu) -> None: - """ - Create adjacency menu item and the sub menu items inside - """ - menu = tk.Menu(widget_menu) - menu.add_command(label="Configure Adjacency", state=tk.DISABLED) - menu.add_command(label="Enable OSPFv2?", state=tk.DISABLED) - menu.add_command(label="Enable OSPFv3?", state=tk.DISABLED) - menu.add_command(label="Enable OSLR?", state=tk.DISABLED) - menu.add_command(label="Enable OSLRv2?", state=tk.DISABLED) - widget_menu.add_cascade(label="Adjacency", menu=menu) - - def create_throughput_menu(self, widget_menu: tk.Menu) -> None: - menu = tk.Menu(widget_menu) - menu.add_command( - label="Configure Throughput", command=self.click_config_throughput - ) - menu.add_checkbutton( - label="Enable Throughput?", - command=self.click_throughput, - variable=self.core.show_throughputs, - ) - widget_menu.add_cascade(label="Throughput", menu=menu) - - def draw_widgets_menu(self) -> None: - """ - Create widget menu - """ - menu = tk.Menu(self) - self.create_observer_widgets_menu(menu) - self.create_adjacency_menu(menu) - self.create_throughput_menu(menu) - self.add_cascade(label="Widgets", menu=menu) - - def draw_session_menu(self) -> None: - """ - Create session menu - """ - menu = tk.Menu(self) - menu.add_command(label="Sessions", command=self.click_sessions) - menu.add_command(label="Servers", command=self.click_servers) - menu.add_command(label="Options", command=self.click_session_options) - menu.add_command(label="Hooks", command=self.click_hooks) - self.add_cascade(label="Session", menu=menu) - - def draw_help_menu(self) -> None: - """ - Create help menu - """ - menu = tk.Menu(self) - menu.add_command(label="Core GitHub (www)", command=self.click_core_github) - menu.add_command(label="Core Documentation (www)", command=self.click_core_doc) - menu.add_command(label="About", command=self.click_about) - self.add_cascade(label="Help", menu=menu) 
- - def open_recent_files(self, file_path: Path) -> None: - if file_path.is_file(): - logger.debug("Open recent file %s", file_path) - self.open_xml_task(file_path) - else: - logger.warning("File does not exist %s", file_path) - - def update_recent_files(self) -> None: - self.recent_menu.delete(0, tk.END) - for i in self.app.guiconfig.recentfiles: - self.recent_menu.add_command( - label=i, command=partial(self.open_recent_files, Path(i)) - ) - - def click_save(self, _event: tk.Event = None) -> None: - if self.core.session.file: - if self.core.save_xml(): - self.add_recent_file_to_gui_config(self.core.session.file) - else: - self.click_save_as() - - def click_save_as(self, _event: tk.Event = None) -> None: - init_dir = self.core.get_xml_dir() - file_path = filedialog.asksaveasfilename( - initialdir=init_dir, - title="Save As", - filetypes=(("XML files", "*.xml"), ("All files", "*")), - defaultextension=".xml", - ) - if file_path: - file_path = Path(file_path) - if self.core.save_xml(file_path): - self.add_recent_file_to_gui_config(file_path) - - def click_open_xml(self, _event: tk.Event = None) -> None: - init_dir = self.core.get_xml_dir() - file_path = filedialog.askopenfilename( - initialdir=init_dir, - title="Open", - filetypes=(("XML Files", "*.xml"), ("All Files", "*")), - ) - if file_path: - file_path = Path(file_path) - self.open_xml_task(file_path) - - def open_xml_task(self, file_path: Path) -> None: - self.add_recent_file_to_gui_config(file_path) - self.prompt_save_running_session() - task = ProgressTask(self.app, "Open XML", self.core.open_xml, args=(file_path,)) - task.start() - - def execute_python(self) -> None: - dialog = ExecutePythonDialog(self.app) - dialog.show() - - def add_recent_file_to_gui_config(self, file_path: Path) -> None: - recent_files = self.app.guiconfig.recentfiles - file_path = str(file_path) - if file_path in recent_files: - recent_files.remove(file_path) - recent_files.insert(0, file_path) - if len(recent_files) > MAX_FILES: - recent_files.pop() - self.app.save_config() - self.app.menubar.update_recent_files() - - def set_state(self, is_runtime: bool) -> None: - state = tk.DISABLED if is_runtime else tk.NORMAL - for entry in {"Copy", "Paste", "Delete", "Cut"}: - self.edit_menu.entryconfigure(entry, state=state) - for entry in {"Delete"}: - self.canvas_menu.entryconfigure(entry, state=state) - - def prompt_save_running_session(self, quit_app: bool = False) -> None: - """ - Prompt use to stop running session before application is closed - - :param quit_app: True to quit app, False otherwise - """ - result = True - if self.core.is_runtime(): - result = messagebox.askyesnocancel("Exit", "Stop the running session?") - if result: - self.core.delete_session() - if quit_app: - self.app.quit() - - def click_new(self) -> None: - self.prompt_save_running_session() - self.core.create_new_session() - - def click_find(self, _event: tk.Event = None) -> None: - dialog = FindDialog(self.app) - dialog.show() - - def click_preferences(self) -> None: - dialog = PreferencesDialog(self.app) - dialog.show() - - def click_canvas_add(self) -> None: - self.manager.add_canvas() - - def click_canvas_delete(self) -> None: - self.manager.delete_canvas() - - def click_canvas_size_and_scale(self) -> None: - dialog = SizeAndScaleDialog(self.app) - dialog.show() - - def click_canvas_wallpaper(self) -> None: - dialog = CanvasWallpaperDialog(self.app) - dialog.show() - - def click_core_github(self) -> None: - webbrowser.open_new("https://github.com/coreemu/core") - - def 
click_core_doc(self) -> None: - webbrowser.open_new("http://coreemu.github.io/core/") - - def click_about(self) -> None: - dialog = AboutDialog(self.app) - dialog.show() - - def click_throughput(self) -> None: - if self.core.show_throughputs.get(): - self.core.enable_throughputs() - else: - self.core.cancel_throughputs() - - def click_config_throughput(self) -> None: - dialog = ThroughputDialog(self.app) - dialog.show() - - def click_copy(self, _event: tk.Event = None) -> None: - canvas = self.manager.current() - canvas.copy_selected() - - def click_paste(self, event: tk.Event = None) -> None: - canvas = self.manager.current() - canvas.paste_selected(event) - - def click_delete(self, event: tk.Event = None) -> None: - canvas = self.manager.current() - canvas.delete_selected(event) - - def click_hide(self, event: tk.Event = None) -> None: - canvas = self.manager.current() - canvas.hide_selected(event) - - def click_cut(self, event: tk.Event = None) -> None: - canvas = self.manager.current() - canvas.copy_selected(event) - canvas.delete_selected(event) - - def click_show_hidden(self, _event: tk.Event = None) -> None: - for canvas in self.manager.all(): - canvas.show_hidden() - - def click_session_options(self) -> None: - logger.debug("Click options") - dialog = SessionOptionsDialog(self.app) - if not dialog.has_error: - dialog.show() - - def click_sessions(self) -> None: - logger.debug("Click change sessions") - dialog = SessionsDialog(self.app) - dialog.show() - - def click_hooks(self) -> None: - logger.debug("Click hooks") - dialog = HooksDialog(self.app) - dialog.show() - - def click_servers(self) -> None: - logger.debug("Click emulation servers") - dialog = ServersDialog(self.app) - dialog.show() - - def click_edit_observer_widgets(self) -> None: - dialog = ObserverDialog(self.app) - dialog.show() - - def click_autogrid(self) -> None: - width, height = self.manager.current().current_dimensions - padding = (images.NODE_SIZE / 2) + 10 - layout_size = padding + images.NODE_SIZE - col_count = width // layout_size - logger.info( - "auto grid layout: dimension(%s, %s) col(%s)", width, height, col_count - ) - canvas = self.manager.current() - for i, node in enumerate(canvas.nodes.values()): - col = i % col_count - row = i // col_count - x = (col * layout_size) + padding - y = (row * layout_size) + padding - node.move(x, y) - - def click_infobar_change(self) -> None: - if self.app.show_infobar.get(): - self.app.show_info() - else: - self.app.hide_info() - - def click_edge_label_change(self) -> None: - for edge in self.manager.edges.values(): - edge.draw_labels() - - def click_mac_config(self) -> None: - dialog = MacConfigDialog(self.app) - dialog.show() - - def click_ip_config(self) -> None: - dialog = IpConfigDialog(self.app) - dialog.show() - - def click_custom_nodes(self) -> None: - dialog = CustomNodesDialog(self.app) - dialog.show() diff --git a/daemon/core/gui/nodeutils.py b/daemon/core/gui/nodeutils.py deleted file mode 100644 index 0b3e3d9a..00000000 --- a/daemon/core/gui/nodeutils.py +++ /dev/null @@ -1,195 +0,0 @@ -import logging -from typing import TYPE_CHECKING, Optional - -from PIL.ImageTk import PhotoImage - -from core.api.grpc.wrappers import Node, NodeType -from core.gui import images -from core.gui.appconfig import CustomNode, GuiConfig -from core.gui.images import ImageEnum - -logger = logging.getLogger(__name__) - -if TYPE_CHECKING: - from core.gui.app import Application - -NODES: list["NodeDraw"] = [] -NETWORK_NODES: list["NodeDraw"] = [] -NODE_ICONS = {} -CONTAINER_NODES: 
set[NodeType] = { - NodeType.DEFAULT, - NodeType.DOCKER, - NodeType.LXC, - NodeType.PODMAN, -} -IMAGE_NODES: set[NodeType] = {NodeType.DOCKER, NodeType.LXC, NodeType.PODMAN} -WIRELESS_NODES: set[NodeType] = { - NodeType.WIRELESS_LAN, - NodeType.EMANE, - NodeType.WIRELESS, -} -RJ45_NODES: set[NodeType] = {NodeType.RJ45} -BRIDGE_NODES: set[NodeType] = {NodeType.HUB, NodeType.SWITCH} -IGNORE_NODES: set[NodeType] = {NodeType.CONTROL_NET} -MOBILITY_NODES: set[NodeType] = {NodeType.WIRELESS_LAN, NodeType.EMANE} -NODE_MODELS: set[str] = {"router", "PC", "mdr", "prouter"} -ROUTER_NODES: set[str] = {"router", "mdr"} -ANTENNA_ICON: Optional[PhotoImage] = None - - -def setup() -> None: - global ANTENNA_ICON - nodes = [ - (ImageEnum.PC, NodeType.DEFAULT, "PC", "PC"), - (ImageEnum.MDR, NodeType.DEFAULT, "MDR", "mdr"), - (ImageEnum.ROUTER, NodeType.DEFAULT, "Router", "router"), - (ImageEnum.PROUTER, NodeType.DEFAULT, "PRouter", "prouter"), - (ImageEnum.DOCKER, NodeType.DOCKER, "Docker", None), - (ImageEnum.LXC, NodeType.LXC, "LXC", None), - (ImageEnum.PODMAN, NodeType.PODMAN, "Podman", None), - ] - for image_enum, node_type, label, model in nodes: - node_draw = NodeDraw.from_setup(image_enum, node_type, label, model) - NODES.append(node_draw) - NODE_ICONS[(node_type, model)] = node_draw.image - network_nodes = [ - (ImageEnum.HUB, NodeType.HUB, "Hub"), - (ImageEnum.SWITCH, NodeType.SWITCH, "Switch"), - (ImageEnum.WLAN, NodeType.WIRELESS_LAN, "WLAN"), - (ImageEnum.WIRELESS, NodeType.WIRELESS, "Wireless"), - (ImageEnum.EMANE, NodeType.EMANE, "EMANE"), - (ImageEnum.RJ45, NodeType.RJ45, "RJ45"), - (ImageEnum.TUNNEL, NodeType.TUNNEL, "Tunnel"), - ] - for image_enum, node_type, label in network_nodes: - node_draw = NodeDraw.from_setup(image_enum, node_type, label) - NETWORK_NODES.append(node_draw) - NODE_ICONS[(node_type, None)] = node_draw.image - ANTENNA_ICON = images.from_enum(ImageEnum.ANTENNA, width=images.ANTENNA_SIZE) - - -def is_bridge(node: Node) -> bool: - return node.type in BRIDGE_NODES - - -def is_mobility(node: Node) -> bool: - return node.type in MOBILITY_NODES - - -def is_router(node: Node) -> bool: - return is_model(node) and node.model in ROUTER_NODES - - -def should_ignore(node: Node) -> bool: - return node.type in IGNORE_NODES - - -def is_container(node: Node) -> bool: - return node.type in CONTAINER_NODES - - -def is_model(node: Node) -> bool: - return node.type == NodeType.DEFAULT - - -def has_image(node_type: NodeType) -> bool: - return node_type in IMAGE_NODES - - -def is_wireless(node: Node) -> bool: - return node.type in WIRELESS_NODES - - -def is_rj45(node: Node) -> bool: - return node.type in RJ45_NODES - - -def is_custom(node: Node) -> bool: - return is_model(node) and node.model not in NODE_MODELS - - -def is_iface_node(node: Node) -> bool: - return is_container(node) or is_bridge(node) - - -def get_custom_services(gui_config: GuiConfig, name: str) -> list[str]: - for custom_node in gui_config.nodes: - if custom_node.name == name: - return custom_node.services - return [] - - -def _get_custom_file(config: GuiConfig, name: str) -> Optional[str]: - for custom_node in config.nodes: - if custom_node.name == name: - return custom_node.image - return None - - -def get_icon(node: Node, app: "Application") -> PhotoImage: - scale = app.app_scale - image = None - # node icon was overridden with a specific value - if node.icon: - try: - image = images.from_file(node.icon, width=images.NODE_SIZE, scale=scale) - except OSError: - logger.error("invalid icon: %s", node.icon) - # custom node - 
elif is_custom(node): - image_file = _get_custom_file(app.guiconfig, node.model) - logger.info("custom node file: %s", image_file) - if image_file: - image = images.from_file(image_file, width=images.NODE_SIZE, scale=scale) - # built in node - else: - image = images.from_node(node, scale=scale) - # default image, if everything above fails - if not image: - image = images.from_enum( - ImageEnum.EDITNODE, width=images.NODE_SIZE, scale=scale - ) - return image - - -class NodeDraw: - def __init__(self) -> None: - self.custom: bool = False - self.image: Optional[PhotoImage] = None - self.image_enum: Optional[ImageEnum] = None - self.image_file: Optional[str] = None - self.node_type: Optional[NodeType] = None - self.model: Optional[str] = None - self.services: set[str] = set() - self.label: Optional[str] = None - - @classmethod - def from_setup( - cls, - image_enum: ImageEnum, - node_type: NodeType, - label: str, - model: str = None, - tooltip: str = None, - ) -> "NodeDraw": - node_draw = NodeDraw() - node_draw.image_enum = image_enum - node_draw.image = images.from_enum(image_enum, width=images.NODE_SIZE) - node_draw.node_type = node_type - node_draw.label = label - node_draw.model = model - node_draw.tooltip = tooltip - return node_draw - - @classmethod - def from_custom(cls, custom_node: CustomNode) -> "NodeDraw": - node_draw = NodeDraw() - node_draw.custom = True - node_draw.image_file = custom_node.image - node_draw.image = images.from_file(custom_node.image, width=images.NODE_SIZE) - node_draw.node_type = NodeType.DEFAULT - node_draw.services = set(custom_node.services) - node_draw.label = custom_node.name - node_draw.model = custom_node.name - node_draw.tooltip = custom_node.name - return node_draw diff --git a/daemon/core/gui/observers.py b/daemon/core/gui/observers.py deleted file mode 100644 index 8cf026bd..00000000 --- a/daemon/core/gui/observers.py +++ /dev/null @@ -1,66 +0,0 @@ -import tkinter as tk -from functools import partial -from typing import TYPE_CHECKING - -from core.gui.dialogs.observers import ObserverDialog - -if TYPE_CHECKING: - from core.gui.app import Application - -OBSERVERS: dict[str, str] = { - "List Processes": "ps", - "Show Interfaces": "ip address", - "IPV4 Routes": "ip -4 route", - "IPV6 Routes": "ip -6 route", - "Listening Sockets": "ss -tuwnl", - "IPv4 MFC Entries": "ip -4 mroute show", - "IPv6 MFC Entries": "ip -6 mroute show", - "Firewall Rules": "iptables -L", - "IPSec Policies": "setkey -DP", -} - - -class ObserversMenu(tk.Menu): - def __init__(self, master: tk.BaseWidget, app: "Application") -> None: - super().__init__(master) - self.app: "Application" = app - self.observer: tk.StringVar = tk.StringVar(value=tk.NONE) - self.custom_index: int = 0 - self.draw() - - def draw(self) -> None: - self.add_command(label="Edit Observers", command=self.click_edit) - self.add_separator() - self.add_radiobutton( - label="None", - variable=self.observer, - value="none", - command=lambda: self.app.core.set_observer(None), - ) - for name in sorted(OBSERVERS): - cmd = OBSERVERS[name] - self.add_radiobutton( - label=name, - variable=self.observer, - value=name, - command=partial(self.app.core.set_observer, cmd), - ) - self.custom_index = self.index(tk.END) + 1 - self.draw_custom() - - def draw_custom(self) -> None: - current_index = self.index(tk.END) + 1 - if self.custom_index < current_index: - self.delete(self.custom_index, tk.END) - for name in sorted(self.app.core.custom_observers): - observer = self.app.core.custom_observers[name] - self.add_radiobutton( - 
label=name, - variable=self.observer, - value=name, - command=partial(self.app.core.set_observer, observer.cmd), - ) - - def click_edit(self) -> None: - dialog = ObserverDialog(self.app) - dialog.show() diff --git a/daemon/core/gui/statusbar.py b/daemon/core/gui/statusbar.py deleted file mode 100644 index a4967cd6..00000000 --- a/daemon/core/gui/statusbar.py +++ /dev/null @@ -1,94 +0,0 @@ -""" -status bar -""" -import tkinter as tk -from tkinter import ttk -from typing import TYPE_CHECKING, Optional - -from core.api.grpc.wrappers import ExceptionEvent, ExceptionLevel -from core.gui.dialogs.alerts import AlertsDialog -from core.gui.themes import Styles - -if TYPE_CHECKING: - from core.gui.app import Application - - -class StatusBar(ttk.Frame): - def __init__(self, master: tk.Widget, app: "Application") -> None: - super().__init__(master) - self.app: "Application" = app - self.status: Optional[ttk.Label] = None - self.statusvar: tk.StringVar = tk.StringVar() - self.zoom: Optional[ttk.Label] = None - self.cpu_label: Optional[ttk.Label] = None - self.alerts_button: Optional[ttk.Button] = None - self.alert_style = Styles.no_alert - self.running: bool = False - self.core_alarms: list[ExceptionEvent] = [] - self.draw() - - def draw(self) -> None: - self.columnconfigure(0, weight=7) - self.columnconfigure(1, weight=1) - self.columnconfigure(2, weight=1) - self.columnconfigure(3, weight=1) - - frame = ttk.Frame(self, borderwidth=1, relief=tk.RIDGE) - frame.grid(row=0, column=0, sticky=tk.EW) - frame.columnconfigure(0, weight=1) - - self.status = ttk.Label( - self, - textvariable=self.statusvar, - anchor=tk.CENTER, - borderwidth=1, - relief=tk.RIDGE, - ) - self.status.grid(row=0, column=0, sticky=tk.EW) - - self.zoom = ttk.Label(self, anchor=tk.CENTER, borderwidth=1, relief=tk.RIDGE) - self.zoom.grid(row=0, column=1, sticky=tk.EW) - - self.cpu_label = ttk.Label( - self, anchor=tk.CENTER, borderwidth=1, relief=tk.RIDGE - ) - self.cpu_label.grid(row=0, column=2, sticky=tk.EW) - self.set_cpu(0.0) - - self.alerts_button = ttk.Button( - self, text="Alerts", command=self.click_alerts, style=self.alert_style - ) - self.alerts_button.grid(row=0, column=3, sticky=tk.EW) - - def set_cpu(self, usage: float) -> None: - self.cpu_label.config(text=f"CPU {usage * 100:.2f}%") - - def set_zoom(self, zoom: float) -> None: - self.zoom.config(text=f"ZOOM {zoom * 100:.0f}%") - - def add_alert(self, event: ExceptionEvent) -> None: - self.core_alarms.append(event) - level = event.level - self._set_alert_style(level) - label = f"Alerts ({len(self.core_alarms)})" - self.alerts_button.config(text=label, style=self.alert_style) - - def _set_alert_style(self, level: ExceptionLevel) -> None: - if level in [ExceptionLevel.FATAL, ExceptionLevel.ERROR]: - self.alert_style = Styles.red_alert - elif level == ExceptionLevel.WARNING and self.alert_style != Styles.red_alert: - self.alert_style = Styles.yellow_alert - elif self.alert_style == Styles.no_alert: - self.alert_style = Styles.green_alert - - def clear_alerts(self): - self.core_alarms.clear() - self.alert_style = Styles.no_alert - self.alerts_button.config(text="Alerts", style=self.alert_style) - - def click_alerts(self) -> None: - dialog = AlertsDialog(self.app) - dialog.show() - - def set_status(self, message: str) -> None: - self.statusvar.set(message) diff --git a/daemon/core/gui/task.py b/daemon/core/gui/task.py deleted file mode 100644 index 6bbeb70f..00000000 --- a/daemon/core/gui/task.py +++ /dev/null @@ -1,59 +0,0 @@ -import logging -import threading -import time 
-import tkinter as tk -from typing import TYPE_CHECKING, Any, Callable, Optional - -logger = logging.getLogger(__name__) - -if TYPE_CHECKING: - from core.gui.app import Application - - -class ProgressTask: - def __init__( - self, - app: "Application", - title: str, - task: Callable, - callback: Callable = None, - args: tuple[Any] = None, - ): - self.app: "Application" = app - self.title: str = title - self.task: Callable = task - self.callback: Callable = callback - if args is None: - args = () - self.args: tuple[Any] = args - self.time: Optional[float] = None - - def start(self) -> None: - self.app.progress.grid(sticky=tk.EW, columnspan=2) - self.app.progress.start() - self.time = time.perf_counter() - thread = threading.Thread(target=self.run, daemon=True) - thread.start() - - def run(self) -> None: - try: - values = self.task(*self.args) - if values is None: - values = () - elif values is not None and not isinstance(values, tuple): - values = (values,) - if self.callback: - self.app.after(0, self.callback, *values) - except Exception as e: - logger.exception("progress task exception") - self.app.show_exception("Task Error", e) - finally: - self.app.after(0, self.complete) - - def complete(self) -> None: - self.app.progress.stop() - self.app.progress.grid_forget() - total = time.perf_counter() - self.time - self.time = None - message = f"{self.title} ran for {total:.3f} seconds" - self.app.statusbar.set_status(message) diff --git a/daemon/core/gui/themes.py b/daemon/core/gui/themes.py deleted file mode 100644 index cb6280e5..00000000 --- a/daemon/core/gui/themes.py +++ /dev/null @@ -1,210 +0,0 @@ -import tkinter as tk -from tkinter import font, ttk - -THEME_DARK: str = "black" -PADX: tuple[int, int] = (0, 5) -PADY: tuple[int, int] = (0, 5) -FRAME_PAD: int = 5 -DIALOG_PAD: int = 5 - - -class Styles: - tooltip: str = "Tooltip.TLabel" - tooltip_frame: str = "Tooltip.TFrame" - service_checkbutton: str = "Service.TCheckbutton" - picker_button: str = "Picker.TButton" - no_alert: str = "NAlert.TButton" - green_alert: str = "GAlert.TButton" - red_alert: str = "RAlert.TButton" - yellow_alert: str = "YAlert.TButton" - - -class Colors: - disabledfg: str = "DarkGrey" - frame: str = "#424242" - dark: str = "#222222" - darker: str = "#121212" - darkest: str = "black" - lighter: str = "#626262" - lightest: str = "#ffffff" - selectbg: str = "#4a6984" - selectfg: str = "#ffffff" - white: str = "white" - black: str = "black" - listboxbg: str = "#f2f1f0" - - -def load(style: ttk.Style) -> None: - style.theme_create( - THEME_DARK, - "clam", - { - ".": { - "configure": { - "background": Colors.frame, - "foreground": Colors.white, - "bordercolor": Colors.darkest, - "darkcolor": Colors.dark, - "lightcolor": Colors.lighter, - "troughcolor": Colors.darker, - "selectbackground": Colors.selectbg, - "selectforeground": Colors.selectfg, - "selectborderwidth": 0, - "font": "TkDefaultFont", - }, - "map": { - "background": [ - ("disabled", Colors.frame), - ("active", Colors.lighter), - ], - "foreground": [("disabled", Colors.disabledfg)], - "selectbackground": [("!focus", Colors.darkest)], - "selectforeground": [("!focus", Colors.white)], - }, - }, - "TButton": { - "configure": { - "width": 8, - "padding": (5, 1), - "relief": tk.RAISED, - "anchor": tk.CENTER, - }, - "map": { - "relief": [("pressed", tk.SUNKEN)], - "shiftrelief": [("pressed", 1)], - }, - }, - "TMenubutton": {"configure": {"padding": (5, 1), "relief": tk.RAISED}}, - "TCheckbutton": { - "configure": { - "indicatorbackground": Colors.white, - 
"indicatormargin": (1, 1, 4, 1), - } - }, - "TRadiobutton": { - "configure": { - "indicatorbackground": Colors.white, - "indicatormargin": (1, 1, 4, 1), - } - }, - "TEntry": { - "configure": { - "fieldbackground": Colors.white, - "foreground": Colors.black, - "padding": (2, 0), - }, - "map": {"fieldbackground": [("disabled", Colors.frame)]}, - }, - "TSpinbox": { - "configure": { - "fieldbackground": Colors.white, - "foreground": Colors.black, - "padding": (2, 0), - }, - "map": {"fieldbackground": [("disabled", Colors.frame)]}, - }, - "TCombobox": { - "configure": { - "fieldbackground": Colors.white, - "foreground": Colors.black, - "padding": (2, 0), - } - }, - "TLabelframe": {"configure": {"relief": tk.GROOVE}}, - "TNotebook.Tab": { - "configure": {"padding": (6, 2, 6, 2)}, - "map": {"background": [("selected", Colors.lighter)]}, - }, - "Treeview": { - "configure": { - "fieldbackground": Colors.white, - "background": Colors.white, - "foreground": Colors.black, - }, - "map": { - "background": [("selected", Colors.selectbg)], - "foreground": [("selected", Colors.selectfg)], - }, - }, - Styles.tooltip: { - "configure": {"justify": tk.LEFT, "relief": tk.SOLID, "borderwidth": 0} - }, - Styles.tooltip_frame: {"configure": {}}, - Styles.service_checkbutton: { - "configure": { - "background": Colors.listboxbg, - "foreground": Colors.black, - } - }, - }, - ) - - -def theme_change_menu(event: tk.Event) -> None: - if not isinstance(event.widget, tk.Menu): - return - style_menu(event.widget) - - -def style_menu(widget: tk.Widget) -> None: - style = ttk.Style() - bg = style.lookup(".", "background") - fg = style.lookup(".", "foreground") - abg = style.lookup(".", "lightcolor") - if not abg: - abg = bg - widget.config( - background=bg, foreground=fg, activebackground=abg, activeforeground=fg, bd=0 - ) - - -def style_listbox(widget: tk.Widget) -> None: - style = ttk.Style() - bg = style.lookup(".", "background") - fg = style.lookup(".", "foreground") - bc = style.lookup(".", "bordercolor") - if not bc: - bc = "black" - widget.config( - background=bg, - foreground=fg, - highlightthickness=1, - highlightcolor=bc, - highlightbackground=bc, - bd=0, - ) - - -def _alert_style(style: ttk.Style, name: str, background: str): - style.configure( - name, - background=background, - padding=0, - relief=tk.RIDGE, - borderwidth=1, - font="TkDefaultFont", - foreground="black", - highlightbackground="white", - ) - style.map(name, background=[("!active", background), ("active", "white")]) - - -def theme_change(event: tk.Event) -> None: - style = ttk.Style() - style.configure(Styles.picker_button, font="TkSmallCaptionFont") - style.configure( - Styles.no_alert, padding=0, relief=tk.RIDGE, borderwidth=1, font="TkDefaultFont" - ) - _alert_style(style, Styles.green_alert, "green") - _alert_style(style, Styles.yellow_alert, "yellow") - _alert_style(style, Styles.red_alert, "red") - - -def scale_fonts(fonts_size: dict[str, int], scale: float) -> None: - for name in font.names(): - f = font.nametofont(name) - if name in fonts_size: - if name == "TkSmallCaptionFont": - f.config(size=int(fonts_size[name] * scale * 8 / 9)) - else: - f.config(size=int(fonts_size[name] * scale)) diff --git a/daemon/core/gui/toolbar.py b/daemon/core/gui/toolbar.py deleted file mode 100644 index 7c32c0af..00000000 --- a/daemon/core/gui/toolbar.py +++ /dev/null @@ -1,469 +0,0 @@ -import logging -import tkinter as tk -from enum import Enum -from functools import partial -from tkinter import ttk -from typing import TYPE_CHECKING, Callable, Optional - -from 
PIL.ImageTk import PhotoImage - -from core.gui import nodeutils as nutils -from core.gui.dialogs.colorpicker import ColorPickerDialog -from core.gui.dialogs.runtool import RunToolDialog -from core.gui.graph import tags -from core.gui.graph.enums import GraphMode -from core.gui.graph.shapeutils import ShapeType, is_marker -from core.gui.images import ImageEnum -from core.gui.nodeutils import NodeDraw -from core.gui.observers import ObserversMenu -from core.gui.task import ProgressTask -from core.gui.themes import Styles -from core.gui.tooltip import Tooltip - -logger = logging.getLogger(__name__) - -if TYPE_CHECKING: - from core.gui.app import Application - -TOOLBAR_SIZE: int = 32 -PICKER_SIZE: int = 24 - - -class NodeTypeEnum(Enum): - NODE = 0 - NETWORK = 1 - OTHER = 2 - - -def enable_buttons(frame: ttk.Frame, enabled: bool) -> None: - state = tk.NORMAL if enabled else tk.DISABLED - for child in frame.winfo_children(): - child.configure(state=state) - - -class PickerFrame(ttk.Frame): - def __init__(self, app: "Application", button: ttk.Button) -> None: - super().__init__(app) - self.app: "Application" = app - self.button: ttk.Button = button - - def create_node_button(self, node_draw: NodeDraw, func: Callable) -> None: - self.create_button( - node_draw.label, func, node_draw.image_enum, node_draw.image_file - ) - - def create_button( - self, - label: str, - func: Callable, - image_enum: ImageEnum = None, - image_file: str = None, - ) -> None: - if image_enum: - bar_image = self.app.get_enum_icon(image_enum, width=TOOLBAR_SIZE) - image = self.app.get_enum_icon(image_enum, width=PICKER_SIZE) - else: - bar_image = self.app.get_file_icon(image_file, width=TOOLBAR_SIZE) - image = self.app.get_file_icon(image_file, width=PICKER_SIZE) - button = ttk.Button( - self, image=image, text=label, compound=tk.TOP, style=Styles.picker_button - ) - button.image = image - button.bind("", lambda e: func(bar_image)) - button.grid(pady=1) - - def show(self) -> None: - self.button.after(0, self._show) - - def _show(self) -> None: - x = self.button.winfo_width() + 1 - y = self.button.winfo_rooty() - self.app.winfo_rooty() - 1 - self.place(x=x, y=y) - self.app.bind_all("", lambda e: self.destroy()) - self.wait_visibility() - self.grab_set() - self.wait_window() - self.app.unbind_all("") - - -class ButtonBar(ttk.Frame): - def __init__(self, master: tk.Widget, app: "Application") -> None: - super().__init__(master) - self.app: "Application" = app - self.radio_buttons: list[ttk.Button] = [] - - def create_button( - self, image_enum: ImageEnum, func: Callable, tooltip: str, radio: bool = False - ) -> ttk.Button: - image = self.app.get_enum_icon(image_enum, width=TOOLBAR_SIZE) - button = ttk.Button(self, image=image, command=func) - button.image = image - button.grid(sticky=tk.EW) - Tooltip(button, tooltip) - if radio: - self.radio_buttons.append(button) - return button - - def select_radio(self, selected: ttk.Button) -> None: - for button in self.radio_buttons: - button.state(["!pressed"]) - selected.state(["pressed"]) - - -class MarkerFrame(ttk.Frame): - PAD: int = 3 - - def __init__(self, master: tk.BaseWidget, app: "Application") -> None: - super().__init__(master, padding=self.PAD) - self.app: "Application" = app - self.color: str = "#000000" - self.size: tk.DoubleVar = tk.DoubleVar() - self.color_frame: Optional[tk.Frame] = None - self.draw() - - def draw(self) -> None: - self.columnconfigure(0, weight=1) - - image = self.app.get_enum_icon(ImageEnum.DELETE, width=16) - button = ttk.Button(self, image=image, 
width=2, command=self.click_clear) - button.image = image - button.grid(sticky=tk.EW, pady=self.PAD) - Tooltip(button, "Delete Marker") - - sizes = [1, 3, 8, 10] - self.size.set(sizes[0]) - sizes = ttk.Combobox( - self, state="readonly", textvariable=self.size, value=sizes, width=2 - ) - sizes.grid(sticky=tk.EW, pady=self.PAD) - Tooltip(sizes, "Marker Size") - - frame_size = TOOLBAR_SIZE - self.color_frame = tk.Frame( - self, background=self.color, height=frame_size, width=frame_size - ) - self.color_frame.grid(sticky=tk.EW) - self.color_frame.bind("", self.click_color) - Tooltip(self.color_frame, "Marker Color") - - def click_clear(self) -> None: - canvas = self.app.manager.current() - canvas.delete(tags.MARKER) - - def click_color(self, _event: tk.Event) -> None: - dialog = ColorPickerDialog(self.app, self.app, self.color) - self.color = dialog.askcolor() - self.color_frame.config(background=self.color) - - -class Toolbar(ttk.Frame): - """ - Core toolbar class - """ - - def __init__(self, app: "Application") -> None: - """ - Create a CoreToolbar instance - """ - super().__init__(app) - self.app: "Application" = app - - # design buttons - self.play_button: Optional[ttk.Button] = None - self.select_button: Optional[ttk.Button] = None - self.link_button: Optional[ttk.Button] = None - self.node_button: Optional[ttk.Button] = None - self.network_button: Optional[ttk.Button] = None - self.annotation_button: Optional[ttk.Button] = None - - # runtime buttons - self.runtime_select_button: Optional[ttk.Button] = None - self.stop_button: Optional[ttk.Button] = None - self.runtime_marker_button: Optional[ttk.Button] = None - self.run_command_button: Optional[ttk.Button] = None - - # frames - self.design_frame: Optional[ButtonBar] = None - self.runtime_frame: Optional[ButtonBar] = None - self.marker_frame: Optional[MarkerFrame] = None - self.picker: Optional[PickerFrame] = None - - # observers - self.observers_menu: Optional[ObserversMenu] = None - - # these variables help keep track of what images being drawn so that scaling - # is possible since PhotoImage does not have resize method - self.current_node: NodeDraw = nutils.NODES[0] - self.current_network: NodeDraw = nutils.NETWORK_NODES[0] - self.current_annotation: ShapeType = ShapeType.MARKER - self.annotation_enum: ImageEnum = ImageEnum.MARKER - - # draw components - self.draw() - - def draw(self) -> None: - self.columnconfigure(0, weight=1) - self.rowconfigure(0, weight=1) - self.draw_design_frame() - self.draw_runtime_frame() - self.design_frame.tkraise() - self.marker_frame = MarkerFrame(self, self.app) - - def draw_design_frame(self) -> None: - self.design_frame = ButtonBar(self, self.app) - self.design_frame.grid(row=0, column=0, sticky=tk.NSEW) - self.design_frame.columnconfigure(0, weight=1) - self.play_button = self.design_frame.create_button( - ImageEnum.START, self.click_start, "Start Session" - ) - self.select_button = self.design_frame.create_button( - ImageEnum.SELECT, self.click_selection, "Selection Tool", radio=True - ) - self.link_button = self.design_frame.create_button( - ImageEnum.LINK, self.click_link, "Link Tool", radio=True - ) - self.node_button = self.design_frame.create_button( - self.current_node.image_enum, - self.draw_node_picker, - "Container Nodes", - radio=True, - ) - self.network_button = self.design_frame.create_button( - self.current_network.image_enum, - self.draw_network_picker, - "Link Layer Nodes", - radio=True, - ) - self.annotation_button = self.design_frame.create_button( - self.annotation_enum, - 
self.draw_annotation_picker, - "Annotation Tools", - radio=True, - ) - - def draw_runtime_frame(self) -> None: - self.runtime_frame = ButtonBar(self, self.app) - self.runtime_frame.grid(row=0, column=0, sticky=tk.NSEW) - self.runtime_frame.columnconfigure(0, weight=1) - self.stop_button = self.runtime_frame.create_button( - ImageEnum.STOP, self.click_stop, "Stop Session" - ) - self.runtime_select_button = self.runtime_frame.create_button( - ImageEnum.SELECT, self.click_runtime_selection, "Selection Tool", radio=True - ) - self.create_observe_button() - self.runtime_marker_button = self.runtime_frame.create_button( - ImageEnum.MARKER, self.click_marker_button, "Marker Tool", radio=True - ) - self.run_command_button = self.runtime_frame.create_button( - ImageEnum.RUN, self.click_run_button, "Run Tool" - ) - - def draw_node_picker(self) -> None: - self.hide_marker() - self.app.manager.mode = GraphMode.NODE - self.app.manager.node_draw = self.current_node - self.design_frame.select_radio(self.node_button) - self.picker = PickerFrame(self.app, self.node_button) - # draw default nodes - for node_draw in nutils.NODES: - func = partial( - self.update_button, self.node_button, node_draw, NodeTypeEnum.NODE - ) - self.picker.create_node_button(node_draw, func) - # draw custom nodes - for name in sorted(self.app.core.custom_nodes): - node_draw = self.app.core.custom_nodes[name] - func = partial( - self.update_button, self.node_button, node_draw, NodeTypeEnum.NODE - ) - self.picker.create_node_button(node_draw, func) - self.picker.show() - - def click_selection(self) -> None: - self.design_frame.select_radio(self.select_button) - self.app.manager.mode = GraphMode.SELECT - self.hide_marker() - - def click_runtime_selection(self) -> None: - self.runtime_frame.select_radio(self.runtime_select_button) - self.app.manager.mode = GraphMode.SELECT - self.hide_marker() - - def click_start(self) -> None: - """ - Start session handler redraw buttons, send node and link messages to grpc - server. 
- """ - self.app.menubar.set_state(is_runtime=True) - self.app.manager.mode = GraphMode.SELECT - enable_buttons(self.design_frame, enabled=False) - task = ProgressTask( - self.app, "Start", self.app.core.start_session, self.start_callback - ) - task.start() - - def start_callback(self, result: bool, exceptions: list[str]) -> None: - self.set_runtime() - self.app.core.show_mobility_players() - if not result and exceptions: - message = "\n".join(exceptions) - self.app.show_exception_data( - "Start Exception", "Session failed to start", message - ) - - def set_runtime(self) -> None: - enable_buttons(self.runtime_frame, enabled=True) - self.runtime_frame.tkraise() - self.click_runtime_selection() - self.hide_marker() - - def set_design(self) -> None: - enable_buttons(self.design_frame, enabled=True) - self.design_frame.tkraise() - self.click_selection() - self.hide_marker() - - def click_link(self) -> None: - self.design_frame.select_radio(self.link_button) - self.app.manager.mode = GraphMode.EDGE - self.hide_marker() - - def update_button( - self, - button: ttk.Button, - node_draw: NodeDraw, - type_enum: NodeTypeEnum, - image: PhotoImage, - ) -> None: - logger.debug("update button(%s): %s", button, node_draw) - button.configure(image=image) - button.image = image - self.app.manager.node_draw = node_draw - if type_enum == NodeTypeEnum.NODE: - self.current_node = node_draw - elif type_enum == NodeTypeEnum.NETWORK: - self.current_network = node_draw - - def draw_network_picker(self) -> None: - """ - Draw the options for link-layer button. - """ - self.hide_marker() - self.app.manager.mode = GraphMode.NODE - self.app.manager.node_draw = self.current_network - self.design_frame.select_radio(self.network_button) - self.picker = PickerFrame(self.app, self.network_button) - for node_draw in nutils.NETWORK_NODES: - func = partial( - self.update_button, self.network_button, node_draw, NodeTypeEnum.NETWORK - ) - self.picker.create_node_button(node_draw, func) - self.picker.show() - - def draw_annotation_picker(self) -> None: - """ - Draw the options for marker button. 
- """ - self.design_frame.select_radio(self.annotation_button) - self.app.manager.mode = GraphMode.ANNOTATION - self.app.manager.annotation_type = self.current_annotation - if is_marker(self.current_annotation): - self.show_marker() - self.picker = PickerFrame(self.app, self.annotation_button) - nodes = [ - (ImageEnum.MARKER, ShapeType.MARKER), - (ImageEnum.OVAL, ShapeType.OVAL), - (ImageEnum.RECTANGLE, ShapeType.RECTANGLE), - (ImageEnum.TEXT, ShapeType.TEXT), - ] - for image_enum, shape_type in nodes: - label = shape_type.value - func = partial(self.update_annotation, shape_type, image_enum) - self.picker.create_button(label, func, image_enum) - self.picker.show() - - def create_observe_button(self) -> None: - image = self.app.get_enum_icon(ImageEnum.OBSERVE, width=TOOLBAR_SIZE) - menu_button = ttk.Menubutton( - self.runtime_frame, image=image, direction=tk.RIGHT - ) - menu_button.image = image - menu_button.grid(sticky=tk.EW) - self.observers_menu = ObserversMenu(menu_button, self.app) - menu_button["menu"] = self.observers_menu - - def click_stop(self) -> None: - """ - redraw buttons on the toolbar, send node and link messages to grpc server - """ - logger.info("clicked stop button") - self.app.menubar.set_state(is_runtime=False) - self.app.core.close_mobility_players() - enable_buttons(self.runtime_frame, enabled=False) - task = ProgressTask( - self.app, "Stop", self.app.core.stop_session, self.stop_callback - ) - task.start() - - def stop_callback(self, result: bool) -> None: - self.set_design() - self.app.manager.stopped_session() - - def update_annotation( - self, shape_type: ShapeType, image_enum: ImageEnum, image: PhotoImage - ) -> None: - logger.debug("clicked annotation") - self.annotation_button.configure(image=image) - self.annotation_button.image = image - self.app.manager.annotation_type = shape_type - self.current_annotation = shape_type - self.annotation_enum = image_enum - if is_marker(shape_type): - self.show_marker() - else: - self.hide_marker() - - def hide_marker(self) -> None: - self.marker_frame.grid_forget() - - def show_marker(self) -> None: - self.marker_frame.grid() - - def click_run_button(self) -> None: - logger.debug("Click on RUN button") - dialog = RunToolDialog(self.app) - dialog.show() - - def click_marker_button(self) -> None: - self.runtime_frame.select_radio(self.runtime_marker_button) - self.app.manager.mode = GraphMode.ANNOTATION - self.app.manager.annotation_type = ShapeType.MARKER - self.show_marker() - - def scale_button( - self, button: ttk.Button, image_enum: ImageEnum = None, image_file: str = None - ) -> None: - image = None - if image_enum: - image = self.app.get_enum_icon(image_enum, width=TOOLBAR_SIZE) - elif image_file: - image = self.app.get_file_icon(image_file, width=TOOLBAR_SIZE) - if image: - button.config(image=image) - button.image = image - - def scale(self) -> None: - self.scale_button(self.play_button, ImageEnum.START) - self.scale_button(self.select_button, ImageEnum.SELECT) - self.scale_button(self.link_button, ImageEnum.LINK) - if self.current_node.image_enum: - self.scale_button(self.node_button, self.current_node.image_enum) - else: - self.scale_button(self.node_button, image_file=self.current_node.image_file) - self.scale_button(self.network_button, self.current_network.image_enum) - self.scale_button(self.annotation_button, self.annotation_enum) - self.scale_button(self.runtime_select_button, ImageEnum.SELECT) - self.scale_button(self.stop_button, ImageEnum.STOP) - self.scale_button(self.runtime_marker_button, 
ImageEnum.MARKER) - self.scale_button(self.run_command_button, ImageEnum.RUN) diff --git a/daemon/core/gui/tooltip.py b/daemon/core/gui/tooltip.py deleted file mode 100644 index 6d84ac75..00000000 --- a/daemon/core/gui/tooltip.py +++ /dev/null @@ -1,55 +0,0 @@ -import tkinter as tk -from tkinter import ttk -from typing import Optional - -from core.gui.themes import Styles - - -class Tooltip: - """ - Create tool tip for a given widget - """ - - def __init__(self, widget: tk.BaseWidget, text: str = "widget info") -> None: - self.widget: tk.BaseWidget = widget - self.text: str = text - self.widget.bind("", self.on_enter) - self.widget.bind("", self.on_leave) - self.waittime: int = 400 - self.id: Optional[str] = None - self.tw: Optional[tk.Toplevel] = None - - def on_enter(self, event: tk.Event = None) -> None: - self.schedule() - - def on_leave(self, event: tk.Event = None) -> None: - self.unschedule() - self.close(event) - - def schedule(self): - self.unschedule() - self.id = self.widget.after(self.waittime, self.enter) - - def unschedule(self): - id_ = self.id - self.id = None - if id_: - self.widget.after_cancel(id_) - - def enter(self, event: tk.Event = None): - x, y, cx, cy = self.widget.bbox("insert") - x += self.widget.winfo_rootx() - y += self.widget.winfo_rooty() + 32 - self.tw = tk.Toplevel(self.widget) - self.tw.wm_overrideredirect(True) - self.tw.wm_geometry(f"+{x:d}+{y:d}") - self.tw.rowconfigure(0, weight=1) - self.tw.columnconfigure(0, weight=1) - frame = ttk.Frame(self.tw, style=Styles.tooltip_frame, padding=3) - frame.grid(sticky=tk.NSEW) - label = ttk.Label(frame, text=self.text, style=Styles.tooltip) - label.grid() - - def close(self, event: tk.Event = None): - if self.tw: - self.tw.destroy() diff --git a/daemon/core/gui/utils.py b/daemon/core/gui/utils.py deleted file mode 100644 index 59171ae9..00000000 --- a/daemon/core/gui/utils.py +++ /dev/null @@ -1,22 +0,0 @@ -from typing import Optional - - -def bandwidth_text(bandwidth: int) -> str: - size = {0: "bps", 1: "Kbps", 2: "Mbps", 3: "Gbps"} - unit = 1000 - i = 0 - while bandwidth > unit: - bandwidth /= unit - i += 1 - if i == 3: - break - return f"{bandwidth} {size[i]}" - - -def delay_jitter_text(delay: int, jitter: int) -> Optional[str]: - line = None - if delay > 0 and jitter > 0: - line = f"{delay} us (\u00B1{jitter} us)" - elif jitter > 0: - line = f"0 us (\u00B1{jitter} us)" - return line diff --git a/daemon/core/gui/validation.py b/daemon/core/gui/validation.py deleted file mode 100644 index 61500e84..00000000 --- a/daemon/core/gui/validation.py +++ /dev/null @@ -1,124 +0,0 @@ -""" -input validation -""" -import re -import tkinter as tk -from re import Pattern -from tkinter import ttk -from typing import Any, Optional - -SMALLEST_SCALE: float = 0.5 -LARGEST_SCALE: float = 5.0 -HEX_REGEX: Pattern = re.compile("^([#]([0-9]|[a-f])+)$|^[#]$") - - -class ValidationEntry(ttk.Entry): - empty: Optional[str] = None - - def __init__( - self, - master: tk.BaseWidget = None, - widget: tk.BaseWidget = None, - empty_enabled: bool = True, - **kwargs: Any - ) -> None: - super().__init__(master, widget, **kwargs) - cmd = self.register(self.is_valid) - self.configure(validate="key", validatecommand=(cmd, "%P")) - if self.empty is not None and empty_enabled: - self.bind("", self.focus_out) - - def is_valid(self, s: str) -> bool: - raise NotImplementedError - - def focus_out(self, _event: tk.Event) -> None: - value = self.get() - if not value: - self.insert(tk.END, self.empty) - - -class PositiveIntEntry(ValidationEntry): - empty: str 
= "0" - - def is_valid(self, s: str) -> bool: - if not s: - return True - try: - value = int(s) - return value >= 0 - except ValueError: - return False - - -class PositiveFloatEntry(ValidationEntry): - empty = "0.0" - - def is_valid(self, s: str) -> bool: - if not s: - return True - try: - value = float(s) - return value >= 0.0 - except ValueError: - return False - - -class FloatEntry(ValidationEntry): - empty = "0.0" - - def is_valid(self, s: str) -> bool: - if not s: - return True - try: - float(s) - return True - except ValueError: - return False - - -class RgbEntry(ValidationEntry): - def is_valid(self, s: str) -> bool: - if not s: - return True - if s.startswith("0") and len(s) >= 2: - return False - try: - value = int(s) - return 0 <= value <= 255 - except ValueError: - return False - - -class HexEntry(ValidationEntry): - def is_valid(self, s: str) -> bool: - if not s: - return True - if HEX_REGEX.match(s): - return 0 <= len(s) <= 7 - else: - return False - - -class NodeNameEntry(ValidationEntry): - empty: str = "noname" - - def is_valid(self, s: str) -> bool: - if len(s) < 0: - return False - if len(s) == 0: - return True - for x in s: - if not x.isalnum() and x != "-": - return False - return True - - -class AppScaleEntry(ValidationEntry): - def is_valid(self, s: str) -> bool: - if not s: - return True - try: - float_value = float(s) - return SMALLEST_SCALE <= float_value <= LARGEST_SCALE or float_value == 0 - except ValueError: - return False diff --git a/daemon/core/gui/widgets.py b/daemon/core/gui/widgets.py deleted file mode 100644 index 902f1132..00000000 --- a/daemon/core/gui/widgets.py +++ /dev/null @@ -1,285 +0,0 @@ -import logging -import tkinter as tk -from functools import partial -from pathlib import Path -from tkinter import filedialog, font, ttk -from typing import TYPE_CHECKING, Any, Callable - -from core.api.grpc.wrappers import ConfigOption, ConfigOptionType -from core.gui import appconfig, themes, validation -from core.gui.dialogs.dialog import Dialog -from core.gui.themes import FRAME_PAD, PADX, PADY - -logger = logging.getLogger(__name__) - -if TYPE_CHECKING: - from core.gui.app import Application - -INT_TYPES: set[ConfigOptionType] = { - ConfigOptionType.UINT8, - ConfigOptionType.UINT16, - ConfigOptionType.UINT32, - ConfigOptionType.UINT64, - ConfigOptionType.INT8, - ConfigOptionType.INT16, - ConfigOptionType.INT32, - ConfigOptionType.INT64, -} - - -def file_button_click(value: tk.StringVar, parent: tk.Widget) -> None: - file_path = filedialog.askopenfilename( - title="Select File", initialdir=str(appconfig.HOME_PATH), parent=parent - ) - if file_path: - value.set(file_path) - - -class FrameScroll(ttk.Frame): - def __init__( - self, - master: tk.Widget, - app: "Application", - _cls: type[ttk.Frame] = ttk.Frame, - **kw: Any - ) -> None: - super().__init__(master, **kw) - self.app: "Application" = app - self.rowconfigure(0, weight=1) - self.columnconfigure(0, weight=1) - bg = self.app.style.lookup(".", "background") - self.canvas: tk.Canvas = tk.Canvas(self, highlightthickness=0, background=bg) - self.canvas.grid(row=0, sticky=tk.NSEW, padx=2, pady=2) - self.canvas.columnconfigure(0, weight=1) - self.canvas.rowconfigure(0, weight=1) - self.scrollbar: ttk.Scrollbar = ttk.Scrollbar( - self, orient="vertical", command=self.canvas.yview - ) - self.scrollbar.grid(row=0, column=1, sticky=tk.NS) - self.frame: ttk.Frame = _cls(self.canvas) - self.frame_id: int = self.canvas.create_window( - 0, 0, anchor="nw", window=self.frame - ) - self.canvas.update_idletasks() - 
self.canvas.configure( - scrollregion=self.canvas.bbox("all"), yscrollcommand=self.scrollbar.set - ) - self.frame.bind("", self._configure_frame) - self.canvas.bind("", self._configure_canvas) - - def _configure_frame(self, event: tk.Event) -> None: - req_width = self.frame.winfo_reqwidth() - if req_width != self.canvas.winfo_reqwidth(): - self.canvas.configure(width=req_width) - self.canvas.configure(scrollregion=self.canvas.bbox("all")) - - def _configure_canvas(self, event: tk.Event) -> None: - self.canvas.itemconfig(self.frame_id, width=event.width) - - def clear(self) -> None: - for widget in self.frame.winfo_children(): - widget.destroy() - - -class ConfigFrame(ttk.Notebook): - def __init__( - self, - master: tk.Widget, - app: "Application", - config: dict[str, ConfigOption], - enabled: bool = True, - **kw: Any - ) -> None: - super().__init__(master, **kw) - self.app: "Application" = app - self.config: dict[str, ConfigOption] = config - self.values: dict[str, tk.StringVar] = {} - self.enabled: bool = enabled - - def draw_config(self) -> None: - group_mapping = {} - for key in self.config: - option = self.config[key] - group = group_mapping.setdefault(option.group, []) - group.append(option) - - for group_name in sorted(group_mapping): - group = group_mapping[group_name] - tab = FrameScroll(self, self.app, borderwidth=0, padding=FRAME_PAD) - tab.frame.columnconfigure(1, weight=1) - self.add(tab, text=group_name) - for index, option in enumerate(sorted(group, key=lambda x: x.name)): - label = ttk.Label(tab.frame, text=option.label) - label.grid(row=index, pady=PADY, padx=PADX, sticky=tk.W) - value = tk.StringVar() - if option.type == ConfigOptionType.BOOL: - select = ("On", "Off") - state = "readonly" if self.enabled else tk.DISABLED - combobox = ttk.Combobox( - tab.frame, textvariable=value, values=select, state=state - ) - combobox.grid(row=index, column=1, sticky=tk.EW) - if option.value == "1": - value.set("On") - else: - value.set("Off") - elif option.select: - value.set(option.value) - select = tuple(option.select) - state = "readonly" if self.enabled else tk.DISABLED - combobox = ttk.Combobox( - tab.frame, textvariable=value, values=select, state=state - ) - combobox.grid(row=index, column=1, sticky=tk.EW) - elif option.type == ConfigOptionType.STRING: - value.set(option.value) - state = tk.NORMAL if self.enabled else tk.DISABLED - if "file" in option.label: - file_frame = ttk.Frame(tab.frame) - file_frame.grid(row=index, column=1, sticky=tk.EW) - file_frame.columnconfigure(0, weight=1) - entry = ttk.Entry(file_frame, textvariable=value, state=state) - entry.grid(row=0, column=0, sticky=tk.EW, padx=PADX) - func = partial(file_button_click, value, self) - button = ttk.Button( - file_frame, text="...", command=func, state=state - ) - button.grid(row=0, column=1) - else: - entry = ttk.Entry(tab.frame, textvariable=value, state=state) - entry.grid(row=index, column=1, sticky=tk.EW) - elif option.type in INT_TYPES: - value.set(option.value) - state = tk.NORMAL if self.enabled else tk.DISABLED - entry = validation.PositiveIntEntry( - tab.frame, textvariable=value, state=state - ) - entry.grid(row=index, column=1, sticky=tk.EW) - elif option.type == ConfigOptionType.FLOAT: - value.set(option.value) - state = tk.NORMAL if self.enabled else tk.DISABLED - entry = validation.PositiveFloatEntry( - tab.frame, textvariable=value, state=state - ) - entry.grid(row=index, column=1, sticky=tk.EW) - else: - logger.error("unhandled config option type: %s", option.type) - self.values[option.name] = 
value - - def parse_config(self) -> dict[str, str]: - for key in self.config: - option = self.config[key] - value = self.values[key] - config_value = value.get() - if option.type == ConfigOptionType.BOOL: - if config_value == "On": - option.value = "1" - else: - option.value = "0" - else: - option.value = config_value - return {x: self.config[x].value for x in self.config} - - def set_values(self, config: dict[str, str]) -> None: - for name, data in config.items(): - option = self.config[name] - value = self.values[name] - if option.type == ConfigOptionType.BOOL: - if data == "1": - data = "On" - else: - data = "Off" - value.set(data) - - -class ListboxScroll(ttk.Frame): - def __init__(self, master: tk.BaseWidget = None, **kw: Any) -> None: - super().__init__(master, **kw) - self.columnconfigure(0, weight=1) - self.rowconfigure(0, weight=1) - self.scrollbar: ttk.Scrollbar = ttk.Scrollbar(self, orient=tk.VERTICAL) - self.scrollbar.grid(row=0, column=1, sticky=tk.NS) - self.listbox: tk.Listbox = tk.Listbox( - self, - selectmode=tk.BROWSE, - yscrollcommand=self.scrollbar.set, - exportselection=False, - ) - themes.style_listbox(self.listbox) - self.listbox.grid(row=0, column=0, sticky=tk.NSEW) - self.scrollbar.config(command=self.listbox.yview) - - -class CheckboxList(FrameScroll): - def __init__( - self, - master: ttk.Widget, - app: "Application", - clicked: Callable = None, - **kw: Any - ) -> None: - super().__init__(master, app, **kw) - self.clicked: Callable = clicked - self.frame.columnconfigure(0, weight=1) - - def add(self, name: str, checked: bool) -> None: - var = tk.BooleanVar(value=checked) - func = partial(self.clicked, name, var) - checkbox = ttk.Checkbutton(self.frame, text=name, variable=var, command=func) - checkbox.grid(sticky=tk.W) - - -class CodeFont(font.Font): - def __init__(self) -> None: - super().__init__(font="TkFixedFont", color="green") - - -class CodeText(ttk.Frame): - def __init__(self, master: tk.BaseWidget, **kwargs: Any) -> None: - super().__init__(master, **kwargs) - self.rowconfigure(0, weight=1) - self.columnconfigure(0, weight=1) - self.text: tk.Text = tk.Text( - self, - bd=0, - bg="black", - cursor="xterm lime lime", - fg="lime", - font=CodeFont(), - highlightbackground="black", - insertbackground="lime", - selectbackground="lime", - selectforeground="black", - relief=tk.FLAT, - ) - self.text.grid(row=0, column=0, sticky=tk.NSEW) - yscrollbar = ttk.Scrollbar(self, orient=tk.VERTICAL, command=self.text.yview) - yscrollbar.grid(row=0, column=1, sticky=tk.NS) - self.text.configure(yscrollcommand=yscrollbar.set) - - def get_text(self) -> str: - return self.text.get(1.0, tk.END) - - def set_text(self, text: str) -> None: - self.text.delete(1.0, tk.END) - self.text.insert(tk.END, text.rstrip()) - - -class Spinbox(ttk.Entry): - def __init__(self, master: tk.BaseWidget = None, **kwargs: Any) -> None: - super().__init__(master, "ttk::spinbox", **kwargs) - - def set(self, value: str) -> None: - self.tk.call(self._w, "set", value) - - -def image_chooser(parent: Dialog, path: Path) -> str: - return filedialog.askopenfilename( - parent=parent, - initialdir=str(path), - title="Select", - filetypes=( - ("images", "*.gif *.jpg *.png *.bmp *pcx *.tga ..."), - ("All Files", "*"), - ), - ) diff --git a/daemon/core/location.py b/daemon/core/location.py new file mode 100644 index 00000000..b95a8670 --- /dev/null +++ b/daemon/core/location.py @@ -0,0 +1,273 @@ +""" +location.py: definition of CoreLocation class that is a member of the +Session object. 
Provides conversions between Cartesian and geographic coordinate +systems. Depends on utm contributed module, from +https://pypi.python.org/pypi/utm (version 0.3.0). +""" + +import logging + +from core.enumerations import RegisterTlvs +from core.misc import utm + + +class CoreLocation(object): + """ + Member of session class for handling global location data. This keeps + track of a latitude/longitude/altitude reference point and scale in + order to convert between X,Y and geo coordinates. + """ + name = "location" + config_type = RegisterTlvs.UTILITY.value + + def __init__(self): + """ + Creates a MobilityManager instance. + + :return: nothing + """ + # ConfigurableManager.__init__(self) + self.reset() + self.zonemap = {} + self.refxyz = (0.0, 0.0, 0.0) + self.refscale = 1.0 + self.zoneshifts = {} + self.refgeo = (0.0, 0.0, 0.0) + for n, l in utm.ZONE_LETTERS: + self.zonemap[l] = n + + def reset(self): + """ + Reset to initial state. + """ + # (x, y, z) coordinates of the point given by self.refgeo + self.refxyz = (0.0, 0.0, 0.0) + # decimal latitude, longitude, and altitude at the point (x, y, z) + self.setrefgeo(0.0, 0.0, 0.0) + # 100 pixels equals this many meters + self.refscale = 1.0 + # cached distance to refpt in other zones + self.zoneshifts = {} + + def px2m(self, val): + """ + Convert the specified value in pixels to meters using the + configured scale. The scale is given as s, where + 100 pixels = s meters. + + :param val: value to use in converting to meters + :return: value converted to meters + """ + return (val / 100.0) * self.refscale + + def m2px(self, val): + """ + Convert the specified value in meters to pixels using the + configured scale. The scale is given as s, where + 100 pixels = s meters. + + :param val: value to convert to pixels + :return: value converted to pixels + """ + if self.refscale == 0.0: + return 0.0 + return 100.0 * (val / self.refscale) + + def setrefgeo(self, lat, lon, alt): + """ + Record the geographical reference point decimal (lat, lon, alt) + and convert and store its UTM equivalent for later use. + + :param lat: latitude + :param lon: longitude + :param alt: altitude + :return: nothing + """ + self.refgeo = (lat, lon, alt) + # easting, northing, zone + e, n, zonen, zonel = utm.from_latlon(lat, lon) + self.refutm = ((zonen, zonel), e, n, alt) + + def getgeo(self, x, y, z): + """ + Given (x, y, z) Cartesian coordinates, convert them to latitude, + longitude, and altitude based on the configured reference point + and scale. 
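The scaling convention used by px2m and m2px above is that 100 canvas pixels correspond to refscale meters. A tiny standalone sketch of just that conversion (the class name is illustrative, not a CORE API):

# Standalone sketch of the "100 pixels == refscale meters" convention used above.
class ScaleDemo:
    def __init__(self, refscale: float = 1.0) -> None:
        self.refscale = refscale   # meters represented by 100 pixels

    def px2m(self, val: float) -> float:
        return (val / 100.0) * self.refscale

    def m2px(self, val: float) -> float:
        if self.refscale == 0.0:
            return 0.0
        return 100.0 * (val / self.refscale)


demo = ScaleDemo(refscale=150.0)   # 100 px == 150 m
assert demo.px2m(100.0) == 150.0
assert demo.m2px(150.0) == 100.0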
+ + :param x: x value + :param y: y value + :param z: z value + :return: lat, lon, alt values for provided coordinates + :rtype: tuple + """ + # shift (x,y,z) over to reference point (x,y,z) + x -= self.refxyz[0] + y = -(y - self.refxyz[1]) + if z is None: + z = self.refxyz[2] + else: + z -= self.refxyz[2] + # use UTM coordinates since unit is meters + zone = self.refutm[0] + if zone == "": + raise ValueError("reference point not configured") + e = self.refutm[1] + self.px2m(x) + n = self.refutm[2] + self.px2m(y) + alt = self.refutm[3] + self.px2m(z) + (e, n, zone) = self.getutmzoneshift(e, n) + try: + lat, lon = utm.to_latlon(e, n, zone[0], zone[1]) + except utm.OutOfRangeError: + logging.exception("UTM out of range error for n=%s zone=%s xyz=(%s,%s,%s)", n, zone, x, y, z) + lat, lon = self.refgeo[:2] + # self.info("getgeo(%s,%s,%s) e=%s n=%s zone=%s lat,lon,alt=" \ + # "%.3f,%.3f,%.3f" % (x, y, z, e, n, zone, lat, lon, alt)) + return lat, lon, alt + + def getxyz(self, lat, lon, alt): + """ + Given latitude, longitude, and altitude location data, convert them + to (x, y, z) Cartesian coordinates based on the configured + reference point and scale. Lat/lon is converted to UTM meter + coordinates, UTM zones are accounted for, and the scale turns + meters to pixels. + + :param lat: latitude + :param lon: longitude + :param alt: altitude + :return: converted x, y, z coordinates + :rtype: tuple + """ + # convert lat/lon to UTM coordinates in meters + e, n, zonen, zonel = utm.from_latlon(lat, lon) + _rlat, _rlon, ralt = self.refgeo + xshift = self.geteastingshift(zonen, zonel) + if xshift is None: + xm = e - self.refutm[1] + else: + xm = e + xshift + yshift = self.getnorthingshift(zonen, zonel) + if yshift is None: + ym = n - self.refutm[2] + else: + ym = n + yshift + zm = alt - ralt + + # shift (x,y,z) over to reference point (x,y,z) + x = self.m2px(xm) + self.refxyz[0] + y = -(self.m2px(ym) + self.refxyz[1]) + z = self.m2px(zm) + self.refxyz[2] + return x, y, z + + def geteastingshift(self, zonen, zonel): + """ + If the lat, lon coordinates being converted are located in a + different UTM zone than the canvas reference point, the UTM meters + may need to be shifted. + This picks a reference point in the same longitudinal band + (UTM zone number) as the provided zone, to calculate the shift in + meters for the x coordinate. + + :param zonen: zonen + :param zonel: zone1 + :return: the x shift value + """ + rzonen = int(self.refutm[0][0]) + # same zone number, no x shift required + if zonen == rzonen: + return None + z = (zonen, zonel) + # x shift already calculated, cached + if z in self.zoneshifts and self.zoneshifts[z][0] is not None: + return self.zoneshifts[z][0] + + rlat, rlon, _ralt = self.refgeo + # ea. zone is 6deg band + lon2 = rlon + 6 * (zonen - rzonen) + # ignore northing + e2, _n2, _zonen2, _zonel2 = utm.from_latlon(rlat, lon2) + # NOTE: great circle distance used here, not reference ellipsoid! + xshift = utm.haversine(rlon, rlat, lon2, rlat) - e2 + # cache the return value + yshift = None + if z in self.zoneshifts: + yshift = self.zoneshifts[z][1] + self.zoneshifts[z] = (xshift, yshift) + return xshift + + def getnorthingshift(self, zonen, zonel): + """ + If the lat, lon coordinates being converted are located in a + different UTM zone than the canvas reference point, the UTM meters + may need to be shifted. + This picks a reference point in the same latitude band (UTM zone letter) + as the provided zone, to calculate the shift in meters for the + y coordinate. 
+ + :param zonen: zonen + :param zonel: zone1 + :return: calculated y shift + """ + rzonel = self.refutm[0][1] + # same zone letter, no y shift required + if zonel == rzonel: + return None + z = (zonen, zonel) + # y shift already calculated, cached + if z in self.zoneshifts and self.zoneshifts[z][1] is not None: + return self.zoneshifts[z][1] + + rlat, rlon, _ralt = self.refgeo + # zonemap is used to calculate degrees difference between zone letters + latshift = self.zonemap[zonel] - self.zonemap[rzonel] + # ea. latitude band is 8deg high + lat2 = rlat + latshift + _e2, n2, _zonen2, _zonel2 = utm.from_latlon(lat2, rlon) + # NOTE: great circle distance used here, not reference ellipsoid + yshift = -(utm.haversine(rlon, rlat, rlon, lat2) + n2) + # cache the return value + xshift = None + if z in self.zoneshifts: + xshift = self.zoneshifts[z][0] + self.zoneshifts[z] = (xshift, yshift) + return yshift + + def getutmzoneshift(self, e, n): + """ + Given UTM easting and northing values, check if they fall outside + the reference point's zone boundary. Return the UTM coordinates in a + different zone and the new zone if they do. Zone lettering is only + changed when the reference point is in the opposite hemisphere. + + :param e: easting value + :param n: northing value + :return: modified easting, northing, and zone values + :rtype: tuple + """ + zone = self.refutm[0] + rlat, rlon, _ralt = self.refgeo + if e > 834000 or e < 166000: + num_zones = (int(e) - 166000) / (utm.R / 10) + # estimate number of zones to shift, E (positive) or W (negative) + rlon2 = self.refgeo[1] + (num_zones * 6) + _e2, _n2, zonen2, zonel2 = utm.from_latlon(rlat, rlon2) + xshift = utm.haversine(rlon, rlat, rlon2, rlat) + # after >3 zones away from refpt, the above estimate won't work + # (the above estimate could be improved) + if not 100000 <= (e - xshift) < 1000000: + # move one more zone away + num_zones = (abs(num_zones) + 1) * (abs(num_zones) / num_zones) + rlon2 = self.refgeo[1] + (num_zones * 6) + _e2, _n2, zonen2, zonel2 = utm.from_latlon(rlat, rlon2) + xshift = utm.haversine(rlon, rlat, rlon2, rlat) + e = e - xshift + zone = (zonen2, zonel2) + if n < 0: + # refpt in northern hemisphere and we crossed south of equator + n += 10000000 + zone = (zone[0], 'M') + elif n > 10000000: + # refpt in southern hemisphere and we crossed north of equator + n -= 10000000 + zone = (zone[0], 'N') + return e, n, zone diff --git a/daemon/core/location/__init__.py b/daemon/core/location/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/daemon/core/location/geo.py b/daemon/core/location/geo.py deleted file mode 100644 index 78308728..00000000 --- a/daemon/core/location/geo.py +++ /dev/null @@ -1,127 +0,0 @@ -""" -Provides conversions from x,y,z to lon,lat,alt. -""" - -import logging - -import pyproj -from pyproj import Transformer - -from core.emulator.enumerations import RegisterTlvs - -logger = logging.getLogger(__name__) -SCALE_FACTOR: float = 100.0 -CRS_WGS84: int = 4326 -CRS_PROJ: int = 3857 - - -class GeoLocation: - """ - Provides logic to convert x,y,z coordinates to lon,lat,alt using - defined projections. - """ - - name: str = "location" - config_type: RegisterTlvs = RegisterTlvs.UTILITY - - def __init__(self) -> None: - """ - Creates a GeoLocation instance. 
- """ - self.to_pixels: Transformer = pyproj.Transformer.from_crs( - CRS_WGS84, CRS_PROJ, always_xy=True - ) - self.to_geo: Transformer = pyproj.Transformer.from_crs( - CRS_PROJ, CRS_WGS84, always_xy=True - ) - self.refproj: tuple[float, float, float] = (0.0, 0.0, 0.0) - self.refgeo: tuple[float, float, float] = (0.0, 0.0, 0.0) - self.refxyz: tuple[float, float, float] = (0.0, 0.0, 0.0) - self.refscale: float = 1.0 - - def setrefgeo(self, lat: float, lon: float, alt: float) -> None: - """ - Set the geospatial reference point. - - :param lat: latitude reference - :param lon: longitude reference - :param alt: altitude reference - :return: nothing - """ - self.refgeo = (lat, lon, alt) - px, py = self.to_pixels.transform(lon, lat) - self.refproj = (px, py, alt) - - def reset(self) -> None: - """ - Reset reference data to default values. - - :return: nothing - """ - self.refxyz = (0.0, 0.0, 0.0) - self.refgeo = (0.0, 0.0, 0.0) - self.refscale = 1.0 - self.refproj = self.to_pixels.transform(*self.refgeo) - - def pixels2meters(self, value: float) -> float: - """ - Provides conversion from pixels to meters. - - :param value: pixels value - :return: pixels value in meters - """ - return (value / SCALE_FACTOR) * self.refscale - - def meters2pixels(self, value: float) -> float: - """ - Provides conversion from meters to pixels. - - :param value: meters value - :return: meters value in pixels - """ - if self.refscale == 0.0: - return 0.0 - return SCALE_FACTOR * (value / self.refscale) - - def getxyz(self, lat: float, lon: float, alt: float) -> tuple[float, float, float]: - """ - Convert provided lon,lat,alt to x,y,z. - - :param lat: latitude value - :param lon: longitude value - :param alt: altitude value - :return: x,y,z representation of provided values - """ - logger.debug("input lon,lat,alt(%s, %s, %s)", lon, lat, alt) - px, py = self.to_pixels.transform(lon, lat) - px -= self.refproj[0] - py -= self.refproj[1] - pz = alt - self.refproj[2] - x = self.meters2pixels(px) + self.refxyz[0] - y = -(self.meters2pixels(py) + self.refxyz[1]) - z = self.meters2pixels(pz) + self.refxyz[2] - logger.debug("result x,y,z(%s, %s, %s)", x, y, z) - return x, y, z - - def getgeo(self, x: float, y: float, z: float) -> tuple[float, float, float]: - """ - Convert provided x,y,z to lon,lat,alt. - - :param x: x value - :param y: y value - :param z: z value - :return: lat,lon,alt representation of provided values - """ - logger.debug("input x,y(%s, %s)", x, y) - x -= self.refxyz[0] - y = -(y - self.refxyz[1]) - if z is None: - z = self.refxyz[2] - else: - z -= self.refxyz[2] - px = self.refproj[0] + self.pixels2meters(x) - py = self.refproj[1] + self.pixels2meters(y) - lon, lat = self.to_geo.transform(px, py) - alt = self.refgeo[2] + self.pixels2meters(z) - logger.debug("result lon,lat,alt(%s, %s, %s)", lon, lat, alt) - return lat, lon, alt diff --git a/daemon/core/location/mobility.py b/daemon/core/location/mobility.py deleted file mode 100644 index ebac9bc5..00000000 --- a/daemon/core/location/mobility.py +++ /dev/null @@ -1,1117 +0,0 @@ -""" -mobility.py: mobility helpers for moving nodes and calculating wireless range. 
-""" - -import heapq -import logging -import math -import threading -import time -from functools import total_ordering -from pathlib import Path -from typing import TYPE_CHECKING, Callable, Optional, Union - -from core import utils -from core.config import ( - ConfigBool, - ConfigFloat, - ConfigGroup, - ConfigInt, - ConfigString, - ConfigurableOptions, - Configuration, - ModelManager, -) -from core.emane.nodes import EmaneNet -from core.emulator.data import EventData, LinkData, LinkOptions -from core.emulator.enumerations import EventTypes, LinkTypes, MessageFlags, RegisterTlvs -from core.errors import CoreError -from core.executables import BASH -from core.nodes.base import CoreNode -from core.nodes.interface import CoreInterface -from core.nodes.network import WlanNode - -logger = logging.getLogger(__name__) - -if TYPE_CHECKING: - from core.emulator.session import Session - -LEARNING_DISABLED: int = 0 -LEARNING_ENABLED: int = 30000 - - -def get_mobility_node(session: "Session", node_id: int) -> Union[WlanNode, EmaneNet]: - try: - return session.get_node(node_id, WlanNode) - except CoreError: - return session.get_node(node_id, EmaneNet) - - -def get_config_int(current: int, config: dict[str, str], name: str) -> Optional[int]: - """ - Convenience function to get config values as int. - - :param current: current config value to use when one is not provided - :param config: config to get values from - :param name: name of config value to get - :return: current config value when not provided, new value otherwise - """ - value = get_config_float(current, config, name) - if value is not None: - value = int(value) - return value - - -def get_config_float( - current: Union[int, float], config: dict[str, str], name: str -) -> Optional[float]: - """ - Convenience function to get config values as float. - - :param current: current config value to use when one is not provided - :param config: config to get values from - :param name: name of config value to get - :return: current config value when not provided, new value otherwise - """ - value = config.get(name) - if value is not None: - if value == "": - value = None - else: - value = float(value) - else: - value = current - return value - - -class MobilityManager(ModelManager): - """ - Member of session class for handling configuration data for mobility and - range models. - """ - - name = "MobilityManager" - config_type = RegisterTlvs.WIRELESS - - def __init__(self, session: "Session") -> None: - """ - Creates a MobilityManager instance. - - :param session: session this manager is tied to - """ - super().__init__() - self.session: "Session" = session - self.models[BasicRangeModel.name] = BasicRangeModel - self.models[Ns2ScriptedMobility.name] = Ns2ScriptedMobility - - def reset(self) -> None: - """ - Clear out all current configurations. - - :return: nothing - """ - self.config_reset() - - def startup(self, node_ids: list[int] = None) -> None: - """ - Session is transitioning from instantiation to runtime state. - Instantiate any mobility models that have been configured for a WLAN. 
- - :param node_ids: node ids to startup - :return: nothing - """ - if node_ids is None: - node_ids = self.nodes() - for node_id in node_ids: - logger.debug( - "node(%s) mobility startup: %s", node_id, self.get_all_configs(node_id) - ) - try: - node = get_mobility_node(self.session, node_id) - # TODO: may be an issue if there are multiple mobility models - for model in self.models.values(): - config = self.get_configs(node_id, model.name) - if not config: - continue - self.set_model(node, model, config) - if node.mobility: - self.session.event_loop.add_event(0.0, node.mobility.startup) - except CoreError: - logger.exception("mobility startup error") - logger.warning( - "skipping mobility configuration for unknown node: %s", node_id - ) - - def handleevent(self, event_data: EventData) -> None: - """ - Handle an Event Message used to start, stop, or pause - mobility scripts for a given mobility network. - - :param event_data: event data to handle - :return: nothing - """ - event_type = event_data.event_type - node_id = event_data.node - name = event_data.name - try: - node = get_mobility_node(self.session, node_id) - except CoreError: - logger.exception( - "ignoring event for model(%s), unknown node(%s)", name, node_id - ) - return - - # name is e.g. "mobility:ns2script" - models = name[9:].split(",") - for model in models: - cls = self.models.get(model) - if not cls: - logger.warning("ignoring event for unknown model '%s'", model) - continue - if cls.config_type in [RegisterTlvs.WIRELESS, RegisterTlvs.MOBILITY]: - model = node.mobility - else: - continue - if model is None: - logger.warning("ignoring event, %s has no model", node.name) - continue - if cls.name != model.name: - logger.warning( - "ignoring event for %s wrong model %s,%s", - node.name, - cls.name, - model.name, - ) - continue - if event_type in [EventTypes.STOP, EventTypes.RESTART]: - model.stop(move_initial=True) - if event_type in [EventTypes.START, EventTypes.RESTART]: - model.start() - if event_type == EventTypes.PAUSE: - model.pause() - - def sendevent(self, model: "WayPointMobility") -> None: - """ - Send an event message on behalf of a mobility model. - This communicates the current and end (max) times to the GUI. - - :param model: mobility model to send event for - :return: nothing - """ - event_type = EventTypes.NONE - if model.state == model.STATE_STOPPED: - event_type = EventTypes.STOP - elif model.state == model.STATE_RUNNING: - event_type = EventTypes.START - elif model.state == model.STATE_PAUSED: - event_type = EventTypes.PAUSE - start_time = int(model.lasttime - model.timezero) - end_time = int(model.endtime) - data = f"start={start_time} end={end_time}" - event_data = EventData( - node=model.id, - event_type=event_type, - name=f"mobility:{model.name}", - data=data, - time=str(time.monotonic()), - ) - self.session.broadcast_event(event_data) - - -class WirelessModel(ConfigurableOptions): - """ - Base class used by EMANE models and the basic range model. - Used for managing arbitrary configuration parameters. - """ - - config_type: RegisterTlvs = RegisterTlvs.WIRELESS - position_callback: Callable[[CoreInterface], None] = None - - def __init__(self, session: "Session", _id: int) -> None: - """ - Create a WirelessModel instance. 
- - :param session: core session we are tied to - :param _id: object id - """ - self.session: "Session" = session - self.id: int = _id - - def links(self, flags: MessageFlags = MessageFlags.NONE) -> list[LinkData]: - """ - May be used if the model can populate the GUI with wireless (green) - link lines. - - :param flags: link data flags - :return: link data - """ - return [] - - def update(self, moved_ifaces: list[CoreInterface]) -> None: - """ - Update this wireless model. - - :param moved_ifaces: moved network interfaces - :return: nothing - """ - raise NotImplementedError - - def update_config(self, config: dict[str, str]) -> None: - """ - For run-time updates of model config. Returns True when position callback and - set link parameters should be invoked. - - :param config: configuration values to update - :return: nothing - """ - pass - - -class BasicRangeModel(WirelessModel): - """ - Basic Range wireless model, calculates range between nodes and links - and unlinks nodes based on this distance. This was formerly done from - the GUI. - """ - - name: str = "basic_range" - options: list[Configuration] = [ - ConfigInt(id="range", default="275", label="wireless range (pixels)"), - ConfigInt(id="bandwidth", default="54000000", label="bandwidth (bps)"), - ConfigInt(id="jitter", default="0", label="transmission jitter (usec)"), - ConfigInt(id="delay", default="5000", label="transmission delay (usec)"), - ConfigFloat(id="error", default="0.0", label="loss (%)"), - ConfigBool(id="promiscuous", default="0", label="promiscuous mode"), - ] - - @classmethod - def config_groups(cls): - return [ConfigGroup("Basic Range Parameters", 1, len(cls.configurations()))] - - def __init__(self, session: "Session", _id: int) -> None: - """ - Create a BasicRangeModel instance. - - :param session: related core session - :param _id: object id - """ - super().__init__(session, _id) - self.session: "Session" = session - self.wlan: WlanNode = session.get_node(_id, WlanNode) - self.iface_to_pos: dict[CoreInterface, tuple[float, float, float]] = {} - self.iface_lock: threading.Lock = threading.Lock() - self.range: int = 0 - self.bw: Optional[int] = None - self.delay: Optional[int] = None - self.loss: Optional[float] = None - self.jitter: Optional[int] = None - self.promiscuous: bool = False - - def setlinkparams(self) -> None: - """ - Apply link parameters to all interfaces. This is invoked from - WlanNode.setmodel() after the position callback has been set. - """ - with self.iface_lock: - for iface in self.iface_to_pos: - options = LinkOptions( - bandwidth=self.bw, - delay=self.delay, - loss=self.loss, - jitter=self.jitter, - ) - iface.options.update(options) - iface.set_config() - - def get_position(self, iface: CoreInterface) -> tuple[float, float, float]: - """ - Retrieve network interface position. - - :param iface: network interface position to retrieve - :return: network interface position - """ - with self.iface_lock: - return self.iface_to_pos[iface] - - def set_position(self, iface: CoreInterface) -> None: - """ - A node has moved; given an interface, a new (x,y,z) position has - been set; calculate the new distance between other nodes and link or - unlink node pairs based on the configured range. 
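The link/unlink decision described above boils down to a 3-D Euclidean distance compared against the configured range in pixels. A standalone sketch of that check, with made-up positions rather than CORE interfaces:

# Sketch of the distance-versus-range test performed by the basic range model.
# Positions are (x, y, z) tuples in pixels; range_px mirrors the "range" option above.
import math
from typing import Optional, Tuple

Point = Tuple[float, float, Optional[float]]


def distance(p1: Point, p2: Point) -> float:
    a = p1[0] - p2[0]
    b = p1[1] - p2[1]
    c = 0.0
    if p1[2] is not None and p2[2] is not None:
        c = p1[2] - p2[2]
    return math.hypot(math.hypot(a, b), c)


def within_range(p1: Point, p2: Point, range_px: float) -> bool:
    return distance(p1, p2) <= range_px


print(within_range((0, 0, 0), (200, 100, 0), 275))   # True, d ~= 223.6
print(within_range((0, 0, 0), (300, 300, 0), 275))   # False, d ~= 424.3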
- - :param iface: network interface to set position for - :return: nothing - """ - x, y, z = iface.node.position.get() - with self.iface_lock: - self.iface_to_pos[iface] = (x, y, z) - if x is None or y is None: - return - for iface2 in self.iface_to_pos: - self.calclink(iface, iface2) - - position_callback = set_position - - def update(self, moved_ifaces: list[CoreInterface]) -> None: - """ - Node positions have changed without recalc. Update positions from - node.position, then re-calculate links for those that have moved. - Assumes bidirectional links, with one calculation per node pair, where - one of the nodes has moved. - - :param moved_ifaces: moved network interfaces - :return: nothing - """ - with self.iface_lock: - while len(moved_ifaces): - iface = moved_ifaces.pop() - nx, ny, nz = iface.node.getposition() - if iface in self.iface_to_pos: - self.iface_to_pos[iface] = (nx, ny, nz) - for iface2 in self.iface_to_pos: - if iface2 in moved_ifaces: - continue - self.calclink(iface, iface2) - - def calclink(self, iface: CoreInterface, iface2: CoreInterface) -> None: - """ - Helper used by set_position() and update() to - calculate distance between two interfaces and perform - linking/unlinking. Sends link/unlink messages and updates the - WlanNode's linked dict. - - :param iface: interface one - :param iface2: interface two - :return: nothing - """ - if iface == iface2: - return - try: - x, y, z = self.iface_to_pos[iface] - x2, y2, z2 = self.iface_to_pos[iface2] - if x2 is None or y2 is None: - return - d = self.calcdistance((x, y, z), (x2, y2, z2)) - # ordering is important, to keep the wlan._linked dict organized - a = min(iface, iface2) - b = max(iface, iface2) - with self.wlan.linked_lock: - linked = self.wlan.is_linked(a, b) - if d > self.range: - if linked: - logger.debug("was linked, unlinking") - self.wlan.unlink(a, b) - self.sendlinkmsg(a, b, unlink=True) - else: - if not linked: - logger.debug("was not linked, linking") - self.wlan.link(a, b) - self.sendlinkmsg(a, b) - except KeyError: - logger.exception("error getting interfaces during calclink") - - @staticmethod - def calcdistance( - p1: tuple[float, float, float], p2: tuple[float, float, float] - ) -> float: - """ - Calculate the distance between two three-dimensional points. - - :param p1: point one - :param p2: point two - :return: distance petween the points - """ - a = p1[0] - p2[0] - b = p1[1] - p2[1] - c = 0 - if p1[2] is not None and p2[2] is not None: - c = p1[2] - p2[2] - return math.hypot(math.hypot(a, b), c) - - def update_config(self, config: dict[str, str]) -> None: - """ - Configuration has changed during runtime. 
- - :param config: values to update configuration - :return: nothing - """ - self.range = get_config_int(self.range, config, "range") - if self.range is None: - self.range = 0 - logger.debug("wlan %s set range to %s", self.wlan.name, self.range) - self.bw = get_config_int(self.bw, config, "bandwidth") - self.delay = get_config_int(self.delay, config, "delay") - self.loss = get_config_float(self.loss, config, "error") - self.jitter = get_config_int(self.jitter, config, "jitter") - promiscuous = config.get("promiscuous", "0") == "1" - if self.promiscuous and not promiscuous: - self.wlan.net_client.set_mac_learning(self.wlan.brname, LEARNING_ENABLED) - elif not self.promiscuous and promiscuous: - self.wlan.net_client.set_mac_learning(self.wlan.brname, LEARNING_DISABLED) - self.promiscuous = promiscuous - self.setlinkparams() - - def create_link_data( - self, iface1: CoreInterface, iface2: CoreInterface, message_type: MessageFlags - ) -> LinkData: - """ - Create a wireless link/unlink data message. - - :param iface1: interface one - :param iface2: interface two - :param message_type: link message type - :return: link data - """ - color = self.session.get_link_color(self.wlan.id) - return LinkData( - message_type=message_type, - type=LinkTypes.WIRELESS, - node1_id=iface1.node.id, - node2_id=iface2.node.id, - network_id=self.wlan.id, - color=color, - ) - - def sendlinkmsg( - self, iface: CoreInterface, iface2: CoreInterface, unlink: bool = False - ) -> None: - """ - Send a wireless link/unlink API message to the GUI. - - :param iface: interface one - :param iface2: interface two - :param unlink: unlink or not - :return: nothing - """ - message_type = MessageFlags.DELETE if unlink else MessageFlags.ADD - link_data = self.create_link_data(iface, iface2, message_type) - self.session.broadcast_link(link_data) - - def links(self, flags: MessageFlags = MessageFlags.NONE) -> list[LinkData]: - """ - Return a list of wireless link messages for when the GUI reconnects. - - :param flags: link flags - :return: all link data - """ - all_links = [] - with self.wlan.linked_lock: - for a in self.wlan.linked: - for b in self.wlan.linked[a]: - if self.wlan.linked[a][b]: - all_links.append(self.create_link_data(a, b, flags)) - return all_links - - -@total_ordering -class WayPoint: - """ - Maintains information regarding waypoints. - """ - - def __init__( - self, - _time: float, - node_id: int, - coords: tuple[float, float, Optional[float]], - speed: float, - ) -> None: - """ - Creates a WayPoint instance. - - :param _time: waypoint time - :param node_id: node id - :param coords: waypoint coordinates - :param speed: waypoint speed - """ - self.time: float = _time - self.node_id: int = node_id - self.coords: tuple[float, float, Optional[float]] = coords - self.speed: float = speed - - def __eq__(self, other: "WayPoint") -> bool: - return (self.time, self.node_id) == (other.time, other.node_id) - - def __ne__(self, other: "WayPoint") -> bool: - return not self == other - - def __lt__(self, other: "WayPoint") -> bool: - if self.time == other.time: - return self.node_id < other.node_id - else: - return self.time < other.time - - -class WayPointMobility(WirelessModel): - """ - Abstract class for mobility models that set node waypoints. - """ - - name: str = "waypoint" - config_type: RegisterTlvs = RegisterTlvs.MOBILITY - STATE_STOPPED: int = 0 - STATE_RUNNING: int = 1 - STATE_PAUSED: int = 2 - - def __init__(self, session: "Session", _id: int) -> None: - """ - Create a WayPointMobility instance. 
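Waypoints above are kept in a heapq ordered by (time, node_id), which is why WayPoint defines __eq__ and __lt__ under functools.total_ordering. A small standalone sketch of that ordering (the class name is illustrative):

# Sketch of the time-ordered waypoint queue used above.
# total_ordering derives the remaining comparisons from __eq__ and __lt__.
import heapq
from functools import total_ordering


@total_ordering
class DemoWayPoint:
    def __init__(self, time: float, node_id: int) -> None:
        self.time = time
        self.node_id = node_id

    def __eq__(self, other: "DemoWayPoint") -> bool:
        return (self.time, self.node_id) == (other.time, other.node_id)

    def __lt__(self, other: "DemoWayPoint") -> bool:
        return (self.time, self.node_id) < (other.time, other.node_id)


queue = []
for t, node in [(5.0, 2), (1.0, 3), (1.0, 1)]:
    heapq.heappush(queue, DemoWayPoint(t, node))

while queue:
    wp = heapq.heappop(queue)
    print(wp.time, wp.node_id)   # pops sorted by time, then node id: (1.0, 1), (1.0, 3), (5.0, 2)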
- - :param session: CORE session instance - :param _id: object id - :return: - """ - super().__init__(session=session, _id=_id) - self.state: int = self.STATE_STOPPED - self.queue: list[WayPoint] = [] - self.queue_copy: list[WayPoint] = [] - self.points: dict[int, WayPoint] = {} - self.initial: dict[int, WayPoint] = {} - self.lasttime: Optional[float] = None - self.endtime: Optional[int] = None - self.timezero: float = 0.0 - self.net: Union[WlanNode, EmaneNet] = get_mobility_node(self.session, self.id) - # these are really set in child class via confmatrix - self.loop: bool = False - self.refresh_ms: int = 50 - # flag whether to stop scheduling when queue is empty - # (ns-3 sets this to False as new waypoints may be added from trace) - self.empty_queue_stop: bool = True - - def startup(self): - raise NotImplementedError - - def runround(self) -> None: - """ - Advance script time and move nodes. - - :return: nothing - """ - if self.state != self.STATE_RUNNING: - return - t = self.lasttime - self.lasttime = time.monotonic() - now = self.lasttime - self.timezero - dt = self.lasttime - t - - # keep current waypoints up-to-date - self.updatepoints(now) - - if not len(self.points): - if len(self.queue): - # more future waypoints, allow time for self.lasttime update - nexttime = self.queue[0].time - now - if nexttime > (0.001 * self.refresh_ms): - nexttime -= 0.001 * self.refresh_ms - self.session.event_loop.add_event(nexttime, self.runround) - return - else: - # no more waypoints or queued items, loop? - if not self.empty_queue_stop: - # keep running every refresh_ms, even with empty queue - self.session.event_loop.add_event( - 0.001 * self.refresh_ms, self.runround - ) - return - if not self.loopwaypoints(): - return self.stop(move_initial=False) - if not len(self.queue): - # prevent busy loop - return - return self.run() - - moved_ifaces = [] - for iface in self.net.get_ifaces(): - node = iface.node - if self.movenode(node, dt): - moved_ifaces.append(iface) - - # calculate all ranges after moving nodes; this saves calculations - self.net.wireless_model.update(moved_ifaces) - - # TODO: check session state - self.session.event_loop.add_event(0.001 * self.refresh_ms, self.runround) - - def run(self) -> None: - """ - Run the waypoint mobility scenario. - - :return: nothing - """ - self.timezero = time.monotonic() - self.lasttime = self.timezero - (0.001 * self.refresh_ms) - self.movenodesinitial() - self.runround() - self.session.mobility.sendevent(self) - - def movenode(self, node: CoreNode, dt: float) -> bool: - """ - Calculate next node location and update its coordinates. - Returns True if the node's position has changed. 
- - :param node: node to move - :param dt: move factor - :return: True if node was moved, False otherwise - """ - if node.id not in self.points: - return False - x1, y1, z1 = node.getposition() - x2, y2, z2 = self.points[node.id].coords - speed = self.points[node.id].speed - # instantaneous move (prevents dx/dy == 0.0 below) - if speed == 0: - self.setnodeposition(node, x2, y2, z2) - del self.points[node.id] - return True - - # linear speed value - alpha = math.atan2(y2 - y1, x2 - x1) - sx = speed * math.cos(alpha) - sy = speed * math.sin(alpha) - - # calculate dt * speed = distance moved - dx = sx * dt - dy = sy * dt - # prevent overshoot - if abs(dx) > abs(x2 - x1): - dx = x2 - x1 - if abs(dy) > abs(y2 - y1): - dy = y2 - y1 - if dx == 0.0 and dy == 0.0: - if self.endtime < (self.lasttime - self.timezero): - # the last node to reach the last waypoint determines this - # script's endtime - self.endtime = self.lasttime - self.timezero - del self.points[node.id] - return False - if (x1 + dx) < 0.0: - dx = 0.0 - x1 - if (y1 + dy) < 0.0: - dy = 0.0 - y1 - self.setnodeposition(node, x1 + dx, y1 + dy, z1) - return True - - def movenodesinitial(self) -> None: - """ - Move nodes to their initial positions. Then calculate the ranges. - - :return: nothing - """ - moved_ifaces = [] - for iface in self.net.get_ifaces(): - node = iface.node - if node.id not in self.initial: - continue - x, y, z = self.initial[node.id].coords - self.setnodeposition(node, x, y, z) - moved_ifaces.append(iface) - self.net.wireless_model.update(moved_ifaces) - - def addwaypoint( - self, - _time: float, - nodenum: int, - x: float, - y: float, - z: Optional[float], - speed: float, - ) -> None: - """ - Waypoints are pushed to a heapq, sorted by time. - - :param _time: waypoint time - :param nodenum: node id - :param x: x position - :param y: y position - :param z: z position - :param speed: speed - :return: nothing - """ - wp = WayPoint(_time, nodenum, coords=(x, y, z), speed=speed) - heapq.heappush(self.queue, wp) - - def addinitial(self, nodenum: int, x: float, y: float, z: float) -> None: - """ - Record initial position in a dict. - - :param nodenum: node id - :param x: x position - :param y: y position - :param z: z position - :return: nothing - """ - wp = WayPoint(0, nodenum, coords=(x, y, z), speed=0) - self.initial[nodenum] = wp - - def updatepoints(self, now: float) -> None: - """ - Move items from self.queue to self.points when their time has come. - - :param now: current timestamp - :return: nothing - """ - while len(self.queue): - if self.queue[0].time > now: - break - wp = heapq.heappop(self.queue) - self.points[wp.node_id] = wp - - def copywaypoints(self) -> None: - """ - Store backup copy of waypoints for looping and stopping. - - :return: nothing - """ - self.queue_copy = list(self.queue) - - def loopwaypoints(self) -> bool: - """ - Restore backup copy of waypoints when looping. - - :return: nothing - """ - self.queue = list(self.queue_copy) - return self.loop - - def setnodeposition(self, node: CoreNode, x: float, y: float, z: float) -> None: - """ - Helper to move a node, notify any GUI (connected session handlers), - without invoking the interface poshook callback that may perform - range calculation. 
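The movenode step above moves a node toward its waypoint along a straight line at the waypoint's speed, clamping the step so it never overshoots. A simplified 2-D standalone sketch of that per-tick update (the function name is illustrative; altitude and endtime bookkeeping are omitted):

# Simplified sketch of the per-tick waypoint step implemented above.
import math
from typing import Tuple


def step_toward(pos: Tuple[float, float], target: Tuple[float, float],
                speed: float, dt: float) -> Tuple[float, float]:
    x1, y1 = pos
    x2, y2 = target
    alpha = math.atan2(y2 - y1, x2 - x1)
    dx = speed * math.cos(alpha) * dt
    dy = speed * math.sin(alpha) * dt
    # prevent overshooting the waypoint
    if abs(dx) > abs(x2 - x1):
        dx = x2 - x1
    if abs(dy) > abs(y2 - y1):
        dy = y2 - y1
    return x1 + dx, y1 + dy


pos = (0.0, 0.0)
for _ in range(4):
    pos = step_toward(pos, (100.0, 0.0), speed=30.0, dt=1.0)
    print(pos)   # (30, 0), (60, 0), (90, 0), (100, 0)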
- - :param node: node to set position for - :param x: x position - :param y: y position - :param z: z position - :return: nothing - """ - node.position.set(x, y, z) - self.session.broadcast_node(node) - - def setendtime(self) -> None: - """ - Set self.endtime to the time of the last waypoint in the queue of - waypoints. This is just an estimate. The endtime will later be - adjusted, after one round of the script has run, to be the time - that the last moving node has reached its final waypoint. - - :return: nothing - """ - try: - self.endtime = self.queue[-1].time - except IndexError: - self.endtime = 0 - - def start(self) -> None: - """ - Run the script from the beginning or unpause from where it - was before. - - :return: nothing - """ - laststate = self.state - self.state = self.STATE_RUNNING - if laststate == self.STATE_STOPPED or laststate == self.STATE_RUNNING: - self.loopwaypoints() - self.timezero = 0 - self.lasttime = 0 - self.run() - elif laststate == self.STATE_PAUSED: - now = time.monotonic() - self.timezero += now - self.lasttime - self.lasttime = now - (0.001 * self.refresh_ms) - self.runround() - - def stop(self, move_initial: bool = True) -> None: - """ - Stop the script and move nodes to initial positions. - - :param move_initial: flag to check if we should move nodes to initial - position - :return: nothing - """ - self.state = self.STATE_STOPPED - self.loopwaypoints() - self.timezero = 0 - self.lasttime = 0 - if move_initial: - self.movenodesinitial() - self.session.mobility.sendevent(self) - - def pause(self) -> None: - """ - Pause the script; pause time is stored to self.lasttime. - - :return: nothing - """ - self.state = self.STATE_PAUSED - self.lasttime = time.monotonic() - - -class Ns2ScriptedMobility(WayPointMobility): - """ - Handles the ns-2 script format, generated by scengen/setdest or - BonnMotion. - """ - - name: str = "ns2script" - options: list[Configuration] = [ - ConfigString(id="file", label="mobility script file"), - ConfigInt(id="refresh_ms", default="50", label="refresh time (ms)"), - ConfigBool(id="loop", default="1", label="loop"), - ConfigString(id="autostart", label="auto-start seconds (0.0 for runtime)"), - ConfigString(id="map", label="node mapping (optional, e.g. 0:1,1:2,2:3)"), - ConfigString(id="script_start", label="script file to run upon start"), - ConfigString(id="script_pause", label="script file to run upon pause"), - ConfigString(id="script_stop", label="script file to run upon stop"), - ] - - @classmethod - def config_groups(cls) -> list[ConfigGroup]: - return [ - ConfigGroup("ns-2 Mobility Script Parameters", 1, len(cls.configurations())) - ] - - def __init__(self, session: "Session", _id: int) -> None: - """ - Creates a Ns2ScriptedMobility instance. 
- - :param session: CORE session instance - :param _id: object id - """ - super().__init__(session, _id) - self.file: Optional[Path] = None - self.autostart: Optional[str] = None - self.nodemap: dict[int, int] = {} - self.script_start: Optional[str] = None - self.script_pause: Optional[str] = None - self.script_stop: Optional[str] = None - - def update_config(self, config: dict[str, str]) -> None: - self.file = Path(config["file"]) - logger.info( - "ns-2 scripted mobility configured for WLAN %d using file: %s", - self.id, - self.file, - ) - self.refresh_ms = int(config["refresh_ms"]) - self.loop = config["loop"] == "1" - self.autostart = config["autostart"] - self.parsemap(config["map"]) - self.script_start = config["script_start"] - self.script_pause = config["script_pause"] - self.script_stop = config["script_stop"] - self.readscriptfile() - self.copywaypoints() - self.setendtime() - - def readscriptfile(self) -> None: - """ - Read in mobility script from a file. This adds waypoints to a - priority queue, sorted by waypoint time. Initial waypoints are - stored in a separate dict. - - :return: nothing - """ - file_path = self.findfile(self.file) - try: - f = file_path.open("r") - except OSError: - logger.exception( - "ns-2 scripted mobility failed to load file: %s", self.file - ) - return - logger.info("reading ns-2 script file: %s", file_path) - ln = 0 - ix = iy = iz = None - inodenum = None - for line in f: - ln += 1 - if line[:2] != "$n": - continue - try: - if line[:8] == "$ns_ at ": - if ix is not None and iy is not None: - self.addinitial(self.map(inodenum), ix, iy, iz) - ix = iy = iz = None - # waypoints: - # $ns_ at 1.00 "$node_(6) setdest 500.0 178.0 25.0" - parts = line.split() - line_time = float(parts[2]) - nodenum = parts[3][1 + parts[3].index("(") : parts[3].index(")")] - x = float(parts[5]) - y = float(parts[6]) - z = None - speed = float(parts[7].strip('"')) - self.addwaypoint(line_time, self.map(nodenum), x, y, z, speed) - elif line[:7] == "$node_(": - # initial position (time=0, speed=0): - # $node_(6) set X_ 780.0 - parts = line.split() - nodenum = parts[0][1 + parts[0].index("(") : parts[0].index(")")] - if parts[2] == "X_": - if ix is not None and iy is not None: - self.addinitial(self.map(inodenum), ix, iy, iz) - ix = iy = iz = None - ix = float(parts[3]) - elif parts[2] == "Y_": - iy = float(parts[3]) - elif parts[2] == "Z_": - iz = float(parts[3]) - self.addinitial(self.map(nodenum), ix, iy, iz) - ix = iy = iz = None - inodenum = nodenum - else: - raise ValueError - except ValueError: - logger.exception( - "skipping line %d of file %s '%s'", ln, self.file, line - ) - continue - if ix is not None and iy is not None: - self.addinitial(self.map(inodenum), ix, iy, iz) - - def findfile(self, file_path: Path) -> Path: - """ - Locate a script file. If the specified file doesn't exist, look in the - same directory as the scenario file, or in gui directories. 
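readscriptfile above pulls waypoints out of ns-2/BonnMotion lines of the form $ns_ at <time> "$node_(<id>) setdest <x> <y> <speed>" by splitting on whitespace. A minimal standalone parser for just that waypoint form (the real reader also handles the initial $node_(n) set X_/Y_/Z_ lines):

# Minimal parser for the ns-2 waypoint lines quoted in the code above, e.g.
#   $ns_ at 1.00 "$node_(6) setdest 500.0 178.0 25.0"
from typing import NamedTuple


class ParsedWayPoint(NamedTuple):
    time: float
    node: int
    x: float
    y: float
    speed: float


def parse_waypoint(line: str) -> ParsedWayPoint:
    parts = line.split()
    time = float(parts[2])
    node = int(parts[3][parts[3].index("(") + 1 : parts[3].index(")")])
    x, y = float(parts[5]), float(parts[6])
    speed = float(parts[7].strip('"'))
    return ParsedWayPoint(time, node, x, y, speed)


print(parse_waypoint('$ns_ at 1.00 "$node_(6) setdest 500.0 178.0 25.0"'))
# ParsedWayPoint(time=1.0, node=6, x=500.0, y=178.0, speed=25.0)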
- - :param file_path: file name to find - :return: absolute path to the file - :raises CoreError: when file is not found - """ - file_path = file_path.expanduser() - if file_path.exists(): - return file_path - if self.session.file_path: - session_file_path = self.session.file_path.parent / file_path - if session_file_path.exists(): - return session_file_path - if self.session.user: - user_path = Path(f"~{self.session.user}").expanduser() - configs_path = user_path / ".core" / "configs" / file_path - if configs_path.exists(): - return configs_path - mobility_path = user_path / ".coregui" / "mobility" / file_path - if mobility_path.exists(): - return mobility_path - raise CoreError(f"invalid file: {file_path}") - - def parsemap(self, mapstr: str) -> None: - """ - Parse a node mapping string, given as a configuration parameter. - - :param mapstr: mapping string to parse - :return: nothing - """ - self.nodemap = {} - if mapstr.strip() == "": - return - for pair in mapstr.split(","): - parts = pair.split(":") - try: - if len(parts) != 2: - raise ValueError - self.nodemap[int(parts[0])] = int(parts[1]) - except ValueError: - logger.exception("ns-2 mobility node map error") - - def map(self, nodenum: str) -> int: - """ - Map one node number (from a script file) to another. - - :param nodenum: node id to map - :return: mapped value or the node id itself - """ - nodenum = int(nodenum) - return self.nodemap.get(nodenum, nodenum) - - def startup(self) -> None: - """ - Start running the script if autostart is enabled. - Move node to initial positions when any autostart time is specified. - Ignore the script if autostart is an empty string (can still be - started via GUI controls). - - :return: nothing - """ - if self.autostart == "": - logger.info("not auto-starting ns-2 script for %s", self.net.name) - return - try: - t = float(self.autostart) - except ValueError: - logger.exception( - "Invalid auto-start seconds specified '%s' for %s", - self.autostart, - self.net.name, - ) - return - self.movenodesinitial() - logger.info("scheduling ns-2 script for %s autostart at %s", self.net.name, t) - self.state = self.STATE_RUNNING - self.session.event_loop.add_event(t, self.run) - - def start(self) -> None: - """ - Handle the case when un-paused. - - :return: nothing - """ - logger.info("starting script: %s", self.file) - laststate = self.state - super().start() - if laststate == self.STATE_PAUSED: - self.statescript("unpause") - - def run(self) -> None: - """ - Start is pressed or autostart is triggered. - - :return: nothing - """ - super().run() - self.statescript("run") - - def pause(self) -> None: - """ - Pause the mobility script. - - :return: nothing - """ - logger.info("pausing script: %s", self.file) - super().pause() - self.statescript("pause") - - def stop(self, move_initial: bool = True) -> None: - """ - Stop the mobility script. - - :param move_initial: flag to check if we should move node to initial - position - :return: nothing - """ - logger.info("stopping script: %s", self.file) - super().stop(move_initial=move_initial) - self.statescript("stop") - - def statescript(self, typestr: str) -> None: - """ - State of the mobility script. 
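The optional node mapping parsed above is a comma-separated list of old:new id pairs such as "0:1,1:2,2:3". A standalone sketch of that parsing (the helper name is illustrative; the real parsemap logs and skips malformed pairs instead of raising):

# Sketch of parsing the "0:1,1:2,2:3" node-mapping string described above.
def parse_node_map(mapstr: str) -> dict[int, int]:
    mapping: dict[int, int] = {}
    if not mapstr.strip():
        return mapping
    for pair in mapstr.split(","):
        left, right = pair.split(":")   # raises ValueError on malformed pairs
        mapping[int(left)] = int(right)
    return mapping


assert parse_node_map("0:1,1:2,2:3") == {0: 1, 1: 2, 2: 3}
assert parse_node_map("") == {}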
- - :param typestr: state type string - :return: nothing - """ - filename = None - if typestr == "run" or typestr == "unpause": - filename = self.script_start - elif typestr == "pause": - filename = self.script_pause - elif typestr == "stop": - filename = self.script_stop - if filename is None or filename == "": - return - filename = Path(filename) - filename = self.findfile(filename) - args = f"{BASH} {filename} {typestr}" - utils.cmd(args, cwd=self.session.directory, env=self.session.get_environment()) diff --git a/daemon/core/misc/LatLongUTMconversion.py b/daemon/core/misc/LatLongUTMconversion.py new file mode 100755 index 00000000..4f7c13dc --- /dev/null +++ b/daemon/core/misc/LatLongUTMconversion.py @@ -0,0 +1,246 @@ +#!/usr/bin/env python +# this file is from http://pygps.org/ + +# Lat Long - UTM, UTM - Lat Long conversions + +from math import pi, sin, cos, tan, sqrt + +# LatLong- UTM conversion..h +# definitions for lat/long to UTM and UTM to lat/lng conversions +# include + +_deg2rad = pi / 180.0 +_rad2deg = 180.0 / pi + +_EquatorialRadius = 2 +_eccentricitySquared = 3 + +_ellipsoid = [ + # id, Ellipsoid name, Equatorial Radius, square of eccentricity + # first once is a placeholder only, To allow array indices to match id numbers + [-1, "Placeholder", 0, 0], + [1, "Airy", 6377563, 0.00667054], + [2, "Australian National", 6378160, 0.006694542], + [3, "Bessel 1841", 6377397, 0.006674372], + [4, "Bessel 1841 (Nambia] ", 6377484, 0.006674372], + [5, "Clarke 1866", 6378206, 0.006768658], + [6, "Clarke 1880", 6378249, 0.006803511], + [7, "Everest", 6377276, 0.006637847], + [8, "Fischer 1960 (Mercury] ", 6378166, 0.006693422], + [9, "Fischer 1968", 6378150, 0.006693422], + [10, "GRS 1967", 6378160, 0.006694605], + [11, "GRS 1980", 6378137, 0.00669438], + [12, "Helmert 1906", 6378200, 0.006693422], + [13, "Hough", 6378270, 0.00672267], + [14, "International", 6378388, 0.00672267], + [15, "Krassovsky", 6378245, 0.006693422], + [16, "Modified Airy", 6377340, 0.00667054], + [17, "Modified Everest", 6377304, 0.006637847], + [18, "Modified Fischer 1960", 6378155, 0.006693422], + [19, "South American 1969", 6378160, 0.006694542], + [20, "WGS 60", 6378165, 0.006693422], + [21, "WGS 66", 6378145, 0.006694542], + [22, "WGS-72", 6378135, 0.006694318], + [23, "WGS-84", 6378137, 0.00669438] +] + + +# Reference ellipsoids derived from Peter H. Dana's website- +# http://www.utexas.edu/depts/grg/gcraft/notes/datum/elist.html +# Department of Geography, University of Texas at Austin +# Internet: pdana@mail.utexas.edu +# 3/22/95 + +# Source +# Defense Mapping Agency. 1987b. DMA Technical Report: Supplement to Department of Defense World Geodetic System +# 1984 Technical Report. Part I and II. Washington, DC: Defense Mapping Agency + +# def LLtoUTM(int ReferenceEllipsoid, const double Lat, const double Long, +# double &UTMNorthing, double &UTMEasting, char* UTMZone) + +def LLtoUTM(ReferenceEllipsoid, Lat, Long, zone=None): + """converts lat/long to UTM coords. Equations from USGS Bulletin 1532 + East Longitudes are positive, West longitudes are negative. + North latitudes are positive, South latitudes are negative + Lat and Long are in decimal degrees + Written by Chuck Gantz- chuck.gantz@globalstar.com""" + + a = _ellipsoid[ReferenceEllipsoid][_EquatorialRadius] + eccSquared = _ellipsoid[ReferenceEllipsoid][_eccentricitySquared] + k0 = 0.9996 + + # Make sure the longitude is between -180.00 .. 179.9 + LongTemp = (Long + 180) - int((Long + 180) / 360) * 360 - 180 # -180.00 .. 
179.9 + + LatRad = Lat * _deg2rad + LongRad = LongTemp * _deg2rad + + if zone is None: + ZoneNumber = int((LongTemp + 180) / 6) + 1 + else: + ZoneNumber = zone + + if Lat >= 56.0 and Lat < 64.0 and LongTemp >= 3.0 and LongTemp < 12.0: + ZoneNumber = 32 + + # Special zones for Svalbard + if Lat >= 72.0 and Lat < 84.0: + if LongTemp >= 0.0 and LongTemp < 9.0: + ZoneNumber = 31 + elif LongTemp >= 9.0 and LongTemp < 21.0: + ZoneNumber = 33 + elif LongTemp >= 21.0 and LongTemp < 33.0: + ZoneNumber = 35 + elif LongTemp >= 33.0 and LongTemp < 42.0: + ZoneNumber = 37 + + LongOrigin = (ZoneNumber - 1) * 6 - 180 + 3 # +3 puts origin in middle of zone + LongOriginRad = LongOrigin * _deg2rad + + # compute the UTM Zone from the latitude and longitude + UTMZone = "%d%c" % (ZoneNumber, _UTMLetterDesignator(Lat)) + + eccPrimeSquared = (eccSquared) / (1 - eccSquared) + N = a / sqrt(1 - eccSquared * sin(LatRad) * sin(LatRad)) + T = tan(LatRad) * tan(LatRad) + C = eccPrimeSquared * cos(LatRad) * cos(LatRad) + A = cos(LatRad) * (LongRad - LongOriginRad) + + M = a * ((1 + - eccSquared / 4 + - 3 * eccSquared * eccSquared / 64 + - 5 * eccSquared * eccSquared * eccSquared / 256) * LatRad + - (3 * eccSquared / 8 + + 3 * eccSquared * eccSquared / 32 + + 45 * eccSquared * eccSquared * eccSquared / 1024) * sin(2 * LatRad) + + (15 * eccSquared * eccSquared / 256 + 45 * eccSquared * eccSquared * eccSquared / 1024) * sin(4 * LatRad) + - (35 * eccSquared * eccSquared * eccSquared / 3072) * sin(6 * LatRad)) + + UTMEasting = (k0 * N * (A + (1 - T + C) * A * A * A / 6 + + (5 - 18 * T + T * T + 72 * C - 58 * eccPrimeSquared) * A * A * A * A * A / 120) + + 500000.0) + + UTMNorthing = (k0 * (M + N * tan(LatRad) * (A * A / 2 + (5 - T + 9 * C + 4 * C * C) * A * A * A * A / 24 + + (61 + - 58 * T + + T * T + + 600 * C + - 330 * eccPrimeSquared) * A * A * A * A * A * A / 720))) + + if Lat < 0: + UTMNorthing = UTMNorthing + 10000000.0; # 10000000 meter offset for southern hemisphere + return (UTMZone, UTMEasting, UTMNorthing) + + +def _UTMLetterDesignator(Lat): + """This routine determines the correct UTM letter designator for the given + latitude returns 'Z' if latitude is outside the UTM limits of 84N to 80S + Written by Chuck Gantz- chuck.gantz@globalstar.com""" + + if 84 >= Lat >= 72: + return 'X' + elif 72 > Lat >= 64: + return 'W' + elif 64 > Lat >= 56: + return 'V' + elif 56 > Lat >= 48: + return 'U' + elif 48 > Lat >= 40: + return 'T' + elif 40 > Lat >= 32: + return 'S' + elif 32 > Lat >= 24: + return 'R' + elif 24 > Lat >= 16: + return 'Q' + elif 16 > Lat >= 8: + return 'P' + elif 8 > Lat >= 0: + return 'N' + elif 0 > Lat >= -8: + return 'M' + elif -8 > Lat >= -16: + return 'L' + elif -16 > Lat >= -24: + return 'K' + elif -24 > Lat >= -32: + return 'J' + elif -32 > Lat >= -40: + return 'H' + elif -40 > Lat >= -48: + return 'G' + elif -48 > Lat >= -56: + return 'F' + elif -56 > Lat >= -64: + return 'E' + elif -64 > Lat >= -72: + return 'D' + elif -72 > Lat >= -80: + return 'C' + else: + return 'Z' # if the Latitude is outside the UTM limits + + +# void UTMtoLL(int ReferenceEllipsoid, const double UTMNorthing, const double UTMEasting, const char* UTMZone, +# double& Lat, double& Long ) + +def UTMtoLL(ReferenceEllipsoid, northing, easting, zone): + """converts UTM coords to lat/long. Equations from USGS Bulletin 1532 +East Longitudes are positive, West longitudes are negative. +North latitudes are positive, South latitudes are negative +Lat and Long are in decimal degrees. 
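A minimal usage sketch of `LLtoUTM` above, following the module's own `__main__` block further below; ellipsoid id 23 selects WGS-84 in the `_ellipsoid` table.

```python
# Convert a WGS-84 lat/long to UTM; the function returns (zone, easting, northing).
zone, easting, northing = LLtoUTM(23, 45.00, -75.00)
print("zone=%s easting=%.1f northing=%.1f" % (zone, easting, northing))
```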
+Written by Chuck Gantz- chuck.gantz@globalstar.com +Converted to Python by Russ Nelson """ + + k0 = 0.9996 + a = _ellipsoid[ReferenceEllipsoid][_EquatorialRadius] + eccSquared = _ellipsoid[ReferenceEllipsoid][_eccentricitySquared] + e1 = (1 - sqrt(1 - eccSquared)) / (1 + sqrt(1 - eccSquared)) + # NorthernHemisphere; //1 for northern hemispher, 0 for southern + + x = easting - 500000.0 # remove 500,000 meter offset for longitude + y = northing + + ZoneLetter = zone[-1] + ZoneNumber = int(zone[:-1]) + if ZoneLetter >= 'N': + NorthernHemisphere = 1 # point is in northern hemisphere + else: + NorthernHemisphere = 0 # point is in southern hemisphere + y -= 10000000.0 # remove 10,000,000 meter offset used for southern hemisphere + + LongOrigin = (ZoneNumber - 1) * 6 - 180 + 3 # +3 puts origin in middle of zone + + eccPrimeSquared = (eccSquared) / (1 - eccSquared) + + M = y / k0 + mu = M / ( + a * (1 - eccSquared / 4 - 3 * eccSquared * eccSquared / 64 - 5 * eccSquared * eccSquared * eccSquared / 256)) + + phi1Rad = (mu + (3 * e1 / 2 - 27 * e1 * e1 * e1 / 32) * sin(2 * mu) + + (21 * e1 * e1 / 16 - 55 * e1 * e1 * e1 * e1 / 32) * sin(4 * mu) + + (151 * e1 * e1 * e1 / 96) * sin(6 * mu)) + phi1 = phi1Rad * _rad2deg; + + N1 = a / sqrt(1 - eccSquared * sin(phi1Rad) * sin(phi1Rad)) + T1 = tan(phi1Rad) * tan(phi1Rad) + C1 = eccPrimeSquared * cos(phi1Rad) * cos(phi1Rad) + R1 = a * (1 - eccSquared) / pow(1 - eccSquared * sin(phi1Rad) * sin(phi1Rad), 1.5) + D = x / (N1 * k0) + + Lat = phi1Rad - (N1 * tan(phi1Rad) / R1) * ( + D * D / 2 - (5 + 3 * T1 + 10 * C1 - 4 * C1 * C1 - 9 * eccPrimeSquared) * D * D * D * D / 24 + + (61 + 90 * T1 + 298 * C1 + 45 * T1 * T1 - 252 * eccPrimeSquared - 3 * C1 * C1) * D * D * D * D * D * D / 720) + Lat = Lat * _rad2deg + + Long = (D - (1 + 2 * T1 + C1) * D * D * D / 6 + ( + 5 - 2 * C1 + 28 * T1 - 3 * C1 * C1 + 8 * eccPrimeSquared + 24 * T1 * T1) + * D * D * D * D * D / 120) / cos(phi1Rad) + Long = LongOrigin + Long * _rad2deg + return (Lat, Long) + + +if __name__ == '__main__': + (z, e, n) = LLtoUTM(23, 45.00, -75.00) + print z, e, n + print UTMtoLL(23, n, e, z) diff --git a/daemon/core/configservice/__init__.py b/daemon/core/misc/__init__.py similarity index 100% rename from daemon/core/configservice/__init__.py rename to daemon/core/misc/__init__.py diff --git a/daemon/core/location/event.py b/daemon/core/misc/event.py similarity index 64% rename from daemon/core/location/event.py rename to daemon/core/misc/event.py index 9b300241..19ed7ced 100644 --- a/daemon/core/location/event.py +++ b/daemon/core/misc/event.py @@ -5,8 +5,6 @@ event.py: event loop implementation using a heap queue and threads. import heapq import threading import time -from functools import total_ordering -from typing import Any, Callable, Optional class Timer(threading.Thread): @@ -15,41 +13,41 @@ class Timer(threading.Thread): already running. """ - def __init__( - self, - interval: float, - func: Callable[..., None], - args: tuple[Any] = None, - kwargs: dict[Any, Any] = None, - ) -> None: + def __init__(self, interval, function, args=None, kwargs=None): """ Create a Timer instance. 
:param interval: time interval - :param func: function to call when timer finishes + :param function: function to call when timer finishes :param args: function arguments :param kwargs: function keyword arguments """ - super().__init__() - self.interval: float = interval - self.func: Callable[..., None] = func - self.finished: threading.Event = threading.Event() - self._running: threading.Lock = threading.Lock() - # validate arguments were provided - if args is None: - args = () - self.args: tuple[Any] = args - # validate keyword arguments were provided - if kwargs is None: - kwargs = {} - self.kwargs: dict[Any, Any] = kwargs + super(Timer, self).__init__() + self.interval = interval + self.function = function - def cancel(self) -> bool: + self.finished = threading.Event() + self._running = threading.Lock() + + # validate arguments were provided + if args: + self.args = args + else: + self.args = [] + + # validate keyword arguments were provided + if kwargs: + self.kwargs = kwargs + else: + self.kwargs = {} + + def cancel(self): """ Stop the timer if it hasn't finished yet. Return False if the timer was already running. :return: True if canceled, False otherwise + :rtype: bool """ locked = self._running.acquire(False) if locked: @@ -57,7 +55,7 @@ class Timer(threading.Thread): self._running.release() return locked - def run(self) -> None: + def run(self): """ Run the timer. @@ -66,24 +64,16 @@ class Timer(threading.Thread): self.finished.wait(self.interval) with self._running: if not self.finished.is_set(): - self.func(*self.args, **self.kwargs) + self.function(*self.args, **self.kwargs) self.finished.set() -@total_ordering -class Event: +class Event(object): """ Provides event objects that can be used within the EventLoop class. """ - def __init__( - self, - eventnum: int, - event_time: float, - func: Callable[..., None], - *args: Any, - **kwds: Any - ) -> None: + def __init__(self, eventnum, event_time, func, *args, **kwds): """ Create an Event instance. @@ -93,20 +83,27 @@ class Event: :param args: function arguments :param kwds: function keyword arguments """ - self.eventnum: int = eventnum - self.time: float = event_time - self.func: Callable[..., None] = func - self.args: tuple[Any] = args - self.kwds: dict[Any, Any] = kwds - self.canceled: bool = False + self.eventnum = eventnum + self.time = event_time + self.func = func + self.args = args + self.kwds = kwds + self.canceled = False - def __lt__(self, other: "Event") -> bool: - result = self.time < other.time - if result: - result = self.eventnum < other.eventnum - return result + def __cmp__(self, other): + """ + Comparison function. - def run(self) -> None: + :param Event other: event to compare with + :return: comparison result + :rtype: int + """ + tmp = cmp(self.time, other.time) + if tmp == 0: + tmp = cmp(self.eventnum, other.eventnum) + return tmp + + def run(self): """ Run an event. @@ -116,32 +113,33 @@ class Event: return self.func(*self.args, **self.kwds) - def cancel(self) -> None: + def cancel(self): """ Cancel event. :return: nothing """ + # XXX not thread-safe self.canceled = True -class EventLoop: +class EventLoop(object): """ Provides an event loop for running events. """ - def __init__(self) -> None: + def __init__(self): """ Creates a EventLoop instance. 
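A minimal sketch of driving this event loop, the same pattern `Ns2ScriptedMobility.startup` above uses via `session.event_loop.add_event`:

```python
# Schedule a callback two seconds after run() starts the loop, then stop it.
def fired():
    print("fired")

loop = EventLoop()
loop.add_event(2.0, fired)
loop.run()
# ... later, e.g. at session shutdown ...
loop.stop()
```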
""" - self.lock: threading.RLock = threading.RLock() - self.queue: list[Event] = [] - self.eventnum: int = 0 - self.timer: Optional[Timer] = None - self.running: bool = False - self.start: Optional[float] = None + self.lock = threading.RLock() + self.queue = [] + self.eventnum = 0 + self.timer = None + self.running = False + self.start = None - def _run_events(self) -> None: + def __run_events(self): """ Run events. @@ -152,7 +150,7 @@ class EventLoop: with self.lock: if not self.running or not self.queue: break - now = time.monotonic() + now = time.time() if self.queue[0].time > now: schedule = True break @@ -164,9 +162,9 @@ class EventLoop: with self.lock: self.timer = None if schedule: - self._schedule_event() + self.__schedule_event() - def _schedule_event(self) -> None: + def __schedule_event(self): """ Schedule event. @@ -177,14 +175,14 @@ class EventLoop: raise ValueError("scheduling event while not running") if not self.queue: return - delay = self.queue[0].time - time.monotonic() + delay = self.queue[0].time - time.time() if self.timer: raise ValueError("timer was already set") - self.timer = Timer(delay, self._run_events) + self.timer = Timer(delay, self.__run_events) self.timer.daemon = True self.timer.start() - def run(self) -> None: + def run(self): """ Start event loop. @@ -194,12 +192,12 @@ class EventLoop: if self.running: return self.running = True - self.start = time.monotonic() + self.start = time.time() for event in self.queue: event.time += self.start - self._schedule_event() + self.__schedule_event() - def stop(self) -> None: + def stop(self): """ Stop event loop. @@ -216,22 +214,23 @@ class EventLoop: self.running = False self.start = None - def add_event(self, delaysec: float, func: Callable, *args: Any, **kwds: Any): + def add_event(self, delaysec, func, *args, **kwds): """ Add an event to the event loop. - :param delaysec: delay in seconds for event + :param float delaysec: delay in seconds for event :param func: event function :param args: event arguments :param kwds: event keyword arguments :return: created event + :rtype: Event """ with self.lock: eventnum = self.eventnum self.eventnum += 1 evtime = float(delaysec) if self.running: - evtime += time.monotonic() + evtime += time.time() event = Event(eventnum, evtime, func, *args, **kwds) if self.queue: @@ -245,5 +244,5 @@ class EventLoop: if self.timer is not None and self.timer.cancel(): self.timer = None if self.running and self.timer is None: - self._schedule_event() + self.__schedule_event() return event diff --git a/daemon/core/misc/ipaddress.py b/daemon/core/misc/ipaddress.py new file mode 100644 index 00000000..4da49642 --- /dev/null +++ b/daemon/core/misc/ipaddress.py @@ -0,0 +1,446 @@ +""" +Helper objects for dealing with IPv4/v6 addresses. +""" + +import logging +import random +import socket +import struct +from socket import AF_INET +from socket import AF_INET6 + + +class MacAddress(object): + """ + Provides mac address utilities for use within core. + """ + + def __init__(self, address): + """ + Creates a MacAddress instance. + + :param str address: mac address + """ + self.addr = address + + def __str__(self): + """ + Create a string representation of a MacAddress. + + :return: string representation + :rtype: str + """ + return ":".join("%02x" % ord(x) for x in self.addr) + + def to_link_local(self): + """ + Convert the MAC address to a IPv6 link-local address, using EUI 48 + to EUI 64 conversion process per RFC 5342. 
+ + :return: ip address object + :rtype: IpAddress + """ + if not self.addr: + return IpAddress.from_string("::") + tmp = struct.unpack("!Q", "\x00\x00" + self.addr)[0] + nic = long(tmp) & 0x000000FFFFFFL + oui = long(tmp) & 0xFFFFFF000000L + # toggle U/L bit + oui ^= 0x020000000000L + # append EUI-48 octets + oui = (oui << 16) | 0xFFFE000000L + return IpAddress(AF_INET6, struct.pack("!QQ", 0xfe80 << 48, oui | nic)) + + @classmethod + def from_string(cls, s): + """ + Create a mac address object from a string. + + :param s: string representation of a mac address + :return: mac address class + :rtype: MacAddress + """ + addr = "".join(chr(int(x, 16)) for x in s.split(":")) + return cls(addr) + + @classmethod + def random(cls): + """ + Create a random mac address. + + :return: random mac address + :rtype: MacAddress + """ + tmp = random.randint(0, 0xFFFFFF) + # use the Xen OID 00:16:3E + tmp |= 0x00163E << 24 + tmpbytes = struct.pack("!Q", tmp) + return cls(tmpbytes[2:]) + + +class IpAddress(object): + """ + Provides ip utilities and functionality for use within core. + """ + + def __init__(self, af, address): + """ + Create a IpAddress instance. + + :param int af: address family + :param str address: ip address + :return: + """ + # check if (af, addr) is valid + if not socket.inet_ntop(af, address): + raise ValueError("invalid af/addr") + self.af = af + self.addr = address + + def is_ipv4(self): + """ + Checks if this is an ipv4 address. + + :return: True if ipv4 address, False otherwise + :rtype: bool + """ + return self.af == AF_INET + + def is_ipv6(self): + """ + Checks if this is an ipv6 address. + + :return: True if ipv6 address, False otherwise + :rtype: bool + """ + return self.af == AF_INET6 + + def __str__(self): + """ + Create a string representation of this address. + + :return: string representation of address + :rtype: str + """ + return socket.inet_ntop(self.af, self.addr) + + def __eq__(self, other): + """ + Checks for equality with another ip address. + + :param IpAddress other: other ip address to check equality with + :return: True is the other IpAddress is equal, False otherwise + :rtype: bool + """ + if not isinstance(other, IpAddress): + return False + elif self is other: + return True + else: + return other.af == self.af and other.addr == self.addr + + def __add__(self, other): + """ + Add value to ip addresses. + + :param int other: value to add to ip address + :return: added together ip address instance + :rtype: IpAddress + """ + try: + carry = int(other) + except ValueError: + logging.exception("error during addition") + return NotImplemented + + tmp = [ord(x) for x in self.addr] + for i in xrange(len(tmp) - 1, -1, -1): + x = tmp[i] + carry + tmp[i] = x & 0xff + carry = x >> 8 + if carry == 0: + break + addr = "".join(chr(x) for x in tmp) + return self.__class__(self.af, addr) + + def __sub__(self, other): + """ + Subtract value from ip address. + + :param int other: value to subtract from ip address + :return: + """ + try: + tmp = -int(other) + except ValueError: + logging.exception("error during subtraction") + return NotImplemented + + return self.__add__(tmp) + + @classmethod + def from_string(cls, s): + """ + Create a ip address from a string representation. 
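A minimal sketch of the `MacAddress` helpers above (this is Python 2 era code, so addresses are stored as packed byte strings):

```python
# Round-trip a MAC through from_string()/__str__() and derive related values.
mac = MacAddress.from_string("00:16:3e:00:00:01")
print(str(mac))                     # "00:16:3e:00:00:01"
link_local = mac.to_link_local()    # IPv6 link-local address via EUI-64
rand_mac = MacAddress.random()      # random MAC under the Xen OUI 00:16:3e
```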
+ + :param s: string representation to create ip address from + :return: ip address instance + :rtype: IpAddress + """ + for af in AF_INET, AF_INET6: + return cls(af, socket.inet_pton(af, s)) + + @staticmethod + def to_int(s): + """ + Convert IPv4 string to integer + + :param s: string to convert to 32-bit integer + :return: integer value + :rtype: int + """ + value = socket.inet_pton(AF_INET, s) + return struct.unpack("!I", value)[0] + + +class IpPrefix(object): + """ + Provides ip address generation and prefix utilities. + """ + + def __init__(self, af, prefixstr): + """ + Create a IpPrefix instance. + + :param int af: address family for ip prefix + :param prefixstr: ip prefix string + """ + # prefixstr format: address/prefixlen + tmp = prefixstr.split("/") + if len(tmp) > 2: + raise ValueError("invalid prefix: %s" % prefixstr) + self.af = af + if self.af == AF_INET: + self.addrlen = 32 + elif self.af == AF_INET6: + self.addrlen = 128 + else: + raise ValueError("invalid address family: %s" % self.af) + if len(tmp) == 2: + self.prefixlen = int(tmp[1]) + else: + self.prefixlen = self.addrlen + self.prefix = socket.inet_pton(self.af, tmp[0]) + if self.addrlen > self.prefixlen: + addrbits = self.addrlen - self.prefixlen + netmask = ((1L << self.prefixlen) - 1) << addrbits + prefix = "" + for i in xrange(-1, -(addrbits >> 3) - 2, -1): + prefix = chr(ord(self.prefix[i]) & (netmask & 0xff)) + prefix + netmask >>= 8 + self.prefix = self.prefix[:i] + prefix + + def __str__(self): + """ + String representation of an ip prefix. + + :return: string representation + :rtype: str + """ + return "%s/%s" % (socket.inet_ntop(self.af, self.prefix), self.prefixlen) + + def __eq__(self, other): + """ + Compare equality with another ip prefix. + + :param IpPrefix other: other ip prefix to compare with + :return: True is equal, False otherwise + :rtype: bool + """ + if not isinstance(other, IpPrefix): + return False + elif self is other: + return True + else: + return other.af == self.af and other.prefixlen == self.prefixlen and other.prefix == self.prefix + + def __add__(self, other): + """ + Add a value to this ip prefix. + + :param int other: value to add + :return: added ip prefix instance + :rtype: IpPrefix + """ + try: + tmp = int(other) + except ValueError: + logging.exception("error during addition") + return NotImplemented + + a = IpAddress(self.af, self.prefix) + (tmp << (self.addrlen - self.prefixlen)) + prefixstr = "%s/%s" % (a, self.prefixlen) + if self.__class__ == IpPrefix: + return self.__class__(self.af, prefixstr) + else: + return self.__class__(prefixstr) + + def __sub__(self, other): + """ + Subtract value from this ip prefix. + + :param int other: value to subtract + :return: subtracted ip prefix instance + :rtype: IpPrefix + """ + try: + tmp = -int(other) + except ValueError: + logging.exception("error during subtraction") + return NotImplemented + + return self.__add__(tmp) + + def addr(self, hostid): + """ + Create an ip address for a given host id. 
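A minimal sketch of `IpPrefix` as defined above, using the standard-library `AF_INET` constant already imported at the top of this module and the `addr()` method documented here:

```python
from socket import AF_INET

# Parse a prefix, render it, and step to the next /24 via the __add__ overload.
p = IpPrefix(AF_INET, "10.0.0.0/24")
print(str(p))          # "10.0.0.0/24"
print(str(p + 1))      # "10.0.1.0/24"
print(str(p.addr(1)))  # "10.0.0.1", the first usable host address
```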
+ + :param hostid: host id for an ip address + :return: ip address + :rtype: IpAddress + """ + tmp = int(hostid) + if tmp in [-1, 0, 1] and self.addrlen == self.prefixlen: + return IpAddress(self.af, self.prefix) + + if tmp == 0 or tmp > (1 << (self.addrlen - self.prefixlen)) - 1 or ( + self.af == AF_INET and tmp == (1 << (self.addrlen - self.prefixlen)) - 1): + raise ValueError("invalid hostid for prefix %s: %s" % (self, hostid)) + + addr = "" + prefix_endpoint = -1 + for i in xrange(-1, -(self.addrlen >> 3) - 1, -1): + prefix_endpoint = i + addr = chr(ord(self.prefix[i]) | (tmp & 0xff)) + addr + tmp >>= 8 + if not tmp: + break + addr = self.prefix[:prefix_endpoint] + addr + return IpAddress(self.af, addr) + + def min_addr(self): + """ + Return the minimum ip address for this prefix. + + :return: minimum ip address + :rtype: IpAddress + """ + return self.addr(1) + + def max_addr(self): + """ + Return the maximum ip address for this prefix. + + :return: maximum ip address + :rtype: IpAddress + """ + if self.af == AF_INET: + return self.addr((1 << (self.addrlen - self.prefixlen)) - 2) + else: + return self.addr((1 << (self.addrlen - self.prefixlen)) - 1) + + def num_addr(self): + """ + Retrieve the number of ip addresses for this prefix. + + :return: maximum number of ip addresses + :rtype: int + """ + return max(0, (1 << (self.addrlen - self.prefixlen)) - 2) + + def prefix_str(self): + """ + Retrieve the prefix string for this ip address. + + :return: prefix string + :rtype: str + """ + return "%s" % socket.inet_ntop(self.af, self.prefix) + + def netmask_str(self): + """ + Retrieve the netmask string for this ip address. + + :return: netmask string + :rtype: str + """ + addrbits = self.addrlen - self.prefixlen + netmask = ((1L << self.prefixlen) - 1) << addrbits + netmaskbytes = struct.pack("!L", netmask) + return IpAddress(af=AF_INET, address=netmaskbytes).__str__() + + +class Ipv4Prefix(IpPrefix): + """ + Provides an ipv4 specific class for ip prefixes. + """ + + def __init__(self, prefixstr): + """ + Create a Ipv4Prefix instance. + + :param str prefixstr: ip prefix + """ + IpPrefix.__init__(self, AF_INET, prefixstr) + + +class Ipv6Prefix(IpPrefix): + """ + Provides an ipv6 specific class for ip prefixes. + """ + + def __init__(self, prefixstr): + """ + Create a Ipv6Prefix instance. + + :param str prefixstr: ip prefix + """ + IpPrefix.__init__(self, AF_INET6, prefixstr) + + +def is_ip_address(af, addrstr): + """ + Check if ip address string is a valid ip address. + + :param int af: address family + :param str addrstr: ip address string + :return: True if a valid ip address, False otherwise + :rtype: bool + """ + try: + socket.inet_pton(af, addrstr) + return True + except IOError: + return False + + +def is_ipv4_address(addrstr): + """ + Check if ipv4 address string is a valid ipv4 address. + + :param str addrstr: ipv4 address string + :return: True if a valid ipv4 address, False otherwise + :rtype: bool + """ + return is_ip_address(AF_INET, addrstr) + + +def is_ipv6_address(addrstr): + """ + Check if ipv6 address string is a valid ipv6 address. + + :param str addrstr: ipv6 address string + :return: True if a valid ipv6 address, False otherwise + :rtype: bool + """ + return is_ip_address(AF_INET6, addrstr) diff --git a/daemon/core/misc/nodemaps.py b/daemon/core/misc/nodemaps.py new file mode 100644 index 00000000..8c3d0c37 --- /dev/null +++ b/daemon/core/misc/nodemaps.py @@ -0,0 +1,28 @@ +""" +Provides default node maps that can be used to run core with. 
+""" + +from core.emane.nodes import EmaneNet +from core.emane.nodes import EmaneNode +from core.enumerations import NodeTypes +from core.netns import nodes +from core.netns.vnet import GreTapBridge +from core.phys import pnodes + +# legacy core nodes, that leverage linux bridges +NODES = { + NodeTypes.DEFAULT: nodes.CoreNode, + NodeTypes.PHYSICAL: pnodes.PhysicalNode, + NodeTypes.TBD: None, + NodeTypes.SWITCH: nodes.SwitchNode, + NodeTypes.HUB: nodes.HubNode, + NodeTypes.WIRELESS_LAN: nodes.WlanNode, + NodeTypes.RJ45: nodes.RJ45Node, + NodeTypes.TUNNEL: nodes.TunnelNode, + NodeTypes.KTUNNEL: None, + NodeTypes.EMANE: EmaneNode, + NodeTypes.EMANE_NET: EmaneNet, + NodeTypes.TAP_BRIDGE: GreTapBridge, + NodeTypes.PEER_TO_PEER: nodes.PtpNet, + NodeTypes.CONTROL_NET: nodes.CtrlNet +} diff --git a/daemon/core/misc/nodeutils.py b/daemon/core/misc/nodeutils.py new file mode 100644 index 00000000..ef4dacb6 --- /dev/null +++ b/daemon/core/misc/nodeutils.py @@ -0,0 +1,97 @@ +""" +Serves as a global point for storing and retrieving node types needed during simulation. +""" + +import logging + +_NODE_MAP = None + + +def _log_map(): + global _NODE_MAP + for key, value in _NODE_MAP.iteritems(): + name = None + if value: + name = value.__name__ + logging.debug("node type (%s) - class (%s)", key.name, name) + + +def _convert_map(x, y): + """ + Convenience method to create a human readable version of the node map to log. + + :param dict x: dictionary to reduce node items into + :param tuple y: current node item + :return: + """ + x[y[0].name] = y[1] + return x + + +def update_node_map(node_map): + """ + Update the current node map with the provided node map values. + + + :param dict node_map: node map to update with + """ + global _NODE_MAP + _NODE_MAP.update(node_map) + _log_map() + + +def set_node_map(node_map): + """ + Set the global node map that proides a consistent way to retrieve differently configured nodes. + + :param dict node_map: node map to set to + :return: nothing + """ + global _NODE_MAP + _NODE_MAP = node_map + _log_map() + + +def get_node_class(node_type): + """ + Retrieve the node class for a given node type. + + :param int node_type: node type to retrieve class for + :return: node class + """ + global _NODE_MAP + return _NODE_MAP[node_type] + + +def get_node_type(node_class): + """ + Retrieve the node type given a node class. + + :param class node_class: node class to get type for + :return: node type + :rtype: core.enumerations.NodeTypes + """ + global _NODE_MAP + node_type_map = {v: k for k, v in _NODE_MAP.iteritems()} + return node_type_map.get(node_class) + + +def is_node(obj, node_types): + """ + Validates if an object is one of the provided node types. + + :param obj: object to check type for + :param int|tuple|list node_types: node type(s) to check against + :return: True if the object is one of the node types, False otherwise + :rtype: bool + """ + type_classes = [] + if isinstance(node_types, (tuple, list)): + for node_type in node_types: + type_class = get_node_class(node_type) + type_classes.append(type_class) + else: + type_class = get_node_class(node_types) + type_classes.append(type_class) + + return isinstance(obj, tuple(type_classes)) diff --git a/daemon/core/misc/quagga.py b/daemon/core/misc/quagga.py new file mode 100644 index 00000000..9887277d --- /dev/null +++ b/daemon/core/misc/quagga.py @@ -0,0 +1,172 @@ +""" +quagga.py: helper class for generating Quagga configuration. 
+""" + +from string import Template + +from core.misc import utils + + +def addrstr(x): + if x.find(".") >= 0: + return "ip address %s" % x + elif x.find(":") >= 0: + return "ipv6 address %s" % x + else: + raise ValueError("invalid address: %s" % x) + + +class NetIf(object): + """ + Represents a network interface. + """ + + def __init__(self, name, addrlist=None): + """ + Create a NetIf instance. + + :param str name: interface name + :param addrlist: address list for the interface + """ + self.name = name + + if addrlist: + self.addrlist = addrlist + else: + self.addrlist = [] + + +class Conf(object): + """ + Provides a configuration object. + """ + + template = Template("") + + def __init__(self, **kwargs): + """ + Create a Conf instance. + + :param dict kwargs: configuration keyword arguments + """ + self.kwargs = kwargs + + def __str__(self): + """ + Provides a string representation of a configuration object. + + :return: string representation + :rtype: str + """ + tmp = self.template.substitute(**self.kwargs) + if tmp[-1] == "\n": + tmp = tmp[:-1] + return tmp + + +class QuaggaOSPF6Interface(Conf): + """ + Provides quagga ospf6 interface functionality. + """ + AF_IPV6_ID = 0 + AF_IPV4_ID = 65 + + template = Template("""\ +interface $interface + $addr + ipv6 ospf6 instance-id $instanceid + ipv6 ospf6 hello-interval 2 + ipv6 ospf6 dead-interval 11 + ipv6 ospf6 retransmit-interval 5 + ipv6 ospf6 network $network + ipv6 ospf6 diffhellos + ipv6 ospf6 adjacencyconnectivity uniconnected + ipv6 ospf6 lsafullness mincostlsa +""") + + # ip address $ipaddr/32 + # ipv6 ospf6 simhelloLLtoULRecv :$simhelloport + # !$ipaddr:$simhelloport + + def __init__(self, netif, instanceid=AF_IPV4_ID, network="manet-designated-router", **kwargs): + """ + Create a QuaggaOSPF6Interface instance. + + :param netif: network interface + :param int instanceid: instance id + :param network: network + :param dict kwargs: keyword arguments + """ + self.netif = netif + addr = "\n ".join(map(addrstr, netif.addrlist)) + self.instanceid = instanceid + self.network = network + Conf.__init__(self, interface=netif.name, addr=addr, + instanceid=instanceid, network=network, **kwargs) + + def name(self): + """ + Retrieve network interface name. + + :return: network interface name + :rtype: str + """ + return self.netif.name + + +class QuaggaOSPF6(Conf): + """ + Provides quagga ospf6 functionality. + """ + template = Template("""\ +$interfaces +! +router ospf6 + router-id $routerid + $ospfifs + $redistribute +""") + + def __init__(self, ospf6ifs, area, routerid, redistribute="! no redistribute"): + """ + Create a QuaggaOSPF6 instance. + + :param list ospf6ifs: ospf6 interfaces + :param area: area + :param routerid: router id + :param str redistribute: redistribute value + """ + ospf6ifs = utils.make_tuple(ospf6ifs) + interfaces = "\n!\n".join(map(str, ospf6ifs)) + ospfifs = "\n ".join(map(lambda x: "interface %s area %s" % (x.name(), area), ospf6ifs)) + Conf.__init__(self, interfaces=interfaces, routerid=routerid, ospfifs=ospfifs, redistribute=redistribute) + + +class QuaggaConf(Conf): + """ + Provides quagga configuration functionality. + """ + template = Template("""\ +log file $logfile +$debugs +! +$routers +! +$forwarding +""") + + def __init__(self, routers, logfile, debugs=()): + """ + Create a QuaggaConf instance. 
+ + :param list routers: routers + :param str logfile: log file name + :param debugs: debug options + """ + routers = "\n!\n".join(map(str, utils.make_tuple(routers))) + if debugs: + debugs = "\n".join(utils.make_tuple(debugs)) + else: + debugs = "! no debugs" + forwarding = "ip forwarding\nipv6 forwarding" + Conf.__init__(self, logfile=logfile, debugs=debugs, routers=routers, forwarding=forwarding) diff --git a/daemon/core/misc/structutils.py b/daemon/core/misc/structutils.py new file mode 100644 index 00000000..cfcc7a93 --- /dev/null +++ b/daemon/core/misc/structutils.py @@ -0,0 +1,46 @@ +""" +Utilities for working with python struct data. +""" + +import logging + + +def pack_values(clazz, packers): + """ + Pack values for a given legacy class. + + :param class clazz: class that will provide a pack method + :param list packers: a list of tuples that are used to pack values and transform them + :return: packed data string of all values + """ + + # iterate through tuples of values to pack + data = "" + for packer in packers: + # check if a transformer was provided for valid values + transformer = None + if len(packer) == 2: + tlv_type, value = packer + elif len(packer) == 3: + tlv_type, value, transformer = packer + else: + raise RuntimeError("packer had more than 3 arguments") + + # convert unicode to normal str for packing + if isinstance(value, unicode): + value = str(value) + + # only pack actual values and avoid packing empty strings + # protobuf defaults to empty strings and does no imply a value to set + if value is None or (isinstance(value, str) and not value): + continue + + # transform values as needed + if transformer: + value = transformer(value) + + # pack and add to existing data + logging.debug("packing: %s - %s", tlv_type, value) + data += clazz.pack(tlv_type.value, value) + + return data diff --git a/daemon/core/misc/utils.py b/daemon/core/misc/utils.py new file mode 100644 index 00000000..f2e578bf --- /dev/null +++ b/daemon/core/misc/utils.py @@ -0,0 +1,406 @@ +""" +Miscellaneous utility functions, wrappers around some subprocess procedures. +""" + +import importlib +import inspect +import logging +import os +import shlex +import subprocess +import sys + +import fcntl + +from core import CoreCommandError + +DEVNULL = open(os.devnull, "wb") + + +def _detach_init(): + """ + Fork a child process and exit. + + :return: nothing + """ + if os.fork(): + # parent exits + os._exit(0) + os.setsid() + + +def _valid_module(path, file_name): + """ + Check if file is a valid python module. + + :param str path: path to file + :param str file_name: file name to check + :return: True if a valid python module file, False otherwise + :rtype: bool + """ + file_path = os.path.join(path, file_name) + if not os.path.isfile(file_path): + return False + + if file_name.startswith("_"): + return False + + if not file_name.endswith(".py"): + return False + + return True + + +def _is_class(module, member, clazz): + """ + Validates if a module member is a class and an instance of a CoreService. + + :param module: module to validate for service + :param member: member to validate for service + :param clazz: clazz type to check for validation + :return: True if a valid service, False otherwise + :rtype: bool + """ + if not inspect.isclass(member): + return False + + if not issubclass(member, clazz): + return False + + if member.__module__ != module.__name__: + return False + + return True + + +def _is_exe(file_path): + """ + Check if a given file path exists and is an executable file. 
+ + :param str file_path: file path to check + :return: True if the file is considered and executable file, False otherwise + :rtype: bool + """ + return os.path.isfile(file_path) and os.access(file_path, os.X_OK) + + +def close_onexec(fd): + """ + Close on execution of a shell process. + + :param fd: file descriptor to close + :return: nothing + """ + fdflags = fcntl.fcntl(fd, fcntl.F_GETFD) + fcntl.fcntl(fd, fcntl.F_SETFD, fdflags | fcntl.FD_CLOEXEC) + + +def check_executables(executables): + """ + Check executables, verify they exist and are executable. + + :param list[str] executables: executable to check + :return: nothing + :raises EnvironmentError: when an executable doesn't exist or is not executable + """ + for executable in executables: + if not _is_exe(executable): + raise EnvironmentError("executable not found: %s" % executable) + + +def make_tuple(obj): + """ + Create a tuple from an object, or return the object itself. + + :param obj: object to convert to a tuple + :return: converted tuple or the object itself + :rtype: tuple + """ + if hasattr(obj, "__iter__"): + return tuple(obj) + else: + return obj, + + +def make_tuple_fromstr(s, value_type): + """ + Create a tuple from a string. + + :param str|unicode s: string to convert to a tuple + :param value_type: type of values to be contained within tuple + :return: tuple from string + :rtype: tuple + """ + # remove tuple braces and strip commands and space from all values in the tuple string + values = [] + for x in s.strip("(), ").split(","): + x = x.strip("' ") + if x: + values.append(x) + return tuple(value_type(i) for i in values) + + +def split_args(args): + """ + Convenience method for splitting potential string commands into a shell-like syntax list. + + :param list/str args: command list or string + :return: shell-like syntax list + :rtype: list + """ + if isinstance(args, basestring): + args = shlex.split(args) + return args + + +def mute_detach(args, **kwargs): + """ + Run a muted detached process by forking it. + + :param list[str]|str args: arguments for the command + :param dict kwargs: keyword arguments for the command + :return: process id of the command + :rtype: int + """ + args = split_args(args) + kwargs["preexec_fn"] = _detach_init + kwargs["stdout"] = DEVNULL + kwargs["stderr"] = subprocess.STDOUT + return subprocess.Popen(args, **kwargs).pid + + +def cmd(args, wait=True): + """ + Runs a command on and returns the exit status. + + :param list[str]|str args: command arguments + :param bool wait: wait for command to end or not + :return: command status + :rtype: int + """ + args = split_args(args) + logging.debug("command: %s", args) + try: + p = subprocess.Popen(args) + if not wait: + return 0 + return p.wait() + except OSError: + raise CoreCommandError(-1, args) + + +def cmd_output(args): + """ + Execute a command on the host and return a tuple containing the exit status and result string. stderr output + is folded into the stdout result string. 
+ + :param list[str]|str args: command arguments + :return: command status and stdout + :rtype: tuple[int, str] + :raises CoreCommandError: when the file to execute is not found + """ + args = split_args(args) + logging.debug("command: %s", args) + try: + p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + stdout, _ = p.communicate() + status = p.wait() + return status, stdout.strip() + except OSError: + raise CoreCommandError(-1, args) + + +def check_cmd(args, **kwargs): + """ + Execute a command on the host and return a tuple containing the exit status and result string. stderr output + is folded into the stdout result string. + + :param list[str]|str args: command arguments + :param dict kwargs: keyword arguments to pass to subprocess.Popen + :return: combined stdout and stderr + :rtype: str + :raises CoreCommandError: when there is a non-zero exit status or the file to execute is not found + """ + kwargs["stdout"] = subprocess.PIPE + kwargs["stderr"] = subprocess.STDOUT + args = split_args(args) + logging.debug("command: %s", args) + try: + p = subprocess.Popen(args, **kwargs) + stdout, _ = p.communicate() + status = p.wait() + if status != 0: + raise CoreCommandError(status, args, stdout) + return stdout.strip() + except OSError: + raise CoreCommandError(-1, args) + + +def hex_dump(s, bytes_per_word=2, words_per_line=8): + """ + Hex dump of a string. + + :param str s: string to hex dump + :param bytes_per_word: number of bytes per word + :param words_per_line: number of words per line + :return: hex dump of string + """ + dump = "" + count = 0 + total_bytes = bytes_per_word * words_per_line + + while s: + line = s[:total_bytes] + s = s[total_bytes:] + tmp = map(lambda x: ("%02x" * bytes_per_word) % x, + zip(*[iter(map(ord, line))] * bytes_per_word)) + if len(line) % 2: + tmp.append("%x" % ord(line[-1])) + dump += "0x%08x: %s\n" % (count, " ".join(tmp)) + count += len(line) + return dump[:-1] + + +def file_munge(pathname, header, text): + """ + Insert text at the end of a file, surrounded by header comments. + + :param str pathname: file path to add text to + :param str header: header text comments + :param str text: text to append to file + :return: nothing + """ + # prevent duplicates + file_demunge(pathname, header) + + with open(pathname, "a") as append_file: + append_file.write("# BEGIN %s\n" % header) + append_file.write(text) + append_file.write("# END %s\n" % header) + + +def file_demunge(pathname, header): + """ + Remove text that was inserted in a file surrounded by header comments. + + :param str pathname: file path to open for removing a header + :param str header: header text to target for removal + :return: nothing + """ + with open(pathname, "r") as read_file: + lines = read_file.readlines() + + start = None + end = None + + for i, line in enumerate(lines): + if line == "# BEGIN %s\n" % header: + start = i + elif line == "# END %s\n" % header: + end = i + 1 + + if start is None or end is None: + return + + with open(pathname, "w") as write_file: + lines = lines[:start] + lines[end:] + write_file.write("".join(lines)) + + +def expand_corepath(pathname, session=None, node=None): + """ + Expand a file path given session information. 
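A minimal sketch of the command helpers above: `cmd_output` folds stderr into stdout and returns a status/output pair, while `check_cmd` raises `CoreCommandError` on a non-zero exit.

```python
status, output = cmd_output("ip link show")
if status != 0:
    print("command failed: %s" % output)

hostname = check_cmd("hostname")   # raises CoreCommandError on failure
```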
+ + :param str pathname: file path to expand + :param core.session.Session session: core session object to expand path with + :param core.netns.LxcNode node: node to expand path with + :return: expanded path + :rtype: str + """ + if session is not None: + pathname = pathname.replace("~", "/home/%s" % session.user) + pathname = pathname.replace("%SESSION%", str(session.id)) + pathname = pathname.replace("%SESSION_DIR%", session.session_dir) + pathname = pathname.replace("%SESSION_USER%", session.user) + + if node is not None: + pathname = pathname.replace("%NODE%", str(node.objid)) + pathname = pathname.replace("%NODENAME%", node.name) + + return pathname + + +def sysctl_devname(devname): + """ + Translate a device name to the name used with sysctl. + + :param str devname: device name to translate + :return: translated device name + :rtype: str + """ + if devname is None: + return None + return devname.replace(".", "/") + + +def load_config(filename, d): + """ + Read key=value pairs from a file, into a dict. Skip comments; strip newline characters and spacing. + + :param str filename: file to read into a dictionary + :param dict d: dictionary to read file into + :return: nothing + """ + with open(filename, "r") as f: + lines = f.readlines() + + for line in lines: + if line[:1] == "#": + continue + + try: + key, value = line.split("=", 1) + d[key] = value.strip() + except ValueError: + logging.exception("error reading file to dict: %s", filename) + + +def load_classes(path, clazz): + """ + Dynamically load classes for use within CORE. + + :param path: path to load classes from + :param clazz: class type expected to be inherited from for loading + :return: list of classes loaded + """ + # validate path exists + logging.debug("attempting to load modules from path: %s", path) + if not os.path.isdir(path): + logging.warn("invalid custom module directory specified" ": %s" % path) + # check if path is in sys.path + parent_path = os.path.dirname(path) + if parent_path not in sys.path: + logging.debug("adding parent path to allow imports: %s", parent_path) + sys.path.append(parent_path) + + # retrieve potential service modules, and filter out invalid modules + base_module = os.path.basename(path) + module_names = os.listdir(path) + module_names = filter(lambda x: _valid_module(path, x), module_names) + module_names = map(lambda x: x[:-3], module_names) + + # import and add all service modules in the path + classes = [] + for module_name in module_names: + import_statement = "%s.%s" % (base_module, module_name) + logging.debug("importing custom module: %s", import_statement) + try: + module = importlib.import_module(import_statement) + members = inspect.getmembers(module, lambda x: _is_class(module, x, clazz)) + for member in members: + valid_class = member[1] + classes.append(valid_class) + except: + logging.exception("unexpected error during import, skipping: %s", import_statement) + + return classes diff --git a/daemon/core/misc/utm.py b/daemon/core/misc/utm.py new file mode 100644 index 00000000..b80a7d6d --- /dev/null +++ b/daemon/core/misc/utm.py @@ -0,0 +1,259 @@ +""" +utm +=== + +.. 
image:: https://travis-ci.org/Turbo87/utm.png + +Bidirectional UTM-WGS84 converter for python + +Usage +----- + +:: + + import utm + +Convert a (latitude, longitude) tuple into an UTM coordinate:: + + utm.from_latlon(51.2, 7.5) + >>> (395201.3103811303, 5673135.241182375, 32, 'U') + +Convert an UTM coordinate into a (latitude, longitude) tuple:: + + utm.to_latlon(340000, 5710000, 32, 'U') + >>> (51.51852098408468, 6.693872395145327) + +Speed +----- + +The library has been compared to the more generic pyproj library by running the +unit test suite through pyproj instead of utm. These are the results: + +* with pyproj (without projection cache): 4.0 - 4.5 sec +* with pyproj (with projection cache): 0.9 - 1.0 sec +* with utm: 0.4 - 0.5 sec + +Authors +------- + +* Tobias Bieniek + +License +------- + +Copyright (C) 2012 Tobias Bieniek + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
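The examples above cover `from_latlon`/`to_latlon`; the module also ends with a `haversine` helper (defined further below), sketched here as a hedged aside:

```python
# haversine(lon1, lat1, lon2, lat2) -- note the longitude-first argument
# order -- returns the great-circle distance in meters.
meters = haversine(-75.0, 45.0, -75.1, 45.1)
print("%.0f m" % meters)
```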
+""" + +import math + +__all__ = ['to_latlon', 'from_latlon'] + + +class OutOfRangeError(ValueError): + pass + + +K0 = 0.9996 + +E = 0.00669438 +E2 = E * E +E3 = E2 * E +E_P2 = E / (1.0 - E) + +SQRT_E = math.sqrt(1 - E) +_E = (1 - SQRT_E) / (1 + SQRT_E) +_E3 = _E * _E * _E +_E4 = _E3 * _E + +M1 = (1 - E / 4 - 3 * E2 / 64 - 5 * E3 / 256) +M2 = (3 * E / 8 + 3 * E2 / 32 + 45 * E3 / 1024) +M3 = (15 * E2 / 256 + 45 * E3 / 1024) +M4 = (35 * E3 / 3072) + +P2 = (3 * _E / 2 - 27 * _E3 / 32) +P3 = (21 * _E3 / 16 - 55 * _E4 / 32) +P4 = (151 * _E3 / 96) + +R = 6378137 + +ZONE_LETTERS = [ + (84, None), (72, 'X'), (64, 'W'), (56, 'V'), (48, 'U'), (40, 'T'), + (32, 'S'), (24, 'R'), (16, 'Q'), (8, 'P'), (0, 'N'), (-8, 'M'), (-16, 'L'), + (-24, 'K'), (-32, 'J'), (-40, 'H'), (-48, 'G'), (-56, 'F'), (-64, 'E'), + (-72, 'D'), (-80, 'C') +] + + +def to_latlon(easting, northing, zone_number, zone_letter): + zone_letter = zone_letter.upper() + + if not 100000 <= easting < 1000000: + raise OutOfRangeError('easting out of range (must be between 100.000 m and 999.999 m)') + if not 0 <= northing <= 10000000: + raise OutOfRangeError('northing out of range (must be between 0 m and 10.000.000 m)') + if not 1 <= zone_number <= 60: + raise OutOfRangeError('zone number out of range (must be between 1 and 60)') + if not 'C' <= zone_letter <= 'X' or zone_letter in ['I', 'O']: + raise OutOfRangeError('zone letter out of range (must be between C and X)') + + x = easting - 500000 + y = northing + + if zone_letter < 'N': + y -= 10000000 + + m = y / K0 + mu = m / (R * M1) + + p_rad = (mu + P2 * math.sin(2 * mu) + P3 * math.sin(4 * mu) + P4 * math.sin(6 * mu)) + + p_sin = math.sin(p_rad) + p_sin2 = p_sin * p_sin + + p_cos = math.cos(p_rad) + + p_tan = p_sin / p_cos + p_tan2 = p_tan * p_tan + p_tan4 = p_tan2 * p_tan2 + + ep_sin = 1 - E * p_sin2 + ep_sin_sqrt = math.sqrt(1 - E * p_sin2) + + n = R / ep_sin_sqrt + r = (1 - E) / ep_sin + + c = _E * p_cos ** 2 + c2 = c * c + + d = x / (n * K0) + d2 = d * d + d3 = d2 * d + d4 = d3 * d + d5 = d4 * d + d6 = d5 * d + + latitude = (p_rad - (p_tan / r) * + (d2 / 2 - + d4 / 24 * (5 + 3 * p_tan2 + 10 * c - 4 * c2 - 9 * E_P2)) + + d6 / 720 * (61 + 90 * p_tan2 + 298 * c + 45 * p_tan4 - 252 * E_P2 - 3 * c2)) + + longitude = (d - + d3 / 6 * (1 + 2 * p_tan2 + c) + + d5 / 120 * (5 - 2 * c + 28 * p_tan2 - 3 * c2 + 8 * E_P2 + 24 * p_tan4)) / p_cos + + return (math.degrees(latitude), + math.degrees(longitude) + zone_number_to_central_longitude(zone_number)) + + +def from_latlon(latitude, longitude): + if not -80.0 <= latitude <= 84.0: + raise OutOfRangeError('latitude out of range (must be between 80 deg S and 84 deg N)') + if not -180.0 <= longitude <= 180.0: + raise OutOfRangeError('northing out of range (must be between 180 deg W and 180 deg E)') + + lat_rad = math.radians(latitude) + lat_sin = math.sin(lat_rad) + lat_cos = math.cos(lat_rad) + + lat_tan = lat_sin / lat_cos + lat_tan2 = lat_tan * lat_tan + lat_tan4 = lat_tan2 * lat_tan2 + + lon_rad = math.radians(longitude) + + zone_number = latlon_to_zone_number(latitude, longitude) + central_lon = zone_number_to_central_longitude(zone_number) + central_lon_rad = math.radians(central_lon) + + zone_letter = latitude_to_zone_letter(latitude) + + n = R / math.sqrt(1 - E * lat_sin ** 2) + c = E_P2 * lat_cos ** 2 + + a = lat_cos * (lon_rad - central_lon_rad) + a2 = a * a + a3 = a2 * a + a4 = a3 * a + a5 = a4 * a + a6 = a5 * a + + m = R * (M1 * lat_rad - + M2 * math.sin(2 * lat_rad) + + M3 * math.sin(4 * lat_rad) - + M4 * math.sin(6 * lat_rad)) + + 
easting = K0 * n * (a + + a3 / 6 * (1 - lat_tan2 + c) + + a5 / 120 * (5 - 18 * lat_tan2 + lat_tan4 + 72 * c - 58 * E_P2)) + 500000 + + northing = K0 * (m + n * lat_tan * (a2 / 2 + + a4 / 24 * (5 - lat_tan2 + 9 * c + 4 * c ** 2) + + a6 / 720 * (61 - 58 * lat_tan2 + lat_tan4 + 600 * c - 330 * E_P2))) + + if latitude < 0: + northing += 10000000 + + return easting, northing, zone_number, zone_letter + + +def latitude_to_zone_letter(latitude): + for lat_min, zone_letter in ZONE_LETTERS: + if latitude >= lat_min: + return zone_letter + + return None + + +def latlon_to_zone_number(latitude, longitude): + if 56 <= latitude <= 64 and 3 <= longitude <= 12: + return 32 + + if 72 <= latitude <= 84 and longitude >= 0: + if longitude <= 9: + return 31 + elif longitude <= 21: + return 33 + elif longitude <= 33: + return 35 + elif longitude <= 42: + return 37 + + return int((longitude + 180) / 6) + 1 + + +def zone_number_to_central_longitude(zone_number): + return (zone_number - 1) * 6 - 180 + 3 + + +def haversine(lon1, lat1, lon2, lat2): + """ + Calculate the great circle distance between two points + on the earth (specified in decimal degrees) + """ + # convert decimal degrees to radians + lon1, lat1, lon2, lat2 = map(math.radians, [lon1, lat1, lon2, lat2]) + # haversine formula + dlon = lon2 - lon1 + dlat = lat2 - lat1 + a = math.sin(dlat / 2) ** 2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2) ** 2 + c = 2 * math.asin(math.sqrt(a)) + m = 6367000 * c + return m diff --git a/daemon/core/mobility.py b/daemon/core/mobility.py new file mode 100644 index 00000000..4a9eb746 --- /dev/null +++ b/daemon/core/mobility.py @@ -0,0 +1,1162 @@ +""" +mobility.py: mobility helpers for moving nodes and calculating wireless range. +""" + +import heapq +import logging +import math +import os +import threading +import time + +from core.conf import ConfigGroup +from core.conf import ConfigurableOptions +from core.conf import Configuration +from core.conf import ModelManager +from core.coreobj import PyCoreNode +from core.data import EventData +from core.data import LinkData +from core.enumerations import ConfigDataTypes +from core.enumerations import EventTypes +from core.enumerations import LinkTypes +from core.enumerations import MessageFlags +from core.enumerations import MessageTypes +from core.enumerations import NodeTlvs +from core.enumerations import RegisterTlvs +from core.misc import utils +from core.misc.ipaddress import IpAddress + + +class MobilityManager(ModelManager): + """ + Member of session class for handling configuration data for mobility and + range models. + """ + name = "MobilityManager" + config_type = RegisterTlvs.WIRELESS.value + + def __init__(self, session): + """ + Creates a MobilityManager instance. + + :param core.session.Session session: session this manager is tied to + """ + super(MobilityManager, self).__init__() + self.session = session + self.models[BasicRangeModel.name] = BasicRangeModel + self.models[Ns2ScriptedMobility.name] = Ns2ScriptedMobility + + # dummy node objects for tracking position of nodes on other servers + self.phys = {} + self.physnets = {} + self.session.broker.handlers.add(self.physnodehandlelink) + + def startup(self, node_ids=None): + """ + Session is transitioning from instantiation to runtime state. + Instantiate any mobility models that have been configured for a WLAN. 
+ + :param list node_ids: node ids to startup + :return: nothing + """ + if node_ids is None: + node_ids = self.nodes() + + for node_id in node_ids: + logging.info("checking mobility startup for node: %s", node_id) + logging.info("node mobility configurations: %s", self.get_all_configs(node_id)) + + try: + node = self.session.get_object(node_id) + except KeyError: + logging.warn("skipping mobility configuration for unknown node: %s", node_id) + continue + + for model_name in self.models.iterkeys(): + config = self.get_configs(node_id, model_name) + if not config: + continue + model_class = self.models[model_name] + self.set_model(node, model_class, config) + + if self.session.master: + self.installphysnodes(node) + + if node.mobility: + self.session.event_loop.add_event(0.0, node.mobility.startup) + + def handleevent(self, event_data): + """ + Handle an Event Message used to start, stop, or pause + mobility scripts for a given WlanNode. + + :param EventData event_data: event data to handle + :return: nothing + """ + event_type = event_data.event_type + node_id = event_data.node + name = event_data.name + + try: + node = self.session.get_object(node_id) + except KeyError: + logging.exception("Ignoring event for model '%s', unknown node '%s'", name, node_id) + return + + # name is e.g. "mobility:ns2script" + models = name[9:].split(',') + for model in models: + try: + cls = self.models[model] + except KeyError: + logging.warn("Ignoring event for unknown model '%s'", model) + continue + + if cls.config_type in [RegisterTlvs.WIRELESS.value, RegisterTlvs.MOBILITY.value]: + model = node.mobility + else: + continue + + if model is None: + logging.warn("Ignoring event, %s has no model", node.name) + continue + + if cls.name != model.name: + logging.warn("Ignoring event for %s wrong model %s,%s", node.name, cls.name, model.name) + continue + + if event_type == EventTypes.STOP.value or event_type == EventTypes.RESTART.value: + model.stop(move_initial=True) + if event_type == EventTypes.START.value or event_type == EventTypes.RESTART.value: + model.start() + if event_type == EventTypes.PAUSE.value: + model.pause() + + def sendevent(self, model): + """ + Send an event message on behalf of a mobility model. + This communicates the current and end (max) times to the GUI. + + :param WayPointMobility model: mobility model to send event for + :return: nothing + """ + event_type = EventTypes.NONE.value + if model.state == model.STATE_STOPPED: + event_type = EventTypes.STOP.value + elif model.state == model.STATE_RUNNING: + event_type = EventTypes.START.value + elif model.state == model.STATE_PAUSED: + event_type = EventTypes.PAUSE.value + + data = "start=%d" % int(model.lasttime - model.timezero) + data += " end=%d" % int(model.endtime) + + event_data = EventData( + node=model.object_id, + event_type=event_type, + name="mobility:%s" % model.name, + data=data, + time="%s" % time.time() + ) + + self.session.broadcast_event(event_data) + + def updatewlans(self, moved, moved_netifs): + """ + A mobility script has caused nodes in the 'moved' list to move. + Update every WlanNode. This saves range calculations if the model + were to recalculate for each individual node movement. 
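As a small illustration of the event-name convention `handleevent` above relies on, model names ride after the `mobility:` prefix, comma separated:

```python
# name[9:] strips the "mobility:" prefix before splitting on commas.
name = "mobility:basic_range,ns2script"
models = name[9:].split(",")
assert models == ["basic_range", "ns2script"]
```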
+ + :param list moved: moved nodes + :param list moved_netifs: moved network interfaces + :return: nothing + """ + for node_id in self.nodes(): + try: + node = self.session.get_object(node_id) + except KeyError: + continue + if node.model: + node.model.update(moved, moved_netifs) + + def addphys(self, netnum, node): + """ + Keep track of PhysicalNodes and which network they belong to. + + :param int netnum: network number + :param core.coreobj.PyCoreNode node: node to add physical network to + :return: nothing + """ + node_id = node.objid + self.phys[node_id] = node + if netnum not in self.physnets: + self.physnets[netnum] = [node_id, ] + else: + self.physnets[netnum].append(node_id) + + # TODO: remove need for handling old style message + + def physnodehandlelink(self, message): + """ + Broker handler. Snoop Link add messages to get + node numbers of PhyiscalNodes and their nets. + Physical nodes exist only on other servers, but a shadow object is + created here for tracking node position. + + :param message: link message to handle + :return: nothing + """ + if message.message_type == MessageTypes.LINK.value and message.flags & MessageFlags.ADD.value: + nn = message.node_numbers() + # first node is always link layer node in Link add message + if nn[0] not in self.session.broker.network_nodes: + return + if nn[1] in self.session.broker.physical_nodes: + # record the fact that this PhysicalNode is linked to a net + dummy = PyCoreNode(session=self.session, objid=nn[1], name="n%d" % nn[1], start=False) + self.addphys(nn[0], dummy) + + # TODO: remove need to handling old style messages + def physnodeupdateposition(self, message): + """ + Snoop node messages belonging to physical nodes. The dummy object + in self.phys[] records the node position. + + :param message: message to handle + :return: nothing + """ + nodenum = message.node_numbers()[0] + try: + dummy = self.phys[nodenum] + nodexpos = message.get_tlv(NodeTlvs.X_POSITION.value) + nodeypos = message.get_tlv(NodeTlvs.Y_POSITION.value) + dummy.setposition(nodexpos, nodeypos, None) + except KeyError: + logging.exception("error retrieving physical node: %s", nodenum) + + def installphysnodes(self, net): + """ + After installing a mobility model on a net, include any physical + nodes that we have recorded. Use the GreTap tunnel to the physical node + as the node's interface. + + :param net: network to install + :return: nothing + """ + nodenums = self.physnets.get(net.objid, []) + for nodenum in nodenums: + node = self.phys[nodenum] + # TODO: fix this bad logic, relating to depending on a break to get a valid server + for server in self.session.broker.getserversbynode(nodenum): + break + netif = self.session.broker.gettunnel(net.objid, IpAddress.to_int(server.host)) + node.addnetif(netif, 0) + netif.node = node + x, y, z = netif.node.position.get() + netif.poshook(netif, x, y, z) + + +class WirelessModel(ConfigurableOptions): + """ + Base class used by EMANE models and the basic range model. + Used for managing arbitrary configuration parameters. + """ + config_type = RegisterTlvs.WIRELESS.value + bitmap = None + position_callback = None + + def __init__(self, session, object_id): + """ + Create a WirelessModel instance. + + :param core.session.Session session: core session we are tied to + :param int object_id: object id + """ + self.session = session + self.object_id = object_id + + def all_link_data(self, flags): + """ + May be used if the model can populate the GUI with wireless (green) + link lines. 
+ + :param flags: link data flags + :return: link data + :rtype: list + """ + return [] + + def update(self, moved, moved_netifs): + """ + Update this wireless model. + + :param bool moved: flag is it was moved + :param list moved_netifs: moved network interfaces + :return: nothing + """ + raise NotImplementedError + + def update_config(self, config): + """ + For run-time updates of model config. Returns True when position callback and set link + parameters should be invoked. + + :param dict config: configuration values to update + :return: nothing + """ + pass + + +class BasicRangeModel(WirelessModel): + """ + Basic Range wireless model, calculates range between nodes and links + and unlinks nodes based on this distance. This was formerly done from + the GUI. + """ + name = "basic_range" + options = [ + Configuration(_id="range", _type=ConfigDataTypes.UINT32, default="275", label="wireless range (pixels)"), + Configuration(_id="bandwidth", _type=ConfigDataTypes.UINT32, default="54000000", label="bandwidth (bps)"), + Configuration(_id="jitter", _type=ConfigDataTypes.FLOAT, default="0.0", label="transmission jitter (usec)"), + Configuration(_id="delay", _type=ConfigDataTypes.FLOAT, default="5000.0", + label="transmission delay (usec)"), + Configuration(_id="error", _type=ConfigDataTypes.FLOAT, default="0.0", label="error rate (%)") + ] + + @classmethod + def config_groups(cls): + return [ + ConfigGroup("Basic Range Parameters", 1, len(cls.configurations())) + ] + + def __init__(self, session, object_id): + """ + Create a BasicRangeModel instance. + + :param core.session.Session session: related core session + :param int object_id: object id + :param dict config: values + """ + super(BasicRangeModel, self).__init__(session=session, object_id=object_id) + self.session = session + self.wlan = session.get_object(object_id) + self._netifs = {} + self._netifslock = threading.Lock() + + self.range = None + self.bw = None + self.delay = None + self.loss = None + self.jitter = None + + def values_from_config(self, config): + """ + Values to convert to link parameters. + + :param dict config: values to convert + :return: nothing + """ + self.range = float(config["range"]) + logging.info("basic range model configured for WLAN %d using range %d", self.wlan.objid, self.range) + self.bw = int(config["bandwidth"]) + if self.bw == 0.0: + self.bw = None + self.delay = float(config["delay"]) + if self.delay == 0.0: + self.delay = None + self.loss = float(config["error"]) + if self.loss == 0.0: + self.loss = None + self.jitter = float(config["jitter"]) + if self.jitter == 0.0: + self.jitter = None + + def setlinkparams(self): + """ + Apply link parameters to all interfaces. This is invoked from + WlanNode.setmodel() after the position callback has been set. + """ + with self._netifslock: + for netif in self._netifs: + self.wlan.linkconfig(netif, bw=self.bw, delay=self.delay, loss=self.loss, duplicate=None, + jitter=self.jitter) + + def get_position(self, netif): + """ + Retrieve network interface position. + + :param netif: network interface position to retrieve + :return: network interface position + """ + with self._netifslock: + return self._netifs[netif] + + def set_position(self, netif, x=None, y=None, z=None): + """ + A node has moved; given an interface, a new (x,y,z) position has + been set; calculate the new distance between other nodes and link or + unlink node pairs based on the configured range. 
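+ 
+ This method also serves as the interface position callback
+ (position_callback = set_position). Illustrative call, assuming the
+ callback has already been wired up by WlanNode.setmodel()::
+ 
+     # x or y of None only records the position and skips the range
+     # recalculation against the other interfaces
+     model.set_position(netif, x=120.0, y=80.0, z=None)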
+ + :param netif: network interface to set position for + :param x: x position + :param y: y position + :param z: z position + :return: nothing + """ + self._netifslock.acquire() + self._netifs[netif] = (x, y, z) + if x is None or y is None: + self._netifslock.release() + return + for netif2 in self._netifs: + self.calclink(netif, netif2) + self._netifslock.release() + + position_callback = set_position + + def update(self, moved, moved_netifs): + """ + Node positions have changed without recalc. Update positions from + node.position, then re-calculate links for those that have moved. + Assumes bidirectional links, with one calculation per node pair, where + one of the nodes has moved. + + :param bool moved: flag is it was moved + :param list moved_netifs: moved network interfaces + :return: nothing + """ + with self._netifslock: + while len(moved_netifs): + netif = moved_netifs.pop() + nx, ny, nz = netif.node.getposition() + if netif in self._netifs: + self._netifs[netif] = (nx, ny, nz) + for netif2 in self._netifs: + if netif2 in moved_netifs: + continue + self.calclink(netif, netif2) + + def calclink(self, netif, netif2): + """ + Helper used by set_position() and update() to + calculate distance between two interfaces and perform + linking/unlinking. Sends link/unlink messages and updates the + WlanNode's linked dict. + + :param netif: interface one + :param netif2: interface two + :return: nothing + """ + if netif == netif2: + return + + try: + x, y, z = self._netifs[netif] + x2, y2, z2 = self._netifs[netif2] + + if x2 is None or y2 is None: + return + + d = self.calcdistance((x, y, z), (x2, y2, z2)) + + # ordering is important, to keep the wlan._linked dict organized + a = min(netif, netif2) + b = max(netif, netif2) + + with self.wlan._linked_lock: + linked = self.wlan.linked(a, b) + + logging.debug("checking range netif1(%s) netif2(%s): linked(%s) actual(%s) > config(%s)", + a.name, b.name, linked, d, self.range) + if d > self.range: + if linked: + logging.debug("was linked, unlinking") + self.wlan.unlink(a, b) + self.sendlinkmsg(a, b, unlink=True) + else: + if not linked: + logging.debug("was not linked, linking") + self.wlan.link(a, b) + self.sendlinkmsg(a, b) + except KeyError: + logging.exception("error getting interfaces during calclinkS") + + @staticmethod + def calcdistance(p1, p2): + """ + Calculate the distance between two three-dimensional points. + + :param tuple p1: point one + :param tuple p2: point two + :return: distance petween the points + :rtype: float + """ + a = p1[0] - p2[0] + b = p1[1] - p2[1] + c = 0 + if p1[2] is not None and p2[2] is not None: + c = p1[2] - p2[2] + return math.hypot(math.hypot(a, b), c) + + def update_config(self, config): + """ + Configuration has changed during runtime. + + :param dict config: values to update configuration + :return: nothing + """ + self.values_from_config(config) + self.setlinkparams() + return True + + def create_link_data(self, interface1, interface2, message_type): + """ + Create a wireless link/unlink data message. + + :param core.coreobj.PyCoreNetIf interface1: interface one + :param core.coreobj.PyCoreNetIf interface2: interface two + :param message_type: link message type + :return: link data + :rtype: LinkData + """ + return LinkData( + message_type=message_type, + node1_id=interface1.node.objid, + node2_id=interface2.node.objid, + network_id=self.wlan.objid, + link_type=LinkTypes.WIRELESS.value + ) + + def sendlinkmsg(self, netif, netif2, unlink=False): + """ + Send a wireless link/unlink API message to the GUI. 
+ + :param core.coreobj.PyCoreNetIf netif: interface one + :param core.coreobj.PyCoreNetIf netif2: interface two + :param bool unlink: unlink or not + :return: nothing + """ + if unlink: + message_type = MessageFlags.DELETE.value + else: + message_type = MessageFlags.ADD.value + + link_data = self.create_link_data(netif, netif2, message_type) + self.session.broadcast_link(link_data) + + def all_link_data(self, flags): + """ + Return a list of wireless link messages for when the GUI reconnects. + + :param flags: link flags + :return: all link data + :rtype: list + """ + all_links = [] + with self.wlan._linked_lock: + for a in self.wlan._linked: + for b in self.wlan._linked[a]: + if self.wlan._linked[a][b]: + all_links.append(self.create_link_data(a, b, flags)) + return all_links + + +class WayPoint(object): + """ + Maintains information regarding waypoints. + """ + + def __init__(self, time, nodenum, coords, speed): + """ + Creates a WayPoint instance. + + :param time: waypoint time + :param int nodenum: node id + :param coords: waypoint coordinates + :param speed: waypoint speed + """ + self.time = time + self.nodenum = nodenum + self.coords = coords + self.speed = speed + + def __cmp__(self, other): + """ + Custom comparison method for waypoints. + + :param WayPoint other: waypoint to compare to + :return: the comparison result against the other waypoint + :rtype: int + """ + tmp = cmp(self.time, other.time) + if tmp == 0: + tmp = cmp(self.nodenum, other.nodenum) + return tmp + + +class WayPointMobility(WirelessModel): + """ + Abstract class for mobility models that set node waypoints. + """ + name = "waypoint" + config_type = RegisterTlvs.MOBILITY.value + + STATE_STOPPED = 0 + STATE_RUNNING = 1 + STATE_PAUSED = 2 + + def __init__(self, session, object_id): + """ + Create a WayPointMobility instance. + + :param core.session.Session session: CORE session instance + :param int object_id: object id + :return: + """ + super(WayPointMobility, self).__init__(session=session, object_id=object_id) + + self.state = self.STATE_STOPPED + self.queue = [] + self.queue_copy = [] + self.points = {} + self.initial = {} + self.lasttime = None + self.endtime = None + self.wlan = session.get_object(object_id) + # these are really set in child class via confmatrix + self.loop = False + self.refresh_ms = 50 + # flag whether to stop scheduling when queue is empty + # (ns-3 sets this to False as new waypoints may be added from trace) + self.empty_queue_stop = True + + def runround(self): + """ + Advance script time and move nodes. + + :return: nothing + """ + if self.state != self.STATE_RUNNING: + return + t = self.lasttime + self.lasttime = time.time() + now = self.lasttime - self.timezero + dt = self.lasttime - t + + # keep current waypoints up-to-date + self.updatepoints(now) + + if not len(self.points): + if len(self.queue): + # more future waypoints, allow time for self.lasttime update + nexttime = self.queue[0].time - now + if nexttime > (0.001 * self.refresh_ms): + nexttime -= 0.001 * self.refresh_ms + self.session.event_loop.add_event(nexttime, self.runround) + return + else: + # no more waypoints or queued items, loop? 
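+ # the branches below cover three cases:
+ # 1) empty_queue_stop is False (e.g. ns-3 trace input): keep polling every
+ #    refresh_ms so waypoints added later are still picked up
+ # 2) loopwaypoints() restores the saved queue copy; if looping is disabled,
+ #    stop in place without moving nodes back to their initial positions
+ # 3) otherwise rerun the script from the restored queue via run(), unless
+ #    the restored queue is empty (which would busy loop)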
+ if not self.empty_queue_stop: + # keep running every refresh_ms, even with empty queue + self.session.event_loop.add_event(0.001 * self.refresh_ms, self.runround) + return + if not self.loopwaypoints(): + return self.stop(move_initial=False) + if not len(self.queue): + # prevent busy loop + return + return self.run() + + # only move netifs attached to self.wlan, or all nodenum in script? + moved = [] + moved_netifs = [] + for netif in self.wlan.netifs(): + node = netif.node + if self.movenode(node, dt): + moved.append(node) + moved_netifs.append(netif) + + # calculate all ranges after moving nodes; this saves calculations + self.session.mobility.updatewlans(moved, moved_netifs) + + # TODO: check session state + self.session.event_loop.add_event(0.001 * self.refresh_ms, self.runround) + + def run(self): + """ + Run the waypoint mobility scenario. + + :return: nothing + """ + logging.info("running mobility scenario") + self.timezero = time.time() + self.lasttime = self.timezero - (0.001 * self.refresh_ms) + self.movenodesinitial() + self.runround() + self.session.mobility.sendevent(self) + + def movenode(self, node, dt): + """ + Calculate next node location and update its coordinates. + Returns True if the node's position has changed. + + :param core.netns.nodes.CoreNode node: node to move + :param dt: move factor + :return: True if node was moved, False otherwise + :rtype: bool + """ + if node.objid not in self.points: + return False + x1, y1, z1 = node.getposition() + x2, y2, z2 = self.points[node.objid].coords + speed = self.points[node.objid].speed + # instantaneous move (prevents dx/dy == 0.0 below) + if speed == 0: + self.setnodeposition(node, x2, y2, z2) + del self.points[node.objid] + return True + # speed can be a velocity vector (ns3 mobility) or speed value + if isinstance(speed, (float, int)): + # linear speed value + alpha = math.atan2(y2 - y1, x2 - x1) + sx = speed * math.cos(alpha) + sy = speed * math.sin(alpha) + else: + # velocity vector + sx = speed[0] + sy = speed[1] + + # calculate dt * speed = distance moved + dx = sx * dt + dy = sy * dt + # prevent overshoot + if abs(dx) > abs(x2 - x1): + dx = x2 - x1 + if abs(dy) > abs(y2 - y1): + dy = y2 - y1 + if dx == 0.0 and dy == 0.0: + if self.endtime < (self.lasttime - self.timezero): + # the last node to reach the last waypoint determines this + # script's endtime + self.endtime = self.lasttime - self.timezero + del self.points[node.objid] + return False + if (x1 + dx) < 0.0: + dx = 0.0 - x1 + if (y1 + dy) < 0.0: + dy = 0.0 - y1 + self.setnodeposition(node, x1 + dx, y1 + dy, z1) + return True + + def movenodesinitial(self): + """ + Move nodes to their initial positions. Then calculate the ranges. + + :return: nothing + """ + moved = [] + moved_netifs = [] + for netif in self.wlan.netifs(): + node = netif.node + if node.objid not in self.initial: + continue + x, y, z = self.initial[node.objid].coords + self.setnodeposition(node, x, y, z) + moved.append(node) + moved_netifs.append(netif) + self.session.mobility.updatewlans(moved, moved_netifs) + + def addwaypoint(self, time, nodenum, x, y, z, speed): + """ + Waypoints are pushed to a heapq, sorted by time. + + :param time: waypoint time + :param int nodenum: node id + :param x: x position + :param y: y position + :param z: z position + :param speed: speed + :return: nothing + """ + wp = WayPoint(time, nodenum, coords=(x, y, z), speed=speed) + heapq.heappush(self.queue, wp) + + def addinitial(self, nodenum, x, y, z): + """ + Record initial position in a dict. 
+ + :param int nodenum: node id + :param x: x position + :param y: y position + :param z: z position + :return: nothing + """ + wp = WayPoint(0, nodenum, coords=(x, y, z), speed=0) + self.initial[nodenum] = wp + + def updatepoints(self, now): + """ + Move items from self.queue to self.points when their time has come. + + :param int now: current timestamp + :return: nothing + """ + while len(self.queue): + if self.queue[0].time > now: + break + wp = heapq.heappop(self.queue) + self.points[wp.nodenum] = wp + + def copywaypoints(self): + """ + Store backup copy of waypoints for looping and stopping. + + :return: nothing + """ + self.queue_copy = list(self.queue) + + def loopwaypoints(self): + """ + Restore backup copy of waypoints when looping. + + :return: nothing + """ + self.queue = list(self.queue_copy) + return self.loop + + def setnodeposition(self, node, x, y, z): + """ + Helper to move a node, notify any GUI (connected session handlers), + without invoking the interface poshook callback that may perform + range calculation. + + :param core.netns.nodes.CoreNode node: node to set position for + :param x: x position + :param y: y position + :param z: z position + :return: nothing + """ + # this would cause PyCoreNetIf.poshook() callback (range calculation) + node.position.set(x, y, z) + node_data = node.data(message_type=0) + self.session.broadcast_node(node_data) + + def setendtime(self): + """ + Set self.endtime to the time of the last waypoint in the queue of + waypoints. This is just an estimate. The endtime will later be + adjusted, after one round of the script has run, to be the time + that the last moving node has reached its final waypoint. + + :return: nothing + """ + try: + self.endtime = self.queue[-1].time + except IndexError: + self.endtime = 0 + + def start(self): + """ + Run the script from the beginning or unpause from where it + was before. + + :return: nothing + """ + laststate = self.state + self.state = self.STATE_RUNNING + if laststate == self.STATE_STOPPED or laststate == self.STATE_RUNNING: + self.loopwaypoints() + self.timezero = 0 + self.lasttime = 0 + self.run() + elif laststate == self.STATE_PAUSED: + now = time.time() + self.timezero += now - self.lasttime + self.lasttime = now - (0.001 * self.refresh_ms) + self.runround() + + def stop(self, move_initial=True): + """ + Stop the script and move nodes to initial positions. + + :param bool move_initial: flag to check if we should move nodes to initial position + :return: nothing + """ + self.state = self.STATE_STOPPED + self.loopwaypoints() + self.timezero = 0 + self.lasttime = 0 + if move_initial: + self.movenodesinitial() + self.session.mobility.sendevent(self) + + def pause(self): + """ + Pause the script; pause time is stored to self.lasttime. + + :return: nothing + """ + self.state = self.STATE_PAUSED + self.lasttime = time.time() + + +class Ns2ScriptedMobility(WayPointMobility): + """ + Handles the ns-2 script format, generated by scengen/setdest or + BonnMotion. + """ + name = "ns2script" + options = [ + Configuration(_id="file", _type=ConfigDataTypes.STRING, label="mobility script file"), + Configuration(_id="refresh_ms", _type=ConfigDataTypes.UINT32, default="50", label="refresh time (ms)"), + Configuration(_id="loop", _type=ConfigDataTypes.BOOL, default="1", options=["On", "Off"], label="loop"), + Configuration(_id="autostart", _type=ConfigDataTypes.STRING, label="auto-start seconds (0.0 for runtime)"), + Configuration(_id="map", _type=ConfigDataTypes.STRING, label="node mapping (optional, e.g. 
0:1,1:2,2:3)"), + Configuration(_id="script_start", _type=ConfigDataTypes.STRING, label="script file to run upon start"), + Configuration(_id="script_pause", _type=ConfigDataTypes.STRING, label="script file to run upon pause"), + Configuration(_id="script_stop", _type=ConfigDataTypes.STRING, label="script file to run upon stop") + ] + + @classmethod + def config_groups(cls): + return [ + ConfigGroup("ns-2 Mobility Script Parameters", 1, len(cls.configurations())) + ] + + def __init__(self, session, object_id): + """ + Creates a Ns2ScriptedMobility instance. + + :param core.session.Session session: CORE session instance + :param int object_id: object id + :param config: values + """ + super(Ns2ScriptedMobility, self).__init__(session=session, object_id=object_id) + self._netifs = {} + self._netifslock = threading.Lock() + + self.file = None + self.refresh_ms = None + self.loop = None + self.autostart = None + self.nodemap = {} + self.script_start = None + self.script_pause = None + self.script_stop = None + + def update_config(self, config): + self.file = config["file"] + logging.info("ns-2 scripted mobility configured for WLAN %d using file: %s", self.object_id, self.file) + self.refresh_ms = int(config["refresh_ms"]) + self.loop = config["loop"].lower() == "on" + self.autostart = config["autostart"] + self.parsemap(config["map"]) + self.script_start = config["script_start"] + self.script_pause = config["script_pause"] + self.script_stop = config["script_stop"] + self.readscriptfile() + self.copywaypoints() + self.setendtime() + + def readscriptfile(self): + """ + Read in mobility script from a file. This adds waypoints to a + priority queue, sorted by waypoint time. Initial waypoints are + stored in a separate dict. + + :return: nothing + """ + filename = self.findfile(self.file) + try: + f = open(filename, "r") + except IOError: + logging.exception("ns-2 scripted mobility failed to load file: %s", self.file) + return + logging.info("reading ns-2 script file: %s" % filename) + ln = 0 + ix = iy = iz = None + inodenum = None + for line in f: + ln += 1 + if line[:2] != '$n': + continue + try: + if line[:8] == "$ns_ at ": + if ix is not None and iy is not None: + self.addinitial(self.map(inodenum), ix, iy, iz) + ix = iy = iz = None + # waypoints: + # $ns_ at 1.00 "$node_(6) setdest 500.0 178.0 25.0" + parts = line.split() + time = float(parts[2]) + nodenum = parts[3][1 + parts[3].index('('):parts[3].index(')')] + x = float(parts[5]) + y = float(parts[6]) + z = None + speed = float(parts[7].strip('"')) + self.addwaypoint(time, self.map(nodenum), x, y, z, speed) + elif line[:7] == "$node_(": + # initial position (time=0, speed=0): + # $node_(6) set X_ 780.0 + parts = line.split() + time = 0.0 + nodenum = parts[0][1 + parts[0].index('('):parts[0].index(')')] + if parts[2] == 'X_': + if ix is not None and iy is not None: + self.addinitial(self.map(inodenum), ix, iy, iz) + ix = iy = iz = None + ix = float(parts[3]) + elif parts[2] == 'Y_': + iy = float(parts[3]) + elif parts[2] == 'Z_': + iz = float(parts[3]) + self.addinitial(self.map(nodenum), ix, iy, iz) + ix = iy = iz = None + inodenum = nodenum + else: + raise ValueError + except ValueError: + logging.exception("skipping line %d of file %s '%s'", ln, self.file, line) + continue + if ix is not None and iy is not None: + self.addinitial(self.map(inodenum), ix, iy, iz) + + def findfile(self, file_name): + """ + Locate a script file. 
If the specified file doesn't exist, look in the + same directory as the scenario file, or in the default + configs directory (~/.core/configs). This allows for sample files without + absolute path names. + + :param str file_name: file name to find + :return: absolute path to the file + :rtype: str + """ + if os.path.exists(file_name): + return file_name + + if self.session.file_name is not None: + d = os.path.dirname(self.session.file_name) + sessfn = os.path.join(d, file_name) + if os.path.exists(sessfn): + return sessfn + + if self.session.user is not None: + userfn = os.path.join('/home', self.session.user, '.core', 'configs', file_name) + if os.path.exists(userfn): + return userfn + + return file_name + + def parsemap(self, mapstr): + """ + Parse a node mapping string, given as a configuration parameter. + + :param str mapstr: mapping string to parse + :return: nothing + """ + self.nodemap = {} + if mapstr.strip() == "": + return + + for pair in mapstr.split(","): + parts = pair.split(":") + try: + if len(parts) != 2: + raise ValueError + self.nodemap[int(parts[0])] = int(parts[1]) + except ValueError: + logging.exception("ns-2 mobility node map error") + + def map(self, nodenum): + """ + Map one node number (from a script file) to another. + + :param str nodenum: node id to map + :return: mapped value or the node id itself + :rtype: int + """ + nodenum = int(nodenum) + return self.nodemap.get(nodenum, nodenum) + + def startup(self): + """ + Start running the script if autostart is enabled. + Move node to initial positions when any autostart time is specified. + Ignore the script if autostart is an empty string (can still be + started via GUI controls). + + :return: nothing + """ + if self.autostart == '': + logging.info("not auto-starting ns-2 script for %s" % self.wlan.name) + return + try: + t = float(self.autostart) + except ValueError: + logging.exception("Invalid auto-start seconds specified '%s' for %s", self.autostart, self.wlan.name) + return + self.movenodesinitial() + logging.info("scheduling ns-2 script for %s autostart at %s" % (self.wlan.name, t)) + self.state = self.STATE_RUNNING + self.session.event_loop.add_event(t, self.run) + + def start(self): + """ + Handle the case when un-paused. + + :return: nothing + """ + logging.info("starting script") + laststate = self.state + super(Ns2ScriptedMobility, self).start() + if laststate == self.STATE_PAUSED: + self.statescript("unpause") + + def run(self): + """ + Start is pressed or autostart is triggered. + + :return: nothing + """ + super(Ns2ScriptedMobility, self).run() + self.statescript("run") + + def pause(self): + """ + Pause the mobility script. + + :return: nothing + """ + super(Ns2ScriptedMobility, self).pause() + self.statescript("pause") + + def stop(self, move_initial=True): + """ + Stop the mobility script. + + :param bool move_initial: flag to check if we should move node to initial position + :return: nothing + """ + super(Ns2ScriptedMobility, self).stop(move_initial=move_initial) + self.statescript("stop") + + def statescript(self, typestr): + """ + State of the mobility script. 
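+ 
+ Runs the hook script configured for the given transition; for example, a
+ "run" transition with script_start set to a hypothetical start.sh executes
+ roughly::
+ 
+     /bin/sh start.sh run
+ 
+ from the session directory, with the session environment.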
+ + :param str typestr: state type string + :return: nothing + """ + filename = None + if typestr == "run" or typestr == "unpause": + filename = self.script_start + elif typestr == "pause": + filename = self.script_pause + elif typestr == "stop": + filename = self.script_stop + if filename is None or filename == '': + return + filename = self.findfile(filename) + args = ["/bin/sh", filename, typestr] + utils.check_cmd(args, cwd=self.session.session_dir, env=self.session.get_environment()) diff --git a/daemon/core/configservices/__init__.py b/daemon/core/netns/__init__.py similarity index 100% rename from daemon/core/configservices/__init__.py rename to daemon/core/netns/__init__.py diff --git a/daemon/core/netns/nodes.py b/daemon/core/netns/nodes.py new file mode 100644 index 00000000..4f9b4a51 --- /dev/null +++ b/daemon/core/netns/nodes.py @@ -0,0 +1,739 @@ +""" +Definition of LxcNode, CoreNode, and other node classes that inherit from the CoreNode, +implementing specific node types. +""" + +import logging +import socket +import threading +from socket import AF_INET +from socket import AF_INET6 + +from core import CoreCommandError +from core import constants +from core.coreobj import PyCoreNetIf +from core.coreobj import PyCoreNode +from core.coreobj import PyCoreObj +from core.data import LinkData +from core.enumerations import LinkTypes +from core.enumerations import NodeTypes +from core.enumerations import RegisterTlvs +from core.misc import ipaddress +from core.misc import utils +from core.netns.vnet import GreTapBridge +from core.netns.vnet import LxBrNet +from core.netns.vnode import LxcNode + + +class CtrlNet(LxBrNet): + """ + Control network functionality. + """ + policy = "ACCEPT" + # base control interface index + CTRLIF_IDX_BASE = 99 + DEFAULT_PREFIX_LIST = [ + "172.16.0.0/24 172.16.1.0/24 172.16.2.0/24 172.16.3.0/24 172.16.4.0/24", + "172.17.0.0/24 172.17.1.0/24 172.17.2.0/24 172.17.3.0/24 172.17.4.0/24", + "172.18.0.0/24 172.18.1.0/24 172.18.2.0/24 172.18.3.0/24 172.18.4.0/24", + "172.19.0.0/24 172.19.1.0/24 172.19.2.0/24 172.19.3.0/24 172.19.4.0/24" + ] + + def __init__(self, session, objid="ctrlnet", name=None, prefix=None, + hostid=None, start=True, assign_address=True, + updown_script=None, serverintf=None): + """ + Creates a CtrlNet instance. + + :param core.session.Session session: core session instance + :param int objid: node id + :param str name: node namee + :param prefix: control network ipv4 prefix + :param hostid: host id + :param bool start: start flag + :param str assign_address: assigned address + :param str updown_script: updown script + :param serverintf: server interface + :return: + """ + self.prefix = ipaddress.Ipv4Prefix(prefix) + self.hostid = hostid + self.assign_address = assign_address + self.updown_script = updown_script + self.serverintf = serverintf + LxBrNet.__init__(self, session, objid=objid, name=name, start=start) + + def startup(self): + """ + Startup functionality for the control network. 
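+ 
+ Brings the control bridge up and, when assign_address is set, assigns it an
+ address derived from the prefix; for example, with the default 172.16.0.0/24
+ prefix and no hostid the bridge typically receives the prefix's highest
+ usable host address (172.16.0.254/24).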
+ + :return: nothing + :raises CoreCommandError: when there is a command exception + """ + if self.detectoldbridge(): + return + + LxBrNet.startup(self) + + if self.hostid: + addr = self.prefix.addr(self.hostid) + else: + addr = self.prefix.max_addr() + + logging.info("added control network bridge: %s %s", self.brname, self.prefix) + + if self.assign_address: + addrlist = ["%s/%s" % (addr, self.prefix.prefixlen)] + self.addrconfig(addrlist=addrlist) + logging.info("address %s", addr) + + if self.updown_script: + logging.info("interface %s updown script (%s startup) called", self.brname, self.updown_script) + utils.check_cmd([self.updown_script, self.brname, "startup"]) + + if self.serverintf: + # sets the interface as a port of the bridge + utils.check_cmd([constants.BRCTL_BIN, "addif", self.brname, self.serverintf]) + + # bring interface up + utils.check_cmd([constants.IP_BIN, "link", "set", self.serverintf, "up"]) + + def detectoldbridge(self): + """ + Occassionally, control net bridges from previously closed sessions are not cleaned up. + Check if there are old control net bridges and delete them + + :return: True if an old bridge was detected, False otherwise + :rtype: bool + """ + status, output = utils.cmd_output([constants.BRCTL_BIN, "show"]) + if status != 0: + logging.error("Unable to retrieve list of installed bridges") + else: + lines = output.split("\n") + for line in lines[1:]: + cols = line.split("\t") + oldbr = cols[0] + flds = cols[0].split(".") + if len(flds) == 3: + if flds[0] == "b" and flds[1] == self.objid: + logging.error( + "error: An active control net bridge (%s) found. " + "An older session might still be running. " + "Stop all sessions and, if needed, delete %s to continue.", oldbr, oldbr + ) + return True + return False + + def shutdown(self): + """ + Control network shutdown. + + :return: nothing + """ + if self.serverintf is not None: + try: + utils.check_cmd([constants.BRCTL_BIN, "delif", self.brname, self.serverintf]) + except CoreCommandError: + logging.exception("error deleting server interface %s from bridge %s", self.serverintf, self.brname) + + if self.updown_script is not None: + try: + logging.info("interface %s updown script (%s shutdown) called", self.brname, self.updown_script) + utils.check_cmd([self.updown_script, self.brname, "shutdown"]) + except CoreCommandError: + logging.exception("error issuing shutdown script shutdown") + + LxBrNet.shutdown(self) + + def all_link_data(self, flags): + """ + Do not include CtrlNet in link messages describing this session. + + :param flags: message flags + :return: list of link data + :rtype: list[core.data.LinkData] + """ + return [] + + +class CoreNode(LxcNode): + """ + Basic core node class for nodes to extend. + """ + apitype = NodeTypes.DEFAULT.value + + +class PtpNet(LxBrNet): + """ + Peer to peer network node. + """ + policy = "ACCEPT" + + def attach(self, netif): + """ + Attach a network interface, but limit attachment to two interfaces. + + :param core.netns.vif.VEth netif: network interface + :return: nothing + """ + if len(self._netif) >= 2: + raise ValueError("Point-to-point links support at most 2 network interfaces") + + LxBrNet.attach(self, netif) + + def data(self, message_type, lat=None, lon=None, alt=None): + """ + Do not generate a Node Message for point-to-point links. They are + built using a link message instead. 
+ + :param message_type: purpose for the data object we are creating + :param float lat: latitude + :param float lon: longitude + :param float alt: altitude + :return: node data object + :rtype: core.data.NodeData + """ + return None + + def all_link_data(self, flags): + """ + Build CORE API TLVs for a point-to-point link. One Link message + describes this network. + + :param flags: message flags + :return: list of link data + :rtype: list[core.data.LinkData] + """ + + all_links = [] + + if len(self._netif) != 2: + return all_links + + if1, if2 = self._netif.values() + + unidirectional = 0 + if if1.getparams() != if2.getparams(): + unidirectional = 1 + + interface1_ip4 = None + interface1_ip4_mask = None + interface1_ip6 = None + interface1_ip6_mask = None + for address in if1.addrlist: + ip, _sep, mask = address.partition("/") + mask = int(mask) + if ipaddress.is_ipv4_address(ip): + family = AF_INET + ipl = socket.inet_pton(family, ip) + interface1_ip4 = ipaddress.IpAddress(af=family, address=ipl) + interface1_ip4_mask = mask + else: + family = AF_INET6 + ipl = socket.inet_pton(family, ip) + interface1_ip6 = ipaddress.IpAddress(af=family, address=ipl) + interface1_ip6_mask = mask + + interface2_ip4 = None + interface2_ip4_mask = None + interface2_ip6 = None + interface2_ip6_mask = None + for address in if2.addrlist: + ip, _sep, mask = address.partition("/") + mask = int(mask) + if ipaddress.is_ipv4_address(ip): + family = AF_INET + ipl = socket.inet_pton(family, ip) + interface2_ip4 = ipaddress.IpAddress(af=family, address=ipl) + interface2_ip4_mask = mask + else: + family = AF_INET6 + ipl = socket.inet_pton(family, ip) + interface2_ip6 = ipaddress.IpAddress(af=family, address=ipl) + interface2_ip6_mask = mask + + link_data = LinkData( + message_type=flags, + node1_id=if1.node.objid, + node2_id=if2.node.objid, + link_type=self.linktype, + unidirectional=unidirectional, + delay=if1.getparam("delay"), + bandwidth=if1.getparam("bw"), + dup=if1.getparam("duplicate"), + jitter=if1.getparam("jitter"), + interface1_id=if1.node.getifindex(if1), + interface1_mac=if1.hwaddr, + interface1_ip4=interface1_ip4, + interface1_ip4_mask=interface1_ip4_mask, + interface1_ip6=interface1_ip6, + interface1_ip6_mask=interface1_ip6_mask, + interface2_id=if2.node.getifindex(if2), + interface2_mac=if2.hwaddr, + interface2_ip4=interface2_ip4, + interface2_ip4_mask=interface2_ip4_mask, + interface2_ip6=interface2_ip6, + interface2_ip6_mask=interface2_ip6_mask, + ) + + all_links.append(link_data) + + # build a 2nd link message for the upstream link parameters + # (swap if1 and if2) + if unidirectional: + link_data = LinkData( + message_type=0, + node1_id=if2.node.objid, + node2_id=if1.node.objid, + delay=if1.getparam("delay"), + bandwidth=if1.getparam("bw"), + dup=if1.getparam("duplicate"), + jitter=if1.getparam("jitter"), + unidirectional=1, + interface1_id=if2.node.getifindex(if2), + interface2_id=if1.node.getifindex(if1) + ) + all_links.append(link_data) + + return all_links + + +class SwitchNode(LxBrNet): + """ + Provides switch functionality within a core node. + """ + apitype = NodeTypes.SWITCH.value + policy = "ACCEPT" + type = "lanswitch" + + +class HubNode(LxBrNet): + """ + Provides hub functionality within a core node, forwards packets to all bridge + ports by turning off MAC address learning. + """ + apitype = NodeTypes.HUB.value + policy = "ACCEPT" + type = "hub" + + def __init__(self, session, objid=None, name=None, start=True): + """ + Creates a HubNode instance. 
+ + :param core.session.Session session: core session instance + :param int objid: node id + :param str name: node namee + :param bool start: start flag + :raises CoreCommandError: when there is a command exception + """ + LxBrNet.__init__(self, session, objid, name, start) + + # TODO: move to startup method + if start: + utils.check_cmd([constants.BRCTL_BIN, "setageing", self.brname, "0"]) + + +class WlanNode(LxBrNet): + """ + Provides wireless lan functionality within a core node. + """ + apitype = NodeTypes.WIRELESS_LAN.value + linktype = LinkTypes.WIRELESS.value + policy = "DROP" + type = "wlan" + + def __init__(self, session, objid=None, name=None, start=True, policy=None): + """ + Create a WlanNode instance. + + :param core.session.Session session: core session instance + :param int objid: node id + :param str name: node name + :param bool start: start flag + :param policy: wlan policy + """ + LxBrNet.__init__(self, session, objid, name, start, policy) + # wireless model such as basic range + self.model = None + # mobility model such as scripted + self.mobility = None + + def attach(self, netif): + """ + Attach a network interface. + + :param core.netns.vif.VEth netif: network interface + :return: nothing + """ + LxBrNet.attach(self, netif) + if self.model: + netif.poshook = self.model.position_callback + if netif.node is None: + return + x, y, z = netif.node.position.get() + # invokes any netif.poshook + netif.setposition(x, y, z) + + def setmodel(self, model, config): + """ + Sets the mobility and wireless model. + + :param core.mobility.WirelessModel.cls model: wireless model to set to + :param dict config: configuration for model being set + :return: nothing + """ + logging.info("adding model: %s", model.name) + if model.config_type == RegisterTlvs.WIRELESS.value: + self.model = model(session=self.session, object_id=self.objid) + self.model.update_config(config) + if self.model.position_callback: + for netif in self.netifs(): + netif.poshook = self.model.position_callback + if netif.node is not None: + x, y, z = netif.node.position.get() + netif.poshook(netif, x, y, z) + self.model.setlinkparams() + elif model.config_type == RegisterTlvs.MOBILITY.value: + self.mobility = model(session=self.session, object_id=self.objid) + self.mobility.update_config(config) + + def update_mobility(self, config): + if not self.mobility: + raise ValueError("no mobility set to update for node(%s)", self.objid) + self.mobility.set_configs(config, node_id=self.objid) + + def updatemodel(self, config): + if not self.model: + raise ValueError("no model set to update for node(%s)", self.objid) + logging.info("node(%s) updating model(%s): %s", self.objid, self.model.name, config) + self.model.set_configs(config, node_id=self.objid) + if self.model.position_callback: + for netif in self.netifs(): + netif.poshook = self.model.position_callback + if netif.node is not None: + x, y, z = netif.node.position.get() + netif.poshook(netif, x, y, z) + self.model.updateconfig() + + def all_link_data(self, flags): + """ + Retrieve all link data. + + :param flags: message flags + :return: list of link data + :rtype: list[core.data.LinkData] + """ + all_links = LxBrNet.all_link_data(self, flags) + + if self.model: + all_links.extend(self.model.all_link_data(flags)) + + return all_links + + +class RJ45Node(PyCoreNode, PyCoreNetIf): + """ + RJ45Node is a physical interface on the host linked to the emulated + network. 
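+ 
+ For example, an RJ45 node named "eth1" maps the host's eth1 device into the
+ emulated topology; savestate() and restorestate() preserve and restore the
+ host interface's addresses and up/down state around the emulation session.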
+ """ + apitype = NodeTypes.RJ45.value + type = "rj45" + + def __init__(self, session, objid=None, name=None, mtu=1500, start=True): + """ + Create an RJ45Node instance. + + :param core.session.Session session: core session instance + :param int objid: node id + :param str name: node name + :param mtu: rj45 mtu + :param bool start: start flag + :return: + """ + PyCoreNode.__init__(self, session, objid, name, start=start) + PyCoreNetIf.__init__(self, node=self, name=name, mtu=mtu) + self.up = False + self.lock = threading.RLock() + self.ifindex = None + # the following are PyCoreNetIf attributes + self.transport_type = "raw" + self.localname = name + self.old_up = False + self.old_addrs = [] + + if start: + self.startup() + + def startup(self): + """ + Set the interface in the up state. + + :return: nothing + :raises CoreCommandError: when there is a command exception + """ + # interface will also be marked up during net.attach() + self.savestate() + utils.check_cmd([constants.IP_BIN, "link", "set", self.localname, "up"]) + self.up = True + + def shutdown(self): + """ + Bring the interface down. Remove any addresses and queuing + disciplines. + + :return: nothing + """ + if not self.up: + return + + try: + utils.check_cmd([constants.IP_BIN, "link", "set", self.localname, "down"]) + utils.check_cmd([constants.IP_BIN, "addr", "flush", "dev", self.localname]) + utils.check_cmd([constants.TC_BIN, "qdisc", "del", "dev", self.localname, "root"]) + except CoreCommandError: + logging.exception("error shutting down") + + self.up = False + self.restorestate() + + # TODO: issue in that both classes inherited from provide the same method with different signatures + def attachnet(self, net): + """ + Attach a network. + + :param core.coreobj.PyCoreNet net: network to attach + :return: nothing + """ + PyCoreNetIf.attachnet(self, net) + + # TODO: issue in that both classes inherited from provide the same method with different signatures + def detachnet(self): + """ + Detach a network. + + :return: nothing + """ + PyCoreNetIf.detachnet(self) + + def newnetif(self, net=None, addrlist=None, hwaddr=None, ifindex=None, ifname=None): + """ + This is called when linking with another node. Since this node + represents an interface, we do not create another object here, + but attach ourselves to the given network. + + :param core.coreobj.PyCoreNet net: new network instance + :param list[str] addrlist: address list + :param str hwaddr: hardware address + :param int ifindex: interface index + :param str ifname: interface name + :return: interface index + :rtype: int + :raises ValueError: when an interface has already been created, one max + """ + with self.lock: + if ifindex is None: + ifindex = 0 + + if self.net is not None: + raise ValueError("RJ45 nodes support at most 1 network interface") + + self._netif[ifindex] = self + # PyCoreNetIf.node is self + self.node = self + self.ifindex = ifindex + + if net is not None: + self.attachnet(net) + + if addrlist: + for addr in utils.make_tuple(addrlist): + self.addaddr(addr) + + return ifindex + + def delnetif(self, ifindex): + """ + Delete a network interface. + + :param int ifindex: interface index to delete + :return: nothing + """ + if ifindex is None: + ifindex = 0 + + self._netif.pop(ifindex) + + if ifindex == self.ifindex: + self.shutdown() + else: + raise ValueError("ifindex %s does not exist" % ifindex) + + def netif(self, ifindex, net=None): + """ + This object is considered the network interface, so we only + return self here. 
This keeps the RJ45Node compatible with + real nodes. + + :param int ifindex: interface index to retrieve + :param net: network to retrieve + :return: a network interface + :rtype: core.coreobj.PyCoreNetIf + """ + if net is not None and net == self.net: + return self + + if ifindex is None: + ifindex = 0 + + if ifindex == self.ifindex: + return self + + return None + + def getifindex(self, netif): + """ + Retrieve network interface index. + + :param core.coreobj.PyCoreNetIf netif: network interface to retrieve index for + :return: interface index, None otherwise + :rtype: int + """ + if netif != self: + return None + + return self.ifindex + + def addaddr(self, addr): + """ + Add address to to network interface. + + :param str addr: address to add + :return: nothing + :raises CoreCommandError: when there is a command exception + """ + if self.up: + utils.check_cmd([constants.IP_BIN, "addr", "add", str(addr), "dev", self.name]) + + PyCoreNetIf.addaddr(self, addr) + + def deladdr(self, addr): + """ + Delete address from network interface. + + :param str addr: address to delete + :return: nothing + :raises CoreCommandError: when there is a command exception + """ + if self.up: + utils.check_cmd([constants.IP_BIN, "addr", "del", str(addr), "dev", self.name]) + + PyCoreNetIf.deladdr(self, addr) + + def savestate(self): + """ + Save the addresses and other interface state before using the + interface for emulation purposes. TODO: save/restore the PROMISC flag + + :return: nothing + :raises CoreCommandError: when there is a command exception + """ + self.old_up = False + self.old_addrs = [] + args = [constants.IP_BIN, "addr", "show", "dev", self.localname] + output = utils.check_cmd(args) + for line in output.split("\n"): + items = line.split() + if len(items) < 2: + continue + + if items[1] == "%s:" % self.localname: + flags = items[2][1:-1].split(",") + if "UP" in flags: + self.old_up = True + elif items[0] == "inet": + self.old_addrs.append((items[1], items[3])) + elif items[0] == "inet6": + if items[1][:4] == "fe80": + continue + self.old_addrs.append((items[1], None)) + + def restorestate(self): + """ + Restore the addresses and other interface state after using it. + + :return: nothing + :raises CoreCommandError: when there is a command exception + """ + for addr in self.old_addrs: + if addr[1] is None: + utils.check_cmd([constants.IP_BIN, "addr", "add", addr[0], "dev", self.localname]) + else: + utils.check_cmd([constants.IP_BIN, "addr", "add", addr[0], "brd", addr[1], "dev", self.localname]) + + if self.old_up: + utils.check_cmd([constants.IP_BIN, "link", "set", self.localname, "up"]) + + def setposition(self, x=None, y=None, z=None): + """ + Uses setposition from both parent classes. + + :param float x: x position + :param float y: y position + :param float z: z position + :return: True if position changed, False otherwise + :rtype: bool + """ + result = PyCoreObj.setposition(self, x, y, z) + PyCoreNetIf.setposition(self, x, y, z) + return result + + def check_cmd(self, args): + """ + Runs shell command on node. + + :param list[str]|str args: command to run + :return: exist status and combined stdout and stderr + :rtype: tuple[int, str] + :raises CoreCommandError: when a non-zero exit status occurs + """ + raise NotImplementedError + + def cmd(self, args, wait=True): + """ + Runs shell command on node, with option to not wait for a result. 
+ + :param list[str]|str args: command to run + :param bool wait: wait for command to exit, defaults to True + :return: exit status for command + :rtype: int + """ + raise NotImplementedError + + def cmd_output(self, args): + """ + Runs shell command on node and get exit status and output. + + :param list[str]|str args: command to run + :return: exit status and combined stdout and stderr + :rtype: tuple[int, str] + """ + raise NotImplementedError + + def termcmdstring(self, sh): + """ + Create a terminal command string. + + :param str sh: shell to execute command in + :return: str + """ + raise NotImplementedError + + +class TunnelNode(GreTapBridge): + """ + Provides tunnel functionality in a core node. + """ + apitype = NodeTypes.TUNNEL.value + policy = "ACCEPT" + type = "tunnel" diff --git a/daemon/core/netns/openvswitch.py b/daemon/core/netns/openvswitch.py new file mode 100644 index 00000000..d18471ea --- /dev/null +++ b/daemon/core/netns/openvswitch.py @@ -0,0 +1,723 @@ +""" +TODO: probably goes away, or implement the usage of "unshare", or docker formal. +""" + +import logging +import socket +import threading +from socket import AF_INET +from socket import AF_INET6 + +from core import CoreCommandError +from core import constants +from core.coreobj import PyCoreNet +from core.data import LinkData +from core.enumerations import LinkTypes +from core.enumerations import NodeTypes +from core.enumerations import RegisterTlvs +from core.misc import ipaddress +from core.misc import utils +from core.netns.vif import GreTap +from core.netns.vif import VEth +from core.netns.vnet import EbtablesQueue +from core.netns.vnet import GreTapBridge + +# a global object because all WLANs share the same queue +# cannot have multiple threads invoking the ebtables commnd +ebtables_queue = EbtablesQueue() + +ebtables_lock = threading.Lock() + +utils.check_executables([ + constants.IP_BIN, + constants.EBTABLES_BIN, + constants.TC_BIN +]) + + +def ebtables_commands(call, commands): + with ebtables_lock: + for command in commands: + call(command) + + +class OvsNet(PyCoreNet): + """ + Used to be LxBrNet. + + Base class for providing Openvswitch functionality to objects that create bridges. + """ + + policy = "DROP" + + def __init__(self, session, objid=None, name=None, start=True, policy=None): + """ + Creates an OvsNet instance. 
+ + :param core.session.Session session: session this object is a part of + :param objid: + :param name: + :param start: + :param policy: + :return: + """ + + PyCoreNet.__init__(self, session, objid, name, start) + + if policy: + self.policy = policy + else: + self.policy = self.__class__.policy + + session_id = self.session.short_session_id() + self.bridge_name = "b.%s.%s" % (str(self.objid), session_id) + self.up = False + + if start: + self.startup() + ebtables_queue.startupdateloop(self) + + def startup(self): + """ + + :return: + :raises CoreCommandError: when there is a command exception + """ + utils.check_cmd([constants.OVS_BIN, "add-br", self.bridge_name]) + + # turn off spanning tree protocol and forwarding delay + # TODO: appears stp and rstp are off by default, make sure this always holds true + # TODO: apears ovs only supports rstp forward delay and again it's off by default + utils.check_cmd([constants.IP_BIN, "link", "set", self.bridge_name, "up"]) + + # create a new ebtables chain for this bridge + ebtables_commands(utils.check_cmd, [ + [constants.EBTABLES_BIN, "-N", self.bridge_name, "-P", self.policy], + [constants.EBTABLES_BIN, "-A", "FORWARD", "--logical-in", self.bridge_name, "-j", self.bridge_name] + ]) + + self.up = True + + def shutdown(self): + if not self.up: + logging.info("exiting shutdown, object is not up") + return + + ebtables_queue.stopupdateloop(self) + + try: + utils.check_cmd([constants.IP_BIN, "link", "set", self.bridge_name, "down"]) + utils.check_cmd([constants.OVS_BIN, "del-br", self.bridge_name]) + ebtables_commands(utils.check_cmd, [ + [constants.EBTABLES_BIN, "-D", "FORWARD", "--logical-in", self.bridge_name, "-j", self.bridge_name], + [constants.EBTABLES_BIN, "-X", self.bridge_name] + ]) + except CoreCommandError: + logging.exception("error bringing bridge down and removing it") + + # removes veth pairs used for bridge-to-bridge connections + for interface in self.netifs(): + interface.shutdown() + + self._netif.clear() + self._linked.clear() + del self.session + self.up = False + + def attach(self, interface): + if self.up: + utils.check_cmd([constants.OVS_BIN, "add-port", self.bridge_name, interface.localname]) + utils.check_cmd([constants.IP_BIN, "link", "set", interface.localname, "up"]) + + PyCoreNet.attach(self, interface) + + def detach(self, interface): + if self.up: + utils.check_cmd([constants.OVS_BIN, "del-port", self.bridge_name, interface.localname]) + + PyCoreNet.detach(self, interface) + + def linked(self, interface_one, interface_two): + # check if the network interfaces are attached to this network + if self._netif[interface_one.netifi] != interface_one: + raise ValueError("inconsistency for interface %s" % interface_one.name) + + if self._netif[interface_two.netifi] != interface_two: + raise ValueError("inconsistency for interface %s" % interface_two.name) + + try: + linked = self._linked[interface_one][interface_two] + except KeyError: + if self.policy == "ACCEPT": + linked = True + elif self.policy == "DROP": + linked = False + else: + raise ValueError("unknown policy: %s" % self.policy) + + self._linked[interface_one][interface_two] = linked + + return linked + + def unlink(self, interface_one, interface_two): + """ + Unlink two PyCoreNetIfs, resulting in adding or removing ebtables + filtering rules. 
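+ 
+ With the default DROP policy, traffic between unlinked interface pairs falls
+ through to the per-bridge ebtables chain created in startup() and is dropped;
+ the rule updates themselves are batched through the shared EbtablesQueue via
+ ebchange().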
+ """ + with self._linked_lock: + if not self.linked(interface_one, interface_two): + return + + self._linked[interface_one][interface_two] = False + + ebtables_queue.ebchange(self) + + def link(self, interface_one, interface_two): + """ + Link two PyCoreNetIfs together, resulting in adding or removing + ebtables filtering rules. + """ + with self._linked_lock: + if self.linked(interface_one, interface_two): + return + + self._linked[interface_one][interface_two] = True + + ebtables_queue.ebchange(self) + + def linkconfig(self, netif, bw=None, delay=None, loss=None, duplicate=None, + jitter=None, netif2=None, devname=None): + """ + Configure link parameters by applying tc queuing disciplines on the + interface. + """ + if not devname: + devname = netif.localname + + tc = [constants.TC_BIN, "qdisc", "replace", "dev", devname] + parent = ["root"] + + # attempt to set bandwidth and update as needed if value changed + bandwidth_changed = netif.setparam("bw", bw) + if bandwidth_changed: + # from tc-tbf(8): minimum value for burst is rate / kernel_hz + if bw > 0: + if self.up: + burst = max(2 * netif.mtu, bw / 1000) + limit = 0xffff # max IP payload + tbf = ["tbf", "rate", str(bw), "burst", str(burst), "limit", str(limit)] + logging.info("linkconfig: %s" % [tc + parent + ["handle", "1:"] + tbf]) + utils.check_cmd(tc + parent + ["handle", "1:"] + tbf) + netif.setparam("has_tbf", True) + elif netif.getparam("has_tbf") and bw <= 0: + tcd = [] + tc + tcd[2] = "delete" + + if self.up: + utils.check_cmd(tcd + parent) + + netif.setparam("has_tbf", False) + # removing the parent removes the child + netif.setparam("has_netem", False) + + if netif.getparam("has_tbf"): + parent = ["parent", "1:1"] + + netem = ["netem"] + delay_changed = netif.setparam("delay", delay) + + if loss is not None: + loss = float(loss) + loss_changed = netif.setparam("loss", loss) + + if duplicate is not None: + duplicate = float(duplicate) + duplicate_changed = netif.setparam("duplicate", duplicate) + jitter_changed = netif.setparam("jitter", jitter) + + # if nothing changed return + if not any([bandwidth_changed, delay_changed, loss_changed, duplicate_changed, jitter_changed]): + return + + # jitter and delay use the same delay statement + if delay is not None: + netem += ["delay", "%sus" % delay] + else: + netem += ["delay", "0us"] + + if jitter is not None: + netem += ["%sus" % jitter, "25%"] + + if loss is not None and loss > 0: + netem += ["loss", "%s%%" % min(loss, 100)] + + if duplicate is not None and duplicate > 0: + netem += ["duplicate", "%s%%" % min(duplicate, 100)] + + if delay <= 0 and jitter <= 0 and loss <= 0 and duplicate <= 0: + # possibly remove netem if it exists and parent queue wasn"t removed + if not netif.getparam("has_netem"): + return + + tc[2] = "delete" + + if self.up: + logging.info("linkconfig: %s" % ([tc + parent + ["handle", "10:"]],)) + utils.check_cmd(tc + parent + ["handle", "10:"]) + netif.setparam("has_netem", False) + elif len(netem) > 1: + if self.up: + logging.info("linkconfig: %s" % ([tc + parent + ["handle", "10:"] + netem],)) + utils.check_cmd(tc + parent + ["handle", "10:"] + netem) + netif.setparam("has_netem", True) + + def linknet(self, network): + """ + Link this bridge with another by creating a veth pair and installing + each device into each bridge. 
+ """ + session_id = self.session.short_session_id() + + try: + self_objid = "%x" % self.objid + except TypeError: + self_objid = "%s" % self.objid + + try: + net_objid = "%x" % network.objid + except TypeError: + net_objid = "%s" % network.objid + + localname = "veth%s.%s.%s" % (self_objid, net_objid, session_id) + + if len(localname) >= 16: + raise ValueError("interface local name %s too long" % localname) + + name = "veth%s.%s.%s" % (net_objid, self_objid, session_id) + if len(name) >= 16: + raise ValueError("interface name %s too long" % name) + + interface = VEth(node=None, name=name, localname=localname, mtu=1500, net=self, start=self.up) + self.attach(interface) + if network.up: + # this is similar to net.attach() but uses netif.name instead + # of localname + utils.check_cmd([constants.OVS_BIN, "add-port", network.bridge_name, interface.name]) + utils.check_cmd([constants.IP_BIN, "link", "set", interface.name, "up"]) + + # TODO: is there a native method for this? see if this causes issues + # i = network.newifindex() + # network._netif[i] = interface + # with network._linked_lock: + # network._linked[interface] = {} + # this method call is equal to the above, with a interface.netifi = call + network.attach(interface) + + interface.net = self + interface.othernet = network + return interface + + def getlinknetif(self, network): + """ + Return the interface of that links this net with another net + (that were linked using linknet()). + """ + for interface in self.netifs(): + if hasattr(interface, "othernet") and interface.othernet == network: + return interface + + return None + + def addrconfig(self, addresses): + """ + Set addresses on the bridge. + """ + if not self.up: + return + + for address in addresses: + utils.check_cmd([constants.IP_BIN, "addr", "add", str(address), "dev", self.bridge_name]) + + +class OvsCtrlNet(OvsNet): + policy = "ACCEPT" + CTRLIF_IDX_BASE = 99 # base control interface index + DEFAULT_PREFIX_LIST = [ + "172.16.0.0/24 172.16.1.0/24 172.16.2.0/24 172.16.3.0/24 172.16.4.0/24", + "172.17.0.0/24 172.17.1.0/24 172.17.2.0/24 172.17.3.0/24 172.17.4.0/24", + "172.18.0.0/24 172.18.1.0/24 172.18.2.0/24 172.18.3.0/24 172.18.4.0/24", + "172.19.0.0/24 172.19.1.0/24 172.19.2.0/24 172.19.3.0/24 172.19.4.0/24" + ] + + def __init__(self, session, objid="ctrlnet", name=None, prefix=None, hostid=None, + start=True, assign_address=True, updown_script=None, serverintf=None): + self.prefix = ipaddress.Ipv4Prefix(prefix) + self.hostid = hostid + self.assign_address = assign_address + self.updown_script = updown_script + self.serverintf = serverintf + OvsNet.__init__(self, session, objid=objid, name=name, start=start) + + def startup(self): + if self.detectoldbridge(): + return + + OvsNet.startup(self) + if self.hostid: + addr = self.prefix.addr(self.hostid) + else: + addr = self.prefix.max_addr() + + message = "Added control network bridge: %s %s" % (self.bridge_name, self.prefix) + addresses = ["%s/%s" % (addr, self.prefix.prefixlen)] + if self.assign_address: + self.addrconfig(addresses=addresses) + message += " address %s" % addr + logging.info(message) + + if self.updown_script: + logging.info("interface %s updown script %s startup called" % (self.bridge_name, self.updown_script)) + utils.check_cmd([self.updown_script, self.bridge_name, "startup"]) + + if self.serverintf: + utils.check_cmd([constants.OVS_BIN, "add-port", self.bridge_name, self.serverintf]) + utils.check_cmd([constants.IP_BIN, "link", "set", self.serverintf, "up"]) + + def detectoldbridge(self): + """ + 
Occasionally, control net bridges from previously closed sessions are not cleaned up.
+ Check if there are old control net bridges and delete them.
+ """
+ 
+ output = utils.check_cmd([constants.OVS_BIN, "list-br"])
+ output = output.strip()
+ if output:
+ for line in output.split("\n"):
+ bridge_parts = line.split(".")
+ if bridge_parts[0] == "b" and bridge_parts[1] == self.objid:
+ logging.error("older session may still be running with conflicting id for bridge: %s", line)
+ return True
+ 
+ return False
+ 
+ def shutdown(self):
+ if self.serverintf:
+ try:
+ utils.check_cmd([constants.OVS_BIN, "del-port", self.bridge_name, self.serverintf])
+ except CoreCommandError:
+ logging.exception("error deleting server interface %s from controlnet bridge %s",
+ self.serverintf, self.bridge_name)
+ 
+ if self.updown_script:
+ try:
+ logging.info("interface %s updown script (%s shutdown) called", self.bridge_name, self.updown_script)
+ utils.check_cmd([self.updown_script, self.bridge_name, "shutdown"])
+ except CoreCommandError:
+ logging.exception("error during updown script shutdown")
+ 
+ OvsNet.shutdown(self)
+ 
+ def all_link_data(self, flags):
+ """
+ Do not include CtrlNet in link messages describing this session.
+ """
+ return []
+ 
+ 
+class OvsPtpNet(OvsNet):
+ policy = "ACCEPT"
+ 
+ def attach(self, interface):
+ if len(self._netif) >= 2:
+ raise ValueError("point-to-point links support at most 2 network interfaces")
+ OvsNet.attach(self, interface)
+ 
+ def data(self, message_type, lat=None, lon=None, alt=None):
+ """
+ Do not generate a Node Message for point-to-point links. They are
+ built using a link message instead.
+ """
+ return None
+ 
+ def all_link_data(self, flags):
+ """
+ Build CORE API TLVs for a point-to-point link. One Link message describes this network.
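+ 
+ When the two interfaces carry different link parameters, a second LinkData
+ message with the endpoints swapped and unidirectional=1 is appended to
+ describe the upstream direction.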
+ """ + + all_links = [] + + if len(self._netif) != 2: + return all_links + + if1, if2 = self._netif.values() + + unidirectional = 0 + if if1.getparams() != if2.getparams(): + unidirectional = 1 + + interface1_ip4 = None + interface1_ip4_mask = None + interface1_ip6 = None + interface1_ip6_mask = None + for address in if1.addrlist: + ip, _sep, mask = address.partition("/") + mask = int(mask) + if ipaddress.is_ipv4_address(ip): + family = AF_INET + ipl = socket.inet_pton(family, ip) + interface1_ip4 = ipaddress.IpAddress(af=family, address=ipl) + interface1_ip4_mask = mask + else: + family = AF_INET6 + ipl = socket.inet_pton(family, ip) + interface1_ip6 = ipaddress.IpAddress(af=family, address=ipl) + interface1_ip6_mask = mask + + interface2_ip4 = None + interface2_ip4_mask = None + interface2_ip6 = None + interface2_ip6_mask = None + for address in if2.addrlist: + ip, _sep, mask = address.partition("/") + mask = int(mask) + if ipaddress.is_ipv4_address(ip): + family = AF_INET + ipl = socket.inet_pton(family, ip) + interface2_ip4 = ipaddress.IpAddress(af=family, address=ipl) + interface2_ip4_mask = mask + else: + family = AF_INET6 + ipl = socket.inet_pton(family, ip) + interface2_ip6 = ipaddress.IpAddress(af=family, address=ipl) + interface2_ip6_mask = mask + + # TODO: not currently used + # loss=netif.getparam("loss") + link_data = LinkData( + message_type=flags, + node1_id=if1.node.objid, + node2_id=if2.node.objid, + link_type=self.linktype, + unidirectional=unidirectional, + delay=if1.getparam("delay"), + bandwidth=if1.getparam("bw"), + dup=if1.getparam("duplicate"), + jitter=if1.getparam("jitter"), + interface1_id=if1.node.getifindex(if1), + interface1_mac=if1.hwaddr, + interface1_ip4=interface1_ip4, + interface1_ip4_mask=interface1_ip4_mask, + interface1_ip6=interface1_ip6, + interface1_ip6_mask=interface1_ip6_mask, + interface2_id=if2.node.getifindex(if2), + interface2_mac=if2.hwaddr, + interface2_ip4=interface2_ip4, + interface2_ip4_mask=interface2_ip4_mask, + interface2_ip6=interface2_ip6, + interface2_ip6_mask=interface2_ip6_mask, + ) + + all_links.append(link_data) + + # build a 2nd link message for the upstream link parameters + # (swap if1 and if2) + if unidirectional: + link_data = LinkData( + message_type=0, + node1_id=if2.node.objid, + node2_id=if1.node.objid, + delay=if1.getparam("delay"), + bandwidth=if1.getparam("bw"), + dup=if1.getparam("duplicate"), + jitter=if1.getparam("jitter"), + unidirectional=1, + interface1_id=if2.node.getifindex(if2), + interface2_id=if1.node.getifindex(if1) + ) + all_links.append(link_data) + + return all_links + + +class OvsSwitchNode(OvsNet): + apitype = NodeTypes.SWITCH.value + policy = "ACCEPT" + type = "lanswitch" + + +class OvsHubNode(OvsNet): + apitype = NodeTypes.HUB.value + policy = "ACCEPT" + type = "hub" + + def __init__(self, session, objid=None, name=None, start=True): + """ + the Hub node forwards packets to all bridge ports by turning off + the MAC address learning + """ + OvsNet.__init__(self, session, objid, name, start) + + if start: + # TODO: verify that the below flow accomplishes what is desired for a "HUB" + # TODO: replace "brctl setageing 0" + utils.check_cmd([constants.OVS_FLOW_BIN, "add-flow", self.bridge_name, "action=flood"]) + + +class OvsWlanNode(OvsNet): + apitype = NodeTypes.WIRELESS_LAN.value + linktype = LinkTypes.WIRELESS.value + policy = "DROP" + type = "wlan" + + def __init__(self, session, objid=None, name=None, start=True, policy=None): + OvsNet.__init__(self, session, objid, name, start, policy) + + # 
wireless model such as basic range + self.model = None + # mobility model such as scripted + self.mobility = None + + def attach(self, interface): + OvsNet.attach(self, interface) + + if self.model: + interface.poshook = self.model.position_callback + + if interface.node is None: + return + + x, y, z = interface.node.position.get() + # invokes any netif.poshook + interface.setposition(x, y, z) + # self.model.setlinkparams() + + def setmodel(self, model, config=None): + """ + Mobility and wireless model. + """ + logging.info("adding model %s", model.name) + + if model.type == RegisterTlvs.WIRELESS.value: + self.model = model(session=self.session, object_id=self.objid, config=config) + if self.model.position_callback: + for interface in self.netifs(): + interface.poshook = self.model.position_callback + if interface.node is not None: + x, y, z = interface.node.position.get() + interface.poshook(interface, x, y, z) + self.model.setlinkparams() + elif model.type == RegisterTlvs.MOBILITY.value: + self.mobility = model(session=self.session, object_id=self.objid, config=config) + + def updatemodel(self, config): + if not self.model: + raise ValueError("no model set to update for node(%s)", self.objid) + logging.info("node(%s) updating model(%s): %s", self.objid, self.model.name, config) + self.model.set_configs(config, node_id=self.objid) + if self.model.position_callback: + for netif in self.netifs(): + netif.poshook = self.model.position_callback + if netif.node is not None: + x, y, z = netif.node.position.get() + netif.poshook(netif, x, y, z) + self.model.updateconfig() + + def all_link_data(self, flags): + all_links = OvsNet.all_link_data(self, flags) + + if self.model: + all_links.extend(self.model.all_link_data(flags)) + + return all_links + + +class OvsTunnelNode(GreTapBridge): + apitype = NodeTypes.TUNNEL.value + policy = "ACCEPT" + type = "tunnel" + + +class OvsGreTapBridge(OvsNet): + """ + A network consisting of a bridge with a gretap device for tunneling to + another system. + """ + + def __init__(self, session, remoteip=None, objid=None, name=None, policy="ACCEPT", + localip=None, ttl=255, key=None, start=True): + OvsNet.__init__(self, session=session, objid=objid, name=name, policy=policy, start=False) + self.grekey = key + if self.grekey is None: + self.grekey = self.session.id ^ self.objid + + self.localnum = None + self.remotenum = None + self.remoteip = remoteip + self.localip = localip + self.ttl = ttl + + if remoteip is None: + self.gretap = None + else: + self.gretap = GreTap(node=self, session=session, remoteip=remoteip, + localip=localip, ttl=ttl, key=self.grekey) + if start: + self.startup() + + def startup(self): + """ + Creates a bridge and adds the gretap device to it. + """ + OvsNet.startup(self) + + if self.gretap: + self.attach(self.gretap) + + def shutdown(self): + """ + Detach the gretap device and remove the bridge. + """ + if self.gretap: + self.detach(self.gretap) + self.gretap.shutdown() + self.gretap = None + + OvsNet.shutdown(self) + + def addrconfig(self, addresses): + """ + Set the remote tunnel endpoint. This is a one-time method for + creating the GreTap device, which requires the remoteip at startup. + The 1st address in the provided list is remoteip, 2nd optionally + specifies localip. 
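# Minimal usage sketch for the addrconfig() behaviour described above; "bridge" is
# assumed to be an existing OvsGreTapBridge created with remoteip=None, and the
# addresses are example values only. setkey() must be called before addrconfig()
# if a non-default GRE key is wanted (see setkey() below).
bridge.setkey(1000)
bridge.addrconfig(["10.0.0.2/24", "10.0.0.1/24"])  # remoteip=10.0.0.2, optional localip=10.0.0.1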
+ """ + if self.gretap: + raise ValueError("gretap already exists for %s" % self.name) + + remoteip = addresses[0].split("/")[0] + localip = None + + if len(addresses) > 1: + localip = addresses[1].split("/")[0] + + self.gretap = GreTap(session=self.session, remoteip=remoteip, + localip=localip, ttl=self.ttl, key=self.grekey) + self.attach(self.gretap) + + def setkey(self, key): + """ + Set the GRE key used for the GreTap device. This needs to be set + prior to instantiating the GreTap device (before addrconfig). + """ + self.grekey = key + + +OVS_NODES = { + NodeTypes.SWITCH: OvsSwitchNode, + NodeTypes.HUB: OvsHubNode, + NodeTypes.WIRELESS_LAN: OvsWlanNode, + NodeTypes.TUNNEL: OvsTunnelNode, + NodeTypes.TAP_BRIDGE: OvsGreTapBridge, + NodeTypes.PEER_TO_PEER: OvsPtpNet, + NodeTypes.CONTROL_NET: OvsCtrlNet +} diff --git a/daemon/core/netns/vif.py b/daemon/core/netns/vif.py new file mode 100644 index 00000000..8b224c1e --- /dev/null +++ b/daemon/core/netns/vif.py @@ -0,0 +1,324 @@ +""" +virtual ethernet classes that implement the interfaces available under Linux. +""" + +import logging +import time + +from core import CoreCommandError +from core import constants +from core.coreobj import PyCoreNetIf +from core.enumerations import NodeTypes +from core.misc import nodeutils +from core.misc import utils + +utils.check_executables([constants.IP_BIN]) + + +class VEth(PyCoreNetIf): + """ + Provides virtual ethernet functionality for core nodes. + """ + + # TODO: network is not used, why was it needed? + def __init__(self, node, name, localname, mtu=1500, net=None, start=True): + """ + Creates a VEth instance. + + :param core.netns.vnode.SimpleLxcNode node: related core node + :param str name: interface name + :param str localname: interface local name + :param mtu: interface mtu + :param net: network + :param bool start: start flag + :raises CoreCommandError: when there is a command exception + """ + # note that net arg is ignored + PyCoreNetIf.__init__(self, node=node, name=name, mtu=mtu) + self.localname = localname + self.up = False + if start: + self.startup() + + def startup(self): + """ + Interface startup logic. + + :return: nothing + :raises CoreCommandError: when there is a command exception + """ + utils.check_cmd([constants.IP_BIN, "link", "add", "name", self.localname, + "type", "veth", "peer", "name", self.name]) + utils.check_cmd([constants.IP_BIN, "link", "set", self.localname, "up"]) + self.up = True + + def shutdown(self): + """ + Interface shutdown logic. + + :return: nothing + """ + if not self.up: + return + + if self.node: + try: + self.node.check_cmd([constants.IP_BIN, "-6", "addr", "flush", "dev", self.name]) + except CoreCommandError: + logging.exception("error shutting down interface") + + if self.localname: + try: + utils.check_cmd([constants.IP_BIN, "link", "delete", self.localname]) + except CoreCommandError: + logging.exception("error deleting link") + + self.up = False + + +class TunTap(PyCoreNetIf): + """ + TUN/TAP virtual device in TAP mode + """ + + # TODO: network is not used, why was it needed? + def __init__(self, node, name, localname, mtu=1500, net=None, start=True): + """ + Create a TunTap instance. 
+ + :param core.netns.vnode.SimpleLxcNode node: related core node + :param str name: interface name + :param str localname: local interface name + :param mtu: interface mtu + :param net: related network + :param bool start: start flag + """ + PyCoreNetIf.__init__(self, node=node, name=name, mtu=mtu) + self.localname = localname + self.up = False + self.transport_type = "virtual" + if start: + self.startup() + + def startup(self): + """ + Startup logic for a tunnel tap. + + :return: nothing + """ + # TODO: more sophisticated TAP creation here + # Debian does not support -p (tap) option, RedHat does. + # For now, this is disabled to allow the TAP to be created by another + # system (e.g. EMANE"s emanetransportd) + # check_call(["tunctl", "-t", self.name]) + # self.install() + self.up = True + + def shutdown(self): + """ + Shutdown functionality for a tunnel tap. + + :return: nothing + """ + if not self.up: + return + + try: + self.node.check_cmd([constants.IP_BIN, "-6", "addr", "flush", "dev", self.name]) + except CoreCommandError: + logging.exception("error shutting down tunnel tap") + + self.up = False + + def waitfor(self, func, attempts=10, maxretrydelay=0.25): + """ + Wait for func() to return zero with exponential backoff. + + :param func: function to wait for a result of zero + :param int attempts: number of attempts to wait for a zero result + :param float maxretrydelay: maximum retry delay + :return: True if wait succeeded, False otherwise + :rtype: bool + """ + delay = 0.01 + result = False + for i in xrange(1, attempts + 1): + r = func() + if r == 0: + result = True + break + msg = "attempt %s failed with nonzero exit status %s" % (i, r) + if i < attempts + 1: + msg += ", retrying..." + logging.info(msg) + time.sleep(delay) + delay += delay + if delay > maxretrydelay: + delay = maxretrydelay + else: + msg += ", giving up" + logging.info(msg) + + return result + + def waitfordevicelocal(self): + """ + Check for presence of a local device - tap device may not + appear right away waits + + :return: wait for device local response + :rtype: int + """ + logging.debug("waiting for device local: %s", self.localname) + + def localdevexists(): + args = [constants.IP_BIN, "link", "show", self.localname] + return utils.cmd(args) + + self.waitfor(localdevexists) + + def waitfordevicenode(self): + """ + Check for presence of a node device - tap device may not appear right away waits. + + :return: nothing + """ + logging.debug("waiting for device node: %s", self.name) + + def nodedevexists(): + args = [constants.IP_BIN, "link", "show", self.name] + return self.node.cmd(args) + + count = 0 + while True: + result = self.waitfor(nodedevexists) + if result: + break + + # check if this is an EMANE interface; if so, continue + # waiting if EMANE is still running + # TODO: remove emane code + should_retry = count < 5 + is_emane_node = nodeutils.is_node(self.net, NodeTypes.EMANE) + is_emane_running = self.node.session.emane.emanerunning(self.node) + if all([should_retry, is_emane_node, is_emane_running]): + count += 1 + else: + raise RuntimeError("node device failed to exist") + + def install(self): + """ + Install this TAP into its namespace. This is not done from the + startup() method but called at a later time when a userspace + program (running on the host) has had a chance to open the socket + end of the TAP. 
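# Rough call order for the TAP lifecycle described above, assuming an existing
# TunTap instance "tap" whose host-side device has already been created and
# opened by an external program (e.g. an EMANE transport daemon):
tap.install()    # wait for the host device, move it into the node's namespace and rename it
tap.setaddrs()   # wait for the in-namespace device, then apply any queued addresses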
+ + :return: nothing + :raises CoreCommandError: when there is a command exception + """ + self.waitfordevicelocal() + netns = str(self.node.pid) + utils.check_cmd([constants.IP_BIN, "link", "set", self.localname, "netns", netns]) + self.node.check_cmd([constants.IP_BIN, "link", "set", self.localname, "name", self.name]) + self.node.check_cmd([constants.IP_BIN, "link", "set", self.name, "up"]) + + def setaddrs(self): + """ + Set interface addresses based on self.addrlist. + + :return: nothing + """ + self.waitfordevicenode() + for addr in self.addrlist: + self.node.check_cmd([constants.IP_BIN, "addr", "add", str(addr), "dev", self.name]) + + +class GreTap(PyCoreNetIf): + """ + GRE TAP device for tunneling between emulation servers. + Uses the "gretap" tunnel device type from Linux which is a GRE device + having a MAC address. The MAC address is required for bridging. + """ + + def __init__(self, node=None, name=None, session=None, mtu=1458, + remoteip=None, objid=None, localip=None, ttl=255, + key=None, start=True): + """ + Creates a GreTap instance. + + :param core.netns.vnode.SimpleLxcNode node: related core node + :param str name: interface name + :param core.session.Session session: core session instance + :param mtu: interface mtu + :param str remoteip: remote address + :param int objid: object id + :param str localip: local address + :param ttl: ttl value + :param key: gre tap key + :param bool start: start flag + :raises CoreCommandError: when there is a command exception + """ + PyCoreNetIf.__init__(self, node=node, name=name, mtu=mtu) + self.session = session + if objid is None: + # from PyCoreObj + objid = ((id(self) >> 16) ^ (id(self) & 0xffff)) & 0xffff + self.objid = objid + sessionid = self.session.short_session_id() + # interface name on the local host machine + self.localname = "gt.%s.%s" % (self.objid, sessionid) + self.transport_type = "raw" + if not start: + self.up = False + return + + if remoteip is None: + raise ValueError, "missing remote IP required for GRE TAP device" + args = [constants.IP_BIN, "link", "add", self.localname, "type", "gretap", + "remote", str(remoteip)] + if localip: + args += ["local", str(localip)] + if ttl: + args += ["ttl", str(ttl)] + if key: + args += ["key", str(key)] + utils.check_cmd(args) + args = [constants.IP_BIN, "link", "set", self.localname, "up"] + utils.check_cmd(args) + self.up = True + + def shutdown(self): + """ + Shutdown logic for a GreTap. + + :return: nothing + """ + if self.localname: + try: + args = [constants.IP_BIN, "link", "set", self.localname, "down"] + utils.check_cmd(args) + args = [constants.IP_BIN, "link", "del", self.localname] + utils.check_cmd(args) + except CoreCommandError: + logging.exception("error during shutdown") + + self.localname = None + + def data(self, message_type): + """ + Data for a gre tap. + + :param message_type: message type for data + :return: None + """ + return None + + def all_link_data(self, flags): + """ + Retrieve link data. + + :param flags: link flags + :return: link data + :rtype: list[core.data.LinkData] + """ + return [] diff --git a/daemon/core/netns/vnet.py b/daemon/core/netns/vnet.py new file mode 100644 index 00000000..17bcada0 --- /dev/null +++ b/daemon/core/netns/vnet.py @@ -0,0 +1,656 @@ +""" +PyCoreNet and LxBrNet classes that implement virtual networks using +Linux Ethernet bridging and ebtables rules. 
+""" + +import logging +import os +import threading +import time + +from core import CoreCommandError +from core import constants +from core.coreobj import PyCoreNet +from core.misc import utils +from core.netns.vif import GreTap +from core.netns.vif import VEth + +utils.check_executables([ + constants.BRCTL_BIN, + constants.IP_BIN, + constants.EBTABLES_BIN, + constants.TC_BIN +]) + +ebtables_lock = threading.Lock() + + +class EbtablesQueue(object): + """ + Helper class for queuing up ebtables commands into rate-limited + atomic commits. This improves performance and reliability when there are + many WLAN link updates. + """ + # update rate is every 300ms + rate = 0.3 + # ebtables + atomic_file = "/tmp/pycore.ebtables.atomic" + + def __init__(self): + """ + Initialize the helper class, but don't start the update thread + until a WLAN is instantiated. + """ + self.doupdateloop = False + self.updatethread = None + # this lock protects cmds and updates lists + self.updatelock = threading.Lock() + # list of pending ebtables commands + self.cmds = [] + # list of WLANs requiring update + self.updates = [] + # timestamps of last WLAN update; this keeps track of WLANs that are + # using this queue + self.last_update_time = {} + + def startupdateloop(self, wlan): + """ + Kick off the update loop; only needs to be invoked once. + + :return: nothing + """ + with self.updatelock: + self.last_update_time[wlan] = time.time() + + if self.doupdateloop: + return + + self.doupdateloop = True + self.updatethread = threading.Thread(target=self.updateloop) + self.updatethread.daemon = True + self.updatethread.start() + + def stopupdateloop(self, wlan): + """ + Kill the update loop thread if there are no more WLANs using it. + + :return: nothing + """ + with self.updatelock: + try: + del self.last_update_time[wlan] + except KeyError: + logging.exception("error deleting last update time for wlan, ignored before: %s", wlan) + + if len(self.last_update_time) > 0: + return + + self.doupdateloop = False + if self.updatethread: + self.updatethread.join() + self.updatethread = None + + def ebatomiccmd(self, cmd): + """ + Helper for building ebtables atomic file command list. + + :param list[str] cmd: ebtable command + :return: ebtable atomic command + :rtype: list[str] + """ + r = [constants.EBTABLES_BIN, "--atomic-file", self.atomic_file] + if cmd: + r.extend(cmd) + return r + + def lastupdate(self, wlan): + """ + Return the time elapsed since this WLAN was last updated. + + :param wlan: wlan entity + :return: elpased time + :rtype: float + """ + try: + elapsed = time.time() - self.last_update_time[wlan] + except KeyError: + self.last_update_time[wlan] = time.time() + elapsed = 0.0 + + return elapsed + + def updated(self, wlan): + """ + Keep track of when this WLAN was last updated. + + :param wlan: wlan entity + :return: nothing + """ + self.last_update_time[wlan] = time.time() + self.updates.remove(wlan) + + def updateloop(self): + """ + Thread target that looks for WLANs needing update, and + rate limits the amount of ebtables activity. Only one userspace program + should use ebtables at any given time, or results can be unpredictable. + + :return: nothing + """ + while self.doupdateloop: + with self.updatelock: + for wlan in self.updates: + # Check if wlan is from a previously closed session. Because of the + # rate limiting scheme employed here, this may happen if a new session + # is started soon after closing a previous session. 
+ # TODO: if these are WlanNodes, this will never throw an exception + try: + wlan.session + except: + # Just mark as updated to remove from self.updates. + self.updated(wlan) + continue + + if self.lastupdate(wlan) > self.rate: + self.buildcmds(wlan) + self.ebcommit(wlan) + self.updated(wlan) + + time.sleep(self.rate) + + def ebcommit(self, wlan): + """ + Perform ebtables atomic commit using commands built in the self.cmds list. + + :return: nothing + """ + # save kernel ebtables snapshot to a file + args = self.ebatomiccmd(["--atomic-save", ]) + utils.check_cmd(args) + + # modify the table file using queued ebtables commands + for c in self.cmds: + args = self.ebatomiccmd(c) + utils.check_cmd(args) + self.cmds = [] + + # commit the table file to the kernel + args = self.ebatomiccmd(["--atomic-commit", ]) + utils.check_cmd(args) + + try: + os.unlink(self.atomic_file) + except OSError: + logging.exception("error removing atomic file: %s", self.atomic_file) + + def ebchange(self, wlan): + """ + Flag a change to the given WLAN"s _linked dict, so the ebtables + chain will be rebuilt at the next interval. + + :return: nothing + """ + with self.updatelock: + if wlan not in self.updates: + self.updates.append(wlan) + + def buildcmds(self, wlan): + """ + Inspect a _linked dict from a wlan, and rebuild the ebtables chain for that WLAN. + + :return: nothing + """ + with wlan._linked_lock: + # flush the chain + self.cmds.extend([["-F", wlan.brname], ]) + # rebuild the chain + for netif1, v in wlan._linked.items(): + for netif2, linked in v.items(): + if wlan.policy == "DROP" and linked: + self.cmds.extend([["-A", wlan.brname, "-i", netif1.localname, + "-o", netif2.localname, "-j", "ACCEPT"], + ["-A", wlan.brname, "-o", netif1.localname, + "-i", netif2.localname, "-j", "ACCEPT"]]) + elif wlan.policy == "ACCEPT" and not linked: + self.cmds.extend([["-A", wlan.brname, "-i", netif1.localname, + "-o", netif2.localname, "-j", "DROP"], + ["-A", wlan.brname, "-o", netif1.localname, + "-i", netif2.localname, "-j", "DROP"]]) + + +# a global object because all WLANs share the same queue +# cannot have multiple threads invoking the ebtables commnd +ebq = EbtablesQueue() + + +def ebtablescmds(call, cmds): + """ + Run ebtable commands. + + :param func call: function to call commands + :param list cmds: commands to call + :return: nothing + """ + with ebtables_lock: + for args in cmds: + call(args) + + +class LxBrNet(PyCoreNet): + """ + Provides linux bridge network functionlity for core nodes. + """ + policy = "DROP" + + def __init__(self, session, objid=None, name=None, start=True, policy=None): + """ + Creates a LxBrNet instance. + + :param core.session.Session session: core session instance + :param int objid: object id + :param str name: object name + :param bool start: start flag + :param policy: network policy + """ + PyCoreNet.__init__(self, session, objid, name, start) + if name is None: + name = str(self.objid) + if policy is not None: + self.policy = policy + self.name = name + sessionid = self.session.short_session_id() + self.brname = "b.%s.%s" % (str(self.objid), sessionid) + self.up = False + if start: + self.startup() + ebq.startupdateloop(self) + + def startup(self): + """ + Linux bridge starup logic. 
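# Sketch of how a bridge interacts with the shared ebtables queue defined above;
# "wlan" stands in for an LxBrNet-derived network instance. Rule changes are
# batched and committed atomically at most once per EbtablesQueue.rate seconds.
ebq.startupdateloop(wlan)   # registered automatically in LxBrNet.__init__ when start=True
ebq.ebchange(wlan)          # called from LxBrNet.link()/unlink() to mark the WLAN dirty
ebq.stopupdateloop(wlan)    # called from LxBrNet.shutdown()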
+ + :return: nothing + :raises CoreCommandError: when there is a command exception + """ + utils.check_cmd([constants.BRCTL_BIN, "addbr", self.brname]) + + # turn off spanning tree protocol and forwarding delay + utils.check_cmd([constants.BRCTL_BIN, "stp", self.brname, "off"]) + utils.check_cmd([constants.BRCTL_BIN, "setfd", self.brname, "0"]) + utils.check_cmd([constants.IP_BIN, "link", "set", self.brname, "up"]) + # create a new ebtables chain for this bridge + ebtablescmds(utils.check_cmd, [ + [constants.EBTABLES_BIN, "-N", self.brname, "-P", self.policy], + [constants.EBTABLES_BIN, "-A", "FORWARD", "--logical-in", self.brname, "-j", self.brname] + ]) + # turn off multicast snooping so mcast forwarding occurs w/o IGMP joins + snoop = "/sys/devices/virtual/net/%s/bridge/multicast_snooping" % self.brname + if os.path.exists(snoop): + with open(snoop, "w") as snoop_file: + snoop_file.write("0") + + self.up = True + + def shutdown(self): + """ + Linux bridge shutdown logic. + + :return: nothing + """ + if not self.up: + return + + ebq.stopupdateloop(self) + + try: + utils.check_cmd([constants.IP_BIN, "link", "set", self.brname, "down"]) + utils.check_cmd([constants.BRCTL_BIN, "delbr", self.brname]) + ebtablescmds(utils.check_cmd, [ + [constants.EBTABLES_BIN, "-D", "FORWARD", "--logical-in", self.brname, "-j", self.brname], + [constants.EBTABLES_BIN, "-X", self.brname] + ]) + except CoreCommandError: + logging.exception("error during shutdown") + + # removes veth pairs used for bridge-to-bridge connections + for netif in self.netifs(): + netif.shutdown() + + self._netif.clear() + self._linked.clear() + del self.session + self.up = False + + # TODO: this depends on a subtype with localname defined, seems like the wrong place for this to live + def attach(self, netif): + """ + Attach a network interface. + + :param core.netns.vnode.VEth netif: network interface to attach + :return: nothing + """ + if self.up: + utils.check_cmd([constants.BRCTL_BIN, "addif", self.brname, netif.localname]) + utils.check_cmd([constants.IP_BIN, "link", "set", netif.localname, "up"]) + + PyCoreNet.attach(self, netif) + + def detach(self, netif): + """ + Detach a network interface. + + :param core.netns.vif.Veth netif: network interface to detach + :return: nothing + """ + if self.up: + utils.check_cmd([constants.BRCTL_BIN, "delif", self.brname, netif.localname]) + + PyCoreNet.detach(self, netif) + + def linked(self, netif1, netif2): + """ + Determine if the provided network interfaces are linked. + + :param core.netns.vif.Veth netif1: interface one + :param core.netns.vif.Veth netif2: interface two + :return: True if interfaces are linked, False otherwise + :rtype: bool + """ + # check if the network interfaces are attached to this network + if self._netif[netif1.netifi] != netif1: + raise ValueError("inconsistency for netif %s" % netif1.name) + + if self._netif[netif2.netifi] != netif2: + raise ValueError("inconsistency for netif %s" % netif2.name) + + try: + linked = self._linked[netif1][netif2] + except KeyError: + if self.policy == "ACCEPT": + linked = True + elif self.policy == "DROP": + linked = False + else: + raise Exception("unknown policy: %s" % self.policy) + self._linked[netif1][netif2] = linked + + return linked + + def unlink(self, netif1, netif2): + """ + Unlink two PyCoreNetIfs, resulting in adding or removing ebtables + filtering rules. 
+ + :param core.netns.vif.Veth netif1: interface one + :param core.netns.vif.Veth netif2: interface two + :return: nothing + """ + with self._linked_lock: + if not self.linked(netif1, netif2): + return + self._linked[netif1][netif2] = False + + ebq.ebchange(self) + + def link(self, netif1, netif2): + """ + Link two PyCoreNetIfs together, resulting in adding or removing + ebtables filtering rules. + + :param core.netns.vif.Veth netif1: interface one + :param core.netns.vif.Veth netif2: interface two + :return: nothing + """ + with self._linked_lock: + if self.linked(netif1, netif2): + return + self._linked[netif1][netif2] = True + + ebq.ebchange(self) + + def linkconfig(self, netif, bw=None, delay=None, loss=None, duplicate=None, + jitter=None, netif2=None, devname=None): + """ + Configure link parameters by applying tc queuing disciplines on the interface. + + :param core.netns.vif.Veth netif: interface one + :param bw: bandwidth to set to + :param delay: packet delay to set to + :param loss: packet loss to set to + :param duplicate: duplicate percentage to set to + :param jitter: jitter to set to + :param core.netns.vif.Veth netif2: interface two + :param devname: device name + :return: nothing + """ + if devname is None: + devname = netif.localname + tc = [constants.TC_BIN, "qdisc", "replace", "dev", devname] + parent = ["root"] + changed = False + if netif.setparam("bw", bw): + # from tc-tbf(8): minimum value for burst is rate / kernel_hz + if bw is not None: + burst = max(2 * netif.mtu, bw / 1000) + # max IP payload + limit = 0xffff + tbf = ["tbf", "rate", str(bw), + "burst", str(burst), "limit", str(limit)] + if bw > 0: + if self.up: + logging.debug("linkconfig: %s" % ([tc + parent + ["handle", "1:"] + tbf],)) + utils.check_cmd(tc + parent + ["handle", "1:"] + tbf) + netif.setparam("has_tbf", True) + changed = True + elif netif.getparam("has_tbf") and bw <= 0: + tcd = [] + tc + tcd[2] = "delete" + if self.up: + utils.check_cmd(tcd + parent) + netif.setparam("has_tbf", False) + # removing the parent removes the child + netif.setparam("has_netem", False) + changed = True + if netif.getparam("has_tbf"): + parent = ["parent", "1:1"] + netem = ["netem"] + changed = max(changed, netif.setparam("delay", delay)) + if loss is not None: + loss = float(loss) + changed = max(changed, netif.setparam("loss", loss)) + if duplicate is not None: + duplicate = float(duplicate) + changed = max(changed, netif.setparam("duplicate", duplicate)) + changed = max(changed, netif.setparam("jitter", jitter)) + if not changed: + return + # jitter and delay use the same delay statement + if delay is not None: + netem += ["delay", "%sus" % delay] + if jitter is not None: + if delay is None: + netem += ["delay", "0us", "%sus" % jitter, "25%"] + else: + netem += ["%sus" % jitter, "25%"] + + if loss is not None and loss > 0: + netem += ["loss", "%s%%" % min(loss, 100)] + if duplicate is not None and duplicate > 0: + netem += ["duplicate", "%s%%" % min(duplicate, 100)] + if delay <= 0 and jitter <= 0 and loss <= 0 and duplicate <= 0: + # possibly remove netem if it exists and parent queue wasn't removed + if not netif.getparam("has_netem"): + return + tc[2] = "delete" + if self.up: + logging.debug("linkconfig: %s" % ([tc + parent + ["handle", "10:"]],)) + utils.check_cmd(tc + parent + ["handle", "10:"]) + netif.setparam("has_netem", False) + elif len(netem) > 1: + if self.up: + logging.debug("linkconfig: %s" % ([tc + parent + ["handle", "10:"] + netem],)) + utils.check_cmd(tc + parent + ["handle", "10:"] + netem) + 
netif.setparam("has_netem", True) + + def linknet(self, net): + """ + Link this bridge with another by creating a veth pair and installing + each device into each bridge. + + :param core.netns.vnet.LxBrNet net: network to link with + :return: created interface + :rtype: Veth + """ + sessionid = self.session.short_session_id() + try: + self_objid = "%x" % self.objid + except TypeError: + self_objid = "%s" % self.objid + + try: + net_objid = "%x" % net.objid + except TypeError: + net_objid = "%s" % net.objid + + localname = "veth%s.%s.%s" % (self_objid, net_objid, sessionid) + if len(localname) >= 16: + raise ValueError("interface local name %s too long" % localname) + + name = "veth%s.%s.%s" % (net_objid, self_objid, sessionid) + if len(name) >= 16: + raise ValueError("interface name %s too long" % name) + + netif = VEth(node=None, name=name, localname=localname, mtu=1500, net=self, start=self.up) + self.attach(netif) + if net.up: + # this is similar to net.attach() but uses netif.name instead + # of localname + utils.check_cmd([constants.BRCTL_BIN, "addif", net.brname, netif.name]) + utils.check_cmd([constants.IP_BIN, "link", "set", netif.name, "up"]) + i = net.newifindex() + net._netif[i] = netif + with net._linked_lock: + net._linked[netif] = {} + netif.net = self + netif.othernet = net + return netif + + def getlinknetif(self, net): + """ + Return the interface of that links this net with another net + (that were linked using linknet()). + + :param core.netns.vnet.LxBrNet net: interface to get link for + :return: interface the provided network is linked to + :rtype: core.netns.vnet.LxBrNet + """ + for netif in self.netifs(): + if hasattr(netif, "othernet") and netif.othernet == net: + return netif + + return None + + def addrconfig(self, addrlist): + """ + Set addresses on the bridge. + + :param list[str] addrlist: address list + :return: nothing + """ + if not self.up: + return + + for addr in addrlist: + utils.check_cmd([constants.IP_BIN, "addr", "add", str(addr), "dev", self.brname]) + + +class GreTapBridge(LxBrNet): + """ + A network consisting of a bridge with a gretap device for tunneling to + another system. + """ + + def __init__(self, session, remoteip=None, objid=None, name=None, + policy="ACCEPT", localip=None, ttl=255, key=None, start=True): + """ + Create a GreTapBridge instance. + + :param core.session.Session session: core session instance + :param str remoteip: remote address + :param int objid: object id + :param str name: object name + :param policy: network policy + :param str localip: local address + :param ttl: ttl value + :param key: gre tap key + :param bool start: start flag + :return: + """ + LxBrNet.__init__(self, session=session, objid=objid, name=name, policy=policy, start=False) + self.grekey = key + if self.grekey is None: + self.grekey = self.session.id ^ self.objid + self.localnum = None + self.remotenum = None + self.remoteip = remoteip + self.localip = localip + self.ttl = ttl + if remoteip is None: + self.gretap = None + else: + self.gretap = GreTap(node=self, session=session, remoteip=remoteip, + localip=localip, ttl=ttl, key=self.grekey) + if start: + self.startup() + + def startup(self): + """ + Creates a bridge and adds the gretap device to it. + + :return: nothing + """ + LxBrNet.startup(self) + if self.gretap: + self.attach(self.gretap) + + def shutdown(self): + """ + Detach the gretap device and remove the bridge. 
+ + :return: nothing + """ + if self.gretap: + self.detach(self.gretap) + self.gretap.shutdown() + self.gretap = None + LxBrNet.shutdown(self) + + def addrconfig(self, addrlist): + """ + Set the remote tunnel endpoint. This is a one-time method for + creating the GreTap device, which requires the remoteip at startup. + The 1st address in the provided list is remoteip, 2nd optionally + specifies localip. + + :param list addrlist: address list + :return: nothing + """ + if self.gretap: + raise ValueError("gretap already exists for %s" % self.name) + remoteip = addrlist[0].split("/")[0] + localip = None + if len(addrlist) > 1: + localip = addrlist[1].split("/")[0] + self.gretap = GreTap(session=self.session, remoteip=remoteip, + localip=localip, ttl=self.ttl, key=self.grekey) + self.attach(self.gretap) + + def setkey(self, key): + """ + Set the GRE key used for the GreTap device. This needs to be set + prior to instantiating the GreTap device (before addrconfig). + + :param key: gre key + :return: nothing + """ + self.grekey = key diff --git a/daemon/core/netns/vnode.py b/daemon/core/netns/vnode.py new file mode 100644 index 00000000..3c41e33a --- /dev/null +++ b/daemon/core/netns/vnode.py @@ -0,0 +1,609 @@ +""" +PyCoreNode and LxcNode classes that implement the network namespac virtual node. +""" + +import errno +import logging +import os +import random +import shutil +import signal +import string +import threading + +from core import CoreCommandError +from core import constants +from core.coreobj import PyCoreNetIf +from core.coreobj import PyCoreNode +from core.enumerations import NodeTypes +from core.misc import nodeutils +from core.misc import utils +from core.misc.ipaddress import MacAddress +from core.netns import vnodeclient +from core.netns.vif import TunTap +from core.netns.vif import VEth + +_DEFAULT_MTU = 1500 + +utils.check_executables([constants.IP_BIN]) + + +class SimpleLxcNode(PyCoreNode): + """ + Provides simple lxc functionality for core nodes. + + :var nodedir: str + :var ctrlchnlname: str + :var client: core.netns.vnodeclient.VnodeClient + :var pid: int + :var up: bool + :var lock: threading.RLock + :var _mounts: list[tuple[str, str]] + """ + valid_address_types = {"inet", "inet6", "inet6link"} + + def __init__(self, session, objid=None, name=None, nodedir=None, start=True): + """ + Create a SimpleLxcNode instance. + + :param core.session.Session session: core session instance + :param int objid: object id + :param str name: object name + :param str nodedir: node directory + :param bool start: start flag + """ + PyCoreNode.__init__(self, session, objid, name, start=start) + self.nodedir = nodedir + self.ctrlchnlname = os.path.abspath(os.path.join(self.session.session_dir, self.name)) + self.client = None + self.pid = None + self.up = False + self.lock = threading.RLock() + self._mounts = [] + + def alive(self): + """ + Check if the node is alive. + + :return: True if node is alive, False otherwise + :rtype: bool + """ + try: + os.kill(self.pid, 0) + except OSError: + return False + + return True + + def startup(self): + """ + Start a new namespace node by invoking the vnoded process that + allocates a new namespace. Bring up the loopback device and set + the hostname. 
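# Typical lifecycle sketch for the class above; "session" is assumed to be an
# existing core Session object and the command shown is an example only.
from core.netns.vnode import SimpleLxcNode

node = SimpleLxcNode(session, objid=1, name="n1", start=False)
node.startup()                                       # spawn vnoded, record its pid, connect a VnodeClient
status, output = node.cmd_output(["ip", "addr", "show", "lo"])  # runs inside the new namespace
node.shutdown()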
+ + :return: nothing + """ + if self.up: + raise ValueError("starting a node that is already up") + + # create a new namespace for this node using vnoded + vnoded = [ + constants.VNODED_BIN, + "-v", + "-c", self.ctrlchnlname, + "-l", self.ctrlchnlname + ".log", + "-p", self.ctrlchnlname + ".pid" + ] + if self.nodedir: + vnoded += ["-C", self.nodedir] + env = self.session.get_environment(state=False) + env["NODE_NUMBER"] = str(self.objid) + env["NODE_NAME"] = str(self.name) + + output = utils.check_cmd(vnoded, env=env) + self.pid = int(output) + + # create vnode client + self.client = vnodeclient.VnodeClient(self.name, self.ctrlchnlname) + + # bring up the loopback interface + logging.debug("bringing up loopback interface") + self.check_cmd([constants.IP_BIN, "link", "set", "lo", "up"]) + + # set hostname for node + logging.debug("setting hostname: %s", self.name) + self.check_cmd(["hostname", self.name]) + + # mark node as up + self.up = True + + def shutdown(self): + """ + Shutdown logic for simple lxc nodes. + + :return: nothing + """ + # nothing to do if node is not up + if not self.up: + return + + # unmount all targets (NOTE: non-persistent mount namespaces are + # removed by the kernel when last referencing process is killed) + self._mounts = [] + + # shutdown all interfaces + for netif in self.netifs(): + netif.shutdown() + + # attempt to kill node process and wait for termination of children + try: + os.kill(self.pid, signal.SIGTERM) + os.waitpid(self.pid, 0) + except OSError as e: + if e.errno != 10: + logging.exception("error killing process") + + # remove node directory if present + try: + os.unlink(self.ctrlchnlname) + except OSError as e: + # no such file or directory + if e.errno != errno.ENOENT: + logging.exception("error removing node directory") + + # clear interface data, close client, and mark self and not up + self._netif.clear() + self.client.close() + self.up = False + + def cmd(self, args, wait=True): + """ + Runs shell command on node, with option to not wait for a result. + + :param list[str]|str args: command to run + :param bool wait: wait for command to exit, defaults to True + :return: exit status for command + :rtype: int + """ + return self.client.cmd(args, wait) + + def cmd_output(self, args): + """ + Runs shell command on node and get exit status and output. + + :param list[str]|str args: command to run + :return: exit status and combined stdout and stderr + :rtype: tuple[int, str] + """ + return self.client.cmd_output(args) + + def check_cmd(self, args): + """ + Runs shell command on node. + + :param list[str]|str args: command to run + :return: combined stdout and stderr + :rtype: str + :raises CoreCommandError: when a non-zero exit status occurs + """ + return self.client.check_cmd(args) + + def termcmdstring(self, sh="/bin/sh"): + """ + Create a terminal command string. + + :param str sh: shell to execute command in + :return: str + """ + return self.client.termcmdstring(sh) + + def mount(self, source, target): + """ + Create and mount a directory. 
+ + :param str source: source directory to mount + :param str target: target directory to create + :return: nothing + :raises CoreCommandError: when a non-zero exit status occurs + """ + source = os.path.abspath(source) + logging.info("node(%s) mounting: %s at %s", self.name, source, target) + cmd = 'mkdir -p "%s" && %s -n --bind "%s" "%s"' % (target, constants.MOUNT_BIN, source, target) + status, output = self.client.shcmd_result(cmd) + if status: + raise CoreCommandError(status, cmd, output) + self._mounts.append((source, target)) + + def newifindex(self): + """ + Retrieve a new interface index. + + :return: new interface index + :rtype: int + """ + with self.lock: + return super(SimpleLxcNode, self).newifindex() + + def newveth(self, ifindex=None, ifname=None, net=None): + """ + Create a new interface. + + :param int ifindex: index for the new interface + :param str ifname: name for the new interface + :param net: network to associate interface with + :return: nothing + """ + with self.lock: + if ifindex is None: + ifindex = self.newifindex() + + if ifname is None: + ifname = "eth%d" % ifindex + + sessionid = self.session.short_session_id() + + try: + suffix = "%x.%s.%s" % (self.objid, ifindex, sessionid) + except TypeError: + suffix = "%s.%s.%s" % (self.objid, ifindex, sessionid) + + localname = "veth" + suffix + if len(localname) >= 16: + raise ValueError("interface local name (%s) too long" % localname) + + name = localname + "p" + if len(name) >= 16: + raise ValueError("interface name (%s) too long" % name) + + veth = VEth(node=self, name=name, localname=localname, net=net, start=self.up) + + if self.up: + utils.check_cmd([constants.IP_BIN, "link", "set", veth.name, "netns", str(self.pid)]) + self.check_cmd([constants.IP_BIN, "link", "set", veth.name, "name", ifname]) + + veth.name = ifname + + if self.up: + # TODO: potentially find better way to query interface ID + # retrieve interface information + output = self.check_cmd(["ip", "link", "show", veth.name]) + logging.debug("interface command output: %s", output) + output = output.split("\n") + veth.flow_id = int(output[0].strip().split(":")[0]) + 1 + logging.debug("interface flow index: %s - %s", veth.name, veth.flow_id) + veth.hwaddr = MacAddress.from_string(output[1].strip().split()[1]) + logging.debug("interface mac: %s - %s", veth.name, veth.hwaddr) + + try: + self.addnetif(veth, ifindex) + except ValueError as e: + veth.shutdown() + del veth + raise e + + return ifindex + + def newtuntap(self, ifindex=None, ifname=None, net=None): + """ + Create a new tunnel tap. + + :param int ifindex: interface index + :param str ifname: interface name + :param net: network to associate with + :return: interface index + :rtype: int + """ + with self.lock: + if ifindex is None: + ifindex = self.newifindex() + + if ifname is None: + ifname = "eth%d" % ifindex + + sessionid = self.session.short_session_id() + localname = "tap%s.%s.%s" % (self.objid, ifindex, sessionid) + name = ifname + tuntap = TunTap(node=self, name=name, localname=localname, net=net, start=self.up) + + try: + self.addnetif(tuntap, ifindex) + except ValueError as e: + tuntap.shutdown() + del tuntap + raise e + + return ifindex + + def sethwaddr(self, ifindex, addr): + """ + Set hardware addres for an interface. 
+ + :param int ifindex: index of interface to set hardware address for + :param core.misc.ipaddress.MacAddress addr: hardware address to set + :return: nothing + :raises CoreCommandError: when a non-zero exit status occurs + """ + self._netif[ifindex].sethwaddr(addr) + if self.up: + args = [constants.IP_BIN, "link", "set", "dev", self.ifname(ifindex), "address", str(addr)] + self.check_cmd(args) + + def addaddr(self, ifindex, addr): + """ + Add interface address. + + :param int ifindex: index of interface to add address to + :param str addr: address to add to interface + :return: nothing + """ + if self.up: + # check if addr is ipv6 + if ":" in str(addr): + args = [constants.IP_BIN, "addr", "add", str(addr), "dev", self.ifname(ifindex)] + self.check_cmd(args) + else: + args = [constants.IP_BIN, "addr", "add", str(addr), "broadcast", "+", "dev", self.ifname(ifindex)] + self.check_cmd(args) + + self._netif[ifindex].addaddr(addr) + + def deladdr(self, ifindex, addr): + """ + Delete address from an interface. + + :param int ifindex: index of interface to delete address from + :param str addr: address to delete from interface + :return: nothing + :raises CoreCommandError: when a non-zero exit status occurs + """ + try: + self._netif[ifindex].deladdr(addr) + except ValueError: + logging.exception("trying to delete unknown address: %s" % addr) + + if self.up: + self.check_cmd([constants.IP_BIN, "addr", "del", str(addr), "dev", self.ifname(ifindex)]) + + def delalladdr(self, ifindex, address_types=None): + """ + Delete all addresses from an interface. + + :param int ifindex: index of interface to delete address types from + :param tuple[str] address_types: address types to delete + :return: nothing + :raises CoreCommandError: when a non-zero exit status occurs + """ + if not address_types: + address_types = self.valid_address_types + + interface_name = self.ifname(ifindex) + addresses = self.client.getaddr(interface_name, rescan=True) + + for address_type in address_types: + if address_type not in self.valid_address_types: + raise ValueError("addr type must be in: %s" % " ".join(self.valid_address_types)) + for address in addresses[address_type]: + self.deladdr(ifindex, address) + + # update cached information + self.client.getaddr(interface_name, rescan=True) + + def ifup(self, ifindex): + """ + Bring an interface up. + + :param int ifindex: index of interface to bring up + :return: nothing + """ + if self.up: + self.check_cmd([constants.IP_BIN, "link", "set", self.ifname(ifindex), "up"]) + + def newnetif(self, net=None, addrlist=None, hwaddr=None, ifindex=None, ifname=None): + """ + Create a new network interface. 
+ + :param net: network to associate with + :param list addrlist: addresses to add on the interface + :param core.misc.ipaddress.MacAddress hwaddr: hardware address to set for interface + :param int ifindex: index of interface to create + :param str ifname: name for interface + :return: interface index + :rtype: int + """ + if not addrlist: + addrlist = [] + + with self.lock: + # TODO: see if you can move this to emane specific code + if nodeutils.is_node(net, NodeTypes.EMANE): + ifindex = self.newtuntap(ifindex=ifindex, ifname=ifname, net=net) + # TUN/TAP is not ready for addressing yet; the device may + # take some time to appear, and installing it into a + # namespace after it has been bound removes addressing; + # save addresses with the interface now + self.attachnet(ifindex, net) + netif = self.netif(ifindex) + netif.sethwaddr(hwaddr) + for address in utils.make_tuple(addrlist): + netif.addaddr(address) + return ifindex + else: + ifindex = self.newveth(ifindex=ifindex, ifname=ifname, net=net) + + if net is not None: + self.attachnet(ifindex, net) + + if hwaddr: + self.sethwaddr(ifindex, hwaddr) + + for address in utils.make_tuple(addrlist): + self.addaddr(ifindex, address) + + self.ifup(ifindex) + return ifindex + + def connectnode(self, ifname, othernode, otherifname): + """ + Connect a node. + + :param str ifname: name of interface to connect + :param core.netns.nodes.LxcNode othernode: node to connect to + :param str otherifname: interface name to connect to + :return: nothing + """ + tmplen = 8 + tmp1 = "tmp." + "".join([random.choice(string.ascii_lowercase) for _ in xrange(tmplen)]) + tmp2 = "tmp." + "".join([random.choice(string.ascii_lowercase) for _ in xrange(tmplen)]) + utils.check_cmd([constants.IP_BIN, "link", "add", "name", tmp1, "type", "veth", "peer", "name", tmp2]) + + utils.check_cmd([constants.IP_BIN, "link", "set", tmp1, "netns", str(self.pid)]) + self.check_cmd([constants.IP_BIN, "link", "set", tmp1, "name", ifname]) + interface = PyCoreNetIf(node=self, name=ifname, mtu=_DEFAULT_MTU) + self.addnetif(interface, self.newifindex()) + + utils.check_cmd([constants.IP_BIN, "link", "set", tmp2, "netns", str(othernode.pid)]) + othernode.check_cmd([constants.IP_BIN, "link", "set", tmp2, "name", otherifname]) + other_interface = PyCoreNetIf(node=othernode, name=otherifname, mtu=_DEFAULT_MTU) + othernode.addnetif(other_interface, othernode.newifindex()) + + def addfile(self, srcname, filename): + """ + Add a file. + + :param str srcname: source file name + :param str filename: file name to add + :return: nothing + :raises CoreCommandError: when a non-zero exit status occurs + """ + logging.info("adding file from %s to %s", srcname, filename) + directory = os.path.dirname(filename) + + cmd = 'mkdir -p "%s" && mv "%s" "%s" && sync' % (directory, srcname, filename) + status, output = self.client.shcmd_result(cmd) + if status: + raise CoreCommandError(status, cmd, output) + + +class LxcNode(SimpleLxcNode): + """ + Provides lcx node functionality for core nodes. + """ + + def __init__(self, session, objid=None, name=None, nodedir=None, bootsh="boot.sh", start=True): + """ + Create a LxcNode instance. 
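# Sketch of wiring a container node onto a bridge using newnetif() from above;
# "session" and "net" (an LxBrNet instance) are assumed to exist already, and
# the address is an example value.
from core.netns.vnode import LxcNode

node = LxcNode(session, objid=2, name="n2")
ifindex = node.newnetif(net=net, addrlist=["10.0.0.2/24"], ifname="eth0")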
+ + :param core.session.Session session: core session instance + :param int objid: object id + :param str name: object name + :param str nodedir: node directory + :param bootsh: boot shell + :param bool start: start flag + """ + super(LxcNode, self).__init__(session=session, objid=objid, name=name, nodedir=nodedir, start=start) + self.bootsh = bootsh + if start: + self.startup() + + def startup(self): + """ + Startup logic for the node. + + :return: nothing + """ + with self.lock: + self.makenodedir() + super(LxcNode, self).startup() + self.privatedir("/var/run") + self.privatedir("/var/log") + + def shutdown(self): + """ + Shutdown logic for the node. + + :return: nothing + """ + if not self.up: + return + + with self.lock: + try: + super(LxcNode, self).shutdown() + except OSError: + logging.exception("error during shutdown") + finally: + self.rmnodedir() + + def privatedir(self, path): + """ + Create a private directory. + + :param str path: path to create + :return: nothing + """ + if path[0] != "/": + raise ValueError("path not fully qualified: %s" % path) + hostpath = os.path.join(self.nodedir, os.path.normpath(path).strip("/").replace("/", ".")) + os.mkdir(hostpath) + self.mount(hostpath, path) + + def hostfilename(self, filename): + """ + Return the name of a node"s file on the host filesystem. + + :param str filename: host file name + :return: path to file + """ + dirname, basename = os.path.split(filename) + if not basename: + raise ValueError("no basename for filename: %s" % filename) + if dirname and dirname[0] == "/": + dirname = dirname[1:] + dirname = dirname.replace("/", ".") + dirname = os.path.join(self.nodedir, dirname) + return os.path.join(dirname, basename) + + def opennodefile(self, filename, mode="w"): + """ + Open a node file, within it"s directory. + + :param str filename: file name to open + :param str mode: mode to open file in + :return: open file + :rtype: file + """ + hostfilename = self.hostfilename(filename) + dirname, _basename = os.path.split(hostfilename) + if not os.path.isdir(dirname): + os.makedirs(dirname, mode=0755) + return open(hostfilename, mode) + + def nodefile(self, filename, contents, mode=0644): + """ + Create a node file with a given mode. + + :param str filename: name of file to create + :param contents: contents of file + :param int mode: mode for file + :return: nothing + """ + with self.opennodefile(filename, "w") as open_file: + open_file.write(contents) + os.chmod(open_file.name, mode) + logging.info("node(%s) added file: %s; mode: 0%o", self.name, open_file.name, mode) + + def nodefilecopy(self, filename, srcfilename, mode=None): + """ + Copy a file to a node, following symlinks and preserving metadata. + Change file mode if specified. + + :param str filename: file name to copy file to + :param str srcfilename: file to copy + :param int mode: mode to copy to + :return: nothing + """ + hostfilename = self.hostfilename(filename) + shutil.copy2(srcfilename, hostfilename) + if mode is not None: + os.chmod(hostfilename, mode) + logging.info("node(%s) copied file: %s; mode: %s", self.name, hostfilename, mode) diff --git a/daemon/core/netns/vnodeclient.py b/daemon/core/netns/vnodeclient.py new file mode 100644 index 00000000..11669a59 --- /dev/null +++ b/daemon/core/netns/vnodeclient.py @@ -0,0 +1,294 @@ +""" +vnodeclient.py: implementation of the VnodeClient class for issuing commands +over a control channel to the vnoded process running in a network namespace. 
+The control channel can be accessed via calls to the vcmd Python module or +by invoking the vcmd shell command. +""" + +import logging +import os + +import vcmd + +from core import CoreCommandError +from core import constants +from core.misc import utils + + +class VnodeClient(object): + """ + Provides client functionality for interacting with a virtual node. + """ + + def __init__(self, name, ctrlchnlname): + """ + Create a VnodeClient instance. + + :param str name: name for client + :param str ctrlchnlname: control channel name + """ + self.name = name + self.ctrlchnlname = ctrlchnlname + self.cmdchnl = vcmd.VCmd(self.ctrlchnlname) + self._addr = {} + + def _verify_connection(self): + """ + Checks that the vcmd client is properly connected. + + :return: nothing + :raises IOError: when not connected + """ + if not self.connected(): + raise IOError("vcmd not connected") + + def connected(self): + """ + Check if node is connected or not. + + :return: True if connected, False otherwise + :rtype: bool + """ + return self.cmdchnl.connected() + + def close(self): + """ + Close the client connection. + + :return: nothing + """ + self.cmdchnl.close() + + def cmd(self, args, wait=True): + """ + Execute a command on a node and return the status (return code). + + :param list[str]|str args: command arguments + :param bool wait: wait for command to end or not + :return: command status + :rtype: int + """ + self._verify_connection() + args = utils.split_args(args) + + # run command, return process when not waiting + p = self.cmdchnl.qcmd(args) + if not wait: + return 0 + + # wait for and return exit status + return p.wait() + + def cmd_output(self, args): + """ + Execute a command on a node and return a tuple containing the + exit status and result string. stderr output + is folded into the stdout result string. + + :param list[str]|str args: command to run + :return: command status and combined stdout and stderr output + :rtype: tuple[int, str] + """ + p, stdin, stdout, stderr = self.popen(args) + stdin.close() + output = stdout.read() + stderr.read() + stdout.close() + stderr.close() + status = p.wait() + return status, output.strip() + + def check_cmd(self, args): + """ + Run command and return exit status and combined stdout and stderr. + + :param list[str]|str args: command to run + :return: combined stdout and stderr + :rtype: str + :raises core.CoreCommandError: when there is a non-zero exit status + """ + status, output = self.cmd_output(args) + if status != 0: + raise CoreCommandError(status, args, output) + return output.strip() + + def popen(self, args): + """ + Execute a popen command against the node. + + :param list[str]|str args: command arguments + :return: popen object, stdin, stdout, and stderr + :rtype: tuple + """ + self._verify_connection() + args = utils.split_args(args) + return self.cmdchnl.popen(args) + + def icmd(self, args): + """ + Execute an icmd against a node. + + :param list[str]|str args: command arguments + :return: command result + :rtype: int + """ + args = utils.split_args(args) + return os.spawnlp(os.P_WAIT, constants.VCMD_BIN, constants.VCMD_BIN, "-c", self.ctrlchnlname, "--", *args) + + def redircmd(self, infd, outfd, errfd, args, wait=True): + """ + Execute a command on a node with standard input, output, and + error redirected according to the given file descriptors. 
+ + :param infd: stdin file descriptor + :param outfd: stdout file descriptor + :param errfd: stderr file descriptor + :param list[str]|str args: command arguments + :param bool wait: wait flag + :return: command status + :rtype: int + """ + self._verify_connection() + + # run command, return process when not waiting + args = utils.split_args(args) + p = self.cmdchnl.redircmd(infd, outfd, errfd, args) + if not wait: + return p + + # wait for and return exit status + status = p.wait() + if status: + logging.warn("cmd exited with status %s: %s", status, args) + return status + + def term(self, sh="/bin/sh"): + """ + Open a terminal on a node. + + :param str sh: shell to open terminal with + :return: terminal command result + :rtype: int + """ + args = ("xterm", "-ut", "-title", self.name, "-e", constants.VCMD_BIN, "-c", self.ctrlchnlname, "--", sh) + if "SUDO_USER" in os.environ: + args = ("su", "-s", "/bin/sh", "-c", + "exec " + " ".join(map(lambda x: "'%s'" % x, args)), + os.environ["SUDO_USER"]) + return os.spawnvp(os.P_NOWAIT, args[0], args) + + def termcmdstring(self, sh="/bin/sh"): + """ + Create a terminal command string. + + :param str sh: shell to execute command in + :return: str + """ + return "%s -c %s -- %s" % (constants.VCMD_BIN, self.ctrlchnlname, sh) + + def shcmd(self, cmd, sh="/bin/sh"): + """ + Execute a shell command. + + :param str cmd: command string + :param str sh: shell to run command in + :return: command result + :rtype: int + """ + return self.cmd([sh, "-c", cmd]) + + def shcmd_result(self, cmd, sh="/bin/sh"): + """ + Execute a shell command and return the exist status and combined output. + + :param str cmd: shell command to run + :param str sh: shell to run command in + :return: exist status and combined output + :rtype: tuple[int, str] + """ + return self.cmd_output([sh, "-c", cmd]) + + def getaddr(self, ifname, rescan=False): + """ + Get address for interface on node. + + :param str ifname: interface name to get address for + :param bool rescan: rescan flag + :return: interface information + :rtype: dict + """ + if ifname in self._addr and not rescan: + return self._addr[ifname] + + interface = {"ether": [], "inet": [], "inet6": [], "inet6link": []} + args = [constants.IP_BIN, "addr", "show", "dev", ifname] + p, stdin, stdout, stderr = self.popen(args) + stdin.close() + + for line in stdout: + line = line.strip().split() + if line[0] == "link/ether": + interface["ether"].append(line[1]) + elif line[0] == "inet": + interface["inet"].append(line[1]) + elif line[0] == "inet6": + if line[3] == "global": + interface["inet6"].append(line[1]) + elif line[3] == "link": + interface["inet6link"].append(line[1]) + else: + logging.warn("unknown scope: %s" % line[3]) + + err = stderr.read() + stdout.close() + stderr.close() + status = p.wait() + if status: + logging.warn("nonzero exist status (%s) for cmd: %s", status, args) + if err: + logging.warn("error output: %s", err) + self._addr[ifname] = interface + return interface + + def netifstats(self, ifname=None): + """ + Retrieve network interface state. 
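# Minimal VnodeClient usage sketch; the control channel path is an example and
# in practice comes from the owning node (see SimpleLxcNode.ctrlchnlname above).
from core.netns.vnodeclient import VnodeClient

client = VnodeClient("n1", "/tmp/pycore.12345/n1")
status, output = client.cmd_output(["ip", "link", "show"])
stats = client.netifstats("eth0")
client.close()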
+
+        :param str ifname: name of interface to get state for
+        :return: interface state information
+        :rtype: dict
+        """
+        stats = {}
+        args = ["cat", "/proc/net/dev"]
+        p, stdin, stdout, stderr = self.popen(args)
+        stdin.close()
+        # ignore first line
+        stdout.readline()
+        # second line has count names
+        tmp = stdout.readline().strip().split("|")
+        rxkeys = tmp[1].split()
+        txkeys = tmp[2].split()
+        for line in stdout:
+            line = line.strip().split()
+            devname, tmp = line[0].split(":")
+            if tmp:
+                line.insert(1, tmp)
+            stats[devname] = {"rx": {}, "tx": {}}
+            field = 1
+            for count in rxkeys:
+                stats[devname]["rx"][count] = int(line[field])
+                field += 1
+            for count in txkeys:
+                stats[devname]["tx"][count] = int(line[field])
+                field += 1
+        err = stderr.read()
+        stdout.close()
+        stderr.close()
+        status = p.wait()
+        if status:
+            logging.warn("nonzero exit status (%s) for cmd: %s", status, args)
+        if err:
+            logging.warn("error output: %s", err)
+        if ifname is not None:
+            return stats[ifname]
+        else:
+            return stats
diff --git a/daemon/core/nodes/__init__.py b/daemon/core/nodes/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/daemon/core/nodes/base.py b/daemon/core/nodes/base.py
deleted file mode 100644
index e59a89e4..00000000
--- a/daemon/core/nodes/base.py
+++ /dev/null
@@ -1,968 +0,0 @@
-"""
-Defines the base logic for nodes used within core.
-"""
-import abc
-import logging
-import shlex
-import shutil
-import threading
-from dataclasses import dataclass, field
-from pathlib import Path
-from threading import RLock
-from typing import TYPE_CHECKING, Optional, Union
-
-import netaddr
-
-from core import utils
-from core.configservice.dependencies import ConfigServiceDependencies
-from core.emulator.data import InterfaceData, LinkOptions
-from core.errors import CoreCommandError, CoreError
-from core.executables import BASH, MOUNT, TEST, VCMD, VNODED
-from core.nodes.interface import DEFAULT_MTU, CoreInterface
-from core.nodes.netclient import LinuxNetClient, get_net_client
-
-logger = logging.getLogger(__name__)
-
-if TYPE_CHECKING:
-    from core.emulator.distributed import DistributedServer
-    from core.emulator.session import Session
-    from core.configservice.base import ConfigService
-    from core.services.coreservices import CoreService
-
-    CoreServices = list[Union[CoreService, type[CoreService]]]
-    ConfigServiceType = type[ConfigService]
-
-PRIVATE_DIRS: list[Path] = [Path("/var/run"), Path("/var/log")]
-
-
-@dataclass
-class Position:
-    """
-    Helper class for Cartesian coordinate position
-    """
-
-    x: float = 0.0
-    y: float = 0.0
-    z: float = 0.0
-    lon: float = None
-    lat: float = None
-    alt: float = None
-
-    def set(self, x: float = None, y: float = None, z: float = None) -> bool:
-        """
-        Returns True if the position has actually changed.
-
-        :param x: x position
-        :param y: y position
-        :param z: z position
-        :return: True if position changed, False otherwise
-        """
-        if self.x == x and self.y == y and self.z == z:
-            return False
-        self.x = x
-        self.y = y
-        self.z = z
-        return True
-
-    def get(self) -> tuple[float, float, float]:
-        """
-        Retrieve x,y,z position.
-
-        :return: x,y,z position tuple
-        """
-        return self.x, self.y, self.z
-
-    def has_geo(self) -> bool:
-        return all(x is not None for x in [self.lon, self.lat, self.alt])
-
-    def set_geo(self, lon: float, lat: float, alt: float) -> None:
-        """
-        Set geo position lon, lat, alt.
- - :param lon: longitude value - :param lat: latitude value - :param alt: altitude value - :return: nothing - """ - self.lon = lon - self.lat = lat - self.alt = alt - - def get_geo(self) -> tuple[float, float, float]: - """ - Retrieve current geo position lon, lat, alt. - - :return: lon, lat, alt position tuple - """ - return self.lon, self.lat, self.alt - - -@dataclass -class NodeOptions: - """ - Base options for configuring a node. - """ - - canvas: int = None - """id of canvas for display within gui""" - icon: str = None - """custom icon for display, None for default""" - - -@dataclass -class CoreNodeOptions(NodeOptions): - model: str = "PC" - """model is used for providing a default set of services""" - services: list[str] = field(default_factory=list) - """services to start within node""" - config_services: list[str] = field(default_factory=list) - """config services to start within node""" - directory: Path = None - """directory to define node, defaults to path under the session directory""" - legacy: bool = False - """legacy nodes default to standard services""" - - -class NodeBase(abc.ABC): - """ - Base class for CORE nodes (nodes and networks) - """ - - def __init__( - self, - session: "Session", - _id: int = None, - name: str = None, - server: "DistributedServer" = None, - options: NodeOptions = None, - ) -> None: - """ - Creates a NodeBase instance. - - :param session: CORE session object - :param _id: id - :param name: object name - :param server: remote server node - will run on, default is None for localhost - :param options: options to create node with - """ - self.session: "Session" = session - self.id: int = _id if _id is not None else self.session.next_node_id() - self.name: str = name or f"{self.__class__.__name__}{self.id}" - self.server: "DistributedServer" = server - self.model: Optional[str] = None - self.services: CoreServices = [] - self.ifaces: dict[int, CoreInterface] = {} - self.iface_id: int = 0 - self.position: Position = Position() - self.up: bool = False - self.lock: RLock = RLock() - self.net_client: LinuxNetClient = get_net_client( - self.session.use_ovs(), self.host_cmd - ) - options = options if options else NodeOptions() - self.canvas: Optional[int] = options.canvas - self.icon: Optional[str] = options.icon - - @classmethod - def create_options(cls) -> NodeOptions: - return NodeOptions() - - @abc.abstractmethod - def startup(self) -> None: - """ - Each object implements its own startup method. - - :return: nothing - """ - raise NotImplementedError - - @abc.abstractmethod - def shutdown(self) -> None: - """ - Each object implements its own shutdown method. - - :return: nothing - """ - raise NotImplementedError - - @abc.abstractmethod - def adopt_iface(self, iface: CoreInterface, name: str) -> None: - """ - Adopt an interface, placing within network namespacing for containers - and setting to bridge masters for network like nodes. - - :param iface: interface to adopt - :param name: proper name to use for interface - :return: nothing - """ - raise NotImplementedError - - def host_cmd( - self, - args: str, - env: dict[str, str] = None, - cwd: Path = None, - wait: bool = True, - shell: bool = False, - ) -> str: - """ - Runs a command on the host system or distributed server. 
- - :param args: command to run - :param env: environment to run command with - :param cwd: directory to run command in - :param wait: True to wait for status, False otherwise - :param shell: True to use shell, False otherwise - :return: combined stdout and stderr - :raises CoreCommandError: when a non-zero exit status occurs - """ - if self.server is None: - return utils.cmd(args, env, cwd, wait, shell) - else: - return self.server.remote_cmd(args, env, cwd, wait) - - def cmd(self, args: str, wait: bool = True, shell: bool = False) -> str: - """ - Runs a command that is in the context of a node, default is to run a standard - host command. - - :param args: command to run - :param wait: True to wait for status, False otherwise - :param shell: True to use shell, False otherwise - :return: combined stdout and stderr - :raises CoreCommandError: when a non-zero exit status occurs - """ - return self.host_cmd(args, wait=wait, shell=shell) - - def setposition(self, x: float = None, y: float = None, z: float = None) -> bool: - """ - Set the (x,y,z) position of the object. - - :param x: x position - :param y: y position - :param z: z position - :return: True if position changed, False otherwise - """ - return self.position.set(x=x, y=y, z=z) - - def getposition(self) -> tuple[float, float, float]: - """ - Return an (x,y,z) tuple representing this object's position. - - :return: x,y,z position tuple - """ - return self.position.get() - - def create_iface( - self, iface_data: InterfaceData = None, options: LinkOptions = None - ) -> CoreInterface: - """ - Creates an interface and adopts it to a node. - - :param iface_data: data to create interface with - :param options: options to create interface with - :return: created interface - """ - with self.lock: - if iface_data and iface_data.id is not None: - if iface_data.id in self.ifaces: - raise CoreError( - f"node({self.id}) interface({iface_data.id}) already exists" - ) - iface_id = iface_data.id - else: - iface_id = self.next_iface_id() - mtu = DEFAULT_MTU - if iface_data and iface_data.mtu is not None: - mtu = iface_data.mtu - unique_name = f"{self.id}.{iface_id}.{self.session.short_session_id()}" - name = f"veth{unique_name}" - localname = f"beth{unique_name}" - iface = CoreInterface( - iface_id, - name, - localname, - self.session.use_ovs(), - mtu, - self, - self.server, - ) - if iface_data: - if iface_data.mac: - iface.set_mac(iface_data.mac) - for ip in iface_data.get_ips(): - iface.add_ip(ip) - if iface_data.name: - name = iface_data.name - if options: - iface.options.update(options) - self.ifaces[iface_id] = iface - if self.up: - iface.startup() - self.adopt_iface(iface, name) - else: - iface.name = name - return iface - - def delete_iface(self, iface_id: int) -> CoreInterface: - """ - Delete an interface. - - :param iface_id: interface id to delete - :return: the removed interface - """ - if iface_id not in self.ifaces: - raise CoreError(f"node({self.name}) interface({iface_id}) does not exist") - iface = self.ifaces.pop(iface_id) - logger.info("node(%s) removing interface(%s)", self.name, iface.name) - iface.shutdown() - return iface - - def get_iface(self, iface_id: int) -> CoreInterface: - """ - Retrieve interface based on id. 
- - :param iface_id: id of interface to retrieve - :return: interface - :raises CoreError: when interface does not exist - """ - if iface_id not in self.ifaces: - raise CoreError(f"node({self.name}) does not have interface({iface_id})") - return self.ifaces[iface_id] - - def get_ifaces(self, control: bool = True) -> list[CoreInterface]: - """ - Retrieve sorted list of interfaces, optionally do not include control - interfaces. - - :param control: False to exclude control interfaces, included otherwise - :return: list of interfaces - """ - ifaces = [] - for iface_id in sorted(self.ifaces): - iface = self.ifaces[iface_id] - if not control and iface.control: - continue - ifaces.append(iface) - return ifaces - - def get_iface_id(self, iface: CoreInterface) -> int: - """ - Retrieve id for an interface. - - :param iface: interface to get id for - :return: interface index if found, -1 otherwise - """ - for iface_id, local_iface in self.ifaces.items(): - if local_iface is iface: - return iface_id - raise CoreError(f"node({self.name}) does not have interface({iface.name})") - - def next_iface_id(self) -> int: - """ - Create a new interface index. - - :return: interface index - """ - while self.iface_id in self.ifaces: - self.iface_id += 1 - iface_id = self.iface_id - self.iface_id += 1 - return iface_id - - -class CoreNodeBase(NodeBase): - """ - Base class for CORE nodes. - """ - - def __init__( - self, - session: "Session", - _id: int = None, - name: str = None, - server: "DistributedServer" = None, - options: NodeOptions = None, - ) -> None: - """ - Create a CoreNodeBase instance. - - :param session: CORE session object - :param _id: object id - :param name: object name - :param server: remote server node - will run on, default is None for localhost - """ - super().__init__(session, _id, name, server, options) - self.config_services: dict[str, "ConfigService"] = {} - self.directory: Optional[Path] = None - self.tmpnodedir: bool = False - - @abc.abstractmethod - def create_dir(self, dir_path: Path) -> None: - """ - Create a node private directory. - - :param dir_path: path to create - :return: nothing - """ - raise NotImplementedError - - @abc.abstractmethod - def create_file(self, file_path: Path, contents: str, mode: int = 0o644) -> None: - """ - Create a node file with a given mode. - - :param file_path: name of file to create - :param contents: contents of file - :param mode: mode for file - :return: nothing - """ - raise NotImplementedError - - @abc.abstractmethod - def copy_file(self, src_path: Path, dst_path: Path, mode: int = None) -> None: - """ - Copy source file to node host destination, updating the file mode when - provided. - - :param src_path: source file to copy - :param dst_path: node host destination - :param mode: file mode - :return: nothing - """ - raise NotImplementedError - - @abc.abstractmethod - def termcmdstring(self, sh: str) -> str: - """ - Create a terminal command string. - - :param sh: shell to execute command in - :return: str - """ - raise NotImplementedError - - @abc.abstractmethod - def path_exists(self, path: str) -> bool: - """ - Determines if a file or directory path exists. - - :param path: path to file or directory - :return: True if path exists, False otherwise - """ - raise NotImplementedError - - def host_path(self, path: Path, is_dir: bool = False) -> Path: - """ - Return the name of a node's file on the host filesystem. 
- - :param path: path to translate to host path - :param is_dir: True if path is a directory path, False otherwise - :return: path to file - """ - if is_dir: - directory = str(path).strip("/").replace("/", ".") - return self.directory / directory - else: - directory = str(path.parent).strip("/").replace("/", ".") - return self.directory / directory / path.name - - def add_config_service(self, service_class: "ConfigServiceType") -> None: - """ - Adds a configuration service to the node. - - :param service_class: configuration service class to assign to node - :return: nothing - """ - name = service_class.name - if name in self.config_services: - raise CoreError(f"node({self.name}) already has service({name})") - self.config_services[name] = service_class(self) - - def set_service_config(self, name: str, data: dict[str, str]) -> None: - """ - Sets configuration service custom config data. - - :param name: name of configuration service - :param data: custom config data to set - :return: nothing - """ - service = self.config_services.get(name) - if service is None: - raise CoreError(f"node({self.name}) does not have service({name})") - service.set_config(data) - - def start_config_services(self) -> None: - """ - Determines startup paths and starts configuration services, based on their - dependency chains. - - :return: nothing - """ - startup_paths = ConfigServiceDependencies(self.config_services).startup_paths() - for startup_path in startup_paths: - for service in startup_path: - service.start() - - def stop_config_services(self) -> None: - """ - Stop all configuration services. - - :return: nothing - """ - for service in self.config_services.values(): - service.stop() - - def makenodedir(self) -> None: - """ - Create the node directory. - - :return: nothing - """ - if self.directory is None: - self.directory = self.session.directory / f"{self.name}.conf" - self.host_cmd(f"mkdir -p {self.directory}") - self.tmpnodedir = True - else: - self.tmpnodedir = False - - def rmnodedir(self) -> None: - """ - Remove the node directory, unless preserve directory has been set. - - :return: nothing - """ - preserve = self.session.options.get_int("preservedir") == 1 - if preserve: - return - if self.tmpnodedir: - self.host_cmd(f"rm -rf {self.directory}") - - def setposition(self, x: float = None, y: float = None, z: float = None) -> None: - """ - Set position. - - :param x: x position - :param y: y position - :param z: z position - :return: nothing - """ - changed = super().setposition(x, y, z) - if changed: - for iface in self.get_ifaces(): - iface.setposition() - - -class CoreNode(CoreNodeBase): - """ - Provides standard core node logic. - """ - - def __init__( - self, - session: "Session", - _id: int = None, - name: str = None, - server: "DistributedServer" = None, - options: CoreNodeOptions = None, - ) -> None: - """ - Create a CoreNode instance. 
- - :param session: core session instance - :param _id: object id - :param name: object name - :param server: remote server node - will run on, default is None for localhost - :param options: options to create node with - """ - options = options or CoreNodeOptions() - super().__init__(session, _id, name, server, options) - self.directory: Optional[Path] = options.directory - self.ctrlchnlname: Path = self.session.directory / self.name - self.pid: Optional[int] = None - self._mounts: list[tuple[Path, Path]] = [] - self.node_net_client: LinuxNetClient = self.create_node_net_client( - self.session.use_ovs() - ) - options = options or CoreNodeOptions() - self.model: Optional[str] = options.model - # setup services - if options.legacy or options.services: - logger.debug("set node type: %s", self.model) - self.session.services.add_services(self, self.model, options.services) - # add config services - config_services = options.config_services - if not options.legacy and not config_services and not options.services: - config_services = self.session.services.default_services.get(self.model, []) - logger.info("setting node config services: %s", config_services) - for name in config_services: - service_class = self.session.service_manager.get_service(name) - self.add_config_service(service_class) - - @classmethod - def create_options(cls) -> CoreNodeOptions: - return CoreNodeOptions() - - def create_node_net_client(self, use_ovs: bool) -> LinuxNetClient: - """ - Create node network client for running network commands within the nodes - container. - - :param use_ovs: True for OVS bridges, False for Linux bridges - :return: node network client - """ - return get_net_client(use_ovs, self.cmd) - - def alive(self) -> bool: - """ - Check if the node is alive. - - :return: True if node is alive, False otherwise - """ - try: - self.host_cmd(f"kill -0 {self.pid}") - except CoreCommandError: - return False - return True - - def startup(self) -> None: - """ - Start a new namespace node by invoking the vnoded process that - allocates a new namespace. Bring up the loopback device and set - the hostname. - - :return: nothing - """ - with self.lock: - self.makenodedir() - if self.up: - raise ValueError("starting a node that is already up") - # create a new namespace for this node using vnoded - vnoded = ( - f"{VNODED} -v -c {self.ctrlchnlname} -l {self.ctrlchnlname}.log " - f"-p {self.ctrlchnlname}.pid" - ) - if self.directory: - vnoded += f" -C {self.directory}" - env = self.session.get_environment(state=False) - env["NODE_NUMBER"] = str(self.id) - env["NODE_NAME"] = str(self.name) - output = self.host_cmd(vnoded, env=env) - self.pid = int(output) - logger.debug("node(%s) pid: %s", self.name, self.pid) - # bring up the loopback interface - logger.debug("bringing up loopback interface") - self.node_net_client.device_up("lo") - # set hostname for node - logger.debug("setting hostname: %s", self.name) - self.node_net_client.set_hostname(self.name) - # mark node as up - self.up = True - # create private directories - for dir_path in PRIVATE_DIRS: - self.create_dir(dir_path) - - def shutdown(self) -> None: - """ - Shutdown logic for simple lxc nodes. 
-
-        :return: nothing
-        """
-        # nothing to do if node is not up
-        if not self.up:
-            return
-        with self.lock:
-            try:
-                # unmount all targets (NOTE: non-persistent mount namespaces are
-                # removed by the kernel when last referencing process is killed)
-                self._mounts = []
-                # shutdown all interfaces
-                for iface in self.get_ifaces():
-                    try:
-                        self.node_net_client.device_flush(iface.name)
-                    except CoreCommandError:
-                        pass
-                    iface.shutdown()
-                # kill node process if present
-                try:
-                    self.host_cmd(f"kill -9 {self.pid}")
-                except CoreCommandError:
-                    logger.exception("error killing process")
-                # remove node directory if present
-                try:
-                    self.host_cmd(f"rm -rf {self.ctrlchnlname}")
-                except CoreCommandError:
-                    logger.exception("error removing node directory")
-                # clear interface data, close client, and mark self as not up
-                self.ifaces.clear()
-                self.up = False
-            except OSError:
-                logger.exception("error during shutdown")
-            finally:
-                self.rmnodedir()
-
-    def create_cmd(self, args: str, shell: bool = False) -> str:
-        """
-        Create command used to run commands within the context of a node.
-
-        :param args: command arguments
-        :param shell: True to run shell like, False otherwise
-        :return: node command
-        """
-        if shell:
-            args = f"{BASH} -c {shlex.quote(args)}"
-        return f"{VCMD} -c {self.ctrlchnlname} -- {args}"
-
-    def cmd(self, args: str, wait: bool = True, shell: bool = False) -> str:
-        """
-        Runs a command that is used to configure and setup the network within a
-        node.
-
-        :param args: command to run
-        :param wait: True to wait for status, False otherwise
-        :param shell: True to use shell, False otherwise
-        :return: combined stdout and stderr
-        :raises CoreCommandError: when a non-zero exit status occurs
-        """
-        args = self.create_cmd(args, shell)
-        if self.server is None:
-            return utils.cmd(args, wait=wait, shell=shell)
-        else:
-            return self.server.remote_cmd(args, wait=wait)
-
-    def path_exists(self, path: str) -> bool:
-        """
-        Determines if a file or directory path exists.
-
-        :param path: path to file or directory
-        :return: True if path exists, False otherwise
-        """
-        try:
-            self.cmd(f"{TEST} -e {path}")
-            return True
-        except CoreCommandError:
-            return False
-
-    def termcmdstring(self, sh: str = "/bin/sh") -> str:
-        """
-        Create a terminal command string.
-
-        :param sh: shell to execute command in
-        :return: str
-        """
-        terminal = self.create_cmd(sh)
-        if self.server is None:
-            return terminal
-        else:
-            return f"ssh -X -f {self.server.host} xterm -e {terminal}"
-
-    def create_dir(self, dir_path: Path) -> None:
-        """
-        Create a node private directory.
-
-        :param dir_path: path to create
-        :return: nothing
-        """
-        if not dir_path.is_absolute():
-            raise CoreError(f"private directory path not fully qualified: {dir_path}")
-        logger.debug("node(%s) creating private directory: %s", self.name, dir_path)
-        parent_path = self._find_parent_path(dir_path)
-        if parent_path:
-            self.host_cmd(f"mkdir -p {parent_path}")
-        else:
-            host_path = self.host_path(dir_path, is_dir=True)
-            self.host_cmd(f"mkdir -p {host_path}")
-            self.mount(host_path, dir_path)
-
-    def mount(self, src_path: Path, target_path: Path) -> None:
-        """
-        Create and mount a directory.
-
-        :param src_path: source directory to mount
-        :param target_path: target directory to create
-        :return: nothing
-        :raises CoreCommandError: when a non-zero exit status occurs
-        """
-        logger.debug("node(%s) mounting: %s at %s", self.name, src_path, target_path)
-        self.cmd(f"mkdir -p {target_path}")
-        self.cmd(f"{MOUNT} -n --bind {src_path} {target_path}")
-        self._mounts.append((src_path, target_path))
-
-    def _find_parent_path(self, path: Path) -> Optional[Path]:
-        """
-        Check if there is a mounted parent directory created for this node.
-
-        :param path: path to find an existing mounted parent for
-        :return: existing parent path if one exists, None otherwise
-        """
-        logger.debug("looking for existing parent: %s", path)
-        existing_path = None
-        for parent in path.parents:
-            node_path = self.host_path(parent, is_dir=True)
-            if node_path == self.directory:
-                break
-            if self.path_exists(str(node_path)):
-                relative_path = path.relative_to(parent)
-                existing_path = node_path / relative_path
-                break
-        return existing_path
-
-    def create_file(self, file_path: Path, contents: str, mode: int = 0o644) -> None:
-        """
-        Create file within a node at the given path, using contents and mode.
-
-        :param file_path: desired path for file
-        :param contents: contents of file
-        :param mode: mode to create file with
-        :return: nothing
-        """
-        logger.debug("node(%s) create file(%s) mode(%o)", self.name, file_path, mode)
-        host_path = self._find_parent_path(file_path)
-        if host_path:
-            self.host_cmd(f"mkdir -p {host_path.parent}")
-        else:
-            host_path = self.host_path(file_path)
-        directory = host_path.parent
-        if self.server is None:
-            if not directory.exists():
-                directory.mkdir(parents=True, mode=0o755)
-            with host_path.open("w") as f:
-                f.write(contents)
-            host_path.chmod(mode)
-        else:
-            self.host_cmd(f"mkdir -m {0o755:o} -p {directory}")
-            self.server.remote_put_temp(host_path, contents)
-            self.host_cmd(f"chmod {mode:o} {host_path}")
-
-    def copy_file(self, src_path: Path, dst_path: Path, mode: int = None) -> None:
-        """
-        Copy source file to node host destination, updating the file mode when
-        provided.
-
-        :param src_path: source file to copy
-        :param dst_path: node host destination
-        :param mode: file mode
-        :return: nothing
-        """
-        logger.debug(
-            "node(%s) copying file src(%s) to dst(%s) mode(%o)",
-            self.name,
-            src_path,
-            dst_path,
-            mode or 0,
-        )
-        host_path = self._find_parent_path(dst_path)
-        if host_path:
-            self.host_cmd(f"mkdir -p {host_path.parent}")
-        else:
-            host_path = self.host_path(dst_path)
-        if self.server is None:
-            shutil.copy2(src_path, host_path)
-        else:
-            self.server.remote_put(src_path, host_path)
-        if mode is not None:
-            self.host_cmd(f"chmod {mode:o} {host_path}")
-
-    def adopt_iface(self, iface: CoreInterface, name: str) -> None:
-        """
-        Adopt an interface into the network namespace of the node and set
-        the proper name provided.
-
-        :param iface: interface to adopt
-        :param name: proper name for interface
-        :return: nothing
-        """
-        # TODO: container, checksums off (container only?)
-        # TODO: container, get flow id (container only?)
- # validate iface belongs to node and get id - iface_id = self.get_iface_id(iface) - if iface_id == -1: - raise CoreError(f"adopting unknown iface({iface.name})") - # add iface to container namespace - self.net_client.device_ns(iface.name, str(self.pid)) - # use default iface name for container, if a unique name was not provided - if iface.name == name: - name = f"eth{iface_id}" - self.node_net_client.device_name(iface.name, name) - iface.name = name - # turn checksums off - self.node_net_client.checksums_off(iface.name) - # retrieve flow id for container - iface.flow_id = self.node_net_client.get_ifindex(iface.name) - logger.debug("interface flow index: %s - %s", iface.name, iface.flow_id) - # set mac address - if iface.mac: - self.node_net_client.device_mac(iface.name, str(iface.mac)) - logger.debug("interface mac: %s - %s", iface.name, iface.mac) - # set all addresses - for ip in iface.ips(): - # ipv4 check - broadcast = None - if netaddr.valid_ipv4(str(ip.ip)): - broadcast = "+" - self.node_net_client.create_address(iface.name, str(ip), broadcast) - # configure iface options - iface.set_config() - # set iface up - self.node_net_client.device_up(iface.name) - - -class CoreNetworkBase(NodeBase): - """ - Base class for networks - """ - - def __init__( - self, - session: "Session", - _id: int, - name: str, - server: "DistributedServer" = None, - options: NodeOptions = None, - ) -> None: - """ - Create a CoreNetworkBase instance. - - :param session: session object - :param _id: object id - :param name: object name - :param server: remote server node - will run on, default is None for localhost - :param options: options to create node with - """ - super().__init__(session, _id, name, server, options) - mtu = self.session.options.get_int("mtu") - self.mtu: int = mtu if mtu > 0 else DEFAULT_MTU - self.brname: Optional[str] = None - self.linked: dict[CoreInterface, dict[CoreInterface, bool]] = {} - self.linked_lock: threading.Lock = threading.Lock() - - def attach(self, iface: CoreInterface) -> None: - """ - Attach network interface. - - :param iface: network interface to attach - :return: nothing - """ - iface_id = self.next_iface_id() - self.ifaces[iface_id] = iface - iface.net = self - iface.net_id = iface_id - with self.linked_lock: - self.linked[iface] = {} - - def detach(self, iface: CoreInterface) -> None: - """ - Detach network interface. 
- - :param iface: network interface to detach - :return: nothing - """ - del self.ifaces[iface.net_id] - iface.net = None - iface.net_id = None - with self.linked_lock: - del self.linked[iface] diff --git a/daemon/core/nodes/docker.py b/daemon/core/nodes/docker.py deleted file mode 100644 index ad05c407..00000000 --- a/daemon/core/nodes/docker.py +++ /dev/null @@ -1,296 +0,0 @@ -import json -import logging -import shlex -from dataclasses import dataclass, field -from pathlib import Path -from tempfile import NamedTemporaryFile -from typing import TYPE_CHECKING - -from core import utils -from core.emulator.distributed import DistributedServer -from core.errors import CoreCommandError, CoreError -from core.executables import BASH -from core.nodes.base import CoreNode, CoreNodeOptions - -logger = logging.getLogger(__name__) - -if TYPE_CHECKING: - from core.emulator.session import Session - -DOCKER: str = "docker" - - -@dataclass -class DockerOptions(CoreNodeOptions): - image: str = "ubuntu" - """image used when creating container""" - binds: list[tuple[str, str]] = field(default_factory=list) - """bind mount source and destinations to setup within container""" - volumes: list[tuple[str, str, bool, bool]] = field(default_factory=list) - """ - volume mount source, destination, unique, delete to setup within container - - unique is True for node unique volume naming - delete is True for deleting volume mount during shutdown - """ - - -@dataclass -class DockerVolume: - src: str - """volume mount name""" - dst: str - """volume mount destination directory""" - unique: bool = True - """True to create a node unique prefixed name for this volume""" - delete: bool = True - """True to delete the volume during shutdown""" - path: str = None - """path to the volume on the host""" - - -class DockerNode(CoreNode): - """ - Provides logic for creating a Docker based node. - """ - - def __init__( - self, - session: "Session", - _id: int = None, - name: str = None, - server: DistributedServer = None, - options: DockerOptions = None, - ) -> None: - """ - Create a DockerNode instance. - - :param session: core session instance - :param _id: node id - :param name: node name - :param server: remote server node - will run on, default is None for localhost - :param options: options for creating node - """ - options = options or DockerOptions() - super().__init__(session, _id, name, server, options) - self.image: str = options.image - self.binds: list[tuple[str, str]] = options.binds - self.volumes: dict[str, DockerVolume] = {} - self.env: dict[str, str] = {} - for src, dst, unique, delete in options.volumes: - src_name = self._unique_name(src) if unique else src - self.volumes[src] = DockerVolume(src_name, dst, unique, delete) - - @classmethod - def create_options(cls) -> DockerOptions: - """ - Return default creation options, which can be used during node creation. - - :return: docker options - """ - return DockerOptions() - - def create_cmd(self, args: str, shell: bool = False) -> str: - """ - Create command used to run commands within the context of a node. - - :param args: command arguments - :param shell: True to run shell like, False otherwise - :return: node command - """ - if shell: - args = f"{BASH} -c {shlex.quote(args)}" - return f"nsenter -t {self.pid} -m -u -i -p -n -- {args}" - - def cmd(self, args: str, wait: bool = True, shell: bool = False) -> str: - """ - Runs a command that is used to configure and setup the network within a - node. 
- - :param args: command to run - :param wait: True to wait for status, False otherwise - :param shell: True to use shell, False otherwise - :return: combined stdout and stderr - :raises CoreCommandError: when a non-zero exit status occurs - """ - args = self.create_cmd(args, shell) - if self.server is None: - return utils.cmd(args, wait=wait, shell=shell, env=self.env) - else: - return self.server.remote_cmd(args, wait=wait, env=self.env) - - def _unique_name(self, name: str) -> str: - """ - Creates a session/node unique prefixed name for the provided input. - - :param name: name to make unique - :return: unique session/node prefixed name - """ - return f"{self.session.id}.{self.id}.{name}" - - def alive(self) -> bool: - """ - Check if the node is alive. - - :return: True if node is alive, False otherwise - """ - try: - running = self.host_cmd( - f"{DOCKER} inspect -f '{{{{.State.Running}}}}' {self.name}" - ) - return json.loads(running) - except CoreCommandError: - return False - - def startup(self) -> None: - """ - Create a docker container instance for the specified image. - - :return: nothing - """ - with self.lock: - if self.up: - raise CoreError(f"starting node({self.name}) that is already up") - # create node directory - self.makenodedir() - # setup commands for creating bind/volume mounts - binds = "" - for src, dst in self.binds: - binds += f"--mount type=bind,source={src},target={dst} " - volumes = "" - for volume in self.volumes.values(): - volumes += ( - f"--mount type=volume," f"source={volume.src},target={volume.dst} " - ) - # normalize hostname - hostname = self.name.replace("_", "-") - # create container and retrieve the created containers PID - self.host_cmd( - f"{DOCKER} run -td --init --net=none --hostname {hostname} " - f"--name {self.name} --sysctl net.ipv6.conf.all.disable_ipv6=0 " - f"{binds} {volumes} " - f"--privileged {self.image} tail -f /dev/null" - ) - # retrieve pid and process environment for use in nsenter commands - self.pid = self.host_cmd( - f"{DOCKER} inspect -f '{{{{.State.Pid}}}}' {self.name}" - ) - output = self.host_cmd(f"cat /proc/{self.pid}/environ") - for line in output.split("\x00"): - if not line: - continue - key, value = line.split("=") - self.env[key] = value - # setup symlinks for bind and volume mounts within - for src, dst in self.binds: - link_path = self.host_path(Path(dst), True) - self.host_cmd(f"ln -s {src} {link_path}") - for volume in self.volumes.values(): - volume.path = self.host_cmd( - f"{DOCKER} volume inspect -f '{{{{.Mountpoint}}}}' {volume.src}" - ) - link_path = self.host_path(Path(volume.dst), True) - self.host_cmd(f"ln -s {volume.path} {link_path}") - logger.debug("node(%s) pid: %s", self.name, self.pid) - self.up = True - - def shutdown(self) -> None: - """ - Shutdown logic. - - :return: nothing - """ - # nothing to do if node is not up - if not self.up: - return - with self.lock: - self.ifaces.clear() - self.host_cmd(f"{DOCKER} rm -f {self.name}") - for volume in self.volumes.values(): - if volume.delete: - self.host_cmd(f"{DOCKER} volume rm {volume.src}") - self.up = False - - def termcmdstring(self, sh: str = "/bin/sh") -> str: - """ - Create a terminal command string. - - :param sh: shell to execute command in - :return: str - """ - terminal = f"{DOCKER} exec -it {self.name} {sh}" - if self.server is None: - return terminal - else: - return f"ssh -X -f {self.server.host} xterm -e {terminal}" - - def create_dir(self, dir_path: Path) -> None: - """ - Create a private directory. 
- - :param dir_path: path to create - :return: nothing - """ - logger.debug("creating node dir: %s", dir_path) - self.cmd(f"mkdir -p {dir_path}") - - def mount(self, src_path: str, target_path: str) -> None: - """ - Create and mount a directory. - - :param src_path: source directory to mount - :param target_path: target directory to create - :return: nothing - :raises CoreCommandError: when a non-zero exit status occurs - """ - logger.debug("mounting source(%s) target(%s)", src_path, target_path) - raise Exception("not supported") - - def create_file(self, file_path: Path, contents: str, mode: int = 0o644) -> None: - """ - Create a node file with a given mode. - - :param file_path: name of file to create - :param contents: contents of file - :param mode: mode for file - :return: nothing - """ - logger.debug("node(%s) create file(%s) mode(%o)", self.name, file_path, mode) - temp = NamedTemporaryFile(delete=False) - temp.write(contents.encode()) - temp.close() - temp_path = Path(temp.name) - directory = file_path.parent - if str(directory) != ".": - self.cmd(f"mkdir -m {0o755:o} -p {directory}") - if self.server is not None: - self.server.remote_put(temp_path, temp_path) - self.host_cmd(f"{DOCKER} cp {temp_path} {self.name}:{file_path}") - self.cmd(f"chmod {mode:o} {file_path}") - if self.server is not None: - self.host_cmd(f"rm -f {temp_path}") - temp_path.unlink() - - def copy_file(self, src_path: Path, dst_path: Path, mode: int = None) -> None: - """ - Copy a file to a node, following symlinks and preserving metadata. - Change file mode if specified. - - :param dst_path: file name to copy file to - :param src_path: file to copy - :param mode: mode to copy to - :return: nothing - """ - logger.info( - "node file copy file(%s) source(%s) mode(%o)", dst_path, src_path, mode or 0 - ) - self.cmd(f"mkdir -p {dst_path.parent}") - if self.server: - temp = NamedTemporaryFile(delete=False) - temp_path = Path(temp.name) - src_path = temp_path - self.server.remote_put(src_path, temp_path) - self.host_cmd(f"{DOCKER} cp {src_path} {self.name}:{dst_path}") - if mode is not None: - self.cmd(f"chmod {mode:o} {dst_path}") diff --git a/daemon/core/nodes/interface.py b/daemon/core/nodes/interface.py deleted file mode 100644 index 294e85f9..00000000 --- a/daemon/core/nodes/interface.py +++ /dev/null @@ -1,412 +0,0 @@ -""" -virtual ethernet classes that implement the interfaces available under Linux. -""" - -import logging -import math -from pathlib import Path -from typing import TYPE_CHECKING, Callable, Optional - -import netaddr - -from core import utils -from core.emulator.data import InterfaceData, LinkOptions -from core.emulator.enumerations import TransportType -from core.errors import CoreCommandError, CoreError -from core.executables import TC -from core.nodes.netclient import LinuxNetClient, get_net_client - -logger = logging.getLogger(__name__) - -if TYPE_CHECKING: - from core.emulator.session import Session - from core.emulator.distributed import DistributedServer - from core.nodes.base import CoreNetworkBase, CoreNode, NodeBase - -DEFAULT_MTU: int = 1500 -IFACE_NAME_LENGTH: int = 15 - - -def tc_clear_cmd(name: str) -> str: - """ - Create tc command to clear device configuration. - - :param name: name of device to clear - :return: tc command - """ - return f"{TC} qdisc delete dev {name} root handle 10:" - - -def tc_cmd(name: str, options: LinkOptions, mtu: int) -> str: - """ - Create tc command to configure a device with given name and options. 
- - :param name: name of device to configure - :param options: options to configure with - :param mtu: mtu for configuration - :return: tc command - """ - netem = "" - if options.bandwidth is not None: - limit = 1000 - bw = options.bandwidth / 1000 - if options.buffer is not None and options.buffer > 0: - limit = options.buffer - elif options.delay and options.bandwidth: - delay = options.delay / 1000 - limit = max(2, math.ceil((2 * bw * delay) / (8 * mtu))) - netem += f" rate {bw}kbit" - netem += f" limit {limit}" - if options.delay is not None: - netem += f" delay {options.delay}us" - if options.jitter is not None: - if options.delay is None: - netem += f" delay 0us {options.jitter}us 25%" - else: - netem += f" {options.jitter}us 25%" - if options.loss is not None and options.loss > 0: - netem += f" loss {min(options.loss, 100)}%" - if options.dup is not None and options.dup > 0: - netem += f" duplicate {min(options.dup, 100)}%" - return f"{TC} qdisc replace dev {name} root handle 10: netem {netem}" - - -class CoreInterface: - """ - Base class for network interfaces. - """ - - def __init__( - self, - _id: int, - name: str, - localname: str, - use_ovs: bool, - mtu: int = DEFAULT_MTU, - node: "NodeBase" = None, - server: "DistributedServer" = None, - ) -> None: - """ - Creates a CoreInterface instance. - - :param _id: interface id for associated node - :param name: interface name - :param localname: interface local name - :param use_ovs: True to use ovs, False otherwise - :param mtu: mtu value - :param node: node associated with this interface - :param server: remote server node will run on, default is None for localhost - """ - if len(name) >= IFACE_NAME_LENGTH: - raise CoreError( - f"interface name ({name}) too long, max {IFACE_NAME_LENGTH}" - ) - if len(localname) >= IFACE_NAME_LENGTH: - raise CoreError( - f"interface local name ({localname}) too long, max {IFACE_NAME_LENGTH}" - ) - self.id: int = _id - self.node: Optional["NodeBase"] = node - # id of interface for network, used by wlan/emane - self.net_id: Optional[int] = None - self.name: str = name - self.localname: str = localname - self.up: bool = False - self.mtu: int = mtu - self.net: Optional[CoreNetworkBase] = None - self.ip4s: list[netaddr.IPNetwork] = [] - self.ip6s: list[netaddr.IPNetwork] = [] - self.mac: Optional[netaddr.EUI] = None - # placeholder position hook - self.poshook: Callable[[CoreInterface], None] = lambda x: None - # used with EMANE - self.transport_type: TransportType = TransportType.VIRTUAL - # id used to find flow data - self.flow_id: Optional[int] = None - self.server: Optional["DistributedServer"] = server - self.net_client: LinuxNetClient = get_net_client(use_ovs, self.host_cmd) - self.control: bool = False - # configuration data - self.has_netem: bool = False - self.options: LinkOptions = LinkOptions() - - def host_cmd( - self, - args: str, - env: dict[str, str] = None, - cwd: Path = None, - wait: bool = True, - shell: bool = False, - ) -> str: - """ - Runs a command on the host system or distributed server. 
- - :param args: command to run - :param env: environment to run command with - :param cwd: directory to run command in - :param wait: True to wait for status, False otherwise - :param shell: True to use shell, False otherwise - :return: combined stdout and stderr - :raises CoreCommandError: when a non-zero exit status occurs - """ - if self.server is None: - return utils.cmd(args, env, cwd, wait, shell) - else: - return self.server.remote_cmd(args, env, cwd, wait) - - def startup(self) -> None: - """ - Startup method for the interface. - - :return: nothing - """ - self.net_client.create_veth(self.localname, self.name) - if self.mtu > 0: - self.net_client.set_mtu(self.name, self.mtu) - self.net_client.set_mtu(self.localname, self.mtu) - self.net_client.device_up(self.name) - self.net_client.device_up(self.localname) - self.up = True - - def shutdown(self) -> None: - """ - Shutdown method for the interface. - - :return: nothing - """ - if not self.up: - return - if self.localname: - try: - self.net_client.delete_device(self.localname) - except CoreCommandError: - pass - self.up = False - - def add_ip(self, ip: str) -> None: - """ - Add ip address in the format "10.0.0.1/24". - - :param ip: ip address to add - :return: nothing - :raises CoreError: when ip address provided is invalid - """ - try: - ip = netaddr.IPNetwork(ip) - address = str(ip.ip) - if netaddr.valid_ipv4(address): - self.ip4s.append(ip) - else: - self.ip6s.append(ip) - except netaddr.AddrFormatError as e: - raise CoreError(f"adding invalid address {ip}: {e}") - - def remove_ip(self, ip: str) -> None: - """ - Remove ip address in the format "10.0.0.1/24". - - :param ip: ip address to delete - :return: nothing - :raises CoreError: when ip address provided is invalid - """ - try: - ip = netaddr.IPNetwork(ip) - address = str(ip.ip) - if netaddr.valid_ipv4(address): - self.ip4s.remove(ip) - else: - self.ip6s.remove(ip) - except (netaddr.AddrFormatError, ValueError) as e: - raise CoreError(f"deleting invalid address {ip}: {e}") - - def get_ip4(self) -> Optional[netaddr.IPNetwork]: - """ - Looks for the first ip4 address. - - :return: ip4 address, None otherwise - """ - return next(iter(self.ip4s), None) - - def get_ip6(self) -> Optional[netaddr.IPNetwork]: - """ - Looks for the first ip6 address. - - :return: ip6 address, None otherwise - """ - return next(iter(self.ip6s), None) - - def ips(self) -> list[netaddr.IPNetwork]: - """ - Retrieve a list of all ip4 and ip6 addresses combined. - - :return: ip4 and ip6 addresses - """ - return self.ip4s + self.ip6s - - def set_mac(self, mac: Optional[str]) -> None: - """ - Set mac address. - - :param mac: mac address to set, None for random mac - :return: nothing - :raises CoreError: when there is an invalid mac address - """ - if mac is None: - self.mac = mac - else: - try: - self.mac = netaddr.EUI(mac, dialect=netaddr.mac_unix_expanded) - except netaddr.AddrFormatError as e: - raise CoreError(f"invalid mac address({mac}): {e}") - - def setposition(self) -> None: - """ - Dispatch position hook handler when possible. - - :return: nothing - """ - if self.poshook and self.node: - self.poshook(self) - - def __lt__(self, other: "CoreInterface") -> bool: - """ - Used for comparisons of this object. - - :param other: other interface - :return: true if less than, false otherwise - """ - return id(self) < id(other) - - def is_raw(self) -> bool: - """ - Used to determine if this interface is considered a raw interface. 
- - :return: True if raw interface, False otherwise - """ - return self.transport_type == TransportType.RAW - - def is_virtual(self) -> bool: - """ - Used to determine if this interface is considered a virtual interface. - - :return: True if virtual interface, False otherwise - """ - return self.transport_type == TransportType.VIRTUAL - - def set_config(self) -> None: - # clear current settings - if self.options.is_clear(): - if self.has_netem: - cmd = tc_clear_cmd(self.name) - if self.node: - self.node.cmd(cmd) - else: - self.host_cmd(cmd) - self.has_netem = False - # set updated settings - else: - cmd = tc_cmd(self.name, self.options, self.mtu) - if self.node: - self.node.cmd(cmd) - else: - self.host_cmd(cmd) - self.has_netem = True - - def get_data(self) -> InterfaceData: - """ - Retrieve the data representation of this interface. - - :return: interface data - """ - ip4 = self.get_ip4() - ip4_addr = str(ip4.ip) if ip4 else None - ip4_mask = ip4.prefixlen if ip4 else None - ip6 = self.get_ip6() - ip6_addr = str(ip6.ip) if ip6 else None - ip6_mask = ip6.prefixlen if ip6 else None - mac = str(self.mac) if self.mac else None - return InterfaceData( - id=self.id, - name=self.name, - mac=mac, - ip4=ip4_addr, - ip4_mask=ip4_mask, - ip6=ip6_addr, - ip6_mask=ip6_mask, - ) - - -class GreTap(CoreInterface): - """ - GRE TAP device for tunneling between emulation servers. - Uses the "gretap" tunnel device type from Linux which is a GRE device - having a MAC address. The MAC address is required for bridging. - """ - - def __init__( - self, - session: "Session", - remoteip: str, - key: int = None, - node: "CoreNode" = None, - mtu: int = DEFAULT_MTU, - _id: int = None, - localip: str = None, - ttl: int = 255, - server: "DistributedServer" = None, - ) -> None: - """ - Creates a GreTap instance. - - :param session: session for this gre tap - :param remoteip: remote address - :param key: gre tap key - :param node: related core node - :param mtu: interface mtu - :param _id: object id - :param localip: local address - :param ttl: ttl value - :param server: remote server node - will run on, default is None for localhost - :raises CoreCommandError: when there is a command exception - """ - if _id is None: - _id = ((id(self) >> 16) ^ (id(self) & 0xFFFF)) & 0xFFFF - self.id: int = _id - sessionid = session.short_session_id() - localname = f"gt.{self.id}.{sessionid}" - name = f"{localname}p" - super().__init__(0, name, localname, session.use_ovs(), mtu, node, server) - self.transport_type: TransportType = TransportType.RAW - self.remote_ip: str = remoteip - self.ttl: int = ttl - self.key: Optional[int] = key - self.local_ip: Optional[str] = localip - - def startup(self) -> None: - """ - Startup logic for a GreTap. - - :return: nothing - """ - self.net_client.create_gretap( - self.localname, self.remote_ip, self.local_ip, self.ttl, self.key - ) - if self.mtu > 0: - self.net_client.set_mtu(self.localname, self.mtu) - self.net_client.device_up(self.localname) - self.up = True - - def shutdown(self) -> None: - """ - Shutdown logic for a GreTap. 
- - :return: nothing - """ - if self.localname: - try: - self.net_client.device_down(self.localname) - self.net_client.delete_device(self.localname) - except CoreCommandError: - logger.exception("error during shutdown") - self.localname = None diff --git a/daemon/core/nodes/lxd.py b/daemon/core/nodes/lxd.py deleted file mode 100644 index e4cba002..00000000 --- a/daemon/core/nodes/lxd.py +++ /dev/null @@ -1,221 +0,0 @@ -import json -import logging -import shlex -import time -from dataclasses import dataclass, field -from pathlib import Path -from tempfile import NamedTemporaryFile -from typing import TYPE_CHECKING - -from core.emulator.data import InterfaceData, LinkOptions -from core.emulator.distributed import DistributedServer -from core.errors import CoreCommandError -from core.executables import BASH -from core.nodes.base import CoreNode, CoreNodeOptions -from core.nodes.interface import CoreInterface - -logger = logging.getLogger(__name__) - -if TYPE_CHECKING: - from core.emulator.session import Session - - -@dataclass -class LxcOptions(CoreNodeOptions): - image: str = "ubuntu" - """image used when creating container""" - binds: list[tuple[str, str]] = field(default_factory=list) - """bind mount source and destinations to setup within container""" - volumes: list[tuple[str, str, bool, bool]] = field(default_factory=list) - """ - volume mount source, destination, unique, delete to setup within container - - unique is True for node unique volume naming - delete is True for deleting volume mount during shutdown - """ - - -class LxcNode(CoreNode): - def __init__( - self, - session: "Session", - _id: int = None, - name: str = None, - server: DistributedServer = None, - options: LxcOptions = None, - ) -> None: - """ - Create a LxcNode instance. - - :param session: core session instance - :param _id: object id - :param name: object name - :param server: remote server node - will run on, default is None for localhost - :param options: option to create node with - """ - options = options or LxcOptions() - super().__init__(session, _id, name, server, options) - self.image: str = options.image - - @classmethod - def create_options(cls) -> LxcOptions: - return LxcOptions() - - def create_cmd(self, args: str, shell: bool = False) -> str: - """ - Create command used to run commands within the context of a node. - - :param args: command arguments - :param shell: True to run shell like, False otherwise - :return: node command - """ - if shell: - args = f"{BASH} -c {shlex.quote(args)}" - return f"nsenter -t {self.pid} -m -u -i -p -n {args}" - - def _get_info(self) -> dict: - args = f"lxc list {self.name} --format json" - output = self.host_cmd(args) - data = json.loads(output) - if not data: - raise CoreCommandError(1, args, f"LXC({self.name}) not present") - return data[0] - - def alive(self) -> bool: - """ - Check if the node is alive. - - :return: True if node is alive, False otherwise - """ - try: - data = self._get_info() - return data["state"]["status"] == "Running" - except CoreCommandError: - return False - - def startup(self) -> None: - """ - Startup logic. - - :return: nothing - """ - with self.lock: - if self.up: - raise ValueError("starting a node that is already up") - self.makenodedir() - self.host_cmd(f"lxc launch {self.image} {self.name}") - data = self._get_info() - self.pid = data["state"]["pid"] - self.up = True - - def shutdown(self) -> None: - """ - Shutdown logic. 
- - :return: nothing - """ - # nothing to do if node is not up - if not self.up: - return - with self.lock: - self.ifaces.clear() - self.host_cmd(f"lxc delete --force {self.name}") - self.up = False - - def termcmdstring(self, sh: str = "/bin/sh") -> str: - """ - Create a terminal command string. - - :param sh: shell to execute command in - :return: str - """ - terminal = f"lxc exec {self.name} -- {sh}" - if self.server is None: - return terminal - else: - return f"ssh -X -f {self.server.host} xterm -e {terminal}" - - def create_dir(self, dir_path: Path) -> None: - """ - Create a private directory. - - :param dir_path: path to create - :return: nothing - """ - logger.info("creating node dir: %s", dir_path) - args = f"mkdir -p {dir_path}" - self.cmd(args) - - def mount(self, src_path: Path, target_path: Path) -> None: - """ - Create and mount a directory. - - :param src_path: source directory to mount - :param target_path: target directory to create - :return: nothing - :raises CoreCommandError: when a non-zero exit status occurs - """ - logger.debug("mounting source(%s) target(%s)", src_path, target_path) - raise Exception("not supported") - - def create_file(self, file_path: Path, contents: str, mode: int = 0o644) -> None: - """ - Create a node file with a given mode. - - :param file_path: name of file to create - :param contents: contents of file - :param mode: mode for file - :return: nothing - """ - logger.debug("node(%s) create file(%s) mode(%o)", self.name, file_path, mode) - temp = NamedTemporaryFile(delete=False) - temp.write(contents.encode()) - temp.close() - temp_path = Path(temp.name) - directory = file_path.parent - if str(directory) != ".": - self.cmd(f"mkdir -m {0o755:o} -p {directory}") - if self.server is not None: - self.server.remote_put(temp_path, temp_path) - if not str(file_path).startswith("/"): - file_path = Path("/root/") / file_path - self.host_cmd(f"lxc file push {temp_path} {self.name}/{file_path}") - self.cmd(f"chmod {mode:o} {file_path}") - if self.server is not None: - self.host_cmd(f"rm -f {temp_path}") - temp_path.unlink() - logger.debug("node(%s) added file: %s; mode: 0%o", self.name, file_path, mode) - - def copy_file(self, src_path: Path, dst_path: Path, mode: int = None) -> None: - """ - Copy a file to a node, following symlinks and preserving metadata. - Change file mode if specified. - - :param dst_path: file name to copy file to - :param src_path: file to copy - :param mode: mode to copy to - :return: nothing - """ - logger.info( - "node file copy file(%s) source(%s) mode(%o)", dst_path, src_path, mode or 0 - ) - self.cmd(f"mkdir -p {dst_path.parent}") - if self.server: - temp = NamedTemporaryFile(delete=False) - temp_path = Path(temp.name) - src_path = temp_path - self.server.remote_put(src_path, temp_path) - if not str(dst_path).startswith("/"): - dst_path = Path("/root/") / dst_path - self.host_cmd(f"lxc file push {src_path} {self.name}/{dst_path}") - if mode is not None: - self.cmd(f"chmod {mode:o} {dst_path}") - - def create_iface( - self, iface_data: InterfaceData = None, options: LinkOptions = None - ) -> CoreInterface: - iface = super().create_iface(iface_data, options) - # adding small delay to allow time for adding addresses to work correctly - time.sleep(0.5) - return iface diff --git a/daemon/core/nodes/netclient.py b/daemon/core/nodes/netclient.py deleted file mode 100644 index 74087e31..00000000 --- a/daemon/core/nodes/netclient.py +++ /dev/null @@ -1,399 +0,0 @@ -""" -Clients for dealing with bridge/interface commands. 
-""" -from typing import Callable - -import netaddr - -from core import utils -from core.executables import ETHTOOL, IP, OVS_VSCTL, SYSCTL, TC - - -class LinuxNetClient: - """ - Client for creating Linux bridges and ip interfaces for nodes. - """ - - def __init__(self, run: Callable[..., str]) -> None: - """ - Create LinuxNetClient instance. - - :param run: function to run commands with - """ - self.run: Callable[..., str] = run - - def set_hostname(self, name: str) -> None: - """ - Set network hostname. - - :param name: name for hostname - :return: nothing - """ - name = name.replace("_", "-") - self.run(f"hostname {name}") - - def create_route(self, route: str, device: str) -> None: - """ - Create a new route for a device. - - :param route: route to create - :param device: device to add route to - :return: nothing - """ - self.run(f"{IP} route replace {route} dev {device}") - - def device_up(self, device: str) -> None: - """ - Bring a device up. - - :param device: device to bring up - :return: nothing - """ - self.run(f"{IP} link set {device} up") - - def device_down(self, device: str) -> None: - """ - Bring a device down. - - :param device: device to bring down - :return: nothing - """ - self.run(f"{IP} link set {device} down") - - def device_name(self, device: str, name: str) -> None: - """ - Set a device name. - - :param device: device to set name for - :param name: name to set - :return: nothing - """ - self.run(f"{IP} link set {device} name {name}") - - def device_show(self, device: str) -> str: - """ - Show link information for a device. - - :param device: device to get information for - :return: device information - """ - return self.run(f"{IP} link show {device}") - - def address_show(self, device: str) -> str: - """ - Show address information for a device. - - :param device: device name - :return: address information - """ - return self.run(f"{IP} address show {device}") - - def get_mac(self, device: str) -> str: - """ - Retrieve MAC address for a given device. - - :param device: device to get mac for - :return: MAC address - """ - return self.run(f"cat /sys/class/net/{device}/address") - - def get_ifindex(self, device: str) -> int: - """ - Retrieve ifindex for a given device. - - :param device: device to get ifindex for - :return: ifindex - """ - return int(self.run(f"cat /sys/class/net/{device}/ifindex")) - - def device_ns(self, device: str, namespace: str) -> None: - """ - Set netns for a device. - - :param device: device to setns for - :param namespace: namespace to set device to - :return: nothing - """ - self.run(f"{IP} link set {device} netns {namespace}") - - def device_flush(self, device: str) -> None: - """ - Flush device addresses. - - :param device: device to flush - :return: nothing - """ - self.run(f"{IP} address flush dev {device}") - - def device_mac(self, device: str, mac: str) -> None: - """ - Set MAC address for a device. - - :param device: device to set mac for - :param mac: mac to set - :return: nothing - """ - self.run(f"{IP} link set dev {device} address {mac}") - - def delete_device(self, device: str) -> None: - """ - Delete device. - - :param device: device to delete - :return: nothing - """ - self.run(f"{IP} link delete {device}") - - def delete_tc(self, device: str) -> None: - """ - Remove traffic control settings for a device. - - :param device: device to remove tc - :return: nothing - """ - self.run(f"{TC} qdisc delete dev {device} root") - - def checksums_off(self, iface_name: str) -> None: - """ - Turns interface checksums off. 
- - :param iface_name: interface to update - :return: nothing - """ - self.run(f"{ETHTOOL} -K {iface_name} rx off tx off") - - def create_address(self, device: str, address: str, broadcast: str = None) -> None: - """ - Create address for a device. - - :param device: device to add address to - :param address: address to add - :param broadcast: broadcast address to use, default is None - :return: nothing - """ - if broadcast is not None: - self.run(f"{IP} address add {address} broadcast {broadcast} dev {device}") - else: - self.run(f"{IP} address add {address} dev {device}") - if netaddr.valid_ipv6(address.split("/")[0]): - # IPv6 addresses are removed by default on interface down. - # Make sure that the IPv6 address we add is not removed - device = utils.sysctl_devname(device) - self.run(f"{SYSCTL} -w net.ipv6.conf.{device}.keep_addr_on_down=1") - - def delete_address(self, device: str, address: str) -> None: - """ - Delete an address from a device. - - :param device: targeted device - :param address: address to remove - :return: nothing - """ - self.run(f"{IP} address delete {address} dev {device}") - - def create_veth(self, name: str, peer: str) -> None: - """ - Create a veth pair. - - :param name: veth name - :param peer: peer name - :return: nothing - """ - self.run(f"{IP} link add name {name} type veth peer name {peer}") - - def create_gretap( - self, device: str, address: str, local: str, ttl: int, key: int - ) -> None: - """ - Create a GRE tap on a device. - - :param device: device to add tap to - :param address: address to add tap for - :param local: local address to tie to - :param ttl: time to live value - :param key: key for tap - :return: nothing - """ - cmd = f"{IP} link add {device} type gretap remote {address}" - if local is not None: - cmd += f" local {local}" - if ttl is not None: - cmd += f" ttl {ttl}" - if key is not None: - cmd += f" key {key}" - self.run(cmd) - - def create_bridge(self, name: str) -> None: - """ - Create a Linux bridge and bring it up. - - :param name: bridge name - :return: nothing - """ - self.run(f"{IP} link add name {name} type bridge") - self.run(f"{IP} link set {name} type bridge stp_state 0") - self.run(f"{IP} link set {name} type bridge forward_delay 0") - self.run(f"{IP} link set {name} type bridge mcast_snooping 0") - self.run(f"{IP} link set {name} type bridge group_fwd_mask 65528") - self.device_up(name) - - def delete_bridge(self, name: str) -> None: - """ - Bring down and delete a Linux bridge. - - :param name: bridge name - :return: nothing - """ - self.device_down(name) - self.run(f"{IP} link delete {name} type bridge") - - def set_iface_master(self, bridge_name: str, iface_name: str) -> None: - """ - Assign interface master to a Linux bridge. - - :param bridge_name: bridge name - :param iface_name: interface name - :return: nothing - """ - self.run(f"{IP} link set dev {iface_name} master {bridge_name}") - self.device_up(iface_name) - - def delete_iface(self, bridge_name: str, iface_name: str) -> None: - """ - Delete an interface associated with a Linux bridge. - - :param bridge_name: bridge name - :param iface_name: interface name - :return: nothing - """ - self.run(f"{IP} link set dev {iface_name} nomaster") - - def existing_bridges(self, _id: int) -> bool: - """ - Checks if there are any existing Linux bridges for a node. 
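The methods above (create_bridge, create_veth, set_iface_master) are the building blocks used to wire node interfaces into a bridge. A rough standalone equivalent using the same ip commands; the bridge and interface names are hypothetical and the commands need root.

import subprocess

def run(cmd: str) -> None:
    subprocess.run(cmd, shell=True, check=True)

def wire_iface_to_bridge(bridge: str, veth: str, peer: str) -> None:
    run(f"ip link add name {bridge} type bridge")
    run(f"ip link set {bridge} type bridge stp_state 0")  # stp off, as in the bridge setup above
    run(f"ip link set {bridge} up")
    run(f"ip link add name {veth} type veth peer name {peer}")
    run(f"ip link set dev {veth} master {bridge}")
    run(f"ip link set {veth} up")

# wire_iface_to_bridge("b.demo", "veth0.demo", "veth0p.demo")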
- - :param _id: node id to check bridges for - :return: True if there are existing bridges, False otherwise - """ - output = self.run(f"{IP} -o link show type bridge") - lines = output.split("\n") - for line in lines: - values = line.split(":") - if not len(values) >= 2: - continue - name = values[1] - fields = name.split(".") - if len(fields) != 3: - continue - if fields[0] == "b" and fields[1] == _id: - return True - return False - - def set_mac_learning(self, name: str, value: int) -> None: - """ - Set mac learning for a Linux bridge. - - :param name: bridge name - :param value: ageing time value - :return: nothing - """ - self.run(f"{IP} link set {name} type bridge ageing_time {value}") - - def set_mtu(self, name: str, value: int) -> None: - """ - Sets the mtu value for a device. - - :param name: name of device to set value for - :param value: mtu value to set - :return: nothing - """ - self.run(f"{IP} link set {name} mtu {value}") - - -class OvsNetClient(LinuxNetClient): - """ - Client for creating OVS bridges and ip interfaces for nodes. - """ - - def create_bridge(self, name: str) -> None: - """ - Create a OVS bridge and bring it up. - - :param name: bridge name - :return: nothing - """ - self.run(f"{OVS_VSCTL} add-br {name}") - self.run(f"{OVS_VSCTL} set bridge {name} stp_enable=false") - self.run(f"{OVS_VSCTL} set bridge {name} other_config:stp-max-age=6") - self.run(f"{OVS_VSCTL} set bridge {name} other_config:stp-forward-delay=4") - self.device_up(name) - - def delete_bridge(self, name: str) -> None: - """ - Bring down and delete a OVS bridge. - - :param name: bridge name - :return: nothing - """ - self.device_down(name) - self.run(f"{OVS_VSCTL} del-br {name}") - - def set_iface_master(self, bridge_name: str, iface_name: str) -> None: - """ - Create an interface associated with a network bridge. - - :param bridge_name: bridge name - :param iface_name: interface name - :return: nothing - """ - self.run(f"{OVS_VSCTL} add-port {bridge_name} {iface_name}") - self.device_up(iface_name) - - def delete_iface(self, bridge_name: str, iface_name: str) -> None: - """ - Delete an interface associated with a OVS bridge. - - :param bridge_name: bridge name - :param iface_name: interface name - :return: nothing - """ - self.run(f"{OVS_VSCTL} del-port {bridge_name} {iface_name}") - - def existing_bridges(self, _id: int) -> bool: - """ - Checks if there are any existing OVS bridges for a node. - - :param _id: node id to check bridges for - :return: True if there are existing bridges, False otherwise - """ - output = self.run(f"{OVS_VSCTL} list-br") - if output: - for line in output.split("\n"): - fields = line.split(".") - if fields[0] == "b" and fields[1] == _id: - return True - return False - - def set_mac_learning(self, name: str, value: int) -> None: - """ - Set mac learning for an OVS bridge. - - :param name: bridge name - :param value: ageing time value - :return: nothing - """ - self.run(f"{OVS_VSCTL} set bridge {name} other_config:mac-aging-time={value}") - - -def get_net_client(use_ovs: bool, run: Callable[..., str]) -> LinuxNetClient: - """ - Retrieve desired net client for running network commands. 
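existing_bridges leans on the bridge naming convention b.<node_id>.<session_id>. A standalone version of that check over canned "ip -o link show type bridge" output, showing how stale bridges for a node id can be spotted; the sample output below is made up.

def has_node_bridge(output: str, node_id: int) -> bool:
    for line in output.splitlines():
        values = line.split(":")
        if len(values) < 2:
            continue
        name = values[1].strip()
        fields = name.split(".")
        if len(fields) != 3:
            continue
        # bridge names follow b.<node_id>.<session_id>
        if fields[0] == "b" and fields[1] == str(node_id):
            return True
    return False

sample = "4: b.1.cafe: <BROADCAST,MULTICAST,UP> mtu 1500 ...\n5: docker0: <BROADCAST,MULTICAST> mtu 1500 ..."
assert has_node_bridge(sample, 1)
assert not has_node_bridge(sample, 2)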
- - :param use_ovs: True for OVS bridges, False for Linux bridges - :param run: function used to run net client commands - :return: net client class - """ - if use_ovs: - return OvsNetClient(run) - else: - return LinuxNetClient(run) diff --git a/daemon/core/nodes/network.py b/daemon/core/nodes/network.py deleted file mode 100644 index 1ea9c31e..00000000 --- a/daemon/core/nodes/network.py +++ /dev/null @@ -1,789 +0,0 @@ -""" -Defines network nodes used within core. -""" - -import logging -import threading -from dataclasses import dataclass -from pathlib import Path -from typing import TYPE_CHECKING, Optional - -import netaddr - -from core import utils -from core.emulator.data import InterfaceData, LinkData -from core.emulator.enumerations import MessageFlags, NetworkPolicy, RegisterTlvs -from core.errors import CoreCommandError, CoreError -from core.executables import NFTABLES -from core.nodes.base import CoreNetworkBase, NodeOptions -from core.nodes.interface import CoreInterface, GreTap -from core.nodes.netclient import get_net_client - -logger = logging.getLogger(__name__) - -if TYPE_CHECKING: - from core.emulator.distributed import DistributedServer - from core.emulator.session import Session - from core.location.mobility import WirelessModel, WayPointMobility - -LEARNING_DISABLED: int = 0 - - -class NftablesQueue: - """ - Helper class for queuing up nftables commands into rate-limited - atomic commits. This improves performance and reliability when there are - many WLAN link updates. - """ - - # update rate is every 300ms - rate: float = 0.3 - atomic_file: str = "/tmp/pycore.nftables.atomic" - chain: str = "forward" - - def __init__(self) -> None: - """ - Initialize the helper class, but don't start the update thread - until a WLAN is instantiated. - """ - self.running: bool = False - self.run_thread: Optional[threading.Thread] = None - # this lock protects cmds and updates lists - self.lock: threading.Lock = threading.Lock() - # list of pending nftables commands - self.cmds: list[str] = [] - # list of WLANs requiring update - self.updates: utils.SetQueue = utils.SetQueue() - - def start(self) -> None: - """ - Start thread to listen for updates for the provided network. - - :return: nothing - """ - with self.lock: - if not self.running: - self.running = True - self.run_thread = threading.Thread(target=self.run, daemon=True) - self.run_thread.start() - - def stop(self) -> None: - """ - Stop updates for network, when no networks remain, stop update thread. - - :return: nothing - """ - with self.lock: - if self.running: - self.running = False - self.updates.put(None) - self.run_thread.join() - self.run_thread = None - - def run(self) -> None: - """ - Thread target that looks for networks needing update, and - rate limits the amount of nftables activity. Only one userspace program - should use nftables at any given time, or results can be unpredictable. - - :return: nothing - """ - while self.running: - net = self.updates.get() - if net is None: - break - self.build_cmds(net) - self.commit(net) - - def commit(self, net: "CoreNetwork") -> None: - """ - Commit changes to nftables for the provided network. 
- - :param net: network to commit nftables changes - :return: nothing - """ - if not self.cmds: - return - # write out nft commands to file - for cmd in self.cmds: - net.host_cmd(f"echo {cmd} >> {self.atomic_file}", shell=True) - # read file as atomic change - net.host_cmd(f"{NFTABLES} -f {self.atomic_file}") - # remove file - net.host_cmd(f"rm -f {self.atomic_file}") - self.cmds.clear() - - def update(self, net: "CoreNetwork") -> None: - """ - Flag this network has an update, so the nftables chain will be rebuilt. - - :param net: wlan network - :return: nothing - """ - self.updates.put(net) - - def delete_table(self, net: "CoreNetwork") -> None: - """ - Delete nftable bridge rule table. - - :param net: network to delete table for - :return: nothing - """ - with self.lock: - net.host_cmd(f"{NFTABLES} delete table bridge {net.brname}") - - def build_cmds(self, net: "CoreNetwork") -> None: - """ - Inspect linked nodes for a network, and rebuild the nftables chain commands. - - :param net: network to build commands for - :return: nothing - """ - with net.linked_lock: - if net.has_nftables_chain: - self.cmds.append(f"flush table bridge {net.brname}") - else: - net.has_nftables_chain = True - policy = net.policy.value.lower() - self.cmds.append(f"add table bridge {net.brname}") - self.cmds.append( - f"add chain bridge {net.brname} {self.chain} {{type filter hook " - f"forward priority -1\\; policy {policy}\\;}}" - ) - # add default rule to accept all traffic not for this bridge - self.cmds.append( - f"add rule bridge {net.brname} {self.chain} " - f"ibriport != {net.brname} accept" - ) - # rebuild the chain - for iface1, v in net.linked.items(): - for iface2, linked in v.items(): - policy = None - if net.policy == NetworkPolicy.DROP and linked: - policy = "accept" - elif net.policy == NetworkPolicy.ACCEPT and not linked: - policy = "drop" - if policy: - self.cmds.append( - f"add rule bridge {net.brname} {self.chain} " - f"iif {iface1.localname} oif {iface2.localname} " - f"{policy}" - ) - self.cmds.append( - f"add rule bridge {net.brname} {self.chain} " - f"oif {iface1.localname} iif {iface2.localname} " - f"{policy}" - ) - - -# a global object because all networks share the same queue -# cannot have multiple threads invoking the nftables commnd -nft_queue: NftablesQueue = NftablesQueue() - - -@dataclass -class NetworkOptions(NodeOptions): - policy: NetworkPolicy = None - """allows overriding the network policy, otherwise uses class defined default""" - - -class CoreNetwork(CoreNetworkBase): - """ - Provides linux bridge network functionality for core nodes. - """ - - policy: NetworkPolicy = NetworkPolicy.DROP - - def __init__( - self, - session: "Session", - _id: int = None, - name: str = None, - server: "DistributedServer" = None, - options: NetworkOptions = None, - ) -> None: - """ - Creates a CoreNetwork instance. 
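NftablesQueue.commit batches all pending rule strings into a single file and applies them with one "nft -f" call, so an entire WLAN update lands as one transaction instead of many individual nftables invocations. A condensed standalone sketch of that pattern; it requires root and nftables, and the table and chain names in the commented usage are hypothetical.

import os
import subprocess
from tempfile import NamedTemporaryFile

def apply_rules_atomically(cmds: list[str]) -> None:
    if not cmds:
        return
    # write all queued commands to one file
    with NamedTemporaryFile("w", suffix=".nft", delete=False) as f:
        f.write("\n".join(cmds) + "\n")
        path = f.name
    # hand the whole batch to nftables in a single transaction
    subprocess.run(f"nft -f {path}", shell=True, check=True)
    os.unlink(path)

# apply_rules_atomically([
#     "add table bridge b.demo",
#     "add chain bridge b.demo forward {type filter hook forward priority -1; policy drop;}",
#     "add rule bridge b.demo forward ibriport != b.demo accept",
# ])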
- - :param session: core session instance - :param _id: object id - :param name: object name - :param server: remote server node - will run on, default is None for localhost - :param options: options to create node with - """ - options = options or NetworkOptions() - super().__init__(session, _id, name, server, options) - self.policy: NetworkPolicy = options.policy if options.policy else self.policy - sessionid = self.session.short_session_id() - self.brname: str = f"b.{self.id}.{sessionid}" - self.has_nftables_chain: bool = False - - @classmethod - def create_options(cls) -> NetworkOptions: - return NetworkOptions() - - def host_cmd( - self, - args: str, - env: dict[str, str] = None, - cwd: Path = None, - wait: bool = True, - shell: bool = False, - ) -> str: - """ - Runs a command that is used to configure and setup the network on the host - system and all configured distributed servers. - - :param args: command to run - :param env: environment to run command with - :param cwd: directory to run command in - :param wait: True to wait for status, False otherwise - :param shell: True to use shell, False otherwise - :return: combined stdout and stderr - :raises CoreCommandError: when a non-zero exit status occurs - """ - logger.debug("network node(%s) cmd", self.name) - output = utils.cmd(args, env, cwd, wait, shell) - self.session.distributed.execute(lambda x: x.remote_cmd(args, env, cwd, wait)) - return output - - def startup(self) -> None: - """ - Linux bridge startup logic. - - :return: nothing - :raises CoreCommandError: when there is a command exception - """ - self.net_client.create_bridge(self.brname) - if self.mtu > 0: - self.net_client.set_mtu(self.brname, self.mtu) - self.has_nftables_chain = False - self.up = True - nft_queue.start() - - def adopt_iface(self, iface: CoreInterface, name: str) -> None: - """ - Adopt interface and set it to use this bridge as master. - - :param iface: interface to adpopt - :param name: formal name for interface - :return: nothing - """ - iface.net_client.set_iface_master(self.brname, iface.name) - iface.set_config() - - def shutdown(self) -> None: - """ - Linux bridge shutdown logic. - - :return: nothing - """ - if not self.up: - return - nft_queue.stop() - try: - self.net_client.delete_bridge(self.brname) - if self.has_nftables_chain: - nft_queue.delete_table(self) - except CoreCommandError: - logging.exception("error during shutdown") - # removes veth pairs used for bridge-to-bridge connections - for iface in self.get_ifaces(): - iface.shutdown() - self.ifaces.clear() - self.linked.clear() - self.up = False - - def attach(self, iface: CoreInterface) -> None: - """ - Attach a network interface. - - :param iface: network interface to attach - :return: nothing - """ - super().attach(iface) - if self.up: - iface.net_client.set_iface_master(self.brname, iface.localname) - - def detach(self, iface: CoreInterface) -> None: - """ - Detach a network interface. - - :param iface: network interface to detach - :return: nothing - """ - super().detach(iface) - if self.up: - iface.net_client.delete_iface(self.brname, iface.localname) - - def is_linked(self, iface1: CoreInterface, iface2: CoreInterface) -> bool: - """ - Determine if the provided network interfaces are linked. 
- - :param iface1: interface one - :param iface2: interface two - :return: True if interfaces are linked, False otherwise - """ - # check if the network interfaces are attached to this network - if self.ifaces[iface1.net_id] != iface1: - raise ValueError(f"inconsistency for interface {iface1.name}") - if self.ifaces[iface2.net_id] != iface2: - raise ValueError(f"inconsistency for interface {iface2.name}") - try: - linked = self.linked[iface1][iface2] - except KeyError: - if self.policy == NetworkPolicy.ACCEPT: - linked = True - elif self.policy == NetworkPolicy.DROP: - linked = False - else: - raise Exception(f"unknown policy: {self.policy.value}") - self.linked[iface1][iface2] = linked - return linked - - def unlink(self, iface1: CoreInterface, iface2: CoreInterface) -> None: - """ - Unlink two interfaces, resulting in adding or removing filtering rules. - - :param iface1: interface one - :param iface2: interface two - :return: nothing - """ - with self.linked_lock: - if not self.is_linked(iface1, iface2): - return - self.linked[iface1][iface2] = False - nft_queue.update(self) - - def link(self, iface1: CoreInterface, iface2: CoreInterface) -> None: - """ - Link two interfaces together, resulting in adding or removing - filtering rules. - - :param iface1: interface one - :param iface2: interface two - :return: nothing - """ - with self.linked_lock: - if self.is_linked(iface1, iface2): - return - self.linked[iface1][iface2] = True - nft_queue.update(self) - - -class GreTapBridge(CoreNetwork): - """ - A network consisting of a bridge with a gretap device for tunneling to - another system. - """ - - def __init__( - self, - session: "Session", - remoteip: str = None, - _id: int = None, - name: str = None, - policy: NetworkPolicy = NetworkPolicy.ACCEPT, - localip: str = None, - ttl: int = 255, - key: int = None, - server: "DistributedServer" = None, - ) -> None: - """ - Create a GreTapBridge instance. - - :param session: core session instance - :param remoteip: remote address - :param _id: object id - :param name: object name - :param policy: network policy - :param localip: local address - :param ttl: ttl value - :param key: gre tap key - :param server: remote server node - will run on, default is None for localhost - """ - CoreNetwork.__init__(self, session, _id, name, server, policy) - if key is None: - key = self.session.id ^ self.id - self.grekey: int = key - self.localnum: Optional[int] = None - self.remotenum: Optional[int] = None - self.remoteip: Optional[str] = remoteip - self.localip: Optional[str] = localip - self.ttl: int = ttl - self.gretap: Optional[GreTap] = None - if self.remoteip is not None: - self.gretap = GreTap( - session, - remoteip, - key=self.grekey, - node=self, - localip=localip, - ttl=ttl, - mtu=self.mtu, - ) - - def startup(self) -> None: - """ - Creates a bridge and adds the gretap device to it. - - :return: nothing - """ - super().startup() - if self.gretap: - self.gretap.startup() - self.attach(self.gretap) - - def shutdown(self) -> None: - """ - Detach the gretap device and remove the bridge. - - :return: nothing - """ - if self.gretap: - self.detach(self.gretap) - self.gretap.shutdown() - self.gretap = None - super().shutdown() - - def add_ips(self, ips: list[str]) -> None: - """ - Set the remote tunnel endpoint. This is a one-time method for - creating the GreTap device, which requires the remoteip at startup. - The 1st address in the provided list is remoteip, 2nd optionally - specifies localip. 
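GreTapBridge defaults its GRE key to session id XOR node id and ultimately drives the LinuxNetClient.create_gretap call shown earlier. The sketch below rebuilds the same "ip link add ... type gretap" command line with that default key; all names, addresses, and ids here are made up.

from typing import Optional

def gretap_cmd(device: str, remote: str, local: Optional[str] = None,
               ttl: Optional[int] = None, key: Optional[int] = None) -> str:
    cmd = f"ip link add {device} type gretap remote {remote}"
    if local is not None:
        cmd += f" local {local}"
    if ttl is not None:
        cmd += f" ttl {ttl}"
    if key is not None:
        cmd += f" key {key}"
    return cmd

session_id, node_id = 0xCAFE, 7
# both ends derive the same key from the same session and node ids
print(gretap_cmd("gt.7", "10.0.0.2", local="10.0.0.1", ttl=255, key=session_id ^ node_id))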
- - :param ips: address list - :return: nothing - """ - if self.gretap: - raise CoreError(f"gretap already exists for {self.name}") - remoteip = ips[0].split("/")[0] - localip = None - if len(ips) > 1: - localip = ips[1].split("/")[0] - self.gretap = GreTap( - self.session, - remoteip, - key=self.grekey, - localip=localip, - ttl=self.ttl, - mtu=self.mtu, - ) - self.startup() - self.attach(self.gretap) - - def setkey(self, key: int, iface_data: InterfaceData) -> None: - """ - Set the GRE key used for the GreTap device. This needs to be set - prior to instantiating the GreTap device (before addrconfig). - - :param key: gre key - :param iface_data: interface data for setting up tunnel key - :return: nothing - """ - self.grekey = key - ips = iface_data.get_ips() - if ips: - self.add_ips(ips) - - -@dataclass -class CtrlNetOptions(NetworkOptions): - prefix: str = None - """ip4 network prefix to use for generating an address""" - updown_script: str = None - """script to execute during startup and shutdown""" - serverintf: str = None - """used to associate an interface with the control network bridge""" - assign_address: bool = True - """used to determine if a specific address should be assign using hostid""" - hostid: int = None - """used with assign address to """ - - -class CtrlNet(CoreNetwork): - """ - Control network functionality. - """ - - policy: NetworkPolicy = NetworkPolicy.ACCEPT - # base control interface index - CTRLIF_IDX_BASE: int = 99 - DEFAULT_PREFIX_LIST: list[str] = [ - "172.16.0.0/24 172.16.1.0/24 172.16.2.0/24 172.16.3.0/24 172.16.4.0/24", - "172.17.0.0/24 172.17.1.0/24 172.17.2.0/24 172.17.3.0/24 172.17.4.0/24", - "172.18.0.0/24 172.18.1.0/24 172.18.2.0/24 172.18.3.0/24 172.18.4.0/24", - "172.19.0.0/24 172.19.1.0/24 172.19.2.0/24 172.19.3.0/24 172.19.4.0/24", - ] - - def __init__( - self, - session: "Session", - _id: int = None, - name: str = None, - server: "DistributedServer" = None, - options: CtrlNetOptions = None, - ) -> None: - """ - Creates a CtrlNet instance. - - :param session: core session instance - :param _id: node id - :param name: node name - :param server: remote server node - will run on, default is None for localhost - :param options: node options for creation - """ - options = options or CtrlNetOptions() - super().__init__(session, _id, name, server, options) - self.prefix: netaddr.IPNetwork = netaddr.IPNetwork(options.prefix).cidr - self.hostid: Optional[int] = options.hostid - self.assign_address: bool = options.assign_address - self.updown_script: Optional[str] = options.updown_script - self.serverintf: Optional[str] = options.serverintf - - @classmethod - def create_options(cls) -> CtrlNetOptions: - return CtrlNetOptions() - - def add_addresses(self, index: int) -> None: - """ - Add addresses used for created control networks, - - :param index: starting address index - :return: nothing - """ - use_ovs = self.session.use_ovs() - address = self.prefix[index] - current = f"{address}/{self.prefix.prefixlen}" - net_client = get_net_client(use_ovs, utils.cmd) - net_client.create_address(self.brname, current) - servers = self.session.distributed.servers - for name in servers: - server = servers[name] - index -= 1 - address = self.prefix[index] - current = f"{address}/{self.prefix.prefixlen}" - net_client = get_net_client(use_ovs, server.remote_cmd) - net_client.create_address(self.brname, current) - - def startup(self) -> None: - """ - Startup functionality for the control network. 
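CtrlNet.add_addresses assigns control-network addresses by indexing into the configured prefix: the host takes index -2 by default and each distributed server takes the next lower index. A standalone look at that indexing with netaddr; the prefix value is only an example.

import netaddr

prefix = netaddr.IPNetwork("172.16.0.0/24").cidr
index = -2
host_addr = f"{prefix[index]}/{prefix.prefixlen}"    # 172.16.0.254/24 for the host bridge
index -= 1
server_addr = f"{prefix[index]}/{prefix.prefixlen}"  # 172.16.0.253/24 for the first distributed server
print(host_addr, server_addr)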
- - :return: nothing - :raises CoreCommandError: when there is a command exception - """ - if self.net_client.existing_bridges(self.id): - raise CoreError(f"old bridges exist for node: {self.id}") - - super().startup() - logger.info("added control network bridge: %s %s", self.brname, self.prefix) - - if self.hostid and self.assign_address: - self.add_addresses(self.hostid) - elif self.assign_address: - self.add_addresses(-2) - - if self.updown_script: - logger.info( - "interface %s updown script (%s startup) called", - self.brname, - self.updown_script, - ) - self.host_cmd(f"{self.updown_script} {self.brname} startup") - - if self.serverintf: - self.net_client.set_iface_master(self.brname, self.serverintf) - - def shutdown(self) -> None: - """ - Control network shutdown. - - :return: nothing - """ - if self.serverintf is not None: - try: - self.net_client.delete_iface(self.brname, self.serverintf) - except CoreCommandError: - logger.exception( - "error deleting server interface %s from bridge %s", - self.serverintf, - self.brname, - ) - - if self.updown_script is not None: - try: - logger.info( - "interface %s updown script (%s shutdown) called", - self.brname, - self.updown_script, - ) - self.host_cmd(f"{self.updown_script} {self.brname} shutdown") - except CoreCommandError: - logger.exception("error issuing shutdown script shutdown") - - super().shutdown() - - -class PtpNet(CoreNetwork): - """ - Peer to peer network node. - """ - - policy: NetworkPolicy = NetworkPolicy.ACCEPT - - def attach(self, iface: CoreInterface) -> None: - """ - Attach a network interface, but limit attachment to two interfaces. - - :param iface: network interface - :return: nothing - """ - if len(self.ifaces) >= 2: - raise CoreError("ptp links support at most 2 network interfaces") - super().attach(iface) - - def startup(self) -> None: - """ - Startup for a p2p node, that disables mac learning after normal startup. - - :return: nothing - """ - super().startup() - self.net_client.set_mac_learning(self.brname, LEARNING_DISABLED) - - -class SwitchNode(CoreNetwork): - """ - Provides switch functionality within a core node. - """ - - policy: NetworkPolicy = NetworkPolicy.ACCEPT - - -class HubNode(CoreNetwork): - """ - Provides hub functionality within a core node, forwards packets to all bridge - ports by turning off MAC address learning. - """ - - policy: NetworkPolicy = NetworkPolicy.ACCEPT - - def startup(self) -> None: - """ - Startup for a hub node, that disables mac learning after normal startup. - - :return: nothing - """ - super().startup() - self.net_client.set_mac_learning(self.brname, LEARNING_DISABLED) - - -class WlanNode(CoreNetwork): - """ - Provides wireless lan functionality within a core node. - """ - - policy: NetworkPolicy = NetworkPolicy.DROP - - def __init__( - self, - session: "Session", - _id: int = None, - name: str = None, - server: "DistributedServer" = None, - options: NetworkOptions = None, - ) -> None: - """ - Create a WlanNode instance. 
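PtpNet and HubNode turn an ordinary Linux bridge into hub-like, flood-to-all-ports behavior by disabling MAC learning (ageing_time 0), as noted above. A one-off equivalent using the ip command directly; the bridge name is hypothetical and the command needs root.

import subprocess

LEARNING_DISABLED = 0

def disable_mac_learning(bridge: str) -> None:
    # with learning disabled the bridge floods frames out every port, like a hub
    subprocess.run(
        f"ip link set {bridge} type bridge ageing_time {LEARNING_DISABLED}",
        shell=True, check=True,
    )

# disable_mac_learning("b.demo")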
- - :param session: core session instance - :param _id: node id - :param name: node name - :param server: remote server node - will run on, default is None for localhost - :param options: options to create node with - """ - super().__init__(session, _id, name, server, options) - # wireless and mobility models (BasicRangeModel, Ns2WaypointMobility) - self.wireless_model: Optional[WirelessModel] = None - self.mobility: Optional[WayPointMobility] = None - - def startup(self) -> None: - """ - Startup for a wlan node, that disables mac learning after normal startup. - - :return: nothing - """ - super().startup() - nft_queue.update(self) - - def attach(self, iface: CoreInterface) -> None: - """ - Attach a network interface. - - :param iface: network interface - :return: nothing - """ - super().attach(iface) - if self.wireless_model: - iface.poshook = self.wireless_model.position_callback - iface.setposition() - - def setmodel(self, wireless_model: type["WirelessModel"], config: dict[str, str]): - """ - Sets the mobility and wireless model. - - :param wireless_model: wireless model to set to - :param config: configuration for model being set - :return: nothing - """ - logger.debug("node(%s) setting model: %s", self.name, wireless_model.name) - if wireless_model.config_type == RegisterTlvs.WIRELESS: - self.wireless_model = wireless_model(session=self.session, _id=self.id) - for iface in self.get_ifaces(): - iface.poshook = self.wireless_model.position_callback - iface.setposition() - self.updatemodel(config) - elif wireless_model.config_type == RegisterTlvs.MOBILITY: - self.mobility = wireless_model(session=self.session, _id=self.id) - self.mobility.update_config(config) - - def update_mobility(self, config: dict[str, str]) -> None: - if not self.mobility: - raise CoreError(f"no mobility set to update for node({self.name})") - self.mobility.update_config(config) - - def updatemodel(self, config: dict[str, str]) -> None: - if not self.wireless_model: - raise CoreError(f"no model set to update for node({self.name})") - logger.debug( - "node(%s) updating model(%s): %s", self.id, self.wireless_model.name, config - ) - self.wireless_model.update_config(config) - for iface in self.get_ifaces(): - iface.setposition() - - def links(self, flags: MessageFlags = MessageFlags.NONE) -> list[LinkData]: - """ - Retrieve all link data. - - :param flags: message flags - :return: list of link data - """ - if self.wireless_model: - return self.wireless_model.links(flags) - else: - return [] - - -class TunnelNode(GreTapBridge): - """ - Provides tunnel functionality in a core node. - """ - - policy: NetworkPolicy = NetworkPolicy.ACCEPT diff --git a/daemon/core/nodes/physical.py b/daemon/core/nodes/physical.py deleted file mode 100644 index 30640fd8..00000000 --- a/daemon/core/nodes/physical.py +++ /dev/null @@ -1,289 +0,0 @@ -""" -PhysicalNode class for including real systems in the emulated network. 
-""" - -import logging -from pathlib import Path -from typing import TYPE_CHECKING, Optional - -import netaddr - -from core.emulator.data import InterfaceData, LinkOptions -from core.emulator.distributed import DistributedServer -from core.emulator.enumerations import TransportType -from core.errors import CoreCommandError, CoreError -from core.executables import BASH, TEST, UMOUNT -from core.nodes.base import CoreNode, CoreNodeBase, CoreNodeOptions, NodeOptions -from core.nodes.interface import CoreInterface - -logger = logging.getLogger(__name__) - -if TYPE_CHECKING: - from core.emulator.session import Session - - -class Rj45Node(CoreNodeBase): - """ - RJ45Node is a physical interface on the host linked to the emulated - network. - """ - - def __init__( - self, - session: "Session", - _id: int = None, - name: str = None, - server: DistributedServer = None, - options: NodeOptions = None, - ) -> None: - """ - Create an RJ45Node instance. - - :param session: core session instance - :param _id: node id - :param name: node name - :param server: remote server node - will run on, default is None for localhost - :param options: option to create node with - """ - super().__init__(session, _id, name, server, options) - self.iface: CoreInterface = CoreInterface( - self.iface_id, name, name, session.use_ovs(), node=self, server=server - ) - self.iface.transport_type = TransportType.RAW - self.old_up: bool = False - self.old_addrs: list[tuple[str, Optional[str]]] = [] - - def startup(self) -> None: - """ - Set the interface in the up state. - - :return: nothing - :raises CoreCommandError: when there is a command exception - """ - # interface will also be marked up during net.attach() - self.save_state() - self.net_client.device_up(self.iface.localname) - self.up = True - - def shutdown(self) -> None: - """ - Bring the interface down. Remove any addresses and queuing - disciplines. - - :return: nothing - """ - if not self.up: - return - localname = self.iface.localname - self.net_client.device_down(localname) - self.net_client.device_flush(localname) - try: - self.net_client.delete_tc(localname) - except CoreCommandError: - pass - self.up = False - self.restore_state() - - def path_exists(self, path: str) -> bool: - """ - Determines if a file or directory path exists. - - :param path: path to file or directory - :return: True if path exists, False otherwise - """ - try: - self.host_cmd(f"{TEST} -e {path}") - return True - except CoreCommandError: - return False - - def create_iface( - self, iface_data: InterfaceData = None, options: LinkOptions = None - ) -> CoreInterface: - with self.lock: - if self.iface.id in self.ifaces: - raise CoreError( - f"rj45({self.name}) nodes support at most 1 network interface" - ) - if iface_data and iface_data.mtu is not None: - self.iface.mtu = iface_data.mtu - self.iface.ip4s.clear() - self.iface.ip6s.clear() - for ip in iface_data.get_ips(): - self.iface.add_ip(ip) - self.ifaces[self.iface.id] = self.iface - if self.up: - for ip in self.iface.ips(): - self.net_client.create_address(self.iface.name, str(ip)) - return self.iface - - def adopt_iface(self, iface: CoreInterface, name: str) -> None: - raise CoreError(f"rj45({self.name}) does not support adopt interface") - - def delete_iface(self, iface_id: int) -> None: - """ - Delete a network interface. 
- - :param iface_id: interface index to delete - :return: nothing - """ - self.get_iface(iface_id) - self.ifaces.pop(iface_id) - self.shutdown() - - def get_iface(self, iface_id: int) -> CoreInterface: - if iface_id not in self.ifaces: - raise CoreError(f"node({self.name}) interface({iface_id}) does not exist") - return self.iface - - def get_iface_id(self, iface: CoreInterface) -> Optional[int]: - """ - Retrieve network interface index. - - :param iface: network interface to retrieve - index for - :return: interface index, None otherwise - """ - if iface is not self.iface: - raise CoreError(f"node({self.name}) does not have interface({iface.name})") - return self.iface.id - - def save_state(self) -> None: - """ - Save the addresses and other interface state before using the - interface for emulation purposes. - - :return: nothing - :raises CoreCommandError: when there is a command exception - """ - # TODO: save/restore the PROMISC flag - self.old_up = False - self.old_addrs: list[tuple[str, Optional[str]]] = [] - localname = self.iface.localname - output = self.net_client.address_show(localname) - for line in output.split("\n"): - items = line.split() - if len(items) < 2: - continue - if items[1] == f"{localname}:": - flags = items[2][1:-1].split(",") - if "UP" in flags: - self.old_up = True - elif items[0] == "inet": - broadcast = None - if items[2] == "brd": - broadcast = items[3] - self.old_addrs.append((items[1], broadcast)) - elif items[0] == "inet6": - if items[1][:4] == "fe80": - continue - self.old_addrs.append((items[1], None)) - logger.info("saved rj45 state: addrs(%s) up(%s)", self.old_addrs, self.old_up) - - def restore_state(self) -> None: - """ - Restore the addresses and other interface state after using it. - - :return: nothing - :raises CoreCommandError: when there is a command exception - """ - localname = self.iface.localname - logger.info("restoring rj45 state: %s", localname) - for addr in self.old_addrs: - self.net_client.create_address(localname, addr[0], addr[1]) - if self.old_up: - self.net_client.device_up(localname) - - def setposition(self, x: float = None, y: float = None, z: float = None) -> None: - """ - Uses setposition from both parent classes. 
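Rj45Node.save_state scrapes "ip address show" output to remember the interface's addresses and UP flag before CORE takes the device over, so restore_state can put them back afterwards. A standalone version of that parsing over canned output; the sample text is made up but follows the iproute2 layout.

from typing import Optional

def parse_iface_state(localname: str, output: str) -> tuple[bool, list[tuple[str, Optional[str]]]]:
    was_up = False
    addrs: list[tuple[str, Optional[str]]] = []
    for line in output.split("\n"):
        items = line.split()
        if len(items) < 2:
            continue
        if items[1] == f"{localname}:":
            # flags look like <BROADCAST,MULTICAST,UP,LOWER_UP>
            flags = items[2][1:-1].split(",")
            was_up = "UP" in flags
        elif items[0] == "inet":
            broadcast = items[3] if len(items) > 3 and items[2] == "brd" else None
            addrs.append((items[1], broadcast))
        elif items[0] == "inet6" and not items[1].startswith("fe80"):
            addrs.append((items[1], None))
    return was_up, addrs

sample = (
    "2: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500\n"
    "    inet 192.168.1.5/24 brd 192.168.1.255 scope global eth1\n"
    "    inet6 fe80::1/64 scope link\n"
)
print(parse_iface_state("eth1", sample))  # (True, [("192.168.1.5/24", "192.168.1.255")])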
- - :param x: x position - :param y: y position - :param z: z position - :return: True if position changed, False otherwise - """ - super().setposition(x, y, z) - self.iface.setposition() - - def termcmdstring(self, sh: str) -> str: - raise CoreError("rj45 does not support terminal commands") - - def cmd(self, args: str, wait: bool = True, shell: bool = False) -> str: - raise CoreError("rj45 does not support cmds") - - def create_dir(self, dir_path: Path) -> None: - raise CoreError("rj45 does not support creating directories") - - def create_file(self, file_path: Path, contents: str, mode: int = 0o644) -> None: - raise CoreError("rj45 does not support creating files") - - def copy_file(self, src_path: Path, dst_path: Path, mode: int = None) -> None: - raise CoreError("rj45 does not support copying files") - - -class PhysicalNode(CoreNode): - def __init__( - self, - session: "Session", - _id: int = None, - name: str = None, - server: DistributedServer = None, - options: CoreNodeOptions = None, - ) -> None: - if not self.server: - raise CoreError("physical nodes must be assigned to a remote server") - super().__init__(session, _id, name, server, options) - - def startup(self) -> None: - with self.lock: - self.makenodedir() - self.up = True - - def shutdown(self) -> None: - if not self.up: - return - with self.lock: - while self._mounts: - _, target_path = self._mounts.pop(-1) - self.umount(target_path) - for iface in self.get_ifaces(): - iface.shutdown() - self.rmnodedir() - - def create_cmd(self, args: str, shell: bool = False) -> str: - if shell: - args = f'{BASH} -c "{args}"' - return args - - def adopt_iface(self, iface: CoreInterface, name: str) -> None: - # validate iface belongs to node and get id - iface_id = self.get_iface_id(iface) - if iface_id == -1: - raise CoreError(f"adopting unknown iface({iface.name})") - # turn checksums off - self.node_net_client.checksums_off(iface.name) - # retrieve flow id for container - iface.flow_id = self.node_net_client.get_ifindex(iface.name) - logger.debug("interface flow index: %s - %s", iface.name, iface.flow_id) - if iface.mac: - self.net_client.device_mac(iface.name, str(iface.mac)) - # set all addresses - for ip in iface.ips(): - # ipv4 check - broadcast = None - if netaddr.valid_ipv4(ip): - broadcast = "+" - self.node_net_client.create_address(iface.name, str(ip), broadcast) - # configure iface options - iface.set_config() - # set iface up - self.net_client.device_up(iface.name) - - def umount(self, target_path: Path) -> None: - logger.info("unmounting '%s'", target_path) - try: - self.host_cmd(f"{UMOUNT} -l {target_path}", cwd=self.directory) - except CoreCommandError: - logger.exception("unmounting failed for %s", target_path) diff --git a/daemon/core/nodes/podman.py b/daemon/core/nodes/podman.py deleted file mode 100644 index 00ef24fc..00000000 --- a/daemon/core/nodes/podman.py +++ /dev/null @@ -1,271 +0,0 @@ -import json -import logging -import shlex -from dataclasses import dataclass, field -from pathlib import Path -from tempfile import NamedTemporaryFile -from typing import TYPE_CHECKING - -from core.emulator.distributed import DistributedServer -from core.errors import CoreCommandError, CoreError -from core.executables import BASH -from core.nodes.base import CoreNode, CoreNodeOptions - -logger = logging.getLogger(__name__) - -if TYPE_CHECKING: - from core.emulator.session import Session - -PODMAN: str = "podman" - - -@dataclass -class PodmanOptions(CoreNodeOptions): - image: str = "ubuntu" - """image used when creating container""" 
- binds: list[tuple[str, str]] = field(default_factory=list) - """bind mount source and destinations to setup within container""" - volumes: list[tuple[str, str, bool, bool]] = field(default_factory=list) - """ - volume mount source, destination, unique, delete to setup within container - - unique is True for node unique volume naming - delete is True for deleting volume mount during shutdown - """ - - -@dataclass -class VolumeMount: - src: str - """volume mount name""" - dst: str - """volume mount destination directory""" - unique: bool = True - """True to create a node unique prefixed name for this volume""" - delete: bool = True - """True to delete the volume during shutdown""" - path: str = None - """path to the volume on the host""" - - -class PodmanNode(CoreNode): - """ - Provides logic for creating a Podman based node. - """ - - def __init__( - self, - session: "Session", - _id: int = None, - name: str = None, - server: DistributedServer = None, - options: PodmanOptions = None, - ) -> None: - """ - Create a PodmanNode instance. - - :param session: core session instance - :param _id: node id - :param name: node name - :param server: remote server node - will run on, default is None for localhost - :param options: options for creating node - """ - options = options or PodmanOptions() - super().__init__(session, _id, name, server, options) - self.image: str = options.image - self.binds: list[tuple[str, str]] = options.binds - self.volumes: dict[str, VolumeMount] = {} - for src, dst, unique, delete in options.volumes: - src_name = self._unique_name(src) if unique else src - self.volumes[src] = VolumeMount(src_name, dst, unique, delete) - - @classmethod - def create_options(cls) -> PodmanOptions: - """ - Return default creation options, which can be used during node creation. - - :return: podman options - """ - return PodmanOptions() - - def create_cmd(self, args: str, shell: bool = False) -> str: - """ - Create command used to run commands within the context of a node. - - :param args: command arguments - :param shell: True to run shell like, False otherwise - :return: node command - """ - if shell: - args = f"{BASH} -c {shlex.quote(args)}" - return f"{PODMAN} exec {self.name} {args}" - - def _unique_name(self, name: str) -> str: - """ - Creates a session/node unique prefixed name for the provided input. - - :param name: name to make unique - :return: unique session/node prefixed name - """ - return f"{self.session.id}.{self.id}.{name}" - - def alive(self) -> bool: - """ - Check if the node is alive. - - :return: True if node is alive, False otherwise - """ - try: - running = self.host_cmd( - f"{PODMAN} inspect -f '{{{{.State.Running}}}}' {self.name}" - ) - return json.loads(running) - except CoreCommandError: - return False - - def startup(self) -> None: - """ - Create a podman container instance for the specified image. 
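PodmanNode.create_cmd rewrites node commands as "podman exec" invocations, quoting shell-style commands with shlex so they survive the extra layer of execution. A standalone sketch of that rewriting; the container name below is hypothetical.

import shlex

PODMAN = "podman"
BASH = "bash"

def podman_exec_cmd(container: str, args: str, shell: bool = False) -> str:
    if shell:
        # wrap shell syntax so quoting and redirection happen inside the container
        args = f"{BASH} -c {shlex.quote(args)}"
    return f"{PODMAN} exec {container} {args}"

print(podman_exec_cmd("core.1.n1", "ip addr show"))
print(podman_exec_cmd("core.1.n1", "echo $HOSTNAME > /tmp/name", shell=True))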
- - :return: nothing - """ - with self.lock: - if self.up: - raise CoreError(f"starting node({self.name}) that is already up") - # create node directory - self.makenodedir() - # setup commands for creating bind/volume mounts - binds = "" - for src, dst in self.binds: - binds += f"--mount type=bind,source={src},target={dst} " - volumes = "" - for volume in self.volumes.values(): - volumes += ( - f"--mount type=volume," f"source={volume.src},target={volume.dst} " - ) - # normalize hostname - hostname = self.name.replace("_", "-") - # create container and retrieve the created containers PID - self.host_cmd( - f"{PODMAN} run -td --init --net=none --hostname {hostname} " - f"--name {self.name} --sysctl net.ipv6.conf.all.disable_ipv6=0 " - f"{binds} {volumes} " - f"--privileged {self.image} tail -f /dev/null" - ) - # retrieve pid and process environment for use in nsenter commands - self.pid = self.host_cmd( - f"{PODMAN} inspect -f '{{{{.State.Pid}}}}' {self.name}" - ) - # setup symlinks for bind and volume mounts within - for src, dst in self.binds: - link_path = self.host_path(Path(dst), True) - self.host_cmd(f"ln -s {src} {link_path}") - for volume in self.volumes.values(): - volume.path = self.host_cmd( - f"{PODMAN} volume inspect -f '{{{{.Mountpoint}}}}' {volume.src}" - ) - link_path = self.host_path(Path(volume.dst), True) - self.host_cmd(f"ln -s {volume.path} {link_path}") - logger.debug("node(%s) pid: %s", self.name, self.pid) - self.up = True - - def shutdown(self) -> None: - """ - Shutdown logic. - - :return: nothing - """ - # nothing to do if node is not up - if not self.up: - return - with self.lock: - self.ifaces.clear() - self.host_cmd(f"{PODMAN} rm -f {self.name}") - for volume in self.volumes.values(): - if volume.delete: - self.host_cmd(f"{PODMAN} volume rm {volume.src}") - self.up = False - - def termcmdstring(self, sh: str = "/bin/sh") -> str: - """ - Create a terminal command string. - - :param sh: shell to execute command in - :return: str - """ - terminal = f"{PODMAN} exec -it {self.name} {sh}" - if self.server is None: - return terminal - else: - return f"ssh -X -f {self.server.host} xterm -e {terminal}" - - def create_dir(self, dir_path: Path) -> None: - """ - Create a private directory. - - :param dir_path: path to create - :return: nothing - """ - logger.debug("creating node dir: %s", dir_path) - self.cmd(f"mkdir -p {dir_path}") - - def mount(self, src_path: str, target_path: str) -> None: - """ - Create and mount a directory. - - :param src_path: source directory to mount - :param target_path: target directory to create - :return: nothing - :raises CoreCommandError: when a non-zero exit status occurs - """ - logger.debug("mounting source(%s) target(%s)", src_path, target_path) - raise Exception("not supported") - - def create_file(self, file_path: Path, contents: str, mode: int = 0o644) -> None: - """ - Create a node file with a given mode. 
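Right after "podman run", the startup logic above asks podman for the container's PID so later commands can be aimed at its namespaces. A minimal standalone equivalent; podman must be installed and the container name is hypothetical.

import subprocess

def podman_pid(container: str) -> str:
    result = subprocess.run(
        f"podman inspect -f '{{{{.State.Pid}}}}' {container}",
        shell=True, check=True, capture_output=True, text=True,
    )
    return result.stdout.strip()

# print(podman_pid("core.1.n1"))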
- - :param file_path: name of file to create - :param contents: contents of file - :param mode: mode for file - :return: nothing - """ - logger.debug("node(%s) create file(%s) mode(%o)", self.name, file_path, mode) - temp = NamedTemporaryFile(delete=False) - temp.write(contents.encode()) - temp.close() - temp_path = Path(temp.name) - directory = file_path.parent - if str(directory) != ".": - self.cmd(f"mkdir -m {0o755:o} -p {directory}") - if self.server is not None: - self.server.remote_put(temp_path, temp_path) - self.host_cmd(f"{PODMAN} cp {temp_path} {self.name}:{file_path}") - self.cmd(f"chmod {mode:o} {file_path}") - if self.server is not None: - self.host_cmd(f"rm -f {temp_path}") - temp_path.unlink() - - def copy_file(self, src_path: Path, dst_path: Path, mode: int = None) -> None: - """ - Copy a file to a node, following symlinks and preserving metadata. - Change file mode if specified. - - :param dst_path: file name to copy file to - :param src_path: file to copy - :param mode: mode to copy to - :return: nothing - """ - logger.info( - "node file copy file(%s) source(%s) mode(%o)", dst_path, src_path, mode or 0 - ) - self.cmd(f"mkdir -p {dst_path.parent}") - if self.server: - temp = NamedTemporaryFile(delete=False) - temp_path = Path(temp.name) - src_path = temp_path - self.server.remote_put(src_path, temp_path) - self.host_cmd(f"{PODMAN} cp {src_path} {self.name}:{dst_path}") - if mode is not None: - self.cmd(f"chmod {mode:o} {dst_path}") diff --git a/daemon/core/nodes/wireless.py b/daemon/core/nodes/wireless.py deleted file mode 100644 index 51a98917..00000000 --- a/daemon/core/nodes/wireless.py +++ /dev/null @@ -1,345 +0,0 @@ -""" -Defines a wireless node that allows programmatic link connectivity and -configuration between pairs of nodes. 
-""" -import copy -import logging -import math -import secrets -from dataclasses import dataclass -from typing import TYPE_CHECKING - -from core.config import ConfigBool, ConfigFloat, ConfigInt, Configuration -from core.emulator.data import LinkData, LinkOptions -from core.emulator.enumerations import LinkTypes, MessageFlags -from core.errors import CoreError -from core.executables import NFTABLES -from core.nodes.base import CoreNetworkBase, NodeOptions -from core.nodes.interface import CoreInterface - -if TYPE_CHECKING: - from core.emulator.session import Session - from core.emulator.distributed import DistributedServer - -logger = logging.getLogger(__name__) -CONFIG_ENABLED: bool = True -CONFIG_RANGE: float = 400.0 -CONFIG_LOSS_RANGE: float = 300.0 -CONFIG_LOSS_FACTOR: float = 1.0 -CONFIG_LOSS: float = 0.0 -CONFIG_DELAY: int = 5000 -CONFIG_BANDWIDTH: int = 54_000_000 -CONFIG_JITTER: int = 0 -KEY_ENABLED: str = "movement" -KEY_RANGE: str = "max-range" -KEY_BANDWIDTH: str = "bandwidth" -KEY_DELAY: str = "delay" -KEY_JITTER: str = "jitter" -KEY_LOSS_RANGE: str = "loss-range" -KEY_LOSS_FACTOR: str = "loss-factor" -KEY_LOSS: str = "loss" - - -def calc_distance( - point1: tuple[float, float, float], point2: tuple[float, float, float] -) -> float: - a = point1[0] - point2[0] - b = point1[1] - point2[1] - c = 0 - if point1[2] is not None and point2[2] is not None: - c = point1[2] - point2[2] - return math.hypot(math.hypot(a, b), c) - - -def get_key(node1_id: int, node2_id: int) -> tuple[int, int]: - return (node1_id, node2_id) if node1_id < node2_id else (node2_id, node1_id) - - -@dataclass -class WirelessLink: - bridge1: str - bridge2: str - iface: CoreInterface - linked: bool - label: str = None - - -class WirelessNode(CoreNetworkBase): - options: list[Configuration] = [ - ConfigBool( - id=KEY_ENABLED, default="1" if CONFIG_ENABLED else "0", label="Enabled?" 
- ), - ConfigFloat( - id=KEY_RANGE, default=str(CONFIG_RANGE), label="Max Range (pixels)" - ), - ConfigInt( - id=KEY_BANDWIDTH, default=str(CONFIG_BANDWIDTH), label="Bandwidth (bps)" - ), - ConfigInt(id=KEY_DELAY, default=str(CONFIG_DELAY), label="Delay (usec)"), - ConfigInt(id=KEY_JITTER, default=str(CONFIG_JITTER), label="Jitter (usec)"), - ConfigFloat( - id=KEY_LOSS_RANGE, - default=str(CONFIG_LOSS_RANGE), - label="Loss Start Range (pixels)", - ), - ConfigFloat( - id=KEY_LOSS_FACTOR, default=str(CONFIG_LOSS_FACTOR), label="Loss Factor" - ), - ConfigFloat(id=KEY_LOSS, default=str(CONFIG_LOSS), label="Loss Initial"), - ] - devices: set[str] = set() - - @classmethod - def add_device(cls) -> str: - while True: - name = f"we{secrets.token_hex(6)}" - if name not in cls.devices: - cls.devices.add(name) - break - return name - - @classmethod - def delete_device(cls, name: str) -> None: - cls.devices.discard(name) - - def __init__( - self, - session: "Session", - _id: int, - name: str, - server: "DistributedServer" = None, - options: NodeOptions = None, - ): - super().__init__(session, _id, name, server, options) - self.bridges: dict[int, tuple[CoreInterface, str]] = {} - self.links: dict[tuple[int, int], WirelessLink] = {} - self.position_enabled: bool = CONFIG_ENABLED - self.bandwidth: int = CONFIG_BANDWIDTH - self.delay: int = CONFIG_DELAY - self.jitter: int = CONFIG_JITTER - self.max_range: float = CONFIG_RANGE - self.loss_initial: float = CONFIG_LOSS - self.loss_range: float = CONFIG_LOSS_RANGE - self.loss_factor: float = CONFIG_LOSS_FACTOR - - def startup(self) -> None: - if self.up: - return - self.up = True - - def shutdown(self) -> None: - while self.bridges: - _, (_, bridge_name) = self.bridges.popitem() - self.net_client.delete_bridge(bridge_name) - self.host_cmd(f"{NFTABLES} delete table bridge {bridge_name}") - while self.links: - _, link = self.links.popitem() - link.iface.shutdown() - self.up = False - - def attach(self, iface: CoreInterface) -> None: - super().attach(iface) - logging.info("attaching node(%s) iface(%s)", iface.node.name, iface.name) - if self.up: - # create node unique bridge - bridge_name = f"wb{iface.node.id}.{self.id}.{self.session.id}" - self.net_client.create_bridge(bridge_name) - # setup initial bridge rules - self.host_cmd(f'{NFTABLES} "add table bridge {bridge_name}"') - self.host_cmd( - f"{NFTABLES} " - f"'add chain bridge {bridge_name} forward {{type filter hook " - f"forward priority -1; policy drop;}}'" - ) - self.host_cmd( - f"{NFTABLES} " - f"'add rule bridge {bridge_name} forward " - f"ibriport != {bridge_name} accept'" - ) - # associate node iface with bridge - iface.net_client.set_iface_master(bridge_name, iface.localname) - # assign position callback, when enabled - if self.position_enabled: - iface.poshook = self.position_callback - # save created bridge - self.bridges[iface.node.id] = (iface, bridge_name) - - def post_startup(self) -> None: - routes = {} - for node_id, (iface, bridge_name) in self.bridges.items(): - for onode_id, (oiface, obridge_name) in self.bridges.items(): - if node_id == onode_id: - continue - if node_id < onode_id: - node1, node2 = iface.node, oiface.node - bridge1, bridge2 = bridge_name, obridge_name - else: - node1, node2 = oiface.node, iface.node - bridge1, bridge2 = obridge_name, bridge_name - key = (node1.id, node2.id) - if key in self.links: - continue - # create node to node link - name1 = self.add_device() - name2 = self.add_device() - link_iface = CoreInterface(0, name1, name2, self.session.use_ovs()) - 
link_iface.startup() - link = WirelessLink(bridge1, bridge2, link_iface, False) - self.links[key] = link - # track bridge routes - node1_routes = routes.setdefault(node1.id, set()) - node1_routes.add(name1) - node2_routes = routes.setdefault(node2.id, set()) - node2_routes.add(name2) - if self.position_enabled: - link.linked = True - # assign ifaces to respective bridges - self.net_client.set_iface_master(bridge1, link_iface.name) - self.net_client.set_iface_master(bridge2, link_iface.localname) - # calculate link data - self.calc_link(iface, oiface) - for node_id, ifaces in routes.items(): - iface, bridge_name = self.bridges[node_id] - ifaces = ",".join(ifaces) - # out routes - self.host_cmd( - f"{NFTABLES} " - f'"add rule bridge {bridge_name} forward ' - f"iif {iface.localname} oif {{{ifaces}}} " - f'accept"' - ) - # in routes - self.host_cmd( - f"{NFTABLES} " - f'"add rule bridge {bridge_name} forward ' - f"iif {{{ifaces}}} oif {iface.localname} " - f'accept"' - ) - - def link_control(self, node1_id: int, node2_id: int, linked: bool) -> None: - key = get_key(node1_id, node2_id) - link = self.links.get(key) - if not link: - raise CoreError(f"invalid node links node1({node1_id}) node2({node2_id})") - bridge1, bridge2 = link.bridge1, link.bridge2 - iface = link.iface - if not link.linked and linked: - link.linked = True - self.net_client.set_iface_master(bridge1, iface.name) - self.net_client.set_iface_master(bridge2, iface.localname) - self.send_link(key[0], key[1], MessageFlags.ADD, link.label) - elif link.linked and not linked: - link.linked = False - self.net_client.delete_iface(bridge1, iface.name) - self.net_client.delete_iface(bridge2, iface.localname) - self.send_link(key[0], key[1], MessageFlags.DELETE, link.label) - - def link_config( - self, node1_id: int, node2_id: int, options1: LinkOptions, options2: LinkOptions - ) -> None: - key = get_key(node1_id, node2_id) - link = self.links.get(key) - if not link: - raise CoreError(f"invalid node links node1({node1_id}) node2({node2_id})") - iface = link.iface - has_netem = iface.has_netem - iface.options.update(options1) - iface.set_config() - name, localname = iface.name, iface.localname - iface.name, iface.localname = localname, name - iface.options.update(options2) - iface.has_netem = has_netem - iface.set_config() - iface.name, iface.localname = name, localname - if options1 == options2: - link.label = f"{options1.loss:.2f}%/{options1.delay}us" - else: - link.label = ( - f"({options1.loss:.2f}%/{options1.delay}us) " - f"({options2.loss:.2f}%/{options2.delay}us)" - ) - self.send_link(key[0], key[1], MessageFlags.NONE, link.label) - - def send_link( - self, - node1_id: int, - node2_id: int, - message_type: MessageFlags, - label: str = None, - ) -> None: - """ - Broadcasts out a wireless link/unlink message. 
- - :param node1_id: first node in link - :param node2_id: second node in link - :param message_type: type of link message to send - :param label: label to display for link - :return: nothing - """ - color = self.session.get_link_color(self.id) - link_data = LinkData( - message_type=message_type, - type=LinkTypes.WIRELESS, - node1_id=node1_id, - node2_id=node2_id, - network_id=self.id, - color=color, - label=label, - ) - self.session.broadcast_link(link_data) - - def position_callback(self, iface: CoreInterface) -> None: - for oiface, bridge_name in self.bridges.values(): - if iface == oiface: - continue - self.calc_link(iface, oiface) - - def calc_link(self, iface1: CoreInterface, iface2: CoreInterface) -> None: - key = get_key(iface1.node.id, iface2.node.id) - link = self.links.get(key) - point1 = iface1.node.position.get() - point2 = iface2.node.position.get() - distance = calc_distance(point1, point2) - if distance >= self.max_range: - if link.linked: - self.link_control(iface1.node.id, iface2.node.id, False) - else: - if not link.linked: - self.link_control(iface1.node.id, iface2.node.id, True) - loss_distance = max(distance - self.loss_range, 0.0) - max_distance = max(self.max_range - self.loss_range, 0.0) - loss = min((loss_distance / max_distance) * 100.0 * self.loss_factor, 100.0) - loss = max(self.loss_initial, loss) - options = LinkOptions( - loss=loss, - delay=self.delay, - bandwidth=self.bandwidth, - jitter=self.jitter, - ) - self.link_config(iface1.node.id, iface2.node.id, options, options) - - def adopt_iface(self, iface: CoreInterface, name: str) -> None: - raise CoreError(f"{type(self)} does not support adopt interface") - - def get_config(self) -> dict[str, Configuration]: - config = {x.id: x for x in copy.copy(self.options)} - config[KEY_ENABLED].default = "1" if self.position_enabled else "0" - config[KEY_RANGE].default = str(self.max_range) - config[KEY_LOSS_RANGE].default = str(self.loss_range) - config[KEY_LOSS_FACTOR].default = str(self.loss_factor) - config[KEY_LOSS].default = str(self.loss_initial) - config[KEY_BANDWIDTH].default = str(self.bandwidth) - config[KEY_DELAY].default = str(self.delay) - config[KEY_JITTER].default = str(self.jitter) - return config - - def set_config(self, config: dict[str, str]) -> None: - logger.info("wireless config: %s", config) - self.position_enabled = config[KEY_ENABLED] == "1" - self.max_range = float(config[KEY_RANGE]) - self.loss_range = float(config[KEY_LOSS_RANGE]) - self.loss_factor = float(config[KEY_LOSS_FACTOR]) - self.loss_initial = float(config[KEY_LOSS]) - self.bandwidth = int(config[KEY_BANDWIDTH]) - self.delay = int(config[KEY_DELAY]) - self.jitter = int(config[KEY_JITTER]) diff --git a/daemon/core/configservices/frrservices/__init__.py b/daemon/core/phys/__init__.py similarity index 100% rename from daemon/core/configservices/frrservices/__init__.py rename to daemon/core/phys/__init__.py diff --git a/daemon/core/phys/pnodes.py b/daemon/core/phys/pnodes.py new file mode 100644 index 00000000..08b892e7 --- /dev/null +++ b/daemon/core/phys/pnodes.py @@ -0,0 +1,245 @@ +""" +PhysicalNode class for including real systems in the emulated network. 
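WirelessNode.calc_link above maps node distance to link loss: no added loss until the loss range, then a linear ramp scaled by the loss factor up to the maximum range, floored by the initial loss, with the link cut entirely beyond the maximum range. The same math pulled out into standalone functions for inspection; the default values mirror the CONFIG_* constants at the top of wireless.py.

import math

def calc_distance(p1: tuple[float, float, float], p2: tuple[float, float, float]) -> float:
    a, b = p1[0] - p2[0], p1[1] - p2[1]
    c = (p1[2] - p2[2]) if p1[2] is not None and p2[2] is not None else 0
    return math.hypot(math.hypot(a, b), c)

def calc_loss(distance: float, max_range: float = 400.0, loss_range: float = 300.0,
              loss_factor: float = 1.0, loss_initial: float = 0.0) -> float:
    loss_distance = max(distance - loss_range, 0.0)
    max_distance = max(max_range - loss_range, 0.0)
    loss = min((loss_distance / max_distance) * 100.0 * loss_factor, 100.0)
    return max(loss_initial, loss)

d = calc_distance((0.0, 0.0, 0.0), (350.0, 0.0, 0.0))
print(d, calc_loss(d))  # 350.0 pixels: halfway through the loss band, so 50% loss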
+""" + +import logging +import os +import subprocess +import threading + +from core import CoreCommandError +from core import constants +from core.coreobj import PyCoreNode +from core.misc import utils +from core.netns.vnet import GreTap +from core.netns.vnet import LxBrNet + + +class PhysicalNode(PyCoreNode): + def __init__(self, session, objid=None, name=None, nodedir=None, start=True): + PyCoreNode.__init__(self, session, objid, name, start=start) + self.nodedir = nodedir + self.up = start + self.lock = threading.RLock() + self._mounts = [] + if start: + self.startup() + + def startup(self): + with self.lock: + self.makenodedir() + + def shutdown(self): + if not self.up: + return + + with self.lock: + while self._mounts: + _source, target = self._mounts.pop(-1) + self.umount(target) + + for netif in self.netifs(): + netif.shutdown() + + self.rmnodedir() + + def termcmdstring(self, sh="/bin/sh"): + """ + Create a terminal command string. + + :param str sh: shell to execute command in + :return: str + """ + return sh + + def cmd(self, args, wait=True): + """ + Runs shell command on node, with option to not wait for a result. + + :param list[str]|str args: command to run + :param bool wait: wait for command to exit, defaults to True + :return: exit status for command + :rtype: int + """ + os.chdir(self.nodedir) + status = utils.cmd(args, wait) + return status + + def cmd_output(self, args): + """ + Runs shell command on node and get exit status and output. + + :param list[str]|str args: command to run + :return: exit status and combined stdout and stderr + :rtype: tuple[int, str] + """ + os.chdir(self.nodedir) + p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + stdout, _ = p.communicate() + status = p.wait() + return status, stdout.strip() + + def check_cmd(self, args): + """ + Runs shell command on node. + + :param list[str]|str args: command to run + :return: combined stdout and stderr + :rtype: str + :raises CoreCommandError: when a non-zero exit status occurs + """ + status, output = self.cmd_output(args) + if status: + raise CoreCommandError(status, args, output) + return output.strip() + + def shcmd(self, cmdstr, sh="/bin/sh"): + return self.cmd([sh, "-c", cmdstr]) + + def sethwaddr(self, ifindex, addr): + """ + same as SimpleLxcNode.sethwaddr() + """ + self._netif[ifindex].sethwaddr(addr) + ifname = self.ifname(ifindex) + if self.up: + self.check_cmd([constants.IP_BIN, "link", "set", "dev", ifname, "address", str(addr)]) + + def addaddr(self, ifindex, addr): + """ + same as SimpleLxcNode.addaddr() + """ + if self.up: + self.check_cmd([constants.IP_BIN, "addr", "add", str(addr), "dev", self.ifname(ifindex)]) + + self._netif[ifindex].addaddr(addr) + + def deladdr(self, ifindex, addr): + """ + same as SimpleLxcNode.deladdr() + """ + try: + self._netif[ifindex].deladdr(addr) + except ValueError: + logging.exception("trying to delete unknown address: %s", addr) + + if self.up: + self.check_cmd([constants.IP_BIN, "addr", "del", str(addr), "dev", self.ifname(ifindex)]) + + def adoptnetif(self, netif, ifindex, hwaddr, addrlist): + """ + The broker builds a GreTap tunnel device to this physical node. + When a link message is received linking this node to another part of + the emulation, no new interface is created; instead, adopt the + GreTap netif as the node interface. + """ + netif.name = "gt%d" % ifindex + netif.node = self + self.addnetif(netif, ifindex) + + # use a more reasonable name, e.g. 
"gt0" instead of "gt.56286.150" + if self.up: + self.check_cmd([constants.IP_BIN, "link", "set", "dev", netif.localname, "down"]) + self.check_cmd([constants.IP_BIN, "link", "set", netif.localname, "name", netif.name]) + + netif.localname = netif.name + + if hwaddr: + self.sethwaddr(ifindex, hwaddr) + + for addr in utils.make_tuple(addrlist): + self.addaddr(ifindex, addr) + + if self.up: + self.check_cmd([constants.IP_BIN, "link", "set", "dev", netif.localname, "up"]) + + def linkconfig(self, netif, bw=None, delay=None, loss=None, duplicate=None, jitter=None, netif2=None): + """ + Apply tc queing disciplines using LxBrNet.linkconfig() + """ + # borrow the tc qdisc commands from LxBrNet.linkconfig() + linux_bridge = LxBrNet(session=self.session, start=False) + linux_bridge.up = True + linux_bridge.linkconfig(netif, bw=bw, delay=delay, loss=loss, duplicate=duplicate, jitter=jitter, netif2=netif2) + del linux_bridge + + def newifindex(self): + with self.lock: + while self.ifindex in self._netif: + self.ifindex += 1 + ifindex = self.ifindex + self.ifindex += 1 + return ifindex + + def newnetif(self, net=None, addrlist=None, hwaddr=None, ifindex=None, ifname=None): + logging.info("creating interface") + if not addrlist: + addrlist = [] + + if self.up and net is None: + raise NotImplementedError + + if ifindex is None: + ifindex = self.newifindex() + + if self.up: + # this is reached when this node is linked to a network node + # tunnel to net not built yet, so build it now and adopt it + gt = self.session.broker.addnettunnel(net.objid) + if gt is None or len(gt) != 1: + raise ValueError("error building tunnel from adding a new network interface: %s" % gt) + gt = gt[0] + net.detach(gt) + self.adoptnetif(gt, ifindex, hwaddr, addrlist) + return ifindex + + # this is reached when configuring services (self.up=False) + if ifname is None: + ifname = "gt%d" % ifindex + + netif = GreTap(node=self, name=ifname, session=self.session, start=False) + self.adoptnetif(netif, ifindex, hwaddr, addrlist) + return ifindex + + def privatedir(self, path): + if path[0] != "/": + raise ValueError("path not fully qualified: %s" % path) + hostpath = os.path.join(self.nodedir, os.path.normpath(path).strip('/').replace('/', '.')) + os.mkdir(hostpath) + self.mount(hostpath, path) + + def mount(self, source, target): + source = os.path.abspath(source) + logging.info("mounting %s at %s", source, target) + os.makedirs(target) + self.check_cmd([constants.MOUNT_BIN, "--bind", source, target]) + self._mounts.append((source, target)) + + def umount(self, target): + logging.info("unmounting '%s'" % target) + try: + self.check_cmd([constants.UMOUNT_BIN, "-l", target]) + except CoreCommandError: + logging.exception("unmounting failed for %s", target) + + def opennodefile(self, filename, mode="w"): + dirname, basename = os.path.split(filename) + if not basename: + raise ValueError("no basename for filename: " + filename) + + if dirname and dirname[0] == "/": + dirname = dirname[1:] + + dirname = dirname.replace("/", ".") + dirname = os.path.join(self.nodedir, dirname) + if not os.path.isdir(dirname): + os.makedirs(dirname, mode=0755) + + hostfilename = os.path.join(dirname, basename) + return open(hostfilename, mode) + + def nodefile(self, filename, contents, mode=0644): + with self.opennodefile(filename, "w") as node_file: + node_file.write(contents) + os.chmod(node_file.name, mode) + logging.info("created nodefile: '%s'; mode: 0%o", node_file.name, mode) diff --git a/daemon/core/player.py b/daemon/core/player.py deleted file mode 
100644 index d06e7b97..00000000 --- a/daemon/core/player.py +++ /dev/null @@ -1,450 +0,0 @@ -import ast -import csv -import enum -import logging -import sched -from pathlib import Path -from threading import Thread -from typing import IO, Callable, Optional - -import grpc - -from core.api.grpc.client import CoreGrpcClient, MoveNodesStreamer -from core.api.grpc.wrappers import LinkOptions - -logger = logging.getLogger(__name__) - - -@enum.unique -class PlayerEvents(enum.Enum): - """ - Provides event types for processing file events. - """ - - XY = enum.auto() - GEO = enum.auto() - CMD = enum.auto() - WLINK = enum.auto() - WILINK = enum.auto() - WICONFIG = enum.auto() - - @classmethod - def get(cls, value: str) -> Optional["PlayerEvents"]: - """ - Retrieves a valid event type from read input. - - :param value: value to get event type for - :return: valid event type, None otherwise - """ - event = None - try: - event = cls[value] - except KeyError: - pass - return event - - -class CorePlayerWriter: - """ - Provides conveniences for programmatically creating a core file for playback. - """ - - def __init__(self, file_path: str): - """ - Create a CorePlayerWriter instance. - - :param file_path: path to create core file - """ - self._time: float = 0.0 - self._file_path: str = file_path - self._file: Optional[IO] = None - self._csv_file: Optional[csv.writer] = None - - def open(self) -> None: - """ - Opens the provided file path for writing and csv creation. - - :return: nothing - """ - logger.info("core player write file(%s)", self._file_path) - self._file = open(self._file_path, "w", newline="") - self._csv_file = csv.writer(self._file, quoting=csv.QUOTE_MINIMAL) - - def close(self) -> None: - """ - Closes the file being written to. - - :return: nothing - """ - if self._file: - self._file.close() - - def update(self, delay: float) -> None: - """ - Update and move the current play time forward by delay amount. - - :param delay: amount to move time forward by - :return: nothing - """ - self._time += delay - - def write_xy(self, node_id: int, x: float, y: float) -> None: - """ - Write a node xy movement event. - - :param node_id: id of node to move - :param x: x position - :param y: y position - :return: nothing - """ - self._csv_file.writerow([self._time, PlayerEvents.XY.name, node_id, x, y]) - - def write_geo(self, node_id: int, lon: float, lat: float, alt: float) -> None: - """ - Write a node geo movement event. - - :param node_id: id of node to move - :param lon: longitude position - :param lat: latitude position - :param alt: altitude position - :return: nothing - """ - self._csv_file.writerow( - [self._time, PlayerEvents.GEO.name, node_id, lon, lat, alt] - ) - - def write_cmd(self, node_id: int, wait: bool, shell: bool, cmd: str) -> None: - """ - Write a node command event. - - :param node_id: id of node to run command on - :param wait: should command wait for successful execution - :param shell: should command run under shell context - :param cmd: command to run - :return: nothing - """ - self._csv_file.writerow( - [self._time, PlayerEvents.CMD.name, node_id, wait, shell, f"'{cmd}'"] - ) - - def write_wlan_link( - self, wireless_id: int, node1_id: int, node2_id: int, linked: bool - ) -> None: - """ - Write a wlan link event.
- - :param wireless_id: id of wlan network for link - :param node1_id: first node connected to wlan - :param node2_id: second node connected to wlan - :param linked: True if nodes are linked, False otherwise - :return: nothing - """ - self._csv_file.writerow( - [ - self._time, - PlayerEvents.WLINK.name, - wireless_id, - node1_id, - node2_id, - linked, - ] - ) - - def write_wireless_link( - self, wireless_id: int, node1_id: int, node2_id: int, linked: bool - ) -> None: - """ - Write a wireless link event. - - :param wireless_id: id of wireless network for link - :param node1_id: first node connected to wireless - :param node2_id: second node connected to wireless - :param linked: True if nodes are linked, False otherwise - :return: nothing - """ - self._csv_file.writerow( - [ - self._time, - PlayerEvents.WILINK.name, - wireless_id, - node1_id, - node2_id, - linked, - ] - ) - - def write_wireless_config( - self, - wireless_id: int, - node1_id: int, - node2_id: int, - loss1: float, - delay1: int, - loss2: float = None, - delay2: float = None, - ) -> None: - """ - Write a wireless link config event. - - :param wireless_id: id of wireless network for link - :param node1_id: first node connected to wireless - :param node2_id: second node connected to wireless - :param loss1: loss for the first interface - :param delay1: delay for the first interface - :param loss2: loss for the second interface, defaults to first interface loss - :param delay2: delay for second interface, defaults to first interface delay - :return: nothing - """ - loss2 = loss2 if loss2 is not None else loss1 - delay2 = delay2 if delay2 is not None else delay1 - self._csv_file.writerow( - [ - self._time, - PlayerEvents.WICONFIG.name, - wireless_id, - node1_id, - node2_id, - loss1, - delay1, - loss2, - delay2, - ] - ) - - -class CorePlayer: - """ - Provides core player functionality for reading a file with timed events - and playing them out. - """ - - def __init__(self, file_path: Path): - """ - Creates a CorePlayer instance. - - :param file_path: file to play path - """ - self.file_path: Path = file_path - self.core: CoreGrpcClient = CoreGrpcClient() - self.session_id: Optional[int] = None - self.node_streamer: Optional[MoveNodesStreamer] = None - self.node_streamer_thread: Optional[Thread] = None - self.scheduler: sched.scheduler = sched.scheduler() - self.handlers: dict[PlayerEvents, Callable] = { - PlayerEvents.XY: self.handle_xy, - PlayerEvents.GEO: self.handle_geo, - PlayerEvents.CMD: self.handle_cmd, - PlayerEvents.WLINK: self.handle_wlink, - PlayerEvents.WILINK: self.handle_wireless_link, - PlayerEvents.WICONFIG: self.handle_wireless_config, - } - - def init(self, session_id: Optional[int]) -> bool: - """ - Initialize core connections, settings to or retrieving session to use. - Also setup node streamer for xy/geo movements. 
- - :param session_id: session id to use, None for default session - :return: True if init was successful, False otherwise - """ - self.core.connect() - try: - if session_id is None: - sessions = self.core.get_sessions() - if len(sessions): - session_id = sessions[0].id - if session_id is None: - logger.error("no core sessions found") - return False - self.session_id = session_id - logger.info("playing to session(%s)", self.session_id) - self.node_streamer = MoveNodesStreamer(self.session_id) - self.node_streamer_thread = Thread( - target=self.core.move_nodes, args=(self.node_streamer,), daemon=True - ) - self.node_streamer_thread.start() - except grpc.RpcError as e: - logger.error("core is not running: %s", e.details()) - return False - return True - - def start(self) -> None: - """ - Starts playing file, reading the csv data line by line, then handling - each line event type. Delay is tracked and calculated, while processing, - to ensure we wait for the event time to be active. - - :return: nothing - """ - current_time = 0.0 - with self.file_path.open("r", newline="") as f: - for row in csv.reader(f): - # determine delay - input_time = float(row[0]) - delay = input_time - current_time - current_time = input_time - # determine event - event_value = row[1] - event = PlayerEvents.get(event_value) - if not event: - logger.error("unknown event type: %s", ",".join(row)) - continue - # get args and event functions - args = tuple(ast.literal_eval(x) for x in row[2:]) - event_func = self.handlers.get(event) - if not event_func: - logger.error("unknown event type handler: %s", ",".join(row)) - continue - logger.info( - "processing line time(%s) event(%s) args(%s)", - input_time, - event.name, - args, - ) - # schedule and run event - self.scheduler.enter(delay, 1, event_func, argument=args) - self.scheduler.run() - self.stop() - - def stop(self) -> None: - """ - Stop and cleanup playback. - - :return: nothing - """ - logger.info("stopping playback, cleaning up") - self.node_streamer.stop() - self.node_streamer_thread.join() - self.node_streamer_thread = None - - def handle_xy(self, node_id: int, x: float, y: float) -> None: - """ - Handle node xy movement event. - - :param node_id: id of node to move - :param x: x position - :param y: y position - :return: nothing - """ - logger.debug("handling xy node(%s) x(%s) y(%s)", node_id, x, y) - self.node_streamer.send_position(node_id, x, y) - - def handle_geo(self, node_id: int, lon: float, lat: float, alt: float) -> None: - """ - Handle node geo movement event. - - :param node_id: id of node to move - :param lon: longitude position - :param lat: latitude position - :param alt: altitude position - :return: nothing - """ - logger.debug( - "handling geo node(%s) lon(%s) lat(%s) alt(%s)", node_id, lon, lat, alt - ) - self.node_streamer.send_geo(node_id, lon, lat, alt) - - def handle_cmd(self, node_id: int, wait: bool, shell: bool, cmd: str) -> None: - """ - Handle node command event. 
- - :param node_id: id of node to run command - :param wait: True to wait for successful command, False otherwise - :param shell: True to run command in shell context, False otherwise - :param cmd: command to run - :return: nothing - """ - logger.debug( - "handling cmd node(%s) wait(%s) shell(%s) cmd(%s)", - node_id, - wait, - shell, - cmd, - ) - status, output = self.core.node_command( - self.session_id, node_id, cmd, wait, shell - ) - logger.info("cmd result(%s): %s", status, output) - - def handle_wlink( - self, net_id: int, node1_id: int, node2_id: int, linked: bool - ) -> None: - """ - Handle wlan link event. - - :param net_id: id of wlan network - :param node1_id: first node in link - :param node2_id: second node in link - :param linked: True if linked, False otherwise - :return: nothing - """ - logger.debug( - "handling wlink node1(%s) node2(%s) net(%s) linked(%s)", - node1_id, - node2_id, - net_id, - linked, - ) - self.core.wlan_link(self.session_id, net_id, node1_id, node2_id, linked) - - def handle_wireless_link( - self, wireless_id: int, node1_id: int, node2_id: int, linked: bool - ) -> None: - """ - Handle wireless link event. - - :param wireless_id: id of wireless network - :param node1_id: first node in link - :param node2_id: second node in link - :param linked: True if linked, False otherwise - :return: nothing - """ - logger.debug( - "handling link wireless(%s) node1(%s) node2(%s) linked(%s)", - wireless_id, - node1_id, - node2_id, - linked, - ) - self.core.wireless_linked( - self.session_id, wireless_id, node1_id, node2_id, linked - ) - - def handle_wireless_config( - self, - wireless_id: int, - node1_id: int, - node2_id: int, - loss1: float, - delay1: int, - loss2: float, - delay2: int, - ) -> None: - """ - Handle wireless config event.
- - :param wireless_id: id of wireless network - :param node1_id: first node in link - :param node2_id: second node in link - :param loss1: first interface loss - :param delay1: first interface delay - :param loss2: second interface loss - :param delay2: second interface delay - :return: nothing - """ - logger.debug( - "handling config wireless(%s) node1(%s) node2(%s) " - "options1(%s/%s) options2(%s/%s)", - wireless_id, - node1_id, - node2_id, - loss1, - delay1, - loss2, - delay2, - ) - options1 = LinkOptions(loss=loss1, delay=delay1) - options2 = LinkOptions(loss=loss2, delay=delay2) - self.core.wireless_config( - self.session_id, wireless_id, node1_id, node2_id, options1, options2 - ) diff --git a/daemon/core/plugins/__init__.py b/daemon/core/plugins/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/daemon/core/plugins/sdt.py b/daemon/core/plugins/sdt.py deleted file mode 100644 index f963c817..00000000 --- a/daemon/core/plugins/sdt.py +++ /dev/null @@ -1,460 +0,0 @@ -""" -sdt.py: Scripted Display Tool (SDT3D) helper -""" - -import logging -import socket -from pathlib import Path -from typing import TYPE_CHECKING, Optional -from urllib.parse import urlparse - -from core.constants import CORE_CONF_DIR -from core.emane.nodes import EmaneNet -from core.emulator.data import LinkData, NodeData -from core.emulator.enumerations import EventTypes, MessageFlags -from core.errors import CoreError -from core.nodes.base import CoreNode, NodeBase -from core.nodes.network import HubNode, SwitchNode, TunnelNode, WlanNode -from core.nodes.physical import Rj45Node -from core.nodes.wireless import WirelessNode - -logger = logging.getLogger(__name__) - -if TYPE_CHECKING: - from core.emulator.session import Session - -LOCAL_ICONS_PATH: Path = Path(__file__).parent.parent / "gui" / "data" / "icons" -CORE_LAYER: str = "CORE" -NODE_LAYER: str = "CORE::Nodes" -LINK_LAYER: str = "CORE::Links" -WIRED_LINK_LAYER: str = f"{LINK_LAYER}::wired" -CORE_LAYERS: list[str] = [CORE_LAYER, LINK_LAYER, NODE_LAYER, WIRED_LINK_LAYER] -DEFAULT_LINK_COLOR: str = "red" -NODE_TYPES: dict[type[NodeBase], str] = { - HubNode: "hub", - SwitchNode: "lanswitch", - TunnelNode: "tunnel", - WlanNode: "wlan", - EmaneNet: "emane", - WirelessNode: "wireless", - Rj45Node: "rj45", -} - - -def is_wireless(node: NodeBase) -> bool: - return isinstance(node, (WlanNode, EmaneNet, WirelessNode)) - - -def get_link_id(node1_id: int, node2_id: int, network_id: int) -> str: - link_id = f"{node1_id}-{node2_id}" - if network_id is not None: - link_id = f"{link_id}-{network_id}" - return link_id - - -class Sdt: - """ - Helper class for exporting session objects to NRL's SDT3D. - The connect() method initializes the display, and can be invoked - when a node position or link has changed. - """ - - DEFAULT_SDT_URL: str = "tcp://127.0.0.1:50000/" - # default altitude (in meters) for flyto view - DEFAULT_ALT: int = 2500 - # TODO: read in user's nodes.conf here; below are default node types from the GUI - DEFAULT_SPRITES: list[tuple[str, str]] = [ - ("router", "router.png"), - ("host", "host.png"), - ("PC", "pc.png"), - ("mdr", "mdr.png"), - ("prouter", "prouter.png"), - ("hub", "hub.png"), - ("lanswitch", "lanswitch.png"), - ("wlan", "wlan.png"), - ("emane", "emane.png"), - ("wireless", "wireless.png"), - ("rj45", "rj45.png"), - ("tunnel", "tunnel.png"), - ] - - def __init__(self, session: "Session") -> None: - """ - Creates a Sdt instance.
- - :param session: session this manager is tied to - """ - self.session: "Session" = session - self.sock: Optional[socket.socket] = None - self.connected: bool = False - self.url: str = self.DEFAULT_SDT_URL - self.address: Optional[tuple[Optional[str], Optional[int]]] = None - self.protocol: Optional[str] = None - self.network_layers: set[str] = set() - self.session.node_handlers.append(self.handle_node_update) - self.session.link_handlers.append(self.handle_link_update) - - def is_enabled(self) -> bool: - """ - Check for "enablesdt" session option. Return False by default if - the option is missing. - - :return: True if enabled, False otherwise - """ - return self.session.options.get_int("enablesdt") == 1 - - def seturl(self) -> None: - """ - Read "sdturl" from session options, or use the default value. - Set self.url, self.address, self.protocol - - :return: nothing - """ - url = self.session.options.get("sdturl", self.DEFAULT_SDT_URL) - self.url = urlparse(url) - self.address = (self.url.hostname, self.url.port) - self.protocol = self.url.scheme - - def connect(self) -> bool: - """ - Connect to the SDT address/port if enabled. - - :return: True if connected, False otherwise - """ - if not self.is_enabled(): - return False - if self.connected: - return True - if self.session.state == EventTypes.SHUTDOWN_STATE: - return False - - self.seturl() - logger.info("connecting to SDT at %s://%s", self.protocol, self.address) - if self.sock is None: - try: - if self.protocol.lower() == "udp": - self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - self.sock.connect(self.address) - else: - # Default to tcp - self.sock = socket.create_connection(self.address, 5) - except OSError: - logger.exception("SDT socket connect error") - return False - - if not self.initialize(): - return False - - self.connected = True - # refresh all objects in SDT3D when connecting after session start - if not self.sendobjs(): - return False - return True - - def initialize(self) -> bool: - """ - Load icon sprites, and fly to the reference point location on - the virtual globe. - - :return: initialize command status - """ - if not self.cmd(f'path "{LOCAL_ICONS_PATH.absolute()}"'): - return False - # send node type to icon mappings - for node_type, icon in self.DEFAULT_SPRITES: - if not self.cmd(f"sprite {node_type} image {icon}"): - return False - lat, long = self.session.location.refgeo[:2] - return self.cmd(f"flyto {long:.6f},{lat:.6f},{self.DEFAULT_ALT}") - - def disconnect(self) -> None: - """ - Disconnect from SDT. - - :return: nothing - """ - if self.sock: - try: - self.sock.close() - except OSError: - logger.error("error closing socket") - finally: - self.sock = None - self.connected = False - - def shutdown(self) -> None: - """ - Invoked from Session.shutdown() and Session.checkshutdown(). - - :return: nothing - """ - self.cmd("clear all") - for layer in self.network_layers: - self.cmd(f"delete layer,{layer}") - for layer in CORE_LAYERS[::-1]: - self.cmd(f"delete layer,{layer}") - self.disconnect() - self.network_layers.clear() - - def cmd(self, cmdstr: str) -> bool: - """ - Send an SDT command over a UDP socket. socket.sendall() is used - as opposed to socket.sendto() because an exception is raised when - there is no socket listener.
- - :param cmdstr: command to send - :return: True if command was successful, False otherwise - """ - if self.sock is None: - return False - try: - cmd = f"{cmdstr}\n".encode() - logger.debug("sdt cmd: %s", cmd) - self.sock.sendall(cmd) - return True - except OSError: - logger.exception("SDT connection error") - self.sock = None - self.connected = False - return False - - def sendobjs(self) -> None: - """ - Session has already started, and the SDT3D GUI later connects. - Send all node and link objects for display. Otherwise, nodes and - links will only be drawn when they have been updated (e.g. moved). - - :return: nothing - """ - for layer in CORE_LAYERS: - self.cmd(f"layer {layer}") - with self.session.nodes_lock: - nets = [] - for node in self.session.nodes.values(): - if isinstance(node, (EmaneNet, WlanNode)): - nets.append(node) - if not isinstance(node, NodeBase): - continue - self.add_node(node) - for link in self.session.link_manager.links(): - if is_wireless(link.node1) or is_wireless(link.node2): - continue - link_data = link.get_data(MessageFlags.ADD) - self.handle_link_update(link_data) - for net in nets: - for link_data in net.links(MessageFlags.ADD): - self.handle_link_update(link_data) - - def get_node_position(self, node: NodeBase) -> Optional[str]: - """ - Convenience to generate an SDT position string, given a node. - - :param node: - :return: - """ - x, y, z = node.position.get() - if x is None or y is None: - return None - lat, lon, alt = self.session.location.getgeo(x, y, z) - return f"pos {lon:.6f},{lat:.6f},{alt:.6f}" - - def add_node(self, node: NodeBase) -> None: - """ - Handle adding a node in SDT. - - :param node: node to add - :return: nothing - """ - logger.debug("sdt add node: %s - %s", node.id, node.name) - if not self.connect(): - return - pos = self.get_node_position(node) - if not pos: - return - if isinstance(node, CoreNode): - node_type = node.model - else: - node_type = NODE_TYPES.get(type(node), "PC") - icon = node.icon - if icon: - node_type = node.name - icon = icon.replace("$CORE_DATA_DIR", str(LOCAL_ICONS_PATH.absolute())) - icon = icon.replace("$CORE_CONF_DIR", str(CORE_CONF_DIR)) - self.cmd(f"sprite {node_type} image {icon}") - self.cmd( - f'node {node.id} nodeLayer "{NODE_LAYER}" ' - f'type {node_type} label on,"{node.name}" {pos}' - ) - - def edit_node(self, node: NodeBase, lon: float, lat: float, alt: float) -> None: - """ - Handle updating a node in SDT. - - :param node: node to update - :param lon: node longitude - :param lat: node latitude - :param alt: node altitude - :return: nothing - """ - logger.debug("sdt update node: %s - %s", node.id, node.name) - if not self.connect(): - return - - if all([lat is not None, lon is not None, alt is not None]): - pos = f"pos {lon:.6f},{lat:.6f},{alt:.6f}" - self.cmd(f"node {node.id} {pos}") - else: - pos = self.get_node_position(node) - if not pos: - return - self.cmd(f"node {node.id} {pos}") - - def delete_node(self, node_id: int) -> None: - """ - Handle deleting a node in SDT. - - :param node_id: node id to delete - :return: nothing - """ - logger.debug("sdt delete node: %s", node_id) - if not self.connect(): - return - self.cmd(f"delete node,{node_id}") - - def handle_node_update(self, node_data: NodeData) -> None: - """ - Handler for node updates, specifically for updating their location. 
- - :param node_data: node data being updated - :return: nothing - """ - if not self.connect(): - return - node = node_data.node - logger.debug("sdt handle node update: %s - %s", node.id, node.name) - if node_data.message_type == MessageFlags.DELETE: - self.cmd(f"delete node,{node.id}") - else: - x, y, _ = node.position.get() - lon, lat, alt = node.position.get_geo() - if all([lat is not None, lon is not None, alt is not None]): - pos = f"pos {lon:.6f},{lat:.6f},{alt:.6f}" - self.cmd(f"node {node.id} {pos}") - elif node_data.message_type == MessageFlags.NONE: - lat, lon, alt = self.session.location.getgeo(x, y, 0) - pos = f"pos {lon:.6f},{lat:.6f},{alt:.6f}" - self.cmd(f"node {node.id} {pos}") - - def wireless_net_check(self, node_id: int) -> bool: - """ - Determines if a node is either a wireless node type. - - :param node_id: node id to check - :return: True is a wireless node type, False otherwise - """ - result = False - try: - node = self.session.get_node(node_id, NodeBase) - result = isinstance(node, (WlanNode, EmaneNet, WirelessNode)) - except CoreError: - pass - return result - - def add_link( - self, node1_id: int, node2_id: int, network_id: int = None, label: str = None - ) -> None: - """ - Handle adding a link in SDT. - - :param node1_id: node one id - :param node2_id: node two id - :param network_id: network link is associated with, None otherwise - :param label: label for link - :return: nothing - """ - logger.debug("sdt add link: %s, %s, %s", node1_id, node2_id, network_id) - if not self.connect(): - return - if self.wireless_net_check(node1_id) or self.wireless_net_check(node2_id): - return - color = DEFAULT_LINK_COLOR - if network_id: - color = self.session.get_link_color(network_id) - line = f"{color},2" - link_id = get_link_id(node1_id, node2_id, network_id) - if network_id: - layer = self.get_network_layer(network_id) - else: - layer = WIRED_LINK_LAYER - link_label = "" - if label: - link_label = f'linklabel on,"{label}"' - self.cmd( - f"link {node1_id},{node2_id},{link_id} linkLayer {layer} line {line} " - f"{link_label}" - ) - - def get_network_layer(self, network_id: int) -> str: - node = self.session.nodes.get(network_id) - if node: - layer = f"{LINK_LAYER}::{node.name}" - self.network_layers.add(layer) - else: - layer = WIRED_LINK_LAYER - return layer - - def delete_link(self, node1_id: int, node2_id: int, network_id: int = None) -> None: - """ - Handle deleting a link in SDT. - - :param node1_id: node one id - :param node2_id: node two id - :param network_id: network link is associated with, None otherwise - :return: nothing - """ - logger.debug("sdt delete link: %s, %s, %s", node1_id, node2_id, network_id) - if not self.connect(): - return - if self.wireless_net_check(node1_id) or self.wireless_net_check(node2_id): - return - link_id = get_link_id(node1_id, node2_id, network_id) - self.cmd(f"delete link,{node1_id},{node2_id},{link_id}") - - def edit_link( - self, node1_id: int, node2_id: int, network_id: int, label: str - ) -> None: - """ - Handle editing a link in SDT. 
- - :param node1_id: node one id - :param node2_id: node two id - :param network_id: network link is associated with, None otherwise - :param label: label to update - :return: nothing - """ - logger.debug("sdt edit link: %s, %s, %s", node1_id, node2_id, network_id) - if not self.connect(): - return - if self.wireless_net_check(node1_id) or self.wireless_net_check(node2_id): - return - link_id = get_link_id(node1_id, node2_id, network_id) - link_label = f'linklabel on,"{label}"' - self.cmd(f"link {node1_id},{node2_id},{link_id} {link_label}") - - def handle_link_update(self, link_data: LinkData) -> None: - """ - Handle link broadcast messages and push changes to SDT. - - :param link_data: link data to handle - :return: nothing - """ - node1_id = link_data.node1_id - node2_id = link_data.node2_id - network_id = link_data.network_id - label = link_data.label - if link_data.message_type == MessageFlags.ADD: - self.add_link(node1_id, node2_id, network_id, label) - elif link_data.message_type == MessageFlags.DELETE: - self.delete_link(node1_id, node2_id, network_id) - elif link_data.message_type == MessageFlags.NONE and label: - self.edit_link(node1_id, node2_id, network_id, label) diff --git a/daemon/core/scripts/__init__.py b/daemon/core/scripts/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/daemon/core/scripts/cleanup.py b/daemon/core/scripts/cleanup.py deleted file mode 100755 index 1ab4647e..00000000 --- a/daemon/core/scripts/cleanup.py +++ /dev/null @@ -1,105 +0,0 @@ -import argparse -import os -import subprocess -import sys -import time - - -def check_root() -> None: - if os.geteuid() != 0: - print("permission denied, run this script as root") - sys.exit(1) - - -def parse_args() -> argparse.Namespace: - parser = argparse.ArgumentParser( - description="helps cleanup lingering core processes and files", - formatter_class=argparse.ArgumentDefaultsHelpFormatter, - ) - parser.add_argument( - "-d", "--daemon", action="store_true", help="also kill core-daemon" - ) - return parser.parse_args() - - -def cleanup_daemon() -> None: - print("killing core-daemon process ... ", end="") - result = subprocess.call("pkill -9 core-daemon", shell=True) - if result: - print("not found") - else: - print("done") - - -def cleanup_nodes() -> None: - print("killing vnoded processes ... ", end="") - result = subprocess.call("pkill -KILL vnoded", shell=True) - if result: - print("none found") - else: - time.sleep(1) - print("done") - - -def cleanup_emane() -> None: - print("killing emane processes ... ", end="") - result = subprocess.call("pkill emane", shell=True) - if result: - print("none found") - else: - print("done") - - -def cleanup_sessions() -> None: - print("removing session directories ... 
", end="") - result = subprocess.call("rm -rf /tmp/pycore*", shell=True) - if result: - print("none found") - else: - print("done") - - -def cleanup_interfaces() -> None: - print("cleaning up devices") - output = subprocess.check_output("ip -br link show", shell=True) - lines = output.decode().strip().split("\n") - for line in lines: - values = line.split() - name = values[0] - if ( - name.startswith("veth") - or name.startswith("beth") - or name.startswith("gt.") - or name.startswith("b.") - or name.startswith("ctrl") - ): - name = name.split("@")[0] - result = subprocess.call(f"ip link delete {name}", shell=True) - if result: - print(f"failed to remove {name}") - else: - print(f"removed {name}") - if name.startswith("b."): - result = subprocess.call( - f"nft delete table bridge {name}", - stdout=subprocess.DEVNULL, - stderr=subprocess.DEVNULL, - shell=True, - ) - if not result: - print(f"cleared nft rules for {name}") - - -def main() -> None: - check_root() - args = parse_args() - if args.daemon: - cleanup_daemon() - cleanup_nodes() - cleanup_emane() - cleanup_interfaces() - cleanup_sessions() - - -if __name__ == "__main__": - main() diff --git a/daemon/core/scripts/cli.py b/daemon/core/scripts/cli.py deleted file mode 100755 index 760dbad7..00000000 --- a/daemon/core/scripts/cli.py +++ /dev/null @@ -1,628 +0,0 @@ -import json -import sys -from argparse import ( - ArgumentDefaultsHelpFormatter, - ArgumentParser, - ArgumentTypeError, - Namespace, -) -from functools import wraps -from pathlib import Path -from typing import Any, Optional - -import grpc -import netaddr -from google.protobuf.json_format import MessageToDict -from netaddr import EUI, AddrFormatError, IPNetwork - -from core.api.grpc.client import CoreGrpcClient -from core.api.grpc.wrappers import ( - ConfigOption, - Geo, - Interface, - Link, - LinkOptions, - Node, - NodeType, - Position, -) - -NODE_TYPES = [x.name for x in NodeType if x != NodeType.PEER_TO_PEER] - - -def protobuf_to_json(message: Any) -> dict[str, Any]: - return MessageToDict( - message, including_default_value_fields=True, preserving_proto_field_name=True - ) - - -def print_json(data: Any) -> None: - data = json.dumps(data, indent=2) - print(data) - - -def coreclient(func): - @wraps(func) - def wrapper(*args, **kwargs): - core = CoreGrpcClient() - try: - with core.context_connect(): - return func(core, *args, **kwargs) - except grpc.RpcError as e: - print(f"grpc error: {e.details()}") - - return wrapper - - -def mac_type(value: str) -> str: - try: - mac = EUI(value, dialect=netaddr.mac_unix_expanded) - return str(mac) - except AddrFormatError: - raise ArgumentTypeError(f"invalid mac address: {value}") - - -def ip4_type(value: str) -> IPNetwork: - try: - ip = IPNetwork(value) - if not netaddr.valid_ipv4(str(ip.ip)): - raise ArgumentTypeError(f"invalid ip4 address: {value}") - return ip - except AddrFormatError: - raise ArgumentTypeError(f"invalid ip4 address: {value}") - - -def ip6_type(value: str) -> IPNetwork: - try: - ip = IPNetwork(value) - if not netaddr.valid_ipv6(str(ip.ip)): - raise ArgumentTypeError(f"invalid ip6 address: {value}") - return ip - except AddrFormatError: - raise ArgumentTypeError(f"invalid ip6 address: {value}") - - -def position_type(value: str) -> tuple[float, float]: - error = "invalid position, must be in the format: float,float" - try: - values = [float(x) for x in value.split(",")] - except ValueError: - raise ArgumentTypeError(error) - if len(values) != 2: - raise ArgumentTypeError(error) - x, y = values - return x, y - - -def 
geo_type(value: str) -> tuple[float, float, float]: - error = "invalid geo, must be in the format: float,float,float" - try: - values = [float(x) for x in value.split(",")] - except ValueError: - raise ArgumentTypeError(error) - if len(values) != 3: - raise ArgumentTypeError(error) - lon, lat, alt = values - return lon, lat, alt - - -def file_type(value: str) -> Path: - path = Path(value) - if not path.is_file(): - raise ArgumentTypeError(f"invalid file: {value}") - return path - - -def get_current_session(core: CoreGrpcClient, session_id: Optional[int]) -> int: - if session_id: - return session_id - sessions = core.get_sessions() - if not sessions: - print("no current session to interact with") - sys.exit(1) - return sessions[0].id - - -def create_iface( - iface_id: int, mac: str, ip4_net: IPNetwork, ip6_net: IPNetwork -) -> Interface: - ip4 = str(ip4_net.ip) if ip4_net else None - ip4_mask = ip4_net.prefixlen if ip4_net else None - ip6 = str(ip6_net.ip) if ip6_net else None - ip6_mask = ip6_net.prefixlen if ip6_net else None - return Interface( - id=iface_id, mac=mac, ip4=ip4, ip4_mask=ip4_mask, ip6=ip6, ip6_mask=ip6_mask - ) - - -def print_iface_header() -> None: - print("ID | MAC Address | IP4 Address | IP6 Address") - - -def print_iface(iface: Interface) -> None: - iface_ip4 = f"{iface.ip4}/{iface.ip4_mask}" if iface.ip4 else "" - iface_ip6 = f"{iface.ip6}/{iface.ip6_mask}" if iface.ip6 else "" - print(f"{iface.id:<3} | {iface.mac:<17} | {iface_ip4:<18} | {iface_ip6}") - - -@coreclient -def get_wlan_config(core: CoreGrpcClient, args: Namespace) -> None: - session_id = get_current_session(core, args.session) - config = core.get_wlan_config(session_id, args.node) - if args.json: - print_json(ConfigOption.to_dict(config)) - else: - size = 0 - for option in config.values(): - size = max(size, len(option.name)) - print(f"{'Name':<{size}.{size}} | Value") - for option in config.values(): - print(f"{option.name:<{size}.{size}} | {option.value}") - - -@coreclient -def set_wlan_config(core: CoreGrpcClient, args: Namespace) -> None: - session_id = get_current_session(core, args.session) - config = {} - if args.bandwidth: - config["bandwidth"] = str(args.bandwidth) - if args.delay: - config["delay"] = str(args.delay) - if args.loss: - config["error"] = str(args.loss) - if args.jitter: - config["jitter"] = str(args.jitter) - if args.range: - config["range"] = str(args.range) - result = core.set_wlan_config(session_id, args.node, config) - if args.json: - print_json(dict(result=result)) - else: - print(f"set wlan config: {result}") - - -@coreclient -def open_xml(core: CoreGrpcClient, args: Namespace) -> None: - result, session_id = core.open_xml(args.file, args.start) - if args.json: - print_json(dict(result=result, session_id=session_id)) - else: - print(f"opened xml: {result},{session_id}") - - -@coreclient -def query_sessions(core: CoreGrpcClient, args: Namespace) -> None: - sessions = core.get_sessions() - if args.json: - sessions = [protobuf_to_json(x.to_proto()) for x in sessions] - print_json(sessions) - else: - print("Session ID | Session State | Nodes") - for session in sessions: - print(f"{session.id:<10} | {session.state.name:<13} | {session.nodes}") - - -@coreclient -def query_session(core: CoreGrpcClient, args: Namespace) -> None: - session = core.get_session(args.id) - if args.json: - session = protobuf_to_json(session.to_proto()) - print_json(session) - else: - print("Nodes") - print("ID | Name | Type | XY | Geo") - for node in session.nodes.values(): - xy_pos = 
f"{int(node.position.x)},{int(node.position.y)}" - geo_pos = f"{node.geo.lon:.7f},{node.geo.lat:.7f},{node.geo.alt:f}" - print( - f"{node.id:<7} | {node.name[:7]:<7} | {node.type.name[:7]:<7} | {xy_pos:<9} | {geo_pos}" - ) - print("\nLinks") - for link in session.links: - n1 = session.nodes[link.node1_id].name - n2 = session.nodes[link.node2_id].name - print("Node | ", end="") - print_iface_header() - print(f"{n1:<6} | ", end="") - if link.iface1: - print_iface(link.iface1) - else: - print() - print(f"{n2:<6} | ", end="") - if link.iface2: - print_iface(link.iface2) - else: - print() - print() - - -@coreclient -def query_node(core: CoreGrpcClient, args: Namespace) -> None: - session = core.get_session(args.id) - node, ifaces, _ = core.get_node(args.id, args.node) - if args.json: - node = protobuf_to_json(node.to_proto()) - ifaces = [protobuf_to_json(x.to_proto()) for x in ifaces] - print_json(dict(node=node, ifaces=ifaces)) - else: - print("ID | Name | Type | XY | Geo") - xy_pos = f"{int(node.position.x)},{int(node.position.y)}" - geo_pos = f"{node.geo.lon:.7f},{node.geo.lat:.7f},{node.geo.alt:f}" - print( - f"{node.id:<7} | {node.name[:7]:<7} | {node.type.name[:7]:<7} | {xy_pos:<9} | {geo_pos}" - ) - if ifaces: - print("Interfaces") - print("Connected To | ", end="") - print_iface_header() - for iface in ifaces: - if iface.net_id == node.id: - if iface.node_id: - name = session.nodes[iface.node_id].name - else: - name = session.nodes[iface.net2_id].name - else: - net_node = session.nodes.get(iface.net_id) - name = net_node.name if net_node else "" - print(f"{name:<12} | ", end="") - print_iface(iface) - - -@coreclient -def delete_session(core: CoreGrpcClient, args: Namespace) -> None: - result = core.delete_session(args.id) - if args.json: - print_json(dict(result=result)) - else: - print(f"delete session({args.id}): {result}") - - -@coreclient -def add_node(core: CoreGrpcClient, args: Namespace) -> None: - session_id = get_current_session(core, args.session) - node_type = NodeType[args.type] - pos = None - if args.pos: - x, y = args.pos - pos = Position(x=x, y=y) - geo = None - if args.geo: - lon, lat, alt = args.geo - geo = Geo(lon=lon, lat=lat, alt=alt) - node = Node( - id=args.id, - name=args.name, - type=node_type, - model=args.model, - emane=args.emane, - icon=args.icon, - image=args.image, - position=pos, - geo=geo, - ) - node_id = core.add_node(session_id, node) - if args.json: - print_json(dict(node_id=node_id)) - else: - print(f"created node: {node_id}") - - -@coreclient -def edit_node(core: CoreGrpcClient, args: Namespace) -> None: - session_id = get_current_session(core, args.session) - result = core.edit_node(session_id, args.id, args.icon) - if args.json: - print_json(dict(result=result)) - else: - print(f"edit node: {result}") - - -@coreclient -def move_node(core: CoreGrpcClient, args: Namespace) -> None: - session_id = get_current_session(core, args.session) - pos = None - if args.pos: - x, y = args.pos - pos = Position(x=x, y=y) - geo = None - if args.geo: - lon, lat, alt = args.geo - geo = Geo(lon=lon, lat=lat, alt=alt) - result = core.move_node(session_id, args.id, pos, geo) - if args.json: - print_json(dict(result=result)) - else: - print(f"move node: {result}") - - -@coreclient -def delete_node(core: CoreGrpcClient, args: Namespace) -> None: - session_id = get_current_session(core, args.session) - result = core.delete_node(session_id, args.id) - if args.json: - print_json(dict(result=result)) - else: - print(f"deleted node: {result}") - - -@coreclient -def 
add_link(core: CoreGrpcClient, args: Namespace) -> None: - session_id = get_current_session(core, args.session) - iface1 = None - if args.iface1_id is not None: - iface1 = create_iface( - args.iface1_id, args.iface1_mac, args.iface1_ip4, args.iface1_ip6 - ) - iface2 = None - if args.iface2_id is not None: - iface2 = create_iface( - args.iface2_id, args.iface2_mac, args.iface2_ip4, args.iface2_ip6 - ) - options = LinkOptions( - bandwidth=args.bandwidth, - loss=args.loss, - jitter=args.jitter, - delay=args.delay, - dup=args.duplicate, - unidirectional=args.uni, - ) - link = Link(args.node1, args.node2, iface1=iface1, iface2=iface2, options=options) - result, iface1, iface2 = core.add_link(session_id, link) - if args.json: - iface1 = protobuf_to_json(iface1.to_proto()) - iface2 = protobuf_to_json(iface2.to_proto()) - print_json(dict(result=result, iface1=iface1, iface2=iface2)) - else: - print(f"add link: {result}") - - -@coreclient -def edit_link(core: CoreGrpcClient, args: Namespace) -> None: - session_id = get_current_session(core, args.session) - options = LinkOptions( - bandwidth=args.bandwidth, - loss=args.loss, - jitter=args.jitter, - delay=args.delay, - dup=args.duplicate, - unidirectional=args.uni, - ) - iface1 = Interface(args.iface1) - iface2 = Interface(args.iface2) - link = Link(args.node1, args.node2, iface1=iface1, iface2=iface2, options=options) - result = core.edit_link(session_id, link) - if args.json: - print_json(dict(result=result)) - else: - print(f"edit link: {result}") - - -@coreclient -def delete_link(core: CoreGrpcClient, args: Namespace) -> None: - session_id = get_current_session(core, args.session) - iface1 = Interface(args.iface1) - iface2 = Interface(args.iface2) - link = Link(args.node1, args.node2, iface1=iface1, iface2=iface2) - result = core.delete_link(session_id, link) - if args.json: - print_json(dict(result=result)) - else: - print(f"delete link: {result}") - - -def setup_sessions_parser(parent) -> None: - parser = parent.add_parser("session", help="session interactions") - parser.formatter_class = ArgumentDefaultsHelpFormatter - parser.add_argument("-i", "--id", type=int, help="session id to use", required=True) - subparsers = parser.add_subparsers(help="session commands") - subparsers.required = True - subparsers.dest = "command" - - delete_parser = subparsers.add_parser("delete", help="delete a session") - delete_parser.formatter_class = ArgumentDefaultsHelpFormatter - delete_parser.set_defaults(func=delete_session) - - -def setup_node_parser(parent) -> None: - parser = parent.add_parser("node", help="node interactions") - parser.formatter_class = ArgumentDefaultsHelpFormatter - parser.add_argument("-s", "--session", type=int, help="session to interact with") - subparsers = parser.add_subparsers(help="node commands") - subparsers.required = True - subparsers.dest = "command" - - add_parser = subparsers.add_parser("add", help="add a node") - add_parser.formatter_class = ArgumentDefaultsHelpFormatter - add_parser.add_argument("-i", "--id", type=int, help="id to use, optional") - add_parser.add_argument("-n", "--name", help="name to use, optional") - add_parser.add_argument( - "-t", "--type", choices=NODE_TYPES, default="DEFAULT", help="type of node" - ) - add_parser.add_argument( - "-m", "--model", help="used to determine services, optional" - ) - group = add_parser.add_mutually_exclusive_group(required=True) - group.add_argument("-p", "--pos", type=position_type, help="x,y position") - group.add_argument("-g", "--geo", type=geo_type, help="lon,lat,alt 
position") - add_parser.add_argument("-ic", "--icon", help="icon to use, optional") - add_parser.add_argument("-im", "--image", help="container image, optional") - add_parser.add_argument( - "-e", "--emane", help="emane model, only required for emane nodes" - ) - add_parser.set_defaults(func=add_node) - - edit_parser = subparsers.add_parser("edit", help="edit a node") - edit_parser.formatter_class = ArgumentDefaultsHelpFormatter - edit_parser.add_argument("-i", "--id", type=int, help="id to use", required=True) - edit_parser.add_argument("-ic", "--icon", help="icon to use, optional") - edit_parser.set_defaults(func=edit_node) - - move_parser = subparsers.add_parser("move", help="move a node") - move_parser.formatter_class = ArgumentDefaultsHelpFormatter - move_parser.add_argument( - "-i", "--id", type=int, help="id to use", required=True - ) - group = move_parser.add_mutually_exclusive_group(required=True) - group.add_argument("-p", "--pos", type=position_type, help="x,y position") - group.add_argument("-g", "--geo", type=geo_type, help="lon,lat,alt position") - move_parser.set_defaults(func=move_node) - - delete_parser = subparsers.add_parser("delete", help="delete a node") - delete_parser.formatter_class = ArgumentDefaultsHelpFormatter - delete_parser.add_argument("-i", "--id", type=int, help="node id", required=True) - delete_parser.set_defaults(func=delete_node) - - -def setup_link_parser(parent) -> None: - parser = parent.add_parser("link", help="link interactions") - parser.formatter_class = ArgumentDefaultsHelpFormatter - parser.add_argument("-s", "--session", type=int, help="session to interact with") - subparsers = parser.add_subparsers(help="link commands") - subparsers.required = True - subparsers.dest = "command" - - add_parser = subparsers.add_parser("add", help="add a link") - add_parser.formatter_class = ArgumentDefaultsHelpFormatter - add_parser.add_argument("-n1", "--node1", type=int, help="node1 id", required=True) - add_parser.add_argument("-n2", "--node2", type=int, help="node2 id", required=True) - add_parser.add_argument("-i1-i", "--iface1-id", type=int, help="node1 interface id") - add_parser.add_argument( - "-i1-m", "--iface1-mac", type=mac_type, help="node1 interface mac" - ) - add_parser.add_argument( - "-i1-4", "--iface1-ip4", type=ip4_type, help="node1 interface ip4" - ) - add_parser.add_argument( - "-i1-6", "--iface1-ip6", type=ip6_type, help="node1 interface ip6" - ) - add_parser.add_argument("-i2-i", "--iface2-id", type=int, help="node2 interface id") - add_parser.add_argument( - "-i2-m", "--iface2-mac", type=mac_type, help="node2 interface mac" - ) - add_parser.add_argument( - "-i2-4", "--iface2-ip4", type=ip4_type, help="node2 interface ip4" - ) - add_parser.add_argument( - "-i2-6", "--iface2-ip6", type=ip6_type, help="node2 interface ip6" - ) - add_parser.add_argument("-b", "--bandwidth", type=int, help="bandwidth (bps)") - add_parser.add_argument("-l", "--loss", type=float, help="loss (%%)") - add_parser.add_argument("-j", "--jitter", type=int, help="jitter (us)") - add_parser.add_argument("-de", "--delay", type=int, help="delay (us)") - add_parser.add_argument("-du", "--duplicate", type=int, help="duplicate (%%)") - add_parser.add_argument( - "-u", "--uni", action="store_true", help="is link unidirectional?"
- ) - add_parser.set_defaults(func=add_link) - - edit_parser = subparsers.add_parser("edit", help="edit a link") - edit_parser.formatter_class = ArgumentDefaultsHelpFormatter - edit_parser.add_argument("-n1", "--node1", type=int, help="node1 id", required=True) - edit_parser.add_argument("-n2", "--node2", type=int, help="node2 id", required=True) - edit_parser.add_argument("-i1", "--iface1", type=int, help="node1 interface id") - edit_parser.add_argument("-i2", "--iface2", type=int, help="node2 interface id") - edit_parser.add_argument("-b", "--bandwidth", type=int, help="bandwidth (bps)") - edit_parser.add_argument("-l", "--loss", type=float, help="loss (%%)") - edit_parser.add_argument("-j", "--jitter", type=int, help="jitter (us)") - edit_parser.add_argument("-de", "--delay", type=int, help="delay (us)") - edit_parser.add_argument("-du", "--duplicate", type=int, help="duplicate (%%)") - edit_parser.add_argument( - "-u", "--uni", action="store_true", help="is link unidirectional?" - ) - edit_parser.set_defaults(func=edit_link) - - delete_parser = subparsers.add_parser("delete", help="delete a link") - delete_parser.formatter_class = ArgumentDefaultsHelpFormatter - delete_parser.add_argument( - "-n1", "--node1", type=int, help="node1 id", required=True - ) - delete_parser.add_argument( - "-n2", "--node2", type=int, help="node2 id", required=True - ) - delete_parser.add_argument("-i1", "--iface1", type=int, help="node1 interface id") - delete_parser.add_argument("-i2", "--iface2", type=int, help="node2 interface id") - delete_parser.set_defaults(func=delete_link) - - -def setup_query_parser(parent) -> None: - parser = parent.add_parser("query", help="query interactions") - subparsers = parser.add_subparsers(help="query commands") - subparsers.required = True - subparsers.dest = "command" - - sessions_parser = subparsers.add_parser("sessions", help="query current sessions") - sessions_parser.formatter_class = ArgumentDefaultsHelpFormatter - sessions_parser.set_defaults(func=query_sessions) - - session_parser = subparsers.add_parser("session", help="query session") - session_parser.formatter_class = ArgumentDefaultsHelpFormatter - session_parser.add_argument( - "-i", "--id", type=int, help="session to query", required=True - ) - session_parser.set_defaults(func=query_session) - - node_parser = subparsers.add_parser("node", help="query node") - node_parser.formatter_class = ArgumentDefaultsHelpFormatter - node_parser.add_argument( - "-i", "--id", type=int, help="session to query", required=True - ) - node_parser.add_argument( - "-n", "--node", type=int, help="node to query", required=True - ) - node_parser.set_defaults(func=query_node) - - -def setup_xml_parser(parent) -> None: - parser = parent.add_parser("xml", help="open session xml") - parser.formatter_class = ArgumentDefaultsHelpFormatter - parser.add_argument( - "-f", "--file", type=file_type, help="xml file to open", required=True - ) - parser.add_argument("-s", "--start", action="store_true", help="start the session?") - parser.set_defaults(func=open_xml) - - -def setup_wlan_parser(parent) -> None: - parser = parent.add_parser("wlan", help="wlan specific interactions") - parser.formatter_class = ArgumentDefaultsHelpFormatter - parser.add_argument("-s", "--session", type=int, help="session to interact with") - subparsers = parser.add_subparsers(help="wlan commands") - subparsers.required = True - subparsers.dest = "command" - - get_parser = subparsers.add_parser("get", help="get wlan configuration") - get_parser.formatter_class =
ArgumentDefaultsHelpFormatter - get_parser.add_argument("-n", "--node", type=int, help="wlan node", required=True) - get_parser.set_defaults(func=get_wlan_config) - - set_parser = subparsers.add_parser("set", help="set wlan configuration") - set_parser.formatter_class = ArgumentDefaultsHelpFormatter - set_parser.add_argument("-n", "--node", type=int, help="wlan node", required=True) - set_parser.add_argument("-b", "--bandwidth", type=int, help="bandwidth (bps)") - set_parser.add_argument("-d", "--delay", type=int, help="delay (us)") - set_parser.add_argument("-l", "--loss", type=float, help="loss (%%)") - set_parser.add_argument("-j", "--jitter", type=int, help="jitter (us)") - set_parser.add_argument("-r", "--range", type=int, help="range (pixels)") - set_parser.set_defaults(func=set_wlan_config) - - -def main() -> None: - parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter) - parser.add_argument( - "-js", "--json", action="store_true", help="print responses to terminal as json" - ) - subparsers = parser.add_subparsers(help="supported commands") - subparsers.required = True - subparsers.dest = "command" - setup_sessions_parser(subparsers) - setup_node_parser(subparsers) - setup_link_parser(subparsers) - setup_query_parser(subparsers) - setup_xml_parser(subparsers) - setup_wlan_parser(subparsers) - args = parser.parse_args() - args.func(args) - - -if __name__ == "__main__": - main() diff --git a/daemon/core/scripts/daemon.py b/daemon/core/scripts/daemon.py deleted file mode 100755 index 6b9caa54..00000000 --- a/daemon/core/scripts/daemon.py +++ /dev/null @@ -1,130 +0,0 @@ -""" -core-daemon: the CORE daemon is a server process that receives CORE API -messages and instantiates emulated nodes and networks within the kernel. Various -message handlers are defined and some support for sending messages. -""" - -import argparse -import logging -import os -import time -from configparser import ConfigParser -from pathlib import Path - -from core import constants -from core.api.grpc.server import CoreGrpcServer -from core.constants import CORE_CONF_DIR, COREDPY_VERSION -from core.emulator.coreemu import CoreEmu -from core.utils import load_logging_config - -logger = logging.getLogger(__name__) - - -def banner(): - """ - Output the program banner printed to the terminal or log file. - - :return: nothing - """ - logger.info("CORE daemon v.%s started %s", constants.COREDPY_VERSION, time.ctime()) - - -def cored(cfg): - """ - Start the CoreServer object and enter the server loop. - - :param dict cfg: core configuration - :return: nothing - """ - # initialize grpc api - coreemu = CoreEmu(cfg) - grpc_server = CoreGrpcServer(coreemu) - address_config = cfg["grpcaddress"] - port_config = cfg["grpcport"] - grpc_address = f"{address_config}:{port_config}" - grpc_server.listen(grpc_address) - - -def get_merged_config(filename): - """ - Return a configuration after merging config file and command-line arguments. - - :param str filename: file name to merge configuration settings with - :return: merged configuration - :rtype: dict - """ - # these are the defaults used in the config file - default_log = os.path.join(constants.CORE_CONF_DIR, "logging.conf") - default_grpc_port = "50051" - default_address = "localhost" - defaults = { - "grpcport": default_grpc_port, - "grpcaddress": default_address, - "logfile": default_log, - } - parser = argparse.ArgumentParser( - description=f"CORE daemon v.{COREDPY_VERSION} instantiates Linux network namespace nodes." 
- ) - parser.add_argument( - "-f", - "--configfile", - dest="configfile", - help=f"read config from specified file; default = {filename}", - ) - parser.add_argument( - "--ovs", - action="store_true", - help="enable experimental ovs mode, default is false", - ) - parser.add_argument( - "--grpc-port", - dest="grpcport", - help=f"grpc port to listen on; default {default_grpc_port}", - ) - parser.add_argument( - "--grpc-address", - dest="grpcaddress", - help=f"grpc address to listen on; default {default_address}", - ) - parser.add_argument( - "-l", "--logfile", help=f"core logging configuration; default {default_log}" - ) - # parse command line options - args = parser.parse_args() - # convert ovs to internal format - args.ovs = "1" if args.ovs else "0" - # read the config file - if args.configfile is not None: - filename = args.configfile - del args.configfile - cfg = ConfigParser(defaults) - cfg.read(filename) - section = "core-daemon" - if not cfg.has_section(section): - cfg.add_section(section) - # merge argparse with configparser - for opt in vars(args): - val = getattr(args, opt) - if val is not None: - cfg.set(section, opt, str(val)) - return dict(cfg.items(section)) - - -def main(): - """ - Main program startup. - - :return: nothing - """ - cfg = get_merged_config(f"{CORE_CONF_DIR}/core.conf") - log_config_path = Path(cfg["logfile"]) - load_logging_config(log_config_path) - banner() - try: - cored(cfg) - except KeyboardInterrupt: - logger.info("keyboard interrupt, stopping core daemon") - - -if __name__ == "__main__": - main() diff --git a/daemon/core/scripts/gui.py b/daemon/core/scripts/gui.py deleted file mode 100755 index 9c0560b2..00000000 --- a/daemon/core/scripts/gui.py +++ /dev/null @@ -1,50 +0,0 @@ -import argparse -import logging -from logging.handlers import TimedRotatingFileHandler - -from core.gui import appconfig, images -from core.gui.app import Application - - -def main() -> None: - # parse flags - parser = argparse.ArgumentParser(description="CORE Python GUI") - parser.add_argument( - "-l", - "--level", - choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"], - default="INFO", - help="logging level", - ) - parser.add_argument("-p", "--proxy", action="store_true", help="enable proxy") - parser.add_argument("-s", "--session", type=int, help="session id to join") - parser.add_argument( - "--create-dir", action="store_true", help="create gui directory and exit" - ) - args = parser.parse_args() - - # check home directory exists and create if necessary - appconfig.check_directory() - if args.create_dir: - return - - # setup logging - log_format = "%(asctime)s - %(levelname)s - %(module)s:%(funcName)s - %(message)s" - stream_handler = logging.StreamHandler() - file_handler = TimedRotatingFileHandler( - filename=appconfig.LOG_PATH, when="D", backupCount=5 - ) - log_level = logging.getLevelName(args.level) - logging.basicConfig( - level=log_level, format=log_format, handlers=[stream_handler, file_handler] - ) - logging.getLogger("PIL").setLevel(logging.ERROR) - - # start app - images.load_all() - app = Application(args.proxy, args.session) - app.mainloop() - - -if __name__ == "__main__": - main() diff --git a/daemon/core/scripts/player.py b/daemon/core/scripts/player.py deleted file mode 100755 index 07728939..00000000 --- a/daemon/core/scripts/player.py +++ /dev/null @@ -1,51 +0,0 @@ -import argparse -import logging -import sys -from pathlib import Path - -from core.player import CorePlayer - -logger = logging.getLogger(__name__) - - -def path_type(value: str) -> Path: - 
file_path = Path(value) - if not file_path.is_file(): - raise argparse.ArgumentTypeError(f"file does not exist: {value}") - return file_path - - -def parse_args() -> argparse.Namespace: - """ - Setup and parse command line arguments. - - :return: parsed arguments - """ - parser = argparse.ArgumentParser( - description="core player runs files that can move nodes and send commands", - formatter_class=argparse.ArgumentDefaultsHelpFormatter, - ) - parser.add_argument( - "-f", "--file", required=True, type=path_type, help="core file to play" - ) - parser.add_argument( - "-s", - "--session", - type=int, - help="session to play to, first found session otherwise", - ) - return parser.parse_args() - - -def main() -> None: - logging.basicConfig(level=logging.INFO) - args = parse_args() - player = CorePlayer(args.file) - result = player.init(args.session) - if not result: - sys.exit(1) - player.start() - - -if __name__ == "__main__": - main() diff --git a/daemon/core/scripts/routemonitor.py b/daemon/core/scripts/routemonitor.py deleted file mode 100755 index 42fbf3a9..00000000 --- a/daemon/core/scripts/routemonitor.py +++ /dev/null @@ -1,258 +0,0 @@ -import argparse -import enum -import select -import socket -import subprocess -import sys -import time -from argparse import ArgumentDefaultsHelpFormatter -from functools import cmp_to_key -from queue import Queue -from threading import Thread - -import grpc - -from core import utils -from core.api.grpc.client import CoreGrpcClient -from core.api.grpc.wrappers import NodeType - -SDT_HOST = "127.0.0.1" -SDT_PORT = 50000 -ROUTE_LAYER = "CORE Route" -DEAD_TIME = 3 -ROUTE_TIME = 3 -PACKET_CHOICES = ["udp", "tcp", "icmp"] - - -class RouteEnum(enum.Enum): - ADD = 0 - DEL = 1 - - -class SdtClient: - def __init__(self, address: tuple[str, int]) -> None: - self.sock = socket.create_connection(address) - self.links = [] - self.send(f'layer "{ROUTE_LAYER}"') - - def close(self) -> None: - self.sock.close() - - def send(self, cmd: str) -> None: - sdt_cmd = f"{cmd}\n".encode() - self.sock.sendall(sdt_cmd) - - def add_link(self, node1, node2) -> None: - route_id = f"{node1}-{node2}-r" - link_id = f"{node1},{node2},{route_id}" - cmd = f'link {link_id} linkLayer "{ROUTE_LAYER}" line yellow,2' - self.send(cmd) - self.links.append(link_id) - - def delete_links(self) -> None: - for link_id in self.links: - cmd = f"delete link,{link_id}" - self.send(cmd) - self.links.clear() - - -class RouterMonitor: - def __init__( - self, - session: int, - src: str, - dst: str, - pkt: str, - rate: int, - dead: int, - sdt_host: str, - sdt_port: int, - ) -> None: - self.queue = Queue() - self.core = CoreGrpcClient() - self.session = session - self.src_id = None - self.src = src - self.dst = dst - self.pkt = pkt - self.rate = rate - self.dead = dead - self.seen = {} - self.running = False - self.route_time = None - self.listeners = [] - self.sdt = SdtClient((sdt_host, sdt_port)) - self.nodes = self.get_nodes() - - def get_nodes(self) -> dict[int, str]: - with self.core.context_connect(): - if self.session is None: - self.session = self.get_session() - print("session: ", self.session) - try: - session = self.core.get_session(self.session) - node_map = {} - for node in session.nodes.values(): - if node.type != NodeType.DEFAULT: - continue - node_map[node.id] = node.channel - if self.src_id is None: - _, ifaces, _ = self.core.get_node(self.session, node.id) - for iface in ifaces: - if self.src == iface.ip4: - self.src_id = node.id - break - except grpc.RpcError: - print(f"invalid session: 
{self.session}") - sys.exit(1) - if self.src_id is None: - print(f"could not find node with source address: {self.src}") - sys.exit(1) - print( - f"monitoring src_id ({self.src_id}) src({self.src}) dst({self.dst}) pkt({self.pkt})" - ) - return node_map - - def get_session(self) -> int: - sessions = self.core.get_sessions() - session = None - if sessions: - session = sessions[0] - if not session: - print("no current core sessions") - sys.exit(1) - return session.id - - def start(self) -> None: - self.running = True - for node_id, node in self.nodes.items(): - print("listening on node: ", node) - thread = Thread(target=self.listen, args=(node_id, node), daemon=True) - thread.start() - self.listeners.append(thread) - self.manage() - - def manage(self) -> None: - self.route_time = time.monotonic() - while self.running: - route_enum, node, seen = self.queue.get() - if route_enum == RouteEnum.ADD: - self.seen[node] = seen - elif node in self.seen: - del self.seen[node] - - if (time.monotonic() - self.route_time) >= self.rate: - self.manage_routes() - self.route_time = time.monotonic() - - def route_sort(self, x: tuple[str, int], y: tuple[str, int]) -> int: - x_node = x[0] - y_node = y[0] - if x_node == self.src_id: - return 1 - if y_node == self.src_id: - return -1 - x_ttl, y_ttl = x[1], y[1] - return x_ttl - y_ttl - - def manage_routes(self) -> None: - self.sdt.delete_links() - if not self.seen: - return - values = sorted( - self.seen.items(), key=cmp_to_key(self.route_sort), reverse=True - ) - print("current route:") - for index, node_data in enumerate(values): - next_index = index + 1 - if next_index == len(values): - break - next_node_id = values[next_index][0] - node_id, ttl = node_data - print(f"{node_id} -> {next_node_id}") - self.sdt.add_link(node_id, next_node_id) - - def stop(self) -> None: - self.running = False - self.sdt.delete_links() - self.sdt.close() - for thread in self.listeners: - thread.join() - self.listeners.clear() - - def listen(self, node_id, node) -> None: - cmd = f"tcpdump -lnvi any src host {self.src} and dst host {self.dst} and {self.pkt}" - node_cmd = f"vcmd -c {node} -- {cmd}" - p = subprocess.Popen( - node_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL - ) - current = time.monotonic() - try: - while not p.poll() and self.running: - ready, _, _ = select.select([p.stdout], [], [], 1) - if ready: - line = p.stdout.readline().strip().decode() - if line: - line = line.split("ttl", 1)[1] - ttl = int(line.split(",", 1)[0]) - p.stdout.readline() - self.queue.put((RouteEnum.ADD, node_id, ttl)) - current = time.monotonic() - else: - if (time.monotonic() - current) >= self.dead: - self.queue.put((RouteEnum.DEL, node_id, None)) - except Exception as e: - print(f"listener error: {e}") - - -def main() -> None: - if not utils.which("tcpdump", required=False): - print("core-route-monitor requires tcpdump to be installed") - return - - desc = "core route monitor leverages tcpdump to monitor traffic and find route using TTL" - parser = argparse.ArgumentParser( - description=desc, formatter_class=ArgumentDefaultsHelpFormatter - ) - parser.add_argument( - "--src", required=True, help="source address for route monitoring" - ) - parser.add_argument( - "--dst", required=True, help="destination address for route monitoring" - ) - parser.add_argument("--session", type=int, help="session to monitor route") - parser.add_argument( - "--pkt", default="icmp", choices=PACKET_CHOICES, help="packet type" - ) - parser.add_argument( - "--rate", type=int, default=ROUTE_TIME, 
help="rate to update route, in seconds" - ) - parser.add_argument( - "--dead", - type=int, - default=DEAD_TIME, - help="timeout to declare path dead, in seconds", - ) - parser.add_argument("--sdt-host", default=SDT_HOST, help="sdt host address") - parser.add_argument("--sdt-port", type=int, default=SDT_PORT, help="sdt port") - args = parser.parse_args() - - monitor = RouterMonitor( - args.session, - args.src, - args.dst, - args.pkt, - args.rate, - args.dead, - args.sdt_host, - args.sdt_port, - ) - try: - monitor.start() - except KeyboardInterrupt: - monitor.stop() - print("ending route monitor") - - -if __name__ == "__main__": - main() diff --git a/daemon/core/scripts/serviceupdate.py b/daemon/core/scripts/serviceupdate.py deleted file mode 100755 index 50ada96d..00000000 --- a/daemon/core/scripts/serviceupdate.py +++ /dev/null @@ -1,71 +0,0 @@ -import argparse -import re -from io import TextIOWrapper - - -def parse_args() -> argparse.Namespace: - parser = argparse.ArgumentParser( - description="Helps transition older CORE services to work with newer versions" - ) - parser.add_argument( - "-f", - "--file", - dest="file", - type=argparse.FileType("r"), - help="service file to update", - ) - return parser.parse_args() - - -def update_service(service_file: TextIOWrapper) -> None: - update = [] - for line in service_file.readlines(): - # update service attributes - line = re.sub(r"^(\s+)_([a-z])", r"\1\2", line) - # rename dirs to directories - line = re.sub(r"^(\s+)dirs", r"\1directories", line) - # fix import states for service - line = re.sub( - r"^.+import.+CoreService.+$", - r"from core.services.coreservices import CoreService", - line, - ) - # fix method signatures - line = re.sub( - r"def generateconfig\(cls, node, filename, services\)", - r"def generate_config(cls, node, filename)", - line, - ) - line = re.sub( - r"def getvalidate\(cls, node, services\)", - r"def get_validate(cls, node)", - line, - ) - line = re.sub( - r"def getstartup\(cls, node, services\)", - r"def get_startup(cls, node)", - line, - ) - line = re.sub( - r"def getconfigfilenames\(cls, nodenum, services\)", - r"def get_configs(cls, node)", - line, - ) - # remove unwanted lines - if re.search(r"addservice\(", line): - continue - if re.search(r"from.+\.ipaddr|import ipaddr", line): - continue - if re.search(r"from.+\.ipaddress|import ipaddress", line): - continue - # add modified line to make updated copy - update.append(line) - service_file.close() - - with open(f"{service_file.name}.update", "w") as f: - f.writelines(update) - - -if __name__ == "__main__": - args = parse_args() - update_service(args.file) diff --git a/daemon/core/sdt.py b/daemon/core/sdt.py new file mode 100644 index 00000000..21843d98 --- /dev/null +++ b/daemon/core/sdt.py @@ -0,0 +1,480 @@ +""" +sdt.py: Scripted Display Tool (SDT3D) helper +""" + +import logging +import socket +from urlparse import urlparse + +from core import constants +from core.coreobj import PyCoreNet +from core.coreobj import PyCoreObj +from core.enumerations import EventTypes +from core.enumerations import LinkTlvs +from core.enumerations import LinkTypes +from core.enumerations import MessageFlags +from core.enumerations import MessageTypes +from core.enumerations import NodeTlvs +from core.enumerations import NodeTypes +from core.misc import nodeutils + + +# TODO: A named tuple may be more appropriate, than abusing a class dict like this +class Bunch(object): + """ + Helper class for recording a collection of attributes. 
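The TODO above Bunch suggests a named tuple instead of an attribute bag. A minimal sketch of that alternative (hypothetical, not part of this patch; field names mirror how self.remotes entries are used later in this module):

    from collections import namedtuple

    # fixed field set instead of an arbitrary __dict__ update
    RemoteNode = namedtuple("RemoteNode", "objid type icon name net links pos")

    remote = RemoteNode(objid=5, type="router", icon=None, name="n5",
                        net=False, links=set(), pos=(100, 150, None))
    # unlike Bunch, the tuple itself is immutable, so position updates would
    # use _replace() rather than attribute assignment
    remote = remote._replace(pos=(120, 150, None))
    print(remote.name, remote.pos)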
+ """ + + def __init__(self, **kwds): + """ + Create a Bunch instance. + + :param dict kwds: keyword arguments + :return: + """ + self.__dict__.update(kwds) + + +class Sdt(object): + """ + Helper class for exporting session objects to NRL"s SDT3D. + The connect() method initializes the display, and can be invoked + when a node position or link has changed. + """ + DEFAULT_SDT_URL = "tcp://127.0.0.1:50000/" + # default altitude (in meters) for flyto view + DEFAULT_ALT = 2500 + # TODO: read in user"s nodes.conf here; below are default node types from the GUI + DEFAULT_SPRITES = [ + ("router", "router.gif"), + ("host", "host.gif"), + ("PC", "pc.gif"), + ("mdr", "mdr.gif"), + ("prouter", "router_green.gif"), + ("hub", "hub.gif"), + ("lanswitch", "lanswitch.gif"), + ("wlan", "wlan.gif"), + ("rj45", "rj45.gif"), + ("tunnel", "tunnel.gif"), + ] + + def __init__(self, session): + """ + Creates a Sdt instance. + + :param core.session.Session session: session this manager is tied to + """ + self.session = session + self.sock = None + self.connected = False + self.showerror = True + self.url = self.DEFAULT_SDT_URL + # node information for remote nodes not in session._objs + # local nodes also appear here since their obj may not exist yet + self.remotes = {} + session.broker.handlers.add(self.handle_distributed) + + # add handler for node updates + self.session.node_handlers.append(self.handle_node_update) + + # add handler for link updates + self.session.link_handlers.append(self.handle_link_update) + + def handle_node_update(self, node_data): + """ + Handler for node updates, specifically for updating their location. + + :param core.data.NodeData node_data: node data being updated + :return: nothing + """ + x = node_data.x_position + y = node_data.y_position + lat = node_data.latitude + lon = node_data.longitude + alt = node_data.altitude + + if all([lat, lon, alt]): + self.updatenodegeo(node_data.id, node_data.latitude, node_data.longitude, node_data.altitude) + + if node_data.message_type == 0: + # TODO: z is not currently supported by node messages + self.updatenode(node_data.id, 0, x, y, 0) + + def handle_link_update(self, link_data): + """ + Handler for link updates, checking for wireless link/unlink messages. + + :param core.data.LinkData link_data: link data being updated + :return: nothing + """ + if link_data.link_type == LinkTypes.WIRELESS.value: + self.updatelink(link_data.node1_id, link_data.node2_id, link_data.message_type, wireless=True) + + def is_enabled(self): + """ + Check for "enablesdt" session option. Return False by default if + the option is missing. + + :return: True if enabled, False otherwise + :rtype: bool + """ + return self.session.options.get_config("enablesdt") == "1" + + def seturl(self): + """ + Read "sdturl" from session options, or use the default value. + Set self.url, self.address, self.protocol + + :return: nothing + """ + url = self.session.options.get_config("stdurl") + if not url: + url = self.DEFAULT_SDT_URL + self.url = urlparse(url) + self.address = (self.url.hostname, self.url.port) + self.protocol = self.url.scheme + + def connect(self, flags=0): + """ + Connect to the SDT address/port if enabled. 
+ + :return: True if connected, False otherwise + :rtype: bool + """ + if not self.is_enabled(): + return False + if self.connected: + return True + if self.session.state == EventTypes.SHUTDOWN_STATE.value: + return False + + self.seturl() + logging.info("connecting to SDT at %s://%s" % (self.protocol, self.address)) + if self.sock is None: + try: + if self.protocol.lower() == "udp": + self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + self.sock.connect(self.address) + else: + # Default to tcp + self.sock = socket.create_connection(self.address, 5) + except IOError: + logging.exception("SDT socket connect error") + return False + + if not self.initialize(): + return False + + self.connected = True + # refresh all objects in SDT3D when connecting after session start + if not flags & MessageFlags.ADD.value and not self.sendobjs(): + return False + + return True + + def initialize(self): + """ + Load icon sprites, and fly to the reference point location on + the virtual globe. + + :return: initialize command status + :rtype: bool + """ + if not self.cmd("path \"%s/icons/normal\"" % constants.CORE_DATA_DIR): + return False + # send node type to icon mappings + for node_type, icon in self.DEFAULT_SPRITES: + if not self.cmd("sprite %s image %s" % (node_type, icon)): + return False + lat, long = self.session.location.refgeo[:2] + return self.cmd("flyto %.6f,%.6f,%d" % (long, lat, self.DEFAULT_ALT)) + + def disconnect(self): + """ + Disconnect from SDT. + + :return: nothing + """ + if self.sock: + try: + self.sock.close() + except IOError: + logging.error("error closing socket") + finally: + self.sock = None + + self.connected = False + + def shutdown(self): + """ + Invoked from Session.shutdown() and Session.checkshutdown(). + + :return: nothing + """ + logging.info("SDT shutdown!") + self.cmd("clear all") + self.disconnect() + self.showerror = True + + def cmd(self, cmdstr): + """ + Send an SDT command over a UDP socket. socket.sendall() is used + as opposed to socket.sendto() because an exception is raised when + there is no socket listener. + + :param str cmdstr: command to send + :return: True if command was successful, False otherwise + :rtype: bool + """ + if self.sock is None: + return False + try: + logging.info("sdt: %s" % cmdstr) + self.sock.sendall("%s\n" % cmdstr) + return True + except IOError: + logging.exception("SDT connection error") + self.sock = None + self.connected = False + return False + + def updatenode(self, nodenum, flags, x, y, z, name=None, node_type=None, icon=None): + """ + Node is updated from a Node Message or mobility script. 
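For context, SDT3D is driven by newline-terminated text commands, which is all Sdt.cmd() sends over the socket. A standalone sketch of the same traffic, assuming an SDT3D listener on the default tcp://127.0.0.1:50000 and using command strings modeled on the calls made by this class:

    import socket

    # one text command per line, terminated by a newline, as Sdt.cmd() does
    sock = socket.create_connection(("127.0.0.1", 50000), 5)
    for command in (
        'sprite router image router.gif',
        'node 1 type router label on,"n1" pos -77.016000,38.889000,2.000000',
        'node 2 type router label on,"n2" pos -77.015000,38.890000,2.000000',
        "link 1,2 line green,2",
    ):
        sock.sendall((command + "\n").encode())
    sock.close()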
+ + :param int nodenum: node id to update + :param flags: update flags + :param x: x position + :param y: y position + :param z: z position + :param str name: node name + :param node_type: node type + :param icon: node icon + :return: nothing + """ + if not self.connect(): + return + if flags & MessageFlags.DELETE.value: + self.cmd("delete node,%d" % nodenum) + return + if x is None or y is None: + return + lat, lon, alt = self.session.location.getgeo(x, y, z) + pos = "pos %.6f,%.6f,%.6f" % (lon, lat, alt) + if flags & MessageFlags.ADD.value: + if icon is not None: + node_type = name + icon = icon.replace("$CORE_DATA_DIR", constants.CORE_DATA_DIR) + icon = icon.replace("$CORE_CONF_DIR", constants.CORE_CONF_DIR) + self.cmd("sprite %s image %s" % (type, icon)) + self.cmd("node %d type %s label on,\"%s\" %s" % (nodenum, node_type, name, pos)) + else: + self.cmd("node %d %s" % (nodenum, pos)) + + def updatenodegeo(self, nodenum, lat, long, alt): + """ + Node is updated upon receiving an EMANE Location Event. + + :param int nodenum: node id to update geospatial for + :param lat: latitude + :param long: longitude + :param alt: altitude + :return: nothing + """ + + # TODO: received Node Message with lat/long/alt. + if not self.connect(): + return + pos = "pos %.6f,%.6f,%.6f" % (long, lat, alt) + self.cmd("node %d %s" % (nodenum, pos)) + + def updatelink(self, node1num, node2num, flags, wireless=False): + """ + Link is updated from a Link Message or by a wireless model. + + :param int node1num: node one id + :param int node2num: node two id + :param flags: link flags + :param bool wireless: flag to check if wireless or not + :return: nothing + """ + if node1num is None or node2num is None: + return + if not self.connect(): + return + if flags & MessageFlags.DELETE.value: + self.cmd("delete link,%s,%s" % (node1num, node2num)) + elif flags & MessageFlags.ADD.value: + attr = "" + if wireless: + attr = " line green,2" + else: + attr = " line red,2" + self.cmd("link %s,%s%s" % (node1num, node2num, attr)) + + def sendobjs(self): + """ + Session has already started, and the SDT3D GUI later connects. + Send all node and link objects for display. Otherwise, nodes and + links will only be drawn when they have been updated (e.g. moved). 
+ + :return: nothing + """ + nets = [] + with self.session._objects_lock: + for obj in self.session.objects.itervalues(): + if isinstance(obj, PyCoreNet): + nets.append(obj) + if not isinstance(obj, PyCoreObj): + continue + (x, y, z) = obj.getposition() + if x is None or y is None: + continue + self.updatenode(obj.objid, MessageFlags.ADD.value, x, y, z, + obj.name, obj.type, obj.icon) + for nodenum in sorted(self.remotes.keys()): + r = self.remotes[nodenum] + x, y, z = r.pos + self.updatenode(nodenum, MessageFlags.ADD.value, x, y, z, + r.name, r.type, r.icon) + + for net in nets: + all_links = net.all_link_data(flags=MessageFlags.ADD.value) + for link_data in all_links: + is_wireless = nodeutils.is_node(net, (NodeTypes.WIRELESS_LAN, NodeTypes.EMANE)) + wireless_link = link_data.message_type == LinkTypes.WIRELESS.value + if is_wireless and link_data.node1_id == net.objid: + continue + + self.updatelink( + link_data.node1_id, + link_data.node2_id, + MessageFlags.ADD.value, + wireless_link + ) + + for n1num in sorted(self.remotes.keys()): + r = self.remotes[n1num] + for n2num, wireless_link in r.links: + self.updatelink(n1num, n2num, MessageFlags.ADD.value, wireless_link) + + def handle_distributed(self, message): + """ + Broker handler for processing CORE API messages as they are + received. This is used to snoop the Node messages and update + node positions. + + :param message: message to handle + :return: replies + """ + if message.message_type == MessageTypes.LINK.value: + return self.handlelinkmsg(message) + elif message.message_type == MessageTypes.NODE.value: + return self.handlenodemsg(message) + + def handlenodemsg(self, msg): + """ + Process a Node Message to add/delete or move a node on + the SDT display. Node properties are found in session._objs or + self.remotes for remote nodes (or those not yet instantiated). + + :param msg: node message to handle + :return: nothing + """ + # for distributed sessions to work properly, the SDT option should be + # enabled prior to starting the session + if not self.is_enabled(): + return False + # node.(objid, type, icon, name) are used. + nodenum = msg.get_tlv(NodeTlvs.NUMBER.value) + if not nodenum: + return + x = msg.get_tlv(NodeTlvs.X_POSITION.value) + y = msg.get_tlv(NodeTlvs.Y_POSITION.value) + z = None + name = msg.get_tlv(NodeTlvs.NAME.value) + + nodetype = msg.get_tlv(NodeTlvs.TYPE.value) + model = msg.get_tlv(NodeTlvs.MODEL.value) + icon = msg.get_tlv(NodeTlvs.ICON.value) + + net = False + if nodetype == NodeTypes.DEFAULT.value or \ + nodetype == NodeTypes.PHYSICAL.value: + if model is None: + model = "router" + nodetype = model + elif nodetype is not None: + nodetype = nodeutils.get_node_class(NodeTypes(nodetype)).type + net = True + else: + nodetype = None + + try: + node = self.session.get_object(nodenum) + except KeyError: + node = None + if node: + self.updatenode(node.objid, msg.flags, x, y, z, node.name, node.type, node.icon) + else: + if nodenum in self.remotes: + remote = self.remotes[nodenum] + if name is None: + name = remote.name + if nodetype is None: + nodetype = remote.type + if icon is None: + icon = remote.icon + else: + remote = Bunch(objid=nodenum, type=nodetype, icon=icon, name=name, net=net, links=set()) + self.remotes[nodenum] = remote + remote.pos = (x, y, z) + self.updatenode(nodenum, msg.flags, x, y, z, name, nodetype, icon) + + def handlelinkmsg(self, msg): + """ + Process a Link Message to add/remove links on the SDT display. 
+ Links are recorded in the remotes[nodenum1].links set for updating + the SDT display at a later time. + + :param msg: link message to handle + :return: nothing + """ + if not self.is_enabled(): + return False + nodenum1 = msg.get_tlv(LinkTlvs.N1_NUMBER.value) + nodenum2 = msg.get_tlv(LinkTlvs.N2_NUMBER.value) + link_msg_type = msg.get_tlv(LinkTlvs.TYPE.value) + # this filters out links to WLAN and EMANE nodes which are not drawn + if self.wlancheck(nodenum1): + return + wl = link_msg_type == LinkTypes.WIRELESS.value + if nodenum1 in self.remotes: + r = self.remotes[nodenum1] + if msg.flags & MessageFlags.DELETE.value: + if (nodenum2, wl) in r.links: + r.links.remove((nodenum2, wl)) + else: + r.links.add((nodenum2, wl)) + self.updatelink(nodenum1, nodenum2, msg.flags, wireless=wl) + + def wlancheck(self, nodenum): + """ + Helper returns True if a node number corresponds to a WlanNode or EmaneNode. + + :param int nodenum: node id to check + :return: True if node is wlan or emane, False otherwise + :rtype: bool + """ + if nodenum in self.remotes: + node_type = self.remotes[nodenum].type + if node_type in ("wlan", "emane"): + return True + else: + try: + n = self.session.get_object(nodenum) + except KeyError: + return False + if nodeutils.is_node(n, (NodeTypes.WIRELESS_LAN, NodeTypes.EMANE)): + return True + return False diff --git a/daemon/core/service.py b/daemon/core/service.py new file mode 100644 index 00000000..ba8a98ed --- /dev/null +++ b/daemon/core/service.py @@ -0,0 +1,853 @@ +""" +Definition of CoreService class that is subclassed to define +startup services and routing for nodes. A service is typically a daemon +program launched when a node starts that provides some sort of service. +The CoreServices class handles configuration messages for sending +a list of available services to the GUI and for configuring individual +services. +""" + +import logging +import time +from multiprocessing.pool import ThreadPool + +import enum +from core.constants import which + +from core import CoreCommandError +from core.data import FileData +from core.enumerations import MessageFlags +from core.enumerations import RegisterTlvs +from core.misc import utils + + +class ServiceBootError(Exception): + pass + + +class ServiceMode(enum.Enum): + BLOCKING = 0 + NON_BLOCKING = 1 + TIMER = 2 + + +class ServiceDependencies(object): + """ + Can generate boot paths for services, based on their dependencies. Will validate + that all services will be booted and that all dependencies exist within the services provided. + """ + + def __init__(self, services): + # helpers to check validity + self.dependents = {} + self.booted = set() + self.node_services = {} + for service in services: + self.node_services[service.name] = service + for dependency in service.dependencies: + dependents = self.dependents.setdefault(dependency, set()) + dependents.add(service.name) + + # used to find paths + self.path = [] + self.visited = set() + self.visiting = set() + + def boot_paths(self): + """ + Generates the boot paths for the services provided to the class. 
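ServiceDependencies amounts to a depth-first topological sort with cycle detection over service names. A condensed sketch of the same idea, independent of the CoreService classes (the service and dependency names below are illustrative):

    def boot_order(services):
        """Order service names so dependencies boot before their dependents.

        services maps name -> iterable of dependency names; unknown or cyclic
        dependencies raise ValueError, as in ServiceDependencies.
        """
        order, visited, visiting = [], set(), set()

        def visit(name):
            if name in visiting:
                raise ValueError("cyclic dependency at service: %s" % name)
            if name in visited:
                return
            if name not in services:
                raise ValueError("required dependency was not included: %s" % name)
            visiting.add(name)
            for dependency in services[name]:
                visit(dependency)
            visiting.remove(name)
            visited.add(name)
            order.append(name)

        for name in services:
            visit(name)
        return order

    # zebra boots before OSPFv2, which boots before a service depending on it
    print(boot_order({"zebra": [], "OSPFv2": ["zebra"], "MyApp": ["OSPFv2"]}))
    # ['zebra', 'OSPFv2', 'MyApp']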
+ + :return: list of services to boot, in order + :rtype: list[core.service.CoreService] + """ + paths = [] + for service in self.node_services.itervalues(): + if service.name in self.booted: + logging.debug("skipping service that will already be booted: %s", service.name) + continue + + path = self._start(service) + if path: + paths.append(path) + + if self.booted != set(self.node_services.iterkeys()): + raise ValueError("failure to boot all services: %s != %s" % (self.booted, self.node_services.keys())) + + return paths + + def _reset(self): + self.path = [] + self.visited.clear() + self.visiting.clear() + + def _start(self, service): + logging.debug("starting service dependency check: %s", service.name) + self._reset() + return self._visit(service) + + def _visit(self, current_service): + logging.debug("visiting service(%s): %s", current_service.name, self.path) + self.visited.add(current_service.name) + self.visiting.add(current_service.name) + + # dive down + for service_name in current_service.dependencies: + if service_name not in self.node_services: + raise ValueError("required dependency was not included in node services: %s" % service_name) + + if service_name in self.visiting: + raise ValueError("cyclic dependency at service(%s): %s" % (current_service.name, service_name)) + + if service_name not in self.visited: + service = self.node_services[service_name] + self._visit(service) + + # add service when bottom is found + logging.debug("adding service to boot path: %s", current_service.name) + self.booted.add(current_service.name) + self.path.append(current_service) + self.visiting.remove(current_service.name) + + # rise back up + for service_name in self.dependents.get(current_service.name, []): + if service_name not in self.visited: + service = self.node_services[service_name] + self._visit(service) + + return self.path + + +class ServiceShim(object): + keys = ["dirs", "files", "startidx", "cmdup", "cmddown", "cmdval", "meta", "starttime"] + + @classmethod + def tovaluelist(cls, node, service): + """ + Convert service properties into a string list of key=value pairs, + separated by "|". + + :param core.netns.nodes.CoreNode node: node to get value list for + :param CoreService service: service to get value list for + :return: value list string + :rtype: str + """ + start_time = 0 + start_index = 0 + valmap = [service.dirs, service.configs, start_index, service.startup, + service.shutdown, service.validate, service.meta, start_time] + if not service.custom: + valmap[1] = service.get_configs(node) + valmap[3] = service.get_startup(node) + vals = ["%s=%s" % (x, y) for x, y in zip(cls.keys, valmap)] + return "|".join(vals) + + @classmethod + def fromvaluelist(cls, service, values): + """ + Convert list of values into properties for this instantiated + (customized) service. + + :param CoreService service: service to get value list for + :param dict values: value list to set properties from + :return: nothing + """ + # TODO: support empty value? e.g. override default meta with '' + for key in cls.keys: + try: + cls.setvalue(service, key, values[cls.keys.index(key)]) + except IndexError: + # old config does not need to have new keys + logging.exception("error indexing into key") + + @classmethod + def setvalue(cls, service, key, value): + """ + Set values for this service. 
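ServiceShim flattens a customized service into a single string of key=value pairs joined by "|", in the order fixed by ServiceShim.keys. A rough round trip with made-up values:

    keys = ["dirs", "files", "startidx", "cmdup", "cmddown",
            "cmdval", "meta", "starttime"]
    values = [("/etc/myapp",), ("/etc/myapp/app.conf",), 0,
              ("myapp -d",), ("killall myapp",), ("pidof myapp",), "", 0]

    # encode as tovaluelist() does
    encoded = "|".join("%s=%s" % (key, value) for key, value in zip(keys, values))

    # decode by splitting on "|" and the first "="; values containing "|"
    # cannot be represented, which is a limitation of the shim format itself
    decoded = dict(item.split("=", 1) for item in encoded.split("|"))
    print(decoded["cmdup"])  # ('myapp -d',)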
+ + :param CoreService service: service to get value list for + :param str key: key to set value for + :param value: value of key to set + :return: nothing + """ + if key not in cls.keys: + raise ValueError("key `%s` not in `%s`" % (key, cls.keys)) + # this handles data conversion to int, string, and tuples + if value: + if key == "startidx": + value = int(value) + elif key == "meta": + value = str(value) + else: + value = utils.make_tuple_fromstr(value, str) + + if key == "dirs": + service.dirs = value + elif key == "files": + service.configs = value + elif key == "cmdup": + service.startup = value + elif key == "cmddown": + service.shutdown = value + elif key == "cmdval": + service.validate = value + elif key == "meta": + service.meta = value + + @classmethod + def servicesfromopaque(cls, opaque): + """ + Build a list of services from an opaque data string. + + :param str opaque: opaque data string + :return: services + :rtype: list + """ + servicesstring = opaque.split(':') + if servicesstring[0] != "service": + return [] + return servicesstring[1].split(',') + + +class ServiceManager(object): + """ + Manages services available for CORE nodes to use. + """ + services = {} + + @classmethod + def add(cls, service): + """ + Add a service to manager. + + :param CoreService service: service to add + :return: nothing + """ + name = service.name + logging.debug("loading service: class(%s) name(%s)", service.__name__, name) + + # avoid duplicate services + if name in cls.services: + raise ValueError("duplicate service being added: %s" % name) + + # validate dependent executables are present + for executable in service.executables: + if not which(executable): + logging.debug("service(%s) missing executable: %s", service.name, executable) + raise ValueError("service(%s) missing executable: %s" % (service.name, executable)) + + # make service available + cls.services[name] = service + + @classmethod + def get(cls, name): + """ + Retrieve a service from the manager. + + :param str name: name of the service to retrieve + :return: service if it exists, None otherwise + :rtype: CoreService.class + """ + return cls.services.get(name) + + @classmethod + def add_services(cls, path): + """ + Method for retrieving all CoreServices from a given path. + + :param str path: path to retrieve services from + :return: list of core services that failed to load + :rtype: list[str] + """ + service_errors = [] + services = utils.load_classes(path, CoreService) + for service in services: + if not service.name: + continue + service.on_load() + + try: + cls.add(service) + except ValueError as e: + service_errors.append(service.name) + logging.debug("not loading service: %s", e) + return service_errors + + +class CoreServices(object): + """ + Class for interacting with a list of available startup services for + nodes. Mostly used to convert a CoreService into a Config API + message. This class lives in the Session object and remembers + the default services configured for each node type, and any + custom service configuration. A CoreService is not a Configurable. + """ + name = "services" + config_type = RegisterTlvs.UTILITY.value + + def __init__(self, session): + """ + Creates a CoreServices instance. 
+ + :param core.session.Session session: session this manager is tied to + """ + self.session = session + # dict of default services tuples, key is node type + self.default_services = {} + # dict of node ids to dict of custom services by name + self.custom_services = {} + + def reset(self): + """ + Called when config message with reset flag is received + """ + self.custom_services.clear() + + def get_default_services(self, node_type): + """ + Get the list of default services that should be enabled for a + node for the given node type. + + :param node_type: node type to get default services for + :return: default services + :rtype: list[CoreService] + """ + logging.debug("getting default services for type: %s", node_type) + results = [] + defaults = self.default_services.get(node_type, []) + for name in defaults: + logging.debug("checking for service with service manager: %s", name) + service = ServiceManager.get(name) + if not service: + logging.warn("default service %s is unknown", name) + else: + results.append(service) + return results + + def get_service(self, node_id, service_name, default_service=False): + """ + Get any custom service configured for the given node that matches the specified service name. + If no custom service is found, return the specified service. + + :param int node_id: object id to get service from + :param str service_name: name of service to retrieve + :param bool default_service: True to return default service when custom does not exist, False returns None + :return: custom service from the node + :rtype: CoreService + """ + node_services = self.custom_services.setdefault(node_id, {}) + default = None + if default_service: + default = ServiceManager.get(service_name) + return node_services.get(service_name, default) + + def set_service(self, node_id, service_name): + """ + Store service customizations in an instantiated service object + using a list of values that came from a config message. + + :param int node_id: object id to set custom service for + :param str service_name: name of service to set + :return: nothing + """ + logging.debug("setting custom service(%s) for node: %s", service_name, node_id) + service = self.get_service(node_id, service_name) + if not service: + service_class = ServiceManager.get(service_name) + service = service_class() + + # add the custom service to dict + node_services = self.custom_services.setdefault(node_id, {}) + node_services[service.name] = service + + def add_services(self, node, node_type, services=None): + """ + Add services to a node. + + :param core.coreobj.PyCoreNode node: node to add services to + :param str node_type: node type to add services to + :param list[str] services: names of services to add to node + :return: nothing + """ + if not services: + logging.info("using default services for node(%s) type(%s)", node.name, node_type) + services = self.default_services.get(node_type, []) + + logging.info("setting services for node(%s): %s", node.name, services) + for service_name in services: + service = self.get_service(node.objid, service_name, default_service=True) + if not service: + logging.warn("unknown service(%s) for node(%s)", service_name, node.name) + continue + logging.info("adding service to node(%s): %s", node.name, service_name) + node.addservice(service) + + def all_configs(self): + """ + Return (node_id, service) tuples for all stored configs. Used when reconnecting to a + session or opening XML. 
+ + :return: list of tuples of node ids and services + :rtype: list[tuple] + """ + configs = [] + for node_id in self.custom_services.iterkeys(): + for service in self.custom_services[node_id].itervalues(): + configs.append((node_id, service)) + return configs + + def all_files(self, service): + """ + Return all customized files stored with a service. + Used when reconnecting to a session or opening XML. + + :param CoreService service: service to get files for + :return: list of all custom service files + :rtype: list[tuple] + """ + files = [] + if not service.custom: + return files + + for filename in service.configs: + data = service.config_data.get(filename) + if data is None: + continue + files.append((filename, data)) + + return files + + def boot_services(self, node): + """ + Start all services on a node. + + :param core.netns.vnode.LxcNode node: node to start services on + :return: nothing + """ + pool = ThreadPool() + results = [] + + boot_paths = ServiceDependencies(node.services).boot_paths() + for boot_path in boot_paths: + result = pool.apply_async(self._start_boot_paths, (node, boot_path)) + results.append(result) + + pool.close() + pool.join() + for result in results: + result.get() + + def _start_boot_paths(self, node, boot_path): + """ + Start all service boot paths found, based on dependencies. + + :param core.netns.vnode.LxcNode node: node to start services on + :param list[CoreService] boot_path: service to start in dependent order + :return: nothing + """ + logging.info("booting node services: %s", " -> ".join([x.name for x in boot_path])) + for service in boot_path: + try: + self.boot_service(node, service) + except: + logging.exception("exception booting service: %s", service.name) + raise + + def boot_service(self, node, service): + """ + Start a service on a node. Create private dirs, generate config + files, and execute startup commands. + + :param core.netns.vnode.LxcNode node: node to boot services on + :param CoreService service: service to start + :return: nothing + """ + logging.info("starting node(%s) service(%s) validation(%s)", node.name, service.name, + service.validation_mode.name) + + # create service directories + for directory in service.dirs: + try: + node.privatedir(directory) + except (CoreCommandError, ValueError) as e: + logging.warn("error mounting private dir '%s' for service '%s': %s", + directory, service.name, e) + + # create service files + self.create_service_files(node, service) + + # run startup + wait = service.validation_mode == ServiceMode.BLOCKING + status = self.startup_service(node, service, wait) + if status: + raise ServiceBootError("node(%s) service(%s) error during startup" % (node.name, service.name)) + + # blocking mode is finished + if wait: + return + + # timer mode, sleep and return + if service.validation_mode == ServiceMode.TIMER: + time.sleep(service.validation_timer) + # non-blocking, attempt to validate periodically, up to validation_timer time + elif service.validation_mode == ServiceMode.NON_BLOCKING: + start = time.time() + while True: + status = self.validate_service(node, service) + if not status: + break + + if time.time() - start > service.validation_timer: + break + + time.sleep(service.validation_period) + + if status: + raise ServiceBootError("node(%s) service(%s) failed validation" % (node.name, service.name)) + + def copy_service_file(self, node, filename, cfg): + """ + Given a configured service filename and config, determine if the + config references an existing file that should be copied. 
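For NON_BLOCKING services, boot_service() polls the validate commands until they pass or validation_timer expires, sleeping validation_period between attempts. The polling loop in isolation, with a stand-in check function replacing validate_service():

    import time

    def wait_for_validation(check, validation_timer=5.0, validation_period=0.5):
        # returns 0 once check() succeeds, or its last failing status on timeout
        start = time.time()
        while True:
            status = check()
            if not status:
                return 0
            if time.time() - start > validation_timer:
                return status
            time.sleep(validation_period)

    # example: a check that only succeeds on the third attempt
    attempts = {"count": 0}

    def fake_check():
        attempts["count"] += 1
        return 0 if attempts["count"] >= 3 else -1

    print(wait_for_validation(fake_check))  # 0, after roughly a second of polling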
+ Returns True for local files, False for generated. + + :param core.netns.vnode.LxcNode node: node to copy service for + :param str filename: file name for a configured service + :param str cfg: configuration string + :return: True if successful, False otherwise + :rtype: bool + """ + if cfg[:7] == 'file://': + src = cfg[7:] + src = src.split('\n')[0] + src = utils.expand_corepath(src, node.session, node) + # TODO: glob here + node.nodefilecopy(filename, src, mode=0644) + return True + return False + + def validate_service(self, node, service): + """ + Run the validation command(s) for a service. + + :param core.netns.vnode.LxcNode node: node to validate service for + :param CoreService service: service to validate + :return: service validation status + :rtype: int + """ + logging.info("validating node(%s) service(%s)", node.name, service.name) + cmds = service.validate + if not service.custom: + cmds = service.get_validate(node) + + status = 0 + for cmd in cmds: + logging.debug("validating service(%s) using: %s", service.name, cmd) + try: + node.check_cmd(cmd) + except CoreCommandError as e: + logging.error("node(%s) service(%s) validate failed", node.name, service.name) + logging.error("cmd(%s): %s", e.cmd, e.output) + status = -1 + break + + return status + + def stop_services(self, node): + """ + Stop all services on a node. + + :param core.netns.nodes.CoreNode node: node to stop services on + :return: nothing + """ + for service in node.services: + self.stop_service(node, service) + + def stop_service(self, node, service): + """ + Stop a service on a node. + + :param core.netns.vnode.LxcNode node: node to stop a service on + :param CoreService service: service to stop + :return: status for stopping the services + :rtype: str + """ + status = 0 + for args in service.shutdown: + try: + node.check_cmd(args) + except CoreCommandError: + logging.exception("error running stop command %s", args) + status = -1 + return status + + def get_service_file(self, node, service_name, filename): + """ + Send a File Message when the GUI has requested a service file. + The file data is either auto-generated or comes from an existing config. + + :param core.netns.vnode.LxcNode node: node to get service file from + :param str service_name: service to get file from + :param str filename: file name to retrieve + :return: file message for node + """ + # get service to get file from + service = self.get_service(node.objid, service_name, default_service=True) + if not service: + raise ValueError("invalid service: %s", service_name) + + # retrieve config files for default/custom service + if service.custom: + config_files = service.configs + else: + config_files = service.get_configs(node) + + if filename not in config_files: + raise ValueError("unknown service(%s) config file: %s", service_name, filename) + + # get the file data + data = service.config_data.get(filename) + if data is None: + data = "%s" % service.generate_config(node, filename) + else: + data = "%s" % data + + filetypestr = "service:%s" % service.name + return FileData( + message_type=MessageFlags.ADD.value, + node=node.objid, + name=filename, + type=filetypestr, + data=data + ) + + def set_service_file(self, node_id, service_name, file_name, data): + """ + Receive a File Message from the GUI and store the customized file + in the service config. The filename must match one from the list of + config files in the service. 
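copy_service_file() treats config text beginning with "file://" as a reference to an existing file rather than generated content. The detection step can be sketched as follows (the real method also expands CORE path variables and copies the file into the node):

    def resolve_file_reference(cfg):
        # return the referenced path for a file:// config, otherwise None
        prefix = "file://"
        if not cfg.startswith(prefix):
            return None
        return cfg[len(prefix):].splitlines()[0]

    print(resolve_file_reference("file:///etc/myapp/app.conf\n"))  # /etc/myapp/app.conf
    print(resolve_file_reference("# generated contents\n"))        # None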
+ + :param int node_id: node id to set service file + :param str service_name: service name to set file for + :param str file_name: file name to set + :param data: data for file to set + :return: nothing + """ + # attempt to set custom service, if needed + self.set_service(node_id, service_name) + + # retrieve custom service + service = self.get_service(node_id, service_name) + if service is None: + logging.warn("received file name for unknown service: %s", service_name) + return + + # validate file being set is valid + config_files = service.configs + if file_name not in config_files: + logging.warn("received unknown file(%s) for service(%s)", file_name, service_name) + return + + # set custom service file data + service.config_data[file_name] = data + + def startup_service(self, node, service, wait=False): + """ + Startup a node service. + + :param PyCoreNode node: node to reconfigure service for + :param CoreService service: service to reconfigure + :param bool wait: determines if we should wait to validate startup + :return: status of startup + :rtype: int + """ + + cmds = service.startup + if not service.custom: + cmds = service.get_startup(node) + + status = 0 + for cmd in cmds: + try: + if wait: + node.check_cmd(cmd) + else: + node.cmd(cmd, wait=False) + except CoreCommandError: + logging.exception("error starting command") + status = -1 + return status + + def create_service_files(self, node, service): + """ + Creates node service files. + + :param PyCoreNode node: node to reconfigure service for + :param CoreService service: service to reconfigure + :return: nothing + """ + logging.info("node(%s) service(%s) creating config files", node.name, service.name) + # get values depending on if custom or not + config_files = service.configs + if not service.custom: + config_files = service.get_configs(node) + + for file_name in config_files: + logging.debug("generating service config: %s", file_name) + if service.custom: + cfg = service.config_data.get(file_name) + if cfg is None: + cfg = service.generate_config(node, file_name) + + # cfg may have a file:/// url for copying from a file + try: + if self.copy_service_file(node, file_name, cfg): + continue + except IOError: + logging.exception("error copying service file: %s", file_name) + continue + else: + cfg = service.generate_config(node, file_name) + + node.nodefile(file_name, cfg) + + def service_reconfigure(self, node, service): + """ + Reconfigure a node service. + + :param PyCoreNode node: node to reconfigure service for + :param CoreService service: service to reconfigure + :return: nothing + """ + config_files = service.configs + if not service.custom: + config_files = service.get_configs(node) + + for file_name in config_files: + if file_name[:7] == "file:///": + # TODO: implement this + raise NotImplementedError + + cfg = service.config_data.get(file_name) + if cfg is None: + cfg = service.generate_config(node, file_name) + + node.nodefile(file_name, cfg) + + +class CoreService(object): + """ + Parent class used for defining services. 
+ """ + # service name should not include spaces + name = None + + # executables that must exist for service to run + executables = () + + # sets service requirements that must be started prior to this service starting + dependencies = () + + # group string allows grouping services together + group = None + + # private, per-node directories required by this service + dirs = () + + # config files written by this service + configs = () + + # config file data + config_data = {} + + # list of startup commands + startup = () + + # list of shutdown commands + shutdown = () + + # list of validate commands + validate = () + + # validation mode, used to determine startup success + validation_mode = ServiceMode.NON_BLOCKING + + # time to wait in seconds for determining if service started successfully + validation_timer = 5 + + # validation period in seconds, how frequent validation is attempted + validation_period = 0.5 + + # metadata associated with this service + meta = None + + # custom configuration text + custom = False + custom_needed = False + + def __init__(self): + """ + Services are not necessarily instantiated. Classmethods may be used + against their config. Services are instantiated when a custom + configuration is used to override their default parameters. + """ + self.custom = True + self.config_data = self.__class__.config_data.copy() + + @classmethod + def on_load(cls): + pass + + @classmethod + def get_configs(cls, node): + """ + Return the tuple of configuration file filenames. This default method + returns the cls._configs tuple, but this method may be overriden to + provide node-specific filenames that may be based on other services. + + :param core.netns.vnode.LxcNode node: node to generate config for + :return: configuration files + :rtype: tuple + """ + return cls.configs + + @classmethod + def generate_config(cls, node, filename): + """ + Generate configuration file given a node object. The filename is + provided to allow for multiple config files. + Return the configuration string to be written to a file or sent + to the GUI for customization. + + :param core.netns.vnode.LxcNode node: node to generate config for + :param str filename: file name to generate config for + :return: nothing + """ + raise NotImplementedError + + @classmethod + def get_startup(cls, node): + """ + Return the tuple of startup commands. This default method + returns the cls.startup tuple, but this method may be + overridden to provide node-specific commands that may be + based on other services. + + :param core.netns.vnode.LxcNode node: node to get startup for + :return: startup commands + :rtype: tuple + """ + return cls.startup + + @classmethod + def get_validate(cls, node): + """ + Return the tuple of validate commands. This default method + returns the cls.validate tuple, but this method may be + overridden to provide node-specific commands that may be + based on other services. + + :param core.netns.vnode.LxcNode node: node to validate + :return: validation commands + :rtype: tuple + """ + return cls.validate diff --git a/daemon/core/services/__init__.py b/daemon/core/services/__init__.py index e69de29b..974cd03e 100644 --- a/daemon/core/services/__init__.py +++ b/daemon/core/services/__init__.py @@ -0,0 +1,21 @@ +""" +Services + +Services available to nodes can be put in this directory. Everything listed in +__all__ is automatically loaded by the main core module. 
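To put the base class to use, a custom service under this API subclasses CoreService, fills in the class attributes, and implements generate_config(). A hypothetical example (the service name, paths, and commands are made up; ServiceManager.add() will refuse it unless the listed executable exists):

    from core.service import CoreService, ServiceManager


    class MyHttpd(CoreService):
        name = "MyHttpd"
        group = "Utility"
        executables = ("busybox",)
        dirs = ("/var/www",)
        configs = ("/var/www/index.html",)
        startup = ("busybox httpd -h /var/www",)
        shutdown = ("killall busybox",)
        validate = ("pidof busybox",)

        @classmethod
        def generate_config(cls, node, filename):
            # returned text is written to configs[0] inside the node
            return "<html><body>%s</body></html>\n" % node.name


    # services are normally picked up from a directory via ServiceManager.add_services();
    # registering one directly looks like this
    ServiceManager.add(MyHttpd)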
+""" +import os + +from core.service import ServiceManager + +_PATH = os.path.abspath(os.path.dirname(__file__)) + + +def load(): + """ + Loads all services from the modules that reside under core.services. + + :return: list of services that failed to load + :rtype: list[str] + """ + return ServiceManager.add_services(_PATH) diff --git a/daemon/core/services/bird.py b/daemon/core/services/bird.py index c2ecc4dc..d0b3d2e9 100644 --- a/daemon/core/services/bird.py +++ b/daemon/core/services/bird.py @@ -1,85 +1,85 @@ """ bird.py: defines routing services provided by the BIRD Internet Routing Daemon. """ -from typing import Optional -from core.nodes.base import CoreNode -from core.services.coreservices import CoreService +from core.service import CoreService class Bird(CoreService): """ Bird router support """ - - name: str = "bird" - group: str = "BIRD" - executables: tuple[str, ...] = ("bird",) - dirs: tuple[str, ...] = ("/etc/bird",) - configs: tuple[str, ...] = ("/etc/bird/bird.conf",) - startup: tuple[str, ...] = (f"bird -c {configs[0]}",) - shutdown: tuple[str, ...] = ("killall bird",) - validate: tuple[str, ...] = ("pidof bird",) + name = "bird" + executables = ("bird",) + group = "BIRD" + dirs = ("/etc/bird",) + configs = ("/etc/bird/bird.conf",) + startup = ("bird -c %s" % (configs[0]),) + shutdown = ("killall bird",) + validate = ("pidof bird",) @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: + def generate_config(cls, node, filename): """ Return the bird.conf file contents. """ if filename == cls.configs[0]: - return cls.generate_bird_config(node) + return cls.generateBirdConf(node) else: raise ValueError @staticmethod - def router_id(node: CoreNode) -> str: + def routerid(node): """ Helper to return the first IPv4 address of a node as its router ID. """ - for iface in node.get_ifaces(control=False): - ip4 = iface.get_ip4() - if ip4: - return str(ip4.ip) + for ifc in node.netifs(): + if hasattr(ifc, 'control') and ifc.control is True: + continue + for a in ifc.addrlist: + if a.find(".") >= 0: + return a.split('/')[0] + # raise ValueError, "no IPv4 address found for router ID" return "0.0.0.0" @classmethod - def generate_bird_config(cls, node: CoreNode) -> str: + def generateBirdConf(cls, node): """ Returns configuration file text. Other services that depend on bird - will have hooks that are invoked here. + will have generatebirdifcconfig() and generatebirdconfig() + hooks that are invoked here. """ - cfg = f"""\ + cfg = """\ /* Main configuration file for BIRD. This is ony a template, * you will *need* to customize it according to your needs * Beware that only double quotes \'"\' are valid. No singles. 
*/ -log "/var/log/{cls.name}.log" all; +log "/var/log/%s.log" all; #debug protocols all; #debug commands 2; -router id {cls.router_id(node)}; # Mandatory for IPv6, may be automatic for IPv4 +router id %s; # Mandatory for IPv6, may be automatic for IPv4 -protocol kernel {{ +protocol kernel { persist; # Don\'t remove routes on BIRD shutdown scan time 200; # Scan kernel routing table every 200 seconds export all; import all; -}} +} -protocol device {{ +protocol device { scan time 10; # Scan interfaces every 10 seconds -}} +} -""" +""" % (cls.name, cls.routerid(node)) - # generate protocol specific configurations + # Generate protocol specific configurations for s in node.services: if cls.name not in s.dependencies: continue - if not (isinstance(s, BirdService) or issubclass(s, BirdService)): - continue - cfg += s.generate_bird_config(node) + cfg += s.generatebirdconfig(node) + return cfg @@ -89,26 +89,34 @@ class BirdService(CoreService): common to Bird's routing daemons. """ - name: Optional[str] = None - group: str = "BIRD" - executables: tuple[str, ...] = ("bird",) - dependencies: tuple[str, ...] = ("bird",) - meta: str = "The config file for this service can be found in the bird service." + name = None + executables = ("bird",) + group = "BIRD" + dependencies = ("bird",) + dirs = () + configs = () + startup = () + shutdown = () + meta = "The config file for this service can be found in the bird service." @classmethod - def generate_bird_config(cls, node: CoreNode) -> str: + def generatebirdconfig(cls, node): return "" @classmethod - def generate_bird_iface_config(cls, node: CoreNode) -> str: + def generatebirdifcconfig(cls, node): """ Use only bare interfaces descriptions in generated protocol configurations. This has the slight advantage of being the same everywhere. 
""" cfg = "" - for iface in node.get_ifaces(control=False): - cfg += f' interface "{iface.name}";\n' + + for ifc in node.netifs(): + if hasattr(ifc, 'control') and ifc.control is True: + continue + cfg += ' interface "%s";\n' % ifc.name + return cfg @@ -117,11 +125,11 @@ class BirdBgp(BirdService): BGP BIRD Service (configuration generation) """ - name: str = "BIRD_BGP" - custom_needed: bool = True + name = "BIRD_BGP" + custom_needed = True @classmethod - def generate_bird_config(cls, node: CoreNode) -> str: + def generatebirdconfig(cls, node): return """ /* This is a sample config that should be customized with appropriate AS numbers * and peers; add one section like this for each neighbor */ @@ -148,22 +156,23 @@ class BirdOspf(BirdService): OSPF BIRD Service (configuration generation) """ - name: str = "BIRD_OSPFv2" + name = "BIRD_OSPFv2" @classmethod - def generate_bird_config(cls, node: CoreNode) -> str: - cfg = "protocol ospf {\n" - cfg += " export filter {\n" - cfg += " if source = RTS_BGP then {\n" - cfg += " ospf_metric1 = 100;\n" - cfg += " accept;\n" - cfg += " }\n" - cfg += " accept;\n" - cfg += " };\n" - cfg += " area 0.0.0.0 {\n" - cfg += cls.generate_bird_iface_config(node) - cfg += " };\n" - cfg += "}\n\n" + def generatebirdconfig(cls, node): + cfg = 'protocol ospf {\n' + cfg += ' export filter {\n' + cfg += ' if source = RTS_BGP then {\n' + cfg += ' ospf_metric1 = 100;\n' + cfg += ' accept;\n' + cfg += ' }\n' + cfg += ' accept;\n' + cfg += ' };\n' + cfg += ' area 0.0.0.0 {\n' + cfg += cls.generatebirdifcconfig(node) + cfg += ' };\n' + cfg += '}\n\n' + return cfg @@ -172,24 +181,26 @@ class BirdRadv(BirdService): RADV BIRD Service (configuration generation) """ - name: str = "BIRD_RADV" + name = "BIRD_RADV" @classmethod - def generate_bird_config(cls, node: CoreNode) -> str: - cfg = "/* This is a sample config that must be customized */\n" - cfg += "protocol radv {\n" - cfg += " # auto configuration on all interfaces\n" - cfg += cls.generate_bird_iface_config(node) - cfg += " # Advertise DNS\n" - cfg += " rdnss {\n" - cfg += "# lifetime mult 10;\n" - cfg += "# lifetime mult 10;\n" - cfg += "# ns 2001:0DB8:1234::11;\n" - cfg += "# ns 2001:0DB8:1234::11;\n" - cfg += "# ns 2001:0DB8:1234::12;\n" - cfg += "# ns 2001:0DB8:1234::12;\n" - cfg += " };\n" - cfg += "}\n\n" + def generatebirdconfig(cls, node): + cfg = '/* This is a sample config that must be customized */\n' + + cfg += 'protocol radv {\n' + cfg += ' # auto configuration on all interfaces\n' + cfg += cls.generatebirdifcconfig(node) + cfg += ' # Advertise DNS\n' + cfg += ' rdnss {\n' + cfg += '# lifetime mult 10;\n' + cfg += '# lifetime mult 10;\n' + cfg += '# ns 2001:0DB8:1234::11;\n' + cfg += '# ns 2001:0DB8:1234::11;\n' + cfg += '# ns 2001:0DB8:1234::12;\n' + cfg += '# ns 2001:0DB8:1234::12;\n' + cfg += ' };\n' + cfg += '}\n\n' + return cfg @@ -198,19 +209,20 @@ class BirdRip(BirdService): RIP BIRD Service (configuration generation) """ - name: str = "BIRD_RIP" + name = "BIRD_RIP" @classmethod - def generate_bird_config(cls, node: CoreNode) -> str: - cfg = "protocol rip {\n" - cfg += " period 10;\n" - cfg += " garbage time 60;\n" - cfg += cls.generate_bird_iface_config(node) - cfg += " honor neighbor;\n" - cfg += " authentication none;\n" - cfg += " import all;\n" - cfg += " export all;\n" - cfg += "}\n\n" + def generatebirdconfig(cls, node): + cfg = 'protocol rip {\n' + cfg += ' period 10;\n' + cfg += ' garbage time 60;\n' + cfg += cls.generatebirdifcconfig(node) + cfg += ' honor neighbor;\n' + cfg += ' 
authentication none;\n' + cfg += ' import all;\n' + cfg += ' export all;\n' + cfg += '}\n\n' + return cfg @@ -219,15 +231,15 @@ class BirdStatic(BirdService): Static Bird Service (configuration generation) """ - name: str = "BIRD_static" - custom_needed: bool = True + name = "BIRD_static" + custom_needed = True @classmethod - def generate_bird_config(cls, node: CoreNode) -> str: - cfg = "/* This is a sample config that must be customized */\n" - cfg += "protocol static {\n" - cfg += "# route 0.0.0.0/0 via 198.51.100.130; # Default route. Do NOT advertise on BGP !\n" - cfg += "# route 203.0.113.0/24 reject; # Sink route\n" + def generatebirdconfig(cls, node): + cfg = '/* This is a sample config that must be customized */\n' + cfg += 'protocol static {\n' + cfg += '# route 0.0.0.0/0 via 198.51.100.130; # Default route. Do NOT advertise on BGP !\n' + cfg += '# route 203.0.113.0/24 reject; # Sink route\n' cfg += '# route 10.2.0.0/24 via "arc0"; # Secondary network\n' - cfg += "}\n\n" + cfg += '}\n\n' return cfg diff --git a/daemon/core/services/coreservices.py b/daemon/core/services/coreservices.py deleted file mode 100644 index 0eee980e..00000000 --- a/daemon/core/services/coreservices.py +++ /dev/null @@ -1,773 +0,0 @@ -""" -Definition of CoreService class that is subclassed to define -startup services and routing for nodes. A service is typically a daemon -program launched when a node starts that provides some sort of service. -The CoreServices class handles configuration messages for sending -a list of available services to the GUI and for configuring individual -services. -""" - -import enum -import logging -import pkgutil -import time -from collections.abc import Iterable -from pathlib import Path -from typing import TYPE_CHECKING, Optional, Union - -from core import services as core_services -from core import utils -from core.emulator.data import FileData -from core.emulator.enumerations import ExceptionLevels, MessageFlags, RegisterTlvs -from core.errors import ( - CoreCommandError, - CoreError, - CoreServiceBootError, - CoreServiceError, -) -from core.nodes.base import CoreNode - -logger = logging.getLogger(__name__) - -if TYPE_CHECKING: - from core.emulator.session import Session - - CoreServiceType = Union["CoreService", type["CoreService"]] - - -class ServiceMode(enum.Enum): - BLOCKING = 0 - NON_BLOCKING = 1 - TIMER = 2 - - -class ServiceDependencies: - """ - Can generate boot paths for services, based on their dependencies. Will validate - that all services will be booted and that all dependencies exist within the services - provided. 
- """ - - def __init__(self, services: list["CoreServiceType"]) -> None: - self.visited: set[str] = set() - self.services: dict[str, "CoreServiceType"] = {} - self.paths: dict[str, list["CoreServiceType"]] = {} - self.boot_paths: list[list["CoreServiceType"]] = [] - roots = {x.name for x in services} - for service in services: - self.services[service.name] = service - roots -= set(service.dependencies) - self.roots: list["CoreServiceType"] = [x for x in services if x.name in roots] - if services and not self.roots: - raise ValueError("circular dependency is present") - - def _search( - self, - service: "CoreServiceType", - visiting: set[str] = None, - path: list[str] = None, - ) -> list["CoreServiceType"]: - if service.name in self.visited: - return self.paths[service.name] - self.visited.add(service.name) - if visiting is None: - visiting = set() - visiting.add(service.name) - if path is None: - for dependency in service.dependencies: - path = self.paths.get(dependency) - if path is not None: - break - for dependency in service.dependencies: - service_dependency = self.services.get(dependency) - if not service_dependency: - raise ValueError(f"required dependency was not provided: {dependency}") - if dependency in visiting: - raise ValueError(f"circular dependency, already visited: {dependency}") - else: - path = self._search(service_dependency, visiting, path) - visiting.remove(service.name) - if path is None: - path = [] - self.boot_paths.append(path) - path.append(service) - self.paths[service.name] = path - return path - - def boot_order(self) -> list[list["CoreServiceType"]]: - for service in self.roots: - self._search(service) - return self.boot_paths - - -class ServiceManager: - """ - Manages services available for CORE nodes to use. - """ - - services: dict[str, type["CoreService"]] = {} - - @classmethod - def add(cls, service: type["CoreService"]) -> None: - """ - Add a service to manager. - - :param service: service to add - :return: nothing - :raises ValueError: when service cannot be loaded - """ - name = service.name - logger.debug("loading service: class(%s) name(%s)", service.__name__, name) - # avoid services with no name - if name is None: - logger.debug("not loading class(%s) with no name", service.__name__) - return - # avoid duplicate services - if name in cls.services: - raise ValueError(f"duplicate service being added: {name}") - # validate dependent executables are present - for executable in service.executables: - try: - utils.which(executable, required=True) - except CoreError as e: - raise CoreError(f"service({name}): {e}") - # validate service on load succeeds - try: - service.on_load() - except Exception as e: - logger.exception("error during service(%s) on load", service.name) - raise ValueError(e) - # make service available - cls.services[name] = service - - @classmethod - def get(cls, name: str) -> type["CoreService"]: - """ - Retrieve a service from the manager. - - :param name: name of the service to retrieve - :return: service if it exists, None otherwise - """ - service = cls.services.get(name) - if service is None: - raise CoreServiceError(f"service({name}) does not exist") - return service - - @classmethod - def add_services(cls, path: Path) -> list[str]: - """ - Method for retrieving all CoreServices from a given path. 
- - :param path: path to retrieve services from - :return: list of core services that failed to load - """ - service_errors = [] - services = utils.load_classes(path, CoreService) - for service in services: - if not service.name: - continue - try: - cls.add(service) - except (CoreError, ValueError) as e: - service_errors.append(service.name) - logger.debug("not loading service(%s): %s", service.name, e) - return service_errors - - @classmethod - def load_locals(cls) -> list[str]: - errors = [] - for module_info in pkgutil.walk_packages( - core_services.__path__, f"{core_services.__name__}." - ): - services = utils.load_module(module_info.name, CoreService) - for service in services: - try: - cls.add(service) - except CoreError as e: - errors.append(service.name) - logger.debug("not loading service(%s): %s", service.name, e) - return errors - - -class CoreServices: - """ - Class for interacting with a list of available startup services for - nodes. Mostly used to convert a CoreService into a Config API - message. This class lives in the Session object and remembers - the default services configured for each node type, and any - custom service configuration. A CoreService is not a Configurable. - """ - - name: str = "services" - config_type: RegisterTlvs = RegisterTlvs.UTILITY - - def __init__(self, session: "Session") -> None: - """ - Creates a CoreServices instance. - - :param session: session this manager is tied to - """ - self.session: "Session" = session - # dict of default services tuples, key is node type - self.default_services: dict[str, list[str]] = { - "mdr": ["zebra", "OSPFv3MDR", "IPForward"], - "PC": ["DefaultRoute"], - "prouter": [], - "router": ["zebra", "OSPFv2", "OSPFv3", "IPForward"], - "host": ["DefaultRoute", "SSH"], - } - # dict of node ids to dict of custom services by name - self.custom_services: dict[int, dict[str, "CoreService"]] = {} - - def reset(self) -> None: - """ - Called when config message with reset flag is received - """ - self.custom_services.clear() - - def get_service( - self, node_id: int, service_name: str, default_service: bool = False - ) -> "CoreService": - """ - Get any custom service configured for the given node that matches the specified - service name. If no custom service is found, return the specified service. - - :param node_id: object id to get service from - :param service_name: name of service to retrieve - :param default_service: True to return default service when custom does - not exist, False returns None - :return: custom service from the node - """ - node_services = self.custom_services.setdefault(node_id, {}) - default = None - if default_service: - default = ServiceManager.get(service_name) - return node_services.get(service_name, default) - - def set_service(self, node_id: int, service_name: str) -> None: - """ - Store service customizations in an instantiated service object - using a list of values that came from a config message. 
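The removed add_services/load_locals helpers above discover CoreService subclasses by walking a package with pkgutil (the actual loading lives in core.utils and also skips services without a name); the general pattern, sketched over an arbitrary package and base class:

import importlib
import inspect
import pkgutil


def find_subclasses(package, base_class):
    # Import every module in the package and yield concrete subclasses of base_class.
    prefix = package.__name__ + "."
    for module_info in pkgutil.walk_packages(package.__path__, prefix):
        module = importlib.import_module(module_info.name)
        for _, obj in inspect.getmembers(module, inspect.isclass):
            if issubclass(obj, base_class) and obj is not base_class:
                yield obj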
- - :param node_id: object id to set custom service for - :param service_name: name of service to set - :return: nothing - """ - logger.debug("setting custom service(%s) for node: %s", service_name, node_id) - service = self.get_service(node_id, service_name) - if not service: - service_class = ServiceManager.get(service_name) - service = service_class() - - # add the custom service to dict - node_services = self.custom_services.setdefault(node_id, {}) - node_services[service.name] = service - - def add_services( - self, node: CoreNode, model: str, services: list[str] = None - ) -> None: - """ - Add services to a node. - - :param node: node to add services to - :param model: node model type to add services for - :param services: names of services to add to node - :return: nothing - """ - if not services: - logger.info( - "using default services for node(%s) type(%s)", node.name, model - ) - services = self.default_services.get(model, []) - logger.info("setting services for node(%s): %s", node.name, services) - for service_name in services: - service = self.get_service(node.id, service_name, default_service=True) - if not service: - logger.warning( - "unknown service(%s) for node(%s)", service_name, node.name - ) - continue - node.services.append(service) - - def all_configs(self) -> list[tuple[int, "CoreService"]]: - """ - Return (node_id, service) tuples for all stored configs. Used when reconnecting - to a session or opening XML. - - :return: list of tuples of node ids and services - """ - configs = [] - for node_id in self.custom_services: - custom_services = self.custom_services[node_id] - for name in custom_services: - service = custom_services[name] - configs.append((node_id, service)) - return configs - - def all_files(self, service: "CoreService") -> list[tuple[str, str]]: - """ - Return all customized files stored with a service. - Used when reconnecting to a session or opening XML. - - :param service: service to get files for - :return: list of all custom service files - """ - files = [] - if not service.custom: - return files - - for filename in service.configs: - data = service.config_data.get(filename) - if data is None: - continue - files.append((filename, data)) - - return files - - def boot_services(self, node: CoreNode) -> None: - """ - Start all services on a node. - - :param node: node to start services on - :return: nothing - """ - boot_paths = ServiceDependencies(node.services).boot_order() - funcs = [] - for boot_path in boot_paths: - args = (node, boot_path) - funcs.append((self._boot_service_path, args, {})) - result, exceptions = utils.threadpool(funcs) - if exceptions: - raise CoreServiceBootError(*exceptions) - - def _boot_service_path(self, node: CoreNode, boot_path: list["CoreServiceType"]): - logger.info( - "booting node(%s) services: %s", - node.name, - " -> ".join([x.name for x in boot_path]), - ) - for service in boot_path: - service = self.get_service(node.id, service.name, default_service=True) - try: - self.boot_service(node, service) - except Exception as e: - logger.exception("exception booting service: %s", service.name) - raise CoreServiceBootError(e) - - def boot_service(self, node: CoreNode, service: "CoreServiceType") -> None: - """ - Start a service on a node. Create private dirs, generate config - files, and execute startup commands. 
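The removed boot_services above fans each boot path out to a thread pool (utils.threadpool) and aborts the boot if any path raises; assuming a start_path callable that boots one dependency chain, the same shape with the standard library is:

from concurrent.futures import ThreadPoolExecutor


def boot_paths_concurrently(boot_paths, start_path):
    # Run every dependency chain in parallel and gather any exceptions that were raised.
    with ThreadPoolExecutor() as pool:
        futures = [pool.submit(start_path, path) for path in boot_paths]
        errors = [f.exception() for f in futures if f.exception() is not None]
    if errors:
        raise RuntimeError("service boot failed: %s" % errors)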
- - :param node: node to boot services on - :param service: service to start - :return: nothing - """ - logger.info( - "starting node(%s) service(%s) validation(%s)", - node.name, - service.name, - service.validation_mode.name, - ) - - # create service directories - for directory in service.dirs: - dir_path = Path(directory) - try: - node.create_dir(dir_path) - except (CoreCommandError, CoreError) as e: - logger.warning( - "error mounting private dir '%s' for service '%s': %s", - directory, - service.name, - e, - ) - - # create service files - self.create_service_files(node, service) - - # run startup - wait = service.validation_mode == ServiceMode.BLOCKING - status = self.startup_service(node, service, wait) - if status: - raise CoreServiceBootError( - f"node({node.name}) service({service.name}) error during startup" - ) - - # blocking mode is finished - if wait: - return - - # timer mode, sleep and return - if service.validation_mode == ServiceMode.TIMER: - time.sleep(service.validation_timer) - # non-blocking, attempt to validate periodically, up to validation_timer time - elif service.validation_mode == ServiceMode.NON_BLOCKING: - start = time.monotonic() - while True: - status = self.validate_service(node, service) - if not status: - break - - if time.monotonic() - start > service.validation_timer: - break - - time.sleep(service.validation_period) - - if status: - raise CoreServiceBootError( - f"node({node.name}) service({service.name}) failed validation" - ) - - def copy_service_file(self, node: CoreNode, file_path: Path, cfg: str) -> bool: - """ - Given a configured service filename and config, determine if the - config references an existing file that should be copied. - Returns True for local files, False for generated. - - :param node: node to copy service for - :param file_path: file name for a configured service - :param cfg: configuration string - :return: True if successful, False otherwise - """ - if cfg[:7] == "file://": - src = cfg[7:] - src = src.split("\n")[0] - src = utils.expand_corepath(src, node.session, node) - # TODO: glob here - node.copy_file(src, file_path, mode=0o644) - return True - return False - - def validate_service(self, node: CoreNode, service: "CoreServiceType") -> int: - """ - Run the validation command(s) for a service. - - :param node: node to validate service for - :param service: service to validate - :return: service validation status - """ - logger.debug("validating node(%s) service(%s)", node.name, service.name) - cmds = service.validate - if not service.custom: - cmds = service.get_validate(node) - - status = 0 - for cmd in cmds: - logger.debug("validating service(%s) using: %s", service.name, cmd) - try: - node.cmd(cmd) - except CoreCommandError as e: - logger.debug( - "node(%s) service(%s) validate failed", node.name, service.name - ) - logger.debug("cmd(%s): %s", e.cmd, e.output) - status = -1 - break - - return status - - def stop_services(self, node: CoreNode) -> None: - """ - Stop all services on a node. - - :param node: node to stop services on - :return: nothing - """ - for service in node.services: - self.stop_service(node, service) - - def stop_service(self, node: CoreNode, service: "CoreServiceType") -> int: - """ - Stop a service on a node. 
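For ServiceMode.NON_BLOCKING the removed boot_service above re-runs the service's validate commands until they pass or validation_timer expires, sleeping validation_period between attempts; the polling loop in isolation, assuming a validate() callable that returns 0 on success:

import time


def wait_for_validation(validate, timer=5.0, period=0.5):
    # Poll until validation succeeds or the time budget is exhausted.
    start = time.monotonic()
    while True:
        if validate() == 0:
            return True
        if time.monotonic() - start > timer:
            return False
        time.sleep(period)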
- - :param node: node to stop a service on - :param service: service to stop - :return: status for stopping the services - """ - status = 0 - for args in service.shutdown: - try: - node.cmd(args) - except CoreCommandError as e: - self.session.exception( - ExceptionLevels.ERROR, - "services", - f"error stopping service {service.name}: {e.stderr}", - node.id, - ) - logger.exception("error running stop command %s", args) - status = -1 - return status - - def get_service_file( - self, node: CoreNode, service_name: str, filename: str - ) -> FileData: - """ - Send a File Message when the GUI has requested a service file. - The file data is either auto-generated or comes from an existing config. - - :param node: node to get service file from - :param service_name: service to get file from - :param filename: file name to retrieve - :return: file data - """ - # get service to get file from - service = self.get_service(node.id, service_name, default_service=True) - if not service: - raise ValueError("invalid service: %s", service_name) - - # retrieve config files for default/custom service - if service.custom: - config_files = service.configs - else: - config_files = service.get_configs(node) - - if filename not in config_files: - raise ValueError( - "unknown service(%s) config file: %s", service_name, filename - ) - - # get the file data - data = service.config_data.get(filename) - if data is None: - data = service.generate_config(node, filename) - else: - data = data - - filetypestr = f"service:{service.name}" - return FileData( - message_type=MessageFlags.ADD, - node=node.id, - name=filename, - type=filetypestr, - data=data, - ) - - def set_service_file( - self, node_id: int, service_name: str, file_name: str, data: str - ) -> None: - """ - Receive a File Message from the GUI and store the customized file - in the service config. The filename must match one from the list of - config files in the service. - - :param node_id: node id to set service file - :param service_name: service name to set file for - :param file_name: file name to set - :param data: data for file to set - :return: nothing - """ - # attempt to set custom service, if needed - self.set_service(node_id, service_name) - - # retrieve custom service - service = self.get_service(node_id, service_name) - if service is None: - logger.warning("received file name for unknown service: %s", service_name) - return - - # validate file being set is valid - config_files = service.configs - if file_name not in config_files: - logger.warning( - "received unknown file(%s) for service(%s)", file_name, service_name - ) - return - - # set custom service file data - service.config_data[file_name] = data - - def startup_service( - self, node: CoreNode, service: "CoreServiceType", wait: bool = False - ) -> int: - """ - Startup a node service. - - :param node: node to reconfigure service for - :param service: service to reconfigure - :param wait: determines if we should wait to validate startup - :return: status of startup - """ - cmds = service.startup - if not service.custom: - cmds = service.get_startup(node) - - status = 0 - for cmd in cmds: - try: - node.cmd(cmd, wait) - except CoreCommandError: - logger.exception("error starting command") - status = -1 - return status - - def create_service_files(self, node: CoreNode, service: "CoreServiceType") -> None: - """ - Creates node service files. 
- - :param node: node to reconfigure service for - :param service: service to reconfigure - :return: nothing - """ - # get values depending on if custom or not - config_files = service.configs - if not service.custom: - config_files = service.get_configs(node) - for file_name in config_files: - file_path = Path(file_name) - logger.debug( - "generating service config custom(%s): %s", service.custom, file_name - ) - if service.custom: - cfg = service.config_data.get(file_name) - if cfg is None: - cfg = service.generate_config(node, file_name) - # cfg may have a file:/// url for copying from a file - try: - if self.copy_service_file(node, file_path, cfg): - continue - except OSError: - logger.exception("error copying service file: %s", file_name) - continue - else: - cfg = service.generate_config(node, file_name) - node.create_file(file_path, cfg) - - def service_reconfigure(self, node: CoreNode, service: "CoreService") -> None: - """ - Reconfigure a node service. - - :param node: node to reconfigure service for - :param service: service to reconfigure - :return: nothing - """ - config_files = service.configs - if not service.custom: - config_files = service.get_configs(node) - for file_name in config_files: - file_path = Path(file_name) - if file_name[:7] == "file:///": - # TODO: implement this - raise NotImplementedError - cfg = service.config_data.get(file_name) - if cfg is None: - cfg = service.generate_config(node, file_name) - node.create_file(file_path, cfg) - - -class CoreService: - """ - Parent class used for defining services. - """ - - # service name should not include spaces - name: Optional[str] = None - - # executables that must exist for service to run - executables: tuple[str, ...] = () - - # sets service requirements that must be started prior to this service starting - dependencies: tuple[str, ...] = () - - # group string allows grouping services together - group: Optional[str] = None - - # private, per-node directories required by this service - dirs: tuple[str, ...] = () - - # config files written by this service - configs: tuple[str, ...] = () - - # config file data - config_data: dict[str, str] = {} - - # list of startup commands - startup: tuple[str, ...] = () - - # list of shutdown commands - shutdown: tuple[str, ...] = () - - # list of validate commands - validate: tuple[str, ...] = () - - # validation mode, used to determine startup success - validation_mode: ServiceMode = ServiceMode.NON_BLOCKING - - # time to wait in seconds for determining if service started successfully - validation_timer: int = 5 - - # validation period in seconds, how frequent validation is attempted - validation_period: float = 0.5 - - # metadata associated with this service - meta: Optional[str] = None - - # custom configuration text - custom: bool = False - custom_needed: bool = False - - def __init__(self) -> None: - """ - Services are not necessarily instantiated. Classmethods may be used - against their config. Services are instantiated when a custom - configuration is used to override their default parameters. - """ - self.custom: bool = True - self.config_data: dict[str, str] = self.__class__.config_data.copy() - - @classmethod - def on_load(cls) -> None: - pass - - @classmethod - def get_configs(cls, node: CoreNode) -> Iterable[str]: - """ - Return the tuple of configuration file filenames. This default method - returns the cls._configs tuple, but this method may be overriden to - provide node-specific filenames that may be based on other services. 
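The removed create_service_files/copy_service_file above treat a generated config whose body begins with "file://" as a path to copy into the node rather than literal text to write; a simplified version of that decision (omitting the CORE path expansion the real code applies), using illustrative names:

import shutil
from pathlib import Path


def materialize_config(dst: Path, cfg: str) -> None:
    # "file://<path>" means copy an existing file; anything else is written verbatim.
    if cfg.startswith("file://"):
        src = Path(cfg[len("file://"):].split("\n")[0])
        shutil.copy(src, dst)
        dst.chmod(0o644)
    else:
        dst.write_text(cfg)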
- - :param node: node to generate config for - :return: configuration files - """ - return cls.configs - - @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: - """ - Generate configuration file given a node object. The filename is - provided to allow for multiple config files. - Return the configuration string to be written to a file or sent - to the GUI for customization. - - :param node: node to generate config for - :param filename: file name to generate config for - :return: generated config - """ - raise NotImplementedError - - @classmethod - def get_startup(cls, node: CoreNode) -> Iterable[str]: - """ - Return the tuple of startup commands. This default method - returns the cls.startup tuple, but this method may be - overridden to provide node-specific commands that may be - based on other services. - - :param node: node to get startup for - :return: startup commands - """ - return cls.startup - - @classmethod - def get_validate(cls, node: CoreNode) -> Iterable[str]: - """ - Return the tuple of validate commands. This default method - returns the cls.validate tuple, but this method may be - overridden to provide node-specific commands that may be - based on other services. - - :param node: node to validate - :return: validation commands - """ - return cls.validate diff --git a/daemon/core/services/dockersvc.py b/daemon/core/services/dockersvc.py new file mode 100644 index 00000000..75a31c54 --- /dev/null +++ b/daemon/core/services/dockersvc.py @@ -0,0 +1,177 @@ +""" +Docker service allows running docker containers within CORE nodes. + +The running of Docker within a CORE node allows for additional extensibility to +the CORE services. This allows network applications and protocols to be easily +packaged and run on any node. + +This service that will add a new group to the services list. This +will have a service called Docker which will just start the docker service +within the node but not run anything. It will also scan all docker images on +the host machine. If any are tagged with 'core' then they will be added as a +service to the Docker group. The image will then be auto run if that service is +selected. + +This requires a recent version of Docker. This was tested using a PPA on Ubuntu + with version 1.2.0. The version in the standard Ubuntu repo is to old for +this purpose (we need --net host). + +It also requires docker-py (https://pypi.python.org/pypi/docker-py) which can be +installed with 'pip install docker-py'. This is used to interface with Docker +from the python service. + +An example use case is to pull an image from Docker.com. A test image has been +uploaded for this purpose: + +sudo docker pull stuartmarsden/multicastping + +This downloads an image which is based on Ubuntu 14.04 with python and twisted. +It runs a simple program that sends a multicast ping and listens and records +any it receives. + +In order for this to appear as a docker service it must be tagged with core. +Find out the id by running 'sudo docker images'. You should see all installed +images and the one you want looks like this: + +stuartmarsden/multicastping latest 4833487e66d2 20 hours +ago 487 MB + +The id will be different on your machine so use it in the following command: + +sudo docker tag 4833487e66d2 stuartmarsden/multicastping:core + +This image will be listed in the services after we restart the core-daemon: + +sudo service core-daemon restart + +You can set up a simple network with a number of PCs connected to a switch. 
Set +the stuartmarsden/multicastping service for all the PCs. When started they will +all begin sending Multicast pings. + +In order to see what is happening you can go in to the terminal of a node and +look at the docker log. Easy shorthand is: + +docker logs $(docker ps -q) + +Which just shows the log of the running docker container (usually just one per +node). I have added this as an observer node to my setup: Name: docker logs +Command: bash -c 'docker logs $(docker ps -q) | tail -20' + +So I can just hover over to see the log which looks like this: + +Datagram 'Client: Ping' received from ('10.0.0.20', 8005) +Datagram 'Client: Ping' received from ('10.0.5.21', 8005) +Datagram 'Client: Ping' received from ('10.0.3.20', 8005) +Datagram 'Client: Ping' received from ('10.0.4.20', 8005) +Datagram 'Client: Ping' received from ('10.0.4.20', 8005) +Datagram 'Client: Ping' received from ('10.0.1.21', 8005) +Datagram 'Client: Ping' received from ('10.0.4.21', 8005) +Datagram 'Client: Ping' received from ('10.0.4.21', 8005) +Datagram 'Client: Ping' received from ('10.0.5.20', 8005) +Datagram 'Client: Ping' received from ('10.0.0.21', 8005) +Datagram 'Client: Ping' received from ('10.0.3.21', 8005) +Datagram 'Client: Ping' received from ('10.0.0.20', 8005) +Datagram 'Client: Ping' received from ('10.0.5.21', 8005) +Datagram 'Client: Ping' received from ('10.0.3.20', 8005) +Datagram 'Client: Ping' received from ('10.0.4.20', 8005) +Datagram 'Client: Ping' received from ('10.0.4.20', 8005) +Datagram 'Client: Ping' received from ('10.0.1.21', 8005) +Datagram 'Client: Ping' received from ('10.0.4.21', 8005) +Datagram 'Client: Ping' received from ('10.0.4.21', 8005) +Datagram 'Client: Ping' received from ('10.0.5.20', 8005) + +Limitations: + +1. Docker images must be downloaded on the host as usually a CORE node does not + have access to the internet. +2. Each node isolates running containers (keeps things simple) +3. Recent version of docker needed so that --net host can be used. This does + not further abstract the network within a node and allows multicast which + is not enabled within Docker containers at the moment. +4. The core-daemon must be restarted for new images to show up. +5. A Docker-daemon is run within each node but the images are shared. This + does mean that the daemon attempts to access an SQLlite database within the + host. At startup all the nodes will try to access this and it will be locked + for most due to contention. The service just does a hackish wait for 1 second + and retry. This means all the docker containers can take a while to come up + depending on how many nodes you have. +""" + +import logging + +from core.service import CoreService +from core.service import ServiceManager + +try: + from docker import Client +except ImportError: + logging.debug("missing python docker bindings") + + +class DockerService(CoreService): + """ + This is a service which will allow running docker containers in a CORE + node. + """ + name = "Docker" + executables = ("docker",) + group = "Docker" + dirs = ('/var/lib/docker/containers/', '/run/shm', '/run/resolvconf',) + configs = ('docker.sh',) + startup = ('sh docker.sh',) + shutdown = ('service docker stop',) + # Container image to start + image = "" + + @classmethod + def generate_config(cls, node, filename): + """ + Returns a string having contents of a docker.sh script that + can be modified to start a specific docker image. 
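The on_load hook further below scans local Docker images and registers one service per repository tagged ":core"; outside of the legacy docker-py Client used here, a comparable list can be pulled from the docker CLI (assuming docker is installed and the daemon is reachable):

import subprocess


def core_tagged_repos():
    # List "repository:tag" pairs and keep the repositories whose tag is "core".
    output = subprocess.run(
        ["docker", "images", "--format", "{{.Repository}}:{{.Tag}}"],
        capture_output=True,
        text=True,
        check=True,
    ).stdout
    return [line.rsplit(":", 1)[0] for line in output.splitlines() if line.endswith(":core")]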
+ """ + cfg = "#!/bin/sh\n" + cfg += "# auto-generated by Docker (docker.py)\n" + # Docker likes to think it has DNS set up or it complains. + # Unless your network was attached to the Internet this is + # non-functional but hides error messages. + cfg += 'echo "nameserver 8.8.8.8" > /run/resolvconf/resolv.conf\n' + # Starts the docker service. In Ubuntu this is docker.io; in other + # distros may just be docker + cfg += 'service docker start\n' + cfg += "# you could add a command to start a image here eg:\n" + if not cls.image: + cfg += "# docker run -d --net host --name coreDock \n" + else: + cfg += """\ +result=1 +until [ $result -eq 0 ]; do + docker run -d --net host --name coreDock %s + result=$? + # this is to alleviate contention to docker's SQLite database + sleep 0.3 +done +""" % (cls.image,) + return cfg + + @classmethod + def on_load(cls): + logging.debug("loading custom docker services") + + if "Client" in globals(): + client = Client(version="1.10") + images = client.images() + del client + else: + images = [] + + for image in images: + if u"" in image["RepoTags"][0]: + continue + for repo in image["RepoTags"]: + if u":core" not in repo: + continue + dockerid = repo.encode("ascii", "ignore").split(":")[0] + sub_class = type("SubClass", (DockerService,), {"_name": dockerid, "_image": dockerid}) + ServiceManager.add(sub_class) + + del images diff --git a/daemon/core/services/emaneservices.py b/daemon/core/services/emaneservices.py index 43cd9af4..6c9ea0a7 100644 --- a/daemon/core/services/emaneservices.py +++ b/daemon/core/services/emaneservices.py @@ -1,32 +1,37 @@ -from core.emane.nodes import EmaneNet -from core.nodes.base import CoreNode -from core.services.coreservices import CoreService +from core.enumerations import NodeTypes +from core.misc import nodeutils +from core.service import CoreService from core.xml import emanexml class EmaneTransportService(CoreService): - name: str = "transportd" - group: str = "EMANE" - executables: tuple[str, ...] = ("emanetransportd", "emanegentransportxml") - dependencies: tuple[str, ...] = () - dirs: tuple[str, ...] = () - configs: tuple[str, ...] = ("emanetransport.sh",) - startup: tuple[str, ...] = (f"bash {configs[0]}",) - validate: tuple[str, ...] = (f"pidof {executables[0]}",) - validation_timer: float = 0.5 - shutdown: tuple[str, ...] 
= (f"killall {executables[0]}",) + name = "transportd" + executables = ("emanetransportd", "emanegentransportxml") + group = "EMANE" + dependencies = () + dirs = () + configs = ("emanetransport.sh",) + startup = ("sh %s" % configs[0],) + validate = ("pidof %s" % executables[0],) + validation_timer = 0.5 + shutdown = ("killall %s" % executables[0],) @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: - emane_manager = node.session.emane - cfg = "" - for iface in node.get_ifaces(): - if not isinstance(iface.net, EmaneNet): - continue - emane_net = iface.net - config = emane_manager.get_iface_config(emane_net, iface) - if emanexml.is_external(config): - nem_id = emane_manager.get_nem_id(iface) - cfg += f"emanegentransportxml {iface.name}-platform.xml\n" - cfg += f"emanetransportd -r -l 0 -d transportdaemon{nem_id}.xml\n" - return cfg + def generate_config(cls, node, filename): + if filename == cls.configs[0]: + transport_commands = [] + for interface in node.netifs(sort=True): + network_node = node.session.get_object(interface.net.objid) + if nodeutils.is_node(network_node, NodeTypes.EMANE): + config = node.session.emane.get_configs(network_node.objid, network_node.model.name) + if config and emanexml.is_external(config): + nem_id = network_node.getnemid(interface) + command = "emanetransportd -r -l 0 -d ../transportdaemon%s.xml" % nem_id + transport_commands.append(command) + transport_commands = "\n".join(transport_commands) + return """ +emanegentransportxml -o ../ ../platform%s.xml +%s +""" % (node.objid, transport_commands) + else: + raise ValueError diff --git a/daemon/core/services/frr.py b/daemon/core/services/frr.py index 28756c19..c79542ac 100644 --- a/daemon/core/services/frr.py +++ b/daemon/core/services/frr.py @@ -2,84 +2,70 @@ frr.py: defines routing services provided by FRRouting. Assumes installation of FRR via https://deb.frrouting.org/ """ -from typing import Optional -import netaddr - -from core.emane.nodes import EmaneNet -from core.nodes.base import CoreNode, NodeBase -from core.nodes.interface import DEFAULT_MTU, CoreInterface -from core.nodes.network import PtpNet, WlanNode -from core.nodes.physical import Rj45Node -from core.nodes.wireless import WirelessNode -from core.services.coreservices import CoreService - -FRR_STATE_DIR: str = "/var/run/frr" - - -def is_wireless(node: NodeBase) -> bool: - """ - Check if the node is a wireless type node. - - :param node: node to check type for - :return: True if wireless type, False otherwise - """ - return isinstance(node, (WlanNode, EmaneNet, WirelessNode)) +from core import constants +from core.enumerations import LinkTypes, NodeTypes +from core.misc import ipaddress +from core.misc import nodeutils +from core.service import CoreService class FRRZebra(CoreService): - name: str = "FRRzebra" - group: str = "FRR" - dirs: tuple[str, ...] = ("/usr/local/etc/frr", "/var/run/frr", "/var/log/frr") - configs: tuple[str, ...] = ( + name = "FRRzebra" + group = "FRR" + dirs = ( + "/usr/local/etc/frr", + "/var/run/frr", + "/var/log/frr", + ) + configs = ( "/usr/local/etc/frr/frr.conf", "frrboot.sh", "/usr/local/etc/frr/vtysh.conf", "/usr/local/etc/frr/daemons", ) - startup: tuple[str, ...] = ("bash frrboot.sh zebra",) - shutdown: tuple[str, ...] = ("killall zebra",) - validate: tuple[str, ...] 
= ("pidof zebra",) + startup = ("sh frrboot.sh zebra",) + shutdown = ("killall zebra",) + validate = ("pidof zebra",) @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: + def generate_config(cls, node, filename): """ Return the frr.conf or frrboot.sh file contents. """ if filename == cls.configs[0]: - return cls.generate_frr_conf(node) + return cls.generateFrrConf(node) elif filename == cls.configs[1]: - return cls.generate_frr_boot(node) + return cls.generateFrrBoot(node) elif filename == cls.configs[2]: - return cls.generate_vtysh_conf(node) + return cls.generateVtyshConf(node) elif filename == cls.configs[3]: - return cls.generate_frr_daemons(node) + return cls.generateFrrDaemons(node) else: - raise ValueError( - "file name (%s) is not a known configuration: %s", filename, cls.configs - ) + raise ValueError("file name (%s) is not a known configuration: %s", filename, cls.configs) @classmethod - def generate_vtysh_conf(cls, node: CoreNode) -> str: + def generateVtyshConf(cls, node): """ Returns configuration file text. """ return "service integrated-vtysh-config\n" @classmethod - def generate_frr_conf(cls, node: CoreNode) -> str: + def generateFrrConf(cls, node): """ Returns configuration file text. Other services that depend on zebra - will have hooks that are invoked here. + will have generatefrrifcconfig() and generatefrrconfig() + hooks that are invoked here. """ # we could verify here that filename == frr.conf cfg = "" - for iface in node.get_ifaces(): - cfg += f"interface {iface.name}\n" + for ifc in node.netifs(): + cfg += "interface %s\n" % ifc.name # include control interfaces in addressing but not routing daemons - if iface.control: + if hasattr(ifc, 'control') and ifc.control is True: cfg += " " - cfg += "\n ".join(map(cls.addrstr, iface.ips())) + cfg += "\n ".join(map(cls.addrstr, ifc.addrlist)) cfg += "\n" continue cfgv4 = "" @@ -89,25 +75,25 @@ class FRRZebra(CoreService): for s in node.services: if cls.name not in s.dependencies: continue - if not (isinstance(s, FrrService) or issubclass(s, FrrService)): - continue - iface_config = s.generate_frr_iface_config(node, iface) + ifccfg = s.generatefrrifcconfig(node, ifc) if s.ipv4_routing: want_ipv4 = True if s.ipv6_routing: want_ipv6 = True - cfgv6 += iface_config + cfgv6 += ifccfg else: - cfgv4 += iface_config + cfgv4 += ifccfg if want_ipv4: + ipv4list = filter(lambda x: ipaddress.is_ipv4_address(x.split('/')[0]), ifc.addrlist) cfg += " " - cfg += "\n ".join(map(cls.addrstr, iface.ip4s)) + cfg += "\n ".join(map(cls.addrstr, ipv4list)) cfg += "\n" cfg += cfgv4 if want_ipv6: + ipv6list = filter(lambda x: ipaddress.is_ipv6_address(x.split('/')[0]), ifc.addrlist) cfg += " " - cfg += "\n ".join(map(cls.addrstr, iface.ip6s)) + cfg += "\n ".join(map(cls.addrstr, ipv6list)) cfg += "\n" cfg += cfgv6 cfg += "!\n" @@ -115,46 +101,40 @@ class FRRZebra(CoreService): for s in node.services: if cls.name not in s.dependencies: continue - if not (isinstance(s, FrrService) or issubclass(s, FrrService)): - continue - cfg += s.generate_frr_config(node) + cfg += s.generatefrrconfig(node) return cfg @staticmethod - def addrstr(ip: netaddr.IPNetwork) -> str: + def addrstr(x): """ helper for mapping IP addresses to zebra config statements """ - address = str(ip.ip) - if netaddr.valid_ipv4(address): - return f"ip address {ip}" - elif netaddr.valid_ipv6(address): - return f"ipv6 address {ip}" + if x.find(".") >= 0: + return "ip address %s" % x + elif x.find(":") >= 0: + return "ipv6 address %s" % x else: - raise 
ValueError(f"invalid address: {ip}") + raise ValueError("invalid address: %s", x) @classmethod - def generate_frr_boot(cls, node: CoreNode) -> str: + def generateFrrBoot(cls, node): """ Generate a shell script used to boot the FRR daemons. """ - frr_bin_search = node.session.options.get( - "frr_bin_search", '"/usr/local/bin /usr/bin /usr/lib/frr"' - ) - frr_sbin_search = node.session.options.get( - "frr_sbin_search", - '"/usr/local/sbin /usr/sbin /usr/lib/frr /usr/libexec/frr"', - ) - cfg = f"""\ + frr_bin_search = node.session.options.get_config("frr_bin_search", + default='"/usr/local/bin /usr/bin /usr/lib/frr"') + frr_sbin_search = node.session.options.get_config('frr_sbin_search', + default='"/usr/local/sbin /usr/sbin /usr/lib/frr"') + return """\ #!/bin/sh # auto-generated by zebra service (frr.py) -FRR_CONF={cls.configs[0]} -FRR_SBIN_SEARCH={frr_sbin_search} -FRR_BIN_SEARCH={frr_bin_search} -FRR_STATE_DIR={FRR_STATE_DIR} +FRR_CONF=%s +FRR_SBIN_SEARCH=%s +FRR_BIN_SEARCH=%s +FRR_STATE_DIR=%s searchforprog() -{{ +{ prog=$1 searchpath=$@ ret= @@ -165,10 +145,10 @@ searchforprog() fi done echo $ret -}} +} confcheck() -{{ +{ CONF_DIR=`dirname $FRR_CONF` # if /etc/frr exists, point /etc/frr/frr.conf -> CONF_DIR if [ "$CONF_DIR" != "/etc/frr" ] && [ -d /etc/frr ] && [ ! -e /etc/frr/frr.conf ]; then @@ -178,10 +158,10 @@ confcheck() if [ "$CONF_DIR" != "/etc/frr" ] && [ -d /etc/frr ] && [ ! -e /etc/frr/vtysh.conf ]; then ln -s $CONF_DIR/vtysh.conf /etc/frr/vtysh.conf fi -}} +} bootdaemon() -{{ +{ FRR_SBIN_DIR=$(searchforprog $1 $FRR_SBIN_SEARCH) if [ "z$FRR_SBIN_DIR" = "z" ]; then echo "ERROR: FRR's '$1' daemon not found in search path:" @@ -196,22 +176,19 @@ bootdaemon() flags="$flags -6" fi - if [ "$1" = "ospfd" ]; then - flags="$flags --apiserver" - fi - + #force FRR to use CORE generated conf file - flags="$flags -d -f $FRR_CONF" + flags="$flags -d -f $FRR_CONF" $FRR_SBIN_DIR/$1 $flags if [ "$?" != "0" ]; then echo "ERROR: FRR's '$1' daemon failed to start!:" return 1 fi -}} +} bootfrr() -{{ +{ FRR_BIN_DIR=$(searchforprog 'vtysh' $FRR_BIN_SEARCH) if [ "z$FRR_BIN_DIR" = "z" ]; then echo "ERROR: FRR's 'vtysh' program not found in search path:" @@ -226,12 +203,9 @@ bootfrr() fi bootdaemon "zebra" - if grep -q "^ip route " $FRR_CONF; then - bootdaemon "staticd" - fi - for r in rip ripng ospf6 ospf bgp babel isis; do - if grep -q "^router \\<${{r}}\\>" $FRR_CONF; then - bootdaemon "${{r}}d" + for r in rip ripng ospf6 ospf bgp babel; do + if grep -q "^router \<${r}\>" $FRR_CONF; then + bootdaemon "${r}d" fi done @@ -240,24 +214,18 @@ bootfrr() fi $FRR_BIN_DIR/vtysh -b -}} +} if [ "$1" != "zebra" ]; then echo "WARNING: '$1': all FRR daemons are launched by the 'zebra' service!" exit 1 fi - confcheck bootfrr -""" - for iface in node.get_ifaces(): - cfg += f"ip link set dev {iface.name} down\n" - cfg += "sleep 1\n" - cfg += f"ip link set dev {iface.name} up\n" - return cfg +""" % (cls.configs[0], frr_sbin_search, frr_bin_search, constants.FRR_STATE_DIR) @classmethod - def generate_frr_daemons(cls, node: CoreNode) -> str: + def generateFrrDaemons(cls, node): """ Returns configuration file text. """ @@ -284,7 +252,6 @@ nhrpd=yes eigrpd=yes babeld=yes sharpd=yes -staticd=yes pbrd=yes bfdd=yes fabricd=yes @@ -325,54 +292,62 @@ fabricd_options="-A 127.0.0.1" """ + class FrrService(CoreService): """ Parent class for FRR services. Defines properties and methods common to FRR's routing daemons. 
""" + name = None + group = "FRR" + dependencies = ("FRRzebra",) + dirs = () + configs = () + startup = () + shutdown = () + meta = "The config file for this service can be found in the Zebra service." - name: Optional[str] = None - group: str = "FRR" - dependencies: tuple[str, ...] = ("FRRzebra",) - meta: str = "The config file for this service can be found in the Zebra service." - ipv4_routing: bool = False - ipv6_routing: bool = False + ipv4_routing = False + ipv6_routing = False @staticmethod - def router_id(node: CoreNode) -> str: + def routerid(node): """ Helper to return the first IPv4 address of a node as its router ID. """ - for iface in node.get_ifaces(control=False): - ip4 = iface.get_ip4() - if ip4: - return str(ip4.ip) + for ifc in node.netifs(): + if hasattr(ifc, 'control') and ifc.control is True: + continue + for a in ifc.addrlist: + if a.find(".") >= 0: + return a.split('/')[0] + # raise ValueError, "no IPv4 address found for router ID" return "0.0.0.0" @staticmethod - def rj45check(iface: CoreInterface) -> bool: + def rj45check(ifc): """ Helper to detect whether interface is connected an external RJ45 link. """ - if iface.net: - for peer_iface in iface.net.get_ifaces(): - if peer_iface == iface: + if ifc.net: + for peerifc in ifc.net.netifs(): + if peerifc == ifc: continue - if isinstance(peer_iface.node, Rj45Node): + if nodeutils.is_node(peerifc, NodeTypes.RJ45): return True return False @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: + def generate_config(cls, node, filename): return "" @classmethod - def generate_frr_iface_config(cls, node: CoreNode, iface: CoreInterface) -> str: + def generatefrrifcconfig(cls, node, ifc): return "" @classmethod - def generate_frr_config(cls, node: CoreNode) -> str: + def generatefrrconfig(cls, node): return "" @@ -382,68 +357,73 @@ class FRROspfv2(FrrService): not build its own configuration file but has hooks for adding to the unified frr.conf file. """ - - name: str = "FRROSPFv2" - shutdown: tuple[str, ...] = ("killall ospfd",) - validate: tuple[str, ...] = ("pidof ospfd",) - ipv4_routing: bool = True + name = "FRROSPFv2" + startup = () + shutdown = ("killall ospfd",) + validate = ("pidof ospfd",) + ipv4_routing = True @staticmethod - def mtu_check(iface: CoreInterface) -> str: + def mtucheck(ifc): """ Helper to detect MTU mismatch and add the appropriate OSPF mtu-ignore command. This is needed when e.g. a node is linked via a GreTap device. """ - if iface.mtu != DEFAULT_MTU: + if ifc.mtu != 1500: # a workaround for PhysicalNode GreTap, which has no knowledge of # the other nodes/nets return " ip ospf mtu-ignore\n" - if not iface.net: + if not ifc.net: return "" - for iface in iface.net.get_ifaces(): - if iface.mtu != iface.mtu: + for i in ifc.net.netifs(): + if i.mtu != ifc.mtu: return " ip ospf mtu-ignore\n" return "" @staticmethod - def ptp_check(iface: CoreInterface) -> str: + def ptpcheck(ifc): """ Helper to detect whether interface is connected to a notional point-to-point link. 
""" - if isinstance(iface.net, PtpNet): + if nodeutils.is_node(ifc.net, NodeTypes.PEER_TO_PEER): return " ip ospf network point-to-point\n" return "" @classmethod - def generate_frr_config(cls, node: CoreNode) -> str: + def generatefrrconfig(cls, node): cfg = "router ospf\n" - rtrid = cls.router_id(node) - cfg += f" router-id {rtrid}\n" + rtrid = cls.routerid(node) + cfg += " router-id %s\n" % rtrid # network 10.0.0.0/24 area 0 - for iface in node.get_ifaces(control=False): - for ip4 in iface.ip4s: - cfg += f" network {ip4} area 0\n" - cfg += " ospf opaque-lsa\n" + for ifc in node.netifs(): + if hasattr(ifc, 'control') and ifc.control is True: + continue + for a in ifc.addrlist: + if a.find(".") < 0: + continue + net = ipaddress.Ipv4Prefix(a) + cfg += " network %s area 0\n" % net cfg += "!\n" return cfg @classmethod - def generate_frr_iface_config(cls, node: CoreNode, iface: CoreInterface) -> str: - cfg = cls.mtu_check(iface) + def generatefrrifcconfig(cls, node, ifc): + return cls.mtucheck(ifc) + # cfg = cls.mtucheck(ifc) # external RJ45 connections will use default OSPF timers - if cls.rj45check(iface): - return cfg - cfg += cls.ptp_check(iface) - return ( - cfg - + """\ - ip ospf hello-interval 2 - ip ospf dead-interval 6 - ip ospf retransmit-interval 5 -""" - ) + # if cls.rj45check(ifc): + # return cfg + # cfg += cls.ptpcheck(ifc) + + # return cfg + """\ + + +# ip ospf hello-interval 2 +# ip ospf dead-interval 6 +# ip ospf retransmit-interval 5 +# """ class FRROspfv3(FrrService): @@ -452,63 +432,78 @@ class FRROspfv3(FrrService): not build its own configuration file but has hooks for adding to the unified frr.conf file. """ - - name: str = "FRROSPFv3" - shutdown: tuple[str, ...] = ("killall ospf6d",) - validate: tuple[str, ...] = ("pidof ospf6d",) - ipv4_routing: bool = True - ipv6_routing: bool = True + name = "FRROSPFv3" + startup = () + shutdown = ("killall ospf6d",) + validate = ("pidof ospf6d",) + ipv4_routing = True + ipv6_routing = True @staticmethod - def min_mtu(iface: CoreInterface) -> int: + def minmtu(ifc): """ Helper to discover the minimum MTU of interfaces linked with the given interface. """ - mtu = iface.mtu - if not iface.net: + mtu = ifc.mtu + if not ifc.net: return mtu - for iface in iface.net.get_ifaces(): - if iface.mtu < mtu: - mtu = iface.mtu + for i in ifc.net.netifs(): + if i.mtu < mtu: + mtu = i.mtu return mtu @classmethod - def mtu_check(cls, iface: CoreInterface) -> str: + def mtucheck(cls, ifc): """ Helper to detect MTU mismatch and add the appropriate OSPFv3 ifmtu command. This is needed when e.g. a node is linked via a GreTap device. """ - minmtu = cls.min_mtu(iface) - if minmtu < iface.mtu: - return f" ipv6 ospf6 ifmtu {minmtu:d}\n" + minmtu = cls.minmtu(ifc) + if minmtu < ifc.mtu: + return " ipv6 ospf6 ifmtu %d\n" % minmtu else: return "" @staticmethod - def ptp_check(iface: CoreInterface) -> str: + def ptpcheck(ifc): """ Helper to detect whether interface is connected to a notional point-to-point link. 
""" - if isinstance(iface.net, PtpNet): + if nodeutils.is_node(ifc.net, NodeTypes.PEER_TO_PEER): return " ipv6 ospf6 network point-to-point\n" return "" @classmethod - def generate_frr_config(cls, node: CoreNode) -> str: + def generatefrrconfig(cls, node): cfg = "router ospf6\n" - rtrid = cls.router_id(node) - cfg += f" router-id {rtrid}\n" - for iface in node.get_ifaces(control=False): - cfg += f" interface {iface.name} area 0.0.0.0\n" + rtrid = cls.routerid(node) + cfg += " router-id %s\n" % rtrid + for ifc in node.netifs(): + if hasattr(ifc, 'control') and ifc.control is True: + continue + cfg += " interface %s area 0.0.0.0\n" % ifc.name cfg += "!\n" return cfg @classmethod - def generate_frr_iface_config(cls, node: CoreNode, iface: CoreInterface) -> str: - return cls.mtu_check(iface) + def generatefrrifcconfig(cls, node, ifc): + return cls.mtucheck(ifc) + # cfg = cls.mtucheck(ifc) + # external RJ45 connections will use default OSPF timers + # if cls.rj45check(ifc): + # return cfg + # cfg += cls.ptpcheck(ifc) + + # return cfg + """\ + + +# ipv6 ospf6 hello-interval 2 +# ipv6 ospf6 dead-interval 6 +# ipv6 ospf6 retransmit-interval 5 +# """ class FRRBgp(FrrService): @@ -517,22 +512,22 @@ class FRRBgp(FrrService): Peers must be manually configured, with a full mesh for those having the same AS number. """ - - name: str = "FRRBGP" - shutdown: tuple[str, ...] = ("killall bgpd",) - validate: tuple[str, ...] = ("pidof bgpd",) - custom_needed: bool = True - ipv4_routing: bool = True - ipv6_routing: bool = True + name = "FRRBGP" + startup = () + shutdown = ("killall bgpd",) + validate = ("pidof bgpd",) + custom_needed = True + ipv4_routing = True + ipv6_routing = True @classmethod - def generate_frr_config(cls, node: CoreNode) -> str: + def generatefrrconfig(cls, node): cfg = "!\n! BGP configuration\n!\n" cfg += "! You should configure the AS number below,\n" cfg += "! along with this router's peers.\n!\n" - cfg += f"router bgp {node.id}\n" - rtrid = cls.router_id(node) - cfg += f" bgp router-id {rtrid}\n" + cfg += "router bgp %s\n" % node.objid + rtrid = cls.routerid(node) + cfg += " bgp router-id %s\n" % rtrid cfg += " redistribute connected\n" cfg += "! neighbor 1.2.3.4 remote-as 555\n!\n" return cfg @@ -542,14 +537,14 @@ class FRRRip(FrrService): """ The RIP service provides IPv4 routing for wired networks. """ - - name: str = "FRRRIP" - shutdown: tuple[str, ...] = ("killall ripd",) - validate: tuple[str, ...] = ("pidof ripd",) - ipv4_routing: bool = True + name = "FRRRIP" + startup = () + shutdown = ("killall ripd",) + validate = ("pidof ripd",) + ipv4_routing = True @classmethod - def generate_frr_config(cls, node: CoreNode) -> str: + def generatefrrconfig(cls, node): cfg = """\ router rip redistribute static @@ -565,14 +560,14 @@ class FRRRipng(FrrService): """ The RIP NG service provides IPv6 routing for wired networks. """ - - name: str = "FRRRIPNG" - shutdown: tuple[str, ...] = ("killall ripngd",) - validate: tuple[str, ...] = ("pidof ripngd",) - ipv6_routing: bool = True + name = "FRRRIPNG" + startup = () + shutdown = ("killall ripngd",) + validate = ("pidof ripngd",) + ipv6_routing = True @classmethod - def generate_frr_config(cls, node: CoreNode) -> str: + def generatefrrconfig(cls, node): cfg = """\ router ripng redistribute static @@ -589,23 +584,25 @@ class FRRBabel(FrrService): The Babel service provides a loop-avoiding distance-vector routing protocol for IPv6 and IPv4 with fast convergence properties. """ - - name: str = "FRRBabel" - shutdown: tuple[str, ...] 
= ("killall babeld",) - validate: tuple[str, ...] = ("pidof babeld",) - ipv6_routing: bool = True + name = "FRRBabel" + startup = () + shutdown = ("killall babeld",) + validate = ("pidof babeld",) + ipv6_routing = True @classmethod - def generate_frr_config(cls, node: CoreNode) -> str: + def generatefrrconfig(cls, node): cfg = "router babel\n" - for iface in node.get_ifaces(control=False): - cfg += f" network {iface.name}\n" + for ifc in node.netifs(): + if hasattr(ifc, "control") and ifc.control is True: + continue + cfg += " network %s\n" % ifc.name cfg += " redistribute static\n redistribute ipv4 connected\n" return cfg @classmethod - def generate_frr_iface_config(cls, node: CoreNode, iface: CoreInterface) -> str: - if is_wireless(iface.net): + def generatefrrifcconfig(cls, node, ifc): + if ifc.net and ifc.net.linktype == LinkTypes.WIRELESS.value: return " babel wireless\n no babel split-horizon\n" else: return " babel wired\n babel split-horizon\n" @@ -615,69 +612,28 @@ class FRRpimd(FrrService): """ PIM multicast routing based on XORP. """ - - name: str = "FRRpimd" - shutdown: tuple[str, ...] = ("killall pimd",) - validate: tuple[str, ...] = ("pidof pimd",) - ipv4_routing: bool = True + name = 'FRRpimd' + startup = () + shutdown = ('killall pimd',) + validate = ('pidof pimd',) + ipv4_routing = True @classmethod - def generate_frr_config(cls, node: CoreNode) -> str: - ifname = "eth0" - for iface in node.get_ifaces(): - if iface.name != "lo": - ifname = iface.name + def generatefrrconfig(cls, node): + ifname = 'eth0' + for ifc in node.netifs(): + if ifc.name != 'lo': + ifname = ifc.name break - cfg = "router mfea\n!\n" - cfg += "router igmp\n!\n" - cfg += "router pim\n" - cfg += " !ip pim rp-address 10.0.0.1\n" - cfg += f" ip pim bsr-candidate {ifname}\n" - cfg += f" ip pim rp-candidate {ifname}\n" - cfg += " !ip pim spt-threshold interval 10 bytes 80000\n" + cfg = 'router mfea\n!\n' + cfg += 'router igmp\n!\n' + cfg += 'router pim\n' + cfg += ' !ip pim rp-address 10.0.0.1\n' + cfg += ' ip pim bsr-candidate %s\n' % ifname + cfg += ' ip pim rp-candidate %s\n' % ifname + cfg += ' !ip pim spt-threshold interval 10 bytes 80000\n' return cfg @classmethod - def generate_frr_iface_config(cls, node: CoreNode, iface: CoreInterface) -> str: - return " ip mfea\n ip igmp\n ip pim\n" - - -class FRRIsis(FrrService): - """ - The ISIS service provides IPv4 and IPv6 routing for wired networks. It does - not build its own configuration file but has hooks for adding to the - unified frr.conf file. - """ - - name: str = "FRRISIS" - shutdown: tuple[str, ...] = ("killall isisd",) - validate: tuple[str, ...] = ("pidof isisd",) - ipv4_routing: bool = True - ipv6_routing: bool = True - - @staticmethod - def ptp_check(iface: CoreInterface) -> str: - """ - Helper to detect whether interface is connected to a notional - point-to-point link. 
- """ - if isinstance(iface.net, PtpNet): - return " isis network point-to-point\n" - return "" - - @classmethod - def generate_frr_config(cls, node: CoreNode) -> str: - cfg = "router isis DEFAULT\n" - cfg += f" net 47.0001.0000.1900.{node.id:04x}.00\n" - cfg += " metric-style wide\n" - cfg += " is-type level-2-only\n" - cfg += "!\n" - return cfg - - @classmethod - def generate_frr_iface_config(cls, node: CoreNode, iface: CoreInterface) -> str: - cfg = " ip router isis DEFAULT\n" - cfg += " ipv6 router isis DEFAULT\n" - cfg += " isis circuit-type level-2-only\n" - cfg += cls.ptp_check(iface) - return cfg + def generatefrrifcconfig(cls, node, ifc): + return ' ip mfea\n ip igmp\n ip pim\n' diff --git a/daemon/core/services/nrl.py b/daemon/core/services/nrl.py index 32e19f60..a036b732 100644 --- a/daemon/core/services/nrl.py +++ b/daemon/core/services/nrl.py @@ -2,108 +2,116 @@ nrl.py: defines services provided by NRL protolib tools hosted here: http://www.nrl.navy.mil/itd/ncs/products """ -from typing import Optional -from core import utils -from core.nodes.base import CoreNode -from core.services.coreservices import CoreService +from core.misc import utils +from core.misc.ipaddress import Ipv4Prefix +from core.service import CoreService class NrlService(CoreService): """ Parent class for NRL services. Defines properties and methods common to NRL's routing daemons. - """ - - name: Optional[str] = None - group: str = "ProtoSvc" + """"" + name = None + group = "ProtoSvc" + dirs = () + configs = () + startup = () + shutdown = () @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: + def generate_config(cls, node, filename): return "" @staticmethod - def firstipv4prefix(node: CoreNode, prefixlen: int = 24) -> str: + def firstipv4prefix(node, prefixlen=24): """ Similar to QuaggaService.routerid(). Helper to return the first IPv4 prefix of a node, using the supplied prefix length. This ignores the interface's prefix length, so e.g. '/32' can turn into '/24'. """ - for iface in node.get_ifaces(control=False): - ip4 = iface.get_ip4() - if ip4: - return f"{ip4.ip}/{prefixlen}" - return f"0.0.0.0/{prefixlen}" + for ifc in node.netifs(): + if hasattr(ifc, 'control') and ifc.control is True: + continue + for a in ifc.addrlist: + if a.find(".") >= 0: + addr = a.split('/')[0] + pre = Ipv4Prefix("%s/%s" % (addr, prefixlen)) + return str(pre) + # raise ValueError, "no IPv4 address found" + return "0.0.0.0/%s" % prefixlen class MgenSinkService(NrlService): - name: str = "MGEN_Sink" - executables: tuple[str, ...] = ("mgen",) - configs: tuple[str, ...] = ("sink.mgen",) - startup: tuple[str, ...] = ("mgen input sink.mgen",) - validate: tuple[str, ...] = ("pidof mgen",) - shutdown: tuple[str, ...] 
= ("killall mgen",) + name = "MGEN_Sink" + executables = ("mgen",) + configs = ("sink.mgen",) + startup = ("mgen input sink.mgen",) + validate = ("pidof mgen",) + shutdown = ("killall mgen",) @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: + def generate_config(cls, node, filename): cfg = "0.0 LISTEN UDP 5000\n" - for iface in node.get_ifaces(): - name = utils.sysctl_devname(iface.name) - cfg += f"0.0 Join 224.225.1.2 INTERFACE {name}\n" + for ifc in node.netifs(): + name = utils.sysctl_devname(ifc.name) + cfg += "0.0 Join 224.225.1.2 INTERFACE %s\n" % name return cfg @classmethod - def get_startup(cls, node: CoreNode) -> tuple[str, ...]: + def get_startup(cls, node): cmd = cls.startup[0] - cmd += f" output /tmp/mgen_{node.name}.log" - return (cmd,) + cmd += " output /tmp/mgen_%s.log" % node.name + return cmd, class NrlNhdp(NrlService): """ NeighborHood Discovery Protocol for MANET networks. """ - - name: str = "NHDP" - executables: tuple[str, ...] = ("nrlnhdp",) - startup: tuple[str, ...] = ("nrlnhdp",) - shutdown: tuple[str, ...] = ("killall nrlnhdp",) - validate: tuple[str, ...] = ("pidof nrlnhdp",) + name = "NHDP" + executables = ("nrlnhdp",) + startup = ("nrlnhdp",) + shutdown = ("killall nrlnhdp",) + validate = ("pidof nrlnhdp",) @classmethod - def get_startup(cls, node: CoreNode) -> tuple[str, ...]: + def get_startup(cls, node): """ Generate the appropriate command-line based on node interfaces. """ cmd = cls.startup[0] cmd += " -l /var/log/nrlnhdp.log" - cmd += f" -rpipe {node.name}_nhdp" + cmd += " -rpipe %s_nhdp" % node.name + servicenames = map(lambda x: x.name, node.services) if "SMF" in servicenames: cmd += " -flooding ecds" - cmd += f" -smfClient {node.name}_smf" - ifaces = node.get_ifaces(control=False) - if len(ifaces) > 0: - iface_names = map(lambda x: x.name, ifaces) + cmd += " -smfClient %s_smf" % node.name + + netifs = filter(lambda x: not getattr(x, 'control', False), node.netifs()) + if len(netifs) > 0: + interfacenames = map(lambda x: x.name, netifs) cmd += " -i " - cmd += " -i ".join(iface_names) - return (cmd,) + cmd += " -i ".join(interfacenames) + + return cmd, class NrlSmf(NrlService): """ Simplified Multicast Forwarding for MANET networks. """ - - name: str = "SMF" - executables: tuple[str, ...] = ("nrlsmf",) - startup: tuple[str, ...] = ("bash startsmf.sh",) - shutdown: tuple[str, ...] = ("killall nrlsmf",) - validate: tuple[str, ...] = ("pidof nrlsmf",) - configs: tuple[str, ...] = ("startsmf.sh",) + name = "SMF" + executables = ("nrlsmf",) + startup = ("sh startsmf.sh",) + shutdown = ("killall nrlsmf",) + validate = ("pidof nrlsmf",) + configs = ("startsmf.sh",) @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: + def generate_config(cls, node, filename, ): """ Generate a startup script for SMF. 
Because nrlsmf does not daemonize, it can cause problems in some situations when launched @@ -112,13 +120,19 @@ class NrlSmf(NrlService): cfg = "#!/bin/sh\n" cfg += "# auto-generated by nrl.py:NrlSmf.generateconfig()\n" comments = "" - cmd = f"nrlsmf instance {node.name}_smf" + cmd = "nrlsmf instance %s_smf" % node.name servicenames = map(lambda x: x.name, node.services) - ifaces = node.get_ifaces(control=False) - if len(ifaces) == 0: + netifs = filter(lambda x: not getattr(x, 'control', False), node.netifs()) + if len(netifs) == 0: return "" - if len(ifaces) > 0: + + if "arouted" in servicenames: + comments += "# arouted service is enabled\n" + cmd += " tap %s_tap" % (node.name,) + cmd += " unicast %s" % cls.firstipv4prefix(node, 24) + cmd += " push lo,%s resequence on" % netifs[0].name + if len(netifs) > 0: if "NHDP" in servicenames: comments += "# NHDP service is enabled\n" cmd += " ecds " @@ -127,11 +141,12 @@ class NrlSmf(NrlService): cmd += " smpr " else: cmd += " cf " - iface_names = map(lambda x: x.name, ifaces) - cmd += ",".join(iface_names) + interfacenames = map(lambda x: x.name, netifs) + cmd += ",".join(interfacenames) cmd += " hash MD5" cmd += " log /var/log/nrlsmf.log" + cfg += comments + cmd + " < /dev/null > /dev/null 2>&1 &\n\n" return cfg @@ -140,98 +155,101 @@ class NrlOlsr(NrlService): """ Optimized Link State Routing protocol for MANET networks. """ - - name: str = "OLSR" - executables: tuple[str, ...] = ("nrlolsrd",) - startup: tuple[str, ...] = ("nrlolsrd",) - shutdown: tuple[str, ...] = ("killall nrlolsrd",) - validate: tuple[str, ...] = ("pidof nrlolsrd",) + name = "OLSR" + executables = ("nrlolsrd",) + startup = ("nrlolsrd",) + shutdown = ("killall nrlolsrd",) + validate = ("pidof nrlolsrd",) @classmethod - def get_startup(cls, node: CoreNode) -> tuple[str, ...]: + def get_startup(cls, node): """ Generate the appropriate command-line based on node interfaces. """ cmd = cls.startup[0] # are multiple interfaces supported? No. - ifaces = node.get_ifaces() - if len(ifaces) > 0: - iface = ifaces[0] - cmd += f" -i {iface.name}" + netifs = list(node.netifs()) + if len(netifs) > 0: + ifc = netifs[0] + cmd += " -i %s" % ifc.name cmd += " -l /var/log/nrlolsrd.log" - cmd += f" -rpipe {node.name}_olsr" + cmd += " -rpipe %s_olsr" % node.name + servicenames = map(lambda x: x.name, node.services) - if "SMF" in servicenames and "NHDP" not in servicenames: + if "SMF" in servicenames and not "NHDP" in servicenames: cmd += " -flooding s-mpr" - cmd += f" -smfClient {node.name}_smf" + cmd += " -smfClient %s_smf" % node.name if "zebra" in servicenames: cmd += " -z" - return (cmd,) + + return cmd, class NrlOlsrv2(NrlService): """ Optimized Link State Routing protocol version 2 for MANET networks. """ - - name: str = "OLSRv2" - executables: tuple[str, ...] = ("nrlolsrv2",) - startup: tuple[str, ...] = ("nrlolsrv2",) - shutdown: tuple[str, ...] = ("killall nrlolsrv2",) - validate: tuple[str, ...] = ("pidof nrlolsrv2",) + name = "OLSRv2" + executables = ("nrlolsrv2",) + startup = ("nrlolsrv2",) + shutdown = ("killall nrlolsrv2",) + validate = ("pidof nrlolsrv2",) @classmethod - def get_startup(cls, node: CoreNode) -> tuple[str, ...]: + def get_startup(cls, node): """ Generate the appropriate command-line based on node interfaces. 
""" cmd = cls.startup[0] cmd += " -l /var/log/nrlolsrv2.log" - cmd += f" -rpipe {node.name}_olsrv2" + cmd += " -rpipe %s_olsrv2" % node.name + servicenames = map(lambda x: x.name, node.services) if "SMF" in servicenames: cmd += " -flooding ecds" - cmd += f" -smfClient {node.name}_smf" + cmd += " -smfClient %s_smf" % node.name + cmd += " -p olsr" - ifaces = node.get_ifaces(control=False) - if len(ifaces) > 0: - iface_names = map(lambda x: x.name, ifaces) + + netifs = filter(lambda x: not getattr(x, 'control', False), node.netifs()) + if len(netifs) > 0: + interfacenames = map(lambda x: x.name, netifs) cmd += " -i " - cmd += " -i ".join(iface_names) - return (cmd,) + cmd += " -i ".join(interfacenames) + + return cmd, class OlsrOrg(NrlService): """ Optimized Link State Routing protocol from olsr.org for MANET networks. """ - - name: str = "OLSRORG" - executables: tuple[str, ...] = ("olsrd",) - configs: tuple[str, ...] = ("/etc/olsrd/olsrd.conf",) - dirs: tuple[str, ...] = ("/etc/olsrd",) - startup: tuple[str, ...] = ("olsrd",) - shutdown: tuple[str, ...] = ("killall olsrd",) - validate: tuple[str, ...] = ("pidof olsrd",) + name = "OLSRORG" + executables = ("olsrd",) + configs = ("/etc/olsrd/olsrd.conf",) + dirs = ("/etc/olsrd",) + startup = ("olsrd",) + shutdown = ("killall olsrd",) + validate = ("pidof olsrd",) @classmethod - def get_startup(cls, node: CoreNode) -> tuple[str, ...]: + def get_startup(cls, node): """ Generate the appropriate command-line based on node interfaces. """ cmd = cls.startup[0] - ifaces = node.get_ifaces(control=False) - if len(ifaces) > 0: - iface_names = map(lambda x: x.name, ifaces) + netifs = filter(lambda x: not getattr(x, 'control', False), node.netifs()) + if len(netifs) > 0: + interfacenames = map(lambda x: x.name, netifs) cmd += " -i " - cmd += " -i ".join(iface_names) - return (cmd,) + cmd += " -i ".join(interfacenames) + + return cmd, @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: + def generate_config(cls, node, filename): """ - Generate a default olsrd config file to use the broadcast address of - 255.255.255.255. + Generate a default olsrd config file to use the broadcast address of 255.255.255.255. """ cfg = """\ # @@ -556,16 +574,24 @@ class MgenActor(NrlService): """ # a unique name is required, without spaces - name: str = "MgenActor" - group: str = "ProtoSvc" - executables: tuple[str, ...] = ("mgen",) - configs: tuple[str, ...] = ("start_mgen_actor.sh",) - startup: tuple[str, ...] = ("bash start_mgen_actor.sh",) - validate: tuple[str, ...] = ("pidof mgen",) - shutdown: tuple[str, ...] = ("killall mgen",) + name = "MgenActor" + executables = ("mgen",) + # you can create your own group here + group = "ProtoSvc" + # per-node directories + dirs = () + # generated files (without a full path this file goes in the node's dir, + # e.g. /tmp/pycore.12345/n1.conf/) + configs = ('start_mgen_actor.sh',) + # list of startup commands, also may be generated during startup + startup = ("sh start_mgen_actor.sh",) + # list of validation commands + validate = ("pidof mgen",) + # list of shutdown commands + shutdown = ("killall mgen",) @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: + def generate_config(cls, node, filename): """ Generate a startup script for MgenActor. 
Because mgenActor does not daemonize, it can cause problems in some situations when launched @@ -574,9 +600,50 @@ class MgenActor(NrlService): cfg = "#!/bin/sh\n" cfg += "# auto-generated by nrl.py:MgenActor.generateconfig()\n" comments = "" - cmd = f"mgenBasicActor.py -n {node.name} -a 0.0.0.0" - ifaces = node.get_ifaces(control=False) - if len(ifaces) == 0: + cmd = "mgenBasicActor.py -n %s -a 0.0.0.0" % node.name + + netifs = [x for x in node.netifs() if not getattr(x, "control", False)] + if len(netifs) == 0: return "" + cfg += comments + cmd + " < /dev/null > /dev/null 2>&1 &\n\n" return cfg + + +class Arouted(NrlService): + """ + Adaptive Routing + """ + name = "arouted" + executables = ("arouted",) + configs = ("startarouted.sh",) + startup = ("sh startarouted.sh",) + shutdown = ("pkill arouted",) + validate = ("pidof arouted",) + + @classmethod + def generate_config(cls, node, filename): + """ + Return the Quagga.conf or quaggaboot.sh file contents. + """ + cfg = """ +#!/bin/sh +for f in "/tmp/%s_smf"; do + count=1 + until [ -e "$f" ]; do + if [ $count -eq 10 ]; then + echo "ERROR: nrlmsf pipe not found: $f" >&2 + exit 1 + fi + sleep 0.1 + count=$(($count + 1)) + done +done + +""" % node.name + cfg += "ip route add %s dev lo\n" % cls.firstipv4prefix(node, 24) + cfg += "arouted instance %s_smf tap %s_tap" % (node.name, node.name) + # seconds to consider a new route valid + cfg += " stability 10" + cfg += " 2>&1 > /var/log/arouted.log &\n\n" + return cfg diff --git a/daemon/core/services/quagga.py b/daemon/core/services/quagga.py index b96a8eae..ffd48bf6 100644 --- a/daemon/core/services/quagga.py +++ b/daemon/core/services/quagga.py @@ -1,81 +1,63 @@ """ quagga.py: defines routing services provided by Quagga. """ -from typing import Optional -import netaddr - -from core.emane.nodes import EmaneNet -from core.nodes.base import CoreNode, NodeBase -from core.nodes.interface import DEFAULT_MTU, CoreInterface -from core.nodes.network import PtpNet, WlanNode -from core.nodes.physical import Rj45Node -from core.nodes.wireless import WirelessNode -from core.services.coreservices import CoreService - -QUAGGA_STATE_DIR: str = "/var/run/quagga" - - -def is_wireless(node: NodeBase) -> bool: - """ - Check if the node is a wireless type node. - - :param node: node to check type for - :return: True if wireless type, False otherwise - """ - return isinstance(node, (WlanNode, EmaneNet, WirelessNode)) +from core import constants +from core.enumerations import LinkTypes, NodeTypes +from core.misc import ipaddress +from core.misc import nodeutils +from core.service import CoreService class Zebra(CoreService): - name: str = "zebra" - group: str = "Quagga" - dirs: tuple[str, ...] = ("/usr/local/etc/quagga", "/var/run/quagga") - configs: tuple[str, ...] = ( + name = "zebra" + group = "Quagga" + dirs = ("/usr/local/etc/quagga", "/var/run/quagga") + configs = ( "/usr/local/etc/quagga/Quagga.conf", "quaggaboot.sh", - "/usr/local/etc/quagga/vtysh.conf", + "/usr/local/etc/quagga/vtysh.conf" ) - startup: tuple[str, ...] = ("bash quaggaboot.sh zebra",) - shutdown: tuple[str, ...] = ("killall zebra",) - validate: tuple[str, ...] = ("pidof zebra",) + startup = ("sh quaggaboot.sh zebra",) + shutdown = ("killall zebra",) + validate = ("pidof zebra",) @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: + def generate_config(cls, node, filename): """ Return the Quagga.conf or quaggaboot.sh file contents. 
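# --- Illustrative sketch, not part of the diff ---
# The generated startarouted.sh above waits (10 tries, 0.1 s apart) for the
# nrlsmf control pipe before launching arouted. The same retry logic expressed
# in Python with only the standard library; the path, attempt count, and delay
# values come from the script above.
import os
import time

def wait_for_pipe(path, attempts=10, delay=0.1):
    for _ in range(attempts):
        if os.path.exists(path):
            return True
        time.sleep(delay)
    return False

# usage: wait_for_pipe("/tmp/n1_smf")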
""" if filename == cls.configs[0]: - return cls.generate_quagga_conf(node) + return cls.generateQuaggaConf(node) elif filename == cls.configs[1]: - return cls.generate_quagga_boot(node) + return cls.generateQuaggaBoot(node) elif filename == cls.configs[2]: - return cls.generate_vtysh_conf(node) + return cls.generateVtyshConf(node) else: - raise ValueError( - "file name (%s) is not a known configuration: %s", filename, cls.configs - ) + raise ValueError("file name (%s) is not a known configuration: %s", filename, cls.configs) @classmethod - def generate_vtysh_conf(cls, node: CoreNode) -> str: + def generateVtyshConf(cls, node): """ Returns configuration file text. """ return "service integrated-vtysh-config\n" @classmethod - def generate_quagga_conf(cls, node: CoreNode) -> str: + def generateQuaggaConf(cls, node): """ Returns configuration file text. Other services that depend on zebra - will have hooks that are invoked here. + will have generatequaggaifcconfig() and generatequaggaconfig() + hooks that are invoked here. """ # we could verify here that filename == Quagga.conf cfg = "" - for iface in node.get_ifaces(): - cfg += f"interface {iface.name}\n" + for ifc in node.netifs(): + cfg += "interface %s\n" % ifc.name # include control interfaces in addressing but not routing daemons - if iface.control: + if hasattr(ifc, 'control') and ifc.control is True: cfg += " " - cfg += "\n ".join(map(cls.addrstr, iface.ips())) + cfg += "\n ".join(map(cls.addrstr, ifc.addrlist)) cfg += "\n" continue cfgv4 = "" @@ -85,25 +67,25 @@ class Zebra(CoreService): for s in node.services: if cls.name not in s.dependencies: continue - if not (isinstance(s, QuaggaService) or issubclass(s, QuaggaService)): - continue - iface_config = s.generate_quagga_iface_config(node, iface) + ifccfg = s.generatequaggaifcconfig(node, ifc) if s.ipv4_routing: want_ipv4 = True if s.ipv6_routing: want_ipv6 = True - cfgv6 += iface_config + cfgv6 += ifccfg else: - cfgv4 += iface_config + cfgv4 += ifccfg if want_ipv4: + ipv4list = filter(lambda x: ipaddress.is_ipv4_address(x.split('/')[0]), ifc.addrlist) cfg += " " - cfg += "\n ".join(map(cls.addrstr, iface.ip4s)) + cfg += "\n ".join(map(cls.addrstr, ipv4list)) cfg += "\n" cfg += cfgv4 if want_ipv6: + ipv6list = filter(lambda x: ipaddress.is_ipv6_address(x.split('/')[0]), ifc.addrlist) cfg += " " - cfg += "\n ".join(map(cls.addrstr, iface.ip6s)) + cfg += "\n ".join(map(cls.addrstr, ipv6list)) cfg += "\n" cfg += cfgv6 cfg += "!\n" @@ -111,45 +93,40 @@ class Zebra(CoreService): for s in node.services: if cls.name not in s.dependencies: continue - if not (isinstance(s, QuaggaService) or issubclass(s, QuaggaService)): - continue - cfg += s.generate_quagga_config(node) + cfg += s.generatequaggaconfig(node) return cfg @staticmethod - def addrstr(ip: netaddr.IPNetwork) -> str: + def addrstr(x): """ helper for mapping IP addresses to zebra config statements """ - address = str(ip.ip) - if netaddr.valid_ipv4(address): - return f"ip address {ip}" - elif netaddr.valid_ipv6(address): - return f"ipv6 address {ip}" + if x.find(".") >= 0: + return "ip address %s" % x + elif x.find(":") >= 0: + return "ipv6 address %s" % x else: - raise ValueError(f"invalid address: {ip}") + raise ValueError("invalid address: %s", x) @classmethod - def generate_quagga_boot(cls, node: CoreNode) -> str: + def generateQuaggaBoot(cls, node): """ Generate a shell script used to boot the Quagga daemons. 
""" - quagga_bin_search = node.session.options.get( - "quagga_bin_search", '"/usr/local/bin /usr/bin /usr/lib/quagga"' - ) - quagga_sbin_search = node.session.options.get( - "quagga_sbin_search", '"/usr/local/sbin /usr/sbin /usr/lib/quagga"' - ) - return f"""\ + quagga_bin_search = node.session.options.get_config("quagga_bin_search", + default='"/usr/local/bin /usr/bin /usr/lib/quagga"') + quagga_sbin_search = node.session.options.get_config('quagga_sbin_search', + default='"/usr/local/sbin /usr/sbin /usr/lib/quagga"') + return """\ #!/bin/sh # auto-generated by zebra service (quagga.py) -QUAGGA_CONF={cls.configs[0]} -QUAGGA_SBIN_SEARCH={quagga_sbin_search} -QUAGGA_BIN_SEARCH={quagga_bin_search} -QUAGGA_STATE_DIR={QUAGGA_STATE_DIR} +QUAGGA_CONF=%s +QUAGGA_SBIN_SEARCH=%s +QUAGGA_BIN_SEARCH=%s +QUAGGA_STATE_DIR=%s searchforprog() -{{ +{ prog=$1 searchpath=$@ ret= @@ -160,10 +137,10 @@ searchforprog() fi done echo $ret -}} +} confcheck() -{{ +{ CONF_DIR=`dirname $QUAGGA_CONF` # if /etc/quagga exists, point /etc/quagga/Quagga.conf -> CONF_DIR if [ "$CONF_DIR" != "/etc/quagga" ] && [ -d /etc/quagga ] && [ ! -e /etc/quagga/Quagga.conf ]; then @@ -173,10 +150,10 @@ confcheck() if [ "$CONF_DIR" != "/etc/quagga" ] && [ -d /etc/quagga ] && [ ! -e /etc/quagga/vtysh.conf ]; then ln -s $CONF_DIR/vtysh.conf /etc/quagga/vtysh.conf fi -}} +} bootdaemon() -{{ +{ QUAGGA_SBIN_DIR=$(searchforprog $1 $QUAGGA_SBIN_SEARCH) if [ "z$QUAGGA_SBIN_DIR" = "z" ]; then echo "ERROR: Quagga's '$1' daemon not found in search path:" @@ -196,10 +173,10 @@ bootdaemon() echo "ERROR: Quagga's '$1' daemon failed to start!:" return 1 fi -}} +} bootquagga() -{{ +{ QUAGGA_BIN_DIR=$(searchforprog 'vtysh' $QUAGGA_BIN_SEARCH) if [ "z$QUAGGA_BIN_DIR" = "z" ]; then echo "ERROR: Quagga's 'vtysh' program not found in search path:" @@ -215,8 +192,8 @@ bootquagga() bootdaemon "zebra" for r in rip ripng ospf6 ospf bgp babel; do - if grep -q "^router \\<${{r}}\\>" $QUAGGA_CONF; then - bootdaemon "${{r}}d" + if grep -q "^router \<${r}\>" $QUAGGA_CONF; then + bootdaemon "${r}d" fi done @@ -225,7 +202,7 @@ bootquagga() fi $QUAGGA_BIN_DIR/vtysh -b -}} +} if [ "$1" != "zebra" ]; then echo "WARNING: '$1': all Quagga daemons are launched by the 'zebra' service!" @@ -233,7 +210,7 @@ if [ "$1" != "zebra" ]; then fi confcheck bootquagga -""" +""" % (cls.configs[0], quagga_sbin_search, quagga_bin_search, constants.QUAGGA_STATE_DIR) class QuaggaService(CoreService): @@ -241,49 +218,56 @@ class QuaggaService(CoreService): Parent class for Quagga services. Defines properties and methods common to Quagga's routing daemons. """ + name = None + group = "Quagga" + dependencies = ("zebra",) + dirs = () + configs = () + startup = () + shutdown = () + meta = "The config file for this service can be found in the Zebra service." - name: Optional[str] = None - group: str = "Quagga" - dependencies: tuple[str, ...] = (Zebra.name,) - meta: str = "The config file for this service can be found in the Zebra service." - ipv4_routing: bool = False - ipv6_routing: bool = False + ipv4_routing = False + ipv6_routing = False @staticmethod - def router_id(node: CoreNode) -> str: + def routerid(node): """ Helper to return the first IPv4 address of a node as its router ID. 
""" - for iface in node.get_ifaces(control=False): - ip4 = iface.get_ip4() - if ip4: - return str(ip4.ip) - return f"0.0.0.{node.id:d}" + for ifc in node.netifs(): + if hasattr(ifc, 'control') and ifc.control is True: + continue + for a in ifc.addrlist: + if a.find(".") >= 0: + return a.split('/')[0] + # raise ValueError, "no IPv4 address found for router ID" + return "0.0.0.0" @staticmethod - def rj45check(iface: CoreInterface) -> bool: + def rj45check(ifc): """ Helper to detect whether interface is connected an external RJ45 link. """ - if iface.net: - for peer_iface in iface.net.get_ifaces(): - if peer_iface == iface: + if ifc.net: + for peerifc in ifc.net.netifs(): + if peerifc == ifc: continue - if isinstance(peer_iface.node, Rj45Node): + if nodeutils.is_node(peerifc, NodeTypes.RJ45): return True return False @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: + def generate_config(cls, node, filename): return "" @classmethod - def generate_quagga_iface_config(cls, node: CoreNode, iface: CoreInterface) -> str: + def generatequaggaifcconfig(cls, node, ifc): return "" @classmethod - def generate_quagga_config(cls, node: CoreNode) -> str: + def generatequaggaconfig(cls, node): return "" @@ -293,67 +277,73 @@ class Ospfv2(QuaggaService): not build its own configuration file but has hooks for adding to the unified Quagga.conf file. """ - - name: str = "OSPFv2" - shutdown: tuple[str, ...] = ("killall ospfd",) - validate: tuple[str, ...] = ("pidof ospfd",) - ipv4_routing: bool = True + name = "OSPFv2" + startup = () + shutdown = ("killall ospfd",) + validate = ("pidof ospfd",) + ipv4_routing = True @staticmethod - def mtu_check(iface: CoreInterface) -> str: + def mtucheck(ifc): """ Helper to detect MTU mismatch and add the appropriate OSPF mtu-ignore command. This is needed when e.g. a node is linked via a GreTap device. """ - if iface.mtu != DEFAULT_MTU: + if ifc.mtu != 1500: # a workaround for PhysicalNode GreTap, which has no knowledge of # the other nodes/nets return " ip ospf mtu-ignore\n" - if not iface.net: + if not ifc.net: return "" - for iface in iface.net.get_ifaces(): - if iface.mtu != iface.mtu: + for i in ifc.net.netifs(): + if i.mtu != ifc.mtu: return " ip ospf mtu-ignore\n" return "" @staticmethod - def ptp_check(iface: CoreInterface) -> str: + def ptpcheck(ifc): """ Helper to detect whether interface is connected to a notional point-to-point link. 
""" - if isinstance(iface.net, PtpNet): + if nodeutils.is_node(ifc.net, NodeTypes.PEER_TO_PEER): return " ip ospf network point-to-point\n" return "" @classmethod - def generate_quagga_config(cls, node: CoreNode) -> str: + def generatequaggaconfig(cls, node): cfg = "router ospf\n" - rtrid = cls.router_id(node) - cfg += f" router-id {rtrid}\n" + rtrid = cls.routerid(node) + cfg += " router-id %s\n" % rtrid # network 10.0.0.0/24 area 0 - for iface in node.get_ifaces(control=False): - for ip4 in iface.ip4s: - cfg += f" network {ip4} area 0\n" + for ifc in node.netifs(): + if hasattr(ifc, 'control') and ifc.control is True: + continue + for a in ifc.addrlist: + if a.find(".") < 0: + continue + net = ipaddress.Ipv4Prefix(a) + cfg += " network %s area 0\n" % net cfg += "!\n" return cfg @classmethod - def generate_quagga_iface_config(cls, node: CoreNode, iface: CoreInterface) -> str: - cfg = cls.mtu_check(iface) + def generatequaggaifcconfig(cls, node, ifc): + return cls.mtucheck(ifc) + # cfg = cls.mtucheck(ifc) # external RJ45 connections will use default OSPF timers - if cls.rj45check(iface): - return cfg - cfg += cls.ptp_check(iface) - return ( - cfg - + """\ - ip ospf hello-interval 2 - ip ospf dead-interval 6 - ip ospf retransmit-interval 5 -""" - ) + # if cls.rj45check(ifc): + # return cfg + # cfg += cls.ptpcheck(ifc) + + # return cfg + """\ + + +# ip ospf hello-interval 2 +# ip ospf dead-interval 6 +# ip ospf retransmit-interval 5 +# """ class Ospfv3(QuaggaService): @@ -362,64 +352,78 @@ class Ospfv3(QuaggaService): not build its own configuration file but has hooks for adding to the unified Quagga.conf file. """ - - name: str = "OSPFv3" - shutdown: tuple[str, ...] = ("killall ospf6d",) - validate: tuple[str, ...] = ("pidof ospf6d",) - ipv4_routing: bool = True - ipv6_routing: bool = True + name = "OSPFv3" + startup = () + shutdown = ("killall ospf6d",) + validate = ("pidof ospf6d",) + ipv4_routing = True + ipv6_routing = True @staticmethod - def min_mtu(iface: CoreInterface) -> int: + def minmtu(ifc): """ Helper to discover the minimum MTU of interfaces linked with the given interface. """ - mtu = iface.mtu - if not iface.net: + mtu = ifc.mtu + if not ifc.net: return mtu - for iface in iface.net.get_ifaces(): - if iface.mtu < mtu: - mtu = iface.mtu + for i in ifc.net.netifs(): + if i.mtu < mtu: + mtu = i.mtu return mtu @classmethod - def mtu_check(cls, iface: CoreInterface) -> str: + def mtucheck(cls, ifc): """ Helper to detect MTU mismatch and add the appropriate OSPFv3 ifmtu command. This is needed when e.g. a node is linked via a GreTap device. """ - minmtu = cls.min_mtu(iface) - if minmtu < iface.mtu: - return f" ipv6 ospf6 ifmtu {minmtu:d}\n" + minmtu = cls.minmtu(ifc) + if minmtu < ifc.mtu: + return " ipv6 ospf6 ifmtu %d\n" % minmtu else: return "" @staticmethod - def ptp_check(iface: CoreInterface) -> str: + def ptpcheck(ifc): """ Helper to detect whether interface is connected to a notional point-to-point link. 
""" - if isinstance(iface.net, PtpNet): + if nodeutils.is_node(ifc.net, NodeTypes.PEER_TO_PEER): return " ipv6 ospf6 network point-to-point\n" return "" @classmethod - def generate_quagga_config(cls, node: CoreNode) -> str: + def generatequaggaconfig(cls, node): cfg = "router ospf6\n" - rtrid = cls.router_id(node) - cfg += " instance-id 65\n" - cfg += f" router-id {rtrid}\n" - for iface in node.get_ifaces(control=False): - cfg += f" interface {iface.name} area 0.0.0.0\n" + rtrid = cls.routerid(node) + cfg += " router-id %s\n" % rtrid + for ifc in node.netifs(): + if hasattr(ifc, 'control') and ifc.control is True: + continue + cfg += " interface %s area 0.0.0.0\n" % ifc.name cfg += "!\n" return cfg @classmethod - def generate_quagga_iface_config(cls, node: CoreNode, iface: CoreInterface) -> str: - return cls.mtu_check(iface) + def generatequaggaifcconfig(cls, node, ifc): + return cls.mtucheck(ifc) + # cfg = cls.mtucheck(ifc) + # external RJ45 connections will use default OSPF timers + # if cls.rj45check(ifc): + # return cfg + # cfg += cls.ptpcheck(ifc) + + # return cfg + """\ + + +# ipv6 ospf6 hello-interval 2 +# ipv6 ospf6 dead-interval 6 +# ipv6 ospf6 retransmit-interval 5 +# """ class Ospfv3mdr(Ospfv3): @@ -429,26 +433,24 @@ class Ospfv3mdr(Ospfv3): configuration file but has hooks for adding to the unified Quagga.conf file. """ - - name: str = "OSPFv3MDR" - ipv4_routing: bool = True + name = "OSPFv3MDR" + ipv4_routing = True @classmethod - def generate_quagga_iface_config(cls, node: CoreNode, iface: CoreInterface) -> str: - cfg = cls.mtu_check(iface) - if is_wireless(iface.net): - return ( - cfg - + """\ + def generatequaggaifcconfig(cls, node, ifc): + cfg = cls.mtucheck(ifc) + # Uncomment the following line to use Address Family Translation for IPv4 + cfg += " ipv6 ospf6 instance-id 65\n" + if ifc.net is not None and nodeutils.is_node(ifc.net, (NodeTypes.WIRELESS_LAN, NodeTypes.EMANE)): + return cfg + """\ ipv6 ospf6 hello-interval 2 ipv6 ospf6 dead-interval 6 ipv6 ospf6 retransmit-interval 5 ipv6 ospf6 network manet-designated-router - ipv6 ospf6 twohoprefresh 3 + ipv6 ospf6 diffhellos ipv6 ospf6 adjacencyconnectivity uniconnected ipv6 ospf6 lsafullness mincostlsa """ - ) else: return cfg @@ -459,22 +461,22 @@ class Bgp(QuaggaService): Peers must be manually configured, with a full mesh for those having the same AS number. """ - - name: str = "BGP" - shutdown: tuple[str, ...] = ("killall bgpd",) - validate: tuple[str, ...] = ("pidof bgpd",) - custom_needed: bool = True - ipv4_routing: bool = True - ipv6_routing: bool = True + name = "BGP" + startup = () + shutdown = ("killall bgpd",) + validate = ("pidof bgpd",) + custom_needed = True + ipv4_routing = True + ipv6_routing = True @classmethod - def generate_quagga_config(cls, node: CoreNode) -> str: + def generatequaggaconfig(cls, node): cfg = "!\n! BGP configuration\n!\n" cfg += "! You should configure the AS number below,\n" cfg += "! along with this router's peers.\n!\n" - cfg += f"router bgp {node.id}\n" - rtrid = cls.router_id(node) - cfg += f" bgp router-id {rtrid}\n" + cfg += "router bgp %s\n" % node.objid + rtrid = cls.routerid(node) + cfg += " bgp router-id %s\n" % rtrid cfg += " redistribute connected\n" cfg += "! neighbor 1.2.3.4 remote-as 555\n!\n" return cfg @@ -484,14 +486,14 @@ class Rip(QuaggaService): """ The RIP service provides IPv4 routing for wired networks. """ - - name: str = "RIP" - shutdown: tuple[str, ...] = ("killall ripd",) - validate: tuple[str, ...] 
= ("pidof ripd",) - ipv4_routing: bool = True + name = "RIP" + startup = () + shutdown = ("killall ripd",) + validate = ("pidof ripd",) + ipv4_routing = True @classmethod - def generate_quagga_config(cls, node: CoreNode) -> str: + def generatequaggaconfig(cls, node): cfg = """\ router rip redistribute static @@ -507,14 +509,14 @@ class Ripng(QuaggaService): """ The RIP NG service provides IPv6 routing for wired networks. """ - - name: str = "RIPNG" - shutdown: tuple[str, ...] = ("killall ripngd",) - validate: tuple[str, ...] = ("pidof ripngd",) - ipv6_routing: bool = True + name = "RIPNG" + startup = () + shutdown = ("killall ripngd",) + validate = ("pidof ripngd",) + ipv6_routing = True @classmethod - def generate_quagga_config(cls, node: CoreNode) -> str: + def generatequaggaconfig(cls, node): cfg = """\ router ripng redistribute static @@ -531,23 +533,25 @@ class Babel(QuaggaService): The Babel service provides a loop-avoiding distance-vector routing protocol for IPv6 and IPv4 with fast convergence properties. """ - - name: str = "Babel" - shutdown: tuple[str, ...] = ("killall babeld",) - validate: tuple[str, ...] = ("pidof babeld",) - ipv6_routing: bool = True + name = "Babel" + startup = () + shutdown = ("killall babeld",) + validate = ("pidof babeld",) + ipv6_routing = True @classmethod - def generate_quagga_config(cls, node: CoreNode) -> str: + def generatequaggaconfig(cls, node): cfg = "router babel\n" - for iface in node.get_ifaces(control=False): - cfg += f" network {iface.name}\n" + for ifc in node.netifs(): + if hasattr(ifc, "control") and ifc.control is True: + continue + cfg += " network %s\n" % ifc.name cfg += " redistribute static\n redistribute connected\n" return cfg @classmethod - def generate_quagga_iface_config(cls, node: CoreNode, iface: CoreInterface) -> str: - if is_wireless(iface.net): + def generatequaggaifcconfig(cls, node, ifc): + if ifc.net and ifc.net.linktype == LinkTypes.WIRELESS.value: return " babel wireless\n no babel split-horizon\n" else: return " babel wired\n babel split-horizon\n" @@ -557,28 +561,28 @@ class Xpimd(QuaggaService): """ PIM multicast routing based on XORP. """ - - name: str = "Xpimd" - shutdown: tuple[str, ...] = ("killall xpimd",) - validate: tuple[str, ...] 
= ("pidof xpimd",) - ipv4_routing: bool = True + name = 'Xpimd' + startup = () + shutdown = ('killall xpimd',) + validate = ('pidof xpimd',) + ipv4_routing = True @classmethod - def generate_quagga_config(cls, node: CoreNode) -> str: - ifname = "eth0" - for iface in node.get_ifaces(): - if iface.name != "lo": - ifname = iface.name + def generatequaggaconfig(cls, node): + ifname = 'eth0' + for ifc in node.netifs(): + if ifc.name != 'lo': + ifname = ifc.name break - cfg = "router mfea\n!\n" - cfg += "router igmp\n!\n" - cfg += "router pim\n" - cfg += " !ip pim rp-address 10.0.0.1\n" - cfg += f" ip pim bsr-candidate {ifname}\n" - cfg += f" ip pim rp-candidate {ifname}\n" - cfg += " !ip pim spt-threshold interval 10 bytes 80000\n" + cfg = 'router mfea\n!\n' + cfg += 'router igmp\n!\n' + cfg += 'router pim\n' + cfg += ' !ip pim rp-address 10.0.0.1\n' + cfg += ' ip pim bsr-candidate %s\n' % ifname + cfg += ' ip pim rp-candidate %s\n' % ifname + cfg += ' !ip pim spt-threshold interval 10 bytes 80000\n' return cfg @classmethod - def generate_quagga_iface_config(cls, node: CoreNode, iface: CoreInterface) -> str: - return " ip mfea\n ip igmp\n ip pim\n" + def generatequaggaifcconfig(cls, node, ifc): + return ' ip mfea\n ip igmp\n ip pim\n' diff --git a/daemon/core/services/sdn.py b/daemon/core/services/sdn.py index a31cf87d..edbb8dfc 100644 --- a/daemon/core/services/sdn.py +++ b/daemon/core/services/sdn.py @@ -4,37 +4,31 @@ sdn.py defines services to start Open vSwitch and the Ryu SDN Controller. import re -from core.nodes.base import CoreNode -from core.services.coreservices import CoreService +from core.service import CoreService class SdnService(CoreService): """ Parent class for SDN services. """ - - group: str = "SDN" + group = "SDN" @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: + def generate_config(cls, node, filename): return "" class OvsService(SdnService): - name: str = "OvsService" - group: str = "SDN" - executables: tuple[str, ...] = ("ovs-ofctl", "ovs-vsctl") - dirs: tuple[str, ...] = ( - "/etc/openvswitch", - "/var/run/openvswitch", - "/var/log/openvswitch", - ) - configs: tuple[str, ...] = ("OvsService.sh",) - startup: tuple[str, ...] = ("bash OvsService.sh",) - shutdown: tuple[str, ...] 
= ("killall ovs-vswitchd", "killall ovsdb-server") + name = "OvsService" + executables = ("ovs-ofctl", "ovs-vsctl") + group = "SDN" + dirs = ("/etc/openvswitch", "/var/run/openvswitch", "/var/log/openvswitch") + configs = ('OvsService.sh',) + startup = ('sh OvsService.sh',) + shutdown = ('killall ovs-vswitchd', 'killall ovsdb-server') @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: + def generate_config(cls, node, filename): # Check whether the node is running zebra has_zebra = 0 for s in node.services: @@ -43,89 +37,71 @@ class OvsService(SdnService): cfg = "#!/bin/sh\n" cfg += "# auto-generated by OvsService (OvsService.py)\n" - cfg += "## First make sure that the ovs services are up and running\n" - cfg += "/etc/init.d/openvswitch-switch start < /dev/null\n\n" - cfg += "## create the switch itself, set the fail mode to secure, \n" - cfg += "## this stops it from routing traffic without defined flows.\n" - cfg += "## remove the -- and everything after if you want it to act as a regular switch\n" - cfg += "ovs-vsctl add-br ovsbr0 -- set Bridge ovsbr0 fail-mode=secure\n" - cfg += "\n## Now add all our interfaces as ports to the switch\n" + cfg += "/etc/init.d/openvswitch-switch start < /dev/null\n" + cfg += "ovs-vsctl add-br ovsbr0\n" + cfg += "ifconfig ovsbr0 up\n" - portnum = 1 - for iface in node.get_ifaces(control=False): - ifnumstr = re.findall(r"\d+", iface.name) + for ifc in node.netifs(): + if hasattr(ifc, 'control') and ifc.control is True: + continue + ifnumstr = re.findall(r"\d+", ifc.name) ifnum = ifnumstr[0] # create virtual interfaces - cfg += "## Create a veth pair to send the data to\n" - cfg += f"ip link add rtr{ifnum} type veth peer name sw{ifnum}\n" + cfg += "ip link add rtr%s type veth peer name sw%s\n" % (ifnum, ifnum) + cfg += "ifconfig rtr%s up\n" % ifnum + cfg += "ifconfig sw%s up\n" % ifnum # remove ip address of eths because quagga/zebra will assign same IPs to rtr interfaces # or assign them manually to rtr interfaces if zebra is not running - for ip4 in iface.ip4s: - cfg += f"ip addr del {ip4.ip} dev {iface.name}\n" - if has_zebra == 0: - cfg += f"ip addr add {ip4.ip} dev rtr{ifnum}\n" - for ip6 in iface.ip6s: - cfg += f"ip -6 addr del {ip6.ip} dev {iface.name}\n" - if has_zebra == 0: - cfg += f"ip -6 addr add {ip6.ip} dev rtr{ifnum}\n" + for ifcaddr in ifc.addrlist: + if ifcaddr.find(".") >= 0: + cfg += "ip addr del %s dev %s\n" % (ifcaddr, ifc.name) + if has_zebra == 0: + cfg += "ip addr add %s dev rtr%s\n" % (ifcaddr, ifnum) + elif ifcaddr.find(":") >= 0: + cfg += "ip -6 addr del %s dev %s\n" % (ifcaddr, ifc.name) + if has_zebra == 0: + cfg += "ip -6 addr add %s dev rtr%s\n" % (ifcaddr, ifnum) + else: + raise ValueError("invalid address: %s" % ifcaddr) # add interfaces to bridge - # Make port numbers explicit so they're easier to follow in - # reading the script - cfg += "## Add the CORE interface to the switch\n" - cfg += ( - f"ovs-vsctl add-port ovsbr0 eth{ifnum} -- " - f"set Interface eth{ifnum} ofport_request={portnum:d}\n" - ) - cfg += "## And then add its sibling veth interface\n" - cfg += ( - f"ovs-vsctl add-port ovsbr0 sw{ifnum} -- " - f"set Interface sw{ifnum} ofport_request={portnum + 1:d}\n" - ) - cfg += "## start them up so we can send/receive data\n" - cfg += f"ovs-ofctl mod-port ovsbr0 eth{ifnum} up\n" - cfg += f"ovs-ofctl mod-port ovsbr0 sw{ifnum} up\n" - cfg += "## Bring up the lower part of the veth pair\n" - cfg += f"ip link set dev rtr{ifnum} up\n" - portnum += 2 + cfg += "ovs-vsctl add-port ovsbr0 eth%s\n" % 
ifnum + cfg += "ovs-vsctl add-port ovsbr0 sw%s\n" % ifnum - # Add rule for default controller if there is one local - # (even if the controller is not local, it finds it) - cfg += "\n## We assume there will be an SDN controller on the other end of this, \n" - cfg += "## but it will still function if there's not\n" + # Add rule for default controller if there is one local (even if the controller is not local, it finds it) cfg += "ovs-vsctl set-controller ovsbr0 tcp:127.0.0.1:6633\n" - cfg += "\n## Now to create some default flows, \n" - cfg += "## if the above controller will be present then you probably want to delete them\n" # Setup default flows portnum = 1 - for iface in node.get_ifaces(control=False): - cfg += "## Take the data from the CORE interface and put it on the veth and vice versa\n" - cfg += f"ovs-ofctl add-flow ovsbr0 priority=1000,in_port={portnum:d},action=output:{portnum + 1:d}\n" - cfg += f"ovs-ofctl add-flow ovsbr0 priority=1000,in_port={portnum + 1:d},action=output:{portnum:d}\n" + for ifc in node.netifs(): + if hasattr(ifc, 'control') and ifc.control is True: + continue + cfg += "ovs-ofctl add-flow ovsbr0 priority=1000,in_port=%d,action=output:%d\n" % (portnum, portnum + 1) + cfg += "ovs-ofctl add-flow ovsbr0 priority=1000,in_port=%d,action=output:%d\n" % (portnum + 1, portnum) portnum += 2 + return cfg class RyuService(SdnService): - name: str = "ryuService" - group: str = "SDN" - executables: tuple[str, ...] = ("ryu-manager",) - configs: tuple[str, ...] = ("ryuService.sh",) - startup: tuple[str, ...] = ("bash ryuService.sh",) - shutdown: tuple[str, ...] = ("killall ryu-manager",) + name = "ryuService" + executables = ("ryu-manager",) + group = "SDN" + dirs = () + configs = ('ryuService.sh',) + startup = ('sh ryuService.sh',) + shutdown = ('killall ryu-manager',) @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: + def generate_config(cls, node, filename): """ Return a string that will be written to filename, or sent to the GUI for user customization. """ + app_path = "/usr/local/lib/python2.7/dist-packages/ryu/app" cfg = "#!/bin/sh\n" cfg += "# auto-generated by ryuService (ryuService.py)\n" - cfg += ( - "ryu-manager --observe-links ryu.app.ofctl_rest ryu.app.rest_topology &\n" - ) + cfg += '/usr/local/bin/ryu-manager --observe-links %s/ofctl_rest.py %s/rest_topology.py' % (app_path, app_path) return cfg diff --git a/daemon/core/services/security.py b/daemon/core/services/security.py index afd71a14..9ed3fc39 100644 --- a/daemon/core/services/security.py +++ b/daemon/core/services/security.py @@ -6,78 +6,72 @@ firewall) import logging from core import constants -from core.nodes.base import CoreNode -from core.nodes.interface import CoreInterface -from core.services.coreservices import CoreService - -logger = logging.getLogger(__name__) +from core.service import CoreService class VPNClient(CoreService): - name: str = "VPNClient" - group: str = "Security" - configs: tuple[str, ...] = ("vpnclient.sh",) - startup: tuple[str, ...] = ("bash vpnclient.sh",) - shutdown: tuple[str, ...] = ("killall openvpn",) - validate: tuple[str, ...] 
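# --- Illustrative sketch, not part of the diff ---
# The default OpenFlow rules installed by OvsService.generate_config() above:
# each CORE interface (odd port number) is cross-connected with its veth
# sibling (the next even port). The loop below mirrors that flow-setup block
# for a given number of non-control interfaces.
def default_flows(num_ifaces):
    cfg = ""
    portnum = 1
    for _ in range(num_ifaces):
        cfg += ("ovs-ofctl add-flow ovsbr0 "
                "priority=1000,in_port=%d,action=output:%d\n" % (portnum, portnum + 1))
        cfg += ("ovs-ofctl add-flow ovsbr0 "
                "priority=1000,in_port=%d,action=output:%d\n" % (portnum + 1, portnum))
        portnum += 2
    return cfg

# usage: print(default_flows(2))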
= ("pidof openvpn",) - custom_needed: bool = True + name = "VPNClient" + group = "Security" + configs = ("vpnclient.sh",) + startup = ("sh vpnclient.sh",) + shutdown = ("killall openvpn",) + validate = ("pidof openvpn",) + custom_needed = True @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: + def generate_config(cls, node, filename): """ Return the client.conf and vpnclient.sh file contents to """ cfg = "#!/bin/sh\n" cfg += "# custom VPN Client configuration for service (security.py)\n" - fname = f"{constants.CORE_DATA_DIR}/examples/services/sampleVPNClient" + fname = "%s/examples/services/sampleVPNClient" % constants.CORE_DATA_DIR + try: - with open(fname) as f: - cfg += f.read() - except OSError: - logger.exception( - "error opening VPN client configuration template (%s)", fname - ) + cfg += open(fname, "rb").read() + except IOError: + logging.exception("Error opening VPN client configuration template (%s)", fname) + return cfg class VPNServer(CoreService): - name: str = "VPNServer" - group: str = "Security" - configs: tuple[str, ...] = ("vpnserver.sh",) - startup: tuple[str, ...] = ("bash vpnserver.sh",) - shutdown: tuple[str, ...] = ("killall openvpn",) - validate: tuple[str, ...] = ("pidof openvpn",) - custom_needed: bool = True + name = "VPNServer" + group = "Security" + configs = ("vpnserver.sh",) + startup = ("sh vpnserver.sh",) + shutdown = ("killall openvpn",) + validate = ("pidof openvpn",) + custom_needed = True @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: + def generate_config(cls, node, filename): """ Return the sample server.conf and vpnserver.sh file contents to GUI for user customization. """ cfg = "#!/bin/sh\n" cfg += "# custom VPN Server Configuration for service (security.py)\n" - fname = f"{constants.CORE_DATA_DIR}/examples/services/sampleVPNServer" + fname = "%s/examples/services/sampleVPNServer" % constants.CORE_DATA_DIR + try: - with open(fname) as f: - cfg += f.read() - except OSError: - logger.exception( - "Error opening VPN server configuration template (%s)", fname - ) + cfg += open(fname, "rb").read() + except IOError: + logging.exception("Error opening VPN server configuration template (%s)", fname) + return cfg class IPsec(CoreService): - name: str = "IPsec" - group: str = "Security" - configs: tuple[str, ...] = ("ipsec.sh",) - startup: tuple[str, ...] = ("bash ipsec.sh",) - shutdown: tuple[str, ...] = ("killall racoon",) - custom_needed: bool = True + name = "IPsec" + group = "Security" + configs = ("ipsec.sh",) + startup = ("sh ipsec.sh",) + shutdown = ("killall racoon",) + custom_needed = True @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: + def generate_config(cls, node, filename): """ Return the ipsec.conf and racoon.conf file contents to GUI for user customization. @@ -85,37 +79,37 @@ class IPsec(CoreService): cfg = "#!/bin/sh\n" cfg += "# set up static tunnel mode security assocation for service " cfg += "(security.py)\n" - fname = f"{constants.CORE_DATA_DIR}/examples/services/sampleIPsec" + fname = "%s/examples/services/sampleIPsec" % constants.CORE_DATA_DIR + try: - with open(fname) as f: - cfg += f.read() - except OSError: - logger.exception("Error opening IPsec configuration template (%s)", fname) + cfg += open(fname, "rb").read() + except IOError: + logging.exception("Error opening IPsec configuration template (%s)", fname) + return cfg class Firewall(CoreService): - name: str = "Firewall" - group: str = "Security" - configs: tuple[str, ...] 
= ("firewall.sh",) - startup: tuple[str, ...] = ("bash firewall.sh",) - custom_needed: bool = True + name = "Firewall" + group = "Security" + configs = ("firewall.sh",) + startup = ("sh firewall.sh",) + custom_needed = True @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: + def generate_config(cls, node, filename): """ Return the firewall rule examples to GUI for user customization. """ cfg = "#!/bin/sh\n" cfg += "# custom node firewall rules for service (security.py)\n" - fname = f"{constants.CORE_DATA_DIR}/examples/services/sampleFirewall" + fname = "%s/examples/services/sampleFirewall" % constants.CORE_DATA_DIR + try: - with open(fname) as f: - cfg += f.read() - except OSError: - logger.exception( - "Error opening Firewall configuration template (%s)", fname - ) + cfg += open(fname, "rb").read() + except IOError: + logging.exception("Error opening Firewall configuration template (%s)", fname) + return cfg @@ -123,29 +117,30 @@ class Nat(CoreService): """ IPv4 source NAT service. """ - - name: str = "NAT" - group: str = "Security" - executables: tuple[str, ...] = ("iptables",) - configs: tuple[str, ...] = ("nat.sh",) - startup: tuple[str, ...] = ("bash nat.sh",) - custom_needed: bool = False + name = "NAT" + executables = ("iptables",) + group = "Security" + configs = ("nat.sh", ) + startup = ("sh nat.sh",) + custom_needed = False @classmethod - def generate_iface_nat_rule(cls, iface: CoreInterface, prefix: str = "") -> str: + def generateifcnatrule(cls, ifc, line_prefix=""): """ Generate a NAT line for one interface. """ - cfg = prefix + "iptables -t nat -A POSTROUTING -o " - cfg += iface.name + " -j MASQUERADE\n" - cfg += prefix + "iptables -A FORWARD -i " + iface.name + cfg = line_prefix + "iptables -t nat -A POSTROUTING -o " + cfg +=ifc.name + " -j MASQUERADE\n" + + cfg += line_prefix + "iptables -A FORWARD -i " + ifc.name cfg += " -m state --state RELATED,ESTABLISHED -j ACCEPT\n" - cfg += prefix + "iptables -A FORWARD -i " - cfg += iface.name + " -j DROP\n" + + cfg += line_prefix + "iptables -A FORWARD -i " + cfg += ifc.name + " -j DROP\n" return cfg @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: + def generate_config(cls, node, filename): """ NAT out the first interface """ @@ -153,12 +148,15 @@ class Nat(CoreService): cfg += "# generated by security.py\n" cfg += "# NAT out the first interface by default\n" have_nat = False - for iface in node.get_ifaces(control=False): + for ifc in node.netifs(): + if hasattr(ifc, 'control') and ifc.control == True: + continue if have_nat: - cfg += cls.generate_iface_nat_rule(iface, prefix="#") + cfg += cls.generateifcnatrule(ifc, line_prefix="#") else: have_nat = True - cfg += "# NAT out the " + iface.name + " interface\n" - cfg += cls.generate_iface_nat_rule(iface) + cfg += "# NAT out the " + ifc.name + " interface\n" + cfg += cls.generateifcnatrule(ifc) cfg += "\n" return cfg + diff --git a/daemon/core/services/ucarp.py b/daemon/core/services/ucarp.py index c6f2256e..31657861 100644 --- a/daemon/core/services/ucarp.py +++ b/daemon/core/services/ucarp.py @@ -2,55 +2,54 @@ ucarp.py: defines high-availability IP address controlled by ucarp """ -from core.nodes.base import CoreNode -from core.services.coreservices import CoreService +from core.service import CoreService UCARP_ETC = "/usr/local/etc/ucarp" class Ucarp(CoreService): - name: str = "ucarp" - group: str = "Utility" - dirs: tuple[str, ...] = (UCARP_ETC,) - configs: tuple[str, ...] 
= ( - UCARP_ETC + "/default.sh", - UCARP_ETC + "/default-up.sh", - UCARP_ETC + "/default-down.sh", - "ucarpboot.sh", - ) - startup: tuple[str, ...] = ("bash ucarpboot.sh",) - shutdown: tuple[str, ...] = ("killall ucarp",) - validate: tuple[str, ...] = ("pidof ucarp",) + name = "ucarp" + group = "Utility" + dirs = (UCARP_ETC,) + configs = ( + UCARP_ETC + "/default.sh", UCARP_ETC + "/default-up.sh", UCARP_ETC + "/default-down.sh", "ucarpboot.sh",) + startup = ("sh ucarpboot.sh",) + shutdown = ("killall ucarp",) + validate = ("pidof ucarp",) @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: + def generate_config(cls, node, filename): """ Return the default file contents """ if filename == cls.configs[0]: - return cls.generate_ucarp_conf(node) + return cls.generateUcarpConf(node) elif filename == cls.configs[1]: - return cls.generate_vip_up(node) + return cls.generateVipUp(node) elif filename == cls.configs[2]: - return cls.generate_vip_down(node) + return cls.generateVipDown(node) elif filename == cls.configs[3]: - return cls.generate_ucarp_boot(node) + return cls.generateUcarpBoot(node) else: raise ValueError @classmethod - def generate_ucarp_conf(cls, node: CoreNode) -> str: + def generateUcarpConf(cls, node): """ Returns configuration file text. """ - ucarp_bin = node.session.options.get("ucarp_bin", "/usr/sbin/ucarp") - return f"""\ + try: + ucarp_bin = node.session.cfg['ucarp_bin'] + except KeyError: + ucarp_bin = "/usr/sbin/ucarp" + + return """\ #!/bin/sh # Location of UCARP executable -UCARP_EXEC={ucarp_bin} +UCARP_EXEC=%s # Location of the UCARP config directory -UCARP_CFGDIR={UCARP_ETC} +UCARP_CFGDIR=%s # Logging Facility FACILITY=daemon @@ -91,37 +90,37 @@ OPTIONS="-z -n -M" # Send extra parameter to down and up scripts #XPARAM="-x " -XPARAM="-x ${{VIRTUAL_NET}}" +XPARAM="-x ${VIRTUAL_NET}" # The start and stop scripts -START_SCRIPT=${{UCARP_CFGDIR}}/default-up.sh -STOP_SCRIPT=${{UCARP_CFGDIR}}/default-down.sh +START_SCRIPT=${UCARP_CFGDIR}/default-up.sh +STOP_SCRIPT=${UCARP_CFGDIR}/default-down.sh # These line should not need to be touched UCARP_OPTS="$OPTIONS -b $UCARP_BASE -k $SKEW -i $INTERFACE -v $INSTANCE_ID -p $PASSWORD -u $START_SCRIPT -d $STOP_SCRIPT -a $VIRTUAL_ADDRESS -s $SOURCE_ADDRESS -f $FACILITY $XPARAM" -${{UCARP_EXEC}} -B ${{UCARP_OPTS}} -""" +${UCARP_EXEC} -B ${UCARP_OPTS} +""" % (ucarp_bin, UCARP_ETC) @classmethod - def generate_ucarp_boot(cls, node: CoreNode) -> str: + def generateUcarpBoot(cls, node): """ Generate a shell script used to boot the Ucarp daemons. 
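# --- Illustrative sketch, not part of the diff ---
# Ucarp.generate_config() above dispatches on the filename: each entry in
# cls.configs maps to its own generator method. The same idea expressed with a
# lookup table; the dict-based form and the function name are hypothetical,
# only the mapping itself comes from the code above.
def dispatch_ucarp_config(cls, node, filename):
    generators = {
        cls.configs[0]: cls.generateUcarpConf,
        cls.configs[1]: cls.generateVipUp,
        cls.configs[2]: cls.generateVipDown,
        cls.configs[3]: cls.generateUcarpBoot,
    }
    if filename not in generators:
        raise ValueError("unknown config file: %s" % filename)
    return generators[filename](node)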
""" - return f"""\ + return """\ #!/bin/sh # Location of the UCARP config directory -UCARP_CFGDIR={UCARP_ETC} +UCARP_CFGDIR=%s -chmod a+x ${{UCARP_CFGDIR}}/*.sh +chmod a+x ${UCARP_CFGDIR}/*.sh # Start the default ucarp daemon configuration -${{UCARP_CFGDIR}}/default.sh +${UCARP_CFGDIR}/default.sh -""" +""" % UCARP_ETC @classmethod - def generate_vip_up(cls, node: CoreNode) -> str: + def generateVipUp(cls, node): """ Generate a shell script used to start the virtual ip """ @@ -134,7 +133,7 @@ exec 2> /dev/null IP="${2}" NET="${3}" if [ -z "$NET" ]; then - NET="24" + NET="24" fi /sbin/ip addr add ${IP}/${NET} dev "$1" @@ -143,7 +142,7 @@ fi """ @classmethod - def generate_vip_down(cls, node: CoreNode) -> str: + def generateVipDown(cls, node): """ Generate a shell script used to stop the virtual ip """ @@ -156,7 +155,7 @@ exec 2> /dev/null IP="${2}" NET="${3}" if [ -z "$NET" ]; then - NET="24" + NET="24" fi /sbin/ip addr del ${IP}/${NET} dev "$1" diff --git a/daemon/core/services/utility.py b/daemon/core/services/utility.py index e83cb9d5..366973a2 100644 --- a/daemon/core/services/utility.py +++ b/daemon/core/services/utility.py @@ -1,144 +1,172 @@ """ utility.py: defines miscellaneous utility services. """ -from typing import Optional -import netaddr +import os -from core import utils -from core.errors import CoreCommandError -from core.executables import SYSCTL -from core.nodes.base import CoreNode -from core.services.coreservices import CoreService, ServiceMode +from core import CoreCommandError +from core import constants +from core.misc import utils +from core.misc.ipaddress import Ipv4Prefix +from core.misc.ipaddress import Ipv6Prefix +from core.service import CoreService class UtilService(CoreService): """ Parent class for utility services. """ - - name: Optional[str] = None - group: str = "Utility" + name = None + group = "Utility" + dirs = () + configs = () + startup = () + shutdown = () @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: + def generate_config(cls, node, filename): return "" class IPForwardService(UtilService): - name: str = "IPForward" - configs: tuple[str, ...] = ("ipforward.sh",) - startup: tuple[str, ...] 
= ("bash ipforward.sh",) + name = "IPForward" + configs = ("ipforward.sh",) + startup = ("sh ipforward.sh",) @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: - return cls.generateconfiglinux(node, filename) + def generate_config(cls, node, filename): + if os.uname()[0] == "Linux": + return cls.generateconfiglinux(node, filename) + else: + raise Exception("unknown platform") @classmethod - def generateconfiglinux(cls, node: CoreNode, filename: str) -> str: - cfg = f"""\ + def generateconfiglinux(cls, node, filename): + cfg = """\ #!/bin/sh # auto-generated by IPForward service (utility.py) -{SYSCTL} -w net.ipv4.conf.all.forwarding=1 -{SYSCTL} -w net.ipv4.conf.default.forwarding=1 -{SYSCTL} -w net.ipv6.conf.all.forwarding=1 -{SYSCTL} -w net.ipv6.conf.default.forwarding=1 -{SYSCTL} -w net.ipv4.conf.all.send_redirects=0 -{SYSCTL} -w net.ipv4.conf.default.send_redirects=0 -{SYSCTL} -w net.ipv4.conf.all.rp_filter=0 -{SYSCTL} -w net.ipv4.conf.default.rp_filter=0 -""" - for iface in node.get_ifaces(): - name = utils.sysctl_devname(iface.name) - cfg += f"{SYSCTL} -w net.ipv4.conf.{name}.forwarding=1\n" - cfg += f"{SYSCTL} -w net.ipv4.conf.{name}.send_redirects=0\n" - cfg += f"{SYSCTL} -w net.ipv4.conf.{name}.rp_filter=0\n" +%(sysctl)s -w net.ipv4.conf.all.forwarding=1 +%(sysctl)s -w net.ipv4.conf.default.forwarding=1 +%(sysctl)s -w net.ipv6.conf.all.forwarding=1 +%(sysctl)s -w net.ipv6.conf.default.forwarding=1 +%(sysctl)s -w net.ipv4.conf.all.send_redirects=0 +%(sysctl)s -w net.ipv4.conf.default.send_redirects=0 +%(sysctl)s -w net.ipv4.conf.all.rp_filter=0 +%(sysctl)s -w net.ipv4.conf.default.rp_filter=0 +""" % {'sysctl': constants.SYSCTL_BIN} + for ifc in node.netifs(): + name = utils.sysctl_devname(ifc.name) + cfg += "%s -w net.ipv4.conf.%s.forwarding=1\n" % (constants.SYSCTL_BIN, name) + cfg += "%s -w net.ipv4.conf.%s.send_redirects=0\n" % \ + (constants.SYSCTL_BIN, name) + cfg += "%s -w net.ipv4.conf.%s.rp_filter=0\n" % (constants.SYSCTL_BIN, name) return cfg class DefaultRouteService(UtilService): - name: str = "DefaultRoute" - configs: tuple[str, ...] = ("defaultroute.sh",) - startup: tuple[str, ...] = ("bash defaultroute.sh",) + name = "DefaultRoute" + configs = ("defaultroute.sh",) + startup = ("sh defaultroute.sh",) @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: - routes = [] - ifaces = node.get_ifaces() - if ifaces: - iface = ifaces[0] - for ip in iface.ips(): - net = ip.cidr - if net.size > 1: - router = net[1] - routes.append(str(router)) + def generate_config(cls, node, filename): cfg = "#!/bin/sh\n" cfg += "# auto-generated by DefaultRoute service (utility.py)\n" - for route in routes: - cfg += f"ip route add default via {route}\n" + for ifc in node.netifs(): + if hasattr(ifc, 'control') and ifc.control is True: + continue + cfg += "\n".join(map(cls.addrstr, ifc.addrlist)) + cfg += "\n" return cfg + @staticmethod + def addrstr(x): + if x.find(":") >= 0: + net = Ipv6Prefix(x) + else: + net = Ipv4Prefix(x) + if net.max_addr() == net.min_addr(): + return "" + else: + if os.uname()[0] == "Linux": + rtcmd = "ip route add default via" + else: + raise Exception("unknown platform") + return "%s %s" % (rtcmd, net.min_addr()) + class DefaultMulticastRouteService(UtilService): - name: str = "DefaultMulticastRoute" - configs: tuple[str, ...] = ("defaultmroute.sh",) - startup: tuple[str, ...] 
= ("bash defaultmroute.sh",) + name = "DefaultMulticastRoute" + configs = ("defaultmroute.sh",) + startup = ("sh defaultmroute.sh",) @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: + def generate_config(cls, node, filename): cfg = "#!/bin/sh\n" cfg += "# auto-generated by DefaultMulticastRoute service (utility.py)\n" cfg += "# the first interface is chosen below; please change it " cfg += "as needed\n" - for iface in node.get_ifaces(control=False): - rtcmd = "ip route add 224.0.0.0/4 dev" - cfg += f"{rtcmd} {iface.name}\n" + + for ifc in node.netifs(): + if hasattr(ifc, 'control') and ifc.control is True: + continue + if os.uname()[0] == "Linux": + rtcmd = "ip route add 224.0.0.0/4 dev" + else: + raise Exception("unknown platform") + cfg += "%s %s\n" % (rtcmd, ifc.name) cfg += "\n" break return cfg class StaticRouteService(UtilService): - name: str = "StaticRoute" - configs: tuple[str, ...] = ("staticroute.sh",) - startup: tuple[str, ...] = ("bash staticroute.sh",) - custom_needed: bool = True + name = "StaticRoute" + configs = ("staticroute.sh",) + startup = ("sh staticroute.sh",) + custom_needed = True @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: + def generate_config(cls, node, filename): cfg = "#!/bin/sh\n" cfg += "# auto-generated by StaticRoute service (utility.py)\n#\n" cfg += "# NOTE: this service must be customized to be of any use\n" cfg += "# Below are samples that you can uncomment and edit.\n#\n" - for iface in node.get_ifaces(control=False): - cfg += "\n".join(map(cls.routestr, iface.ips())) + for ifc in node.netifs(): + if hasattr(ifc, 'control') and ifc.control is True: + continue + cfg += "\n".join(map(cls.routestr, ifc.addrlist)) cfg += "\n" return cfg @staticmethod - def routestr(ip: netaddr.IPNetwork) -> str: - address = str(ip.ip) - if netaddr.valid_ipv6(address): + def routestr(x): + if x.find(":") >= 0: + net = Ipv6Prefix(x) dst = "3ffe:4::/64" else: + net = Ipv4Prefix(x) dst = "10.9.8.0/24" - if ip[-2] == ip[1]: + if net.max_addr() == net.min_addr(): return "" else: - rtcmd = f"#/sbin/ip route add {dst} via" - return f"{rtcmd} {ip[1]}" + if os.uname()[0] == "Linux": + rtcmd = "#/sbin/ip route add %s via" % dst + else: + raise Exception("unknown platform") + return "%s %s" % (rtcmd, net.min_addr()) class SshService(UtilService): - name: str = "SSH" - configs: tuple[str, ...] = ("startsshd.sh", "/etc/ssh/sshd_config") - dirs: tuple[str, ...] = ("/etc/ssh", "/var/run/sshd") - startup: tuple[str, ...] = ("bash startsshd.sh",) - shutdown: tuple[str, ...] = ("killall sshd",) - validation_mode: ServiceMode = ServiceMode.BLOCKING + name = "SSH" + configs = ("startsshd.sh", "/etc/ssh/sshd_config",) + dirs = ("/etc/ssh", "/var/run/sshd",) + startup = ("sh startsshd.sh",) + shutdown = ("killall sshd",) + validate = () @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: + def generate_config(cls, node, filename): """ Use a startup script for launching sshd in order to wait for host key generation. 
@@ -147,22 +175,22 @@ class SshService(UtilService): sshstatedir = cls.dirs[1] sshlibdir = "/usr/lib/openssh" if filename == "startsshd.sh": - return f"""\ + return """\ #!/bin/sh # auto-generated by SSH service (utility.py) -ssh-keygen -q -t rsa -N "" -f {sshcfgdir}/ssh_host_rsa_key -chmod 655 {sshstatedir} +ssh-keygen -q -t rsa -N "" -f %s/ssh_host_rsa_key +chmod 655 %s # wait until RSA host key has been generated to launch sshd -/usr/sbin/sshd -f {sshcfgdir}/sshd_config -""" +/usr/sbin/sshd -f %s/sshd_config +""" % (sshcfgdir, sshstatedir, sshcfgdir) else: - return f"""\ + return """\ # auto-generated by SSH service (utility.py) Port 22 Protocol 2 -HostKey {sshcfgdir}/ssh_host_rsa_key +HostKey %s/ssh_host_rsa_key UsePrivilegeSeparation yes -PidFile {sshstatedir}/sshd.pid +PidFile %s/sshd.pid KeyRegenerationInterval 3600 ServerKeyBits 768 @@ -191,22 +219,22 @@ PrintLastLog yes TCPKeepAlive yes AcceptEnv LANG LC_* -Subsystem sftp {sshlibdir}/sftp-server +Subsystem sftp %s/sftp-server UsePAM yes UseDNS no -""" +""" % (sshcfgdir, sshstatedir, sshlibdir) class DhcpService(UtilService): - name: str = "DHCP" - configs: tuple[str, ...] = ("/etc/dhcp/dhcpd.conf",) - dirs: tuple[str, ...] = ("/etc/dhcp", "/var/lib/dhcp") - startup: tuple[str, ...] = ("touch /var/lib/dhcp/dhcpd.leases", "dhcpd") - shutdown: tuple[str, ...] = ("killall dhcpd",) - validate: tuple[str, ...] = ("pidof dhcpd",) + name = "DHCP" + configs = ("/etc/dhcp/dhcpd.conf",) + dirs = ("/etc/dhcp","/var/lib/dhcp") + startup = ("touch /var/lib/dhcp/dhcpd.leases","dhcpd") + shutdown = ("killall dhcpd",) + validate = ("pidof dhcpd",) @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: + def generate_config(cls, node, filename): """ Generate a dhcpd config file using the network address of each interface. @@ -225,47 +253,50 @@ max-lease-time 7200; ddns-update-style none; """ - for iface in node.get_ifaces(control=False): - cfg += "\n".join(map(cls.subnetentry, iface.ip4s)) + for ifc in node.netifs(): + if hasattr(ifc, 'control') and ifc.control is True: + continue + cfg += "\n".join(map(cls.subnetentry, ifc.addrlist)) cfg += "\n" return cfg @staticmethod - def subnetentry(ip: netaddr.IPNetwork) -> str: + def subnetentry(x): """ Generate a subnet declaration block given an IPv4 prefix string for inclusion in the dhcpd3 config file. """ - if ip.size == 1: + if x.find(":") >= 0: return "" - # divide the address space in half - index = (ip.size - 2) / 2 - rangelow = ip[index] - rangehigh = ip[-2] - return f""" -subnet {ip.cidr.ip} netmask {ip.netmask} {{ - pool {{ - range {rangelow} {rangehigh}; + else: + addr = x.split("/")[0] + net = Ipv4Prefix(x) + # divide the address space in half + rangelow = net.addr(net.num_addr() / 2) + rangehigh = net.max_addr() + return """ +subnet %s netmask %s { + pool { + range %s %s; default-lease-time 600; - option routers {ip.ip}; - }} -}} -""" + option routers %s; + } +} +""" % (net.prefix_str(), net.netmask_str(), rangelow, rangehigh, addr) class DhcpClientService(UtilService): """ Use a DHCP client for all interfaces for addressing. """ - - name: str = "DHCPClient" - configs: tuple[str, ...] = ("startdhcpclient.sh",) - startup: tuple[str, ...] = ("bash startdhcpclient.sh",) - shutdown: tuple[str, ...] = ("killall dhclient",) - validate: tuple[str, ...] 
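# --- Illustrative sketch, not part of the diff ---
# What DhcpService.subnetentry() above computes for one IPv4 prefix: the pool
# range runs from roughly the midpoint of the subnet up to its highest usable
# address, with the interface's own address advertised as the router. The
# stdlib ipaddress module stands in for CORE's Ipv4Prefix, so the exact range
# boundaries may differ by an address or two from the original helper.
import ipaddress

def subnet_entry(prefix_str):
    addr = prefix_str.split("/")[0]
    net = ipaddress.ip_network(prefix_str, strict=False)
    rangelow = net[net.num_addresses // 2]
    rangehigh = net.broadcast_address - 1
    template = (
        "subnet %s netmask %s {\n"
        "  pool {\n"
        "    range %s %s;\n"
        "    default-lease-time 600;\n"
        "    option routers %s;\n"
        "  }\n"
        "}\n"
    )
    return template % (net.network_address, net.netmask, rangelow, rangehigh, addr)

# usage: subnet_entry("10.0.0.1/24") -> pool range 10.0.0.128 .. 10.0.0.254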
= ("pidof dhclient",) + name = "DHCPClient" + configs = ("startdhcpclient.sh",) + startup = ("sh startdhcpclient.sh",) + shutdown = ("killall dhclient",) + validate = ("pidof dhclient",) @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: + def generate_config(cls, node, filename): """ Generate a script to invoke dhclient on all interfaces. """ @@ -274,11 +305,14 @@ class DhcpClientService(UtilService): cfg += "# uncomment this mkdir line and symlink line to enable client-" cfg += "side DNS\n# resolution based on the DHCP server response.\n" cfg += "#mkdir -p /var/run/resolvconf/interface\n" - for iface in node.get_ifaces(control=False): - cfg += f"#ln -s /var/run/resolvconf/interface/{iface.name}.dhclient" + + for ifc in node.netifs(): + if hasattr(ifc, 'control') and ifc.control is True: + continue + cfg += "#ln -s /var/run/resolvconf/interface/%s.dhclient" % ifc.name cfg += " /var/run/resolvconf/resolv.conf\n" - cfg += f"/sbin/dhclient -nw -pf /var/run/dhclient-{iface.name}.pid" - cfg += f" -lf /var/run/dhclient-{iface.name}.lease {iface.name}\n" + cfg += "/sbin/dhclient -nw -pf /var/run/dhclient-%s.pid" % ifc.name + cfg += " -lf /var/run/dhclient-%s.lease %s\n" % (ifc.name, ifc.name) return cfg @@ -286,16 +320,15 @@ class FtpService(UtilService): """ Start a vsftpd server. """ - - name: str = "FTP" - configs: tuple[str, ...] = ("vsftpd.conf",) - dirs: tuple[str, ...] = ("/var/run/vsftpd/empty", "/var/ftp") - startup: tuple[str, ...] = ("vsftpd ./vsftpd.conf",) - shutdown: tuple[str, ...] = ("killall vsftpd",) - validate: tuple[str, ...] = ("pidof vsftpd",) + name = "FTP" + configs = ("vsftpd.conf",) + dirs = ("/var/run/vsftpd/empty", "/var/ftp",) + startup = ("vsftpd ./vsftpd.conf",) + shutdown = ("killall vsftpd",) + validate = ("pidof vsftpd",) @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: + def generate_config(cls, node, filename): """ Generate a vsftpd.conf configuration file. """ @@ -319,29 +352,19 @@ class HttpService(UtilService): """ Start an apache server. """ + name = "HTTP" + configs = ("/etc/apache2/apache2.conf", "/etc/apache2/envvars", + "/var/www/index.html",) + dirs = ("/etc/apache2", "/var/run/apache2", "/var/log/apache2", + "/run/lock", "/var/lock/apache2", "/var/www",) + startup = ("chown www-data /var/lock/apache2", "apache2ctl start",) + shutdown = ("apache2ctl stop",) + validate = ("pidof apache2",) - name: str = "HTTP" - configs: tuple[str, ...] = ( - "/etc/apache2/apache2.conf", - "/etc/apache2/envvars", - "/var/www/index.html", - ) - dirs: tuple[str, ...] = ( - "/etc/apache2", - "/var/run/apache2", - "/var/log/apache2", - "/run/lock", - "/var/lock/apache2", - "/var/www", - ) - startup: tuple[str, ...] = ("chown www-data /var/lock/apache2", "apache2ctl start") - shutdown: tuple[str, ...] = ("apache2ctl stop",) - validate: tuple[str, ...] = ("pidof apache2",) - APACHEVER22: int = 22 - APACHEVER24: int = 24 + APACHEVER22, APACHEVER24 = (22, 24) @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: + def generate_config(cls, node, filename): """ Generate an apache2.conf configuration file. """ @@ -355,42 +378,44 @@ class HttpService(UtilService): return "" @classmethod - def detectversionfromcmd(cls) -> int: + def detectversionfromcmd(cls): """ Detect the apache2 version using the 'a2query' command. 
""" try: - result = utils.cmd("a2query -v") - status = 0 - except CoreCommandError as e: - status = e.returncode - result = e.stderr - if status == 0 and result[:3] == "2.4": + status, result = utils.cmd_output(['a2query', '-v']) + except CoreCommandError: + status = -1 + + if status == 0 and result[:3] == '2.4': return cls.APACHEVER24 + return cls.APACHEVER22 @classmethod - def generateapache2conf(cls, node: CoreNode, filename: str) -> str: - lockstr = { - cls.APACHEVER22: "LockFile ${APACHE_LOCK_DIR}/accept.lock\n", - cls.APACHEVER24: "Mutex file:${APACHE_LOCK_DIR} default\n", - } - mpmstr = { - cls.APACHEVER22: "", - cls.APACHEVER24: "LoadModule mpm_worker_module /usr/lib/apache2/modules/mod_mpm_worker.so\n", - } - permstr = { - cls.APACHEVER22: " Order allow,deny\n Deny from all\n Satisfy all\n", - cls.APACHEVER24: " Require all denied\n", - } - authstr = { - cls.APACHEVER22: "LoadModule authz_default_module /usr/lib/apache2/modules/mod_authz_default.so\n", - cls.APACHEVER24: "LoadModule authz_core_module /usr/lib/apache2/modules/mod_authz_core.so\n", - } - permstr2 = { - cls.APACHEVER22: "\t\tOrder allow,deny\n\t\tallow from all\n", - cls.APACHEVER24: "\t\tRequire all granted\n", - } + def generateapache2conf(cls, node, filename): + lockstr = {cls.APACHEVER22: + 'LockFile ${APACHE_LOCK_DIR}/accept.lock\n', + cls.APACHEVER24: + 'Mutex file:${APACHE_LOCK_DIR} default\n', } + mpmstr = {cls.APACHEVER22: '', cls.APACHEVER24: + 'LoadModule mpm_worker_module /usr/lib/apache2/modules/mod_mpm_worker.so\n', } + + permstr = {cls.APACHEVER22: + ' Order allow,deny\n Deny from all\n Satisfy all\n', + cls.APACHEVER24: + ' Require all denied\n', } + + authstr = {cls.APACHEVER22: + 'LoadModule authz_default_module /usr/lib/apache2/modules/mod_authz_default.so\n', + cls.APACHEVER24: + 'LoadModule authz_core_module /usr/lib/apache2/modules/mod_authz_core.so\n', } + + permstr2 = {cls.APACHEVER22: + '\t\tOrder allow,deny\n\t\tallow from all\n', + cls.APACHEVER24: + '\t\tRequire all granted\n', } + version = cls.detectversionfromcmd() cfg = "# apache2.conf generated by utility.py:HttpService\n" cfg += lockstr[version] @@ -437,7 +462,7 @@ Group ${APACHE_RUN_GROUP} AccessFileName .htaccess - + """ cfg += permstr[version] cfg += """\ @@ -484,29 +509,29 @@ ServerSignature On TraceEnable Off - ServerAdmin webmaster@localhost - DocumentRoot /var/www - - Options FollowSymLinks - AllowOverride None - - - Options Indexes FollowSymLinks MultiViews - AllowOverride None + ServerAdmin webmaster@localhost + DocumentRoot /var/www + + Options FollowSymLinks + AllowOverride None + + + Options Indexes FollowSymLinks MultiViews + AllowOverride None """ cfg += permstr2[version] cfg += """\ - - ErrorLog ${APACHE_LOG_DIR}/error.log - LogLevel warn - CustomLog ${APACHE_LOG_DIR}/access.log combined + + ErrorLog ${APACHE_LOG_DIR}/error.log + LogLevel warn + CustomLog ${APACHE_LOG_DIR}/access.log combined """ return cfg @classmethod - def generateenvvars(cls, node: CoreNode, filename: str) -> str: + def generateenvvars(cls, node, filename): return """\ # this file is used by apache2ctl - generated by utility.py:HttpService # these settings come from a default Ubuntu apache2 installation @@ -521,32 +546,34 @@ export LANG """ @classmethod - def generatehtml(cls, node: CoreNode, filename: str) -> str: - body = f"""\ + def generatehtml(cls, node, filename): + body = """\ -
<!-- generated by utility.py:HttpService -->
-<h1>{node.name} web server</h1>
+<h1>%s web server</h1>
<p>This is the default web page for this server.</p>
<p>The web server software is running but no content has been added, yet.</p>
<ul>
-""" - for iface in node.get_ifaces(control=False): - body += f"<li>{iface.name} - {[str(x) for x in iface.ips()]}</li>\n" - return f"<html><body>{body}</body></html>"
+""" % node.name + for ifc in node.netifs(): + if hasattr(ifc, 'control') and ifc.control is True: + continue + body += "<li>%s - %s</li>
  • \n" % (ifc.name, ifc.addrlist) + return "%s" % body class PcapService(UtilService): """ Pcap service for logging packets. """ - - name: str = "pcap" - configs: tuple[str, ...] = ("pcap.sh",) - startup: tuple[str, ...] = ("bash pcap.sh start",) - shutdown: tuple[str, ...] = ("bash pcap.sh stop",) - validate: tuple[str, ...] = ("pidof tcpdump",) - meta: str = "logs network traffic to pcap packet capture files" + name = "pcap" + configs = ("pcap.sh",) + dirs = () + startup = ("sh pcap.sh start",) + shutdown = ("sh pcap.sh stop",) + validate = ("pidof tcpdump",) + meta = "logs network traffic to pcap packet capture files" @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: + def generate_config(cls, node, filename): """ Generate a startpcap.sh traffic logging script. """ @@ -559,14 +586,12 @@ DUMPOPTS="-s 12288 -C 10 -n" if [ "x$1" = "xstart" ]; then """ - for iface in node.get_ifaces(): - if iface.control: - cfg += "# " + for ifc in node.netifs(): + if hasattr(ifc, 'control') and ifc.control is True: + cfg += '# ' redir = "< /dev/null" - cfg += ( - f"tcpdump ${{DUMPOPTS}} -w {node.name}.{iface.name}.pcap " - f"-i {iface.name} {redir} &\n" - ) + cfg += "tcpdump ${DUMPOPTS} -w %s.%s.pcap -i %s %s &\n" % \ + (node.name, ifc.name, ifc.name, redir) cfg += """ elif [ "x$1" = "xstop" ]; then @@ -578,58 +603,58 @@ fi; class RadvdService(UtilService): - name: str = "radvd" - configs: tuple[str, ...] = ("/etc/radvd/radvd.conf",) - dirs: tuple[str, ...] = ("/etc/radvd", "/var/run/radvd") - startup: tuple[str, ...] = ( - "radvd -C /etc/radvd/radvd.conf -m logfile -l /var/log/radvd.log", - ) - shutdown: tuple[str, ...] = ("pkill radvd",) - validate: tuple[str, ...] = ("pidof radvd",) + name = "radvd" + configs = ("/etc/radvd/radvd.conf",) + dirs = ("/etc/radvd",) + startup = ("radvd -C /etc/radvd/radvd.conf -m logfile -l /var/log/radvd.log",) + shutdown = ("pkill radvd",) + validate = ("pidof radvd",) @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: + def generate_config(cls, node, filename): """ Generate a RADVD router advertisement daemon config file using the network address of each interface. """ cfg = "# auto-generated by RADVD service (utility.py)\n" - for iface in node.get_ifaces(control=False): - prefixes = list(map(cls.subnetentry, iface.ips())) + for ifc in node.netifs(): + if hasattr(ifc, 'control') and ifc.control is True: + continue + prefixes = map(cls.subnetentry, ifc.addrlist) if len(prefixes) < 1: continue - cfg += f"""\ -interface {iface.name} -{{ + cfg += """\ +interface %s +{ AdvSendAdvert on; MinRtrAdvInterval 3; MaxRtrAdvInterval 10; AdvDefaultPreference low; AdvHomeAgentFlag off; -""" +""" % ifc.name for prefix in prefixes: if prefix == "": continue - cfg += f"""\ - prefix {prefix} - {{ + cfg += """\ + prefix %s + { AdvOnLink on; AdvAutonomous on; AdvRouterAddr on; - }}; -""" + }; +""" % prefix cfg += "};\n" return cfg @staticmethod - def subnetentry(ip: netaddr.IPNetwork) -> str: + def subnetentry(x): """ Generate a subnet declaration block given an IPv6 prefix string for inclusion in the RADVD config file. """ - address = str(ip.ip) - if netaddr.valid_ipv6(address): - return str(ip) + if x.find(":") >= 0: + net = Ipv6Prefix(x) + return str(net) else: return "" @@ -638,15 +663,14 @@ class AtdService(UtilService): """ Atd service for scheduling at jobs """ - - name: str = "atd" - configs: tuple[str, ...] = ("startatd.sh",) - dirs: tuple[str, ...] 
= ("/var/spool/cron/atjobs", "/var/spool/cron/atspool") - startup: tuple[str, ...] = ("bash startatd.sh",) - shutdown: tuple[str, ...] = ("pkill atd",) + name = "atd" + configs = ("startatd.sh",) + dirs = ("/var/spool/cron/atjobs", "/var/spool/cron/atspool") + startup = ("sh startatd.sh",) + shutdown = ("pkill atd",) @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: + def generate_config(cls, node, filename): return """ #!/bin/sh echo 00001 > /var/spool/cron/atjobs/.SEQ @@ -660,6 +684,5 @@ class UserDefinedService(UtilService): """ Dummy service allowing customization of anything. """ - - name: str = "UserDefined" - meta: str = "Customize this service to do anything upon startup." + name = "UserDefined" + meta = "Customize this service to do anything upon startup." diff --git a/daemon/core/services/xorp.py b/daemon/core/services/xorp.py index ac29b299..49425ab7 100644 --- a/daemon/core/services/xorp.py +++ b/daemon/core/services/xorp.py @@ -2,13 +2,9 @@ xorp.py: defines routing services provided by the XORP routing suite. """ -from typing import Optional +import logging -import netaddr - -from core.nodes.base import CoreNode -from core.nodes.interface import CoreInterface -from core.services.coreservices import CoreService +from core.service import CoreService class XorpRtrmgr(CoreService): @@ -16,59 +12,58 @@ class XorpRtrmgr(CoreService): XORP router manager service builds a config.boot file based on other enabled XORP services, and launches necessary daemons upon startup. """ - - name: str = "xorp_rtrmgr" - group: str = "XORP" - executables: tuple[str, ...] = ("xorp_rtrmgr",) - dirs: tuple[str, ...] = ("/etc/xorp",) - configs: tuple[str, ...] = ("/etc/xorp/config.boot",) - startup: tuple[ - str, ... - ] = f"xorp_rtrmgr -d -b {configs[0]} -l /var/log/{name}.log -P /var/run/{name}.pid" - shutdown: tuple[str, ...] = ("killall xorp_rtrmgr",) - validate: tuple[str, ...] = ("pidof xorp_rtrmgr",) + name = "xorp_rtrmgr" + executables = ("xorp_rtrmgr",) + group = "XORP" + dirs = ("/etc/xorp",) + configs = ("/etc/xorp/config.boot",) + startup = ("xorp_rtrmgr -d -b %s -l /var/log/%s.log -P /var/run/%s.pid" % (configs[0], name, name),) + shutdown = ("killall xorp_rtrmgr",) + validate = ("pidof xorp_rtrmgr",) @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: + def generate_config(cls, node, filename): """ Returns config.boot configuration file text. Other services that depend on this will have generatexorpconfig() hooks that are invoked here. Filename currently ignored. 
""" cfg = "interfaces {\n" - for iface in node.get_ifaces(): - cfg += f" interface {iface.name} {{\n" - cfg += f"\tvif {iface.name} {{\n" - cfg += "".join(map(cls.addrstr, iface.ips())) - cfg += cls.lladdrstr(iface) + for ifc in node.netifs(): + cfg += " interface %s {\n" % ifc.name + cfg += "\tvif %s {\n" % ifc.name + cfg += "".join(map(cls.addrstr, ifc.addrlist)) + cfg += cls.lladdrstr(ifc) cfg += "\t}\n" cfg += " }\n" cfg += "}\n\n" for s in node.services: - if cls.name not in s.dependencies: - continue - if not (isinstance(s, XorpService) or issubclass(s, XorpService)): - continue - cfg += s.generate_xorp_config(node) + try: + s.dependencies.index(cls.name) + cfg += s.generatexorpconfig(node) + except ValueError: + logging.exception("error getting value from service: %s", cls.name) + return cfg @staticmethod - def addrstr(ip: netaddr.IPNetwork) -> str: + def addrstr(x): """ helper for mapping IP addresses to XORP config statements """ - cfg = f"\t address {ip.ip} {{\n" - cfg += f"\t\tprefix-length: {ip.prefixlen}\n" + addr, plen = x.split("/") + cfg = "\t address %s {\n" % addr + cfg += "\t\tprefix-length: %s\n" % plen cfg += "\t }\n" return cfg @staticmethod - def lladdrstr(iface: CoreInterface) -> str: + def lladdrstr(ifc): """ helper for adding link-local address entries (required by OSPFv3) """ - cfg = f"\t address {iface.mac.eui64()} {{\n" + cfg = "\t address %s {\n" % ifc.hwaddr.tolinklocal() cfg += "\t\tprefix-length: 64\n" cfg += "\t }\n" return cfg @@ -79,41 +74,45 @@ class XorpService(CoreService): Parent class for XORP services. Defines properties and methods common to XORP's routing daemons. """ - - name: Optional[str] = None - group: str = "XORP" - executables: tuple[str, ...] = ("xorp_rtrmgr",) - dependencies: tuple[str, ...] = ("xorp_rtrmgr",) - meta: str = ( - "The config file for this service can be found in the xorp_rtrmgr service." - ) + name = None + executables = ("xorp_rtrmgr",) + group = "XORP" + dependencies = ("xorp_rtrmgr",) + dirs = () + configs = () + startup = () + shutdown = () + meta = "The config file for this service can be found in the xorp_rtrmgr service." @staticmethod - def fea(forwarding: str) -> str: + def fea(forwarding): """ Helper to add a forwarding engine entry to the config file. """ cfg = "fea {\n" - cfg += f" {forwarding} {{\n" + cfg += " %s {\n" % forwarding cfg += "\tdisable:false\n" cfg += " }\n" cfg += "}\n" return cfg @staticmethod - def mfea(forwarding, node: CoreNode) -> str: + def mfea(forwarding, ifcs): """ Helper to add a multicast forwarding engine entry to the config file. """ names = [] - for iface in node.get_ifaces(control=False): - names.append(iface.name) + for ifc in ifcs: + if hasattr(ifc, 'control') and ifc.control is True: + continue + names.append(ifc.name) names.append("register_vif") + cfg = "plumbing {\n" - cfg += f" {forwarding} {{\n" + cfg += " %s {\n" % forwarding for name in names: - cfg += f"\tinterface {name} {{\n" - cfg += f"\t vif {name} {{\n" + cfg += "\tinterface %s {\n" % name + cfg += "\t vif %s {\n" % name cfg += "\t\tdisable: false\n" cfg += "\t }\n" cfg += "\t}\n" @@ -122,7 +121,7 @@ class XorpService(CoreService): return cfg @staticmethod - def policyexportconnected() -> str: + def policyexportconnected(): """ Helper to add a policy statement for exporting connected routes. 
""" @@ -130,7 +129,7 @@ class XorpService(CoreService): cfg += " policy-statement export-connected {\n" cfg += "\tterm 100 {\n" cfg += "\t from {\n" - cfg += '\t\tprotocol: "connected"\n' + cfg += "\t\tprotocol: \"connected\"\n" cfg += "\t }\n" cfg += "\t}\n" cfg += " }\n" @@ -138,22 +137,25 @@ class XorpService(CoreService): return cfg @staticmethod - def router_id(node: CoreNode) -> str: + def routerid(node): """ Helper to return the first IPv4 address of a node as its router ID. """ - for iface in node.get_ifaces(control=False): - ip4 = iface.get_ip4() - if ip4: - return str(ip4.ip) + for ifc in node.netifs(): + if hasattr(ifc, 'control') and ifc.control is True: + continue + for a in ifc.addrlist: + if a.find(".") >= 0: + return a.split('/')[0] + # raise ValueError, "no IPv4 address found for router ID" return "0.0.0.0" @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: + def generate_config(cls, node, filename): return "" @classmethod - def generate_xorp_config(cls, node: CoreNode) -> str: + def generatexorpconfig(cls, node): return "" @@ -163,22 +165,26 @@ class XorpOspfv2(XorpService): not build its own configuration file but has hooks for adding to the unified XORP configuration file. """ - - name: str = "XORP_OSPFv2" + name = "XORP_OSPFv2" @classmethod - def generate_xorp_config(cls, node: CoreNode) -> str: + def generatexorpconfig(cls, node): cfg = cls.fea("unicast-forwarding4") - rtrid = cls.router_id(node) + rtrid = cls.routerid(node) cfg += "\nprotocols {\n" cfg += " ospf4 {\n" - cfg += f"\trouter-id: {rtrid}\n" + cfg += "\trouter-id: %s\n" % rtrid cfg += "\tarea 0.0.0.0 {\n" - for iface in node.get_ifaces(control=False): - cfg += f"\t interface {iface.name} {{\n" - cfg += f"\t\tvif {iface.name} {{\n" - for ip4 in iface.ip4s: - cfg += f"\t\t address {ip4.ip} {{\n" + for ifc in node.netifs(): + if hasattr(ifc, 'control') and ifc.control is True: + continue + cfg += "\t interface %s {\n" % ifc.name + cfg += "\t\tvif %s {\n" % ifc.name + for a in ifc.addrlist: + if a.find(".") < 0: + continue + addr = a.split("/")[0] + cfg += "\t\t address %s {\n" % addr cfg += "\t\t }\n" cfg += "\t\t}\n" cfg += "\t }\n" @@ -194,20 +200,21 @@ class XorpOspfv3(XorpService): not build its own configuration file but has hooks for adding to the unified XORP configuration file. """ - - name: str = "XORP_OSPFv3" + name = "XORP_OSPFv3" @classmethod - def generate_xorp_config(cls, node: CoreNode) -> str: + def generatexorpconfig(cls, node): cfg = cls.fea("unicast-forwarding6") - rtrid = cls.router_id(node) + rtrid = cls.routerid(node) cfg += "\nprotocols {\n" cfg += " ospf6 0 { /* Instance ID 0 */\n" - cfg += f"\trouter-id: {rtrid}\n" + cfg += "\trouter-id: %s\n" % rtrid cfg += "\tarea 0.0.0.0 {\n" - for iface in node.get_ifaces(control=False): - cfg += f"\t interface {iface.name} {{\n" - cfg += f"\t\tvif {iface.name} {{\n" + for ifc in node.netifs(): + if hasattr(ifc, 'control') and ifc.control is True: + continue + cfg += "\t interface %s {\n" % ifc.name + cfg += "\t\tvif %s {\n" % ifc.name cfg += "\t\t}\n" cfg += "\t }\n" cfg += "\t}\n" @@ -220,22 +227,21 @@ class XorpBgp(XorpService): """ IPv4 inter-domain routing. AS numbers and peers must be customized. 
""" - - name: str = "XORP_BGP" - custom_needed: bool = True + name = "XORP_BGP" + custom_needed = True @classmethod - def generate_xorp_config(cls, node: CoreNode) -> str: + def generatexorpconfig(cls, node): cfg = "/* This is a sample config that should be customized with\n" cfg += " appropriate AS numbers and peers */\n" cfg += cls.fea("unicast-forwarding4") cfg += cls.policyexportconnected() - rtrid = cls.router_id(node) + rtrid = cls.routerid(node) cfg += "\nprotocols {\n" cfg += " bgp {\n" - cfg += f"\tbgp-id: {rtrid}\n" + cfg += "\tbgp-id: %s\n" % rtrid cfg += "\tlocal-as: 65001 /* change this */\n" - cfg += '\texport: "export-connected"\n' + cfg += "\texport: \"export-connected\"\n" cfg += "\tpeer 10.0.1.1 { /* change this */\n" cfg += "\t local-ip: 10.0.1.1\n" cfg += "\t as: 65002\n" @@ -251,20 +257,25 @@ class XorpRip(XorpService): RIP IPv4 unicast routing. """ - name: str = "XORP_RIP" + name = "XORP_RIP" @classmethod - def generate_xorp_config(cls, node: CoreNode) -> str: + def generatexorpconfig(cls, node): cfg = cls.fea("unicast-forwarding4") cfg += cls.policyexportconnected() cfg += "\nprotocols {\n" cfg += " rip {\n" - cfg += '\texport: "export-connected"\n' - for iface in node.get_ifaces(control=False): - cfg += f"\tinterface {iface.name} {{\n" - cfg += f"\t vif {iface.name} {{\n" - for ip4 in iface.ip4s: - cfg += f"\t\taddress {ip4.ip} {{\n" + cfg += "\texport: \"export-connected\"\n" + for ifc in node.netifs(): + if hasattr(ifc, 'control') and ifc.control is True: + continue + cfg += "\tinterface %s {\n" % ifc.name + cfg += "\t vif %s {\n" % ifc.name + for a in ifc.addrlist: + if a.find(".") < 0: + continue + addr = a.split("/")[0] + cfg += "\t\taddress %s {\n" % addr cfg += "\t\t disable: false\n" cfg += "\t\t}\n" cfg += "\t }\n" @@ -278,20 +289,28 @@ class XorpRipng(XorpService): """ RIP NG IPv6 unicast routing. """ - - name: str = "XORP_RIPNG" + name = "XORP_RIPNG" @classmethod - def generate_xorp_config(cls, node: CoreNode) -> str: + def generatexorpconfig(cls, node): cfg = cls.fea("unicast-forwarding6") cfg += cls.policyexportconnected() cfg += "\nprotocols {\n" cfg += " ripng {\n" - cfg += '\texport: "export-connected"\n' - for iface in node.get_ifaces(control=False): - cfg += f"\tinterface {iface.name} {{\n" - cfg += f"\t vif {iface.name} {{\n" - cfg += f"\t\taddress {iface.mac.eui64()} {{\n" + cfg += "\texport: \"export-connected\"\n" + for ifc in node.netifs(): + if hasattr(ifc, 'control') and ifc.control is True: + continue + cfg += "\tinterface %s {\n" % ifc.name + cfg += "\t vif %s {\n" % ifc.name + # for a in ifc.addrlist: + # if a.find(":") < 0: + # continue + # addr = a.split("/")[0] + # cfg += "\t\taddress %s {\n" % addr + # cfg += "\t\t disable: false\n" + # cfg += "\t\t}\n" + cfg += "\t\taddress %s {\n" % ifc.hwaddr.tolinklocal() cfg += "\t\t disable: false\n" cfg += "\t\t}\n" cfg += "\t }\n" @@ -305,48 +324,53 @@ class XorpPimSm4(XorpService): """ PIM Sparse Mode IPv4 multicast routing. 
""" - - name: str = "XORP_PIMSM4" + name = "XORP_PIMSM4" @classmethod - def generate_xorp_config(cls, node: CoreNode) -> str: - cfg = cls.mfea("mfea4", node) + def generatexorpconfig(cls, node): + cfg = cls.mfea("mfea4", node.netifs()) + cfg += "\nprotocols {\n" cfg += " igmp {\n" names = [] - for iface in node.get_ifaces(control=False): - names.append(iface.name) - cfg += f"\tinterface {iface.name} {{\n" - cfg += f"\t vif {iface.name} {{\n" + for ifc in node.netifs(): + if hasattr(ifc, 'control') and ifc.control is True: + continue + names.append(ifc.name) + cfg += "\tinterface %s {\n" % ifc.name + cfg += "\t vif %s {\n" % ifc.name cfg += "\t\tdisable: false\n" cfg += "\t }\n" cfg += "\t}\n" cfg += " }\n" cfg += "}\n" + cfg += "\nprotocols {\n" cfg += " pimsm4 {\n" names.append("register_vif") for name in names: - cfg += f"\tinterface {name} {{\n" - cfg += f"\t vif {name} {{\n" + cfg += "\tinterface %s {\n" % name + cfg += "\t vif %s {\n" % name cfg += "\t\tdr-priority: 1\n" cfg += "\t }\n" cfg += "\t}\n" cfg += "\tbootstrap {\n" cfg += "\t cand-bsr {\n" cfg += "\t\tscope-zone 224.0.0.0/4 {\n" - cfg += f'\t\t cand-bsr-by-vif-name: "{names[0]}"\n' + cfg += "\t\t cand-bsr-by-vif-name: \"%s\"\n" % names[0] cfg += "\t\t}\n" cfg += "\t }\n" cfg += "\t cand-rp {\n" cfg += "\t\tgroup-prefix 224.0.0.0/4 {\n" - cfg += f'\t\t cand-rp-by-vif-name: "{names[0]}"\n' + cfg += "\t\t cand-rp-by-vif-name: \"%s\"\n" % names[0] cfg += "\t\t}\n" cfg += "\t }\n" cfg += "\t}\n" + cfg += " }\n" cfg += "}\n" + cfg += "\nprotocols {\n" cfg += " fib2mrib {\n" cfg += "\tdisable: false\n" @@ -359,48 +383,53 @@ class XorpPimSm6(XorpService): """ PIM Sparse Mode IPv6 multicast routing. """ - - name: str = "XORP_PIMSM6" + name = "XORP_PIMSM6" @classmethod - def generate_xorp_config(cls, node: CoreNode) -> str: - cfg = cls.mfea("mfea6", node) + def generatexorpconfig(cls, node): + cfg = cls.mfea("mfea6", node.netifs()) + cfg += "\nprotocols {\n" cfg += " mld {\n" names = [] - for iface in node.get_ifaces(control=False): - names.append(iface.name) - cfg += f"\tinterface {iface.name} {{\n" - cfg += f"\t vif {iface.name} {{\n" + for ifc in node.netifs(): + if hasattr(ifc, 'control') and ifc.control is True: + continue + names.append(ifc.name) + cfg += "\tinterface %s {\n" % ifc.name + cfg += "\t vif %s {\n" % ifc.name cfg += "\t\tdisable: false\n" cfg += "\t }\n" cfg += "\t}\n" cfg += " }\n" cfg += "}\n" + cfg += "\nprotocols {\n" cfg += " pimsm6 {\n" names.append("register_vif") for name in names: - cfg += f"\tinterface {name} {{\n" - cfg += f"\t vif {name} {{\n" + cfg += "\tinterface %s {\n" % name + cfg += "\t vif %s {\n" % name cfg += "\t\tdr-priority: 1\n" cfg += "\t }\n" cfg += "\t}\n" cfg += "\tbootstrap {\n" cfg += "\t cand-bsr {\n" cfg += "\t\tscope-zone ff00::/8 {\n" - cfg += f'\t\t cand-bsr-by-vif-name: "{names[0]}"\n' + cfg += "\t\t cand-bsr-by-vif-name: \"%s\"\n" % names[0] cfg += "\t\t}\n" cfg += "\t }\n" cfg += "\t cand-rp {\n" cfg += "\t\tgroup-prefix ff00::/8 {\n" - cfg += f'\t\t cand-rp-by-vif-name: "{names[0]}"\n' + cfg += "\t\t cand-rp-by-vif-name: \"%s\"\n" % names[0] cfg += "\t\t}\n" cfg += "\t }\n" cfg += "\t}\n" + cfg += " }\n" cfg += "}\n" + cfg += "\nprotocols {\n" cfg += " fib2mrib {\n" cfg += "\tdisable: false\n" @@ -413,21 +442,25 @@ class XorpOlsr(XorpService): """ OLSR IPv4 unicast MANET routing. 
""" - - name: str = "XORP_OLSR" + name = "XORP_OLSR" @classmethod - def generate_xorp_config(cls, node: CoreNode) -> str: + def generatexorpconfig(cls, node): cfg = cls.fea("unicast-forwarding4") - rtrid = cls.router_id(node) + rtrid = cls.routerid(node) cfg += "\nprotocols {\n" cfg += " olsr4 {\n" - cfg += f"\tmain-address: {rtrid}\n" - for iface in node.get_ifaces(control=False): - cfg += f"\tinterface {iface.name} {{\n" - cfg += f"\t vif {iface.name} {{\n" - for ip4 in iface.ip4s: - cfg += f"\t\taddress {ip4.ip} {{\n" + cfg += "\tmain-address: %s\n" % rtrid + for ifc in node.netifs(): + if hasattr(ifc, 'control') and ifc.control is True: + continue + cfg += "\tinterface %s {\n" % ifc.name + cfg += "\t vif %s {\n" % ifc.name + for a in ifc.addrlist: + if a.find(".") < 0: + continue + addr = a.split("/")[0] + cfg += "\t\taddress %s {\n" % addr cfg += "\t\t}\n" cfg += "\t }\n" cfg += "\t}\n" diff --git a/daemon/core/session.py b/daemon/core/session.py new file mode 100644 index 00000000..84f1d822 --- /dev/null +++ b/daemon/core/session.py @@ -0,0 +1,1093 @@ +""" +session.py: defines the Session class used by the core-daemon daemon program +that manages a CORE session. +""" + +import logging +import os +import random +import shutil +import subprocess +import tempfile +import threading +import time +from multiprocessing.pool import ThreadPool + +import pwd + +from core import constants +from core.api import coreapi +from core.broker import CoreBroker +from core.conf import ConfigurableManager +from core.conf import ConfigurableOptions +from core.conf import Configuration +from core.data import EventData +from core.data import ExceptionData +from core.emane.emanemanager import EmaneManager +from core.enumerations import ConfigDataTypes +from core.enumerations import EventTypes +from core.enumerations import ExceptionLevels +from core.enumerations import NodeTypes +from core.enumerations import RegisterTlvs +from core.location import CoreLocation +from core.misc import nodeutils +from core.misc import utils +from core.misc.event import EventLoop +from core.misc.ipaddress import MacAddress +from core.mobility import MobilityManager +from core.netns import nodes +from core.sdt import Sdt +from core.service import CoreServices +from core.xml import corexml, corexmldeployment + + +class Session(object): + """ + CORE session manager. + """ + + def __init__(self, _id, config=None, mkdir=True): + """ + Create a Session instance. + + :param int _id: session id + :param dict config: session configuration + :param bool mkdir: flag to determine if a directory should be made + """ + self.id = _id + + # define and create session directory when desired + self.session_dir = os.path.join(tempfile.gettempdir(), "pycore.%s" % self.id) + if mkdir: + os.mkdir(self.session_dir) + + self.name = None + self.file_name = None + self.thumbnail = None + self.user = None + self.event_loop = EventLoop() + + # dict of objects: all nodes and nets + self.objects = {} + self._objects_lock = threading.Lock() + + # TODO: should the default state be definition? 
+ self.state = EventTypes.NONE.value + self._state_time = time.time() + self._state_file = os.path.join(self.session_dir, "state") + + self._hooks = {} + self._state_hooks = {} + + self.add_state_hook(state=EventTypes.RUNTIME_STATE.value, hook=self.runtime_state_hook) + + self.master = False + + # handlers for broadcasting information + self.event_handlers = [] + self.exception_handlers = [] + self.node_handlers = [] + self.link_handlers = [] + self.file_handlers = [] + self.config_handlers = [] + self.shutdown_handlers = [] + + # session options/metadata + self.options = SessionConfig() + if not config: + config = {} + for key, value in config.iteritems(): + self.options.set_config(key, value) + self.metadata = SessionMetaData() + + # initialize session feature helpers + self.broker = CoreBroker(session=self) + self.location = CoreLocation() + self.mobility = MobilityManager(session=self) + self.services = CoreServices(session=self) + self.emane = EmaneManager(session=self) + self.sdt = Sdt(session=self) + + def shutdown(self): + """ + Shutdown all emulation objects and remove the session directory. + """ + # shutdown/cleanup feature helpers + self.emane.shutdown() + self.broker.shutdown() + self.sdt.shutdown() + + # delete all current objects + self.delete_objects() + + # remove this sessions working directory + preserve = self.options.get_config("preservedir") == "1" + if not preserve: + shutil.rmtree(self.session_dir, ignore_errors=True) + + # call session shutdown handlers + for handler in self.shutdown_handlers: + handler(self) + + def broadcast_event(self, event_data): + """ + Handle event data that should be provided to event handler. + + :param core.data.EventData event_data: event data to send out + :return: nothing + """ + + for handler in self.event_handlers: + handler(event_data) + + def broadcast_exception(self, exception_data): + """ + Handle exception data that should be provided to exception handlers. + + :param core.data.ExceptionData exception_data: exception data to send out + :return: nothing + """ + + for handler in self.exception_handlers: + handler(exception_data) + + def broadcast_node(self, node_data): + """ + Handle node data that should be provided to node handlers. + + :param core.data.ExceptionData node_data: node data to send out + :return: nothing + """ + + for handler in self.node_handlers: + handler(node_data) + + def broadcast_file(self, file_data): + """ + Handle file data that should be provided to file handlers. + + :param core.data.FileData file_data: file data to send out + :return: nothing + """ + + for handler in self.file_handlers: + handler(file_data) + + def broadcast_config(self, config_data): + """ + Handle config data that should be provided to config handlers. + + :param core.data.ConfigData config_data: config data to send out + :return: nothing + """ + + for handler in self.config_handlers: + handler(config_data) + + def broadcast_link(self, link_data): + """ + Handle link data that should be provided to link handlers. + + :param core.data.ExceptionData link_data: link data to send out + :return: nothing + """ + + for handler in self.link_handlers: + handler(link_data) + + def set_state(self, state, send_event=False): + """ + Set the session's current state. 
+ + :param core.enumerations.EventTypes state: state to set to + :param send_event: if true, generate core API event messages + :return: nothing + """ + state_value = state.value + state_name = state.name + + if self.state == state_value: + logging.info("session(%s) is already in state: %s, skipping change", self.id, state_name) + return + + self.state = state_value + self._state_time = time.time() + logging.info("changing session(%s) to state %s", self.id, state_name) + + self.write_state(state_value) + self.run_hooks(state_value) + self.run_state_hooks(state_value) + + if send_event: + event_data = EventData(event_type=state_value, time="%s" % time.time()) + self.broadcast_event(event_data) + + def write_state(self, state): + """ + Write the current state to a state file in the session dir. + + :param int state: state to write to file + :return: nothing + """ + try: + state_file = open(self._state_file, "w") + state_file.write("%d %s\n" % (state, coreapi.state_name(state))) + state_file.close() + except IOError: + logging.exception("error writing state file: %s", state) + + def run_hooks(self, state): + """ + Run hook scripts upon changing states. If hooks is not specified, run all hooks in the given state. + + :param int state: state to run hooks for + :return: nothing + """ + + # check that state change hooks exist + if state not in self._hooks: + return + + # retrieve all state hooks + hooks = self._hooks.get(state, []) + + # execute all state hooks + if hooks: + for hook in hooks: + self.run_hook(hook) + else: + logging.info("no state hooks for %s", state) + + def set_hook(self, hook_type, file_name, source_name, data): + """ + Store a hook from a received file message. + + :param str hook_type: hook type + :param str file_name: file name for hook + :param str source_name: source name + :param data: hook data + :return: nothing + """ + logging.info("setting state hook: %s - %s from %s", hook_type, file_name, source_name) + + _hook_id, state = hook_type.split(':')[:2] + if not state.isdigit(): + logging.error("error setting hook having state '%s'", state) + return + + state = int(state) + hook = file_name, data + + # append hook to current state hooks + state_hooks = self._hooks.setdefault(state, []) + state_hooks.append(hook) + + # immediately run a hook if it is in the current state + # (this allows hooks in the definition and configuration states) + if self.state == state: + logging.info("immediately running new state hook") + self.run_hook(hook) + + def del_hooks(self): + """ + Clear the hook scripts dict. + """ + self._hooks.clear() + + def run_hook(self, hook): + """ + Run a hook. 
+ + :param tuple hook: hook to run + :return: nothing + """ + file_name, data = hook + logging.info("running hook %s", file_name) + + # write data to hook file + try: + hook_file = open(os.path.join(self.session_dir, file_name), "w") + hook_file.write(data) + hook_file.close() + except IOError: + logging.exception("error writing hook '%s'", file_name) + + # setup hook stdout and stderr + try: + stdout = open(os.path.join(self.session_dir, file_name + ".log"), "w") + stderr = subprocess.STDOUT + except IOError: + logging.exception("error setting up hook stderr and stdout") + stdout = None + stderr = None + + # execute hook file + try: + args = ["/bin/sh", file_name] + subprocess.check_call(args, stdout=stdout, stderr=stderr, + close_fds=True, cwd=self.session_dir, env=self.get_environment()) + except (OSError, subprocess.CalledProcessError): + logging.exception("error running hook: %s", file_name) + + def run_state_hooks(self, state): + """ + Run state hooks. + + :param int state: state to run hooks for + :return: nothing + """ + for hook in self._state_hooks.get(state, []): + try: + hook(state) + except: + message = "exception occured when running %s state hook: %s" % (coreapi.state_name(state), hook) + logging.exception(message) + self.exception(ExceptionLevels.ERROR, "Session.run_state_hooks", None, message) + + def add_state_hook(self, state, hook): + """ + Add a state hook. + + :param int state: state to add hook for + :param func hook: hook callback for the state + :return: nothing + """ + hooks = self._state_hooks.setdefault(state, []) + if hook in hooks: + raise ValueError("attempting to add duplicate state hook") + hooks.append(hook) + + if self.state == state: + hook(state) + + def del_state_hook(self, state, hook): + """ + Delete a state hook. + + :param int state: state to delete hook for + :param func hook: hook to delete + :return: + """ + hooks = self._state_hooks.setdefault(state, []) + hooks.remove(hook) + + def runtime_state_hook(self, state): + """ + Runtime state hook check. + + :param int state: state to check + :return: nothing + """ + if state == EventTypes.RUNTIME_STATE.value: + self.emane.poststartup() + xml_file_version = self.options.get_config("xmlfilever") + if xml_file_version in ("1.0",): + xml_file_name = os.path.join(self.session_dir, "session-deployed.xml") + xml_writer = corexml.CoreXmlWriter(self) + corexmldeployment.CoreXmlDeployment(self, xml_writer.scenario) + xml_writer.write(xml_file_name) + + def get_environment(self, state=True): + """ + Get an environment suitable for a subprocess.Popen call. + This is the current process environment with some session-specific + variables. 
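The state-hook plumbing added above (add_state_hook / run_state_hooks) is a small registry keyed by state value; the original also fires a hook immediately when the session is already in that state. A stripped-down sketch outside the Session class (the numeric state value is a hypothetical stand-in for EventTypes.RUNTIME_STATE.value):

state_hooks = {}

def add_state_hook(state, hook):
    # refuse duplicate registrations for the same state
    hooks = state_hooks.setdefault(state, [])
    if hook in hooks:
        raise ValueError("attempting to add duplicate state hook")
    hooks.append(hook)

def run_state_hooks(state):
    for hook in state_hooks.get(state, []):
        hook(state)

RUNTIME_STATE = 4  # hypothetical stand-in for EventTypes.RUNTIME_STATE.value
add_state_hook(RUNTIME_STATE, lambda state: print("entered runtime, state=%s" % state))
run_state_hooks(RUNTIME_STATE)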
+ + :param bool state: flag to determine if session state should be included + :return: + """ + env = os.environ.copy() + env["SESSION"] = "%s" % self.id + env["SESSION_SHORT"] = "%s" % self.short_session_id() + env["SESSION_DIR"] = "%s" % self.session_dir + env["SESSION_NAME"] = "%s" % self.name + env["SESSION_FILENAME"] = "%s" % self.file_name + env["SESSION_USER"] = "%s" % self.user + env["SESSION_NODE_COUNT"] = "%s" % self.get_node_count() + + if state: + env["SESSION_STATE"] = "%s" % self.state + + # attempt to read and add environment config file + environment_config_file = os.path.join(constants.CORE_CONF_DIR, "environment") + try: + if os.path.isfile(environment_config_file): + utils.load_config(environment_config_file, env) + except IOError: + logging.warn("environment configuration file does not exist: %s", environment_config_file) + + # attempt to read and add user environment file + if self.user: + environment_user_file = os.path.join("/home", self.user, ".core", "environment") + try: + utils.load_config(environment_user_file, env) + except IOError: + logging.debug("user core environment settings file not present: %s", environment_user_file) + + return env + + def set_thumbnail(self, thumb_file): + """ + Set the thumbnail filename. Move files from /tmp to session dir. + + :param str thumb_file: tumbnail file to set for session + :return: nothing + """ + if not os.path.exists(thumb_file): + logging.error("thumbnail file to set does not exist: %s", thumb_file) + self.thumbnail = None + return + + destination_file = os.path.join(self.session_dir, os.path.basename(thumb_file)) + shutil.copy(thumb_file, destination_file) + self.thumbnail = destination_file + + def set_user(self, user): + """ + Set the username for this session. Update the permissions of the + session dir to allow the user write access. + + :param str user: user to give write permissions to for the session directory + :return: nothing + """ + if user: + try: + uid = pwd.getpwnam(user).pw_uid + gid = os.stat(self.session_dir).st_gid + os.chown(self.session_dir, uid, gid) + except IOError: + logging.exception("failed to set permission on %s", self.session_dir) + + self.user = user + + def get_object_id(self): + """ + Return a unique, new random object id. + """ + object_id = None + + with self._objects_lock: + while True: + object_id = random.randint(1, 0xFFFF) + if object_id not in self.objects: + break + + return object_id + + def add_object(self, cls, *clsargs, **clskwds): + """ + Add an emulation object. + + :param class cls: object class to add + :param list clsargs: list of arguments for the class to create + :param dict clskwds: dictionary of arguments for the class to create + :return: the created class instance + """ + obj = cls(self, *clsargs, **clskwds) + + self._objects_lock.acquire() + if obj.objid in self.objects: + self._objects_lock.release() + obj.shutdown() + raise KeyError("duplicate object id %s for %s" % (obj.objid, obj)) + self.objects[obj.objid] = obj + self._objects_lock.release() + + return obj + + def get_object(self, object_id): + """ + Get an emulation object. + + :param int object_id: object id to retrieve + :return: object for the given id + :rtype: core.coreobj.PyCoreNode + """ + if object_id not in self.objects: + raise KeyError("unknown object id %s" % object_id) + return self.objects[object_id] + + def get_object_by_name(self, name): + """ + Get an emulation object using its name attribute. 
+ + :param str name: name of object to retrieve + :return: object for the name given + """ + with self._objects_lock: + for obj in self.objects.itervalues(): + if hasattr(obj, "name") and obj.name == name: + return obj + raise KeyError("unknown object with name %s" % name) + + def delete_object(self, object_id): + """ + Remove an emulation object. + + :param int object_id: object id to remove + :return: nothing + """ + with self._objects_lock: + try: + obj = self.objects.pop(object_id) + obj.shutdown() + except KeyError: + logging.error("failed to remove object, object with id was not found: %s", object_id) + + def delete_objects(self): + """ + Clear the objects dictionary, and call shutdown for each object. + """ + with self._objects_lock: + while self.objects: + _, obj = self.objects.popitem() + obj.shutdown() + + def write_objects(self): + """ + Write objects to a 'nodes' file in the session dir. + The 'nodes' file lists: number, name, api-type, class-type + """ + try: + nodes_file = open(os.path.join(self.session_dir, "nodes"), "w") + with self._objects_lock: + for object_id in sorted(self.objects.keys()): + obj = self.objects[object_id] + nodes_file.write("%s %s %s %s\n" % (object_id, obj.name, obj.apitype, type(obj))) + nodes_file.close() + except IOError: + logging.exception("error writing nodes file") + + def dump_session(self): + """ + Log information about the session in its current state. + """ + logging.info("session id=%s name=%s state=%s", self.id, self.name, self.state) + logging.info("file=%s thumbnail=%s node_count=%s/%s", + self.file_name, self.thumbnail, self.get_node_count(), len(self.objects)) + + def exception(self, level, source, object_id, text): + """ + Generate and broadcast an exception event. + + :param str level: exception level + :param str source: source name + :param int object_id: object id + :param str text: exception message + :return: nothing + """ + + exception_data = ExceptionData( + node=object_id, + session=str(self.id), + level=level, + source=source, + date=time.ctime(), + text=text + ) + + self.broadcast_exception(exception_data) + + def instantiate(self): + """ + We have entered the instantiation state, invoke startup methods + of various managers and boot the nodes. Validate nodes and check + for transition to the runtime state. + """ + + # write current objects out to session directory file + self.write_objects() + + # controlnet may be needed by some EMANE models + self.add_remove_control_interface(node=None, remove=False) + + # instantiate will be invoked again upon Emane configure + if self.emane.startup() == self.emane.NOT_READY: + return + + # start feature helpers + self.broker.startup() + self.mobility.startup() + + # boot the services on each node + self.boot_nodes() + + # set broker local instantiation to complete + self.broker.local_instantiation_complete() + + # notify listeners that instantiation is complete + event = EventData(event_type=EventTypes.INSTANTIATION_COMPLETE.value) + self.broadcast_event(event) + + # assume either all nodes have booted already, or there are some + # nodes on slave servers that will be booted and those servers will + # send a node status response message + self.check_runtime() + + def get_node_count(self): + """ + Returns the number of CoreNodes and CoreNets, except for those + that are not considered in the GUI's node count. 
+ """ + + with self._objects_lock: + count = len([x for x in self.objects.itervalues() + if not nodeutils.is_node(x, (NodeTypes.PEER_TO_PEER, NodeTypes.CONTROL_NET))]) + + # on Linux, GreTapBridges are auto-created, not part of GUI's node count + count -= len([x for x in self.objects.itervalues() + if nodeutils.is_node(x, NodeTypes.TAP_BRIDGE) and not nodeutils.is_node(x, NodeTypes.TUNNEL)]) + + return count + + def check_runtime(self): + """ + Check if we have entered the runtime state, that all nodes have been + started and the emulation is running. Start the event loop once we + have entered runtime (time=0). + """ + # this is called from instantiate() after receiving an event message + # for the instantiation state, and from the broker when distributed + # nodes have been started + logging.info("session(%s) checking if not in runtime state, current state: %s", self.id, + coreapi.state_name(self.state)) + if self.state == EventTypes.RUNTIME_STATE.value: + logging.info("valid runtime state found, returning") + return + + # check to verify that all nodes and networks are running + if not self.broker.instantiation_complete(): + return + + # start event loop and set to runtime + self.event_loop.run() + self.set_state(EventTypes.RUNTIME_STATE, send_event=True) + + def data_collect(self): + """ + Tear down a running session. Stop the event loop and any running + nodes, and perform clean-up. + """ + # stop event loop + self.event_loop.stop() + + # stop node services + with self._objects_lock: + for obj in self.objects.itervalues(): + # TODO: determine if checking for CoreNode alone is ok + if isinstance(obj, nodes.PyCoreNode): + self.services.stop_services(obj) + + # shutdown emane + self.emane.shutdown() + + # update control interface hosts + self.update_control_interface_hosts(remove=True) + + # remove all four possible control networks. Does nothing if ctrlnet is not installed. + self.add_remove_control_interface(node=None, net_index=0, remove=True) + self.add_remove_control_interface(node=None, net_index=1, remove=True) + self.add_remove_control_interface(node=None, net_index=2, remove=True) + self.add_remove_control_interface(node=None, net_index=3, remove=True) + + def check_shutdown(self): + """ + Check if we have entered the shutdown state, when no running nodes + and links remain. + """ + node_count = self.get_node_count() + logging.info("session(%s) checking shutdown: %s nodes remaining", self.id, node_count) + + shutdown = False + if node_count == 0: + shutdown = True + self.set_state(EventTypes.SHUTDOWN_STATE) + + return shutdown + + def short_session_id(self): + """ + Return a shorter version of the session ID, appropriate for + interface names, where length may be limited. + """ + ssid = (self.id >> 8) ^ (self.id & ((1 << 8) - 1)) + return "%x" % ssid + + def boot_nodes(self): + """ + Invoke the boot() procedure for all nodes and send back node + messages to the GUI for node messages that had the status + request flag. 
+ """ + with self._objects_lock: + pool = ThreadPool() + results = [] + + start = time.time() + for obj in self.objects.itervalues(): + # TODO: PyCoreNode is not the type to check + if isinstance(obj, nodes.PyCoreNode) and not nodeutils.is_node(obj, NodeTypes.RJ45): + # add a control interface if configured + logging.info("booting node: %s", obj.name) + self.add_remove_control_interface(node=obj, remove=False) + result = pool.apply_async(self.services.boot_services, (obj,)) + results.append(result) + + pool.close() + pool.join() + for result in results: + result.get() + logging.debug("boot run time: %s", time.time() - start) + + self.update_control_interface_hosts() + + def get_control_net_prefixes(self): + """ + Retrieve control net prefixes. + + :return: control net prefix list + :rtype: list + """ + p = self.options.get_config("controlnet") + p0 = self.options.get_config("controlnet0") + p1 = self.options.get_config("controlnet1") + p2 = self.options.get_config("controlnet2") + p3 = self.options.get_config("controlnet3") + + if not p0 and p: + p0 = p + + return [p0, p1, p2, p3] + + def get_control_net_server_interfaces(self): + """ + Retrieve control net server interfaces. + + :return: list of control net server interfaces + :rtype: list + """ + d0 = self.options.get_config("controlnetif0") + if d0: + logging.error("controlnet0 cannot be assigned with a host interface") + d1 = self.options.get_config("controlnetif1") + d2 = self.options.get_config("controlnetif2") + d3 = self.options.get_config("controlnetif3") + return [None, d1, d2, d3] + + def get_control_net_index(self, dev): + """ + Retrieve control net index. + + :param str dev: device to get control net index for + :return: control net index, -1 otherwise + :rtype: int + """ + if dev[0:4] == "ctrl" and int(dev[4]) in [0, 1, 2, 3]: + index = int(dev[4]) + if index == 0: + return index + if index < 4 and self.get_control_net_prefixes()[index] is not None: + return index + return -1 + + def get_control_net_object(self, net_index): + # TODO: all nodes use an integer id and now this wants to use a string + object_id = "ctrl%dnet" % net_index + return self.get_object(object_id) + + def add_remove_control_net(self, net_index, remove=False, conf_required=True): + """ + Create a control network bridge as necessary. + When the remove flag is True, remove the bridge that connects control + interfaces. The conf_reqd flag, when False, causes a control network + bridge to be added even if one has not been configured. 
+ + :param int net_index: network index + :param bool remove: flag to check if it should be removed + :param bool conf_required: flag to check if conf is required + :return: control net object + :rtype: core.netns.nodes.CtrlNet + """ + logging.debug("add/remove control net: index(%s) remove(%s) conf_required(%s)", net_index, remove, conf_required) + prefix_spec_list = self.get_control_net_prefixes() + prefix_spec = prefix_spec_list[net_index] + if not prefix_spec: + if conf_required: + # no controlnet needed + return None + else: + control_net_class = nodeutils.get_node_class(NodeTypes.CONTROL_NET) + prefix_spec = control_net_class.DEFAULT_PREFIX_LIST[net_index] + logging.debug("prefix spec: %s", prefix_spec) + + server_interface = self.get_control_net_server_interfaces()[net_index] + + # return any existing controlnet bridge + try: + control_net = self.get_control_net_object(net_index) + + if remove: + self.delete_object(control_net.objid) + return None + + return control_net + except KeyError: + if remove: + return None + + # build a new controlnet bridge + object_id = "ctrl%dnet" % net_index + + # use the updown script for control net 0 only. + updown_script = None + + if net_index == 0: + updown_script = self.options.get_config("controlnet_updown_script") + if not updown_script: + logging.warning("controlnet updown script not configured") + + prefixes = prefix_spec.split() + if len(prefixes) > 1: + # a list of per-host prefixes is provided + assign_address = True + if self.master: + try: + # split first (master) entry into server and prefix + prefix = prefixes[0].split(":", 1)[1] + except IndexError: + # no server name. possibly only one server + prefix = prefixes[0] + else: + # slave servers have their name and localhost in the serverlist + servers = self.broker.getservernames() + servers.remove("localhost") + prefix = None + + for server_prefix in prefixes: + try: + # split each entry into server and prefix + server, p = server_prefix.split(":") + except ValueError: + server = "" + p = None + + if server == servers[0]: + # the server name in the list matches this server + prefix = p + break + + if not prefix: + logging.error("Control network prefix not found for server '%s'" % servers[0]) + assign_address = False + try: + prefix = prefixes[0].split(':', 1)[1] + except IndexError: + prefix = prefixes[0] + # len(prefixes) == 1 + else: + # TODO: can we get the server name from the servers.conf or from the node assignments? + # with one prefix, only master gets a ctrlnet address + assign_address = self.master + prefix = prefixes[0] + + control_net_class = nodeutils.get_node_class(NodeTypes.CONTROL_NET) + control_net = self.add_object(cls=control_net_class, objid=object_id, prefix=prefix, + assign_address=assign_address, + updown_script=updown_script, serverintf=server_interface) + + # tunnels between controlnets will be built with Broker.addnettunnels() + # TODO: potentially remove documentation saying object ids are ints + # TODO: need to move broker code out of the session object + self.broker.addnet(object_id) + for server in self.broker.getservers(): + self.broker.addnodemap(server, object_id) + + return control_net + + def add_remove_control_interface(self, node, net_index=0, remove=False, conf_required=True): + """ + Add a control interface to a node when a 'controlnet' prefix is + listed in the config file or session options. Uses + addremovectrlnet() to build or remove the control bridge. 
+ If conf_reqd is False, the control network may be built even + when the user has not configured one (e.g. for EMANE.) + + :param core.netns.nodes.CoreNode node: node to add or remove control interface + :param int net_index: network index + :param bool remove: flag to check if it should be removed + :param bool conf_required: flag to check if conf is required + :return: nothing + """ + control_net = self.add_remove_control_net(net_index, remove, conf_required) + if not control_net: + return + + if not node: + return + + # ctrl# already exists + if node.netif(control_net.CTRLIF_IDX_BASE + net_index): + return + + control_ip = node.objid + + try: + addrlist = ["%s/%s" % (control_net.prefix.addr(control_ip), control_net.prefix.prefixlen)] + except ValueError: + msg = "Control interface not added to node %s. " % node.objid + msg += "Invalid control network prefix (%s). " % control_net.prefix + msg += "A longer prefix length may be required for this many nodes." + logging.exception(msg) + return + + interface1 = node.newnetif(net=control_net, + ifindex=control_net.CTRLIF_IDX_BASE + net_index, + ifname="ctrl%d" % net_index, hwaddr=MacAddress.random(), + addrlist=addrlist) + node.netif(interface1).control = True + + def update_control_interface_hosts(self, net_index=0, remove=False): + """ + Add the IP addresses of control interfaces to the /etc/hosts file. + + :param int net_index: network index to update + :param bool remove: flag to check if it should be removed + :return: nothing + """ + if not self.options.get_config_bool("update_etc_hosts", default=False): + return + + try: + control_net = self.get_control_net_object(net_index) + except KeyError: + logging.exception("error retrieving control net object") + return + + header = "CORE session %s host entries" % self.id + if remove: + logging.info("Removing /etc/hosts file entries.") + utils.file_demunge("/etc/hosts", header) + return + + entries = [] + for interface in control_net.netifs(): + name = interface.node.name + for address in interface.addrlist: + entries.append("%s %s" % (address.split("/")[0], name)) + + logging.info("Adding %d /etc/hosts file entries." % len(entries)) + + utils.file_munge("/etc/hosts", header, "\n".join(entries) + "\n") + + def runtime(self): + """ + Return the current time we have been in the runtime state, or zero + if not in runtime. + """ + if self.state == EventTypes.RUNTIME_STATE.value: + return time.time() - self._state_time + else: + return 0.0 + + def add_event(self, event_time, node=None, name=None, data=None): + """ + Add an event to the event queue, with a start time relative to the + start of the runtime state. 
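In add_remove_control_interface above, a node's control address is derived from its object id inside the controlnet prefix via control_net.prefix.addr(node.objid). A sketch of that derivation with the stdlib ipaddress module, assuming prefix.addr(n) yields the n-th address within the prefix (the prefix below is only an example):

import ipaddress

def control_address(prefix, node_id):
    # the node id doubles as the host part of the control interface address
    net = ipaddress.ip_network(prefix)
    if node_id >= net.num_addresses - 1:
        # mirrors the ValueError branch above: a longer prefix is needed
        raise ValueError("invalid control network prefix for node id %s" % node_id)
    return "%s/%s" % (net[node_id], net.prefixlen)

print(control_address("172.16.0.0/24", 3))  # -> 172.16.0.3/24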
+ + :param event_time: event time + :param core.netns.nodes.CoreNode node: node to add event for + :param str name: name of event + :param data: data for event + :return: nothing + """ + event_time = float(event_time) + current_time = self.runtime() + + if current_time > 0.0: + if time <= current_time: + logging.warn("could not schedule past event for time %s (run time is now %s)", time, current_time) + return + event_time = event_time - current_time + + self.event_loop.add_event(event_time, self.run_event, node=node, name=name, data=data) + + if not name: + name = "" + logging.info("scheduled event %s at time %s data=%s", name, event_time + current_time, data) + + # TODO: if data is None, this blows up, but this ties into how event functions are ran, need to clean that up + def run_event(self, node_id=None, name=None, data=None): + """ + Run a scheduled event, executing commands in the data string. + + :param int node_id: node id to run event + :param str name: event name + :param str data: event data + :return: nothing + """ + now = self.runtime() + if not name: + name = "" + + logging.info("running event %s at time %s cmd=%s" % (name, now, data)) + if not node_id: + utils.mute_detach(data) + else: + node = self.get_object(node_id) + node.cmd(data, wait=False) + + +class SessionConfig(ConfigurableManager, ConfigurableOptions): + """ + Session configuration object. + """ + name = "session" + options = [ + Configuration(_id="controlnet", _type=ConfigDataTypes.STRING, label="Control Network"), + Configuration(_id="controlnet0", _type=ConfigDataTypes.STRING, label="Control Network 0"), + Configuration(_id="controlnet1", _type=ConfigDataTypes.STRING, label="Control Network 1"), + Configuration(_id="controlnet2", _type=ConfigDataTypes.STRING, label="Control Network 2"), + Configuration(_id="controlnet3", _type=ConfigDataTypes.STRING, label="Control Network 3"), + Configuration(_id="controlnet_updown_script", _type=ConfigDataTypes.STRING, label="Control Network Script"), + Configuration(_id="enablerj45", _type=ConfigDataTypes.BOOL, default="1", options=["On", "Off"], + label="Enable RJ45s"), + Configuration(_id="preservedir", _type=ConfigDataTypes.BOOL, default="0", options=["On", "Off"], + label="Preserve session dir"), + Configuration(_id="enablesdt", _type=ConfigDataTypes.BOOL, default="0", options=["On", "Off"], + label="Enable SDT3D output"), + Configuration(_id="sdturl", _type=ConfigDataTypes.STRING, default=Sdt.DEFAULT_SDT_URL, label="SDT3D URL") + ] + config_type = RegisterTlvs.UTILITY.value + + def __init__(self): + super(SessionConfig, self).__init__() + self.set_configs(self.default_values()) + + def get_config(self, _id, node_id=ConfigurableManager._default_node, + config_type=ConfigurableManager._default_type, default=None): + value = super(SessionConfig, self).get_config(_id, node_id, config_type, default) + if value == "": + value = default + return value + + def get_config_bool(self, name, default=None): + value = self.get_config(name) + if value is None: + return default + return value.lower() == "true" + + def get_config_int(self, name, default=None): + value = self.get_config(name, default=default) + if value is not None: + value = int(value) + return value + + +class SessionMetaData(ConfigurableManager): + """ + Metadata is simply stored in a configs[] dict. Key=value pairs are + passed in from configure messages destined to the "metadata" object. + The data is not otherwise interpreted or processed. 
+ """ + name = "metadata" + config_type = RegisterTlvs.UTILITY.value diff --git a/daemon/core/utils.py b/daemon/core/utils.py deleted file mode 100644 index df00984c..00000000 --- a/daemon/core/utils.py +++ /dev/null @@ -1,591 +0,0 @@ -""" -Miscellaneous utility functions, wrappers around some subprocess procedures. -""" - -import concurrent.futures -import fcntl -import hashlib -import importlib -import inspect -import json -import logging -import logging.config -import os -import random -import shlex -import shutil -import sys -import threading -from collections import OrderedDict -from collections.abc import Iterable -from pathlib import Path -from queue import Queue -from subprocess import PIPE, STDOUT, Popen -from typing import TYPE_CHECKING, Any, Callable, Generic, Optional, TypeVar, Union - -import netaddr - -from core.errors import CoreCommandError, CoreError - -logger = logging.getLogger(__name__) - -if TYPE_CHECKING: - from core.emulator.coreemu import CoreEmu - from core.emulator.session import Session - from core.nodes.base import CoreNode -T = TypeVar("T") - -DEVNULL = open(os.devnull, "wb") -IFACE_CONFIG_FACTOR: int = 1000 - - -def execute_script(coreemu: "CoreEmu", file_path: Path, args: str) -> None: - """ - Provides utility function to execute a python script in context of the - provide coreemu instance. - - :param coreemu: coreemu to provide to script - :param file_path: python script to execute - :param args: args to provide script - :return: nothing - """ - sys.argv = shlex.split(args) - thread = threading.Thread( - target=execute_file, args=(file_path, {"coreemu": coreemu}), daemon=True - ) - thread.start() - thread.join() - - -def execute_file( - path: Path, exec_globals: dict[str, str] = None, exec_locals: dict[str, str] = None -) -> None: - """ - Provides a way to execute a file. - - :param path: path of file to execute - :param exec_globals: globals values to pass to execution - :param exec_locals: local values to pass to execution - :return: nothing - """ - if exec_globals is None: - exec_globals = {} - exec_globals.update({"__file__": str(path), "__name__": "__main__"}) - with path.open("rb") as f: - data = compile(f.read(), path, "exec") - exec(data, exec_globals, exec_locals) - - -def hashkey(value: Union[str, int]) -> int: - """ - Provide a consistent hash that can be used in place - of the builtin hash, that no longer behaves consistently - in python3. - - :param value: value to hash - :return: hash value - """ - if isinstance(value, int): - value = str(value) - value = value.encode() - return int(hashlib.sha256(value).hexdigest(), 16) - - -def _detach_init() -> None: - """ - Fork a child process and exit. - - :return: nothing - """ - if os.fork(): - # parent exits - os._exit(0) - os.setsid() - - -def _valid_module(path: Path) -> bool: - """ - Check if file is a valid python module. - - :param path: path to file - :return: True if a valid python module file, False otherwise - """ - if not path.is_file(): - return False - if path.name.startswith("_"): - return False - if not path.suffix == ".py": - return False - return True - - -def _is_class(module: Any, member: type, clazz: type) -> bool: - """ - Validates if a module member is a class and an instance of a CoreService. 
- - :param module: module to validate for service - :param member: member to validate for service - :param clazz: clazz type to check for validation - :return: True if a valid service, False otherwise - """ - if not inspect.isclass(member): - return False - if not issubclass(member, clazz): - return False - if member.__module__ != module.__name__: - return False - return True - - -def close_onexec(fd: int) -> None: - """ - Close on execution of a shell process. - - :param fd: file descriptor to close - :return: nothing - """ - fdflags = fcntl.fcntl(fd, fcntl.F_GETFD) - fcntl.fcntl(fd, fcntl.F_SETFD, fdflags | fcntl.FD_CLOEXEC) - - -def which(command: str, required: bool) -> str: - """ - Find location of desired executable within current PATH. - - :param command: command to find location for - :param required: command is required to be found, false otherwise - :return: command location or None - :raises ValueError: when not found and required - """ - found_path = shutil.which(command) - if found_path is None and required: - raise CoreError(f"failed to find required executable({command}) in path") - return found_path - - -def make_tuple_fromstr(s: str, value_type: Callable[[str], T]) -> tuple[T]: - """ - Create a tuple from a string. - - :param s: string to convert to a tuple - :param value_type: type of values to be contained within tuple - :return: tuple from string - """ - # remove tuple braces and strip commands and space from all values in the tuple - # string - values = [] - for x in s.strip("(), ").split(","): - x = x.strip("' ") - if x: - values.append(x) - return tuple(value_type(i) for i in values) - - -def mute_detach(args: str, **kwargs: dict[str, Any]) -> int: - """ - Run a muted detached process by forking it. - - :param args: arguments for the command - :param kwargs: keyword arguments for the command - :return: process id of the command - """ - args = shlex.split(args) - kwargs["preexec_fn"] = _detach_init - kwargs["stdout"] = DEVNULL - kwargs["stderr"] = STDOUT - return Popen(args, **kwargs).pid - - -def cmd( - args: str, - env: dict[str, str] = None, - cwd: Path = None, - wait: bool = True, - shell: bool = False, -) -> str: - """ - Execute a command on the host and returns the combined stderr stdout output. - - :param args: command arguments - :param env: environment to run command with - :param cwd: directory to run command in - :param wait: True to wait for status, False otherwise - :param shell: True to use shell, False otherwise - :return: combined stdout and stderr - :raises CoreCommandError: when there is a non-zero exit status or the file to - execute is not found - """ - logger.debug("command cwd(%s) wait(%s): %s", cwd, wait, args) - input_args = args - if shell is False: - args = shlex.split(args) - try: - output = PIPE if wait else DEVNULL - p = Popen(args, stdout=output, stderr=output, env=env, cwd=cwd, shell=shell) - if wait: - stdout, stderr = p.communicate() - stdout = stdout.decode().strip() - stderr = stderr.decode().strip() - status = p.returncode - if status != 0: - raise CoreCommandError(status, input_args, stdout, stderr) - return stdout - else: - return "" - except OSError as e: - logger.error("cmd error: %s", e.strerror) - raise CoreCommandError(1, input_args, "", e.strerror) - - -def run_cmds(args: list[str], wait: bool = True, shell: bool = False) -> list[str]: - """ - Execute a series of commands on the host and returns a list of the combined stderr - stdout output. 
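The tuple parser above targets strings such as "('a', 'b')" produced by calling str() on a tuple. A quick usage sketch restating the same logic standalone (example inputs only):

def make_tuple_fromstr(s, value_type):
    # strip the surrounding parens/quotes, split on commas, drop empties, coerce each value
    values = [x.strip("' ") for x in s.strip("(), ").split(",") if x.strip("' ")]
    return tuple(value_type(v) for v in values)

assert make_tuple_fromstr("('10', '20', '30')", int) == (10, 20, 30)
assert make_tuple_fromstr("('eth0',)", str) == ("eth0",)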
- - :param args: command arguments - :param wait: True to wait for status, False otherwise - :param shell: True to use shell, False otherwise - :return: combined stdout and stderr - :raises CoreCommandError: when there is a non-zero exit status or the file to - execute is not found - """ - outputs = [] - for arg in args: - output = cmd(arg, wait=wait, shell=shell) - outputs.append(output) - return outputs - - -def file_munge(pathname: str, header: str, text: str) -> None: - """ - Insert text at the end of a file, surrounded by header comments. - - :param pathname: file path to add text to - :param header: header text comments - :param text: text to append to file - :return: nothing - """ - # prevent duplicates - file_demunge(pathname, header) - - with open(pathname, "a") as append_file: - append_file.write(f"# BEGIN {header}\n") - append_file.write(text) - append_file.write(f"# END {header}\n") - - -def file_demunge(pathname: str, header: str) -> None: - """ - Remove text that was inserted in a file surrounded by header comments. - - :param pathname: file path to open for removing a header - :param header: header text to target for removal - :return: nothing - """ - with open(pathname) as read_file: - lines = read_file.readlines() - - start = None - end = None - - for i, line in enumerate(lines): - if line == f"# BEGIN {header}\n": - start = i - elif line == f"# END {header}\n": - end = i + 1 - - if start is None or end is None: - return - - with open(pathname, "w") as write_file: - lines = lines[:start] + lines[end:] - write_file.write("".join(lines)) - - -def expand_corepath( - pathname: str, session: "Session" = None, node: "CoreNode" = None -) -> Path: - """ - Expand a file path given session information. - - :param pathname: file path to expand - :param session: core session object to expand path - :param node: node to expand path with - :return: expanded path - """ - if session is not None: - pathname = pathname.replace("~", f"/home/{session.user}") - pathname = pathname.replace("%SESSION%", str(session.id)) - pathname = pathname.replace("%SESSION_DIR%", str(session.directory)) - pathname = pathname.replace("%SESSION_USER%", session.user) - if node is not None: - pathname = pathname.replace("%NODE%", str(node.id)) - pathname = pathname.replace("%NODENAME%", node.name) - return Path(pathname) - - -def sysctl_devname(devname: str) -> Optional[str]: - """ - Translate a device name to the name used with sysctl. - - :param devname: device name to translate - :return: translated device name - """ - if devname is None: - return None - return devname.replace(".", "/") - - -def load_config(file_path: Path, d: dict[str, str]) -> None: - """ - Read key=value pairs from a file, into a dict. Skip comments; strip newline - characters and spacing. 
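The munge/demunge pair above brackets appended text with matching BEGIN/END comment markers so it can be located and removed later. A small self-contained sketch of the resulting file layout, using a temporary file rather than /etc/hosts (duplicate prevention omitted for brevity):

import tempfile

def file_munge(pathname, header, text):
    # append text wrapped in BEGIN/END markers
    with open(pathname, "a") as f:
        f.write("# BEGIN %s\n%s# END %s\n" % (header, text, header))

with tempfile.NamedTemporaryFile("w+", delete=False) as tmp:
    tmp.write("127.0.0.1 localhost\n")
    path = tmp.name
file_munge(path, "CORE session 1 host entries", "172.16.0.1 n1\n172.16.0.2 n2\n")
# the file now ends with:
# # BEGIN CORE session 1 host entries
# 172.16.0.1 n1
# 172.16.0.2 n2
# # END CORE session 1 host entries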
- - :param file_path: file path to read data from - :param d: dictionary to config into - :return: nothing - """ - with file_path.open("r") as f: - lines = f.readlines() - for line in lines: - if line[:1] == "#": - continue - try: - key, value = line.split("=", 1) - d[key] = value.strip() - except ValueError: - logger.exception("error reading file to dict: %s", file_path) - - -def load_module(import_statement: str, clazz: Generic[T]) -> list[T]: - classes = [] - try: - module = importlib.import_module(import_statement) - members = inspect.getmembers(module, lambda x: _is_class(module, x, clazz)) - for member in members: - valid_class = member[1] - classes.append(valid_class) - except Exception: - logger.exception( - "unexpected error during import, skipping: %s", import_statement - ) - return classes - - -def load_classes(path: Path, clazz: Generic[T]) -> list[T]: - """ - Dynamically load classes for use within CORE. - - :param path: path to load classes from - :param clazz: class type expected to be inherited from for loading - :return: list of classes loaded - """ - # validate path exists - logger.debug("attempting to load modules from path: %s", path) - if not path.is_dir(): - logger.warning("invalid custom module directory specified" ": %s", path) - # check if path is in sys.path - parent = str(path.parent) - if parent not in sys.path: - logger.debug("adding parent path to allow imports: %s", parent) - sys.path.append(parent) - # import and add all service modules in the path - classes = [] - for p in path.iterdir(): - if not _valid_module(p): - continue - import_statement = f"{path.name}.{p.stem}" - logger.debug("importing custom module: %s", import_statement) - loaded = load_module(import_statement, clazz) - classes.extend(loaded) - return classes - - -def load_logging_config(config_path: Path) -> None: - """ - Load CORE logging configuration file. - - :param config_path: path to logging config file - :return: nothing - """ - with config_path.open("r") as f: - log_config = json.load(f) - logging.config.dictConfig(log_config) - - -def run_cmds_threaded( - node_cmds: list[tuple["CoreNode", list[str]]], - wait: bool = True, - shell: bool = False, - workers: int = None, -) -> tuple[dict[int, list[str]], list[Exception]]: - """ - Run the set of commands for the node provided. Each node will - run the commands within the context of a threadpool. 
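The loader above expects simple key=value lines with #-prefixed comments skipped. A hedged sketch of parsing that format from an in-memory string (the option names are made up for illustration):

config_text = """# sample CORE-style configuration
listenaddr=0.0.0.0
port=4038
"""

config = {}
for line in config_text.splitlines():
    # skip comments and lines without a key=value pair
    if line.startswith("#") or "=" not in line:
        continue
    key, value = line.split("=", 1)
    config[key] = value.strip()

assert config == {"listenaddr": "0.0.0.0", "port": "4038"}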
- - :param node_cmds: list of tuples of nodes and commands to run within them - :param wait: True to wait for status, False otherwise - :param shell: True to run shell like, False otherwise - :param workers: number of workers for threadpool, uses library default otherwise - :return: tuple including dict of node id to list of command output and a list of - exceptions if any - """ - - def _node_cmds( - _target: "CoreNode", _cmds: list[str], _wait: bool, _shell: bool - ) -> list[str]: - cmd_outputs = [] - for _cmd in _cmds: - output = _target.cmd(_cmd, wait=_wait, shell=_shell) - cmd_outputs.append(output) - return cmd_outputs - - with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as executor: - futures = [] - node_mappings = {} - for node, cmds in node_cmds: - future = executor.submit(_node_cmds, node, cmds, wait, shell) - node_mappings[future] = node - futures.append(future) - outputs = {} - exceptions = [] - for future in concurrent.futures.as_completed(futures): - try: - result = future.result() - node = node_mappings[future] - outputs[node.id] = result - except Exception as e: - logger.exception("thread pool exception") - exceptions.append(e) - return outputs, exceptions - - -def run_cmds_mp( - node_cmds: list[tuple["CoreNode", list[str]]], - wait: bool = True, - shell: bool = False, - workers: int = None, -) -> tuple[dict[int, list[str]], list[Exception]]: - """ - Run the set of commands for the node provided. Each node will - run the commands within the context of a process pool. This will not work - for distributed nodes and throws an exception when encountered. - - :param node_cmds: list of tuples of nodes and commands to run within them - :param wait: True to wait for status, False otherwise - :param shell: True to run shell like, False otherwise - :param workers: number of workers for threadpool, uses library default otherwise - :return: tuple including dict of node id to list of command output and a list of - exceptions if any - :raises CoreError: when a distributed node is provided as input - """ - with concurrent.futures.ProcessPoolExecutor(max_workers=workers) as executor: - futures = [] - node_mapping = {} - for node, cmds in node_cmds: - node_cmds = [node.create_cmd(x) for x in cmds] - if node.server: - raise CoreError( - f"{node.name} uses a distributed server and not supported" - ) - future = executor.submit(run_cmds, node_cmds, wait=wait, shell=shell) - node_mapping[future] = node - futures.append(future) - exceptions = [] - outputs = {} - for future in concurrent.futures.as_completed(futures): - try: - result = future.result() - node = node_mapping[future] - outputs[node.id] = result - except Exception as e: - logger.exception("thread pool exception") - exceptions.append(e) - return outputs, exceptions - - -def threadpool( - funcs: list[tuple[Callable, Iterable[Any], dict[Any, Any]]], workers: int = 10 -) -> tuple[list[Any], list[Exception]]: - """ - Run provided functions, arguments, and keywords within a threadpool - collecting results and exceptions. 
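The threadpool helper above takes (func, args, kwargs) tuples and returns collected results plus any exceptions. A standalone approximation showing the calling convention (pow and the worker count are placeholder choices):

import concurrent.futures

def threadpool(funcs, workers=10):
    # submit each (func, args, kwargs) tuple and gather results/exceptions as they complete
    results, exceptions = [], []
    with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as executor:
        futures = [executor.submit(func, *args, **kwargs) for func, args, kwargs in funcs]
        for future in concurrent.futures.as_completed(futures):
            try:
                results.append(future.result())
            except Exception as e:
                exceptions.append(e)
    return results, exceptions

funcs = [(pow, (2, n), {}) for n in range(5)]
results, errors = threadpool(funcs, workers=3)
assert sorted(results) == [1, 2, 4, 8, 16] and not errors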
- - :param funcs: iterable that provides a func, args, kwargs - :param workers: number of workers for the threadpool - :return: results and exceptions from running functions with args and kwargs - """ - with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as executor: - futures = [] - for func, args, kwargs in funcs: - future = executor.submit(func, *args, **kwargs) - futures.append(future) - results = [] - exceptions = [] - for future in concurrent.futures.as_completed(futures): - try: - result = future.result() - results.append(result) - except Exception as e: - logger.exception("thread pool exception") - exceptions.append(e) - return results, exceptions - - -def random_mac() -> str: - """ - Create a random mac address using Xen OID 00:16:3E. - - :return: random mac address - """ - value = random.randint(0, 0xFFFFFF) - value |= 0x00163E << 24 - mac = netaddr.EUI(value, dialect=netaddr.mac_unix_expanded) - return str(mac) - - -def iface_config_id(node_id: int, iface_id: int = None) -> int: - """ - Common utility to generate a configuration id, in case an interface is being - targeted. - - :param node_id: node for config id - :param iface_id: interface for config id - :return: generated config id when interface is present, node id otherwise - """ - if iface_id is not None and iface_id >= 0: - return node_id * IFACE_CONFIG_FACTOR + iface_id - else: - return node_id - - -def parse_iface_config_id(config_id: int) -> tuple[int, Optional[int]]: - """ - Parses configuration id, that may be potentially derived from an interface for a - node. - - :param config_id: configuration id to parse - :return: - """ - iface_id = None - node_id = config_id - if config_id >= IFACE_CONFIG_FACTOR: - iface_id = config_id % IFACE_CONFIG_FACTOR - node_id = config_id // IFACE_CONFIG_FACTOR - return node_id, iface_id - - -class SetQueue(Queue): - """ - Set backed queue to avoid duplicate submissions. 
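The interface config id helpers above pack a node id and interface id into one integer using a factor of 1000. A round-trip sketch of that encoding:

IFACE_CONFIG_FACTOR = 1000

def iface_config_id(node_id, iface_id=None):
    # interface-specific configs get node_id * 1000 + iface_id, node-level configs keep the node id
    if iface_id is not None and iface_id >= 0:
        return node_id * IFACE_CONFIG_FACTOR + iface_id
    return node_id

def parse_iface_config_id(config_id):
    # invert the encoding: ids at or above the factor carry an interface component
    if config_id >= IFACE_CONFIG_FACTOR:
        return divmod(config_id, IFACE_CONFIG_FACTOR)
    return config_id, None

assert iface_config_id(7, 3) == 7003
assert parse_iface_config_id(7003) == (7, 3)
assert parse_iface_config_id(7) == (7, None)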
- """ - - def _init(self, maxsize): - self.queue: OrderedDict = OrderedDict() - - def _put(self, item): - self.queue[item] = None - - def _get(self): - key, _ = self.queue.popitem(last=False) - return key diff --git a/daemon/core/xml/corexml.py b/daemon/core/xml/corexml.py index d566b501..bc206165 100644 --- a/daemon/core/xml/corexml.py +++ b/daemon/core/xml/corexml.py @@ -1,138 +1,115 @@ import logging -from pathlib import Path -from typing import TYPE_CHECKING, Any, Generic, Optional, TypeVar from lxml import etree -import core.nodes.base -import core.nodes.physical -from core import utils -from core.config import Configuration -from core.emane.nodes import EmaneNet, EmaneOptions -from core.emulator.data import InterfaceData, LinkOptions -from core.emulator.enumerations import EventTypes, NodeTypes -from core.errors import CoreXmlError -from core.nodes.base import CoreNodeBase, CoreNodeOptions, NodeBase, Position -from core.nodes.docker import DockerNode, DockerOptions -from core.nodes.interface import CoreInterface -from core.nodes.lxd import LxcNode, LxcOptions -from core.nodes.network import CtrlNet, GreTapBridge, PtpNet, WlanNode -from core.nodes.podman import PodmanNode, PodmanOptions -from core.nodes.wireless import WirelessNode -from core.services.coreservices import CoreService - -logger = logging.getLogger(__name__) - -if TYPE_CHECKING: - from core.emane.emanemodel import EmaneModel - from core.emulator.session import Session - - EmaneModelType = type[EmaneModel] -T = TypeVar("T") +from core import coreobj +from core.emulator.emudata import InterfaceData +from core.emulator.emudata import LinkOptions +from core.emulator.emudata import NodeOptions +from core.enumerations import NodeTypes +from core.misc import nodeutils +from core.misc.ipaddress import MacAddress +from core.netns import nodes -def write_xml_file( - xml_element: etree.Element, file_path: Path, doctype: str = None -) -> None: - xml_data = etree.tostring( - xml_element, - xml_declaration=True, - pretty_print=True, - encoding="UTF-8", - doctype=doctype, - ) - with file_path.open("wb") as f: - f.write(xml_data) +def write_xml_file(xml_element, file_path, doctype=None): + xml_data = etree.tostring(xml_element, xml_declaration=True, pretty_print=True, encoding="UTF-8", doctype=doctype) + with open(file_path, "w") as xml_file: + xml_file.write(xml_data) -def get_type(element: etree.Element, name: str, _type: Generic[T]) -> Optional[T]: +def get_type(element, name, _type): value = element.get(name) if value is not None: value = _type(value) return value -def get_float(element: etree.Element, name: str) -> Optional[float]: +def get_float(element, name): return get_type(element, name, float) -def get_int(element: etree.Element, name: str) -> Optional[int]: +def get_int(element, name): return get_type(element, name, int) -def add_attribute(element: etree.Element, name: str, value: Any) -> None: +def add_attribute(element, name, value): if value is not None: element.set(name, str(value)) -def create_iface_data(iface_element: etree.Element) -> InterfaceData: - iface_id = int(iface_element.get("id")) - name = iface_element.get("name") - mac = iface_element.get("mac") - ip4 = iface_element.get("ip4") - ip4_mask = get_int(iface_element, "ip4_mask") - ip6 = iface_element.get("ip6") - ip6_mask = get_int(iface_element, "ip6_mask") - return InterfaceData( - id=iface_id, - name=name, - mac=mac, - ip4=ip4, - ip4_mask=ip4_mask, - ip6=ip6, - ip6_mask=ip6_mask, - ) +def create_interface_data(interface_element): + interface_id = 
int(interface_element.get("id")) + name = interface_element.get("name") + mac = interface_element.get("mac") + if mac: + mac = MacAddress.from_string(mac) + ip4 = interface_element.get("ip4") + ip4_mask = get_int(interface_element, "ip4_mask") + ip6 = interface_element.get("ip6") + ip6_mask = get_int(interface_element, "ip6_mask") + return InterfaceData(interface_id, name, mac, ip4, ip4_mask, ip6, ip6_mask) -def create_emane_model_config( - node_id: int, - model: "EmaneModelType", - config: dict[str, str], - iface_id: Optional[int], -) -> etree.Element: +def create_emane_config(node_id, emane_config, config): + emane_configuration = etree.Element("emane_configuration") + add_attribute(emane_configuration, "node", node_id) + add_attribute(emane_configuration, "model", "emane") + + emulator_element = etree.SubElement(emane_configuration, "emulator") + for emulator_config in emane_config.emulator_config: + value = config[emulator_config.id] + add_configuration(emulator_element, emulator_config.id, value) + + nem_element = etree.SubElement(emane_configuration, "nem") + for nem_config in emane_config.nem_config: + value = config[nem_config.id] + add_configuration(nem_element, nem_config.id, value) + + return emane_configuration + + +def create_emane_model_config(node_id, model, config): emane_element = etree.Element("emane_configuration") add_attribute(emane_element, "node", node_id) - add_attribute(emane_element, "iface", iface_id) add_attribute(emane_element, "model", model.name) - platform_element = etree.SubElement(emane_element, "platform") - for platform_config in model.platform_config: - value = config[platform_config.id] - add_configuration(platform_element, platform_config.id, value) + mac_element = etree.SubElement(emane_element, "mac") for mac_config in model.mac_config: value = config[mac_config.id] add_configuration(mac_element, mac_config.id, value) + phy_element = etree.SubElement(emane_element, "phy") for phy_config in model.phy_config: value = config[phy_config.id] add_configuration(phy_element, phy_config.id, value) + external_element = etree.SubElement(emane_element, "external") for external_config in model.external_config: value = config[external_config.id] add_configuration(external_element, external_config.id, value) + return emane_element -def add_configuration(parent: etree.Element, name: str, value: str) -> None: +def add_configuration(parent, name, value): config_element = etree.SubElement(parent, "configuration") add_attribute(config_element, "name", name) add_attribute(config_element, "value", value) -class NodeElement: - def __init__(self, session: "Session", node: NodeBase, element_name: str) -> None: - self.session: "Session" = session - self.node: NodeBase = node - self.element: etree.Element = etree.Element(element_name) - add_attribute(self.element, "id", node.id) +class NodeElement(object): + def __init__(self, session, node, element_name): + self.session = session + self.node = node + self.element = etree.Element(element_name) + add_attribute(self.element, "id", node.objid) add_attribute(self.element, "name", node.name) - server = self.node.server.name if self.node.server else None - add_attribute(self.element, "server", server) add_attribute(self.element, "icon", node.icon) add_attribute(self.element, "canvas", node.canvas) self.add_position() - def add_position(self) -> None: + def add_position(self): x = self.node.position.x y = self.node.position.y z = self.node.position.z @@ -148,10 +125,10 @@ class NodeElement: add_attribute(position, "alt", alt) -class 
ServiceElement: - def __init__(self, service: type[CoreService]) -> None: - self.service: type[CoreService] = service - self.element: etree.Element = etree.Element("service") +class ServiceElement(object): + def __init__(self, service): + self.service = service + self.element = etree.Element("service") add_attribute(self.element, "name", service.name) self.add_directories() self.add_startup() @@ -159,7 +136,7 @@ class ServiceElement: self.add_shutdown() self.add_files() - def add_directories(self) -> None: + def add_directories(self): # get custom directories directories = etree.Element("directories") for directory in self.service.dirs: @@ -169,17 +146,18 @@ class ServiceElement: if directories.getchildren(): self.element.append(directories) - def add_files(self) -> None: + def add_files(self): + # get custom files file_elements = etree.Element("files") - for file_name in self.service.config_data: - data = self.service.config_data[file_name] + for file_name, data in self.service.config_data.iteritems(): file_element = etree.SubElement(file_elements, "file") add_attribute(file_element, "name", file_name) - file_element.text = etree.CDATA(data) + file_element.text = data + if file_elements.getchildren(): self.element.append(file_elements) - def add_startup(self) -> None: + def add_startup(self): # get custom startup startup_elements = etree.Element("startups") for startup in self.service.startup: @@ -189,7 +167,7 @@ class ServiceElement: if startup_elements.getchildren(): self.element.append(startup_elements) - def add_validate(self) -> None: + def add_validate(self): # get custom validate validate_elements = etree.Element("validates") for validate in self.service.validate: @@ -199,7 +177,7 @@ class ServiceElement: if validate_elements.getchildren(): self.element.append(validate_elements) - def add_shutdown(self) -> None: + def add_shutdown(self): # get custom shutdown shutdown_elements = etree.Element("shutdowns") for shutdown in self.service.shutdown: @@ -211,103 +189,71 @@ class ServiceElement: class DeviceElement(NodeElement): - def __init__(self, session: "Session", node: NodeBase) -> None: - super().__init__(session, node, "device") - add_attribute(self.element, "type", node.model) - self.add_class() + def __init__(self, session, node): + super(DeviceElement, self).__init__(session, node, "device") + add_attribute(self.element, "type", node.type) self.add_services() - def add_class(self) -> None: - clazz = "" - image = "" - if isinstance(self.node, DockerNode): - clazz = "docker" - image = self.node.image - elif isinstance(self.node, LxcNode): - clazz = "lxc" - image = self.node.image - elif isinstance(self.node, PodmanNode): - clazz = "podman" - image = self.node.image - add_attribute(self.element, "class", clazz) - add_attribute(self.element, "image", image) - - def add_services(self) -> None: + def add_services(self): service_elements = etree.Element("services") for service in self.node.services: etree.SubElement(service_elements, "service", name=service.name) + if service_elements.getchildren(): self.element.append(service_elements) - config_service_elements = etree.Element("configservices") - for name, service in self.node.config_services.items(): - etree.SubElement(config_service_elements, "service", name=name) - if config_service_elements.getchildren(): - self.element.append(config_service_elements) - class NetworkElement(NodeElement): - def __init__(self, session: "Session", node: NodeBase) -> None: - super().__init__(session, node, "network") - if isinstance(self.node, 
WlanNode): - if self.node.wireless_model: - add_attribute(self.element, "model", self.node.wireless_model.name) - if self.node.mobility: - add_attribute(self.element, "mobility", self.node.mobility.name) - if isinstance(self.node, EmaneNet): - if self.node.wireless_model: - add_attribute(self.element, "model", self.node.wireless_model.name) - if self.node.mobility: - add_attribute(self.element, "mobility", self.node.mobility.name) - if isinstance(self.node, GreTapBridge): - add_attribute(self.element, "grekey", self.node.grekey) - if isinstance(self.node, WirelessNode): - config = self.node.get_config() - self.add_wireless_config(config) + def __init__(self, session, node): + super(NetworkElement, self).__init__(session, node, "network") + model = getattr(self.node, "model", None) + if model: + add_attribute(self.element, "model", model.name) + mobility = getattr(self.node, "mobility", None) + if mobility: + add_attribute(self.element, "mobility", mobility.name) + grekey = getattr(self.node, "grekey", None) + if grekey and grekey is not None: + add_attribute(self.element, "grekey", grekey) self.add_type() - def add_type(self) -> None: - node_type = self.session.get_node_type(type(self.node)) - add_attribute(self.element, "type", node_type.name) - - def add_wireless_config(self, config: dict[str, Configuration]) -> None: - wireless_element = etree.SubElement(self.element, "wireless") - for config_item in config.values(): - add_configuration(wireless_element, config_item.id, config_item.default) + def add_type(self): + if self.node.apitype: + node_type = NodeTypes(self.node.apitype).name + else: + node_type = self.node.__class__.__name__ + add_attribute(self.element, "type", node_type) -class CoreXmlWriter: - def __init__(self, session: "Session") -> None: - self.session: "Session" = session - self.scenario: etree.Element = etree.Element("scenario") - self.networks: etree.SubElement = etree.SubElement(self.scenario, "networks") - self.devices: etree.SubElement = etree.SubElement(self.scenario, "devices") +class CoreXmlWriter(object): + def __init__(self, session): + self.session = session + self.scenario = etree.Element("scenario") + self.networks = None + self.devices = None self.write_session() - def write_session(self) -> None: + def write_session(self): # generate xml content - self.write_nodes() - self.write_links() + links = self.write_nodes() + self.write_links(links) self.write_mobility_configs() self.write_emane_configs() self.write_service_configs() - self.write_configservice_configs() self.write_session_origin() - self.write_servers() self.write_session_hooks() self.write_session_options() self.write_session_metadata() self.write_default_services() - def write(self, path: Path) -> None: - self.scenario.set("name", str(path)) + def write(self, file_name): + self.scenario.set("name", file_name) + # write out generated xml xml_tree = etree.ElementTree(self.scenario) - xml_tree.write( - str(path), xml_declaration=True, pretty_print=True, encoding="UTF-8" - ) + xml_tree.write(file_name, xml_declaration=True, pretty_print=True, encoding="UTF-8") - def write_session_origin(self) -> None: + def write_session_origin(self): # origin: geolocation of cartesian coordinate 0,0,0 lat, lon, alt = self.session.location.refgeo origin = etree.Element("session_origin") @@ -327,91 +273,85 @@ class CoreXmlWriter: add_attribute(origin, "y", y) add_attribute(origin, "z", z) - def write_servers(self) -> None: - servers = etree.Element("servers") - for server in self.session.distributed.servers.values(): - 
server_element = etree.SubElement(servers, "server") - add_attribute(server_element, "name", server.name) - add_attribute(server_element, "address", server.host) - if servers.getchildren(): - self.scenario.append(servers) - - def write_session_hooks(self) -> None: + def write_session_hooks(self): # hook scripts hooks = etree.Element("session_hooks") - for state in sorted(self.session.hooks, key=lambda x: x.value): - for file_name, data in self.session.hooks[state]: + for state in sorted(self.session._hooks.keys()): + for file_name, data in self.session._hooks[state]: hook = etree.SubElement(hooks, "hook") add_attribute(hook, "name", file_name) - add_attribute(hook, "state", state.value) + add_attribute(hook, "state", state) hook.text = data if hooks.getchildren(): self.scenario.append(hooks) - def write_session_options(self) -> None: + def write_session_options(self): option_elements = etree.Element("session_options") - for option in self.session.options.options: - value = self.session.options.get(option.id) - add_configuration(option_elements, option.id, value) + options_config = self.session.options.get_configs() + if not options_config: + return + + for _id, default_value in self.session.options.default_values().iteritems(): + # TODO: should we just save the current config regardless, since it may change? + value = options_config[_id] + if value != default_value: + add_configuration(option_elements, _id, value) + if option_elements.getchildren(): self.scenario.append(option_elements) - def write_session_metadata(self) -> None: + def write_session_metadata(self): # metadata metadata_elements = etree.Element("session_metadata") - config = self.session.metadata + config = self.session.metadata.get_configs() if not config: return - for key in config: - value = config[key] - add_configuration(metadata_elements, key, value) + for _id, value in config.iteritems(): + add_configuration(metadata_elements, _id, value) if metadata_elements.getchildren(): self.scenario.append(metadata_elements) - def write_emane_configs(self) -> None: + def write_emane_configs(self): emane_configurations = etree.Element("emane_configurations") - for node_id, model_configs in self.session.emane.node_configs.items(): - node_id, iface_id = utils.parse_iface_config_id(node_id) - for model_name, config in model_configs.items(): - logger.debug( - "writing emane config node(%s) model(%s)", node_id, model_name - ) - model_class = self.session.emane.get_model(model_name) - emane_configuration = create_emane_model_config( - node_id, model_class, config, iface_id - ) + for node_id in self.session.emane.nodes(): + all_configs = self.session.emane.get_all_configs(node_id) + if not all_configs: + continue + + for model_name, config in all_configs.iteritems(): + logging.info("writing emane config node(%s) model(%s)", node_id, model_name) + if model_name == -1: + emane_configuration = create_emane_config(node_id, self.session.emane.emane_config, config) + else: + model = self.session.emane.models[model_name] + emane_configuration = create_emane_model_config(node_id, model, config) emane_configurations.append(emane_configuration) + if emane_configurations.getchildren(): self.scenario.append(emane_configurations) - def write_mobility_configs(self) -> None: + def write_mobility_configs(self): mobility_configurations = etree.Element("mobility_configurations") for node_id in self.session.mobility.nodes(): all_configs = self.session.mobility.get_all_configs(node_id) if not all_configs: continue - for model_name in all_configs: - config = 
all_configs[model_name] - logger.debug( - "writing mobility config node(%s) model(%s)", node_id, model_name - ) - mobility_configuration = etree.SubElement( - mobility_configurations, "mobility_configuration" - ) + for model_name, config in all_configs.iteritems(): + logging.info("writing mobility config node(%s) model(%s)", node_id, model_name) + mobility_configuration = etree.SubElement(mobility_configurations, "mobility_configuration") add_attribute(mobility_configuration, "node", node_id) add_attribute(mobility_configuration, "model", model_name) - for name in config: - value = config[name] + for name, value in config.iteritems(): add_configuration(mobility_configuration, name, value) if mobility_configurations.getchildren(): self.scenario.append(mobility_configurations) - def write_service_configs(self) -> None: + def write_service_configs(self): service_configurations = etree.Element("service_configurations") service_configs = self.session.services.all_configs() for node_id, service in service_configs: @@ -422,158 +362,146 @@ class CoreXmlWriter: if service_configurations.getchildren(): self.scenario.append(service_configurations) - def write_configservice_configs(self) -> None: - service_configurations = etree.Element("configservice_configurations") - for node in self.session.nodes.values(): - if not isinstance(node, CoreNodeBase): - continue - for name, service in node.config_services.items(): - service_element = etree.SubElement( - service_configurations, "service", name=name - ) - add_attribute(service_element, "node", node.id) - if service.custom_config: - configs_element = etree.SubElement(service_element, "configs") - for key, value in service.custom_config.items(): - etree.SubElement( - configs_element, "config", key=key, value=value - ) - if service.custom_templates: - templates_element = etree.SubElement(service_element, "templates") - for template_name, template in service.custom_templates.items(): - template_element = etree.SubElement( - templates_element, "template", name=template_name - ) - template_element.text = etree.CDATA(template) - if service_configurations.getchildren(): - self.scenario.append(service_configurations) - - def write_default_services(self) -> None: - models = etree.Element("default_services") - for model in self.session.services.default_services: - services = self.session.services.default_services[model] - model = etree.SubElement(models, "node", type=model) + def write_default_services(self): + node_types = etree.Element("default_services") + for node_type, services in self.session.services.default_services.iteritems(): + node_type = etree.SubElement(node_types, "node", type=node_type) for service in services: - etree.SubElement(model, "service", name=service) - if models.getchildren(): - self.scenario.append(models) + etree.SubElement(node_type, "service", name=service) - def write_nodes(self) -> None: - for node in self.session.nodes.values(): + if node_types.getchildren(): + self.scenario.append(node_types) + + def write_nodes(self): + self.networks = etree.SubElement(self.scenario, "networks") + self.devices = etree.SubElement(self.scenario, "devices") + + links = [] + for node in self.session.objects.itervalues(): # network node - is_network_or_rj45 = isinstance( - node, (core.nodes.base.CoreNetworkBase, core.nodes.physical.Rj45Node) - ) - is_controlnet = isinstance(node, CtrlNet) - is_ptp = isinstance(node, PtpNet) - if is_network_or_rj45 and not (is_controlnet or is_ptp): + if isinstance(node, (coreobj.PyCoreNet, nodes.RJ45Node)) and not 
nodeutils.is_node(node, NodeTypes.CONTROL_NET): self.write_network(node) # device node - elif isinstance(node, core.nodes.base.CoreNodeBase): + elif isinstance(node, nodes.PyCoreNode): self.write_device(node) - def write_network(self, node: NodeBase) -> None: + # add known links + links.extend(node.all_link_data(0)) + + return links + + def write_network(self, node): + # ignore p2p and other nodes that are not part of the api + if not node.apitype: + return + + # ignore nodes tied to a different network + if nodeutils.is_node(node, (NodeTypes.SWITCH, NodeTypes.HUB)): + for netif in node.netifs(sort=True): + othernet = getattr(netif, "othernet", None) + if othernet and othernet.objid != node.objid: + logging.info("writer ignoring node(%s) othernet(%s)", node.name, othernet.name) + return + network = NetworkElement(self.session, node) self.networks.append(network.element) - def write_links(self) -> None: + def write_links(self, links): link_elements = etree.Element("links") - for core_link in self.session.link_manager.links(): - node1, iface1 = core_link.node1, core_link.iface1 - node2, iface2 = core_link.node2, core_link.iface2 - unidirectional = core_link.is_unidirectional() - link_element = self.create_link_element( - node1, iface1, node2, iface2, core_link.options(), unidirectional - ) + # add link data + for link_data in links: + # skip basic range links + if link_data.interface1_id is None and link_data.interface2_id is None: + continue + + link_element = self.create_link_element(link_data) link_elements.append(link_element) - if unidirectional: - link_element = self.create_link_element( - node2, iface2, node1, iface1, iface2.options, unidirectional - ) - link_elements.append(link_element) + if link_elements.getchildren(): self.scenario.append(link_elements) - def write_device(self, node: NodeBase) -> None: + def write_device(self, node): device = DeviceElement(self.session, node) self.devices.append(device.element) - def create_iface_element( - self, element_name: str, iface: CoreInterface - ) -> etree.Element: - iface_element = etree.Element(element_name) - # check if interface if connected to emane - if isinstance(iface.node, CoreNodeBase) and isinstance(iface.net, EmaneNet): - nem_id = self.session.emane.get_nem_id(iface) - add_attribute(iface_element, "nem", nem_id) - ip4 = iface.get_ip4() - ip4_mask = None - if ip4: - ip4_mask = ip4.prefixlen - ip4 = str(ip4.ip) - ip6 = iface.get_ip6() - ip6_mask = None - if ip6: - ip6_mask = ip6.prefixlen - ip6 = str(ip6.ip) - add_attribute(iface_element, "id", iface.id) - add_attribute(iface_element, "name", iface.name) - add_attribute(iface_element, "mac", iface.mac) - add_attribute(iface_element, "ip4", ip4) - add_attribute(iface_element, "ip4_mask", ip4_mask) - add_attribute(iface_element, "ip6", ip6) - add_attribute(iface_element, "ip6_mask", ip6_mask) - return iface_element - - def create_link_element( - self, - node1: NodeBase, - iface1: Optional[CoreInterface], - node2: NodeBase, - iface2: Optional[CoreInterface], - options: LinkOptions, - unidirectional: bool, - ) -> etree.Element: + def create_link_element(self, link_data): link_element = etree.Element("link") - add_attribute(link_element, "node1", node1.id) - add_attribute(link_element, "node2", node2.id) + add_attribute(link_element, "node_one", link_data.node1_id) + add_attribute(link_element, "node_two", link_data.node2_id) + # check for interface one - if iface1 is not None: - iface1 = self.create_iface_element("iface1", iface1) - link_element.append(iface1) + if 
link_data.interface1_id is not None: + interface_one = etree.Element("interface_one") + node = self.session.get_object(link_data.node1_id) + node_interface = node.netif(link_data.interface1_id) + + add_attribute(interface_one, "id", link_data.interface1_id) + add_attribute(interface_one, "name", node_interface.name) + add_attribute(interface_one, "mac", link_data.interface1_mac) + add_attribute(interface_one, "ip4", link_data.interface1_ip4) + add_attribute(interface_one, "ip4_mask", link_data.interface1_ip4_mask) + add_attribute(interface_one, "ip6", link_data.interface1_ip6) + add_attribute(interface_one, "ip6_mask", link_data.interface1_ip6_mask) + + # check if emane interface + if nodeutils.is_node(node_interface.net, NodeTypes.EMANE): + nem = node_interface.net.getnemid(node_interface) + add_attribute(interface_one, "nem", nem) + + link_element.append(interface_one) + # check for interface two - if iface2 is not None: - iface2 = self.create_iface_element("iface2", iface2) - link_element.append(iface2) - # check for options, don't write for emane/wlan links - is_node1_wireless = isinstance(node1, (WlanNode, EmaneNet, WirelessNode)) - is_node2_wireless = isinstance(node2, (WlanNode, EmaneNet, WirelessNode)) - if not (is_node1_wireless or is_node2_wireless): - unidirectional = 1 if unidirectional else 0 - options_element = etree.Element("options") - add_attribute(options_element, "delay", options.delay) - add_attribute(options_element, "bandwidth", options.bandwidth) - add_attribute(options_element, "loss", options.loss) - add_attribute(options_element, "dup", options.dup) - add_attribute(options_element, "jitter", options.jitter) - add_attribute(options_element, "mer", options.mer) - add_attribute(options_element, "burst", options.burst) - add_attribute(options_element, "mburst", options.mburst) - add_attribute(options_element, "unidirectional", unidirectional) - add_attribute(options_element, "key", options.key) - add_attribute(options_element, "buffer", options.buffer) - if options_element.items(): - link_element.append(options_element) + if link_data.interface2_id is not None: + interface_two = etree.Element("interface_two") + node = self.session.get_object(link_data.node2_id) + node_interface = node.netif(link_data.interface2_id) + + add_attribute(interface_two, "id", link_data.interface2_id) + add_attribute(interface_two, "name", node_interface.name) + add_attribute(interface_two, "mac", link_data.interface2_mac) + add_attribute(interface_two, "ip4", link_data.interface2_ip4) + add_attribute(interface_two, "ip4_mask", link_data.interface2_ip4_mask) + add_attribute(interface_two, "ip6", link_data.interface2_ip6) + add_attribute(interface_two, "ip6_mask", link_data.interface2_ip6_mask) + + # check if emane interface + if nodeutils.is_node(node_interface.net, NodeTypes.EMANE): + nem = node_interface.net.getnemid(node_interface) + add_attribute(interface_two, "nem", nem) + + link_element.append(interface_two) + + # check for options + options = etree.Element("options") + add_attribute(options, "delay", link_data.delay) + add_attribute(options, "bandwidth", link_data.bandwidth) + add_attribute(options, "per", link_data.per) + add_attribute(options, "dup", link_data.dup) + add_attribute(options, "jitter", link_data.jitter) + add_attribute(options, "mer", link_data.mer) + add_attribute(options, "burst", link_data.burst) + add_attribute(options, "mburst", link_data.mburst) + add_attribute(options, "type", link_data.link_type) + add_attribute(options, "gui_attributes", 
link_data.gui_attributes) + add_attribute(options, "unidirectional", link_data.unidirectional) + add_attribute(options, "emulation_id", link_data.emulation_id) + add_attribute(options, "network_id", link_data.network_id) + add_attribute(options, "key", link_data.key) + add_attribute(options, "opaque", link_data.opaque) + add_attribute(options, "session", link_data.session) + if options.items(): + link_element.append(options) + return link_element -class CoreXmlReader: - def __init__(self, session: "Session") -> None: - self.session: "Session" = session - self.scenario: Optional[etree.ElementTree] = None +class CoreXmlReader(object): + def __init__(self, session): + self.session = session + self.scenario = None - def read(self, file_path: Path) -> None: - xml_tree = etree.parse(str(file_path)) + def read(self, file_name): + xml_tree = etree.parse(file_name) self.scenario = xml_tree.getroot() # read xml session content @@ -581,29 +509,27 @@ class CoreXmlReader: self.read_session_metadata() self.read_session_options() self.read_session_hooks() - self.read_servers() self.read_session_origin() self.read_service_configs() self.read_mobility_configs() + self.read_emane_configs() self.read_nodes() self.read_links() - self.read_emane_configs() - self.read_configservice_configs() - def read_default_services(self) -> None: + def read_default_services(self): default_services = self.scenario.find("default_services") if default_services is None: return for node in default_services.iterchildren(): - model = node.get("type") + node_type = node.get("type") services = [] for service in node.iterchildren(): services.append(service.get("name")) - logger.info("reading default services for nodes(%s): %s", model, services) - self.session.services.default_services[model] = services + logging.info("reading default services for nodes(%s): %s", node_type, services) + self.session.services.default_services[node_type] = services - def read_session_metadata(self) -> None: + def read_session_metadata(self): session_metadata = self.scenario.find("session_metadata") if session_metadata is None: return @@ -613,45 +539,36 @@ class CoreXmlReader: name = data.get("name") value = data.get("value") configs[name] = value - logger.info("reading session metadata: %s", configs) - self.session.metadata = configs + logging.info("reading session metadata: %s", configs) + self.session.metadata.set_configs(configs) - def read_session_options(self) -> None: + def read_session_options(self): session_options = self.scenario.find("session_options") if session_options is None: return - xml_config = {} - for configuration in session_options.iterchildren(): - name = configuration.get("name") - value = configuration.get("value") - xml_config[name] = value - logger.info("reading session options: %s", xml_config) - self.session.options.update(xml_config) - def read_session_hooks(self) -> None: + configs = {} + for config in session_options.iterchildren(): + name = config.get("name") + value = config.get("value") + configs[name] = value + logging.info("reading session options: %s", configs) + self.session.options.set_configs(configs) + + def read_session_hooks(self): session_hooks = self.scenario.find("session_hooks") if session_hooks is None: return for hook in session_hooks.iterchildren(): name = hook.get("name") - state = get_int(hook, "state") - state = EventTypes(state) + state = hook.get("state") data = hook.text - logger.info("reading hook: state(%s) name(%s)", state, name) - self.session.add_hook(state, name, data) + hook_type = "hook:%s" 
% state + logging.info("reading hook: state(%s) name(%s)", state, name) + self.session.set_hook(hook_type, file_name=name, source_name=None, data=data) - def read_servers(self) -> None: - servers = self.scenario.find("servers") - if servers is None: - return - for server in servers.iterchildren(): - name = server.get("name") - address = server.get("address") - logger.info("reading server: name(%s) address(%s)", name, address) - self.session.distributed.add_server(name, address) - - def read_session_origin(self) -> None: + def read_session_origin(self): session_origin = self.scenario.find("session_origin") if session_origin is None: return @@ -660,22 +577,22 @@ class CoreXmlReader: lon = get_float(session_origin, "lon") alt = get_float(session_origin, "alt") if all([lat, lon, alt]): - logger.info("reading session reference geo: %s, %s, %s", lat, lon, alt) + logging.info("reading session reference geo: %s, %s, %s", lat, lon, alt) self.session.location.setrefgeo(lat, lon, alt) scale = get_float(session_origin, "scale") if scale: - logger.info("reading session reference scale: %s", scale) + logging.info("reading session reference scale: %s", scale) self.session.location.refscale = scale x = get_float(session_origin, "x") y = get_float(session_origin, "y") z = get_float(session_origin, "z") if all([x, y]): - logger.info("reading session reference xyz: %s, %s, %s", x, y, z) + logging.info("reading session reference xyz: %s, %s, %s", x, y, z) self.session.location.refxyz = (x, y, z) - def read_service_configs(self) -> None: + def read_service_configs(self): service_configurations = self.scenario.find("service_configurations") if service_configurations is None: return @@ -683,9 +600,7 @@ class CoreXmlReader: for service_configuration in service_configurations.iterchildren(): node_id = get_int(service_configuration, "node") service_name = service_configuration.get("name") - logger.info( - "reading custom service(%s) for node(%s)", service_name, node_id - ) + logging.info("reading custom service(%s) for node(%s)", service_name, node_id) self.session.services.set_service(node_id, service_name) service = self.session.services.get_service(node_id, service_name) @@ -699,75 +614,51 @@ class CoreXmlReader: validate_elements = service_configuration.find("validates") if validate_elements is not None: - service.validate = tuple( - x.text for x in validate_elements.iterchildren() - ) + service.validate = tuple(x.text for x in validate_elements.iterchildren()) shutdown_elements = service_configuration.find("shutdowns") if shutdown_elements is not None: - service.shutdown = tuple( - x.text for x in shutdown_elements.iterchildren() - ) + service.shutdown = tuple(x.text for x in shutdown_elements.iterchildren()) file_elements = service_configuration.find("files") if file_elements is not None: - files = set(service.configs) for file_element in file_elements.iterchildren(): name = file_element.get("name") data = file_element.text service.config_data[name] = data - files.add(name) - service.configs = tuple(files) - def read_emane_configs(self) -> None: + def read_emane_configs(self): emane_configurations = self.scenario.find("emane_configurations") if emane_configurations is None: return + for emane_configuration in emane_configurations.iterchildren(): node_id = get_int(emane_configuration, "node") - iface_id = get_int(emane_configuration, "iface") model_name = emane_configuration.get("model") configs = {} - # validate node and model - node = self.session.nodes.get(node_id) - if not node: - raise CoreXmlError(f"node 
for emane config doesn't exist: {node_id}") - self.session.emane.get_model(model_name) - if iface_id is not None and iface_id not in node.ifaces: - raise CoreXmlError( - f"invalid interface id({iface_id}) for node({node.name})" - ) - - # read and set emane model configuration - platform_configuration = emane_configuration.find("platform") - for config in platform_configuration.iterchildren(): - name = config.get("name") - value = config.get("value") - configs[name] = value mac_configuration = emane_configuration.find("mac") for config in mac_configuration.iterchildren(): name = config.get("name") value = config.get("value") configs[name] = value + phy_configuration = emane_configuration.find("phy") for config in phy_configuration.iterchildren(): name = config.get("name") value = config.get("value") configs[name] = value + external_configuration = emane_configuration.find("external") for config in external_configuration.iterchildren(): name = config.get("name") value = config.get("value") configs[name] = value - logger.info( - "reading emane configuration node(%s) model(%s)", node_id, model_name - ) - node_id = utils.iface_config_id(node_id, iface_id) - self.session.emane.set_config(node_id, model_name, configs) + logging.info("reading emane configuration node(%s) model(%s)", node_id, model_name) + self.session.emane.set_model_config(node_id, model_name, configs) - def read_mobility_configs(self) -> None: + def read_mobility_configs(self): mobility_configurations = self.scenario.find("mobility_configurations") if mobility_configurations is None: return @@ -782,12 +673,10 @@ class CoreXmlReader: value = config.get("value") configs[name] = value - logger.info( - "reading mobility configuration node(%s) model(%s)", node_id, model_name - ) + logging.info("reading mobility configuration node(%s) model(%s)", node_id, model_name) self.session.mobility.set_model_config(node_id, model_name, configs) - def read_nodes(self) -> None: + def read_nodes(self): device_elements = self.scenario.find("devices") if device_elements is not None: for device_element in device_elements.iterchildren(): @@ -798,181 +687,91 @@ class CoreXmlReader: for network_element in network_elements.iterchildren(): self.read_network(network_element) - def read_device(self, device_element: etree.Element) -> None: + def read_device(self, device_element): node_id = get_int(device_element, "id") name = device_element.get("name") model = device_element.get("type") - icon = device_element.get("icon") - clazz = device_element.get("class") - image = device_element.get("image") - server = device_element.get("server") - canvas = get_int(device_element, "canvas") - node_type = NodeTypes.DEFAULT - if clazz == "docker": - node_type = NodeTypes.DOCKER - elif clazz == "lxc": - node_type = NodeTypes.LXC - elif clazz == "podman": - node_type = NodeTypes.PODMAN - _class = self.session.get_node_class(node_type) - options = _class.create_options() - options.icon = icon - options.canvas = canvas - # check for special options - if isinstance(options, CoreNodeOptions): - options.model = model - service_elements = device_element.find("services") - if service_elements is not None: - options.services.extend( - x.get("name") for x in service_elements.iterchildren() - ) - config_service_elements = device_element.find("configservices") - if config_service_elements is not None: - options.config_services.extend( - x.get("name") for x in config_service_elements.iterchildren() - ) - if isinstance(options, (DockerOptions, LxcOptions, PodmanOptions)): - 
options.image = image - # get position information + node_options = NodeOptions(name, model) + + service_elements = device_element.find("services") + if service_elements is not None: + node_options.services = [x.get("name") for x in service_elements.iterchildren()] + position_element = device_element.find("position") - position = None if position_element is not None: - position = Position() x = get_float(position_element, "x") y = get_float(position_element, "y") if all([x, y]): - position.set(x, y) + node_options.set_position(x, y) + lat = get_float(position_element, "lat") lon = get_float(position_element, "lon") alt = get_float(position_element, "alt") if all([lat, lon, alt]): - position.set_geo(lon, lat, alt) - logger.info("reading node id(%s) model(%s) name(%s)", node_id, model, name) - self.session.add_node(_class, node_id, name, server, position, options) + node_options.set_location(lat, lon, alt) - def read_network(self, network_element: etree.Element) -> None: + logging.info("reading node id(%s) model(%s) name(%s)", node_id, model, name) + self.session.add_node(_id=node_id, node_options=node_options) + + def read_network(self, network_element): node_id = get_int(network_element, "id") name = network_element.get("name") - server = network_element.get("server") node_type = NodeTypes[network_element.get("type")] - _class = self.session.get_node_class(node_type) - options = _class.create_options() - options.canvas = get_int(network_element, "canvas") - options.icon = network_element.get("icon") - if isinstance(options, EmaneOptions): - options.emane_model = network_element.get("model") + node_options = NodeOptions(name) + position_element = network_element.find("position") - position = None if position_element is not None: - position = Position() x = get_float(position_element, "x") y = get_float(position_element, "y") if all([x, y]): - position.set(x, y) + node_options.set_position(x, y) + lat = get_float(position_element, "lat") lon = get_float(position_element, "lon") alt = get_float(position_element, "alt") if all([lat, lon, alt]): - position.set_geo(lon, lat, alt) - logger.info( - "reading node id(%s) node_type(%s) name(%s)", node_id, node_type, name - ) - node = self.session.add_node(_class, node_id, name, server, position, options) - if isinstance(node, WirelessNode): - wireless_element = network_element.find("wireless") - if wireless_element: - config = {} - for config_element in wireless_element.iterchildren(): - name = config_element.get("name") - value = config_element.get("value") - config[name] = value - node.set_config(config) + node_options.set_location(lat, lon, alt) - def read_configservice_configs(self) -> None: - configservice_configs = self.scenario.find("configservice_configurations") - if configservice_configs is None: - return + logging.info("reading node id(%s) node_type(%s) name(%s)", node_id, node_type, name) + self.session.add_node(_type=node_type, _id=node_id, node_options=node_options) - for configservice_element in configservice_configs.iterchildren(): - name = configservice_element.get("name") - node_id = get_int(configservice_element, "node") - node = self.session.get_node(node_id, CoreNodeBase) - service = node.config_services[name] - - configs_element = configservice_element.find("configs") - if configs_element is not None: - config = {} - for config_element in configs_element.iterchildren(): - key = config_element.get("key") - value = config_element.get("value") - config[key] = value - service.set_config(config) - - templates_element = 
configservice_element.find("templates") - if templates_element is not None: - for template_element in templates_element.iterchildren(): - name = template_element.get("name") - template = template_element.text - logger.info( - "loading xml template(%s): %s", type(template), template - ) - service.set_template(name, template) - - def read_links(self) -> None: + def read_links(self): link_elements = self.scenario.find("links") if link_elements is None: return - node_sets = set() for link_element in link_elements.iterchildren(): - node1_id = get_int(link_element, "node1") - if node1_id is None: - node1_id = get_int(link_element, "node_one") - node2_id = get_int(link_element, "node2") - if node2_id is None: - node2_id = get_int(link_element, "node_two") - node_set = frozenset((node1_id, node2_id)) + node_one = get_int(link_element, "node_one") + node_two = get_int(link_element, "node_two") - iface1_element = link_element.find("iface1") - if iface1_element is None: - iface1_element = link_element.find("interface_one") - iface1_data = None - if iface1_element is not None: - iface1_data = create_iface_data(iface1_element) + interface_one_element = link_element.find("interface_one") + interface_one = None + if interface_one_element is not None: + interface_one = create_interface_data(interface_one_element) - iface2_element = link_element.find("iface2") - if iface2_element is None: - iface2_element = link_element.find("interface_two") - iface2_data = None - if iface2_element is not None: - iface2_data = create_iface_data(iface2_element) + interface_two_element = link_element.find("interface_two") + interface_two = None + if interface_two_element is not None: + interface_two = create_interface_data(interface_two_element) options_element = link_element.find("options") - options = LinkOptions() + link_options = LinkOptions() if options_element is not None: - options.bandwidth = get_int(options_element, "bandwidth") - options.burst = get_int(options_element, "burst") - options.delay = get_int(options_element, "delay") - options.dup = get_int(options_element, "dup") - options.mer = get_int(options_element, "mer") - options.mburst = get_int(options_element, "mburst") - options.jitter = get_int(options_element, "jitter") - options.key = get_int(options_element, "key") - options.loss = get_float(options_element, "loss") - if options.loss is None: - options.loss = get_float(options_element, "per") - options.unidirectional = get_int(options_element, "unidirectional") - options.buffer = get_int(options_element, "buffer") + link_options.bandwidth = get_float(options_element, "bandwidth") + link_options.burst = get_float(options_element, "burst") + link_options.delay = get_float(options_element, "delay") + link_options.dup = get_float(options_element, "dup") + link_options.mer = get_float(options_element, "mer") + link_options.mburst = get_float(options_element, "mburst") + link_options.jitter = get_float(options_element, "jitter") + link_options.key = get_float(options_element, "key") + link_options.per = get_float(options_element, "per") + link_options.unidirectional = get_int(options_element, "unidirectional") + link_options.session = options_element.get("session") + link_options.emulation_id = get_int(options_element, "emulation_id") + link_options.network_id = get_int(options_element, "network_id") + link_options.opaque = options_element.get("opaque") + link_options.gui_attributes = options_element.get("gui_attributes") - if options.unidirectional == 1 and node_set in node_sets: - logger.info("updating link 
node1(%s) node2(%s)", node1_id, node2_id) - self.session.update_link( - node1_id, node2_id, iface1_data.id, iface2_data.id, options - ) - else: - logger.info("adding link node1(%s) node2(%s)", node1_id, node2_id) - self.session.add_link( - node1_id, node2_id, iface1_data, iface2_data, options - ) - - node_sets.add(node_set) + logging.info("reading link node_one(%s) node_two(%s)", node_one, node_two) + self.session.add_link(node_one, node_two, interface_one, interface_two, link_options) diff --git a/daemon/core/xml/corexmldeployment.py b/daemon/core/xml/corexmldeployment.py index 0b38e9b0..ea1dfc72 100644 --- a/daemon/core/xml/corexmldeployment.py +++ b/daemon/core/xml/corexmldeployment.py @@ -1,169 +1,143 @@ import os import socket -from typing import TYPE_CHECKING -import netaddr from lxml import etree -from core import utils -from core.emane.nodes import EmaneNet -from core.executables import IP -from core.nodes.base import CoreNodeBase, NodeBase - -if TYPE_CHECKING: - from core.emulator.session import Session +from core import constants +from core.coreobj import PyCoreNode +from core.enumerations import NodeTypes +from core.misc import utils, nodeutils, ipaddress -def add_type(parent_element: etree.Element, name: str) -> None: +def add_type(parent_element, name): type_element = etree.SubElement(parent_element, "type") type_element.text = name -def add_address( - parent_element: etree.Element, - address_type: str, - address: str, - iface_name: str = None, -) -> None: +def add_address(parent_element, address_type, address, interface_name=None): address_element = etree.SubElement(parent_element, "address", type=address_type) address_element.text = address - if iface_name is not None: - address_element.set("iface", iface_name) + if interface_name is not None: + address_element.set("iface", interface_name) -def add_mapping(parent_element: etree.Element, maptype: str, mapref: str) -> None: +def add_mapping(parent_element, maptype, mapref): etree.SubElement(parent_element, "mapping", type=maptype, ref=mapref) -def add_emane_iface( - host_element: etree.Element, - nem_id: int, - platform_name: str = "p1", - transport_name: str = "t1", -) -> etree.Element: +def add_emane_interface(host_element, netif, platform_name="p1", transport_name="t1"): + nem_id = netif.net.nemidmap[netif] host_id = host_element.get("id") # platform data - platform_id = f"{host_id}/{platform_name}" - platform_element = etree.SubElement( - host_element, "emanePlatform", id=platform_id, name=platform_name - ) + platform_id = "%s/%s" % (host_id, platform_name) + platform_element = etree.SubElement(host_element, "emanePlatform", id=platform_id, name=platform_name) # transport data - transport_id = f"{host_id}/{transport_name}" - etree.SubElement( - platform_element, "transport", id=transport_id, name=transport_name - ) + transport_id = "%s/%s" % (host_id, transport_name) + etree.SubElement(platform_element, "transport", id=transport_id, name=transport_name) # nem data - nem_name = f"nem{nem_id}" - nem_element_id = f"{host_id}/{nem_name}" - nem_element = etree.SubElement( - platform_element, "nem", id=nem_element_id, name=nem_name - ) + nem_name = "nem%s" % nem_id + nem_element_id = "%s/%s" % (host_id, nem_name) + nem_element = etree.SubElement(platform_element, "nem", id=nem_element_id, name=nem_name) nem_id_element = etree.SubElement(nem_element, "parameter", name="nemid") nem_id_element.text = str(nem_id) return platform_element -def get_address_type(address: str) -> str: +def get_address_type(address): addr, _slash, 
_prefixlen = address.partition("/") - if netaddr.valid_ipv4(addr): + if ipaddress.is_ipv4_address(addr): address_type = "IPv4" - elif netaddr.valid_ipv6(addr): + elif ipaddress.is_ipv6_address(addr): address_type = "IPv6" else: raise NotImplementedError return address_type -def get_ipv4_addresses(hostname: str) -> list[tuple[str, str]]: +def get_ipv4_addresses(hostname): if hostname == "localhost": addresses = [] - args = f"{IP} -o -f inet address show" - output = utils.cmd(args) + args = [constants.IP_BIN, "-o", "-f", "inet", "addr", "show"] + output = utils.check_cmd(args) for line in output.split(os.linesep): split = line.split() if not split: continue - iface_name = split[1] + interface_name = split[1] address = split[3] if not address.startswith("127."): - addresses.append((iface_name, address)) + addresses.append((interface_name, address)) return addresses else: # TODO: handle other hosts raise NotImplementedError -class CoreXmlDeployment: - def __init__(self, session: "Session", scenario: etree.Element) -> None: - self.session: "Session" = session - self.scenario: etree.Element = scenario - self.root: etree.SubElement = etree.SubElement( - scenario, "container", id="TestBed", name="TestBed" - ) +class CoreXmlDeployment(object): + def __init__(self, session, scenario): + self.session = session + self.scenario = scenario + self.root = etree.SubElement(scenario, "container", id="TestBed", name="TestBed") self.add_deployment() - def find_device(self, name: str) -> etree.Element: - device = self.scenario.find(f"devices/device[@name='{name}']") + def find_device(self, name): + device = self.scenario.find("devices/device[@name='%s']" % name) return device - def find_iface(self, device: NodeBase, name: str) -> etree.Element: - iface = self.scenario.find( - f"devices/device[@name='{device.name}']/interfaces/interface[@name='{name}']" - ) - return iface + def find_interface(self, device, name): + interface = self.scenario.find("devices/device[@name='%s']/interfaces/interface[@name='%s']" % ( + device.name, name)) + return interface - def add_deployment(self) -> None: + def add_deployment(self): physical_host = self.add_physical_host(socket.gethostname()) - for node_id in self.session.nodes: - node = self.session.nodes[node_id] - if isinstance(node, CoreNodeBase): + # TODO: handle other servers + # servers = self.session.broker.getservernames() + # servers.remove("localhost") + + for node in self.session.objects.itervalues(): + if isinstance(node, PyCoreNode): self.add_virtual_host(physical_host, node) - def add_physical_host(self, name: str) -> etree.Element: + def add_physical_host(self, name): # add host - root_id = self.root.get("id") - host_id = f"{root_id}/{name}" + host_id = "%s/%s" % (self.root.get("id"), name) host_element = etree.SubElement(self.root, "testHost", id=host_id, name=name) # add type element add_type(host_element, "physical") # add ipv4 addresses - for iface_name, address in get_ipv4_addresses("localhost"): - add_address(host_element, "IPv4", address, iface_name) + for interface_name, address in get_ipv4_addresses("localhost"): + add_address(host_element, "IPv4", address, interface_name) return host_element - def add_virtual_host(self, physical_host: etree.Element, node: NodeBase) -> None: - if not isinstance(node, CoreNodeBase): - raise TypeError(f"invalid node type: {node}") + def add_virtual_host(self, physical_host, node): + if not isinstance(node, PyCoreNode): + raise TypeError("invalid node type: %s" % node) # create virtual host element - phys_id = 
physical_host.get("id") - host_id = f"{phys_id}/{node.name}" - host_element = etree.SubElement( - physical_host, "testHost", id=host_id, name=node.name - ) + host_id = "%s/%s" % (physical_host.get("id"), node.name) + host_element = etree.SubElement(physical_host, "testHost", id=host_id, name=node.name) # add host type add_type(host_element, "virtual") - for iface in node.get_ifaces(): + for netif in node.netifs(): emane_element = None - if isinstance(iface.net, EmaneNet): - nem_id = self.session.emane.get_nem_id(iface) - emane_element = add_emane_iface(host_element, nem_id) + if nodeutils.is_node(netif.net, NodeTypes.EMANE): + emane_element = add_emane_interface(host_element, netif) parent_element = host_element if emane_element is not None: parent_element = emane_element - for ip in iface.ips(): - address = str(ip.ip) + for address in netif.addrlist: address_type = get_address_type(address) - add_address(parent_element, address_type, address, iface.name) + add_address(parent_element, address_type, address, netif.name) diff --git a/daemon/core/xml/emanexml.py b/daemon/core/xml/emanexml.py index 4b8ada70..c1c9fbb6 100644 --- a/daemon/core/xml/emanexml.py +++ b/daemon/core/xml/emanexml.py @@ -1,129 +1,82 @@ import logging -from pathlib import Path -from tempfile import NamedTemporaryFile -from typing import TYPE_CHECKING, Optional +import os from lxml import etree -from core import utils -from core.config import Configuration -from core.emane.nodes import EmaneNet -from core.emulator.distributed import DistributedServer -from core.errors import CoreError -from core.nodes.base import CoreNode, CoreNodeBase -from core.nodes.interface import CoreInterface +from core.misc import utils +from core.misc.ipaddress import MacAddress from core.xml import corexml -logger = logging.getLogger(__name__) - -if TYPE_CHECKING: - from core.emane.emanemodel import EmaneModel - -_MAC_PREFIX = "02:02" +_hwaddr_prefix = "02:02" -def is_external(config: dict[str, str]) -> bool: +def is_external(config): """ Checks if the configuration is for an external transport. - :param config: configuration to check + :param dict config: configuration to check :return: True if external, False otherwise + :rtype: bool """ return config.get("external") == "1" -def _value_to_params(value: str) -> Optional[tuple[str]]: +def _value_to_params(value): """ Helper to convert a parameter to a parameter tuple. - :param value: value string to convert to tuple + :param str value: value string to convert to tuple :return: parameter tuple, None otherwise """ try: values = utils.make_tuple_fromstr(value, str) + if not hasattr(values, "__iter__"): return None + if len(values) < 2: return None + return values + except SyntaxError: - logger.exception("error in value string to param list") + logging.exception("error in value string to param list") return None -def create_file( - xml_element: etree.Element, - doc_name: str, - file_path: Path, - server: DistributedServer = None, -) -> None: +def create_file(xml_element, doc_name, file_path): """ Create xml file. 
- :param xml_element: root element to write to file - :param doc_name: name to use in the emane doctype - :param file_path: file path to write xml file to - :param server: remote server to create file on + :param lxml.etree.Element xml_element: root element to write to file + :param str doc_name: name to use in the emane doctype + :param str file_path: file path to write xml file to :return: nothing """ - doctype = ( - f'' - ) - if server: - temp = NamedTemporaryFile(delete=False) - temp_path = Path(temp.name) - corexml.write_xml_file(xml_element, temp_path, doctype=doctype) - temp.close() - server.remote_put(temp_path, file_path) - temp_path.unlink() - else: - corexml.write_xml_file(xml_element, file_path, doctype=doctype) + doctype = '' % {"doc_name": doc_name} + corexml.write_xml_file(xml_element, file_path, doctype=doctype) -def create_node_file( - node: CoreNodeBase, xml_element: etree.Element, doc_name: str, file_name: str -) -> None: - """ - Create emane xml for an interface. - - :param node: node running emane - :param xml_element: root element to write to file - :param doc_name: name to use in the emane doctype - :param file_name: name of xml file - :return: - """ - if isinstance(node, CoreNode): - file_path = node.directory / file_name - else: - file_path = node.session.directory / file_name - create_file(xml_element, doc_name, file_path, node.server) - - -def add_param(xml_element: etree.Element, name: str, value: str) -> None: +def add_param(xml_element, name, value): """ Add emane configuration parameter to xml element. - :param xml_element: element to append parameter to - :param name: name of parameter - :param value: value for parameter + :param lxml.etree.Element xml_element: element to append parameter to + :param str name: name of parameter + :param str value: value for parameter :return: nothing """ etree.SubElement(xml_element, "param", name=name, value=value) -def add_configurations( - xml_element: etree.Element, - configurations: list[Configuration], - config: dict[str, str], - config_ignore: set[str], -) -> None: +def add_configurations(xml_element, configurations, config, config_ignore): """ Add emane model configurations to xml element. - :param xml_element: xml element to add emane configurations to - :param configurations: configurations to add to xml - :param config: configuration values - :param config_ignore: configuration options to ignore + :param lxml.etree.Element xml_element: xml element to add emane configurations to + :param list[core.config.Configuration] configurations: configurations to add to xml + :param dict config: configuration values + :param set config_ignore: configuration options to ignore :return: """ for configuration in configurations: @@ -143,253 +96,346 @@ def add_configurations( add_param(xml_element, name, value) -def build_platform_xml( - nem_id: int, - nem_port: int, - emane_net: EmaneNet, - iface: CoreInterface, - config: dict[str, str], -) -> None: +def build_node_platform_xml(emane_manager, control_net, node, nem_id, platform_xmls): """ - Create platform xml for a nem/interface. + Create platform xml for a specific node. 
- :param nem_id: nem id for current node/interface - :param nem_port: control port to configure for emane - :param emane_net: emane network associate with node and interface - :param iface: node interface to create platform xml for - :param config: emane configuration for interface + :param core.emane.emanemanager.EmaneManager emane_manager: emane manager with emane configurations + :param core.netns.nodes.CtrlNet control_net: control net node for this emane network + :param core.emane.nodes.EmaneNode node: node to write platform xml for + :param int nem_id: nem id to use for interfaces for this node + :param dict platform_xmls: stores platform xml elements to append nem entries to + :return: the next nem id that can be used for creating platform xml files + :rtype: int + """ + logging.debug("building emane platform xml for node(%s): %s", node, node.name) + nem_entries = {} + + if node.model is None: + logging.warn("warning: EmaneNode %s has no associated model", node.name) + return nem_entries + + for netif in node.netifs(): + # build nem xml + nem_definition = nem_file_name(node.model, netif) + nem_element = etree.Element("nem", id=str(nem_id), name=netif.localname, definition=nem_definition) + + # check if this is an external transport, get default config if an interface specific one does not exist + config = emane_manager.getifcconfig(node.model.object_id, netif, node.model.name) + + if is_external(config): + nem_element.set("transport", "external") + platform_endpoint = "platformendpoint" + add_param(nem_element, platform_endpoint, config[platform_endpoint]) + transport_endpoint = "transportendpoint" + add_param(nem_element, transport_endpoint, config[transport_endpoint]) + else: + # build transport xml + transport_type = netif.transport_type + if not transport_type: + logging.info("warning: %s interface type unsupported!", netif.name) + transport_type = "raw" + transport_file = transport_file_name(node.objid, transport_type) + transport_element = etree.SubElement(nem_element, "transport", definition=transport_file) + + # add transport parameter + add_param(transport_element, "device", netif.name) + + # add nem entry + nem_entries[netif] = nem_element + + # merging code + key = netif.node.objid + if netif.transport_type == "raw": + key = "host" + otadev = control_net.brname + eventdev = control_net.brname + else: + otadev = None + eventdev = None + + platform_element = platform_xmls.get(key) + if platform_element is None: + platform_element = etree.Element("platform") + + if otadev: + emane_manager.set_config("otamanagerdevice", otadev) + + if eventdev: + emane_manager.set_config("eventservicedevice", eventdev) + + # append all platform options (except starting id) to doc + for configuration in emane_manager.emane_config.emulator_config: + name = configuration.id + if name == "platform_id_start": + continue + + value = emane_manager.get_config(name) + add_param(platform_element, name, value) + + # add platform xml + platform_xmls[key] = platform_element + + platform_element.append(nem_element) + + node.setnemid(netif, nem_id) + macstr = _hwaddr_prefix + ":00:00:" + macstr += "%02X:%02X" % ((nem_id >> 8) & 0xFF, nem_id & 0xFF) + netif.sethwaddr(MacAddress.from_string(macstr)) + + # increment nem id + nem_id += 1 + + for key in sorted(platform_xmls.keys()): + if key == "host": + file_name = "platform.xml" + else: + file_name = "platform%d.xml" % key + + platform_element = platform_xmls[key] + + doc_name = "platform" + file_path = os.path.join(emane_manager.session.session_dir, 
file_name) + create_file(platform_element, doc_name, file_path) + + return nem_id + + +def build_xml_files(emane_manager, node): + """ + Generate emane xml files required for node. + + :param core.emane.emanemanager.EmaneManager emane_manager: emane manager with emane configurations + :param core.emane.nodes.EmaneNode node: node to write platform xml for :return: nothing """ - # create top level platform element - platform_element = etree.Element("platform") - for configuration in emane_net.wireless_model.platform_config: - name = configuration.id - value = config[configuration.id] - add_param(platform_element, name, value) - add_param( - platform_element, - emane_net.wireless_model.platform_controlport, - f"0.0.0.0:{nem_port}", - ) + logging.debug("building all emane xml for node(%s): %s", node, node.name) + if node.model is None: + return - # build nem xml - nem_definition = nem_file_name(iface) - nem_element = etree.Element( - "nem", id=str(nem_id), name=iface.localname, definition=nem_definition - ) + # get model configurations + config = emane_manager.get_configs(node.model.object_id, node.model.name) + if not config: + return - # create model based xml files - emane_net.wireless_model.build_xml_files(config, iface) + # build XML for overall network (EmaneNode) configs + node.model.build_xml_files(config) - # check if this is an external transport - if is_external(config): - nem_element.set("transport", "external") - platform_endpoint = "platformendpoint" - add_param(nem_element, platform_endpoint, config[platform_endpoint]) - transport_endpoint = "transportendpoint" - add_param(nem_element, transport_endpoint, config[transport_endpoint]) + # build XML for specific interface (NEM) configs + need_virtual = False + need_raw = False + vtype = "virtual" + rtype = "raw" - # define transport element - transport_name = transport_file_name(iface) - transport_element = etree.SubElement( - nem_element, "transport", definition=transport_name - ) - add_param(transport_element, "device", iface.name) + for netif in node.netifs(): + # check for interface specific emane configuration and write xml files, if needed + config = emane_manager.getifcconfig(node.model.object_id, netif, node.model.name) + if config: + node.model.build_xml_files(config, netif) - # add nem element to platform element - platform_element.append(nem_element) + # check transport type needed for interface + if "virtual" in netif.transport_type: + need_virtual = True + vtype = netif.transport_type + else: + need_raw = True + rtype = netif.transport_type - # generate and assign interface mac address based on nem id - mac = _MAC_PREFIX + ":00:00:" - mac += f"{(nem_id >> 8) & 0xFF:02X}:{nem_id & 0xFF:02X}" - iface.set_mac(mac) + if need_virtual: + build_transport_xml(emane_manager, node, vtype) - doc_name = "platform" - file_name = platform_file_name(iface) - create_node_file(iface.node, platform_element, doc_name, file_name) + if need_raw: + build_transport_xml(emane_manager, node, rtype) -def create_transport_xml(iface: CoreInterface, config: dict[str, str]) -> None: +def build_transport_xml(emane_manager, node, transport_type): """ Build transport xml file for node and transport type. 
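Note the MAC assignment a few lines up: both versions of the platform-building code derive the interface hardware address directly from the NEM id under the 02:02 prefix. A standalone sketch of that derivation:

```python
_MAC_PREFIX = "02:02"

def nem_mac(nem_id):
    """Derive the interface MAC used for a NEM id: 02:02:00:00:HH:LL."""
    return "%s:00:00:%02X:%02X" % (_MAC_PREFIX, (nem_id >> 8) & 0xFF, nem_id & 0xFF)

assert nem_mac(1) == "02:02:00:00:00:01"
assert nem_mac(0x1234) == "02:02:00:00:12:34"
```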
- :param iface: interface to build transport xml for - :param config: all current configuration values + :param core.emane.emanemanager.EmaneManager emane_manager: emane manager with emane configurations + :param core.emane.nodes.EmaneNode node: node to write platform xml for + :param str transport_type: transport type to build xml for :return: nothing """ - transport_type = iface.transport_type transport_element = etree.Element( "transport", - name=f"{transport_type.value.capitalize()} Transport", - library=f"trans{transport_type.value.lower()}", + name="%s Transport" % transport_type.capitalize(), + library="trans%s" % transport_type.lower() ) + + # add bitrate add_param(transport_element, "bitrate", "0") # get emane model cnfiguration + config = emane_manager.get_configs(node.objid, node.model.name) flowcontrol = config.get("flowcontrolenable", "0") == "1" - if isinstance(iface.node, CoreNode): + + if "virtual" in transport_type.lower(): device_path = "/dev/net/tun_flowctl" - if not iface.node.path_exists(device_path): + if not os.path.exists(device_path): device_path = "/dev/net/tun" add_param(transport_element, "devicepath", device_path) + if flowcontrol: add_param(transport_element, "flowcontrolenable", "on") + doc_name = "transport" - transport_name = transport_file_name(iface) - create_node_file(iface.node, transport_element, doc_name, transport_name) + file_name = transport_file_name(node.objid, transport_type) + file_path = os.path.join(emane_manager.session.session_dir, file_name) + create_file(transport_element, doc_name, file_path) -def create_phy_xml( - emane_model: "EmaneModel", iface: CoreInterface, config: dict[str, str] -) -> None: +def create_phy_xml(emane_model, config, file_path): """ Create the phy xml document. - :param emane_model: emane model to create xml - :param iface: interface to create xml for - :param config: all current configuration values + :param core.emane.emanemodel.EmaneModel emane_model: emane model to create phy xml for + :param dict config: all current configuration values + :param str file_path: path to write file to :return: nothing """ - phy_element = etree.Element("phy", name=f"{emane_model.name} PHY") + phy_element = etree.Element("phy", name="%s PHY" % emane_model.name) if emane_model.phy_library: phy_element.set("library", emane_model.phy_library) - add_configurations( - phy_element, emane_model.phy_config, config, emane_model.config_ignore - ) - file_name = phy_file_name(iface) - create_node_file(iface.node, phy_element, "phy", file_name) + + add_configurations(phy_element, emane_model.phy_config, config, emane_model.config_ignore) + create_file(phy_element, "phy", file_path) -def create_mac_xml( - emane_model: "EmaneModel", iface: CoreInterface, config: dict[str, str] -) -> None: +def create_mac_xml(emane_model, config, file_path): """ Create the mac xml document. 
- :param emane_model: emane model to create xml - :param iface: interface to create xml for - :param config: all current configuration values + :param core.emane.emanemodel.EmaneModel emane_model: emane model to create phy xml for + :param dict config: all current configuration values + :param str file_path: path to write file to :return: nothing """ if not emane_model.mac_library: - raise CoreError("must define emane model library") - mac_element = etree.Element( - "mac", name=f"{emane_model.name} MAC", library=emane_model.mac_library - ) - add_configurations( - mac_element, emane_model.mac_config, config, emane_model.config_ignore - ) - file_name = mac_file_name(iface) - create_node_file(iface.node, mac_element, "mac", file_name) + raise ValueError("must define emane model library") + + mac_element = etree.Element("mac", name="%s MAC" % emane_model.name, library=emane_model.mac_library) + add_configurations(mac_element, emane_model.mac_config, config, emane_model.config_ignore) + create_file(mac_element, "mac", file_path) -def create_nem_xml( - emane_model: "EmaneModel", iface: CoreInterface, config: dict[str, str] -) -> None: +def create_nem_xml(emane_model, config, nem_file, transport_definition, mac_definition, phy_definition): """ Create the nem xml document. - :param emane_model: emane model to create xml - :param iface: interface to create xml for - :param config: all current configuration values + :param core.emane.emanemodel.EmaneModel emane_model: emane model to create phy xml for + :param dict config: all current configuration values + :param str nem_file: nem file path to write + :param str transport_definition: transport file definition path + :param str mac_definition: mac file definition path + :param str phy_definition: phy file definition path :return: nothing """ - nem_element = etree.Element("nem", name=f"{emane_model.name} NEM") + nem_element = etree.Element("nem", name="%s NEM" % emane_model.name) if is_external(config): nem_element.set("type", "unstructured") else: - transport_name = transport_file_name(iface) - etree.SubElement(nem_element, "transport", definition=transport_name) - mac_name = mac_file_name(iface) - etree.SubElement(nem_element, "mac", definition=mac_name) - phy_name = phy_file_name(iface) - etree.SubElement(nem_element, "phy", definition=phy_name) - nem_name = nem_file_name(iface) - create_node_file(iface.node, nem_element, "nem", nem_name) + etree.SubElement(nem_element, "transport", definition=transport_definition) + etree.SubElement(nem_element, "mac", definition=mac_definition) + etree.SubElement(nem_element, "phy", definition=phy_definition) + create_file(nem_element, "nem", nem_file) -def create_event_service_xml( - group: str, - port: str, - device: str, - file_directory: Path, - server: DistributedServer = None, -) -> None: +def create_event_service_xml(group, port, device, file_directory): """ Create a emane event service xml file. 
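The event service file written here is just an `emaneeventmsgsvc` element whose children are simple name/text pairs. A standalone sketch producing the same shape; the multicast group, port, and device values are placeholders:

```python
from lxml import etree

def event_service_element(group, port, device):
    # mirrors the element layout used by create_event_service_xml below
    root = etree.Element("emaneeventmsgsvc")
    for name, value in (("group", group), ("port", port), ("device", device),
                        ("mcloop", "1"), ("ttl", "32")):
        sub = etree.SubElement(root, name)
        sub.text = value
    return root

element = event_service_element("224.1.2.8", "45703", "ctrl0")
print(etree.tostring(element, pretty_print=True).decode())
```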
- :param group: event group - :param port: event port - :param device: event device - :param file_directory: directory to create file in - :param server: remote server node - will run on, default is None for localhost + :param str group: event group + :param str port: event port + :param str device: event device + :param str file_directory: directory to create file in :return: nothing """ event_element = etree.Element("emaneeventmsgsvc") - for name, value in ( - ("group", group), - ("port", port), - ("device", device), - ("mcloop", "1"), - ("ttl", "32"), - ): + for name, value in (("group", group), ("port", port), ("device", device), ("mcloop", "1"), ("ttl", "32")): sub_element = etree.SubElement(event_element, name) sub_element.text = value - file_path = file_directory / "libemaneeventservice.xml" - create_file(event_element, "emaneeventmsgsvc", file_path, server) + file_name = "libemaneeventservice.xml" + file_path = os.path.join(file_directory, file_name) + create_file(event_element, "emaneeventmsgsvc", file_path) -def transport_file_name(iface: CoreInterface) -> str: +def transport_file_name(node_id, transport_type): """ Create name for a transport xml file. - :param iface: interface running emane - :return: transport xml file name + :param int node_id: node id to generate transport file name for + :param str transport_type: transport type to generate transport file + :return: """ - return f"{iface.name}-trans-{iface.transport_type.value}.xml" + return "n%strans%s.xml" % (node_id, transport_type) -def nem_file_name(iface: CoreInterface) -> str: +def _basename(emane_model, interface=None): """ - Return the string name for the NEM XML file, e.g. "eth0-nem.xml" + Create name that is leveraged for configuration file creation. - :param iface: interface running emane - :return: nem xm file name + :param interface: interface for this model + :return: basename used for file creation + :rtype: str """ - append = "-raw" if not isinstance(iface.node, CoreNode) else "" - return f"{iface.name}-nem{append}.xml" + name = "n%s" % emane_model.object_id + + if interface: + node_id = interface.node.objid + if emane_model.session.emane.getifcconfig(node_id, interface, emane_model.name): + name = interface.localname.replace(".", "_") + + return "%s%s" % (name, emane_model.name) -def shim_file_name(iface: CoreInterface = None) -> str: +def nem_file_name(emane_model, interface=None): """ - Return the string name for the SHIM XML file, e.g. "eth0-shim.xml" + Return the string name for the NEM XML file, e.g. "n3rfpipenem.xml" - :param iface: interface running emane - :return: shim xml file name + :param core.emane.emanemodel.EmaneModel emane_model: emane model to create phy xml for + :param interface: interface for this model + :return: nem xml filename + :rtype: str """ - return f"{iface.name}-shim.xml" + basename = _basename(emane_model, interface) + append = "" + if interface and interface.transport_type == "raw": + append = "_raw" + return "%snem%s.xml" % (basename, append) -def mac_file_name(iface: CoreInterface) -> str: +def shim_file_name(emane_model, interface=None): """ - Return the string name for the MAC XML file, e.g. "eth0-mac.xml" + Return the string name for the SHIM XML file, e.g. 
"commeffectshim.xml" - :param iface: interface running emane - :return: mac xml file name + :param core.emane.emanemodel.EmaneModel emane_model: emane model to create phy xml for + :param interface: interface for this model + :return: shim xml filename + :rtype: str """ - return f"{iface.name}-mac.xml" + return "%sshim.xml" % _basename(emane_model, interface) -def phy_file_name(iface: CoreInterface) -> str: +def mac_file_name(emane_model, interface=None): """ - Return the string name for the PHY XML file, e.g. "eth0-phy.xml" + Return the string name for the MAC XML file, e.g. "n3rfpipemac.xml" - :param iface: interface running emane - :return: phy xml file name + :param core.emane.emanemodel.EmaneModel emane_model: emane model to create phy xml for + :param interface: interface for this model + :return: mac xml filename + :rtype: str """ - return f"{iface.name}-phy.xml" + return "%smac.xml" % _basename(emane_model, interface) -def platform_file_name(iface: CoreInterface) -> str: - return f"{iface.name}-platform.xml" +def phy_file_name(emane_model, interface=None): + """ + Return the string name for the PHY XML file, e.g. "n3rfpipephy.xml" + + :param core.emane.emanemodel.EmaneModel emane_model: emane model to create phy xml for + :param interface: interface for this model + :return: phy xml filename + :rtype: str + """ + return "%sphy.xml" % _basename(emane_model, interface) diff --git a/package/etc/core.conf b/daemon/data/core.conf similarity index 78% rename from package/etc/core.conf rename to daemon/data/core.conf index 1923250d..27fa698e 100644 --- a/package/etc/core.conf +++ b/daemon/data/core.conf @@ -1,22 +1,30 @@ +# Configuration file for CORE (core-gui, core-daemon) + +### GUI configuration options ### +[core-gui] +# no options are presently defined; see the ~/.core preferences file + +### core-daemon configuration options ### [core-daemon] -#distributed_address = 127.0.0.1 -grpcaddress = localhost -grpcport = 50051 +xmlfilever = 1.0 +listenaddr = localhost +port = 4038 +numthreads = 1 quagga_bin_search = "/usr/local/bin /usr/bin /usr/lib/quagga" quagga_sbin_search = "/usr/local/sbin /usr/sbin /usr/lib/quagga" frr_bin_search = "/usr/local/bin /usr/bin /usr/lib/frr" -frr_sbin_search = "/usr/local/sbin /usr/sbin /usr/lib/frr /usr/libexec/frr" +frr_sbin_search = "/usr/local/sbin /usr/sbin /usr/lib/frr" # uncomment the following line to load custom services from the specified dir -# this may be a comma-separated list, and directory names should be unique -# and not named 'services' -#custom_services_dir = /home//.coregui/custom_services -#custom_config_services_dir = /home//.coregui/custom_services - +# this may be a comma-separated list, and directory names should be unique +# and not named 'services' +#custom_services_dir = /home/username/.core/myservices +# # uncomment to establish a standalone control backchannel for accessing nodes # (overriden by the session option of the same name) #controlnet = 172.16.0.0/24 - +# +# # uncomment and edit to establish a distributed control backchannel #controlnet = core1:172.16.1.0/24 core2:172.16.2.0/24 core3:172.16.3.0/24 core4:172.16.4.0/24 core5:172.16.5.0/24 @@ -46,7 +54,7 @@ emane_platform_port = 8101 emane_transform_port = 8201 emane_event_generate = True emane_event_monitor = False -#emane_models_dir = /home//.coregui/custom_emane +#emane_models_dir = /home/username/.core/myemane # EMANE log level range [0,4] default: 2 #emane_log_level = 2 emane_realtime = True diff --git a/daemon/data/logging.conf b/daemon/data/logging.conf new file 
mode 100644 index 00000000..7f3d496f --- /dev/null +++ b/daemon/data/logging.conf @@ -0,0 +1,20 @@ +{ + "version": 1, + "handlers": { + "console": { + "class": "logging.StreamHandler", + "formatter": "default", + "level": "DEBUG", + "stream": "ext://sys.stdout" + } + }, + "formatters": { + "default": { + "format": "%(asctime)s - %(levelname)s - %(module)s:%(funcName)s - %(message)s" + } + }, + "root": { + "level": "INFO", + "handlers": ["console"] + } +} diff --git a/daemon/doc/Makefile.am b/daemon/doc/Makefile.am index 9ce90bfa..6f287b09 100644 --- a/daemon/doc/Makefile.am +++ b/daemon/doc/Makefile.am @@ -1,4 +1,8 @@ # CORE +# (c)2012 the Boeing Company. +# See the LICENSE file included in this distribution. +# +# author: Jeff Ahrenholz # # Builds html and pdf documentation using Sphinx. # @@ -6,7 +10,7 @@ # extra cruft to remove DISTCLEANFILES = conf.py Makefile Makefile.in stamp-vti *.rst -all: html +all: index.rst # auto-generated Python documentation using Sphinx index.rst: diff --git a/daemon/doc/conf.py.in b/daemon/doc/conf.py.in index 99929cee..eee03477 100644 --- a/daemon/doc/conf.py.in +++ b/daemon/doc/conf.py.in @@ -121,7 +121,7 @@ html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -#html_static_path = ['_static'] +html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. diff --git a/daemon/examples/api/emane80211.py b/daemon/examples/api/emane80211.py new file mode 100644 index 00000000..0e2d2377 --- /dev/null +++ b/daemon/examples/api/emane80211.py @@ -0,0 +1,63 @@ +#!/usr/bin/python -i +# +# Example CORE Python script that attaches N nodes to an EMANE 802.11abg network. 
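The example scripts below call load_logging_config() before doing anything else; given the dictConfig-style JSON added in daemon/data/logging.conf above, a reasonable guess at what such a loader does is json.load followed by logging.config.dictConfig. The path and function body here are assumptions, not the actual core implementation:

```python
import json
import logging.config

def load_logging_config(config_path="/etc/core/logging.conf"):
    """Apply a dictConfig-style JSON logging configuration (assumed path)."""
    with open(config_path, "r") as f:
        logging.config.dictConfig(json.load(f))
```

After this runs, anything logged through logging.getLogger() goes to stdout with the format defined in the JSON file.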
+ +import datetime + +import parser +from core import load_logging_config +from core.emane.ieee80211abg import EmaneIeee80211abgModel +from core.emulator.coreemu import CoreEmu +from core.emulator.emudata import IpPrefixes +from core.enumerations import EventTypes + +load_logging_config() + + +def example(options): + # ip generator for example + prefixes = IpPrefixes(ip4_prefix="10.83.0.0/16") + + # create emulator instance for creating sessions and utility methods + coreemu = CoreEmu() + session = coreemu.create_session() + + # must be in configuration state for nodes to start, when using "node_add" below + session.set_state(EventTypes.CONFIGURATION_STATE) + + # create emane network node + emane_network = session.create_emane_network( + model=EmaneIeee80211abgModel, + geo_reference=(47.57917, -122.13232, 2.00000) + ) + emane_network.setposition(x=80, y=50) + + # create nodes + for i in xrange(options.nodes): + node = session.create_wireless_node() + node.setposition(x=150 * (i + 1), y=150) + interface = prefixes.create_interface(node) + session.add_link(node.objid, emane_network.objid, interface_one=interface) + + # instantiate session + session.instantiate() + + # start a shell on the first node + node = session.get_object(2) + node.client.term("bash") + + # shutdown session + raw_input("press enter to exit...") + coreemu.shutdown() + + +def main(): + options = parser.parse_options("emane80211") + start = datetime.datetime.now() + print "running emane 80211 example: nodes(%s) time(%s)" % (options.nodes, options.time) + example(options) + print "elapsed time: %s" % (datetime.datetime.now() - start) + + +if __name__ == "__main__" or __name__ == "__builtin__": + main() diff --git a/daemon/examples/api/parser.py b/daemon/examples/api/parser.py new file mode 100644 index 00000000..c6523ba1 --- /dev/null +++ b/daemon/examples/api/parser.py @@ -0,0 +1,41 @@ +import argparse + +DEFAULT_NODES = 2 +DEFAULT_TIME = 10 +DEFAULT_STEP = 1 + + +def parse_options(name): + parser = argparse.ArgumentParser(description="Run %s example" % name) + parser.add_argument("-n", "--nodes", type=int, default=DEFAULT_NODES, + help="number of nodes to create in this example") + parser.add_argument("-t", "--time", type=int, default=DEFAULT_TIME, + help="example iperf run time in seconds") + + options = parser.parse_args() + + # usagestr = "usage: %prog [-h] [options] [args]" + # parser = optparse.OptionParser(usage=usagestr) + # + # parser.add_option("-n", "--nodes", dest="nodes", type=int, default=DEFAULT_NODES, + # help="number of nodes to create in this example") + # + # parser.add_option("-t", "--time", dest="time", type=int, default=DEFAULT_TIME, + # help="example iperf run time in seconds") + + # def usage(msg=None, err=0): + # print + # if msg: + # print "%s\n" % msg + # parser.print_help() + # sys.exit(err) + + # parse command line options + # options, args = parser.parse_args() + + if options.nodes < 2: + parser.error("invalid min number of nodes: %s" % options.nodes) + if options.time < 1: + parser.error("invalid test time: %s" % options.time) + + return options diff --git a/daemon/examples/api/switch.py b/daemon/examples/api/switch.py new file mode 100644 index 00000000..87f031cd --- /dev/null +++ b/daemon/examples/api/switch.py @@ -0,0 +1,67 @@ +#!/usr/bin/python +# +# run iperf to measure the effective throughput between two nodes when +# n nodes are connected to a virtual wlan; run test for testsec +# and repeat for minnodes <= n <= maxnodes with a step size of +# nodestep + +import datetime + +import 
parser +from core import load_logging_config +from core.emulator.coreemu import CoreEmu +from core.emulator.emudata import IpPrefixes +from core.enumerations import NodeTypes, EventTypes + +load_logging_config() + + +def example(options): + # ip generator for example + prefixes = IpPrefixes(ip4_prefix="10.83.0.0/16") + + # create emulator instance for creating sessions and utility methods + coreemu = CoreEmu() + session = coreemu.create_session() + + # must be in configuration state for nodes to start, when using "node_add" below + session.set_state(EventTypes.CONFIGURATION_STATE) + + # create switch network node + switch = session.add_node(_type=NodeTypes.SWITCH) + + # create nodes + for _ in xrange(options.nodes): + node = session.add_node() + interface = prefixes.create_interface(node) + session.add_link(node.objid, switch.objid, interface_one=interface) + + # instantiate session + session.instantiate() + + # get nodes to run example + first_node = session.get_object(2) + last_node = session.get_object(options.nodes + 1) + + print "starting iperf server on node: %s" % first_node.name + first_node.cmd(["iperf", "-s", "-D"]) + first_node_address = prefixes.ip4_address(first_node) + print "node %s connecting to %s" % (last_node.name, first_node_address) + last_node.client.icmd(["iperf", "-t", str(options.time), "-c", first_node_address]) + first_node.cmd(["killall", "-9", "iperf"]) + + # shutdown session + coreemu.shutdown() + + +def main(): + options = parser.parse_options("switch") + + start = datetime.datetime.now() + print "running switch example: nodes(%s) time(%s)" % (options.nodes, options.time) + example(options) + print "elapsed time: %s" % (datetime.datetime.now() - start) + + +if __name__ == "__main__": + main() diff --git a/daemon/examples/api/switch_inject.py b/daemon/examples/api/switch_inject.py new file mode 100644 index 00000000..f727f0ca --- /dev/null +++ b/daemon/examples/api/switch_inject.py @@ -0,0 +1,39 @@ +#!/usr/bin/python +# +# run iperf to measure the effective throughput between two nodes when +# n nodes are connected to a virtual wlan; run test for testsec +# and repeat for minnodes <= n <= maxnodes with a step size of +# nodestep +from core import load_logging_config +from core.emulator.emudata import IpPrefixes +from core.enumerations import NodeTypes, EventTypes + +load_logging_config() + + +def example(nodes): + # ip generator for example + prefixes = IpPrefixes("10.83.0.0/16") + + # create emulator instance for creating sessions and utility methods + coreemu = globals()["coreemu"] + session = coreemu.create_session() + + # must be in configuration state for nodes to start, when using "node_add" below + session.set_state(EventTypes.CONFIGURATION_STATE) + + # create switch network node + switch = session.add_node(_type=NodeTypes.SWITCH) + + # create nodes + for _ in xrange(nodes): + node = session.add_node() + interface = prefixes.create_interface(node) + session.add_link(node.objid, switch.objid, interface_one=interface) + + # instantiate session + session.instantiate() + + +if __name__ in {"__main__", "__builtin__"}: + example(2) diff --git a/daemon/examples/api/wlan.py b/daemon/examples/api/wlan.py new file mode 100644 index 00000000..f152de37 --- /dev/null +++ b/daemon/examples/api/wlan.py @@ -0,0 +1,71 @@ +#!/usr/bin/python +# +# run iperf to measure the effective throughput between two nodes when +# n nodes are connected to a virtual wlan; run test for testsec +# and repeat for minnodes <= n <= maxnodes with a step size of +# nodestep + +import datetime 
+ +import parser +from core import load_logging_config +from core.emulator.coreemu import CoreEmu +from core.emulator.emudata import IpPrefixes, NodeOptions +from core.enumerations import NodeTypes, EventTypes +from core.mobility import BasicRangeModel + +load_logging_config() + + +def example(options): + # ip generator for example + prefixes = IpPrefixes("10.83.0.0/16") + + # create emulator instance for creating sessions and utility methods + coreemu = CoreEmu() + session = coreemu.create_session() + + # must be in configuration state for nodes to start, when using "node_add" below + session.set_state(EventTypes.CONFIGURATION_STATE) + + # create wlan network node + wlan = session.add_node(_type=NodeTypes.WIRELESS_LAN) + session.mobility.set_model(wlan, BasicRangeModel) + + # create nodes, must set a position for wlan basic range model + node_options = NodeOptions() + node_options.set_position(0, 0) + for _ in xrange(options.nodes): + node = session.add_node(node_options=node_options) + interface = prefixes.create_interface(node) + session.add_link(node.objid, wlan.objid, interface_one=interface) + + # instantiate session + session.instantiate() + + # get nodes for example run + first_node = session.get_object(2) + last_node = session.get_object(options.nodes + 1) + + print "starting iperf server on node: %s" % first_node.name + first_node.cmd(["iperf", "-s", "-D"]) + address = prefixes.ip4_address(first_node) + print "node %s connecting to %s" % (last_node.name, address) + last_node.client.icmd(["iperf", "-t", str(options.time), "-c", address]) + first_node.cmd(["killall", "-9", "iperf"]) + + # shutdown session + coreemu.shutdown() + + +def main(): + options = parser.parse_options("wlan") + + start = datetime.datetime.now() + print "running wlan example: nodes(%s) time(%s)" % (options.nodes, options.time) + example(options) + print "elapsed time: %s" % (datetime.datetime.now() - start) + + +if __name__ == "__main__": + main() diff --git a/package/examples/controlnet_updown b/daemon/examples/controlnet_updown similarity index 100% rename from package/examples/controlnet_updown rename to daemon/examples/controlnet_updown diff --git a/daemon/examples/eventloop.py b/daemon/examples/eventloop.py new file mode 100644 index 00000000..7c1b0f8f --- /dev/null +++ b/daemon/examples/eventloop.py @@ -0,0 +1,44 @@ +import logging +import time + +from core.misc.event import EventLoop + + +def main(): + loop = EventLoop() + + def msg(arg): + delta = time.time() - loop.start + logging.debug("%s arg: %s", delta, arg) + + def repeat(interval, count): + count -= 1 + msg("repeat: interval: %s; remaining: %s" % (interval, count)) + if count > 0: + loop.add_event(interval, repeat, interval, count) + + def sleep(delay): + msg("sleep %s" % delay) + time.sleep(delay) + msg("sleep done") + + def stop(arg): + msg(arg) + loop.stop() + + loop.add_event(0, msg, "start") + loop.add_event(0, msg, "time zero") + + for delay in 5, 4, 10, -1, 0, 9, 3, 7, 3.14: + loop.add_event(delay, msg, "time %s" % delay) + + loop.run() + + loop.add_event(0, repeat, 1, 5) + loop.add_event(12, sleep, 10) + + loop.add_event(15.75, stop, "stop time: 15.75") + + +if __name__ == "__main__": + main() diff --git a/daemon/core/configservices/nrlservices/__init__.py b/daemon/examples/grpc/__init__.py similarity index 100% rename from daemon/core/configservices/nrlservices/__init__.py rename to daemon/examples/grpc/__init__.py diff --git a/daemon/examples/grpc/switch.py b/daemon/examples/grpc/switch.py new file mode 100644 index 00000000..354e7b66 
--- /dev/null +++ b/daemon/examples/grpc/switch.py @@ -0,0 +1,60 @@ +import logging + +from core.grpc import client +from core.grpc import core_pb2 + + +def log_event(event): + logging.info("event: %s", event) + + +def main(): + core = client.CoreGrpcClient() + + with core.context_connect(): + # create session + session = core.create_session() + logging.info("created session: %s", session) + + # handle events session may broadcast + core.exception_events(session.id, log_event) + core.node_events(session.id, log_event) + core.session_events(session.id, log_event) + core.link_events(session.id, log_event) + core.file_events(session.id, log_event) + core.config_events(session.id, log_event) + + # change session state + response = core.set_session_state(session.id, core_pb2.STATE_CONFIGURATION) + logging.info("set session state: %s", response) + + # create switch node + switch = core_pb2.Node(type=core_pb2.NODE_SWITCH) + response = core.add_node(session.id, switch) + logging.info("created switch: %s", response) + switch_id = response.id + + # helper to create interfaces + interface_helper = client.InterfaceHelper(ip4_prefix="10.83.0.0/16") + + for i in xrange(2): + # create node + position = core_pb2.Position(x=50 + 50 * i, y=50) + node = core_pb2.Node(position=position) + response = core.add_node(session.id, node) + logging.info("created node: %s", response) + node_id = response.id + + # create link + interface_one = interface_helper.create_interface(node_id, 0) + response = core.add_link(session.id, node_id, switch_id, interface_one) + logging.info("created link: %s", response) + + # change session state + response = core.set_session_state(session.id, core_pb2.STATE_INSTANTIATION) + logging.info("set session state: %s", response) + + +if __name__ == "__main__": + logging.basicConfig(level=logging.DEBUG) + main() diff --git a/daemon/core/configservices/quaggaservices/__init__.py b/daemon/examples/myemane/__init__.py similarity index 100% rename from daemon/core/configservices/quaggaservices/__init__.py rename to daemon/examples/myemane/__init__.py diff --git a/daemon/examples/myemane/examplemodel.py b/daemon/examples/myemane/examplemodel.py new file mode 100644 index 00000000..308b6f1d --- /dev/null +++ b/daemon/examples/myemane/examplemodel.py @@ -0,0 +1,48 @@ +from core.emane import emanemanifest +from core.emane import emanemodel + + +## Custom EMANE Model +class ExampleModel(emanemodel.EmaneModel): + ### MAC Definition + + # Defines the emane model name that will show up in the GUI. + name = "emane_example" + + # Defines that mac library that the model will reference. + mac_library = "rfpipemaclayer" + # Defines the mac manifest file that will be parsed to obtain configuration options, that will be displayed + # within the GUI. + mac_xml = "/usr/share/emane/manifest/rfpipemaclayer.xml" + # Allows you to override options that are maintained within the manifest file above. + mac_defaults = { + "pcrcurveuri": "/usr/share/emane/xml/models/mac/rfpipe/rfpipepcr.xml", + } + # Parses the manifest file and converts configurations into core supported formats. + mac_config = emanemanifest.parse(mac_xml, mac_defaults) + + ### PHY Definition + # **NOTE: phy configuration will default to the universal model as seen below and the below section does not + # have to be included.** + + # Defines that phy library that the model will reference, used if you need to provide a custom phy. 
+ phy_library = None + # Defines the phy manifest file that will be parsed to obtain configuration options, that will be displayed + # within the GUI. + phy_xml = "/usr/share/emane/manifest/emanephy.xml" + # Allows you to override options that are maintained within the manifest file above or for the default universal + # model. + phy_defaults = { + "subid": "1", + "propagationmodel": "2ray", + "noisemode": "none" + } + # Parses the manifest file and converts configurations into core supported formats. + phy_config = emanemanifest.parse(phy_xml, phy_defaults) + + ### Custom override options + # **NOTE: these options default to what's seen below and do not have to be included.** + + # Allows you to ignore options within phy/mac, used typically if you needed to add a custom option for display + # within the gui. + config_ignore = set() diff --git a/daemon/examples/myservices/README.txt b/daemon/examples/myservices/README.txt new file mode 100644 index 00000000..0f92f698 --- /dev/null +++ b/daemon/examples/myservices/README.txt @@ -0,0 +1,26 @@ +This directory contains a sample custom service that you can use as a template +for creating your own services. + +Follow these steps to add your own services: + +1. Modify the sample service MyService to do what you want. It could generate + config/script files, mount per-node directories, start processes/scripts, + etc. sample.py is a Python file that defines one or more classes to be + imported. You can create multiple Python files that will be imported. + Add any new filenames to the __init__.py file. + +2. Put these files in a directory such as /home/username/.core/myservices + Note that the last component of this directory name 'myservices' should not + be named something like 'services' which conflicts with an existing Python + name (the syntax 'from myservices import *' is used). + +3. Add a 'custom_services_dir = /home/username/.core/myservices' entry to the + /etc/core/core.conf file. + +4. Restart the CORE daemon (core-daemon). Any import errors (Python syntax) + should be displayed in the /var/log/core-daemon.log log file (or on screen). + +5. Start using your custom service on your nodes. You can create a new node + type that uses your service, or change the default services for an existing + node type, or change individual nodes. + diff --git a/package/examples/myservices/__init__.py b/daemon/examples/myservices/__init__.py similarity index 100% rename from package/examples/myservices/__init__.py rename to daemon/examples/myservices/__init__.py diff --git a/daemon/examples/myservices/sample.py b/daemon/examples/myservices/sample.py new file mode 100644 index 00000000..843e59ef --- /dev/null +++ b/daemon/examples/myservices/sample.py @@ -0,0 +1,85 @@ +""" +Sample user-defined service. +""" + +from core.service import CoreService +from core.service import ServiceMode + + +## Custom CORE Service +class MyService(CoreService): + ### Service Attributes + + # Name used as a unique ID for this service and is required, no spaces. + name = "MyService" + # Allows you to group services within the GUI under a common name. + group = "Utility" + # Executables this service depends on to function, if executable is not on the path, service will not be loaded. + executables = () + # Services that this service depends on for startup, tuple of service names. + dependencies = () + # Directories that this service will create within a node. + dirs = () + # Files that this service will generate, without a full path this file goes in the node's directory. 
+ # e.g. /tmp/pycore.12345/n1.conf/myfile + configs = ("myservice1.sh", "myservice2.sh") + # Commands used to start this service, any non-zero exit code will cause a failure. + startup = ("sh %s" % configs[0], "sh %s" % configs[1]) + # Commands used to validate that a service was started, any non-zero exit code will cause a failure. + validate = () + # Validation mode, used to determine startup success. + # + # * NON_BLOCKING - runs startup commands, and validates success with validation commands + # * BLOCKING - runs startup commands, and validates success with the startup commands themselves + # * TIMER - runs startup commands, and validates success by waiting for "validation_timer" alone + validation_mode = ServiceMode.NON_BLOCKING + # Time in seconds for a service to wait for validation, before determining success in TIMER/NON_BLOCKING modes. + validation_timer = 5 + # Period in seconds to wait before retrying validation, only used in NON_BLOCKING mode. + validation_period = 0.5 + # Shutdown commands to stop this service. + shutdown = () + + ### On Load + @classmethod + def on_load(cls): + # Provides a way to run some arbitrary logic when the service is loaded, possibly to help facilitate + # dynamic settings for the environment. + pass + + ### Get Configs + @classmethod + def get_configs(cls, node): + # Provides a way to dynamically generate the config files from the node a service will run. + # Defaults to the class definition and can be left out entirely if not needed. + return cls.configs + + ### Generate Config + @classmethod + def generate_config(cls, node, filename): + # Returns a string representation for a file, given the node the service is starting on the config filename + # that this information will be used for. This must be defined, if "configs" are defined. + cfg = "#!/bin/sh\n" + + if filename == cls.configs[0]: + cfg += "# auto-generated by MyService (sample.py)\n" + for ifc in node.netifs(): + cfg += 'echo "Node %s has interface %s"\n' % (node.name, ifc.name) + elif filename == cls.configs[1]: + cfg += "echo hello" + + return cfg + + ### Get Startup + @classmethod + def get_startup(cls, node): + # Provides a way to dynamically generate the startup commands from the node a service will run. + # Defaults to the class definition and can be left out entirely if not needed. + return cls.startup + + ### Get Validate + @classmethod + def get_validate(cls, node): + # Provides a way to dynamically generate the validate commands from the node a service will run. + # Defaults to the class definition and can be left out entirely if not needed. + return cls.validate diff --git a/daemon/examples/netns/daemonnodes.py b/daemon/examples/netns/daemonnodes.py new file mode 100755 index 00000000..3fe1d599 --- /dev/null +++ b/daemon/examples/netns/daemonnodes.py @@ -0,0 +1,189 @@ +#!/usr/bin/python -i + +# Copyright (c)2010-2013 the Boeing Company. +# See the LICENSE file included in this distribution. + +# A distributed example where CORE API messaging is used to create a session +# on a daemon server. The daemon server defaults to 127.0.0.1:4038 +# to target a remote machine specify "-d " parameter, it needs to be +# running the daemon with listenaddr=0.0.0.0 in the core.conf file. +# This script creates no nodes locally and therefore can be run as an +# unprivileged user. 
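The "-d" option above takes a plain host or host:port string; when the port is omitted, the default CORE API port (4038, per the header comment) is used, matching the parsing done later in main(). A standalone sketch of that parsing:

```python
CORE_API_PORT = 4038  # default daemon port noted in the header above

def parse_daemon_address(value):
    """Split a 'host' or 'host:port' string into (host, port)."""
    parts = value.split(":")
    host = parts[0]
    port = int(parts[1]) if len(parts) > 1 else CORE_API_PORT
    return host, port

assert parse_daemon_address("127.0.0.1") == ("127.0.0.1", 4038)
assert parse_daemon_address("10.0.0.2:4039") == ("10.0.0.2", 4039)
```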
+ +import datetime +import optparse +import sys + +from core.api import coreapi +from core.api import dataconversion +from core.api.coreapi import CoreExecuteTlv +from core.enumerations import CORE_API_PORT +from core.enumerations import EventTlvs +from core.enumerations import EventTypes +from core.enumerations import ExecuteTlvs +from core.enumerations import LinkTlvs +from core.enumerations import LinkTypes +from core.enumerations import MessageFlags +from core.enumerations import MessageTypes +from core.misc import ipaddress +from core.netns import nodes + +# declare classes for use with Broker + +from core.session import Session + +# node list (count from 1) +n = [None] +exec_num = 1 + + +def cmd(node, exec_cmd): + """ + :param node: The node the command should be issued too + :param exec_cmd: A string with the command to be run + :return: Returns the result of the command + """ + global exec_num + + # Set up the command api message + tlvdata = CoreExecuteTlv.pack(ExecuteTlvs.NODE.value, node.objid) + tlvdata += CoreExecuteTlv.pack(ExecuteTlvs.NUMBER.value, exec_num) + tlvdata += CoreExecuteTlv.pack(ExecuteTlvs.COMMAND.value, exec_cmd) + msg = coreapi.CoreExecMessage.pack(MessageFlags.STRING.value | MessageFlags.TEXT.value, tlvdata) + node.session.broker.handlerawmsg(msg) + exec_num += 1 + + # Now wait for the response + server = node.session.broker.servers["localhost"] + server.sock.settimeout(50.0) + + # receive messages until we get our execute response + result = None + while True: + msghdr = server.sock.recv(coreapi.CoreMessage.header_len) + msgtype, msgflags, msglen = coreapi.CoreMessage.unpack_header(msghdr) + msgdata = server.sock.recv(msglen) + + # If we get the right response return the results + print "received response message: %s" % MessageTypes(msgtype) + if msgtype == MessageTypes.EXECUTE.value: + msg = coreapi.CoreExecMessage(msgflags, msghdr, msgdata) + result = msg.get_tlv(ExecuteTlvs.RESULT.value) + break + + return result + + +def main(): + usagestr = "usage: %prog [-n] number of nodes [-d] daemon address" + parser = optparse.OptionParser(usage=usagestr) + parser.set_defaults(numnodes=5, daemon="127.0.0.1:" + str(CORE_API_PORT)) + + parser.add_option("-n", "--numnodes", dest="numnodes", type=int, + help="number of nodes") + parser.add_option("-d", "--daemon-server", dest="daemon", type=str, + help="daemon server IP address") + + def usage(msg=None, err=0): + sys.stdout.write("\n") + if msg: + sys.stdout.write(msg + "\n\n") + parser.print_help() + sys.exit(err) + + # parse command line options + (options, args) = parser.parse_args() + + if options.numnodes < 1: + usage("invalid number of nodes: %s" % options.numnodes) + if not options.daemon: + usage("daemon server IP address (-d) is a required argument") + + for a in args: + sys.stderr.write("ignoring command line argument: %s\n" % a) + + start = datetime.datetime.now() + + prefix = ipaddress.Ipv4Prefix("10.83.0.0/16") + session = Session(1) + if "server" in globals(): + server.addsession(session) + + # distributed setup - connect to daemon server + daemonport = options.daemon.split(":") + daemonip = daemonport[0] + + # Localhost is already set in the session but we change it to be the remote daemon + # This stops the remote daemon trying to build a tunnel back which would fail + daemon = "localhost" + if len(daemonport) > 1: + port = int(daemonport[1]) + else: + port = CORE_API_PORT + print "connecting to daemon at %s:%d" % (daemon, port) + session.broker.addserver(daemon, daemonip, port) + + # Set the local 
session id to match the port. + # Not necessary but seems neater. + session.broker.setupserver(daemon) + + # We do not want the recvloop running as we will deal ourselves + session.broker.dorecvloop = False + + # Change to configuration state on both machines + session.set_state(EventTypes.CONFIGURATION_STATE) + tlvdata = coreapi.CoreEventTlv.pack(EventTlvs.TYPE.value, EventTypes.CONFIGURATION_STATE.value) + session.broker.handlerawmsg(coreapi.CoreEventMessage.pack(0, tlvdata)) + + flags = MessageFlags.ADD.value + switch = nodes.SwitchNode(session=session, name="switch", start=False) + switch.setposition(x=80, y=50) + switch.server = daemon + switch_data = switch.data(flags) + switch_message = dataconversion.convert_node(switch_data) + session.broker.handlerawmsg(switch_message) + + number_of_nodes = options.numnodes + + print "creating %d remote nodes with addresses from %s" % (options.numnodes, prefix) + + # create remote nodes via API + for i in xrange(1, number_of_nodes + 1): + node = nodes.CoreNode(session=session, objid=i, name="n%d" % i, start=False) + node.setposition(x=150 * i, y=150) + node.server = daemon + node_data = node.data(flags) + node_message = dataconversion.convert_node(node_data) + session.broker.handlerawmsg(node_message) + n.append(node) + + # create remote links via API + for i in xrange(1, number_of_nodes + 1): + tlvdata = coreapi.CoreLinkTlv.pack(LinkTlvs.N1_NUMBER.value, switch.objid) + tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.N2_NUMBER.value, i) + tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.TYPE.value, LinkTypes.WIRED.value) + tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.INTERFACE2_NUMBER.value, 0) + tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.INTERFACE2_IP4.value, prefix.addr(i)) + tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.INTERFACE2_IP4_MASK.value, prefix.prefixlen) + msg = coreapi.CoreLinkMessage.pack(flags, tlvdata) + session.broker.handlerawmsg(msg) + + # We change the daemon to Instantiation state + # We do not change the local session as it would try and build a tunnel and fail + tlvdata = coreapi.CoreEventTlv.pack(EventTlvs.TYPE.value, EventTypes.INSTANTIATION_STATE.value) + msg = coreapi.CoreEventMessage.pack(0, tlvdata) + session.broker.handlerawmsg(msg) + + # Get the ip or last node and ping it from the first + print "Pinging from the first to the last node" + pingip = cmd(n[-1], "ip -4 -o addr show dev eth0").split()[3].split("/")[0] + print cmd(n[1], "ping -c 5 " + pingip) + + print "elapsed time: %s" % (datetime.datetime.now() - start) + + print "To stop this session, use the core-cleanup script on the remote daemon server." + raw_input("press enter to exit") + + +if __name__ == "__main__" or __name__ == "__builtin__": + main() diff --git a/daemon/examples/netns/distributed.py b/daemon/examples/netns/distributed.py new file mode 100755 index 00000000..b84450af --- /dev/null +++ b/daemon/examples/netns/distributed.py @@ -0,0 +1,127 @@ +#!/usr/bin/python -i + +# Copyright (c)2010-2013 the Boeing Company. +# See the LICENSE file included in this distribution. + +# A distributed example where CORE API messaging is used to create a session +# distributed across the local server and one slave server. The slave server +# must be specified using the '-s ' parameter, and needs to be +# running the daemon with listenaddr=0.0.0.0 in the core.conf file. 
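# ----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the patch itself) for daemonnodes.py
# above. The address 10.0.0.2 is a placeholder for a remote host whose
# core-daemon is listening on all interfaces (listenaddr=0.0.0.0); 4038 is the
# default CORE API port named in the header comment.
#
#   # create 10 nodes on the remote daemon, then drop to an interactive prompt
#   # (the script's shebang is "python -i", so variables such as n[] stay usable)
#   python -i daemonnodes.py -n 10 -d 10.0.0.2:4038
# ----------------------------------------------------------------------------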
+# + +import datetime +import optparse +import sys + +from core import constants +from core.api import coreapi, dataconversion +from core.enumerations import CORE_API_PORT, EventTypes, EventTlvs, LinkTlvs, LinkTypes, MessageFlags +from core.misc import ipaddress +from core.netns import nodes +from core.session import Session + +# node list (count from 1) +n = [None] + + +def main(): + usagestr = "usage: %prog [-h] [options] [args]" + parser = optparse.OptionParser(usage=usagestr) + parser.set_defaults(numnodes=5, slave=None) + + parser.add_option("-n", "--numnodes", dest="numnodes", type=int, + help="number of nodes") + parser.add_option("-s", "--slave-server", dest="slave", type=str, + help="slave server IP address") + + def usage(msg=None, err=0): + sys.stdout.write("\n") + if msg: + sys.stdout.write(msg + "\n\n") + parser.print_help() + sys.exit(err) + + # parse command line options + (options, args) = parser.parse_args() + + if options.numnodes < 1: + usage("invalid number of nodes: %s" % options.numnodes) + if not options.slave: + usage("slave server IP address (-s) is a required argument") + + for a in args: + sys.stderr.write("ignoring command line argument: '%s'\n" % a) + + start = datetime.datetime.now() + + prefix = ipaddress.Ipv4Prefix("10.83.0.0/16") + session = Session(1) + if 'server' in globals(): + server.addsession(session) + + # distributed setup - connect to slave server + slaveport = options.slave.split(':') + slave = slaveport[0] + if len(slaveport) > 1: + port = int(slaveport[1]) + else: + port = CORE_API_PORT + print "connecting to slave at %s:%d" % (slave, port) + session.broker.addserver(slave, slave, port) + session.broker.setupserver(slave) + session.set_state(EventTypes.CONFIGURATION_STATE) + tlvdata = coreapi.CoreEventTlv.pack(EventTlvs.TYPE.value, EventTypes.CONFIGURATION_STATE.value) + session.broker.handlerawmsg(coreapi.CoreEventMessage.pack(0, tlvdata)) + + switch = session.add_object(cls=nodes.SwitchNode, name="switch") + switch.setposition(x=80, y=50) + num_local = options.numnodes / 2 + num_remote = options.numnodes / 2 + options.numnodes % 2 + print "creating %d (%d local / %d remote) nodes with addresses from %s" % \ + (options.numnodes, num_local, num_remote, prefix) + for i in xrange(1, num_local + 1): + node = session.add_object(cls=nodes.CoreNode, name="n%d" % i, objid=i) + node.newnetif(switch, ["%s/%s" % (prefix.addr(i), prefix.prefixlen)]) + node.cmd([constants.SYSCTL_BIN, "net.ipv4.icmp_echo_ignore_broadcasts=0"]) + node.setposition(x=150 * i, y=150) + n.append(node) + + flags = MessageFlags.ADD.value + session.broker.handlerawmsg(switch.tonodemsg(flags=flags)) + + # create remote nodes via API + for i in xrange(num_local + 1, options.numnodes + 1): + node = nodes.CoreNode(session=session, objid=i, name="n%d" % i, start=False) + node.setposition(x=150 * i, y=150) + node.server = slave + n.append(node) + node_data = node.data(flags) + node_message = dataconversion.convert_node(node_data) + session.broker.handlerawmsg(node_message) + + # create remote links via API + for i in xrange(num_local + 1, options.numnodes + 1): + tlvdata = coreapi.CoreLinkTlv.pack(LinkTlvs.N1_NUMBER.value, switch.objid) + tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.N2_NUMBER.value, i) + tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.TYPE.value, LinkTypes.WIRED.value) + tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.INTERFACE2_NUMBER.value, 0) + tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.INTERFACE2_IP4.value, prefix.addr(i)) + tlvdata += 
coreapi.CoreLinkTlv.pack(LinkTlvs.INTERFACE2_IP4_MASK.value, prefix.prefixlen) + msg = coreapi.CoreLinkMessage.pack(flags, tlvdata) + session.broker.handlerawmsg(msg) + + session.instantiate() + tlvdata = coreapi.CoreEventTlv.pack(EventTlvs.TYPE.value, EventTypes.INSTANTIATION_STATE.value) + msg = coreapi.CoreEventMessage.pack(0, tlvdata) + session.broker.handlerawmsg(msg) + + # start a shell on node 1 + n[1].client.term("bash") + + print "elapsed time: %s" % (datetime.datetime.now() - start) + print "To stop this session, use the 'core-cleanup' script on this server" + print "and on the remote slave server." + + +if __name__ == "__main__" or __name__ == "__builtin__": + main() diff --git a/daemon/examples/netns/howmanynodes.py b/daemon/examples/netns/howmanynodes.py new file mode 100755 index 00000000..851baeb6 --- /dev/null +++ b/daemon/examples/netns/howmanynodes.py @@ -0,0 +1,207 @@ +#!/usr/bin/python + +# Copyright (c)2010-2012 the Boeing Company. +# See the LICENSE file included in this distribution. +# +# author: Jeff Ahrenholz +# + +""" +howmanynodes.py - This is a CORE script that creates network namespace nodes +having one virtual Ethernet interface connected to a bridge. It continues to +add nodes until an exception occurs. The number of nodes per bridge can be +specified. +""" + +import datetime +import optparse +import shutil +import sys +import time + +from core import constants +from core.misc import ipaddress +from core.netns import nodes +from core.session import Session + +GBD = 1024.0 * 1024.0 + + +def linuxversion(): + """ Return a string having the Linux kernel version. + """ + f = open("/proc/version", "r") + v = f.readline().split() + version_str = " ".join(v[:3]) + f.close() + return version_str + + +MEMKEYS = ("total", "free", "buff", "cached", "stotal", "sfree") + + +def memfree(): + """ Returns kilobytes memory [total, free, buff, cached, stotal, sfree]. + useful stats are: + free memory = free + buff + cached + swap used = stotal - sfree + """ + f = open("/proc/meminfo", "r") + lines = f.readlines() + f.close() + kbs = {} + for k in MEMKEYS: + kbs[k] = 0 + for l in lines: + if l[:9] == "MemTotal:": + kbs["total"] = int(l.split()[1]) + elif l[:8] == "MemFree:": + kbs["free"] = int(l.split()[1]) + elif l[:8] == "Buffers:": + kbs["buff"] = int(l.split()[1]) + elif l[:8] == "Cached:": + kbs["cache"] = int(l.split()[1]) + elif l[:10] == "SwapTotal:": + kbs["stotal"] = int(l.split()[1]) + elif l[:9] == "SwapFree:": + kbs["sfree"] = int(l.split()[1]) + break + return kbs + + +# node list (count from 1) +nodelist = [None] +switchlist = [] + + +def main(): + usagestr = "usage: %prog [-h] [options] [args]" + parser = optparse.OptionParser(usage=usagestr) + parser.set_defaults(waittime=0.2, numnodes=0, bridges=0, retries=0, + logfile=None, services=None) + + parser.add_option("-w", "--waittime", dest="waittime", type=float, + help="number of seconds to wait between node creation" \ + " (default = %s)" % parser.defaults["waittime"]) + parser.add_option("-n", "--numnodes", dest="numnodes", type=int, + help="number of nodes (default = unlimited)") + parser.add_option("-b", "--bridges", dest="bridges", type=int, + help="number of nodes per bridge; 0 = one bridge " \ + "(def. 
= %s)" % parser.defaults["bridges"]) + parser.add_option("-r", "--retry", dest="retries", type=int, + help="number of retries on error (default = %s)" % \ + parser.defaults["retries"]) + parser.add_option("-l", "--log", dest="logfile", type=str, + help="log memory usage to this file (default = %s)" % \ + parser.defaults["logfile"]) + parser.add_option("-s", "--services", dest="services", type=str, + help="pipe-delimited list of services added to each " + "node (default = %s)\n(Example: zebra|OSPFv2|OSPFv3|" + "IPForward)" % parser.defaults["services"]) + + def usage(msg=None, err=0): + sys.stdout.write("\n") + if msg: + sys.stdout.write(msg + "\n\n") + parser.print_help() + sys.exit(err) + + options, args = parser.parse_args() + + for a in args: + sys.stderr.write("ignoring command line argument: %s\n" % a) + + start = datetime.datetime.now() + prefix = ipaddress.Ipv4Prefix("10.83.0.0/16") + + print "Testing how many network namespace nodes this machine can create." + print " - %s" % linuxversion() + mem = memfree() + print " - %.02f GB total memory (%.02f GB swap)" % (mem["total"] / GBD, mem["stotal"] / GBD) + print " - using IPv4 network prefix %s" % prefix + print " - using wait time of %s" % options.waittime + print " - using %d nodes per bridge" % options.bridges + print " - will retry %d times on failure" % options.retries + print " - adding these services to each node: %s" % options.services + print " " + + lfp = None + if options.logfile is not None: + # initialize a csv log file header + lfp = open(options.logfile, "a") + lfp.write("# log from howmanynodes.py %s\n" % time.ctime()) + lfp.write("# options = %s\n#\n" % options) + lfp.write("# numnodes,%s\n" % ",".join(MEMKEYS)) + lfp.flush() + + session = Session(1) + switch = session.add_object(cls=nodes.SwitchNode) + switchlist.append(switch) + print "Added bridge %s (%d)." % (switch.brname, len(switchlist)) + + i = 0 + retry_count = options.retries + while True: + i += 1 + # optionally add a bridge (options.bridges nodes per bridge) + try: + if 0 < options.bridges <= switch.numnetif(): + switch = session.add_object(cls=nodes.SwitchNode) + switchlist.append(switch) + print "\nAdded bridge %s (%d) for node %d." % (switch.brname, len(switchlist), i) + except Exception, e: + print "At %d bridges (%d nodes) caught exception:\n%s\n" % (len(switchlist), i - 1, e) + break + + # create a node + try: + n = session.add_object(cls=nodes.LxcNode, name="n%d" % i) + n.newnetif(switch, ["%s/%s" % (prefix.addr(i), prefix.prefixlen)]) + n.cmd([constants.SYSCTL_BIN, "net.ipv4.icmp_echo_ignore_broadcasts=0"]) + if options.services is not None: + session.services.add_services(n, "", options.services.split("|")) + session.services.boot_services(n) + nodelist.append(n) + if i % 25 == 0: + print "\n%s nodes created " % i, + mem = memfree() + free = mem["free"] + mem["buff"] + mem["cached"] + swap = mem["stotal"] - mem["sfree"] + print "(%.02f/%.02f GB free/swap)" % (free / GBD, swap / GBD), + if lfp: + lfp.write("%d," % i) + lfp.write("%s\n" % ",".join(str(mem[x]) for x in MEMKEYS)) + lfp.flush() + else: + sys.stdout.write(".") + sys.stdout.flush() + time.sleep(options.waittime) + except Exception, e: + print "At %d nodes caught exception:\n" % i, e + if retry_count > 0: + print "\nWill retry creating node %d." % i + shutil.rmtree(n.nodedir, ignore_errors=True) + retry_count -= 1 + i -= 1 + time.sleep(options.waittime) + continue + else: + print "Stopping at %d nodes!" 
% i + break + + if i == options.numnodes: + print "Stopping at %d nodes due to numnodes option." % i + break + # node creation was successful at this point + retry_count = options.retries + + if lfp: + lfp.flush() + lfp.close() + + print "elapsed time: %s" % (datetime.datetime.now() - start) + print "Use the core-cleanup script to remove nodes and bridges." + + +if __name__ == "__main__": + main() diff --git a/daemon/examples/netns/ospfmanetmdrtest.py b/daemon/examples/netns/ospfmanetmdrtest.py new file mode 100755 index 00000000..7d0800a7 --- /dev/null +++ b/daemon/examples/netns/ospfmanetmdrtest.py @@ -0,0 +1,604 @@ +#!/usr/bin/python + +# Copyright (c)2011-2014 the Boeing Company. +# See the LICENSE file included in this distribution. + +# create a random topology running OSPFv3 MDR, wait and then check +# that all neighbor states are either full or two-way, and check the routes +# in zebra vs those installed in the kernel. + +import datetime +import optparse +import os +import random +import sys +import time +from string import Template + +from core.constants import QUAGGA_STATE_DIR + +from core.misc import ipaddress +from core.misc.utils import check_cmd +from core.netns import nodes + +# this is the /etc/core/core.conf default +from core.session import Session + +quagga_sbin_search = ("/usr/local/sbin", "/usr/sbin", "/usr/lib/quagga") +quagga_path = "zebra" + +# sanity check that zebra is installed +try: + for p in quagga_sbin_search: + if os.path.exists(os.path.join(p, "zebra")): + quagga_path = p + break + check_cmd([os.path.join(quagga_path, "zebra"), "-u", "root", "-g", "root", "-v"]) +except OSError: + sys.stderr.write("ERROR: running zebra failed\n") + sys.exit(1) + + +class ManetNode(nodes.LxcNode): + """ An Lxc namespace node configured for Quagga OSPFv3 MANET MDR + """ + conftemp = Template("""\ +interface eth0 + ip address $ipaddr + ipv6 ospf6 instance-id 65 + ipv6 ospf6 hello-interval 2 + ipv6 ospf6 dead-interval 6 + ipv6 ospf6 retransmit-interval 5 + ipv6 ospf6 network manet-designated-router + ipv6 ospf6 diffhellos + ipv6 ospf6 adjacencyconnectivity biconnected + ipv6 ospf6 lsafullness mincostlsa +! +router ospf6 + router-id $routerid + interface eth0 area 0.0.0.0 +! 
+ip forwarding +""") + + confdir = "/usr/local/etc/quagga" + + def __init__(self, core, ipaddr, routerid=None, + objid=None, name=None, nodedir=None): + if routerid is None: + routerid = ipaddr.split("/")[0] + self.ipaddr = ipaddr + self.routerid = routerid + nodes.LxcNode.__init__(self, core, objid, name, nodedir) + self.privatedir(self.confdir) + self.privatedir(QUAGGA_STATE_DIR) + + def qconf(self): + return self.conftemp.substitute(ipaddr=self.ipaddr, + routerid=self.routerid) + + def config(self): + filename = os.path.join(self.confdir, "Quagga.conf") + f = self.opennodefile(filename, "w") + f.write(self.qconf()) + f.close() + tmp = self.bootscript() + if tmp: + self.nodefile(self.bootsh, tmp, mode=0755) + + def boot(self): + self.config() + self.session.services.boot_services(self) + + def bootscript(self): + return """\ +#!/bin/sh -e + +STATEDIR=%s + +waitfile() +{ + fname=$1 + + i=0 + until [ -e $fname ]; do + i=$(($i + 1)) + if [ $i -eq 10 ]; then + echo "file not found: $fname" >&2 + exit 1 + fi + sleep 0.1 + done +} + +mkdir -p $STATEDIR + +%s/zebra -d -u root -g root +waitfile $STATEDIR/zebra.vty + +%s/ospf6d -d -u root -g root +waitfile $STATEDIR/ospf6d.vty + +vtysh -b +""" % (QUAGGA_STATE_DIR, quagga_path, quagga_path) + + +class Route(object): + """ Helper class for organzing routing table entries. """ + + def __init__(self, prefix=None, gw=None, metric=None): + try: + self.prefix = ipaddress.Ipv4Prefix(prefix) + except Exception, e: + raise ValueError, "Invalid prefix given to Route object: %s\n%s" % \ + (prefix, e) + self.gw = gw + self.metric = metric + + def __eq__(self, other): + try: + return self.prefix == other.prefix and self.gw == other.gw and \ + self.metric == other.metric + except: + return False + + def __str__(self): + return "(%s,%s,%s)" % (self.prefix, self.gw, self.metric) + + @staticmethod + def key(r): + if not r.prefix: + return 0 + return r.prefix.prefix + + +class ManetExperiment(object): + """ A class for building an MDR network and checking and logging its state. + """ + + def __init__(self, options, start): + """ Initialize with options and start time. """ + self.session = None + # node list + self.nodes = [] + # WLAN network + self.net = None + self.verbose = options.verbose + # dict from OptionParser + self.options = options + self.start = start + self.logbegin() + + def info(self, msg): + ''' Utility method for writing output to stdout. ''' + print msg + sys.stdout.flush() + self.log(msg) + + def warn(self, msg): + ''' Utility method for writing output to stderr. ''' + print >> sys.stderr, msg + sys.stderr.flush() + self.log(msg) + + def logbegin(self): + """ Start logging. """ + self.logfp = None + if not self.options.logfile: + return + self.logfp = open(self.options.logfile, "w") + self.log("ospfmanetmdrtest begin: %s\n" % self.start.ctime()) + + def logend(self): + """ End logging. """ + if not self.logfp: + return + end = datetime.datetime.now() + self.log("ospfmanetmdrtest end: %s (%s)\n" % \ + (end.ctime(), end - self.start)) + self.logfp.flush() + self.logfp.close() + self.logfp = None + + def log(self, msg): + """ Write to the log file, if any. """ + if not self.logfp: + return + print >> self.logfp, msg + + def logdata(self, nbrs, mdrs, lsdbs, krs, zrs): + """ Dump experiment parameters and data to the log file. 
""" + self.log("ospfmantetmdrtest data:") + self.log("----- parameters -----") + self.log("%s" % self.options) + self.log("----- neighbors -----") + for rtrid in sorted(nbrs.keys()): + self.log("%s: %s" % (rtrid, nbrs[rtrid])) + self.log("----- mdr levels -----") + self.log(mdrs) + self.log("----- link state databases -----") + for rtrid in sorted(lsdbs.keys()): + self.log("%s lsdb:" % rtrid) + for line in lsdbs[rtrid].split("\n"): + self.log(line) + self.log("----- kernel routes -----") + for rtrid in sorted(krs.keys()): + msg = rtrid + ": " + for rt in krs[rtrid]: + msg += "%s" % rt + self.log(msg) + self.log("----- zebra routes -----") + for rtrid in sorted(zrs.keys()): + msg = rtrid + ": " + for rt in zrs[rtrid]: + msg += "%s" % rt + self.log(msg) + + def topology(self, numnodes, linkprob, verbose=False): + """ Build a topology consisting of the given number of ManetNodes + connected to a WLAN and probabilty of links and set + the session, WLAN, and node list objects. + """ + # IP subnet + prefix = ipaddress.Ipv4Prefix("10.14.0.0/16") + self.session = Session(1) + # emulated network + self.net = self.session.add_object(cls=nodes.WlanNode) + for i in xrange(1, numnodes + 1): + addr = "%s/%s" % (prefix.addr(i), 32) + tmp = self.session.add_object(cls=ManetNode, ipaddr=addr, objid="%d" % i, name="n%d" % i) + tmp.newnetif(self.net, [addr]) + self.nodes.append(tmp) + # connect nodes with probability linkprob + for i in xrange(numnodes): + for j in xrange(i + 1, numnodes): + r = random.random() + if r < linkprob: + if self.verbose: + self.info("linking (%d,%d)" % (i, j)) + self.net.link(self.nodes[i].netif(0), self.nodes[j].netif(0)) + # force one link to avoid partitions (should check if this is needed) + j = i + while j == i: + j = random.randint(0, numnodes - 1) + if self.verbose: + self.info("linking (%d,%d)" % (i, j)) + self.net.link(self.nodes[i].netif(0), self.nodes[j].netif(0)) + self.nodes[i].boot() + # run the boot.sh script on all nodes to start Quagga + for i in xrange(numnodes): + self.nodes[i].cmd(["./%s" % self.nodes[i].bootsh]) + + def compareroutes(self, node, kr, zr): + """ Compare two lists of Route objects. + """ + kr.sort(key=Route.key) + zr.sort(key=Route.key) + if kr != zr: + self.warn("kernel and zebra routes differ") + if self.verbose: + msg = "kernel: " + for r in kr: + msg += "%s " % r + msg += "\nzebra: " + for r in zr: + msg += "%s " % r + self.warn(msg) + else: + self.info(" kernel and zebra routes match") + + def comparemdrlevels(self, nbrs, mdrs): + """ Check that all routers form a connected dominating set, i.e. all + routers are either MDR, BMDR, or adjacent to one. + """ + msg = "All routers form a CDS" + for n in self.nodes: + if mdrs[n.routerid] != "OTHER": + continue + connected = False + for nbr in nbrs[n.routerid]: + if mdrs[nbr] == "MDR" or mdrs[nbr] == "BMDR": + connected = True + break + if not connected: + msg = "All routers do not form a CDS" + self.warn("XXX %s: not in CDS; neighbors: %s" % \ + (n.routerid, nbrs[n.routerid])) + if self.verbose: + self.info(msg) + + def comparelsdbs(self, lsdbs): + """ Check LSDBs for consistency. 
+ """ + msg = "LSDBs of all routers are consistent" + prev = self.nodes[0] + for n in self.nodes: + db = lsdbs[n.routerid] + if lsdbs[prev.routerid] != db: + msg = "LSDBs of all routers are not consistent" + self.warn("XXX LSDBs inconsistent for %s and %s" % \ + (n.routerid, prev.routerid)) + i = 0 + for entry in lsdbs[n.routerid].split("\n"): + preventries = lsdbs[prev.routerid].split("\n") + try: + preventry = preventries[i] + except IndexError: + preventry = None + if entry != preventry: + self.warn("%s: %s" % (n.routerid, entry)) + self.warn("%s: %s" % (prev.routerid, preventry)) + i += 1 + prev = n + if self.verbose: + self.info(msg) + + def checknodes(self): + """ Check the neighbor state and routing tables of all nodes. """ + nbrs = {} + mdrs = {} + lsdbs = {} + krs = {} + zrs = {} + v = self.verbose + for n in self.nodes: + self.info("checking %s" % n.name) + nbrs[n.routerid] = Ospf6NeighState(n, verbose=v).run() + krs[n.routerid] = KernelRoutes(n, verbose=v).run() + zrs[n.routerid] = ZebraRoutes(n, verbose=v).run() + self.compareroutes(n, krs[n.routerid], zrs[n.routerid]) + mdrs[n.routerid] = Ospf6MdrLevel(n, verbose=v).run() + lsdbs[n.routerid] = Ospf6Database(n, verbose=v).run() + self.comparemdrlevels(nbrs, mdrs) + self.comparelsdbs(lsdbs) + self.logdata(nbrs, mdrs, lsdbs, krs, zrs) + + +class Cmd: + """ Helper class for running a command on a node and parsing the result. """ + args = "" + + def __init__(self, node, verbose=False): + """ Initialize with a CoreNode (LxcNode) """ + self.id = None + self.stdin = None + self.out = None + self.node = node + self.verbose = verbose + + def info(self, msg): + ''' Utility method for writing output to stdout.''' + print msg + sys.stdout.flush() + + def warn(self, msg): + ''' Utility method for writing output to stderr. ''' + print >> sys.stderr, "XXX %s:" % self.node.routerid, msg + sys.stderr.flush() + + def run(self): + """ This is the primary method used for running this command. """ + self.open() + r = self.parse() + self.cleanup() + return r + + def open(self): + """ Exceute call to node.popen(). """ + self.id, self.stdin, self.out, self.err = self.node.client.popen(self.args) + + def parse(self): + """ This method is overloaded by child classes and should return some + result. + """ + return None + + def cleanup(self): + """ Close the Popen channels.""" + self.stdin.close() + self.out.close() + self.err.close() + tmp = self.id.wait() + if tmp: + self.warn("nonzero exit status:", tmp) + + +class VtyshCmd(Cmd): + """ Runs a vtysh command. """ + + def open(self): + args = ("vtysh", "-c", self.args) + self.id, self.stdin, self.out, self.err = self.node.client.popen(args) + + +class Ospf6NeighState(VtyshCmd): + """ Check a node for OSPFv3 neighbors in the full/two-way states. """ + args = "show ipv6 ospf6 neighbor" + + def parse(self): + # skip first line + self.out.readline() + nbrlist = [] + for line in self.out: + field = line.split() + nbr = field[0] + state = field[3].split("/")[0] + if not state.lower() in ("full", "twoway"): + self.warn("neighbor %s state: %s" % (nbr, state)) + nbrlist.append(nbr) + + if len(nbrlist) == 0: + self.warn("no neighbors") + if self.verbose: + self.info(" %s has %d neighbors" % (self.node.routerid, len(nbrlist))) + return nbrlist + + +class Ospf6MdrLevel(VtyshCmd): + """ Retrieve the OSPFv3 MDR level for a node. 
""" + args = "show ipv6 ospf6 mdrlevel" + + def parse(self): + line = self.out.readline() + # TODO: handle multiple interfaces + field = line.split() + mdrlevel = field[4] + if not mdrlevel in ("MDR", "BMDR", "OTHER"): + self.warn("mdrlevel: %s" % mdrlevel) + if self.verbose: + self.info(" %s is %s" % (self.node.routerid, mdrlevel)) + return mdrlevel + + +class Ospf6Database(VtyshCmd): + """ Retrieve the OSPFv3 LSDB summary for a node. """ + args = "show ipv6 ospf6 database" + + def parse(self): + db = "" + for line in self.out: + field = line.split() + if len(field) < 8: + continue + # filter out Age and Duration columns + filtered = field[:3] + field[4:7] + db += " ".join(filtered) + "\n" + return db + + +class ZebraRoutes(VtyshCmd): + """ Return a list of Route objects for a node based on its zebra + routing table. + """ + args = "show ip route" + + def parse(self): + for i in xrange(0, 3): + # skip first three lines + self.out.readline() + r = [] + prefix = None + for line in self.out: + field = line.split() + if len(field) < 1: + continue + # only use OSPFv3 selected FIB routes + elif field[0][:2] == "o>": + prefix = field[1] + metric = field[2].split("/")[1][:-1] + if field[0][2:] != "*": + continue + if field[3] == "via": + gw = field[4][:-1] + else: + gw = field[6][:-1] + r.append(Route(prefix, gw, metric)) + prefix = None + elif prefix and field[0] == "*": + # already have prefix and metric from previous line + gw = field[2][:-1] + r.append(Route(prefix, gw, metric)) + prefix = None + + if len(r) == 0: + self.warn("no zebra routes") + if self.verbose: + self.info(" %s has %d zebra routes" % (self.node.routerid, len(r))) + return r + + +class KernelRoutes(Cmd): + """ Return a list of Route objects for a node based on its kernel + routing table. 
+ """ + args = ("/sbin/ip", "route", "show") + + def parse(self): + r = [] + prefix = None + for line in self.out: + field = line.split() + if field[0] == "nexthop": + if not prefix: + # this saves only the first nexthop entry if multiple exist + continue + else: + prefix = field[0] + metric = field[-1] + tmp = prefix.split("/") + if len(tmp) < 2: + prefix += "/32" + if field[1] == "proto": + # nexthop entry is on the next line + continue + # nexthop IP or interface + gw = field[2] + r.append(Route(prefix, gw, metric)) + prefix = None + + if len(r) == 0: + self.warn("no kernel routes") + if self.verbose: + self.info(" %s has %d kernel routes" % (self.node.routerid, len(r))) + return r + + +def main(): + usagestr = "usage: %prog [-h] [options] [args]" + parser = optparse.OptionParser(usage=usagestr) + parser.set_defaults(numnodes=10, linkprob=0.35, delay=20, seed=None) + + parser.add_option("-n", "--numnodes", dest="numnodes", type=int, + help="number of nodes") + parser.add_option("-p", "--linkprob", dest="linkprob", type=float, + help="link probabilty") + parser.add_option("-d", "--delay", dest="delay", type=float, + help="wait time before checking") + parser.add_option("-s", "--seed", dest="seed", type=int, + help="specify integer to use for random seed") + parser.add_option("-v", "--verbose", dest="verbose", + action="store_true", help="be more verbose") + parser.add_option("-l", "--logfile", dest="logfile", type=str, + help="log detailed output to the specified file") + + def usage(msg=None, err=0): + sys.stdout.write("\n") + if msg: + sys.stdout.write(msg + "\n\n") + parser.print_help() + sys.exit(err) + + # parse command line options + (options, args) = parser.parse_args() + + if options.numnodes < 2: + usage("invalid numnodes: %s" % options.numnodes) + if options.linkprob <= 0.0 or options.linkprob > 1.0: + usage("invalid linkprob: %s" % options.linkprob) + if options.delay < 0.0: + usage("invalid delay: %s" % options.delay) + + for a in args: + sys.stderr.write("ignoring command line argument: '%s'\n" % a) + + if options.seed: + random.seed(options.seed) + + me = ManetExperiment(options=options, start=datetime.datetime.now()) + me.info("creating topology: numnodes = %s; linkprob = %s" % \ + (options.numnodes, options.linkprob)) + me.topology(options.numnodes, options.linkprob) + + me.info("waiting %s sec" % options.delay) + time.sleep(options.delay) + me.info("checking neighbor state and routes") + me.checknodes() + me.info("done") + me.info("elapsed time: %s" % (datetime.datetime.now() - me.start)) + me.logend() + + return me + + +if __name__ == "__main__": + me = main() diff --git a/daemon/examples/netns/wlanemanetests.py b/daemon/examples/netns/wlanemanetests.py new file mode 100755 index 00000000..9befcc23 --- /dev/null +++ b/daemon/examples/netns/wlanemanetests.py @@ -0,0 +1,847 @@ +#!/usr/bin/python + +# Copyright (c)2011-2014 the Boeing Company. +# See the LICENSE file included in this distribution. +# +# author: Jeff Ahrenholz +# +""" +wlanemanetests.py - This script tests the performance of the WLAN device in +CORE by measuring various metrics: + - delay experienced when pinging end-to-end + - maximum TCP throughput achieved using iperf end-to-end + - the CPU used and loss experienced when running an MGEN flow of UDP traffic + +All MANET nodes are arranged in a row, so that any given node can only +communicate with the node to its right or to its left. Performance is measured +using traffic that travels across each hop in the network. 
Static /32 routing +is used instead of any dynamic routing protocol. + +Various underlying network types are tested: + - bridged (the CORE default, uses ebtables) + - bridged with netem (add link effects to the bridge using tc queues) + - EMANE bypass - the bypass model just forwards traffic + - EMANE RF-PIPE - the bandwidth (bitrate) is set very high / no restrictions + - EMANE RF-PIPE - bandwidth is set similar to netem case + - EMANE RF-PIPE - default connectivity is off and pathloss events are + generated to connect the nodes in a line + +Results are printed/logged in CSV format. + +""" + +import datetime +import math +import optparse +import os +import sys +import time + +from core import emane +from core.emane.bypass import EmaneBypassModel +from core.emane.nodes import EmaneNode +from core.emane.rfpipe import EmaneRfPipeModel +from core.misc import ipaddress +from core.netns import nodes +from core.session import Session + +try: + import emaneeventservice + import emaneeventpathloss +except Exception, e: + try: + from emanesh.events import EventService + from emanesh.events import PathlossEvent + except Exception, e2: + raise ImportError("failed to import EMANE Python bindings:\n%s\n%s" % (e, e2)) + +# global Experiment object (for interaction with "python -i") +exp = None + + +# move these to core.misc.utils +def readstat(): + f = open("/proc/stat", "r") + lines = f.readlines() + f.close() + return lines + + +def numcpus(): + lines = readstat() + n = 0 + for l in lines[1:]: + if l[:3] != "cpu": + break + n += 1 + return n + + +def getcputimes(line): + # return (user, nice, sys, idle) from a /proc/stat cpu line + # assume columns are: + # cpu# user nice sys idle iowait irq softirq steal guest (man 5 proc) + items = line.split() + (user, nice, sys, idle) = map(lambda (x): int(x), items[1:5]) + return [user, nice, sys, idle] + + +def calculatecpu(timesa, timesb): + for i in range(len(timesa)): + timesb[i] -= timesa[i] + total = sum(timesb) + if total == 0: + return 0.0 + else: + # subtract % time spent in idle time + return 100 - ((100.0 * timesb[-1]) / total) + + +# end move these to core.misc.utils + +class Cmd(object): + """ Helper class for running a command on a node and parsing the result. """ + args = "" + + def __init__(self, node, verbose=False): + """ Initialize with a CoreNode (LxcNode) """ + self.id = None + self.stdin = None + self.out = None + self.node = node + self.verbose = verbose + + def info(self, msg): + """ Utility method for writing output to stdout.""" + print msg + sys.stdout.flush() + + def warn(self, msg): + """ Utility method for writing output to stderr. """ + print >> sys.stderr, "XXX %s:" % self.node.name, msg + sys.stderr.flush() + + def run(self): + """ This is the primary method used for running this command. """ + self.open() + status = self.id.wait() + r = self.parse() + self.cleanup() + return r + + def open(self): + """ Exceute call to node.popen(). """ + self.id, self.stdin, self.out, self.err = self.node.client.popen(self.args) + + def parse(self): + """ This method is overloaded by child classes and should return some + result. + """ + return None + + def cleanup(self): + """ Close the Popen channels.""" + self.stdin.close() + self.out.close() + self.err.close() + tmp = self.id.wait() + if tmp: + self.warn("nonzero exit status:", tmp) + + +class ClientServerCmd(Cmd): + """ Helper class for running a command on a node and parsing the result. 
""" + args = "" + client_args = "" + + def __init__(self, node, client_node, verbose=False): + """ Initialize with two CoreNodes, node is the server """ + Cmd.__init__(self, node, verbose) + self.client_node = client_node + + def run(self): + """ Run the server command, then the client command, then + kill the server """ + self.open() # server + self.client_open() # client + status = self.client_id.wait() + # stop the server + self.node.cmd_output(["killall", self.args[0]]) + r = self.parse() + self.cleanup() + return r + + def client_open(self): + """ Exceute call to client_node.popen(). """ + self.client_id, self.client_stdin, self.client_out, self.client_err = \ + self.client_node.client.popen(self.client_args) + + def parse(self): + """ This method is overloaded by child classes and should return some + result. + """ + return None + + def cleanup(self): + """ Close the Popen channels.""" + self.stdin.close() + self.out.close() + self.err.close() + tmp = self.id.wait() + if tmp: + self.warn("nonzero exit status: %s" % tmp) + self.warn("command was: %s" % (self.args,)) + + +class PingCmd(Cmd): + """ Test latency using ping. + """ + + def __init__(self, node, verbose=False, addr=None, count=50, interval=0.1, ): + Cmd.__init__(self, node, verbose) + self.addr = addr + self.count = count + self.interval = interval + self.args = ["ping", "-q", "-c", "%s" % count, "-i", "%s" % interval, addr] + + def run(self): + if self.verbose: + self.info("%s initial test ping (max 1 second)..." % self.node.name) + (status, result) = self.node.cmd_output(["ping", "-q", "-c", "1", "-w", "1", self.addr]) + if status != 0: + self.warn("initial ping from %s to %s failed! result:\n%s" % + (self.node.name, self.addr, result)) + return 0.0, 0.0 + if self.verbose: + self.info("%s pinging %s (%d seconds)..." % + (self.node.name, self.addr, self.count * self.interval)) + return Cmd.run(self) + + def parse(self): + lines = self.out.readlines() + avg_latency = 0 + mdev = 0 + try: + stats_str = lines[-1].split("=")[1] + stats = stats_str.split("/") + avg_latency = float(stats[1]) + mdev = float(stats[3].split(" ")[0]) + except: + self.warn("ping parsing exception: %s" % e) + return avg_latency, mdev + + +class IperfCmd(ClientServerCmd): + """ Test throughput using iperf. + """ + + def __init__(self, node, client_node, verbose=False, addr=None, time=10): + # node is the server + ClientServerCmd.__init__(self, node, client_node, verbose) + self.addr = addr + self.time = time + # -s server, -y c CSV report output + self.args = ["iperf", "-s", "-y", "c"] + self.client_args = ["iperf", "-c", self.addr, "-t", "%s" % self.time] + + def run(self): + if self.verbose: + self.info("Launching the iperf server on %s..." % self.node.name) + self.info("Running the iperf client on %s (%s seconds)..." % \ + (self.client_node.name, self.time)) + return ClientServerCmd.run(self) + + def parse(self): + lines = self.out.readlines() + try: + bps = int(lines[-1].split(",")[-1].strip("\n")) + except Exception, e: + self.warn("iperf parsing exception: %s" % e) + bps = 0 + return bps + + +class MgenCmd(ClientServerCmd): + """ Run a test traffic flow using an MGEN sender and receiver. 
+ """ + + def __init__(self, node, client_node, verbose=False, addr=None, time=10, + rate=512): + ClientServerCmd.__init__(self, node, client_node, verbose) + self.addr = addr + self.time = time + self.args = ["mgen", "event", "listen udp 5000", "output", + "/var/log/mgen.log"] + self.rate = rate + sendevent = "ON 1 UDP DST %s/5000 PERIODIC [%s]" % \ + (addr, self.mgenrate(self.rate)) + stopevent = "%s OFF 1" % time + self.client_args = ["mgen", "event", sendevent, "event", stopevent, + "output", "/var/log/mgen.log"] + + @staticmethod + def mgenrate(kbps): + """ Return a MGEN periodic rate string for the given kilobits-per-sec. + Assume 1500 byte MTU, 20-byte IP + 8-byte UDP headers, leaving + 1472 bytes for data. + """ + bps = (kbps / 8) * 1000.0 + maxdata = 1472 + pps = math.ceil(bps / maxdata) + return "%s %s" % (pps, maxdata) + + def run(self): + if self.verbose: + self.info("Launching the MGEN receiver on %s..." % self.node.name) + self.info("Running the MGEN sender on %s (%s seconds)..." % \ + (self.client_node.name, self.time)) + return ClientServerCmd.run(self) + + def cleanup(self): + """ Close the Popen channels.""" + self.stdin.close() + self.out.close() + self.err.close() + # non-zero mgen exit status OK + tmp = self.id.wait() + + def parse(self): + """ Check MGEN receiver"s log file for packet sequence numbers, and + return the percentage of lost packets. + """ + logfile = os.path.join(self.node.nodedir, "var.log/mgen.log") + f = open(logfile, "r") + numlost = 0 + lastseq = 0 + for line in f.readlines(): + fields = line.split() + if fields[1] != "RECV": + continue + try: + seq = int(fields[4].split(">")[1]) + except: + self.info("Unexpected MGEN line:\n%s" % fields) + if seq > (lastseq + 1): + numlost += seq - (lastseq + 1) + lastseq = seq + f.close() + if lastseq > 0: + loss = 100.0 * numlost / lastseq + else: + loss = 0 + if self.verbose: + self.info("Receiver log shows %d of %d packets lost" % \ + (numlost, lastseq)) + return loss + + +class Experiment(object): + """ Experiment object to organize tests. + """ + + def __init__(self, opt, start): + """ Initialize with opt and start time. """ + self.session = None + # node list + self.nodes = [] + # WLAN network + self.net = None + self.verbose = opt.verbose + # dict from OptionParser + self.opt = opt + self.start = start + self.numping = opt.numping + self.numiperf = opt.numiperf + self.nummgen = opt.nummgen + self.logbegin() + + def info(self, msg): + """ Utility method for writing output to stdout. """ + print msg + sys.stdout.flush() + self.log(msg) + + def warn(self, msg): + """ Utility method for writing output to stderr. """ + print >> sys.stderr, msg + sys.stderr.flush() + self.log(msg) + + def logbegin(self): + """ Start logging. """ + self.logfp = None + if not self.opt.logfile: + return + self.logfp = open(self.opt.logfile, "w") + self.log("%s begin: %s\n" % (sys.argv[0], self.start.ctime())) + self.log("%s args: %s\n" % (sys.argv[0], sys.argv[1:])) + (sysname, rel, ver, machine, nodename) = os.uname() + self.log("%s %s %s %s on %s" % (sysname, rel, ver, machine, nodename)) + + def logend(self): + """ End logging. """ + if not self.logfp: + return + end = datetime.datetime.now() + self.log("%s end: %s (%s)\n" % \ + (sys.argv[0], end.ctime(), end - self.start)) + self.logfp.flush() + self.logfp.close() + self.logfp = None + + def log(self, msg): + """ Write to the log file, if any. """ + if not self.logfp: + return + print >> self.logfp, msg + + def reset(self): + """ Prepare for another experiment run. 
+ """ + if self.session: + self.session.shutdown() + del self.session + self.session = None + self.nodes = [] + self.net = None + + def createbridgedsession(self, numnodes, verbose=False): + """ Build a topology consisting of the given number of LxcNodes + connected to a WLAN. + """ + # IP subnet + prefix = ipaddress.Ipv4Prefix("10.0.0.0/16") + self.session = Session(1) + # emulated network + self.net = self.session.add_object(cls=nodes.WlanNode, name="wlan1") + prev = None + for i in xrange(1, numnodes + 1): + addr = "%s/%s" % (prefix.addr(i), 32) + tmp = self.session.add_object(cls=nodes.CoreNode, objid=i, name="n%d" % i) + tmp.newnetif(self.net, [addr]) + self.nodes.append(tmp) + self.session.services.add_services(tmp, "router", "IPForward") + self.session.services.boot_services(tmp) + self.staticroutes(i, prefix, numnodes) + + # link each node in a chain, with the previous node + if prev: + self.net.link(prev.netif(0), tmp.netif(0)) + prev = tmp + + def createemanesession(self, numnodes, verbose=False, cls=None, values=None): + """ Build a topology consisting of the given number of LxcNodes + connected to an EMANE WLAN. + """ + prefix = ipaddress.Ipv4Prefix("10.0.0.0/16") + self.session = Session(2) + self.session.node_count = str(numnodes + 1) + self.session.master = True + self.session.location.setrefgeo(47.57917, -122.13232, 2.00000) + self.session.location.refscale = 150.0 + self.session.emane.loadmodels() + self.net = self.session.add_object(cls=EmaneNode, objid=numnodes + 1, name="wlan1") + self.net.verbose = verbose + # self.session.emane.addobj(self.net) + for i in xrange(1, numnodes + 1): + addr = "%s/%s" % (prefix.addr(i), 32) + tmp = self.session.add_object(cls=nodes.CoreNode, objid=i, + name="n%d" % i) + # tmp.setposition(i * 20, 50, None) + tmp.setposition(50, 50, None) + tmp.newnetif(self.net, [addr]) + self.nodes.append(tmp) + self.session.services.add_services(tmp, "router", "IPForward") + + if values is None: + values = cls.getdefaultvalues() + self.session.emane.setconfig(self.net.objid, cls.name, values) + self.session.instantiate() + + self.info("waiting %s sec (TAP bring-up)" % 2) + time.sleep(2) + + for i in xrange(1, numnodes + 1): + tmp = self.nodes[i - 1] + self.session.services.boot_services(tmp) + self.staticroutes(i, prefix, numnodes) + + def setnodes(self): + """ Set the sender and receiver nodes for use in this experiment, + along with the address of the receiver to be used. + """ + self.firstnode = self.nodes[0] + self.lastnode = self.nodes[-1] + self.lastaddr = self.lastnode.netif(0).addrlist[0].split("/")[0] + + def staticroutes(self, i, prefix, numnodes): + """ Add static routes on node number i to the other nodes in the chain. 
+ """ + routecmd = ["/sbin/ip", "route", "add"] + node = self.nodes[i - 1] + neigh_left = "" + neigh_right = "" + # add direct interface routes first + if i > 1: + neigh_left = "%s" % prefix.addr(i - 1) + cmd = routecmd + [neigh_left, "dev", node.netif(0).name] + (status, result) = node.cmd_output(cmd) + if status != 0: + self.warn("failed to add interface route: %s" % cmd) + if i < numnodes: + neigh_right = "%s" % prefix.addr(i + 1) + cmd = routecmd + [neigh_right, "dev", node.netif(0).name] + (status, result) = node.cmd_output(cmd) + if status != 0: + self.warn("failed to add interface route: %s" % cmd) + + # add static routes to all other nodes via left/right neighbors + for j in xrange(1, numnodes + 1): + if abs(j - i) < 2: + continue + addr = "%s" % prefix.addr(j) + if j < i: + gw = neigh_left + else: + gw = neigh_right + cmd = routecmd + [addr, "via", gw] + (status, result) = node.cmd_output(cmd) + if status != 0: + self.warn("failed to add route: %s" % cmd) + + def setpathloss(self, numnodes): + """ Send EMANE pathloss events to connect all NEMs in a chain. + """ + if self.session.emane.version < self.session.emane.EMANE091: + service = emaneeventservice.EventService() + e = emaneeventpathloss.EventPathloss(1) + old = True + else: + if self.session.emane.version == self.session.emane.EMANE091: + dev = "lo" + else: + dev = self.session.obj("ctrlnet").brname + service = EventService(eventchannel=("224.1.2.8", 45703, dev), + otachannel=None) + old = False + + for i in xrange(1, numnodes + 1): + rxnem = i + # inform rxnem that it can hear node to the left with 10dB noise + txnem = rxnem - 1 + if txnem > 0: + if old: + e.set(0, txnem, 10.0, 10.0) + service.publish(emaneeventpathloss.EVENT_ID, + emaneeventservice.PLATFORMID_ANY, rxnem, + emaneeventservice.COMPONENTID_ANY, e.export()) + else: + e = PathlossEvent() + e.append(txnem, forward=10.0, reverse=10.0) + service.publish(rxnem, e) + # inform rxnem that it can hear node to the right with 10dB noise + txnem = rxnem + 1 + if txnem > numnodes: + continue + if old: + e.set(0, txnem, 10.0, 10.0) + service.publish(emaneeventpathloss.EVENT_ID, + emaneeventservice.PLATFORMID_ANY, rxnem, + emaneeventservice.COMPONENTID_ANY, e.export()) + else: + e = PathlossEvent() + e.append(txnem, forward=10.0, reverse=10.0) + service.publish(rxnem, e) + + def setneteffects(self, bw=None, delay=None): + """ Set link effects for all interfaces attached to the network node. + """ + if not self.net: + self.warn("failed to set effects: no network node") + return + for netif in self.net.netifs(): + self.net.linkconfig(netif, bw=bw, delay=delay) + + def runalltests(self, title=""): + """ Convenience helper to run all defined experiment tests. + If tests are run multiple times, this returns the average of + those runs. 
+ """ + duration = self.opt.duration + rate = self.opt.rate + if len(title) > 0: + self.info("----- running %s tests (duration=%s, rate=%s) -----" % \ + (title, duration, rate)) + (latency, mdev, throughput, cpu, loss) = (0, 0, 0, 0, 0) + + self.info("number of runs: ping=%d, iperf=%d, mgen=%d" % \ + (self.numping, self.numiperf, self.nummgen)) + + if self.numping > 0: + (latency, mdev) = self.pingtest(count=self.numping) + + if self.numiperf > 0: + throughputs = [] + for i in range(1, self.numiperf + 1): + throughput = self.iperftest(time=duration) + if self.numiperf > 1: + throughputs += throughput + # iperf is very CPU intensive + time.sleep(1) + if self.numiperf > 1: + throughput = sum(throughputs) / len(throughputs) + self.info("throughputs=%s" % ["%.2f" % v for v in throughputs]) + + if self.nummgen > 0: + cpus = [] + losses = [] + for i in range(1, self.nummgen + 1): + (cpu, loss) = self.cputest(time=duration, rate=rate) + if self.nummgen > 1: + cpus += cpu, + losses += loss, + if self.nummgen > 1: + cpu = sum(cpus) / len(cpus) + loss = sum(losses) / len(losses) + self.info("cpus=%s" % ["%.2f" % v for v in cpus]) + self.info("losses=%s" % ["%.2f" % v for v in losses]) + + return latency, mdev, throughput, cpu, loss + + def pingtest(self, count=50): + """ Ping through a chain of nodes and report the average latency. + """ + p = PingCmd(node=self.firstnode, verbose=self.verbose, + addr=self.lastaddr, count=count, interval=0.1).run() + (latency, mdev) = p + self.info("latency (ms): %.03f, %.03f" % (latency, mdev)) + return p + + def iperftest(self, time=10): + """ Run iperf through a chain of nodes and report the maximum + throughput. + """ + bps = IperfCmd(node=self.lastnode, client_node=self.firstnode, + verbose=False, addr=self.lastaddr, time=time).run() + self.info("throughput (bps): %s" % bps) + return bps + + def cputest(self, time=10, rate=512): + """ Run MGEN through a chain of nodes and report the CPU usage and + percent of lost packets. Rate is in kbps. + """ + if self.verbose: + self.info("%s initial test ping (max 1 second)..." % \ + self.firstnode.name) + (status, result) = self.firstnode.cmd_output(["ping", "-q", "-c", "1", + "-w", "1", self.lastaddr]) + if status != 0: + self.warn("initial ping from %s to %s failed! result:\n%s" % \ + (self.firstnode.name, self.lastaddr, result)) + return 0.0, 0.0 + lines = readstat() + cpustart = getcputimes(lines[0]) + loss = MgenCmd(node=self.lastnode, client_node=self.firstnode, + verbose=False, addr=self.lastaddr, + time=time, rate=rate).run() + lines = readstat() + cpuend = getcputimes(lines[0]) + percent = calculatecpu(cpustart, cpuend) + self.info("CPU usage (%%): %.02f, %.02f loss" % (percent, loss)) + return percent, loss + + +def main(): + """ Main routine when running from command-line. 
+ """ + usagestr = "usage: %prog [-h] [options] [args]" + parser = optparse.OptionParser(usage=usagestr) + parser.set_defaults(numnodes=10, delay=3, duration=10, rate=512, + verbose=False, + numping=50, numiperf=1, nummgen=1) + + parser.add_option("-d", "--delay", dest="delay", type=float, + help="wait time before testing") + parser.add_option("-l", "--logfile", dest="logfile", type=str, + help="log detailed output to the specified file") + parser.add_option("-n", "--numnodes", dest="numnodes", type=int, + help="number of nodes") + parser.add_option("-r", "--rate", dest="rate", type=float, + help="kbps rate to use for MGEN CPU tests") + parser.add_option("--numping", dest="numping", type=int, + help="number of ping latency test runs") + parser.add_option("--numiperf", dest="numiperf", type=int, + help="number of iperf throughput test runs") + parser.add_option("--nummgen", dest="nummgen", type=int, + help="number of MGEN CPU tests runs") + parser.add_option("-t", "--time", dest="duration", type=int, + help="duration in seconds of throughput and CPU tests") + parser.add_option("-v", "--verbose", dest="verbose", + action="store_true", help="be more verbose") + + def usage(msg=None, err=0): + sys.stdout.write("\n") + if msg: + sys.stdout.write(msg + "\n\n") + parser.print_help() + sys.exit(err) + + # parse command line opt + (opt, args) = parser.parse_args() + + if opt.numnodes < 2: + usage("invalid numnodes: %s" % opt.numnodes) + if opt.delay < 0.0: + usage("invalid delay: %s" % opt.delay) + if opt.rate < 0.0: + usage("invalid rate: %s" % opt.rate) + + for a in args: + sys.stderr.write("ignoring command line argument: %s\n" % a) + + results = {} + starttime = datetime.datetime.now() + exp = Experiment(opt=opt, start=starttime) + exp.info("Starting wlanemanetests.py tests %s" % starttime.ctime()) + + # bridged + exp.info("setting up bridged tests 1/2 no link effects") + exp.info("creating topology: numnodes = %s" % (opt.numnodes,)) + exp.createbridgedsession(numnodes=opt.numnodes, verbose=opt.verbose) + exp.setnodes() + exp.info("waiting %s sec (node/route bring-up)" % opt.delay) + time.sleep(opt.delay) + results["0 bridged"] = exp.runalltests("bridged") + exp.info("done; elapsed time: %s" % (datetime.datetime.now() - exp.start)) + + # bridged with netem + exp.info("setting up bridged tests 2/2 with netem") + exp.setneteffects(bw=54000000, delay=0) + exp.info("waiting %s sec (queue bring-up)" % opt.delay) + results["1.0 netem"] = exp.runalltests("netem") + exp.info("shutting down bridged session") + + # bridged with netem (1 Mbps,200ms) + exp.info("setting up bridged tests 3/2 with netem") + exp.setneteffects(bw=1000000, delay=20000) + exp.info("waiting %s sec (queue bring-up)" % opt.delay) + results["1.2 netem_1M"] = exp.runalltests("netem_1M") + exp.info("shutting down bridged session") + + # bridged with netem (54 kbps,500ms) + exp.info("setting up bridged tests 3/2 with netem") + exp.setneteffects(bw=54000, delay=100000) + exp.info("waiting %s sec (queue bring-up)" % opt.delay) + results["1.4 netem_54K"] = exp.runalltests("netem_54K") + exp.info("shutting down bridged session") + exp.reset() + + # EMANE bypass model + exp.info("setting up EMANE tests 1/2 with bypass model") + exp.createemanesession(numnodes=opt.numnodes, verbose=opt.verbose, cls=EmaneBypassModel, values=None) + exp.setnodes() + exp.info("waiting %s sec (node/route bring-up)" % opt.delay) + time.sleep(opt.delay) + results["2.0 bypass"] = exp.runalltests("bypass") + exp.info("shutting down bypass session") + exp.reset() + + 
exp.info("waiting %s sec (between EMANE tests)" % opt.delay) + time.sleep(opt.delay) + + # EMANE RF-PIPE model: no restrictions (max datarate) + exp.info("setting up EMANE tests 2/4 with RF-PIPE model") + rfpipevals = list(EmaneRfPipeModel.getdefaultvalues()) + rfpnames = EmaneRfPipeModel.getnames() + # max value + rfpipevals[rfpnames.index("datarate")] = "4294967295" + if emanever < emane.EMANE091: + rfpipevals[rfpnames.index("pathlossmode")] = "2ray" + rfpipevals[rfpnames.index("defaultconnectivitymode")] = "1" + else: + rfpipevals[rfpnames.index("propagationmodel")] = "2ray" + exp.createemanesession(numnodes=opt.numnodes, verbose=opt.verbose, cls=EmaneRfPipeModel, values=rfpipevals) + exp.setnodes() + exp.info("waiting %s sec (node/route bring-up)" % opt.delay) + time.sleep(opt.delay) + results["3.0 rfpipe"] = exp.runalltests("rfpipe") + exp.info("shutting down RF-PIPE session") + exp.reset() + + # EMANE RF-PIPE model: 54M datarate + exp.info("setting up EMANE tests 3/4 with RF-PIPE model 54M") + rfpipevals = list(EmaneRfPipeModel.getdefaultvalues()) + rfpnames = EmaneRfPipeModel.getnames() + rfpipevals[rfpnames.index("datarate")] = "54000000" + # TX delay != propagation delay + # rfpipevals[ rfpnames.index("delay") ] = "5000" + if emanever < emane.EMANE091: + rfpipevals[rfpnames.index("pathlossmode")] = "2ray" + rfpipevals[rfpnames.index("defaultconnectivitymode")] = "1" + else: + rfpipevals[rfpnames.index("propagationmodel")] = "2ray" + exp.createemanesession(numnodes=opt.numnodes, verbose=opt.verbose, + cls=EmaneRfPipeModel, values=rfpipevals) + exp.setnodes() + exp.info("waiting %s sec (node/route bring-up)" % opt.delay) + time.sleep(opt.delay) + results["4.0 rfpipe54m"] = exp.runalltests("rfpipe54m") + exp.info("shutting down RF-PIPE session") + exp.reset() + + # EMANE RF-PIPE model: 54K datarate + exp.info("setting up EMANE tests 4/4 with RF-PIPE model pathloss") + rfpipevals = list(EmaneRfPipeModel.getdefaultvalues()) + rfpnames = EmaneRfPipeModel.getnames() + rfpipevals[rfpnames.index("datarate")] = "54000" + if emanever < emane.EMANE091: + rfpipevals[rfpnames.index("pathlossmode")] = "pathloss" + rfpipevals[rfpnames.index("defaultconnectivitymode")] = "0" + else: + rfpipevals[rfpnames.index("propagationmodel")] = "precomputed" + exp.createemanesession(numnodes=opt.numnodes, verbose=opt.verbose, + cls=EmaneRfPipeModel, values=rfpipevals) + exp.setnodes() + exp.info("waiting %s sec (node/route bring-up)" % opt.delay) + time.sleep(opt.delay) + exp.info("sending pathloss events to govern connectivity") + exp.setpathloss(opt.numnodes) + results["5.0 pathloss"] = exp.runalltests("pathloss") + exp.info("shutting down RF-PIPE session") + exp.reset() + + # EMANE RF-PIPE model (512K, 200ms) + exp.info("setting up EMANE tests 4/4 with RF-PIPE model pathloss") + rfpipevals = list(EmaneRfPipeModel.getdefaultvalues()) + rfpnames = EmaneRfPipeModel.getnames() + rfpipevals[rfpnames.index("datarate")] = "512000" + rfpipevals[rfpnames.index("delay")] = "200" + rfpipevals[rfpnames.index("pathlossmode")] = "pathloss" + rfpipevals[rfpnames.index("defaultconnectivitymode")] = "0" + exp.createemanesession(numnodes=opt.numnodes, verbose=opt.verbose, + cls=EmaneRfPipeModel, values=rfpipevals) + exp.setnodes() + exp.info("waiting %s sec (node/route bring-up)" % opt.delay) + time.sleep(opt.delay) + exp.info("sending pathloss events to govern connectivity") + exp.setpathloss(opt.numnodes) + results["5.1 pathloss"] = exp.runalltests("pathloss") + exp.info("shutting down RF-PIPE session") + exp.reset() + + 
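# ----------------------------------------------------------------------------
# Worked example (sketch, not part of the patch itself): what MgenCmd.mgenrate()
# defined earlier produces for the default CPU-test rate of 512 kbps. 1472 is
# the payload size assumed in its docstring (1500 MTU - 20 IP - 8 UDP header).
#
#   bps = (512 / 8) * 1000.0          # 64000.0 bytes per second
#   pps = math.ceil(64000.0 / 1472)   # 44.0 packets per second
#   MgenCmd.mgenrate(512)             # -> "44.0 1472"
#   # yielding the sender event "ON 1 UDP DST <addr>/5000 PERIODIC [44.0 1472]"
# ----------------------------------------------------------------------------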
diff --git a/package/examples/services/sampleFirewall b/daemon/examples/services/sampleFirewall
similarity index 100%
rename from package/examples/services/sampleFirewall
rename to daemon/examples/services/sampleFirewall
diff --git a/package/examples/services/sampleIPsec b/daemon/examples/services/sampleIPsec
similarity index 100%
rename from package/examples/services/sampleIPsec
rename to daemon/examples/services/sampleIPsec
diff --git a/package/examples/services/sampleVPNClient b/daemon/examples/services/sampleVPNClient
similarity index 97%
rename from package/examples/services/sampleVPNClient
rename to daemon/examples/services/sampleVPNClient
index addbf23e..af17ef41 100644
--- a/package/examples/services/sampleVPNClient
+++ b/daemon/examples/services/sampleVPNClient
@@ -13,13 +13,13 @@ keyname=client1
 vpnserver="10.0.2.10"
 # optional next hop for adding a static route to reach the VPN server
-#nexthop="10.0.1.1"
+nexthop="10.0.1.1"
 # --------- END CUSTOMIZATION --------
 # validate addresses
 if [ "$(dpkg -l | grep " sipcalc ")" = "" ]; then
-    echo "WARNING: ip validation disabled because package sipcalc not installed
+    echo "WARNING: ip validation disabled because package sipcalc not installed 
 " > $PWD/vpnclient.log
 else
     if [ "$(sipcalc "$vpnserver" "$nexthop" | grep ERR)" != "" ]; then
@@ -59,5 +59,5 @@ verb 4
 daemon
 EOF
 ) > client.conf
-
+ 
 openvpn --config client.conf
diff --git a/package/examples/services/sampleVPNServer b/daemon/examples/services/sampleVPNServer
similarity index 95%
rename from package/examples/services/sampleVPNServer
rename to daemon/examples/services/sampleVPNServer
index 39639d05..f94c4d7b 100644
--- a/package/examples/services/sampleVPNServer
+++ b/daemon/examples/services/sampleVPNServer
@@ -12,7 +12,7 @@ keydir=/etc/core/keys
 # the name used for a "$keyname.crt" certificate and "$keyname.key" private key.
 keyname=server2
-# the VPN subnet address from which the client VPN IP (for the TUN/TAP)
+# the VPN subnet address from which the client VPN IP (for the TUN/TAP) 
 # will be allocated
 vpnsubnet=10.0.200.0
@@ -22,18 +22,18 @@ vpnserver=10.0.2.10
 # optional list of private subnets reachable behind this VPN server
 # each subnet and next hop is separated by a space
 # "<subnet>,<nexthop> <subnet>,<nexthop> ..."
-#privatenets="10.0.11.0,10.0.10.1 10.0.12.0,10.0.10.1"
+privatenets="10.0.11.0,10.0.10.1 10.0.12.0,10.0.10.1"
 # optional list of VPN clients, for statically assigning IP addresses to
 # clients; also, an optional client subnet can be specified for adding static
 # routes via the client
 # Note: VPN addresses x.x.x.0-3 are reserved
 # "<keyfilename>,<vpnIP>,<clientsubnet> <keyfilename>,<vpnIP>, ..."
-#vpnclients="client1KeyFilename,10.0.200.5,10.0.0.0 client2KeyFilename,,"
+vpnclients="client1KeyFilename,10.0.200.5,10.0.0.0 client2KeyFilename,,"
 # NOTE: you may need to enable the StaticRoutes service on nodes within the
 # private subnet, in order to have routes back to the client.
-# /sbin/ip ro add /24 via
+# /sbin/ip ro add /24 via 
 # /sbin/ip ro add /24 via
 # -------- END CUSTOMIZATION --------
@@ -60,7 +60,7 @@ else
     $vpnsubnet or $vpnserver
 " >> $PWD/vpnserver.log
     fi
 fi
-
+ 
 # create client vpn ip pool file
 (
 cat << EOF
@@ -73,7 +73,7 @@ cat << EOF
 # openvpn server config
 local $vpnserver
 server $vpnsubnet 255.255.255.0
-push "redirect-gateway def1"
+push redirect-gateway def1
 EOF
 )> $PWD/server.conf
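The sampleVPNServer hunk above packs its optional lists into space-separated entries whose fields are comma-delimited: privatenets holds subnet,nexthop pairs and vpnclients holds keyfilename,vpnIP,clientsubnet triples with optional trailing fields. The sketch below shows how such strings split apart; it is not part of the patch, the split_packed_list() helper is hypothetical, and the input values are the examples shown in the diff.

# --- illustrative sketch, not part of the patch above ---
def split_packed_list(value, fields):
    """Split 'a,b c,d' style strings into dicts keyed by the given field names."""
    entries = []
    for item in value.split():
        parts = item.split(",")
        parts += [""] * (len(fields) - len(parts))  # pad optional trailing fields
        entries.append(dict(zip(fields, parts)))
    return entries

privatenets = "10.0.11.0,10.0.10.1 10.0.12.0,10.0.10.1"
vpnclients = "client1KeyFilename,10.0.200.5,10.0.0.0 client2KeyFilename,,"
print(split_packed_list(privatenets, ("subnet", "nexthop")))
print(split_packed_list(vpnclients, ("keyfilename", "vpnip", "clientsubnet")))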
diff --git a/daemon/examples/stopsession.py b/daemon/examples/stopsession.py
new file mode 100755
index 00000000..9cbfc2cb
--- /dev/null
+++ b/daemon/examples/stopsession.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+# (c)2010-2012 the Boeing Company
+# author: Jeff Ahrenholz
+#
+# List and stop CORE sessions from the command line.
+#
+
+import optparse
+import socket
+
+from core.api import coreapi
+from core.enumerations import MessageFlags, SessionTlvs, CORE_API_PORT
+
+
+def main():
+    parser = optparse.OptionParser(usage="usage: %prog [-l] <sessionid>")
+    parser.add_option("-l", "--list", dest="list", action="store_true",
+                      help="list running sessions")
+    (options, args) = parser.parse_args()
+
+    if options.list is True:
+        num = '0'
+        flags = MessageFlags.STRING.value
+    else:
+        num = args[0]
+        flags = MessageFlags.DELETE.value
+    tlvdata = coreapi.CoreSessionTlv.pack(SessionTlvs.NUMBER.value, num)
+    message = coreapi.CoreSessionMessage.pack(flags, tlvdata)
+
+    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    sock.connect(('localhost', CORE_API_PORT))
+    sock.send(message)
+
+    # receive and print a session list
+    if options.list is True:
+        hdr = sock.recv(coreapi.CoreMessage.header_len)
+        msgtype, msgflags, msglen = coreapi.CoreMessage.unpack_header(hdr)
+        data = ""
+        if msglen:
+            data = sock.recv(msglen)
+        message = coreapi.CoreMessage(msgflags, hdr, data)
+        sessions = message.get_tlv(coreapi.SessionTlvs.NUMBER.value)
+        print "sessions:", sessions
+
+    sock.close()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/package/examples/tdma/schedule.xml b/daemon/examples/tdma/schedule.xml
similarity index 100%
rename from package/examples/tdma/schedule.xml
rename to daemon/examples/tdma/schedule.xml
diff --git a/daemon/poetry.lock b/daemon/poetry.lock
deleted file mode 100644
index c2aae40d..00000000
--- a/daemon/poetry.lock
+++ /dev/null
@@ -1,1259 +0,0 @@
-[[package]]
-name = "atomicwrites"
-version = "1.4.1"
-description = "Atomic file writes."
-category = "dev" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" - -[[package]] -name = "attrs" -version = "22.2.0" -description = "Classes Without Boilerplate" -category = "dev" -optional = false -python-versions = ">=3.6" - -[package.extras] -cov = ["attrs[tests]", "coverage-enable-subprocess", "coverage[toml] (>=5.3)"] -dev = ["attrs[docs,tests]"] -docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope.interface"] -tests = ["attrs[tests-no-zope]", "zope.interface"] -tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=0.971,<0.990)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] -tests_no_zope = ["cloudpickle", "hypothesis", "mypy (>=0.971,<0.990)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] - -[[package]] -name = "bcrypt" -version = "4.0.1" -description = "Modern password hashing for your software and your servers" -category = "main" -optional = false -python-versions = ">=3.6" - -[package.extras] -tests = ["pytest (>=3.2.1,!=3.3.0)"] -typecheck = ["mypy"] - -[[package]] -name = "black" -version = "22.12.0" -description = "The uncompromising code formatter." -category = "dev" -optional = false -python-versions = ">=3.7" - -[package.dependencies] -click = ">=8.0.0" -mypy-extensions = ">=0.4.3" -pathspec = ">=0.9.0" -platformdirs = ">=2" -tomli = {version = ">=1.1.0", markers = "python_full_version < \"3.11.0a7\""} -typing-extensions = {version = ">=3.10.0.0", markers = "python_version < \"3.10\""} - -[package.extras] -colorama = ["colorama (>=0.4.3)"] -d = ["aiohttp (>=3.7.4)"] -jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"] -uvloop = ["uvloop (>=0.15.2)"] - -[[package]] -name = "certifi" -version = "2022.12.7" -description = "Python package for providing Mozilla's CA Bundle." -category = "main" -optional = false -python-versions = ">=3.6" - -[[package]] -name = "cffi" -version = "1.15.1" -description = "Foreign Function Interface for Python calling C code." -category = "main" -optional = false -python-versions = "*" - -[package.dependencies] -pycparser = "*" - -[[package]] -name = "cfgv" -version = "3.3.1" -description = "Validate configuration and produce human readable error messages." -category = "dev" -optional = false -python-versions = ">=3.6.1" - -[[package]] -name = "click" -version = "8.1.3" -description = "Composable command line interface toolkit" -category = "dev" -optional = false -python-versions = ">=3.7" - -[package.dependencies] -colorama = {version = "*", markers = "platform_system == \"Windows\""} - -[[package]] -name = "colorama" -version = "0.4.6" -description = "Cross-platform colored terminal text." -category = "dev" -optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" - -[[package]] -name = "cryptography" -version = "39.0.1" -description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." 
-category = "main" -optional = false -python-versions = ">=3.6" - -[package.dependencies] -cffi = ">=1.12" - -[package.extras] -docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=1.1.1)"] -docstest = ["pyenchant (>=1.6.11)", "sphinxcontrib-spelling (>=4.0.1)", "twine (>=1.12.0)"] -pep8test = ["black", "check-manifest", "mypy", "ruff", "types-pytz", "types-requests"] -sdist = ["setuptools-rust (>=0.11.4)"] -ssh = ["bcrypt (>=3.1.5)"] -test = ["hypothesis (>=1.11.4,!=3.79.2)", "iso8601", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-shard (>=0.1.2)", "pytest-subtests", "pytest-xdist", "pytz"] -test-randomorder = ["pytest-randomly"] -tox = ["tox"] - -[[package]] -name = "distlib" -version = "0.3.6" -description = "Distribution utilities" -category = "dev" -optional = false -python-versions = "*" - -[[package]] -name = "fabric" -version = "2.7.1" -description = "High level SSH command execution" -category = "main" -optional = false -python-versions = "*" - -[package.dependencies] -invoke = ">=1.3,<2.0" -paramiko = ">=2.4" -pathlib2 = "*" - -[package.extras] -pytest = ["mock (>=2.0.0,<3.0)", "pytest (>=3.2.5,<4.0)"] -testing = ["mock (>=2.0.0,<3.0)"] - -[[package]] -name = "filelock" -version = "3.9.0" -description = "A platform independent file lock." -category = "dev" -optional = false -python-versions = ">=3.7" - -[package.extras] -docs = ["furo (>=2022.12.7)", "sphinx (>=5.3)", "sphinx-autodoc-typehints (>=1.19.5)"] -testing = ["covdefaults (>=2.2.2)", "coverage (>=7.0.1)", "pytest (>=7.2)", "pytest-cov (>=4)", "pytest-timeout (>=2.1)"] - -[[package]] -name = "flake8" -version = "3.8.2" -description = "the modular source code checker: pep8 pyflakes and co" -category = "dev" -optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7" - -[package.dependencies] -mccabe = ">=0.6.0,<0.7.0" -pycodestyle = ">=2.6.0a1,<2.7.0" -pyflakes = ">=2.2.0,<2.3.0" - -[[package]] -name = "grpcio" -version = "1.54.2" -description = "HTTP/2-based RPC framework" -category = "main" -optional = false -python-versions = ">=3.7" - -[package.extras] -protobuf = ["grpcio-tools (>=1.54.2)"] - -[[package]] -name = "grpcio-tools" -version = "1.54.2" -description = "Protobuf code generator for gRPC" -category = "dev" -optional = false -python-versions = ">=3.7" - -[package.dependencies] -grpcio = ">=1.54.2" -protobuf = ">=4.21.6,<5.0dev" -setuptools = "*" - -[[package]] -name = "identify" -version = "2.5.18" -description = "File identification library for Python" -category = "dev" -optional = false -python-versions = ">=3.7" - -[package.extras] -license = ["ukkonen"] - -[[package]] -name = "iniconfig" -version = "2.0.0" -description = "brain-dead simple config-ini parsing" -category = "dev" -optional = false -python-versions = ">=3.7" - -[[package]] -name = "invoke" -version = "1.7.3" -description = "Pythonic task execution" -category = "main" -optional = false -python-versions = "*" - -[[package]] -name = "isort" -version = "4.3.21" -description = "A Python utility / library to sort Python imports." -category = "dev" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" - -[package.extras] -pipfile = ["pipreqs", "requirementslib"] -pyproject = ["toml"] -requirements = ["pip-api", "pipreqs"] -xdg_home = ["appdirs (>=1.4.0)"] - -[[package]] -name = "lxml" -version = "4.9.1" -description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API." 
-category = "main" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, != 3.4.*" - -[package.extras] -cssselect = ["cssselect (>=0.7)"] -html5 = ["html5lib"] -htmlsoup = ["BeautifulSoup4"] -source = ["Cython (>=0.29.7)"] - -[[package]] -name = "Mako" -version = "1.2.3" -description = "A super-fast templating language that borrows the best ideas from the existing templating languages." -category = "main" -optional = false -python-versions = ">=3.7" - -[package.dependencies] -MarkupSafe = ">=0.9.2" - -[package.extras] -babel = ["Babel"] -lingua = ["lingua"] -testing = ["pytest"] - -[[package]] -name = "MarkupSafe" -version = "2.1.2" -description = "Safely add untrusted strings to HTML/XML markup." -category = "main" -optional = false -python-versions = ">=3.7" - -[[package]] -name = "mccabe" -version = "0.6.1" -description = "McCabe checker, plugin for flake8" -category = "dev" -optional = false -python-versions = "*" - -[[package]] -name = "mock" -version = "4.0.2" -description = "Rolling backport of unittest.mock for all Pythons" -category = "dev" -optional = false -python-versions = ">=3.6" - -[package.extras] -build = ["blurb", "twine", "wheel"] -docs = ["sphinx"] -test = ["pytest", "pytest-cov"] - -[[package]] -name = "mypy-extensions" -version = "1.0.0" -description = "Type system extensions for programs checked with the mypy type checker." -category = "dev" -optional = false -python-versions = ">=3.5" - -[[package]] -name = "netaddr" -version = "0.7.19" -description = "A network address manipulation library for Python" -category = "main" -optional = false -python-versions = "*" - -[[package]] -name = "nodeenv" -version = "1.7.0" -description = "Node.js virtual environment builder" -category = "dev" -optional = false -python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*" - -[package.dependencies] -setuptools = "*" - -[[package]] -name = "packaging" -version = "23.0" -description = "Core utilities for Python packages" -category = "dev" -optional = false -python-versions = ">=3.7" - -[[package]] -name = "paramiko" -version = "3.0.0" -description = "SSH2 protocol library" -category = "main" -optional = false -python-versions = ">=3.6" - -[package.dependencies] -bcrypt = ">=3.2" -cryptography = ">=3.3" -pynacl = ">=1.5" - -[package.extras] -all = ["gssapi (>=1.4.1)", "invoke (>=2.0)", "pyasn1 (>=0.1.7)", "pywin32 (>=2.1.8)"] -gssapi = ["gssapi (>=1.4.1)", "pyasn1 (>=0.1.7)", "pywin32 (>=2.1.8)"] -invoke = ["invoke (>=2.0)"] - -[[package]] -name = "pathlib2" -version = "2.3.7.post1" -description = "Object-oriented filesystem paths" -category = "main" -optional = false -python-versions = "*" - -[package.dependencies] -six = "*" - -[[package]] -name = "pathspec" -version = "0.11.0" -description = "Utility library for gitignore style pattern matching of file paths." 
-category = "dev" -optional = false -python-versions = ">=3.7" - -[[package]] -name = "Pillow" -version = "9.4.0" -description = "Python Imaging Library (Fork)" -category = "main" -optional = false -python-versions = ">=3.7" - -[package.extras] -docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-issues (>=3.0.1)", "sphinx-removed-in", "sphinxext-opengraph"] -tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] - -[[package]] -name = "platformdirs" -version = "3.0.0" -description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." -category = "dev" -optional = false -python-versions = ">=3.7" - -[package.extras] -docs = ["furo (>=2022.12.7)", "proselint (>=0.13)", "sphinx (>=6.1.3)", "sphinx-autodoc-typehints (>=1.22,!=1.23.4)"] -test = ["appdirs (==1.4.4)", "covdefaults (>=2.2.2)", "pytest (>=7.2.1)", "pytest-cov (>=4)", "pytest-mock (>=3.10)"] - -[[package]] -name = "pluggy" -version = "1.0.0" -description = "plugin and hook calling mechanisms for python" -category = "dev" -optional = false -python-versions = ">=3.6" - -[package.extras] -dev = ["pre-commit", "tox"] -testing = ["pytest", "pytest-benchmark"] - -[[package]] -name = "pre-commit" -version = "2.1.1" -description = "A framework for managing and maintaining multi-language pre-commit hooks." -category = "dev" -optional = false -python-versions = ">=3.6" - -[package.dependencies] -cfgv = ">=2.0.0" -identify = ">=1.0.0" -nodeenv = ">=0.11.1" -pyyaml = ">=5.1" -toml = "*" -virtualenv = ">=15.2" - -[[package]] -name = "protobuf" -version = "4.21.9" -description = "" -category = "main" -optional = false -python-versions = ">=3.7" - -[[package]] -name = "py" -version = "1.11.0" -description = "library with cross-python path, ini-parsing, io, code, log facilities" -category = "dev" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" - -[[package]] -name = "pycodestyle" -version = "2.6.0" -description = "Python style guide checker" -category = "dev" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" - -[[package]] -name = "pycparser" -version = "2.21" -description = "C parser in Python" -category = "main" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" - -[[package]] -name = "pyflakes" -version = "2.2.0" -description = "passive checker of Python programs" -category = "dev" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" - -[[package]] -name = "PyNaCl" -version = "1.5.0" -description = "Python binding to the Networking and Cryptography (NaCl) library" -category = "main" -optional = false -python-versions = ">=3.6" - -[package.dependencies] -cffi = ">=1.4.1" - -[package.extras] -docs = ["sphinx (>=1.6.5)", "sphinx_rtd_theme"] -tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"] - -[[package]] -name = "pyproj" -version = "3.3.1" -description = "Python interface to PROJ (cartographic projections and coordinate transformations library)" -category = "main" -optional = false -python-versions = ">=3.8" - -[package.dependencies] -certifi = "*" - -[[package]] -name = "pytest" -version = "6.2.5" -description = "pytest: simple powerful testing with Python" -category = "dev" -optional = false -python-versions = ">=3.6" - -[package.dependencies] -atomicwrites = {version = ">=1.0", markers = "sys_platform == \"win32\""} -attrs = 
">=19.2.0" -colorama = {version = "*", markers = "sys_platform == \"win32\""} -iniconfig = "*" -packaging = "*" -pluggy = ">=0.12,<2.0" -py = ">=1.8.2" -toml = "*" - -[package.extras] -testing = ["argcomplete", "hypothesis (>=3.56)", "mock", "nose", "requests", "xmlschema"] - -[[package]] -name = "PyYAML" -version = "6.0.1" -description = "YAML parser and emitter for Python" -category = "main" -optional = false -python-versions = ">=3.6" - -[[package]] -name = "setuptools" -version = "67.4.0" -description = "Easily download, build, install, upgrade, and uninstall Python packages" -category = "dev" -optional = false -python-versions = ">=3.7" - -[package.extras] -docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (==0.8.3)", "sphinx-reredirects", "sphinxcontrib-towncrier"] -testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8 (<5)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pip-run (>=8.8)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] -testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] - -[[package]] -name = "six" -version = "1.16.0" -description = "Python 2 and 3 compatibility utilities" -category = "main" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" - -[[package]] -name = "toml" -version = "0.10.2" -description = "Python Library for Tom's Obvious, Minimal Language" -category = "dev" -optional = false -python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" - -[[package]] -name = "tomli" -version = "2.0.1" -description = "A lil' TOML parser" -category = "dev" -optional = false -python-versions = ">=3.7" - -[[package]] -name = "typing-extensions" -version = "4.5.0" -description = "Backported and Experimental Type Hints for Python 3.7+" -category = "dev" -optional = false -python-versions = ">=3.7" - -[[package]] -name = "virtualenv" -version = "20.19.0" -description = "Virtual Python Environment builder" -category = "dev" -optional = false -python-versions = ">=3.7" - -[package.dependencies] -distlib = ">=0.3.6,<1" -filelock = ">=3.4.1,<4" -platformdirs = ">=2.4,<4" - -[package.extras] -docs = ["furo (>=2022.12.7)", "proselint (>=0.13)", "sphinx (>=6.1.3)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=22.12)"] -test = ["covdefaults (>=2.2.2)", "coverage (>=7.1)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23)", "pytest (>=7.2.1)", "pytest-env (>=0.8.1)", "pytest-freezegun (>=0.4.2)", "pytest-mock (>=3.10)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)"] - -[metadata] -lock-version = "1.1" -python-versions = "^3.9" -content-hash = "10902a50368c4381aec5a3e72a221a4c4225ae1be17ee38600f89aaee4a49c1f" - -[metadata.files] -atomicwrites = [ - {file = "atomicwrites-1.4.1.tar.gz", hash = "sha256:81b2c9071a49367a7f770170e5eec8cb66567cfbbc8c73d20ce5ca4a8d71cf11"}, -] -attrs = [ - {file = "attrs-22.2.0-py3-none-any.whl", hash = 
"sha256:29e95c7f6778868dbd49170f98f8818f78f3dc5e0e37c0b1f474e3561b240836"}, - {file = "attrs-22.2.0.tar.gz", hash = "sha256:c9227bfc2f01993c03f68db37d1d15c9690188323c067c641f1a35ca58185f99"}, -] -bcrypt = [ - {file = "bcrypt-4.0.1-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:b1023030aec778185a6c16cf70f359cbb6e0c289fd564a7cfa29e727a1c38f8f"}, - {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:08d2947c490093a11416df18043c27abe3921558d2c03e2076ccb28a116cb6d0"}, - {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0eaa47d4661c326bfc9d08d16debbc4edf78778e6aaba29c1bc7ce67214d4410"}, - {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ae88eca3024bb34bb3430f964beab71226e761f51b912de5133470b649d82344"}, - {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:a522427293d77e1c29e303fc282e2d71864579527a04ddcfda6d4f8396c6c36a"}, - {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:fbdaec13c5105f0c4e5c52614d04f0bca5f5af007910daa8b6b12095edaa67b3"}, - {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:ca3204d00d3cb2dfed07f2d74a25f12fc12f73e606fcaa6975d1f7ae69cacbb2"}, - {file = "bcrypt-4.0.1-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:089098effa1bc35dc055366740a067a2fc76987e8ec75349eb9484061c54f535"}, - {file = "bcrypt-4.0.1-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:e9a51bbfe7e9802b5f3508687758b564069ba937748ad7b9e890086290d2f79e"}, - {file = "bcrypt-4.0.1-cp36-abi3-win32.whl", hash = "sha256:2caffdae059e06ac23fce178d31b4a702f2a3264c20bfb5ff541b338194d8fab"}, - {file = "bcrypt-4.0.1-cp36-abi3-win_amd64.whl", hash = "sha256:8a68f4341daf7522fe8d73874de8906f3a339048ba406be6ddc1b3ccb16fc0d9"}, - {file = "bcrypt-4.0.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf4fa8b2ca74381bb5442c089350f09a3f17797829d958fad058d6e44d9eb83c"}, - {file = "bcrypt-4.0.1-pp37-pypy37_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:67a97e1c405b24f19d08890e7ae0c4f7ce1e56a712a016746c8b2d7732d65d4b"}, - {file = "bcrypt-4.0.1-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b3b85202d95dd568efcb35b53936c5e3b3600c7cdcc6115ba461df3a8e89f38d"}, - {file = "bcrypt-4.0.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cbb03eec97496166b704ed663a53680ab57c5084b2fc98ef23291987b525cb7d"}, - {file = "bcrypt-4.0.1-pp38-pypy38_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:5ad4d32a28b80c5fa6671ccfb43676e8c1cc232887759d1cd7b6f56ea4355215"}, - {file = "bcrypt-4.0.1-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b57adba8a1444faf784394de3436233728a1ecaeb6e07e8c22c8848f179b893c"}, - {file = "bcrypt-4.0.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:705b2cea8a9ed3d55b4491887ceadb0106acf7c6387699fca771af56b1cdeeda"}, - {file = "bcrypt-4.0.1-pp39-pypy39_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:2b3ac11cf45161628f1f3733263e63194f22664bf4d0c0f3ab34099c02134665"}, - {file = "bcrypt-4.0.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:3100851841186c25f127731b9fa11909ab7b1df6fc4b9f8353f4f1fd952fbf71"}, - {file = "bcrypt-4.0.1.tar.gz", hash = "sha256:27d375903ac8261cfe4047f6709d16f7d18d39b1ec92aaf72af989552a650ebd"}, -] -black = [ - {file = "black-22.12.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:9eedd20838bd5d75b80c9f5487dbcb06836a43833a37846cf1d8c1cc01cef59d"}, - {file = "black-22.12.0-cp310-cp310-win_amd64.whl", hash = "sha256:159a46a4947f73387b4d83e87ea006dbb2337eab6c879620a3ba52699b1f4351"}, - {file = "black-22.12.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d30b212bffeb1e252b31dd269dfae69dd17e06d92b87ad26e23890f3efea366f"}, - {file = "black-22.12.0-cp311-cp311-win_amd64.whl", hash = "sha256:7412e75863aa5c5411886804678b7d083c7c28421210180d67dfd8cf1221e1f4"}, - {file = "black-22.12.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c116eed0efb9ff870ded8b62fe9f28dd61ef6e9ddd28d83d7d264a38417dcee2"}, - {file = "black-22.12.0-cp37-cp37m-win_amd64.whl", hash = "sha256:1f58cbe16dfe8c12b7434e50ff889fa479072096d79f0a7f25e4ab8e94cd8350"}, - {file = "black-22.12.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77d86c9f3db9b1bf6761244bc0b3572a546f5fe37917a044e02f3166d5aafa7d"}, - {file = "black-22.12.0-cp38-cp38-win_amd64.whl", hash = "sha256:82d9fe8fee3401e02e79767016b4907820a7dc28d70d137eb397b92ef3cc5bfc"}, - {file = "black-22.12.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:101c69b23df9b44247bd88e1d7e90154336ac4992502d4197bdac35dd7ee3320"}, - {file = "black-22.12.0-cp39-cp39-win_amd64.whl", hash = "sha256:559c7a1ba9a006226f09e4916060982fd27334ae1998e7a38b3f33a37f7a2148"}, - {file = "black-22.12.0-py3-none-any.whl", hash = "sha256:436cc9167dd28040ad90d3b404aec22cedf24a6e4d7de221bec2730ec0c97bcf"}, - {file = "black-22.12.0.tar.gz", hash = "sha256:229351e5a18ca30f447bf724d007f890f97e13af070bb6ad4c0a441cd7596a2f"}, -] -certifi = [ - {file = "certifi-2022.12.7-py3-none-any.whl", hash = "sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18"}, - {file = "certifi-2022.12.7.tar.gz", hash = "sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3"}, -] -cffi = [ - {file = "cffi-1.15.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2"}, - {file = "cffi-1.15.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2"}, - {file = "cffi-1.15.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914"}, - {file = "cffi-1.15.1-cp27-cp27m-win32.whl", hash = "sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3"}, - {file = "cffi-1.15.1-cp27-cp27m-win_amd64.whl", hash = "sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e"}, - {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162"}, - {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b"}, - {file = "cffi-1.15.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21"}, - {file = "cffi-1.15.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185"}, - {file = "cffi-1.15.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd"}, - {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc"}, - {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f"}, - {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e"}, - {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4"}, - {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01"}, - {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e"}, - {file = "cffi-1.15.1-cp310-cp310-win32.whl", hash = "sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2"}, - {file = "cffi-1.15.1-cp310-cp310-win_amd64.whl", hash = "sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d"}, - {file = "cffi-1.15.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac"}, - {file = "cffi-1.15.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83"}, - {file = "cffi-1.15.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9"}, - {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c"}, - {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325"}, - {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c"}, - {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef"}, - {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8"}, - {file = "cffi-1.15.1-cp311-cp311-win32.whl", hash = "sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d"}, - {file = "cffi-1.15.1-cp311-cp311-win_amd64.whl", hash = "sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104"}, - {file = "cffi-1.15.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7"}, - {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6"}, - {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d"}, - {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a"}, - {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405"}, - {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", 
hash = "sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e"}, - {file = "cffi-1.15.1-cp36-cp36m-win32.whl", hash = "sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf"}, - {file = "cffi-1.15.1-cp36-cp36m-win_amd64.whl", hash = "sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497"}, - {file = "cffi-1.15.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375"}, - {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e"}, - {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82"}, - {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b"}, - {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c"}, - {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426"}, - {file = "cffi-1.15.1-cp37-cp37m-win32.whl", hash = "sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9"}, - {file = "cffi-1.15.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045"}, - {file = "cffi-1.15.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3"}, - {file = "cffi-1.15.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a"}, - {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5"}, - {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca"}, - {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02"}, - {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192"}, - {file = "cffi-1.15.1-cp38-cp38-win32.whl", hash = "sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314"}, - {file = "cffi-1.15.1-cp38-cp38-win_amd64.whl", hash = "sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5"}, - {file = "cffi-1.15.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585"}, - {file = "cffi-1.15.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0"}, - {file = "cffi-1.15.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415"}, - {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d"}, - {file = 
"cffi-1.15.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984"}, - {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35"}, - {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27"}, - {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76"}, - {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3"}, - {file = "cffi-1.15.1-cp39-cp39-win32.whl", hash = "sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee"}, - {file = "cffi-1.15.1-cp39-cp39-win_amd64.whl", hash = "sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c"}, - {file = "cffi-1.15.1.tar.gz", hash = "sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9"}, -] -cfgv = [ - {file = "cfgv-3.3.1-py2.py3-none-any.whl", hash = "sha256:c6a0883f3917a037485059700b9e75da2464e6c27051014ad85ba6aaa5884426"}, - {file = "cfgv-3.3.1.tar.gz", hash = "sha256:f5a830efb9ce7a445376bb66ec94c638a9787422f96264c98edc6bdeed8ab736"}, -] -click = [ - {file = "click-8.1.3-py3-none-any.whl", hash = "sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48"}, - {file = "click-8.1.3.tar.gz", hash = "sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e"}, -] -colorama = [ - {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, - {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, -] -cryptography = [ - {file = "cryptography-39.0.1-cp36-abi3-macosx_10_12_universal2.whl", hash = "sha256:6687ef6d0a6497e2b58e7c5b852b53f62142cfa7cd1555795758934da363a965"}, - {file = "cryptography-39.0.1-cp36-abi3-macosx_10_12_x86_64.whl", hash = "sha256:706843b48f9a3f9b9911979761c91541e3d90db1ca905fd63fee540a217698bc"}, - {file = "cryptography-39.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:5d2d8b87a490bfcd407ed9d49093793d0f75198a35e6eb1a923ce1ee86c62b41"}, - {file = "cryptography-39.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:83e17b26de248c33f3acffb922748151d71827d6021d98c70e6c1a25ddd78505"}, - {file = "cryptography-39.0.1-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e124352fd3db36a9d4a21c1aa27fd5d051e621845cb87fb851c08f4f75ce8be6"}, - {file = "cryptography-39.0.1-cp36-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:5aa67414fcdfa22cf052e640cb5ddc461924a045cacf325cd164e65312d99502"}, - {file = "cryptography-39.0.1-cp36-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:35f7c7d015d474f4011e859e93e789c87d21f6f4880ebdc29896a60403328f1f"}, - {file = "cryptography-39.0.1-cp36-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f24077a3b5298a5a06a8e0536e3ea9ec60e4c7ac486755e5fb6e6ea9b3500106"}, - {file = "cryptography-39.0.1-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:f0c64d1bd842ca2633e74a1a28033d139368ad959872533b1bab8c80e8240a0c"}, - {file = "cryptography-39.0.1-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:0f8da300b5c8af9f98111ffd512910bc792b4c77392a9523624680f7956a99d4"}, - 
{file = "cryptography-39.0.1-cp36-abi3-win32.whl", hash = "sha256:fe913f20024eb2cb2f323e42a64bdf2911bb9738a15dba7d3cce48151034e3a8"}, - {file = "cryptography-39.0.1-cp36-abi3-win_amd64.whl", hash = "sha256:ced4e447ae29ca194449a3f1ce132ded8fcab06971ef5f618605aacaa612beac"}, - {file = "cryptography-39.0.1-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:807ce09d4434881ca3a7594733669bd834f5b2c6d5c7e36f8c00f691887042ad"}, - {file = "cryptography-39.0.1-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c5caeb8188c24888c90b5108a441c106f7faa4c4c075a2bcae438c6e8ca73cef"}, - {file = "cryptography-39.0.1-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:4789d1e3e257965e960232345002262ede4d094d1a19f4d3b52e48d4d8f3b885"}, - {file = "cryptography-39.0.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:96f1157a7c08b5b189b16b47bc9db2332269d6680a196341bf30046330d15388"}, - {file = "cryptography-39.0.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e422abdec8b5fa8462aa016786680720d78bdce7a30c652b7fadf83a4ba35336"}, - {file = "cryptography-39.0.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:b0afd054cd42f3d213bf82c629efb1ee5f22eba35bf0eec88ea9ea7304f511a2"}, - {file = "cryptography-39.0.1-pp39-pypy39_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:6f8ba7f0328b79f08bdacc3e4e66fb4d7aab0c3584e0bd41328dce5262e26b2e"}, - {file = "cryptography-39.0.1-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:ef8b72fa70b348724ff1218267e7f7375b8de4e8194d1636ee60510aae104cd0"}, - {file = "cryptography-39.0.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:aec5a6c9864be7df2240c382740fcf3b96928c46604eaa7f3091f58b878c0bb6"}, - {file = "cryptography-39.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:fdd188c8a6ef8769f148f88f859884507b954cc64db6b52f66ef199bb9ad660a"}, - {file = "cryptography-39.0.1.tar.gz", hash = "sha256:d1f6198ee6d9148405e49887803907fe8962a23e6c6f83ea7d98f1c0de375695"}, -] -distlib = [ - {file = "distlib-0.3.6-py2.py3-none-any.whl", hash = "sha256:f35c4b692542ca110de7ef0bea44d73981caeb34ca0b9b6b2e6d7790dda8f80e"}, - {file = "distlib-0.3.6.tar.gz", hash = "sha256:14bad2d9b04d3a36127ac97f30b12a19268f211063d8f8ee4f47108896e11b46"}, -] -fabric = [ - {file = "fabric-2.7.1-py2.py3-none-any.whl", hash = "sha256:7610362318ef2d391cc65d4befb684393975d889ed5720f23499394ec0e136fa"}, - {file = "fabric-2.7.1.tar.gz", hash = "sha256:76f8fef59cf2061dbd849bbce4fe49bdd820884385004b0ca59136ac3db129e4"}, -] -filelock = [ - {file = "filelock-3.9.0-py3-none-any.whl", hash = "sha256:f58d535af89bb9ad5cd4df046f741f8553a418c01a7856bf0d173bbc9f6bd16d"}, - {file = "filelock-3.9.0.tar.gz", hash = "sha256:7b319f24340b51f55a2bf7a12ac0755a9b03e718311dac567a0f4f7fabd2f5de"}, -] -flake8 = [ - {file = "flake8-3.8.2-py2.py3-none-any.whl", hash = "sha256:ccaa799ef9893cebe69fdfefed76865aeaefbb94cb8545617b2298786a4de9a5"}, - {file = "flake8-3.8.2.tar.gz", hash = "sha256:c69ac1668e434d37a2d2880b3ca9aafd54b3a10a3ac1ab101d22f29e29cf8634"}, -] -grpcio = [ - {file = "grpcio-1.54.2-cp310-cp310-linux_armv7l.whl", hash = "sha256:40e1cbf69d6741b40f750f3cccc64326f927ac6145a9914d33879e586002350c"}, - {file = "grpcio-1.54.2-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:2288d76e4d4aa7ef3fe7a73c1c470b66ea68e7969930e746a8cd8eca6ef2a2ea"}, - {file = "grpcio-1.54.2-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:c0e3155fc5335ec7b3b70f15230234e529ca3607b20a562b6c75fb1b1218874c"}, - {file = 
"grpcio-1.54.2-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9bf88004fe086c786dc56ef8dd6cb49c026833fdd6f42cb853008bce3f907148"}, - {file = "grpcio-1.54.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2be88c081e33f20630ac3343d8ad9f1125f32987968e9c8c75c051c9800896e8"}, - {file = "grpcio-1.54.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:33d40954199bddbb6a78f8f6f2b2082660f381cd2583ec860a6c2fa7c8400c08"}, - {file = "grpcio-1.54.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b52d00d1793d290c81ad6a27058f5224a7d5f527867e5b580742e1bd211afeee"}, - {file = "grpcio-1.54.2-cp310-cp310-win32.whl", hash = "sha256:881d058c5ccbea7cc2c92085a11947b572498a27ef37d3eef4887f499054dca8"}, - {file = "grpcio-1.54.2-cp310-cp310-win_amd64.whl", hash = "sha256:0212e2f7fdf7592e4b9d365087da30cb4d71e16a6f213120c89b4f8fb35a3ab3"}, - {file = "grpcio-1.54.2-cp311-cp311-linux_armv7l.whl", hash = "sha256:1e623e0cf99a0ac114f091b3083a1848dbc64b0b99e181473b5a4a68d4f6f821"}, - {file = "grpcio-1.54.2-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:66233ccd2a9371158d96e05d082043d47dadb18cbb294dc5accfdafc2e6b02a7"}, - {file = "grpcio-1.54.2-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:4cb283f630624ebb16c834e5ac3d7880831b07cbe76cb08ab7a271eeaeb8943e"}, - {file = "grpcio-1.54.2-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2a1e601ee31ef30a9e2c601d0867e236ac54c922d32ed9f727b70dd5d82600d5"}, - {file = "grpcio-1.54.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8da84bbc61a4e92af54dc96344f328e5822d574f767e9b08e1602bb5ddc254a"}, - {file = "grpcio-1.54.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:5008964885e8d23313c8e5ea0d44433be9bfd7e24482574e8cc43c02c02fc796"}, - {file = "grpcio-1.54.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:a2f5a1f1080ccdc7cbaf1171b2cf384d852496fe81ddedeb882d42b85727f610"}, - {file = "grpcio-1.54.2-cp311-cp311-win32.whl", hash = "sha256:b74ae837368cfffeb3f6b498688a123e6b960951be4dec0e869de77e7fa0439e"}, - {file = "grpcio-1.54.2-cp311-cp311-win_amd64.whl", hash = "sha256:8cdbcbd687e576d48f7886157c95052825ca9948c0ed2afdc0134305067be88b"}, - {file = "grpcio-1.54.2-cp37-cp37m-linux_armv7l.whl", hash = "sha256:782f4f8662a2157c4190d0f99eaaebc602899e84fb1e562a944e5025929e351c"}, - {file = "grpcio-1.54.2-cp37-cp37m-macosx_10_10_universal2.whl", hash = "sha256:714242ad0afa63a2e6dabd522ae22e1d76e07060b5af2ddda5474ba4f14c2c94"}, - {file = "grpcio-1.54.2-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:f900ed4ad7a0f1f05d35f955e0943944d5a75f607a836958c6b8ab2a81730ef2"}, - {file = "grpcio-1.54.2-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:96a41817d2c763b1d0b32675abeb9179aa2371c72aefdf74b2d2b99a1b92417b"}, - {file = "grpcio-1.54.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70fcac7b94f4c904152809a050164650ac81c08e62c27aa9f156ac518029ebbe"}, - {file = "grpcio-1.54.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:fd6c6c29717724acf9fc1847c4515d57e4dc12762452457b9cb37461f30a81bb"}, - {file = "grpcio-1.54.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:c2392f5b5d84b71d853918687d806c1aa4308109e5ca158a16e16a6be71041eb"}, - {file = "grpcio-1.54.2-cp37-cp37m-win_amd64.whl", hash = "sha256:51630c92591d6d3fe488a7c706bd30a61594d144bac7dee20c8e1ce78294f474"}, - {file = "grpcio-1.54.2-cp38-cp38-linux_armv7l.whl", hash = "sha256:b04202453941a63b36876a7172b45366dc0cde10d5fd7855c0f4a4e673c0357a"}, - {file = 
"grpcio-1.54.2-cp38-cp38-macosx_10_10_universal2.whl", hash = "sha256:89dde0ac72a858a44a2feb8e43dc68c0c66f7857a23f806e81e1b7cc7044c9cf"}, - {file = "grpcio-1.54.2-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:09d4bfd84686cd36fd11fd45a0732c7628308d094b14d28ea74a81db0bce2ed3"}, - {file = "grpcio-1.54.2-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7fc2b4edb938c8faa4b3c3ea90ca0dd89b7565a049e8e4e11b77e60e4ed2cc05"}, - {file = "grpcio-1.54.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61f7203e2767800edee7a1e1040aaaf124a35ce0c7fe0883965c6b762defe598"}, - {file = "grpcio-1.54.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:e416c8baf925b5a1aff31f7f5aecc0060b25d50cce3a5a7255dc5cf2f1d4e5eb"}, - {file = "grpcio-1.54.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:dc80c9c6b608bf98066a038e0172013a49cfa9a08d53335aefefda2c64fc68f4"}, - {file = "grpcio-1.54.2-cp38-cp38-win32.whl", hash = "sha256:8d6192c37a30a115f4663592861f50e130caed33efc4eec24d92ec881c92d771"}, - {file = "grpcio-1.54.2-cp38-cp38-win_amd64.whl", hash = "sha256:46a057329938b08e5f0e12ea3d7aed3ecb20a0c34c4a324ef34e00cecdb88a12"}, - {file = "grpcio-1.54.2-cp39-cp39-linux_armv7l.whl", hash = "sha256:2296356b5c9605b73ed6a52660b538787094dae13786ba53080595d52df13a98"}, - {file = "grpcio-1.54.2-cp39-cp39-macosx_10_10_universal2.whl", hash = "sha256:c72956972e4b508dd39fdc7646637a791a9665b478e768ffa5f4fe42123d5de1"}, - {file = "grpcio-1.54.2-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:9bdbb7624d65dc0ed2ed8e954e79ab1724526f09b1efa88dcd9a1815bf28be5f"}, - {file = "grpcio-1.54.2-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c44e1a765b31e175c391f22e8fc73b2a2ece0e5e6ff042743d8109b5d2eff9f"}, - {file = "grpcio-1.54.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5cc928cfe6c360c1df636cf7991ab96f059666ac7b40b75a769410cc6217df9c"}, - {file = "grpcio-1.54.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:a08920fa1a97d4b8ee5db2f31195de4a9def1a91bc003544eb3c9e6b8977960a"}, - {file = "grpcio-1.54.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4864f99aac207e3e45c5e26c6cbb0ad82917869abc2f156283be86c05286485c"}, - {file = "grpcio-1.54.2-cp39-cp39-win32.whl", hash = "sha256:b38b3de8cff5bc70f8f9c615f51b48eff7313fc9aca354f09f81b73036e7ddfa"}, - {file = "grpcio-1.54.2-cp39-cp39-win_amd64.whl", hash = "sha256:be48496b0e00460717225e7680de57c38be1d8629dc09dadcd1b3389d70d942b"}, - {file = "grpcio-1.54.2.tar.gz", hash = "sha256:50a9f075eeda5097aa9a182bb3877fe1272875e45370368ac0ee16ab9e22d019"}, -] -grpcio-tools = [ - {file = "grpcio-tools-1.54.2.tar.gz", hash = "sha256:e11c2c2aee53f340992e8e4d6a59172cbbbd0193f1351de98c4f810a5041d5ca"}, - {file = "grpcio_tools-1.54.2-cp310-cp310-linux_armv7l.whl", hash = "sha256:2b96f5f17d3156058be247fd25b062b4768138665694c00b056659618b8fb418"}, - {file = "grpcio_tools-1.54.2-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:11939c9a8a39bd4815c7e88cb2fee48e1948775b59dbb06de8fcae5991e84f9e"}, - {file = "grpcio_tools-1.54.2-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:129de5579f95d6a55dde185f188b4cbe19d1e2f1471425431d9930c31d300d70"}, - {file = "grpcio_tools-1.54.2-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c4128c01cd6f5ea8f7c2db405dbfd8582cd967d36e6fa0952565436633b0e591"}, - {file = "grpcio_tools-1.54.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e5c7292dd899ad8fa09a2be96719648cee37b17909fe8c12007e3bff58ebee61"}, - {file = 
"grpcio_tools-1.54.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:5ef30c2dbc63c1e0a462423ca4f95001814d26ef4fe66208e53fcf220ea3b717"}, - {file = "grpcio_tools-1.54.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4abfc1892380abe6cef381eab86f9350cbd703bfe5d834095aa66fd91c886b6d"}, - {file = "grpcio_tools-1.54.2-cp310-cp310-win32.whl", hash = "sha256:9acf443dcf6f68fbea3b7fb519e1716e014db1a561939f5aecc4abda74e4015d"}, - {file = "grpcio_tools-1.54.2-cp310-cp310-win_amd64.whl", hash = "sha256:21b9d2dee80f3f77e4097252e7f0db89772335a7300b72ab3d2e5c280872b1db"}, - {file = "grpcio_tools-1.54.2-cp311-cp311-linux_armv7l.whl", hash = "sha256:7b24fbab9e7598518ce4549e066df00aab79c2bf9bedcdde23fb5ef6a3cf532f"}, - {file = "grpcio_tools-1.54.2-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:7baa210c20f71a242d9ae0e02734628f6948e8bee3bf538647894af427d28800"}, - {file = "grpcio_tools-1.54.2-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:e3d0e5188ff8dbaddac2ee44731d36f09c4eccd3eac7328e547862c44f75cacd"}, - {file = "grpcio_tools-1.54.2-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:27671c68c7e0e3c5ff9967f5500799f65a04e7b153b8ce10243c87c43199039d"}, - {file = "grpcio_tools-1.54.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f39d8e8806b8857fb473ca6a9c7bd800b0673dfdb7283ff569af0345a222f32c"}, - {file = "grpcio_tools-1.54.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:8e4c5a48f7b2e8798ce381498ee7b9a83c65b87ae66ee5022387394e5eb51771"}, - {file = "grpcio_tools-1.54.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4f285f8ef3de422717a36bd372239ae778b8cc112ce780ca3c7fe266dadc49fb"}, - {file = "grpcio_tools-1.54.2-cp311-cp311-win32.whl", hash = "sha256:0f952c8a5c47e9204fe8959f7e9add149e660f6579d67cf65024c32736d34caf"}, - {file = "grpcio_tools-1.54.2-cp311-cp311-win_amd64.whl", hash = "sha256:3237149beec39e897fd62cef4aa1e1cd9422d7a95661d24bd0a79200b167e730"}, - {file = "grpcio_tools-1.54.2-cp37-cp37m-linux_armv7l.whl", hash = "sha256:0ab1b323905d449298523db5d34fa5bf5fffd645bd872b25598e2f8a01f0ea39"}, - {file = "grpcio_tools-1.54.2-cp37-cp37m-macosx_10_10_universal2.whl", hash = "sha256:7d7e6e8d62967b3f037f952620cb7381cc39a4bd31790c75fcfba56cc975d70b"}, - {file = "grpcio_tools-1.54.2-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:7f4624ef2e76a3a5313c4e61a81be38bcc16b59a68a85d30758b84cd2102b161"}, - {file = "grpcio_tools-1.54.2-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e543f457935ba7b763b121f1bf893974393b4d30065042f947f85a8d81081b80"}, - {file = "grpcio_tools-1.54.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0239b929eb8b3b30b2397eef3b9abb245087754d77c3721e3be43c44796de87d"}, - {file = "grpcio_tools-1.54.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:0de05c7698c655e9a240dc34ae91d6017b93143ac89e5b20046d7ca3bd09c27c"}, - {file = "grpcio_tools-1.54.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:a3ce0b98fb581c471424d2cda45120f57658ed97677c6fec4d6decf5d7c1b976"}, - {file = "grpcio_tools-1.54.2-cp37-cp37m-win_amd64.whl", hash = "sha256:37393ef90674964175923afe3859fc5a208e1ece565f642b4f76a8c0224a0993"}, - {file = "grpcio_tools-1.54.2-cp38-cp38-linux_armv7l.whl", hash = "sha256:8e4531267736d88fde1022b36dd42ed8163e3575bcbd12bfed96662872aa93fe"}, - {file = "grpcio_tools-1.54.2-cp38-cp38-macosx_10_10_universal2.whl", hash = "sha256:a0b7049814442f918b522d66b1d015286afbeb9e6d141af54bbfafe31710a3c8"}, - {file = 
"grpcio_tools-1.54.2-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:b80585e06c4f0082327eb5c9ad96fbdb2b0e7c14971ea5099fe78c22f4608451"}, - {file = "grpcio_tools-1.54.2-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:39fd530cfdf58dc05125775cc233b05554d553d27478f14ae5fd8a6306f0cb28"}, - {file = "grpcio_tools-1.54.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3bb9ec4aea0f2b3006fb002fa59e5c10f92b48fc374619fbffd14d2b0e388c3e"}, - {file = "grpcio_tools-1.54.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:d512de051342a576bb89777476d13c5266d9334cf4badb6468aed9dc8f5bdec1"}, - {file = "grpcio_tools-1.54.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:1b8ee3099c51ce987fa8a08e6b93fc342b10228415dd96b5c0caa0387f636a6f"}, - {file = "grpcio_tools-1.54.2-cp38-cp38-win32.whl", hash = "sha256:6037f123905dc0141f7c8383ca616ef0195e79cd3b4d82faaee789d4045e891b"}, - {file = "grpcio_tools-1.54.2-cp38-cp38-win_amd64.whl", hash = "sha256:10dd41862f579d185c60f629b5ee89103e216f63b576079d258d974d980bad87"}, - {file = "grpcio_tools-1.54.2-cp39-cp39-linux_armv7l.whl", hash = "sha256:f6787d07fdab31a32c433c1ba34883dea6559d8a3fbe08fb93d834ca34136b71"}, - {file = "grpcio_tools-1.54.2-cp39-cp39-macosx_10_10_universal2.whl", hash = "sha256:21b1467e31e44429d2a78b50135c9cdbd4b8f6d3b5cd548bc98985d3bdc352d0"}, - {file = "grpcio_tools-1.54.2-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:30a49b8b168aced2a4ff40959e6c4383ad6cfd7a20839a47a215e9837eb722dc"}, - {file = "grpcio_tools-1.54.2-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8742122782953d2fd038f0a199f047a24e941cc9718b1aac90876dbdb7167739"}, - {file = "grpcio_tools-1.54.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:503ef1351c62fb1d6747eaf74932b609d8fdd4345b3591ef910adef8fa9969d0"}, - {file = "grpcio_tools-1.54.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:72d15de4c4b6a764a76c4ae69d99c35f7a0751223688c3f7e62dfa95eb4f61be"}, - {file = "grpcio_tools-1.54.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:df079479fb1b9e488334312e35ebbf30cbf5ecad6c56599f1a961800b33ab7c1"}, - {file = "grpcio_tools-1.54.2-cp39-cp39-win32.whl", hash = "sha256:49c2846dcc4803476e839d8bd4db8845e928f19130e0ea86121f2d1f43d2b452"}, - {file = "grpcio_tools-1.54.2-cp39-cp39-win_amd64.whl", hash = "sha256:b82ca472db9c914c44e39a41e9e8bd3ed724523dd7aff5ce37592b8d16920ed9"}, -] -identify = [ - {file = "identify-2.5.18-py2.py3-none-any.whl", hash = "sha256:93aac7ecf2f6abf879b8f29a8002d3c6de7086b8c28d88e1ad15045a15ab63f9"}, - {file = "identify-2.5.18.tar.gz", hash = "sha256:89e144fa560cc4cffb6ef2ab5e9fb18ed9f9b3cb054384bab4b95c12f6c309fe"}, -] -iniconfig = [ - {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, - {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, -] -invoke = [ - {file = "invoke-1.7.3-py3-none-any.whl", hash = "sha256:d9694a865764dd3fd91f25f7e9a97fb41666e822bbb00e670091e3f43933574d"}, - {file = "invoke-1.7.3.tar.gz", hash = "sha256:41b428342d466a82135d5ab37119685a989713742be46e42a3a399d685579314"}, -] -isort = [ - {file = "isort-4.3.21-py2.py3-none-any.whl", hash = "sha256:6e811fcb295968434526407adb8796944f1988c5b65e8139058f2014cbe100fd"}, - {file = "isort-4.3.21.tar.gz", hash = "sha256:54da7e92468955c4fceacd0c86bd0ec997b0e1ee80d97f67c35a78b719dccab1"}, -] -lxml = [ - {file = "lxml-4.9.1-cp27-cp27m-macosx_10_15_x86_64.whl", 
hash = "sha256:98cafc618614d72b02185ac583c6f7796202062c41d2eeecdf07820bad3295ed"}, - {file = "lxml-4.9.1-cp27-cp27m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c62e8dd9754b7debda0c5ba59d34509c4688f853588d75b53c3791983faa96fc"}, - {file = "lxml-4.9.1-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:21fb3d24ab430fc538a96e9fbb9b150029914805d551deeac7d7822f64631dfc"}, - {file = "lxml-4.9.1-cp27-cp27m-win32.whl", hash = "sha256:86e92728ef3fc842c50a5cb1d5ba2bc66db7da08a7af53fb3da79e202d1b2cd3"}, - {file = "lxml-4.9.1-cp27-cp27m-win_amd64.whl", hash = "sha256:4cfbe42c686f33944e12f45a27d25a492cc0e43e1dc1da5d6a87cbcaf2e95627"}, - {file = "lxml-4.9.1-cp27-cp27mu-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:dad7b164905d3e534883281c050180afcf1e230c3d4a54e8038aa5cfcf312b84"}, - {file = "lxml-4.9.1-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a614e4afed58c14254e67862456d212c4dcceebab2eaa44d627c2ca04bf86837"}, - {file = "lxml-4.9.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:f9ced82717c7ec65a67667bb05865ffe38af0e835cdd78728f1209c8fffe0cad"}, - {file = "lxml-4.9.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:d9fc0bf3ff86c17348dfc5d322f627d78273eba545db865c3cd14b3f19e57fa5"}, - {file = "lxml-4.9.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:e5f66bdf0976ec667fc4594d2812a00b07ed14d1b44259d19a41ae3fff99f2b8"}, - {file = "lxml-4.9.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:fe17d10b97fdf58155f858606bddb4e037b805a60ae023c009f760d8361a4eb8"}, - {file = "lxml-4.9.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8caf4d16b31961e964c62194ea3e26a0e9561cdf72eecb1781458b67ec83423d"}, - {file = "lxml-4.9.1-cp310-cp310-win32.whl", hash = "sha256:4780677767dd52b99f0af1f123bc2c22873d30b474aa0e2fc3fe5e02217687c7"}, - {file = "lxml-4.9.1-cp310-cp310-win_amd64.whl", hash = "sha256:b122a188cd292c4d2fcd78d04f863b789ef43aa129b233d7c9004de08693728b"}, - {file = "lxml-4.9.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:be9eb06489bc975c38706902cbc6888f39e946b81383abc2838d186f0e8b6a9d"}, - {file = "lxml-4.9.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:f1be258c4d3dc609e654a1dc59d37b17d7fef05df912c01fc2e15eb43a9735f3"}, - {file = "lxml-4.9.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:927a9dd016d6033bc12e0bf5dee1dde140235fc8d0d51099353c76081c03dc29"}, - {file = "lxml-4.9.1-cp35-cp35m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9232b09f5efee6a495a99ae6824881940d6447debe272ea400c02e3b68aad85d"}, - {file = "lxml-4.9.1-cp35-cp35m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:04da965dfebb5dac2619cb90fcf93efdb35b3c6994fea58a157a834f2f94b318"}, - {file = "lxml-4.9.1-cp35-cp35m-win32.whl", hash = "sha256:4d5bae0a37af799207140652a700f21a85946f107a199bcb06720b13a4f1f0b7"}, - {file = "lxml-4.9.1-cp35-cp35m-win_amd64.whl", hash = "sha256:4878e667ebabe9b65e785ac8da4d48886fe81193a84bbe49f12acff8f7a383a4"}, - {file = "lxml-4.9.1-cp36-cp36m-macosx_10_15_x86_64.whl", hash = "sha256:1355755b62c28950f9ce123c7a41460ed9743c699905cbe664a5bcc5c9c7c7fb"}, - {file = "lxml-4.9.1-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:bcaa1c495ce623966d9fc8a187da80082334236a2a1c7e141763ffaf7a405067"}, - {file = 
"lxml-4.9.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6eafc048ea3f1b3c136c71a86db393be36b5b3d9c87b1c25204e7d397cee9536"}, - {file = "lxml-4.9.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:13c90064b224e10c14dcdf8086688d3f0e612db53766e7478d7754703295c7c8"}, - {file = "lxml-4.9.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:206a51077773c6c5d2ce1991327cda719063a47adc02bd703c56a662cdb6c58b"}, - {file = "lxml-4.9.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:e8f0c9d65da595cfe91713bc1222af9ecabd37971762cb830dea2fc3b3bb2acf"}, - {file = "lxml-4.9.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:8f0a4d179c9a941eb80c3a63cdb495e539e064f8054230844dcf2fcb812b71d3"}, - {file = "lxml-4.9.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:830c88747dce8a3e7525defa68afd742b4580df6aa2fdd6f0855481e3994d391"}, - {file = "lxml-4.9.1-cp36-cp36m-win32.whl", hash = "sha256:1e1cf47774373777936c5aabad489fef7b1c087dcd1f426b621fda9dcc12994e"}, - {file = "lxml-4.9.1-cp36-cp36m-win_amd64.whl", hash = "sha256:5974895115737a74a00b321e339b9c3f45c20275d226398ae79ac008d908bff7"}, - {file = "lxml-4.9.1-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:1423631e3d51008871299525b541413c9b6c6423593e89f9c4cfbe8460afc0a2"}, - {file = "lxml-4.9.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:2aaf6a0a6465d39b5ca69688fce82d20088c1838534982996ec46633dc7ad6cc"}, - {file = "lxml-4.9.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:9f36de4cd0c262dd9927886cc2305aa3f2210db437aa4fed3fb4940b8bf4592c"}, - {file = "lxml-4.9.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:ae06c1e4bc60ee076292e582a7512f304abdf6c70db59b56745cca1684f875a4"}, - {file = "lxml-4.9.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:57e4d637258703d14171b54203fd6822fda218c6c2658a7d30816b10995f29f3"}, - {file = "lxml-4.9.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6d279033bf614953c3fc4a0aa9ac33a21e8044ca72d4fa8b9273fe75359d5cca"}, - {file = "lxml-4.9.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:a60f90bba4c37962cbf210f0188ecca87daafdf60271f4c6948606e4dabf8785"}, - {file = "lxml-4.9.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:6ca2264f341dd81e41f3fffecec6e446aa2121e0b8d026fb5130e02de1402785"}, - {file = "lxml-4.9.1-cp37-cp37m-win32.whl", hash = "sha256:27e590352c76156f50f538dbcebd1925317a0f70540f7dc8c97d2931c595783a"}, - {file = "lxml-4.9.1-cp37-cp37m-win_amd64.whl", hash = "sha256:eea5d6443b093e1545ad0210e6cf27f920482bfcf5c77cdc8596aec73523bb7e"}, - {file = "lxml-4.9.1-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:f05251bbc2145349b8d0b77c0d4e5f3b228418807b1ee27cefb11f69ed3d233b"}, - {file = "lxml-4.9.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:487c8e61d7acc50b8be82bda8c8d21d20e133c3cbf41bd8ad7eb1aaeb3f07c97"}, - {file = "lxml-4.9.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:8d1a92d8e90b286d491e5626af53afef2ba04da33e82e30744795c71880eaa21"}, - {file = "lxml-4.9.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:b570da8cd0012f4af9fa76a5635cd31f707473e65a5a335b186069d5c7121ff2"}, - {file = "lxml-4.9.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:5ef87fca280fb15342726bd5f980f6faf8b84a5287fcc2d4962ea8af88b35130"}, - {file = "lxml-4.9.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:93e414e3206779ef41e5ff2448067213febf260ba747fc65389a3ddaa3fb8715"}, - {file = "lxml-4.9.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6653071f4f9bac46fbc30f3c7838b0e9063ee335908c5d61fb7a4a86c8fd2036"}, - {file = "lxml-4.9.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:32a73c53783becdb7eaf75a2a1525ea8e49379fb7248c3eeefb9412123536387"}, - {file = "lxml-4.9.1-cp38-cp38-win32.whl", hash = "sha256:1a7c59c6ffd6ef5db362b798f350e24ab2cfa5700d53ac6681918f314a4d3b94"}, - {file = "lxml-4.9.1-cp38-cp38-win_amd64.whl", hash = "sha256:1436cf0063bba7888e43f1ba8d58824f085410ea2025befe81150aceb123e345"}, - {file = "lxml-4.9.1-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:4beea0f31491bc086991b97517b9683e5cfb369205dac0148ef685ac12a20a67"}, - {file = "lxml-4.9.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:41fb58868b816c202e8881fd0f179a4644ce6e7cbbb248ef0283a34b73ec73bb"}, - {file = "lxml-4.9.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:bd34f6d1810d9354dc7e35158aa6cc33456be7706df4420819af6ed966e85448"}, - {file = "lxml-4.9.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:edffbe3c510d8f4bf8640e02ca019e48a9b72357318383ca60e3330c23aaffc7"}, - {file = "lxml-4.9.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6d949f53ad4fc7cf02c44d6678e7ff05ec5f5552b235b9e136bd52e9bf730b91"}, - {file = "lxml-4.9.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:079b68f197c796e42aa80b1f739f058dcee796dc725cc9a1be0cdb08fc45b000"}, - {file = "lxml-4.9.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9c3a88d20e4fe4a2a4a84bf439a5ac9c9aba400b85244c63a1ab7088f85d9d25"}, - {file = "lxml-4.9.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4e285b5f2bf321fc0857b491b5028c5f276ec0c873b985d58d7748ece1d770dd"}, - {file = "lxml-4.9.1-cp39-cp39-win32.whl", hash = "sha256:ef72013e20dd5ba86a8ae1aed7f56f31d3374189aa8b433e7b12ad182c0d2dfb"}, - {file = "lxml-4.9.1-cp39-cp39-win_amd64.whl", hash = "sha256:10d2017f9150248563bb579cd0d07c61c58da85c922b780060dcc9a3aa9f432d"}, - {file = "lxml-4.9.1-pp37-pypy37_pp73-macosx_10_15_x86_64.whl", hash = "sha256:0538747a9d7827ce3e16a8fdd201a99e661c7dee3c96c885d8ecba3c35d1032c"}, - {file = "lxml-4.9.1-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:0645e934e940107e2fdbe7c5b6fb8ec6232444260752598bc4d09511bd056c0b"}, - {file = "lxml-4.9.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:6daa662aba22ef3258934105be2dd9afa5bb45748f4f702a3b39a5bf53a1f4dc"}, - {file = "lxml-4.9.1-pp38-pypy38_pp73-macosx_10_15_x86_64.whl", hash = "sha256:603a464c2e67d8a546ddaa206d98e3246e5db05594b97db844c2f0a1af37cf5b"}, - {file = "lxml-4.9.1-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:c4b2e0559b68455c085fb0f6178e9752c4be3bba104d6e881eb5573b399d1eb2"}, - {file = "lxml-4.9.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:0f3f0059891d3254c7b5fb935330d6db38d6519ecd238ca4fce93c234b4a0f73"}, - {file = "lxml-4.9.1-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = 
"sha256:c852b1530083a620cb0de5f3cd6826f19862bafeaf77586f1aef326e49d95f0c"}, - {file = "lxml-4.9.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:287605bede6bd36e930577c5925fcea17cb30453d96a7b4c63c14a257118dbb9"}, - {file = "lxml-4.9.1.tar.gz", hash = "sha256:fe749b052bb7233fe5d072fcb549221a8cb1a16725c47c37e42b0b9cb3ff2c3f"}, -] -Mako = [ - {file = "Mako-1.2.3-py3-none-any.whl", hash = "sha256:c413a086e38cd885088d5e165305ee8eed04e8b3f8f62df343480da0a385735f"}, - {file = "Mako-1.2.3.tar.gz", hash = "sha256:7fde96466fcfeedb0eed94f187f20b23d85e4cb41444be0e542e2c8c65c396cd"}, -] -MarkupSafe = [ - {file = "MarkupSafe-2.1.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:665a36ae6f8f20a4676b53224e33d456a6f5a72657d9c83c2aa00765072f31f7"}, - {file = "MarkupSafe-2.1.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:340bea174e9761308703ae988e982005aedf427de816d1afe98147668cc03036"}, - {file = "MarkupSafe-2.1.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22152d00bf4a9c7c83960521fc558f55a1adbc0631fbb00a9471e097b19d72e1"}, - {file = "MarkupSafe-2.1.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:28057e985dace2f478e042eaa15606c7efccb700797660629da387eb289b9323"}, - {file = "MarkupSafe-2.1.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca244fa73f50a800cf8c3ebf7fd93149ec37f5cb9596aa8873ae2c1d23498601"}, - {file = "MarkupSafe-2.1.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d9d971ec1e79906046aa3ca266de79eac42f1dbf3612a05dc9368125952bd1a1"}, - {file = "MarkupSafe-2.1.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:7e007132af78ea9df29495dbf7b5824cb71648d7133cf7848a2a5dd00d36f9ff"}, - {file = "MarkupSafe-2.1.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7313ce6a199651c4ed9d7e4cfb4aa56fe923b1adf9af3b420ee14e6d9a73df65"}, - {file = "MarkupSafe-2.1.2-cp310-cp310-win32.whl", hash = "sha256:c4a549890a45f57f1ebf99c067a4ad0cb423a05544accaf2b065246827ed9603"}, - {file = "MarkupSafe-2.1.2-cp310-cp310-win_amd64.whl", hash = "sha256:835fb5e38fd89328e9c81067fd642b3593c33e1e17e2fdbf77f5676abb14a156"}, - {file = "MarkupSafe-2.1.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:2ec4f2d48ae59bbb9d1f9d7efb9236ab81429a764dedca114f5fdabbc3788013"}, - {file = "MarkupSafe-2.1.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:608e7073dfa9e38a85d38474c082d4281f4ce276ac0010224eaba11e929dd53a"}, - {file = "MarkupSafe-2.1.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:65608c35bfb8a76763f37036547f7adfd09270fbdbf96608be2bead319728fcd"}, - {file = "MarkupSafe-2.1.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2bfb563d0211ce16b63c7cb9395d2c682a23187f54c3d79bfec33e6705473c6"}, - {file = "MarkupSafe-2.1.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:da25303d91526aac3672ee6d49a2f3db2d9502a4a60b55519feb1a4c7714e07d"}, - {file = "MarkupSafe-2.1.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:9cad97ab29dfc3f0249b483412c85c8ef4766d96cdf9dcf5a1e3caa3f3661cf1"}, - {file = "MarkupSafe-2.1.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:085fd3201e7b12809f9e6e9bc1e5c96a368c8523fad5afb02afe3c051ae4afcc"}, - {file = "MarkupSafe-2.1.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1bea30e9bf331f3fef67e0a3877b2288593c98a21ccb2cf29b74c581a4eb3af0"}, - {file = 
"MarkupSafe-2.1.2-cp311-cp311-win32.whl", hash = "sha256:7df70907e00c970c60b9ef2938d894a9381f38e6b9db73c5be35e59d92e06625"}, - {file = "MarkupSafe-2.1.2-cp311-cp311-win_amd64.whl", hash = "sha256:e55e40ff0cc8cc5c07996915ad367fa47da6b3fc091fdadca7f5403239c5fec3"}, - {file = "MarkupSafe-2.1.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a6e40afa7f45939ca356f348c8e23048e02cb109ced1eb8420961b2f40fb373a"}, - {file = "MarkupSafe-2.1.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf877ab4ed6e302ec1d04952ca358b381a882fbd9d1b07cccbfd61783561f98a"}, - {file = "MarkupSafe-2.1.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63ba06c9941e46fa389d389644e2d8225e0e3e5ebcc4ff1ea8506dce646f8c8a"}, - {file = "MarkupSafe-2.1.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f1cd098434e83e656abf198f103a8207a8187c0fc110306691a2e94a78d0abb2"}, - {file = "MarkupSafe-2.1.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:55f44b440d491028addb3b88f72207d71eeebfb7b5dbf0643f7c023ae1fba619"}, - {file = "MarkupSafe-2.1.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:a6f2fcca746e8d5910e18782f976489939d54a91f9411c32051b4aab2bd7c513"}, - {file = "MarkupSafe-2.1.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:0b462104ba25f1ac006fdab8b6a01ebbfbce9ed37fd37fd4acd70c67c973e460"}, - {file = "MarkupSafe-2.1.2-cp37-cp37m-win32.whl", hash = "sha256:7668b52e102d0ed87cb082380a7e2e1e78737ddecdde129acadb0eccc5423859"}, - {file = "MarkupSafe-2.1.2-cp37-cp37m-win_amd64.whl", hash = "sha256:6d6607f98fcf17e534162f0709aaad3ab7a96032723d8ac8750ffe17ae5a0666"}, - {file = "MarkupSafe-2.1.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:a806db027852538d2ad7555b203300173dd1b77ba116de92da9afbc3a3be3eed"}, - {file = "MarkupSafe-2.1.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:a4abaec6ca3ad8660690236d11bfe28dfd707778e2442b45addd2f086d6ef094"}, - {file = "MarkupSafe-2.1.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f03a532d7dee1bed20bc4884194a16160a2de9ffc6354b3878ec9682bb623c54"}, - {file = "MarkupSafe-2.1.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4cf06cdc1dda95223e9d2d3c58d3b178aa5dacb35ee7e3bbac10e4e1faacb419"}, - {file = "MarkupSafe-2.1.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:22731d79ed2eb25059ae3df1dfc9cb1546691cc41f4e3130fe6bfbc3ecbbecfa"}, - {file = "MarkupSafe-2.1.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f8ffb705ffcf5ddd0e80b65ddf7bed7ee4f5a441ea7d3419e861a12eaf41af58"}, - {file = "MarkupSafe-2.1.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:8db032bf0ce9022a8e41a22598eefc802314e81b879ae093f36ce9ddf39ab1ba"}, - {file = "MarkupSafe-2.1.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2298c859cfc5463f1b64bd55cb3e602528db6fa0f3cfd568d3605c50678f8f03"}, - {file = "MarkupSafe-2.1.2-cp38-cp38-win32.whl", hash = "sha256:50c42830a633fa0cf9e7d27664637532791bfc31c731a87b202d2d8ac40c3ea2"}, - {file = "MarkupSafe-2.1.2-cp38-cp38-win_amd64.whl", hash = "sha256:bb06feb762bade6bf3c8b844462274db0c76acc95c52abe8dbed28ae3d44a147"}, - {file = "MarkupSafe-2.1.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:99625a92da8229df6d44335e6fcc558a5037dd0a760e11d84be2260e6f37002f"}, - {file = "MarkupSafe-2.1.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8bca7e26c1dd751236cfb0c6c72d4ad61d986e9a41bbf76cb445f69488b2a2bd"}, - {file = 
"MarkupSafe-2.1.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40627dcf047dadb22cd25ea7ecfe9cbf3bbbad0482ee5920b582f3809c97654f"}, - {file = "MarkupSafe-2.1.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40dfd3fefbef579ee058f139733ac336312663c6706d1163b82b3003fb1925c4"}, - {file = "MarkupSafe-2.1.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:090376d812fb6ac5f171e5938e82e7f2d7adc2b629101cec0db8b267815c85e2"}, - {file = "MarkupSafe-2.1.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:2e7821bffe00aa6bd07a23913b7f4e01328c3d5cc0b40b36c0bd81d362faeb65"}, - {file = "MarkupSafe-2.1.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:c0a33bc9f02c2b17c3ea382f91b4db0e6cde90b63b296422a939886a7a80de1c"}, - {file = "MarkupSafe-2.1.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b8526c6d437855442cdd3d87eede9c425c4445ea011ca38d937db299382e6fa3"}, - {file = "MarkupSafe-2.1.2-cp39-cp39-win32.whl", hash = "sha256:137678c63c977754abe9086a3ec011e8fd985ab90631145dfb9294ad09c102a7"}, - {file = "MarkupSafe-2.1.2-cp39-cp39-win_amd64.whl", hash = "sha256:0576fe974b40a400449768941d5d0858cc624e3249dfd1e0c33674e5c7ca7aed"}, - {file = "MarkupSafe-2.1.2.tar.gz", hash = "sha256:abcabc8c2b26036d62d4c746381a6f7cf60aafcc653198ad678306986b09450d"}, -] -mccabe = [ - {file = "mccabe-0.6.1-py2.py3-none-any.whl", hash = "sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42"}, - {file = "mccabe-0.6.1.tar.gz", hash = "sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f"}, -] -mock = [ - {file = "mock-4.0.2-py3-none-any.whl", hash = "sha256:3f9b2c0196c60d21838f307f5825a7b86b678cedc58ab9e50a8988187b4d81e0"}, - {file = "mock-4.0.2.tar.gz", hash = "sha256:dd33eb70232b6118298d516bbcecd26704689c386594f0f3c4f13867b2c56f72"}, -] -mypy-extensions = [ - {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, - {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, -] -netaddr = [ - {file = "netaddr-0.7.19-py2.py3-none-any.whl", hash = "sha256:56b3558bd71f3f6999e4c52e349f38660e54a7a8a9943335f73dfc96883e08ca"}, - {file = "netaddr-0.7.19.tar.gz", hash = "sha256:38aeec7cdd035081d3a4c306394b19d677623bf76fa0913f6695127c7753aefd"}, -] -nodeenv = [ - {file = "nodeenv-1.7.0-py2.py3-none-any.whl", hash = "sha256:27083a7b96a25f2f5e1d8cb4b6317ee8aeda3bdd121394e5ac54e498028a042e"}, - {file = "nodeenv-1.7.0.tar.gz", hash = "sha256:e0e7f7dfb85fc5394c6fe1e8fa98131a2473e04311a45afb6508f7cf1836fa2b"}, -] -packaging = [ - {file = "packaging-23.0-py3-none-any.whl", hash = "sha256:714ac14496c3e68c99c29b00845f7a2b85f3bb6f1078fd9f72fd20f0570002b2"}, - {file = "packaging-23.0.tar.gz", hash = "sha256:b6ad297f8907de0fa2fe1ccbd26fdaf387f5f47c7275fedf8cce89f99446cf97"}, -] -paramiko = [ - {file = "paramiko-3.0.0-py3-none-any.whl", hash = "sha256:6bef55b882c9d130f8015b9a26f4bd93f710e90fe7478b9dcc810304e79b3cd8"}, - {file = "paramiko-3.0.0.tar.gz", hash = "sha256:fedc9b1dd43bc1d45f67f1ceca10bc336605427a46dcdf8dec6bfea3edf57965"}, -] -pathlib2 = [ - {file = "pathlib2-2.3.7.post1-py2.py3-none-any.whl", hash = "sha256:5266a0fd000452f1b3467d782f079a4343c63aaa119221fbdc4e39577489ca5b"}, - {file = "pathlib2-2.3.7.post1.tar.gz", hash = "sha256:9fe0edad898b83c0c3e199c842b27ed216645d2e177757b2dd67384d4113c641"}, -] -pathspec = [ - {file = 
"pathspec-0.11.0-py3-none-any.whl", hash = "sha256:3a66eb970cbac598f9e5ccb5b2cf58930cd8e3ed86d393d541eaf2d8b1705229"}, - {file = "pathspec-0.11.0.tar.gz", hash = "sha256:64d338d4e0914e91c1792321e6907b5a593f1ab1851de7fc269557a21b30ebbc"}, -] -Pillow = [ - {file = "Pillow-9.4.0-1-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1b4b4e9dda4f4e4c4e6896f93e84a8f0bcca3b059de9ddf67dac3c334b1195e1"}, - {file = "Pillow-9.4.0-1-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:fb5c1ad6bad98c57482236a21bf985ab0ef42bd51f7ad4e4538e89a997624e12"}, - {file = "Pillow-9.4.0-1-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:f0caf4a5dcf610d96c3bd32932bfac8aee61c96e60481c2a0ea58da435e25acd"}, - {file = "Pillow-9.4.0-1-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:3f4cc516e0b264c8d4ccd6b6cbc69a07c6d582d8337df79be1e15a5056b258c9"}, - {file = "Pillow-9.4.0-1-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:b8c2f6eb0df979ee99433d8b3f6d193d9590f735cf12274c108bd954e30ca858"}, - {file = "Pillow-9.4.0-1-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b70756ec9417c34e097f987b4d8c510975216ad26ba6e57ccb53bc758f490dab"}, - {file = "Pillow-9.4.0-1-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:43521ce2c4b865d385e78579a082b6ad1166ebed2b1a2293c3be1d68dd7ca3b9"}, - {file = "Pillow-9.4.0-2-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:9d9a62576b68cd90f7075876f4e8444487db5eeea0e4df3ba298ee38a8d067b0"}, - {file = "Pillow-9.4.0-2-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:87708d78a14d56a990fbf4f9cb350b7d89ee8988705e58e39bdf4d82c149210f"}, - {file = "Pillow-9.4.0-2-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:8a2b5874d17e72dfb80d917213abd55d7e1ed2479f38f001f264f7ce7bae757c"}, - {file = "Pillow-9.4.0-2-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:83125753a60cfc8c412de5896d10a0a405e0bd88d0470ad82e0869ddf0cb3848"}, - {file = "Pillow-9.4.0-2-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:9e5f94742033898bfe84c93c831a6f552bb629448d4072dd312306bab3bd96f1"}, - {file = "Pillow-9.4.0-2-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:013016af6b3a12a2f40b704677f8b51f72cb007dac785a9933d5c86a72a7fe33"}, - {file = "Pillow-9.4.0-2-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:99d92d148dd03fd19d16175b6d355cc1b01faf80dae93c6c3eb4163709edc0a9"}, - {file = "Pillow-9.4.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:2968c58feca624bb6c8502f9564dd187d0e1389964898f5e9e1fbc8533169157"}, - {file = "Pillow-9.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c5c1362c14aee73f50143d74389b2c158707b4abce2cb055b7ad37ce60738d47"}, - {file = "Pillow-9.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd752c5ff1b4a870b7661234694f24b1d2b9076b8bf337321a814c612665f343"}, - {file = "Pillow-9.4.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9a3049a10261d7f2b6514d35bbb7a4dfc3ece4c4de14ef5876c4b7a23a0e566d"}, - {file = "Pillow-9.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:16a8df99701f9095bea8a6c4b3197da105df6f74e6176c5b410bc2df2fd29a57"}, - {file = "Pillow-9.4.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:94cdff45173b1919350601f82d61365e792895e3c3a3443cf99819e6fbf717a5"}, - {file = "Pillow-9.4.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:ed3e4b4e1e6de75fdc16d3259098de7c6571b1a6cc863b1a49e7d3d53e036070"}, - {file = "Pillow-9.4.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d5b2f8a31bd43e0f18172d8ac82347c8f37ef3e0b414431157718aa234991b28"}, - 
{file = "Pillow-9.4.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:09b89ddc95c248ee788328528e6a2996e09eaccddeeb82a5356e92645733be35"}, - {file = "Pillow-9.4.0-cp310-cp310-win32.whl", hash = "sha256:f09598b416ba39a8f489c124447b007fe865f786a89dbfa48bb5cf395693132a"}, - {file = "Pillow-9.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:f6e78171be3fb7941f9910ea15b4b14ec27725865a73c15277bc39f5ca4f8391"}, - {file = "Pillow-9.4.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:3fa1284762aacca6dc97474ee9c16f83990b8eeb6697f2ba17140d54b453e133"}, - {file = "Pillow-9.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:eaef5d2de3c7e9b21f1e762f289d17b726c2239a42b11e25446abf82b26ac132"}, - {file = "Pillow-9.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a4dfdae195335abb4e89cc9762b2edc524f3c6e80d647a9a81bf81e17e3fb6f0"}, - {file = "Pillow-9.4.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6abfb51a82e919e3933eb137e17c4ae9c0475a25508ea88993bb59faf82f3b35"}, - {file = "Pillow-9.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:451f10ef963918e65b8869e17d67db5e2f4ab40e716ee6ce7129b0cde2876eab"}, - {file = "Pillow-9.4.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:6663977496d616b618b6cfa43ec86e479ee62b942e1da76a2c3daa1c75933ef4"}, - {file = "Pillow-9.4.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:60e7da3a3ad1812c128750fc1bc14a7ceeb8d29f77e0a2356a8fb2aa8925287d"}, - {file = "Pillow-9.4.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:19005a8e58b7c1796bc0167862b1f54a64d3b44ee5d48152b06bb861458bc0f8"}, - {file = "Pillow-9.4.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f715c32e774a60a337b2bb8ad9839b4abf75b267a0f18806f6f4f5f1688c4b5a"}, - {file = "Pillow-9.4.0-cp311-cp311-win32.whl", hash = "sha256:b222090c455d6d1a64e6b7bb5f4035c4dff479e22455c9eaa1bdd4c75b52c80c"}, - {file = "Pillow-9.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:ba6612b6548220ff5e9df85261bddc811a057b0b465a1226b39bfb8550616aee"}, - {file = "Pillow-9.4.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:5f532a2ad4d174eb73494e7397988e22bf427f91acc8e6ebf5bb10597b49c493"}, - {file = "Pillow-9.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5dd5a9c3091a0f414a963d427f920368e2b6a4c2f7527fdd82cde8ef0bc7a327"}, - {file = "Pillow-9.4.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ef21af928e807f10bf4141cad4746eee692a0dd3ff56cfb25fce076ec3cc8abe"}, - {file = "Pillow-9.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:847b114580c5cc9ebaf216dd8c8dbc6b00a3b7ab0131e173d7120e6deade1f57"}, - {file = "Pillow-9.4.0-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:653d7fb2df65efefbcbf81ef5fe5e5be931f1ee4332c2893ca638c9b11a409c4"}, - {file = "Pillow-9.4.0-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:46f39cab8bbf4a384ba7cb0bc8bae7b7062b6a11cfac1ca4bc144dea90d4a9f5"}, - {file = "Pillow-9.4.0-cp37-cp37m-win32.whl", hash = "sha256:7ac7594397698f77bce84382929747130765f66406dc2cd8b4ab4da68ade4c6e"}, - {file = "Pillow-9.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:46c259e87199041583658457372a183636ae8cd56dbf3f0755e0f376a7f9d0e6"}, - {file = "Pillow-9.4.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:0e51f608da093e5d9038c592b5b575cadc12fd748af1479b5e858045fff955a9"}, - {file = "Pillow-9.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:765cb54c0b8724a7c12c55146ae4647e0274a839fb6de7bcba841e04298e1011"}, - {file = 
"Pillow-9.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:519e14e2c49fcf7616d6d2cfc5c70adae95682ae20f0395e9280db85e8d6c4df"}, - {file = "Pillow-9.4.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d197df5489004db87d90b918033edbeee0bd6df3848a204bca3ff0a903bef837"}, - {file = "Pillow-9.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0845adc64fe9886db00f5ab68c4a8cd933ab749a87747555cec1c95acea64b0b"}, - {file = "Pillow-9.4.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:e1339790c083c5a4de48f688b4841f18df839eb3c9584a770cbd818b33e26d5d"}, - {file = "Pillow-9.4.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:a96e6e23f2b79433390273eaf8cc94fec9c6370842e577ab10dabdcc7ea0a66b"}, - {file = "Pillow-9.4.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:7cfc287da09f9d2a7ec146ee4d72d6ea1342e770d975e49a8621bf54eaa8f30f"}, - {file = "Pillow-9.4.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d7081c084ceb58278dd3cf81f836bc818978c0ccc770cbbb202125ddabec6628"}, - {file = "Pillow-9.4.0-cp38-cp38-win32.whl", hash = "sha256:df41112ccce5d47770a0c13651479fbcd8793f34232a2dd9faeccb75eb5d0d0d"}, - {file = "Pillow-9.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:7a21222644ab69ddd9967cfe6f2bb420b460dae4289c9d40ff9a4896e7c35c9a"}, - {file = "Pillow-9.4.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:0f3269304c1a7ce82f1759c12ce731ef9b6e95b6df829dccd9fe42912cc48569"}, - {file = "Pillow-9.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cb362e3b0976dc994857391b776ddaa8c13c28a16f80ac6522c23d5257156bed"}, - {file = "Pillow-9.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a2e0f87144fcbbe54297cae708c5e7f9da21a4646523456b00cc956bd4c65815"}, - {file = "Pillow-9.4.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:28676836c7796805914b76b1837a40f76827ee0d5398f72f7dcc634bae7c6264"}, - {file = "Pillow-9.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0884ba7b515163a1a05440a138adeb722b8a6ae2c2b33aea93ea3118dd3a899e"}, - {file = "Pillow-9.4.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:53dcb50fbdc3fb2c55431a9b30caeb2f7027fcd2aeb501459464f0214200a503"}, - {file = "Pillow-9.4.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:e8c5cf126889a4de385c02a2c3d3aba4b00f70234bfddae82a5eaa3ee6d5e3e6"}, - {file = "Pillow-9.4.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:6c6b1389ed66cdd174d040105123a5a1bc91d0aa7059c7261d20e583b6d8cbd2"}, - {file = "Pillow-9.4.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0dd4c681b82214b36273c18ca7ee87065a50e013112eea7d78c7a1b89a739153"}, - {file = "Pillow-9.4.0-cp39-cp39-win32.whl", hash = "sha256:6d9dfb9959a3b0039ee06c1a1a90dc23bac3b430842dcb97908ddde05870601c"}, - {file = "Pillow-9.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:54614444887e0d3043557d9dbc697dbb16cfb5a35d672b7a0fcc1ed0cf1c600b"}, - {file = "Pillow-9.4.0-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b9b752ab91e78234941e44abdecc07f1f0d8f51fb62941d32995b8161f68cfe5"}, - {file = "Pillow-9.4.0-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d3b56206244dc8711f7e8b7d6cad4663917cd5b2d950799425076681e8766286"}, - {file = "Pillow-9.4.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aabdab8ec1e7ca7f1434d042bf8b1e92056245fb179790dc97ed040361f16bfd"}, - {file = "Pillow-9.4.0-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = 
"sha256:db74f5562c09953b2c5f8ec4b7dfd3f5421f31811e97d1dbc0a7c93d6e3a24df"}, - {file = "Pillow-9.4.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:e9d7747847c53a16a729b6ee5e737cf170f7a16611c143d95aa60a109a59c336"}, - {file = "Pillow-9.4.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b52ff4f4e002f828ea6483faf4c4e8deea8d743cf801b74910243c58acc6eda3"}, - {file = "Pillow-9.4.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:575d8912dca808edd9acd6f7795199332696d3469665ef26163cd090fa1f8bfa"}, - {file = "Pillow-9.4.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3c4ed2ff6760e98d262e0cc9c9a7f7b8a9f61aa4d47c58835cdaf7b0b8811bb"}, - {file = "Pillow-9.4.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e621b0246192d3b9cb1dc62c78cfa4c6f6d2ddc0ec207d43c0dedecb914f152a"}, - {file = "Pillow-9.4.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:8f127e7b028900421cad64f51f75c051b628db17fb00e099eb148761eed598c9"}, - {file = "Pillow-9.4.0.tar.gz", hash = "sha256:a1c2d7780448eb93fbcc3789bf3916aa5720d942e37945f4056680317f1cd23e"}, -] -platformdirs = [ - {file = "platformdirs-3.0.0-py3-none-any.whl", hash = "sha256:b1d5eb14f221506f50d6604a561f4c5786d9e80355219694a1b244bcd96f4567"}, - {file = "platformdirs-3.0.0.tar.gz", hash = "sha256:8a1228abb1ef82d788f74139988b137e78692984ec7b08eaa6c65f1723af28f9"}, -] -pluggy = [ - {file = "pluggy-1.0.0-py2.py3-none-any.whl", hash = "sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3"}, - {file = "pluggy-1.0.0.tar.gz", hash = "sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159"}, -] -pre-commit = [ - {file = "pre_commit-2.1.1-py2.py3-none-any.whl", hash = "sha256:09ebe467f43ce24377f8c2f200fe3cd2570d328eb2ce0568c8e96ce19da45fa6"}, - {file = "pre_commit-2.1.1.tar.gz", hash = "sha256:f8d555e31e2051892c7f7b3ad9f620bd2c09271d87e9eedb2ad831737d6211eb"}, -] -protobuf = [ - {file = "protobuf-4.21.9-cp310-abi3-win32.whl", hash = "sha256:6e0be9f09bf9b6cf497b27425487706fa48c6d1632ddd94dab1a5fe11a422392"}, - {file = "protobuf-4.21.9-cp310-abi3-win_amd64.whl", hash = "sha256:a7d0ea43949d45b836234f4ebb5ba0b22e7432d065394b532cdca8f98415e3cf"}, - {file = "protobuf-4.21.9-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:b5ab0b8918c136345ff045d4b3d5f719b505b7c8af45092d7f45e304f55e50a1"}, - {file = "protobuf-4.21.9-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:2c9c2ed7466ad565f18668aa4731c535511c5d9a40c6da39524bccf43e441719"}, - {file = "protobuf-4.21.9-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:e575c57dc8b5b2b2caa436c16d44ef6981f2235eb7179bfc847557886376d740"}, - {file = "protobuf-4.21.9-cp37-cp37m-win32.whl", hash = "sha256:9227c14010acd9ae7702d6467b4625b6fe853175a6b150e539b21d2b2f2b409c"}, - {file = "protobuf-4.21.9-cp37-cp37m-win_amd64.whl", hash = "sha256:a419cc95fca8694804709b8c4f2326266d29659b126a93befe210f5bbc772536"}, - {file = "protobuf-4.21.9-cp38-cp38-win32.whl", hash = "sha256:5b0834e61fb38f34ba8840d7dcb2e5a2f03de0c714e0293b3963b79db26de8ce"}, - {file = "protobuf-4.21.9-cp38-cp38-win_amd64.whl", hash = "sha256:84ea107016244dfc1eecae7684f7ce13c788b9a644cd3fca5b77871366556444"}, - {file = "protobuf-4.21.9-cp39-cp39-win32.whl", hash = "sha256:f9eae277dd240ae19bb06ff4e2346e771252b0e619421965504bd1b1bba7c5fa"}, - {file = "protobuf-4.21.9-cp39-cp39-win_amd64.whl", hash = "sha256:6e312e280fbe3c74ea9e080d9e6080b636798b5e3939242298b591064470b06b"}, - {file = "protobuf-4.21.9-py2.py3-none-any.whl", hash = 
"sha256:7eb8f2cc41a34e9c956c256e3ac766cf4e1a4c9c925dc757a41a01be3e852965"}, - {file = "protobuf-4.21.9-py3-none-any.whl", hash = "sha256:48e2cd6b88c6ed3d5877a3ea40df79d08374088e89bedc32557348848dff250b"}, - {file = "protobuf-4.21.9.tar.gz", hash = "sha256:61f21493d96d2a77f9ca84fefa105872550ab5ef71d21c458eb80edcf4885a99"}, -] -py = [ - {file = "py-1.11.0-py2.py3-none-any.whl", hash = "sha256:607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378"}, - {file = "py-1.11.0.tar.gz", hash = "sha256:51c75c4126074b472f746a24399ad32f6053d1b34b68d2fa41e558e6f4a98719"}, -] -pycodestyle = [ - {file = "pycodestyle-2.6.0-py2.py3-none-any.whl", hash = "sha256:2295e7b2f6b5bd100585ebcb1f616591b652db8a741695b3d8f5d28bdc934367"}, - {file = "pycodestyle-2.6.0.tar.gz", hash = "sha256:c58a7d2815e0e8d7972bf1803331fb0152f867bd89adf8a01dfd55085434192e"}, -] -pycparser = [ - {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"}, - {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"}, -] -pyflakes = [ - {file = "pyflakes-2.2.0-py2.py3-none-any.whl", hash = "sha256:0d94e0e05a19e57a99444b6ddcf9a6eb2e5c68d3ca1e98e90707af8152c90a92"}, - {file = "pyflakes-2.2.0.tar.gz", hash = "sha256:35b2d75ee967ea93b55750aa9edbbf72813e06a66ba54438df2cfac9e3c27fc8"}, -] -PyNaCl = [ - {file = "PyNaCl-1.5.0-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:401002a4aaa07c9414132aaed7f6836ff98f59277a234704ff66878c2ee4a0d1"}, - {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:52cb72a79269189d4e0dc537556f4740f7f0a9ec41c1322598799b0bdad4ef92"}, - {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a36d4a9dda1f19ce6e03c9a784a2921a4b726b02e1c736600ca9c22029474394"}, - {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:0c84947a22519e013607c9be43706dd42513f9e6ae5d39d3613ca1e142fba44d"}, - {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:06b8f6fa7f5de8d5d2f7573fe8c863c051225a27b61e6860fd047b1775807858"}, - {file = "PyNaCl-1.5.0-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:a422368fc821589c228f4c49438a368831cb5bbc0eab5ebe1d7fac9dded6567b"}, - {file = "PyNaCl-1.5.0-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:61f642bf2378713e2c2e1de73444a3778e5f0a38be6fee0fe532fe30060282ff"}, - {file = "PyNaCl-1.5.0-cp36-abi3-win32.whl", hash = "sha256:e46dae94e34b085175f8abb3b0aaa7da40767865ac82c928eeb9e57e1ea8a543"}, - {file = "PyNaCl-1.5.0-cp36-abi3-win_amd64.whl", hash = "sha256:20f42270d27e1b6a29f54032090b972d97f0a1b0948cc52392041ef7831fee93"}, - {file = "PyNaCl-1.5.0.tar.gz", hash = "sha256:8ac7448f09ab85811607bdd21ec2464495ac8b7c66d146bf545b0f08fb9220ba"}, -] -pyproj = [ - {file = "pyproj-3.3.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:473961faef7a9fd723c5d432f65220ea6ab3854e606bf84b4d409a75a4261c78"}, - {file = "pyproj-3.3.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:07c9d8d7ec009bbac09e233cfc725601586fe06880e5538a3a44eaf560ba3a62"}, - {file = "pyproj-3.3.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2fef9c1e339f25c57f6ae0558b5ab1bbdf7994529a30d8d7504fc6302ea51c03"}, - {file = "pyproj-3.3.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:140fa649fedd04f680a39f8ad339799a55cb1c49f6a84e1b32b97e49646647aa"}, - {file = "pyproj-3.3.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b59c08aea13ee428cf8a919212d55c036cc94784805ed77c8f31a4d1f541058c"}, - {file = "pyproj-3.3.1-cp310-cp310-win32.whl", hash = "sha256:1adc9ccd1bf04998493b6a2e87e60656c75ab790653b36cfe351e9ef214828ed"}, - {file = "pyproj-3.3.1-cp310-cp310-win_amd64.whl", hash = "sha256:42eea10afc750fccd1c5c4ba56de29ab791ab4d83c1f7db72705566282ac5396"}, - {file = "pyproj-3.3.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:531ea36519fa7b581466d4b6ab32f66ae4dadd9499d726352f71ee5e19c3d1c5"}, - {file = "pyproj-3.3.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:67025e37598a6bbed2c9c6c9e4c911f6dd39315d3e1148ead935a5c4d64309d5"}, - {file = "pyproj-3.3.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aed1a3c0cd4182425f91b48d5db39f459bc2fe0d88017ead6425a1bc85faee33"}, - {file = "pyproj-3.3.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3cc4771403db54494e1e55bca8e6d33cde322f8cf0ed39f1557ff109c66d2cd1"}, - {file = "pyproj-3.3.1-cp38-cp38-win32.whl", hash = "sha256:c99f7b5757a28040a2dd4a28c9805fdf13eef79a796f4a566ab5cb362d10630d"}, - {file = "pyproj-3.3.1-cp38-cp38-win_amd64.whl", hash = "sha256:5dac03d4338a4c8bd0f69144c527474f517b4cbd7d2d8c532cd8937799723248"}, - {file = "pyproj-3.3.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:56b0f9ee2c5b2520b18db30a393a7b86130cf527ddbb8c96e7f3c837474a9d79"}, - {file = "pyproj-3.3.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f1032e5dfb50eae06382bcc7b9011b994f7104d932fe91bd83a722275e30e8ce"}, - {file = "pyproj-3.3.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f92d8f6514516124abb714dce912b20867831162cfff9fae2678ef07b6fcf0f"}, - {file = "pyproj-3.3.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1ef1bfbe2dcc558c7a98e2f1836abdcd630390f3160724a6f4f5c818b2be0ad5"}, - {file = "pyproj-3.3.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ca5f32b56210429b367ca4f9a57ffe67975c487af82e179a24370879a3daf68"}, - {file = "pyproj-3.3.1-cp39-cp39-win32.whl", hash = "sha256:aba199704c824fb84ab64927e7bc9ef71e603e483130ec0f7e09e97259b8f61f"}, - {file = "pyproj-3.3.1-cp39-cp39-win_amd64.whl", hash = "sha256:120d45ed73144c65e9677dc73ba8a531c495d179dd9f9f0471ac5acc02d7ac4b"}, - {file = "pyproj-3.3.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:52efb681647dfac185cc655a709bc0caaf910031a0390f816f5fc8ce150cbedc"}, - {file = "pyproj-3.3.1-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5ab0d6e38fda7c13726afacaf62e9f9dd858089d67910471758afd9cb24e0ecd"}, - {file = "pyproj-3.3.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45487942c19c5a8b09c91964ea3201f4e094518e34743cae373889a36e3d9260"}, - {file = "pyproj-3.3.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:797ad5655d484feac14b0fbb4a4efeaac0cf780a223046e2465494c767fd1c3b"}, - {file = "pyproj-3.3.1.tar.gz", hash = "sha256:b3d8e14d91cc95fb3dbc03a9d0588ac58326803eefa5bbb0978d109de3304fbe"}, -] -pytest = [ - {file = "pytest-6.2.5-py3-none-any.whl", hash = "sha256:7310f8d27bc79ced999e760ca304d69f6ba6c6649c0b60fb0e04a4a77cacc134"}, - {file = "pytest-6.2.5.tar.gz", hash = "sha256:131b36680866a76e6781d13f101efb86cf674ebb9762eb70d3082b6f29889e89"}, -] -PyYAML = [ - {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"}, - {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"}, - {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"}, - {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"}, - {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"}, - {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"}, - {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"}, - {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"}, - {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"}, - {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"}, - {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"}, - {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"}, - {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"}, - {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"}, - {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"}, - {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"}, - {file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"}, - {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"}, - {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"}, - {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"}, - {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"}, - {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"}, - {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"}, - {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"}, - {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"}, - {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"}, - {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"}, - {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, -] -setuptools = [ - {file = "setuptools-67.4.0-py3-none-any.whl", hash = "sha256:f106dee1b506dee5102cc3f3e9e68137bbad6d47b616be7991714b0c62204251"}, - {file = "setuptools-67.4.0.tar.gz", hash = "sha256:e5fd0a713141a4a105412233c63dc4e17ba0090c8e8334594ac790ec97792330"}, -] -six = [ - {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, - {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, -] -toml = [ - {file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"}, - {file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"}, -] -tomli = [ - {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, - {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, -] -typing-extensions = [ - {file = "typing_extensions-4.5.0-py3-none-any.whl", hash = 
"sha256:fb33085c39dd998ac16d1431ebc293a8b3eedd00fd4a32de0ff79002c19511b4"}, - {file = "typing_extensions-4.5.0.tar.gz", hash = "sha256:5cb5f4a79139d699607b3ef622a1dedafa84e115ab0024e0d9c044a9479ca7cb"}, -] -virtualenv = [ - {file = "virtualenv-20.19.0-py3-none-any.whl", hash = "sha256:54eb59e7352b573aa04d53f80fc9736ed0ad5143af445a1e539aada6eb947dd1"}, - {file = "virtualenv-20.19.0.tar.gz", hash = "sha256:37a640ba82ed40b226599c522d411e4be5edb339a0c0de030c0dc7b646d61590"}, -] diff --git a/daemon/proto/Makefile.am b/daemon/proto/Makefile.am index af535c1e..bcf62a2c 100644 --- a/daemon/proto/Makefile.am +++ b/daemon/proto/Makefile.am @@ -1,6 +1,5 @@ all: - $(PYTHON) -m grpc_tools.protoc -I . --python_out=.. core/api/grpc/*.proto - $(PYTHON) -m grpc_tools.protoc -I . --grpc_python_out=.. core/api/grpc/core.proto + $(PYTHON) -m grpc_tools.protoc -I . --python_out=../core/grpc --grpc_python_out=../core/grpc core.proto clean: - -rm -f ../core/api/grpc/*_pb2* + -rm -f ../core/grpc/core_pb2* diff --git a/daemon/proto/core.proto b/daemon/proto/core.proto new file mode 100644 index 00000000..45fd597e --- /dev/null +++ b/daemon/proto/core.proto @@ -0,0 +1,778 @@ +syntax = "proto3"; + +package core; + +service CoreApi { + // session rpc + rpc CreateSession (CreateSessionRequest) returns (CreateSessionResponse) { + } + rpc DeleteSession (DeleteSessionRequest) returns (DeleteSessionResponse) { + } + rpc GetSessions (GetSessionsRequest) returns (GetSessionsResponse) { + } + rpc GetSession (GetSessionRequest) returns (GetSessionResponse) { + } + rpc GetSessionOptions (GetSessionOptionsRequest) returns (GetSessionOptionsResponse) { + } + rpc SetSessionOptions (SetSessionOptionsRequest) returns (SetSessionOptionsResponse) { + } + rpc GetSessionLocation (GetSessionLocationRequest) returns (GetSessionLocationResponse) { + } + rpc SetSessionLocation (SetSessionLocationRequest) returns (SetSessionLocationResponse) { + } + rpc SetSessionState (SetSessionStateRequest) returns (SetSessionStateResponse) { + } + + // event streams + rpc NodeEvents (NodeEventsRequest) returns (stream NodeEvent) { + } + rpc LinkEvents (LinkEventsRequest) returns (stream LinkEvent) { + } + rpc SessionEvents (SessionEventsRequest) returns (stream SessionEvent) { + } + rpc ConfigEvents (ConfigEventsRequest) returns (stream ConfigEvent) { + } + rpc ExceptionEvents (ExceptionEventsRequest) returns (stream ExceptionEvent) { + } + rpc FileEvents (FileEventsRequest) returns (stream FileEvent) { + } + + // node rpc + rpc AddNode (AddNodeRequest) returns (AddNodeResponse) { + } + rpc GetNode (GetNodeRequest) returns (GetNodeResponse) { + } + rpc EditNode (EditNodeRequest) returns (EditNodeResponse) { + } + rpc DeleteNode (DeleteNodeRequest) returns (DeleteNodeResponse) { + } + + // link rpc + rpc GetNodeLinks (GetNodeLinksRequest) returns (GetNodeLinksResponse) { + } + rpc AddLink (AddLinkRequest) returns (AddLinkResponse) { + } + rpc EditLink (EditLinkRequest) returns (EditLinkResponse) { + } + rpc DeleteLink (DeleteLinkRequest) returns (DeleteLinkResponse) { + } + + // hook rpc + rpc GetHooks (GetHooksRequest) returns (GetHooksResponse) { + } + rpc AddHook (AddHookRequest) returns (AddHookResponse) { + } + + // mobility rpc + rpc GetMobilityConfigs (GetMobilityConfigsRequest) returns (GetMobilityConfigsResponse) { + } + rpc GetMobilityConfig (GetMobilityConfigRequest) returns (GetMobilityConfigResponse) { + } + rpc SetMobilityConfig (SetMobilityConfigRequest) returns (SetMobilityConfigResponse) { + } + rpc MobilityAction (MobilityActionRequest) 
returns (MobilityActionResponse) { + } + + // service rpc + rpc GetServices (GetServicesRequest) returns (GetServicesResponse) { + } + rpc GetServiceDefaults (GetServiceDefaultsRequest) returns (GetServiceDefaultsResponse) { + } + rpc SetServiceDefaults (SetServiceDefaultsRequest) returns (SetServiceDefaultsResponse) { + } + rpc GetNodeService (GetNodeServiceRequest) returns (GetNodeServiceResponse) { + } + rpc GetNodeServiceFile (GetNodeServiceFileRequest) returns (GetNodeServiceFileResponse) { + } + rpc SetNodeService (SetNodeServiceRequest) returns (SetNodeServiceResponse) { + } + rpc SetNodeServiceFile (SetNodeServiceFileRequest) returns (SetNodeServiceFileResponse) { + } + rpc ServiceAction (ServiceActionRequest) returns (ServiceActionResponse) { + } + + // wlan rpc + rpc GetWlanConfig (GetWlanConfigRequest) returns (GetWlanConfigResponse) { + } + rpc SetWlanConfig (SetWlanConfigRequest) returns (SetWlanConfigResponse) { + } + + // emane rpc + rpc GetEmaneConfig (GetEmaneConfigRequest) returns (GetEmaneConfigResponse) { + } + rpc SetEmaneConfig (SetEmaneConfigRequest) returns (SetEmaneConfigResponse) { + } + rpc GetEmaneModels (GetEmaneModelsRequest) returns (GetEmaneModelsResponse) { + } + rpc GetEmaneModelConfig (GetEmaneModelConfigRequest) returns (GetEmaneModelConfigResponse) { + } + rpc SetEmaneModelConfig (SetEmaneModelConfigRequest) returns (SetEmaneModelConfigResponse) { + } + rpc GetEmaneModelConfigs (GetEmaneModelConfigsRequest) returns (GetEmaneModelConfigsResponse) { + } + + // xml rpc + rpc SaveXml (SaveXmlRequest) returns (SaveXmlResponse) { + } + rpc OpenXml (OpenXmlRequest) returns (OpenXmlResponse) { + } +} + +// rpc request/response messages +message CreateSessionRequest { + int32 id = 1; +} + +message CreateSessionResponse { + int32 id = 1; + SessionState state = 2; +} + +message DeleteSessionRequest { + int32 id = 1; +} + +message DeleteSessionResponse { + bool result = 1; +} + +message GetSessionsRequest { +} + +message GetSessionsResponse { + repeated SessionSummary sessions = 1; +} + +message GetSessionRequest { + int32 id = 1; +} + +message GetSessionResponse { + Session session = 1; +} + +message GetSessionOptionsRequest { + int32 id = 1; +} + +message GetSessionOptionsResponse { + repeated ConfigGroup groups = 1; +} + +message SetSessionOptionsRequest { + int32 id = 1; + map<string, string> config = 2; +} + +message SetSessionOptionsResponse { + bool result = 1; +} + +message GetSessionLocationRequest { + int32 id = 1; +} + +message GetSessionLocationResponse { + Position position = 1; + float scale = 2; +} + +message SetSessionLocationRequest { + int32 id = 1; + Position position = 2; + float scale = 3; +} + +message SetSessionLocationResponse { + bool result = 1; +} + +message SetSessionStateRequest { + int32 id = 1; + SessionState state = 2; +} + +message SetSessionStateResponse { + bool result = 1; +} + +message NodeEventsRequest { + int32 id = 1; +} + +message NodeEvent { + Node node = 1; +} + +message LinkEventsRequest { + int32 id = 1; +} + +message LinkEvent { + MessageType message_type = 1; + Link link = 2; +} + +message SessionEventsRequest { + int32 id = 1; +} + +message SessionEvent { + int32 node = 1; + int32 event = 2; + string name = 3; + bytes data = 4; + float time = 5; + int32 session = 6; +} + +message ConfigEventsRequest { + int32 id = 1; +} + +message ConfigEvent { + MessageType message_type = 1; + int32 node = 2; + string object = 3; + int32 type = 4; + repeated int32 data_types = 5; + string data_values = 6; + string captions = 7; + string bitmap = 8;
+ string possible_values = 9; + string groups = 10; + string session = 11; + int32 interface = 12; + int32 network_id = 13; + string opaque = 14; +} + +message ExceptionEventsRequest { + int32 id = 1; +} + +message ExceptionEvent { + int32 node = 1; + int32 session = 2; + ExceptionLevel level = 3; + string source = 4; + string date = 5; + string text = 6; + string opaque = 7; +} + +message FileEventsRequest { + int32 id = 1; +} + +message FileEvent { + MessageType message_type = 1; + int32 node = 2; + string name = 3; + string mode = 4; + int32 number = 5; + string type = 6; + string source = 7; + int32 session = 8; + bytes data = 9; + bytes compressed_data = 10; +} + +message AddNodeRequest { + int32 session = 1; + Node node = 2; +} + +message AddNodeResponse { + int32 id = 1; +} + +message GetNodeRequest { + int32 session = 1; + int32 id = 2; +} + +message GetNodeResponse { + Node node = 1; + repeated Interface interfaces = 2; +} + +message EditNodeRequest { + int32 session = 1; + int32 id = 2; + Position position = 3; +} + +message EditNodeResponse { + bool result = 1; +} + +message DeleteNodeRequest { + int32 session = 1; + int32 id = 2; +} + +message DeleteNodeResponse { + bool result = 1; +} + +message GetNodeLinksRequest { + int32 session = 1; + int32 id = 2; +} + +message GetNodeLinksResponse { + repeated Link links = 1; +} + +message AddLinkRequest { + int32 session = 1; + Link link = 2; +} + +message AddLinkResponse { + bool result = 1; +} + +message EditLinkRequest { + int32 session = 1; + int32 node_one = 2; + int32 node_two = 3; + int32 interface_one = 4; + int32 interface_two = 5; + LinkOptions options = 6; +} + +message EditLinkResponse { + bool result = 1; +} + +message DeleteLinkRequest { + int32 session = 1; + int32 node_one = 2; + int32 node_two = 3; + int32 interface_one = 4; + int32 interface_two = 5; +} + +message DeleteLinkResponse { + bool result = 1; +} + +message GetHooksRequest { + int32 session = 1; +} + +message GetHooksResponse { + repeated Hook hooks = 1; +} + +message AddHookRequest { + int32 session = 1; + Hook hook = 2; +} + +message AddHookResponse { + bool result = 1; +} + +message GetMobilityConfigsRequest { + int32 session = 1; +} + +message GetMobilityConfigsResponse { + message MobilityConfig { + repeated ConfigGroup groups = 1; + } + map<int32, MobilityConfig> configs = 1; +} + +message GetMobilityConfigRequest { + int32 session = 1; + int32 id = 2; +} + +message GetMobilityConfigResponse { + repeated ConfigGroup groups = 1; +} + +message SetMobilityConfigRequest { + int32 session = 1; + int32 id = 2; + map<string, string> config = 3; +} + +message SetMobilityConfigResponse { + bool result = 1; +} + +message MobilityActionRequest { + int32 session = 1; + int32 id = 2; + MobilityAction action = 3; +} + +message MobilityActionResponse { + bool result = 1; +} + +message GetServicesRequest { + +} + +message GetServicesResponse { + repeated Service services = 1; +} + +message GetServiceDefaultsRequest { + int32 session = 1; +} + +message GetServiceDefaultsResponse { + repeated ServiceDefaults defaults = 1; +} + +message SetServiceDefaultsRequest { + int32 session = 1; + repeated ServiceDefaults defaults = 2; +} + +message SetServiceDefaultsResponse { + bool result = 1; +} + +message GetNodeServiceRequest { + int32 session = 1; + int32 id = 2; + string service = 3; +} + +message GetNodeServiceResponse { + NodeServiceData service = 1; +} + +message GetNodeServiceFileRequest { + int32 session = 1; + int32 id = 2; + string service = 3; + string file = 4; +} + +message GetNodeServiceFileResponse {
+ bytes data = 1; +} + +message SetNodeServiceRequest { + int32 session = 1; + int32 id = 2; + string service = 3; + repeated string startup = 4; + repeated string validate = 5; + repeated string shutdown = 6; +} + +message SetNodeServiceResponse { + bool result = 1; +} + +message SetNodeServiceFileRequest { + int32 session = 1; + int32 id = 2; + string service = 3; + string file = 4; + bytes data = 5; +} + +message SetNodeServiceFileResponse { + bool result = 1; +} + +message ServiceActionRequest { + int32 session = 1; + int32 id = 2; + string service = 3; + ServiceAction action = 4; +} + +message ServiceActionResponse { + bool result = 1; +} + +message GetWlanConfigRequest { + int32 session = 1; + int32 id = 2; +} + +message GetWlanConfigResponse { + repeated ConfigGroup groups = 1; +} + +message SetWlanConfigRequest { + int32 session = 1; + int32 id = 2; + map config = 3; +} + +message SetWlanConfigResponse { + bool result = 1; +} + +message GetEmaneConfigRequest { + int32 session = 1; +} + +message GetEmaneConfigResponse { + repeated ConfigGroup groups = 1; +} + +message SetEmaneConfigRequest { + int32 session = 1; + map config = 2; +} + +message SetEmaneConfigResponse { + bool result = 1; +} + +message GetEmaneModelsRequest { + int32 session = 1; +} + +message GetEmaneModelsResponse { + repeated string models = 1; +} + +message GetEmaneModelConfigRequest { + int32 session = 1; + int32 id = 2; + int32 interface = 3; + string model = 4; +} + +message GetEmaneModelConfigResponse { + repeated ConfigGroup groups = 1; +} + +message SetEmaneModelConfigRequest { + int32 session = 1; + int32 id = 2; + int32 interface = 3; + string model = 4; + map config = 5; +} + +message SetEmaneModelConfigResponse { + bool result = 1; +} + +message GetEmaneModelConfigsRequest { + int32 session = 1; +} + +message GetEmaneModelConfigsResponse { + message ModelConfig { + string model = 1; + repeated ConfigGroup groups = 2; + } + map configs = 1; +} + +message SaveXmlRequest { + int32 session = 1; +} + +message SaveXmlResponse { + bytes data = 1; +} + +message OpenXmlRequest { + bytes data = 1; +} + +message OpenXmlResponse { + bool result = 1; + int32 session = 2; +} + +// data structures for messages below +enum MessageType { + MESSAGE_NONE = 0; + MESSAGE_ADD = 1; + MESSAGE_DELETE = 2; + MESSAGE_CRI = 4; + MESSAGE_LOCAL = 8; + MESSAGE_STRING = 16; + MESSAGE_TEXT = 32; + MESSAGE_TTY = 64; +} + +enum LinkType { + LINK_WIRELESS = 0; + LINK_WIRED = 1; +} + +enum SessionState { + STATE_NONE = 0; + STATE_DEFINITION = 1; + STATE_CONFIGURATION = 2; + STATE_INSTANTIATION = 3; + STATE_RUNTIME = 4; + STATE_DATACOLLECT = 5; + STATE_SHUTDOWN = 6; +} + +enum NodeType { + NODE_DEFAULT = 0; + NODE_PHYSICAL = 1; + NODE_TBD = 3; + NODE_SWITCH = 4; + NODE_HUB = 5; + NODE_WIRELESS_LAN = 6; + NODE_RJ45 = 7; + NODE_TUNNEL = 8; + NODE_KTUNNEL = 9; + NODE_EMANE = 10; + NODE_TAP_BRIDGE = 11; + NODE_PEER_TO_PEER = 12; + NODE_CONTROL_NET = 13; + NODE_EMANE_NET = 14; +} + +enum ServiceValidationMode { + VALIDATION_BLOCKING = 0; + VALIDATION_NON_BLOCKING = 1; + VALIDATION_TIMER = 2; +} + +enum ServiceAction { + SERVICE_START = 0; + SERVICE_STOP = 1; + SERVICE_RESTART = 2; + SERVICE_VALIDATE = 3; +} + +enum MobilityAction { + MOBILITY_START = 0; + MOBILITY_PAUSE = 1; + MOBILITY_STOP = 2; +} + +enum ExceptionLevel { + EXCEPTION_DEFAULT = 0; + EXCEPTION_FATAL = 1; + EXCEPTION_ERROR = 2; + EXCEPTION_WARNING = 3; + EXCEPTION_NOTICE = 4; +} + +message Hook { + SessionState state = 1; + string file = 2; + bytes data = 3; +} + +message 
ServiceDefaults { + string node_type = 1; + repeated string services = 2; +} + +message Service { + string group = 1; + string name = 2; +} + +message NodeServiceData { + repeated string executables = 1; + repeated string dependencies = 2; + repeated string dirs = 3; + repeated string configs = 4; + repeated string startup = 5; + repeated string validate = 6; + ServiceValidationMode validation_mode = 7; + int32 validation_timer = 8; + repeated string shutdown = 9; + string meta = 10; +} + +message ConfigGroup { + string name = 1; + repeated ConfigOption options = 2; +} + +message ConfigOption { + string label = 1; + string name = 2; + string value = 3; + int32 type = 4; + repeated string select = 5; +} + +message Session { + int32 id = 1; + SessionState state = 2; + repeated Node nodes = 3; + repeated Link links = 4; +} + +message SessionSummary { + int32 id = 1; + SessionState state = 2; + int32 nodes = 3; +} + +message Node { + int32 id = 1; + string name = 2; + NodeType type = 3; + string model = 4; + Position position = 5; + repeated string services = 6; + string emane = 7; + string icon = 8; + string opaque = 9; +} + +message Link { + int32 node_one = 1; + int32 node_two = 2; + LinkType type = 3; + Interface interface_one = 4; + Interface interface_two = 5; + LinkOptions options = 6; +} + +message LinkOptions { + string opaque = 1; + float jitter = 2; + string key = 3; + float mburst = 4; + float mer = 5; + float per = 6; + float bandwidth = 7; + float burst = 8; + float delay = 9; + float dup = 10; + bool unidirectional = 11; +} + +message Interface { + int32 id = 1; + string name = 2; + string mac = 3; + string ip4 = 4; + int32 ip4mask = 5; + string ip6 = 6; + int32 ip6mask = 7; + int32 netid = 8; + int32 flowid = 9; + int32 mtu = 10; +} + +message Position { + float x = 1; + float y = 2; + float z = 3; + float lat = 4; + float lon = 5; + float alt = 6; +} diff --git a/daemon/proto/core/api/grpc/common.proto b/daemon/proto/core/api/grpc/common.proto deleted file mode 100644 index 065bee7a..00000000 --- a/daemon/proto/core/api/grpc/common.proto +++ /dev/null @@ -1,16 +0,0 @@ -syntax = "proto3"; - -package common; - -message ConfigOption { - string label = 1; - string name = 2; - string value = 3; - int32 type = 4; - repeated string select = 5; - string group = 6; -} - -message MappedConfig { - map config = 1; -} diff --git a/daemon/proto/core/api/grpc/configservices.proto b/daemon/proto/core/api/grpc/configservices.proto deleted file mode 100644 index 25be616d..00000000 --- a/daemon/proto/core/api/grpc/configservices.proto +++ /dev/null @@ -1,72 +0,0 @@ -syntax = "proto3"; - -package configservices; - -import "core/api/grpc/common.proto"; - -message ConfigServiceConfig { - int32 node_id = 1; - string name = 2; - map templates = 3; - map config = 4; -} - -message ConfigServiceValidationMode { - enum Enum { - BLOCKING = 0; - NON_BLOCKING = 1; - TIMER = 2; - } -} - -message ConfigService { - string group = 1; - string name = 2; - repeated string executables = 3; - repeated string dependencies = 4; - repeated string directories = 5; - repeated string files = 6; - repeated string startup = 7; - repeated string validate = 8; - repeated string shutdown = 9; - ConfigServiceValidationMode.Enum validation_mode = 10; - int32 validation_timer = 11; - float validation_period = 12; -} - -message ConfigMode { - string name = 1; - map config = 2; -} - -message GetConfigServiceDefaultsRequest { - string name = 1; - int32 session_id = 2; - int32 node_id = 3; -} - -message 
GetConfigServiceDefaultsResponse { - map templates = 1; - map config = 2; - repeated ConfigMode modes = 3; -} - -message GetNodeConfigServiceRequest { - int32 session_id = 1; - int32 node_id = 2; - string name = 3; -} - -message GetNodeConfigServiceResponse { - map config = 1; -} - -message GetConfigServiceRenderedRequest { - int32 session_id = 1; - int32 node_id = 2; - string name = 3; -} - -message GetConfigServiceRenderedResponse { - map rendered = 1; -} diff --git a/daemon/proto/core/api/grpc/core.proto b/daemon/proto/core/api/grpc/core.proto deleted file mode 100644 index 09f2c764..00000000 --- a/daemon/proto/core/api/grpc/core.proto +++ /dev/null @@ -1,746 +0,0 @@ -syntax = "proto3"; - -package core; - -import "core/api/grpc/configservices.proto"; -import "core/api/grpc/common.proto"; -import "core/api/grpc/emane.proto"; -import "core/api/grpc/mobility.proto"; -import "core/api/grpc/services.proto"; -import "core/api/grpc/wlan.proto"; - -service CoreApi { - // session rpc - rpc StartSession (StartSessionRequest) returns (StartSessionResponse) { - } - rpc StopSession (StopSessionRequest) returns (StopSessionResponse) { - } - rpc CreateSession (CreateSessionRequest) returns (CreateSessionResponse) { - } - rpc DeleteSession (DeleteSessionRequest) returns (DeleteSessionResponse) { - } - rpc GetSessions (GetSessionsRequest) returns (GetSessionsResponse) { - } - rpc GetSession (GetSessionRequest) returns (GetSessionResponse) { - } - rpc CheckSession (CheckSessionRequest) returns (CheckSessionResponse) { - } - rpc SessionAlert (SessionAlertRequest) returns (SessionAlertResponse) { - } - - // streams - rpc Events (EventsRequest) returns (stream Event) { - } - rpc Throughputs (ThroughputsRequest) returns (stream ThroughputsEvent) { - } - rpc CpuUsage (CpuUsageRequest) returns (stream CpuUsageEvent) { - } - - // node rpc - rpc AddNode (AddNodeRequest) returns (AddNodeResponse) { - } - rpc GetNode (GetNodeRequest) returns (GetNodeResponse) { - } - rpc EditNode (EditNodeRequest) returns (EditNodeResponse) { - } - rpc DeleteNode (DeleteNodeRequest) returns (DeleteNodeResponse) { - } - rpc NodeCommand (NodeCommandRequest) returns (NodeCommandResponse) { - } - rpc GetNodeTerminal (GetNodeTerminalRequest) returns (GetNodeTerminalResponse) { - } - rpc MoveNode (MoveNodeRequest) returns (MoveNodeResponse) { - } - rpc MoveNodes (stream MoveNodesRequest) returns (MoveNodesResponse) { - } - - // link rpc - rpc AddLink (AddLinkRequest) returns (AddLinkResponse) { - } - rpc EditLink (EditLinkRequest) returns (EditLinkResponse) { - } - rpc DeleteLink (DeleteLinkRequest) returns (DeleteLinkResponse) { - } - rpc Linked (LinkedRequest) returns (LinkedResponse) { - } - - // mobility rpc - rpc GetMobilityConfig (mobility.GetMobilityConfigRequest) returns (mobility.GetMobilityConfigResponse) { - } - rpc SetMobilityConfig (mobility.SetMobilityConfigRequest) returns (mobility.SetMobilityConfigResponse) { - } - rpc MobilityAction (mobility.MobilityActionRequest) returns (mobility.MobilityActionResponse) { - } - - // service rpc - rpc GetServiceDefaults (services.GetServiceDefaultsRequest) returns (services.GetServiceDefaultsResponse) { - } - rpc SetServiceDefaults (services.SetServiceDefaultsRequest) returns (services.SetServiceDefaultsResponse) { - } - rpc GetNodeService (services.GetNodeServiceRequest) returns (services.GetNodeServiceResponse) { - } - rpc GetNodeServiceFile (services.GetNodeServiceFileRequest) returns (services.GetNodeServiceFileResponse) { - } - rpc ServiceAction (services.ServiceActionRequest) 
returns (services.ServiceActionResponse) { - } - - // config services - rpc GetConfigServiceDefaults (configservices.GetConfigServiceDefaultsRequest) returns (configservices.GetConfigServiceDefaultsResponse) { - } - rpc GetNodeConfigService (configservices.GetNodeConfigServiceRequest) returns (configservices.GetNodeConfigServiceResponse) { - } - rpc ConfigServiceAction (services.ServiceActionRequest) returns (services.ServiceActionResponse) { - } - rpc GetConfigServiceRendered (configservices.GetConfigServiceRenderedRequest) returns (configservices.GetConfigServiceRenderedResponse) { - } - - // wlan rpc - rpc GetWlanConfig (wlan.GetWlanConfigRequest) returns (wlan.GetWlanConfigResponse) { - } - rpc SetWlanConfig (wlan.SetWlanConfigRequest) returns (wlan.SetWlanConfigResponse) { - } - rpc WlanLink (wlan.WlanLinkRequest) returns (wlan.WlanLinkResponse) { - } - - // wireless rpc - rpc WirelessLinked (WirelessLinkedRequest) returns (WirelessLinkedResponse) { - } - rpc WirelessConfig (WirelessConfigRequest) returns (WirelessConfigResponse) { - } - rpc GetWirelessConfig (GetWirelessConfigRequest) returns (GetWirelessConfigResponse) { - } - - // emane rpc - rpc GetEmaneModelConfig (emane.GetEmaneModelConfigRequest) returns (emane.GetEmaneModelConfigResponse) { - } - rpc SetEmaneModelConfig (emane.SetEmaneModelConfigRequest) returns (emane.SetEmaneModelConfigResponse) { - } - rpc GetEmaneEventChannel (emane.GetEmaneEventChannelRequest) returns (emane.GetEmaneEventChannelResponse) { - } - rpc EmanePathlosses (stream emane.EmanePathlossesRequest) returns (emane.EmanePathlossesResponse) { - } - rpc EmaneLink (emane.EmaneLinkRequest) returns (emane.EmaneLinkResponse) { - } - - // xml rpc - rpc SaveXml (SaveXmlRequest) returns (SaveXmlResponse) { - } - rpc OpenXml (OpenXmlRequest) returns (OpenXmlResponse) { - } - - // utilities - rpc GetInterfaces (GetInterfacesRequest) returns (GetInterfacesResponse) { - } - rpc ExecuteScript (ExecuteScriptRequest) returns (ExecuteScriptResponse) { - } - - // globals - rpc GetConfig (GetConfigRequest) returns (GetConfigResponse) { - } -} - -// rpc request/response messages -message GetConfigRequest { -} - -message GetConfigResponse { - repeated services.Service services = 1; - repeated configservices.ConfigService config_services = 2; - repeated string emane_models = 3; -} - - -message StartSessionRequest { - Session session = 1; - bool definition = 2; -} - -message StartSessionResponse { - bool result = 1; - repeated string exceptions = 2; -} - -message StopSessionRequest { - int32 session_id = 1; -} - -message StopSessionResponse { - bool result = 1; -} - -message CreateSessionRequest { - int32 session_id = 1; -} - -message CreateSessionResponse { - Session session = 1; -} - -message DeleteSessionRequest { - int32 session_id = 1; -} - -message DeleteSessionResponse { - bool result = 1; -} - -message GetSessionsRequest { -} - -message GetSessionsResponse { - repeated SessionSummary sessions = 1; -} - -message CheckSessionRequest { - int32 session_id = 1; -} - -message CheckSessionResponse { - bool result = 1; -} - -message GetSessionRequest { - int32 session_id = 1; -} - -message GetSessionResponse { - Session session = 1; -} - -message SessionAlertRequest { - int32 session_id = 1; - ExceptionLevel.Enum level = 2; - string source = 3; - string text = 4; - int32 node_id = 5; -} - -message SessionAlertResponse { - bool result = 1; -} - -message EventsRequest { - int32 session_id = 1; - repeated EventType.Enum events = 2; -} - -message ThroughputsRequest { - int32 
session_id = 1; -} - -message ThroughputsEvent { - int32 session_id = 1; - repeated BridgeThroughput bridge_throughputs = 2; - repeated InterfaceThroughput iface_throughputs = 3; -} - -message CpuUsageRequest { - int32 delay = 1; -} - -message CpuUsageEvent { - double usage = 1; -} - -message InterfaceThroughput { - int32 node_id = 1; - int32 iface_id = 2; - double throughput = 3; -} - -message BridgeThroughput { - int32 node_id = 1; - double throughput = 2; -} - -message Event { - oneof event_type { - SessionEvent session_event = 1; - NodeEvent node_event = 2; - LinkEvent link_event = 3; - ConfigEvent config_event = 4; - ExceptionEvent exception_event = 5; - FileEvent file_event = 6; - } - int32 session_id = 7; - string source = 8; -} - -message NodeEvent { - Node node = 1; - MessageType.Enum message_type = 2; -} - -message LinkEvent { - MessageType.Enum message_type = 1; - Link link = 2; -} - -message SessionEvent { - int32 node_id = 1; - int32 event = 2; - string name = 3; - string data = 4; - float time = 5; -} - -message ConfigEvent { - MessageType.Enum message_type = 1; - int32 node_id = 2; - string object = 3; - int32 type = 4; - repeated int32 data_types = 5; - string data_values = 6; - string captions = 7; - string possible_values = 8; - string groups = 9; - int32 iface_id = 10; - int32 network_id = 11; - string opaque = 12; -} - -message ExceptionEvent { - int32 node_id = 1; - ExceptionLevel.Enum level = 2; - string source = 3; - string date = 4; - string text = 5; - string opaque = 6; -} - -message FileEvent { - MessageType.Enum message_type = 1; - int32 node_id = 2; - string name = 3; - string mode = 4; - int32 number = 5; - string type = 6; - string source = 7; - string data = 8; - string compressed_data = 9; -} - -message AddNodeRequest { - int32 session_id = 1; - Node node = 2; - string source = 3; -} - -message AddNodeResponse { - int32 node_id = 1; -} - -message GetNodeRequest { - int32 session_id = 1; - int32 node_id = 2; -} - -message GetNodeResponse { - Node node = 1; - repeated Interface ifaces = 2; - repeated Link links = 3; -} - -message EditNodeRequest { - int32 session_id = 1; - int32 node_id = 2; - string icon = 3; - string source = 4; -} - -message EditNodeResponse { - bool result = 1; -} - -message DeleteNodeRequest { - int32 session_id = 1; - int32 node_id = 2; - string source = 3; -} - -message DeleteNodeResponse { - bool result = 1; -} - -message GetNodeTerminalRequest { - int32 session_id = 1; - int32 node_id = 2; -} - -message GetNodeTerminalResponse { - string terminal = 1; -} - - -message MoveNodeRequest { - int32 session_id = 1; - int32 node_id = 2; - string source = 3; - oneof move_type { - Position position = 4; - Geo geo = 5; - } -} - -message MoveNodeResponse { - bool result = 1; -} - -message MoveNodesRequest { - int32 session_id = 1; - int32 node_id = 2; - string source = 3; - oneof move_type { - Position position = 4; - Geo geo = 5; - } -} - -message MoveNodesResponse { -} - -message NodeCommandRequest { - int32 session_id = 1; - int32 node_id = 2; - string command = 3; - bool wait = 4; - bool shell = 5; -} - -message NodeCommandResponse { - string output = 1; - int32 return_code = 2; -} - -message AddLinkRequest { - int32 session_id = 1; - Link link = 2; - string source = 3; -} - -message AddLinkResponse { - bool result = 1; - Interface iface1 = 2; - Interface iface2 = 3; -} - -message EditLinkRequest { - int32 session_id = 1; - int32 node1_id = 2; - int32 node2_id = 3; - int32 iface1_id = 4; - int32 iface2_id = 5; - LinkOptions options = 6; - 
string source = 7; -} - -message EditLinkResponse { - bool result = 1; -} - -message DeleteLinkRequest { - int32 session_id = 1; - int32 node1_id = 2; - int32 node2_id = 3; - int32 iface1_id = 4; - int32 iface2_id = 5; - string source = 6; -} - -message DeleteLinkResponse { - bool result = 1; -} - -message SaveXmlRequest { - int32 session_id = 1; -} - -message SaveXmlResponse { - string data = 1; -} - -message OpenXmlRequest { - string data = 1; - bool start = 2; - string file = 3; -} - -message OpenXmlResponse { - bool result = 1; - int32 session_id = 2; -} - -message GetInterfacesRequest { -} - -message GetInterfacesResponse { - repeated string ifaces = 1; -} - -message ExecuteScriptRequest { - string script = 1; - string args = 2; -} - -message ExecuteScriptResponse { - int32 session_id = 1; -} - -// data structures for messages below -message EventType { - enum Enum { - SESSION = 0; - NODE = 1; - LINK = 2; - CONFIG = 3; - EXCEPTION = 4; - FILE = 5; - } -} - -message MessageType { - enum Enum { - NONE = 0; - ADD = 1; - DELETE = 2; - CRI = 4; - LOCAL = 8; - STRING = 16; - TEXT = 32; - TTY = 64; - } -} - -message LinkType { - enum Enum { - WIRELESS = 0; - WIRED = 1; - } -} - -message SessionState { - enum Enum { - NONE = 0; - DEFINITION = 1; - CONFIGURATION = 2; - INSTANTIATION = 3; - RUNTIME = 4; - DATACOLLECT = 5; - SHUTDOWN = 6; - } -} - -message NodeType { - enum Enum { - DEFAULT = 0; - PHYSICAL = 1; - SWITCH = 4; - HUB = 5; - WIRELESS_LAN = 6; - RJ45 = 7; - TUNNEL = 8; - EMANE = 10; - TAP_BRIDGE = 11; - PEER_TO_PEER = 12; - CONTROL_NET = 13; - DOCKER = 15; - LXC = 16; - WIRELESS = 17; - PODMAN = 18; - } -} - -message ConfigOptionType { - enum Enum { - NONE = 0; - UINT8 = 1; - UINT16 = 2; - UINT32 = 3; - UINT64 = 4; - INT8 = 5; - INT16 = 6; - INT32 = 7; - INT64 = 8; - FLOAT = 9; - STRING = 10; - BOOL = 11; - } -} - -message ExceptionLevel { - enum Enum { - DEFAULT = 0; - FATAL = 1; - ERROR = 2; - WARNING = 3; - NOTICE = 4; - } -} - -message Hook { - SessionState.Enum state = 1; - string file = 2; - string data = 3; -} - -message Session { - int32 id = 1; - SessionState.Enum state = 2; - repeated Node nodes = 3; - repeated Link links = 4; - string dir = 5; - string user = 6; - repeated services.ServiceDefaults default_services = 7; - SessionLocation location = 8; - repeated Hook hooks = 9; - map metadata = 10; - string file = 11; - map options = 12; - repeated Server servers = 13; -} - -message SessionSummary { - int32 id = 1; - SessionState.Enum state = 2; - int32 nodes = 3; - string file = 4; - string dir = 5; -} - -message Node { - int32 id = 1; - string name = 2; - NodeType.Enum type = 3; - string model = 4; - Position position = 5; - repeated string services = 6; - string emane = 7; - string icon = 8; - string image = 9; - string server = 10; - repeated string config_services = 11; - Geo geo = 12; - string dir = 13; - string channel = 14; - int32 canvas = 15; - map wlan_config = 16; - map mobility_config = 17; - map service_configs = 18; - map config_service_configs= 19; - repeated emane.NodeEmaneConfig emane_configs = 20; - map wireless_config = 21; -} - -message Link { - int32 node1_id = 1; - int32 node2_id = 2; - LinkType.Enum type = 3; - Interface iface1 = 4; - Interface iface2 = 5; - LinkOptions options = 6; - int32 network_id = 7; - string label = 8; - string color = 9; -} - -message LinkOptions { - int64 jitter = 1; - int32 key = 2; - int32 mburst = 3; - int32 mer = 4; - float loss = 5; - int64 bandwidth = 6; - int32 burst = 7; - int64 delay = 8; - int32 dup = 9; - bool 
unidirectional = 10; - int32 buffer = 11; -} - -message Interface { - int32 id = 1; - string name = 2; - string mac = 3; - string ip4 = 4; - int32 ip4_mask = 5; - string ip6 = 6; - int32 ip6_mask = 7; - int32 net_id = 8; - int32 flow_id = 9; - int32 mtu = 10; - int32 node_id = 11; - int32 net2_id = 12; - int32 nem_id = 13; - int32 nem_port = 14; -} - -message SessionLocation { - float x = 1; - float y = 2; - float z = 3; - float lat = 4; - float lon = 5; - float alt = 6; - float scale = 7; -} - -message Position { - float x = 1; - float y = 2; - float z = 3; -} - -message Geo { - float lat = 1; - float lon = 2; - float alt = 3; -} - -message Server { - string name = 1; - string host = 2; -} - -message LinkedRequest { - int32 session_id = 1; - int32 node1_id = 2; - int32 node2_id = 3; - int32 iface1_id = 4; - int32 iface2_id = 5; - bool linked = 6; -} - -message LinkedResponse { -} - -message WirelessLinkedRequest { - int32 session_id = 1; - int32 wireless_id = 2; - int32 node1_id = 3; - int32 node2_id = 4; - bool linked = 5; -} - -message WirelessLinkedResponse { -} - -message WirelessConfigRequest { - int32 session_id = 1; - int32 wireless_id = 2; - int32 node1_id = 3; - int32 node2_id = 4; - LinkOptions options1 = 5; - LinkOptions options2 = 6; -} - -message WirelessConfigResponse { -} - -message GetWirelessConfigRequest { - int32 session_id = 1; - int32 node_id = 2; -} - -message GetWirelessConfigResponse { - map config = 1; -} diff --git a/daemon/proto/core/api/grpc/emane.proto b/daemon/proto/core/api/grpc/emane.proto deleted file mode 100644 index b8579917..00000000 --- a/daemon/proto/core/api/grpc/emane.proto +++ /dev/null @@ -1,80 +0,0 @@ -syntax = "proto3"; - -package emane; - -import "core/api/grpc/common.proto"; - -message GetEmaneModelConfigRequest { - int32 session_id = 1; - int32 node_id = 2; - int32 iface_id = 3; - string model = 4; -} - -message GetEmaneModelConfigResponse { - map config = 1; -} - -message SetEmaneModelConfigRequest { - int32 session_id = 1; - EmaneModelConfig emane_model_config = 2; -} - -message SetEmaneModelConfigResponse { - bool result = 1; -} - -message GetEmaneModelConfig { - int32 node_id = 1; - string model = 2; - int32 iface_id = 3; - map config = 4; -} - -message NodeEmaneConfig { - int32 iface_id = 1; - string model = 2; - map config = 3; -} - -message GetEmaneEventChannelRequest { - int32 session_id = 1; - int32 nem_id = 2; -} - -message GetEmaneEventChannelResponse { - string group = 1; - int32 port = 2; - string device = 3; -} - -message EmaneLinkRequest { - int32 session_id = 1; - int32 nem1 = 2; - int32 nem2 = 3; - bool linked = 4; -} - -message EmaneLinkResponse { - bool result = 1; -} - -message EmaneModelConfig { - int32 node_id = 1; - int32 iface_id = 2; - string model = 3; - map config = 4; -} - -message EmanePathlossesRequest { - int32 session_id = 1; - int32 node1_id = 2; - float rx1 = 3; - int32 iface1_id = 4; - int32 node2_id = 5; - float rx2 = 6; - int32 iface2_id = 7; -} - -message EmanePathlossesResponse { -} diff --git a/daemon/proto/core/api/grpc/mobility.proto b/daemon/proto/core/api/grpc/mobility.proto deleted file mode 100644 index 6eaf8fc3..00000000 --- a/daemon/proto/core/api/grpc/mobility.proto +++ /dev/null @@ -1,46 +0,0 @@ -syntax = "proto3"; - -package mobility; - -import "core/api/grpc/common.proto"; - -message MobilityAction { - enum Enum { - START = 0; - PAUSE = 1; - STOP = 2; - } -} - -message MobilityConfig { - int32 node_id = 1; - map config = 2; -} - -message GetMobilityConfigRequest { - int32 session_id = 1; - 
int32 node_id = 2; -} - -message GetMobilityConfigResponse { - map config = 1; -} - -message SetMobilityConfigRequest { - int32 session_id = 1; - MobilityConfig mobility_config = 2; -} - -message SetMobilityConfigResponse { - bool result = 1; -} - -message MobilityActionRequest { - int32 session_id = 1; - int32 node_id = 2; - MobilityAction.Enum action = 3; -} - -message MobilityActionResponse { - bool result = 1; -} diff --git a/daemon/proto/core/api/grpc/services.proto b/daemon/proto/core/api/grpc/services.proto deleted file mode 100644 index 1b430f99..00000000 --- a/daemon/proto/core/api/grpc/services.proto +++ /dev/null @@ -1,116 +0,0 @@ -syntax = "proto3"; - -package services; - -message ServiceConfig { - int32 node_id = 1; - string service = 2; - repeated string startup = 3; - repeated string validate = 4; - repeated string shutdown = 5; - repeated string files = 6; - repeated string directories = 7; -} - -message ServiceFileConfig { - int32 node_id = 1; - string service = 2; - string file = 3; - string data = 4; -} - -message ServiceValidationMode { - enum Enum { - BLOCKING = 0; - NON_BLOCKING = 1; - TIMER = 2; - } -} - -message ServiceAction { - enum Enum { - START = 0; - STOP = 1; - RESTART = 2; - VALIDATE = 3; - } -} - -message ServiceDefaults { - string model = 1; - repeated string services = 2; -} - -message Service { - string group = 1; - string name = 2; -} - -message NodeServiceData { - repeated string executables = 1; - repeated string dependencies = 2; - repeated string dirs = 3; - repeated string configs = 4; - repeated string startup = 5; - repeated string validate = 6; - ServiceValidationMode.Enum validation_mode = 7; - int32 validation_timer = 8; - repeated string shutdown = 9; - string meta = 10; -} - -message NodeServiceConfig { - int32 node_id = 1; - string service = 2; - NodeServiceData data = 3; - map files = 4; -} - -message GetServiceDefaultsRequest { - int32 session_id = 1; -} - -message GetServiceDefaultsResponse { - repeated ServiceDefaults defaults = 1; -} - -message SetServiceDefaultsRequest { - int32 session_id = 1; - repeated ServiceDefaults defaults = 2; -} - -message SetServiceDefaultsResponse { - bool result = 1; -} - -message GetNodeServiceRequest { - int32 session_id = 1; - int32 node_id = 2; - string service = 3; -} - -message GetNodeServiceResponse { - NodeServiceData service = 1; -} - -message GetNodeServiceFileRequest { - int32 session_id = 1; - int32 node_id = 2; - string service = 3; - string file = 4; -} - -message GetNodeServiceFileResponse { - string data = 1; -} - -message ServiceActionRequest { - int32 session_id = 1; - int32 node_id = 2; - string service = 3; - ServiceAction.Enum action = 4; -} - -message ServiceActionResponse { - bool result = 1; -} diff --git a/daemon/proto/core/api/grpc/wlan.proto b/daemon/proto/core/api/grpc/wlan.proto deleted file mode 100644 index 2d161a04..00000000 --- a/daemon/proto/core/api/grpc/wlan.proto +++ /dev/null @@ -1,40 +0,0 @@ -syntax = "proto3"; - -package wlan; - -import "core/api/grpc/common.proto"; - -message WlanConfig { - int32 node_id = 1; - map config = 2; -} - -message GetWlanConfigRequest { - int32 session_id = 1; - int32 node_id = 2; -} - -message GetWlanConfigResponse { - map config = 1; -} - -message SetWlanConfigRequest { - int32 session_id = 1; - WlanConfig wlan_config = 2; -} - -message SetWlanConfigResponse { - bool result = 1; -} - -message WlanLinkRequest { - int32 session_id = 1; - int32 wlan = 2; - int32 node1_id = 3; - int32 node2_id = 4; - bool linked = 5; -} - -message 
WlanLinkResponse { - bool result = 1; -} diff --git a/daemon/pyproject.toml b/daemon/pyproject.toml deleted file mode 100644 index 0d1acf7a..00000000 --- a/daemon/pyproject.toml +++ /dev/null @@ -1,63 +0,0 @@ -[tool.poetry] -name = "core" -version = "9.0.3" -description = "CORE Common Open Research Emulator" -authors = ["Boeing Research and Technology"] -license = "BSD-2-Clause" -repository = "https://github.com/coreemu/core" -documentation = "https://coreemu.github.io/core/" -include = [ - "core/api/grpc/*", - "core/configservices/*/templates", - "core/constants.py", - "core/gui/data/**/*", -] -exclude = ["core/constants.py.in"] - -[tool.poetry.scripts] -core-daemon = "core.scripts.daemon:main" -core-cli = "core.scripts.cli:main" -core-gui = "core.scripts.gui:main" -core-player = "core.scripts.player:main" -core-route-monitor = "core.scripts.routemonitor:main" -core-service-update = "core.scripts.serviceupdate:main" -core-cleanup = "core.scripts.cleanup:main" - -[tool.poetry.dependencies] -python = "^3.9" -fabric = "2.7.1" -grpcio = "1.54.2" -invoke = "1.7.3" -lxml = "4.9.1" -netaddr = "0.7.19" -protobuf = "4.21.9" -pyproj = "3.3.1" -Pillow = "9.4.0" -Mako = "1.2.3" -PyYAML = "6.0.1" - -[tool.poetry.group.dev.dependencies] -pytest = "6.2.5" -grpcio-tools = "1.54.2" -black = "22.12.0" -flake8 = "3.8.2" -isort = "4.3.21" -mock = "4.0.2" -pre-commit = "2.1.1" - -[tool.isort] -skip_glob = "*_pb2*.py,doc,build" -multi_line_output = 3 -include_trailing_comma = "True" -force_grid_wrap = 0 -use_parentheses = "True" -line_length = 88 - -[tool.black] -line_length = 88 -exclude = ".+_pb2.*.py|doc/|build/|__pycache__/" - -[build-system] -requires = ["poetry>=0.12"] -build-backend = "poetry.masonry.api" - diff --git a/daemon/requirements.txt b/daemon/requirements.txt new file mode 100644 index 00000000..ae84c527 --- /dev/null +++ b/daemon/requirements.txt @@ -0,0 +1,5 @@ +enum34==1.1.6 +futures==3.2.0 +grpcio==1.18.0 +lxml==3.5.0 +six==1.12.0 diff --git a/daemon/scripts/core-cleanup b/daemon/scripts/core-cleanup new file mode 100644 index 00000000..f73275df --- /dev/null +++ b/daemon/scripts/core-cleanup @@ -0,0 +1,70 @@ +#!/bin/sh + +if [ "z$1" = "z-h" -o "z$1" = "z--help" ]; then + echo "usage: $0 [-d [-l]]" + echo -n " Clean up all CORE namespaces processes, bridges, interfaces, " + echo "and session\n directories. Options:" + echo " -h show this help message and exit" + echo " -d also kill the Python daemon" + echo " -l remove the core-daemon.log file" + exit 0 +fi + +if [ `id -u` != 0 ]; then + echo "Permission denied. Re-run this script as root." + exit 1 +fi + +PATH="/sbin:/bin:/usr/sbin:/usr/bin" +export PATH + +if [ "z$1" = "z-d" ]; then + pypids=`pidof python python2` + for p in $pypids; do + grep -q core-daemon /proc/$p/cmdline + if [ $? = 0 ]; then + echo "cleaning up core-daemon process: $p" + kill -9 $p + fi + done +fi + +if [ "z$2" = "z-l" ]; then + rm -f /var/log/core-daemon.log +fi + +kaopts="-v" +killall --help 2>&1 | grep -q namespace +if [ $? 
= 0 ]; then + kaopts="$kaopts --ns 0" +fi + +vnodedpids=`pidof vnoded` +if [ "z$vnodedpids" != "z" ]; then + echo "cleaning up old vnoded processes: $vnodedpids" + killall $kaopts -KILL vnoded + # pause for 1 second for interfaces to disappear + sleep 1 +fi +killall -q emane +killall -q emanetransportd +killall -q emaneeventservice + +if [ -d /sys/class/net ]; then + ifcommand="ls -1 /sys/class/net" +else + ifcommand="ip -o link show | sed -r -e 's/[0-9]+: ([^[:space:]]+): .*/\1/'" +fi + +eval "$ifcommand" | awk ' + /^veth[0-9]+\./ {print "removing interface " $1; system("ip link del " $1);} + /tmp\./ {print "removing interface " $1; system("ip link del " $1);} + /gt\./ {print "removing interface " $1; system("ip link del " $1);} + /b\./ {print "removing bridge " $1; system("ip link set " $1 " down; ip link del " $1);} +' + +ebtables -L FORWARD | awk ' + /^-.*b\./ {print "removing ebtables " $0; system("ebtables -D FORWARD " $0); print "removing ebtables chain " $4; system("ebtables -X " $4);} +' + +rm -rf /tmp/pycore* diff --git a/daemon/scripts/core-daemon b/daemon/scripts/core-daemon new file mode 100755 index 00000000..a1c14cec --- /dev/null +++ b/daemon/scripts/core-daemon @@ -0,0 +1,136 @@ +#!/usr/bin/env python +""" +core-daemon: the CORE daemon is a server process that receives CORE API +messages and instantiates emulated nodes and networks within the kernel. Various +message handlers are defined and some support for sending messages. +""" + +import argparse +import ConfigParser +import logging +import sys +import threading +import time + +from core import load_logging_config +from core import constants +from core import enumerations +from core.corehandlers import CoreHandler +from core.coreserver import CoreServer +from core.grpc.server import CoreGrpcServer +from core.misc.utils import close_onexec + +load_logging_config() + + +def banner(): + """ + Output the program banner printed to the terminal or log file. + + :return: nothing + """ + logging.info("CORE daemon v.%s started %s", constants.COREDPY_VERSION, time.ctime()) + + +def cored(cfg): + """ + Start the CoreServer object and enter the server loop. + + :param dict cfg: core configuration + :param bool use_ovs: flag to determine if ovs nodes should be used + :return: nothing + """ + host = cfg["listenaddr"] + port = int(cfg["port"]) + if host == "" or host is None: + host = "localhost" + + try: + server = CoreServer((host, port), CoreHandler, cfg) + if cfg["ovs"] == "True": + from core.netns.openvswitch import OVS_NODES + server.coreemu.update_nodes(OVS_NODES) + except: + logging.exception("error starting main server on: %s:%s", host, port) + sys.exit(1) + + # initialize grpc api + if cfg["grpc"] == "True": + grpc_server = CoreGrpcServer(server.coreemu) + grpc_thread = threading.Thread(target=grpc_server.listen) + grpc_thread.daemon = True + grpc_thread.start() + + close_onexec(server.fileno()) + logging.info("server started, listening on: %s:%s", host, port) + server.serve_forever() + + +def get_merged_config(filename): + """ + Return a configuration after merging config file and command-line arguments. + + :param str filename: file name to merge configuration settings with + :return: merged configuration + :rtype: dict + """ + # these are the defaults used in the config file + defaults = { + "port": "%d" % enumerations.CORE_API_PORT, + "listenaddr": "localhost", + "xmlfilever": "1.0", + "numthreads": "1", + } + + parser = argparse.ArgumentParser( + description="CORE daemon v.%s instantiates Linux network namespace nodes." 
% constants.COREDPY_VERSION) + parser.add_argument("-f", "--configfile", dest="configfile", + help="read config from specified file; default = %s" % filename) + parser.add_argument("-p", "--port", dest="port", type=int, + help="port number to listen on; default = %s" % defaults["port"]) + parser.add_argument("-n", "--numthreads", dest="numthreads", type=int, + help="number of server threads; default = %s" % defaults["numthreads"]) + parser.add_argument("--ovs", action="store_true", help="enable experimental ovs mode, default is false") + parser.add_argument("--grpc", action="store_true", help="enable grpc api, default is false") + + # parse command line options + args = parser.parse_args() + + # read the config file + if args.configfile is not None: + filename = args.configfile + del args.configfile + cfg = ConfigParser.SafeConfigParser(defaults) + cfg.read(filename) + + section = "core-daemon" + if not cfg.has_section(section): + cfg.add_section(section) + + # merge command line with config file + for opt in args.__dict__: + val = args.__dict__[opt] + if val is not None: + cfg.set(section, opt, str(val)) + + return dict(cfg.items(section)) + + +def main(): + """ + Main program startup. + + :return: nothing + """ + # get a configuration merged from config file and command-line arguments + cfg = get_merged_config("%s/core.conf" % constants.CORE_CONF_DIR) + banner() + + try: + cored(cfg) + except KeyboardInterrupt: + logging.info("keyboard interrupt, stopping core daemon") + + +if __name__ == "__main__": + main() diff --git a/daemon/scripts/core-manage b/daemon/scripts/core-manage new file mode 100644 index 00000000..87d23a24 --- /dev/null +++ b/daemon/scripts/core-manage @@ -0,0 +1,248 @@ +#!/usr/bin/env python +""" +core-manage: Helper tool to add, remove, or check for services, models, and +node types in a CORE installation. +""" + +import ast +import optparse +import os +import re +import sys + +from core import services +from core.constants import CORE_CONF_DIR + + +class FileUpdater(object): + """ + Helper class for changing configuration files. + """ + actions = ("add", "remove", "check") + targets = ("service", "model", "nodetype") + + def __init__(self, action, target, data, options): + """ + """ + self.action = action + self.target = target + self.data = data + self.options = options + self.verbose = options.verbose + self.search, self.filename = self.get_filename(target) + + def process(self): + """ Invoke update_file() using a helper method depending on target. + """ + if self.verbose: + txt = "Updating" + if self.action == "check": + txt = "Checking" + sys.stdout.write("%s file: %s\n" % (txt, self.filename)) + + if self.target == "service": + r = self.update_file(fn=self.update_services) + elif self.target == "model": + r = self.update_file(fn=self.update_emane_models) + elif self.target == "nodetype": + r = self.update_nodes_conf() + + if self.verbose: + txt = "" + if not r: + txt = "NOT " + if self.action == "check": + sys.stdout.write("String %sfound.\n" % txt) + else: + sys.stdout.write("File %supdated.\n" % txt) + + return r + + def update_services(self, line): + """ Modify the __init__.py file having this format: + __all__ = ["quagga", "nrl", "xorp", "bird", ] + Returns True or False when "check" is the action, a modified line + otherwise. 
+ """ + line = line.strip("\n") + key, valstr = line.split("= ") + vals = ast.literal_eval(valstr) + r = self.update_keyvals(key, vals) + if self.action == "check": + return r + valstr = "%s" % r + return "= ".join([key, valstr]) + "\n" + + def update_emane_models(self, line): + """ Modify the core.conf file having this format: + emane_models = RfPipe, Ieee80211abg, CommEffect, Bypass + Returns True or False when "check" is the action, a modified line + otherwise. + """ + line = line.strip("\n") + key, valstr = line.split("= ") + vals = valstr.split(", ") + r = self.update_keyvals(key, vals) + if self.action == "check": + return r + valstr = ", ".join(r) + return "= ".join([key, valstr]) + "\n" + + def update_keyvals(self, key, vals): + """ Perform self.action on (key, vals). + Returns True or False when "check" is the action, a modified line + otherwise. + """ + if self.action == "check": + if self.data in vals: + return True + else: + return False + elif self.action == "add": + if self.data not in vals: + vals.append(self.data) + elif self.action == "remove": + try: + vals.remove(self.data) + except ValueError: + pass + return vals + + def get_filename(self, target): + """ Return search string and filename based on target. + """ + if target == "service": + filename = os.path.abspath(services.__file__) + search = "__all__ =" + elif target == "model": + filename = os.path.join(CORE_CONF_DIR, "core.conf") + search = "emane_models =" + elif target == "nodetype": + if self.options.userpath is None: + raise ValueError, "missing user path" + filename = os.path.join(self.options.userpath, "nodes.conf") + search = self.data + else: + raise ValueError, "unknown target" + if not os.path.exists(filename): + raise ValueError, "file %s does not exist" % filename + return search, filename + + def update_file(self, fn=None): + """ Open a file and search for self.search, invoking the supplied + function on the matching line. Write file changes if necessary. + Returns True if the file has changed (or action is "check" and the + search string is found), False otherwise. + """ + changed = False + output = "" # this accumulates output, assumes input is small + with open(self.filename, "r") as f: + for line in f: + if line[:len(self.search)] == self.search: + r = fn(line) # line may be modified by fn() here + if self.action == "check": + return r + else: + if line != r: + changed = True + line = r + output += line + if changed: + with open(self.filename, "w") as f: + f.write(output) + + return changed + + def update_nodes_conf(self): + """ Add/remove/check entries from nodes.conf. This file + contains a Tcl-formatted array of node types. The array index must be + properly set for new entries. Uses self.{action, filename, search, + data} variables as input and returns the same value as update_file(). 
+ """ + changed = False + output = "" # this accumulates output, assumes input is small + with open(self.filename, "r") as f: + for line in f: + # make sure data is not added twice + if line.find(self.search) >= 0: + if self.action == "check": + return True + elif self.action == "add": + return False + elif self.action == "remove": + changed = True + continue + else: + output += line + + if self.action == "add": + index = int(re.match("^\d+", line).group(0)) + output += str(index + 1) + " " + self.data + "\n" + changed = True + if changed: + with open(self.filename, "w") as f: + f.write(output) + + return changed + + +def main(): + usagestr = "usage: %prog [-h] [options] \n" + usagestr += "\nHelper tool to add, remove, or check for " + usagestr += "services, models, and node types\nin a CORE installation.\n" + usagestr += "\nExamples:\n %prog add service newrouting" + usagestr += "\n %prog -v check model RfPipe" + usagestr += "\n %prog --userpath=\"$HOME/.core\" add nodetype \"{ftp ftp.gif ftp.gif {DefaultRoute FTP} netns {FTP server} }\" \n" + usagestr += "\nArguments:\n should be one of: %s" % \ + ", ".join(FileUpdater.actions) + usagestr += "\n should be one of: %s" % \ + ", ".join(FileUpdater.targets) + usagestr += "\n is the text to %s" % \ + ", ".join(FileUpdater.actions) + parser = optparse.OptionParser(usage=usagestr) + parser.set_defaults(userpath=None, verbose=False, ) + + parser.add_option("--userpath", dest="userpath", type="string", + help="use the specified user path (e.g. \"$HOME/.core" \ + "\") to access nodes.conf") + parser.add_option("-v", "--verbose", dest="verbose", action="store_true", + help="be verbose when performing action") + + def usage(msg=None, err=0): + sys.stdout.write("\n") + if msg: + sys.stdout.write(msg + "\n\n") + parser.print_help() + sys.exit(err) + + (options, args) = parser.parse_args() + + if len(args) != 3: + usage("Missing required arguments!", 1) + + action = args[0] + if action not in FileUpdater.actions: + usage("invalid action %s" % action, 1) + + target = args[1] + if target not in FileUpdater.targets: + usage("invalid target %s" % target, 1) + + if target == "nodetype" and not options.userpath: + usage("user path option required for this target (%s)" % target) + + data = args[2] + + try: + up = FileUpdater(action, target, data, options) + r = up.process() + except Exception, e: + sys.stderr.write("Exception: %s\n" % e) + sys.exit(1) + if not r: + sys.exit(1) + sys.exit(0) + + +if __name__ == "__main__": + main() diff --git a/daemon/scripts/coresendmsg b/daemon/scripts/coresendmsg new file mode 100644 index 00000000..be6e030b --- /dev/null +++ b/daemon/scripts/coresendmsg @@ -0,0 +1,274 @@ +#!/usr/bin/env python +""" +coresendmsg: utility for generating CORE messages +""" + +import optparse +import os +import socket +import sys + +from core.api import coreapi +from core.enumerations import CORE_API_PORT +from core.enumerations import MessageFlags +from core.enumerations import MessageTypes +from core.enumerations import SessionTlvs + + +def print_available_tlvs(t, tlv_class): + """ + Print a TLV list. + """ + print "TLVs available for %s message:" % t + for tlv in sorted([tlv for tlv in tlv_class.tlv_type_map], key=lambda x: x.name): + print "%s:%s" % (tlv.value, tlv.name) + + +def print_examples(name): + """ + Print example usage of this script. 
+ """ + examples = [ + ("link n1number=2 n2number=3 delay=15000", + "set a 15ms delay on the link between n2 and n3"), + ("link n1number=2 n2number=3 guiattr=\"color=blue\"", + "change the color of the link between n2 and n3"), + ("node number=3 xpos=125 ypos=525", + "move node number 3 to x,y=(125,525)"), + ("node number=4 icon=/usr/local/share/core/icons/normal/router_red.gif", + "change node number 4\"s icon to red"), + ("node flags=add number=5 type=0 name=\"n5\" xpos=500 ypos=500", + "add a new router node n5"), + ("link flags=add n1number=4 n2number=5 if1ip4=\"10.0.3.2\" " \ + "if1ip4mask=24 if2ip4=\"10.0.3.1\" if2ip4mask=24", + "link node n5 with n4 using the given interface addresses"), + ("exec flags=str,txt node=1 num=1000 cmd=\"uname -a\" -l", + "run a command on node 1 and wait for the result"), + ("exec node=2 num=1001 cmd=\"killall ospfd\"", + "run a command on node 2 and ignore the result"), + ("file flags=add node=1 name=\"/var/log/test.log\" data=\"Hello World.\"", + "write a test.log file on node 1 with the given contents"), + ("file flags=add node=2 name=\"test.log\" " \ + "srcname=\"./test.log\"", + "move a test.log file from host to node 2"), + ] + print "Example %s invocations:" % name + for cmd, descr in examples: + print " %s %s\n\t\t%s" % (name, cmd, descr) + + +def receive_message(sock): + """ + Retrieve a message from a socket and return the CoreMessage object or + None upon disconnect. Socket data beyond the first message is dropped. + """ + try: + # large receive buffer used for UDP sockets, instead of just receiving + # the 4-byte header + data = sock.recv(4096) + msghdr = data[:coreapi.CoreMessage.header_len] + except KeyboardInterrupt: + print "CTRL+C pressed" + sys.exit(1) + + if len(msghdr) == 0: + return None + + msgdata = None + msgtype, msgflags, msglen = coreapi.CoreMessage.unpack_header(msghdr) + + if msglen: + msgdata = data[coreapi.CoreMessage.header_len:] + try: + msgcls = coreapi.CLASS_MAP[msgtype] + except KeyError: + msg = coreapi.CoreMessage(msgflags, msghdr, msgdata) + msg.message_type = msgtype + print "unimplemented CORE message type: %s" % msg.type_str() + return msg + if len(data) > msglen + coreapi.CoreMessage.header_len: + print "received a message of type %d, dropping %d bytes of extra data" \ + % (msgtype, len(data) - (msglen + coreapi.CoreMessage.header_len)) + return msgcls(msgflags, msghdr, msgdata) + + +def connect_to_session(sock, requested): + """ + Use Session Messages to retrieve the current list of sessions and + connect to the first one. + """ + # request the session list + tlvdata = coreapi.CoreSessionTlv.pack(SessionTlvs.NUMBER.value, "") + flags = MessageFlags.STRING.value + smsg = coreapi.CoreSessionMessage.pack(flags, tlvdata) + sock.sendall(smsg) + + print "waiting for session list..." + smsgreply = receive_message(sock) + if smsgreply is None: + print "disconnected" + return False + + sessstr = smsgreply.get_tlv(SessionTlvs.NUMBER.value) + if sessstr is None: + print "missing session numbers" + return False + + # join the first session (that is not our own connection) + tmp, localport = sock.getsockname() + sessions = sessstr.split("|") + sessions.remove(str(localport)) + if len(sessions) == 0: + print "no sessions to join" + return False + + if not requested: + session = sessions[0] + elif requested in sessions: + session = requested + else: + print "requested session not found!" 
+ return False + + print "joining session: %s" % session + tlvdata = coreapi.CoreSessionTlv.pack(SessionTlvs.NUMBER.value, session) + flags = MessageFlags.ADD.value + smsg = coreapi.CoreSessionMessage.pack(flags, tlvdata) + sock.sendall(smsg) + return True + + +def receive_response(sock, opt): + """ + Receive and print a CORE message from the given socket. + """ + print "waiting for response..." + msg = receive_message(sock) + if msg is None: + print "disconnected from %s:%s" % (opt.address, opt.port) + sys.exit(0) + print "received message:", msg + + +def main(): + """ + Parse command-line arguments to build and send a CORE message. + """ + types = [message_type.name for message_type in MessageTypes] + flags = [flag.name for flag in MessageFlags] + usagestr = "usage: %prog [-h|-H] [options] [message-type] [flags=flags] " + usagestr += "[message-TLVs]\n\n" + usagestr += "Supported message types:\n %s\n" % types + usagestr += "Supported message flags (flags=f1,f2,...):\n %s" % flags + parser = optparse.OptionParser(usage=usagestr) + parser.set_defaults( + port=CORE_API_PORT, + address="localhost", + session=None, + listen=False, + examples=False, + tlvs=False, + tcp=False + ) + + parser.add_option("-H", dest="examples", action="store_true", + help="show example usage help message and exit") + parser.add_option("-p", "--port", dest="port", type=int, + help="TCP port to connect to, default: %d" % \ + parser.defaults["port"]) + parser.add_option("-a", "--address", dest="address", type=str, + help="Address to connect to, default: %s" % \ + parser.defaults["address"]) + parser.add_option("-s", "--session", dest="session", type=str, + help="Session to join, default: %s" % \ + parser.defaults["session"]) + parser.add_option("-l", "--listen", dest="listen", action="store_true", + help="Listen for a response message and print it.") + parser.add_option("-t", "--list-tlvs", dest="tlvs", action="store_true", + help="List TLVs for the specified message type.") + + def usage(msg=None, err=0): + sys.stdout.write("\n") + if msg: + sys.stdout.write(msg + "\n\n") + parser.print_help() + sys.exit(err) + + # parse command line opt + opt, args = parser.parse_args() + if opt.examples: + print_examples(os.path.basename(sys.argv[0])) + sys.exit(0) + if len(args) == 0: + usage("Please specify a message type to send.") + + # given a message type t, determine the message and TLV classes + t = args.pop(0) + if t not in types: + usage("Unknown message type requested: %s" % t) + message_type = MessageTypes[t] + msg_cls = coreapi.CLASS_MAP[message_type.value] + tlv_cls = msg_cls.tlv_class + + # list TLV types for this message type + if opt.tlvs: + print_available_tlvs(t, tlv_cls) + sys.exit(0) + + # build a message consisting of TLVs from "type=value" arguments + flagstr = "" + tlvdata = "" + for a in args: + typevalue = a.split("=") + if len(typevalue) < 2: + usage("Use \"type=value\" syntax instead of \"%s\"." % a) + tlv_typestr = typevalue[0] + tlv_valstr = "=".join(typevalue[1:]) + if tlv_typestr == "flags": + flagstr = tlv_valstr + continue + + tlv_name = tlv_typestr + try: + tlv_type = tlv_cls.tlv_type_map[tlv_name] + tlvdata += tlv_cls.pack_string(tlv_type.value, tlv_valstr) + except KeyError: + usage("Unknown TLV: \"%s\"" % tlv_name) + + flags = 0 + for f in flagstr.split(","): + if f == "": + continue + + try: + flag_enum = MessageFlags[f] + n = flag_enum.value + flags |= n + except KeyError: + usage("Invalid flag \"%s\"." 
% f) + + msg = msg_cls.pack(flags, tlvdata) + + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.setblocking(True) + + try: + sock.connect((opt.address, opt.port)) + except Exception as e: + print "Error connecting to %s:%s:\n\t%s" % (opt.address, opt.port, e) + sys.exit(1) + + if not connect_to_session(sock, opt.session): + print "warning: continuing without joining a session!" + + sock.sendall(msg) + if opt.listen: + receive_response(sock, opt) + if opt.tcp: + sock.shutdown(socket.SHUT_RDWR) + sock.close() + sys.exit(0) + + +if __name__ == "__main__": + main() diff --git a/daemon/setup.cfg b/daemon/setup.cfg index 89c968b9..9af7e6f1 100644 --- a/daemon/setup.cfg +++ b/daemon/setup.cfg @@ -1,9 +1,2 @@ -[flake8] -ignore=E501,W503,E203 -max-line-length=88 -max-complexity=26 -select=B,C,E,F,W,T4 -exclude=*_pb2*.py,doc,build - -[tool:pytest] -norecursedirs=distributed emane +[aliases] +test=pytest \ No newline at end of file diff --git a/daemon/setup.py.in b/daemon/setup.py.in new file mode 100644 index 00000000..89be649e --- /dev/null +++ b/daemon/setup.py.in @@ -0,0 +1,62 @@ +""" +Defines how CORE will be built for installation. +""" + +import glob +import os + +from setuptools import find_packages +from distutils.core import setup + +_CORE_DIR = "/etc/core" +_MAN_DIR = "share/man/man1" +_EXAMPLES_DIR = "share/core" + + +def recursive_files(data_path, files_path): + all_files = [] + for path, _directories, filenames in os.walk(files_path): + directory = os.path.join(data_path, path) + files = [] + for filename in filenames: + files.append(os.path.join(path, filename)) + all_files.append((directory, files)) + return all_files + + +def glob_files(glob_path): + return glob.glob(glob_path) + + +data_files = [ + (_CORE_DIR, [ + "data/core.conf", + "data/logging.conf", + ]), + (_MAN_DIR, glob_files("../man/**.1")), +] +data_files.extend(recursive_files(_EXAMPLES_DIR, "examples")) + +setup( + name="core", + version="@PACKAGE_VERSION@", + packages=find_packages(), + install_requires=[ + "enum34", + "lxml" + ], + tests_require=[ + "pytest", + "pytest-runner", + "pytest-cov", + "mock", + ], + data_files=data_files, + scripts=glob.glob("scripts/*"), + description="Python components of CORE", + url="https://github.com/coreemu/core", + author="Boeing Research & Technology", + author_email="core-dev@nrl.navy.mil", + license="BSD", + long_description="Python scripts and modules for building virtual emulated networks.", +) diff --git a/daemon/test.py b/daemon/test.py new file mode 100644 index 00000000..8a903ab3 --- /dev/null +++ b/daemon/test.py @@ -0,0 +1,12 @@ +import sys + +import pytest + +distributed = sys.argv[1] +pytest.main([ + "-v", + "--distributed", distributed, + "--cov-report", "xml", + "--cov=.", + "tests" +]) diff --git a/daemon/tests/conftest.py b/daemon/tests/conftest.py index b668fb07..3bd32121 100644 --- a/daemon/tests/conftest.py +++ b/daemon/tests/conftest.py @@ -2,122 +2,279 @@ Unit test fixture module. 
""" +import os import threading import time -import mock import pytest +from mock.mock import MagicMock -from core.api.grpc.client import InterfaceHelper -from core.api.grpc.server import CoreGrpcServer +from core.api.coreapi import CoreConfMessage +from core.api.coreapi import CoreEventMessage +from core.api.coreapi import CoreExecMessage +from core.api.coreapi import CoreLinkMessage +from core.api.coreapi import CoreNodeMessage +from core.corehandlers import CoreHandler +from core.coreserver import CoreServer from core.emulator.coreemu import CoreEmu -from core.emulator.data import IpPrefixes -from core.emulator.distributed import DistributedServer -from core.emulator.enumerations import EventTypes -from core.emulator.session import Session -from core.nodes.base import CoreNode -from core.nodes.netclient import LinuxNetClient +from core.emulator.emudata import IpPrefixes +from core.enumerations import CORE_API_PORT +from core.enumerations import ConfigTlvs +from core.enumerations import EventTlvs +from core.enumerations import EventTypes +from core.enumerations import ExecuteTlvs +from core.enumerations import LinkTlvs +from core.enumerations import LinkTypes +from core.enumerations import MessageFlags +from core.enumerations import NodeTlvs +from core.enumerations import NodeTypes +from core.grpc.client import InterfaceHelper +from core.grpc.server import CoreGrpcServer +from core.misc import ipaddress +from core.misc.ipaddress import MacAddress +from core.service import ServiceManager EMANE_SERVICES = "zebra|OSPFv3MDR|IPForward" -class PatchManager: - def __init__(self): - self.patches = [] +def node_message(objid, name, emulation_server=None, node_type=NodeTypes.DEFAULT, model=None): + """ + Convenience method for creating a node TLV messages. - def patch_obj(self, _cls, attribute, return_value=None): - p = mock.patch.object(_cls, attribute, return_value=return_value) - p.start() - self.patches.append(p) + :param int objid: node id + :param str name: node name + :param str emulation_server: distributed server name, if desired + :param core.enumerations.NodeTypes node_type: node type + :param str model: model for node + :return: tlv message + :rtype: core.api.coreapi.CoreNodeMessage + """ + values = [ + (NodeTlvs.NUMBER, objid), + (NodeTlvs.TYPE, node_type.value), + (NodeTlvs.NAME, name), + (NodeTlvs.EMULATION_SERVER, emulation_server), + ] - def patch(self, func): - p = mock.patch(func) - p.start() - self.patches.append(p) + if model: + values.append((NodeTlvs.MODEL, model)) + + return CoreNodeMessage.create(MessageFlags.ADD.value, values) + + +def link_message(n1, n2, intf_one=None, address_one=None, intf_two=None, address_two=None, key=None): + """ + Convenience method for creating link TLV messages. 
+ + :param int n1: node one id + :param int n2: node two id + :param int intf_one: node one interface id + :param core.misc.ipaddress.IpAddress address_one: node one ip4 address + :param int intf_two: node two interface id + :param core.misc.ipaddress.IpAddress address_two: node two ip4 address + :param int key: tunnel key for link if needed + :return: tlv message + :rtype: core.api.coreapi.CoreLinkMessage + """ + mac_one, mac_two = None, None + if address_one: + mac_one = MacAddress.random() + if address_two: + mac_two = MacAddress.random() + + values = [ + (LinkTlvs.N1_NUMBER, n1), + (LinkTlvs.N2_NUMBER, n2), + (LinkTlvs.DELAY, 0), + (LinkTlvs.BANDWIDTH, 0), + (LinkTlvs.PER, "0"), + (LinkTlvs.DUP, "0"), + (LinkTlvs.JITTER, 0), + (LinkTlvs.TYPE, LinkTypes.WIRED.value), + (LinkTlvs.INTERFACE1_NUMBER, intf_one), + (LinkTlvs.INTERFACE1_IP4, address_one), + (LinkTlvs.INTERFACE1_IP4_MASK, 24), + (LinkTlvs.INTERFACE1_MAC, mac_one), + (LinkTlvs.INTERFACE2_NUMBER, intf_two), + (LinkTlvs.INTERFACE2_IP4, address_two), + (LinkTlvs.INTERFACE2_IP4_MASK, 24), + (LinkTlvs.INTERFACE2_MAC, mac_two), + ] + + if key: + values.append((LinkTlvs.KEY, key)) + + return CoreLinkMessage.create(MessageFlags.ADD.value, values) + + +def command_message(node, command): + """ + Create an execute command TLV message. + + :param node: node to execute command for + :param command: command to execute + :return: tlv message + :rtype: core.api.coreapi.CoreExecMessage + """ + flags = MessageFlags.STRING.value | MessageFlags.TEXT.value + return CoreExecMessage.create(flags, [ + (ExecuteTlvs.NODE, node.objid), + (ExecuteTlvs.NUMBER, 1), + (ExecuteTlvs.COMMAND, command) + ]) + + +def state_message(state): + """ + Create an event TLV message for a new state. + + :param core.enumerations.EventTypes state: state to create message for + :return: tlv message + :rtype: core.api.coreapi.CoreEventMessage + """ + return CoreEventMessage.create(0, [ + (EventTlvs.TYPE, state.value) + ]) + + +class CoreServerTest(object): + def __init__(self, port=CORE_API_PORT): + self.host = "localhost" + self.port = port + address = (self.host, self.port) + self.server = CoreServer(address, CoreHandler, { + "numthreads": 1, + "daemonize": False, + }) + + self.distributed_server = "core2" + self.prefix = ipaddress.Ipv4Prefix("10.83.0.0/16") + self.session = None + self.request_handler = None + + def setup(self, distributed_address, port): + # validate address + assert distributed_address, "distributed server address was not provided" + + # create session + self.session = self.server.coreemu.create_session(1) + self.session.master = True + + # create request handler + request_mock = MagicMock() + request_mock.fileno = MagicMock(return_value=1) + self.request_handler = CoreHandler(request_mock, "", self.server) + self.request_handler.session = self.session + self.request_handler.add_session_handlers() + self.session.broker.session_clients.append(self.request_handler) + + # have broker handle a configuration state change + self.session.set_state(EventTypes.DEFINITION_STATE) + message = state_message(EventTypes.CONFIGURATION_STATE) + self.request_handler.handle_message(message) + + # add broker server for distributed core + distributed = "%s:%s:%s" % (self.distributed_server, distributed_address, port) + message = CoreConfMessage.create(0, [ + (ConfigTlvs.OBJECT, "broker"), + (ConfigTlvs.TYPE, 0), + (ConfigTlvs.DATA_TYPES, (10,)), + (ConfigTlvs.VALUES, distributed) + ]) + self.request_handler.handle_message(message) + + # set session location + message = 
CoreConfMessage.create(0, [ + (ConfigTlvs.OBJECT, "location"), + (ConfigTlvs.TYPE, 0), + (ConfigTlvs.DATA_TYPES, (9, 9, 9, 9, 9, 9)), + (ConfigTlvs.VALUES, "0|0| 47.5766974863|-122.125920191|0.0|150.0") + ]) + self.request_handler.handle_message(message) + + # set services for host nodes + message = CoreConfMessage.create(0, [ + (ConfigTlvs.SESSION, str(self.session.id)), + (ConfigTlvs.OBJECT, "services"), + (ConfigTlvs.TYPE, 0), + (ConfigTlvs.DATA_TYPES, (10, 10, 10)), + (ConfigTlvs.VALUES, "host|DefaultRoute|SSH") + ]) + self.request_handler.handle_message(message) def shutdown(self): - for p in self.patches: - p.stop() + self.server.coreemu.shutdown() + self.server.shutdown() + self.server.server_close() -class MockServer: - def __init__(self, coreemu): - self.config = {} - self.coreemu = coreemu - - -@pytest.fixture(scope="session") -def patcher(request): - patch_manager = PatchManager() - patch_manager.patch_obj(DistributedServer, "remote_cmd", return_value="1") - if request.config.getoption("mock"): - patch_manager.patch("os.mkdir") - patch_manager.patch("core.utils.cmd") - patch_manager.patch("core.utils.which") - patch_manager.patch("core.nodes.netclient.get_net_client") - patch_manager.patch_obj( - LinuxNetClient, "get_mac", return_value="00:00:00:00:00:00" - ) - patch_manager.patch_obj(CoreNode, "create_file") - yield patch_manager - patch_manager.shutdown() - - -@pytest.fixture(scope="session") -def global_coreemu(patcher): - coreemu = CoreEmu(config={"emane_prefix": "/usr"}) - yield coreemu - coreemu.shutdown() - - -@pytest.fixture(scope="session") -def global_session(request, patcher, global_coreemu): - mkdir = not request.config.getoption("mock") - session = Session(1000, {"emane_prefix": "/usr"}, mkdir) - session.service_manager = global_coreemu.service_manager - yield session - session.shutdown() - - -@pytest.fixture(scope="session") -def ip_prefixes(): - return IpPrefixes(ip4_prefix="10.83.0.0/16") - - -@pytest.fixture(scope="session") -def iface_helper(): - return InterfaceHelper(ip4_prefix="10.83.0.0/16") - - -@pytest.fixture(scope="module") -def module_grpc(global_coreemu): - grpc_server = CoreGrpcServer(global_coreemu) - thread = threading.Thread(target=grpc_server.listen, args=("localhost:50051",)) +@pytest.fixture +def grpc_server(): + coremu = CoreEmu() + grpc_server = CoreGrpcServer(coremu) + thread = threading.Thread(target=grpc_server.listen) thread.daemon = True thread.start() time.sleep(0.1) yield grpc_server + coremu.shutdown() grpc_server.server.stop(None) @pytest.fixture -def grpc_server(module_grpc): - yield module_grpc - for session in module_grpc.coreemu.sessions.values(): - session.set_state(EventTypes.CONFIGURATION_STATE) - module_grpc.coreemu.shutdown() +def session(): + # use coreemu and create a session + coreemu = CoreEmu(config={"emane_prefix": "/usr"}) + session_fixture = coreemu.create_session() + session_fixture.set_state(EventTypes.CONFIGURATION_STATE) + assert os.path.exists(session_fixture.session_dir) + + # return created session + yield session_fixture + + # clear session configurations + session_fixture.location.reset() + session_fixture.services.reset() + session_fixture.mobility.config_reset() + session_fixture.emane.config_reset() + + # shutdown coreemu + coreemu.shutdown() + + # clear services, since they will be reloaded + ServiceManager.services.clear() -@pytest.fixture -def session(global_session): - global_session.set_state(EventTypes.CONFIGURATION_STATE) - yield global_session - global_session.clear() 
+@pytest.fixture(scope="module") +def ip_prefixes(): + return IpPrefixes(ip4_prefix="10.83.0.0/16") + + +@pytest.fixture(scope="module") +def interface_helper(): + return InterfaceHelper(ip4_prefix="10.83.0.0/16") + + +@pytest.fixture() +def cored(): + # create and return server + server = CoreServerTest() + yield server + + # cleanup + server.shutdown() + + # cleanup services + ServiceManager.services.clear() + + +def ping(from_node, to_node, ip_prefixes, count=3): + address = ip_prefixes.ip4_address(to_node) + return from_node.cmd(["ping", "-c", str(count), address]) def pytest_addoption(parser): parser.addoption("--distributed", help="distributed server address") - parser.addoption("--mock", action="store_true", help="run without mocking") def pytest_generate_tests(metafunc): diff --git a/daemon/tests/distributed/test_distributed.py b/daemon/tests/distributed/test_distributed.py new file mode 100644 index 00000000..1a8b5d9f --- /dev/null +++ b/daemon/tests/distributed/test_distributed.py @@ -0,0 +1,207 @@ +""" +Unit tests for testing CORE with distributed networks. +""" + +import conftest + +from core.api.coreapi import CoreExecMessage +from core.enumerations import EventTypes +from core.enumerations import ExecuteTlvs +from core.enumerations import MessageFlags +from core.enumerations import NodeTypes +from core.misc.ipaddress import IpAddress + + +def validate_response(replies, _): + """ + Patch method for handling dispatch replies within a CoreRequestHandler to validate a response. + + :param tuple replies: replies to handle + :param _: nothing + :return: nothing + """ + response = replies[0] + header = response[:CoreExecMessage.header_len] + tlv_data = response[CoreExecMessage.header_len:] + response = CoreExecMessage(MessageFlags.TEXT, header, tlv_data) + assert not response.get_tlv(ExecuteTlvs.STATUS.value) + + +class TestDistributed: + def test_distributed(self, cored, distributed_address): + """ + Test creating a distributed network. 
+ + :param conftest.CoreServerTest cored: core daemon server to test with + :param str distributed_address: distributed server to test against + """ + # initialize server for testing + cored.setup(distributed_address) + + # create local node + message = conftest.node_message( + objid=1, + name="n1", + model="host" + ) + cored.request_handler.handle_message(message) + + # create distributed node and assign to distributed server + message = conftest.node_message( + objid=2, + name="n2", + emulation_server=cored.distributed_server, + model="host" + ) + cored.request_handler.handle_message(message) + + # create distributed switch and assign to distributed server + message = conftest.node_message( + objid=3, + name="n3", + emulation_server=cored.distributed_server, + node_type=NodeTypes.SWITCH + ) + cored.request_handler.handle_message(message) + + # link message one + ip4_address = cored.prefix.addr(1) + message = conftest.link_message( + n1=1, + n2=3, + intf_one=0, + address_one=ip4_address + ) + cored.request_handler.handle_message(message) + + # link message two + ip4_address = cored.prefix.addr(2) + message = conftest.link_message( + n1=3, + n2=2, + intf_two=0, + address_two=ip4_address + ) + cored.request_handler.handle_message(message) + + # change session to instantiation state + message = conftest.state_message(EventTypes.INSTANTIATION_STATE) + cored.request_handler.handle_message(message) + + # test a ping command + node_one = cored.session.get_object(1) + message = conftest.command_message(node_one, "ping -c 5 %s" % ip4_address) + cored.request_handler.dispatch_replies = validate_response + cored.request_handler.handle_message(message) + + def test_prouter(self, cored, distributed_address): + """ + Test creating a distributed prouter node. 
+ + :param conftest.CoreServerTest cored: core daemon server to test with + :param str distributed_address: distributed server to test against + """ + # initialize server for testing + cored.setup(distributed_address) + + # create local node + message = conftest.node_message( + objid=1, + name="n1", + model="host" + ) + cored.request_handler.handle_message(message) + + # create distributed node and assign to distributed server + message = conftest.node_message( + objid=2, + name="n2", + emulation_server=cored.distributed_server, + node_type=NodeTypes.PHYSICAL, + model="prouter" + ) + cored.request_handler.handle_message(message) + + # create distributed switch and assign to distributed server + message = conftest.node_message( + objid=3, + name="n3", + node_type=NodeTypes.SWITCH + ) + cored.request_handler.handle_message(message) + + # link message one + ip4_address = cored.prefix.addr(1) + message = conftest.link_message( + n1=1, + n2=3, + intf_one=0, + address_one=ip4_address + ) + cored.request_handler.handle_message(message) + + # link message two + ip4_address = cored.prefix.addr(2) + message = conftest.link_message( + n1=3, + n2=2, + intf_two=0, + address_two=ip4_address + ) + cored.request_handler.handle_message(message) + + # change session to instantiation state + message = conftest.state_message(EventTypes.INSTANTIATION_STATE) + cored.request_handler.handle_message(message) + + # test a ping command + node_one = cored.session.get_object(1) + message = conftest.command_message(node_one, "ping -c 5 %s" % ip4_address) + cored.request_handler.dispatch_replies = validate_response + cored.request_handler.handle_message(message) + cored.request_handler.handle_message(message) + + def test_tunnel(self, cored, distributed_address): + """ + Test session broker creation. + + :param conftest.CoreServerTest cored: core daemon server to test with + :param str distributed_address: distributed server to test against + """ + # initialize server for testing + cored.setup(distributed_address) + + # create local node + message = conftest.node_message( + objid=1, + name="n1", + model="host" + ) + cored.request_handler.handle_message(message) + + # create distributed node and assign to distributed server + message = conftest.node_message( + objid=2, + name=distributed_address, + emulation_server=cored.distributed_server, + node_type=NodeTypes.TUNNEL + ) + cored.request_handler.handle_message(message) + + # link message one + ip4_address = cored.prefix.addr(1) + address_two = IpAddress.from_string(distributed_address) + message = conftest.link_message( + n1=1, + n2=2, + intf_one=0, + address_one=ip4_address, + intf_two=0, + address_two=address_two, + key=1 + ) + cored.request_handler.handle_message(message) + + # change session to instantiation state + message = conftest.state_message(EventTypes.INSTANTIATION_STATE) + cored.request_handler.handle_message(message) diff --git a/daemon/tests/emane/test_emane.py b/daemon/tests/emane/test_emane.py deleted file mode 100644 index 2ddb1a5d..00000000 --- a/daemon/tests/emane/test_emane.py +++ /dev/null @@ -1,332 +0,0 @@ -""" -Unit tests for testing CORE EMANE networks. 
-""" -from pathlib import Path -from tempfile import TemporaryFile -from typing import Type -from xml.etree import ElementTree - -import pytest - -from core import utils -from core.emane.emanemodel import EmaneModel -from core.emane.models.bypass import EmaneBypassModel -from core.emane.models.commeffect import EmaneCommEffectModel -from core.emane.models.ieee80211abg import EmaneIeee80211abgModel -from core.emane.models.rfpipe import EmaneRfPipeModel -from core.emane.models.tdma import EmaneTdmaModel -from core.emane.nodes import EmaneNet -from core.emulator.data import IpPrefixes -from core.emulator.session import Session -from core.errors import CoreCommandError, CoreError -from core.nodes.base import CoreNode, Position - -_EMANE_MODELS = [ - EmaneIeee80211abgModel, - EmaneRfPipeModel, - EmaneBypassModel, - EmaneCommEffectModel, - EmaneTdmaModel, -] -_DIR: Path = Path(__file__).resolve().parent -_SCHEDULE: Path = _DIR / "../../examples/tdma/schedule.xml" - - -def ping( - from_node: CoreNode, to_node: CoreNode, ip_prefixes: IpPrefixes, count: int = 3 -): - address = ip_prefixes.ip4_address(to_node.id) - try: - from_node.cmd(f"ping -c {count} {address}") - status = 0 - except CoreCommandError as e: - status = e.returncode - return status - - -class TestEmane: - def test_two_emane_interfaces(self, session: Session): - """ - Test nodes running multiple emane interfaces. - - :param core.emulator.coreemu.EmuSession session: session for test - """ - # create emane node for networking the core nodes - session.set_location(47.57917, -122.13232, 2.00000, 1.0) - options = EmaneNet.create_options() - options.emane_model = EmaneIeee80211abgModel.name - position = Position(x=80, y=50) - emane_net1 = session.add_node(EmaneNet, position=position, options=options) - options = EmaneNet.create_options() - options.emane_model = EmaneRfPipeModel.name - position = Position(x=80, y=50) - emane_net2 = session.add_node(EmaneNet, position=position, options=options) - - # create nodes - options = CoreNode.create_options() - options.model = "mdr" - position = Position(x=150, y=150) - node1 = session.add_node(CoreNode, position=position, options=options) - position = Position(x=300, y=150) - node2 = session.add_node(CoreNode, position=position, options=options) - - # create interfaces - ip_prefix1 = IpPrefixes("10.0.0.0/24") - ip_prefix2 = IpPrefixes("10.0.1.0/24") - for i, node in enumerate([node1, node2]): - node.setposition(x=150 * (i + 1), y=150) - iface_data = ip_prefix1.create_iface(node) - session.add_link(node.id, emane_net1.id, iface1_data=iface_data) - iface_data = ip_prefix2.create_iface(node) - session.add_link(node.id, emane_net2.id, iface1_data=iface_data) - - # instantiate session - session.instantiate() - - # ping node2 from node1 on both interfaces and check success - status = ping(node1, node2, ip_prefix1, count=5) - assert not status - status = ping(node1, node2, ip_prefix2, count=5) - assert not status - - @pytest.mark.parametrize("model", _EMANE_MODELS) - def test_models( - self, session: Session, model: Type[EmaneModel], ip_prefixes: IpPrefixes - ): - """ - Test emane models within a basic network. 
- - :param core.emulator.coreemu.EmuSession session: session for test - :param model: emane model to test - :param ip_prefixes: generates ip addresses for nodes - """ - - # create emane node for networking the core nodes - session.set_location(47.57917, -122.13232, 2.00000, 1.0) - options = EmaneNet.create_options() - options.emane_model = model.name - position = Position(x=80, y=50) - emane_network = session.add_node(EmaneNet, position=position, options=options) - - # configure tdma - if model == EmaneTdmaModel: - session.emane.set_config( - emane_network.id, EmaneTdmaModel.name, {"schedule": str(_SCHEDULE)} - ) - - # create nodes - options = CoreNode.create_options() - options.model = "mdr" - position = Position(x=150, y=150) - node1 = session.add_node(CoreNode, position=position, options=options) - position = Position(x=300, y=150) - node2 = session.add_node(CoreNode, position=position, options=options) - - for i, node in enumerate([node1, node2]): - node.setposition(x=150 * (i + 1), y=150) - iface_data = ip_prefixes.create_iface(node) - session.add_link(node.id, emane_network.id, iface1_data=iface_data) - - # instantiate session - session.instantiate() - - # ping node2 from node1 and assert success - status = ping(node1, node2, ip_prefixes, count=5) - assert not status - - def test_xml_emane( - self, session: Session, tmpdir: TemporaryFile, ip_prefixes: IpPrefixes - ): - """ - Test xml client methods for emane. - - :param session: session for test - :param tmpdir: tmpdir to create data in - :param ip_prefixes: generates ip addresses for nodes - """ - # create emane node for networking the core nodes - session.set_location(47.57917, -122.13232, 2.00000, 1.0) - options = EmaneNet.create_options() - options.emane_model = EmaneIeee80211abgModel.name - position = Position(x=80, y=50) - emane_network = session.add_node(EmaneNet, position=position, options=options) - config_key = "txpower" - config_value = "10" - session.emane.set_config( - emane_network.id, EmaneIeee80211abgModel.name, {config_key: config_value} - ) - - # create nodes - options = CoreNode.create_options() - options.model = "mdr" - position = Position(x=150, y=150) - node1 = session.add_node(CoreNode, position=position, options=options) - position = Position(x=300, y=150) - node2 = session.add_node(CoreNode, position=position, options=options) - - for i, node in enumerate([node1, node2]): - node.setposition(x=150 * (i + 1), y=150) - iface_data = ip_prefixes.create_iface(node) - session.add_link(node.id, emane_network.id, iface1_data=iface_data) - - # instantiate session - session.instantiate() - - # get ids for nodes - emane_id = emane_network.id - node1_id = node1.id - node2_id = node2.id - - # save xml - xml_file = tmpdir.join("session.xml") - file_path = xml_file.strpath - session.save_xml(Path(file_path)) - - # verify xml file was created and can be parsed - assert xml_file.isfile() - assert ElementTree.parse(file_path) - - # stop current session, clearing data - session.shutdown() - - # verify nodes have been removed from session - with pytest.raises(CoreError): - assert not session.get_node(node1_id, CoreNode) - with pytest.raises(CoreError): - assert not session.get_node(node2_id, CoreNode) - - # load saved xml - session.open_xml(Path(file_path), start=True) - - # retrieve configuration we set originally - config = session.emane.get_config(emane_id, EmaneIeee80211abgModel.name) - value = config[config_key] - - # verify nodes and configuration were restored - assert session.get_node(node1_id, CoreNode) - assert 
session.get_node(node2_id, CoreNode) - assert session.get_node(emane_id, EmaneNet) - assert value == config_value - - def test_xml_emane_node_config( - self, session: Session, tmpdir: TemporaryFile, ip_prefixes: IpPrefixes - ): - # create nodes - options = CoreNode.create_options() - options.model = "mdr" - position = Position(x=50, y=50) - node1 = session.add_node(CoreNode, position=position, options=options) - iface1_data = ip_prefixes.create_iface(node1) - node2 = session.add_node(CoreNode, position=position, options=options) - iface2_data = ip_prefixes.create_iface(node2) - - # create emane node - options = EmaneNet.create_options() - options.emane_model = EmaneRfPipeModel.name - emane_node = session.add_node(EmaneNet, options=options) - - # create links - session.add_link(node1.id, emane_node.id, iface1_data) - session.add_link(node2.id, emane_node.id, iface2_data) - - # set node specific config - datarate = "101" - session.emane.set_config( - node1.id, EmaneRfPipeModel.name, {"datarate": datarate} - ) - - # instantiate session - session.instantiate() - - # save xml - xml_file = tmpdir.join("session.xml") - file_path = xml_file.strpath - session.save_xml(Path(file_path)) - - # verify xml file was created and can be parsed - assert xml_file.isfile() - assert ElementTree.parse(file_path) - - # stop current session, clearing data - session.shutdown() - - # verify nodes have been removed from session - with pytest.raises(CoreError): - assert not session.get_node(node1.id, CoreNode) - with pytest.raises(CoreError): - assert not session.get_node(node2.id, CoreNode) - with pytest.raises(CoreError): - assert not session.get_node(emane_node.id, EmaneNet) - - # load saved xml - session.open_xml(Path(file_path), start=True) - - # verify nodes have been recreated - assert session.get_node(node1.id, CoreNode) - assert session.get_node(node2.id, CoreNode) - assert session.get_node(emane_node.id, EmaneNet) - assert len(session.link_manager.links()) == 2 - config = session.emane.get_config(node1.id, EmaneRfPipeModel.name) - assert config["datarate"] == datarate - - def test_xml_emane_interface_config( - self, session: Session, tmpdir: TemporaryFile, ip_prefixes: IpPrefixes - ): - # create nodes - options = CoreNode.create_options() - options.model = "mdr" - position = Position(x=50, y=50) - node1 = session.add_node(CoreNode, position=position, options=options) - iface1_data = ip_prefixes.create_iface(node1) - node2 = session.add_node(CoreNode, position=position, options=options) - iface2_data = ip_prefixes.create_iface(node2) - - # create emane node - options = EmaneNet.create_options() - options.emane_model = EmaneRfPipeModel.name - emane_node = session.add_node(EmaneNet, options=options) - - # create links - session.add_link(node1.id, emane_node.id, iface1_data) - session.add_link(node2.id, emane_node.id, iface2_data) - - # set node specific conifg - datarate = "101" - config_id = utils.iface_config_id(node1.id, iface1_data.id) - session.emane.set_config( - config_id, EmaneRfPipeModel.name, {"datarate": datarate} - ) - - # instantiate session - session.instantiate() - - # save xml - xml_file = tmpdir.join("session.xml") - file_path = xml_file.strpath - session.save_xml(Path(file_path)) - - # verify xml file was created and can be parsed - assert xml_file.isfile() - assert ElementTree.parse(file_path) - - # stop current session, clearing data - session.shutdown() - - # verify nodes have been removed from session - with pytest.raises(CoreError): - assert not session.get_node(node1.id, CoreNode) - 
with pytest.raises(CoreError): - assert not session.get_node(node2.id, CoreNode) - with pytest.raises(CoreError): - assert not session.get_node(emane_node.id, EmaneNet) - - # load saved xml - session.open_xml(Path(file_path), start=True) - - # verify nodes have been recreated - assert session.get_node(node1.id, CoreNode) - assert session.get_node(node2.id, CoreNode) - assert session.get_node(emane_node.id, EmaneNet) - assert len(session.link_manager.links()) == 2 - config = session.emane.get_config(config_id, EmaneRfPipeModel.name) - assert config["datarate"] == datarate diff --git a/daemon/tests/myservices/sample.py b/daemon/tests/myservices/sample.py index 5a12c795..e6673670 100644 --- a/daemon/tests/myservices/sample.py +++ b/daemon/tests/myservices/sample.py @@ -2,7 +2,7 @@ Sample user-defined services for testing. """ -from core.services.coreservices import CoreService +from core.service import CoreService class MyService(CoreService): diff --git a/daemon/tests/pytest.ini b/daemon/tests/pytest.ini new file mode 100644 index 00000000..880841eb --- /dev/null +++ b/daemon/tests/pytest.ini @@ -0,0 +1,2 @@ +[pytest] +norecursedirs = distributed diff --git a/daemon/tests/test_conf.py b/daemon/tests/test_conf.py index 2c74841d..6516de04 100644 --- a/daemon/tests/test_conf.py +++ b/daemon/tests/test_conf.py @@ -1,21 +1,30 @@ import pytest -from core.config import ( - ConfigString, - ConfigurableManager, - ConfigurableOptions, - ModelManager, -) -from core.emane.models.ieee80211abg import EmaneIeee80211abgModel -from core.emulator.session import Session -from core.location.mobility import BasicRangeModel -from core.nodes.network import WlanNode +from core.conf import ConfigurableManager +from core.conf import ConfigurableOptions +from core.conf import Configuration +from core.conf import ModelManager +from core.emane.ieee80211abg import EmaneIeee80211abgModel +from core.enumerations import ConfigDataTypes +from core.enumerations import NodeTypes +from core.mobility import BasicRangeModel class TestConfigurableOptions(ConfigurableOptions): - name1 = "value1" - name2 = "value2" - options = [ConfigString(id=name1, label=name1), ConfigString(id=name2, label=name2)] + name_one = "value1" + name_two = "value2" + options = [ + Configuration( + _id=name_one, + _type=ConfigDataTypes.STRING, + label=name_one + ), + Configuration( + _id=name_two, + _type=ConfigDataTypes.STRING, + label=name_two + ) + ] class TestConf: @@ -29,16 +38,16 @@ class TestConf: # then assert len(default_values) == 2 - assert TestConfigurableOptions.name1 in default_values - assert TestConfigurableOptions.name2 in default_values + assert TestConfigurableOptions.name_one in default_values + assert TestConfigurableOptions.name_two in default_values assert len(instance_default_values) == 2 - assert TestConfigurableOptions.name1 in instance_default_values - assert TestConfigurableOptions.name2 in instance_default_values + assert TestConfigurableOptions.name_one in instance_default_values + assert TestConfigurableOptions.name_two in instance_default_values def test_nodes(self): # given config_manager = ConfigurableManager() - test_config = {"1": "2"} + test_config = {1: 2} node_id = 1 config_manager.set_configs(test_config) config_manager.set_configs(test_config, node_id=node_id) @@ -53,7 +62,7 @@ class TestConf: def test_config_reset_all(self): # given config_manager = ConfigurableManager() - test_config = {"1": "2"} + test_config = {1: 2} node_id = 1 config_manager.set_configs(test_config) config_manager.set_configs(test_config, 
node_id=node_id) @@ -67,7 +76,7 @@ class TestConf: def test_config_reset_node(self): # given config_manager = ConfigurableManager() - test_config = {"1": "2"} + test_config = {1: 2} node_id = 1 config_manager.set_configs(test_config) config_manager.set_configs(test_config, node_id=node_id) @@ -82,7 +91,7 @@ class TestConf: def test_configs_setget(self): # given config_manager = ConfigurableManager() - test_config = {"1": "2"} + test_config = {1: 2} node_id = 1 config_manager.set_configs(test_config) config_manager.set_configs(test_config, node_id=node_id) @@ -143,27 +152,27 @@ class TestConf: with pytest.raises(ValueError): manager.get_model_config(1, bad_name) - def test_model_set(self, session: Session): + def test_model_set(self, session): # given - wlan_node = session.add_node(WlanNode) + wlan_node = session.add_node(_type=NodeTypes.WIRELESS_LAN) # when session.mobility.set_model(wlan_node, BasicRangeModel) # then - assert session.mobility.get_model_config(wlan_node.id, BasicRangeModel.name) + assert session.mobility.get_model_config(wlan_node.objid, BasicRangeModel.name) - def test_model_set_error(self, session: Session): + def test_model_set_error(self, session): # given - wlan_node = session.add_node(WlanNode) + wlan_node = session.add_node(_type=NodeTypes.WIRELESS_LAN) # when / then with pytest.raises(ValueError): session.mobility.set_model(wlan_node, EmaneIeee80211abgModel) - def test_get_models(self, session: Session): + def test_get_models(self, session): # given - wlan_node = session.add_node(WlanNode) + wlan_node = session.add_node(_type=NodeTypes.WIRELESS_LAN) session.mobility.set_model(wlan_node, BasicRangeModel) # when diff --git a/daemon/tests/test_config_services.py b/daemon/tests/test_config_services.py deleted file mode 100644 index 876b7f32..00000000 --- a/daemon/tests/test_config_services.py +++ /dev/null @@ -1,300 +0,0 @@ -from pathlib import Path -from unittest import mock - -import pytest - -from core.config import ConfigBool, ConfigString -from core.configservice.base import ( - ConfigService, - ConfigServiceBootError, - ConfigServiceMode, -) -from core.errors import CoreCommandError, CoreError - -TEMPLATE_TEXT = "echo hello" - - -class MyService(ConfigService): - name = "MyService" - group = "MyGroup" - directories = ["/usr/local/lib"] - files = ["test.sh"] - executables = [] - dependencies = [] - startup = [f"sh {files[0]}"] - validate = [f"pidof {files[0]}"] - shutdown = [f"pkill {files[0]}"] - validation_mode = ConfigServiceMode.BLOCKING - default_configs = [ - ConfigString(id="value1", label="Text"), - ConfigBool(id="value2", label="Boolean"), - ConfigString( - id="value3", label="Multiple Choice", options=["value1", "value2", "value3"] - ), - ] - modes = { - "mode1": {"value1": "value1", "value2": "0", "value3": "value2"}, - "mode2": {"value1": "value2", "value2": "1", "value3": "value3"}, - "mode3": {"value1": "value3", "value2": "0", "value3": "value1"}, - } - - def get_text_template(self, name: str) -> str: - return TEMPLATE_TEXT - - -class TestConfigServices: - def test_set_template(self): - # given - node = mock.MagicMock() - text = "echo custom" - service = MyService(node) - - # when - service.set_template(MyService.files[0], text) - - # then - assert MyService.files[0] in service.custom_templates - assert service.custom_templates[MyService.files[0]] == text - - def test_create_directories(self): - # given - node = mock.MagicMock() - service = MyService(node) - - # when - service.create_dirs() - - # then - directory = Path(MyService.directories[0]) - 
node.create_dir.assert_called_with(directory) - - def test_create_files_custom(self): - # given - node = mock.MagicMock() - service = MyService(node) - text = "echo custom" - service.set_template(MyService.files[0], text) - - # when - service.create_files() - - # then - file_path = Path(MyService.files[0]) - node.create_file.assert_called_with(file_path, text) - - def test_create_files_text(self): - # given - node = mock.MagicMock() - service = MyService(node) - - # when - service.create_files() - - # then - file_path = Path(MyService.files[0]) - node.create_file.assert_called_with(file_path, TEMPLATE_TEXT) - - def test_run_startup(self): - # given - node = mock.MagicMock() - wait = True - service = MyService(node) - - # when - service.run_startup(wait=wait) - - # then - node.cmd.assert_called_with(MyService.startup[0], wait=wait) - - def test_run_startup_exception(self): - # given - node = mock.MagicMock() - node.cmd.side_effect = CoreCommandError(1, "error") - service = MyService(node) - - # when - with pytest.raises(ConfigServiceBootError): - service.run_startup(wait=True) - - def test_shutdown(self): - # given - node = mock.MagicMock() - service = MyService(node) - - # when - service.stop() - - # then - node.cmd.assert_called_with(MyService.shutdown[0]) - - def test_run_validation(self): - # given - node = mock.MagicMock() - service = MyService(node) - - # when - service.run_validation() - - # then - node.cmd.assert_called_with(MyService.validate[0]) - - def test_run_validation_timer(self): - # given - node = mock.MagicMock() - service = MyService(node) - service.validation_mode = ConfigServiceMode.TIMER - service.validation_timer = 0 - - # when - service.run_validation() - - # then - node.cmd.assert_called_with(MyService.validate[0]) - - def test_run_validation_timer_exception(self): - # given - node = mock.MagicMock() - node.cmd.side_effect = CoreCommandError(1, "error") - service = MyService(node) - service.validation_mode = ConfigServiceMode.TIMER - service.validation_period = 0 - service.validation_timer = 0 - - # when - with pytest.raises(ConfigServiceBootError): - service.run_validation() - - def test_run_validation_non_blocking(self): - # given - node = mock.MagicMock() - service = MyService(node) - service.validation_mode = ConfigServiceMode.NON_BLOCKING - service.validation_period = 0 - service.validation_timer = 0 - - # when - service.run_validation() - - # then - node.cmd.assert_called_with(MyService.validate[0]) - - def test_run_validation_non_blocking_exception(self): - # given - node = mock.MagicMock() - node.cmd.side_effect = CoreCommandError(1, "error") - service = MyService(node) - service.validation_mode = ConfigServiceMode.NON_BLOCKING - service.validation_period = 0 - service.validation_timer = 0 - - # when - with pytest.raises(ConfigServiceBootError): - service.run_validation() - - def test_render_config(self): - # given - node = mock.MagicMock() - service = MyService(node) - - # when - config = service.render_config() - - # then - assert config == {"value1": "", "value2": "", "value3": ""} - - def test_render_config_custom(self): - # given - node = mock.MagicMock() - service = MyService(node) - custom_config = {"value1": "1", "value2": "2", "value3": "3"} - service.set_config(custom_config) - - # when - config = service.render_config() - - # then - assert config == custom_config - - def test_set_config(self): - # given - node = mock.MagicMock() - service = MyService(node) - custom_config = {"value1": "1", "value2": "2", "value3": "3"} - - # when - 
service.set_config(custom_config) - - # then - assert service.custom_config == custom_config - - def test_set_config_exception(self): - # given - node = mock.MagicMock() - service = MyService(node) - custom_config = {"value4": "1"} - - # when - with pytest.raises(CoreError): - service.set_config(custom_config) - - def test_start_blocking(self): - # given - node = mock.MagicMock() - service = MyService(node) - service.create_dirs = mock.MagicMock() - service.create_files = mock.MagicMock() - service.run_startup = mock.MagicMock() - service.run_validation = mock.MagicMock() - service.wait_validation = mock.MagicMock() - - # when - service.start() - - # then - service.create_files.assert_called_once() - service.create_dirs.assert_called_once() - service.run_startup.assert_called_once() - service.run_validation.assert_not_called() - service.wait_validation.assert_not_called() - - def test_start_timer(self): - # given - node = mock.MagicMock() - service = MyService(node) - service.validation_mode = ConfigServiceMode.TIMER - service.create_dirs = mock.MagicMock() - service.create_files = mock.MagicMock() - service.run_startup = mock.MagicMock() - service.run_validation = mock.MagicMock() - service.wait_validation = mock.MagicMock() - - # when - service.start() - - # then - service.create_files.assert_called_once() - service.create_dirs.assert_called_once() - service.run_startup.assert_called_once() - service.run_validation.assert_not_called() - service.wait_validation.assert_called_once() - - def test_start_non_blocking(self): - # given - node = mock.MagicMock() - service = MyService(node) - service.validation_mode = ConfigServiceMode.NON_BLOCKING - service.create_dirs = mock.MagicMock() - service.create_files = mock.MagicMock() - service.run_startup = mock.MagicMock() - service.run_validation = mock.MagicMock() - service.wait_validation = mock.MagicMock() - - # when - service.start() - - # then - service.create_files.assert_called_once() - service.create_dirs.assert_called_once() - service.run_startup.assert_called_once() - service.run_validation.assert_called_once() - service.wait_validation.assert_not_called() diff --git a/daemon/tests/test_core.py b/daemon/tests/test_core.py index 919e4478..7b29ba75 100644 --- a/daemon/tests/test_core.py +++ b/daemon/tests/test_core.py @@ -2,39 +2,55 @@ Unit tests for testing basic CORE networks. 
""" +import os +import stat import threading -from pathlib import Path -from typing import List, Type import pytest +from mock import MagicMock -from core.emulator.data import IpPrefixes -from core.emulator.session import Session -from core.errors import CoreCommandError -from core.location.mobility import BasicRangeModel, Ns2ScriptedMobility -from core.nodes.base import CoreNode, NodeBase -from core.nodes.network import HubNode, PtpNet, SwitchNode, WlanNode +from core.emulator.emudata import NodeOptions +from core.enumerations import MessageFlags +from core.enumerations import NodeTypes +from core.mobility import BasicRangeModel +from core.mobility import Ns2ScriptedMobility +from core.netns.vnodeclient import VnodeClient -_PATH: Path = Path(__file__).resolve().parent -_MOBILITY_FILE: Path = _PATH / "mobility.scen" -_WIRED: List = [PtpNet, HubNode, SwitchNode] +_PATH = os.path.abspath(os.path.dirname(__file__)) +_MOBILITY_FILE = os.path.join(_PATH, "mobility.scen") +_WIRED = [ + NodeTypes.PEER_TO_PEER, + NodeTypes.HUB, + NodeTypes.SWITCH +] -def ping(from_node: CoreNode, to_node: CoreNode, ip_prefixes: IpPrefixes): - address = ip_prefixes.ip4_address(to_node.id) - try: - from_node.cmd(f"ping -c 1 {address}") - status = 0 - except CoreCommandError as e: - status = e.returncode - return status +def createclients(sessiondir, clientcls=VnodeClient, cmdchnlfilterfunc=None): + """ + Create clients + + :param str sessiondir: session directory to create clients + :param class clientcls: class to create clients from + :param func cmdchnlfilterfunc: command channel filter function + :return: list of created clients + :rtype: list + """ + direntries = map(lambda x: os.path.join(sessiondir, x), os.listdir(sessiondir)) + cmdchnls = filter(lambda x: stat.S_ISSOCK(os.stat(x).st_mode), direntries) + if cmdchnlfilterfunc: + cmdchnls = filter(cmdchnlfilterfunc, cmdchnls) + cmdchnls.sort() + return map(lambda x: clientcls(os.path.basename(x), x), cmdchnls) + + +def ping(from_node, to_node, ip_prefixes): + address = ip_prefixes.ip4_address(to_node) + return from_node.cmd(["ping", "-c", "3", address]) class TestCore: @pytest.mark.parametrize("net_type", _WIRED) - def test_wired_ping( - self, session: Session, net_type: Type[NodeBase], ip_prefixes: IpPrefixes - ): + def test_wired_ping(self, session, net_type, ip_prefixes): """ Test ptp node network. @@ -44,25 +60,125 @@ class TestCore: """ # create net node - net_node = session.add_node(net_type) + net_node = session.add_node(_type=net_type) # create nodes - node1 = session.add_node(CoreNode) - node2 = session.add_node(CoreNode) + node_one = session.add_node() + node_two = session.add_node() # link nodes to net node - for node in [node1, node2]: - iface_data = ip_prefixes.create_iface(node) - session.add_link(node.id, net_node.id, iface1_data=iface_data) + for node in [node_one, node_two]: + interface = ip_prefixes.create_interface(node) + session.add_link(node.objid, net_node.objid, interface_one=interface) # instantiate session session.instantiate() - # ping node2 from node1 and assert success - status = ping(node1, node2, ip_prefixes) + # ping n2 from n1 and assert success + status = ping(node_one, node_two, ip_prefixes) assert not status - def test_wlan_ping(self, session: Session, ip_prefixes: IpPrefixes): + def test_vnode_client(self, session, ip_prefixes): + """ + Test vnode client methods. 
+ + :param session: session for test + :param ip_prefixes: generates ip addresses for nodes + """ + + # create ptp + ptp_node = session.add_node(_type=NodeTypes.PEER_TO_PEER) + + # create nodes + node_one = session.add_node() + node_two = session.add_node() + + # link nodes to ptp net + for node in [node_one, node_two]: + interface = ip_prefixes.create_interface(node) + session.add_link(node.objid, ptp_node.objid, interface_one=interface) + + # get node client for testing + client = node_one.client + + # instantiate session + session.instantiate() + + # check we are connected + assert client.connected() + + # check various commands using vcmd module + command = ["ls"] + assert not client.cmd(command) + status, output = client.cmd_output(command) + assert not status + p, stdin, stdout, stderr = client.popen(command) + assert not p.status() + assert not client.icmd(command) + assert not client.redircmd(MagicMock(), MagicMock(), MagicMock(), command) + assert not client.shcmd(command[0]) + + # check various commands using command line + assert not client.cmd(command) + status, output = client.cmd_output(command) + assert not status + p, stdin, stdout, stderr = client.popen(command) + assert not p.wait() + assert not client.icmd(command) + assert not client.shcmd(command[0]) + + # check module methods + assert createclients(session.session_dir) + + # check convenience methods for interface information + assert client.getaddr("eth0") + assert client.netifstats() + + def test_netif(self, session, ip_prefixes): + """ + Test netif methods. + + :param session: session for test + :param ip_prefixes: generates ip addresses for nodes + """ + + # create ptp + ptp_node = session.add_node(_type=NodeTypes.PEER_TO_PEER) + + # create nodes + node_one = session.add_node() + node_two = session.add_node() + + # link nodes to ptp net + for node in [node_one, node_two]: + interface = ip_prefixes.create_interface(node) + session.add_link(node.objid, ptp_node.objid, interface_one=interface) + + # instantiate session + session.instantiate() + + # check link data gets generated + assert ptp_node.all_link_data(MessageFlags.ADD.value) + + # check common nets exist between linked nodes + assert node_one.commonnets(node_two) + assert node_two.commonnets(node_one) + + # check we can retrieve netif index + assert node_one.getifindex(0) + assert node_two.getifindex(0) + + # check interface parameters + interface = node_one.netif(0) + interface.setparam("test", 1) + assert interface.getparam("test") == 1 + assert interface.getparams() + + # delete netif and test that it no longer exists + node_one.delnetif(0) + assert not node_one.netif(0) + + def test_wlan_ping(self, session, ip_prefixes): + """ + Test basic wlan network. 
@@ -71,28 +187,28 @@ class TestCore: """ # create wlan - wlan_node = session.add_node(WlanNode) + wlan_node = session.add_node(_type=NodeTypes.WIRELESS_LAN) session.mobility.set_model(wlan_node, BasicRangeModel) # create nodes - options = CoreNode.create_options() - options.model = "mdr" - node1 = session.add_node(CoreNode, options=options) - node2 = session.add_node(CoreNode, options=options) + node_options = NodeOptions() + node_options.set_position(0, 0) + node_one = session.create_wireless_node(node_options=node_options) + node_two = session.create_wireless_node(node_options=node_options) # link nodes - for node in [node1, node2]: - iface_id = ip_prefixes.create_iface(node) - session.add_link(node.id, wlan_node.id, iface1_data=iface_id) + for node in [node_one, node_two]: + interface = ip_prefixes.create_interface(node) + session.add_link(node.objid, wlan_node.objid, interface_one=interface) # instantiate session session.instantiate() - # ping node2 from node1 and assert success - status = ping(node1, node2, ip_prefixes) + # ping n2 from n1 and assert success + status = ping(node_one, node_two, ip_prefixes) assert not status - def test_mobility(self, session: Session, ip_prefixes: IpPrefixes): + def test_mobility(self, session, ip_prefixes): """ Test basic wlan network. @@ -101,23 +217,23 @@ class TestCore: """ # create wlan - wlan_node = session.add_node(WlanNode) + wlan_node = session.add_node(_type=NodeTypes.WIRELESS_LAN) session.mobility.set_model(wlan_node, BasicRangeModel) # create nodes - options = CoreNode.create_options() - options.model = "mdr" - node1 = session.add_node(CoreNode, options=options) - node2 = session.add_node(CoreNode, options=options) + node_options = NodeOptions() + node_options.set_position(0, 0) + node_one = session.create_wireless_node(node_options=node_options) + node_two = session.create_wireless_node(node_options=node_options) # link nodes - for node in [node1, node2]: - iface_id = ip_prefixes.create_iface(node) - session.add_link(node.id, wlan_node.id, iface1_data=iface_id) + for node in [node_one, node_two]: + interface = ip_prefixes.create_interface(node) + session.add_link(node.objid, wlan_node.objid, interface_one=interface) # configure mobility script for session config = { - "file": str(_MOBILITY_FILE), + "file": _MOBILITY_FILE, "refresh_ms": "50", "loop": "1", "autostart": "0.0", diff --git a/daemon/tests/test_distributed.py b/daemon/tests/test_distributed.py deleted file mode 100644 index 3a9d43fb..00000000 --- a/daemon/tests/test_distributed.py +++ /dev/null @@ -1,39 +0,0 @@ -from core.emulator.session import Session -from core.nodes.base import CoreNode -from core.nodes.network import HubNode - - -class TestDistributed: - def test_remote_node(self, session: Session): - # given - server_name = "core2" - host = "127.0.0.1" - - # when - session.distributed.add_server(server_name, host) - node = session.add_node(CoreNode, server=server_name) - session.instantiate() - - # then - assert node.server is not None - assert node.server.name == server_name - assert node.server.host == host - - def test_remote_bridge(self, session: Session): - # given - server_name = "core2" - host = "127.0.0.1" - session.distributed.address = host - - # when - session.distributed.add_server(server_name, host) - node1 = session.add_node(HubNode) - node2 = session.add_node(HubNode, server=server_name) - session.add_link(node1.id, node2.id) - session.instantiate() - - # then - assert node2.server is not None - assert node2.server.name == server_name - assert 
node2.server.host == host - assert len(session.distributed.tunnels) == 1 diff --git a/daemon/tests/test_emane.py b/daemon/tests/test_emane.py new file mode 100644 index 00000000..1c77d8ea --- /dev/null +++ b/daemon/tests/test_emane.py @@ -0,0 +1,67 @@ +""" +Unit tests for testing CORE EMANE networks. +""" +import os + +import pytest + +from conftest import ping +from core.emane.bypass import EmaneBypassModel +from core.emane.commeffect import EmaneCommEffectModel +from core.emane.ieee80211abg import EmaneIeee80211abgModel +from core.emane.rfpipe import EmaneRfPipeModel +from core.emane.tdma import EmaneTdmaModel +from core.emulator.emudata import NodeOptions + +_EMANE_MODELS = [ + EmaneIeee80211abgModel, + EmaneRfPipeModel, + EmaneBypassModel, + EmaneCommEffectModel, + EmaneTdmaModel, +] +_DIR = os.path.dirname(os.path.abspath(__file__)) + + +class TestEmane: + @pytest.mark.parametrize("model", _EMANE_MODELS) + def test_models(self, session, model, ip_prefixes): + """ + Test emane models within a basic network. + + :param core.emulator.coreemu.EmuSession session: session for test + :param model: emane model to test + :param ip_prefixes: generates ip addresses for nodes + """ + + # create emane node for networking the core nodes + emane_network = session.create_emane_network( + model, + geo_reference=(47.57917, -122.13232, 2.00000) + ) + emane_network.setposition(x=80, y=50) + + # configure tdma + if model == EmaneTdmaModel: + session.emane.set_model_config(emane_network.objid, EmaneTdmaModel.name, { + "schedule": os.path.join(_DIR, "../examples/tdma/schedule.xml") + }) + + # create nodes + node_options = NodeOptions() + node_options.set_position(150, 150) + node_one = session.create_wireless_node(node_options=node_options) + node_options.set_position(300, 150) + node_two = session.create_wireless_node(node_options=node_options) + + for i, node in enumerate([node_one, node_two]): + node.setposition(x=150 * (i + 1), y=150) + interface = ip_prefixes.create_interface(node) + session.add_link(node.objid, emane_network.objid, interface_one=interface) + + # instantiate session + session.instantiate() + + # ping n2 from n1 and assert success + status = ping(node_one, node_two, ip_prefixes, count=5) + assert not status diff --git a/daemon/tests/test_grpc.py b/daemon/tests/test_grpc.py index 9aed3395..85a7910d 100644 --- a/daemon/tests/test_grpc.py +++ b/daemon/tests/test_grpc.py @@ -1,192 +1,44 @@ import time -from pathlib import Path -from queue import Queue -from tempfile import TemporaryFile -from typing import Optional + +from Queue import Queue import grpc import pytest -from mock import patch -from core.api.grpc import wrappers -from core.api.grpc.client import CoreGrpcClient, InterfaceHelper, MoveNodesStreamer -from core.api.grpc.server import CoreGrpcServer -from core.api.grpc.wrappers import ( - ConfigOption, - ConfigOptionType, - EmaneModelConfig, - Event, - Geo, - Hook, - Interface, - Link, - LinkOptions, - MobilityAction, - MoveNodesRequest, - Node, - NodeServiceData, - NodeType, - Position, - ServiceAction, - ServiceValidationMode, - SessionLocation, - SessionState, -) -from core.emane.models.ieee80211abg import EmaneIeee80211abgModel -from core.emane.nodes import EmaneNet -from core.emulator.data import EventData, IpPrefixes, NodeData -from core.emulator.enumerations import EventTypes, ExceptionLevels, MessageFlags -from core.errors import CoreError -from core.location.mobility import BasicRangeModel, Ns2ScriptedMobility -from core.nodes.base import CoreNode -from core.nodes.network 
import SwitchNode, WlanNode -from core.xml.corexml import CoreXmlWriter +from core.conf import ConfigShim +from core.data import EventData +from core.emane.ieee80211abg import EmaneIeee80211abgModel +from core.enumerations import NodeTypes, EventTypes, ConfigFlags, ExceptionLevels +from core.grpc import core_pb2 +from core.grpc.client import CoreGrpcClient +from core.mobility import BasicRangeModel, Ns2ScriptedMobility class TestGrpc: - @pytest.mark.parametrize("definition", [False, True]) - def test_start_session(self, grpc_server: CoreGrpcServer, definition): - # given - client = CoreGrpcClient() - with client.context_connect(): - session = client.create_session() - position = Position(x=50, y=100) - node1 = session.add_node(1, position=position) - position = Position(x=100, y=100) - node2 = session.add_node(2, position=position) - position = Position(x=200, y=200) - wlan_node = session.add_node(3, _type=NodeType.WIRELESS_LAN, position=position) - iface_helper = InterfaceHelper(ip4_prefix="10.83.0.0/16") - iface1_id = 0 - iface1 = iface_helper.create_iface(node1.id, iface1_id) - iface2_id = 0 - iface2 = iface_helper.create_iface(node2.id, iface2_id) - link = Link(node1_id=node1.id, node2_id=node2.id, iface1=iface1, iface2=iface2) - session.links = [link] - hook = Hook(state=SessionState.RUNTIME, file="echo.sh", data="echo hello") - session.hooks = {hook.file: hook} - location_x = 5 - location_y = 10 - location_z = 15 - location_lat = 20 - location_lon = 30 - location_alt = 40 - location_scale = 5 - session.location = SessionLocation( - x=location_x, - y=location_y, - z=location_z, - lat=location_lat, - lon=location_lon, - alt=location_alt, - scale=location_scale, - ) - - # setup wlan config - wlan_config_key = "range" - wlan_config_value = "333" - wlan_node.set_wlan({wlan_config_key: wlan_config_value}) - - # setup mobility config - mobility_config_key = "refresh_ms" - mobility_config_value = "60" - wlan_node.set_mobility({mobility_config_key: mobility_config_value}) - - # setup service config - service_name = "DefaultRoute" - service_validate = ["echo hello"] - node1.service_configs[service_name] = NodeServiceData( - executables=[], - dependencies=[], - dirs=[], - configs=[], - startup=[], - validate=service_validate, - validation_mode=ServiceValidationMode.NON_BLOCKING, - validation_timer=0, - shutdown=[], - meta="", - ) - - # setup service file config - service_file = "defaultroute.sh" - service_file_data = "echo hello" - node1.service_file_configs[service_name] = {service_file: service_file_data} - - # setup session option - option_key = "controlnet" - option_value = "172.16.0.0/24" - session.set_options({option_key: option_value}) - - # when - with patch.object(CoreXmlWriter, "write"): - with client.context_connect(): - client.start_session(session, definition=definition) - - # then - real_session = grpc_server.coreemu.sessions[session.id] - if definition: - state = EventTypes.DEFINITION_STATE - else: - state = EventTypes.RUNTIME_STATE - assert real_session.state == state - assert node1.id in real_session.nodes - assert node2.id in real_session.nodes - assert wlan_node.id in real_session.nodes - assert iface1_id in real_session.nodes[node1.id].ifaces - assert iface2_id in real_session.nodes[node2.id].ifaces - hook_file, hook_data = real_session.hooks[EventTypes.RUNTIME_STATE][0] - assert hook_file == hook.file - assert hook_data == hook.data - assert real_session.location.refxyz == (location_x, location_y, location_z) - assert real_session.location.refgeo == ( - location_lat, - 
location_lon, - location_alt, - ) - assert real_session.location.refscale == location_scale - set_wlan_config = real_session.mobility.get_model_config( - wlan_node.id, BasicRangeModel.name - ) - assert set_wlan_config[wlan_config_key] == wlan_config_value - set_mobility_config = real_session.mobility.get_model_config( - wlan_node.id, Ns2ScriptedMobility.name - ) - assert set_mobility_config[mobility_config_key] == mobility_config_value - service = real_session.services.get_service( - node1.id, service_name, default_service=True - ) - assert service.validate == tuple(service_validate) - real_node1 = real_session.get_node(node1.id, CoreNode) - service_file = real_session.services.get_service_file( - real_node1, service_name, service_file - ) - assert service_file.data == service_file_data - assert option_value == real_session.options.get(option_key) - @pytest.mark.parametrize("session_id", [None, 6013]) - def test_create_session( - self, grpc_server: CoreGrpcServer, session_id: Optional[int] - ): + def test_create_session(self, grpc_server, session_id): # given client = CoreGrpcClient() # when with client.context_connect(): - created_session = client.create_session(session_id) + response = client.create_session(session_id) # then - assert isinstance(created_session, wrappers.Session) - session = grpc_server.coreemu.sessions.get(created_session.id) + assert isinstance(response.id, int) + assert isinstance(response.state, int) + session = grpc_server.coreemu.sessions.get(response.id) assert session is not None + assert session.state == response.state if session_id is not None: - assert created_session.id == session_id + assert response.id == session_id assert session.id == session_id - @pytest.mark.parametrize("session_id, expected", [(None, True), (6013, False)]) - def test_delete_session( - self, grpc_server: CoreGrpcServer, session_id: Optional[int], expected: bool - ): + @pytest.mark.parametrize("session_id, expected", [ + (None, True), + (6013, False) + ]) + def test_delete_session(self, grpc_server, session_id, expected): # given client = CoreGrpcClient() session = grpc_server.coreemu.create_session() @@ -195,190 +47,232 @@ class TestGrpc: # then with client.context_connect(): - result = client.delete_session(session_id) + response = client.delete_session(session_id) # then - assert result is expected + assert response.result is expected assert grpc_server.coreemu.sessions.get(session_id) is None - def test_get_session(self, grpc_server: CoreGrpcServer): + def test_get_session(self, grpc_server): # given client = CoreGrpcClient() session = grpc_server.coreemu.create_session() - session.add_node(CoreNode) + session.add_node() session.set_state(EventTypes.DEFINITION_STATE) # then with client.context_connect(): - session = client.get_session(session.id) + response = client.get_session(session.id) # then - assert session.state == SessionState.DEFINITION - assert len(session.nodes) == 1 - assert len(session.links) == 0 + assert response.session.state == core_pb2.STATE_DEFINITION + assert len(response.session.nodes) == 1 + assert len(response.session.links) == 0 - def test_get_sessions(self, grpc_server: CoreGrpcServer): + def test_get_sessions(self, grpc_server): # given client = CoreGrpcClient() session = grpc_server.coreemu.create_session() # then with client.context_connect(): - sessions = client.get_sessions() + response = client.get_sessions() # then found_session = None - for current_session in sessions: + for current_session in response.sessions: if current_session.id == session.id: 
found_session = current_session break - assert len(sessions) == 1 + assert len(response.sessions) == 1 assert found_session is not None - def test_add_node(self, grpc_server: CoreGrpcServer): + def test_get_session_options(self, grpc_server): # given client = CoreGrpcClient() session = grpc_server.coreemu.create_session() # then with client.context_connect(): - position = Position(x=0, y=0) - node = Node(id=1, name="n1", type=NodeType.DEFAULT, position=position) - node_id = client.add_node(session.id, node) + response = client.get_session_options(session.id) # then - assert node_id is not None - assert session.get_node(node_id, CoreNode) is not None + assert len(response.groups) > 0 - def test_get_node(self, grpc_server: CoreGrpcServer): + def test_get_session_location(self, grpc_server): # given client = CoreGrpcClient() session = grpc_server.coreemu.create_session() - node = session.add_node(CoreNode) # then with client.context_connect(): - get_node, ifaces, links = client.get_node(session.id, node.id) + response = client.get_session_location(session.id) # then - assert node.id == get_node.id - assert len(ifaces) == 0 - assert len(links) == 0 + assert response.scale == 1.0 + assert response.position.x == 0 + assert response.position.y == 0 + assert response.position.z == 0 + assert response.position.lat == 0 + assert response.position.lon == 0 + assert response.position.alt == 0 - def test_move_node_pos(self, grpc_server: CoreGrpcServer): + def test_set_session_location(self, grpc_server): + # given + client = CoreGrpcClient() + session = grpc_server.coreemu.create_session() + + # then + scale = 2 + xyz = (1, 1, 1) + lat_lon_alt = (1, 1, 1) + with client.context_connect(): + response = client.set_session_location( + session.id, + x=xyz[0], y=xyz[1], z=xyz[2], + lat=lat_lon_alt[0], lon=lat_lon_alt[1], alt=lat_lon_alt[2], + scale=scale + ) + + # then + assert response.result is True + assert session.location.refxyz == xyz + assert session.location.refscale == scale + assert session.location.refgeo == lat_lon_alt + + def test_set_session_options(self, grpc_server): + # given + client = CoreGrpcClient() + session = grpc_server.coreemu.create_session() + + # then + option = "enablerj45" + value = "1" + with client.context_connect(): + response = client.set_session_options(session.id, {option: value}) + + # then + assert response.result is True + assert session.options.get_config(option) == value + config = session.options.get_configs() + assert len(config) > 0 + + def test_set_session_state(self, grpc_server): # given client = CoreGrpcClient() session = grpc_server.coreemu.create_session() - node = session.add_node(CoreNode) - position = Position(x=100.0, y=50.0) # then with client.context_connect(): - result = client.move_node(session.id, node.id, position=position) + response = client.set_session_state(session.id, core_pb2.STATE_DEFINITION) # then - assert result is True - assert node.position.x == position.x - assert node.position.y == position.y + assert response.result is True + assert session.state == core_pb2.STATE_DEFINITION - def test_move_node_geo(self, grpc_server: CoreGrpcServer): + def test_add_node(self, grpc_server): # given client = CoreGrpcClient() session = grpc_server.coreemu.create_session() - node = session.add_node(CoreNode) - geo = Geo(lon=0.0, lat=0.0, alt=0.0) # then with client.context_connect(): - result = client.move_node(session.id, node.id, geo=geo) + node = core_pb2.Node() + response = client.add_node(session.id, node) # then - assert result is True - assert 
node.position.lon == geo.lon - assert node.position.lat == geo.lat - assert node.position.alt == geo.alt + assert response.id is not None + assert session.get_object(response.id) is not None - def test_move_node_exception(self, grpc_server: CoreGrpcServer): + def test_get_node(self, grpc_server): # given client = CoreGrpcClient() session = grpc_server.coreemu.create_session() - node = session.add_node(CoreNode) - - # then and when - with pytest.raises(CoreError), client.context_connect(): - client.move_node(session.id, node.id) - - def test_edit_node(self, grpc_server: CoreGrpcServer): - # given - client = CoreGrpcClient() - session = grpc_server.coreemu.create_session() - node = session.add_node(CoreNode) - icon = "test.png" + node = session.add_node() # then with client.context_connect(): - result = client.edit_node(session.id, node.id, icon) + response = client.get_node(session.id, node.objid) # then - assert result is True - assert node.icon == icon + assert response.node.id == node.objid - @pytest.mark.parametrize("node_id, expected", [(1, True), (2, False)]) - def test_delete_node( - self, grpc_server: CoreGrpcServer, node_id: int, expected: bool - ): + @pytest.mark.parametrize("node_id, expected", [ + (1, True), + (2, False) + ]) + def test_edit_node(self, grpc_server, node_id, expected): # given client = CoreGrpcClient() session = grpc_server.coreemu.create_session() - node = session.add_node(CoreNode) + node = session.add_node() # then + x, y = 10, 10 with client.context_connect(): - result = client.delete_node(session.id, node_id) + position = core_pb2.Position(x=x, y=y) + response = client.edit_node(session.id, node_id, position) # then - assert result is expected + assert response.result is expected if expected is True: - with pytest.raises(CoreError): - assert session.get_node(node.id, CoreNode) - - def test_node_command(self, request, grpc_server: CoreGrpcServer): - if request.config.getoption("mock"): - pytest.skip("mocking calls") + assert node.position.x == x + assert node.position.y == y + @pytest.mark.parametrize("node_id, expected", [ + (1, True), + (2, False) + ]) + def test_delete_node(self, grpc_server, node_id, expected): # given client = CoreGrpcClient() session = grpc_server.coreemu.create_session() - session.set_state(EventTypes.CONFIGURATION_STATE) - node = session.add_node(CoreNode) - session.instantiate() - expected_output = "hello world" - expected_status = 0 + node = session.add_node() # then - command = f"echo {expected_output}" with client.context_connect(): - output = client.node_command(session.id, node.id, command) + response = client.delete_node(session.id, node_id) # then - assert (expected_status, expected_output) == output + assert response.result is expected + if expected is True: + with pytest.raises(KeyError): + assert session.get_object(node.objid) - def test_get_node_terminal(self, grpc_server: CoreGrpcServer): + def test_get_hooks(self, grpc_server): # given client = CoreGrpcClient() session = grpc_server.coreemu.create_session() - session.set_state(EventTypes.CONFIGURATION_STATE) - node = session.add_node(CoreNode) - session.instantiate() + file_name = "test" + file_data = "echo hello" + session.add_hook(EventTypes.RUNTIME_STATE.value, file_name, None, file_data) # then with client.context_connect(): - terminal = client.get_node_terminal(session.id, node.id) + response = client.get_hooks(session.id) # then - assert terminal is not None + assert len(response.hooks) == 1 + hook = response.hooks[0] + assert hook.state == 
EventTypes.RUNTIME_STATE.value + assert hook.file == file_name + assert hook.data == file_data - def test_save_xml(self, grpc_server: CoreGrpcServer, tmpdir: TemporaryFile): + def test_add_hook(self, grpc_server): + # given + client = CoreGrpcClient() + session = grpc_server.coreemu.create_session() + + # then + file_name = "test" + file_data = "echo hello" + with client.context_connect(): + response = client.add_hook(session.id, core_pb2.STATE_RUNTIME, file_name, file_data) + + # then + assert response.result is True + + def test_save_xml(self, grpc_server, tmpdir): # given client = CoreGrpcClient() session = grpc_server.coreemu.create_session() @@ -391,566 +285,546 @@ class TestGrpc: # then assert tmp.exists() - def test_open_xml_hook(self, grpc_server: CoreGrpcServer, tmpdir: TemporaryFile): + def test_open_xml_hook(self, grpc_server, tmpdir): # given client = CoreGrpcClient() session = grpc_server.coreemu.create_session() - tmp = Path(tmpdir.join("text.xml")) - session.save_xml(tmp) + tmp = tmpdir.join("text.xml") + session.save_xml(str(tmp)) # then with client.context_connect(): - result, session_id = client.open_xml(tmp) + response = client.open_xml(str(tmp)) # then - assert result is True - assert session_id is not None + assert response.result is True + assert response.session is not None - def test_add_link(self, grpc_server: CoreGrpcServer): + def test_get_node_links(self, grpc_server, ip_prefixes): # given client = CoreGrpcClient() session = grpc_server.coreemu.create_session() - switch = session.add_node(SwitchNode) - node = session.add_node(CoreNode) - assert len(session.link_manager.links()) == 0 - iface = InterfaceHelper("10.0.0.0/24").create_iface(node.id, 0) - link = Link(node.id, switch.id, iface1=iface) + switch = session.add_node(_type=NodeTypes.SWITCH) + node = session.add_node() + interface = ip_prefixes.create_interface(node) + session.add_link(node.objid, switch.objid, interface) # then with client.context_connect(): - result, iface1, _ = client.add_link(session.id, link) + response = client.get_node_links(session.id, switch.objid) # then - assert result is True - assert len(session.link_manager.links()) == 1 - assert iface1.id == iface.id - assert iface1.ip4 == iface.ip4 + assert len(response.links) == 1 - def test_add_link_exception(self, grpc_server: CoreGrpcServer): + def test_get_node_links_exception(self, grpc_server, ip_prefixes): # given client = CoreGrpcClient() session = grpc_server.coreemu.create_session() - node = session.add_node(CoreNode) + switch = session.add_node(_type=NodeTypes.SWITCH) + node = session.add_node() + interface = ip_prefixes.create_interface(node) + session.add_link(node.objid, switch.objid, interface) # then - link = Link(node.id, 3) with pytest.raises(grpc.RpcError): with client.context_connect(): - client.add_link(session.id, link) + client.get_node_links(session.id, 3) - def test_edit_link(self, grpc_server: CoreGrpcServer, ip_prefixes: IpPrefixes): + def test_add_link(self, grpc_server, interface_helper): # given client = CoreGrpcClient() session = grpc_server.coreemu.create_session() - session.set_state(EventTypes.CONFIGURATION_STATE) - switch = session.add_node(SwitchNode) - node = session.add_node(CoreNode) - iface_data = ip_prefixes.create_iface(node) - iface, _ = session.add_link(node.id, switch.id, iface_data) - session.instantiate() - options = LinkOptions(bandwidth=30000) - assert iface.options.bandwidth != options.bandwidth - link = Link(node.id, switch.id, iface1=Interface(id=iface.id), options=options) + switch = 
session.add_node(_type=NodeTypes.SWITCH) + node = session.add_node() + assert len(switch.all_link_data(0)) == 0 + + # then + interface = interface_helper.create_interface(node.objid, 0) + with client.context_connect(): + response = client.add_link(session.id, node.objid, switch.objid, interface) + + # then + assert response.result is True + assert len(switch.all_link_data(0)) == 1 + + def test_add_link_exception(self, grpc_server, interface_helper): + # given + client = CoreGrpcClient() + session = grpc_server.coreemu.create_session() + node = session.add_node() + + # then + interface = interface_helper.create_interface(node.objid, 0) + with pytest.raises(grpc.RpcError): + with client.context_connect(): + client.add_link(session.id, 1, 3, interface) + + def test_edit_link(self, grpc_server, ip_prefixes): + # given + client = CoreGrpcClient() + session = grpc_server.coreemu.create_session() + switch = session.add_node(_type=NodeTypes.SWITCH) + node = session.add_node() + interface = ip_prefixes.create_interface(node) + session.add_link(node.objid, switch.objid, interface) + options = core_pb2.LinkOptions(bandwidth=30000) + link = switch.all_link_data(0)[0] + assert options.bandwidth != link.bandwidth # then with client.context_connect(): - result = client.edit_link(session.id, link) + response = client.edit_link(session.id, node.objid, switch.objid, options) # then - assert result is True - assert options.bandwidth == iface.options.bandwidth + assert response.result is True + link = switch.all_link_data(0)[0] + assert options.bandwidth == link.bandwidth - def test_delete_link(self, grpc_server: CoreGrpcServer, ip_prefixes: IpPrefixes): + def test_delete_link(self, grpc_server, ip_prefixes): # given client = CoreGrpcClient() session = grpc_server.coreemu.create_session() - node1 = session.add_node(CoreNode) - iface1 = ip_prefixes.create_iface(node1) - node2 = session.add_node(CoreNode) - iface2 = ip_prefixes.create_iface(node2) - session.add_link(node1.id, node2.id, iface1, iface2) - assert len(session.link_manager.links()) == 1 - link = Link( - node1.id, - node2.id, - iface1=Interface(id=iface1.id), - iface2=Interface(id=iface2.id), - ) + node_one = session.add_node() + interface_one = ip_prefixes.create_interface(node_one) + node_two = session.add_node() + interface_two = ip_prefixes.create_interface(node_two) + session.add_link(node_one.objid, node_two.objid, interface_one, interface_two) + link_node = None + for node_id in session.objects: + node = session.objects[node_id] + if node.objid not in {node_one.objid, node_two.objid}: + link_node = node + break + assert len(link_node.all_link_data(0)) == 1 # then with client.context_connect(): - result = client.delete_link(session.id, link) + response = client.delete_link( + session.id, node_one.objid, node_two.objid, interface_one.id, interface_two.id) # then - assert result is True - assert len(session.link_manager.links()) == 0 + assert response.result is True + assert len(link_node.all_link_data(0)) == 0 - def test_get_wlan_config(self, grpc_server: CoreGrpcServer): + def test_get_wlan_config(self, grpc_server): # given client = CoreGrpcClient() session = grpc_server.coreemu.create_session() - wlan = session.add_node(WlanNode) + wlan = session.add_node(_type=NodeTypes.WIRELESS_LAN) # then with client.context_connect(): - config = client.get_wlan_config(session.id, wlan.id) + response = client.get_wlan_config(session.id, wlan.objid) # then - assert len(config) > 0 + assert len(response.groups) > 0 - def test_set_wlan_config(self, 
grpc_server: CoreGrpcServer): + def test_set_wlan_config(self, grpc_server): # given client = CoreGrpcClient() session = grpc_server.coreemu.create_session() - session.set_state(EventTypes.CONFIGURATION_STATE) - wlan = session.add_node(WlanNode) - wlan.setmodel(BasicRangeModel, BasicRangeModel.default_values()) - session.instantiate() + wlan = session.add_node(_type=NodeTypes.WIRELESS_LAN) range_key = "range" - range_value = "50" + range_value = "300" # then with client.context_connect(): - result = client.set_wlan_config( - session.id, - wlan.id, - { - range_key: range_value, - "delay": "0", - "loss": "0", - "bandwidth": "50000", - "error": "0", - "jitter": "0", - }, - ) + response = client.set_wlan_config(session.id, wlan.objid, {range_key: range_value}) # then - assert result is True - config = session.mobility.get_model_config(wlan.id, BasicRangeModel.name) + assert response.result is True + config = session.mobility.get_model_config(wlan.objid, BasicRangeModel.name) assert config[range_key] == range_value - assert wlan.wireless_model.range == int(range_value) - def test_set_emane_model_config(self, grpc_server: CoreGrpcServer): + def test_get_emane_config(self, grpc_server): # given client = CoreGrpcClient() session = grpc_server.coreemu.create_session() - session.set_location(47.57917, -122.13232, 2.00000, 1.0) - options = EmaneNet.create_options() - options.emane_model = EmaneIeee80211abgModel.name - emane_network = session.add_node(EmaneNet, options=options) - session.emane.node_models[emane_network.id] = EmaneIeee80211abgModel.name - config_key = "bandwidth" - config_value = "900000" - option = ConfigOption( - label=config_key, - name=config_key, - value=config_value, - type=ConfigOptionType.INT32, - group="Default", - ) - config = EmaneModelConfig( - emane_network.id, EmaneIeee80211abgModel.name, config={config_key: option} - ) # then with client.context_connect(): - result = client.set_emane_model_config(session.id, config) + response = client.get_emane_config(session.id) # then - assert result is True - config = session.emane.get_config(emane_network.id, EmaneIeee80211abgModel.name) + assert len(response.groups) > 0 + + def test_set_emane_config(self, grpc_server): + # given + client = CoreGrpcClient() + session = grpc_server.coreemu.create_session() + config_key = "platform_id_start" + config_value = "2" + + # then + with client.context_connect(): + response = client.set_emane_config(session.id, {config_key: config_value}) + + # then + assert response.result is True + config = session.emane.get_configs() + assert len(config) > 1 assert config[config_key] == config_value - def test_get_emane_model_config(self, grpc_server: CoreGrpcServer): + def test_get_emane_model_configs(self, grpc_server): # given client = CoreGrpcClient() session = grpc_server.coreemu.create_session() - session.set_location(47.57917, -122.13232, 2.00000, 1.0) - options = EmaneNet.create_options() - options.emane_model = EmaneIeee80211abgModel.name - emane_network = session.add_node(EmaneNet, options=options) - session.emane.node_models[emane_network.id] = EmaneIeee80211abgModel.name + emane_network = session.create_emane_network( + model=EmaneIeee80211abgModel, + geo_reference=(47.57917, -122.13232, 2.00000) + ) + config_key = "platform_id_start" + config_value = "2" + session.emane.set_model_config(emane_network.objid, EmaneIeee80211abgModel.name, {config_key: config_value}) # then with client.context_connect(): - config = client.get_emane_model_config( - session.id, emane_network.id, 
EmaneIeee80211abgModel.name - ) + response = client.get_emane_model_configs(session.id) # then - assert len(config) > 0 + assert len(response.configs) == 1 + assert emane_network.objid in response.configs - def test_get_mobility_config(self, grpc_server: CoreGrpcServer): + def test_set_emane_model_config(self, grpc_server): # given client = CoreGrpcClient() session = grpc_server.coreemu.create_session() - wlan = session.add_node(WlanNode) - session.mobility.set_model_config(wlan.id, Ns2ScriptedMobility.name, {}) + emane_network = session.create_emane_network( + model=EmaneIeee80211abgModel, + geo_reference=(47.57917, -122.13232, 2.00000) + ) + config_key = "bandwidth" + config_value = "900000" # then with client.context_connect(): - config = client.get_mobility_config(session.id, wlan.id) + response = client.set_emane_model_config( + session.id, emane_network.objid, EmaneIeee80211abgModel.name, {config_key: config_value}) # then - assert len(config) > 0 + assert response.result is True + config = session.emane.get_model_config(emane_network.objid, EmaneIeee80211abgModel.name) + assert config[config_key] == config_value - def test_set_mobility_config(self, grpc_server: CoreGrpcServer): + def test_get_emane_model_config(self, grpc_server): # given client = CoreGrpcClient() session = grpc_server.coreemu.create_session() - wlan = session.add_node(WlanNode) + emane_network = session.create_emane_network( + model=EmaneIeee80211abgModel, + geo_reference=(47.57917, -122.13232, 2.00000) + ) + + # then + with client.context_connect(): + response = client.get_emane_model_config( + session.id, emane_network.objid, EmaneIeee80211abgModel.name) + + # then + assert len(response.groups) > 0 + + def test_get_emane_models(self, grpc_server): + # given + client = CoreGrpcClient() + session = grpc_server.coreemu.create_session() + + # then + with client.context_connect(): + response = client.get_emane_models(session.id) + + # then + assert len(response.models) > 0 + + def test_get_mobility_configs(self, grpc_server): + # given + client = CoreGrpcClient() + session = grpc_server.coreemu.create_session() + wlan = session.add_node(_type=NodeTypes.WIRELESS_LAN) + session.mobility.set_model_config(wlan.objid, Ns2ScriptedMobility.name, {}) + + # then + with client.context_connect(): + response = client.get_mobility_configs(session.id) + + # then + assert len(response.configs) > 0 + assert wlan.objid in response.configs + + def test_get_mobility_config(self, grpc_server): + # given + client = CoreGrpcClient() + session = grpc_server.coreemu.create_session() + wlan = session.add_node(_type=NodeTypes.WIRELESS_LAN) + session.mobility.set_model_config(wlan.objid, Ns2ScriptedMobility.name, {}) + + # then + with client.context_connect(): + response = client.get_mobility_config(session.id, wlan.objid) + + # then + assert len(response.groups) > 0 + + def test_set_mobility_config(self, grpc_server): + # given + client = CoreGrpcClient() + session = grpc_server.coreemu.create_session() + wlan = session.add_node(_type=NodeTypes.WIRELESS_LAN) config_key = "refresh_ms" config_value = "60" # then with client.context_connect(): - result = client.set_mobility_config( - session.id, wlan.id, {config_key: config_value} - ) + response = client.set_mobility_config(session.id, wlan.objid, {config_key: config_value}) # then - assert result is True - config = session.mobility.get_model_config(wlan.id, Ns2ScriptedMobility.name) + assert response.result is True + config = session.mobility.get_model_config(wlan.objid, 
Ns2ScriptedMobility.name) assert config[config_key] == config_value - def test_mobility_action(self, grpc_server: CoreGrpcServer): + def test_mobility_action(self, grpc_server): # given client = CoreGrpcClient() session = grpc_server.coreemu.create_session() - wlan = session.add_node(WlanNode) - session.mobility.set_model_config(wlan.id, Ns2ScriptedMobility.name, {}) + wlan = session.add_node(_type=NodeTypes.WIRELESS_LAN) + session.mobility.set_model_config(wlan.objid, Ns2ScriptedMobility.name, {}) session.instantiate() # then with client.context_connect(): - result = client.mobility_action(session.id, wlan.id, MobilityAction.STOP) + response = client.mobility_action(session.id, wlan.objid, core_pb2.MOBILITY_STOP) # then - assert result is True + assert response.result is True - def test_get_service_defaults(self, grpc_server: CoreGrpcServer): + def test_get_services(self, grpc_server): + # given + client = CoreGrpcClient() + + # then + with client.context_connect(): + response = client.get_services() + + # then + assert len(response.services) > 0 + + def test_get_service_defaults(self, grpc_server): # given client = CoreGrpcClient() session = grpc_server.coreemu.create_session() # then with client.context_connect(): - defaults = client.get_service_defaults(session.id) + response = client.get_service_defaults(session.id) # then - assert len(defaults) > 0 + assert len(response.defaults) > 0 - def test_set_service_defaults(self, grpc_server: CoreGrpcServer): + def test_set_service_defaults(self, grpc_server): # given client = CoreGrpcClient() session = grpc_server.coreemu.create_session() - model = "test" + node_type = "test" services = ["SSH"] # then with client.context_connect(): - result = client.set_service_defaults(session.id, {model: services}) + response = client.set_service_defaults(session.id, {node_type: services}) # then - assert result is True - assert session.services.default_services[model] == services + assert response.result is True + assert session.services.default_services[node_type] == services - def test_get_node_service(self, grpc_server: CoreGrpcServer): + def test_get_node_service(self, grpc_server): # given client = CoreGrpcClient() session = grpc_server.coreemu.create_session() - node = session.add_node(CoreNode) + node = session.add_node() # then with client.context_connect(): - service = client.get_node_service(session.id, node.id, "DefaultRoute") + response = client.get_node_service(session.id, node.objid, "IPForward") # then - assert len(service.configs) > 0 + assert len(response.service.configs) > 0 - def test_get_node_service_file(self, grpc_server: CoreGrpcServer): + def test_get_node_service_file(self, grpc_server): # given client = CoreGrpcClient() session = grpc_server.coreemu.create_session() - node = session.add_node(CoreNode) + node = session.add_node() # then with client.context_connect(): - data = client.get_node_service_file( - session.id, node.id, "DefaultRoute", "defaultroute.sh" - ) + response = client.get_node_service_file(session.id, node.objid, "IPForward", "ipforward.sh") # then - assert data is not None + assert response.data is not None - def test_service_action(self, grpc_server: CoreGrpcServer): + def test_set_node_service(self, grpc_server): # given client = CoreGrpcClient() session = grpc_server.coreemu.create_session() - options = CoreNode.create_options() - options.legacy = True - node = session.add_node(CoreNode, options=options) - service_name = "DefaultRoute" + node = session.add_node() + service_name = "IPForward" + validate = 
("echo hello",) # then with client.context_connect(): - result = client.service_action( - session.id, node.id, service_name, ServiceAction.STOP - ) + response = client.set_node_service(session.id, node.objid, service_name, (), validate, ()) # then - assert result is True + assert response.result is True + service = session.services.get_service(node.objid, service_name, default_service=True) + assert service.validate == validate - def test_config_service_action(self, grpc_server: CoreGrpcServer): + def test_set_node_service_file(self, grpc_server): # given client = CoreGrpcClient() session = grpc_server.coreemu.create_session() - node = session.add_node(CoreNode) - service_name = "DefaultRoute" + node = session.add_node() + service_name = "IPForward" + file_name = "ipforward.sh" + file_data = "echo hello" # then with client.context_connect(): - result = client.config_service_action( - session.id, node.id, service_name, ServiceAction.STOP - ) + response = client.set_node_service_file(session.id, node.objid, service_name, file_name, file_data) # then - assert result is True + assert response.result is True + service_file = session.services.get_service_file(node, service_name, file_name) + assert service_file.data == file_data - def test_node_events(self, grpc_server: CoreGrpcServer): + def test_service_action(self, grpc_server): # given client = CoreGrpcClient() session = grpc_server.coreemu.create_session() - node = session.add_node(CoreNode) - node.position.lat = 10.0 - node.position.lon = 20.0 - node.position.alt = 5.0 + node = session.add_node() + service_name = "IPForward" + + # then + with client.context_connect(): + response = client.service_action(session.id, node.objid, service_name, core_pb2.SERVICE_STOP) + + # then + assert response.result is True + + def test_node_events(self, grpc_server): + # given + client = CoreGrpcClient() + session = grpc_server.coreemu.create_session() + node = session.add_node() + node_data = node.data(message_type=0) queue = Queue() - def handle_event(event: Event) -> None: - assert event.session_id == session.id - assert event.node_event is not None - event_node = event.node_event.node - assert event_node.geo.lat == node.position.lat - assert event_node.geo.lon == node.position.lon - assert event_node.geo.alt == node.position.alt - queue.put(event) + def handle_event(event_data): + queue.put(event_data) # then with client.context_connect(): - client.events(session.id, handle_event) + client.node_events(session.id, handle_event) time.sleep(0.1) - session.broadcast_node(node) + session.broadcast_node(node_data) # then queue.get(timeout=5) - def test_link_events(self, grpc_server: CoreGrpcServer, ip_prefixes: IpPrefixes): + def test_link_events(self, grpc_server, ip_prefixes): # given client = CoreGrpcClient() session = grpc_server.coreemu.create_session() - wlan = session.add_node(WlanNode) - node = session.add_node(CoreNode) - iface_data = ip_prefixes.create_iface(node) - session.add_link(node.id, wlan.id, iface_data) - core_link = list(session.link_manager.links())[0] - link_data = core_link.get_data(MessageFlags.ADD) - + wlan = session.add_node(_type=NodeTypes.WIRELESS_LAN) + node = session.add_node() + interface = ip_prefixes.create_interface(node) + session.add_link(node.objid, wlan.objid, interface) + link_data = wlan.all_link_data(0)[0] queue = Queue() - def handle_event(event: Event) -> None: - assert event.session_id == session.id - assert event.link_event is not None - queue.put(event) + def handle_event(event_data): + queue.put(event_data) # 
then with client.context_connect(): - client.events(session.id, handle_event) + client.link_events(session.id, handle_event) time.sleep(0.1) session.broadcast_link(link_data) # then queue.get(timeout=5) - def test_throughputs(self, request, grpc_server: CoreGrpcServer): - if request.config.getoption("mock"): - pytest.skip("mocking calls") - + def test_session_events(self, grpc_server): # given client = CoreGrpcClient() session = grpc_server.coreemu.create_session() queue = Queue() def handle_event(event_data): - assert event_data.session_id == session.id queue.put(event_data) # then with client.context_connect(): - client.throughputs(session.id, handle_event) + client.session_events(session.id, handle_event) time.sleep(0.1) + event = EventData(event_type=EventTypes.RUNTIME_STATE.value, time="%s" % time.time()) + session.broadcast_event(event) # then queue.get(timeout=5) - def test_session_events(self, grpc_server: CoreGrpcServer): + def test_config_events(self, grpc_server): # given client = CoreGrpcClient() session = grpc_server.coreemu.create_session() queue = Queue() - def handle_event(event: Event) -> None: - assert event.session_id == session.id - assert event.session_event is not None - queue.put(event) + def handle_event(event_data): + queue.put(event_data) # then with client.context_connect(): - client.events(session.id, handle_event) + client.config_events(session.id, handle_event) time.sleep(0.1) - event_data = EventData( - event_type=EventTypes.RUNTIME_STATE, time=str(time.monotonic()) - ) - session.broadcast_event(event_data) + session_config = session.options.get_configs() + config_data = ConfigShim.config_data(0, None, ConfigFlags.UPDATE.value, session.options, session_config) + session.broadcast_config(config_data) # then queue.get(timeout=5) - def test_exception_events(self, grpc_server: CoreGrpcServer): + def test_exception_events(self, grpc_server): # given client = CoreGrpcClient() session = grpc_server.coreemu.create_session() queue = Queue() - exception_level = ExceptionLevels.FATAL - source = "test" - node_id = None - text = "exception message" - def handle_event(event: Event) -> None: - assert event.session_id == session.id - assert event.exception_event is not None - exception_event = event.exception_event - assert exception_event.level.value == exception_level.value - assert exception_event.node_id == 0 - assert exception_event.source == source - assert exception_event.text == text - queue.put(event) + def handle_event(event_data): + queue.put(event_data) # then with client.context_connect(): - client.events(session.id, handle_event) + client.exception_events(session.id, handle_event) time.sleep(0.1) - session.exception(exception_level, source, text, node_id) + session.exception(ExceptionLevels.FATAL, "test", None, "exception message") # then queue.get(timeout=5) - def test_file_events(self, grpc_server: CoreGrpcServer): + def test_file_events(self, grpc_server): # given client = CoreGrpcClient() session = grpc_server.coreemu.create_session() - node = session.add_node(CoreNode) + node = session.add_node() queue = Queue() - def handle_event(event: Event) -> None: - assert event.session_id == session.id - assert event.file_event is not None - queue.put(event) + def handle_event(event_data): + queue.put(event_data) # then with client.context_connect(): - client.events(session.id, handle_event) + client.file_events(session.id, handle_event) time.sleep(0.1) - file_data = session.services.get_service_file( - node, "DefaultRoute", "defaultroute.sh" - ) + file_data = 
session.services.get_service_file(node, "IPForward", "ipforward.sh") session.broadcast_file(file_data) # then queue.get(timeout=5) - - def test_move_nodes(self, grpc_server: CoreGrpcServer): - # given - client = CoreGrpcClient() - session = grpc_server.coreemu.create_session() - node = session.add_node(CoreNode) - x, y = 10.0, 15.0 - streamer = MoveNodesStreamer(session.id) - streamer.send_position(node.id, x, y) - streamer.stop() - - # then - with client.context_connect(): - client.move_nodes(streamer) - - # assert - assert node.position.x == x - assert node.position.y == y - - def test_move_nodes_geo(self, grpc_server: CoreGrpcServer): - # given - client = CoreGrpcClient() - session = grpc_server.coreemu.create_session() - node = session.add_node(CoreNode) - lon, lat, alt = 10.0, 15.0, 5.0 - streamer = MoveNodesStreamer(session.id) - streamer.send_geo(node.id, lon, lat, alt) - streamer.stop() - queue = Queue() - - def node_handler(node_data: NodeData): - n = node_data.node - assert n.position.lon == lon - assert n.position.lat == lat - assert n.position.alt == alt - queue.put(node_data) - - session.node_handlers.append(node_handler) - - # then - with client.context_connect(): - client.move_nodes(streamer) - - # assert - assert queue.get(timeout=5) - assert node.position.lon == lon - assert node.position.lat == lat - assert node.position.alt == alt - - def test_move_nodes_exception(self, grpc_server: CoreGrpcServer): - # given - client = CoreGrpcClient() - session = grpc_server.coreemu.create_session() - streamer = MoveNodesStreamer(session.id) - request = MoveNodesRequest(session.id + 1, 1) - streamer.send(request) - streamer.stop() - - # then - with pytest.raises(grpc.RpcError): - with client.context_connect(): - client.move_nodes(streamer) - - def test_wlan_link(self, grpc_server: CoreGrpcServer, ip_prefixes: IpPrefixes): - # given - client = CoreGrpcClient() - session = grpc_server.coreemu.create_session() - session.set_state(EventTypes.CONFIGURATION_STATE) - wlan = session.add_node(WlanNode) - node1 = session.add_node(CoreNode) - node2 = session.add_node(CoreNode) - iface1_data = ip_prefixes.create_iface(node1) - iface2_data = ip_prefixes.create_iface(node2) - session.add_link(node1.id, wlan.id, iface1_data) - session.add_link(node2.id, wlan.id, iface2_data) - session.instantiate() - assert len(session.link_manager.links()) == 2 - - # when - with client.context_connect(): - result1 = client.wlan_link(session.id, wlan.id, node1.id, node2.id, True) - result2 = client.wlan_link(session.id, wlan.id, node1.id, node2.id, False) - - # then - assert result1 is True - assert result2 is True diff --git a/daemon/tests/test_gui.py b/daemon/tests/test_gui.py new file mode 100644 index 00000000..d0fd91fc --- /dev/null +++ b/daemon/tests/test_gui.py @@ -0,0 +1,174 @@ +""" +Unit tests for testing with a CORE switch. +""" + +import threading + +from core.api import coreapi, dataconversion +from core.api.coreapi import CoreExecuteTlv +from core.enumerations import CORE_API_PORT, NodeTypes +from core.enumerations import EventTlvs +from core.enumerations import EventTypes +from core.enumerations import ExecuteTlvs +from core.enumerations import LinkTlvs +from core.enumerations import LinkTypes +from core.enumerations import MessageFlags +from core.enumerations import MessageTypes +from core.misc import ipaddress + + +def command_message(node, command): + """ + Create an execute command TLV message. 
+ + :param node: node to execute command for + :param command: command to execute + :return: packed execute message + """ + tlv_data = CoreExecuteTlv.pack(ExecuteTlvs.NODE.value, node.objid) + tlv_data += CoreExecuteTlv.pack(ExecuteTlvs.NUMBER.value, 1) + tlv_data += CoreExecuteTlv.pack(ExecuteTlvs.COMMAND.value, command) + return coreapi.CoreExecMessage.pack(MessageFlags.STRING.value | MessageFlags.TEXT.value, tlv_data) + + +def state_message(state): + """ + Create an event TLV message for a new state. + + :param core.enumerations.EventTypes state: state to create message for + :return: packed event message + """ + tlv_data = coreapi.CoreEventTlv.pack(EventTlvs.TYPE.value, state.value) + return coreapi.CoreEventMessage.pack(0, tlv_data) + + +def switch_link_message(switch, node, address, prefix_len): + """ + Create a link TLV message for node to a switch, with the provided address and prefix length. + + :param switch: switch for link + :param node: node for link + :param address: address of node on link + :param prefix_len: prefix length of address + :return: packed link message + """ + tlv_data = coreapi.CoreLinkTlv.pack(LinkTlvs.N1_NUMBER.value, switch.objid) + tlv_data += coreapi.CoreLinkTlv.pack(LinkTlvs.N2_NUMBER.value, node.objid) + tlv_data += coreapi.CoreLinkTlv.pack(LinkTlvs.TYPE.value, LinkTypes.WIRED.value) + tlv_data += coreapi.CoreLinkTlv.pack(LinkTlvs.INTERFACE2_NUMBER.value, 0) + tlv_data += coreapi.CoreLinkTlv.pack(LinkTlvs.INTERFACE2_IP4.value, address) + tlv_data += coreapi.CoreLinkTlv.pack(LinkTlvs.INTERFACE2_IP4_MASK.value, prefix_len) + return coreapi.CoreLinkMessage.pack(MessageFlags.ADD.value, tlv_data) + + +def run_cmd(node, exec_cmd): + """ + Convenience method for sending commands to a node using the legacy API. + + :param node: The node the command should be issued to + :param exec_cmd: A string with the command to be run + :return: Returns the result and status of the command + """ + # Set up the command api message + # tlv_data = CoreExecuteTlv.pack(ExecuteTlvs.NODE.value, node.objid) + # tlv_data += CoreExecuteTlv.pack(ExecuteTlvs.NUMBER.value, 1) + # tlv_data += CoreExecuteTlv.pack(ExecuteTlvs.COMMAND.value, exec_cmd) + # message = coreapi.CoreExecMessage.pack(MessageFlags.STRING.value | MessageFlags.TEXT.value, tlv_data) + message = command_message(node, exec_cmd) + node.session.broker.handlerawmsg(message) + + # Now wait for the response + server = node.session.broker.servers["localhost"] + server.sock.settimeout(50.0) + + # receive messages until we get our execute response + result = None + status = False + while True: + message_header = server.sock.recv(coreapi.CoreMessage.header_len) + message_type, message_flags, message_length = coreapi.CoreMessage.unpack_header(message_header) + message_data = server.sock.recv(message_length) + + # If we get the right response, return the results + print("received response message: %s" % message_type) + if message_type == MessageTypes.EXECUTE.value: + message = coreapi.CoreExecMessage(message_flags, message_header, message_data) + result = message.get_tlv(ExecuteTlvs.RESULT.value) + status = message.get_tlv(ExecuteTlvs.STATUS.value) + break + + return result, status + + +class TestGui: + def test_broker(self, cored): + """ + Test session broker creation.
+ + :param core.emulator.coreemu.EmuSession session: session for test + :param cored: cored daemon server to test with + """ + + # set core daemon to run in the background + thread = threading.Thread(target=cored.server.serve_forever) + thread.daemon = True + thread.start() + + # ip prefix for nodes + prefix = ipaddress.Ipv4Prefix("10.83.0.0/16") + daemon = "localhost" + + # add server + session = cored.server.coreemu.create_session() + session.broker.addserver(daemon, "127.0.0.1", CORE_API_PORT) + + # setup server + session.broker.setupserver(daemon) + + # do not want the recvloop running as we will deal with it ourselves + session.broker.dorecvloop = False + + # have broker handle a configuration state change + session.set_state(EventTypes.CONFIGURATION_STATE) + event_message = state_message(EventTypes.CONFIGURATION_STATE) + session.broker.handlerawmsg(event_message) + + # create a switch node + switch = session.add_node(_type=NodeTypes.SWITCH) + switch.setposition(x=80, y=50) + switch.server = daemon + + # retrieve switch data representation, create a switch message for broker to handle + switch_data = switch.data(MessageFlags.ADD.value) + switch_message = dataconversion.convert_node(switch_data) + session.broker.handlerawmsg(switch_message) + + # create node one + node_one = session.add_node() + node_one.server = daemon + + # create node two + node_two = session.add_node() + node_two.server = daemon + + # create node messages for the broker to handle + for node in [node_one, node_two]: + node_data = node.data(MessageFlags.ADD.value) + node_message = dataconversion.convert_node(node_data) + session.broker.handlerawmsg(node_message) + + # create links to switch from nodes for broker to handle + for index, node in enumerate([node_one, node_two], start=1): + ip4_address = prefix.addr(index) + link_message = switch_link_message(switch, node, ip4_address, prefix.prefixlen) + session.broker.handlerawmsg(link_message) + + # change session to instantiation state + event_message = state_message(EventTypes.INSTANTIATION_STATE) + session.broker.handlerawmsg(event_message) + + # Get the ip of the first node and ping it from the second + output, status = run_cmd(node_one, "ip -4 -o addr show dev eth0") + pingip = output.split()[3].split("/")[0] + output, status = run_cmd(node_two, "ping -c 5 " + pingip) + assert not status diff --git a/daemon/tests/test_links.py b/daemon/tests/test_links.py index eea88fb3..27289014 100644 --- a/daemon/tests/test_links.py +++ b/daemon/tests/test_links.py @@ -1,418 +1,258 @@ -from typing import Tuple - -import pytest - -from core.emulator.data import IpPrefixes, LinkOptions -from core.emulator.session import Session -from core.errors import CoreError -from core.nodes.base import CoreNode -from core.nodes.network import SwitchNode - -INVALID_ID: int = 100 -LINK_OPTIONS: LinkOptions = LinkOptions( - delay=50, bandwidth=5000000, loss=25, dup=25, jitter=10, buffer=100 -) +from core.emulator.emudata import LinkOptions +from core.enumerations import NodeTypes +from core.misc import utils -def create_ptp_network( - session: Session, ip_prefixes: IpPrefixes -) -> Tuple[CoreNode, CoreNode]: +def create_ptp_network(session, ip_prefixes): # create nodes - node1 = session.add_node(CoreNode) - node2 = session.add_node(CoreNode) + node_one = session.add_node() + node_two = session.add_node() # link nodes to net node - iface1_data = ip_prefixes.create_iface(node1) - iface2_data = ip_prefixes.create_iface(node2) - session.add_link(node1.id, node2.id, iface1_data, iface2_data) + interface_one = 
ip_prefixes.create_interface(node_one) + interface_two = ip_prefixes.create_interface(node_two) + session.add_link(node_one.objid, node_two.objid, interface_one, interface_two) # instantiate session session.instantiate() - return node1, node2 + return node_one, node_two + + +def ping_output(from_node, to_node, ip_prefixes): + address = ip_prefixes.ip4_address(to_node) + output = from_node.check_cmd(["ping", "-i", "0.05", "-c", "3", address]) + return output + + +def iperf(from_node, to_node, ip_prefixes): + # run iperf server, run client, kill iperf server + address = ip_prefixes.ip4_address(to_node) + vcmd, stdin, stdout, stderr = to_node.client.popen(["iperf", "-s", "-u", "-y", "C"]) + from_node.cmd(["iperf", "-u", "-t", "5", "-c", address]) + to_node.cmd(["killall", "-9", "iperf"]) + return stdout.read().strip() class TestLinks: - def test_add_node_to_node(self, session: Session, ip_prefixes: IpPrefixes): + def test_ptp(self, session, ip_prefixes): # given - node1 = session.add_node(CoreNode) - node2 = session.add_node(CoreNode) - iface1_data = ip_prefixes.create_iface(node1) - iface2_data = ip_prefixes.create_iface(node2) + node_one = session.add_node() + node_two = session.add_node() + interface_one = ip_prefixes.create_interface(node_one) + interface_two = ip_prefixes.create_interface(node_two) # when - iface1, iface2 = session.add_link( - node1.id, node2.id, iface1_data, iface2_data, options=LINK_OPTIONS - ) + session.add_link(node_one.objid, node_two.objid, interface_one, interface_two) # then - assert len(session.link_manager.links()) == 1 - assert node1.get_iface(iface1_data.id) - assert node2.get_iface(iface2_data.id) - assert iface1 is not None - assert iface1.options == LINK_OPTIONS - assert iface1.has_netem - assert node1.get_iface(iface1_data.id) - assert iface2 is not None - assert iface2.options == LINK_OPTIONS - assert iface2.has_netem - assert node1.get_iface(iface1_data.id) + assert node_one.netif(interface_one.id) + assert node_two.netif(interface_two.id) - def test_add_node_to_net(self, session: Session, ip_prefixes: IpPrefixes): + def test_node_to_net(self, session, ip_prefixes): # given - node1 = session.add_node(CoreNode) - node2 = session.add_node(SwitchNode) - iface1_data = ip_prefixes.create_iface(node1) + node_one = session.add_node() + node_two = session.add_node(_type=NodeTypes.SWITCH) + interface_one = ip_prefixes.create_interface(node_one) # when - iface1, iface2 = session.add_link( - node1.id, node2.id, iface1_data=iface1_data, options=LINK_OPTIONS - ) + session.add_link(node_one.objid, node_two.objid, interface_one) # then - assert len(session.link_manager.links()) == 1 - assert iface1 is not None - assert iface1.options == LINK_OPTIONS - assert iface1.has_netem - assert node1.get_iface(iface1_data.id) - assert iface2 is not None - assert iface2.options == LINK_OPTIONS - assert iface2.has_netem - assert node2.get_iface(iface1_data.id) + assert node_two.all_link_data(0) + assert node_one.netif(interface_one.id) - def test_add_net_to_node(self, session: Session, ip_prefixes: IpPrefixes): + def test_net_to_node(self, session, ip_prefixes): # given - node1 = session.add_node(SwitchNode) - node2 = session.add_node(CoreNode) - iface2_data = ip_prefixes.create_iface(node2) + node_one = session.add_node(_type=NodeTypes.SWITCH) + node_two = session.add_node() + interface_two = ip_prefixes.create_interface(node_two) # when - iface1, iface2 = session.add_link( - node1.id, node2.id, iface2_data=iface2_data, options=LINK_OPTIONS - ) + session.add_link(node_one.objid, 
node_two.objid, interface_two=interface_two) # then - assert len(session.link_manager.links()) == 1 - assert iface1 is not None - assert iface1.options == LINK_OPTIONS - assert iface1.has_netem - assert node1.get_iface(iface1.id) - assert iface2 is not None - assert iface2.options == LINK_OPTIONS - assert iface2.has_netem - assert node2.get_iface(iface2.id) + assert node_one.all_link_data(0) + assert node_two.netif(interface_two.id) - def test_add_net_to_net(self, session: Session): + def test_net_to_net(self, session): # given - node1 = session.add_node(SwitchNode) - node2 = session.add_node(SwitchNode) + node_one = session.add_node(_type=NodeTypes.SWITCH) + node_two = session.add_node(_type=NodeTypes.SWITCH) # when - iface1, iface2 = session.add_link(node1.id, node2.id, options=LINK_OPTIONS) + session.add_link(node_one.objid, node_two.objid) # then - assert len(session.link_manager.links()) == 1 - assert iface1 is not None - assert iface1.options == LINK_OPTIONS - assert iface1.has_netem - assert iface2 is not None - assert iface2.options == LINK_OPTIONS - assert iface2.has_netem + assert node_one.all_link_data(0) - def test_add_node_to_node_uni(self, session: Session, ip_prefixes: IpPrefixes): + def test_link_update(self, session, ip_prefixes): # given - node1 = session.add_node(CoreNode) - node2 = session.add_node(CoreNode) - iface1_data = ip_prefixes.create_iface(node1) - iface2_data = ip_prefixes.create_iface(node2) - link_options1 = LinkOptions( - delay=50, - bandwidth=5000000, - loss=25, - dup=25, - jitter=10, - buffer=100, - unidirectional=True, - ) - link_options2 = LinkOptions( - delay=51, - bandwidth=5000001, - loss=26, - dup=26, - jitter=11, - buffer=101, - unidirectional=True, - ) + node_one = session.add_node() + node_two = session.add_node(_type=NodeTypes.SWITCH) + interface_one = ip_prefixes.create_interface(node_one) + session.add_link(node_one.objid, node_two.objid, interface_one) + interface = node_one.netif(interface_one.id) + output = utils.check_cmd(["tc", "qdisc", "show", "dev", interface.localname]) + assert "delay" not in output + assert "rate" not in output + assert "loss" not in output + assert "duplicate" not in output # when - iface1, iface2 = session.add_link( - node1.id, node2.id, iface1_data, iface2_data, link_options1 - ) - session.update_link( - node2.id, node1.id, iface2_data.id, iface1_data.id, link_options2 - ) + link_options = LinkOptions() + link_options.delay = 50 + link_options.bandwidth = 5000000 + link_options.per = 25 + link_options.dup = 25 + session.update_link(node_one.objid, node_two.objid, + interface_one_id=interface_one.id, link_options=link_options) # then - assert len(session.link_manager.links()) == 1 - assert node1.get_iface(iface1_data.id) - assert node2.get_iface(iface2_data.id) - assert iface1 is not None - assert iface1.options == link_options1 - assert iface1.has_netem - assert iface2 is not None - assert iface2.options == link_options2 - assert iface2.has_netem + output = utils.check_cmd(["tc", "qdisc", "show", "dev", interface.localname]) + assert "delay" in output + assert "rate" in output + assert "loss" in output + assert "duplicate" in output - def test_update_node_to_net(self, session: Session, ip_prefixes: IpPrefixes): + def test_link_delete(self, session, ip_prefixes): # given - node1 = session.add_node(CoreNode) - node2 = session.add_node(SwitchNode) - iface1_data = ip_prefixes.create_iface(node1) - iface1, iface2 = session.add_link(node1.id, node2.id, iface1_data) - assert len(session.link_manager.links()) == 1 - 
assert iface1.options != LINK_OPTIONS - assert iface2.options != LINK_OPTIONS + node_one = session.add_node() + node_two = session.add_node() + interface_one = ip_prefixes.create_interface(node_one) + interface_two = ip_prefixes.create_interface(node_two) + session.add_link(node_one.objid, node_two.objid, interface_one, interface_two) + assert node_one.netif(interface_one.id) + assert node_two.netif(interface_two.id) # when - session.update_link(node1.id, node2.id, iface1.id, iface2.id, LINK_OPTIONS) + session.delete_link(node_one.objid, node_two.objid, interface_one.id, interface_two.id) # then - assert iface1.options == LINK_OPTIONS - assert iface1.has_netem - assert iface2.options == LINK_OPTIONS - assert iface2.has_netem + assert not node_one.netif(interface_one.id) + assert not node_two.netif(interface_two.id) - def test_update_net_to_node(self, session: Session, ip_prefixes: IpPrefixes): - # given - node1 = session.add_node(SwitchNode) - node2 = session.add_node(CoreNode) - iface2_data = ip_prefixes.create_iface(node2) - iface1, iface2 = session.add_link(node1.id, node2.id, iface2_data=iface2_data) - assert iface1.options != LINK_OPTIONS - assert iface2.options != LINK_OPTIONS + def test_link_bandwidth(self, session, ip_prefixes): + """ + Test ptp node network with modifying link bandwidth. - # when - session.update_link(node1.id, node2.id, iface1.id, iface2.id, LINK_OPTIONS) + :param core.emulator.coreemu.EmuSession session: session for test + :param ip_prefixes: generates ip addresses for nodes + """ - # then - assert iface1.options == LINK_OPTIONS - assert iface1.has_netem - assert iface2.options == LINK_OPTIONS - assert iface2.has_netem + # create link network + node_one, node_two = create_ptp_network(session, ip_prefixes) - def test_update_ptp(self, session: Session, ip_prefixes: IpPrefixes): - # given - node1 = session.add_node(CoreNode) - node2 = session.add_node(CoreNode) - iface1_data = ip_prefixes.create_iface(node1) - iface2_data = ip_prefixes.create_iface(node2) - iface1, iface2 = session.add_link(node1.id, node2.id, iface1_data, iface2_data) - assert iface1.options != LINK_OPTIONS - assert iface2.options != LINK_OPTIONS + # output csv index + bandwidth_index = 8 - # when - session.update_link(node1.id, node2.id, iface1.id, iface2.id, LINK_OPTIONS) + # run iperf, validate normal bandwidth + stdout = iperf(node_one, node_two, ip_prefixes) + assert stdout + value = int(stdout.split(',')[bandwidth_index]) + assert 900000 <= value <= 1100000 - # then - assert iface1.options == LINK_OPTIONS - assert iface1.has_netem - assert iface2.options == LINK_OPTIONS - assert iface2.has_netem + # change bandwidth in bits per second + link_options = LinkOptions() + link_options.bandwidth = 500000 + session.update_link(node_one.objid, node_two.objid, link_options=link_options) - def test_update_net_to_net(self, session: Session, ip_prefixes: IpPrefixes): - # given - node1 = session.add_node(SwitchNode) - node2 = session.add_node(SwitchNode) - iface1, iface2 = session.add_link(node1.id, node2.id) - assert iface1.options != LINK_OPTIONS - assert iface2.options != LINK_OPTIONS + # run iperf again + stdout = iperf(node_one, node_two, ip_prefixes) + assert stdout + value = int(stdout.split(',')[bandwidth_index]) + assert 400000 <= value <= 600000 - # when - session.update_link(node1.id, node2.id, iface1.id, iface2.id, LINK_OPTIONS) + def test_link_loss(self, session, ip_prefixes): + """ + Test ptp node network with modifying link packet loss. 
- - # then - assert iface1.options == LINK_OPTIONS - assert iface1.has_netem - assert iface2.options == LINK_OPTIONS - assert iface2.has_netem + :param core.emulator.coreemu.EmuSession session: session for test + :param ip_prefixes: generates ip addresses for nodes + """ - def test_update_error(self, session: Session, ip_prefixes: IpPrefixes): - # given - node1 = session.add_node(CoreNode) - node2 = session.add_node(CoreNode) - iface1_data = ip_prefixes.create_iface(node1) - iface2_data = ip_prefixes.create_iface(node2) - iface1, iface2 = session.add_link(node1.id, node2.id, iface1_data, iface2_data) - assert iface1.options != LINK_OPTIONS - assert iface2.options != LINK_OPTIONS + # create link network + node_one, node_two = create_ptp_network(session, ip_prefixes) - # when - with pytest.raises(CoreError): - session.delete_link(node1.id, INVALID_ID, iface1.id, iface2.id) + # output csv index + loss_index = -2 - def test_clear_net_to_net(self, session: Session, ip_prefixes: IpPrefixes): - # given - node1 = session.add_node(SwitchNode) - node2 = session.add_node(SwitchNode) - iface1, iface2 = session.add_link(node1.id, node2.id, options=LINK_OPTIONS) - assert iface1.options == LINK_OPTIONS - assert iface1.has_netem - assert iface2.options == LINK_OPTIONS - assert iface2.has_netem + # run iperf, validate normal loss + stdout = iperf(node_one, node_two, ip_prefixes) + assert stdout + value = float(stdout.split(',')[loss_index]) + assert 0 <= value <= 0.5 - # when - options = LinkOptions(delay=0, bandwidth=0, loss=0.0, dup=0, jitter=0, buffer=0) - session.update_link(node1.id, node2.id, iface1.id, iface2.id, options) + # change packet loss percentage + link_options = LinkOptions() + link_options.per = 50 + session.update_link(node_one.objid, node_two.objid, link_options=link_options) - # then - assert iface1.options.is_clear() - assert not iface1.has_netem - assert iface2.options.is_clear() - assert not iface2.has_netem + # run iperf again + stdout = iperf(node_one, node_two, ip_prefixes) + assert stdout + value = float(stdout.split(',')[loss_index]) + assert 40 <= value <= 60 - def test_delete_node_to_node(self, session: Session, ip_prefixes: IpPrefixes): - # given - node1 = session.add_node(CoreNode) - node2 = session.add_node(CoreNode) - iface1_data = ip_prefixes.create_iface(node1) - iface2_data = ip_prefixes.create_iface(node2) - iface1, iface2 = session.add_link(node1.id, node2.id, iface1_data, iface2_data) - assert len(session.link_manager.links()) == 1 - assert node1.get_iface(iface1.id) - assert node2.get_iface(iface2.id) + def test_link_delay(self, session, ip_prefixes): + """ + Test ptp node network with modifying link packet delay.
- # when - session.delete_link(node1.id, node2.id, iface1.id, iface2.id) + :param core.emulator.coreemu.EmuSession session: session for test + :param ip_prefixes: generates ip addresses for nodes + """ - # then - assert len(session.link_manager.links()) == 0 - assert iface1.id not in node1.ifaces - assert iface2.id not in node2.ifaces + # create link network + node_one, node_two = create_ptp_network(session, ip_prefixes) - def test_delete_node_to_net(self, session: Session, ip_prefixes: IpPrefixes): - # given - node1 = session.add_node(CoreNode) - node2 = session.add_node(SwitchNode) - iface1_data = ip_prefixes.create_iface(node1) - iface1, iface2 = session.add_link(node1.id, node2.id, iface1_data) - assert len(session.link_manager.links()) == 1 - assert node1.get_iface(iface1.id) - assert node2.get_iface(iface2.id) + # run ping for delay information + stdout = ping_output(node_one, node_two, ip_prefixes) + assert stdout + rtt_line = stdout.split("\n")[-1] + rtt_values = rtt_line.split("=")[1].split("ms")[0].strip() + rtt_avg = float(rtt_values.split("/")[2]) + assert 0 <= rtt_avg <= 0.2 - # when - session.delete_link(node1.id, node2.id, iface1.id, iface2.id) + # change delay in microseconds + link_options = LinkOptions() + link_options.delay = 1000000 + session.update_link(node_one.objid, node_two.objid, link_options=link_options) - # then - assert len(session.link_manager.links()) == 0 - assert iface1.id not in node1.ifaces - assert iface2.id not in node2.ifaces + # run ping for delay information again + stdout = ping_output(node_one, node_two, ip_prefixes) + assert stdout + rtt_line = stdout.split("\n")[-1] + rtt_values = rtt_line.split("=")[1].split("ms")[0].strip() + rtt_avg = float(rtt_values.split("/")[2]) + assert 1800 <= rtt_avg <= 2200 - def test_delete_net_to_node(self, session: Session, ip_prefixes: IpPrefixes): - # given - node1 = session.add_node(SwitchNode) - node2 = session.add_node(CoreNode) - iface2_data = ip_prefixes.create_iface(node2) - iface1, iface2 = session.add_link(node1.id, node2.id, iface2_data=iface2_data) - assert len(session.link_manager.links()) == 1 - assert node1.get_iface(iface1.id) - assert node2.get_iface(iface2.id) + def test_link_jitter(self, session, ip_prefixes): + """ + Test ptp node network with modifying link packet jitter. 
- # when - session.delete_link(node1.id, node2.id, iface1.id, iface2.id) + :param core.emulator.coreemu.EmuSession session: session for test + :param ip_prefixes: generates ip addresses for nodes + """ - # then - assert len(session.link_manager.links()) == 0 - assert iface1.id not in node1.ifaces - assert iface2.id not in node2.ifaces + # create link network + node_one, node_two = create_ptp_network(session, ip_prefixes) - def test_delete_net_to_net(self, session: Session, ip_prefixes: IpPrefixes): - # given - node1 = session.add_node(SwitchNode) - node2 = session.add_node(SwitchNode) - iface1, iface2 = session.add_link(node1.id, node2.id) - assert len(session.link_manager.links()) == 1 - assert node1.get_iface(iface1.id) - assert node2.get_iface(iface2.id) + # output csv index + jitter_index = 9 - # when - session.delete_link(node1.id, node2.id, iface1.id, iface2.id) + # run iperf + stdout = iperf(node_one, node_two, ip_prefixes) + assert stdout + value = float(stdout.split(",")[jitter_index]) + assert -0.5 <= value <= 0.05 - # then - assert len(session.link_manager.links()) == 0 - assert iface1.id not in node1.ifaces - assert iface2.id not in node2.ifaces + # change jitter in microseconds + link_options = LinkOptions() + link_options.jitter = 1000000 + session.update_link(node_one.objid, node_two.objid, link_options=link_options) - def test_delete_node_error(self, session: Session, ip_prefixes: IpPrefixes): - # given - node1 = session.add_node(SwitchNode) - node2 = session.add_node(SwitchNode) - iface1, iface2 = session.add_link(node1.id, node2.id) - assert len(session.link_manager.links()) == 1 - assert node1.get_iface(iface1.id) - assert node2.get_iface(iface2.id) - - # when - with pytest.raises(CoreError): - session.delete_link(node1.id, INVALID_ID, iface1.id, iface2.id) - with pytest.raises(CoreError): - session.delete_link(INVALID_ID, node2.id, iface1.id, iface2.id) - - def test_delete_net_to_net_error(self, session: Session, ip_prefixes: IpPrefixes): - # given - node1 = session.add_node(SwitchNode) - node2 = session.add_node(SwitchNode) - node3 = session.add_node(SwitchNode) - iface1, iface2 = session.add_link(node1.id, node2.id) - assert len(session.link_manager.links()) == 1 - assert node1.get_iface(iface1.id) - assert node2.get_iface(iface2.id) - - # when - with pytest.raises(CoreError): - session.delete_link(node1.id, node3.id, iface1.id, iface2.id) - - def test_delete_node_to_net_error(self, session: Session, ip_prefixes: IpPrefixes): - # given - node1 = session.add_node(CoreNode) - node2 = session.add_node(SwitchNode) - node3 = session.add_node(SwitchNode) - iface1_data = ip_prefixes.create_iface(node1) - iface1, iface2 = session.add_link(node1.id, node2.id, iface1_data) - assert len(session.link_manager.links()) == 1 - assert node1.get_iface(iface1.id) - assert node2.get_iface(iface2.id) - - # when - with pytest.raises(CoreError): - session.delete_link(node1.id, node3.id, iface1.id, iface2.id) - - def test_delete_net_to_node_error(self, session: Session, ip_prefixes: IpPrefixes): - # given - node1 = session.add_node(SwitchNode) - node2 = session.add_node(CoreNode) - node3 = session.add_node(SwitchNode) - iface2_data = ip_prefixes.create_iface(node2) - iface1, iface2 = session.add_link(node1.id, node2.id, iface2_data=iface2_data) - assert len(session.link_manager.links()) == 1 - assert node1.get_iface(iface1.id) - assert node2.get_iface(iface2.id) - - # when - with pytest.raises(CoreError): - session.delete_link(node1.id, node3.id, iface1.id, iface2.id) - - def 
test_delete_node_to_node_error(self, session: Session, ip_prefixes: IpPrefixes): - # given - node1 = session.add_node(CoreNode) - node2 = session.add_node(CoreNode) - node3 = session.add_node(SwitchNode) - iface1_data = ip_prefixes.create_iface(node1) - iface2_data = ip_prefixes.create_iface(node2) - iface1, iface2 = session.add_link(node1.id, node2.id, iface1_data, iface2_data) - assert len(session.link_manager.links()) == 1 - assert node1.get_iface(iface1.id) - assert node2.get_iface(iface2.id) - - # when - with pytest.raises(CoreError): - session.delete_link(node1.id, node3.id, iface1.id, iface2.id) + # run iperf again + stdout = iperf(node_one, node_two, ip_prefixes) + assert stdout + value = float(stdout.split(",")[jitter_index]) + assert 200 <= value <= 500 diff --git a/daemon/tests/test_mobility.py b/daemon/tests/test_mobility.py deleted file mode 100644 index aab7b30f..00000000 --- a/daemon/tests/test_mobility.py +++ /dev/null @@ -1,19 +0,0 @@ -import pytest - -from core.location.mobility import WayPoint - -POSITION = (0.0, 0.0, 0.0) - - -class TestMobility: - @pytest.mark.parametrize( - "wp1, wp2, expected", - [ - (WayPoint(10.0, 1, POSITION, 1.0), WayPoint(1.0, 2, POSITION, 1.0), False), - (WayPoint(1.0, 1, POSITION, 1.0), WayPoint(10.0, 2, POSITION, 1.0), True), - (WayPoint(1.0, 1, POSITION, 1.0), WayPoint(1.0, 2, POSITION, 1.0), True), - (WayPoint(1.0, 2, POSITION, 1.0), WayPoint(1.0, 1, POSITION, 1.0), False), - ], - ) - def test_waypoint_lessthan(self, wp1, wp2, expected): - assert (wp1 < wp2) == expected diff --git a/daemon/tests/test_nodes.py b/daemon/tests/test_nodes.py index bb76bb4e..513856ec 100644 --- a/daemon/tests/test_nodes.py +++ b/daemon/tests/test_nodes.py @@ -1,174 +1,78 @@ +import os +import time + import pytest -from core.emulator.data import InterfaceData -from core.emulator.session import Session -from core.errors import CoreError -from core.nodes.base import CoreNode -from core.nodes.network import HubNode, SwitchNode, WlanNode +from core.emulator.emudata import NodeOptions +from core.enumerations import NodeTypes +from core.misc import utils -MODELS = ["router", "host", "PC", "mdr"] -NET_TYPES = [SwitchNode, HubNode, WlanNode] +MODELS = [ + "router", + "host", + "PC", + "mdr", +] + +NET_TYPES = [ + NodeTypes.SWITCH, + NodeTypes.HUB, + NodeTypes.WIRELESS_LAN +] class TestNodes: @pytest.mark.parametrize("model", MODELS) - def test_node_add(self, session: Session, model: str): + def test_node_add(self, session, model): # given - options = CoreNode.create_options() - options.model = model + node_options = NodeOptions(model=model) # when - node = session.add_node(CoreNode, options=options) + node = session.add_node(node_options=node_options) + + # give time for node services to boot + time.sleep(1) # then assert node + assert os.path.exists(node.nodedir) assert node.alive() assert node.up + assert node.check_cmd(["ip", "addr", "show", "lo"]) - def test_node_set_pos(self, session: Session): + def test_node_update(self, session): # given - node = session.add_node(CoreNode) - x, y = 100.0, 50.0 + node = session.add_node() + position_value = 100 + update_options = NodeOptions() + update_options.set_position(x=position_value, y=position_value) # when - session.set_node_pos(node, x, y) + session.update_node(node.objid, update_options) # then - assert node.position.x == x - assert node.position.y == y + assert node.position.x == position_value + assert node.position.y == position_value - def test_node_set_geo(self, session: Session): + def test_node_delete(self, 
session): # given - node = session.add_node(CoreNode) - lon, lat, alt = 0.0, 0.0, 0.0 + node = session.add_node() # when - session.set_node_geo(node, lon, lat, alt) + session.delete_node(node.objid) # then - assert node.position.lon == lon - assert node.position.lat == lat - assert node.position.alt == alt - - def test_node_delete(self, session: Session): - # given - node = session.add_node(CoreNode) - - # when - session.delete_node(node.id) - - # then - with pytest.raises(CoreError): - session.get_node(node.id, CoreNode) - - def test_node_add_iface(self, session: Session): - # given - node = session.add_node(CoreNode) - - # when - iface = node.create_iface() - - # then - assert iface.id in node.ifaces - - def test_node_get_iface(self, session: Session): - # given - node = session.add_node(CoreNode) - iface = node.create_iface() - assert iface.id in node.ifaces - - # when - iface2 = node.get_iface(iface.id) - - # then - assert iface == iface2 - - def test_node_delete_iface(self, session: Session): - # given - node = session.add_node(CoreNode) - iface = node.create_iface() - assert iface.id in node.ifaces - - # when - node.delete_iface(iface.id) - - # then - assert iface.id not in node.ifaces - - @pytest.mark.parametrize( - "mac,expected", - [ - ("AA-AA-AA-FF-FF-FF", "aa:aa:aa:ff:ff:ff"), - ("00:00:00:FF:FF:FF", "00:00:00:ff:ff:ff"), - ], - ) - def test_node_set_mac(self, session: Session, mac: str, expected: str): - # given - node = session.add_node(CoreNode) - iface_data = InterfaceData() - iface = node.create_iface(iface_data) - - # when - iface.set_mac(mac) - - # then - assert str(iface.mac) == expected - - @pytest.mark.parametrize( - "mac", ["AAA:AA:AA:FF:FF:FF", "AA:AA:AA:FF:FF", "AA/AA/AA/FF/FF/FF"] - ) - def test_node_set_mac_exception(self, session: Session, mac: str): - # given - node = session.add_node(CoreNode) - iface_data = InterfaceData() - iface = node.create_iface(iface_data) - - # when - with pytest.raises(CoreError): - iface.set_mac(mac) - - @pytest.mark.parametrize( - "ip,expected,is_ip6", - [ - ("127", "127.0.0.0/32", False), - ("10.0.0.1/24", "10.0.0.1/24", False), - ("2001::", "2001::/128", True), - ("2001::/64", "2001::/64", True), - ], - ) - def test_node_add_ip(self, session: Session, ip: str, expected: str, is_ip6: bool): - # given - node = session.add_node(CoreNode) - iface_data = InterfaceData() - iface = node.create_iface(iface_data) - - # when - iface.add_ip(ip) - - # then - if is_ip6: - assert str(iface.get_ip6()) == expected - else: - assert str(iface.get_ip4()) == expected - - def test_node_add_ip_exception(self, session): - # given - node = session.add_node(CoreNode) - iface_data = InterfaceData() - iface = node.create_iface(iface_data) - ip = "256.168.0.1/24" - - # when - with pytest.raises(CoreError): - iface.add_ip(ip) + with pytest.raises(KeyError): + session.get_object(node.objid) @pytest.mark.parametrize("net_type", NET_TYPES) def test_net(self, session, net_type): # given # when - node = session.add_node(net_type) + node = session.add_node(_type=net_type) # then assert node assert node.up + assert utils.check_cmd(["brctl", "show", node.brname]) diff --git a/daemon/tests/test_services.py b/daemon/tests/test_services.py index 69234e3a..789d05f5 100644 --- a/daemon/tests/test_services.py +++ b/daemon/tests/test_services.py @@ -1,84 +1,114 @@ -import itertools -from pathlib import Path +import os import pytest -from mock import MagicMock -from core.emulator.session import Session -from core.errors import CoreCommandError -from core.nodes.base import 
CoreNode -from core.services.coreservices import CoreService, ServiceDependencies, ServiceManager +from core.service import CoreService +from core.service import ServiceDependencies +from core.service import ServiceManager -_PATH: Path = Path(__file__).resolve().parent -_SERVICES_PATH = _PATH / "myservices" +_PATH = os.path.abspath(os.path.dirname(__file__)) +_SERVICES_PATH = os.path.join(_PATH, "myservices") SERVICE_ONE = "MyService" SERVICE_TWO = "MyService2" +class ServiceA(CoreService): + name = "A" + dependencies = ("B",) + + +class ServiceB(CoreService): + name = "B" + dependencies = () + + +class ServiceC(CoreService): + name = "C" + dependencies = ("B", "D") + + +class ServiceD(CoreService): + name = "D" + dependencies = () + + +class ServiceBadDependency(CoreService): + name = "E" + dependencies = ("Z",) + + +class ServiceF(CoreService): + name = "F" + dependencies = () + + +class ServiceCycleDependency(CoreService): + name = "G" + + class TestServices: - def test_service_all_files(self, session: Session): + def test_service_all_files(self, session): # given ServiceManager.add_services(_SERVICES_PATH) file_name = "myservice.sh" - node = session.add_node(CoreNode) + node = session.add_node() # when - session.services.set_service_file(node.id, SERVICE_ONE, file_name, "# test") + session.services.set_service_file(node.objid, SERVICE_ONE, file_name, "# test") # then - service = session.services.get_service(node.id, SERVICE_ONE) + service = session.services.get_service(node.objid, SERVICE_ONE) all_files = session.services.all_files(service) assert service assert all_files and len(all_files) == 1 - def test_service_all_configs(self, session: Session): + def test_service_all_configs(self, session): # given ServiceManager.add_services(_SERVICES_PATH) - node = session.add_node(CoreNode) + node = session.add_node() # when - session.services.set_service(node.id, SERVICE_ONE) - session.services.set_service(node.id, SERVICE_TWO) + session.services.set_service(node.objid, SERVICE_ONE) + session.services.set_service(node.objid, SERVICE_TWO) # then all_configs = session.services.all_configs() assert all_configs assert len(all_configs) == 2 - def test_service_add_services(self, session: Session): + def test_service_add_services(self, session): # given ServiceManager.add_services(_SERVICES_PATH) - node = session.add_node(CoreNode) + node = session.add_node() total_service = len(node.services) # when - session.services.add_services(node, node.model, [SERVICE_ONE, SERVICE_TWO]) + session.services.add_services(node, node.type, [SERVICE_ONE, SERVICE_TWO]) # then assert node.services assert len(node.services) == total_service + 2 - def test_service_file(self, request, session: Session): + def test_service_file(self, session): # given ServiceManager.add_services(_SERVICES_PATH) my_service = ServiceManager.get(SERVICE_ONE) - node = session.add_node(CoreNode) - file_path = Path(my_service.configs[0]) - file_path = node.host_path(file_path) + node = session.add_node() + file_name = my_service.configs[0] + file_path = node.hostfilename(file_name) # when session.services.create_service_files(node, my_service) # then - if not request.config.getoption("mock"): - assert file_path.exists() + assert os.path.exists(file_path) - def test_service_validate(self, session: Session): + def test_service_validate(self, session): # given ServiceManager.add_services(_SERVICES_PATH) my_service = ServiceManager.get(SERVICE_ONE) - node = session.add_node(CoreNode) + node = session.add_node() 
session.services.create_service_files(node, my_service) # when @@ -87,13 +117,12 @@ class TestServices: # then assert not status - def test_service_validate_error(self, session: Session): + def test_service_validate_error(self, session): # given ServiceManager.add_services(_SERVICES_PATH) my_service = ServiceManager.get(SERVICE_TWO) - node = session.add_node(CoreNode) + node = session.add_node() session.services.create_service_files(node, my_service) - node.cmd = MagicMock(side_effect=CoreCommandError(-1, "invalid")) # when status = session.services.validate_service(node, my_service) @@ -101,11 +130,11 @@ class TestServices: # then assert status - def test_service_startup(self, session: Session): + def test_service_startup(self, session): # given ServiceManager.add_services(_SERVICES_PATH) my_service = ServiceManager.get(SERVICE_ONE) - node = session.add_node(CoreNode) + node = session.add_node() session.services.create_service_files(node, my_service) # when @@ -114,13 +143,12 @@ class TestServices: # then assert not status - def test_service_startup_error(self, session: Session): + def test_service_startup_error(self, session): # given ServiceManager.add_services(_SERVICES_PATH) my_service = ServiceManager.get(SERVICE_TWO) - node = session.add_node(CoreNode) + node = session.add_node() session.services.create_service_files(node, my_service) - node.cmd = MagicMock(side_effect=CoreCommandError(-1, "invalid")) # when status = session.services.startup_service(node, my_service, wait=True) @@ -128,11 +156,11 @@ class TestServices: # then assert status - def test_service_stop(self, session: Session): + def test_service_stop(self, session): # given ServiceManager.add_services(_SERVICES_PATH) my_service = ServiceManager.get(SERVICE_ONE) - node = session.add_node(CoreNode) + node = session.add_node() session.services.create_service_files(node, my_service) # when @@ -141,13 +169,12 @@ class TestServices: # then assert not status - def test_service_stop_error(self, session: Session): + def test_service_stop_error(self, session): # given ServiceManager.add_services(_SERVICES_PATH) my_service = ServiceManager.get(SERVICE_TWO) - node = session.add_node(CoreNode) + node = session.add_node() session.services.create_service_files(node, my_service) - node.cmd = MagicMock(side_effect=CoreCommandError(-1, "invalid")) # when status = session.services.stop_service(node, my_service) @@ -155,41 +182,48 @@ class TestServices: # then assert status - def test_service_custom_startup(self, session: Session): + def test_service_custom_startup(self, session): # given ServiceManager.add_services(_SERVICES_PATH) my_service = ServiceManager.get(SERVICE_ONE) - node = session.add_node(CoreNode) + node = session.add_node() # when - session.services.set_service(node.id, my_service.name) - custom_my_service = session.services.get_service(node.id, my_service.name) + session.services.set_service(node.objid, my_service.name) + custom_my_service = session.services.get_service(node.objid, my_service.name) custom_my_service.startup = ("sh custom.sh",) # then assert my_service.startup != custom_my_service.startup - def test_service_set_file(self, session: Session): + def test_service_set_file(self, session): # given ServiceManager.add_services(_SERVICES_PATH) my_service = ServiceManager.get(SERVICE_ONE) - node1 = session.add_node(CoreNode) - node2 = session.add_node(CoreNode) + node_one = session.add_node() + node_two = session.add_node() file_name = my_service.configs[0] - file_data1 = "# custom file one" - file_data2 = "# custom 
file two" - session.services.set_service_file( - node1.id, my_service.name, file_name, file_data1 - ) - session.services.set_service_file( - node2.id, my_service.name, file_name, file_data2 - ) + file_data_one = "# custom file one" + file_data_two = "# custom file two" + session.services.set_service_file(node_one.objid, my_service.name, file_name, file_data_one) + session.services.set_service_file(node_two.objid, my_service.name, file_name, file_data_two) # when - custom_service1 = session.services.get_service(node1.id, my_service.name) - session.services.create_service_files(node1, custom_service1) - custom_service2 = session.services.get_service(node2.id, my_service.name) - session.services.create_service_files(node2, custom_service2) + custom_service_one = session.services.get_service(node_one.objid, my_service.name) + session.services.create_service_files(node_one, custom_service_one) + custom_service_two = session.services.get_service(node_two.objid, my_service.name) + session.services.create_service_files(node_two, custom_service_two) + + # then + file_path_one = node_one.hostfilename(file_name) + assert os.path.exists(file_path_one) + with open(file_path_one, "r") as custom_file: + assert custom_file.read() == file_data_one + + file_path_two = node_two.hostfilename(file_name) + assert os.path.exists(file_path_two) + with open(file_path_two, "r") as custom_file: + assert custom_file.read() == file_data_two def test_service_import(self): """ @@ -199,178 +233,66 @@ class TestServices: assert ServiceManager.get(SERVICE_ONE) assert ServiceManager.get(SERVICE_TWO) - def test_service_setget(self, session: Session): + def test_service_setget(self, session): # given ServiceManager.add_services(_SERVICES_PATH) my_service = ServiceManager.get(SERVICE_ONE) - node = session.add_node(CoreNode) + node = session.add_node() # when - no_service = session.services.get_service(node.id, SERVICE_ONE) - default_service = session.services.get_service( - node.id, SERVICE_ONE, default_service=True - ) - session.services.set_service(node.id, SERVICE_ONE) - custom_service = session.services.get_service( - node.id, SERVICE_ONE, default_service=True - ) + no_service = session.services.get_service(node.objid, SERVICE_ONE) + default_service = session.services.get_service(node.objid, SERVICE_ONE, default_service=True) + session.services.set_service(node.objid, SERVICE_ONE) + custom_service = session.services.get_service(node.objid, SERVICE_ONE, default_service=True) # then assert no_service is None assert default_service == my_service assert custom_service and custom_service != my_service - def test_services_dependency(self): + def test_services_dependencies(self): # given - service_a = CoreService() - service_a.name = "a" - service_b = CoreService() - service_b.name = "b" - service_c = CoreService() - service_c.name = "c" - service_d = CoreService() - service_d.name = "d" - service_e = CoreService() - service_e.name = "e" - service_a.dependencies = (service_b.name,) - service_b.dependencies = () - service_c.dependencies = (service_b.name, service_d.name) - service_d.dependencies = () - service_e.dependencies = () - services = [service_a, service_b, service_c, service_d, service_e] - expected1 = {service_a.name, service_b.name, service_c.name, service_d.name} - expected2 = [service_e] + services = [ + ServiceA, + ServiceB, + ServiceC, + ServiceD, + ServiceF + ] # when - permutations = itertools.permutations(services) - for permutation in permutations: - permutation = list(permutation) - results = 
ServiceDependencies(permutation).boot_order() - # then - for result in results: - result_set = {x.name for x in result} - if len(result) == 4: - a_index = result.index(service_a) - b_index = result.index(service_b) - c_index = result.index(service_c) - d_index = result.index(service_d) - assert b_index < a_index - assert b_index < c_index - assert d_index < c_index - assert result_set == expected1 - elif len(result) == 1: - assert expected2 == result - else: - raise ValueError( - f"unexpected result: {results}, perm({permutation})" - ) + boot_paths = ServiceDependencies(services).boot_paths() - def test_services_dependency_missing(self): + # then + assert len(boot_paths) == 2 + + def test_services_dependencies_not_present(self): # given - service_a = CoreService() - service_a.name = "a" - service_b = CoreService() - service_b.name = "b" - service_c = CoreService() - service_c.name = "c" - service_a.dependencies = (service_b.name,) - service_b.dependencies = (service_c.name,) - service_c.dependencies = ("d",) - services = [service_a, service_b, service_c] + services = [ + ServiceA, + ServiceB, + ServiceC, + ServiceD, + ServiceF, + ServiceBadDependency + ] # when, then - permutations = itertools.permutations(services) - for permutation in permutations: - permutation = list(permutation) - with pytest.raises(ValueError): - ServiceDependencies(permutation).boot_order() + with pytest.raises(ValueError): + ServiceDependencies(services).boot_paths() - def test_services_dependency_cycle(self): + def test_services_dependencies_cycle(self): # given - service_a = CoreService() - service_a.name = "a" - service_b = CoreService() - service_b.name = "b" - service_c = CoreService() - service_c.name = "c" - service_a.dependencies = (service_b.name,) - service_b.dependencies = (service_c.name,) - service_c.dependencies = (service_a.name,) - services = [service_a, service_b, service_c] + service_d = ServiceD() + service_d.dependencies = ("C",) + services = [ + ServiceA, + ServiceB, + ServiceC, + service_d, + ServiceF + ] # when, then - permutations = itertools.permutations(services) - for permutation in permutations: - permutation = list(permutation) - with pytest.raises(ValueError): - ServiceDependencies(permutation).boot_order() - - def test_services_dependency_common(self): - # given - service_a = CoreService() - service_a.name = "a" - service_b = CoreService() - service_b.name = "b" - service_c = CoreService() - service_c.name = "c" - service_d = CoreService() - service_d.name = "d" - service_a.dependencies = (service_b.name,) - service_c.dependencies = (service_d.name, service_b.name) - services = [service_a, service_b, service_c, service_d] - expected = {service_a.name, service_b.name, service_c.name, service_d.name} - - # when - permutations = itertools.permutations(services) - for permutation in permutations: - permutation = list(permutation) - results = ServiceDependencies(permutation).boot_order() - - # then - for result in results: - assert len(result) == 4 - result_set = {x.name for x in result} - a_index = result.index(service_a) - b_index = result.index(service_b) - c_index = result.index(service_c) - d_index = result.index(service_d) - assert b_index < a_index - assert d_index < c_index - assert b_index < c_index - assert expected == result_set - - def test_services_dependency_common2(self): - # given - service_a = CoreService() - service_a.name = "a" - service_b = CoreService() - service_b.name = "b" - service_c = CoreService() - service_c.name = "c" - service_d = CoreService() - 
service_d.name = "d" - service_a.dependencies = (service_b.name,) - service_b.dependencies = (service_c.name, service_d.name) - service_c.dependencies = (service_d.name,) - services = [service_a, service_b, service_c, service_d] - expected = {service_a.name, service_b.name, service_c.name, service_d.name} - - # when - permutations = itertools.permutations(services) - for permutation in permutations: - permutation = list(permutation) - results = ServiceDependencies(permutation).boot_order() - - # then - for result in results: - assert len(result) == 4 - result_set = {x.name for x in result} - a_index = result.index(service_a) - b_index = result.index(service_b) - c_index = result.index(service_c) - d_index = result.index(service_d) - assert b_index < a_index - assert c_index < b_index - assert d_index < b_index - assert d_index < c_index - assert expected == result_set + with pytest.raises(ValueError): + ServiceDependencies(services).boot_paths() diff --git a/daemon/tests/test_utils.py b/daemon/tests/test_utils.py index 21d092ac..997dce0f 100644 --- a/daemon/tests/test_utils.py +++ b/daemon/tests/test_utils.py @@ -1,6 +1,4 @@ -import netaddr - -from core import utils +from core.misc import utils class TestUtils: @@ -9,7 +7,7 @@ class TestUtils: no_args = "()" one_arg = "('one',)" two_args = "('one', 'two')" - unicode_args = "('one', 'two', 'three')" + unicode_args = u"('one', 'two', 'three')" # when no_args = utils.make_tuple_fromstr(no_args, str) @@ -22,7 +20,3 @@ class TestUtils: assert len(one_arg) == 1 assert len(two_args) == 2 assert len(unicode_args) == 3 - - def test_random_mac(self): - value = utils.random_mac() - assert netaddr.EUI(value) is not None diff --git a/daemon/tests/test_xml.py b/daemon/tests/test_xml.py index 6841da8e..058a5e7b 100644 --- a/daemon/tests/test_xml.py +++ b/daemon/tests/test_xml.py @@ -1,41 +1,31 @@ -from pathlib import Path -from tempfile import TemporaryFile from xml.etree import ElementTree import pytest -from core.emulator.data import IpPrefixes, LinkOptions -from core.emulator.enumerations import EventTypes -from core.emulator.session import Session -from core.errors import CoreError -from core.location.mobility import BasicRangeModel -from core.nodes.base import CoreNode -from core.nodes.network import SwitchNode, WlanNode +from core.emane.ieee80211abg import EmaneIeee80211abgModel +from core.emulator.emudata import NodeOptions +from core.enumerations import NodeTypes +from core.mobility import BasicRangeModel from core.services.utility import SshService class TestXml: - def test_xml_hooks(self, session: Session, tmpdir: TemporaryFile): + def test_xml_hooks(self, session, tmpdir): """ Test save/load hooks in xml. 
:param session: session for test :param tmpdir: tmpdir to create data in + :param str version: xml version to write and parse """ - # create hooks + # create hook file_name = "runtime_hook.sh" data = "#!/bin/sh\necho hello" - state = EventTypes.RUNTIME_STATE - session.add_hook(state, file_name, data) - - file_name = "instantiation_hook.sh" - data = "#!/bin/sh\necho hello" - state = EventTypes.INSTANTIATION_STATE - session.add_hook(state, file_name, data) + session.set_hook("hook:4", file_name, None, data) # save xml xml_file = tmpdir.join("session.xml") - file_path = Path(xml_file.strpath) + file_path = xml_file.strpath session.save_xml(file_path) # verify xml file was created and can be parsed @@ -49,37 +39,43 @@ class TestXml: session.open_xml(file_path, start=True) # verify nodes have been recreated - runtime_hooks = session.hooks.get(state) + runtime_hooks = session._hooks.get(4) assert runtime_hooks runtime_hook = runtime_hooks[0] assert file_name == runtime_hook[0] assert data == runtime_hook[1] - def test_xml_ptp( - self, session: Session, tmpdir: TemporaryFile, ip_prefixes: IpPrefixes - ): + def test_xml_ptp(self, session, tmpdir, ip_prefixes): """ Test xml client methods for a ptp network. :param session: session for test :param tmpdir: tmpdir to create data in + :param str version: xml version to write and parse :param ip_prefixes: generates ip addresses for nodes """ - # create nodes - node1 = session.add_node(CoreNode) - node2 = session.add_node(CoreNode) + # create ptp + ptp_node = session.add_node(_type=NodeTypes.PEER_TO_PEER) - # link nodes - iface1_data = ip_prefixes.create_iface(node1) - iface2_data = ip_prefixes.create_iface(node2) - session.add_link(node1.id, node2.id, iface1_data, iface2_data) + # create nodes + node_one = session.add_node() + node_two = session.add_node() + + # link nodes to ptp net + for node in [node_one, node_two]: + interface = ip_prefixes.create_interface(node) + session.add_link(node.objid, ptp_node.objid, interface_one=interface) # instantiate session session.instantiate() + # get ids for nodes + n1_id = node_one.objid + n2_id = node_two.objid + # save xml xml_file = tmpdir.join("session.xml") - file_path = Path(xml_file.strpath) + file_path = xml_file.strpath session.save_xml(file_path) # verify xml file was created and can be parsed @@ -90,54 +86,56 @@ class TestXml: session.shutdown() # verify nodes have been removed from session - with pytest.raises(CoreError): - assert not session.get_node(node1.id, CoreNode) - with pytest.raises(CoreError): - assert not session.get_node(node2.id, CoreNode) - # verify no links are known - assert len(session.link_manager.links()) == 0 + with pytest.raises(KeyError): + assert not session.get_object(n1_id) + with pytest.raises(KeyError): + assert not session.get_object(n2_id) # load saved xml session.open_xml(file_path, start=True) # verify nodes have been recreated - assert session.get_node(node1.id, CoreNode) - assert session.get_node(node2.id, CoreNode) - assert len(session.link_manager.links()) == 1 + assert session.get_object(n1_id) + assert session.get_object(n2_id) - def test_xml_ptp_services( - self, session: Session, tmpdir: TemporaryFile, ip_prefixes: IpPrefixes - ): + def test_xml_ptp_services(self, session, tmpdir, ip_prefixes): """ Test xml client methods for a ptp neetwork. 
:param session: session for test :param tmpdir: tmpdir to create data in + :param str version: xml version to write and parse :param ip_prefixes: generates ip addresses for nodes """ + # create ptp + ptp_node = session.add_node(_type=NodeTypes.PEER_TO_PEER) + # create nodes - node1 = session.add_node(CoreNode) - node2 = session.add_node(CoreNode) + node_options = NodeOptions(model="host") + node_one = session.add_node(node_options=node_options) + node_two = session.add_node() # link nodes to ptp net - iface1_data = ip_prefixes.create_iface(node1) - iface2_data = ip_prefixes.create_iface(node2) - session.add_link(node1.id, node2.id, iface1_data, iface2_data) + for node in [node_one, node_two]: + interface = ip_prefixes.create_interface(node) + session.add_link(node.objid, ptp_node.objid, interface_one=interface) # set custom values for node service - session.services.set_service(node1.id, SshService.name) + session.services.set_service(node_one.objid, SshService.name) service_file = SshService.configs[0] file_data = "# test" - session.services.set_service_file( - node1.id, SshService.name, service_file, file_data - ) + session.services.set_service_file(node_one.objid, SshService.name, service_file, file_data) # instantiate session session.instantiate() + # get ids for nodes + n1_id = node_one.objid + n2_id = node_two.objid + # save xml xml_file = tmpdir.join("session.xml") - file_path = Path(xml_file.strpath) + file_path = xml_file.strpath session.save_xml(file_path) # verify xml file was created and can be parsed @@ -148,53 +146,57 @@ class TestXml: session.shutdown() # verify nodes have been removed from session - with pytest.raises(CoreError): - assert not session.get_node(node1.id, CoreNode) - with pytest.raises(CoreError): - assert not session.get_node(node2.id, CoreNode) + with pytest.raises(KeyError): + assert not session.get_object(n1_id) + with pytest.raises(KeyError): + assert not session.get_object(n2_id) # load saved xml session.open_xml(file_path, start=True) # retrieve custom service - service = session.services.get_service(node1.id, SshService.name) + service = session.services.get_service(node_one.objid, SshService.name) # verify nodes have been recreated - assert session.get_node(node1.id, CoreNode) - assert session.get_node(node2.id, CoreNode) + assert session.get_object(n1_id) + assert session.get_object(n2_id) assert service.config_data.get(service_file) == file_data - def test_xml_mobility( - self, session: Session, tmpdir: TemporaryFile, ip_prefixes: IpPrefixes - ): + def test_xml_mobility(self, session, tmpdir, ip_prefixes): """ Test xml client methods for mobility. 
:param session: session for test :param tmpdir: tmpdir to create data in + :param str version: xml version to write and parse :param ip_prefixes: generates ip addresses for nodes """ # create wlan - wlan = session.add_node(WlanNode) - session.mobility.set_model(wlan, BasicRangeModel, {"test": "1"}) + wlan_node = session.add_node(_type=NodeTypes.WIRELESS_LAN) + session.mobility.set_model(wlan_node, BasicRangeModel, {"test": "1"}) # create nodes - options = CoreNode.create_options() - options.model = "mdr" - node1 = session.add_node(CoreNode, options=options) - node2 = session.add_node(CoreNode, options=options) + node_options = NodeOptions() + node_options.set_position(0, 0) + node_one = session.create_wireless_node(node_options=node_options) + node_two = session.create_wireless_node(node_options=node_options) # link nodes - for node in [node1, node2]: - iface_data = ip_prefixes.create_iface(node) - session.add_link(node.id, wlan.id, iface1_data=iface_data) + for node in [node_one, node_two]: + interface = ip_prefixes.create_interface(node) + session.add_link(node.objid, wlan_node.objid, interface_one=interface) # instantiate session session.instantiate() + # get ids for nodes + wlan_id = wlan_node.objid + n1_id = node_one.objid + n2_id = node_two.objid + # save xml xml_file = tmpdir.join("session.xml") - file_path = Path(xml_file.strpath) + file_path = xml_file.strpath session.save_xml(file_path) # verify xml file was created and can be parsed @@ -205,237 +207,63 @@ class TestXml: session.shutdown() # verify nodes have been removed from session - with pytest.raises(CoreError): - assert not session.get_node(node1.id, CoreNode) - with pytest.raises(CoreError): - assert not session.get_node(node2.id, CoreNode) + with pytest.raises(KeyError): + assert not session.get_object(n1_id) + with pytest.raises(KeyError): + assert not session.get_object(n2_id) # load saved xml session.open_xml(file_path, start=True) # retrieve configuration we set originally - value = str(session.mobility.get_config("test", wlan.id, BasicRangeModel.name)) + value = str(session.mobility.get_config("test", wlan_id, BasicRangeModel.name)) # verify nodes and configuration were restored - assert session.get_node(node1.id, CoreNode) - assert session.get_node(node2.id, CoreNode) - assert session.get_node(wlan.id, WlanNode) + assert session.get_object(n1_id) + assert session.get_object(n2_id) + assert session.get_object(wlan_id) assert value == "1" - def test_network_to_network(self, session: Session, tmpdir: TemporaryFile): + def test_xml_emane(self, session, tmpdir, ip_prefixes): """ - Test xml generation when dealing with network to network nodes. 
- - :param session: session for test - :param tmpdir: tmpdir to create data in - """ - # create nodes - switch1 = session.add_node(SwitchNode) - switch2 = session.add_node(SwitchNode) - - # link nodes - session.add_link(switch1.id, switch2.id) - - # instantiate session - session.instantiate() - - # save xml - xml_file = tmpdir.join("session.xml") - file_path = Path(xml_file.strpath) - session.save_xml(file_path) - - # verify xml file was created and can be parsed - assert xml_file.isfile() - assert ElementTree.parse(file_path) - - # stop current session, clearing data - session.shutdown() - - # verify nodes have been removed from session - with pytest.raises(CoreError): - assert not session.get_node(switch1.id, SwitchNode) - with pytest.raises(CoreError): - assert not session.get_node(switch2.id, SwitchNode) - - # load saved xml - session.open_xml(file_path, start=True) - - # verify nodes have been recreated - switch1 = session.get_node(switch1.id, SwitchNode) - switch2 = session.get_node(switch2.id, SwitchNode) - assert switch1 - assert switch2 - assert len(session.link_manager.links()) == 1 - - def test_link_options( - self, session: Session, tmpdir: TemporaryFile, ip_prefixes: IpPrefixes - ): - """ - Test xml client methods for a ptp network. + Test xml client methods for emane. :param session: session for test :param tmpdir: tmpdir to create data in + :param str version: xml version to write and parse :param ip_prefixes: generates ip addresses for nodes """ - # create nodes - node1 = session.add_node(CoreNode) - iface1_data = ip_prefixes.create_iface(node1) - switch = session.add_node(SwitchNode) - - # create link - options = LinkOptions() - options.loss = 10.5 - options.bandwidth = 50000 - options.jitter = 10 - options.delay = 30 - options.dup = 5 - options.buffer = 100 - session.add_link(node1.id, switch.id, iface1_data, options=options) - - # instantiate session - session.instantiate() - - # save xml - xml_file = tmpdir.join("session.xml") - file_path = Path(xml_file.strpath) - session.save_xml(file_path) - - # verify xml file was created and can be parsed - assert xml_file.isfile() - assert ElementTree.parse(file_path) - - # stop current session, clearing data - session.shutdown() - - # verify nodes have been removed from session - with pytest.raises(CoreError): - assert not session.get_node(node1.id, CoreNode) - with pytest.raises(CoreError): - assert not session.get_node(switch.id, SwitchNode) - - # load saved xml - session.open_xml(file_path, start=True) - - # verify nodes have been recreated - assert session.get_node(node1.id, CoreNode) - assert session.get_node(switch.id, SwitchNode) - assert len(session.link_manager.links()) == 1 - link = list(session.link_manager.links())[0] - link_options = link.options() - assert options.loss == link_options.loss - assert options.bandwidth == link_options.bandwidth - assert options.jitter == link_options.jitter - assert options.delay == link_options.delay - assert options.dup == link_options.dup - assert options.buffer == link_options.buffer - - def test_link_options_ptp( - self, session: Session, tmpdir: TemporaryFile, ip_prefixes: IpPrefixes - ): - """ - Test xml client methods for a ptp network. 
- - :param session: session for test - :param tmpdir: tmpdir to create data in - :param ip_prefixes: generates ip addresses for nodes - """ - # create nodes - node1 = session.add_node(CoreNode) - iface1_data = ip_prefixes.create_iface(node1) - node2 = session.add_node(CoreNode) - iface2_data = ip_prefixes.create_iface(node2) - - # create link - options = LinkOptions() - options.loss = 10.5 - options.bandwidth = 50000 - options.jitter = 10 - options.delay = 30 - options.dup = 5 - options.buffer = 100 - session.add_link(node1.id, node2.id, iface1_data, iface2_data, options) - - # instantiate session - session.instantiate() - - # save xml - xml_file = tmpdir.join("session.xml") - file_path = Path(xml_file.strpath) - session.save_xml(file_path) - - # verify xml file was created and can be parsed - assert xml_file.isfile() - assert ElementTree.parse(file_path) - - # stop current session, clearing data - session.shutdown() - - # verify nodes have been removed from session - with pytest.raises(CoreError): - assert not session.get_node(node1.id, CoreNode) - with pytest.raises(CoreError): - assert not session.get_node(node2.id, CoreNode) - - # load saved xml - session.open_xml(file_path, start=True) - - # verify nodes have been recreated - assert session.get_node(node1.id, CoreNode) - assert session.get_node(node2.id, CoreNode) - assert len(session.link_manager.links()) == 1 - link = list(session.link_manager.links())[0] - link_options = link.options() - assert options.loss == link_options.loss - assert options.bandwidth == link_options.bandwidth - assert options.jitter == link_options.jitter - assert options.delay == link_options.delay - assert options.dup == link_options.dup - assert options.buffer == link_options.buffer - - def test_link_options_bidirectional( - self, session: Session, tmpdir: TemporaryFile, ip_prefixes: IpPrefixes - ): - """ - Test xml client methods for a ptp network. 
- - :param session: session for test - :param tmpdir: tmpdir to create data in - :param ip_prefixes: generates ip addresses for nodes - """ - # create nodes - node1 = session.add_node(CoreNode) - iface1_data = ip_prefixes.create_iface(node1) - node2 = session.add_node(CoreNode) - iface2_data = ip_prefixes.create_iface(node2) - - # create link - options1 = LinkOptions() - options1.unidirectional = 1 - options1.bandwidth = 5000 - options1.delay = 10 - options1.loss = 10.5 - options1.dup = 5 - options1.jitter = 5 - options1.buffer = 50 - iface1, iface2 = session.add_link( - node1.id, node2.id, iface1_data, iface2_data, options1 + # create emane node for networking the core nodes + emane_network = session.create_emane_network( + EmaneIeee80211abgModel, + geo_reference=(47.57917, -122.13232, 2.00000), + config={"test": "1"} ) - options2 = LinkOptions() - options2.unidirectional = 1 - options2.bandwidth = 10000 - options2.delay = 20 - options2.loss = 10 - options2.dup = 10 - options2.jitter = 10 - options2.buffer = 100 - session.update_link(node2.id, node1.id, iface2.id, iface1.id, options2) + emane_network.setposition(x=80, y=50) + + # create nodes + node_options = NodeOptions() + node_options.set_position(150, 150) + node_one = session.create_wireless_node(node_options=node_options) + node_options.set_position(300, 150) + node_two = session.create_wireless_node(node_options=node_options) + + for i, node in enumerate([node_one, node_two]): + node.setposition(x=150 * (i + 1), y=150) + interface = ip_prefixes.create_interface(node) + session.add_link(node.objid, emane_network.objid, interface_one=interface) # instantiate session session.instantiate() + # get ids for nodes + emane_id = emane_network.objid + n1_id = node_one.objid + n2_id = node_two.objid + # save xml xml_file = tmpdir.join("session.xml") - file_path = Path(xml_file.strpath) + file_path = xml_file.strpath session.save_xml(file_path) # verify xml file was created and can be parsed @@ -446,27 +274,19 @@ class TestXml: session.shutdown() # verify nodes have been removed from session - with pytest.raises(CoreError): - assert not session.get_node(node1.id, CoreNode) - with pytest.raises(CoreError): - assert not session.get_node(node2.id, CoreNode) + with pytest.raises(KeyError): + assert not session.get_object(n1_id) + with pytest.raises(KeyError): + assert not session.get_object(n2_id) # load saved xml session.open_xml(file_path, start=True) - # verify nodes have been recreated - assert session.get_node(node1.id, CoreNode) - assert session.get_node(node2.id, CoreNode) - assert len(session.link_manager.links()) == 1 - assert options1.bandwidth == iface1.options.bandwidth - assert options1.delay == iface1.options.delay - assert options1.loss == iface1.options.loss - assert options1.dup == iface1.options.dup - assert options1.jitter == iface1.options.jitter - assert options1.buffer == iface1.options.buffer - assert options2.bandwidth == iface2.options.bandwidth - assert options2.delay == iface2.options.delay - assert options2.loss == iface2.options.loss - assert options2.dup == iface2.options.dup - assert options2.jitter == iface2.options.jitter - assert options2.buffer == iface2.options.buffer + # retrieve configuration we set originally + value = str(session.emane.get_config("test", emane_id, EmaneIeee80211abgModel.name)) + + # verify nodes and configuration were restored + assert session.get_object(n1_id) + assert session.get_object(n2_id) + assert session.get_object(emane_id) + assert value == "1" diff --git 
a/dockerfiles/Dockerfile.centos b/dockerfiles/Dockerfile.centos deleted file mode 100644 index 06654486..00000000 --- a/dockerfiles/Dockerfile.centos +++ /dev/null @@ -1,78 +0,0 @@ -# syntax=docker/dockerfile:1 -FROM centos:7 -LABEL Description="CORE Docker CentOS Image" - -ARG PREFIX=/usr -ARG BRANCH=master -ENV LANG en_US.UTF-8 -ARG PROTOC_VERSION=3.19.6 -ARG VENV_PATH=/opt/core/venv -ENV PATH="$PATH:${VENV_PATH}/bin" -WORKDIR /opt - -# install system dependencies -RUN yum -y update && \ - yum install -y \ - xterm \ - git \ - sudo \ - wget \ - tzdata \ - unzip \ - libpcap-devel \ - libpcre3-devel \ - libxml2-devel \ - protobuf-devel \ - unzip \ - uuid-devel \ - tcpdump \ - make && \ - yum-builddep -y python3 && \ - yum autoremove -y && \ - yum install -y hostname - -# install python3.9 -RUN wget https://www.python.org/ftp/python/3.9.15/Python-3.9.15.tgz && \ - tar xf Python-3.9.15.tgz && \ - cd Python-3.9.15 && \ - ./configure --enable-optimizations --with-ensurepip=install && \ - make -j$(nproc) altinstall && \ - python3.9 -m pip install --upgrade pip && \ - cd /opt && \ - rm -rf Python-3.9.15 - -# install core -RUN git clone https://github.com/coreemu/core && \ - cd core && \ - git checkout ${BRANCH} && \ - NO_SYSTEM=1 PYTHON=/usr/local/bin/python3.9 ./setup.sh && \ - PATH=/root/.local/bin:$PATH PYTHON=/usr/local/bin/python3.9 inv install -v -p ${PREFIX} --no-python - -# install emane -RUN wget -q https://adjacentlink.com/downloads/emane/emane-1.3.3-release-1.el7.x86_64.tar.gz && \ - tar xf emane-1.3.3-release-1.el7.x86_64.tar.gz && \ - cd emane-1.3.3-release-1/rpms/el7/x86_64 && \ - yum install -y epel-release && \ - yum install -y ./openstatistic*.rpm ./emane*.rpm ./python3-emane_*.rpm && \ - cd ../../../.. && \ - rm emane-1.3.3-release-1.el7.x86_64.tar.gz && \ - rm -rf emane-1.3.3-release-1 - -# install emane python bindings -RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOC_VERSION}/protoc-${PROTOC_VERSION}-linux-x86_64.zip && \ - mkdir protoc && \ - unzip protoc-${PROTOC_VERSION}-linux-x86_64.zip -d protoc && \ - git clone https://github.com/adjacentlink/emane.git && \ - cd emane && \ - git checkout v1.3.3 && \ - ./autogen.sh && \ - PYTHON=${VENV_PATH}/bin/python ./configure --prefix=/usr && \ - cd src/python && \ - PATH=/opt/protoc/bin:$PATH make && \ - ${VENV_PATH}/bin/python -m pip install . 
&& \ - cd /opt && \ - rm -rf protoc && \ - rm -rf emane && \ - rm -f protoc-${PROTOC_VERSION}-linux-x86_64.zip - -WORKDIR /root diff --git a/dockerfiles/Dockerfile.centos-package b/dockerfiles/Dockerfile.centos-package deleted file mode 100644 index 8d4a1296..00000000 --- a/dockerfiles/Dockerfile.centos-package +++ /dev/null @@ -1,89 +0,0 @@ -# syntax=docker/dockerfile:1 -FROM centos:7 -LABEL Description="CORE CentOS Image" - -ENV LANG en_US.UTF-8 -ARG PROTOC_VERSION=3.19.6 -ARG VENV_PATH=/opt/core/venv -ENV PATH="$PATH:${VENV_PATH}/bin" -WORKDIR /opt - -# install basic dependencies -RUN yum -y update && \ - yum install -y \ - xterm \ - git \ - sudo \ - wget \ - tzdata \ - unzip \ - libpcap-devel \ - libpcre3-devel \ - libxml2-devel \ - protobuf-devel \ - unzip \ - uuid-devel \ - tcpdump \ - automake \ - gawk \ - libreadline-devel \ - libtool \ - pkg-config \ - make && \ - yum-builddep -y python3 && \ - yum autoremove -y && \ - yum install -y hostname - -# install python3.9 -RUN wget https://www.python.org/ftp/python/3.9.15/Python-3.9.15.tgz && \ - tar xf Python-3.9.15.tgz && \ - cd Python-3.9.15 && \ - ./configure --enable-optimizations --with-ensurepip=install && \ - make -j$(nproc) altinstall && \ - python3.9 -m pip install --upgrade pip && \ - cd /opt && \ - rm -rf Python-3.9.15 - -# install core -COPY core_*.rpm . -RUN PYTHON=/usr/local/bin/python3.9 yum install -y ./core_*.rpm && \ - rm -f core_*.rpm - -# install ospf mdr -RUN git clone https://github.com/USNavalResearchLaboratory/ospf-mdr.git && \ - cd ospf-mdr && \ - ./bootstrap.sh && \ - ./configure --disable-doc --enable-user=root --enable-group=root \ - --with-cflags=-ggdb --sysconfdir=/usr/local/etc/quagga --enable-vtysh \ - --localstatedir=/var/run/quagga && \ - make -j$(nproc) && \ - make install && \ - cd /opt && \ - rm -rf ospf-mdr - - # install emane -RUN wget -q https://adjacentlink.com/downloads/emane/emane-1.3.3-release-1.el7.x86_64.tar.gz && \ - tar xf emane-1.3.3-release-1.el7.x86_64.tar.gz && \ - cd emane-1.3.3-release-1/rpms/el7/x86_64 && \ - yum install -y epel-release && \ - yum install -y ./openstatistic*.rpm ./emane*.rpm ./python3-emane_*.rpm && \ - cd ../../../.. && \ - rm emane-1.3.3-release-1.el7.x86_64.tar.gz && \ - rm -rf emane-1.3.3-release-1 - -# install emane python bindings -RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOC_VERSION}/protoc-${PROTOC_VERSION}-linux-x86_64.zip && \ - mkdir protoc && \ - unzip protoc-${PROTOC_VERSION}-linux-x86_64.zip -d protoc && \ - git clone https://github.com/adjacentlink/emane.git && \ - cd emane && \ - git checkout v1.3.3 && \ - ./autogen.sh && \ - PYTHON=${VENV_PATH}/bin/python ./configure --prefix=/usr && \ - cd src/python && \ - PATH=/opt/protoc/bin:$PATH make && \ - ${VENV_PATH}/bin/python -m pip install . 
&& \ - cd /opt && \ - rm -rf protoc && \ - rm -rf emane && \ - rm -f protoc-${PROTOC_VERSION}-linux-x86_64.zip diff --git a/dockerfiles/Dockerfile.ubuntu-package b/dockerfiles/Dockerfile.ubuntu-package deleted file mode 100644 index b8f66165..00000000 --- a/dockerfiles/Dockerfile.ubuntu-package +++ /dev/null @@ -1,75 +0,0 @@ -# syntax=docker/dockerfile:1 -FROM ubuntu:22.04 -LABEL Description="CORE Docker Ubuntu Image" - -ENV DEBIAN_FRONTEND=noninteractive -ARG PROTOC_VERSION=3.19.6 -ARG VENV_PATH=/opt/core/venv -ENV PATH="$PATH:${VENV_PATH}/bin" -WORKDIR /opt - -# install basic dependencies -RUN apt-get update -y && \ - apt-get install -y --no-install-recommends \ - ca-certificates \ - python3 \ - python3-tk \ - python3-pip \ - python3-venv \ - libpcap-dev \ - libpcre3-dev \ - libprotobuf-dev \ - libxml2-dev \ - protobuf-compiler \ - unzip \ - uuid-dev \ - automake \ - gawk \ - git \ - wget \ - libreadline-dev \ - libtool \ - pkg-config \ - g++ \ - make \ - iputils-ping \ - tcpdump && \ - apt-get autoremove -y - -# install core -COPY core_*.deb . -RUN apt-get install -y ./core_*.deb && \ - rm -f core_*.deb - -# install ospf mdr -RUN git clone https://github.com/USNavalResearchLaboratory/ospf-mdr.git && \ - cd ospf-mdr && \ - ./bootstrap.sh && \ - ./configure --disable-doc --enable-user=root --enable-group=root \ - --with-cflags=-ggdb --sysconfdir=/usr/local/etc/quagga --enable-vtysh \ - --localstatedir=/var/run/quagga && \ - make -j$(nproc) && \ - make install && \ - cd /opt && \ - rm -rf ospf-mdr - -# install emane -RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOC_VERSION}/protoc-${PROTOC_VERSION}-linux-x86_64.zip && \ - mkdir protoc && \ - unzip protoc-${PROTOC_VERSION}-linux-x86_64.zip -d protoc && \ - git clone https://github.com/adjacentlink/emane.git && \ - cd emane && \ - ./autogen.sh && \ - ./configure --prefix=/usr && \ - make -j$(nproc) && \ - make install && \ - cd src/python && \ - make clean && \ - PATH=/opt/protoc/bin:$PATH make && \ - ${VENV_PATH}/bin/python -m pip install . 
&& \ - cd /opt && \ - rm -rf protoc && \ - rm -rf emane && \ - rm -f protoc-${PROTOC_VERSION}-linux-x86_64.zip - -WORKDIR /root diff --git a/docs/architecture.md b/docs/architecture.md index b9c5c91c..8599afba 100644 --- a/docs/architecture.md +++ b/docs/architecture.md @@ -1,58 +1,39 @@ # CORE Architecture +* Table of Contents +{:toc} + ## Main Components -* core-daemon - * Manages emulated sessions of nodes and links for a given network - * Nodes are created using Linux namespaces - * Links are created using Linux bridges and virtual ethernet peers - * Packets sent over links are manipulated using traffic control - * Provides gRPC API -* core-gui - * GUI and daemon communicate over gRPC API - * Drag and drop creation for nodes and links - * Can launch terminals for emulated nodes in running sessions - * Can save/open scenario files to recreate previous sessions -* vnoded - * Command line utility for creating CORE node namespaces -* vcmd - * Command line utility for sending shell commands to nodes +* CORE Daemon + * Manages emulation sessions + * Builds the emulated networks using kernel virtualization for nodes and some form of bridging and packet manipulation for virtual networks + * Nodes and networks come together via interfaces installed on nodes + * Controlled via the CORE GUI + * Written in python and can be scripted, given direct control of scenarios +* CORE GUI + * GUI and daemon communicate using a custom, asynchronous, sockets-based API, known as the CORE API + * Drag and drop creation for nodes and network interfaces + * Can launch terminals for emulated nodes in running scenarios + * Can save/open scenario files to recreate previous sessions + * TCL/TK program -![](static/architecture.png) - -## Sessions - -CORE can create and run multiple emulated sessions at once, below is an -overview of the states a session will transition between during typical -GUI interactions. - -![](static/workflow.png) +![](static/core-architecture.jpg) ## How Does it Work? -The CORE framework runs on Linux and uses Linux namespacing for creating -node containers. These nodes are linked together using Linux bridging and -virtual interfaces. CORE sessions are a set of nodes and links operating -together for a specific purpose. +A CORE node is a lightweight virtual machine. The CORE framework runs on Linux. CORE uses Linux network namespace virtualization to build virtual nodes, and ties them together with virtual networks using Linux Ethernet bridging. ### Linux -Linux network namespaces (also known as netns) is the primary -technique used by CORE. Most recent Linux distributions have -namespaces-enabled kernels out of the box. Each namespace has its own process -environment and private network stack. Network namespaces share the same -filesystem in CORE. +Linux network namespaces (also known as netns, LXC, or [Linux containers](http://lxc.sourceforge.net/)) is the primary virtualization technique used by CORE. LXC has been part of the mainline Linux kernel since 2.6.24. Most recent Linux distributions have namespaces-enabled kernels out of the box. A namespace is created using the ```clone()``` system call. Each namespace has its own process environment and private network stack. Network namespaces share the same filesystem in CORE. -CORE combines these namespaces with Linux Ethernet bridging to form networks. -Link characteristics are applied using Linux Netem queuing disciplines. -Nftables provides Ethernet frame filtering on Linux bridges. 
Wireless networks are -emulated by controlling which interfaces can send and receive with nftables -rules. +CORE combines these namespaces with Linux Ethernet bridging to form networks. Link characteristics are applied using Linux Netem queuing disciplines. Ebtables is Ethernet frame filtering on Linux bridges. Wireless networks are emulated by controlling which interfaces can send and receive with ebtables rules. + +## Prior Work + +The Tcl/Tk CORE GUI was originally derived from the open source [IMUNES](http://imunes.net) project from the University of Zagreb as a custom project within Boeing Research and Technology's Network Technology research group in 2004. Since then they have developed the CORE framework to use Linux virtualization, have developed a Python framework, and made numerous user- and kernel-space developments, such as support for wireless networks, IPsec, the ability to distribute emulations, simulation integration, and more. The IMUNES project also consists of userspace and kernel components. ## Open Source Project and Resources -CORE has been released by Boeing to the open source community under the BSD -license. If you find CORE useful for your work, please contribute back to the -project. Contributions can be as simple as reporting a bug, dropping a line of -encouragement, or can also include submitting patches or maintaining aspects -of the tool. +CORE has been released by Boeing to the open source community under the BSD license. If you find CORE useful for your work, please contribute back to the project. Contributions can be as simple as reporting a bug, dropping a line of encouragement or technical suggestions to the mailing lists, or can also include submitting patches or maintaining aspects of the tool. For contributing to CORE, please visit [CORE GitHub](https://github.com/coreemu/core). diff --git a/docs/configservices.md b/docs/configservices.md deleted file mode 100644 index da81aa48..00000000 --- a/docs/configservices.md +++ /dev/null @@ -1,196 +0,0 @@ -# Config Services - -## Overview - -Config services are a newer version of services for CORE, that leverage a -templating engine, for more robust service file creation. They also -have the power of configuration key/value pairs that values that can be -defined and displayed within the GUI, to help further tweak a service, -as needed. - -CORE services are a convenience for creating reusable dynamic scripts -to run on nodes, for carrying out specific task(s). - -This boilds down to the following functions: - -* generating files the service will use, either directly for commands or for configuration -* command(s) for starting a service -* command(s) for validating a service -* command(s) for stopping a service - -Most CORE nodes will have a default set of services to run, associated with -them. You can however customize the set of services a node will use. Or even -further define a new node type within the GUI, with a set of services, that -will allow quickly dragging and dropping that node type during creation. 
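The overview above describes services as generated files plus startup/validate/shutdown commands, and the architecture notes describe nodes as namespaces joined by netem-shaped links. Since CORE scenarios can also be driven directly from Python, a compact sketch of that scripted workflow may help readers who only know the GUI. This is a minimal sketch assuming the newer `Session` API that appears on the removed side of this diff; the `CoreEmu` bootstrap, the state handling, the `10.0.0.0/24` prefix, and the option values are illustrative assumptions, and running it requires root.

```python
from core.emulator.coreemu import CoreEmu
from core.emulator.data import IpPrefixes, LinkOptions
from core.emulator.enumerations import EventTypes
from core.nodes.base import CoreNode

# bootstrap an emulation session (assumed newer, removed-side API)
coreemu = CoreEmu()
session = coreemu.create_session()
session.set_state(EventTypes.CONFIGURATION_STATE)  # assumption: move to config state before adding nodes

# two namespace-backed nodes with generated interface addresses
ip_prefixes = IpPrefixes(ip4_prefix="10.0.0.0/24")  # illustrative prefix
node1 = session.add_node(CoreNode)
node2 = session.add_node(CoreNode)
iface1_data = ip_prefixes.create_iface(node1)
iface2_data = ip_prefixes.create_iface(node2)

# link characteristics are applied via netem, driven by LinkOptions
options = LinkOptions(delay=20000, loss=1.0)  # illustrative values
session.add_link(node1.id, node2.id, iface1_data, iface2_data, options=options)

# boot the nodes and their services, then tear the session down
session.instantiate()
session.shutdown()
```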
- -## Available Services - -| Service Group | Services | -|----------------------------------|-----------------------------------------------------------------------| -| [BIRD](services/bird.md) | BGP, OSPF, RADV, RIP, Static | -| [EMANE](services/emane.md) | Transport Service | -| [FRR](services/frr.md) | BABEL, BGP, OSPFv2, OSPFv3, PIMD, RIP, RIPNG, Zebra | -| [NRL](services/nrl.md) | arouted, MGEN Sink, MGEN Actor, NHDP, OLSR, OLSRORG, OLSRv2, SMF | -| [Quagga](services/quagga.md) | BABEL, BGP, OSPFv2, OSPFv3, OSPFv3 MDR, RIP, RIPNG, XPIMD, Zebra | -| [SDN](services/sdn.md) | OVS, RYU | -| [Security](services/security.md) | Firewall, IPsec, NAT, VPN Client, VPN Server | -| [Utility](services/utility.md) | ATD, Routing Utils, DHCP, FTP, IP Forward, PCAP, RADVD, SSF, UCARP | -| [XORP](services/xorp.md) | BGP, OLSR, OSPFv2, OSPFv3, PIMSM4, PIMSM6, RIP, RIPNG, Router Manager | - -## Node Types and Default Services - -Here are the default node types and their services: - -| Node Type | Services | -|-----------|--------------------------------------------------------------------------------------------------------------------------------------------| -| *router* | zebra, OSFPv2, OSPFv3, and IPForward services for IGP link-state routing. | -| *PC* | DefaultRoute service for having a default route when connected directly to a router. | -| *mdr* | zebra, OSPFv3MDR, and IPForward services for wireless-optimized MANET Designated Router routing. | -| *prouter* | a physical router, having the same default services as the *router* node type; for incorporating Linux testbed machines into an emulation. | - -Configuration files can be automatically generated by each service. For -example, CORE automatically generates routing protocol configuration for the -router nodes in order to simplify the creation of virtual networks. - -To change the services associated with a node, double-click on the node to -invoke its configuration dialog and click on the *Services...* button, -or right-click a node a choose *Services...* from the menu. -Services are enabled or disabled by clicking on their names. The button next to -each service name allows you to customize all aspects of this service for this -node. For example, special route redistribution commands could be inserted in -to the Quagga routing configuration associated with the zebra service. - -To change the default services associated with a node type, use the Node Types -dialog available from the *Edit* button at the end of the Layer-3 nodes -toolbar, or choose *Node types...* from the *Session* menu. Note that -any new services selected are not applied to existing nodes if the nodes have -been customized. - -The node types are saved in the GUI config file **~/.coregui/config.yaml**. -Keep this in mind when changing the default services for -existing node types; it may be better to simply create a new node type. It is -recommended that you do not change the default built-in node types. - -## New Services - -Services can save time required to configure nodes, especially if a number -of nodes require similar configuration procedures. New services can be -introduced to automate tasks. - -### Creating New Services - -!!! note - - The directory base name used in **custom_services_dir** below should - be unique and should not correspond to any existing Python module name. - For example, don't use the name **subprocess** or **services**. - -1. Modify the example service shown below - to do what you want. 
It could generate config/script files, mount per-node - directories, start processes/scripts, etc. Your file can define one or more - classes to be imported. You can create multiple Python files that will be imported. - -2. Put these files in a directory such as **~/.coregui/custom_services**. - -3. Add a **custom_config_services_dir = ~/.coregui/custom_services** entry to the - /etc/core/core.conf file. - -4. Restart the CORE daemon (core-daemon). Any import errors (Python syntax) - should be displayed in the terminal (or service log, like journalctl). - -5. Start using your custom service on your nodes. You can create a new node - type that uses your service, or change the default services for an existing - node type, or change individual nodes. - -### Example Custom Service - -Below is the skeleton for a custom service with some documentation. Most -people would likely only setup the required class variables **(name/group)**. -Then define the **files** to generate and implement the -**get_text_template** function to dynamically create the files wanted. Finally, -the **startup** commands would be supplied, which typically tend to be -running the shell files generated. - -```python -from typing import Dict, List - -from core.config import ConfigString, ConfigBool, Configuration -from core.configservice.base import ConfigService, ConfigServiceMode, ShadowDir - - -# class that subclasses ConfigService -class ExampleService(ConfigService): - # unique name for your service within CORE - name: str = "Example" - # the group your service is associated with, used for display in GUI - group: str = "ExampleGroup" - # directories that the service should shadow mount, hiding the system directory - directories: List[str] = [ - "/usr/local/core", - ] - # files that this service should generate, defaults to nodes home directory - # or can provide an absolute path to a mounted directory - files: List[str] = [ - "example-start.sh", - "/usr/local/core/file1", - ] - # executables that should exist on path, that this service depends on - executables: List[str] = [] - # other services that this service depends on, can be used to define service start order - dependencies: List[str] = [] - # commands to run to start this service - startup: List[str] = [] - # commands to run to validate this service - validate: List[str] = [] - # commands to run to stop this service - shutdown: List[str] = [] - # validation mode, blocking, non-blocking, and timer - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - # configurable values that this service can use, for file generation - default_configs: List[Configuration] = [ - ConfigString(id="value1", label="Text"), - ConfigBool(id="value2", label="Boolean"), - ConfigString(id="value3", label="Multiple Choice", options=["value1", "value2", "value3"]), - ] - # sets of values to set for the configuration defined above, can be used to - # provide convenient sets of values to typically use - modes: Dict[str, Dict[str, str]] = { - "mode1": {"value1": "value1", "value2": "0", "value3": "value2"}, - "mode2": {"value1": "value2", "value2": "1", "value3": "value3"}, - "mode3": {"value1": "value3", "value2": "0", "value3": "value1"}, - } - # defines directories that this service can help shadow within a node - shadow_directories: List[ShadowDir] = [ - ShadowDir(path="/user/local/core", src="/opt/core") - ] - - def get_text_template(self, name: str) -> str: - return """ - # sample script 1 - # node id(${node.id}) name(${node.name}) - # config: ${config} - echo hello - """ 
-``` - -#### Validation Mode - -Validation modes are used to determine if a service has started up successfully. - -* blocking - startup commands are expected to run until completion and return a 0 exit code -* non-blocking - startup commands are run, but do not wait for completion -* timer - startup commands are run, and an arbitrary amount of time is waited before considering the service started - -#### Shadow Directories - -Shadow directories provide a convenience for copying a directory and the files within -it to a node's home directory, to allow a unique set of per-node files. - -* `ShadowDir(path="/user/local/core")` - copies files at the given location into the node -* `ShadowDir(path="/user/local/core", src="/opt/core")` - copies files to the given location, - but sourced from the provided location -* `ShadowDir(path="/user/local/core", templates=True)` - copies files and treats them as - templates for generation -* `ShadowDir(path="/user/local/core", has_node_paths=True)` - copies files from the given - location, and looks for unique node name directories within it, using a directory named - default, when not present diff --git a/docs/ctrlnet.md b/docs/ctrlnet.md index d20e3a41..5b38191a 100644 --- a/docs/ctrlnet.md +++ b/docs/ctrlnet.md @@ -1,45 +1,23 @@ # CORE Control Network +* Table of Contents +{:toc} + ## Overview -The CORE control network allows the virtual nodes to communicate with their -host environment. There are two types: the primary control network and -auxiliary control networks. The primary control network is used mainly for -communicating with the virtual nodes from host machines and for master-slave -communications in a multi-server distributed environment. Auxiliary control -networks have been introduced for routing namespace hosted emulation -software traffic to the test network. +The CORE control network allows the virtual nodes to communicate with their host environment. There are two types: the primary control network and auxiliary control networks. The primary control network is used mainly for communicating with the virtual nodes from host machines and for master-slave communications in a multi-server distributed environment. Auxiliary control networks have been introduced for routing namespace hosted emulation software traffic to the test network. ## Activating the Primary Control Network -Under the *Session Menu*, the *Options...* dialog has an option to set a -*control network prefix*. +Under the *Session Menu*, the *Options...* dialog has an option to set a *control network prefix*. -This can be set to a network prefix such as *172.16.0.0/24*. A bridge will -be created on the host machine having the last address in the prefix range -(e.g. *172.16.0.254*), and each node will have an extra *ctrl0* control -interface configured with an address corresponding to its node number -(e.g. *172.16.0.3* for *n3*.) +This can be set to a network prefix such as *172.16.0.0/24*. A bridge will be created on the host machine having the last address in the prefix range (e.g. *172.16.0.254*), and each node will have an extra *ctrl0* control interface configured with an address corresponding to its node number (e.g. *172.16.0.3* for *n3*.) -A default for the primary control network may also be specified by setting -the *controlnet* line in the */etc/core/core.conf* configuration file which -new sessions will use by default. To simultaneously run multiple sessions with -control networks, the session option should be used instead of the *core.conf* -default. 
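As a concrete illustration of the *core.conf* default described above, a minimal sketch of the relevant line is shown below; the prefix value simply reuses the example prefix from this section.

```shell
# /etc/core/core.conf
# default control network prefix applied to new sessions
controlnet = 172.16.0.0/24
```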
+A default for the primary control network may also be specified by setting the *controlnet* line in the */etc/core/core.conf* configuration file which new sessions will use by default. To simultaneously run multiple sessions with control networks, the session option should be used instead of the *core.conf* default. -!!! note +**NOTE: If you have a large scenario with more than 253 nodes, use a control network prefix that allows more than the suggested */24*, such as */23* or greater.** - If you have a large scenario with more than 253 nodes, use a control - network prefix that allows more than the suggested */24*, such as */23* or - greater. - -!!! note - - Running a session with a control network can fail if a previous - session has set up a control network and its bridge is still up. Close - the previous session first or wait for it to complete. If unable to, the - **core-daemon** may need to be restarted and the lingering bridge(s) removed - manually. +**IMPORTANT: Running a session with a control network can fail if a previous session has set up a control network and its bridge is still up. Close the previous session first or wait for it to complete. If unable to, the *core-daemon* may need to be restarted and the lingering bridge(s) removed manually.** ```shell # Restart the CORE Daemon @@ -52,62 +30,34 @@ for cb in $ctrlbridges; do sudo brctl delbr $cb done ``` - -!!! note - - If adjustments to the primary control network configuration made in - **/etc/core/core.conf** do not seem to take effect, check if there is anything - set in the *Session Menu*, the *Options...* dialog. They may need to be - cleared. These per session settings override the defaults in - **/etc/core/core.conf**. + +**TIP: If adjustments to the primary control network configuration made in */etc/core/core.conf* do not seem to take effect, check if there is anything set in the *Session Menu*, the *Options...* dialog. They may need to be cleared. These per session settings override the defaults in */etc/core/core.conf*.** ## Control Network in Distributed Sessions -When the primary control network is activated for a distributed session, a -control network bridge will be created on each of the slave servers, with -GRE tunnels back to the master server's bridge. The slave control bridges -are not assigned an address. From the host, any of the nodes (local or remote) -can be accessed, just like the single server case. +When the primary control network is activated for a distributed session, a control network bridge will be created on each of the slave servers, with GRE tunnels back to the master server's bridge. The slave control bridges are not assigned an address. From the host, any of the nodes (local or remote) can be accessed, just like the single server case. -In some situations, remote emulated nodes need to communicate with the host -on which they are running and not the master server. Multiple control network -prefixes can be specified in either the session option or -*/etc/core/core.conf*, separated by spaces and beginning with the master -server. Each entry has the form *"server:prefix"*. For example, if the servers -*core1*,*core2*, and *core3* are assigned with nodes in the scenario and using -*/etc/core/core.conf* instead of the session option. +In some situations, remote emulated nodes need to communicate with the host on which they are running and not the master server. 
Multiple control network prefixes can be specified in the either the session option or */etc/core/core.conf*, separated by spaces and beginning with the master server. Each entry has the form *"server:prefix"*. For example, if the servers *core1*,*core2*, and *core3* are assigned with nodes in the scenario and using :file:`/etc/core/core.conf` instead of the session option: ```shell controlnet=core1:172.16.1.0/24 core2:172.16.2.0/24 core3:172.16.1.0/24 ``` -Then, the control network bridges will be assigned as follows: +Then, the control network bridges will be assigned as follows: * core1 = 172.16.1.254 (assuming it is the master server), * core2 = 172.16.2.254 * core3 = 172.16.3.254 -Tunnels back to the master server will still be built, but it is up to the -user to add appropriate routes if networking between control network prefixes -is desired. The control network script may help with this. +Tunnels back to the master server will still be built, but it is up to the user to add appropriate routes if networking between control network prefixes is desired. The control network script may help with this. ## Control Network Script -A control network script may be specified using the *controlnet_updown_script* -option in the */etc/core/core.conf* file. This script will be run after the -bridge has been built (and address assigned) with the first argument being the -name of the bridge, and the second argument being the keyword *"startup"*. -The script will again be invoked prior to bridge removal with the second -argument being the keyword *"shutdown"*. +A control network script may be specified using the *controlnet_updown_script* option in the */etc/core/core.conf* file. This script will be run after the bridge has been built (and address assigned) with the first argument being the name of the bridge, and the second argument being the keyword *"startup"*. The script will again be invoked prior to bridge removal with the second argument being the keyword *"shutdown"*. ## Auxiliary Control Networks -Starting with EMANE 0.9.2, CORE will run EMANE instances within namespaces. -Since it is advisable to separate the OTA traffic from other traffic, we will -need more than single channel leading out from the namespace. Up to three -auxiliary control networks may be defined. Multiple control networks are set -up in */etc/core/core.conf* file. Lines *controlnet1*, *controlnet2* and -*controlnet3* define the auxiliary networks. +Starting with EMANE 0.9.2, CORE will run EMANE instances within namespaces. Since it is advisable to separate the OTA traffic from other traffic, we will need more than single channel leading out from the namespace. Up to three auxiliary control networks may be defined. Multiple control networks are set up in */etc/core/core.conf* file. Lines *controlnet1*, *controlnet2* and *controlnet3* define the auxiliary networks. For example, having the following */etc/core/core.conf*: @@ -117,26 +67,13 @@ controlnet1 = core1:172.18.1.0/24 core2:172.18.2.0/24 core3:172.18.3.0/24 controlnet2 = core1:172.19.1.0/24 core2:172.19.2.0/24 core3:172.19.3.0/24 ``` -This will activate the primary and two auxiliary control networks and add -interfaces *ctrl0*, *ctrl1*, *ctrl2* to each node. One use case would be to -assign *ctrl1* to the OTA manager device and *ctrl2* to the Event Service -device in the EMANE Options dialog box and leave *ctrl0* for CORE control -traffic. +This will activate the primary and two auxiliary control networks and add interfaces *ctrl0*, *ctrl1*, *ctrl2* to each node. 
One use case would be to assign *ctrl1* to the OTA manager device and *ctrl2* to the Event Service device in the EMANE Options dialog box and leave *ctrl0* for CORE control traffic. -!!! note +**NOTE: *controlnet0* may be used in place of *controlnet* to configure the primary control network.** - *controlnet0* may be used in place of *controlnet* to configure - the primary control network. +Unlike the primary control network, the auxiliary control networks will not employ tunneling since their primary purpose is for efficiently transporting multicast EMANE OTA and event traffic. Note that there is no per-session configuration for auxiliary control networks. -Unlike the primary control network, the auxiliary control networks will not -employ tunneling since their primary purpose is for efficiently transporting -multicast EMANE OTA and event traffic. Note that there is no per-session -configuration for auxiliary control networks. - -To extend the auxiliary control networks across a distributed test -environment, host network interfaces need to be added to them. The following -lines in */etc/core/core.conf* will add host devices *eth1*, *eth2* and *eth3* -to *controlnet1*, *controlnet2*, *controlnet3*: +To extend the auxiliary control networks across a distributed test environment, host network interfaces need to be added to them. The following lines in */etc/core/core.conf* will add host devices *eth1*, *eth2* and *eth3* to *controlnet1*, *controlnet2*, *controlnet3*: ```shell controlnetif1 = eth1 @@ -144,11 +81,7 @@ controlnetif2 = eth2 controlnetif3 = eth3 ``` -!!! note - - There is no need to assign an interface to the primary control - network because tunnels are formed between the master and the slaves using IP - addresses that are provided in *servers.conf*. +**NOTE: There is no need to assign an interface to the primary control network because tunnels are formed between the master and the slaves using IP addresses that are provided in *servers.conf*.** Shown below is a representative diagram of the configuration above. diff --git a/docs/devguide.md b/docs/devguide.md index 4fa43977..25908fa7 100644 --- a/docs/devguide.md +++ b/docs/devguide.md @@ -1,87 +1,46 @@ # CORE Developer's Guide -## Overview +* Table of Contents +{:toc} -The CORE source consists of several programming languages for -historical reasons. Current development focuses on the Python modules and -daemon. Here is a brief description of the source directories. +## Source Code Guide -| Directory | Description | -|-----------|--------------------------------------------------------------------------------------| -| daemon | Python CORE daemon/gui code that handles receiving API calls and creating containers | -| docs | Markdown Documentation currently hosted on GitHub | -| man | Template files for creating man pages for various CORE command line utilities | -| netns | C program for creating CORE containers | +The CORE source consists of several different programming languages for historical reasons. Current development focuses on the Python modules and daemon. Here is a brief description of the source directories. -## Getting started +These are being actively developed as of CORE 5.1: -To setup CORE for develop we will leverage to automated install script. +* *gui* - Tcl/Tk GUI. This uses Tcl/Tk because of its roots with the IMUNES + project. 
+* *daemon* - Python modules are found in the :file:`daemon/core` directory, the + daemon under :file:`daemon/scripts/core-daemon` +* *netns* - Python extension modules for Linux Network Namespace support are in :file:`netns`. +* *doc* - Documentation for the manual lives here in reStructuredText format. -## Clone CORE Repo +Not actively being developed: -```shell -cd ~/Documents -git clone https://github.com/coreemu/core.git -cd core -git checkout develop -``` +* *ns3* - Python ns3 script support for running CORE. -## Install the Development Environment +## The CORE API -This command will automatically install system dependencies, clone and build OSPF-MDR, -build CORE, setup the CORE poetry environment, and install pre-commit hooks. You can -refer to the [install docs](install.md) for issues related to different distributions. +The CORE API is used between different components of CORE for communication. The GUI communicates with the CORE daemon using the API. One emulation server communicates with another using the API. The API also allows other systems to interact with the CORE emulation. The API allows another system to add, remove, or modify nodes and links, and enables executing commands on the emulated systems. Wireless link parameters are updated on-the-fly based on node positions. -```shell -./install -d -``` +CORE listens on a local TCP port for API messages. The other system could be software running locally or another machine accessible across the network. -### pre-commit +The CORE API is currently specified in a separate document, available from the CORE website. -pre-commit hooks help automate running tools to check modified code. Every time a commit is made -python utilities will be ran to check validity of code, potentially failing and backing out the commit. -These changes are currently mandated as part of the current CI, so add the changes and commit again. +## Linux network namespace Commands -## Running CORE +Linux network namespace containers are often managed using the *Linux Container Tools* or *lxc-tools* package. The lxc-tools website is available here http://lxc.sourceforge.net/ for more information. CORE does not use these management utilities, but includes its own set of tools for instantiating and configuring network namespace containers. This section describes these tools. -You can now run core as you normally would, or leverage some of the invoke tasks to -conveniently run tests, etc. +### vnoded command -```shell -# run core-daemon -sudo core-daemon +The *vnoded* daemon is the program used to create a new namespace, and listen on a control channel for commands that may instantiate other processes. This daemon runs as PID 1 in the container. It is launched automatically by the CORE daemon. The control channel is a UNIX domain socket usually named */tmp/pycore.23098/n3*, for node 3 running on CORE session 23098, for example. Root privileges are required for creating a new namespace. -# run gui -core-gui +### vcmd command -# run mocked unit tests -cd -inv test-mock -``` +The *vcmd* program is used to connect to the *vnoded* daemon in a Linux network namespace, for running commands in the namespace. The CORE daemon uses the same channel for setting up a node and running processes within it. This program has two required arguments, the control channel name, and the command line to be run within the namespace. This command does not need to run with root privileges. 
-## Linux Network Namespace Commands - -Linux network namespace containers are often managed using the *Linux Container Tools* or *lxc-tools* package. -The lxc-tools website is available here http://lxc.sourceforge.net/ for more information. CORE does not use these -management utilities, but includes its own set of tools for instantiating and configuring network namespace containers. -This section describes these tools. - -### vnoded - -The *vnoded* daemon is the program used to create a new namespace, and listen on a control channel for commands that -may instantiate other processes. This daemon runs as PID 1 in the container. It is launched automatically by the CORE -daemon. The control channel is a UNIX domain socket usually named */tmp/pycore.23098/n3*, for node 3 running on CORE -session 23098, for example. Root privileges are required for creating a new namespace. - -### vcmd - -The *vcmd* program is used to connect to the *vnoded* daemon in a Linux network namespace, for running commands in the -namespace. The CORE daemon uses the same channel for setting up a node and running processes within it. This program -has two required arguments, the control channel name, and the command line to be run within the namespace. This command -does not need to run with root privileges. - -When you double-click on a node in a running emulation, CORE will open a shell window for that node using a command -such as: +When you double-click on a node in a running emulation, CORE will open a shell window for that node using a command such as: ```shell gnome-terminal -e vcmd -c /tmp/pycore.50160/n1 -- bash @@ -95,14 +54,11 @@ vcmd -c /tmp/pycore.50160/n1 -- /sbin/ip -4 ro ### core-cleanup script -A script named *core-cleanup* is provided to clean up any running CORE emulations. It will attempt to kill any -remaining vnoded processes, kill any EMANE processes, remove the :file:`/tmp/pycore.*` session directories, and remove -any bridges or *nftables* rules. With a *-d* option, it will also kill any running CORE daemon. +A script named *core-cleanup* is provided to clean up any running CORE emulations. It will attempt to kill any remaining vnoded processes, kill any EMANE processes, remove the :file:`/tmp/pycore.*` session directories, and remove any bridges or *ebtables* rules. With a *-d* option, it will also kill any running CORE daemon. ### netns command -The *netns* command is not used by CORE directly. This utility can be used to run a command in a new network namespace -for testing purposes. It does not open a control channel for receiving further commands. +The *netns* command is not used by CORE directly. This utility can be used to run a command in a new network namespace for testing purposes. It does not open a control channel for receiving further commands. 
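Below is a short usage sketch for the *core-cleanup* script described above, followed by optional checks that nothing was left behind; the bridge listing command shown assumes the bridge-utils package is installed.

```shell
# kill leftover vnoded/EMANE processes, remove /tmp/pycore.* session
# directories and bridges, and stop a running core-daemon (-d)
sudo core-cleanup -d
# verify no session directories or bridges remain
ls -d /tmp/pycore.* 2>/dev/null
brctl show
```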
### Other Useful Commands @@ -110,9 +66,48 @@ Here are some other Linux commands that are useful for managing the Linux networ ```shell # view the Linux bridging setup -ip link show type bridge +brctl show # view the netem rules used for applying link effects tc qdisc show # view the rules that make the wireless LAN work -nft list ruleset +ebtables -L ``` + +### Example Command Usage + +Below is a transcript of creating two emulated nodes and connecting them together with a wired link: + +```shell +# create node 1 namespace container +vnoded -c /tmp/n1.ctl -l /tmp/n1.log -p /tmp/n1.pid +# create a virtual Ethernet (veth) pair, installing one end into node 1 +ip link add name n1.0.1 type veth peer name n1.0 +ip link set n1.0 netns `cat /tmp/n1.pid` +vcmd -c /tmp/n1.ctl -- ip link set lo up +vcmd -c /tmp/n1.ctl -- ip link set n1.0 name eth0 up +vcmd -c /tmp/n1.ctl -- ip addr add 10.0.0.1/24 dev eth0 + +# create node 2 namespace container +vnoded -c /tmp/n2.ctl -l /tmp/n2.log -p /tmp/n2.pid +# create a virtual Ethernet (veth) pair, installing one end into node 2 +ip link add name n2.0.1 type veth peer name n2.0 +ip link set n2.0 netns `cat /tmp/n2.pid` +vcmd -c /tmp/n2.ctl -- ip link set lo up +vcmd -c /tmp/n2.ctl -- ip link set n2.0 name eth0 up +vcmd -c /tmp/n2.ctl -- ip addr add 10.0.0.2/24 dev eth0 + +# bridge together nodes 1 and 2 using the other end of each veth pair +brctl addbr b.1.1 +brctl setfd b.1.1 0 +brctl addif b.1.1 n1.0.1 +brctl addif b.1.1 n2.0.1 +ip link set n1.0.1 up +ip link set n2.0.1 up +ip link set b.1.1 up + +# display connectivity and ping from node 1 to node 2 +brctl show +vcmd -c /tmp/n1.ctl -- ping 10.0.0.2 +``` + +The above example script can be found as *twonodes.sh* in the *examples/netns* directory. Use *core-cleanup* to clean up after the script. 
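To tie the transcript above to the link effects mentioned earlier, here is a hedged sketch that applies netem delay and loss to the same two-node setup; the interface names n1.0.1 and n2.0.1 are the veth ends created in the example, and the delay and loss values are arbitrary.

```shell
# apply link effects on both veth ends attached to the bridge
tc qdisc add dev n1.0.1 root netem delay 20ms loss 1%
tc qdisc add dev n2.0.1 root netem delay 20ms loss 1%
# inspect the rules, then remove them when done
tc qdisc show dev n1.0.1
tc qdisc del dev n1.0.1 root
tc qdisc del dev n2.0.1 root
```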
diff --git a/docs/diagrams/architecture.plantuml b/docs/diagrams/architecture.plantuml deleted file mode 100644 index 403886d9..00000000 --- a/docs/diagrams/architecture.plantuml +++ /dev/null @@ -1,44 +0,0 @@ -@startuml -skinparam { - RoundCorner 8 - ComponentStyle uml2 - ComponentBorderColor #Black - InterfaceBorderColor #Black - InterfaceBackgroundColor #Yellow -} - -package User { - component "core-gui" as gui #DeepSkyBlue - component "python scripts" as scripts #DeepSkyBlue - component vcmd #DeepSkyBlue -} -package Server { - component "core-daemon" as daemon #DarkSeaGreen -} -package Python { - component core #LightSteelBlue -} -package "Linux System" { - component nodes #SpringGreen [ - nodes - (linux namespaces) - ] - component links #SpringGreen [ - links - (bridging and traffic manipulation) - ] -} - -package API { - interface gRPC as grpc -} - -gui <..> grpc -scripts <..> grpc -grpc -- daemon -scripts -- core -daemon - core -core <..> nodes -core <..> links -vcmd <..> nodes -@enduml diff --git a/docs/diagrams/workflow.plantuml b/docs/diagrams/workflow.plantuml deleted file mode 100644 index cff943ad..00000000 --- a/docs/diagrams/workflow.plantuml +++ /dev/null @@ -1,40 +0,0 @@ -@startuml -skinparam { - RoundCorner 8 - StateBorderColor #Black - StateBackgroundColor #LightSteelBlue -} - -Definition: Session XML -Definition: GUI Drawing -Definition: Scripts - -Configuration: Configure Hooks -Configuration: Configure Services -Configuration: Configure WLAN / Mobility -Configuration: Configure EMANE - -Instantiation: Create Nodes -Instantiation: Create Interfaces -Instantiation: Create Bridges -Instantiation: Start Services - -Runtime: Interactive Shells -Runtime: Traffic Scripts -Runtime: Mobility -Runtime: Widgets - -Datacollect: Collect Files -Datacollect: Other Results - -Shutdown: Shutdown Services -Shutdown: Destroy Brdges -Shutdown: Destroy Interfaces -Shutdown: Destroy Nodes - -Definition -> Configuration -Configuration -> Instantiation -Instantiation -> Runtime -Runtime -> Datacollect -Datacollect -> Shutdown -@enduml diff --git a/docs/distributed.md b/docs/distributed.md deleted file mode 100644 index 95ec7268..00000000 --- a/docs/distributed.md +++ /dev/null @@ -1,193 +0,0 @@ -# CORE - Distributed Emulation - -## Overview - -A large emulation scenario can be deployed on multiple emulation servers and -controlled by a single GUI. The GUI, representing the entire topology, can be -run on one of the emulation servers or on a separate machine. - -Each machine that will act as an emulation will require the installation of a -distributed CORE package and some configuration to allow SSH as root. - -## CORE Configuration - -CORE configuration settings required for using distributed functionality. - -Edit **/etc/core/core.conf** or specific configuration file being used. - -```shell -# uncomment and set this to the address that remote servers -# use to get back to the main host, example below -distributed_address = 129.168.0.101 -``` - -### EMANE Specific Configurations - -EMANE needs to have controlnet configured in **core.conf** in order to startup correctly. -The names before the addresses need to match the names of distributed servers configured. - -```shell -controlnet = core1:172.16.1.0/24 core2:172.16.2.0/24 core3:172.16.3.0/24 core4:172.16.4.0/24 core5:172.16.5.0/24 -emane_event_generate = True -``` - -## Configuring SSH - -Distributed CORE works using the python fabric library to run commands on -remote servers over SSH. 
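Before relying on fabric, it is worth confirming that passwordless root SSH (configured in the sections that follow) works from the master to each distributed server. A minimal check, assuming the *~/.ssh/core* key generated later in this section and an example server named *core2*, is:

```shell
# should print the remote hostname without prompting for a password
ssh -i ~/.ssh/core root@core2 hostname
```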
- -### Remote GUI Terminals - -You need to have the same user defined on each server, since the user used -for these remote shells is the same user that is running the CORE GUI. - -**Edit -> Preferences... -> Terminal program:** - -It is currently recommended to set this to **xterm -e**, as the default -**gnome-terminal** will not work. - -You may need to install xterm, if not already installed. - -```shell -sudo apt install xterm -``` - -### Distributed Server SSH Configuration - -First, the distributed servers must be configured to allow passwordless root -login over SSH. - -On distributed server: - -```shell -# install openssh-server -sudo apt install openssh-server - -# open sshd config -vi /etc/ssh/sshd_config - -# verify these configurations in file -PermitRootLogin yes -PasswordAuthentication yes - -# if desired add/modify the following line to allow SSH to -# accept all env variables -AcceptEnv * - -# restart sshd -sudo systemctl restart sshd -``` - -On master server: - -```shell -# install package if needed -sudo apt install openssh-client - -# generate ssh key if needed -ssh-keygen -o -t rsa -b 4096 -f ~/.ssh/core - -# copy public key to authorized_keys file -ssh-copy-id -i ~/.ssh/core root@server - -# configure fabric to use the core ssh key -sudo vi /etc/fabric.yml - -# set configuration -connect_kwargs: {"key_filename": "/home/user/.ssh/core"} -``` - -On distributed server: - -```shell -# open sshd config -vi /etc/ssh/sshd_config - -# change configuration for root login to without password -PermitRootLogin without-password - -# restart sshd -sudo systemctl restart sshd -``` - -### Fabric Config File - -Make sure the value used below is the absolute path to the file -generated above **~/.ssh/core**. - -Add/update the fabric configuration file **/etc/fabric.yml**: - -```yaml -connect_kwargs: { "key_filename": "/home/user/.ssh/core" } -``` - -## Add Emulation Servers in GUI - -Within the core-gui navigate to menu option: - -**Session -> Servers...** - -Within the dialog box presented, add or modify an existing server if present -to use the name, address, and port for the server you plan to use. - -Server configurations are loaded from, and written to, a configuration file for -the GUI. - -## Assigning Nodes - -The user needs to assign nodes to emulation servers in the scenario. Making no -assignment means the node will be emulated on the master server. -In the configuration window of every node, a drop-down box located between -the *Node name* and the *Image* button will select the name of the emulation -server. By default, this menu shows *(none)*, indicating that the node will -be emulated locally on the master. When entering Execute mode, the CORE GUI -will deploy the node on its assigned emulation server. - -Another way to assign emulation servers is to select one or more nodes using -the select tool (ctrl-click to select multiple), and right-click one of the -nodes and choose *Assign to...*. - -The **CORE emulation servers** dialog box may also be used to assign nodes to -servers. The assigned server name appears in parentheses next to the node name. -To assign all nodes to one of the servers, click on the server name and then -the **all nodes** button. Servers that have assigned nodes are shown in blue in -the server list. Another option is to first select a subset of nodes, then open -the **CORE emulation servers** box and use the **selected nodes** button. - -**IMPORTANT: Leave the nodes unassigned if they are to be run on the master -server. 
Do not explicitly assign the nodes to the master server.** - -## GUI Visualization - -If there is a link between two nodes residing on different servers, the GUI -will draw the link with a dashed line. - -## Concerns and Limitations - -Wireless nodes, i.e. those connected to a WLAN node, can be assigned to -different emulation servers and participate in the same wireless network -only if an EMANE model is used for the WLAN. The basic range model does -not work across multiple servers due to the Linux bridging and nftables -rules that are used. - -!!! note - - The basic range wireless model does not support distributed emulation, - but EMANE does. - -When nodes are linked across servers **core-daemons** will automatically -create necessary tunnels between the nodes when executed. Care should be taken -to arrange the topology such that the number of tunnels is minimized. The -tunnels carry data between servers to connect nodes as specified in the topology. -These tunnels are created using GRE tunneling, similar to the Tunnel Tool. - -## Distributed Checklist - -1. Install CORE on master server -2. Install distributed CORE package on all servers needed -3. Installed and configure public-key SSH access on all servers (if you want to use - double-click shells or Widgets.) for both the GUI user (for terminals) and root for running CORE commands -4. Update CORE configuration as needed -5. Choose the servers that participate in distributed emulation. -6. Assign nodes to desired servers, empty for master server. -7. Press the **Start** button to launch the distributed emulation. diff --git a/docs/docker.md b/docs/docker.md deleted file mode 100644 index 562fd453..00000000 --- a/docs/docker.md +++ /dev/null @@ -1,71 +0,0 @@ -# Docker Node Support - -## Overview - -Provided below is some information for helping setup and use Docker -nodes within a CORE scenario. - -## Installation - -### Debian Systems - -```shell -sudo apt install docker.io -``` - -### RHEL Systems - -## Configuration - -Custom configuration required to avoid iptable rules being added and removing -the need for the default docker network, since core will be orchestrating -connections between nodes. - -Place the file below in **/etc/docker/docker.json** - -```json -{ - "bridge": "none", - "iptables": false -} -``` - -## Group Setup - -To use Docker nodes within the python GUI, you will need to make sure the -user running the GUI is a member of the docker group. - -```shell -# add group if does not exist -sudo groupadd docker - -# add user to group -sudo usermod -aG docker $USER - -# to get this change to take effect, log out and back in or run the following -newgrp docker -``` - -## Image Requirements - -Images used by Docker nodes in CORE need to have networking tools installed for -CORE to automate setup and configuration of the network within the container. - -Example Dockerfile: - -``` -FROM ubuntu:latest -RUN apt-get update -RUN apt-get install -y iproute2 ethtool -``` - -Build image: - -```shell -sudo docker build -t . -``` - -## Tools and Versions Tested With - -* Docker version 18.09.5, build e8ff056 -* nsenter from util-linux 2.31.1 diff --git a/docs/emane.md b/docs/emane.md index a034c63b..2182bed1 100644 --- a/docs/emane.md +++ b/docs/emane.md @@ -1,311 +1,143 @@ -# EMANE (Extendable Mobile Ad-hoc Network Emulator) +# CORE/EMANE + +* Table of Contents +{:toc} ## What is EMANE? -The Extendable Mobile Ad-hoc Network Emulator (EMANE) allows heterogeneous -network emulation using a pluggable MAC and PHY layer architecture. 
The -EMANE framework provides an implementation architecture for modeling -different radio interface types in the form of *Network Emulation Modules* -(NEMs) and incorporating these modules into a real-time emulation running -in a distributed environment. +The Extendable Mobile Ad-hoc Network Emulator (EMANE) allows heterogeneous network emulation using a pluggable MAC and PHY layer architecture. The EMANE framework provides an implementation architecture for modeling different radio interface types in the form of *Network Emulation Modules* (NEMs) and incorporating these modules into a real-time emulation running in a distributed environment. -EMANE is developed by U.S. Naval Research Labs (NRL) Code 5522 and Adjacent -Link LLC, who maintain these websites: +EMANE is developed by U.S. Naval Research Labs (NRL) Code 5522 and Adjacent Link LLC, who maintain these websites: -* -* +* http://www.nrl.navy.mil/itd/ncs/products/emane +* http://www.adjacentlink.com/ -Instead of building Linux Ethernet bridging networks with CORE, -higher-fidelity wireless networks can be emulated using EMANE bound to virtual -devices. CORE emulates layers 3 and above (network, session, application) with -its virtual network stacks and process space for protocols and applications, -while EMANE emulates layers 1 and 2 (physical and data link) using its -pluggable PHY and MAC models. +Instead of building Linux Ethernet bridging networks with CORE, higher-fidelity wireless networks can be emulated using EMANE bound to virtual devices. CORE emulates layers 3 and above (network, session, application) with its virtual network stacks and process space for protocols and applications, while EMANE emulates layers 1 and 2 (physical and data link) using its pluggable PHY and MAC models. -The interface between CORE and EMANE is a TAP device. CORE builds the virtual -node using Linux network namespaces, installs the TAP device into the namespace -and instantiates one EMANE process in the namespace. The EMANE process binds a -user space socket to the TAP device for sending and receiving data from CORE. +The interface between CORE and EMANE is a TAP device. CORE builds the virtual node using Linux network namespaces, installs the TAP device into the namespace and instantiates one EMANE process in the namespace. The EMANE process binds a user space socket to the TAP device for sending and receiving data from CORE. -An EMANE instance sends and receives OTA (Over-The-Air) traffic to and from -other EMANE instances via a control port (e.g. *ctrl0*, *ctrl1*). It also -sends and receives Events to and from the Event Service using the same or a -different control port. EMANE models are configured through the GUI's -configuration dialog. A corresponding EmaneModel Python class is sub-classed -for each supported EMANE model, to provide configuration items and their -mapping to XML files. This way new models can be easily supported. When -CORE starts the emulation, it generates the appropriate XML files that -specify the EMANE NEM configuration, and launches the EMANE daemons. +An EMANE instance sends and receives OTA traffic to and from other EMANE instances via a control port (e.g. *ctrl0*, *ctrl1*). It also sends and receives Events to and from the Event Service using the same or a different control port. EMANE models are configured through CORE's WLAN configuration dialog. A corresponding EmaneModel Python class is sub-classed for each supported EMANE model, to provide configuration items and their mapping to XML files. 
This way new models can be easily supported. When CORE starts the emulation, it generates the appropriate XML files that specify the EMANE NEM configuration, and launches the EMANE daemons. -Some EMANE models support location information to determine when packets -should be dropped. EMANE has an event system where location events are -broadcast to all NEMs. CORE can generate these location events when nodes -are moved on the canvas. The canvas size and scale dialog has controls for -mapping the X,Y coordinate system to a latitude, longitude geographic system -that EMANE uses. When specified in the *core.conf* configuration file, CORE -can also subscribe to EMANE location events and move the nodes on the canvas -as they are moved in the EMANE emulation. This would occur when an Emulation -Script Generator, for example, is running a mobility script. - -## EMANE in CORE - -This section will cover some high level topics and examples for running and -using EMANE in CORE. - -You can find more detailed tutorials and examples at the -[EMANE Tutorial](https://github.com/adjacentlink/emane-tutorial/wiki). - -Every topic below assumes CORE, EMANE, and OSPF MDR have been installed. - -!!! info - - Demo files will be found within the `core-gui` **~/.coregui/xmls** directory - -| Topic | Model | Description | -|--------------------------------------|---------|-----------------------------------------------------------| -| [XML Files](emane/files.md) | RF Pipe | Overview of generated XML files used to drive EMANE | -| [GPSD](emane/gpsd.md) | RF Pipe | Overview of running and integrating gpsd with EMANE | -| [Precomputed](emane/precomputed.md) | RF Pipe | Overview of using the precomputed propagation model | -| [EEL](emane/eel.md) | RF Pipe | Overview of using the Emulation Event Log (EEL) Generator | -| [Antenna Profiles](emane/antenna.md) | RF Pipe | Overview of using antenna profiles in EMANE | +Some EMANE models support location information to determine when packets should be dropped. EMANE has an event system where location events are broadcast to all NEMs. CORE can generate these location events when nodes are moved on the canvas. The canvas size and scale dialog has controls for mapping the X,Y coordinate system to a latitude, longitude geographic system that EMANE uses. When specified in the *core.conf* configuration file, CORE can also subscribe to EMANE location events and move the nodes on the canvas as they are moved in the EMANE emulation. This would occur when an Emulation Script Generator, for example, is running a mobility script. ## EMANE Configuration -The CORE configuration file **/etc/core/core.conf** has options specific to -EMANE. An example emane section from the **core.conf** file is shown below: +The CORE configuration file */etc/core/core.conf* has options specific to EMANE. An example emane section from the *core.conf* file is shown below: ```shell # EMANE configuration emane_platform_port = 8101 emane_transform_port = 8201 emane_event_monitor = False -#emane_models_dir = /home//.coregui/custom_emane +#emane_models_dir = /home/username/.core/myemane # EMANE log level range [0,4] default: 2 emane_log_level = 2 emane_realtime = True -# prefix used for emane installation -# emane_prefix = /usr ``` -If you have an EMANE event generator (e.g. mobility or pathloss scripts) and -want to have CORE subscribe to EMANE location events, set the following line -in the **core.conf** configuration file. +EMANE can be installed from deb or RPM packages or from source. 
See the [EMANE GitHub](https://github.com/adjacentlink/emane) for full details. -!!! note +Here are quick instructions for installing all EMANE packages: - Do not set this option to True if you want to manually drag nodes around - on the canvas to update their location in EMANE. +```shell +# install dependencies +sudo apt-get install libssl-dev libxml-libxml-perl libxml-simple-perl +wget https://adjacentlink.com/downloads/emane/emane-1.2.1-release-1.ubuntu-16_04.amd64.tar.gz +tar xzf emane-1.2.1-release-1.ubuntu-16_04.amd64.tar.gz +sudo dpkg -i emane-1.2.1-release-1/deb/ubuntu-16_04/amd64/*.deb +``` + +If you have an EMANE event generator (e.g. mobility or pathloss scripts) and want to have CORE subscribe to EMANE location events, set the following line in the */etc/core/core.conf* configuration file: ```shell emane_event_monitor = True ``` -Another common issue is if installing EMANE from source, the default configure -prefix will place the DTD files in **/usr/local/share/emane/dtd** while CORE -expects them in **/usr/share/emane/dtd**. +Do not set the above option to True if you want to manually drag nodes around on the canvas to update their location in EMANE. -Update the EMANE prefix configuration to resolve this problem. +Another common issue is if installing EMANE from source, the default configure prefix will place the DTD files in */usr/local/share/emane/dtd* while CORE expects them in */usr/share/emane/dtd*. + +A symbolic link will fix this: ```shell -emane_prefix = /usr/local +sudo ln -s /usr/local/share/emane /usr/share/emane ``` ## Custom EMANE Models -CORE supports custom developed EMANE models by way of dynamically loading user -created python files that represent the model. Custom EMANE models should be -placed within the path defined by **emane_models_dir** in the CORE -configuration file. This path cannot end in **/emane**. +CORE supports custom developed EMANE models by way of dynamically loading user created python files that represent the model. Custom EMANE models should be placed within the path defined by **emane_models_dir** in the CORE configuration file. This path cannot end in **/emane**. Here is an example model with documentation describing functionality: - -```python -""" -Example custom emane model. -""" -from pathlib import Path -from typing import Dict, Optional, Set, List - -from core.config import Configuration -from core.emane import emanemanifest, emanemodel +[Example Model](examplemodel.html) -class ExampleModel(emanemodel.EmaneModel): - """ - Custom emane model. 
- - :cvar name: defines the emane model name that will show up in the GUI - - Mac Definition: - :cvar mac_library: defines that mac library that the model will reference - :cvar mac_xml: defines the mac manifest file that will be parsed to obtain configuration options, - that will be displayed within the GUI - :cvar mac_defaults: allows you to override options that are maintained within the manifest file above - :cvar mac_config: parses the manifest file and converts configurations into core supported formats - - Phy Definition: - NOTE: phy configuration will default to the universal model as seen below and the below section does not - have to be included - :cvar phy_library: defines that phy library that the model will reference, used if you need to - provide a custom phy - :cvar phy_xml: defines the phy manifest file that will be parsed to obtain configuration options, - that will be displayed within the GUI - :cvar phy_defaults: allows you to override options that are maintained within the manifest file above - or for the default universal model - :cvar phy_config: parses the manifest file and converts configurations into core supported formats - - Custom Override Options: - NOTE: these options default to what's seen below and do not have to be included - :cvar config_ignore: allows you to ignore options within phy/mac, used typically if you needed to add - a custom option for display within the gui - """ - - name: str = "emane_example" - mac_library: str = "rfpipemaclayer" - mac_xml: str = "/usr/share/emane/manifest/rfpipemaclayer.xml" - mac_defaults: Dict[str, str] = { - "pcrcurveuri": "/usr/share/emane/xml/models/mac/rfpipe/rfpipepcr.xml" - } - mac_config: List[Configuration] = [] - phy_library: Optional[str] = None - phy_xml: str = "/usr/share/emane/manifest/emanephy.xml" - phy_defaults: Dict[str, str] = { - "subid": "1", "propagationmodel": "2ray", "noisemode": "none" - } - phy_config: List[Configuration] = [] - config_ignore: Set[str] = set() - - @classmethod - def load(cls, emane_prefix: Path) -> None: - """ - Called after being loaded within the EmaneManager. Provides configured - emane_prefix for parsing xml files. - - :param emane_prefix: configured emane prefix path - :return: nothing - """ - cls._load_platform_config(emane_prefix) - manifest_path = "share/emane/manifest" - # load mac configuration - mac_xml_path = emane_prefix / manifest_path / cls.mac_xml - cls.mac_config = emanemanifest.parse(mac_xml_path, cls.mac_defaults) - # load phy configuration - phy_xml_path = emane_prefix / manifest_path / cls.phy_xml - cls.phy_config = emanemanifest.parse(phy_xml_path, cls.phy_defaults) -``` ## Single PC with EMANE -This section describes running CORE and EMANE on a single machine. This is the -default mode of operation when building an EMANE network with CORE. The OTA -manager and Event service interface are set to use *ctrl0* and the virtual -nodes use the primary control channel for communicating with one another. The -primary control channel is automatically activated when a scenario involves -EMANE. Using the primary control channel prevents your emulation session from -sending multicast traffic on your local network and interfering with other -EMANE users. +This section describes running CORE and EMANE on a single machine. This is the default mode of operation when building an EMANE network with CORE. The OTA manager and Event service interface are set to use *ctrl0* and the virtual nodes use the primary control channel for communicating with one another. 
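One way to verify that the primary control channel is in place while an EMANE scenario is running is to look for the *ctrl0* interface inside a node. The sketch below is illustrative only; the session id (12345) and node name (n1) are placeholders.

```shell
# the control interface should hold an address from the control network prefix
vcmd -c /tmp/pycore.12345/n1 -- ip addr show ctrl0
```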
The primary control channel is automatically activated when a scenario involves EMANE. Using the primary control channel prevents your emulation session from sending multicast traffic on your local network and interfering with other EMANE users. -EMANE is configured through an EMANE node. Once a node is linked to an EMANE -cloud, the radio interface on that node may also be configured -separately (apart from the cloud.) +EMANE is configured through a WLAN node, because it is all about emulating wireless radio networks. Once a node is linked to a WLAN cloud configured with an EMANE model, the radio interface on that node may also be configured separately (apart from the cloud.) -Right click on an EMANE node and select EMANE Config to open the configuration dialog. -The EMANE models should be listed here for selection. (You may need to restart the -CORE daemon if it was running prior to installing the EMANE Python bindings.) +Double-click on a WLAN node to invoke the WLAN configuration dialog. Click the *EMANE* tab; when EMANE has been properly installed, EMANE wireless modules should be listed in the *EMANE Models* list. (You may need to restart the CORE daemon if it was running prior to installing the EMANE Python bindings.) Click on a model name to enable it. -When an EMANE model is selected, you can click on the models option button -causing the GUI to query the CORE daemon for configuration items. -Each model will have different parameters, refer to the -EMANE documentation for an explanation of each item. The defaults values are -presented in the dialog. Clicking *Apply* and *Apply* again will store the -EMANE model selections. +When an EMANE model is selected in the *EMANE Models* list, clicking on the *model options* button causes the GUI to query the CORE daemon for configuration items. Each model will have different parameters, refer to the EMANE documentation for an explanation of each item. The defaults values are presented in the dialog. Clicking *Apply* and *Apply* again will store the EMANE model selections. -The RF-PIPE and IEEE 802.11abg models use a Universal PHY that supports -geographic location information for determining pathloss between nodes. A -default latitude and longitude location is provided by CORE and this -location-based pathloss is enabled by default; this is the *pathloss mode* -setting for the Universal PHY. Moving a node on the canvas while the -emulation is running generates location events for EMANE. To view or change -the geographic location or scale of the canvas use the *Canvas Size and Scale* -dialog available from the *Canvas* menu. +The *EMANE options* button allows specifying some global parameters for EMANE, some of which are necessary for distributed operation. -Note that conversion between geographic and Cartesian coordinate systems is -done using UTM (Universal Transverse Mercator) projection, where different -zones of 6 degree longitude bands are defined. The location events generated -by CORE may become inaccurate near the zone boundaries for very large scenarios -that span multiple UTM zones. It is recommended that EMANE location scripts be -used to achieve geo-location accuracy in this situation. +The RF-PIPE and IEEE 802.11abg models use a Universal PHY that supports geographic location information for determining pathloss between nodes. A default latitude and longitude location is provided by CORE and this location-based pathloss is enabled by default; this is the *pathloss mode* setting for the Universal PHY. 
Moving a node on the canvas while the emulation is running generates location events for EMANE. To view or change the geographic location or scale of the canvas use the *Canvas Size and Scale* dialog available from the *Canvas* menu. -Clicking the green *Start* button launches the emulation and causes TAP devices -to be created in the virtual nodes that are linked to the EMANE WLAN. These -devices appear with interface names such as eth0, eth1, etc. The EMANE processes -should now be running in each namespace. +Note that conversion between geographic and Cartesian coordinate systems is done using UTM (Universal Transverse Mercator) projection, where different zones of 6 degree longitude bands are defined. The location events generated by CORE may become inaccurate near the zone boundaries for very large scenarios that span multiple UTM zones. It is recommended that EMANE location scripts be used to achieve geo-location accuracy in this situation. -To view the configuration generated by CORE, look in the */tmp/pycore.nnnnn/* session -directory to find the generated EMANE xml files. One easy way to view -this information is by double-clicking one of the virtual nodes and listing the files -in the shell. +Clicking the green *Start* button launches the emulation and causes TAP devices to be created in the virtual nodes that are linked to the EMANE WLAN. These devices appear with interface names such as eth0, eth1, etc. The EMANE processes should now be running in each namespace. For a four node scenario: -![](static/emane-single-pc.png) +```shell +ps -aef | grep emane +root 1063 969 0 11:46 ? 00:00:00 emane -d --logl 3 -r -f /tmp/pycore.59992/emane4.log /tmp/pycore.59992/platform4.xml +root 1117 959 0 11:46 ? 00:00:00 emane -d --logl 3 -r -f /tmp/pycore.59992/emane2.log /tmp/pycore.59992/platform2.xml +root 1179 942 0 11:46 ? 00:00:00 emane -d --logl 3 -r -f /tmp/pycore.59992/emane1.log /tmp/pycore.59992/platform1.xml +root 1239 979 0 11:46 ? 00:00:00 emane -d --logl 3 -r -f /tmp/pycore.59992/emane5.log /tmp/pycore.59992/platform5.xml +``` + +The example above shows the EMANE processes started by CORE. To view the configuration generated by CORE, look in the */tmp/pycore.nnnnn/* session directory for a *platform.xml* file and other XML files. One easy way to view this information is by double-clicking one of the virtual nodes, and typing *cd ..* in the shell to go up to the session directory. + +![](static/single-pc-emane.png) ## Distributed EMANE -Running CORE and EMANE distributed among two or more emulation servers is -similar to running on a single machine. There are a few key configuration -items that need to be set in order to be successful, and those are outlined here. +Running CORE and EMANE distributed among two or more emulation servers is similar to running on a single machine. There are a few key configuration items that need to be set in order to be successful, and those are outlined here. -It is a good idea to maintain separate networks for data (OTA) and control. -The control network may be a shared laboratory network, for example, and you do -not want multicast traffic on the data network to interfere with other EMANE -users. Furthermore, control traffic could interfere with the OTA latency and -throughput and might affect emulation fidelity. The examples described here will -use *eth0* as a control interface and *eth1* as a data interface, although -using separate interfaces is not strictly required. 
Note that these interface
-names refer to interfaces present on the host machine, not virtual interfaces
-within a node.
+It is a good idea to maintain separate networks for data (OTA) and control. The control network may be a shared laboratory network, for example, and you do not want multicast traffic on the data network to interfere with other EMANE users. Furthermore, control traffic could interfere with the OTA latency and throughput and might affect emulation fidelity. The examples described here will use *eth0* as a control interface and *eth1* as a data interface, although using separate interfaces is not strictly required. Note that these interface names refer to interfaces present on the host machine, not virtual interfaces within a node.
-**IMPORTANT: If an auxiliary control network is used, an interface on the host
-has to be assigned to that network.**
+**IMPORTANT: If an auxiliary control network is used, an interface on the host has to be assigned to that network.**
-Each machine that will act as an emulation server needs to have CORE distributed
-and EMANE installed. As well as be setup to work for CORE distributed mode.
+Each machine that will act as an emulation server needs to have CORE and EMANE installed.
-The IP addresses of the available servers are configured from the CORE
-servers dialog box. The dialog shows available
-servers, some or all of which may be assigned to nodes on the canvas.
+The IP addresses of the available servers are configured from the CORE emulation servers dialog box (choose *Session* then *Emulation servers...*). This list of servers is stored in a *~/.core/servers.conf* file. The dialog shows available servers, some or all of which may be assigned to nodes on the canvas.
-Nodes need to be assigned to servers and can be done so using the node
-configuration dialog. When a node is not assigned to any emulation server,
-it will be emulated locally.
+Nodes need to be assigned to emulation servers. Select several nodes, right-click them, and choose *Assign to* and the name of the desired server. When a node is not assigned to any emulation server, it will be emulated locally. The local machine that the GUI connects with is considered the "master" machine, which in turn connects to the other emulation server "slaves". Public key SSH should be configured from the master to the slaves.
-Using the EMANE node configuration dialog. You can change the EMANE model
-being used, along with changing any configuration setting from their defaults.
+Under the *EMANE* tab of the EMANE WLAN, click on the *EMANE options* button. This brings up the emane configuration dialog. The *enable OTA Manager channel* should be set to *on*. The *OTA Manager device* and *Event Service device* should be set to a control network device. For example, if you have a primary and auxiliary control network (i.e. controlnet and controlnet1), and you want the OTA traffic to have its own dedicated network, set the OTA Manager device to *ctrl1* and the Event Service device to *ctrl0*. The EMANE models can be configured. Click *Apply* to save these settings.
-![](static/emane-configuration.png)
+![](static/distributed-emane-configuration.png)
-!!! note
+**HINT:**
+    Here is a quick checklist for distributed emulation with EMANE.
-    Here is a quick checklist for distributed emulation with EMANE.
+    1. Follow the steps outlined for normal CORE.
+    2. Under the *EMANE* tab of the EMANE WLAN, click on *EMANE options*.
+    3. Turn on the *OTA Manager channel* and set the *OTA Manager device*.
+ Also set the *Event Service device*. + 4. Select groups of nodes, right-click them, and assign them to servers + using the *Assign to* menu. + 5. Synchronize your machine's clocks prior to starting the emulation, + using *ntp* or *ptp*. Some EMANE models are sensitive to timing. + 6. Press the *Start* button to launch the distributed emulation. -1. Follow the steps outlined for normal CORE. -2. Assign nodes to desired servers -3. Synchronize your machine's clocks prior to starting the emulation, - using *ntp* or *ptp*. Some EMANE models are sensitive to timing. -4. Press the *Start* button to launch the distributed emulation. -Now when the Start button is used to instantiate the emulation, the local CORE -daemon will connect to other emulation servers that have been assigned -to nodes. Each server will have its own session directory where the -*platform.xml* file and other EMANE XML files are generated. The NEM IDs are -automatically coordinated across servers so there is no overlap. +Now when the Start button is used to instantiate the emulation, the local CORE Python daemon will connect to other emulation servers that have been assigned to nodes. Each server will have its own session directory where the *platform.xml* file and other EMANE XML files are generated. The NEM IDs are automatically coordinated across servers so there is no overlap. Each server also gets its own Platform ID. -An Ethernet device is used for disseminating multicast EMANE events, as -specified in the *configure emane* dialog. EMANE's Event Service can be run -with mobility or pathloss scripts. -If CORE is not subscribed to location events, it will generate them as nodes -are moved on the canvas. +An Ethernet device is used for disseminating multicast EMANE events, as specified in the *configure emane* dialog. EMANE's Event Service can be run with mobility or pathloss scripts as described in :ref:`Single_PC_with_EMANE`. If CORE is not subscribed to location events, it will generate them as nodes are moved on the canvas. -Double-clicking on a node during runtime will cause the GUI to attempt to SSH -to the emulation server for that node and run an interactive shell. The public -key SSH configuration should be tested with all emulation servers prior to -starting the emulation. +Double-clicking on a node during runtime will cause the GUI to attempt to SSH to the emulation server for that node and run an interactive shell. The public key SSH configuration should be tested with all emulation servers prior to starting the emulation. + +![](static/distributed-emane-network.png) diff --git a/docs/emane/antenna.md b/docs/emane/antenna.md deleted file mode 100644 index 79c023ac..00000000 --- a/docs/emane/antenna.md +++ /dev/null @@ -1,450 +0,0 @@ -# EMANE Antenna Profiles - -## Overview - -Introduction to using the EMANE antenna profile in CORE, based on the example -EMANE Demo linked below. - -[EMANE Demo 6](https://github.com/adjacentlink/emane-tutorial/wiki/Demonstration-6) -for more specifics. - -## Demo Setup - -We will need to create some files in advance of starting this session. - -Create directory to place antenna profile files. - -```shell -mkdir /tmp/emane -``` - -Create `/tmp/emane/antennaprofile.xml` with the following contents. - -```xml - - - - - - - - - - -``` - -Create `/tmp/emane/antenna30dsector.xml` with the following contents. 
- -```xml - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -``` - -Create `/tmp/emane/blockageaft.xml` with the following contents. - -```xml - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -``` - -## Run Demo - -1. Select `Open...` within the GUI -1. Load `emane-demo-antenna.xml` -1. Click ![Start Button](../static/gui/start.png) -1. After startup completes, double click n1 to bring up the nodes terminal - -## Example Demo - -This demo will cover running an EMANE event service to feed in antenna, -location, and pathloss events to demonstrate how antenna profiles -can be used. - -### EMANE Event Dump - -On n1 lets dump EMANE events, so when we later run the EMANE event service -you can monitor when and what is sent. - -```shell -root@n1:/tmp/pycore.44917/n1.conf# emaneevent-dump -i ctrl0 -``` - -### Send EMANE Events - -On the host machine create the following to send EMANE events. - -!!! warning - - Make sure to set the `eventservicedevice` to the proper control - network value - -Create `eventservice.xml` with the following contents. - -```xml - - - - - - - -``` - -Create `eelgenerator.xml` with the following contents. - -```xml - - - - - - - - - - - -``` - -Create `scenario.eel` with the following contents. - -```shell -0.0 nem:1 antennaprofile 1,0.0,0.0 -0.0 nem:4 antennaprofile 2,0.0,0.0 -# -0.0 nem:1 pathloss nem:2,60 nem:3,60 nem:4,60 -0.0 nem:2 pathloss nem:3,60 nem:4,60 -0.0 nem:3 pathloss nem:4,60 -# -0.0 nem:1 location gps 40.025495,-74.315441,3.0 -0.0 nem:2 location gps 40.025495,-74.312501,3.0 -0.0 nem:3 location gps 40.023235,-74.315441,3.0 -0.0 nem:4 location gps 40.023235,-74.312501,3.0 -0.0 nem:4 velocity 180.0,0.0,10.0 -# -30.0 nem:1 velocity 20.0,0.0,10.0 -30.0 nem:1 orientation 0.0,0.0,10.0 -30.0 nem:1 antennaprofile 1,60.0,0.0 -30.0 nem:4 velocity 270.0,0.0,10.0 -# -60.0 nem:1 antennaprofile 1,105.0,0.0 -60.0 nem:4 antennaprofile 2,45.0,0.0 -# -90.0 nem:1 velocity 90.0,0.0,10.0 -90.0 nem:1 orientation 0.0,0.0,0.0 -90.0 nem:1 antennaprofile 1,45.0,0.0 -``` - -Run the EMANE event service, monitor what is output on n1 for events -dumped and see the link changes within the CORE GUI. - -```shell -emaneeventservice -l 3 eventservice.xml -``` - -### Stages - -The events sent will trigger 4 different states. - -* State 1 - * n2 and n3 see each other - * n4 and n3 are pointing away -* State 2 - * n2 and n3 see each other - * n1 and n2 see each other - * n4 and n3 see each other -* State 3 - * n2 and n3 see each other - * n4 and n3 are pointing at each other but blocked -* State 4 - * n2 and n3 see each other - * n4 and n3 see each other diff --git a/docs/emane/eel.md b/docs/emane/eel.md deleted file mode 100644 index c2dad86a..00000000 --- a/docs/emane/eel.md +++ /dev/null @@ -1,114 +0,0 @@ -# EMANE Emulation Event Log (EEL) Generator - -## Overview - -Introduction to using the EMANE event service and eel files to provide events. - -[EMANE Demo 1](https://github.com/adjacentlink/emane-tutorial/wiki/Demonstration-1) -for more specifics. - -## Run Demo - -1. Select `Open...` within the GUI -2. 
Load `emane-demo-eel.xml` -3. Click ![Start Button](../static/gui/start.png) -4. After startup completes, double click n1 to bring up the nodes terminal - -## Example Demo - -This demo will go over defining an EMANE event service and eel file to drive -an emane event service. - -### Viewing Events - -On n1 we will use the EMANE event dump utility to listen to events. - -```shell -root@n1:/tmp/pycore.46777/n1.conf# emaneevent-dump -i ctrl0 -``` - -### Sending Events - -On the host machine we will create the following files and start the -EMANE event service targeting the control network. - -!!! warning - - Make sure to set the `eventservicedevice` to the proper control - network value - -Create `eventservice.xml` with the following contents. - -```xml - - - - - - - -``` - -Next we will create the `eelgenerator.xml` file. The EEL Generator is actually -a plugin that loads sentence parsing plugins. The sentence parsing plugins know -how to convert certain sentences, in this case commeffect, location, velocity, -orientation, pathloss and antennaprofile sentences, into their corresponding -emane event equivalents. - -* commeffect:eelloadercommeffect:delta -* location,velocity,orientation:eelloaderlocation:delta -* pathloss:eelloaderpathloss:delta -* antennaprofile:eelloaderantennaprofile:delta - -These configuration items tell the EEL Generator which sentences to map to -which plugin and whether to issue delta or full updates. - -Create `eelgenerator.xml` with the following contents. - -```xml - - - - - - - - - - - -``` - -Finally, create `scenario.eel` with the following contents. - -```shell -0.0 nem:1 pathloss nem:2,90.0 -0.0 nem:2 pathloss nem:1,90.0 -0.0 nem:1 location gps 40.031075,-74.523518,3.000000 -0.0 nem:2 location gps 40.031165,-74.523412,3.000000 -``` - -Start the EMANE event service using the files created above. - -```shell -emaneeventservice eventservice.xml -l 3 -``` - -### Sent Events - -If we go back to look at our original terminal we will see the events logged -out to the terminal. - -```shell -root@n1:/tmp/pycore.46777/n1.conf# emaneevent-dump -i ctrl0 -[1601858142.917224] nem: 0 event: 100 len: 66 seq: 1 [Location] - UUID: 0af267be-17d3-4103-9f76-6f697e13bcec - (1, {'latitude': 40.031075, 'altitude': 3.0, 'longitude': -74.823518}) - (2, {'latitude': 40.031165, 'altitude': 3.0, 'longitude': -74.523412}) -[1601858142.917466] nem: 1 event: 101 len: 14 seq: 2 [Pathloss] - UUID: 0af267be-17d3-4103-9f76-6f697e13bcec - (2, {'forward': 90.0, 'reverse': 90.0}) -[1601858142.917889] nem: 2 event: 101 len: 14 seq: 3 [Pathloss] - UUID: 0af267be-17d3-4103-9f76-6f697e13bcec - (1, {'forward': 90.0, 'reverse': 90.0}) -``` diff --git a/docs/emane/files.md b/docs/emane/files.md deleted file mode 100644 index c04b0f6b..00000000 --- a/docs/emane/files.md +++ /dev/null @@ -1,167 +0,0 @@ -# EMANE XML Files - -## Overview - -Introduction to the XML files generated by CORE used to drive EMANE for -a given node. - -[EMANE Demo 0](https://github.com/adjacentlink/emane-tutorial/wiki/Demonstration-0) -may provide more helpful details. - -## Run Demo - -1. Select `Open...` within the GUI -2. Load `emane-demo-files.xml` -3. Click ![Start Button](../static/gui/start.png) -4. After startup completes, double click n1 to bring up the nodes terminal - -## Example Demo - -We will take a look at the files generated in the example demo provided. In this -case we are running the RF Pipe model. 
- -### Generated Files - -| Name | Description | -|-------------------------------------|------------------------------------------------------| -| \-platform.xml | configuration file for the emulator instances | -| \-nem.xml | configuration for creating a NEM | -| \-mac.xml | configuration for defining a NEMs MAC layer | -| \-phy.xml | configuration for defining a NEMs PHY layer | -| \-trans-virtual.xml | configuration when a virtual transport is being used | -| \-trans.xml | configuration when a raw transport is being used | - -### Listing File - -Below are the files within n1 after starting the demo session. - -```shell -root@n1:/tmp/pycore.46777/n1.conf# ls -eth0-mac.xml eth0-trans-virtual.xml n1-platform.xml var.log -eth0-nem.xml ipforward.sh quaggaboot.sh var.run -eth0-phy.xml n1-emane.log usr.local.etc.quagga var.run.quagga -``` - -### Platform XML - -The root configuration file used to run EMANE for a node is the platform xml file. -In this demo we are looking at `n1-platform.xml`. - -* lists all configuration values set for the platform -* The unique nem id given for each interface that EMANE will create for this node -* The path to the file(s) used for definition for a given nem - -```shell -root@n1:/tmp/pycore.46777/n1.conf# cat n1-platform.xml - - - - - - - - - - - - - - - - - - - - - - - - - -``` - -### NEM XML - -The nem definition will contain reference to the transport, mac, and phy xml -definitions being used for a given nem. - -```shell -root@n1:/tmp/pycore.46777/n1.conf# cat eth0-nem.xml - - - - - - - -``` - -### MAC XML - -MAC layer configuration settings would be found in this file. CORE will write -out all values, even if the value is a default value. - -```shell -root@n1:/tmp/pycore.46777/n1.conf# cat eth0-mac.xml - - - - - - - - - - - - - - -``` - -### PHY XML - -PHY layer configuration settings would be found in this file. CORE will write -out all values, even if the value is a default value. - -```shell -root@n1:/tmp/pycore.46777/n1.conf# cat eth0-phy.xml - - - - - - - - - - - - - - - - - - - - - - - - - - -``` - -### Transport XML - -```shell -root@n1:/tmp/pycore.46777/n1.conf# cat eth0-trans-virtual.xml - - - - - - -``` diff --git a/docs/emane/gpsd.md b/docs/emane/gpsd.md deleted file mode 100644 index eadf8af2..00000000 --- a/docs/emane/gpsd.md +++ /dev/null @@ -1,117 +0,0 @@ -# EMANE GPSD Integration - -## Overview - -Introduction to integrating gpsd in CORE with EMANE. - -[EMANE Demo 0](https://github.com/adjacentlink/emane-tutorial/wiki/Demonstration-0) -may provide more helpful details. - -!!! warning - - Requires installation of [gpsd](https://gpsd.gitlab.io/gpsd/index.html) - -## Run Demo - -1. Select `Open...` within the GUI -2. Load `emane-demo-gpsd.xml` -3. Click ![Start Button](../static/gui/start.png) -4. After startup completes, double click n1 to bring up the nodes terminal - -## Example Demo - -This section will cover how to run a gpsd location agent within EMANE, that will -write out locations to a pseudo terminal file. That file can be read in by the -gpsd server and make EMANE location events available to gpsd clients. - -### EMANE GPSD Event Daemon - -First create an `eventdaemon.xml` file on n1 with the following contents. - -```xml - - - - - - - -``` - -Then create the `gpsdlocationagent.xml` file on n1 with the following contents. - -```xml - - - - - -``` - -Start the EMANE event agent. This will facilitate feeding location events -out to a pseudo terminal file defined above. 
- -```shell -emaneeventd eventdaemon.xml -r -d -l 3 -f emaneeventd.log -``` - -Start gpsd, reading in the pseudo terminal file. - -```shell -gpsd -G -n -b $(cat gps.pty) -``` - -### EMANE EEL Event Daemon - -EEL Events will be played out from the actual host machine over the designated -control network interface. Create the following files in the same directory -somewhere on your host. - -!!! note - - Make sure the below eventservicedevice matches the control network - device being used on the host for EMANE - -Create `eventservice.xml` on the host machine with the following contents. - -```xml - - - - - - - -``` - -Create `eelgenerator.xml` on the host machine with the following contents. - -```xml - - - - - - - - - - - -``` - -Create `scenario.eel` file with the following contents. - -```shell -0.0 nem:1 location gps 40.031075,-74.523518,3.000000 -0.0 nem:2 location gps 40.031165,-74.523412,3.000000 -``` - -Start the EEL event service, which will send the events defined in the file above -over the control network to all EMANE nodes. These location events will be received -and provided to gpsd. This allows gpsd client to connect to and get gps locations. - -```shell -emaneeventservice eventservice.xml -l 3 -``` - diff --git a/docs/emane/precomputed.md b/docs/emane/precomputed.md deleted file mode 100644 index 4d0234ae..00000000 --- a/docs/emane/precomputed.md +++ /dev/null @@ -1,91 +0,0 @@ -# EMANE Procomputed - -## Overview - -Introduction to using the precomputed propagation model. - -[EMANE Demo 1](https://github.com/adjacentlink/emane-tutorial/wiki/Demonstration-1) -for more specifics. - -## Run Demo - -1. Select `Open...` within the GUI -2. Load `emane-demo-precomputed.xml` -3. Click ![Start Button](../static/gui/start.png) -4. After startup completes, double click n1 to bring up the nodes terminal - -## Example Demo - -This demo is using the RF Pipe model with the propagation model set to -precomputed. - -### Failed Pings - -Due to using precomputed and having not sent any pathloss events, the nodes -cannot ping each other yet. - -Open a terminal on n1. - -```shell -root@n1:/tmp/pycore.46777/n1.conf# ping 10.0.0.2 -connect: Network is unreachable -``` - -### EMANE Shell - -You can leverage `emanesh` to investigate why packets are being dropped. - -```shell -root@n1:/tmp/pycore.46777/n1.conf# emanesh localhost get table nems phy BroadcastPacketDropTable0 UnicastPacketDropTable0 -nem 1 phy BroadcastPacketDropTable0 -| NEM | Out-of-Band | Rx Sensitivity | Propagation Model | Gain Location | Gain Horizon | Gain Profile | Not FOI | Spectrum Clamp | Fade Location | Fade Algorithm | Fade Select | -| 2 | 0 | 0 | 169 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | - -nem 1 phy UnicastPacketDropTable0 -| NEM | Out-of-Band | Rx Sensitivity | Propagation Model | Gain Location | Gain Horizon | Gain Profile | Not FOI | Spectrum Clamp | Fade Location | Fade Algorithm | Fade Select | -``` - -In the example above we can see that the reason packets are being dropped is due to -the propogation model and that is because we have not issued any pathloss events. -You can run another command to validate if you have received any pathloss events. - -```shell -root@n1:/tmp/pycore.46777/n1.conf# emanesh localhost get table nems phy PathlossEventInfoTable -nem 1 phy PathlossEventInfoTable -| NEM | Forward Pathloss | Reverse Pathloss | -``` - -### Pathloss Events - -On the host we will send pathloss events from all nems to all other nems. - -!!! 
note - - Make sure properly specify the right control network device - -```shell -emaneevent-pathloss 1:2 90 -i -``` - -Now if we check for pathloss events on n2 we will see what was just sent above. - -```shell -root@n1:/tmp/pycore.46777/n1.conf# emanesh localhost get table nems phy PathlossEventInfoTable -nem 1 phy PathlossEventInfoTable -| NEM | Forward Pathloss | Reverse Pathloss | -| 2 | 90.0 | 90.0 -``` - -You should also now be able to ping n1 from n2. - -```shell -root@n1:/tmp/pycore.46777/n1.conf# ping -c 3 10.0.0.2 -PING 10.0.0.2 (10.0.0.2) 56(84) bytes of data. -64 bytes from 10.0.0.2: icmp_seq=1 ttl=64 time=3.06 ms -64 bytes from 10.0.0.2: icmp_seq=2 ttl=64 time=2.12 ms -64 bytes from 10.0.0.2: icmp_seq=3 ttl=64 time=1.99 ms - ---- 10.0.0.2 ping statistics --- -3 packets transmitted, 3 received, 0% packet loss, time 2001ms -rtt min/avg/max/mdev = 1.991/2.393/3.062/0.479 ms -``` diff --git a/docs/examplemodel.html b/docs/examplemodel.html new file mode 100644 index 00000000..35613c45 --- /dev/null +++ b/docs/examplemodel.html @@ -0,0 +1,239 @@ + + + + + examplemodel.py + + + +
+from core.emane import emanemanifest
+from core.emane import emanemodel
+
+
+# Custom EMANE Model
+class ExampleModel(emanemodel.EmaneModel):
+    # MAC Definition
+    # Defines the emane model name that will show up in the GUI.
+    name = "emane_example"
+    # Defines the mac library that the model will reference.
+    mac_library = "rfpipemaclayer"
+    # Defines the mac manifest file that will be parsed to obtain configuration options,
+    # that will be displayed within the GUI.
+    mac_xml = "/usr/share/emane/manifest/rfpipemaclayer.xml"
+    # Allows you to override options that are maintained within the manifest file above.
+    mac_defaults = {
+        "pcrcurveuri": "/usr/share/emane/xml/models/mac/rfpipe/rfpipepcr.xml",
+    }
+    # Parses the manifest file and converts configurations into core supported formats.
+    mac_config = emanemanifest.parse(mac_xml, mac_defaults)
+
+    # PHY Definition
+    # NOTE: phy configuration will default to the universal model as seen below and
+    # the below section does not have to be included.
+    # Defines the phy library that the model will reference, used if you need to
+    # provide a custom phy.
+    phy_library = None
+    # Defines the phy manifest file that will be parsed to obtain configuration options,
+    # that will be displayed within the GUI.
+    phy_xml = "/usr/share/emane/manifest/emanephy.xml"
+    # Allows you to override options that are maintained within the manifest file above
+    # or for the default universal model.
+    phy_defaults = {
+        "subid": "1",
+        "propagationmodel": "2ray",
+        "noisemode": "none",
+    }
+    # Parses the manifest file and converts configurations into core supported formats.
+    phy_config = emanemanifest.parse(phy_xml, phy_defaults)
+
+    # Custom override options
+    # NOTE: these options default to what's seen below and do not have to be included.
+    # Allows you to ignore options within phy/mac, used typically if you needed to add
+    # a custom option for display within the gui.
+    config_ignore = set()
+    # Allows you to override how options are displayed with the GUI, using the GUI format
+    # of "name:1-2|othername:3-4". This will be parsed into tabs, split by "|" and account
+    # for items based on the indexed numbers after ":" for including values in each tab.
+    config_groups_override = None
+    # Allows you to override the default config matrix list. This value by default is the
+    # mac_config + phy_config, in that order.
+    config_matrix_override = None
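Assuming the custom model above has been installed where the core-daemon loads custom EMANE models, a node could reference it by the name it defines ("emane_example"), following the same gRPC client pattern used for the built-in models in the gRPC examples later in this document. The sketch below is illustrative and is not part of the example file itself.

```python
from core.api.grpc import client
from core.api.grpc.wrappers import NodeType, Position

# connect to a running core-daemon and create a session
core = client.CoreGrpcClient()
core.connect()
session = core.create_session()

# an EMANE network node that uses the custom model by its "name" attribute
emane = session.add_node(
    1, _type=NodeType.EMANE, position=Position(x=200, y=200), emane="emane_example"
)

# override any option parsed from the MAC/PHY manifests; the value shown is the
# default from the model definition above, purely to illustrate the call shape
emane.set_emane_model("emane_example", {"propagationmodel": "2ray"})

core.start_session(session)
```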
diff --git a/docs/exampleservice.html b/docs/exampleservice.html
new file mode 100644
index 00000000..cddb18d4
--- /dev/null
+++ b/docs/exampleservice.html
@@ -0,0 +1,344 @@
+# sample.py
+# Sample user-defined service.
+from core.service import CoreService
+from core.service import ServiceMode
+
+
+# Custom CORE Service
+class MyService(CoreService):
+    # Service Attributes
+    # Name used as a unique ID for this service and is required, no spaces.
+    name = "MyService"
+    # Allows you to group services within the GUI under a common name.
+    group = "Utility"
+    # Executables this service depends on to function, if executable is not on the path, service will not be loaded.
+    executables = ()
+    # Services that this service depends on for startup, tuple of service names.
+    dependencies = ()
+    # Directories that this service will create within a node.
+    dirs = ()
+    # Files that this service will generate, without a full path this file goes in the node's directory.
+    # e.g. /tmp/pycore.12345/n1.conf/myfile
+    configs = ("myservice1.sh", "myservice2.sh")
+    # Commands used to start this service, any non-zero exit code will cause a failure.
+    startup = ("sh %s" % configs[0], "sh %s" % configs[1])
+    # Commands used to validate that a service was started, any non-zero exit code will cause a failure.
+    validate = ()
+    # Validation mode, used to determine startup success.
+    # * NON_BLOCKING - runs startup commands, and validates success with validation commands
+    # * BLOCKING - runs startup commands, and validates success with the startup commands themselves
+    # * TIMER - runs startup commands, and validates success by waiting for "validation_timer" alone
+    validation_mode = ServiceMode.NON_BLOCKING
+    # Time in seconds for a service to wait for validation, before determining success in TIMER/NON_BLOCKING modes.
+    validation_timer = 5
+    # Period in seconds to wait before retrying validation, only used in NON_BLOCKING mode.
+    validation_period = 0.5
+    # Shutdown commands to stop this service.
+    shutdown = ()
+
+    # On Load
+    @classmethod
+    def on_load(cls):
+        # Provides a way to run some arbitrary logic when the service is loaded, possibly to help
+        # facilitate dynamic settings for the environment.
+        pass
+
+    # Get Configs
+    @classmethod
+    def get_configs(cls, node):
+        # Provides a way to dynamically generate the config files from the node a service will run.
+        # Defaults to the class definition and can be left out entirely if not needed.
+        return cls.configs
+
+    # Generate Config
+    @classmethod
+    def generate_config(cls, node, filename):
+        # Returns a string representation for a file, given the node the service is starting on and the
+        # config filename that this information will be used for. This must be defined, if "configs" are defined.
+        cfg = "#!/bin/sh\n"
+        if filename == cls.configs[0]:
+            cfg += "# auto-generated by MyService (sample.py)\n"
+            for ifc in node.netifs():
+                cfg += 'echo "Node %s has interface %s"\n' % (node.name, ifc.name)
+        elif filename == cls.configs[1]:
+            cfg += "echo hello"
+        return cfg
+
+    # Get Startup
+    @classmethod
+    def get_startup(cls, node):
+        # Provides a way to dynamically generate the startup commands from the node a service will run.
+        # Defaults to the class definition and can be left out entirely if not needed.
+        return cls.startup
+
+    # Get Validate
+    @classmethod
+    def get_validate(cls, node):
+        # Provides a way to dynamically generate the validate commands from the node a service will run.
+        # Defaults to the class definition and can be left out entirely if not needed.
+        return cls.validate
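Given the `generate_config` logic above, the first file this service writes for a node named n1 with interfaces eth0 and eth1 would come out roughly like the following, placed in the node's directory (e.g. /tmp/pycore.12345/n1.conf/myservice1.sh); the node name and interface list are illustrative.

```shell
#!/bin/sh
# auto-generated by MyService (sample.py)
echo "Node n1 has interface eth0"
echo "Node n1 has interface eth1"
```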
    + diff --git a/docs/grpc.md b/docs/grpc.md deleted file mode 100644 index 3266a57d..00000000 --- a/docs/grpc.md +++ /dev/null @@ -1,411 +0,0 @@ -* Table of Contents - -## Overview - -[gRPC](https://grpc.io/) is a client/server API for interfacing with CORE -and used by the python GUI for driving all functionality. It is dependent -on having a running `core-daemon` instance to be leveraged. - -A python client can be created from the raw generated grpc files included -with CORE or one can leverage a provided gRPC client that helps encapsulate -some functionality to try and help make things easier. - -## Python Client - -A python client wrapper is provided at -[CoreGrpcClient](https://github.com/coreemu/core/blob/master/daemon/core/api/grpc/client.py) -to help provide some conveniences when using the API. - -### Client HTTP Proxy - -Since gRPC is HTTP2 based, proxy configurations can cause issues. By default, -the client disables proxy support to avoid issues when a proxy is present. -You can enable and properly account for this issue when needed. - -## Proto Files - -Proto files are used to define the API and protobuf messages that are used for -interfaces with this API. - -They can be found -[here](https://github.com/coreemu/core/tree/master/daemon/proto/core/api/grpc) -to see the specifics of -what is going on and response message values that would be returned. - -## Examples - -### Node Models - -When creating nodes of type `NodeType.DEFAULT` these are the default models -and the services they map to. - -* mdr - * zebra, OSPFv3MDR, IPForward -* PC - * DefaultRoute -* router - * zebra, OSPFv2, OSPFv3, IPForward -* host - * DefaultRoute, SSH - -### Interface Helper - -There is an interface helper class that can be leveraged for convenience -when creating interface data for nodes. Alternatively one can manually create -a `core.api.grpc.wrappers.Interface` class instead with appropriate information. - -Manually creating gRPC client interface: - -```python -from core.api.grpc.wrappers import Interface - -# id is optional and will set to the next available id -# name is optional and will default to eth -# mac is optional and will result in a randomly generated mac -iface = Interface( - id=0, - name="eth0", - ip4="10.0.0.1", - ip4_mask=24, - ip6="2001::", - ip6_mask=64, -) -``` - -Leveraging the interface helper class: - -```python -from core.api.grpc import client - -iface_helper = client.InterfaceHelper(ip4_prefix="10.0.0.0/24", ip6_prefix="2001::/64") -# node_id is used to get an ip4/ip6 address indexed from within the above prefixes -# iface_id is required and used exactly for that -# name is optional and would default to eth -# mac is optional and will result in a randomly generated mac -iface_data = iface_helper.create_iface( - node_id=1, iface_id=0, name="eth0", mac="00:00:00:00:aa:00" -) -``` - -### Listening to Events - -Various events that can occur within a session can be listened to. 
- -Event types: - -* session - events for changes in session state and mobility start/stop/pause -* node - events for node movements and icon changes -* link - events for link configuration changes and wireless link add/delete -* config - configuration events when legacy gui joins a session -* exception - alert/error events -* file - file events when the legacy gui joins a session - -```python -from core.api.grpc import client -from core.api.grpc.wrappers import EventType - - -def event_listener(event): - print(event) - - -# create grpc client and connect -core = client.CoreGrpcClient() -core.connect() - -# add session -session = core.create_session() - -# provide no events to listen to all events -core.events(session.id, event_listener) - -# provide events to listen to specific events -core.events(session.id, event_listener, [EventType.NODE]) -``` - -### Configuring Links - -Links can be configured at the time of creation or during runtime. - -Currently supported configuration options: - -* bandwidth (bps) -* delay (us) -* duplicate (%) -* jitter (us) -* loss (%) - -```python -from core.api.grpc import client -from core.api.grpc.wrappers import LinkOptions, Position - -# interface helper -iface_helper = client.InterfaceHelper(ip4_prefix="10.0.0.0/24", ip6_prefix="2001::/64") - -# create grpc client and connect -core = client.CoreGrpcClient() -core.connect() - -# add session -session = core.create_session() - -# create nodes -position = Position(x=100, y=100) -node1 = session.add_node(1, position=position) -position = Position(x=300, y=100) -node2 = session.add_node(2, position=position) - -# configuring when creating a link -options = LinkOptions( - bandwidth=54_000_000, - delay=5000, - dup=5, - loss=5.5, - jitter=0, -) -iface1 = iface_helper.create_iface(node1.id, 0) -iface2 = iface_helper.create_iface(node2.id, 0) -link = session.add_link(node1=node1, node2=node2, iface1=iface1, iface2=iface2) - -# configuring during runtime -link.options.loss = 10.0 -core.edit_link(session.id, link) -``` - -### Peer to Peer Example - -```python -# required imports -from core.api.grpc import client -from core.api.grpc.wrappers import Position - -# interface helper -iface_helper = client.InterfaceHelper(ip4_prefix="10.0.0.0/24", ip6_prefix="2001::/64") - -# create grpc client and connect -core = client.CoreGrpcClient() -core.connect() - -# add session -session = core.create_session() - -# create nodes -position = Position(x=100, y=100) -node1 = session.add_node(1, position=position) -position = Position(x=300, y=100) -node2 = session.add_node(2, position=position) - -# create link -iface1 = iface_helper.create_iface(node1.id, 0) -iface2 = iface_helper.create_iface(node2.id, 0) -session.add_link(node1=node1, node2=node2, iface1=iface1, iface2=iface2) - -# start session -core.start_session(session) -``` - -### Switch/Hub Example - -```python -# required imports -from core.api.grpc import client -from core.api.grpc.wrappers import NodeType, Position - -# interface helper -iface_helper = client.InterfaceHelper(ip4_prefix="10.0.0.0/24", ip6_prefix="2001::/64") - -# create grpc client and connect -core = client.CoreGrpcClient() -core.connect() - -# add session -session = core.create_session() - -# create nodes -position = Position(x=200, y=200) -switch = session.add_node(1, _type=NodeType.SWITCH, position=position) -position = Position(x=100, y=100) -node1 = session.add_node(2, position=position) -position = Position(x=300, y=100) -node2 = session.add_node(3, position=position) - -# create links -iface1 = 
iface_helper.create_iface(node1.id, 0) -session.add_link(node1=node1, node2=switch, iface1=iface1) -iface1 = iface_helper.create_iface(node2.id, 0) -session.add_link(node1=node2, node2=switch, iface1=iface1) - -# start session -core.start_session(session) -``` - -### WLAN Example - -```python -# required imports -from core.api.grpc import client -from core.api.grpc.wrappers import NodeType, Position - -# interface helper -iface_helper = client.InterfaceHelper(ip4_prefix="10.0.0.0/24", ip6_prefix="2001::/64") - -# create grpc client and connect -core = client.CoreGrpcClient() -core.connect() - -# add session -session = core.create_session() - -# create nodes -position = Position(x=200, y=200) -wlan = session.add_node(1, _type=NodeType.WIRELESS_LAN, position=position) -position = Position(x=100, y=100) -node1 = session.add_node(2, model="mdr", position=position) -position = Position(x=300, y=100) -node2 = session.add_node(3, model="mdr", position=position) - -# create links -iface1 = iface_helper.create_iface(node1.id, 0) -session.add_link(node1=node1, node2=wlan, iface1=iface1) -iface1 = iface_helper.create_iface(node2.id, 0) -session.add_link(node1=node2, node2=wlan, iface1=iface1) - -# set wlan config using a dict mapping currently -# support values as strings -wlan.set_wlan( - { - "range": "280", - "bandwidth": "55000000", - "delay": "6000", - "jitter": "5", - "error": "5", - } -) - -# start session -core.start_session(session) -``` - -### EMANE Example - -For EMANE you can import and use one of the existing models and -use its name for configuration. - -Current models: - -* core.emane.ieee80211abg.EmaneIeee80211abgModel -* core.emane.rfpipe.EmaneRfPipeModel -* core.emane.tdma.EmaneTdmaModel -* core.emane.bypass.EmaneBypassModel - -Their configurations options are driven dynamically from parsed EMANE manifest files -from the installed version of EMANE. - -Options and their purpose can be found at the [EMANE Wiki](https://github.com/adjacentlink/emane/wiki). - -If configuring EMANE global settings or model mac/phy specific settings, any value not provided -will use the defaults. When no configuration is used, the defaults are used. 
- -```python -# required imports -from core.api.grpc import client -from core.api.grpc.wrappers import NodeType, Position -from core.emane.models.ieee80211abg import EmaneIeee80211abgModel - -# interface helper -iface_helper = client.InterfaceHelper(ip4_prefix="10.0.0.0/24", ip6_prefix="2001::/64") - -# create grpc client and connect -core = client.CoreGrpcClient() -core.connect() - -# add session -session = core.create_session() - -# create nodes -position = Position(x=200, y=200) -emane = session.add_node( - 1, _type=NodeType.EMANE, position=position, emane=EmaneIeee80211abgModel.name -) -position = Position(x=100, y=100) -node1 = session.add_node(2, model="mdr", position=position) -position = Position(x=300, y=100) -node2 = session.add_node(3, model="mdr", position=position) - -# create links -iface1 = iface_helper.create_iface(node1.id, 0) -session.add_link(node1=node1, node2=emane, iface1=iface1) -iface1 = iface_helper.create_iface(node2.id, 0) -session.add_link(node1=node2, node2=emane, iface1=iface1) - -# setting emane specific emane model configuration -emane.set_emane_model(EmaneIeee80211abgModel.name, { - "eventservicettl": "2", - "unicastrate": "3", -}) - -# start session -core.start_session(session) -``` - -EMANE Model Configuration: - -```python -# emane network specific config, set on an emane node -# this setting applies to all nodes connected -emane.set_emane_model(EmaneIeee80211abgModel.name, {"unicastrate": "3"}) - -# node specific config for an individual node connected to an emane network -node.set_emane_model(EmaneIeee80211abgModel.name, {"unicastrate": "3"}) - -# node interface specific config for an individual node connected to an emane network -node.set_emane_model(EmaneIeee80211abgModel.name, {"unicastrate": "3"}, iface_id=0) -``` - -## Configuring a Service - -Services help generate and run bash scripts on nodes for a given purpose. - -Configuring the files of a service results in a specific hard coded script being -generated, instead of the default scripts, that may leverage dynamic generation. - -The following features can be configured for a service: - -* files - files that will be generated -* directories - directories that will be mounted unique to the node -* startup - commands to run start a service -* validate - commands to run to validate a service -* shutdown - commands to run to stop a service - -Editing service properties: - -```python -# configure a service, for a node, for a given session -node.service_configs[service_name] = NodeServiceData( - configs=["file1.sh", "file2.sh"], - directories=["/etc/node"], - startup=["bash file1.sh"], - validate=[], - shutdown=[], -) -``` - -When editing a service file, it must be the name of `config` -file that the service will generate. - -Editing a service file: - -```python -# to edit the contents of a generated file you can specify -# the service, the file name, and its contents -file_configs = node.service_file_configs.setdefault(service_name, {}) -file_configs[file_name] = "echo hello world" -``` - -## File Examples - -File versions of the network examples can be found -[here](https://github.com/coreemu/core/tree/master/package/examples/grpc). -These examples will create a session using the gRPC API when the core-daemon is running. - -You can then switch to and attach to these sessions using either of the CORE GUIs. 
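As a worked example tying the service-configuration pieces above together, the sketch below creates a node and overrides one generated file for the MyService sample service documented earlier in this document. It assumes that service is available to the core-daemon and assigned to the node (for example through a custom node type or the GUI), and it only uses calls already shown in the examples above.

```python
from core.api.grpc import client
from core.api.grpc.wrappers import Position

# create grpc client and connect
core = client.CoreGrpcClient()
core.connect()

# add session and a node that will run the service
session = core.create_session()
node = session.add_node(1, position=Position(x=100, y=100))

# override the contents of one file the service would otherwise generate
file_configs = node.service_file_configs.setdefault("MyService", {})
file_configs["myservice1.sh"] = "#!/bin/sh\necho hello world"

# start session
core.start_session(session)
```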
diff --git a/docs/gui.md b/docs/gui.md deleted file mode 100644 index c296ac18..00000000 --- a/docs/gui.md +++ /dev/null @@ -1,497 +0,0 @@ -# CORE GUI - -![](static/core-gui.png) - -## Overview - -The GUI is used to draw nodes and network devices on a canvas, linking them -together to create an emulated network session. - -After pressing the start button, CORE will proceed through these phases, -staying in the **runtime** phase. After the session is stopped, CORE will -proceed to the **data collection** phase before tearing down the emulated -state. - -CORE can be customized to perform any action at each state. See the -**Hooks...** entry on the [Session Menu](#session-menu) for details about -when these session states are reached. - -## Prerequisites - -Beyond installing CORE, you must have the CORE daemon running. This is done -on the command line with either systemd or sysv. - -```shell -# systemd service -sudo systemctl daemon-reload -sudo systemctl start core-daemon - -# direct invocation -sudo core-daemon -``` - -## GUI Files - -The GUI will create a directory in your home directory on first run called -~/.coregui. This directory will help layout various files that the GUI may use. - -* .coregui/ - * backgrounds/ - * place backgrounds used for display in the GUI - * custom_emane/ - * place to keep custom emane models to use with the core-daemon - * custom_services/ - * place to keep custom services to use with the core-daemon - * icons/ - * icons the GUI uses along with customs icons desired - * mobility/ - * place to keep custom mobility files - * scripts/ - * place to keep core related scripts - * xmls/ - * place to keep saved session xml files - * gui.log - * log file when running the gui, look here when issues occur for exceptions etc - * config.yaml - * configuration file used to save/load various gui related settings (custom nodes, layouts, addresses, etc) - -## Modes of Operation - -The CORE GUI has two primary modes of operation, **Edit** and **Execute** -modes. Running the GUI, by typing **core-gui** with no options, starts in -Edit mode. Nodes are drawn on a blank canvas using the toolbar on the left -and configured from right-click menus or by double-clicking them. The GUI -does not need to be run as root. - -Once editing is complete, pressing the green **Start** button instantiates -the topology and enters Execute mode. In execute mode, -the user can interact with the running emulated machines by double-clicking or -right-clicking on them. The editing toolbar disappears and is replaced by an -execute toolbar, which provides tools while running the emulation. Pressing -the red **Stop** button will destroy the running emulation and return CORE -to Edit mode. - -Once the emulation is running, the GUI can be closed, and a prompt will appear -asking if the emulation should be terminated. The emulation may be left -running and the GUI can reconnect to an existing session at a later time. - -The GUI can be run as a normal user on Linux. - -The GUI currently provides the following options on startup. 
- -```shell -usage: core-gui [-h] [-l {DEBUG,INFO,WARNING,ERROR,CRITICAL}] [-p] - [-s SESSION] [--create-dir] - -CORE Python GUI - -optional arguments: - -h, --help show this help message and exit - -l {DEBUG,INFO,WARNING,ERROR,CRITICAL}, --level {DEBUG,INFO,WARNING,ERROR,CRITICAL} - logging level - -p, --proxy enable proxy - -s SESSION, --session SESSION - session id to join - --create-dir create gui directory and exit -``` - -## Toolbar - -The toolbar is a row of buttons that runs vertically along the left side of the -CORE GUI window. The toolbar changes depending on the mode of operation. - -### Editing Toolbar - -When CORE is in Edit mode (the default), the vertical Editing Toolbar exists on -the left side of the CORE window. Below are brief descriptions for each toolbar -item, starting from the top. Most of the tools are grouped into related -sub-menus, which appear when you click on their group icon. - -| Icon | Name | Description | -|----------------------------|----------------|----------------------------------------------------------------------------------------| -| ![](static/gui/select.png) | Selection Tool | Tool for selecting, moving, configuring nodes. | -| ![](static/gui/start.png) | Start Button | Starts Execute mode, instantiates the emulation. | -| ![](static/gui/link.png) | Link | Allows network links to be drawn between two nodes by clicking and dragging the mouse. | - -### CORE Nodes - -These nodes will create a new node container and run associated services. - -| Icon | Name | Description | -|----------------------------|---------|------------------------------------------------------------------------------| -| ![](static/gui/router.png) | Router | Runs Quagga OSPFv2 and OSPFv3 routing to forward packets. | -| ![](static/gui/host.png) | Host | Emulated server machine having a default route, runs SSH server. | -| ![](static/gui/pc.png) | PC | Basic emulated machine having a default route, runs no processes by default. | -| ![](static/gui/mdr.png) | MDR | Runs Quagga OSPFv3 MDR routing for MANET-optimized routing. | -| ![](static/gui/router.png) | PRouter | Physical router represents a real testbed machine. | - -### Network Nodes - -These nodes are mostly used to create a Linux bridge that serves the -purpose described below. - -| Icon | Name | Description | -|-------------------------------|--------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| ![](static/gui/hub.png) | Hub | Ethernet hub forwards incoming packets to every connected node. | -| ![](static/gui/lanswitch.png) | Switch | Ethernet switch intelligently forwards incoming packets to attached hosts using an Ethernet address hash table. | -| ![](static/gui/wlan.png) | Wireless LAN | When routers are connected to this WLAN node, they join a wireless network and an antenna is drawn instead of a connecting line; the WLAN node typically controls connectivity between attached wireless nodes based on the distance between them. | -| ![](static/gui/rj45.png) | RJ45 | RJ45 Physical Interface Tool, emulated nodes can be linked to real physical interfaces; using this tool, real networks and devices can be physically connected to the live-running emulation. | -| ![](static/gui/tunnel.png) | Tunnel | Tool allows connecting together more than one CORE emulation using GRE tunnels. 
| - -### Annotation Tools - -| Icon | Name | Description | -|-------------------------------|-----------|---------------------------------------------------------------------| -| ![](static/gui/marker.png) | Marker | For drawing marks on the canvas. | -| ![](static/gui/oval.png) | Oval | For drawing circles on the canvas that appear in the background. | -| ![](static/gui/rectangle.png) | Rectangle | For drawing rectangles on the canvas that appear in the background. | -| ![](static/gui/text.png) | Text | For placing text captions on the canvas. | - -### Execution Toolbar - -When the Start button is pressed, CORE switches to Execute mode, and the Edit -toolbar on the left of the CORE window is replaced with the Execution toolbar -Below are the items on this toolbar, starting from the top. - -| Icon | Name | Description | -|----------------------------|----------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| ![](static/gui/stop.png) | Stop Button | Stops Execute mode, terminates the emulation, returns CORE to edit mode. | -| ![](static/gui/select.png) | Selection Tool | In Execute mode, the Selection Tool can be used for moving nodes around the canvas, and double-clicking on a node will open a shell window for that node; right-clicking on a node invokes a pop-up menu of run-time options for that node. | -| ![](static/gui/marker.png) | Marker | For drawing freehand lines on the canvas, useful during demonstrations; markings are not saved. | -| ![](static/gui/run.png) | Run Tool | This tool allows easily running a command on all or a subset of all nodes. A list box allows selecting any of the nodes. A text entry box allows entering any command. The command should return immediately, otherwise the display will block awaiting response. The *ping* command, for example, with no parameters, is not a good idea. The result of each command is displayed in a results box. The first occurrence of the special text "NODE" will be replaced with the node name. The command will not be attempted to run on nodes that are not routers, PCs, or hosts, even if they are selected. | - -## Menu - -The menubar runs along the top of the CORE GUI window and provides access to a -variety of features. Some of the menus are detachable, such as the *Widgets* -menu, by clicking the dashed line at the top. - -### File Menu - -The File menu contains options for saving and opening saved sessions. - -| Option | Description | -|-----------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| New Session | This starts a new session with an empty canvas. | -| Save | Saves the current topology. If you have not yet specified a file name, the Save As dialog box is invoked. 
| -| Save As | Invokes the Save As dialog box for selecting a new **.xml** file for saving the current configuration in the XML file. | -| Open | Invokes the File Open dialog box for selecting a new XML file to open. | -| Recently used files | Above the Quit menu command is a list of recently use files, if any have been opened. You can clear this list in the Preferences dialog box. You can specify the number of files to keep in this list from the Preferences dialog. Click on one of the file names listed to open that configuration file. | -| Execute Python Script | Invokes a File Open dialog box for selecting a Python script to run and automatically connect to. After a selection is made, a Python Script Options dialog box is invoked to allow for command-line options to be added. The Python script must create a new CORE Session and add this session to the daemon's list of sessions in order for this to work. | -| Quit | The Quit command should be used to exit the CORE GUI. CORE may prompt for termination if you are currently in Execute mode. Preferences and the recently-used files list are saved. | - -### Edit Menu - -| Option | Description | -|--------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Preferences | Invokes the Preferences dialog box. | -| Custom Nodes | Custom node creation dialog box. | -| Undo | (Disabled) Attempts to undo the last edit in edit mode. | -| Redo | (Disabled) Attempts to redo an edit that has been undone. | -| Cut, Copy, Paste, Delete | Used to cut, copy, paste, and delete a selection. When nodes are pasted, their node numbers are automatically incremented, and existing links are preserved with new IP addresses assigned. Services and their customizations are copied to the new node, but care should be taken as node IP addresses have changed with possibly old addresses remaining in any custom service configurations. Annotations may also be copied and pasted. | - -### Canvas Menu - -The canvas menu provides commands related to the editing canvas. - -| Option | Description | -|------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Size/scale | Invokes a Canvas Size and Scale dialog that allows configuring the canvas size, scale, and geographic reference point. The size controls allow changing the width and height of the current canvas, in pixels or meters. The scale allows specifying how many meters are equivalent to 100 pixels. The reference point controls specify the latitude, longitude, and altitude reference point used to convert between geographic and Cartesian coordinate systems. 
By clicking the *Save as default* option, all new canvases will be created with these properties. The default canvas size can also be changed in the Preferences dialog box. | -| Wallpaper | Used for setting the canvas background image. | - -### View Menu - -The View menu features items for toggling on and off their display on the canvas. - -| Option | Description | -|-----------------|-----------------------------------| -| Interface Names | Display interface names on links. | -| IPv4 Addresses | Display IPv4 addresses on links. | -| IPv6 Addresses | Display IPv6 addresses on links. | -| Node Labels | Display node names. | -| Link Labels | Display link labels. | -| Annotations | Display annotations. | -| Canvas Grid | Display the canvas grid. | - -### Tools Menu - -The tools menu lists different utility functions. - -| Option | Description | -|---------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Find | Display find dialog used for highlighting a node on the canvas. | -| Auto Grid | Automatically layout nodes in a grid. | -| IP addresses | Invokes the IP Addresses dialog box for configuring which IPv4/IPv6 prefixes are used when automatically addressing new interfaces. | -| MAC addresses | Invokes the MAC Addresses dialog box for configuring the starting number used as the lowest byte when generating each interface MAC address. This value should be changed when tunneling between CORE emulations to prevent MAC address conflicts. | - -### Widgets Menu - -Widgets are GUI elements that allow interaction with a running emulation. -Widgets typically automate the running of commands on emulated nodes to report -status information of some type and display this on screen. - -#### Periodic Widgets - -These Widgets are those available from the main *Widgets* menu. More than one -of these Widgets may be run concurrently. An event loop fires once every second -that the emulation is running. If one of these Widgets is enabled, its periodic -routine will be invoked at this time. Each Widget may have a configuration -dialog box which is also accessible from the *Widgets* menu. - -Here are some standard widgets: - -* **Adjacency** - displays router adjacency states for Quagga's OSPFv2 and OSPFv3 - routing protocols. A line is drawn from each router halfway to the router ID - of an adjacent router. The color of the line is based on the OSPF adjacency - state such as Two-way or Full. To learn about the different colors, see the - *Configure Adjacency...* menu item. The **vtysh** command is used to - dump OSPF neighbor information. - Only half of the line is drawn because each - router may be in a different adjacency state with respect to the other. -* **Throughput** - displays the kilobits-per-second throughput above each link, - using statistics gathered from each link. If the throughput exceeds a certain - threshold, the link will become highlighted. For wireless nodes which broadcast - data to all nodes in range, the throughput rate is displayed next to the node and - the node will become circled if the threshold is exceeded. - -#### Observer Widgets - -These Widgets are available from the **Observer Widgets** submenu of the -**Widgets** menu, and from the Widgets Tool on the toolbar. Only one Observer Widget may -be used at a time. 
Mouse over a node while the session is running to pop up -an informational display about that node. - -Available Observer Widgets include IPv4 and IPv6 routing tables, socket -information, list of running processes, and OSPFv2/v3 neighbor information. - -Observer Widgets may be edited by the user and rearranged. Choosing -**Widgets->Observer Widgets->Edit Observers** from the Observer Widget menu will -invoke the Observer Widgets dialog. A list of Observer Widgets is displayed along -with up and down arrows for rearranging the list. Controls are available for -renaming each widget, for changing the command that is run during mouse over, and -for adding and deleting items from the list. Note that specified commands should -return immediately to avoid delays in the GUI display. Changes are saved to a -**config.yaml** file in the CORE configuration directory. - -### Session Menu - -The Session Menu has entries for starting, stopping, and managing sessions, -in addition to global options such as node types, comments, hooks, servers, -and options. - -| Option | Description | -|----------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Sessions | Invokes the CORE Sessions dialog box containing a list of active CORE sessions in the daemon. Basic session information such as name, node count, start time, and a thumbnail are displayed. This dialog allows connecting to different sessions, shutting them down, or starting a new session. | -| Servers | Invokes the CORE emulation servers dialog for configuring. | -| Options | Presents per-session options, such as the IPv4 prefix to be used, if any, for a control network the ability to preserve the session directory; and an on/off switch for SDT3D support. | -| Hooks | Invokes the CORE Session Hooks window where scripts may be configured for a particular session state. The session states are defined in the [table](#session-states) below. The top of the window has a list of configured hooks, and buttons on the bottom left allow adding, editing, and removing hook scripts. The new or edit button will open a hook script editing window. A hook script is a shell script invoked on the host (not within a virtual node). | - -#### Session States - -| State | Description | -|---------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Definition | Used by the GUI to tell the backend to clear any state. | -| Configuration | When the user presses the *Start* button, node, link, and other configuration data is sent to the backend. This state is also reached when the user customizes a service. | -| Instantiation | After configuration data has been sent, just before the nodes are created. | -| Runtime | All nodes and networks have been built and are running. (This is the same state at which the previously-named *global experiment script* was run.) | -| Datacollect | The user has pressed the *Stop* button, but before services have been stopped and nodes have been shut down. 
This is a good time to collect log files and other data from the nodes. |
-| Shutdown | All nodes and networks have been shut down and destroyed. |
-
-### Help Menu
-
-| Option | Description |
-|--------|-------------|
-| CORE Github (www) | Link to the CORE GitHub page. |
-| CORE Documentation (www) | Link to the CORE Documentation page. |
-| About | Invokes the About dialog box for viewing version information. |
-
-## Building Sample Networks
-
-### Wired Networks
-
-Wired networks are created using the **Link Tool** to draw a link between two
-nodes. This automatically draws a red line representing an Ethernet link and
-creates new interfaces on network-layer nodes.
-
-Double-click on the link to invoke the **link configuration** dialog box. Here
-you can change the Bandwidth, Delay, Loss, and Duplicate
-rate parameters for that link. You can also modify the color and width of the
-link, affecting its display.
-
-Link-layer nodes are provided for modeling wired networks. These do not create
-a separate network stack when instantiated, but are implemented using Linux bridging.
-These are the hub, switch, and wireless LAN nodes. The hub copies each packet from
-the incoming link to every connected link, while the switch behaves more like an
-Ethernet switch and keeps track of the Ethernet address of the connected peer,
-forwarding unicast traffic only to the appropriate ports.
-
-The wireless LAN (WLAN) is covered in the next section.
-
-### Wireless Networks
-
-Wireless networks allow moving nodes around to impact the connectivity between them. The connection between a
-pair of nodes is stronger when the nodes are closer together and weaker when they are further apart.
-CORE offers several levels of wireless emulation fidelity, depending on modeling needs and available
-hardware.
-
-* WLAN Node
-    * uses set bandwidth, delay, and loss
-    * links are enabled or disabled based on a set range
-    * uses the least CPU when moving, but nothing extra when not moving
-* Wireless Node
-    * uses set bandwidth, delay, and initial loss
-    * loss dynamically changes based on distance between nodes, which can be configured with range parameters
-    * links are enabled or disabled based on a set range
-    * uses more CPU to calculate loss for every movement, but nothing extra when not moving
-* EMANE Node
-    * uses a physical layer model to account for signal propagation, antenna profile effects and interference
-      sources in order to provide a realistic environment for wireless experimentation
-    * uses the most CPU for every packet, as complex calculations are used for fidelity
-    * See [Wiki](https://github.com/adjacentlink/emane/wiki) for details on general EMANE usage
-    * See [CORE EMANE](emane.md) for details on using EMANE in CORE
-
-| Model | Type | Supported Platform(s) | Fidelity | Description |
-|-------|------|-----------------------|----------|-------------|
-| WLAN | On/Off | Linux | Low | Ethernet bridging with nftables |
-| Wireless | On/Off | Linux | Medium | Ethernet bridging with nftables |
-| EMANE | RF | Linux | High | TAP device connected to EMANE emulator with pluggable MAC and PHY radio types |
-
-#### Example WLAN Network Setup
-
-To quickly build a wireless network, you can first place several router nodes
-onto the canvas.
If you have the Quagga MDR software installed, it is
-recommended that you use the **mdr** node type for reduced routing overhead. Next
-choose the **WLAN** from the **Link-layer nodes** submenu. First set the
-desired WLAN parameters by double-clicking the cloud icon. Then you can link
-all selected nodes by right-clicking on the WLAN and choosing **Link to Selected**.
-
-Linking a router to the WLAN causes a small antenna to appear, but no red link
-line is drawn. Routers can have multiple wireless links and both wireless and
-wired links (however, you will need to manually configure route
-redistribution.) The mdr node type will generate a routing configuration that
-enables OSPFv3 with MANET extensions. This is a Boeing-developed extension to
-Quagga's OSPFv3 that reduces flooding overhead and optimizes the flooding
-procedure for mobile ad-hoc (MANET) networks.
-
-The default configuration of the WLAN is set to use the basic range model. Having this model
-selected causes **core-daemon** to calculate the distance between nodes based
-on screen pixels. A numeric range in screen pixels is set for the wireless
-network using the **Range** slider. When two wireless nodes are within range of
-each other, a green line is drawn between them and they are linked. Two
-wireless nodes that are farther than the range pixels apart are not linked.
-During Execute mode, users may move wireless nodes around by clicking and
-dragging them, and wireless links will be dynamically made or broken.
-
-### Running Commands within Nodes
-
-You can double click a node to bring up a terminal for running shell commands. Within
-the terminal you can run anything you like, and those commands will be run in the context of the node.
-For standard CORE nodes, the only thing to keep in mind is that you are using the host file
-system, so anything you change or do can impact the greater system. By default, your terminal
-will open within the node's home directory for the running session, but this directory is temporary and
-will be removed when the session is stopped.
-
-You can also launch GUI based applications from within standard CORE nodes, but you need to
-enable xhost access to root.
-
-```shell
-xhost +local:root
-```
-
-### Mobility Scripting
-
-CORE has a few ways to script mobility.
-
-| Option | Description |
-|--------|-------------|
-| ns-2 script | The script specifies either absolute positions or waypoints with a velocity. Locations are given with Cartesian coordinates. |
-| gRPC API | An external entity can move nodes by leveraging the gRPC API; see the sketch below. |
-| EMANE events | See [EMANE](emane.md) for details on using EMANE scripts to move nodes around. Location information is typically given as latitude, longitude, and altitude. |
-
-For the first method, you can create a mobility script using a text
-editor, or using a tool such as [BonnMotion](http://net.cs.uni-bonn.de/wg/cs/applications/bonnmotion/), and associate
-the script with one of the wireless LAN nodes
-using the WLAN configuration dialog box. Click the *ns-2 mobility script...*
-button, and set the *mobility script file* field in the resulting *ns2script*
-configuration dialog.
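-
-For the second method, an external program can reposition nodes at runtime through the gRPC API. The
-snippet below is a minimal, illustrative sketch using CORE's Python gRPC client; it assumes an already
-running session, uses placeholder session and node ids, and assumes a `move_node` style call on the
-client, so check the gRPC documentation for your CORE version for the exact names and signatures.
-
-```python
-from core.api.grpc import client
-from core.api.grpc.wrappers import Position
-
-# connect to the core-daemon gRPC API (default address localhost:50051)
-core = client.CoreGrpcClient()
-core.connect()
-
-# placeholder ids for an already running session and a wireless node within it
-session_id = 1
-node_id = 3
-
-# reposition the node on the canvas; WLAN/wireless links are re-evaluated
-# against the configured range as the node moves (assumed move_node call)
-core.move_node(session_id, node_id, position=Position(x=350.0, y=200.0))
-```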
-
-For the ns-2 method, here is an example of creating a BonnMotion script for 10 nodes:
-
-```shell
-bm -f sample RandomWaypoint -n 10 -d 60 -x 1000 -y 750
-bm NSFile -f sample
-# use the resulting 'sample.ns_movements' file in CORE
-```
-
-When the Execute mode is started and one of the WLAN nodes has a mobility
-script, a mobility script window will appear. This window contains controls for
-starting, stopping, and resetting the running time for the mobility script. The
-**loop** checkbox causes the script to play continuously. The **resolution** text
-box contains the number of milliseconds between each timer event; lower values
-cause the mobility to appear smoother but consume greater CPU time.
-
-The format of an ns-2 mobility script looks like:
-
-```shell
-# nodes: 3, max time: 35.000000, max x: 600.00, max y: 600.00
-$node_(2) set X_ 144.0
-$node_(2) set Y_ 240.0
-$node_(2) set Z_ 0.00
-$ns_ at 1.00 "$node_(2) setdest 130.0 280.0 15.0"
-```
-
-The first three lines set an initial position for node 2. The last line in the
-above example causes node 2 to move towards the destination **(130, 280)** at
-speed **15**. All units are screen coordinates, with speed in units per second.
-The total script time is learned after all nodes have reached their waypoints.
-Initially, the time slider in the mobility script dialog will not be
-accurate.
-
-Example mobility scripts (and their associated topology files) can be found
-in the **configs/** directory.
-
-## Alerts
-
-The alerts button is located in the bottom right-hand corner
-of the status bar in the CORE GUI. This button will change colors to indicate one or
-more problems with the running emulation. Clicking on the alerts button will invoke the
-alerts dialog.
-
-The alerts dialog contains a list of alerts received from
-the CORE daemon. An alert has a time, severity level, optional node number,
-and source. When the alerts button is red, this indicates one or more fatal
-exceptions. An alert with a fatal severity level indicates that one or more
-of the basic pieces of emulation could not be created, such as failure to
-create a bridge or namespace, or the failure to launch EMANE processes for an
-EMANE-based network.
-
-Clicking on an alert displays details for that
-exception. The exception source is a text string
-to help trace where the exception occurred; "service:UserDefined", for example,
-would appear for a failed validation command with the UserDefined service.
-
-A button is available at the bottom of the dialog for clearing the exception
-list.
-
-## Customizing your Topology's Look
-
-Several annotation tools are provided for changing the way your topology is
-presented. Captions may be added with the Text tool. Ovals and rectangles may
-be drawn in the background, helpful for visually grouping nodes together.
-
-During live demonstrations the marker tool may be helpful for drawing temporary
-annotations on the canvas that may be quickly erased. A size and color palette
-appears at the bottom of the toolbar when the marker tool is selected. Markings
-are only temporary and are not saved in the topology file.
-
-The basic node icons can be replaced with a custom image of your choice. Icons
-appear best when they use the GIF or PNG format with a transparent background.
-To change a node's icon, double-click the node to invoke its configuration
-dialog and click on the button to the right of the node name that shows the
-node's current icon.
-
-A background image for the canvas may be set using the *Wallpaper...* option
-from the *Canvas* menu.
The image may be centered, tiled, or scaled to fit the
-canvas size. An existing terrain, map, or network diagram could be used as a
-background, for example, with CORE nodes drawn on top.
diff --git a/docs/hitl.md b/docs/hitl.md
deleted file mode 100644
index b659a36f..00000000
--- a/docs/hitl.md
+++ /dev/null
@@ -1,127 +0,0 @@
-# Hardware In The Loop
-
-## Overview
-
-In some cases it may be impossible or impractical to run software using CORE
-nodes alone. You may need to bring external hardware into the network.
-CORE's emulated networks run in real time, so they can be connected to live
-physical networks. The RJ45 tool and the Tunnel tool help with connecting to
-the real world. These tools are available from the **Link Layer Nodes** menu.
-
-When connecting two or more CORE emulations together, MAC address collisions
-should be avoided. CORE automatically assigns MAC addresses to interfaces when
-the emulation is started, starting with **00:00:00:aa:00:00** and incrementing
-the bottom byte. The starting byte should be changed on the second CORE machine
-using the **Tools->MAC Addresses** option in the menu.
-
-## RJ45 Node
-
-CORE provides the RJ45 node, which represents a physical
-interface within the host that is running CORE. Any real-world network
-devices can be connected to the interface and communicate with the CORE nodes in real time.
-
-The main drawback is that one physical interface is required for each
-connection. When the physical interface is assigned to CORE, it may not be used
-for anything else. Another consideration is that the computer or network that
-you are connecting to must be co-located with the CORE machine.
-
-### GUI Usage
-
-To place an RJ45 connection, click on the **Link Layer Nodes** toolbar and select
-the **RJ45 Node** from the options. Click on the canvas where you would like
-to place the node. Now click on the **Link Tool** and draw a link between the RJ45
-and the other node you wish to be connected to. The RJ45 node will display "UNASSIGNED".
-Double-click the RJ45 node to assign a physical interface. A list of available
-interfaces will be shown; select one and then click **Apply**.
-
-!!! note
-
-    When you press the Start button to instantiate your topology, the
-    interface assigned to the RJ45 will be connected to the CORE topology. The
-    interface can no longer be used by the system.
-
-### Multiple RJ45s with One Interface (VLAN)
-
-It is possible to have multiple RJ45 nodes using the same physical interface
-by leveraging 802.1Q VLANs. This allows for more RJ45 nodes than physical ports
-are available, but the hardware (e.g. a switch) connected to the physical port
-must support VLAN tagging, and the available bandwidth will be shared.
-
-You need to create separate VLAN virtual devices on the Linux host,
-and then assign these devices to RJ45 nodes inside of CORE. The VLANing is
-actually performed outside of CORE, so when the CORE emulated node receives a
-packet, the VLAN tag will already be removed.
-
-Here are example commands for creating VLAN devices under Linux:
-
-```shell
-ip link add link eth0 name eth0.1 type vlan id 1
-ip link add link eth0 name eth0.2 type vlan id 2
-ip link add link eth0 name eth0.3 type vlan id 3
-```
-
-## Tunnel Tool
-
-The tunnel tool builds GRE tunnels between CORE emulations or other hosts.
-Tunneling can be helpful when the number of physical interfaces is limited or
-when the peer is located on a different network.
In this case a physical interface does -not need to be dedicated to CORE as with the RJ45 tool. - -The peer GRE tunnel endpoint may be another CORE machine or another -host that supports GRE tunneling. When placing a Tunnel node, initially -the node will display "UNASSIGNED". This text should be replaced with the IP -address of the tunnel peer. This is the IP address of the other CORE machine or -physical machine, not an IP address of another virtual node. - -!!! note - - Be aware of possible MTU (Maximum Transmission Unit) issues with GRE devices. - The *gretap* device has an interface MTU of 1,458 bytes; when joined to a Linux - bridge, the bridge's MTU becomes 1,458 bytes. The Linux bridge will not perform - fragmentation for large packets if other bridge ports have a higher MTU such - as 1,500 bytes. - -The GRE key is used to identify flows with GRE tunneling. This allows multiple -GRE tunnels to exist between that same pair of tunnel peers. A unique number -should be used when multiple tunnels are used with the same peer. When -configuring the peer side of the tunnel, ensure that the matching keys are -used. - -### Example Usage - -Here are example commands for building the other end of a tunnel on a Linux -machine. In this example, a router in CORE has the virtual address -**10.0.0.1/24** and the CORE host machine has the (real) address -**198.51.100.34/24**. The Linux box -that will connect with the CORE machine is reachable over the (real) network -at **198.51.100.76/24**. -The emulated router is linked with the Tunnel Node. In the -Tunnel Node configuration dialog, the address **198.51.100.76** is entered, with -the key set to **1**. The gretap interface on the Linux box will be assigned -an address from the subnet of the virtual router node, -**10.0.0.2/24**. - -```shell -# these commands are run on the tunnel peer -sudo ip link add gt0 type gretap remote 198.51.100.34 local 198.51.100.76 key 1 -sudo ip addr add 10.0.0.2/24 dev gt0 -sudo ip link set dev gt0 up -``` - -Now the virtual router should be able to ping the Linux machine: - -```shell -# from the CORE router node -ping 10.0.0.2 -``` - -And the Linux machine should be able to ping inside the CORE emulation: - -```shell -# from the tunnel peer -ping 10.0.0.1 -``` - -To debug this configuration, **tcpdump** can be run on the gretap devices, or -on the physical interfaces on the CORE or Linux machines. Make sure that a -firewall is not blocking the GRE traffic. diff --git a/docs/index.md b/docs/index.md index 4afec59f..f516b648 100644 --- a/docs/index.md +++ b/docs/index.md @@ -2,17 +2,34 @@ ## Introduction -CORE (Common Open Research Emulator) is a tool for building virtual networks. As an emulator, CORE builds a -representation of a real computer network that runs in real time, as opposed to simulation, where abstract models are -used. The live-running emulation can be connected to physical networks and routers. It provides an environment for -running real applications and protocols, taking advantage of tools provided by the Linux operating system. +CORE (Common Open Research Emulator) is a tool for building virtual networks. As an emulator, CORE builds a representation of a real computer network that runs in real time, as opposed to simulation, where abstract models are used. The live-running emulation can be connected to physical networks and routers. It provides an environment for running real applications and protocols, taking advantage of virtualization provided by the Linux operating system. 
-CORE is typically used for network and protocol research, demonstrations, application and platform testing, evaluating -networking scenarios, security studies, and increasing the size of physical test networks. +CORE is typically used for network and protocol research, demonstrations, application and platform testing, evaluating networking scenarios, security studies, and increasing the size of physical test networks. ### Key Features - * Efficient and scalable * Runs applications and protocols without modification * Drag and drop GUI * Highly customizable + +## Topics + +* [Architecture](architecture.md) +* [Installation](install.md) +* [Usage](usage.md) +* [Python Scripting](scripting.md) +* [Node Types](machine.md) +* [CTRLNET](ctrlnet.md) +* [Services](services.md) +* [EMANE](emane.md) +* [NS3](ns3.md) +* [Performance](performance.md) +* [Developers Guide](devguide.md) + +## Credits + +The CORE project was derived from the open source IMUNES project from the University of Zagreb in 2004. In 2006, changes for CORE were released back to that project, some items of which were adopted. Marko Zec is the primary developer from the University of Zagreb responsible for the IMUNES (GUI) and VirtNet (kernel) projects. Ana Kukec and Miljenko Mikuc are known contributors. + +Jeff Ahrenholz has been the primary Boeing developer of CORE, and has written this manual. Tom Goff designed the Python framework and has made significant contributions. Claudiu Danilov, Rod Santiago, Kevin Larson, Gary Pei, Phil Spagnolo, and Ian Chakeres have contributed code to CORE. Dan Mackley helped develop the CORE API, originally to interface with a simulator. Jae Kim and Tom Henderson have supervised the project and provided direction. + +Copyright (c) 2005-2018, the Boeing Company. diff --git a/docs/install.md b/docs/install.md index 51c05dbc..fb161f78 100644 --- a/docs/install.md +++ b/docs/install.md @@ -1,407 +1,314 @@ -# Installation -!!! warning +# CORE Installation - If Docker is installed, the default iptable rules will block CORE traffic +* Table of Contents +{:toc} -## Overview +# Overview -CORE currently supports and provides the following installation options, with the package -option being preferred. +This section will describe how to set up a CORE machine. Note that the easiest way to install CORE is using a binary package on Ubuntu or Fedora/CentOS (deb or rpm) using the distribution's package manager to automatically install dependencies. -* [Package based install (rpm/deb)](#package-based-install) -* [Script based install](#script-based-install) -* [Dockerfile based install](#dockerfile-based-install) +Ubuntu and Fedora/CentOS Linux are the recommended distributions for running CORE. However, these distributions are not strictly required. CORE will likely work on other flavors of Linux as well. -### Requirements +The primary dependencies are Tcl/Tk (8.5 or newer) for the GUI, and Python 2.7 for the CORE daemon. -Any computer capable of running Linux should be able to run CORE. Since the physical machine will be hosting numerous -containers, as a general rule you should select a machine having as much RAM and CPU resources as possible. +CORE files are installed to the following directories, when the installation prefix is */usr*. 
-* Linux Kernel v3.3+ -* iproute2 4.5+ is a requirement for bridge related commands -* nftables compatible kernel and nft command line tool +Install Path | Description +-------------|------------ +/usr/bin/core-gui|GUI startup command +/usr/bin/core-daemon|Daemon startup command +/usr/bin/|Misc. helper commands/scripts +/usr/lib/core|GUI files +/usr/lib/python2.7/dist-packages/core|Python modules for daemon/scripts +/etc/core/|Daemon configuration files +~/.core/|User-specific GUI preferences and scenario files +/usr/share/core/|Example scripts and scenarios +/usr/share/man/man1/|Command man pages +/etc/init.d/core-daemon|SysV startup script for daemon +/etc/systemd/system/core-daemon.service|Systemd startup script for daemon -### Supported Linux Distributions +## Prerequisites -Plan is to support recent Ubuntu and CentOS LTS releases. +A Linux operating system is required. The GUI uses the Tcl/Tk scripting toolkit, and the CORE daemon requires Python. Details of the individual software packages required can be found in the installation steps. -Verified: +## Required Hardware -* Ubuntu - 18.04, 20.04, 22.04 -* CentOS - 7.8 +Any computer capable of running Linux should be able to run CORE. Since the physical machine will be hosting numerous virtual machines, as a general rule you should select a machine having as much RAM and CPU resources as possible. -### Files +## Required Software -The following is a list of files that would be installed after installation. +CORE requires a Linux operating system because it uses virtualization provided by the kernel. It does not run on Windows or Mac OS X operating systems (unless it is running within a virtual machine guest.) The virtualization technology that CORE currently uses is Linux network namespaces. -* executables - * `/bin/{vcmd, vnode}` - * can be adjusted using script based install , package will be /usr -* python files - * virtual environment `/opt/core/venv` - * local install will be local to the python version used - * `python3 -c "import core; print(core.__file__)"` - * scripts {core-daemon, core-cleanup, etc} - * virtualenv `/opt/core/venv/bin` - * local `/usr/local/bin` -* configuration files - * `/etc/core/{core.conf, logging.conf}` -* ospf mdr repository files when using script based install - * `/../ospf-mdr` +The CORE GUI requires the X.Org X Window system (X11), or can run over a remote X11 session. For specific Tcl/Tk, Python, and other libraries required to run CORE. -### Installed Scripts +**NOTE: CORE *Services* determine what run on each node. You may require other software packages depending on the services you wish to use. For example, the *HTTP* service will require the *apache2* package.** -The following python scripts are provided. +## Installing from Packages -| Name | Description | -|---------------------|------------------------------------------------------------------------------| -| core-cleanup | tool to help removed lingering core created containers, bridges, directories | -| core-cli | tool to query, open xml files, and send commands using gRPC | -| core-daemon | runs the backed core server providing a gRPC API | -| core-gui | starts GUI | -| core-python | provides a convenience for running the core python virtual environment | -| core-route-monitor | tool to help monitor traffic across nodes and feed that to SDT | -| core-service-update | tool to update automate modifying a legacy service to match current naming | +The easiest way to install CORE is using the pre-built packages. 
The package managers on Ubuntu or Fedora/CentOS will automatically install dependencies for you. You can obtain the CORE packages from [CORE GitHub](https://github.com/coreemu/core/releases). -### Upgrading from Older Release +### Installing from Packages on Ubuntu -Please make sure to uninstall any previous installations of CORE cleanly -before proceeding to install. - -Clearing out a current install from 7.0.0+, making sure to provide options -used for install (`-l` or `-p`). +Install Quagga for routing. If you plan on working with wireless networks, we recommend installing [OSPF MDR](http://www.nrl.navy.mil/itd/ncs/products/ospf-manet) (replace *amd64* below with *i386* if needed to match your architecture): ```shell -cd -inv uninstall +wget https://downloads.pf.itd.nrl.navy.mil/ospf-manet/quagga-0.99.21mr2.2/quagga-mr_0.99.21mr2.2_amd64.deb +sudo dpkg -i quagga-mr_0.99.21mr2.2_amd64.deb ``` -Previous install was built from source for CORE release older than 7.0.0: +Or, for the regular Ubuntu version of Quagga: ```shell -cd -sudo make uninstall -make clean -./bootstrap.sh clean +sudo apt-get install quagga ``` -Installed from previously built packages: +Install the CORE deb packages for Ubuntu from command line. ```shell -# centos -sudo yum remove core -# ubuntu -sudo apt remove core +sudo dpkg -i python-core_*.deb +sudo dpkg -i core-gui_*.deb ``` -## Installation Examples - -The below links will take you to sections providing complete examples for installing -CORE and related utilities on fresh installations. Otherwise, a breakdown for installing -different components and the options available are detailed below. - -* [Ubuntu 22.04](install_ubuntu.md) -* [CentOS 7](install_centos.md) - -## Package Based Install - -Starting with 9.0.0 there are pre-built rpm/deb packages. You can retrieve the -rpm/deb package from [releases](https://github.com/coreemu/core/releases) page. - -The built packages will require and install system level dependencies, as well as running -a post install script to install the provided CORE python wheel. A similar uninstall script -is ran when uninstalling and would require the same options as given, during the install. - -!!! note - - PYTHON defaults to python3 for installs below, CORE requires python3.9+, pip, - tk compatibility for python gui, and venv for virtual environments - -Examples for install: +Start the CORE daemon as root, the systemd installation will auto start the daemon, but you can use the commands below if need be. 
```shell -# recommended to upgrade to the latest version of pip before installation -# in python, can help avoid building from source issues -sudo -m pip install --upgrade pip -# install vcmd/vnoded, system dependencies, -# and core python into a venv located at /opt/core/venv -sudo install -y ./ -# disable the venv and install to python directly -sudo NO_VENV=1 install -y ./ -# change python executable used to install for venv or direct installations -sudo PYTHON=python3.9 install -y ./ -# disable venv and change python executable -sudo NO_VENV=1 PYTHON=python3.9 install -y ./ -# skip installing the python portion entirely, as you plan to carry this out yourself -# core python wheel is located at /opt/core/core--py3-none-any.whl -sudo NO_PYTHON=1 install -y ./ -# install python wheel into python of your choosing -sudo -m pip install /opt/core/core--py3-none-any.whl +# systemd +sudo systemctl start core-daemon + +# sysv +sudo service core-daemon start ``` -Example for removal, requires using the same options as install: +Run the CORE GUI as a normal user: ```shell -# remove a standard install -sudo remove core -# remove a local install -sudo NO_VENV=1 remove core -# remove install using alternative python -sudo PYTHON=python3.9 remove core -# remove install using alternative python and local install -sudo NO_VENV=1 PYTHON=python3.9 remove core -# remove install and skip python uninstall -sudo NO_PYTHON=1 remove core -``` - -### Installing OSPF MDR - -You will need to manually install OSPF MDR for routing nodes, since this is not -provided by the package. - -```shell -git clone https://github.com/USNavalResearchLaboratory/ospf-mdr.git -cd ospf-mdr -./bootstrap.sh -./configure --disable-doc --enable-user=root --enable-group=root \ - --with-cflags=-ggdb --sysconfdir=/usr/local/etc/quagga --enable-vtysh \ - --localstatedir=/var/run/quagga -make -j$(nproc) -sudo make install -``` - -When done see [Post Install](#post-install). - -## Script Based Install - -The script based installation will install system level dependencies, python library and -dependencies, as well as dependencies for building CORE. - -The script based install also automatically builds and installs OSPF MDR, used by default -on routing nodes. This can optionally be skipped. - -Installaion will carry out the following steps: - -* installs system dependencies for building core -* builds vcmd/vnoded and python grpc files -* installs core into poetry managed virtual environment or locally, if flag is passed -* installs systemd service pointing to appropriate python location based on install type -* clone/build/install working version of [OPSF MDR](https://github.com/USNavalResearchLaboratory/ospf-mdr) - -!!! note - - Installing locally comes with its own risks, it can result it potential - dependency conflicts with system package manager installed python dependencies - -!!! 
note - - Provide a prefix that will be found on path when running as sudo, - if the default prefix /usr/local will not be valid - -The following tools will be leveraged during installation: - -| Tool | Description | -|---------------------------------------------|-----------------------------------------------------------------------| -| [pip](https://pip.pypa.io/en/stable/) | used to install pipx | -| [pipx](https://pipxproject.github.io/pipx/) | used to install standalone python tools (invoke, poetry) | -| [invoke](http://www.pyinvoke.org/) | used to run provided tasks (install, uninstall, reinstall, etc) | -| [poetry](https://python-poetry.org/) | used to install python virtual environment or building a python wheel | - -First we will need to clone and navigate to the CORE repo. - -```shell -# clone CORE repo -git clone https://github.com/coreemu/core.git -cd core - -# install dependencies to run installation task -./setup.sh -# skip installing system packages, due to using python built from source -NO_SYSTEM=1 ./setup.sh - -# run the following or open a new terminal -source ~/.bashrc - -# Ubuntu -inv install -# CentOS -inv install -p /usr -# optionally skip python system packages -inv install --no-python -# optionally skip installing ospf mdr -inv install --no-ospf - -# install command options -Usage: inv[oke] [--core-opts] install [--options] [other tasks here ...] - -Docstring: - install core, poetry, scripts, service, and ospf mdr - -Options: - -d, --dev install development mode - -i STRING, --install-type=STRING used to force an install type, can be one of the following (redhat, debian) - -l, --local determines if core will install to local system, default is False - -n, --no-python avoid installing python system dependencies - -o, --[no-]ospf disable ospf installation - -p STRING, --prefix=STRING prefix where scripts are installed, default is /usr/local - -v, --verbose -``` - -When done see [Post Install](#post-install). - -### Unsupported Linux Distribution - -For unsupported OSs you could attempt to do the following to translate -an installation to your use case. - -* make sure you have python3.9+ with venv support -* make sure you have python3 invoke available to leverage `/tasks.py` - -```shell -# this will print the commands that would be ran for a given installation -# type without actually running them, they may help in being used as -# the basis for translating to your OS -inv install --dry -v -p -i -``` - -## Dockerfile Based Install - -You can leverage one of the provided Dockerfiles, to run and launch CORE within a Docker container. - -Since CORE nodes will leverage software available within the system for a given use case, -make sure to update and build the Dockerfile with desired software. - -```shell -# clone core -git clone https://github.com/coreemu/core.git -cd core -# build image -sudo docker build -t core -f dockerfiles/Dockerfile. . -# start container -sudo docker run -itd --name core -e DISPLAY -v /tmp/.X11-unix:/tmp/.X11-unix:rw --privileged core -# enable xhost access to the root user -xhost +local:root -# launch core-gui -sudo docker exec -it core core-gui -``` - -When done see [Post Install](#post-install). - -## Installing EMANE - -!!! note - - Installing EMANE for the virtual environment is known to work for 1.21+ - -The recommended way to install EMANE is using prebuilt packages, otherwise -you can follow their instructions for installing from source. Installation -information can be found [here](https://github.com/adjacentlink/emane/wiki/Install). 
- -There is an invoke task to help install the EMANE bindings into the CORE virtual -environment, when needed. An example for running the task is below and the version -provided should match the version of the packages installed. - -You will also need to make sure, you are providing the correct python binary where CORE -is being used. - -Also, these EMANE bindings need to be built using `protoc` 3.19+. So make sure -that is available and being picked up on PATH properly. - -Examples for building and installing EMANE python bindings for use in CORE: - -```shell -# if your system does not have protoc 3.19+ -wget https://github.com/protocolbuffers/protobuf/releases/download/v3.19.6/protoc-3.19.6-linux-x86_64.zip -mkdir protoc -unzip protoc-3.19.6-linux-x86_64.zip -d protoc -git clone https://github.com/adjacentlink/emane.git -cd emane -git checkout v1.3.3 -./autogen.sh -PYTHON=/opt/core/venv/bin/python ./configure --prefix=/usr -cd src/python -PATH=/opt/protoc/bin:$PATH make -/opt/core/venv/bin/python -m pip install . - -# when your system has protoc 3.19+ -cd -# example version tag v1.3.3 -# overriding python used to leverage the default virtualenv install -PYTHON=/opt/core/venv/bin/python inv install-emane -e -# local install that uses whatever python3 refers to -inv install-emane -e -``` - -## Post Install - -After installation completes you are now ready to run CORE. - -### Resolving Docker Issues - -If you have Docker installed, by default it will change the iptables -forwarding chain to drop packets, which will cause issues for CORE traffic. - -You can temporarily resolve the issue with the following command: - -```shell -sudo iptables --policy FORWARD ACCEPT -``` - -Alternatively, you can configure Docker to avoid doing this, but will likely -break normal Docker networking usage. Using the setting below will require -a restart. - -Place the file contents below in **/etc/docker/docker.json** - -```json -{ - "iptables": false -} -``` - -### Resolving Path Issues - -One problem running CORE you may run into, using the virtual environment or locally -can be issues related to your path. - -To add support for your user to run scripts from the virtual environment: - -```shell -# can add to ~/.bashrc -export PATH=$PATH:/opt/core/venv/bin -``` - -This will not solve the path issue when running as sudo, so you can do either -of the following to compensate. - -```shell -# run command passing in the right PATH to pickup from the user running the command -sudo env PATH=$PATH core-daemon - -# add an alias to ~/.bashrc or something similar -alias sudop='sudo env PATH=$PATH' -# now you can run commands like so -sudop core-daemon -``` - -### Running CORE - -The following assumes I have resolved PATH issues and setup the `sudop` alias. - -```shell -# in one terminal run the server daemon using the alias above -sudop core-daemon -# in another terminal run the gui client core-gui ``` -### Enabling Service +After running the *core-gui* command, a GUI should appear with a canvas for drawing topologies. Messages will print out on the console about connecting to the CORE daemon. -After installation, the core service is not enabled by default. If you desire to use the -service, run the following commands. +### Installing from Packages on Fedora/CentOS + +The commands shown here should be run as root. The *x86_64* architecture is shown in the examples below, replace with *i686* is using a 32-bit architecture. 
+ +**CentOS 7 Only: in order to install *tkimg* package you must build from source.** + +Make sure the system is up to date. ```shell -sudo systemctl enable core-daemon -sudo systemctl start core-daemon +yum update ``` + +**Optional (Fedora 17+): Fedora 17 and newer have an additional prerequisite providing the required netem kernel modules (otherwise skip this step and have the package manager install it for you.)** + +```shell +yum install kernel-modules-extra +``` + +Install Quagga for routing. If you plan on working with wireless networks, we recommend installing [OSPF MDR](http://www.nrl.navy.mil/itd/ncs/products/ospf-manet): + +```shell +wget https://downloads.pf.itd.nrl.navy.mil/ospf-manet/quagga-0.99.21mr2.2/quagga-0.99.21mr2.2-1.el6.x86_64.rpm +sudo yum install quagga-0.99.21mr2.2-1.el6.x86_64.rpm +``` + +Or, for the regular Fedora/CentOS version of Quagga: + +```shell +yum install quagga +``` + +Install the CORE RPM packages and automatically resolve dependencies: + +```shell +yum install python-core_*.rpm +yum install core-gui_*.rpm +``` + +Turn off SELINUX by setting *SELINUX=disabled* in the */etc/sysconfig/selinux* file, and adding *selinux=0* to the kernel line in your */etc/grub.conf* file; on Fedora 15 and newer, disable sandboxd using ```chkconfig sandbox off```; you need to reboot in order for this change to take effect + +Turn off firewalls: + +```shell +systemctl disable firewalld +systemctl disable iptables.service +systemctl disable ip6tables.service +chkconfig iptables off +chkconfig ip6tables off +``` + +You need to reboot after making these changes, or flush the firewall using + +```shell +iptables -F +ip6tables -F +``` + +Start the CORE daemon as root. + +```shell +# systemd +sudo systemctl daemon-reload +sudo systemctl start core-daemon + +# sysv +sudo service core-daemon start +``` + +Run the CORE GUI as a normal user: + +```shell +core-gui +``` + +After running the *core-gui* command, a GUI should appear with a canvas for drawing topologies. Messages will print out on the console about connecting to the CORE daemon. + +### Installing from Source + +This option is listed here for developers and advanced users who are comfortable patching and building source code. Please consider using the binary packages instead for a simplified install experience. + +To build CORE from source on Ubuntu, first install these development packages. These packages are not required for normal binary package installs. + +#### Ubuntu 18.04 pre-reqs + +```shell +sudo apt install automake pkg-config gcc libev-dev bridge-utils ebtables python-dev python-sphinx python-setuptools python-lxml python-enum34 tk libtk-img +``` + +#### Ubuntu 16.04 Requirements + +```shell +sudo apt-get install automake bridge-utils ebtables python-dev libev-dev python-sphinx python-setuptools python-enum34 python-lxml libtk-img +``` + + +#### CentOS 7 with Gnome Desktop Requirements + +```shell +sudo yum -y install automake gcc python-devel libev-devel python-sphinx tk python-lxml python-enum34 +``` + +You can obtain the CORE source from the [CORE GitHub](https://github.com/coreemu/core) page. Choose either a stable release version or the development snapshot available in the *nightly_snapshots* directory. 
+
+```shell
+tar xzf core-*.tar.gz
+cd core-*
+```
+
+#### Traditional Autotools Build
+```shell
+./bootstrap.sh
+./configure
+make
+sudo make install
+```
+
+#### Build Documentation
+```shell
+./bootstrap.sh
+./configure
+make doc
+```
+
+#### Build Packages
+Install fpm: http://fpm.readthedocs.io/en/latest/installing.html
+Build package commands; DESTDIR is used for gui packaging only.
+
+```shell
+./bootstrap.sh
+./configure
+make
+mkdir /tmp/core-gui
+make fpm DESTDIR=/tmp/core-gui
+
+```
+This will produce:
+
+* CORE GUI rpm/deb files
+  * core-gui_$VERSION_$ARCH
+* CORE ns3 rpm/deb files
+  * python-core-ns3_$VERSION_$ARCH
+* CORE python rpm/deb files for SysV and systemd service types
+  * python-core-sysv_$VERSION_$ARCH
+  * python-core-systemd_$VERSION_$ARCH
+
+
+### Quagga Routing Software
+
+Virtual networks generally require some form of routing in order to work (e.g. to automatically populate routing tables for routing packets from one subnet to another.) CORE builds OSPF routing protocol configurations by default when the blue router node type is used. The OSPF protocol is available from the [Quagga open source routing suite](http://www.quagga.net).
+
+Quagga is not specified as a dependency for the CORE packages because there are two different Quagga packages that you may use:
+
+* [Quagga](http://www.quagga.net) - the standard version of Quagga, suitable for static wired networks, and usually available via your distribution's package manager.
+
+* [OSPF MANET Designated Routers](http://www.nrl.navy.mil/itd/ncs/products/ospf-manet) (MDR) - the Quagga routing suite with a modified version of OSPFv3, optimized for use with mobile wireless networks. The *mdr* node type (and the MDR service) requires this variant of Quagga.
+
+If you plan on working with wireless networks, we recommend installing OSPF MDR; otherwise install the standard version of Quagga using your package manager or from source.
+
+#### Installing Quagga from Packages
+
+To install the standard version of Quagga from packages, use your package manager (Linux).
+
+Ubuntu users:
+
+```shell
+sudo apt-get install quagga
+```
+
+Fedora/CentOS users:
+
+```shell
+sudo yum install quagga
+```
+
+To install the Quagga variant having OSPFv3 MDR, first download the appropriate package, and install using the package manager.
+
+Ubuntu users:
+```shell
+wget https://downloads.pf.itd.nrl.navy.mil/ospf-manet/quagga-0.99.21mr2.2/quagga-mr_0.99.21mr2.2_amd64.deb
+sudo dpkg -i quagga-mr_0.99.21mr2.2_amd64.deb
+```
+
+Replace *amd64* with *i686* if using a 32-bit architecture.
+
+Fedora/CentOS users:
+
+```shell
+wget https://downloads.pf.itd.nrl.navy.mil/ospf-manet/quagga-0.99.21mr2.2/quagga-0.99.21mr2.2-1.el6.x86_64.rpm
+sudo yum install quagga-0.99.21mr2.2-1.el6.x86_64.rpm
+```
+
+Replace *x86_64* with *i686* if using a 32-bit architecture.
+
+#### Compiling Quagga for CORE
+
+To compile Quagga to work with CORE on Linux:
+
+```shell
+wget https://downloads.pf.itd.nrl.navy.mil/ospf-manet/quagga-0.99.21mr2.2/quagga-0.99.21mr2.2.tar.gz
+tar xzf quagga-0.99.21mr2.2.tar.gz
+cd quagga-0.99.21mr2.2
+./configure --enable-user=root --enable-group=root --with-cflags=-ggdb \
+    --sysconfdir=/usr/local/etc/quagga --enable-vtysh \
+    --localstatedir=/var/run/quagga
+make
+sudo make install
+```
+
+Note that the configuration directory */usr/local/etc/quagga* shown for Quagga above could be */etc/quagga*, if you create a symbolic link from */etc/quagga/Quagga.conf -> /usr/local/etc/quagga/Quagga.conf* on the host.
The *quaggaboot.sh* script in a Linux network namespace will try to do this for you if needed.
+
+If you try to run quagga after installing from source and get an error such as:
+
+```shell
+error while loading shared libraries libzebra.so.0
+```
+
+this is usually a sign that you have to run ```sudo ldconfig``` to refresh the cache file.
+
+### VCORE
+
+CORE is capable of running inside of a virtual machine, using software such as VirtualBox, VMware Server or QEMU. However, CORE itself is performing machine virtualization in order to realize multiple emulated nodes, and running CORE virtually adds additional contention for the physical resources. **For performance reasons, this is not recommended.** Timing inside of a VM often has problems. If you do run CORE from within a VM, it is recommended that you view the GUI with remote X11 over SSH, so the virtual machine does not need to emulate the video card with the X11 application.
+
+A CORE virtual machine is provided for download, named VCORE. This is perhaps the easiest way to get CORE up and running, as the machine is already set up for you. This may be adequate for initially evaluating the tool, but keep in mind the performance limitations of running within VirtualBox or VMware. To install the virtual machine, you first need to obtain VirtualBox from http://www.virtualbox.org, or VMware Server or Player from http://www.vmware.com (this commercial software is distributed for free.) Once virtualization software has been installed, you can import the virtual machine appliance using the *vbox* file for VirtualBox or the *vmx* file for VMware. See the documentation that comes with VCORE for login information.
+
diff --git a/docs/install_centos.md b/docs/install_centos.md
deleted file mode 100644
index 53de2af6..00000000
--- a/docs/install_centos.md
+++ /dev/null
@@ -1,144 +0,0 @@
-# Install CentOS
-
-## Overview
-
-Below is a detailed path for installing CORE and related tooling on a fresh
-CentOS 7 install. Both of the examples below will install CORE into its
-own virtual environment located at **/opt/core/venv**. Both examples below
-also assume using **~/Documents** as the working directory.
-
-## Script Install
-
-This section covers step by step commands that can be used to install CORE using
-the script based installation path.
- -``` shell -# install system packages -sudo yum -y update -sudo yum install -y git sudo wget tzdata unzip libpcap-devel libpcre3-devel \ - libxml2-devel protobuf-devel unzip uuid-devel tcpdump make epel-release -sudo yum-builddep -y python3 - -# install python3.9 -cd ~/Documents -wget https://www.python.org/ftp/python/3.9.15/Python-3.9.15.tgz -tar xf Python-3.9.15.tgz -cd Python-3.9.15 -./configure --enable-optimizations --with-ensurepip=install -sudo make -j$(nproc) altinstall -python3.9 -m pip install --upgrade pip - -# install core -cd ~/Documents -git clone https://github.com/coreemu/core -cd core -NO_SYSTEM=1 PYTHON=/usr/local/bin/python3.9 ./setup.sh -source ~/.bashrc -PYTHON=python3.9 inv install -p /usr --no-python - -# install emane -cd ~/Documents -wget -q https://adjacentlink.com/downloads/emane/emane-1.3.3-release-1.el7.x86_64.tar.gz -tar xf emane-1.3.3-release-1.el7.x86_64.tar.gz -cd emane-1.3.3-release-1/rpms/el7/x86_64 -sudo yum install -y ./openstatistic*.rpm ./emane*.rpm ./python3-emane_*.rpm - -# install emane python bindings into CORE virtual environment -cd ~/Documents -wget https://github.com/protocolbuffers/protobuf/releases/download/v3.19.6/protoc-3.19.6-linux-x86_64.zip -mkdir protoc -unzip protoc-3.19.6-linux-x86_64.zip -d protoc -git clone https://github.com/adjacentlink/emane.git -cd emane -git checkout v1.3.3 -./autogen.sh -PYTHON=/opt/core/venv/bin/python ./configure --prefix=/usr -cd src/python -PATH=~/Documents/protoc/bin:$PATH make -sudo /opt/core/venv/bin/python -m pip install . -``` - -## Package Install - -This section covers step by step commands that can be used to install CORE using -the package based installation path. This will require downloading a package from the release -page, to use during the install CORE step below. 
- -``` shell -# install system packages -sudo yum -y update -sudo yum install -y git sudo wget tzdata unzip libpcap-devel libpcre3-devel libxml2-devel \ - protobuf-devel unzip uuid-devel tcpdump automake gawk libreadline-devel libtool \ - pkg-config make -sudo yum-builddep -y python3 - -# install python3.9 -cd ~/Documents -wget https://www.python.org/ftp/python/3.9.15/Python-3.9.15.tgz -tar xf Python-3.9.15.tgz -cd Python-3.9.15 -./configure --enable-optimizations --with-ensurepip=install -sudo make -j$(nproc) altinstall -python3.9 -m pip install --upgrade pip - -# install core -cd ~/Documents -sudo PYTHON=python3.9 yum install -y ./core_*.rpm - -# install ospf mdr -cd ~/Documents -git clone https://github.com/USNavalResearchLaboratory/ospf-mdr.git -cd ospf-mdr -./bootstrap.sh -./configure --disable-doc --enable-user=root --enable-group=root \ - --with-cflags=-ggdb --sysconfdir=/usr/local/etc/quagga --enable-vtysh \ - --localstatedir=/var/run/quagga -make -j$(nproc) -sudo make install - -# install emane -cd ~/Documents -wget -q https://adjacentlink.com/downloads/emane/emane-1.3.3-release-1.el7.x86_64.tar.gz -tar xf emane-1.3.3-release-1.el7.x86_64.tar.gz -cd emane-1.3.3-release-1/rpms/el7/x86_64 -sudo yum install -y ./openstatistic*.rpm ./emane*.rpm ./python3-emane_*.rpm - -# install emane python bindings into CORE virtual environment -cd ~/Documents -wget https://github.com/protocolbuffers/protobuf/releases/download/v3.19.6/protoc-3.19.6-linux-x86_64.zip -mkdir protoc -unzip protoc-3.19.6-linux-x86_64.zip -d protoc -git clone https://github.com/adjacentlink/emane.git -cd emane -git checkout v1.3.3 -./autogen.sh -PYTHON=/opt/core/venv/bin/python ./configure --prefix=/usr -cd src/python -PATH=~/Documents/protoc/bin:$PATH make -sudo /opt/core/venv/bin/python -m pip install . -``` - -## Setup PATH - -The CORE virtual environment and related scripts will not be found on your PATH, -so some adjustments needs to be made. - -To add support for your user to run scripts from the virtual environment: - -```shell -# can add to ~/.bashrc -export PATH=$PATH:/opt/core/venv/bin -``` - -This will not solve the path issue when running as sudo, so you can do either -of the following to compensate. - -```shell -# run command passing in the right PATH to pickup from the user running the command -sudo env PATH=$PATH core-daemon - -# add an alias to ~/.bashrc or something similar -alias sudop='sudo env PATH=$PATH' -# now you can run commands like so -sudop core-daemon -``` diff --git a/docs/install_ubuntu.md b/docs/install_ubuntu.md deleted file mode 100644 index 57274a4f..00000000 --- a/docs/install_ubuntu.md +++ /dev/null @@ -1,116 +0,0 @@ -# Install Ubuntu - -## Overview - -Below is a detailed path for installing CORE and related tooling on a fresh -Ubuntu 22.04 installation. Both of the examples below will install CORE into its -own virtual environment located at **/opt/core/venv**. Both examples below -also assume using **~/Documents** as the working directory. - -## Script Install - -This section covers step by step commands that can be used to install CORE using -the script based installation path. 
- -``` shell -# install system packages -sudo apt-get update -y -sudo apt-get install -y ca-certificates git sudo wget tzdata libpcap-dev libpcre3-dev \ - libprotobuf-dev libxml2-dev protobuf-compiler unzip uuid-dev iproute2 iputils-ping \ - tcpdump - -# install core -cd ~/Documents -git clone https://github.com/coreemu/core -cd core -./setup.sh -source ~/.bashrc -inv install - -# install emane -cd ~/Documents -wget https://github.com/protocolbuffers/protobuf/releases/download/v3.19.6/protoc-3.19.6-linux-x86_64.zip -mkdir protoc -unzip protoc-3.19.6-linux-x86_64.zip -d protoc -git clone https://github.com/adjacentlink/emane.git -cd emane -./autogen.sh -./configure --prefix=/usr -make -j$(nproc) -sudo make install -cd src/python -make clean -PATH=~/Documents/protoc/bin:$PATH make -sudo /opt/core/venv/bin/python -m pip install . -``` - -## Package Install - -This section covers step by step commands that can be used to install CORE using -the package based installation path. This will require downloading a package from the release -page, to use during the install CORE step below. - -``` shell -# install system packages -sudo apt-get update -y -sudo apt-get install -y ca-certificates python3 python3-tk python3-pip python3-venv \ - libpcap-dev libpcre3-dev libprotobuf-dev libxml2-dev protobuf-compiler unzip \ - uuid-dev automake gawk git wget libreadline-dev libtool pkg-config g++ make \ - iputils-ping tcpdump - -# install core -cd ~/Documents -sudo apt-get install -y ./core_*.deb - -# install ospf mdr -cd ~/Documents -git clone https://github.com/USNavalResearchLaboratory/ospf-mdr.git -cd ospf-mdr -./bootstrap.sh -./configure --disable-doc --enable-user=root --enable-group=root \ - --with-cflags=-ggdb --sysconfdir=/usr/local/etc/quagga --enable-vtysh \ - --localstatedir=/var/run/quagga -make -j$(nproc) -sudo make install - -# install emane -cd ~/Documents -wget https://github.com/protocolbuffers/protobuf/releases/download/v3.19.6/protoc-3.19.6-linux-x86_64.zip -mkdir protoc -unzip protoc-3.19.6-linux-x86_64.zip -d protoc -git clone https://github.com/adjacentlink/emane.git -cd emane -./autogen.sh -./configure --prefix=/usr -make -j$(nproc) -sudo make install -cd src/python -make clean -PATH=~/Documents/protoc/bin:$PATH make -sudo /opt/core/venv/bin/python -m pip install . -``` - -## Setup PATH - -The CORE virtual environment and related scripts will not be found on your PATH, -so some adjustments needs to be made. - -To add support for your user to run scripts from the virtual environment: - -```shell -# can add to ~/.bashrc -export PATH=$PATH:/opt/core/venv/bin -``` - -This will not solve the path issue when running as sudo, so you can do either -of the following to compensate. - -```shell -# run command passing in the right PATH to pickup from the user running the command -sudo env PATH=$PATH core-daemon - -# add an alias to ~/.bashrc or something similar -alias sudop='sudo env PATH=$PATH' -# now you can run commands like so -sudop core-daemon -``` diff --git a/docs/lxc.md b/docs/lxc.md deleted file mode 100644 index 1ee11453..00000000 --- a/docs/lxc.md +++ /dev/null @@ -1,43 +0,0 @@ -# LXC Support - -## Overview - -LXC nodes are provided by way of LXD to create nodes using predefined -images and provide file system separation. - -## Installation - -### Debian Systems - -```shell -sudo snap install lxd -``` - -## Configuration - -Initialize LXD and say no to adding a default bridge. 
- -```shell -sudo lxd init -``` - -## Group Setup - -To use LXC nodes within the python GUI, you will need to make sure the user running the GUI is a member of the -lxd group. - -```shell -# add group if does not exist -sudo groupadd lxd - -# add user to group -sudo usermod -aG lxd $USER - -# to get this change to take effect, log out and back in or run the following -newgrp lxd -``` - -## Tools and Versions Tested With - -* LXD 3.14 -* nsenter from util-linux 2.31.1 diff --git a/docs/machine.md b/docs/machine.md new file mode 100644 index 00000000..bd68d7e1 --- /dev/null +++ b/docs/machine.md @@ -0,0 +1,22 @@ +# CORE Node Types + +* Table of Contents +{:toc} + +## Overview + +Different node types can be configured in CORE, and each node type has a *machine type* that indicates how the node will be represented at run time. Different machine types allow for different virtualization options. + +## netns nodes + +The *netns* machine type is the default. This is for nodes that will be backed by Linux network namespaces. See :ref:`Linux` for a brief explanation of netns. This default machine type is very lightweight, providing a minimum amount of virtualization in order to emulate a network. Another reason this is designated as the default machine type is because this virtualization technology typically requires no changes to the kernel; it is available out-of-the-box from the latest mainstream Linux distributions. + +## physical nodes + +The *physical* machine type is used for nodes that represent a real Linux-based machine that will participate in the emulated network scenario. This is typically used, for example, to incorporate racks of server machines from an emulation testbed. A physical node is one that is running the CORE daemon (*core-daemon*), but will not be further partitioned into virtual machines. Services that are run on the physical node do not run in an isolated or virtualized environment, but directly on the operating system. + +Physical nodes must be assigned to servers, the same way nodes are assigned to emulation servers with *Distributed Emulation*. The list of available physical nodes currently shares the same dialog box and list as the emulation servers, accessed using the *Emulation Servers...* entry from the *Session* menu. + +Support for physical nodes is under development and may be improved in future releases. Currently, when any node is linked to a physical node, a dashed line is drawn to indicate network tunneling. A GRE tunneling interface will be created on the physical node and used to tunnel traffic to and from the emulated world. + +Double-clicking on a physical node during runtime opens a terminal with an SSH shell to that node. Users should configure public-key SSH login as done with emulation servers. diff --git a/docs/nodetypes.md b/docs/nodetypes.md deleted file mode 100644 index 8f095746..00000000 --- a/docs/nodetypes.md +++ /dev/null @@ -1,53 +0,0 @@ -# Node Types - -## Overview - -Different node types can be used within CORE, each with their own -tradeoffs and functionality. - -## CORE Nodes - -CORE nodes are the standard node type typically used in CORE. They are -backed by Linux network namespaces. They use very little system resources -in order to emulate a network. They do however share the hosts file system -as they do not get their own. 
CORE nodes will have a directory uniquely -created for them as a place to keep their files and mounted directories -(`/tmp/pycore./ +``` + +The interactive Python shell allows some interaction with the Python objects for the emulation. + +In another terminal, nodes can be accessed using *vcmd*: + +```shell +vcmd -c /tmp/pycore.10781/n1 -- bash +root@n1:/tmp/pycore.10781/n1.conf# +root@n1:/tmp/pycore.10781/n1.conf# ping 10.0.0.3 +PING 10.0.0.3 (10.0.0.3) 56(84) bytes of data. +64 bytes from 10.0.0.3: icmp_req=1 ttl=64 time=7.99 ms +64 bytes from 10.0.0.3: icmp_req=2 ttl=64 time=3.73 ms +64 bytes from 10.0.0.3: icmp_req=3 ttl=64 time=3.60 ms +^C +--- 10.0.0.3 ping statistics --- +3 packets transmitted, 3 received, 0% packet loss, time 2002ms +rtt min/avg/max/mdev = 3.603/5.111/7.993/2.038 ms +root@n1:/tmp/pycore.10781/n1.conf# +``` + +The ping packets shown above are traversing an ns-3 ad-hoc Wifi simulated network. + +To clean up the session, use the Session.shutdown() method from the Python terminal. + +```python +print session + +session.shutdown() +``` + +A CORE/ns-3 Python script will instantiate an Ns3Session, which is a CORE Session having CoreNs3Nodes, an ns-3 MobilityHelper, and a fixed duration. The CoreNs3Node inherits from both the CoreNode and the ns-3 Node classes -- it is a network namespace having an associated simulator object. The CORE TunTap interface is used, represented by a ns-3 TapBridge in *CONFIGURE_LOCAL* mode, where ns-3 creates and configures the tap device. An event is scheduled to install the taps at time 0. + +**NOTE: The GUI can be used to run the *ns3wifi.py* and *ns3wifirandomwalk.py* scripts directly. First, *core-daemon* must be stopped and run within the waf root shell. Then the GUI may be run as a normal user, and the *Execute Python Script...* option may be used from the *File* menu. Dragging nodes around in the *ns3wifi.py* example will cause their ns-3 positions to be updated.** + +Users may find the files *ns3wimax.py* and *ns3lte.py* in that example directory; those files were similarly configured, but the underlying ns-3 support is not present as of ns-3.16, so they will not work. Specifically, the ns-3 has to be extended to support bridging the Tap device to an LTE and a WiMax device. + +## Integration details + +The previous example *ns3wifi.py* used Python API from the special Python objects *Ns3Session* and *Ns3WifiNet*. The example program does not import anything directly from the ns-3 python modules; rather, only the above two objects are used, and the API available to configure the underlying ns-3 objects is constrained. For example, *Ns3WifiNet* instantiates a constant-rate 802.11a-based ad hoc network, using a lot of ns-3 defaults. + +However, programs may be written with a blend of ns-3 API and CORE Python API calls. This section examines some of the fundamental objects in the CORE ns-3 support. Source code can be found in *ns3/corens3/obj.py* and example code in *ns3/corens3/examples/*. + +## Ns3Session + +The *Ns3Session* class is a CORE Session that starts an ns-3 simulation thread. ns-3 actually runs as a separate process on the same host as the CORE daemon, and the control of starting and stopping this process is performed by the *Ns3Session* class. + +Example: + +```python +session = Ns3Session(persistent=True, duration=opt.duration) +``` + +Note the use of the duration attribute to control how long the ns-3 simulation should run. By default, the duration is 600 seconds. 
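For instance, a fixed value can be passed for the duration; this is only a minimal sketch based on the constructor call shown above, with an arbitrary 300 second value:

```python
# run the ns-3 simulation for 300 seconds instead of the default 600
session = Ns3Session(persistent=True, duration=300)
```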
+ +Typically, the session keeps track of the ns-3 nodes (holding a node container for references to the nodes). This is accomplished via the ```addnode()``` method, e.g.: + +```python +for i in xrange(1, opt.numnodes + 1): + node = session.addnode(name = "n%d" % i) +``` + +```addnode()``` creates instances of a *CoreNs3Node*, which we'll cover next. + +## CoreNs3Node + +A *CoreNs3Node* is both a CoreNode and an ns-3 node: + +```python +class CoreNs3Node(CoreNode, ns.network.Node): + """ + The CoreNs3Node is both a CoreNode backed by a network namespace and + an ns-3 Node simulator object. When linked to simulated networks, the TunTap + device will be used. + """ +``` + +## CoreNs3Net + +A *CoreNs3Net* derives from *PyCoreNet*. This network exists entirely in simulation, using the TunTap device to interact between the emulated and the simulated realm. *Ns3WifiNet* is a specialization of this. + +As an example, this type of code would be typically used to add a WiFi network to a session: + +```python +wifi = session.addobj(cls=Ns3WifiNet, name="wlan1", rate="OfdmRate12Mbps") +wifi.setposition(30, 30, 0) +``` + +The above two lines will create a wlan1 object and set its initial canvas position. Later in the code, the newnetif method of the CoreNs3Node can be used to add interfaces on particular nodes to this network; e.g.: + +```python +for i in xrange(1, opt.numnodes + 1): + node = session.addnode(name = "n%d" % i) + node.newnetif(wifi, ["%s/%s" % (prefix.addr(i), prefix.prefixlen)]) +``` + +## Mobility + +Mobility in ns-3 is handled by an object (a MobilityModel) aggregated to an ns-3 node. The MobilityModel is able to report the position of the object in the ns-3 space. This is a slightly different model from, for instance, EMANE, where location is associated with an interface, and the CORE GUI, where mobility is configured by right-clicking on a WiFi cloud. + +The CORE GUI supports the ability to render the underlying ns-3 mobility model, if one is configured, on the CORE canvas. For example, the example program :file:`ns3wifirandomwalk.py` uses five nodes (by default) in a random walk mobility model. This can be executed by starting the core daemon from an ns-3 waf shell: + +```shell +sudo bash +cd /path/to/ns-3 +./waf shell +core-daemon +``` + +and in a separate window, starting the CORE GUI (not from a waf shell) and selecting the *Execute Python script...* option from the File menu, selecting the *ns3wifirandomwalk.py* script. + +The program invokes ns-3 mobility through the following statement: + +```python +session.setuprandomwalkmobility(bounds=(1000.0, 750.0, 0)) +``` + +This can be replaced by a different mode of mobility, in which nodes are placed according to a constant mobility model, and a special API call to the CoreNs3Net object is made to use the CORE canvas positions. + +```python +session.setuprandomwalkmobility(bounds=(1000.0, 750.0, 0)) +session.setupconstantmobility() +wifi.usecorepositions() +``` + +In this mode, the user dragging around the nodes on the canvas will cause CORE to update the position of the underlying ns-3 nodes. 
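To tie the pieces of this chapter together, below is a rough sketch (not a tested, complete script) that only combines calls already shown above: an Ns3Session, an Ns3WifiNet, namespace nodes joined with newnetif, and constant mobility mirroring the CORE canvas. The import path is an assumption based on the *ns3/corens3/obj.py* location mentioned earlier, and the node count and addresses are purely illustrative.

```python
# assumed import path, based on the ns3/corens3/obj.py source location noted above
from corens3.obj import Ns3Session, Ns3WifiNet

# CORE session backed by an ns-3 simulation thread
session = Ns3Session(persistent=True, duration=600)

# simulated Wifi network placed on the canvas
wifi = session.addobj(cls=Ns3WifiNet, name="wlan1", rate="OfdmRate12Mbps")
wifi.setposition(30, 30, 0)

# three namespace-backed nodes, each with a TunTap interface into the simulation
for i in xrange(1, 4):
    node = session.addnode(name="n%d" % i)
    # illustrative addressing; the example scripts derive this from a prefix helper
    node.newnetif(wifi, ["10.0.0.%d/24" % i])

# constant mobility model, with positions driven by dragging nodes on the CORE canvas
session.setupconstantmobility()
wifi.usecorepositions()
```

Starting the emulation and cleaning up afterwards follow the *ns3wifi.py* example and the *session.shutdown()* call shown earlier in this chapter.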
diff --git a/docs/performance.md b/docs/performance.md index 449e3837..b057dd23 100644 --- a/docs/performance.md +++ b/docs/performance.md @@ -1,44 +1,28 @@ # CORE Performance +* Table of Contents +{:toc} + ## Overview -The top question about the performance of CORE is often *how many nodes can it -handle?* The answer depends on several factors: +The top question about the performance of CORE is often *how many nodes can it handle?* The answer depends on several factors: -| Factor | Performance Impact | -|--------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Hardware | the number and speed of processors in the computer, the available processor cache, RAM memory, and front-side bus speed may greatly affect overall performance. | -| Operating system version | distribution of Linux and the specific kernel versions used will affect overall performance. | -| Active processes | all nodes share the same CPU resources, so if one or more nodes is performing a CPU-intensive task, overall performance will suffer. | -| Network traffic | the more packets that are sent around the virtual network increases the amount of CPU usage. | -| GUI usage | widgets that run periodically, mobility scenarios, and other GUI interactions generally consume CPU cycles that may be needed for emulation. | +* Hardware - the number and speed of processors in the computer, the available processor cache, RAM memory, and front-side bus speed may greatly affect overall performance. +* Operating system version - distribution of Linux and the specific kernel versions used will affect overall performance. +* Active processes - all nodes share the same CPU resources, so if one or more nodes is performing a CPU-intensive task, overall performance will suffer. +* Network traffic - the more packets that are sent around the virtual network increases the amount of CPU usage. +* GUI usage - widgets that run periodically, mobility scenarios, and other GUI interactions generally consume CPU cycles that may be needed for emulation. -On a typical single-CPU Xeon 3.0GHz server machine with 2GB RAM running Linux, -we have found it reasonable to run 30-75 nodes running OSPFv2 and OSPFv3 -routing. On this hardware CORE can instantiate 100 or more nodes, but at -that point it becomes critical as to what each of the nodes is doing. +On a typical single-CPU Xeon 3.0GHz server machine with 2GB RAM running Linux, we have found it reasonable to run 30-75 nodes running OSPFv2 and OSPFv3 routing. On this hardware CORE can instantiate 100 or more nodes, but at that point it becomes critical as to what each of the nodes is doing. -Because this software is primarily a network emulator, the more appropriate -question is *how much network traffic can it handle?* On the same 3.0GHz -server described above, running Linux, about 300,000 packets-per-second can -be pushed through the system. The number of hops and the size of the packets -is less important. The limiting factor is the number of times that the -operating system needs to handle a packet. The 300,000 pps figure represents -the number of times the system as a whole needed to deal with a packet. As -more network hops are added, this increases the number of context switches -and decreases the throughput seen on the full length of the network path. 
+Because this software is primarily a network emulator, the more appropriate question is *how much network traffic can it handle?* On the same 3.0GHz server described above, running Linux, about 300,000 packets-per-second can be pushed through the system. The number of hops and the size of the packets is less important. The limiting factor is the number of times that the operating system needs to handle a packet. The 300,000 pps figure represents the number of times the system as a whole needed to deal with a packet. As more network hops are added, this increases the number of context switches and decreases the throughput seen on the full length of the network path. -!!! note +**NOTE: The right question to be asking is *"how much traffic?"*, not *"how many nodes?"*.** - The right question to be asking is *"how much traffic?"*, not - *"how many nodes?"*. +For a more detailed study of performance in CORE, refer to the following publications: -For a more detailed study of performance in CORE, refer to the following -publications: +* J\. Ahrenholz, T. Goff, and B. Adamson, Integration of the CORE and EMANE Network Emulators, Proceedings of the IEEE Military Communications Conference 2011, November 2011. -* J\. Ahrenholz, T. Goff, and B. Adamson, Integration of the CORE and EMANE - Network Emulators, Proceedings of the IEEE Military Communications Conference 2011, November 2011. -* Ahrenholz, J., Comparison of CORE Network Emulation Platforms, Proceedings - of the IEEE Military Communications Conference 2010, pp. 864-869, November 2010. -* J\. Ahrenholz, C. Danilov, T. Henderson, and J.H. Kim, CORE: A real-time - network emulator, Proceedings of IEEE MILCOM Conference, 2008. +* Ahrenholz, J., Comparison of CORE Network Emulation Platforms, Proceedings of the IEEE Military Communications Conference 2010, pp. 864-869, November 2010. + +* J\. Ahrenholz, C. Danilov, T. Henderson, and J.H. Kim, CORE: A real-time network emulator, Proceedings of IEEE MILCOM Conference, 2008. 
diff --git a/docs/pycco.css b/docs/pycco.css new file mode 100644 index 00000000..aef571a5 --- /dev/null +++ b/docs/pycco.css @@ -0,0 +1,190 @@ +/*--------------------- Layout and Typography ----------------------------*/ +body { + font-family: 'Palatino Linotype', 'Book Antiqua', Palatino, FreeSerif, serif; + font-size: 16px; + line-height: 24px; + color: #252519; + margin: 0; padding: 0; + background: #f5f5ff; +} +a { + color: #261a3b; +} + a:visited { + color: #261a3b; + } +p { + margin: 0 0 15px 0; +} +h1, h2, h3, h4, h5, h6 { + margin: 40px 0 15px 0; +} +h2, h3, h4, h5, h6 { + margin-top: 0; + } +#container { + background: white; + } +#container, div.section { + position: relative; +} +#background { + position: absolute; + top: 0; left: 580px; right: 0; bottom: 0; + background: #f5f5ff; + border-left: 1px solid #e5e5ee; + z-index: 0; +} +#jump_to, #jump_page { + background: white; + -webkit-box-shadow: 0 0 25px #777; -moz-box-shadow: 0 0 25px #777; + -webkit-border-bottom-left-radius: 5px; -moz-border-radius-bottomleft: 5px; + font: 10px Arial; + text-transform: uppercase; + cursor: pointer; + text-align: right; +} +#jump_to, #jump_wrapper { + position: fixed; + right: 0; top: 0; + padding: 5px 10px; +} + #jump_wrapper { + padding: 0; + display: none; + } + #jump_to:hover #jump_wrapper { + display: block; + } + #jump_page { + padding: 5px 0 3px; + margin: 0 0 25px 25px; + } + #jump_page .source { + display: block; + padding: 5px 10px; + text-decoration: none; + border-top: 1px solid #eee; + } + #jump_page .source:hover { + background: #f5f5ff; + } + #jump_page .source:first-child { + } +div.docs { + float: left; + max-width: 500px; + min-width: 500px; + min-height: 5px; + padding: 10px 25px 1px 50px; + vertical-align: top; + text-align: left; +} + .docs pre { + margin: 15px 0 15px; + padding-left: 15px; + } + .docs p tt, .docs p code { + background: #f8f8ff; + border: 1px solid #dedede; + font-size: 12px; + padding: 0 0.2em; + } + .octowrap { + position: relative; + } + .octothorpe { + font: 12px Arial; + text-decoration: none; + color: #454545; + position: absolute; + top: 3px; left: -20px; + padding: 1px 2px; + opacity: 0; + -webkit-transition: opacity 0.2s linear; + } + div.docs:hover .octothorpe { + opacity: 1; + } +div.code { + margin-left: 580px; + padding: 14px 15px 16px 50px; + vertical-align: top; +} + .code pre, .docs p code { + font-size: 12px; + } + pre, tt, code { + line-height: 18px; + font-family: Monaco, Consolas, "Lucida Console", monospace; + margin: 0; padding: 0; + } +div.clearall { + clear: both; +} + + +/*---------------------- Syntax Highlighting -----------------------------*/ +td.linenos { background-color: #f0f0f0; padding-right: 10px; } +span.lineno { background-color: #f0f0f0; padding: 0 5px 0 5px; } +body .hll { background-color: #ffffcc } +body .c { color: #408080; font-style: italic } /* Comment */ +body .err { border: 1px solid #FF0000 } /* Error */ +body .k { color: #954121 } /* Keyword */ +body .o { color: #666666 } /* Operator */ +body .cm { color: #408080; font-style: italic } /* Comment.Multiline */ +body .cp { color: #BC7A00 } /* Comment.Preproc */ +body .c1 { color: #408080; font-style: italic } /* Comment.Single */ +body .cs { color: #408080; font-style: italic } /* Comment.Special */ +body .gd { color: #A00000 } /* Generic.Deleted */ +body .ge { font-style: italic } /* Generic.Emph */ +body .gr { color: #FF0000 } /* Generic.Error */ +body .gh { color: #000080; font-weight: bold } /* Generic.Heading */ +body .gi { color: #00A000 } /* 
Generic.Inserted */ +body .go { color: #808080 } /* Generic.Output */ +body .gp { color: #000080; font-weight: bold } /* Generic.Prompt */ +body .gs { font-weight: bold } /* Generic.Strong */ +body .gu { color: #800080; font-weight: bold } /* Generic.Subheading */ +body .gt { color: #0040D0 } /* Generic.Traceback */ +body .kc { color: #954121 } /* Keyword.Constant */ +body .kd { color: #954121; font-weight: bold } /* Keyword.Declaration */ +body .kn { color: #954121; font-weight: bold } /* Keyword.Namespace */ +body .kp { color: #954121 } /* Keyword.Pseudo */ +body .kr { color: #954121; font-weight: bold } /* Keyword.Reserved */ +body .kt { color: #B00040 } /* Keyword.Type */ +body .m { color: #666666 } /* Literal.Number */ +body .s { color: #219161 } /* Literal.String */ +body .na { color: #7D9029 } /* Name.Attribute */ +body .nb { color: #954121 } /* Name.Builtin */ +body .nc { color: #0000FF; font-weight: bold } /* Name.Class */ +body .no { color: #880000 } /* Name.Constant */ +body .nd { color: #AA22FF } /* Name.Decorator */ +body .ni { color: #999999; font-weight: bold } /* Name.Entity */ +body .ne { color: #D2413A; font-weight: bold } /* Name.Exception */ +body .nf { color: #0000FF } /* Name.Function */ +body .nl { color: #A0A000 } /* Name.Label */ +body .nn { color: #0000FF; font-weight: bold } /* Name.Namespace */ +body .nt { color: #954121; font-weight: bold } /* Name.Tag */ +body .nv { color: #19469D } /* Name.Variable */ +body .ow { color: #AA22FF; font-weight: bold } /* Operator.Word */ +body .w { color: #bbbbbb } /* Text.Whitespace */ +body .mf { color: #666666 } /* Literal.Number.Float */ +body .mh { color: #666666 } /* Literal.Number.Hex */ +body .mi { color: #666666 } /* Literal.Number.Integer */ +body .mo { color: #666666 } /* Literal.Number.Oct */ +body .sb { color: #219161 } /* Literal.String.Backtick */ +body .sc { color: #219161 } /* Literal.String.Char */ +body .sd { color: #219161; font-style: italic } /* Literal.String.Doc */ +body .s2 { color: #219161 } /* Literal.String.Double */ +body .se { color: #BB6622; font-weight: bold } /* Literal.String.Escape */ +body .sh { color: #219161 } /* Literal.String.Heredoc */ +body .si { color: #BB6688; font-weight: bold } /* Literal.String.Interpol */ +body .sx { color: #954121 } /* Literal.String.Other */ +body .sr { color: #BB6688 } /* Literal.String.Regex */ +body .s1 { color: #219161 } /* Literal.String.Single */ +body .ss { color: #19469D } /* Literal.String.Symbol */ +body .bp { color: #954121 } /* Name.Builtin.Pseudo */ +body .vc { color: #19469D } /* Name.Variable.Class */ +body .vg { color: #19469D } /* Name.Variable.Global */ +body .vi { color: #19469D } /* Name.Variable.Instance */ +body .il { color: #666666 } /* Literal.Number.Integer.Long */ diff --git a/docs/python.md b/docs/python.md deleted file mode 100644 index 0985bb8d..00000000 --- a/docs/python.md +++ /dev/null @@ -1,437 +0,0 @@ -# Python API - -## Overview - -Writing your own Python scripts offers a rich programming environment with -complete control over all aspects of the emulation. - -The scripts need to be ran with root privileges because they create new network -namespaces. In general, a CORE Python script does not connect to the CORE -daemon, in fact the *core-daemon* is just another Python script that uses -the CORE Python modules and exchanges messages with the GUI. - -## Examples - -### Node Models - -When creating nodes of type `core.nodes.base.CoreNode` these are the default models -and the services they map to. 
- -* mdr - * zebra, OSPFv3MDR, IPForward -* PC - * DefaultRoute -* router - * zebra, OSPFv2, OSPFv3, IPForward -* host - * DefaultRoute, SSH - -### Interface Helper - -There is an interface helper class that can be leveraged for convenience -when creating interface data for nodes. Alternatively one can manually create -a `core.emulator.data.InterfaceData` class instead with appropriate information. - -Manually creating interface data: - -```python -from core.emulator.data import InterfaceData - -# id is optional and will set to the next available id -# name is optional and will default to eth -# mac is optional and will result in a randomly generated mac -iface_data = InterfaceData( - id=0, - name="eth0", - ip4="10.0.0.1", - ip4_mask=24, - ip6="2001::", - ip6_mask=64, -) -``` - -Leveraging the interface prefixes helper class: - -```python -from core.emulator.data import IpPrefixes - -ip_prefixes = IpPrefixes(ip4_prefix="10.0.0.0/24", ip6_prefix="2001::/64") -# node is used to get an ip4/ip6 address indexed from within the above prefixes -# name is optional and would default to eth -# mac is optional and will result in a randomly generated mac -iface_data = ip_prefixes.create_iface( - node=node, name="eth0", mac="00:00:00:00:aa:00" -) -``` - -### Listening to Events - -Various events that can occur within a session can be listened to. - -Event types: - -* session - events for changes in session state and mobility start/stop/pause -* node - events for node movements and icon changes -* link - events for link configuration changes and wireless link add/delete -* config - configuration events when legacy gui joins a session -* exception - alert/error events -* file - file events when the legacy gui joins a session - -```python -def event_listener(event): - print(event) - - -# add an event listener to event type you want to listen to -# each handler will receive an object unique to that type -session.event_handlers.append(event_listener) -session.exception_handlers.append(event_listener) -session.node_handlers.append(event_listener) -session.link_handlers.append(event_listener) -session.file_handlers.append(event_listener) -session.config_handlers.append(event_listener) -``` - -### Configuring Links - -Links can be configured at the time of creation or during runtime. 
- -Currently supported configuration options: - -* bandwidth (bps) -* delay (us) -* dup (%) -* jitter (us) -* loss (%) - -```python -from core.emulator.data import LinkOptions - -# configuring when creating a link -options = LinkOptions( - bandwidth=54_000_000, - delay=5000, - dup=5, - loss=5.5, - jitter=0, -) -session.add_link(n1_id, n2_id, iface1_data, iface2_data, options) - -# configuring during runtime -session.update_link(n1_id, n2_id, iface1_id, iface2_id, options) -``` - -### Peer to Peer Example - -```python -# required imports -from core.emulator.coreemu import CoreEmu -from core.emulator.data import IpPrefixes -from core.emulator.enumerations import EventTypes -from core.nodes.base import CoreNode, Position - -# ip nerator for example -ip_prefixes = IpPrefixes(ip4_prefix="10.0.0.0/24") - -# create emulator instance for creating sessions and utility methods -coreemu = CoreEmu() -session = coreemu.create_session() - -# must be in configuration state for nodes to start, when using "node_add" below -session.set_state(EventTypes.CONFIGURATION_STATE) - -# create nodes -position = Position(x=100, y=100) -n1 = session.add_node(CoreNode, position=position) -position = Position(x=300, y=100) -n2 = session.add_node(CoreNode, position=position) - -# link nodes together -iface1 = ip_prefixes.create_iface(n1) -iface2 = ip_prefixes.create_iface(n2) -session.add_link(n1.id, n2.id, iface1, iface2) - -# start session -session.instantiate() - -# do whatever you like here -input("press enter to shutdown") - -# stop session -session.shutdown() -``` - -### Switch/Hub Example - -```python -# required imports -from core.emulator.coreemu import CoreEmu -from core.emulator.data import IpPrefixes -from core.emulator.enumerations import EventTypes -from core.nodes.base import CoreNode, Position -from core.nodes.network import SwitchNode - -# ip nerator for example -ip_prefixes = IpPrefixes(ip4_prefix="10.0.0.0/24") - -# create emulator instance for creating sessions and utility methods -coreemu = CoreEmu() -session = coreemu.create_session() - -# must be in configuration state for nodes to start, when using "node_add" below -session.set_state(EventTypes.CONFIGURATION_STATE) - -# create switch -position = Position(x=200, y=200) -switch = session.add_node(SwitchNode, position=position) - -# create nodes -position = Position(x=100, y=100) -n1 = session.add_node(CoreNode, position=position) -position = Position(x=300, y=100) -n2 = session.add_node(CoreNode, position=position) - -# link nodes to switch -iface1 = ip_prefixes.create_iface(n1) -session.add_link(n1.id, switch.id, iface1) -iface1 = ip_prefixes.create_iface(n2) -session.add_link(n2.id, switch.id, iface1) - -# start session -session.instantiate() - -# do whatever you like here -input("press enter to shutdown") - -# stop session -session.shutdown() -``` - -### WLAN Example - -```python -# required imports -from core.emulator.coreemu import CoreEmu -from core.emulator.data import IpPrefixes -from core.emulator.enumerations import EventTypes -from core.location.mobility import BasicRangeModel -from core.nodes.base import CoreNode, Position -from core.nodes.network import WlanNode - -# ip nerator for example -ip_prefixes = IpPrefixes(ip4_prefix="10.0.0.0/24") - -# create emulator instance for creating sessions and utility methods -coreemu = CoreEmu() -session = coreemu.create_session() - -# must be in configuration state for nodes to start, when using "node_add" below -session.set_state(EventTypes.CONFIGURATION_STATE) - -# create wlan -position = 
Position(x=200, y=200) -wlan = session.add_node(WlanNode, position=position) - -# create nodes -options = CoreNode.create_options() -options.model = "mdr" -position = Position(x=100, y=100) -n1 = session.add_node(CoreNode, position=position, options=options) -position = Position(x=300, y=100) -n2 = session.add_node(CoreNode, position=position, options=options) - -# configuring wlan -session.mobility.set_model_config(wlan.id, BasicRangeModel.name, { - "range": "280", - "bandwidth": "55000000", - "delay": "6000", - "jitter": "5", - "error": "5", -}) - -# link nodes to wlan -iface1 = ip_prefixes.create_iface(n1) -session.add_link(n1.id, wlan.id, iface1) -iface1 = ip_prefixes.create_iface(n2) -session.add_link(n2.id, wlan.id, iface1) - -# start session -session.instantiate() - -# do whatever you like here -input("press enter to shutdown") - -# stop session -session.shutdown() -``` - -### EMANE Example - -For EMANE you can import and use one of the existing models and -use its name for configuration. - -Current models: - -* core.emane.ieee80211abg.EmaneIeee80211abgModel -* core.emane.rfpipe.EmaneRfPipeModel -* core.emane.tdma.EmaneTdmaModel -* core.emane.bypass.EmaneBypassModel - -Their configurations options are driven dynamically from parsed EMANE manifest files -from the installed version of EMANE. - -Options and their purpose can be found at the [EMANE Wiki](https://github.com/adjacentlink/emane/wiki). - -If configuring EMANE global settings or model mac/phy specific settings, any value not provided -will use the defaults. When no configuration is used, the defaults are used. - -```python -# required imports -from core.emane.models.ieee80211abg import EmaneIeee80211abgModel -from core.emane.nodes import EmaneNet -from core.emulator.coreemu import CoreEmu -from core.emulator.data import IpPrefixes -from core.emulator.enumerations import EventTypes -from core.nodes.base import CoreNode, Position - -# ip nerator for example -ip_prefixes = IpPrefixes(ip4_prefix="10.0.0.0/24") - -# create emulator instance for creating sessions and utility methods -coreemu = CoreEmu() -session = coreemu.create_session() - -# location information is required to be set for emane -session.location.setrefgeo(47.57917, -122.13232, 2.0) -session.location.refscale = 150.0 - -# must be in configuration state for nodes to start, when using "node_add" below -session.set_state(EventTypes.CONFIGURATION_STATE) - -# create emane -options = EmaneNet.create_options() -options.emane_model = EmaneIeee80211abgModel.name -position = Position(x=200, y=200) -emane = session.add_node(EmaneNet, position=position, options=options) - -# create nodes -options = CoreNode.create_options() -options.model = "mdr" -position = Position(x=100, y=100) -n1 = session.add_node(CoreNode, position=position, options=options) -position = Position(x=300, y=100) -n2 = session.add_node(CoreNode, position=position, options=options) - -# configure general emane settings -config = session.emane.get_configs() -config.update({ - "eventservicettl": "2" -}) - -# configure emane model settings -# using a dict mapping currently support values as strings -session.emane.set_model_config(emane.id, EmaneIeee80211abgModel.name, { - "unicastrate": "3", -}) - -# link nodes to emane -iface1 = ip_prefixes.create_iface(n1) -session.add_link(n1.id, emane.id, iface1) -iface1 = ip_prefixes.create_iface(n2) -session.add_link(n2.id, emane.id, iface1) - -# start session -session.instantiate() - -# do whatever you like here -input("press enter to shutdown") - -# stop session 
-session.shutdown()
-```
-
-EMANE Model Configuration:
-
-```python
-from core import utils
-
-# standardized way to retrieve an appropriate config id
-# iface id can be omitted, to allow a general configuration for a model, per node
-config_id = utils.iface_config_id(node.id, iface_id)
-# set emane configuration for the config id
-session.emane.set_config(config_id, EmaneIeee80211abgModel.name, {
-    "unicastrate": "3",
-})
-```
-
-## Configuring a Service
-
-Services help generate and run bash scripts on nodes for a given purpose.
-
-Configuring the files of a service results in a specific hard-coded script being
-generated, instead of the default scripts, which may leverage dynamic generation.
-
-The following features can be configured for a service:
-
-* configs - files that will be generated
-* dirs - directories that will be mounted unique to the node
-* startup - commands to run to start a service
-* validate - commands to run to validate a service
-* shutdown - commands to run to stop a service
-
-Editing service properties:
-
-```python
-# configure a service, for a node, for a given session
-session.services.set_service(node_id, service_name)
-service = session.services.get_service(node_id, service_name)
-service.configs = ("file1.sh", "file2.sh")
-service.dirs = ("/etc/node",)
-service.startup = ("bash file1.sh",)
-service.validate = ()
-service.shutdown = ()
-```
-
-When editing a service file, it must be the name of a `config`
-file that the service will generate.
-
-Editing a service file:
-
-```python
-# to edit the contents of a generated file you can specify
-# the service, the file name, and its contents
-session.services.set_service_file(
-    node_id,
-    service_name,
-    file_name,
-    "echo hello",
-)
-```
-
-## File Examples
-
-File versions of the network examples can be found
-[here](https://github.com/coreemu/core/tree/master/package/examples/python).
-
-## Executing Scripts from GUI
-
-To execute a Python script from the GUI, you need to have the following.
-
-The builtin name check below is used to know the script is being executed from the GUI; this can
-be avoided if your script does not use a name check.
-
-```python
-if __name__ in ["__main__", "__builtin__"]:
-    main()
-```
-
-A script can add sessions to the core-daemon. A global *coreemu* variable is
-exposed to the script pointing to the *CoreEmu* object.
-
-The example below has a fallback to a new CoreEmu object, in case you would
-like to run the script standalone, outside of the core-daemon.
-
-```python
-coreemu = globals().get("coreemu") or CoreEmu()
-session = coreemu.create_session()
-```
diff --git a/docs/scripting.md b/docs/scripting.md
new file mode 100644
index 00000000..0b0ca47f
--- /dev/null
+++ b/docs/scripting.md
@@ -0,0 +1,120 @@
+
+# CORE Python Scripting
+
+* Table of Contents
+{:toc}
+
+## Overview
+
+CORE can be used via the GUI or Python scripting. Writing your own Python scripts offers a rich programming environment with complete control over all aspects of the emulation. This chapter provides a brief introduction to scripting. Most of the documentation is available from sample scripts, or online via interactive Python.
+
+The best starting point is the sample scripts that are included with CORE. If you have a CORE source tree, the example script files can be found under *core/daemon/examples/api/*. When CORE is installed from packages, the example script files will be in */usr/share/core/examples/api/* (or under the */usr/local/* prefix when installed from source).
For the most part, the example scripts are self-documenting; see the comments contained within the Python code.
+
+The scripts should be run with root privileges because they create new network namespaces. In general, a CORE Python script does not connect to the CORE daemon; in fact, the *core-daemon* is just another Python script that uses the CORE Python modules and exchanges messages with the GUI. To connect the GUI to your scripts, see the included sample scripts that allow for GUI connections.
+
+Here are the basic elements of a CORE Python script:
+
+```python
+from core.emulator.coreemu import CoreEmu
+from core.emulator.emudata import IpPrefixes
+from core.enumerations import EventTypes
+from core.enumerations import NodeTypes
+
+# ip generator for example
+prefixes = IpPrefixes(ip4_prefix="10.83.0.0/16")
+
+# create emulator instance for creating sessions and utility methods
+coreemu = CoreEmu()
+session = coreemu.create_session()
+
+# must be in configuration state for nodes to start, when using "node_add" below
+session.set_state(EventTypes.CONFIGURATION_STATE)
+
+# create switch network node
+switch = session.add_node(_type=NodeTypes.SWITCH)
+
+# create two nodes and link each one to the switch
+for _ in xrange(2):
+    node = session.add_node()
+    interface = prefixes.create_interface(node)
+    session.add_link(node.objid, switch.objid, interface_one=interface)
+
+# instantiate session
+session.instantiate()
+
+# shutdown session
+coreemu.shutdown()
+```
+
+The above script creates a CORE session with two nodes connected to a switch, instantiates the emulation, and then shuts it down.
+
+A good way to learn about the CORE Python modules is via interactive Python. Scripts can be run using *python -i*. Cut and paste the simple script above and you will have two nodes connected by a switch.
+
+The CORE Python modules are documented with comments in the code. From an interactive Python shell, you can retrieve online help about the various classes and methods; for example *help(nodes.CoreNode)* or *help(Session)*.
+
+**NOTE: The CORE daemon *core-daemon* manages a list of sessions and allows the GUI to connect and control sessions. Your Python script uses the same CORE modules but runs independently of the daemon. The daemon does not need to be running for your script to work.**
+
+The session created by a Python script may be viewed in the GUI if certain steps are followed. The GUI has a *File Menu*, *Execute Python script...* option for running a script and automatically connecting to it. Once connected, normal GUI interaction is possible, such as moving and double-clicking nodes, activating Widgets, etc.
+
+The script should have a line such as the following for running it from the GUI.
+
+```python
+if __name__ in ["__main__", "__builtin__"]:
+    main()
+```
+
+A script can add sessions to the core-daemon. A global *coreemu* variable is exposed to the script pointing to the *CoreEmu* object.
+The example below has a fallback to a new CoreEmu object, in case you would like to run the script standalone, outside of the core-daemon.
+
+```python
+coreemu = globals().get("coreemu", CoreEmu())
+session = coreemu.create_session()
+```
+
+Finally, nodes and networks need to have their coordinates set; otherwise, they will be grouped at the coordinates *<0, 0>*. First sketching the topology in the GUI and then using the *Export Python script* option may help here.
+ +```python +switch.setposition(x=80,y=50) +``` + +A fully-worked example script that you can launch from the GUI is available in the examples directory. + +## Configuring Services + +Examples setting or configuring custom services for a node. + +```python +# create session and node +coreemu = CoreEmu() +session = coreemu.create_session() +node = session.add_node() + +# create and retrieve custom service +session.services.set_service(node.objid, "ServiceName") +custom_service = session.services.get_service(node.objid, "ServiceName") + +# set custom file data +session.services.set_service_file(node.objid, "ServiceName", "FileName", "custom file data") + +# set services to a node, using custom services when defined +session.services.add_services(node, node.type, ["Service1", "Service2"]) +``` + +# Configuring EMANE Models + +Examples for configuring custom emane model settings. + +```python +# create session and emane network +coreemu = CoreEmu() +session = coreemu.create_session() +emane_network = session.create_emane_network( + model=EmaneIeee80211abgModel, + geo_reference=(47.57917, -122.13232, 2.00000) +) +emane_network.setposition(x=80, y=50) + +# set custom emane model config +config = {} +session.emane.set_model_config(emane_network.objid, EmaneIeee80211abgModel.name, config) +``` diff --git a/docs/services.md b/docs/services.md index 9e6e3642..793e6f99 100644 --- a/docs/services.md +++ b/docs/services.md @@ -1,299 +1,13 @@ -# Services (Deprecated) +# CORE Services -## Overview +* Table of Contents +{:toc} -CORE uses the concept of services to specify what processes or scripts run on a -node when it is started. Layer-3 nodes such as routers and PCs are defined by -the services that they run. +## Custom Services -Services may be customized for each node, or new custom services can be -created. New node types can be created each having a different name, icon, and -set of default services. Each service defines the per-node directories, -configuration files, startup index, starting commands, validation commands, -shutdown commands, and meta-data associated with a node. +CORE supports custom developed services by way of dynamically loading user created python files. +Custom services should be placed within the path defined by **custom_services_dir** in the CORE +configuration file. This path cannot end in **/services**. -!!! note - - **Network namespace nodes do not undergo the normal Linux boot process** - using the **init**, **upstart**, or **systemd** frameworks. These - lightweight nodes use configured CORE *services*. 
- -## Available Services - -| Service Group | Services | -|----------------------------------|-----------------------------------------------------------------------| -| [BIRD](services/bird.md) | BGP, OSPF, RADV, RIP, Static | -| [EMANE](services/emane.md) | Transport Service | -| [FRR](services/frr.md) | BABEL, BGP, OSPFv2, OSPFv3, PIMD, RIP, RIPNG, Zebra | -| [NRL](services/nrl.md) | arouted, MGEN Sink, MGEN Actor, NHDP, OLSR, OLSRORG, OLSRv2, SMF | -| [Quagga](services/quagga.md) | BABEL, BGP, OSPFv2, OSPFv3, OSPFv3 MDR, RIP, RIPNG, XPIMD, Zebra | -| [SDN](services/sdn.md) | OVS, RYU | -| [Security](services/security.md) | Firewall, IPsec, NAT, VPN Client, VPN Server | -| [Utility](services/utility.md) | ATD, Routing Utils, DHCP, FTP, IP Forward, PCAP, RADVD, SSF, UCARP | -| [XORP](services/xorp.md) | BGP, OLSR, OSPFv2, OSPFv3, PIMSM4, PIMSM6, RIP, RIPNG, Router Manager | - -## Node Types and Default Services - -Here are the default node types and their services: - -| Node Type | Services | -|-----------|--------------------------------------------------------------------------------------------------------------------------------------------| -| *router* | zebra, OSFPv2, OSPFv3, and IPForward services for IGP link-state routing. | -| *host* | DefaultRoute and SSH services, representing an SSH server having a default route when connected directly to a router. | -| *PC* | DefaultRoute service for having a default route when connected directly to a router. | -| *mdr* | zebra, OSPFv3MDR, and IPForward services for wireless-optimized MANET Designated Router routing. | -| *prouter* | a physical router, having the same default services as the *router* node type; for incorporating Linux testbed machines into an emulation. | - -Configuration files can be automatically generated by each service. For -example, CORE automatically generates routing protocol configuration for the -router nodes in order to simplify the creation of virtual networks. - -To change the services associated with a node, double-click on the node to -invoke its configuration dialog and click on the *Services...* button, -or right-click a node a choose *Services...* from the menu. -Services are enabled or disabled by clicking on their names. The button next to -each service name allows you to customize all aspects of this service for this -node. For example, special route redistribution commands could be inserted in -to the Quagga routing configuration associated with the zebra service. - -To change the default services associated with a node type, use the Node Types -dialog available from the *Edit* button at the end of the Layer-3 nodes -toolbar, or choose *Node types...* from the *Session* menu. Note that -any new services selected are not applied to existing nodes if the nodes have -been customized. - -## Customizing a Service - -A service can be fully customized for a particular node. From the node's -configuration dialog, click on the button next to the service name to invoke -the service customization dialog for that service. -The dialog has three tabs for configuring the different aspects of the service: -files, directories, and startup/shutdown. - -!!! note - - A **yellow** customize icon next to a service indicates that service - requires customization (e.g. the *Firewall* service). - A **green** customize icon indicates that a custom configuration exists. - Click the *Defaults* button when customizing a service to remove any - customizations. 
- -The Files tab is used to display or edit the configuration files or scripts that -are used for this service. Files can be selected from a drop-down list, and -their contents are displayed in a text entry below. The file contents are -generated by the CORE daemon based on the network topology that exists at -the time the customization dialog is invoked. - -The Directories tab shows the per-node directories for this service. For the -default types, CORE nodes share the same filesystem tree, except for these -per-node directories that are defined by the services. For example, the -**/var/run/quagga** directory needs to be unique for each node running -the Zebra service, because Quagga running on each node needs to write separate -PID files to that directory. - -!!! note - - The **/var/log** and **/var/run** directories are - mounted uniquely per-node by default. - Per-node mount targets can be found in **/tmp/pycore./.conf/** - -The Startup/shutdown tab lists commands that are used to start and stop this -service. The startup index allows configuring when this service starts relative -to the other services enabled for this node; a service with a lower startup -index value is started before those with higher values. Because shell scripts -generated by the Files tab will not have execute permissions set, the startup -commands should include the shell name, with -something like ```sh script.sh```. - -Shutdown commands optionally terminate the process(es) associated with this -service. Generally they send a kill signal to the running process using the -*kill* or *killall* commands. If the service does not terminate -the running processes using a shutdown command, the processes will be killed -when the *vnoded* daemon is terminated (with *kill -9*) and -the namespace destroyed. It is a good practice to -specify shutdown commands, which will allow for proper process termination, and -for run-time control of stopping and restarting services. - -Validate commands are executed following the startup commands. A validate -command can execute a process or script that should return zero if the service -has started successfully, and have a non-zero return value for services that -have had a problem starting. For example, the *pidof* command will check -if a process is running and return zero when found. When a validate command -produces a non-zero return value, an exception is generated, which will cause -an error to be displayed in the Check Emulation Light. - -!!! note - - To start, stop, and restart services during run-time, right-click a - node and use the *Services...* menu. - -## New Services - -Services can save time required to configure nodes, especially if a number -of nodes require similar configuration procedures. New services can be -introduced to automate tasks. - -### Leveraging UserDefined - -The easiest way to capture the configuration of a new process into a service -is by using the **UserDefined** service. This is a blank service where any -aspect may be customized. The UserDefined service is convenient for testing -ideas for a service before adding a new service type. - -### Creating New Services - -!!! note - - The directory name used in **custom_services_dir** below should be unique and - should not correspond to any existing Python module name. For example, don't - use the name **subprocess** or **services**. - -1. Modify the example service shown below - to do what you want. It could generate config/script files, mount per-node - directories, start processes/scripts, etc. 
sample.py is a Python file that - defines one or more classes to be imported. You can create multiple Python - files that will be imported. - -2. Put these files in a directory such as `/home//.coregui/custom_services` - Note that the last component of this directory name **custom_services** should not - be named the same as any python module, due to naming conflicts. - -3. Add a **custom_services_dir = `/home//.coregui/custom_services`** entry to the - /etc/core/core.conf file. - -4. Restart the CORE daemon (core-daemon). Any import errors (Python syntax) - should be displayed in the daemon output. - -5. Start using your custom service on your nodes. You can create a new node - type that uses your service, or change the default services for an existing - node type, or change individual nodes. - -If you have created a new service type that may be useful to others, please -consider contributing it to the CORE project. - -#### Example Custom Service - -Below is the skeleton for a custom service with some documentation. Most -people would likely only setup the required class variables **(name/group)**. -Then define the **configs** (files they want to generate) and implement the -**generate_config** function to dynamically create the files wanted. Finally -the **startup** commands would be supplied, which typically tends to be -running the shell files generated. - -```python -""" -Simple example custom service, used to drive shell commands on a node. -""" -from typing import Tuple - -from core.nodes.base import CoreNode -from core.services.coreservices import CoreService, ServiceMode - - -class ExampleService(CoreService): - """ - Example Custom CORE Service - - :cvar name: name used as a unique ID for this service and is required, no spaces - :cvar group: allows you to group services within the GUI under a common name - :cvar executables: executables this service depends on to function, if executable is - not on the path, service will not be loaded - :cvar dependencies: services that this service depends on for startup, tuple of - service names - :cvar dirs: directories that this service will create within a node - :cvar configs: files that this service will generate, without a full path this file - goes in the node's directory e.g. /tmp/pycore.12345/n1.conf/myfile - :cvar startup: commands used to start this service, any non-zero exit code will - cause a failure - :cvar validate: commands used to validate that a service was started, any non-zero - exit code will cause a failure - :cvar validation_mode: validation mode, used to determine startup success. - NON_BLOCKING - runs startup commands, and validates success with validation commands - BLOCKING - runs startup commands, and validates success with the startup commands themselves - TIMER - runs startup commands, and validates success by waiting for "validation_timer" alone - :cvar validation_timer: time in seconds for a service to wait for validation, before - determining success in TIMER/NON_BLOCKING modes. - :cvar validation_period: period in seconds to wait before retrying validation, - only used in NON_BLOCKING mode - :cvar shutdown: shutdown commands to stop this service - """ - - name: str = "ExampleService" - group: str = "Utility" - executables: Tuple[str, ...] = () - dependencies: Tuple[str, ...] = () - dirs: Tuple[str, ...] = () - configs: Tuple[str, ...] = ("myservice1.sh", "myservice2.sh") - startup: Tuple[str, ...] = tuple(f"sh {x}" for x in configs) - validate: Tuple[str, ...] 
= () - validation_mode: ServiceMode = ServiceMode.NON_BLOCKING - validation_timer: int = 5 - validation_period: float = 0.5 - shutdown: Tuple[str, ...] = () - - @classmethod - def on_load(cls) -> None: - """ - Provides a way to run some arbitrary logic when the service is loaded, possibly - to help facilitate dynamic settings for the environment. - - :return: nothing - """ - pass - - @classmethod - def get_configs(cls, node: CoreNode) -> Tuple[str, ...]: - """ - Provides a way to dynamically generate the config files from the node a service - will run. Defaults to the class definition and can be left out entirely if not - needed. - - :param node: core node that the service is being ran on - :return: tuple of config files to create - """ - return cls.configs - - @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: - """ - Returns a string representation for a file, given the node the service is - starting on the config filename that this information will be used for. This - must be defined, if "configs" are defined. - - :param node: core node that the service is being ran on - :param filename: configuration file to generate - :return: configuration file content - """ - cfg = "#!/bin/sh\n" - if filename == cls.configs[0]: - cfg += "# auto-generated by MyService (sample.py)\n" - for iface in node.get_ifaces(): - cfg += f'echo "Node {node.name} has interface {iface.name}"\n' - elif filename == cls.configs[1]: - cfg += "echo hello" - return cfg - - @classmethod - def get_startup(cls, node: CoreNode) -> Tuple[str, ...]: - """ - Provides a way to dynamically generate the startup commands from the node a - service will run. Defaults to the class definition and can be left out entirely - if not needed. - - :param node: core node that the service is being ran on - :return: tuple of startup commands to run - """ - return cls.startup - - @classmethod - def get_validate(cls, node: CoreNode) -> Tuple[str, ...]: - """ - Provides a way to dynamically generate the validate commands from the node a - service will run. Defaults to the class definition and can be left out entirely - if not needed. - - :param node: core node that the service is being ran on - :return: tuple of commands to validate service startup with - """ - return cls.validate -``` +Here is an example service with documentation describing functionality: +[Example Service](exampleservice.html) diff --git a/docs/services/bird.md b/docs/services/bird.md deleted file mode 100644 index db2f7701..00000000 --- a/docs/services/bird.md +++ /dev/null @@ -1,45 +0,0 @@ -# BIRD Internet Routing Daemon - -## Overview - -The [BIRD Internet Routing Daemon](https://bird.network.cz/) is a routing -daemon; i.e., a software responsible for managing kernel packet forwarding -tables. It aims to develop a dynamic IP routing daemon with full support of -all modern routing protocols, easy to use configuration interface and powerful -route filtering language, primarily targeted on (but not limited to) Linux and -other UNIX-like systems and distributed under the GNU General Public License. -BIRD has a free implementation of several well known and common routing and -router-supplemental protocols, namely RIP, RIPng, OSPFv2, OSPFv3, BGP, BFD, -and NDP/RA. BIRD supports IPv4 and IPv6 address families, Linux kernel and -several BSD variants (tested on FreeBSD, NetBSD and OpenBSD). BIRD consists -of bird daemon and birdc interactive CLI client used for supervision. 
- -In order to be able to use the BIRD Internet Routing Protocol, you must first -install the project on your machine. - -## BIRD Package Install - -```shell -sudo apt-get install bird -``` - -## BIRD Source Code Install - -You can download BIRD source code from its -[official repository.](https://gitlab.labs.nic.cz/labs/bird/) - -```shell -./configure -make -su -make install -vi /etc/bird/bird.conf -``` - -The installation will place the bird directory inside */etc* where you will -also find its config file. - -In order to be able to do use the Bird Internet Routing Protocol, you must -modify *bird.conf* due to the fact that the given configuration file is not -configured beyond allowing the bird daemon to start, which means that nothing -else will happen if you run it. diff --git a/docs/services/emane.md b/docs/services/emane.md deleted file mode 100644 index 3f904091..00000000 --- a/docs/services/emane.md +++ /dev/null @@ -1,10 +0,0 @@ -# EMANE Services - -## Overview - -EMANE related services for CORE. - -## Transport Service - -Helps with setting up EMANE for using an external transport. - diff --git a/docs/services/frr.md b/docs/services/frr.md deleted file mode 100644 index aa2db6ff..00000000 --- a/docs/services/frr.md +++ /dev/null @@ -1,91 +0,0 @@ -# FRRouting - -## Overview - -FRRouting is a routing software package that provides TCP/IP based routing services with routing protocols support such -as BGP, RIP, OSPF, IS-IS and more. FRR also supports special BGP Route Reflector and Route Server behavior. In addition -to traditional IPv4 routing protocols, FRR also supports IPv6 routing protocols. With an SNMP daemon that supports the -AgentX protocol, FRR provides routing protocol MIB read-only access (SNMP Support). - -FRR (as of v7.2) currently supports the following protocols: - -* BGPv4 -* OSPFv2 -* OSPFv3 -* RIPv1/v2/ng -* IS-IS -* PIM-SM/MSDP/BSM(AutoRP) -* LDP -* BFD -* Babel -* PBR -* OpenFabric -* VRRPv2/v3 -* EIGRP (alpha) -* NHRP (alpha) - -## FRRouting Package Install - -Ubuntu 19.10 and later - -```shell -sudo apt update && sudo apt install frr -``` - -Ubuntu 16.04 and Ubuntu 18.04 - -```shell -sudo apt install curl -curl -s https://deb.frrouting.org/frr/keys.asc | sudo apt-key add - -FRRVER="frr-stable" -echo deb https://deb.frrouting.org/frr $(lsb_release -s -c) $FRRVER | sudo tee -a /etc/apt/sources.list.d/frr.list -sudo apt update && sudo apt install frr frr-pythontools -``` - -Fedora 31 - -```shell -sudo dnf update && sudo dnf install frr -``` - -## FRRouting Source Code Install - -Building FRR from source is the best way to ensure you have the latest features and bug fixes. Details for each -supported platform, including dependency package listings, permissions, and other gotchas, are in the developer’s -documentation. - -FRR’s source is available on the project [GitHub page](https://github.com/FRRouting/frr). - -```shell -git clone https://github.com/FRRouting/frr.git -``` - -Change into your FRR source directory and issue: - -```shell -./bootstrap.sh -``` - -Then, choose the configuration options that you wish to use for the installation. You can find these options on -FRR's [official webpage](http://docs.frrouting.org/en/latest/installation.html). 
Once you have chosen your configure -options, run the configure script and pass the options you chose: - -```shell -./configure \ - --prefix=/usr \ - --enable-exampledir=/usr/share/doc/frr/examples/ \ - --localstatedir=/var/run/frr \ - --sbindir=/usr/lib/frr \ - --sysconfdir=/etc/frr \ - --enable-pimd \ - --enable-watchfrr \ - ... -``` - -After configuring the software, you are ready to build and install it in your system. - -```shell -make && sudo make install -``` - -If everything finishes successfully, FRR should be installed. diff --git a/docs/services/nrl.md b/docs/services/nrl.md deleted file mode 100644 index da26ab25..00000000 --- a/docs/services/nrl.md +++ /dev/null @@ -1,86 +0,0 @@ -# NRL Services - -## Overview - -The Protean Protocol Prototyping Library (ProtoLib) is a cross-platform library that allows applications to be built -while supporting a variety of platforms including Linux, Windows, WinCE/PocketPC, MacOS, FreeBSD, Solaris, etc as well -as the simulation environments of NS2 and Opnet. The goal of the Protolib is to provide a set of simple, cross-platform -C++ classes that allow development of network protocols and applications that can run on different platforms and in -network simulation environments. While Protolib provides an overall framework for developing working protocol -implementations, applications, and simulation modules, the individual classes are designed for use as stand-alone -components when possible. Although Protolib is principally for research purposes, the code has been constructed to -provide robust, efficient performance and adaptability to real applications. In some cases, the code consists of data -structures, etc useful in protocol implementations and, in other cases, provides common, cross-platform interfaces to -system services and functions (e.g., sockets, timers, routing tables, etc). - -Currently, the Naval Research Laboratory uses this library to develop a wide variety of protocols.The NRL Protolib -currently supports the following protocols: - -* MGEN_Sink -* NHDP -* SMF -* OLSR -* OLSRv2 -* OLSRORG -* MgenActor -* arouted - -## NRL Installation - -In order to be able to use the different protocols that NRL offers, you must first download the support library itself. -You can get the source code from their [NRL Protolib Repo](https://github.com/USNavalResearchLaboratory/protolib). - -## Multi-Generator (MGEN) - -Download MGEN from the [NRL MGEN Repo](https://github.com/USNavalResearchLaboratory/mgen), unpack it and copy the -protolib library into the main folder *mgen*. Execute the following commands to build the protocol. - -```shell -cd mgen/makefiles -make -f Makefile.{os} mgen -``` - -## Neighborhood Discovery Protocol (NHDP) - -Download NHDP from the [NRL NHDP Repo](https://github.com/USNavalResearchLaboratory/NCS-Downloads/tree/master/nhdp). - -```shell -sudo apt-get install libpcap-dev libboost-all-dev -wget https://github.com/protocolbuffers/protobuf/releases/download/v3.8.0/protoc-3.8.0-linux-x86_64.zip -unzip protoc-3.8.0-linux-x86_64.zip -``` - -Then place the binaries in your $PATH. To know your paths you can issue the following command - -```shell -echo $PATH -``` - -Go to the downloaded *NHDP* tarball, unpack it and place the protolib library inside the NHDP main folder. Now, compile -the NHDP Protocol. 
- -```shell -cd nhdp/unix -make -f Makefile.{os} -``` - -## Simplified Multicast Forwarding (SMF) - -Download SMF from the [NRL SMF Repo](https://github.com/USNavalResearchLaboratory/nrlsmf) , unpack it and place the -protolib library inside the *smf* main folder. - -```shell -cd mgen/makefiles -make -f Makefile.{os} -``` - -## Optimized Link State Routing Protocol (OLSR) - -To install the OLSR protocol, download their source code from -their [NRL OLSR Repo](https://github.com/USNavalResearchLaboratory/nrlolsr). Unpack it and place the previously -downloaded protolib library inside the *nrlolsr* main directory. Then execute the following commands: - -```shell -cd ./unix -make -f Makefile.{os} -``` diff --git a/docs/services/quagga.md b/docs/services/quagga.md deleted file mode 100644 index 6842b5e7..00000000 --- a/docs/services/quagga.md +++ /dev/null @@ -1,32 +0,0 @@ -# Quagga Routing Suite - -## Overview - -Quagga is a routing software suite, providing implementations of OSPFv2, OSPFv3, RIP v1 and v2, RIPng and BGP-4 for Unix -platforms, particularly FreeBSD, Linux, Solaris and NetBSD. Quagga is a fork of GNU Zebra which was developed by -Kunihiro Ishiguro. -The Quagga architecture consists of a core daemon, zebra, which acts as an abstraction layer to the underlying Unix -kernel and presents the Zserv API over a Unix or TCP stream to Quagga clients. It is these Zserv clients which typically -implement a routing protocol and communicate routing updates to the zebra daemon. - -## Quagga Package Install - -```shell -sudo apt-get install quagga -``` - -## Quagga Source Install - -First, download the source code from their [official webpage](https://www.quagga.net/). - -```shell -sudo apt-get install gawk -``` - -Extract the tarball, go to the directory of your currently extracted code and issue the following commands. - -```shell -./configure -make -sudo make install -``` diff --git a/docs/services/sdn.md b/docs/services/sdn.md deleted file mode 100644 index 05e8606e..00000000 --- a/docs/services/sdn.md +++ /dev/null @@ -1,30 +0,0 @@ -# Software Defined Networking - -## Overview - -Ryu is a component-based software defined networking framework. Ryu provides software components with well defined API -that make it easy for developers to create new network management and control applications. Ryu supports various -protocols for managing network devices, such as OpenFlow, Netconf, OF-config, etc. About OpenFlow, Ryu supports fully -1.0, 1.2, 1.3, 1.4, 1.5 and Nicira Extensions. All of the code is freely available under the Apache 2.0 license. - -## Installation - -### Prerequisites - -```shell -sudo apt-get install gcc python-dev libffi-dev libssl-dev libxml2-dev libxslt1-dev zlib1g-dev -``` - -### Ryu Package Install - -```shell -pip install ryu -``` - -### Ryu Source Install - -```shell -git clone git://github.com/osrg/ryu.git -cd ryu -pip install . -``` diff --git a/docs/services/security.md b/docs/services/security.md deleted file mode 100644 index a621009d..00000000 --- a/docs/services/security.md +++ /dev/null @@ -1,90 +0,0 @@ -# Security Services - -## Overview - -The security services offer a wide variety of protocols capable of satisfying the most use cases available. Security -services such as IP security protocols, for providing security at the IP layer, as well as the suite of protocols -designed to provide that security, through authentication and encryption of IP network packets. Virtual Private -Networks (VPNs) and Firewalls are also available for use to the user. 
- -## Installation - -Libraries needed for some security services. - -```shell -sudo apt-get install ipsec-tools racoon -``` - -## OpenVPN - -Below is a set of instruction for running a very simple OpenVPN client/server scenario. - -### Installation - -```shell -# install openvpn -sudo apt install openvpn - -# retrieve easyrsa3 for key/cert generation -git clone https://github.com/OpenVPN/easy-rsa -``` - -### Generating Keys/Certs - -```shell -# navigate into easyrsa3 repo subdirectory that contains built binary -cd easy-rsa/easyrsa3 - -# initalize pki -./easyrsa init-pki - -# build ca -./easyrsa build-ca - -# generate and sign server keypair(s) -SERVER_NAME=server1 -./easyrsa get-req $SERVER_NAME nopass -./easyrsa sign-req server $SERVER_NAME - -# generate and sign client keypair(s) -CLIENT_NAME=client1 -./easyrsa get-req $CLIENT_NAME nopass -./easyrsa sign-req client $CLIENT_NAME - -# DH generation -./easyrsa gen-dh - -# create directory for keys for CORE to use -# NOTE: the default is set to a directory that requires using sudo, but can be -# anywhere and not require sudo at all -KEYDIR=/etc/core/keys -sudo mkdir $KEYDIR - -# move keys to directory -sudo cp pki/ca.crt $KEYDIR -sudo cp pki/issued/*.crt $KEYDIR -sudo cp pki/private/*.key $KEYDIR -sudo cp pki/dh.pem $KEYDIR/dh1024.pem -``` - -### Configure Server Nodes - -Add VPNServer service to nodes desired for running an OpenVPN server. - -Modify [sampleVPNServer](https://github.com/coreemu/core/blob/master/package/examples/services/sampleVPNServer) for the -following - -* Edit keydir key/cert directory -* Edit keyname to use generated server name above -* Edit vpnserver to match an address that the server node will have - -### Configure Client Nodes - -Add VPNClient service to nodes desired for acting as an OpenVPN client. - -Modify [sampleVPNClient](https://github.com/coreemu/core/blob/master/package/examples/services/sampleVPNClient) for the -following - -* Edit keydir key/cert directory -* Edit keyname to use generated client name above -* Edit vpnserver to match the address a server was configured to use diff --git a/docs/services/utility.md b/docs/services/utility.md deleted file mode 100644 index 698de4f8..00000000 --- a/docs/services/utility.md +++ /dev/null @@ -1,44 +0,0 @@ -# Utility Services - -## Overview - -Variety of convenience services for carrying out common networking changes. - -The following services are provided as utilities: - -* UCARP -* IP Forward -* Default Routing -* Default Muticast Routing -* Static Routing -* SSH -* DHCP -* DHCP Client -* FTP -* HTTP -* PCAP -* RADVD -* ATD - -## Installation - -To install the functionality of the previously metioned services you can run the following command: - -```shell -sudo apt-get install isc-dhcp-server apache2 libpcap-dev radvd at -``` - -## UCARP - -UCARP allows a couple of hosts to share common virtual IP addresses in order to provide automatic failover. It is a -portable userland implementation of the secure and patent-free Common Address Redundancy Protocol (CARP, OpenBSD's -alternative to the patents-bloated VRRP). - -Strong points of the CARP protocol are: very low overhead, cryptographically signed messages, interoperability between -different operating systems and no need for any dedicated extra network link between redundant hosts. 
- -### Installation - -```shell -sudo apt-get install ucarp -``` diff --git a/docs/services/xorp.md b/docs/services/xorp.md deleted file mode 100644 index a9bd108d..00000000 --- a/docs/services/xorp.md +++ /dev/null @@ -1,52 +0,0 @@ -# XORP routing suite - -## Overview - -XORP is an open networking platform that supports OSPF, RIP, BGP, OLSR, VRRP, PIM, IGMP (Multicast) and other routing -protocols. Most protocols support IPv4 and IPv6 where applicable. It is known to work on various Linux distributions and -flavors of BSD. - -XORP started life as a project at the ICSI Center for Open Networking (ICON) at the International Computer Science -Institute in Berkeley, California, USA, and spent some time with the team at XORP, Inc. It is now maintained and -improved on a volunteer basis by a core of long-term XORP developers and some newer contributors. - -XORP's primary goal is to be an open platform for networking protocol implementations and an alternative to proprietary -and closed networking products in the marketplace today. It is the only open source platform to offer integrated -multicast capability. - -XORP design philosophy is: - -* modularity -* extensibility -* performance -* robustness - This is achieved by carefully separating functionalities into independent modules, and by providing an API for each - module. - -XORP divides into two subsystems. The higher-level ("user-level") subsystem consists of the routing protocols. The -lower-level ("kernel") manages the forwarding path, and provides APIs for the higher-level to access. - -User-level XORP uses multi-process architecture with one process per routing protocol, and a novel inter-process -communication mechanism called XRL (XORP Resource Locator). - -The lower-level subsystem can use traditional UNIX kernel forwarding, or Click modular router. The modularity and -independency of the lower-level from the user-level subsystem allows for its easily replacement with other solutions -including high-end hardware-based forwarding engines. - -## Installation - -In order to be able to install the XORP Routing Suite, you must first install scons in order to compile it. - -```shell -sudo apt-get install scons -``` - -Then, download XORP from its official [release web page](http://www.xorp.org/releases/current/). 
- -```shell -http://www.xorp.org/releases/current/ -cd xorp -sudo apt-get install libssl-dev ncurses-dev -scons -scons install -``` diff --git a/docs/static/architecture.png b/docs/static/architecture.png deleted file mode 100644 index f4ce3388..00000000 Binary files a/docs/static/architecture.png and /dev/null differ diff --git a/docs/static/core-architecture.jpg b/docs/static/core-architecture.jpg new file mode 100644 index 00000000..04f6390f Binary files /dev/null and b/docs/static/core-architecture.jpg differ diff --git a/docs/static/core-gui.png b/docs/static/core-gui.png deleted file mode 100644 index 6d0fbd40..00000000 Binary files a/docs/static/core-gui.png and /dev/null differ diff --git a/docs/static/core-workflow.jpg b/docs/static/core-workflow.jpg new file mode 100644 index 00000000..b60eff7d Binary files /dev/null and b/docs/static/core-workflow.jpg differ diff --git a/docs/static/distributed-controlnetwork.png b/docs/static/distributed-controlnetwork.png new file mode 100644 index 00000000..ed9b0354 Binary files /dev/null and b/docs/static/distributed-controlnetwork.png differ diff --git a/docs/static/distributed-emane-configuration.png b/docs/static/distributed-emane-configuration.png new file mode 100644 index 00000000..219e5d43 Binary files /dev/null and b/docs/static/distributed-emane-configuration.png differ diff --git a/docs/static/distributed-emane-network.png b/docs/static/distributed-emane-network.png new file mode 100644 index 00000000..ebc5577f Binary files /dev/null and b/docs/static/distributed-emane-network.png differ diff --git a/docs/static/emane-configuration.png b/docs/static/emane-configuration.png deleted file mode 100644 index ad66a6f3..00000000 Binary files a/docs/static/emane-configuration.png and /dev/null differ diff --git a/docs/static/emane-single-pc.png b/docs/static/emane-single-pc.png deleted file mode 100644 index 8c58d825..00000000 Binary files a/docs/static/emane-single-pc.png and /dev/null differ diff --git a/docs/static/gui/host.png b/docs/static/gui/host.png deleted file mode 100644 index e6efda08..00000000 Binary files a/docs/static/gui/host.png and /dev/null differ diff --git a/docs/static/gui/hub.png b/docs/static/gui/hub.png deleted file mode 100644 index c9a2523b..00000000 Binary files a/docs/static/gui/hub.png and /dev/null differ diff --git a/docs/static/gui/lanswitch.png b/docs/static/gui/lanswitch.png deleted file mode 100644 index eb9ba593..00000000 Binary files a/docs/static/gui/lanswitch.png and /dev/null differ diff --git a/docs/static/gui/link.png b/docs/static/gui/link.png deleted file mode 100644 index d6b6745b..00000000 Binary files a/docs/static/gui/link.png and /dev/null differ diff --git a/docs/static/gui/marker.png b/docs/static/gui/marker.png deleted file mode 100644 index 8c60bacb..00000000 Binary files a/docs/static/gui/marker.png and /dev/null differ diff --git a/docs/static/gui/mdr.png b/docs/static/gui/mdr.png deleted file mode 100644 index b0678ee7..00000000 Binary files a/docs/static/gui/mdr.png and /dev/null differ diff --git a/docs/static/gui/oval.png b/docs/static/gui/oval.png deleted file mode 100644 index 1babf1b7..00000000 Binary files a/docs/static/gui/oval.png and /dev/null differ diff --git a/docs/static/gui/pc.png b/docs/static/gui/pc.png deleted file mode 100644 index 3f587e70..00000000 Binary files a/docs/static/gui/pc.png and /dev/null differ diff --git a/docs/static/gui/rectangle.png b/docs/static/gui/rectangle.png deleted file mode 100644 index ca6c8c06..00000000 Binary files 
a/docs/static/gui/rectangle.png and /dev/null differ diff --git a/docs/static/gui/rj45.png b/docs/static/gui/rj45.png deleted file mode 100644 index c9d87cfd..00000000 Binary files a/docs/static/gui/rj45.png and /dev/null differ diff --git a/docs/static/gui/router.png b/docs/static/gui/router.png deleted file mode 100644 index 1de5014a..00000000 Binary files a/docs/static/gui/router.png and /dev/null differ diff --git a/docs/static/gui/run.png b/docs/static/gui/run.png deleted file mode 100644 index a39a997f..00000000 Binary files a/docs/static/gui/run.png and /dev/null differ diff --git a/docs/static/gui/select.png b/docs/static/gui/select.png deleted file mode 100644 index 04e18891..00000000 Binary files a/docs/static/gui/select.png and /dev/null differ diff --git a/docs/static/gui/start.png b/docs/static/gui/start.png deleted file mode 100644 index 719f4cd9..00000000 Binary files a/docs/static/gui/start.png and /dev/null differ diff --git a/docs/static/gui/stop.png b/docs/static/gui/stop.png deleted file mode 100644 index 1e87c929..00000000 Binary files a/docs/static/gui/stop.png and /dev/null differ diff --git a/docs/static/gui/text.png b/docs/static/gui/text.png deleted file mode 100644 index 14a85dc0..00000000 Binary files a/docs/static/gui/text.png and /dev/null differ diff --git a/docs/static/gui/tunnel.png b/docs/static/gui/tunnel.png deleted file mode 100644 index 2871b74f..00000000 Binary files a/docs/static/gui/tunnel.png and /dev/null differ diff --git a/docs/static/gui/wlan.png b/docs/static/gui/wlan.png deleted file mode 100644 index db979a09..00000000 Binary files a/docs/static/gui/wlan.png and /dev/null differ diff --git a/docs/static/single-pc-emane.png b/docs/static/single-pc-emane.png new file mode 100644 index 00000000..579255b8 Binary files /dev/null and b/docs/static/single-pc-emane.png differ diff --git a/docs/static/tutorial-common/running-join.png b/docs/static/tutorial-common/running-join.png deleted file mode 100644 index 30fbcb80..00000000 Binary files a/docs/static/tutorial-common/running-join.png and /dev/null differ diff --git a/docs/static/tutorial-common/running-open.png b/docs/static/tutorial-common/running-open.png deleted file mode 100644 index 7e3e722c..00000000 Binary files a/docs/static/tutorial-common/running-open.png and /dev/null differ diff --git a/docs/static/tutorial1/link-config-dialog.png b/docs/static/tutorial1/link-config-dialog.png deleted file mode 100644 index 73d4ed2d..00000000 Binary files a/docs/static/tutorial1/link-config-dialog.png and /dev/null differ diff --git a/docs/static/tutorial1/link-config.png b/docs/static/tutorial1/link-config.png deleted file mode 100644 index 35f45327..00000000 Binary files a/docs/static/tutorial1/link-config.png and /dev/null differ diff --git a/docs/static/tutorial1/scenario.png b/docs/static/tutorial1/scenario.png deleted file mode 100644 index c1a2dfc7..00000000 Binary files a/docs/static/tutorial1/scenario.png and /dev/null differ diff --git a/docs/static/tutorial2/wireless-config-delay.png b/docs/static/tutorial2/wireless-config-delay.png deleted file mode 100644 index b375af76..00000000 Binary files a/docs/static/tutorial2/wireless-config-delay.png and /dev/null differ diff --git a/docs/static/tutorial2/wireless-configuration.png b/docs/static/tutorial2/wireless-configuration.png deleted file mode 100644 index 9b87959c..00000000 Binary files a/docs/static/tutorial2/wireless-configuration.png and /dev/null differ diff --git a/docs/static/tutorial2/wireless.png b/docs/static/tutorial2/wireless.png 
deleted file mode 100644 index 8543117d..00000000 Binary files a/docs/static/tutorial2/wireless.png and /dev/null differ diff --git a/docs/static/tutorial3/mobility-script.png b/docs/static/tutorial3/mobility-script.png deleted file mode 100644 index 6f32e5b1..00000000 Binary files a/docs/static/tutorial3/mobility-script.png and /dev/null differ diff --git a/docs/static/tutorial3/motion_continued_breaks_link.png b/docs/static/tutorial3/motion_continued_breaks_link.png deleted file mode 100644 index cc1f5dcd..00000000 Binary files a/docs/static/tutorial3/motion_continued_breaks_link.png and /dev/null differ diff --git a/docs/static/tutorial3/motion_from_ns2_file.png b/docs/static/tutorial3/motion_from_ns2_file.png deleted file mode 100644 index 704cc1d9..00000000 Binary files a/docs/static/tutorial3/motion_from_ns2_file.png and /dev/null differ diff --git a/docs/static/tutorial3/move-n2.png b/docs/static/tutorial3/move-n2.png deleted file mode 100644 index befcd4b0..00000000 Binary files a/docs/static/tutorial3/move-n2.png and /dev/null differ diff --git a/docs/static/tutorial5/VM-network-settings.png b/docs/static/tutorial5/VM-network-settings.png deleted file mode 100644 index 5d47738e..00000000 Binary files a/docs/static/tutorial5/VM-network-settings.png and /dev/null differ diff --git a/docs/static/tutorial5/configure-the-rj45.png b/docs/static/tutorial5/configure-the-rj45.png deleted file mode 100644 index 0e2b8f8b..00000000 Binary files a/docs/static/tutorial5/configure-the-rj45.png and /dev/null differ diff --git a/docs/static/tutorial5/rj45-connector.png b/docs/static/tutorial5/rj45-connector.png deleted file mode 100644 index 8c8e86ef..00000000 Binary files a/docs/static/tutorial5/rj45-connector.png and /dev/null differ diff --git a/docs/static/tutorial5/rj45-unassigned.png b/docs/static/tutorial5/rj45-unassigned.png deleted file mode 100644 index eda4a3b6..00000000 Binary files a/docs/static/tutorial5/rj45-unassigned.png and /dev/null differ diff --git a/docs/static/tutorial6/configure-icon.png b/docs/static/tutorial6/configure-icon.png deleted file mode 100644 index 52a9e2e8..00000000 Binary files a/docs/static/tutorial6/configure-icon.png and /dev/null differ diff --git a/docs/static/tutorial6/create-nodes.png b/docs/static/tutorial6/create-nodes.png deleted file mode 100644 index 38257e24..00000000 Binary files a/docs/static/tutorial6/create-nodes.png and /dev/null differ diff --git a/docs/static/tutorial6/hidden-nodes.png b/docs/static/tutorial6/hidden-nodes.png deleted file mode 100644 index 604829dd..00000000 Binary files a/docs/static/tutorial6/hidden-nodes.png and /dev/null differ diff --git a/docs/static/tutorial6/linked-nodes.png b/docs/static/tutorial6/linked-nodes.png deleted file mode 100644 index 8e75007e..00000000 Binary files a/docs/static/tutorial6/linked-nodes.png and /dev/null differ diff --git a/docs/static/tutorial6/only-node1-moving.png b/docs/static/tutorial6/only-node1-moving.png deleted file mode 100644 index 01ac2ebd..00000000 Binary files a/docs/static/tutorial6/only-node1-moving.png and /dev/null differ diff --git a/docs/static/tutorial6/scenario-with-motion.png b/docs/static/tutorial6/scenario-with-motion.png deleted file mode 100644 index e30e781c..00000000 Binary files a/docs/static/tutorial6/scenario-with-motion.png and /dev/null differ diff --git a/docs/static/tutorial6/scenario-with-terrain.png b/docs/static/tutorial6/scenario-with-terrain.png deleted file mode 100644 index db424e9b..00000000 Binary files 
a/docs/static/tutorial6/scenario-with-terrain.png and /dev/null differ diff --git a/docs/static/tutorial6/select-wallpaper.png b/docs/static/tutorial6/select-wallpaper.png deleted file mode 100644 index 41d40f57..00000000 Binary files a/docs/static/tutorial6/select-wallpaper.png and /dev/null differ diff --git a/docs/static/tutorial6/wlan-links.png b/docs/static/tutorial6/wlan-links.png deleted file mode 100644 index ab6c152d..00000000 Binary files a/docs/static/tutorial6/wlan-links.png and /dev/null differ diff --git a/docs/static/tutorial7/scenario.png b/docs/static/tutorial7/scenario.png deleted file mode 100644 index 1c677aa3..00000000 Binary files a/docs/static/tutorial7/scenario.png and /dev/null differ diff --git a/docs/static/workflow.png b/docs/static/workflow.png deleted file mode 100644 index 35613983..00000000 Binary files a/docs/static/workflow.png and /dev/null differ diff --git a/docs/tutorials/common/grpc.md b/docs/tutorials/common/grpc.md deleted file mode 100644 index 2a85d7c8..00000000 --- a/docs/tutorials/common/grpc.md +++ /dev/null @@ -1,22 +0,0 @@ -## gRPC Python Scripts - -You can also run the same steps above, using the provided gRPC script versions of scenarios. -Below are the steps to run and join one of these scenario, then you can continue with -the remaining steps of a given section. - -1. Make sure the CORE daemon is running a terminal, if not already - ``` shell - sudop core-daemon - ``` -2. From another terminal run the tutorial python script, which will create a session to join - ``` shell - /opt/core/venv/bin/python scenario.py - ``` -3. In another terminal run the CORE GUI - ``` shell - core-gui - ``` -4. You will be presented with sessions to join, select the one created by the script -
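For reference, here is a minimal sketch of the kind of gRPC script a tutorial's **scenario.py** provides. It assumes the CORE 9.x Python client (`core.api.grpc` with `CoreGrpcClient`, `InterfaceHelper`, and the session wrappers); the node ids, positions, and addressing are illustrative only and may need adjusting for your installed release.

``` python
"""
Minimal two-node gRPC scenario sketch (assumes the CORE 9.x client API).
Run with the daemon's virtual environment, then join the session from core-gui.
"""
from core.api.grpc import client
from core.api.grpc.wrappers import Position

# helper used to generate interface addressing within a subnet
iface_helper = client.InterfaceHelper(ip4_prefix="10.0.0.0/24", ip6_prefix="2001::/64")

# connect to the running core-daemon and create a new session
core = client.CoreGrpcClient()
core.connect()
session = core.create_session()

# add two default nodes and link them together
node1 = session.add_node(1, position=Position(x=100, y=100))
node2 = session.add_node(2, position=Position(x=300, y=100))
iface1 = iface_helper.create_iface(node1.id, 0)
iface2 = iface_helper.create_iface(node2.id, 0)
session.add_link(node1=node1, node2=node2, iface1=iface1, iface2=iface2)

# start the session, making it available to join from the GUI
core.start_session(session)
```

Such a script is run the same way as the provided ones, e.g. `/opt/core/venv/bin/python scenario.py`.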


    diff --git a/docs/tutorials/overview.md b/docs/tutorials/overview.md deleted file mode 100644 index 6ec0d275..00000000 --- a/docs/tutorials/overview.md +++ /dev/null @@ -1,29 +0,0 @@ -# CORE Tutorials - -These tutorials will cover various use cases within CORE. These -tutorials will provide example python, gRPC, XML, and related files, as well -as an explanation for their usage and purpose. - -## Checklist - -These are the items you should become familiar with for running all the tutorials below. - -* [Install CORE](../install.md) -* [Tutorial Setup](setup.md) - -## Tutorials - -* [Tutorial 1 - Wired Network](tutorial1.md) - * Covers interactions when using a simple 2 node wired network -* [Tutorial 2 - Wireless Network](tutorial2.md) - * Covers interactions when using a simple 3 node wireless network -* [Tutorial 3 - Basic Mobility](tutorial3.md) - * Covers mobility interactions when using a simple 3 node wireless network -* [Tutorial 4 - Tests](tutorial4.md) - * Covers automating scenarios as tests to validate software -* [Tutorial 5 - RJ45 Node](tutorial5.md) - * Covers using the RJ45 node to connect a Windows OS -* [Tutorial 6 - Improve Visuals](tutorial6.md) - * Covers changing the look of a scenario within the CORE GUI -* [Tutorial 7 - EMANE](tutorial7.md) - * Covers using EMANE within CORE for higher fidelity RF networks diff --git a/docs/tutorials/setup.md b/docs/tutorials/setup.md deleted file mode 100644 index 858b0f1d..00000000 --- a/docs/tutorials/setup.md +++ /dev/null @@ -1,82 +0,0 @@ -# Tutorial Setup - -## Setup for CORE - -We assume the prior installation of CORE, using a virtual environment. You can -then adjust your PATH and add an alias to help more conveniently run CORE -commands. - -This can be setup in your **.bashrc** - -```shell -export PATH=$PATH:/opt/core/venv/bin -alias sudop='sudo env PATH=$PATH' -``` - -## Setup for Chat App - -There is a simple TCP chat app provided as example software to use and run within -the tutorials provided. - -### Installation - -The following will install chatapp and its scripts into **/usr/local**, which you -may need to add to PATH within node to be able to use command directly. - -``` shell -sudo python3 -m pip install . -``` - -!!! note - - Some Linux distros will not have **/usr/local** in their PATH and you - will need to compensate. - -``` shell -export PATH=$PATH:/usr/local -``` - -### Running the Server - -The server will print and log connected clients and their messages. - -``` shell -usage: chatapp-server [-h] [-a ADDRESS] [-p PORT] - -chat app server - -optional arguments: - -h, --help show this help message and exit - -a ADDRESS, --address ADDRESS - address to listen on (default: ) - -p PORT, --port PORT port to listen on (default: 9001) -``` - -### Running the Client - -The client will print and log messages from other clients and their join/leave status. - -``` shell -usage: chatapp-client [-h] -a ADDRESS [-p PORT] - -chat app client - -optional arguments: - -h, --help show this help message and exit - -a ADDRESS, --address ADDRESS - address to listen on (default: None) - -p PORT, --port PORT port to listen on (default: 9001) -``` - -### Installing the Chat App Service - -1. You will first need to edit **/etc/core/core.conf** to update the config - service path to pick up your service - ``` shell - custom_config_services_dir = - ``` -2. Then you will need to copy/move **chatapp/chatapp_service.py** to the directory - configured above -3. 
Then you will need to restart the **core-daemon** to pick up this new service -4. Now the service will be an available option under the group **ChatApp** with - the name **ChatApp Server** diff --git a/docs/tutorials/tutorial1.md b/docs/tutorials/tutorial1.md deleted file mode 100644 index 7bda7e7f..00000000 --- a/docs/tutorials/tutorial1.md +++ /dev/null @@ -1,252 +0,0 @@ -# Tutorial 1 - Wired Network - -## Overview - -This tutorial will cover some use cases when using a wired 2 node -scenario in CORE. - -


    - -## Files - -Below is the list of files used for this tutorial. - -* 2 node wired scenario - * scenario.xml - * scenario.py -* 2 node wired scenario, with **n1** running the "Chat App Server" service - * scenario_service.xml - * scenario_service.py - -## Running this Tutorial - -This section covers interactions that can be carried out for this scenario. - -Our scenario has the following nodes and addresses: - -* n1 - 10.0.0.20 -* n2 - 10.0.0.21 - -All usages below assume a clean scenario start. - -### Using Ping - -Using the command line utility **ping** can be a good way to verify connectivity -between nodes in CORE. - -* Make sure the CORE daemon is running a terminal, if not already - ``` shell - sudop core-daemon - ``` -* In another terminal run the GUI - ``` shell - core-gui - ``` -* In the GUI menu bar select **File->Open...**, then navigate to and select **scenario.xml** -


-* You can now click on the **Start Session** button to run the scenario


    -* Open a terminal on **n1** by double clicking it in the GUI -* Run the following in **n1** terminal - ``` shell - ping -c 3 10.0.0.21 - ``` -* You should see the following output - ``` shell - PING 10.0.0.21 (10.0.0.21) 56(84) bytes of data. - 64 bytes from 10.0.0.21: icmp_seq=1 ttl=64 time=0.085 ms - 64 bytes from 10.0.0.21: icmp_seq=2 ttl=64 time=0.079 ms - 64 bytes from 10.0.0.21: icmp_seq=3 ttl=64 time=0.072 ms - - --- 10.0.0.21 ping statistics --- - 3 packets transmitted, 3 received, 0% packet loss, time 1999ms - rtt min/avg/max/mdev = 0.072/0.078/0.085/0.011 ms - ``` - -### Using Tcpdump - -Using **tcpdump** can be very beneficial for examining a network. You can verify -traffic being sent/received among many other uses. - -* Make sure the CORE daemon is running a terminal, if not already - ``` shell - sudop core-daemon - ``` -* In another terminal run the GUI - ``` shell - core-gui - ``` -* In the GUI menu bar select **File->Open...**, then navigate to and select **scenario.xml** -


-* You can now click on the **Start Session** button to run the scenario


    -* Open a terminal on **n1** by double clicking it in the GUI -* Open a terminal on **n2** by double clicking it in the GUI -* Run the following in **n2** terminal - ``` shell - tcpdump -lenni eth0 - ``` -* Run the following in **n1** terminal - ``` shell - ping -c 1 10.0.0.21 - ``` -* You should see the following in **n2** terminal - ``` shell - tcpdump: verbose output suppressed, use -v or -vv for full protocol decode - listening on eth0, link-type EN10MB (Ethernet), capture size 262144 bytes - 10:23:04.685292 00:00:00:aa:00:00 > 00:00:00:aa:00:01, ethertype IPv4 (0x0800), length 98: 10.0.0.20 > 10.0.0.21: ICMP echo request, id 67, seq 1, length 64 - 10:23:04.685329 00:00:00:aa:00:01 > 00:00:00:aa:00:00, ethertype IPv4 (0x0800), length 98: 10.0.0.21 > 10.0.0.20: ICMP echo reply, id 67, seq 1, length 64 - ``` - -### Editing a Link - -You can edit links between nodes in CORE to modify loss, delay, bandwidth, and more. This can be -beneficial for understanding how software will behave in adverse conditions. - -* Make sure the CORE daemon is running a terminal, if not already - ``` shell - sudop core-daemon - ``` -* In another terminal run the GUI - ``` shell - core-gui - ``` -* In the GUI menu bar select **File->Open...**, then navigate to and select **scenario.xml** -


-* You can now click on the **Start Session** button to run the scenario


-* Right click the link between **n1** and **n2** -* Select **Configure**


-* Update the loss to **25**


    -* Open a terminal on **n1** by double clicking it in the GUI -* Run the following in **n1** terminal - ``` shell - ping -c 10 10.0.0.21 - ``` -* You should see something similar for the summary output, reflecting the change in loss - ``` shell - --- 10.0.0.21 ping statistics --- - 10 packets transmitted, 6 received, 40% packet loss, time 9000ms - rtt min/avg/max/mdev = 0.077/0.093/0.108/0.016 ms - ``` -* Remember that the loss above is compounded, since a ping and the loss applied occurs in both directions - -### Running Software - -We will now leverage the installed Chat App software to stand up a server and client -within the nodes of our scenario. - -* Make sure the CORE daemon is running a terminal, if not already - ``` shell - sudop core-daemon - ``` -* In another terminal run the GUI - ``` shell - core-gui - ``` -* In the GUI menu bar select **File->Open...**, then navigate to and select **scenario.xml** -


-* You can now click on the **Start Session** button to run the scenario


    -* Open a terminal on **n1** by double clicking it in the GUI -* Run the following in **n1** terminal - ``` shell - export PATH=$PATH:/usr/local/bin - chatapp-server - ``` -* Open a terminal on **n2** by double clicking it in the GUI -* Run the following in **n2** terminal - ``` shell - export PATH=$PATH:/usr/local/bin - chatapp-client -a 10.0.0.20 - ``` -* You will see the following output in **n1** terminal - ``` shell - chat server listening on: :9001 - [server] 10.0.0.21:44362 joining - ``` -* Type the following in **n2** terminal and hit enter - ``` shell - hello world - ``` -* You will see the following output in **n1** terminal - ``` shell - chat server listening on: :9001 - [server] 10.0.0.21:44362 joining - [10.0.0.21:44362] hello world - ``` - -### Tailing a Log - -In this case we are using the service based scenario. This will automatically start -and run the Chat App Server on **n1** and log to a file. This case will demonstrate -using `tail -f` to observe the output of running software. - -* Make sure the CORE daemon is running a terminal, if not already - ``` shell - sudop core-daemon - ``` -* In another terminal run the GUI - ``` shell - core-gui - ``` -* In the GUI menu bar select **File->Open...**, then navigate to and select **scenario_service.xml** -


-* You can now click on the **Start Session** button to run the scenario


    -* Open a terminal on **n1** by double clicking it in the GUI -* Run the following in **n1** terminal - ``` shell - tail -f chatapp.log - ``` -* Open a terminal on **n2** by double clicking it in the GUI -* Run the following in **n2** terminal - ``` shell - export PATH=$PATH:/usr/local/bin - chatapp-client -a 10.0.0.20 - ``` -* You will see the following output in **n1** terminal - ``` shell - chat server listening on: :9001 - [server] 10.0.0.21:44362 joining - ``` -* Type the following in **n2** terminal and hit enter - ``` shell - hello world - ``` -* You will see the following output in **n1** terminal - ``` shell - chat server listening on: :9001 - [server] 10.0.0.21:44362 joining - [10.0.0.21:44362] hello world - ``` - ---8<-- "tutorials/common/grpc.md" diff --git a/docs/tutorials/tutorial2.md b/docs/tutorials/tutorial2.md deleted file mode 100644 index 7b82e04e..00000000 --- a/docs/tutorials/tutorial2.md +++ /dev/null @@ -1,145 +0,0 @@ -# Tutorial 2 - Wireless Network - -## Overview - -This tutorial will cover the use of a 3 node scenario in CORE. Then -running a chat server on one node and a chat client on the other. The client will -send a simple message and the server will log receipt of the message. - -## Files - -Below is the list of files used for this tutorial. - -* scenario.xml - 3 node CORE xml scenario file (wireless) -* scenario.py - 3 node CORE gRPC python script (wireless) - -## Running with the XML Scenario File - -This section will cover running this sample tutorial using the -XML scenario file, leveraging an NS2 mobility file. - -* Make sure the **core-daemon** is running a terminal - ```shell - sudop core-daemon - ``` -* In another terminal run the GUI - ```shell - core-gui - ``` -* In the GUI menu bar select **File->Open...** -* Navigate to and select this tutorials **scenario.xml** file -* You can now click play to start the session -


    -* Note that OSPF routing protocol is included in the scenario to provide routes to other nodes, as they are discovered -* Double click node **n4** to open a terminal and ping node **n2** - ```shell - ping -c 2 10.0.0.2 - PING 10.0.0.2 (10.0.0.2) 56(84) bytes of data. - 64 bytes from 10.0.0.2: icmp_seq=1 ttl=63 time=20.2 ms - 64 bytes from 10.0.0.2: icmp_seq=2 ttl=63 time=20.2 ms - - --- 10.0.0.2 ping statistics --- - 2 packets transmitted, 2 received, 0% packet loss, time 1000ms - rtt min/avg/max/mdev = 20.168/20.173/20.178/0.005 ms - ``` - -### Configuring Delay - -* Right click on the **wlan1** node and select **WLAN Config**, then set delay to 500000 -


    -* Using the open terminal for node **n4**, ping **n2** again, expect about 2 seconds delay - ```shell - ping -c 5 10.0.0.2 - 64 bytes from 10.0.0.2: icmp_seq=1 ttl=63 time=2001 ms - 64 bytes from 10.0.0.2: icmp_seq=2 ttl=63 time=2000 ms - 64 bytes from 10.0.0.2: icmp_seq=3 ttl=63 time=2000 ms - 64 bytes from 10.0.0.2: icmp_seq=4 ttl=63 time=2000 ms - 64 bytes from 10.0.0.2: icmp_seq=5 ttl=63 time=2000 ms - - --- 10.0.0.2 ping statistics --- - 5 packets transmitted, 5 received, 0% packet loss, time 4024ms - rtt min/avg/max/mdev = 2000.176/2000.438/2001.166/0.376 ms, pipe 2 - ``` - -### Configure Loss - -* Right click on the **wlan1** node and select **WLAN Config**, set delay back to 5000 and loss to 10 -


    -* Using the open terminal for node **n4**, ping **n2** again, expect to notice considerable loss - ```shell - ping -c 10 10.0.0.2 - PING 10.0.0.2 (10.0.0.2) 56(84) bytes of data. - 64 bytes from 10.0.0.2: icmp_seq=1 ttl=63 time=20.4 ms - 64 bytes from 10.0.0.2: icmp_seq=2 ttl=63 time=20.5 ms - 64 bytes from 10.0.0.2: icmp_seq=3 ttl=63 time=20.2 ms - 64 bytes from 10.0.0.2: icmp_seq=4 ttl=63 time=20.8 ms - 64 bytes from 10.0.0.2: icmp_seq=5 ttl=63 time=21.9 ms - 64 bytes from 10.0.0.2: icmp_seq=8 ttl=63 time=22.7 ms - 64 bytes from 10.0.0.2: icmp_seq=9 ttl=63 time=22.4 ms - 64 bytes from 10.0.0.2: icmp_seq=10 ttl=63 time=20.3 ms - - --- 10.0.0.2 ping statistics --- - 10 packets transmitted, 8 received, 20% packet loss, time 9064ms - rtt min/avg/max/mdev = 20.188/21.143/22.717/0.967 ms - ``` -* Make sure to set loss back to 0 when done - -## Running with the gRPC Python Script - -This section will cover running this sample tutorial using the -gRPC python script and providing mobility over the gRPC interface. - -* Make sure the **core-daemon** is running a terminal - ```shell - sudop core-daemon - ``` -* In another terminal run the GUI - ```shell - core-gui - ``` -* From another terminal run the **scenario.py** script - ```shell - /opt/core/venv/bin/python scenario.py - ``` -* In the GUI dialog box select the session and click connect -* You will now have joined the already running scenario - -


    - -## Running Software - -We will now leverage the installed Chat App software to stand up a server and client -within the nodes of our scenario. You can use the bases of the running scenario from -either **scenario.xml** or the **scenario.py** gRPC script. - -* In the GUI double click on node **n4**, this will bring up a terminal for this node -* In the **n4** terminal, run the server - ```shell - export PATH=$PATH:/usr/local/bin - chatapp-server - ``` -* In the GUI double click on node **n2**, this will bring up a terminal for this node -* In the **n2** terminal, run the client - ```shell - export PATH=$PATH:/usr/local/bin - chatapp-client -a 10.0.0.4 - ``` -* This will result in **n2** connecting to the server -* In the **n2** terminal, type a message at the client prompt - ```shell - >>hello world - ``` -* Observe that text typed at client then appears in the terminal of **n4** - ```shell - chat server listening on: :9001 - [server] 10.0.0.2:53684 joining - [10.0.0.2:53684] hello world - ``` diff --git a/docs/tutorials/tutorial3.md b/docs/tutorials/tutorial3.md deleted file mode 100644 index eaa2a5e6..00000000 --- a/docs/tutorials/tutorial3.md +++ /dev/null @@ -1,155 +0,0 @@ -# Tutorial 3 - Basic Mobility - -## Overview - -This tutorial will cover using a 3 node scenario in CORE with basic mobility. -Mobility can be provided from a NS2 file or by including mobility commands in a gRPC script. - -## Files - -Below is the list of files used for this tutorial. - -* movements1.txt - a NS2 mobility input file -* scenario.xml - 3 node CORE xml scenario file (wireless) -* scenario.py - 3 node CORE gRPC python script (wireless) -* printout.py - event listener - -## Running with XML file using NS2 Movement - -This section will cover running this sample tutorial using the XML scenario -file, leveraging an NS2 file for mobility. - -* Make sure the **core-daemon** is running a terminal - ```shell - sudop core-daemon - ``` -* In another terminal run the GUI - ```shell - core-gui - ``` -* Observe the format of the N2 file, cat movements1.txt. Note that this file was manually developed. 
- ```shell - $node_(1) set X_ 208.1 - $node_(1) set Y_ 211.05 - $node_(1) set Z_ 0 - $ns_ at 0.0 "$node_(1) setdest 208.1 211.05 0.00" - $node_(2) set X_ 393.1 - $node_(2) set Y_ 223.05 - $node_(2) set Z_ 0 - $ns_ at 0.0 "$node_(2) setdest 393.1 223.05 0.00" - $node_(4) set X_ 499.1 - $node_(4) set Y_ 186.05 - $node_(4) set Z_ 0 - $ns_ at 0.0 "$node_(4) setdest 499.1 186.05 0.00" - $ns_ at 1.0 "$node_(1) setdest 190.1 225.05 0.00" - $ns_ at 1.0 "$node_(2) setdest 393.1 225.05 0.00" - $ns_ at 1.0 "$node_(4) setdest 515.1 186.05 0.00" - $ns_ at 2.0 "$node_(1) setdest 175.1 250.05 0.00" - $ns_ at 2.0 "$node_(2) setdest 393.1 250.05 0.00" - $ns_ at 2.0 "$node_(4) setdest 530.1 186.05 0.00" - $ns_ at 3.0 "$node_(1) setdest 160.1 275.05 0.00" - $ns_ at 3.0 "$node_(2) setdest 393.1 275.05 0.00" - $ns_ at 3.0 "$node_(4) setdest 530.1 186.05 0.00" - $ns_ at 4.0 "$node_(1) setdest 160.1 300.05 0.00" - $ns_ at 4.0 "$node_(2) setdest 393.1 300.05 0.00" - $ns_ at 4.0 "$node_(4) setdest 550.1 186.05 0.00" - $ns_ at 5.0 "$node_(1) setdest 160.1 275.05 0.00" - $ns_ at 5.0 "$node_(2) setdest 393.1 275.05 0.00" - $ns_ at 5.0 "$node_(4) setdest 530.1 186.05 0.00" - $ns_ at 6.0 "$node_(1) setdest 175.1 250.05 0.00" - $ns_ at 6.0 "$node_(2) setdest 393.1 250.05 0.00" - $ns_ at 6.0 "$node_(4) setdest 515.1 186.05 0.00" - $ns_ at 7.0 "$node_(1) setdest 190.1 225.05 0.00" - $ns_ at 7.0 "$node_(2) setdest 393.1 225.05 0.00" - $ns_ at 7.0 "$node_(4) setdest 499.1 186.05 0.00" - ``` -* In the GUI menu bar select **File->Open...**, and select this tutorials **scenario.xml** file -* You can now click play to start the session -* Select the play button on the Mobility Player to start mobility -* Observe movement of the nodes -* Note that OSPF routing protocol is included in the scenario to build routing table so that routes to other nodes are - known and when the routes are discovered, ping will work - -
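A movements file in this format can also be generated rather than written by hand. Below is a small, hypothetical helper that emits the same `$node_` / `$ns_ at` statements shown above; the node ids, waypoints, and output filename are made-up values for illustration.

``` python
"""
Sketch of a generator for an NS2 movements file in the format shown above.
Waypoints are (time, x, y) tuples per node id; values here are illustrative.
"""
WAYPOINTS = {
    1: [(0.0, 208.1, 211.05), (1.0, 190.1, 225.05), (2.0, 175.1, 250.05)],
    2: [(0.0, 393.1, 223.05), (1.0, 393.1, 225.05), (2.0, 393.1, 250.05)],
}


def write_movements(path: str) -> None:
    lines = []
    for node_id, points in sorted(WAYPOINTS.items()):
        # initial position comes from the first waypoint
        _, x, y = points[0]
        lines.append(f"$node_({node_id}) set X_ {x}")
        lines.append(f"$node_({node_id}) set Y_ {y}")
        lines.append(f"$node_({node_id}) set Z_ 0")
        # one setdest statement per waypoint
        for t, x, y in points:
            lines.append(f'$ns_ at {t} "$node_({node_id}) setdest {x} {y} 0.00"')
    with open(path, "w") as f:
        f.write("\n".join(lines) + "\n")


if __name__ == "__main__":
    write_movements("movements2.txt")
```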


    - -## Running with the gRPC Script - -This section covers using a gRPC script to create and provide scenario movement. - -* Make sure the **core-daemon** is running a terminal - ```shell - sudop core-daemon - ``` -* From another terminal run the **scenario.py** script - ```shell - /opt/core/venv/bin/python scenario.py - ``` -* In another terminal run the GUI - ```shell - core-gui - ``` -* In the GUI dialog box select the session and click connect -* You will now have joined the already running scenario -* In the terminal running the **scenario.py**, hit a key to start motion -


-* Observe that the link between **n3** and **n4** is shown, and then breaks as motion continues


    - -## Running the Chat App Software - -This section covers using one of the above 2 scenarios to run software within -the nodes. - -* In the GUI double click on **n4**, this will bring up a terminal for this node -* in the **n4** terminal, run the server - ```shell - export PATH=$PATH:/usr/local/bin - chatapp-server - ``` -* In the GUI double click on **n2**, this will bring up a terminal for this node -* In the **n2** terminal, run the client - ```shell - export PATH=$PATH:/usr/local/bin - chatapp-client -a 10.0.0.4 - ``` -* This will result in **n2** connecting to the server -* In the **n2** terminal, type a message at the client prompt and hit enter - ```shell - >>hello world - ``` -* Observe that text typed at client then appears in the server terminal - ```shell - chat server listening on: :9001 - [server] 10.0.0.2:53684 joining - [10.0.0.2:53684] hello world - ``` - -## Running Mobility from a Node - -This section provides an example for running a script within a node, that -leverages a control network in CORE for issuing mobility using the gRPC -API. - -* Edit the following line in **/etc/core/core.conf** - ```shell - grpcaddress = 0.0.0.0 - ``` -* Start the scenario from the **scenario.xml** -* From the GUI open **Session -> Options** and set **Control Network** to **172.16.0.0/24** -* Click to play the scenario -* Double click on **n2** to get a terminal window -* From the terminal window for **n2**, run the script - ```shell - /opt/core/venv/bin/python move-node2.py - ``` -* Observe that node 2 moves and continues to move - -
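For orientation, a **move-node2.py** style script is conceptually small. The sketch below is hypothetical: it assumes the CORE 9.x gRPC client, that the daemon is reachable from the node over the control network, and that the client exposes a `move_node` call; verify the address, session lookup, node id, and exact call signature against your installed version.

``` python
"""
Hypothetical move-node2.py sketch: step node 2 along the x axis via the gRPC
API over the control network. The daemon address, node id, and the move_node
call/signature are assumptions to check against the installed CORE client.
"""
import time

from core.api.grpc import client
from core.api.grpc.wrappers import Position

# core-daemon address as seen from the control network (adjust for your setup)
core = client.CoreGrpcClient(address="172.16.0.254:50051")
core.connect()

# use the first running session and move node 2 once per second
session = core.get_sessions()[0]
for step in range(30):
    position = Position(x=100 + step * 20, y=100)
    core.move_node(session.id, 2, position=position)
    time.sleep(1)
```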


    diff --git a/docs/tutorials/tutorial4.md b/docs/tutorials/tutorial4.md deleted file mode 100644 index 77ac1c94..00000000 --- a/docs/tutorials/tutorial4.md +++ /dev/null @@ -1,121 +0,0 @@ -# Tutorial 4 - Tests - -## Overview - -A use case for CORE would be to help automate integration tests for running -software within a network. This tutorial covers using CORE with the python -pytest testing framework. It will show how you can define tests, for different -use cases to validate software and outcomes within a defined network. Using -pytest, you would create tests using all the standard pytest functionality. -Creating a test file, and then defining test functions to run. For these tests, -we are leveraging the CORE library directly and the API it provides. - -Refer to the [pytest documentation](https://docs.pytest.org) for indepth -information on how to write tests with pytest. - -## Files - -A directory is used for containing your tests. Within this directory we need a -**conftest.py**, which pytest will pick up to help define and provide -test fixtures, which will be leveraged within our tests. - -* tests - * conftest.py - file used by pytest to define fixtures, which can be shared across tests - * test_ping.py - defines test classes/functions to run - -## Test Fixtures - -Below are the definitions for fixture you can define to facilitate and make -creating CORE based tests easier. - -The global session fixture creates one **CoreEmu** object for the entire -test session, yields it for testing, and calls shutdown when everything -is over. - -``` python -@pytest.fixture(scope="session") -def global_session(): - core = CoreEmu() - session = core.create_session() - session.set_state(EventTypes.CONFIGURATION_STATE) - yield session - core.shutdown() -``` - -The regular session fixture leverages the global session fixture. It -will set the correct state for each test case, yield the session for a test, -and then clear the session after a test finishes to prepare for the next -test. - -``` python -@pytest.fixture -def session(global_session): - global_session.set_state(EventTypes.CONFIGURATION_STATE) - yield global_session - global_session.clear() -``` - -The ip prefixes fixture help provide a preconfigured convenience for -creating and assigning interfaces to nodes, when creating your network -within a test. The address subnet can be whatever you desire. - -``` python -@pytest.fixture(scope="session") -def ip_prefixes(): - return IpPrefixes(ip4_prefix="10.0.0.0/24") -``` - -## Test Functions - -Within a pytest test file, you have the freedom to create any kind of -test you like, but they will all follow a similar formula. - -* define a test function that will leverage the session and ip prefixes fixtures -* then create a network to test, using the session fixture -* run commands within nodes as desired, to test out your use case -* validate command result or output for expected behavior to pass or fail - -In the test below, we create a simple 2 node wired network and validate -node1 can ping node2 successfully. 
- -``` python -def test_success(self, session: Session, ip_prefixes: IpPrefixes): - # create nodes - node1 = session.add_node(CoreNode) - node2 = session.add_node(CoreNode) - - # link nodes together - iface1_data = ip_prefixes.create_iface(node1) - iface2_data = ip_prefixes.create_iface(node2) - session.add_link(node1.id, node2.id, iface1_data, iface2_data) - - # ping node, expect a successful command - node1.cmd(f"ping -c 1 {iface2_data.ip4}") -``` - -## Install Pytest - -Since we are running an automated test within CORE, we will need to install -pytest within the python interpreter used by CORE. - -``` shell -sudo /opt/core/venv/bin/python -m pip install pytest -``` - -## Running Tests - -You can run your own or the provided tests, by running the following. - -``` shell -cd -sudo /opt/core/venv/bin/python -m pytest -v -``` - -If you run the provided tests, you would expect to see the two tests -running and passing. - -``` shell -tests/test_ping.py::TestPing::test_success PASSED [ 50%] -tests/test_ping.py::TestPing::test_failure PASSED [100%] -``` - diff --git a/docs/tutorials/tutorial5.md b/docs/tutorials/tutorial5.md deleted file mode 100644 index 92337717..00000000 --- a/docs/tutorials/tutorial5.md +++ /dev/null @@ -1,168 +0,0 @@ -# Tutorial 5 - RJ45 Node - -## Overview - -This tutorial will cover connecting CORE VM to a Windows host machine using a RJ45 node. - -## Files - -Below is the list of files used for this tutorial. - -* scenario.xml - the scenario with RJ45 unassigned -* scenario.py- grpc script to create the RJ45 in simple CORE scenario -* client_for_windows.py - chat app client modified for windows - -## Running with the Saved XML File - -This section covers using the saved **scenario.xml** file to get and up and running. - -* Configure the Windows host VM to have a bridged network adapter -


    -* Make sure the **core-daemon** is running in a terminal - ```shell - sudop core-daemon - ``` -* In another terminal run the GUI - ```shell - core-gui - ``` -* Open the **scenario.xml** with the unassigned RJ45 node -


-* Configure the RJ45 node name to use the bridged interface


-* After configuring the RJ45, run the scenario:


    -* Double click node **n1** to open a terminal and add a route to the Windows host - ```shell - ip route add 192.168.0.0/24 via 10.0.0.20 - ``` -* On the Windows host using Windows command prompt with administrator privilege, add a route that uses the interface - connected to the associated interface assigned to the RJ45 node - ```shell - # if enp0s3 is ssigned 192.168.0.6/24 - route add 10.0.0.0 mask 255.255.255.0 192.168.0.6 - ``` -* Now you should be able to ping from the Windows host to **n1** - ```shell - C:\WINDOWS\system32>ping 10.0.0.20 - - Pinging 10.0.0.20 with 32 bytes of data: - Reply from 10.0.0.20: bytes=32 time<1ms TTL=64 - Reply from 10.0.0.20: bytes=32 time<1ms TTL=64 - Reply from 10.0.0.20: bytes=32 time<1ms TTL=64 - Reply from 10.0.0.20: bytes=32 time<1ms TTL=64 - - Ping statistics for 10.0.0.20: - Packets: Sent = 4, Received = 4, Lost = 0 (0% loss) - Approximate round trip times in milli-seconds: - Minimum = 0ms, Maximum = 0ms, Average = 0ms - ``` -* After pinging successfully, run the following in the **n1** terminal to start the chatapp server - ```shell - export PATH=$PATH:/usr/local/bin - chatapp-server - ``` -* On the Windows host, run the **client_for_windows.py** - ```shell - python3 client_for_windows.py -a 10.0.0.20 - connected to server(10.0.0.20:9001) as client(192.168.0.6:49960) - >> .Hello WORLD - .Hello WORLD Again - . - ``` -* Observe output on **n1** - ```shell - chat server listening on: :9001 - [server] 192.168.0.6:49960 joining - [192.168.0.6:49960] Hello WORLD - [192.168.0.6:49960] Hello WORLD Again - ``` -* When finished, you can stop the CORE scenario and cleanup -* On the Windows host remove the added route - ```shell - route delete 10.0.0.0 - ``` - -## Running with the gRPC Script - -This section covers leveraging the gRPC script to get up and running. - -* Configure the Windows host VM to have a bridged network adapter -


    -* Make sure the **core-daemon** is running in a terminal - ```shell - sudop core-daemon - ``` -* In another terminal run the GUI - ```shell - core-gui - ``` -* Run the gRPC script in the VM - ```shell - # use the desired interface name, in this case enp0s3 - /opt/core/venv/bin/python scenario.py enp0s3 - ``` -* In the **core-gui** connect to the running session that was created -


    -* Double click node **n1** to open a terminal and add a route to the Windows host - ```shell - ip route add 192.168.0.0/24 via 10.0.0.20 - ``` -* On the Windows host using Windows command prompt with administrator privilege, add a route that uses the interface - connected to the associated interface assigned to the RJ45 node - ```shell - # if enp0s3 is ssigned 192.168.0.6/24 - route add 10.0.0.0 mask 255.255.255.0 192.168.0.6 - ``` -* Now you should be able to ping from the Windows host to **n1** - ```shell - C:\WINDOWS\system32>ping 10.0.0.20 - - Pinging 10.0.0.20 with 32 bytes of data: - Reply from 10.0.0.20: bytes=32 time<1ms TTL=64 - Reply from 10.0.0.20: bytes=32 time<1ms TTL=64 - Reply from 10.0.0.20: bytes=32 time<1ms TTL=64 - Reply from 10.0.0.20: bytes=32 time<1ms TTL=64 - - Ping statistics for 10.0.0.20: - Packets: Sent = 4, Received = 4, Lost = 0 (0% loss) - Approximate round trip times in milli-seconds: - Minimum = 0ms, Maximum = 0ms, Average = 0ms - ``` -* After pinging successfully, run the following in the **n1** terminal to start the chatapp server - ```shell - export PATH=$PATH:/usr/local/bin - chatapp-server - ``` -* On the Windows host, run the **client_for_windows.py** - ```shell - python3 client_for_windows.py -a 10.0.0.20 - connected to server(10.0.0.20:9001) as client(192.168.0.6:49960) - >> .Hello WORLD - .Hello WORLD Again - . - ``` -* Observe output on **n1** - ```shell - chat server listening on: :9001 - [server] 192.168.0.6:49960 joining - [192.168.0.6:49960] Hello WORLD - [192.168.0.6:49960] Hello WORLD Again - ``` -* When finished, you can stop the CORE scenario and cleanup -* On the Windows host remove the added route - ```shell - route delete 10.0.0.0 - ``` diff --git a/docs/tutorials/tutorial6.md b/docs/tutorials/tutorial6.md deleted file mode 100644 index 46bb57ac..00000000 --- a/docs/tutorials/tutorial6.md +++ /dev/null @@ -1,97 +0,0 @@ -# Tutorial 6 - Improved Visuals - -## Overview - -This tutorial will cover changing the node icons, changing the background, and changing or hiding links. - -## Files - -Below is the list of files used for this tutorial. - -* drone.png - icon for a drone -* demo.py - a mobility script for a node -* terrain.png - a background -* completed-scenario.xml - the scenario after making all changes below - -## Running this Tutorial - -This section will cover running this sample tutorial that develops a scenario file. - -* Ensure that **/etc/core/core.conf** has **grpcaddress** set to **0.0.0.0** -* Make sure the **core-daemon** is running in a terminal - ```shell - sudop core-daemon - ``` -* In another terminal run the GUI - ```shell - core-gui - ``` - -### Changing Node Icons - -* Create three MDR nodes -

    - -

    -* Double click on each node for configuration, click the icon and set it to use the **drone.png** image -

    - -

-* Use **Session -> Options** and set **Control Network 0** to **172.16.0.0/24** - -### Linking Nodes to WLAN - -* Add a WLAN Node -* Link the three prior MDR nodes to the WLAN node -

    - -

    -* Click play to start the scenario -* Observe wireless links being created -

    - -

    -* Click stop to end the scenario -* Right click the WLAN node and select **Edit -> Hide** -* Now you can view the nodes in isolation -

    - -

    - -### Changing Canvas Background - -* Click **Canvas -> Wallpaper** to set the background to terrain.png -

    - -

    -* Click play to start the scenario again -* You now have a scenario with drone icons, terrain background, links displayed and hidden WLAN node -

    - -

- - -## Adding Mobility - -* Open and play the **completed-scenario.xml** -* Double click on **n1** and run the **demo.py** script - ```shell - # node id is first parameter, second is total nodes - /opt/core/venv/bin/python demo.py 1 3 - ``` -* Let it run to see the link break as the node 1 drone approaches the right side -

    - -

-* Repeat for the other nodes: double click on **n2** and **n3** and run the demo.py script - ```shell - # n2 - /opt/core/venv/bin/python demo.py 2 3 - # n3 - /opt/core/venv/bin/python demo.py 3 3 - ``` -* You can turn off wireless links via **View -> Wireless Links** -* Observe nodes moving in parallel tracks; when the far right is reached, the node will move down - and then move to the left. When the far left is reached, the drone will move down and then move to the right. -

    - -

    diff --git a/docs/tutorials/tutorial7.md b/docs/tutorials/tutorial7.md deleted file mode 100644 index 2cc2f812..00000000 --- a/docs/tutorials/tutorial7.md +++ /dev/null @@ -1,236 +0,0 @@ -# Tutorial 7 - EMANE - -## Overview - -This tutorial will cover basic usage and some concepts one may want to -use or leverage when working with and creating EMANE based networks. - -

    - -

    - -For more detailed information on EMANE see the following: - -* [EMANE in CORE](../emane.md) -* [EMANE Wiki](https://github.com/adjacentlink/emane/wiki) - -## Files - -Below is a list of the files used for this tutorial. - -* 2 node EMANE ieee80211abg scenario - * scenario.xml - * scenario.py -* 2 node EMANE ieee80211abg scenario, with **n2** running the "Chat App Server" service - * scenario_service.xml - * scenario_service.py - -## Running this Tutorial - -This section covers interactions that can be carried out for this scenario. - -Our scenario has the following nodes and addresses: - -* emane1 - no address, this is a representative node for the EMANE network -* n2 - 10.0.0.1 -* n3 - 10.0.0.2 - -All usages below assume a clean scenario start. - -### Using Ping - -Using the command line utility **ping** can be a good way to verify connectivity -between nodes in CORE. - -* Make sure the CORE daemon is running a terminal, if not already - ``` shell - sudop core-daemon - ``` -* In another terminal run the GUI - ``` shell - core-gui - ``` -* In the GUI menu bar select **File->Open...**, then navigate to and select **scenario.xml** -

    - -

    -* You can now click on the **Start Session** button to run the scenario -

    - -

    -* Open a terminal on **n2** by double clicking it in the GUI -* Run the following in **n2** terminal - ``` shell - ping -c 3 10.0.0.2 - ``` -* You should see the following output - ``` shell - PING 10.0.0.2 (10.0.0.2) 56(84) bytes of data. - 64 bytes from 10.0.0.2: icmp_seq=1 ttl=64 time=7.93 ms - 64 bytes from 10.0.0.2: icmp_seq=2 ttl=64 time=3.07 ms - 64 bytes from 10.0.0.2: icmp_seq=3 ttl=64 time=3.05 ms - - --- 10.0.0.2 ping statistics --- - 3 packets transmitted, 3 received, 0% packet loss, time 2000ms - rtt min/avg/max/mdev = 3.049/4.685/7.932/2.295 ms - ``` - -### Using Tcpdump - -Using **tcpdump** can be very beneficial for examining a network. You can verify -traffic being sent/received among many other uses. - -* Make sure the CORE daemon is running a terminal, if not already - ``` shell - sudop core-daemon - ``` -* In another terminal run the GUI - ``` shell - core-gui - ``` -* In the GUI menu bar select **File->Open...**, then navigate to and select **scenario.xml** -

    - -

    -* You can now click on the **Start Session** button to run the scenario -

    - -

    -* Open a terminal on **n2** by double clicking it in the GUI -* Open a terminal on **n3** by double clicking it in the GUI -* Run the following in **n3** terminal - ``` shell - tcpdump -lenni eth0 - ``` -* Run the following in **n2** terminal - ``` shell - ping -c 1 10.0.0.2 - ``` -* You should see the following in **n2** terminal - ``` shell - tcpdump: verbose output suppressed, use -v[v]... for full protocol decode - listening on eth0, link-type EN10MB (Ethernet), snapshot length 262144 bytes - 14:56:25.414283 02:02:00:00:00:01 > 02:02:00:00:00:02, ethertype IPv4 (0x0800), length 98: 10.0.0.1 > 10.0.0.2: ICMP echo request, id 64832, seq 1, length 64 - 14:56:25.414303 02:02:00:00:00:02 > 02:02:00:00:00:01, ethertype IPv4 (0x0800), length 98: 10.0.0.2 > 10.0.0.1: ICMP echo reply, id 64832, seq 1, length 64 - ``` - -### Running Software - -We will now leverage the installed Chat App software to stand up a server and client -within the nodes of our scenario. - -* Make sure the CORE daemon is running a terminal, if not already - ``` shell - sudop core-daemon - ``` -* In another terminal run the GUI - ``` shell - core-gui - ``` -* In the GUI menu bar select **File->Open...**, then navigate to and select **scenario.xml** -

    - -

    -* You can now click on the **Start Session** button to run the scenario -

    - -

    -* Open a terminal on **n2** by double clicking it in the GUI -* Run the following in **n2** terminal - ``` shell - export PATH=$PATH:/usr/local/bin - chatapp-server - ``` -* Open a terminal on **n3** by double clicking it in the GUI -* Run the following in **n3** terminal - ``` shell - export PATH=$PATH:/usr/local/bin - chatapp-client -a 10.0.0.1 - ``` -* You will see the following output in **n1** terminal - ``` shell - chat server listening on: :9001 - [server] 10.0.0.1:44362 joining - ``` -* Type the following in **n2** terminal and hit enter - ``` shell - hello world - ``` -* You will see the following output in **n1** terminal - ``` shell - chat server listening on: :9001 - [server] 10.0.0.2:44362 joining - [10.0.0.2:44362] hello world - ``` - -### Tailing a Log - -In this case we are using the service based scenario. This will automatically start -and run the Chat App Server on **n2** and log to a file. This case will demonstrate -using `tail -f` to observe the output of running software. - -* Make sure the CORE daemon is running a terminal, if not already - ``` shell - sudop core-daemon - ``` -* In another terminal run the GUI - ``` shell - core-gui - ``` -* In the GUI menu bar select **File->Open...**, then navigate to and select **scenario_service.xml** -

    - -

    -* You can now click on the **Start Session** button to run the scenario -

    - -

-* Open a terminal on **n2** by double clicking it in the GUI -* Run the following in **n2** terminal - ``` shell - tail -f chatapp.log - ``` -* Open a terminal on **n3** by double clicking it in the GUI -* Run the following in **n3** terminal - ``` shell - export PATH=$PATH:/usr/local/bin - chatapp-client -a 10.0.0.1 - ``` -* You will see the following output in **n2** terminal - ``` shell - chat server listening on: :9001 - [server] 10.0.0.2:44362 joining - ``` -* Type the following in **n3** terminal and hit enter - ``` shell - hello world - ``` -* You will see the following output in **n2** terminal - ``` shell - chat server listening on: :9001 - [server] 10.0.0.2:44362 joining - [10.0.0.2:44362] hello world - ``` - -## Advanced Topics - -This section will cover some high level topics and examples for running and -using EMANE in CORE. You can find more detailed tutorials and examples at -the [EMANE Tutorial](https://github.com/adjacentlink/emane-tutorial/wiki). - -!!! note - - Every topic below assumes CORE, EMANE, and OSPF MDR have been installed. - - Scenario files to support the EMANE topics below will be found in - the GUI default directory for opening XML files. - -| Topic | Model | Description | -|-----------------------------------------|---------|-----------------------------------------------------------| -| [XML Files](../emane/files.md) | RF Pipe | Overview of generated XML files used to drive EMANE | -| [GPSD](../emane/gpsd.md) | RF Pipe | Overview of running and integrating gpsd with EMANE | -| [Precomputed](../emane/precomputed.md) | RF Pipe | Overview of using the precomputed propagation model | -| [EEL](../emane/eel.md) | RF Pipe | Overview of using the Emulation Event Log (EEL) Generator | -| [Antenna Profiles](../emane/antenna.md) | RF Pipe | Overview of using antenna profiles in EMANE | - ---8<-- "tutorials/common/grpc.md" diff --git a/docs/usage.md b/docs/usage.md new file mode 100644 index 00000000..7c070eb1 --- /dev/null +++ b/docs/usage.md @@ -0,0 +1,1082 @@ +# Using the CORE GUI + +* Table of Contents +{:toc} + +## Overview + +CORE can be used via the GUI or [Python_Scripting](scripting.md). Often the GUI is used to draw nodes and network devices on the canvas. A Python script that imports the CORE Python module could also be written to configure and instantiate nodes and networks. This chapter primarily covers usage of the CORE GUI. + +![](static/core-workflow.jpg) + +CORE can be customized to perform any action at each phase in the workflow above. See the *Hooks...* entry on the **Session Menu** for details about when these session states are reached. + +## Prerequisites + +Beyond installing CORE, you must have the CORE daemon running. This is done on the command line with either systemd or SysV init: +```shell +# systemd +sudo systemctl daemon-reload +sudo systemctl start core-daemon + +# sysv +sudo service core-daemon start +``` + +## Modes of Operation + +The CORE GUI has two primary modes of operation, **Edit** and **Execute** modes. Running the GUI, by typing **core-gui** with no options, starts in Edit mode. Nodes are drawn on a blank canvas using the toolbar on the left and configured from right-click menus or by double-clicking them. The GUI does not need to be run as root. + +Once editing is complete, pressing the green **Start** button (or choosing **Execute** from the **Session** menu) instantiates the topology within the Linux kernel and enters Execute mode.
In execute mode, the user can interact with the running emulated machines by double-clicking or right-clicking on them. The editing toolbar disappears and is replaced by an execute toolbar, which provides tools while running the emulation. Pressing the red **Stop** button (or choosing **Terminate** from the **Session** menu) will destroy the running emulation and return CORE to Edit mode. + +CORE can be started directly in Execute mode by specifying **--start** and a topology file on the command line: + +```shell +core-gui --start ~/.core/configs/myfile.imn +``` + +Once the emulation is running, the GUI can be closed, and a prompt will appear asking if the emulation should be terminated. The emulation may be left running and the GUI can reconnect to an existing session at a later time. + +The GUI can be run as a normal user on Linux. + +The GUI can be connected to a different address or TCP port using the **--address** and/or **--port** options. The defaults are shown below. + +```shell +core-gui --address 127.0.0.1 --port 4038 +``` + +## Toolbar + +The toolbar is a row of buttons that runs vertically along the left side of the CORE GUI window. The toolbar changes depending on the mode of operation. + +### Editing Toolbar + +When CORE is in Edit mode (the default), the vertical Editing Toolbar exists on the left side of the CORE window. Below are brief descriptions for each toolbar item, starting from the top. Most of the tools are grouped into related sub-menus, which appear when you click on their group icon. + +* |select| *Selection Tool* - default tool for selecting, moving, configuring nodes +* |start| *Start button* - starts Execute mode, instantiates the emulation +* |link| *Link* - the Link Tool allows network links to be drawn between two nodes by clicking and dragging the mouse +* |router| *Network-layer virtual nodes* + * |router| *Router* - runs Quagga OSPFv2 and OSPFv3 routing to forward packets + * |host| *Host* - emulated server machine having a default route, runs SSH server + * |pc| *PC* - basic emulated machine having a default route, runs no processes by default + * |mdr| *MDR* - runs Quagga OSPFv3 MDR routing for MANET-optimized routing + * |router_green| *PRouter* - physical router represents a real testbed machine + * |document_properties| *Edit* - edit node types button invokes the CORE Node Types dialog. New types of nodes may be created having different icons and names. The default services that are started with each node type can be changed here. 
+* |hub| *Link-layer nodes* + * |hub| *Hub* - the Ethernet hub forwards incoming packets to every connected node + * |lanswitch| *Switch* - the Ethernet switch intelligently forwards incoming packets to attached hosts using an Ethernet address hash table + * |wlan| *Wireless LAN* - when routers are connected to this WLAN node, they join a wireless network and an antenna is drawn instead of a connecting line; the WLAN node typically controls connectivity between attached wireless nodes based on the distance between them + * |rj45| *RJ45* - with the RJ45 Physical Interface Tool, emulated nodes can be linked to real physical interfaces; using this tool, real networks and devices can be physically connected to the live-running emulation + * |tunnel| *Tunnel* - the Tunnel Tool allows connecting together more than one CORE emulation using GRE tunnels +* *Annotation Tools* + * |marker| *Marker* - for drawing marks on the canvas + * |oval| *Oval* - for drawing circles on the canvas that appear in the background + * |rectangle| *Rectangle* - for drawing rectangles on the canvas that appear in the background + * |text| *Text* - for placing text captions on the canvas + +### Execution Toolbar + +When the Start button is pressed, CORE switches to Execute mode, and the Edit toolbar on the left of the CORE window is replaced with the Execution toolbar. Below are the items on this toolbar, starting from the top. + +* |select| *Selection Tool* - in Execute mode, the Selection Tool can be used for moving nodes around the canvas, and double-clicking on a node will open a shell window for that node; right-clicking on a node invokes a pop-up menu of run-time options for that node +* |stop| *Stop button* - stops Execute mode, terminates the emulation, returns CORE to Edit mode. +* |observe| *Observer Widgets Tool* - clicking on this magnifying glass icon + invokes a menu for easily selecting an Observer Widget. The icon has a darker + gray background when an Observer Widget is active, during which time moving + the mouse over a node will pop up an information display for that node. +* |plot| *Plot Tool* - with this tool enabled, clicking on any link will + activate the Throughput Widget and draw a small, scrolling throughput plot + on the canvas. The plot shows the real-time kbps traffic for that link. + The plots may be dragged around the canvas; right-click on a + plot to remove it. +* |marker| *Marker* - for drawing freehand lines on the canvas, useful during + demonstrations; markings are not saved +* |twonode| *Two-node Tool* - click to choose a starting and ending node, and + run a one-time *traceroute* between those nodes or a continuous *ping -R* + between nodes. The output is displayed in real time in a results box, while + the IP addresses are parsed and the complete network path is highlighted on + the CORE display. +* |run| *Run Tool* - this tool allows easily running a command on all or a + subset of all nodes. A list box allows selecting any of the nodes. A text + entry box allows entering any command. The command should return immediately, + otherwise the display will block awaiting response. The *ping* command, for + example, with no parameters, is not a good idea. The result of each command + is displayed in a results box. The first occurrence of the special text + "NODE" will be replaced with the node name. The command will not be run + on nodes that are not routers, PCs, or hosts, even if they are + selected.
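As an illustration (this command is not from the CORE documentation, just an example entry), the **NODE** substitution can be used to label each node's result with something that returns immediately:

```shell
# typed into the Run Tool text entry box; the literal text NODE is replaced
# with each node's name before the command is run on that node
echo NODE is up
```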
+ +## Menubar + +The menubar runs along the top of the CORE GUI window and provides access to a +variety of features. Some of the menus are detachable, such as the *Widgets* +menu, by clicking the dashed line at the top. + +### File Menu + +The File menu contains options for manipulating the **.imn** Configuration Files. Generally, these menu items should not be used in +Execute mode. + +* *New* - this starts a new file with an empty canvas. +* *Open* - invokes the File Open dialog box for selecting a new **.imn** + or XML file to open. You can change the default path used for this dialog + in the Preferences Dialog. +* *Save* - saves the current topology. If you have not yet specified a file + name, the Save As dialog box is invoked. +* *Save As XML* - invokes the Save As dialog box for selecting a new + **.xml** file for saving the current configuration in the XML file. +* *Save As imn* - invokes the Save As dialog box for selecting a new + **.imn** topology file for saving the current configuration. Files are saved in the + *IMUNES network configuration* file format. +* *Export Python script* - prints Python snippets to the console, for inclusion + in a CORE Python script. +* *Execute XML or Python script* - invokes a File Open dialog box for selecting an XML file to run or a + Python script to run and automatically connect to. If a Python script, the script must create + a new CORE Session and add this session to the daemon's list of sessions + in order for this to work. +* *Execute Python script with options* - invokes a File Open dialog box for selecting a + Python script to run and automatically connect to. After a selection is made, + a Python Script Options dialog box is invoked to allow for command-line options to be added. + The Python script must create a new CORE Session and add this session to the daemon's list of sessions + in order for this to work. +* *Open current file in editor* - this opens the current topology file in the + **vim** text editor. First you need to save the file. Once the file has been + edited with a text editor, you will need to reload the file to see your + changes. The text editor can be changed from the Preferences Dialog. +* *Print* - this uses the Tcl/Tk postscript command to print the current canvas + to a printer. A dialog is invoked where you can specify a printing command, + the default being **lpr**. The postscript output is piped to the print + command. +* *Save screenshot* - saves the current canvas as a postscript graphic file. +* Recently used files - above the Quit menu command is a list of recently used + files, if any have been opened. You can clear this list in the + Preferences dialog box. You can specify the number of files to keep in + this list from the Preferences dialog. Click on one of the file names + listed to open that configuration file. +* *Quit* - the Quit command should be used to exit the CORE GUI. CORE may + prompt for termination if you are currently in Execute mode. Preferences and + the recently-used files list are saved. + +### Edit Menu + +* *Undo* - attempts to undo the last edit in edit mode. +* *Redo* - attempts to redo an edit that has been undone. +* *Cut*, *Copy*, *Paste* - used to cut, copy, and paste a selection. When nodes + are pasted, their node numbers are automatically incremented, and existing + links are preserved with new IP addresses assigned.
Services and their + customizations are copied to the new node, but care should be taken as + node IP addresses have changed with possibly old addresses remaining in any + custom service configurations. Annotations may also be copied and pasted. +* *Select All* - selects all items on the canvas. Selected items can be moved + as a group. +* *Select Adjacent* - select all nodes that are linked to the already selected + node(s). For wireless nodes this simply selects the WLAN node(s) that the + wireless node belongs to. You can use this by clicking on a node and pressing + CTRL+N to select the adjacent nodes. +* *Find...* - invokes the *Find* dialog box. The Find dialog can be used to + search for nodes by name or number. Results are listed in a table that + includes the node or link location and details such as IP addresses or + link parameters. Clicking on a result will focus the canvas on that node + or link, switching canvases if necessary. +* *Clear marker* - clears any annotations drawn with the marker tool. Also + clears any markings used to indicate a node's status. +* *Preferences...* - invokes the Preferences dialog box. + +### Canvas Menu + +The canvas menu provides commands for adding, removing, changing, and switching to different editing canvases. + +* *New* - creates a new empty canvas at the right of all existing canvases. +* *Manage...* - invokes the *Manage Canvases* dialog box, where canvases may be + renamed and reordered, and you can easily switch to one of the canvases by + selecting it. +* *Delete* - deletes the current canvas and all items that it contains. +* *Size/scale...* - invokes a Canvas Size and Scale dialog that allows + configuring the canvas size, scale, and geographic reference point. The size + controls allow changing the width and height of the current canvas, in pixels + or meters. The scale allows specifying how many meters are equivalent to 100 + pixels. The reference point controls specify the latitude, longitude, and + altitude reference point used to convert between geographic and Cartesian + coordinate systems. By clicking the *Save as default* option, all new + canvases will be created with these properties. The default canvas size can + also be changed in the Preferences dialog box. +* *Wallpaper...* - used for setting the canvas background image, +* *Previous*, *Next*, *First*, *Last* - used for switching the active canvas to + the first, last, or adjacent canvas. + +### View Menu + +The View menu features items for controlling what is displayed on the drawing +canvas. + +* *Show* - opens a submenu of items that can be displayed or hidden, such as + interface names, addresses, and labels. Use these options to help declutter + the display. These options are generally saved in the topology + files, so scenarios have a more consistent look when copied from one computer + to another. +* *Show hidden nodes* - reveal nodes that have been hidden. Nodes are hidden by + selecting one or more nodes, right-clicking one and choosing *hide*. +* *Locked* - toggles locked view; when the view is locked, nodes cannot be + moved around on the canvas with the mouse. This could be useful when + sharing the topology with someone and you do not expect them to change + things. +* *3D GUI...* - launches a 3D GUI by running the command defined under + Preferences, *3D GUI command*. This is typically a script that runs + the SDT3D display. SDT is the Scripted Display Tool from NRL that is based on + NASA's Java-based WorldWind virtual globe software. 
+* *Zoom In* - magnifies the display. You can also zoom in by clicking *zoom + 100%* label in the status bar, or by pressing the **+** (plus) key. +* *Zoom Out* - reduces the size of the display. You can also zoom out by + right-clicking *zoom 100%* label in the status bar or by pressing the **-** + (minus) key. + +### Tools Menu + +The tools menu lists different utility functions. + +* *Autorearrange all* - automatically arranges all nodes on the canvas. Nodes + having a greater number of links are moved to the center. This mode can + continue to run while placing nodes. To turn off this autorearrange mode, + click on a blank area of the canvas with the select tool, or choose this menu + option again. +* *Autorearrange selected* - automatically arranges the selected nodes on the + canvas. +* *Align to grid* - moves nodes into a grid formation, starting with the + smallest-numbered node in the upper-left corner of the canvas, arranging + nodes in vertical columns. +* *Traffic...* - invokes the CORE Traffic Flows dialog box, which allows + configuring, starting, and stopping MGEN traffic flows for the emulation. +* *IP addresses...* - invokes the IP Addresses dialog box for configuring which + IPv4/IPv6 prefixes are used when automatically addressing new interfaces. +* *MAC addresses...* - invokes the MAC Addresses dialog box for configuring the + starting number used as the lowest byte when generating each interface MAC + address. This value should be changed when tunneling between CORE emulations + to prevent MAC address conflicts. +* *Build hosts file...* - invokes the Build hosts File dialog box for + generating **/etc/hosts** file entries based on IP addresses used in the + emulation. +* *Renumber nodes...* - invokes the Renumber Nodes dialog box, which allows + swapping one node number with another in a few clicks. +* *Experimental...* - menu of experimental options, such as a tool to convert + ns-2 scripts to IMUNES imn topologies, supporting only basic ns-2 + functionality, and a tool for automatically dividing up a topology into + partitions. +* *Topology generator* - opens a submenu of topologies to generate. You can + first select the type of node that the topology should consist of, or routers + will be chosen by default. Nodes may be randomly placed, aligned in grids, or + various other topology patterns. + * *Random* - nodes are randomly placed about the canvas, but are not linked + together. This can be used in conjunction with a WLAN node to quickly create a wireless network. + * *Grid* - nodes are placed in horizontal rows starting in the upper-left + corner, evenly spaced to the right; nodes are not linked to each other. + * *Connected Grid* - nodes are placed in an N x M (width and height) + rectangular grid, and each node is linked to the node above, below, left + and right of itself. + * *Chain* - nodes are linked together one after the other in a chain. + * *Star* - one node is placed in the center with N nodes surrounding it in a + circular pattern, with each node linked to the center node + * *Cycle* - nodes are arranged in a circular pattern with every node + connected to its neighbor to form a closed circular path. + * *Wheel* - the wheel pattern links nodes in a combination of both Star and + Cycle patterns. + * *Cube* - generate a cube graph of nodes + * *Clique* - creates a clique graph of nodes, where every node is connected + to every other node + * *Bipartite* - creates a bipartite graph of nodes, having two disjoint sets + of vertices. 
+* *Debugger...* - opens the CORE Debugger window for executing arbitrary Tcl/Tk + commands. + +### Widgets Menu + +*Widgets* are GUI elements that allow interaction with a running emulation. +Widgets typically automate the running of commands on emulated nodes to report +status information of some type and display this on screen. + +#### Periodic Widgets + +These Widgets are those available from the main *Widgets* menu. More than one +of these Widgets may be run concurrently. An event loop fires once every second +that the emulation is running. If one of these Widgets is enabled, its periodic +routine will be invoked at this time. Each Widget may have a configuration +dialog box which is also accessible from the *Widgets* menu. + +Here are some standard widgets: + +* *Adjacency* - displays router adjacency states for Quagga's OSPFv2 and OSPFv3 + routing protocols. A line is drawn from each router halfway to the router ID + of an adjacent router. The color of the line is based on the OSPF adjacency + state such as Two-way or Full. To learn about the different colors, see the + *Configure Adjacency...* menu item. The **vtysh** command is used to + dump OSPF neighbor information. + Only half of the line is drawn because each + router may be in a different adjacency state with respect to the other. +* *Throughput* - displays the kilobits-per-second throughput above each link, + using statistics gathered from the ng_pipe Netgraph node that implements each + link. If the throughput exceeds a certain threshold, the link will become + highlighted. For wireless nodes which broadcast data to all nodes in range, + the throughput rate is displayed next to the node and the node will become + circled if the threshold is exceeded. + +#### Observer Widgets + +These Widgets are available from the *Observer Widgets* submenu of the +*Widgets* menu, and from the Widgets Tool on the toolbar. Only one Observer Widget may +be used at a time. Mouse over a node while the session is running to pop up +an informational display about that node. + +Available Observer Widgets include IPv4 and IPv6 routing tables, socket +information, list of running processes, and OSPFv2/v3 neighbor information. + +Observer Widgets may be edited by the user and rearranged. Choosing *Edit...* +from the Observer Widget menu will invoke the Observer Widgets dialog. A list +of Observer Widgets is displayed along with up and down arrows for rearranging +the list. Controls are available for renaming each widget, for changing the +command that is run during mouse over, and for adding and deleting items from +the list. Note that specified commands should return immediately to avoid +delays in the GUI display. Changes are saved to a **widgets.conf** file in +the CORE configuration directory. + +### Session Menu + +The Session Menu has entries for starting, stopping, and managing sessions, +in addition to global options such as node types, comments, hooks, servers, +and options. + +* *Start* or *Stop* - this starts or stops the emulation, performing the same + function as the green Start or red Stop button. +* *Change sessions...* - invokes the CORE Sessions dialog box containing a list + of active CORE sessions in the daemon. Basic session information such as + name, node count, start time, and a thumbnail are displayed. This dialog + allows connecting to different sessions, shutting them down, or starting + a new session. 
+* *Node types...* - invokes the CORE Node Types dialog, performing the same + function as the Edit button on the Network-Layer Nodes toolbar. +* *Comments...* - invokes the CORE Session Comments window where optional + text comments may be specified. These comments are saved at the top of the + configuration file, and can be useful for describing the topology or how + to use the network. +* *Hooks...* - invokes the CORE Session Hooks window where scripts may be + configured for a particular session state. The top of the window has a list + of configured hooks, and buttons on the bottom left allow adding, editing, + and removing hook scripts. The new or edit button will open a hook script + editing window. A hook script is a shell script invoked on the host (not + within a virtual node). + * *definition* - used by the GUI to tell the backend to clear any state. + * *configuration* - when the user presses the *Start* button, node, link, and + other configuration data is sent to the backend. This state is also + reached when the user customizes a service. + * *instantiation* - after configuration data has been sent, just before the nodes are created. + * *runtime* - all nodes and networks have been + built and are running. (This is the same state at which + the previously-named *global experiment script* was run.) + * *datacollect* - the user has pressed the + *Stop* button, but before services have been stopped and nodes have been + shut down. This is a good time to collect log files and other data from the + nodes. + * *shutdown* - all nodes and networks have been shut down and destroyed. +* *Reset node positions* - if you have moved nodes around + using the mouse or by using a mobility module, choosing this item will reset + all nodes to their original position on the canvas. The node locations are + remembered when you first press the Start button. +* *Emulation servers...* - invokes the CORE emulation + servers dialog for configuring emulation servers. +* *Change Sessions...* - invokes the Sessions dialog for switching between different + running sessions. This dialog is presented during startup when one or + more sessions are already running. +* *Options...* - presents per-session options, such as the IPv4 prefix to be + used, if any, for a control network; the ability to preserve + the session directory; and an on/off switch for SDT3D support. + +### Help Menu + +* *Online manual (www)*, *CORE website (www)*, *Mailing list (www)* - these + options attempt to open a web browser with the link to the specified web + resource. +* *About* - invokes the About dialog box for viewing version information. + +## Connecting with Physical Networks + +CORE's emulated networks run in real time, so they can be connected to live +physical networks. The RJ45 tool and the Tunnel tool help with connecting to +the real world. These tools are available from the *Link-layer nodes* menu. + +When connecting two or more CORE emulations together, MAC address collisions +should be avoided. CORE automatically assigns MAC addresses to interfaces when +the emulation is started, starting with **00:00:00:aa:00:00** and incrementing +the bottom byte. The starting byte should be changed on the second CORE machine +using the *MAC addresses...* option from the *Tools* menu. + +### RJ45 Tool + +The RJ45 node in CORE represents a physical interface on the real CORE machine. +Any real-world network device can be connected to the interface and communicate +with the CORE nodes in real time.
+ +The main drawback is that one physical interface is required for each +connection. When the physical interface is assigned to CORE, it may not be used +for anything else. Another consideration is that the computer or network that +you are connecting to must be co-located with the CORE machine. + +To place an RJ45 connection, click on the *Link-layer nodes* toolbar and select +the *RJ45 Tool* from the submenu. Click on the canvas near the node you want to +connect to. This could be a router, hub, switch, or WLAN, for example. Now +click on the *Link Tool* and draw a link between the RJ45 and the other node. +The RJ45 node will display "UNASSIGNED". Double-click the RJ45 node to assign a +physical interface. A list of available interfaces will be shown, and one may +be selected by double-clicking its name in the list, or an interface name may +be entered into the text box. + +**NOTE:** + When you press the Start button to instantiate your topology, the + interface assigned to the RJ45 will be connected to the CORE topology. The + interface can no longer be used by the system. For example, if there was an + IP address assigned to the physical interface before execution, the address + will be removed and control given over to CORE. No IP address is needed; the + interface is put into promiscuous mode so it will receive all packets and + send them into the emulated world. + +Multiple RJ45 nodes can be used within CORE and assigned to the same physical +interface if 802.1Q VLANs are used. This allows for more RJ45 nodes than +physical ports are available, but the hardware connected to +the physical port (e.g. a switch) must support VLAN tagging, and the available bandwidth +will be shared. + +You need to create separate VLAN virtual devices on the Linux host, +and then assign these devices to RJ45 nodes inside of CORE. The VLAN tagging is +actually performed outside of CORE, so when the CORE emulated node receives a +packet, the VLAN tag will already be removed. + +Here are example commands for creating VLAN devices under Linux: + +```shell +ip link add link eth0 name eth0.1 type vlan id 1 +ip link add link eth0 name eth0.2 type vlan id 2 +ip link add link eth0 name eth0.3 type vlan id 3 +``` + +### Tunnel Tool + +The tunnel tool builds GRE tunnels between CORE emulations or other hosts. +Tunneling can be helpful when the number of physical interfaces is limited or +when the peer is located on a different network. Also, a physical interface does +not need to be dedicated to CORE as with the RJ45 tool. + +The peer GRE tunnel endpoint may be another CORE machine or another +host that supports GRE tunneling. When placing a Tunnel node, initially +the node will display "UNASSIGNED". This text should be replaced with the IP +address of the tunnel peer. This is the IP address of the other CORE machine or +physical machine, not an IP address of another virtual node. + +**NOTE:** + Be aware of possible MTU issues with GRE devices. The *gretap* device + has an interface MTU of 1,458 bytes; when joined to a Linux bridge, the + bridge's MTU + becomes 1,458 bytes. The Linux bridge will not perform fragmentation for + large packets if other bridge ports have a higher MTU such as 1,500 bytes. + +The GRE key is used to identify flows with GRE tunneling. This allows multiple +GRE tunnels to exist between that same pair of tunnel peers. A unique number +should be used when multiple tunnels are used with the same peer. When +configuring the peer side of the tunnel, ensure that the matching keys are +used.
+ +Here are example commands for building the other end of a tunnel on a Linux +machine. In this example, a router in CORE has the virtual address +**10.0.0.1/24** and the CORE host machine has the (real) address +**198.51.100.34/24**. The Linux box +that will connect with the CORE machine is reachable over the (real) network +at **198.51.100.76/24**. +The emulated router is linked with the Tunnel Node. In the +Tunnel Node configuration dialog, the address **198.51.100.76** is entered, with +the key set to **1**. The gretap interface on the Linux box will be assigned +an address from the subnet of the virtual router node, +**10.0.0.2/24**. +```shell +# these commands are run on the tunnel peer +sudo ip link add gt0 type gretap remote 198.51.100.34 local 198.51.100.76 key 1 +sudo ip addr add 10.0.0.2/24 dev gt0 +sudo ip link set dev gt0 up +``` + + +Now the virtual router should be able to ping the Linux machine: + +```shell +# from the CORE router node +ping 10.0.0.2 +``` + +And the Linux machine should be able to ping inside the CORE emulation: + +```shell +# from the tunnel peer +ping 10.0.0.1 +``` + +To debug this configuration, **tcpdump** can be run on the gretap devices, or +on the physical interfaces on the CORE or Linux machines. Make sure that a +firewall is not blocking the GRE traffic. + +### Communicating with the Host Machine + +The host machine that runs the CORE GUI and/or daemon is not necessarily +accessible from a node. Running an X11 application on a node, for example, +requires some channel of communication for the application to connect with +the X server for graphical display. There are several different ways to +connect from the node to the host and vice versa. + + +#### Control Network + +The quickest way to connect with the host machine through the primary control network. + +With a control network, the host can launch an X11 application on a node. +To run an X11 application on the node, the **SSH** service can be enabled on +the node, and SSH with X11 forwarding can be used from the host to the node: + +```shell +# SSH from host to node n5 to run an X11 app +ssh -X 172.16.0.5 xclock +``` + +Note that the **coresendmsg** utility can be used for a node to send +messages to the CORE daemon running on the host (if the **listenaddr = 0.0.0.0** +is set in the **/etc/core/core.conf** file) to interact with the running +emulation. For example, a node may move itself or other nodes, or change +its icon based on some node state. + +#### Other Methods + +There are still other ways to connect a host with a node. The RJ45 Tool +can be used in conjunction with a dummy interface to access a node: + +```shell +sudo modprobe dummy numdummies=1 +``` + +A **dummy0** interface should appear on the host. Use the RJ45 tool assigned +to **dummy0**, and link this to a node in your scenario. After starting the +session, configure an address on the host. + +```shell +sudo brctl show +# determine bridge name from the above command +# assign an IP address on the same network as the linked node +sudo ip addr add 10.0.1.2/24 dev b.48304.34658 +``` + +In the example shown above, the host will have the address **10.0.1.2** and +the node linked to the RJ45 may have the address **10.0.1.1**. + +## Building Sample Networks + +### Wired Networks + +Wired networks are created using the *Link Tool* to draw a link between two +nodes. This automatically draws a red line representing an Ethernet link and +creates new interfaces on network-layer nodes. 
+ +Double-click on the link to invoke the *link configuration* dialog box. Here +you can change the Bandwidth, Delay, Loss, and Duplicate +rate parameters for that link. You can also modify the color and width of the +link, affecting its display. + +Link-layer nodes are provided for modeling wired networks. These do not create +a separate network stack when instantiated, but are implemented using Linux bridging. +These are the hub, switch, and wireless LAN nodes. The hub copies each packet from +the incoming link to every connected link, while the switch behaves more like an +Ethernet switch and keeps track of the Ethernet address of the connected peer, +forwarding unicast traffic only to the appropriate ports. + +The wireless LAN (WLAN) is covered in the next section. + +### Wireless Networks + +The wireless LAN node allows you to build wireless networks where moving nodes +around affects the connectivity between them. The wireless LAN, or WLAN, node +appears as a small cloud. The WLAN offers several levels of wireless emulation +fidelity, depending on your modeling needs. + +The WLAN tool can be extended with plug-ins for different levels of wireless +fidelity. The basic on/off range is the default setting available on all +platforms. Other plug-ins offer higher fidelity at the expense of greater +complexity and CPU usage. The availability of certain plug-ins varies depending +on platform. See the table below for a brief overview of wireless model types. + + +Model|Type|Supported Platform(s)|Fidelity|Description +-----|----|---------------------|--------|----------- +|Basic|on/off|Linux|Low|Ethernet bridging with ebtables +|EMANE|Plug-in|Linux|High|TAP device connected to EMANE emulator with pluggable MAC and PHY radio types + +To quickly build a wireless network, you can first place several router nodes +onto the canvas. If you have the +Quagga MDR software installed, it is +recommended that you use the *mdr* node type for reduced routing overhead. Next +choose the *wireless LAN* from the *Link-layer nodes* submenu. First set the +desired WLAN parameters by double-clicking the cloud icon. Then you can link +all of the routers by right-clicking on the WLAN and choosing *Link to all +routers*. + +Linking a router to the WLAN causes a small antenna to appear, but no red link +line is drawn. Routers can have multiple wireless links and both wireless and +wired links (however, you will need to manually configure route +redistribution.) The mdr node type will generate a routing configuration that +enables OSPFv3 with MANET extensions. This is a Boeing-developed extension to +Quagga's OSPFv3 that reduces flooding overhead and optimizes the flooding +procedure for mobile ad-hoc (MANET) networks. + +The default configuration of the WLAN is set to use the basic range model, +using the *Basic* tab in the WLAN configuration dialog. Having this model +selected causes **core-daemon** to calculate the distance between nodes based +on screen pixels. A numeric range in screen pixels is set for the wireless +network using the *Range* slider. When two wireless nodes are within range of +each other, a green line is drawn between them and they are linked. Two +wireless nodes that are farther than the range pixels apart are not linked. +During Execute mode, users may move wireless nodes around by clicking and +dragging them, and wireless links will be dynamically made or broken. + +The *EMANE* tab lists available EMANE models to use for wireless networking. 
+See the [EMANE](emane.md) chapter for details on using EMANE. + +### Mobility Scripting + +CORE has a few ways to script mobility. + +* ns-2 script - the script specifies either absolute positions + or waypoints with a velocity. Locations are given with Cartesian coordinates. +* CORE API - an external entity can move nodes by sending CORE API Node + messages with updated X,Y coordinates; the **coresendmsg** utility + allows a shell script to generate these messages. +* EMANE events - see [EMANE](emane.md) for details on using EMANE scripts to move + nodes around. Location information is typically given as latitude, longitude, + and altitude. + +For the first method, you can create a mobility script using a text +editor, or using a tool such as [BonnMotion](http://net.cs.uni-bonn.de/wg/cs/applications/bonnmotion/), and associate the script with one of the wireless LAN nodes +using the WLAN configuration dialog box. Click the *ns-2 mobility script...* +button, and set the *mobility script file* field in the resulting *ns2script* +configuration dialog. + +Here is an example for creating a BonnMotion script for 10 nodes: + +```shell +bm -f sample RandomWaypoint -n 10 -d 60 -x 1000 -y 750 +bm NSFile -f sample +# use the resulting 'sample.ns_movements' file in CORE +``` + +When the Execute mode is started and one of the WLAN nodes has a mobility +script, a mobility script window will appear. This window contains controls for +starting, stopping, and resetting the running time for the mobility script. The +*loop* checkbox causes the script to play continuously. The *resolution* text +box contains the number of milliseconds between each timer event; lower values +cause the mobility to appear smoother but consume greater CPU time. + +The format of an ns-2 mobility script looks like: + +```shell +# nodes: 3, max time: 35.000000, max x: 600.00, max y: 600.00 +$node_(2) set X_ 144.0 +$node_(2) set Y_ 240.0 +$node_(2) set Z_ 0.00 +$ns_ at 1.00 "$node_(2) setdest 130.0 280.0 15.0" +``` + +The first three lines set an initial position for node 2. The last line in the +above example causes node 2 to move towards the destination **(130, 280)** at +speed **15**. All units are screen coordinates, with speed in units per second. +The total script time is learned after all nodes have reached their waypoints. +Initially, the time slider in the mobility script dialog will not be +accurate. + +Example mobility scripts (and their associated topology files) can be found in the **configs/** directory. + +## Multiple Canvases + +CORE supports multiple canvases for organizing emulated nodes. Nodes running on +different canvases may be linked together. + +To create a new canvas, choose *New* from the *Canvas* menu. A new canvas tab +appears in the bottom left corner. Clicking on a canvas tab switches to that +canvas. Double-click on one of the tabs to invoke the *Manage Canvases* dialog +box. Here, canvases may be renamed and reordered, and you can easily switch to +one of the canvases by selecting it. + +Each canvas maintains its own set of nodes and annotations. To link between +canvases, select a node and right-click on it, choose *Create link to*, choose +the target canvas from the list, and from that submenu the desired node. A +pseudo-link will be drawn, representing the link between the two nodes on +different canvases. Double-clicking on the label at the end of the arrow will +jump to the canvas that it links.
+ +## Distributed Emulation + +A large emulation scenario can be deployed on multiple emulation servers and +controlled by a single GUI. The GUI, representing the entire topology, can be +run on one of the emulation servers or on a separate machine. Emulations can be +distributed on Linux. + +Each machine that will act as an emulation server needs to have CORE installed. +It is not important to have the GUI component but the CORE Python daemon +**core-daemon** needs to be installed. Set the **listenaddr** line in the +**/etc/core/core.conf** configuration file so that the CORE Python +daemon will respond to commands from other servers: + +```shell +### core-daemon configuration options ### +[core-daemon] +pidfile = /var/run/core-daemon.pid +logfile = /var/log/core-daemon.log +listenaddr = 0.0.0.0 +``` + + +The **listenaddr** should be set to the address of the interface that should +receive CORE API control commands from the other servers; setting **listenaddr += 0.0.0.0** causes the Python daemon to listen on all interfaces. CORE uses TCP +port 4038 by default to communicate from the controlling machine (with GUI) to +the emulation servers. Make sure that firewall rules are configured as +necessary to allow this traffic. + +In order to easily open shells on the emulation servers, the servers should be +running an SSH server, and public key login should be enabled. This is +accomplished by generating an SSH key for your user if you do not already have +one (use **ssh-keygen -t rsa**), and then copying your public key to the +authorized_keys file on the server (for example, **ssh-copy-id user@server** or +**scp ~/.ssh/id_rsa.pub server:.ssh/authorized_keys**.) When double-clicking on +a node during runtime, instead of opening a local shell, the GUI will attempt +to SSH to the emulation server to run an interactive shell. The user name used +for these remote shells is the same user that is running the CORE GUI. + +**HINT: Here is a quick distributed emulation checklist.** + +1. Install the CORE daemon on all servers. +2. Configure public-key SSH access to all servers (if you want to use +double-click shells or Widgets; see the example commands below.) +3. Set **listenaddr=0.0.0.0** in all of the servers' core.conf files, +then start (or restart) the daemon. +4. Select nodes, right-click them, and choose *Assign to* to assign +the servers (add servers through *Session*, *Emulation Servers...*) +5. Press the *Start* button to launch the distributed emulation. + +Servers are configured by choosing *Emulation servers...* from the *Session* +menu. Server parameters are configured in the list below and stored in a +*servers.conf* file for use in different scenarios. The IP address and port of +the server must be specified. The name of each server will be saved in the +topology file as each node's location. + +**NOTE:** + The server that the GUI connects with + is referred to as the master server. + +The user needs to assign nodes to emulation servers in the scenario. Making no +assignment means the node will be emulated on the master server. +In the configuration window of every node, a drop-down box located between +the *Node name* and the *Image* button will select the name of the emulation +server. By default, this menu shows *(none)*, indicating that the node will +be emulated locally on the master. When entering Execute mode, the CORE GUI +will deploy the node on its assigned emulation server.
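As referenced in step 2 of the checklist above, public-key SSH access can be set up with the standard OpenSSH tools; the user and server names below are placeholders for your own:

```shell
# generate a key pair for your user, if you do not already have one
ssh-keygen -t rsa
# copy the public key to each emulation server
ssh-copy-id user@server1
ssh-copy-id user@server2
```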
+ +Another way to assign emulation servers is to select one or more nodes using +the select tool (shift-click to select multiple), and right-click one of the +nodes and choose *Assign to...*. + +The *CORE emulation servers* dialog box may also be used to assign nodes to +servers. The assigned server name appears in parentheses next to the node name. +To assign all nodes to one of the servers, click on the server name and then +the *all nodes* button. Servers that have assigned nodes are shown in blue in +the server list. Another option is to first select a subset of nodes, then open +the *CORE emulation servers* box and use the *selected nodes* button. + +**IMPORTANT:** + Leave the nodes unassigned if they are to be run on the master server. + Do not explicitly assign the nodes to the master server. + +The emulation server machines should be reachable on the specified port and via +SSH. SSH is used when double-clicking a node to open a shell: the GUI will open +an SSH prompt to that node's emulation server. Public-key authentication should +be configured so that SSH passwords are not needed. + +If there is a link between two nodes residing on different servers, the GUI +will draw the link with a dashed line, and automatically create necessary +tunnels between the nodes when executed. Care should be taken to arrange the +topology such that the number of tunnels is minimized. The tunnels carry data +between servers to connect nodes as specified in the topology. +These tunnels are created using GRE tunneling, similar to the Tunnel Tool. + +Wireless nodes, i.e. those connected to a WLAN node, can be assigned to +different emulation servers and participate in the same wireless network +only if an +EMANE model is used for the WLAN. The basic range model does not work across multiple servers due +to the Linux bridging and ebtables rules that are used. + +**NOTE:** + The basic range wireless model does not support distributed emulation, + but EMANE does. + +## Services + +CORE uses the concept of services to specify what processes or scripts run on a +node when it is started. Layer-3 nodes such as routers and PCs are defined by +the services that they run. + +Services may be customized for each node, or new custom services can be +created. New node types can be created, each having a different name, icon, and +set of default services. Each service defines the per-node directories, +configuration files, startup index, starting commands, validation commands, +shutdown commands, and meta-data associated with a node. + +**NOTE:** + Network namespace nodes do not undergo the normal Linux boot process + using the **init**, **upstart**, or **systemd** frameworks. These + lightweight nodes use configured CORE *services*. + +### Default Services and Node Types + +Here are the default node types and their services: + +* *router* - zebra, OSPFv2, OSPFv3, and IPForward services for IGP + link-state routing. +* *host* - DefaultRoute and SSH services, representing an SSH server having a + default route when connected directly to a router. +* *PC* - DefaultRoute service for having a default route when connected + directly to a router. +* *mdr* - zebra, OSPFv3MDR, and IPForward services for + wireless-optimized MANET Designated Router routing. +* *prouter* - a physical router, having the same default services as the + *router* node type; for incorporating Linux testbed machines into an + emulation. + +Configuration files can be automatically generated by each service.
For +example, CORE automatically generates routing protocol configuration for the +router nodes in order to simplify the creation of virtual networks. + +To change the services associated with a node, double-click on the node to +invoke its configuration dialog and click on the *Services...* button, +or right-click a node and choose *Services...* from the menu. +Services are enabled or disabled by clicking on their names. The button next to +each service name allows you to customize all aspects of this service for this +node. For example, special route redistribution commands could be inserted +into the Quagga routing configuration associated with the zebra service. + +To change the default services associated with a node type, use the Node Types +dialog available from the *Edit* button at the end of the Layer-3 nodes +toolbar, or choose *Node types...* from the *Session* menu. Note that +any new services selected are not applied to existing nodes if the nodes have +been customized. + +The node types are saved in a **~/.core/nodes.conf** file, not with the +**.imn** file. Keep this in mind when changing the default services for +existing node types; it may be better to simply create a new node type. It is +recommended that you do not change the default built-in node types. The +**nodes.conf** file can be copied between CORE machines to save your custom +types. + +### Customizing a Service + +A service can be fully customized for a particular node. From the node's +configuration dialog, click on the button next to the service name to invoke +the service customization dialog for that service. +The dialog has three tabs for configuring the different aspects of the service: +files, directories, and startup/shutdown. + +**NOTE:** + A **yellow** customize icon next to a service indicates that service + requires customization (e.g. the *Firewall* service). + A **green** customize icon indicates that a custom configuration exists. + Click the *Defaults* button when customizing a service to remove any + customizations. + +The Files tab is used to display or edit the configuration files or scripts that +are used for this service. Files can be selected from a drop-down list, and +their contents are displayed in a text entry below. The file contents are +generated by the CORE daemon based on the network topology that exists at +the time the customization dialog is invoked. + +The Directories tab shows the per-node directories for this service. For the +default types, CORE nodes share the same filesystem tree, except for these +per-node directories that are defined by the services. For example, the +**/var/run/quagga** directory needs to be unique for each node running +the Zebra service, because Quagga running on each node needs to write separate +PID files to that directory. + +**NOTE:** + The **/var/log** and **/var/run** directories are + mounted uniquely per-node by default. + Per-node mount targets can be found in **/tmp/pycore.nnnnn/nN.conf/** + (where *nnnnn* is the session number and *N* is the node number.) + +The Startup/shutdown tab lists commands that are used to start and stop this +service. The startup index allows configuring when this service starts relative +to the other services enabled for this node; a service with a lower startup +index value is started before those with higher values. Because shell scripts +generated by the Files tab will not have execute permissions set, the startup +commands should include the shell name, with +something like ```sh script.sh```.
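As a concrete sketch (the service, script, and process names here are hypothetical), the command sets for a customized service that launches a script defined on the Files tab might look like the following:

```shell
# Startup command (Startup/shutdown tab); include the shell explicitly, since
# files generated by the Files tab do not have execute permissions set
sh myapp.sh

# Validate command; a non-zero return value generates a Check Emulation Light exception
pidof myapp

# Shutdown command; sends a kill signal so the service stops cleanly
killall myapp
```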
+
+Shutdown commands optionally terminate the process(es) associated with this
+service. Generally, they send a kill signal to the running process using the
+*kill* or *killall* commands. If the service does not terminate
+the running processes using a shutdown command, the processes will be killed
+when the *vnoded* daemon is terminated (with *kill -9*) and
+the namespace destroyed. It is good practice to specify shutdown commands,
+which allow for proper process termination and for run-time control of
+stopping and restarting services.
+
+Validate commands are executed following the startup commands. A validate
+command can execute a process or script that should return zero if the service
+has started successfully, and a non-zero value for services that have had a
+problem starting. For example, the *pidof* command will check
+if a process is running and return zero when found. When a validate command
+produces a non-zero return value, an exception is generated, which will cause
+an error to be displayed in the Check Emulation Light.
+
+**TIP:**
+   To start, stop, and restart services during run-time, right-click a
+   node and use the *Services...* menu.
+
+### Creating New Services
+
+Services can save the time required to configure nodes, especially if a number
+of nodes require similar configuration procedures. New services can be
+introduced to automate tasks.
+
+The easiest way to capture the configuration of a new process into a service
+is by using the **UserDefined** service. This is a blank service where any
+aspect may be customized. The UserDefined service is convenient for testing
+ideas for a service before adding a new service type.
+
+To introduce new service types, a **myservices/** directory exists in the
+user's CORE configuration directory, at **~/.core/myservices/**. A detailed
+**README.txt** file exists in that directory to outline the steps necessary
+for adding a new service. First, you need to create a small Python file that
+defines the service; then the **custom_services_dir** entry must be set
+in the **/etc/core/core.conf** configuration file. A sample is provided in
+the **myservices/** directory.
+
+**NOTE:**
+   The directory name used in **custom_services_dir** should be unique and
+   should not correspond to any existing Python module name. For example,
+   don't use the name **subprocess** or **services**.
+
+If you have created a new service type that may be useful to others, please
+consider contributing it to the CORE project.
+
+## Check Emulation Light
+
+The Check Emulation Light, or CEL, is located in the bottom right-hand corner
+of the status bar in the CORE GUI. This is a yellow icon that indicates one or
+more problems with the running emulation. Clicking on the CEL will invoke the
+CEL dialog.
+
+The Check Emulation Light dialog contains a list of exceptions received from
+the CORE daemon. An exception has a time, severity level, optional node number,
+and source. When the CEL is blinking, this indicates one or more fatal
+exceptions. An exception with a fatal severity level indicates that one or more
+of the basic pieces of emulation could not be created, such as failure to
+create a bridge or namespace, or the failure to launch EMANE processes for an
+EMANE-based network.
+
+Clicking on an exception displays details for that
+exception. If a node number is specified, that node is highlighted on the
+canvas when the exception is selected.
The exception source is a text string +to help trace where the exception occurred; "service:UserDefined" for example, +would appear for a failed validation command with the UserDefined service. + +Buttons are available at the bottom of the dialog for clearing the exception +list and for viewing the CORE daemon and node log files. + +**NOTE:** + In batch mode, exceptions received from the CORE daemon are displayed on + the console. + +## Configuration Files + +Configurations are saved to **xml** or **.imn** topology files using +the *File* menu. You +can easily edit these files with a text editor. +Any time you edit the topology +file, you will need to stop the emulation if it were running and reload the +file. + +The **.xml** [file schema is specified by NRL](http://www.nrl.navy.mil/itd/ncs/products/mnmtools) and there are two versions to date: +version 0.0 and version 1.0, +with 1.0 as the current default. CORE can open either XML version. However, the +xmlfilever line in **/etc/core/core.conf** controls the version of the XML file +that CORE will create. + +In version 1.0, the XML file is also referred to as the Scenario Plan. The Scenario Plan will be logically +made up of the following: + +* **Network Plan** - describes nodes, hosts, interfaces, and the networks to + which they belong. +* **Motion Plan** - describes position and motion patterns for nodes in an + emulation. +* **Services Plan** - describes services (protocols, applications) and traffic + flows that are associated with certain nodes. +* **Visualization Plan** - meta-data that is not part of the NRL XML schema but + used only by CORE. For example, GUI options, canvas and annotation info, etc. + are contained here. +* **Test Bed Mappings** - describes mappings of nodes, interfaces and EMANE modules in the scenario to + test bed hardware. + CORE includes Test Bed Mappings in XML files that are saved while the scenario is running. + + +The **.imn** file format comes from IMUNES, and is +basically Tcl lists of nodes, links, etc. +Tabs and spacing in the topology files are important. The file starts by +listing every node, then links, annotations, canvases, and options. Each entity +has a block contained in braces. The first block is indented by four spaces. +Within the **network-config** block (and any *custom-*-config* block), the +indentation is one tab character. + +**TIP:** + There are several topology examples included with CORE in + the **configs/** directory. + This directory can be found in **~/.core/configs**, or + installed to the filesystem + under **/usr[/local]/share/examples/configs**. + +**TIP:** + When using the **.imn** file format, file paths for things like custom + icons may contain the special variables **$CORE_DATA_DIR** or **$CONFDIR** which + will be substituted with **/usr/share/core** or **~/.core/configs**. + +**TIP:** + Feel free to edit the files directly using your favorite text editor. + +## Customizing your Topology's Look + +Several annotation tools are provided for changing the way your topology is +presented. Captions may be added with the Text tool. Ovals and rectangles may +be drawn in the background, helpful for visually grouping nodes together. + +During live demonstrations the marker tool may be helpful for drawing temporary +annotations on the canvas that may be quickly erased. A size and color palette +appears at the bottom of the toolbar when the marker tool is selected. Markings +are only temporary and are not saved in the topology file. 
+ +The basic node icons can be replaced with a custom image of your choice. Icons +appear best when they use the GIF or PNG format with a transparent background. +To change a node's icon, double-click the node to invoke its configuration +dialog and click on the button to the right of the node name that shows the +node's current icon. + +A background image for the canvas may be set using the *Wallpaper...* option +from the *Canvas* menu. The image may be centered, tiled, or scaled to fit the +canvas size. An existing terrain, map, or network diagram could be used as a +background, for example, with CORE nodes drawn on top. + +## Preferences + +The *Preferences* Dialog can be accessed from the **Edit_Menu**. There are +numerous defaults that can be set with this dialog, which are stored in the +**~/.core/prefs.conf** preferences file. + + + diff --git a/gui/.gitignore b/gui/.gitignore new file mode 100644 index 00000000..682be43e --- /dev/null +++ b/gui/.gitignore @@ -0,0 +1,2 @@ +core-gui +version.tcl diff --git a/gui/Makefile.am b/gui/Makefile.am new file mode 100644 index 00000000..0d0d2b47 --- /dev/null +++ b/gui/Makefile.am @@ -0,0 +1,41 @@ +# CORE +# (c)2010-2013 the Boeing Company. +# See the LICENSE file included in this distribution. +# +# author: Jeff Ahrenholz +# +# Makefile for installing the CORE GUI. Since it is a Tcl/Tk script, we do not +# build anything here. +# + +SUBDIRS = icons + +TCL_FILES := $(wildcard *.tcl) +ADDONS_FILES := $(wildcard addons/*) +CONFIG_FILES := $(wildcard configs/*) + +# CORE GUI script (/usr/local/bin/core-gui) +dist_bin_SCRIPTS = core-gui + +# Tcl/Tk scripts (/usr/local/lib/core) +coredir = $(CORE_LIB_DIR) +dist_core_DATA = $(TCL_FILES) +dist_core_SCRIPTS = $(OTHER_FILES) + +# Addon files +coreaddonsdir = $(coredir)/addons +dist_coreaddons_DATA = $(ADDONS_FILES) + +# Sample configs (/usr/local/share/core/examples/configs) +coreconfigsdir = $(datadir)/core/examples/configs +dist_coreconfigs_DATA = $(CONFIG_FILES) + +# remove generated file from dist +dist-hook: + -rm -f $(distdir)/version.tcl + +# extra cruft to remove +DISTCLEANFILES = Makefile.in + +# files to include in source tarball not included elsewhere +EXTRA_DIST = core-gui.in diff --git a/gui/addons/ipsecservice.tcl b/gui/addons/ipsecservice.tcl new file mode 100644 index 00000000..c859852a --- /dev/null +++ b/gui/addons/ipsecservice.tcl @@ -0,0 +1,329 @@ +# +# This is a separate "addons" file because it is closely tied to Python +# service definition for the IPsec service. +# + +# +# Helper dialog for configuring the IPsec service +# +proc popupServiceConfig_IPsec { parent w node service btn } { + global plugin_img_add plugin_img_del plugin_img_edit + + set f $w.note.ipsec + ttk::frame $f + set h "This IPsec service helper will assist with building an ipsec.sh file" + set h "$h (located on the Files tab).\nThe IPsec service builds ESP" + set h "$h tunnels between the specified peers using the racoon IKEv2" + set h "$h\nkeying daemon. You need to provide keys and the addresses of" + set h "$h peers, along with the\nsubnets to tunnel." 
+ ttk::label $f.help -text $h + pack $f.help -side top -anchor w -padx 4 -pady 4 + $w.note add $f -text "IPsec" -underline 0 + + global g_ipsec_key_dir g_ipsec_key_name + set g_ipsec_key_dir "/etc/core/keys" + set g_ipsec_key_name "ipsec1" + ttk::labelframe $f.keys -text "Keys" + + ttk::frame $f.keys.dir + ttk::label $f.keys.dir.lab -text "Key directory:" + ttk::entry $f.keys.dir.ent -width 40 -textvariable g_ipsec_key_dir + ttk::button $f.keys.dir.btn -width 5 -text "..." -command { + set f .popupServicesCustomize.note.ipsec + set d [$f.keys.dir.ent get] + set d [tk_chooseDirectory -initialdir $d -title "Key directory"] + if { $d != "" } { + $f.keys.dir.ent delete 0 end + $f.keys.dir.ent insert 0 $d + } + } + pack $f.keys.dir.lab $f.keys.dir.ent $f.keys.dir.btn \ + -side left -padx 4 -pady 4 + pack $f.keys.dir -side top -anchor w + + ttk::frame $f.keys.name + ttk::label $f.keys.name.lab -text "Key base name:" + ttk::entry $f.keys.name.ent -width 10 -textvariable g_ipsec_key_name + pack $f.keys.name.lab $f.keys.name.ent -side left -padx 4 -pady 4 + pack $f.keys.name -side top -anchor w + + set h "The (name).pem x509 certificate and (name).key RSA private key need" + set h "$h to exist in the\nspecified directory. These can be generated" + set h "$h using the openssl tool. Also, a ca-cert.pem\nfile should exist" + set h "$h in the key directory for the CA that issued the certs." + ttk::label $f.keys.help -text $h + pack $f.keys.help -side top -anchor w -padx 4 -pady 4 + + pack $f.keys -side top -pady 4 -pady 4 -expand true -fill x + + ttk::labelframe $f.t -text "IPsec Tunnel Endpoints" + set h "(1) Define tunnel endpoints (select peer node using the button" + set h "$h, then select address from the list)" + ttk::label $f.t.lab1 -text $h + pack $f.t.lab1 -side top -anchor w -padx 4 -pady 4 + ttk::frame $f.t.ep + ttk::label $f.t.ep.lab1 -text "Local:" + ttk::combobox $f.t.ep.combo1 -width 12 + pack $f.t.ep.lab1 $f.t.ep.combo1 -side left -padx 4 -pady 4 + populateComboWithIPs $f.t.ep.combo1 $node + + global g_twoNodeSelect g_twoNodeSelectCallback + set g_twoNodeSelect "" + set g_twoNodeSelectCallback selectTwoNodeIPsecCallback + + set h "Choose a node by clicking it on the canvas" + set h "$h or\nby selecting it from the list below." 
+ ttk::label $f.t.ep.lab2 -text "Peer node:" + ttk::checkbutton $f.t.ep.node -text " (none) " -variable g_twoNodeSelect \ + -onvalue "$f.t.ep.node" -style Toolbutton \ + -command "popupSelectNodes {$h} {} selectNodesIPsecCallback" + + ttk::label $f.t.ep.lab3 -text "Peer:" + ttk::combobox $f.t.ep.combo2 -width 12 + ttk::button $f.t.ep.add -text "Add Endpoint" -image $plugin_img_add \ + -compound left -command "ipsecTreeHelper $f ep" + pack $f.t.ep.lab2 $f.t.ep.node $f.t.ep.lab3 $f.t.ep.combo2 \ + $f.t.ep.add -side left -padx 4 -pady 4 + pack $f.t.ep -side top -anchor w + + set h "(2) Select endpoints below and add the subnets to be encrypted" + ttk::label $f.t.lab2 -text $h + pack $f.t.lab2 -side top -anchor w -padx 4 -pady 4 + + ttk::frame $f.t.sub + ttk::label $f.t.sub.lab1 -text "Local subnet:" + ttk::combobox $f.t.sub.combo1 -width 12 + ttk::label $f.t.sub.lab2 -text "Remote subnet:" + ttk::combobox $f.t.sub.combo2 -width 12 + ttk::button $f.t.sub.add -text "Add Subnet" -image $plugin_img_add \ + -compound left -command "ipsecTreeHelper $f sub" + pack $f.t.sub.lab1 $f.t.sub.combo1 $f.t.sub.lab2 $f.t.sub.combo2 \ + $f.t.sub.add -side left -padx 5 -pady 4 + pack $f.t.sub -side top -anchor w + + global node_list + set net_list [ipv4SubnetList $node_list] + $f.t.sub.combo1 configure -values $net_list + $f.t.sub.combo2 configure -values $net_list + + ttk::treeview $f.t.tree -height 5 -selectmode browse -show tree + + pack $f.t.tree -side top -padx 4 -pady 4 -fill both + pack $f.t -side top -expand true -fill both + + ttk::frame $f.bottom + ttk::button $f.bottom.del -image $plugin_img_del \ + -command "ipsecTreeHelper $f del" + ttk::button $f.bottom.gen -text "Generate ipsec.sh" \ + -image $plugin_img_edit -compound left -command "generateIPsecScript $w" + pack $f.bottom.del $f.bottom.gen -side left -padx 4 -pady 4 + pack $f.bottom -side top +} + +# +# Callback invoked when receiving configuration values +# from a Configuration Message; this service helper depends on the ipsec.sh +# file, not the other configuration values. 
+# +#proc popupServiceConfig_IPsec_vals { node values services w } { +#} + +# +# Callback invoked when receiving service file data from a File Message +proc popupServiceConfig_IPsec_file { node name data w } { + if { $name == "ipsec.sh" } { + readIPsecScript $w + } +} + +# helper to insert all of a node's IP addresses into a combo +proc populateComboWithIPs { combo node } { + set ip_list [ipv4List $node 0] + $combo configure -values $ip_list + $combo delete 0 end + $combo insert 0 [lindex $ip_list 0] +} + +# called from editor.tcl:button1 when user clicks on a node +# search for IP address and populate +proc selectTwoNodeIPsecCallback {} { + set w .popupServicesCustomize + set f $w.note.ipsec + + if { ![winfo exists $w] } { return }; # user has closed window + catch {destroy .nodeselect} + + set node [string trim [$f.t.ep.node cget -text]] + if { [set node] == "(none)" } { set node "" } + + # populate peer interface combo with list of IPs + populateComboWithIPs $f.t.ep.combo2 $node +} + +# called from popupSelectNodes dialog when a node selection has been made +proc selectNodesIPsecCallback { nodes } { + global g_twoNodeSelect + set w .popupServicesCustomize + set f $w.note.ipsec + + set g_twoNodeSelect "" + set node [lindex $nodes 0] + if { $node == "" } { + $f.t.ep.node configure -text "(none)" + return + } + $f.t.ep.node configure -text " $node " + + # populate peer interface combo with list of IPs + populateComboWithIPs $f.t.ep.combo2 $node +} + +# helper to manipulate tree; cmd is "del", "ep" or "sub" +proc ipsecTreeHelper { f cmd } { + + if { $cmd == "del" } { + set sel [$f.t.tree selection] + $f.t.tree delete $sel + return + } + + # add endpoint (ep) or subnet (sub) + set l [string trim [$f.t.$cmd.combo1 get]] + set p [string trim [$f.t.$cmd.combo2 get]] + if { $l == "" || $p == "" } { + if { $cmd == "ep" } { + set h "tunnel interface addresses" + } else { + set h "subnet addresses" + } + tk_messageBox -type ok -icon warning -message \ + "You need to select local and peer $h." + return + } + + if { $cmd == "ep" } { + set item [$f.t.tree insert {} end -text "$l <--> $p" -open true] + $f.t.tree selection set $item + } elseif { $cmd == "sub" } { + set parent [$f.t.tree selection] + if { $parent == "" } { + tk_messageBox -type ok -icon warning -message \ + "You need to first select endpoints, then configure their subnets." + return + } + if { [$f.t.tree parent $parent] != {} } { + set parent [$f.t.tree parent $parent] + } + $f.t.tree insert $parent end -text "$l <===> $p" + } +} + +# update an ipsec.sh file that was generated by the IPsec service +proc generateIPsecScript { w } { + #puts "generateIPsecScript $w..." 
+ set cfg [$w.note.files.txt get 0.0 end-1c] + set newcfg "" + + # + # Gather data for a new config + # + set f $w.note.ipsec + set keydir [$f.keys.dir.ent get] + set keyname [$f.keys.name.ent get] + + set tunnelhosts "" + set subnet_list "" + set ti 0 + set th_items [$f.t.tree children {}] + foreach th $th_items { + set ep [$f.t.tree item $th -text] + set i [string first " " $ep] + # replace " <--> " with "AND" + set ep [string replace $ep $i $i+5 "AND"] + # build a list e.g.: + # tunnelhosts="172.16.0.1AND172.16.0.2 172.16.0.1AND172.16.2.1" + lappend tunnelhosts $ep + + set subnets "" + foreach subnet_item [$f.t.tree children $th] { + set sn [$f.t.tree item $subnet_item -text] + set i [string first " " $sn] + # replace " <===> " with "AND" + set sn [string replace $sn $i $i+6 "AND"] + lappend subnets $sn + } + incr ti + set subnetstxt [join $subnets " "] + # build a list e.g.: + # T2="172.16.4.0/24AND172.16.5.0/24 172.16.4.0/24AND172.16.6.0/24" + set subnets "T$ti=\"$subnetstxt\"" + lappend subnet_list $subnets + } + + # + # Perform replacements in existing ipsec.sh file. + # + set have_subnets 0 + foreach line [split $cfg "\n"] { + if { [string range $line 0 6] == "keydir=" } { + set line "keydir=$keydir" + } elseif { [string range $line 0 8] == "certname=" } { + set line "certname=$keyname" + } elseif { [string range $line 0 11] == "tunnelhosts=" } { + set tunnelhosts [join $tunnelhosts " "] + set line "tunnelhosts=\"$tunnelhosts\"" + } elseif { [string range $line 0 0] == "T" && \ + [string is digit [string range $line 1 1]] } { + if { $have_subnets } { + continue ;# skip this line + } else { + set line [join $subnet_list "\n"] + set have_subnets 1 + } + } + lappend newcfg $line + } + $w.note.files.txt delete 0.0 end + $w.note.files.txt insert 0.0 [join $newcfg "\n"] + $w.note select $w.note.files + $w.btn.apply configure -state normal +} + +proc readIPsecScript { w } { + set cfg [$w.note.files.txt get 0.0 end-1c] + + set f $w.note.ipsec + $f.keys.dir.ent delete 0 end + $f.keys.name.ent delete 0 end + $f.t.tree delete [$f.t.tree children {}] + + set ti 1 + foreach line [split $cfg "\n"] { + if { [string range $line 0 6] == "keydir=" } { + $f.keys.dir.ent insert 0 [string range $line 7 end] + } elseif { [string range $line 0 8] == "certname=" } { + $f.keys.name.ent insert 0 [string range $line 9 end] + } elseif { [string range $line 0 11] == "tunnelhosts=" } { + set tunnelhosts [string range $line 13 end-1] + set ti 0 + foreach ep [split $tunnelhosts " "] { + incr ti + set i [string first "AND" $ep] + set ep [string replace $ep $i $i+2 " <--> "] + $f.t.tree insert {} end -id "T$ti" -text "$ep" -open true + } + } elseif { [string range $line 0 0] == "T" && \ + [string is digit [string range $line 1 1]] } { + set i [string first "=" $line] + set ti [string range $line 0 $i-1] + set subnets [split [string range $line $i+2 end-1] " "] + foreach sn $subnets { + set i [string first "AND" $sn] + set sn [string replace $sn $i $i+2 " <===> "] + if { [catch {$f.t.tree insert $ti end -text "$sn"} e] } { + puts "IPsec service ignoring line '$ti='" + } + } + } + } +} diff --git a/gui/annotations.tcl b/gui/annotations.tcl new file mode 100644 index 00000000..8a2184d3 --- /dev/null +++ b/gui/annotations.tcl @@ -0,0 +1,837 @@ +# +# Copyright 2007-2008 University of Zagreb, Croatia. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. 
Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +# SUCH DAMAGE. +# + +#****h* imunes/annotations.tcl +# NAME +# annotations.tcl -- oval, rectangle, text, background, ... +# FUNCTION +# This module is used for configuration/image annotations, such as oval, +# rectangle, text, background or some other. +#**** + +#****f* annotations.tcl/annotationConfig +# NAME +# annotationConfig -- +# SYNOPSIS +# annotationConfig $canvas $target +# FUNCTION +# . . . +# INPUTS +# * canvas -- +# * target -- oval or rectangle object +#**** + +proc annotationConfig { c target } { + switch -exact -- [nodeType $target] { + oval { + popupAnnotationDialog $c $target "true" + } + rectangle { + popupAnnotationDialog $c $target "true" + } + text { + popupAnnotationDialog $c $target "true" + } + default { + puts "Unknown type [nodeType $target] for target $target" + } + } + redrawAll +} + + +#****f* annotations.tcl/popupOvalDialog +# NAME +# popupOvalDialog -- creates a new oval or modifies existing oval +# SYNOPSIS +# popupOvalDialog $canvas $modify $color $label $lcolor +# FUNCTION +# Called from: +# - editor.tcl/button1-release when new oval is drawn +# - annotationConfig which is called from popupConfigDialog bound to +# Double-1 on various objects +# - configureOval called from button3annotation procedure which creates +# a menu for configuration and deletion (bound to 3 on oval, +# rectangle and text) +# INPUTS +# * canvas -- +# * modify -- create new oval "newoval" if modify=false or +# modify an existing oval "newoval" if modify=true +# * color -- oval color +# * label -- label text +# * lcolor -- label (text) color +#**** + + +#****f* annotations.tcl/destroyNewoval +# NAME +# destroyNewoval -- helper for popupOvalDialog and popupOvalApply +# SYNOPSIS +# destroyNewoval $canvas +# FUNCTION +# . . . 
+# INPUTS +# * canvas -- +#**** + +proc destroyNewoval { c } { + global newoval + $c delete -withtags newoval + set newoval "" +} + + +# oval/rectangle/text right-click menu + +proc button3annotation { type c x y } { + + if { $type == "oval" } { + set procname "Oval" + set item [lindex [$c gettags {oval && current}] 1] + } elseif { $type == "rectangle" } { + set procname "Rectangle" + set item [lindex [$c gettags {rectangle && current}] 1] + } elseif { $type == "label" } { + set procname "Label" + set item [lindex [$c gettags {label && current}] 1] + } elseif { $type == "text" } { + set procname "Text" + set item [lindex [$c gettags {text && current}] 1] + } elseif { $type == "marker" } { + # erase markings + $c delete -withtags {marker && current} + return + } else { + # ??? + return + } + if { $item == "" } { + return + } + set menutext "$type $item" + + .button3menu delete 0 end + + .button3menu add command -label "Configure $menutext" \ + -command "annotationConfig $c $item" + .button3menu add command -label "Delete $menutext" \ + -command "deleteAnnotation $c $type $item" + + set x [winfo pointerx .] + set y [winfo pointery .] + tk_popup .button3menu $x $y +} + + +proc deleteAnnotation { c type target } { + global changed annotation_list + + $c delete -withtags "$type && $target" + $c delete -withtags "new$type" + set i [lsearch -exact $annotation_list $target] + set annotation_list [lreplace $annotation_list $i $i] + set changed 1 + updateUndoLog +} + + +proc drawOval {oval} { + global $oval defOvalColor zoom curcanvas + global defTextFontFamily defTextFontSize + + set coords [getNodeCoords $oval] + if { [llength $coords] < 4 } { + puts "Bad coordinates for oval $oval" + return + } + set x1 [expr {[lindex $coords 0] * $zoom}] + set y1 [expr {[lindex $coords 1] * $zoom}] + set x2 [expr {[lindex $coords 2] * $zoom}] + set y2 [expr {[lindex $coords 3] * $zoom}] + set color [lindex [lsearch -inline [set $oval] "color *"] 1] + set label [lindex [lsearch -inline [set $oval] "label *"] 1] + set lcolor [lindex [lsearch -inline [set $oval] "labelcolor *"] 1] + set bordercolor [lindex [lsearch -inline [set $oval] "border *"] 1] + set width [lindex [lsearch -inline [set $oval] "width *"] 1] + set lx [expr $x1 + (($x2 - $x1) / 2)] + set ly [expr ($y1 + 20)] + + if { $color == "" } { set color $defOvalColor } + if { $lcolor == "" } { set lcolor black } + if { $width == "" } { set width 0 } + if { $bordercolor == "" } { set bordercolor black } + + # -outline red -stipple gray50 + set newoval [.c create oval $x1 $y1 $x2 $y2 \ + -fill $color -width $width -outline $bordercolor \ + -tags "oval $oval annotation"] + .c raise $newoval background + + set fontfamily [lindex [lsearch -inline [set $oval] "fontfamily *"] 1] + set fontsize [lindex [lsearch -inline [set $oval] "fontsize *"] 1] + if { $fontfamily == "" } { + set fontfamily $defTextFontFamily + } + if { $fontsize == "" } { + set fontsize $defTextFontSize + } + set newfontsize $fontsize + set font [list "$fontfamily" $fontsize] + set effects [lindex [lsearch -inline [set $oval] "effects *"] 1] + + .c create text $lx $ly -tags "oval $oval annotation" -text $label \ + -justify center -font "$font $effects" -fill $lcolor + + setNodeCanvas $oval $curcanvas + setType $oval "oval" +} + + +# Color helper for popupOvalDialog and popupLabelDialog +proc popupColor { type l settext } { + # popup color selection dialog with current color + if { $type == "fg" } { + set initcolor [$l cget -fg] + } else { + set initcolor [$l cget -bg] + } + set newcolor 
[tk_chooseColor -initialcolor $initcolor] + + # set fg or bg of the "l" label control + if { $newcolor == "" } { + return + } + if { $settext == "true" } { + $l configure -text $newcolor -$type $newcolor + } else { + $l configure -$type $newcolor + } +} + + +#****f* annotations.tcl/roundRect +# NAME +# roundRect -- Draw a rounded rectangle in the canvas. +# Called from drawRect procedure +# SYNOPSIS +# roundRect $w $x0 $y0 $x3 $y3 $radius $args +# FUNCTION +# Creates a rounded rectangle as a smooth polygon in the canvas +# and returns the canvas item number of the rounded rectangle. +# INPUTS +# * w -- Path name of the canvas +# * x0, y0 -- Coordinates of the upper left corner, in pixels +# * x3, y3 -- Coordinates of the lower right corner, in pixels +# * radius -- Radius of the bend at the corners, in any form +# acceptable to Tk_GetPixels +# * args -- Other args suitable to a 'polygon' item on the canvas +# Example: +# roundRect .c 100 50 500 250 $rad -fill white -outline black -tags rectangle +#**** + +proc roundRect { w x0 y0 x3 y3 radius args } { + + set r [winfo pixels $w $radius] + set d [expr { 2 * $r }] + + # Make sure that the radius of the curve is less than 3/8 size of the box + + set maxr 0.75 + + if { $d > $maxr * ( $x3 - $x0 ) } { + set d [expr { $maxr * ( $x3 - $x0 ) }] + } + if { $d > $maxr * ( $y3 - $y0 ) } { + set d [expr { $maxr * ( $y3 - $y0 ) }] + } + + set x1 [expr { $x0 + $d }] + set x2 [expr { $x3 - $d }] + set y1 [expr { $y0 + $d }] + set y2 [expr { $y3 - $d }] + + set cmd [list $w create polygon] + lappend cmd $x0 $y0 $x1 $y0 $x2 $y0 $x3 $y0 $x3 $y1 $x3 $y2 + lappend cmd $x3 $y3 $x2 $y3 $x1 $y3 $x0 $y3 $x0 $y2 $x0 $y1 + lappend cmd -smooth 1 + return [eval $cmd $args] + } + +proc drawRect {rectangle} { + global $rectangle defRectColor zoom curcanvas + global defTextFontFamily defTextFontSize + + set coords [getNodeCoords $rectangle] + if {$coords == "" || [llength $coords] != 4 } { + puts "Bad coordinates for rectangle $rectangle" + return + } + + set x1 [expr {[lindex $coords 0] * $zoom}] + set y1 [expr {[lindex $coords 1] * $zoom}] + set x2 [expr {[lindex $coords 2] * $zoom}] + set y2 [expr {[lindex $coords 3] * $zoom}] + set color [lindex [lsearch -inline [set $rectangle] "color *"] 1] + set label [lindex [lsearch -inline [set $rectangle] "label *"] 1] + set lcolor [lindex [lsearch -inline [set $rectangle] "labelcolor *"] 1] + set bordercolor [lindex [lsearch -inline [set $rectangle] "border *"] 1] + set width [lindex [lsearch -inline [set $rectangle] "width *"] 1] + set rad [lindex [lsearch -inline [set $rectangle] "rad *"] 1] + set lx [expr $x1 + (($x2 - $x1) / 2)] + set ly [expr ($y1 + 20)] + + if { $color == "" } { set color $defRectColor } + if { $lcolor == "" } { set lcolor black } + if { $bordercolor == "" } { set bordercolor black } + if { $width == "" } { set width 0 } + # rounded-rectangle radius + if { $rad == "" } { set rad 25 } + + # Boeing: allow borderless rectangles + if { $width == 0 } { + set newrect [roundRect .c $x1 $y1 $x2 $y2 $rad \ + -fill $color -tags "rectangle $rectangle annotation"] + } else { + # end Boeing + set newrect [roundRect .c $x1 $y1 $x2 $y2 $rad \ + -fill $color -outline $bordercolor -width $width \ + -tags "rectangle $rectangle annotation"] + .c raise $newrect background + # Boeing + } + # end Boeing + + set fontfamily [lindex [lsearch -inline [set $rectangle] "fontfamily *"] 1] + set fontsize [lindex [lsearch -inline [set $rectangle] "fontsize *"] 1] + if { $fontfamily == "" } { + set fontfamily $defTextFontFamily + } + 
if { $fontsize == "" } { + set fontsize $defTextFontSize + } + set newfontsize $fontsize + set font [list "$fontfamily" $fontsize] + set effects [lindex [lsearch -inline [set $rectangle] "effects *"] 1] + + .c create text $lx $ly -tags "rectangle $rectangle annotation" \ + -text $label -justify center -font "$font $effects" -fill $lcolor + + setNodeCanvas $rectangle $curcanvas + setType $rectangle "rectangle" +} + + +proc popupAnnotationDialog { c target modify } { + global $target newrect newoval + global width rad fontfamily fontsize + global defFillColor defTextColor defTextFontFamily defTextFontSize + + # do nothing, return, if coords are empty + if { $target == 0 \ + && [$c coords "$newrect"] == "" \ + && [$c coords "$newoval"] == "" } { + return + } + if { $target == 0 } { + set width 0 + set rad 25 + set coords [$c bbox "$newrect"] + if { [$c coords "$newrect"] == "" } { + set coords [$c bbox "$newoval"] + set annotationType "oval" + } else { + set annotationType "rectangle" + } + set fontfamily "" + set fontsize "" + set effects "" + set color "" + set label "" + set lcolor "" + set bordercolor "" + } else { + set width [lindex [lsearch -inline [set $target] "width *"] 1] + set rad [lindex [lsearch -inline [set $target] "rad *"] 1] + set coords [$c bbox "$target"] + set color [lindex [lsearch -inline [set $target] "color *"] 1] + set fontfamily [lindex [lsearch -inline [set $target] "fontfamily *"] 1] + set fontsize [lindex [lsearch -inline [set $target] "fontsize *"] 1] + set effects [lindex [lsearch -inline [set $target] "effects *"] 1] + + set label [lindex [lsearch -inline [set $target] "label *"] 1] + set lcolor [lindex [lsearch -inline [set $target] "labelcolor *"] 1] + set bordercolor [lindex [lsearch -inline [set $target] "border *"] 1] + set annotationType [nodeType $target] + } + + if { $color == "" } { + # Boeing: use default shape colors + if { $annotationType == "oval" } { + global defOvalColor + set color $defOvalColor + } elseif { $annotationType == "rectangle" } { + global defRectColor + set color $defRectColor + } else { + set color $defFillColor + } + } + if { $lcolor == "" } { set lcolor black } + if { $bordercolor == "" } { set bordercolor black } + if { $width == "" } { set width 0 } + if { $rad == "" } { set rad 25 } + if { $fontfamily == "" } { set fontfamily $defTextFontFamily } + if { $fontsize == "" } { set fontsize $defTextFontSize } + + set textBold 0 + set textItalic 0 + set textUnderline 0 + if { [lsearch $effects bold ] != -1} {set textBold 1} + if { [lsearch $effects italic ] != -1} {set textItalic 1} + if { [lsearch $effects underline ] != -1} {set textUnderline 1} + + set x1 [lindex $coords 0] + set y1 [lindex $coords 1] + set x2 [lindex $coords 2] + set y2 [lindex $coords 3] + set xx [expr {abs($x2 - $x1)}] + set yy [expr {abs($y2 - $y1)}] + if { $xx > $yy } { + set maxrad [expr $yy * 3.0 / 8.0] + } else { + set maxrad [expr $xx * 3.0 / 8.0] + } + + set wi .popup + catch {destroy $wi} + toplevel $wi + + wm transient $wi . 
+ wm resizable $wi 0 0 + + if { $modify == "true" } { + set windowtitle "Configure $annotationType $target" + } else { + set windowtitle "Add a new $annotationType" + } + wm title $wi $windowtitle + + frame $wi.text -relief groove -bd 2 + frame $wi.text.lab + label $wi.text.lab.name_label -text "Text for top of $annotationType:" + entry $wi.text.lab.name -bg white -fg $lcolor -width 32 \ + -validate focus -invcmd "focusAndFlash %W" + $wi.text.lab.name insert 0 $label + pack $wi.text.lab.name_label $wi.text.lab.name -side left -anchor w \ + -padx 2 -pady 2 -fill x + pack $wi.text.lab -side top -fill x + + frame $wi.text.format + + set fontmenu [tk_optionMenu $wi.text.format.fontmenu fontfamily "$fontfamily"] + set sizemenu [tk_optionMenu $wi.text.format.fontsize fontsize "$fontsize"] + + + # color selection + if { $color == "" } { + set color $defTextColor + } + button $wi.text.format.fg -text "Text color" -command \ + "popupColor fg $wi.text.lab.name false" + checkbutton $wi.text.format.bold -text "Bold" -variable textBold \ + -command [list fontupdate $wi.text.lab.name bold] + checkbutton $wi.text.format.italic -text "Italic" -variable textItalic \ + -command [list fontupdate $wi.text.lab.name italic] + checkbutton $wi.text.format.underline -text "Underline" \ + -variable textUnderline \ + -command [list fontupdate $wi.text.lab.name underline] + + if {$textBold == 1} { $wi.text.format.bold select + } else { $wi.text.format.bold deselect } + if {$textItalic == 1} { $wi.text.format.italic select + } else { $wi.text.format.italic deselect } + if {$textUnderline == 1} { $wi.text.format.underline select + } else { $wi.text.format.underline deselect } + + pack $wi.text.format.fontmenu \ + $wi.text.format.fontsize \ + $wi.text.format.fg \ + $wi.text.format.bold \ + $wi.text.format.italic \ + $wi.text.format.underline \ + -side left -pady 2 + + pack $wi.text.format -side top -fill x + + pack $wi.text -side top -fill x + + fontupdate $wi.text.lab.name fontfamily $fontfamily + fontupdate $wi.text.lab.name fontsize $fontsize + + $fontmenu delete 0 + foreach f [lsort -dictionary [font families]] { + $fontmenu add radiobutton -value "$f" -label $f \ + -variable fontfamily \ + -command [list fontupdate $wi.text.lab.name fontfamily $f] + } + + $sizemenu delete 0 + foreach f {8 9 10 11 12 14 16 18 20 22 24 26 28 36 48 72} { + $sizemenu add radiobutton -value "$f" -label $f \ + -variable fontsize \ + -command [list fontupdate $wi.text.lab.name fontsize $f] + } + +if { "$annotationType" == "rectangle" || "$annotationType" == "oval" } { + + # fill color, border color + frame $wi.colors -relief groove -bd 2 + # color selection controls + label $wi.colors.label -text "Fill color:" + + label $wi.colors.color -text $color -width 8 \ + -bg $color -fg $lcolor + button $wi.colors.bg -text "Color" -command \ + "popupColor bg $wi.colors.color true" + pack $wi.colors.label $wi.colors.color $wi.colors.bg \ + -side left -padx 2 -pady 2 -anchor w -fill x + pack $wi.colors -side top -fill x + + # border selection controls + frame $wi.border -relief groove -bd 2 + label $wi.border.label -text "Border color:" + label $wi.border.color -text $bordercolor -width 8 \ + -bg $color -fg $bordercolor + label $wi.border.width_label -text "Border width:" + set widthMenu [tk_optionMenu $wi.border.width width "$width"] + $widthMenu delete 0 + foreach f {0 1 2 3 4 5 6 7 8 9 10} { + $widthMenu add radiobutton -value $f -label $f \ + -variable width + } + button $wi.border.fg -text "Color" -command \ + "popupColor fg $wi.border.color 
true" + pack $wi.border.label $wi.border.color $wi.border.fg \ + $wi.border.width_label $wi.border.width \ + $wi.border.fg $wi.border.color $wi.border.label \ + -side left -padx 2 -pady 2 -anchor w -fill x + pack $wi.border -side top -fill x + +} + +if { $annotationType == "rectangle" } { + frame $wi.radius -relief groove -bd 2 + scale $wi.radius.rad -from 0 -to [expr int($maxrad)] \ + -length 400 -variable rad \ + -orient horizontal -label "Radius of the bend at the corners: " \ + -tickinterval [expr int($maxrad / 15) + 1] -showvalue true + pack $wi.radius.rad -side left -padx 2 -pady 2 -anchor w -fill x + pack $wi.radius -side top -fill x +} + + # Add new oval or modify old one? + if { $modify == "true" } { + set cancelcmd "destroy $wi" + set applytext "Modify $annotationType" + } else { + set cancelcmd "destroy $wi; destroyNewRect $c" + set applytext "Add $annotationType" + } + + frame $wi.butt -borderwidth 6 + button $wi.butt.apply -text $applytext -command "popupAnnotationApply $c $wi $target $annotationType" + + button $wi.butt.cancel -text "Cancel" -command $cancelcmd + bind $wi "$cancelcmd" + bind $wi "popupAnnotationApply $c $wi $target $annotationType" + pack $wi.butt.cancel $wi.butt.apply -side right + pack $wi.butt -side bottom + + after 100 { + grab .popup + } + return +} + +# helper for popupOvalDialog and popupOvalApply +proc destroyNewRect { c } { + global newrect + $c delete -withtags newrect + set newrect "" +} + + +proc popupAnnotationApply { c wi target type } { + global newrect newoval annotation_list + global $target + global changed + global width rad + global fontfamily fontsize textBold textItalic textUnderline + + # attributes + set caption [string trim [$wi.text.lab.name get]] + set labelcolor [$wi.text.lab.name cget -fg] + set coords [$c coords "$target"] + set iconcoords "iconcoords" + + if {"$type" == "rectangle" || "$type" == "oval" } { + set color [$wi.colors.color cget -text] + set bordercolor [$wi.border.color cget -text] + } + + if { $target == 0 } { + # Create a new annotation object + set target [newObjectId annotation] + global $target + lappend annotation_list $target + if {"$type" == "rectangle" } { + set coords [$c coords $newrect] + } elseif { "$type" == "oval" } { + set coords [$c coords $newoval] + } + } else { + set coords [getNodeCoords $target] + } + set $target {} + lappend $iconcoords $coords + lappend $target $iconcoords "label {$caption}" "labelcolor $labelcolor" \ + "fontfamily {$fontfamily}" "fontsize $fontsize" + if {"$type" == "rectangle" || "$type" == "oval" } { + lappend $target "color $color" "width $width" "border $bordercolor" + } + if {"$type" == "rectangle" } { + lappend $target "rad $rad" + } + + set ef {} + if {"$textBold" == 1} { lappend ef bold} + if {"$textItalic" == 1} { lappend ef italic} + if {"$textUnderline" == 1} { lappend ef underline} + if {"$ef" != ""} { lappend $target "effects {$ef}"} + + # draw it + if { $type == "rectangle" } { + drawRect $target + destroyNewRect $c + } elseif { $type == "oval" } { + drawOval $target + destroyNewoval $c + } elseif { $type == "text" } { + drawText $target + } + + set changed 1 + updateUndoLog + redrawAll + destroy $wi +} + +proc selectmarkEnter {c x y} { + set isThruplot false + + if {$c == ".c"} { + set obj [lindex [$c gettags current] 1] + set type [nodeType $obj] + if {$type != "oval" && $type != "rectangle"} { return } + } else { + set obj $c + set c .c + set isThruplot true + } + set bbox [$c bbox $obj] + + set x1 [lindex $bbox 0] + set y1 [lindex $bbox 1] + set x2 [lindex 
$bbox 2] + set y2 [lindex $bbox 3] + + if {$isThruplot == true} { + set x [expr $x+$x1] + set y [expr $y+$y1] + + } + set l 0 ;# left + set r 0 ;# right + set u 0 ;# up + set d 0 ;# down + + set x [$c canvasx $x] + set y [$c canvasy $y] + + if { $x < [expr $x1+($x2-$x1)/8.0]} { set l 1 } + if { $x > [expr $x2-($x2-$x1)/8.0]} { set r 1 } + if { $y < [expr $y1+($y2-$y1)/8.0]} { set u 1 } + if { $y > [expr $y2-($y2-$y1)/8.0]} { set d 1 } + + if {$l==1} { + if {$u==1} { + $c config -cursor top_left_corner + } elseif {$d==1} { + $c config -cursor bottom_left_corner + } else { + $c config -cursor left_side + } + } elseif {$r==1} { + if {$u==1} { + $c config -cursor top_right_corner + } elseif {$d==1} { + $c config -cursor bottom_right_corner + } else { + $c config -cursor right_side + } + } elseif {$u==1} { + $c config -cursor top_side + } elseif {$d==1} { + $c config -cursor bottom_side + } else { + $c config -cursor left_ptr + } +} + +proc selectmarkLeave {c x y} { + global thruplotResize + .bottom.textbox config -text {} + + # cursor options for thruplot resize + if {$thruplotResize == true} { + + } else { + # no resize update cursor + $c config -cursor left_ptr + } +} + + +proc textEnter { c x y } { + global annotation_list + global curcanvas + + set object [newObjectId annotation] + set newtext [$c create text $x $y -text "" \ + -anchor w -justify left -tags "text $object annotation"] + + set coords [$c coords "text && $object"] + set iconcoords "iconcoords" + + global $object + set $object {} + setType $object "text" + lappend $iconcoords $coords + lappend $object $iconcoords + lappend $object "label {}" + setNodeCanvas $object $curcanvas + + lappend annotation_list $object + popupAnnotationDialog $c $object "false" +} + + +proc drawText {text} { + global $text defTextColor defTextFont defTextFontFamily defTextFontSize + global zoom curcanvas newfontsize + + set coords [getNodeCoords $text] + if { [llength $coords] < 2 } { + puts "Bad coordinates for text $text" + return + } + set x [expr {[lindex $coords 0] * $zoom}] + set y [expr {[lindex $coords 1] * $zoom}] + set color [lindex [lsearch -inline [set $text] "labelcolor *"] 1] + if { $color == "" } { + set color $defTextColor + } + set label [lindex [lsearch -inline [set $text] "label *"] 1] + set fontfamily [lindex [lsearch -inline [set $text] "fontfamily *"] 1] + set fontsize [lindex [lsearch -inline [set $text] "fontsize *"] 1] + if { $fontfamily == "" } { + set fontfamily $defTextFontFamily + } + if { $fontsize == "" } { + set fontsize $defTextFontSize + } + set newfontsize $fontsize + set font [list "$fontfamily" $fontsize] + set effects [lindex [lsearch -inline [set $text] "effects *"] 1] + set newtext [.c create text $x $y -text $label -anchor w \ + -font "$font $effects" -justify left -fill $color \ + -tags "text $text annotation"] + + .c addtag text withtag $newtext + .c raise $text background + setNodeCanvas $text $curcanvas + setType $text "text" +} + + +proc fontupdate { label type args} { + global fontfamily fontsize + global textBold textItalic textUnderline + + if {"$textBold" == 1} {set bold "bold"} else {set bold {} } + if {"$textItalic"} {set italic "italic"} else {set italic {} } + if {"$textUnderline"} {set underline "underline"} else {set underline {} } + switch $type { + fontsize { + set fontsize $args + } + fontfamily { + set fontfamily "$args" + } + } + set f [list "$fontfamily" $fontsize] + lappend f "$bold $italic $underline" + $label configure -font "$f" +} + + +proc drawAnnotation { obj } { + switch -exact -- 
[nodeType $obj] { + oval { + drawOval $obj + } + rectangle { + drawRect $obj + } + text { + drawText $obj + } + } +} + +# shift annotation coordinates by dx, dy; does not redraw the annotation +proc moveAnnotation { obj dx dy } { + set coords [getNodeCoords $obj] + lassign $coords x1 y1 x2 y2 + set pt1 "[expr {$x1 + $dx}] [expr {$y1 + $dy}]" + if { [nodeType $obj] == "text" } { + # shift one point + setNodeCoords $obj $pt1 + } else { ;# oval/rectangle + # shift two points + set pt2 "[expr {$x2 + $dx}] [expr {$y2 + $dy}]" + setNodeCoords $obj "$pt1 $pt2" + } +} diff --git a/gui/api.tcl b/gui/api.tcl new file mode 100644 index 00000000..310e5ddc --- /dev/null +++ b/gui/api.tcl @@ -0,0 +1,3297 @@ +# version of the API document that is used +set CORE_API_VERSION 1.23 + +set DEFAULT_API_PORT 4038 +set g_api_exec_num 100; # starting execution number + +# set scale for X/Y coordinate translation +set XSCALE 1.0 +set YSCALE 1.0 +set XOFFSET 0 +set YOFFSET 0 + +# current session; 0 is a new session +set g_current_session 0 +set g_session_dialog_hint 1 + +# this is an array of lists, with one array entry for each widget or callback, +# and the entry is a list of execution numbers (for matching replies with +# requests) +array set g_execRequests { shell "" observer "" } + +# for a simulator, uncomment this line or cut/paste into debugger: +# set XSCALE 4.0; set YSCALE 4.0; set XOFFSET 1800; set YOFFSET 300 + +array set nodetypes { 0 def 1 phys 2 tbd 3 tbd 4 lanswitch 5 hub \ + 6 wlan 7 rj45 8 tunnel 9 ktunnel 10 emane } + +array set regtypes { wl 1 mob 2 util 3 exec 4 gui 5 emul 6 } +array set regntypes { 1 wl 2 mob 3 util 4 exec 5 gui 6 emul 7 relay 10 session } +array set regtxttypes { wl "Wireless Module" mob "Mobility Module" \ + util "Utility Module" exec "Execution Server" \ + gui "Graphical User Interface" emul "Emulation Server" \ + relay "Relay" } +set DEFAULT_GUI_REG "gui core_2d_gui" +array set eventtypes { definition_state 1 configuration_state 2 \ + instantiation_state 3 runtime_state 4 \ + datacollect_state 5 shutdown_state 6 \ + event_start 7 event_stop 8 event_pause 9 \ + event_restart 10 file_open 11 file_save 12 \ + event_scheduled 31 } + +set CORE_STATES \ + "NONE DEFINITION CONFIGURATION INSTANTIATION RUNTIME DATACOLLECT SHUTDOWN" + +set EXCEPTION_LEVELS \ + "NONE FATAL ERROR WARNING NOTICE" + +# Event handler invoked for each message received by peer +proc receiveMessage { channel } { + global curcanvas showAPI + set prmsg $showAPI + set type 0 + set flags 0 + set len 0 + set seq 0 + + #puts "API receive data." + # disable the fileevent here, then reinstall the handler at the end + fileevent $channel readable "" + # channel closed + if { [eof $channel] } { + resetChannel channel 1 + return + } + + # + # read first four bytes of message header + set more_data 1 + while { $more_data == 1 } { + if { [catch { set bytes [read $channel 4] } e] } { + # in tcl8.6 this occurs during shutdown + #puts "channel closed: $e" + break; + } + if { [fblocked $channel] == 1} { + # 4 bytes not available yet + break; + } elseif { [eof $channel] } { + resetChannel channel 1 + break; + } elseif { [string bytelength $bytes] == 0 } { + # zero bytes read - parseMessageHeader would fail + break; + } + # parse type/flags/length + if { [parseMessageHeader $bytes type flags len] < 0 } { + # Message header error + break; + } + # read message data of specified length + set bytes [read $channel $len] + #if { $prmsg== 1} { + # puts "read $len bytes (type=$type, flags=$flags, len=$len)..." 
+ #} + # handle each message type + switch -exact -- "$type" { + 1 { parseNodeMessage $bytes $len $flags } + 2 { parseLinkMessage $bytes $len $flags } + 3 { parseExecMessage $bytes $len $flags $channel } + 4 { parseRegMessage $bytes $len $flags $channel } + 5 { parseConfMessage $bytes $len $flags $channel } + 6 { parseFileMessage $bytes $len $flags $channel } + 8 { parseEventMessage $bytes $len $flags $channel } + 9 { parseSessionMessage $bytes $len $flags $channel } + 10 { parseExceptionMessage $bytes $len $flags $channel; + #7 { parseIfaceMessage $bytes $len $flags $channel } + # + } + default { puts "Unknown Message = $type" } + } + # end switch + } + # end while + + # update the canvas + catch { + # this messes up widgets + #raiseAll .c + .c config -cursor left_ptr ;# otherwise we have hourglass/pirate + update + } + + if {$channel != -1 } { + resetChannel channel 0 + } +} + +# +# Open an API socket to the specified server:port, prompt user for retry +# if specified; set the readable file event and parameters; +# returns the channel name or -1 on error. +# +proc openAPIChannel { server port retry } { + # use default values (localhost:4038) when none specified + if { $server == "" || $server == 0 } { + set server "localhost" + } + if { $port == 0 } { + global DEFAULT_API_PORT + set port $DEFAULT_API_PORT + } + + # loop when retry is true + set s -1 + while { $s < 0 } { + # TODO: fix this to remove lengthy timeout periods... + # (need to convert all channel I/O to use async channel) + # vwait doesn't work here, blocks on socket call + #puts "Connecting to $server:$port..."; # verbose + set svcstart [getServiceStartString] + set e "This feature requires a connection to the CORE daemon.\n" + set e "$e\nFailed to connect to $server:$port!\n" + set e "$e\nHave you started the CORE daemon with" + set e "$e '$svcstart'?" + if { [catch {set s [socket $server $port]} ex] } { + puts "\n$e\n (Error: $ex)" + set s -1 + if { ! $retry } { return $s; }; # error, don't retry + } + if { $s > 0 } { puts "connected." }; # verbose + if { $retry } {; # prompt user with retry dialog + if { $s < 0 } { + set choice [tk_dialog .connect "Error" $e \ + error 0 Retry "Start daemon..." Cancel] + if { $choice == 2 } { return $s } ;# cancel + if { $choice == 1 } { + set sudocmd "gksudo" + set cmd "core-daemon -d" + if { [catch {exec $sudocmd $cmd & } e] } { + puts "Error running '$sudocmd $cmd'!" + } + after 300 ;# allow time for daemon to start + } + # fall through for retry... 
+ } + } + }; # end while + + # now we have a valid socket, set up encoding and receive event + fconfigure $s -blocking 0 -encoding binary -translation { binary binary } \ + -buffering full -buffersize 4096 + fileevent $s readable [list receiveMessage $s] + return $s +} + +# +# Reinstall the receiveMessage event handler +# +proc resetChannel { channel_ptr close } { + upvar 1 $channel_ptr channel + if {$close == 1} { + close $channel + pluginChannelClosed $channel + set $channel -1 + } + if { [catch { fileevent $channel readable \ + [list receiveMessage $channel] } ] } { + # may print error here + } +} + +# +# Catch errors when flushing sockets +# +proc flushChannel { channel_ptr msg } { + upvar 1 $channel_ptr channel + if { [catch { flush $channel } err] } { + puts "*** $msg: $err" + set channel -1 + return -1 + } + return 0 +} + + +# +# CORE message header +# +proc parseMessageHeader { bytes type flags len } { + # variables are passed by reference + upvar 1 $type mytype + upvar 1 $flags myflags + upvar 1 $len mylen + + # + # read the four-byte message header + # + if { [binary scan $bytes ccS mytype myflags mylen] != 3 } { + puts "*** warning: message header error" + return -1 + } else { + set mytype [expr {$mytype & 0xFF}]; # convert signed to unsigned + set myflags [expr {$myflags & 0xFF}] + if { $mylen == 0 } { + puts "*** warning: zero length message header!" + # empty the channel + #set bytes [read $channel] + return -1 + } + } + return 0 +} + + +# +# CORE API Node message TLVs +# +proc parseNodeMessage { data len flags } { + global node_list curcanvas c router eid showAPI nodetypes CORE_DATA_DIR + global XSCALE YSCALE XOFFSET YOFFSET deployCfgAPI_lock + #puts "Parsing node message of length=$len, flags=$flags" + set prmsg $showAPI + set current 0 + + array set typenames { 1 num 2 type 3 name 4 ipv4_addr 5 mac_addr \ + 6 ipv6_addr 7 model 8 emulsrv 10 session \ + 32 xpos 33 ypos 34 canv \ + 35 emuid 36 netid 37 services \ + 48 lat 49 long 50 alt \ + 66 icon 80 opaque } + array set typesizes { num 4 type 4 name -1 ipv4_addr 4 ipv6_addr 16 \ + mac_addr 8 model -1 emulsrv -1 session -1 \ + xpos 2 ypos 2 canv 2 emuid 4 \ + netid 4 services -1 lat 4 long 4 alt 4 \ + icon -1 opaque -1 } + array set vals { num 0 type 0 name "" ipv4_addr -1 ipv6_addr -1 \ + mac_addr -1 model "" emulsrv "" session "" \ + xpos 0 ypos 0 canv "" \ + emuid -1 netid -1 services "" \ + lat 0 long 0 alt 0 \ + icon "" opaque "" } + + if { $prmsg==1 } { puts -nonewline "NODE(flags=$flags," } + + # + # TLV parsing + # + while { $current < $len } { + # TLV header + if { [binary scan $data @${current}cc type length] != 2 } { + puts "TLV header error" + break + } + set length [expr {$length & 0xFF}]; # convert signed to unsigned + if { $length == 0 } {; # prevent endless looping + if { $type == 0 } { puts -nonewline "(extra padding)"; break + } else { puts "Found zero-length TLV for type=$type, dropping."; + break } + } + set pad [pad_32bit $length] + # verbose debugging + #puts "tlv type=$type length=$length pad=$pad current=$current" + incr current 2 + + if {![info exists typenames($type)] } { ;# unknown TLV type + if { $prmsg } { puts -nonewline "unknown=$type," } + incr current $length + continue + } + set typename $typenames($type) + set size $typesizes($typename) + # 32-bit and 64-bit vals pre-padded + if { $size == 4 || $size == 8 } { incr current $pad } + # read TLV data depending on size + switch -exact -- "$size" { + 2 { binary scan $data @${current}S vals($typename) } + 4 { binary scan $data @${current}I 
vals($typename) } + 8 { binary scan $data @${current}W vals($typename) } + 16 { binary scan $data @${current}c16 vals($typename) } + -1 { binary scan $data @${current}a${length} vals($typename) } + } + if { $size == -1 } { incr current $pad } ;# string vals post-padded + if { $type == 6 } { incr current $pad } ;# 128-bit vals post-padded + incr current $length + # special handling of data here + switch -exact -- "$typename" { + ipv4_addr { array set vals [list $typename \ + [ipv4ToString $vals($typename)] ] } + mac_addr { array set vals [list $typename \ + [macToString $vals($typename)] ] } + ipv6_addr { array set vals [list $typename \ + [ipv6ToString $vals($typename)] ] } + xpos { array set vals [list $typename \ + [expr { ($vals($typename) * $XSCALE) - $XOFFSET }] ] } + ypos { array set vals [list $typename \ + [expr { ($vals($typename) * $YSCALE) - $YOFFSET }] ] } + } + if { $prmsg } { puts -nonewline "$typename=$vals($typename)," } + } + + if { $prmsg } { puts ") "} + + # + # Execution + # + # TODO: enforce message parameters here + if { ![info exists nodetypes($vals(type))] } { + puts "NODE: invalid node type ($vals(type)), dropping"; return + } + set node "n$vals(num)" + set node_id "$eid\_$node" + if { [lsearch $node_list $node] == -1 } {; # check for node existance + set exists false + } else { + set exists true + } + + if { $vals(name) == "" } {; # make sure there is a node name + set name $node + if { $exists } { set name [getNodeName $node] } + array set vals [list name $name] + } + if { $exists } { + if { $flags == 1 } { + puts "Node add msg but node ($node) already exists, dropping." + return + } + } elseif { $flags != 1 } { + puts -nonewline "Node modify/delete message but node ($node) does " + puts "not exist dropping." + return + } + if { $vals(icon) != "" } { + set icon $vals(icon) + if { [file pathtype $icon] == "relative" } { + set icon "$CORE_DATA_DIR/icons/normal/$icon" + } + if { ![file exists $icon ] } { + puts "Node icon '$vals(icon)' does not exist." + array set vals [list icon ""] + } else { + array set vals [list icon $icon] + } + } + global $node + + set wlans_needing_update { } + if { $vals(emuid) != -1 } { + # For Linux populate ngnodeidmap for later use with wireless; it is treated as + # a hex value string (without the leading "0x") + global ngnodeidmap + foreach wlan [findWlanNodes $node] { + if { ![info exists ngnodeidmap($eid\_$wlan)] } { + set netid [string range $wlan 1 end] + set emulation_type [lindex [getEmulPlugin $node] 1] + # TODO: verify that this incr 1000 is for OpenVZ + if { $emulation_type == "openvz" } { incr netid 1000 } + set ngnodeidmap($eid\_$wlan) [format "%x" $netid] + } + if { ![info exists ngnodeidmap($eid\_$wlan-$node)] } { + set ngnodeidmap($eid\_$wlan-$node) [format "%x" $vals(emuid)] + lappend wlans_needing_update $wlan + } + } ;# end foreach wlan + } + + # local flags: informational message that node was added or deleted + if {[expr {$flags & 0x8}]} { + if { ![info exists c] } { return } + if {[expr {$flags & 0x1}] } { ;# add flag + nodeHighlights $c $node on green + after 3000 "nodeHighlights .c $node off green" + } elseif {[expr {$flags & 0x2}] } { ;# delete flag + nodeHighlights $c $node on black + after 3000 "nodeHighlights .c $node off black" + } + # note: we may want to save other data passed in this message here + # rather than just returning... 
+ return + } + # now we have all the information about this node + switch -exact -- "$flags" { + 0 { apiNodeModify $node vals } + 1 { apiNodeCreate $node vals } + 2 { apiNodeDelete $node } + default { puts "NODE: unsupported flags ($flags)"; return } + } +} + +# +# modify a node +# +proc apiNodeModify { node vals_ref } { + global c eid zoom curcanvas + upvar $vals_ref vals + if { ![info exists c] } { return } ;# batch mode + set draw 0 + if { $vals(icon) != "" } { + setCustomImage $node $vals(icon) + set draw 1 + } + # move the node and its links + if {$vals(xpos) != 0 && $vals(ypos) != 0} { + moveNodeAbs $c $node [expr {$zoom * $vals(xpos)}] \ + [expr {$zoom * $vals(ypos)}] + } + if { $vals(name) != "" } { + setNodeName $node $vals(name) + set draw 1 + } + if { $vals(services) != "" } { + set services [split $vals(services) |] + setNodeServices $node $services + } + # TODO: handle other optional on-screen data + # lat, long, alt, heading, platform type, platform id + if { $draw && [getNodeCanvas $node] == $curcanvas } { + .c delete withtag "node && $node" + .c delete withtag "nodelabel && $node" + drawNode .c $node + } +} + +# +# add a node +# +proc apiNodeCreate { node vals_ref } { + global $node nodetypes node_list canvas_list curcanvas eid + upvar $vals_ref vals + + # create GUI object + set nodetype $nodetypes($vals(type)) + set nodename $vals(name) + if { $nodetype == "emane" } { set nodetype "wlan" } ;# special case - EMANE + if { $nodetype == "def" } { set nodetype "router" } + newNode [list $nodetype $node] ;# use node number supplied from API message + setNodeName $node $nodename + if { $vals(canv) == "" } { + setNodeCanvas $node $curcanvas + } else { + set canv $vals(canv) + if { ![string is integer $canv] || $canv < 0 || $canv > 100} { + puts "warning: invalid canvas '$canv' in Node message!" + return + } + set canv "c$canv" + if { [lsearch $canvas_list $canv] < 0 && $canv == "c0" } { + # special case -- support old imn files with Canvas0 + global $canv + lappend canvas_list $canv + set $canv {} + setCanvasName $canv "Canvas0" + set curcanvas $canv + switchCanvas none + } else { + while { [lsearch $canvas_list $canv] < 0 } { + set canvnew [newCanvas ""] + switchCanvas none ;# redraw canvas tabs + } + } + setNodeCanvas $node $canv + } + setNodeCoords $node "$vals(xpos) $vals(ypos)" + lassign [getDefaultLabelOffsets [nodeType $node]] dx dy + setNodeLabelCoords $node "[expr $vals(xpos) + $dx] [expr $vals(ypos) + $dy]" + setNodeLocation $node $vals(emulsrv) + if { $vals(icon) != "" } { + setCustomImage $node $vals(icon) + } + drawNode .c $node + + set model $vals(model) + if { $model != "" && $vals(type) < 4} { + # set model only for (0 def 1 phys 2 tbd 3 tbd) 4 lanswitch + setNodeModel $node $model + if { [lsearch -exact [getNodeTypeNames] $model] == -1 } { + puts "warning: unknown node type '$model' in Node message!" 
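+            # the warning above is informational only; the node has already
+            # been drawn and keeps the supplied model name, so a session with
+            # an unrecognized model can still be loaded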
+ } + } + if { $vals(services) != "" } { + set services [split $vals(services) |] + setNodeServices $node $services + } + + if { $vals(type) == 7 } { ;# RJ45 node - used later to control linking + netconfInsertSection $node [list model $vals(model)] + } elseif { $vals(type) == 10 } { ;# EMANE node + set section [list mobmodel coreapi ""] + netconfInsertSection $node $section + #set sock [lindex [getEmulPlugin $node] 2] + #sendConfRequestMessage $sock $node "all" 0x1 -1 "" + } elseif { $vals(type) == 6 } { ;# WLAN node + if { $vals(opaque) != "" } { + # treat opaque as a list to accomodate other data + set i [lsearch $vals(opaque) "range=*"] + if { $i != -1 } { + set range [lindex $vals(opaque) $i] + setNodeRange $node [lindex [split $range =] 1] + } + } + } +} + +# +# delete a node +# +proc apiNodeDelete { node } { + removeGUINode $node +} + +# +# CORE API Link message TLVs +# +proc parseLinkMessage { data len flags } { + global router def_router_model eid + global link_list node_list ngnodeidmap ngnodeidrmap showAPI execMode + set prmsg $showAPI + set current 0 + set c .c + #puts "Parsing link message of length=$len, flags=$flags" + + array set typenames { 1 node1num 2 node2num 3 delay 4 bw 5 per \ + 6 dup 7 jitter 8 mer 9 burst 10 session \ + 16 mburst 32 ltype 33 guiattr 34 uni \ + 35 emuid1 36 netid 37 key \ + 48 if1num 49 if1ipv4 50 if1ipv4mask 51 if1mac \ + 52 if1ipv6 53 if1ipv6mask \ + 54 if2num 55 if2ipv4 56 if2ipv4mask 57 if2mac \ + 64 if2ipv6 65 if2ipv6mask } + array set typesizes { node1num 4 node2num 4 delay 8 bw 8 per -1 \ + dup -1 jitter 8 mer 2 burst 2 session -1 \ + mburst 2 ltype 4 guiattr -1 uni 2 \ + emuid1 4 netid 4 key 4 \ + if1num 2 if1ipv4 4 if1ipv4mask 2 if1mac 8 \ + if1ipv6 16 if1ipv6mask 2 \ + if2num 2 if2ipv4 4 if2ipv4mask 2 if2mac 8 \ + if2ipv6 16 if2ipv6mask 2 } + array set vals { node1num -1 node2num -1 delay 0 bw 0 per "" \ + dup "" jitter 0 mer 0 burst 0 session "" \ + mburst 0 ltype 0 guiattr "" uni 0 \ + emuid1 -1 netid -1 key -1 \ + if1num -1 if1ipv4 -1 if1ipv4mask 24 if1mac -1 \ + if1ipv6 -1 if1ipv6mask 64 \ + if2num -1 if2ipv4 -1 if2ipv4mask 24 if2mac -1 \ + if2ipv6 -1 if2ipv6mask 64 } + set emuid1 -1 + + if { $prmsg==1 } { puts -nonewline "LINK(flags=$flags," } + + # + # TLV parsing + # + while { $current < $len } { + # TLV header + if { [binary scan $data @${current}cc type length] != 2 } { + puts "TLV header error" + break + } + set length [expr {$length & 0xFF}]; # convert signed to unsigned + if { $length == 0 } {; # prevent endless looping + if { $type == 0 } { puts -nonewline "(extra padding)"; break + } else { puts "Found zero-length TLV for type=$type, dropping."; + break } + } + set pad [pad_32bit $length] + # verbose debugging + #puts "tlv type=$type length=$length pad=$pad current=$current" + incr current 2 + + if {![info exists typenames($type)] } { ;# unknown TLV type + if { $prmsg } { puts -nonewline "unknown=$type," } + incr current $length + continue + } + set typename $typenames($type) + set size $typesizes($typename) + # 32-bit and 64-bit vals pre-padded + if { $size == 4 || $size == 8} { incr current $pad } + # read TLV data depending on size + switch -exact -- "$size" { + 2 { binary scan $data @${current}S vals($typename) } + 4 { binary scan $data @${current}I vals($typename) } + 8 { binary scan $data @${current}W vals($typename) } + 16 { binary scan $data @${current}c16 vals($typename) } + -1 { binary scan $data @${current}a${length} vals($typename) } + } + incr current $length + # special handling of data here + switch -exact -- 
"$typename" { + delay - + jitter { if { $vals($typename) > 2000000 } { + array set vals [list $typename 2000000] } } + bw { if { $vals($typename) > 1000000000 } { + array set vals [list $typename 0] } } + per { if { $vals($typename) > 100 } { + array set vals [list $typename 100] } } + dup { if { $vals($typename) > 50 } { + array set vals [list $typename 50] } } + emuid1 { if { $emuid1 == -1 } { + set emuid $vals($typename) + } else { ;# this sets emuid2 if we already have emuid1 + array set vals [list emuid2 $vals($typename) ] + array set vals [list emuid1 $emuid1 ] + } + } + if1ipv4 - + if2ipv4 { array set vals [list $typename \ + [ipv4ToString $vals($typename)] ] } + if1mac - + if2mac { array set vals [list $typename \ + [macToString $vals($typename)] ] } + if1ipv6 - + if2ipv6 { array set vals [list $typename \ + [ipv6ToString $vals($typename)] ] } + } + if { $prmsg } { puts -nonewline "$typename=$vals($typename)," } + if { $size == 16 } { incr current $pad } ;# 128-bit vals post-padded + if { $size == -1 } { incr current $pad } ;# string vals post-padded + } + + if { $prmsg == 1 } { puts ") " } + + # perform some sanity checking of the link message + if { $vals(node1num) == $vals(node2num) || \ + $vals(node1num) < 0 || $vals(node2num) < 0 } { + puts -nonewline "link message error - node1=$vals(node1num), " + puts "node2=$vals(node2num)" + return + } + + # convert node number to node and check for node existance + set node1 "n$vals(node1num)" + set node2 "n$vals(node2num)" + if { [lsearch $node_list $node1] == -1 || \ + [lsearch $node_list $node2] == -1 } { + puts "Node ($node1/$node2) in link message not found, dropping" + return + } + + # set IPv4 and IPv6 address if specified, otherwise may be automatic + set prefix1 [chooseIfName $node1 $node2] + set prefix2 [chooseIfName $node2 $node1] + foreach i "1 2" { + # set interface name/number + if { $vals(if${i}num) == -1 } { + set ifname [newIfc [set prefix${i}] [set node${i}]] + set prefixlen [string length [set prefix${i}]] + set if${i}num [string range $ifname $prefixlen end] + array set vals [list if${i}num [set if${i}num]] + } + set ifname [set prefix${i}]$vals(if${i}num) + array set vals [list if${i}name $ifname] + # record IPv4/IPv6 addresses for newGUILink + foreach j "4 6" { + if { $vals(if${i}ipv${j}) != -1 } { + setIfcIPv${j}addr [set node${i}] $ifname \ + $vals(if${i}ipv${j})/$vals(if${i}ipv${j}mask) + } + } + if { $vals(if${i}mac) != -1 } { + setIfcMacaddr [set node${i}] $ifname $vals(if${i}mac) + } + } + # adopt network address for WLAN (WLAN must be node 1) + if { [nodeType $node1] == "wlan" } { + set v4addr $vals(if2ipv4) + if { $v4addr != -1 } { + set v4net [ipv4ToNet $v4addr $vals(if2ipv4mask)] + setIfcIPv4addr $node1 wireless "$v4net/$vals(if2ipv4mask)" + } + set v6addr $vals(if2ipv6) + if { $v6addr != -1 } { + set v6net [ipv6ToNet $v6addr $vals(if2ipv6mask)] + setIfcIPv6addr $node1 wireless "${v6net}::0/$vals(if2ipv6mask)" + } + } + + if { $execMode == "batch" } { + return ;# no GUI to update in batch mode + } + # treat 100% loss as link delete + if { $flags == 0 && $vals(per) == 100 } { + apiLinkDelete $node1 $node2 vals + return + } + + # now we have all the information about this node + switch -exact -- "$flags" { + 0 { apiLinkAddModify $node1 $node2 vals 0 } + 1 { apiLinkAddModify $node1 $node2 vals 1 } + 2 { apiLinkDelete $node1 $node2 vals } + default { puts "LINK: unsupported flags ($flags)"; return } + } +} + +# +# add or modify a link +# if add flag is set, check if two nodes are part of same wlan, and do wlan 
+# linkage, or add a wired link; otherwise modify wired/wireless link with +# supplied parameters +proc apiLinkAddModify { node1 node2 vals_ref add } { + global eid defLinkWidth + set c .c + upvar $vals_ref vals + + if {$vals(key) > -1} { + if { [nodeType $node1] == "tunnel" } { + netconfInsertSection $node1 [list "tunnel-key" $vals(key)] + } + if { [nodeType $node2] == "tunnel" } { + netconfInsertSection $node2 [list "tunnel-key" $vals(key)] + } + } + + # look for a wired link in the link list + set wired_link [linkByPeers $node1 $node2] + if { $wired_link != "" && $add == 0 } { ;# wired link exists, modify it + #puts "modify wired link" + if { $vals(uni) == 1 } { ;# unidirectional link effects message + set peers [linkPeers $wired_link] + if { $node1 == [lindex $peers 0] } { ;# downstream n1 <-- n2 + set bw [list $vals(bw) [getLinkBandwidth $wired_link up]] + set delay [list $vals(delay) [getLinkDelay $wired_link up]] + set per [list $vals(per) [getLinkBER $wired_link up]] + set dup [list $vals(dup) [getLinkBER $wired_link up]] + set jitter [list $vals(jitter) [getLinkJitter $wired_link up]] + } else { ;# upstream n1 --> n2 + set bw [list [getLinkBandwidth $wired_link] $vals(bw)] + set delay [list [getLinkDelay $wired_link] $vals(delay)] + set per [list [getLinkBER $wired_link] $vals(per)] + set dup [list [getLinkBER $wired_link] $vals(dup)] + set jitter [list $vals(jitter) [getLinkJitter $wired_link]] + } + setLinkBandwidth $wired_link $bw + setLinkDelay $wired_link $delay + setLinkBER $wired_link $per + setLinkDup $wired_link $dup + setLinkJitter $wired_link $jitter + } else { + setLinkBandwidth $wired_link $vals(bw) + setLinkDelay $wired_link $vals(delay) + setLinkBER $wired_link $vals(per) + setLinkDup $wired_link $vals(dup) + setLinkJitter $wired_link $vals(jitter) + } + updateLinkLabel $wired_link + updateLinkGuiAttr $wired_link $vals(guiattr) + return + # if add flag is set and a wired link already exists, assume wlan linkage + # special case: rj45 model=1 means link via wireless + } elseif {[nodeType $node1] == "rj45" || [nodeType $node2] == "rj45"} { + if { [nodeType $node1] == "rj45" } { + set rj45node $node1; set othernode $node2; + } else { set rj45node $node2; set othernode $node1; } + if { [netconfFetchSection $rj45node model] == 1 } { + set wlan [findWlanNodes $othernode] + if {$wlan != ""} {newGUILink $wlan $rj45node};# link rj4node to wlan + } + } + + # no wired link; determine if both nodes belong to the same wlan, and + # link them; otherwise add a wired link if add flag is set + set wlan $vals(netid) + if { $wlan < 0 } { + # WLAN not specified with netid, search for common WLAN + set wlans1 [findWlanNodes $node1] + set wlans2 [findWlanNodes $node2] + foreach w $wlans1 { + if { [lsearch -exact $wlans2 $w] < 0 } { continue } + set wlan $w + break + } + } + + if { $wlan < 0 } { ;# no common wlan + if {$add == 1} { ;# add flag was set - add a wired link + global g_newLink_ifhints + set g_newLink_ifhints [list $vals(if1name) $vals(if2name)] + newGUILink $node1 $node2 + if { [getNodeCanvas $node1] != [getNodeCanvas $node2] } { + set wired_link [linkByPeersMirror $node1 $node2] + } else { + set wired_link [linkByPeers $node1 $node2] + } + setLinkBandwidth $wired_link $vals(bw) + setLinkDelay $wired_link $vals(delay) + setLinkBER $wired_link $vals(per) + setLinkDup $wired_link $vals(dup) + setLinkJitter $wired_link $vals(jitter) + updateLinkLabel $wired_link + updateLinkGuiAttr $wired_link $vals(guiattr) + # adopt link effects for WLAN (WLAN must be node 1) + if { [nodeType 
$node1] == "wlan" } { + setLinkBandwidth $node1 $vals(bw) + setLinkDelay $node1 $vals(delay) + setLinkBER $node1 $vals(per) + } + return + } else { ;# modify link, but no wired link or common wlan! + puts -nonewline "link modify message received, but no wired link" + puts " or wlan for nodes $node1-$node2, dropping" + return + } + } + + set wlan "n$wlan" + drawWlanLink $node1 $node2 $wlan +} + +# +# delete a link +# +proc apiLinkDelete { node1 node2 vals_ref } { + global eid + upvar $vals_ref vals + set c .c + + # look for a wired link in the link list + set wired_link [linkByPeers $node1 $node2] + if { $wired_link != "" } { + removeGUILink $wired_link non-atomic + return + } + + set wlan $vals(netid) + if { $wlan < 0 } { + # WLAN not specified with netid, search for common WLAN + set wlans1 [findWlanNodes $node1] + set wlans2 [findWlanNodes $node2] + foreach w $wlans1 { + if { [lsearch -exact $wlans2 $w] < 0 } { continue } + set wlan $w + break + } + } + if { $wlan < 0 } { + puts "apiLinkDelete: no common WLAN!" + return + } + set wlan "n$wlan" + + # look for wireless link on the canvas, remove GUI object + $c delete -withtags "wlanlink && $node2 && $node1 && $wlan" + $c delete -withtags "linklabel && $node2 && $node1 && $wlan" +} + +# +# CORE API Execute message TLVs +# +proc parseExecMessage { data len flags channel } { + global node_list curcanvas c router eid showAPI + global XSCALE YSCALE XOFFSET YOFFSET + set prmsg $showAPI + set current 0 + + # set default values + set nodenum 0 + set execnum 0 + set exectime 0 + set execcmd "" + set execres "" + set execstatus 0 + set session "" + + if { $prmsg==1 } { puts -nonewline "EXEC(flags=$flags," } + + # parse each TLV + while { $current < $len } { + # TLV header + set typelength [parseTLVHeader $data current] + set type [lindex $typelength 0] + set length [lindex $typelength 1] + if { $length == 0 || $length == "" } { break } + set pad [pad_32bit $length] + # verbose debugging + #puts "exec tlv type=$type length=$length pad=$pad current=$current" + if { [expr {$current + $length + $pad}] > $len } { + puts "error with EXEC message length (len=$len, TLV length=$length)" + break + } + # TLV data + switch -exact -- "$type" { + 1 { + incr current $pad + binary scan $data @${current}I nodenum + if { $prmsg==1 } { puts -nonewline "node=$nodenum/" } + } + 2 { + incr current $pad + binary scan $data @${current}I execnum + if { $prmsg == 1} { puts -nonewline "exec=$execnum," } + } + 3 { + incr current $pad + binary scan $data @${current}I exectime + if { $prmsg == 1} { puts -nonewline "time=$exectime," } + } + 4 { + binary scan $data @${current}a${length} execcmd + if { $prmsg == 1} { puts -nonewline "cmd=$execcmd," } + incr current $pad + } + 5 { + binary scan $data @${current}a${length} execres + if { $prmsg == 1} { puts -nonewline "res=($length bytes)," } + incr current $pad + } + 6 { + incr current $pad + binary scan $data @${current}I execstatus + if { $prmsg == 1} { puts -nonewline "status=$execstatus," } + } + 10 { + binary scan $data @${current}a${length} session + if { $prmsg == 1} { puts -nonewline "session=$session," } + incr current $pad + } + default { + if { $prmsg == 1} { puts -nonewline "unknown=" } + if { $prmsg == 1} { puts -nonewline "$type," } + } + } + # end switch + + # advance current pointer + incr current $length + } + if { $prmsg == 1 } { puts ") "} + + set node "n$nodenum" + set node_id "$eid\_$node" + # check for node existance + if { [lsearch $node_list $node] == -1 } { + puts "Execute message but node ($node) does not 
exist, dropping." + return + } + global $node + + # Callback support - match execnum from response with original request, and + # invoke type-specific callback + global g_execRequests + foreach type [array names g_execRequests] { + set idx [lsearch $g_execRequests($type) $execnum] + if { $idx > -1 } { + set g_execRequests($type) \ + [lreplace $g_execRequests($type) $idx $idx] + exec_${type}_callback $node $execnum $execcmd $execres $execstatus + return + } + } +} + +# spawn interactive terminal +proc exec_shell_callback { node execnum execcmd execres execstatus } { + #puts "opening terminal for $node by running '$execres'" + set title "CORE: [getNodeName $node] (console)" + set term [get_term_prog false] + set xi [string first "xterm -e" $execres] + + # shell callback already has xterm command, launch it using user-defined + # term program (e.g. remote nodes 'ssh -X -f a.b.c.d xterm -e ...' + if { $xi > -1 } { + set execres [string replace $execres $xi [expr $xi+7] $term] + if { [catch {exec sh -c "$execres" & } ] } { + puts "Warning: failed to open terminal for $node" + } + return + # no xterm command; execute shell callback in a terminal (e.g. local nodes) + } elseif { \ + [catch {eval exec $term "$execres" & } ] } { + puts "Warning: failed to open terminal for $node: ($term $execres)" + } +} + + +# +# CORE API Register message TLVs +# parse register message into plugin capabilities +# +proc parseRegMessage { data len flags channel } { + global regntypes showAPI + set prmsg $showAPI + set current 0 + set str 0 + set session "" + set fnhint "" + + set plugin_cap_list {} ;# plugin capabilities list + + if { $prmsg==1 } { puts -nonewline "REG(flags=$flags," } + + # parse each TLV + while { $current < $len } { + # TLV header + if { [binary scan $data @${current}cc type length] != 2 } { + puts "TLV header error" + break + } + set length [expr {$length & 0xFF}]; # convert signed to unsigned + if { $length == 0 } { + # prevent endless looping + if { $type == 0 } { + puts -nonewline "(extra padding)" + break + } else { + puts "Found zero-length TLV for type=$type, dropping." + break + } + } + set pad [pad_32bit $length] + # verbose debugging + #puts "tlv type=$type length=$length pad=$pad current=$current" + incr current 2 + # TLV data + if { [info exists regntypes($type)] } { + set plugin_type $regntypes($type) + binary scan $data @${current}a${length} str + if { $prmsg == 1} { puts -nonewline "$plugin_type=$str," } + if { $type == 10 } { ;# session number + set session $str + } else { + lappend plugin_cap_list "$plugin_type=$str" + if { $plugin_type == "exec" } { set fnhint $str } + } + } else { + if { $prmsg == 1} { puts -nonewline "unknown($type)," } + } + incr current $pad + # end switch + + # advance current pointer + incr current $length + } + if { $prmsg == 1 } { puts ") "} + + # reg message with session number indicates the sid of a session that + # was just started from XML or Python script (via reg exec=scriptfile.py) + if { $session != "" } { + # The channel passed to here is soon after discarded for + # sessions that are started from XML or Python scripts. This causes + # an exception in the GUI when responding back to daemon if the + # response is sent after the channel has been destroyed. Setting + # the channel to -1 basically disables the GUI response to the daemon, + # but it turns out the daemon does not need the response anyway. 
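+    # note that $channel is this proc's local copy of the argument, so the
+    # assignment below only suppresses the reply sent via
+    # connectShutdownSession; the caller's socket itself is left untouched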
+ set channel -1 + # assume session string only contains one session number + connectShutdownSession connect $channel $session $fnhint + return + } + + set plugin [pluginByChannel $channel] + if { [setPluginCapList $plugin $plugin_cap_list] < 0 } { + return + } + + # callback to refresh any open dialogs this message may refresh + pluginsConfigRefreshCallback +} + +proc parseConfMessage { data len flags channel } { + global showAPI node_list MACHINE_TYPES + set prmsg $showAPI + set current 0 + set str 0 + set nodenum -1 + set obj "" + set tflags 0 + set types {} + set values {} + set captions {} + set bitmap {} + set possible_values {} + set groups {} + set opaque {} + set session "" + set netid -1 + + if { $prmsg==1 } { puts -nonewline "CONF(flags=$flags," } + + # parse each TLV + while { $current < $len } { + set typelength [parseTLVHeader $data current] + set type [lindex $typelength 0] + set length [lindex $typelength 1] + set pad [pad_32bit $length] + if { $length == 0 || $length == "" } { + # allow some zero-length string TLVs + if { $type < 5 || $type > 9 } { break } + } + # verbose debugging + #puts "tlv type=$type length=$length pad=$pad current=$current" + # TLV data + switch -exact -- "$type" { + 1 { + incr current $pad + binary scan $data @${current}I nodenum + if { $prmsg == 1} { puts -nonewline "node=$nodenum/" } + } + 2 { + binary scan $data @${current}a${length} obj + if { $prmsg == 1} { puts -nonewline "obj=$obj," } + incr current $pad + } + 3 { + binary scan $data @${current}S tflags + if { $prmsg == 1} { puts -nonewline "cflags=$tflags," } + } + 4 { + set type 0 + set types {} + if { $prmsg == 1} { puts -nonewline "types=" } + # number of 16-bit values + set types_len $length + # get each 16-bit type value, add to list + while {$types_len > 0} { + binary scan $data @${current}S type + if {$type > 0 && $type < 12} { + lappend types $type + if { $prmsg == 1} { puts -nonewline "$type/" } + } + incr current 2 + incr types_len -2 + } + if { $prmsg == 1} { puts -nonewline "," } + incr current -$length; # length incremented below + incr current $pad + } + 5 { + set values {} + binary scan $data @${current}a${length} vals + if { $prmsg == 1} { puts -nonewline "vals=$vals," } + set values [split $vals |] + incr current $pad + } + 6 { + set captions {} + binary scan $data @${current}a${length} capt + if { $prmsg == 1} { puts -nonewline "capt=$capt," } + set captions [split $capt |] + incr current $pad + } + 7 { + set bitmap {} + binary scan $data @${current}a${length} bitmap + if { $prmsg == 1} { puts -nonewline "bitmap," } + incr current $pad + } + 8 { + set possible_values {} + binary scan $data @${current}a${length} pvals + if { $prmsg == 1} { puts -nonewline "pvals=$pvals," } + set possible_values [split $pvals |] + incr current $pad + } + 9 { + set groups {} + binary scan $data @${current}a${length} groupsstr + if { $prmsg == 1} { puts -nonewline "groups=$groupsstr," } + set groups [split $groupsstr |] + incr current $pad + } + 10 { + binary scan $data @${current}a${length} session + if { $prmsg == 1} { puts -nonewline "session=$session," } + incr current $pad + } + 35 { + incr current $pad + binary scan $data @${current}I netid + if { $prmsg == 1} { puts -nonewline "netid=$netid/" } + } + 80 { + set opaque {} + binary scan $data @${current}a${length} opaquestr + if { $prmsg == 1} { puts -nonewline "opaque=$opaquestr," } + set opaque [split $opaquestr |] + incr current $pad + } + default { + if { $prmsg == 1} { puts -nonewline "unknown=" } + if { $prmsg == 1} { puts -nonewline 
"$type," } + } + } + # end switch + + # advance current pointer + incr current $length + } + + if { $prmsg == 1 } { puts ") "} + + set objs_ok [concat "services session metadata emane" $MACHINE_TYPES] + if { $nodenum > -1 } { + set node "n$nodenum" + } else { + set node "" + } + # check for node existance + if { [lsearch $node_list $node] == -1 } { + if { [lsearch $objs_ok $obj] < 0 } { + set msg "Configure message for $obj but node ($node) does" + set msg "$msg not exist, dropping." + puts $msg + return + } + } else { + global $node + } + + # for handling node services + # this could be improved, instead of checking for the hard-coded object + # "services" and opaque data for service customization + if { $obj == "services" } { + if { $tflags & 0x2 } { ;# update flag + if { $opaque != "" } { + set services [lindex [split $opaque ":"] 1] + set services [split $services ","] + customizeServiceValues n$nodenum $values $services + } + # TODO: save services config with the node + } elseif { $tflags & 0x1 } { ;# request flag + # TODO: something else + } else { + popupServicesConfig $channel n$nodenum $types $values $captions \ + $possible_values $groups $session + } + return + # metadata received upon XML file load + } elseif { $obj == "metadata" } { + parseMetaData $values + return + # session options received upon XML file load + } elseif { $obj == "session" && $tflags & 0x2 } { + setSessionOptions $types $values + return + } + # handle node machine-type profile + if { [lsearch $MACHINE_TYPES $obj] != -1 } { + if { $tflags == 0 } { + popupNodeProfileConfig $channel n$nodenum $obj $types $values \ + $captions $bitmap $possible_values $groups $session \ + $opaque + } else { + puts -nonewline "warning: received Configure message for profile " + puts "with unexpected flags!" + } + return + } + + # update the configuration for a node without displaying dialog box + if { $tflags & 0x2 } { + if { $obj == "emane" && $node == "" } { + set node [lindex [findWlanNodes ""] 0] + } + if { $node == "" } { + puts "ignoring Configure message for $obj with no node" + return + } + # this is similar to popupCapabilityConfigApply + setCustomConfig $node $obj $types $values 0 + if { $obj != "emane" && [nodeType $node] == "wlan"} { + set section [list mobmodel coreapi $obj] + netconfInsertSection $node $section + } + # configuration request - unhandled + } elseif { $tflags & 0x1 } { + # configuration response data from our request (from GUI plugin configure) + } else { + popupCapabilityConfig $channel n$nodenum $obj $types $values \ + $captions $bitmap $possible_values $groups + } +} + +# process metadata received from Conf Message when loading XML +proc parseMetaData { values } { + global canvas_list annotation_list execMode g_comments + + foreach value $values { + # data looks like this: "annotation a1={iconcoords {514.0 132.0...}}" + lassign [splitKeyValue $value] key object_config + lassign $key class object + # metadata with no object name e.g. 
comments="Comment text" + if { "$class" == "comments" } { + set g_comments $object_config + continue + } elseif { "$class" == "global_options" } { + foreach opt $object_config { + lassign [split $opt =] key value + setGlobalOption $key $value + } + continue + } + # metadata having class and object name + if {"$class" == "" || $object == ""} { + puts "warning: invalid metadata value '$value'" + } + if { "$class" == "canvas" } { + if { [lsearch $canvas_list $object] < 0 } { + lappend canvas_list $object + } + } elseif { "$class" == "annotation" } { + if { [lsearch $annotation_list $object] < 0 } { + lappend annotation_list $object + } + } else { + puts "metadata parsing error: unknown object class $class" + } + global $object + set $object $object_config + } + + if { $execMode == "batch" } { return } + switchCanvas none + redrawAll +} + +proc parseFileMessage { data len flags channel } { + global showAPI node_list + set prmsg $showAPI + + array set tlvnames { 1 num 2 name 3 mode 4 fno 5 type 6 sname \ + 10 session 16 data 17 cdata } + array set tlvsizes { num 4 name -1 mode -3 fno 2 type -1 sname -1 \ + session -1 data -1 cdata -1 } + array set defvals { num -1 name "" mode -1 fno -1 type "" sname "" \ + session "" data "" cdata "" } + + if { $prmsg==1 } { puts -nonewline "FILE(flags=$flags," } + array set vals [parseMessage $data $len $flags [array get tlvnames] \ + [array get tlvsizes] [array get defvals]] + if { $prmsg } { puts ") "} + + # hook scripts received in File Message + if { [string range $vals(type) 0 4] == "hook:" } { + global g_hook_scripts + set state [string range $vals(type) 5 end] + lappend g_hook_scripts [list $vals(name) $state $vals(data)] + return + } + + # required fields + foreach t "num name data" { + if { $vals($t) == $defvals($t) } { + puts "Received File Message without $t, dropping."; return; + } + } + + # check for node existance + set node "n$vals(num)" + if { [lsearch $node_list $node] == -1 } { + puts "File message but node ($node) does not exist, dropping." 
+        return
+    } else {
+        global $node
+    }
+
+    # service customization received in File Message
+    if { [string range $vals(type) 0 7] == "service:" } {
+        customizeServiceFile $node $vals(name) $vals(type) $vals(data) true
+    }
+}
+
+proc parseEventMessage { data len flags channel } {
+    global showAPI eventtypes g_traffic_start_opt execMode node_list
+    set prmsg $showAPI
+    set current 0
+    set nodenum -1
+    set eventtype -1
+    set eventname ""
+    set eventdata ""
+    set eventtime ""
+    set session ""
+
+    if { $prmsg==1 } { puts -nonewline "EVENT(flags=$flags," }
+
+    # parse each TLV
+    while { $current < $len } {
+        set typelength [parseTLVHeader $data current]
+        set type [lindex $typelength 0]
+        set length [lindex $typelength 1]
+        if { $length == 0 || $length == "" } { break }
+        set pad [pad_32bit $length]
+        # verbose debugging
+        #puts "tlv type=$type length=$length pad=$pad current=$current"
+        # TLV data
+        switch -exact -- "$type" {
+            1 {
+                incr current $pad
+                binary scan $data @${current}I nodenum
+                if { $prmsg == 1} { puts -nonewline "node=$nodenum," }
+            }
+            2 {
+                incr current $pad
+                binary scan $data @${current}I eventtype
+                if { $prmsg == 1} {
+                    set typestr ""
+                    foreach t [array names eventtypes] {
+                        if { $eventtypes($t) == $eventtype } {
+                            set typestr "-$t"
+                            break
+                        }
+                    }
+                    puts -nonewline "type=$eventtype$typestr,"
+                }
+            }
+            3 {
+                binary scan $data @${current}a${length} eventname
+                if { $prmsg == 1} { puts -nonewline "name=$eventname," }
+                incr current $pad
+            }
+            4 {
+                binary scan $data @${current}a${length} eventdata
+                if { $prmsg == 1} { puts -nonewline "data=$eventdata," }
+                incr current $pad
+            }
+            5 {
+                binary scan $data @${current}a${length} eventtime
+                if { $prmsg == 1} { puts -nonewline "time=$eventtime," }
+                incr current $pad
+            }
+            10 {
+                binary scan $data @${current}a${length} session
+                if { $prmsg == 1} { puts -nonewline "session=$session," }
+                incr current $pad
+            }
+            default {
+                if { $prmsg == 1} { puts -nonewline "unknown=" }
+                if { $prmsg == 1} { puts -nonewline "$type," }
+            }
+        }
+        # end switch
+
+        # advance current pointer
+        incr current $length
+    }
+
+    if { $prmsg == 1 } { puts ") "}
+
+    # TODO: take other actions here based on Event Message
+    if { $eventtype == 4 } { ;# entered the runtime state
+        if { $g_traffic_start_opt == 1 } { startTrafficScripts }
+        if { $execMode == "batch" } {
+            global g_current_session g_abort_session
+            if {$g_abort_session} {
+                puts "Current session ($g_current_session) aborted. Disconnecting."
+                shutdownSession
+            } else {
+                puts "Session running. Session id is $g_current_session. Disconnecting."
+            }
+            exit.real
+        }
+    } elseif { $eventtype == 6 } { ;# shutdown state
+        set name [lindex [getEmulPlugin "*"] 0]
+        if { [getAssignedRemoteServers] == "" } {
+            # start a new session if not distributed
+            # otherwise we need to allow time for node delete messages
+            # from other servers
+            pluginConnect $name disconnect 1
+            pluginConnect $name connect 1
+        }
+    } elseif { $eventtype >= 7 && $eventtype <= 10 } {
+        if { [string range $eventname 0 8] == "mobility:" } {
+            set node "n$nodenum"
+            if {[lsearch $node_list $node] == -1} {
+                puts "Event message with unknown node $nodenum."
+ return + } + handleMobilityScriptEvent $node $eventtype $eventdata $eventtime + } + } +} + +proc parseSessionMessage { data len flags channel } { + global showAPI g_current_session g_session_dialog_hint execMode + set prmsg $showAPI + set current 0 + set sessionids {} + set sessionnames {} + set sessionfiles {} + set nodecounts {} + set sessiondates {} + set thumbs {} + set sessionopaque {} + + if { $prmsg==1 } { puts -nonewline "SESSION(flags=$flags," } + + # parse each TLV + while { $current < $len } { + set typelength [parseTLVHeader $data current] + set type [lindex $typelength 0] + set length [lindex $typelength 1] + if { $length == 0 || $length == "" } { + puts "warning: zero-length TLV, discarding remainder of message!" + break + } + set pad [pad_32bit $length] + # verbose debugging + #puts "tlv type=$type length=$length pad=$pad current=$current" + # TLV data + switch -exact -- "$type" { + 1 { + set sessionids {} + binary scan $data @${current}a${length} sids + if { $prmsg == 1} { puts -nonewline "sids=$sids," } + set sessionids [split $sids |] + incr current $pad + } + 2 { + set sessionnames {} + binary scan $data @${current}a${length} snames + if { $prmsg == 1} { puts -nonewline "names=$snames," } + set sessionnames [split $snames |] + incr current $pad + } + 3 { + set sessionfiles {} + binary scan $data @${current}a${length} sfiles + if { $prmsg == 1} { puts -nonewline "files=$sfiles," } + set sessionfiles [split $sfiles |] + incr current $pad + } + 4 { + set nodecounts {} + binary scan $data @${current}a${length} ncs + if { $prmsg == 1} { puts -nonewline "ncs=$ncs," } + set nodecounts [split $ncs |] + incr current $pad + } + 5 { + set sessiondates {} + binary scan $data @${current}a${length} sdates + if { $prmsg == 1} { puts -nonewline "dates=$sdates," } + set sessiondates [split $sdates |] + incr current $pad + } + 6 { + set thumbs {} + binary scan $data @${current}a${length} th + if { $prmsg == 1} { puts -nonewline "thumbs=$th," } + set thumbs [split $th |] + incr current $pad + } + 10 { + set sessionopaque {} + binary scan $data @${current}a${length} sessionopaque + if { $prmsg == 1} { puts -nonewline "$sessionopaque," } + incr current $pad + } + default { + if { $prmsg == 1} { puts -nonewline "unknown=" } + if { $prmsg == 1} { puts -nonewline "$type," } + } + } + # end switch + + # advance current pointer + incr current $length + } + + if { $prmsg == 1 } { puts ") "} + + if {$g_current_session == 0} { + # set the current session to the channel port number + set current_session [lindex [fconfigure $channel -sockname] 2] + } else { + set current_session $g_current_session + } + + if {[lsearch $sessionids $current_session] == -1} { + puts -nonewline "*** warning: current session ($g_current_session) " + puts "not found in session list: $sessionids" + } + + set orig_session_choice $g_current_session + set g_current_session $current_session + setGuiTitle "" + + if {$execMode == "closebatch"} { + # we're going to close some session, so this is expected + global g_session_choice + + if {[lsearch $sessionids $g_session_choice] == -1} { + puts -nonewline "*** warning: current session ($g_session_choice) " + puts "not found in session list: $sessionids" + } else { + set flags 0x2 ;# delete flag + set sid $g_session_choice + set name "" + set f "" + set nodecount "" + set thumb "" + set user "" + sendSessionMessage $channel $flags $sid $name $f $nodecount $thumb $user + + puts "Session shutdown message sent." 
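+            # (flags=0x2 is the delete flag, so the Session message above asks
+            # the daemon to tear down session $sid; the name, file, node count,
+            # thumbnail and user fields may be left empty for this request)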
+ } + exit.real + } + + if {$orig_session_choice == 0 && [llength $sessionids] == 1} { + # we just started up and only the current session exists + set g_session_dialog_hint 0 + return + } + + if {$execMode == "batch"} { + puts "Another session is active." + exit.real + } + + if { $g_session_dialog_hint } { + popupSessionConfig $channel $sessionids $sessionnames $sessionfiles \ + $nodecounts $sessiondates $thumbs $sessionopaque + } + set g_session_dialog_hint 0 +} + +# parse message TLVs given the possible TLV names and sizes +# default values are supplied in defaultvals, parsed values are returned +proc parseMessage { data len flags tlvnamesl tlvsizesl defaultvalsl } { + global showAPI + set prmsg $showAPI + + array set tlvnames $tlvnamesl + array set tlvsizes $tlvsizesl + array set vals $defaultvalsl ;# this array is returned + + set current 0 + + while { $current < $len } { + set typelength [parseTLVHeader $data current] + set type [lindex $typelength 0] + set length [lindex $typelength 1] + if { $length == 0 || $length == "" } { break } + set pad [pad_32bit $length] + + if {![info exists tlvnames($type)] } { ;# unknown TLV type + if { $prmsg } { puts -nonewline "unknown=$type," } + incr current $length + continue + } + set tlvname $tlvnames($type) + set size $tlvsizes($tlvname) + # 32-bit and 64-bit vals pre-padded + if { $size == 4 || $size == 8 } { incr current $pad } + # read TLV data depending on size + switch -exact -- "$size" { + 2 { binary scan $data @${current}S vals($tlvname) } + 4 { binary scan $data @${current}I vals($tlvname) } + 8 { binary scan $data @${current}W vals($tlvname) } + 16 { binary scan $data @${current}c16 vals($tlvname) } + -1 { binary scan $data @${current}a${length} vals($tlvname) } + } + if { $size == -1 } { incr current $pad } ;# string vals post-padded + if { $type == 6 } { incr current $pad } ;# 128-bit vals post-padded + incr current $length + + if { $prmsg } { puts -nonewline "$tlvname=$vals($tlvname)," } + } + return [array get vals] +} + +proc parseExceptionMessage { data len flags channel } { + global showAPI + set prmsg $showAPI + + array set typenames { 1 num 2 sess 3 level 4 src 5 date 6 txt 10 opaque } + array set typesizes { num 4 sess -1 level 2 src -1 date -1 txt -1 \ + opaque -1 } + array set defvals { num -1 sess "" level -1 src "" date "" txt "" opaque ""} + + if { $prmsg==1 } { puts -nonewline "EXCEPTION(flags=$flags," } + array set vals [parseMessage $data $len $flags [array get typenames] \ + [array get typesizes] [array get defvals]] + if { $prmsg == 1 } { puts ") "} + + if { $vals(level) == $defvals(level) } { + puts "Exception Message received without an exception level."; return; + } + + receiveException [array get vals] +} + +proc sendNodePosMessage { channel node nodeid x y wlanid force } { + global showAPI + set prmsg $showAPI + + if { $channel == -1 } { + set channel [lindex [getEmulPlugin $node] 2] + if { $channel == -1 } { return } + } + set node_num [string range $node 1 end] + set x [format "%u" [expr int($x)]] + set y [format "%u" [expr int($y)]] + set len [expr 8+4+4] ;# node number, x, y + if {$nodeid > -1} { incr len 8 } + if {$wlanid > -1} { incr len 8 } + if {$force == 1 } { set crit 0x4 } else { set crit 0x0 } + #puts "sending [expr $len+4] bytes: $nodeid $x $y $wlanid" + if { $prmsg == 1 } { + puts -nonewline ">NODE(flags=$crit,$node,x=$x,y=$y" } + set msg [binary format ccSc2sIc2Sc2S \ + 1 $crit $len \ + {1 4} 0 $node_num \ + {0x20 2} $x \ + {0x21 2} $y + ] + + set msg2 "" + set msg3 "" + if { $nodeid > -1 } { + if 
{ $prmsg == 1 } { puts -nonewline ",emuid=$nodeid" } + set msg2 [binary format c2sI {0x23 4} 0 $nodeid] + } + if { $wlanid > -1 } { + if { $prmsg == 1 } { puts -nonewline ",netid=$wlanid" } + set msg3 [binary format c2sI {0x24 4} 0 $wlanid] + } + + if { $prmsg == 1 } { puts ")" } + puts -nonewline $channel $msg$msg2$msg3 + flushChannel channel "Error sending node position" +} + +# build a new node +proc sendNodeAddMessage { channel node } { + global showAPI CORE_DATA_DIR + set prmsg $showAPI + set len [expr {8+8+4+4}]; # node number, type, x, y + set ipv4 0 + set ipv6 0 + set macstr "" + set wireless 0 + + # type, name + set type [getNodeTypeAPI $node] + set model [getNodeModel $node] + set model_len [string length $model] + set model_pad_len [pad_32bit $model_len] + set model_pad [binary format x$model_pad_len] + set name [getNodeName $node] + set name_len [string length $name] + set name_pad_len [pad_32bit $name_len] + set name_pad [binary format x$name_pad_len] + incr len [expr { 2+$name_len+$name_pad_len}] + if {$model_len > 0} { incr len [expr {2+$model_len+$model_pad_len }] } + set node_num [string range $node 1 end] + + # fixup node type for EMANE-enabled WLAN nodes + set opaque "" + if { [isEmane $node] } { set type 0xA } + + # emulation server (node location) + set emusrv [getNodeLocation $node] + set emusrv_len [string length $emusrv] + set emusrv_pad_len [pad_32bit $emusrv_len] + set emusrv_pad [binary format x$emusrv_pad_len] + if { $emusrv_len > 0 } { incr len [expr {2+$emusrv_len+$emusrv_pad_len } ] } + + # canvas + set canv [getNodeCanvas $node] + if { $canv != "c1" } { + set canv [string range $canv 1 end] ;# convert "c2" to "2" + incr len 4 + } else { + set canv "" + } + + # services + set svc [getNodeServices $node false] + set svc [join $svc "|"] + set svc_len [string length $svc] + set svc_pad_len [pad_32bit $svc_len] + set svc_pad [binary format x$svc_pad_len] + if { $svc_len > 0 } { incr len [expr {2+$svc_len+$svc_pad_len } ] } + + # icon + set icon [getCustomImage $node] + if { [file dirname $icon] == "$CORE_DATA_DIR/icons/normal" } { + set icon [file tail $icon] ;# don't include standard icon path + } + set icon_len [string length $icon] + set icon_pad_len [pad_32bit $icon_len] + set icon_pad [binary format x$icon_pad_len] + if { $icon_len > 0 } { incr len [expr {2+$icon_len+$icon_pad_len} ] } + + # opaque data + set opaque_len [string length $opaque] + set opaque_pad_len [pad_32bit $opaque_len] + set opaque_pad [binary format x$opaque_pad_len] + if { $opaque_len > 0 } { incr len [expr {2+$opaque_len+$opaque_pad_len} ] } + + # length must be calculated before this + if { $prmsg == 1 } { + puts -nonewline ">NODE(flags=add/str,$node,type=$type,$name," + } + set msg [binary format c2Sc2sIc2sIcc \ + {0x1 0x11} $len \ + {0x1 4} 0 $node_num \ + {0x2 4} 0 $type \ + 0x3 $name_len ] + puts -nonewline $channel $msg$name$name_pad + + # IPv4 address + if { $ipv4 > 0 } { + if { $prmsg == 1 } { puts -nonewline "$ipv4str," } + set msg [binary format c2sI {0x4 4} 0 $ipv4] + puts -nonewline $channel $msg + } + + # MAC address + if { $macstr != "" } { + if { $prmsg == 1 } { puts -nonewline "$macstr," } + set mac [join [split $macstr ":"] ""] + puts -nonewline $channel [binary format c2x2W {0x5 8} 0x$mac] + } + + # IPv6 address + if { $ipv6 != 0 } { + if { $prmsg == 1 } { puts -nonewline "$ipv6str," } + set msg [binary format c2 {0x6 16} ] + puts -nonewline $channel $msg + foreach ipv6w [split $ipv6 ":"] { + set msg [binary format S 0x$ipv6w] + puts -nonewline $channel $msg + } + puts 
-nonewline $channel [binary format x2]; # 2 bytes padding + } + + # model type + if { $model_len > 0 } { + set mh [binary format cc 0x7 $model_len] + puts -nonewline $channel $mh$model$model_pad + if { $prmsg == 1 } { puts -nonewline "m=$model," } + } + + # emulation server + if { $emusrv_len > 0 } { + puts -nonewline $channel [binary format cc 0x8 $emusrv_len] + puts -nonewline $channel $emusrv$emusrv_pad + if { $prmsg == 1 } { puts -nonewline "srv=$emusrv," } + } + + # X,Y coordinates + set coords [getNodeCoords $node] + set x [format "%u" [expr int([lindex $coords 0])]] + set y [format "%u" [expr int([lindex $coords 1])]] + set msg [binary format c2Sc2S {0x20 2} $x {0x21 2} $y] + puts -nonewline $channel $msg + + # canvas + if { $canv != "" } { + if { $prmsg == 1 } { puts -nonewline "canvas=$canv," } + set msg [binary format c2S {0x22 2} $canv] + puts -nonewline $channel $msg + } + + if { $prmsg == 1 } { puts -nonewline "x=$x,y=$y" } + + # services + if { $svc_len > 0 } { + puts -nonewline $channel [binary format cc 0x25 $svc_len] + puts -nonewline $channel $svc$svc_pad + if { $prmsg == 1 } { puts -nonewline ",svc=$svc" } + } + + # icon + if { $icon_len > 0 } { + puts -nonewline $channel [binary format cc 0x42 $icon_len] + puts -nonewline $channel $icon$icon_pad + if { $prmsg == 1 } { puts -nonewline ",icon=$icon" } + } + + # opaque data + if { $opaque_len > 0 } { + puts -nonewline $channel [binary format cc 0x50 $opaque_len] + puts -nonewline $channel $opaque$opaque_pad + if { $prmsg == 1 } { puts -nonewline ",opaque=$opaque" } + } + + if { $prmsg == 1 } { puts ")" } + + flushChannel channel "Error sending node add" +} + +# delete a node +proc sendNodeDelMessage { channel node } { + global showAPI + set prmsg $showAPI + set len 8; # node number + set node_num [string range $node 1 end] + + if { $prmsg == 1 } { puts ">NODE(flags=del/str,$node_num)" } + set msg [binary format c2Sc2sI \ + {0x1 0x12} $len \ + {0x1 4} 0 $node_num ] + puts -nonewline $channel $msg + flushChannel channel "Error sending node delete" +} + +# send a message to build, modify, or delete a link +# type should indicate add/delete/link/unlink +proc sendLinkMessage { channel link type {sendboth true} } { + global showAPI + set prmsg $showAPI + + set node1 [lindex [linkPeers $link] 0] + set node2 [lindex [linkPeers $link] 1] + set if1 [ifcByPeer $node1 $node2]; set if2 [ifcByPeer $node2 $node1] + if { [nodeType $node1] == "pseudo" } { return } ;# never seems to occur + if { [nodeType $node2] == "pseudo" } { + set mirror2 [getLinkMirror $node2] + set node2 [getNodeName $node2] + if { [string range $node1 1 end] > [string range $node2 1 end] } { + return ;# only send one link message (for two pseudo-links) + } + set if2 [ifcByPeer $node2 $mirror2] + } + set node1_num [string range $node1 1 end] + set node2_num [string range $node2 1 end] + + # flag for sending unidirectional link messages + set uni 0 + if { $sendboth && [isLinkUni $link] } { + set uni 1 + } + + # set flags and link message type from supplied type parameter + set flags 0 + set ltype 1 ;# add/delete a link (not wireless link/unlink) + set netid -1 + if { $type == "add" || $type == "link" } { + set flags 1 + } elseif { $type == "delete" || $type == "unlink" } { + set flags 2 + } + if { $type == "link" || $type == "unlink" } { + set ltype 0 ;# a wireless link/unlink event + set tmp [getLinkOpaque $link net] + if { $tmp != "" } { set netid [string range $tmp 1 end] } + } + + set key "" + if { [nodeType $node1] == "tunnel" } { + set key [netconfFetchSection 
$node1 "tunnel-key"] + if { $key == "" } { set key 1 } + } + if {[nodeType $node2] == "tunnel" } { + set key [netconfFetchSection $node2 "tunnel-key"] + if { $key == "" } { set key 1 } + } + + if { $prmsg == 1 } { + puts -nonewline ">LINK(flags=$flags,$node1_num-$node2_num," + } + + # len = node1num, node2num, type + set len [expr {8+8+8}] + set delay [getLinkDelay $link] + if { $delay == "" } { set delay 0 } + set jitter [getLinkJitter $link] + if { $jitter == "" } { set jitter 0 } + set bw [getLinkBandwidth $link] + if { $bw == "" } { set bw 0 } + set per [getLinkBER $link]; # PER and BER + if { $per == "" } { set per 0 } + set per_len 0 + set per_msg [buildStringTLV 0x5 $per per_len] + set dup [getLinkDup $link] + if { $dup == "" } { set dup 0 } + set dup_len 0 + set dup_msg [buildStringTLV 0x6 $dup dup_len] + if { $type != "delete" } { + incr len [expr {12+12+$per_len+$dup_len+12}] ;# delay,bw,per,dup,jitter + if {$prmsg==1 } { + puts -nonewline "$delay,$bw,$per,$dup,$jitter," + } + } + # TODO: mer, burst, mburst + if { $prmsg == 1 } { puts -nonewline "type=$ltype," } + if { $uni } { + incr len 4 + if { $prmsg == 1 } { puts -nonewline "uni=$uni," } + } + if { $netid > -1 } { + incr len 8 + if { $prmsg == 1 } { puts -nonewline "netid=$netid," } + } + if { $key != "" } { + incr len 8 + if { $prmsg == 1 } { puts -nonewline "key=$key," } + } + + set if1num [ifcNameToNum $if1]; set if2num [ifcNameToNum $if2] + set if1ipv4 0; set if2ipv4 0; set if1ipv6 ""; set if2ipv6 ""; + set if1ipv4mask 0; set if2ipv4mask 0; + set if1ipv6mask ""; set if2ipv6mask ""; set if1mac ""; set if2mac ""; + + if { $if1num >= 0 && ([[typemodel $node1].layer] == "NETWORK" || \ + [nodeType $node1] == "tunnel") } { + incr len 4 + if { $prmsg == 1 } { puts -nonewline "if1n=$if1num," } + if { $type != "delete" } { + getIfcAddrs $node1 $if1 if1ipv4 if1ipv6 if1mac if1ipv4mask \ + if1ipv6mask len + } + } + if { $if2num >= 0 && ([[typemodel $node2].layer] == "NETWORK" || \ + [nodeType $node2] == "tunnel") } { + incr len 4 + if { $prmsg == 1 } { puts -nonewline "if2n=$if2num," } + if { $type != "delete" } { + getIfcAddrs $node2 $if2 if2ipv4 if2ipv6 if2mac if2ipv4mask \ + if2ipv6mask len + } + } + + # start building the binary message on channel + # length must be calculated before this + set msg [binary format ccSc2sIc2sI \ + {0x2} $flags $len \ + {0x1 4} 0 $node1_num \ + {0x2 4} 0 $node2_num ] + puts -nonewline $channel $msg + + if { $type != "delete" } { + puts -nonewline $channel [binary format c2sW {0x3 8} 0 $delay] + puts -nonewline $channel [binary format c2sW {0x4 8} 0 $bw] + puts -nonewline $channel $per_msg + puts -nonewline $channel $dup_msg + puts -nonewline $channel [binary format c2sW {0x7 8} 0 $jitter] + } + # TODO: mer, burst, mburst + + # link type + puts -nonewline $channel [binary format c2sI {0x20 4} 0 $ltype] + + # unidirectional flag + if { $uni } { + puts -nonewline $channel [binary format c2S {0x22 2} $uni] + } + + # network ID + if { $netid > -1 } { + puts -nonewline $channel [binary format c2sI {0x24 4} 0 $netid] + } + + if { $key != "" } { + puts -nonewline $channel [binary format c2sI {0x25 4} 0 $key] + } + + # interface 1 info + if { $if1num >= 0 && ([[typemodel $node1].layer] == "NETWORK" || \ + [nodeType $node1] == "tunnel") } { + puts -nonewline $channel [ binary format c2S {0x30 2} $if1num ] + } + if { $if1ipv4 > 0 } { puts -nonewline $channel [binary format c2sIc2S \ + {0x31 4} 0 $if1ipv4 {0x32 2} $if1ipv4mask ] } + if { $if1mac != "" } { + set if1mac [join [split $if1mac ":"] ""] + puts 
-nonewline $channel [binary format c2x2W {0x33 8} 0x$if1mac] + } + if {$if1ipv6 != ""} { puts -nonewline $channel [binary format c2 {0x34 16}] + foreach ipv6w [split $if1ipv6 ":"] { puts -nonewline $channel \ + [binary format S 0x$ipv6w] } + puts -nonewline $channel [binary format x2c2S {0x35 2} $if1ipv6mask] } + + # interface 2 info + if { $if2num >= 0 && ([[typemodel $node2].layer] == "NETWORK" || \ + [nodeType $node2] == "tunnel") } { + puts -nonewline $channel [ binary format c2S {0x36 2} $if2num ] + } + if { $if2ipv4 > 0 } { puts -nonewline $channel [binary format c2sIc2S \ + {0x37 4} 0 $if2ipv4 {0x38 2} $if2ipv4mask ] } + if { $if2mac != "" } { + set if2mac [join [split $if2mac ":"] ""] + puts -nonewline $channel [binary format c2x2W {0x39 8} 0x$if2mac] + } + if {$if2ipv6 != ""} { puts -nonewline $channel [binary format c2 {0x40 16}] + foreach ipv6w [split $if2ipv6 ":"] { puts -nonewline $channel \ + [binary format S 0x$ipv6w] } + puts -nonewline $channel [binary format x2c2S {0x41 2} $if2ipv6mask] } + + if { $prmsg==1 } { puts ")" } + flushChannel channel "Error sending link message" + + ########################################################## + # send a second Link Message for unidirectional link effects + if { $uni < 1 } { + return + } + # first calculate length and possibly print the message + set flags 0 + if { $prmsg == 1 } { + puts -nonewline ">LINK(flags=$flags,$node2_num-$node1_num," + } + set len [expr {8+8+8}] ;# len = node2num, node1num (swapped), type + set delay [getLinkDelay $link up] + if { $delay == "" } { set delay 0 } + set jitter [getLinkJitter $link up] + if { $jitter == "" } { set jitter 0 } + set bw [getLinkBandwidth $link up] + if { $bw == "" } { set bw 0 } + set per [getLinkBER $link up]; # PER and BER + if { $per == "" } { set per 0 } + set per_len 0 + set per_msg [buildStringTLV 0x5 $per per_len] + set dup [getLinkDup $link up] + if { $dup == "" } { set dup 0 } + set dup_len 0 + set dup_msg [buildStringTLV 0x6 $dup dup_len] + incr len [expr {12+12+$per_len+$dup_len+12}] ;# delay,bw,per,dup,jitter + if {$prmsg==1 } { + puts -nonewline "$delay,$bw,$per,$dup,$jitter," + } + if { $prmsg == 1 } { puts -nonewline "type=$ltype," } + incr len 4 ;# unidirectional flag + if { $prmsg == 1 } { puts -nonewline "uni=$uni," } + # note that if1num / if2num are reversed here due to reversed node nums + if { $if2num >= 0 && ([[typemodel $node2].layer] == "NETWORK" || \ + [nodeType $node2] == "tunnel") } { + incr len 4 + if { $prmsg == 1 } { puts -nonewline "if1n=$if2num," } + } + if { $if1num >= 0 && ([[typemodel $node1].layer] == "NETWORK" || \ + [nodeType $node1] == "tunnel") } { + incr len 4 + if { $prmsg == 1 } { puts -nonewline "if2n=$if1num," } + } + # build and send the link message + set msg [binary format ccSc2sIc2sI \ + {0x2} $flags $len \ + {0x1 4} 0 $node2_num \ + {0x2 4} 0 $node1_num ] + puts -nonewline $channel $msg + puts -nonewline $channel [binary format c2sW {0x3 8} 0 $delay] + puts -nonewline $channel [binary format c2sW {0x4 8} 0 $bw] + puts -nonewline $channel $per_msg + puts -nonewline $channel $dup_msg + puts -nonewline $channel [binary format c2sW {0x7 8} 0 $jitter] + puts -nonewline $channel [binary format c2sI {0x20 4} 0 $ltype] + puts -nonewline $channel [binary format c2S {0x22 2} $uni] + if { $if2num >= 0 && ([[typemodel $node2].layer] == "NETWORK" || \ + [nodeType $node2] == "tunnel") } { + puts -nonewline $channel [ binary format c2S {0x30 2} $if2num ] + } + if { $if1num >= 0 && ([[typemodel $node1].layer] == "NETWORK" || \ + [nodeType 
$node1] == "tunnel") } { + puts -nonewline $channel [ binary format c2S {0x36 2} $if1num ] + } + if { $prmsg==1 } { puts ")" } + flushChannel channel "Error sending link message" +} + +# helper to get IPv4, IPv6, MAC address and increment length +# also prints TLV-style addresses if showAPI is true +proc getIfcAddrs { node ifc ipv4p ipv6p macp ipv4maskp ipv6maskp lenp } { + global showAPI + upvar $ipv4p ipv4 + upvar $ipv6p ipv6 + upvar $macp mac + upvar $ipv4maskp ipv4mask + upvar $ipv6maskp ipv6mask + upvar $lenp len + + if { $ifc == "" || $node == "" } { return } + + # IPv4 address + set ipv4str [getIfcIPv4addr $node $ifc] + if {$ipv4str != ""} { + set ipv4 [lindex [split $ipv4str /] 0] + if { [info exists ipv4mask ] } { + set ipv4mask [lindex [split $ipv4str / ] 1] + incr len 12; # 8 addr + 4 mask + if { $showAPI == 1 } { puts -nonewline "$ipv4str," } + } else { + incr len 8; # 8 addr + if { $showAPI == 1 } { puts -nonewline "$ipv4," } + } + set ipv4 [stringToIPv4 $ipv4]; # convert to integer + } + + # IPv6 address + set ipv6str [getIfcIPv6addr $node $ifc] + if {$ipv6str != ""} { + set ipv6 [lindex [split $ipv6str /] 0] + if { [info exists ipv6mask ] } { + set ipv6mask [lindex [split $ipv6str / ] 1] + incr len 24; # 20 addr + 4 mask + if { $showAPI == 1 } { puts -nonewline "$ipv6str," } + } else { + incr len 20; # 20 addr + if { $showAPI == 1 } { puts -nonewline "$ipv6," } + } + set ipv6 [expandIPv6 $ipv6]; # convert to long string + } + + # MAC address (from conf if there, otherwise generated) + if { [info exists mac] } { + set mac [lindex [getIfcMacaddr $node $ifc] 0] + if {$mac == ""} { + set mac [getNextMac] + } + if { $showAPI == 1 } { puts -nonewline "$mac," } + incr len 12; + } +} + +# +# Register Message: (registration types) +# This is a simple Register Message, types is an array of +# tuples. 
+proc sendRegMessage { channel flags types_list } { + global showAPI regtypes + set prmsg $showAPI + + if { $channel == -1 || $channel == "" } { + set plugin [lindex [getEmulPlugin "*"] 0] + set channel [pluginConnect $plugin connect true] + if { $channel == -1 } { return } + } + set len 0 + array set types $types_list + + # array names output is unreliable, sort it + set type_list [lsort -dict [array names types]] + foreach type $type_list { + if { ![info exists regtypes($type)] } { + puts "sendRegMessage: unknown registration type '$type'" + return -1 + } + set str_$type $types($type) + set str_${type}_len [string length [set str_$type]] + set str_${type}_pad_len [pad_32bit [set str_${type}_len]] + set str_${type}_pad [binary format x[set str_${type}_pad_len]] + incr len [expr { 2 + [set str_${type}_len] + [set str_${type}_pad_len]}] + } + + if { $prmsg == 1 } { puts ">REG($type_list)" } + # message header + set msg1 [binary format ccS 4 $flags $len] + puts -nonewline $channel $msg1 + + foreach type $type_list { + set type_num $regtypes($type) + set tlvh [binary format cc $type_num [set str_${type}_len]] + puts -nonewline $channel $tlvh[set str_${type}][set str_${type}_pad] + } + + flushChannel channel "Error: API channel was closed" +} + +# +# Configuration Message: (object, type flags, node) +# This is a simple Configuration Message containing flags +proc sendConfRequestMessage { channel node model flags netid opaque } { + global showAPI + set prmsg $showAPI + + if { $channel == -1 || $channel == "" } { + set pname [lindex [getEmulPlugin $node] 0] + set channel [pluginConnect $pname connect true] + if { $channel == -1 } { return } + } + + set model_len [string length $model] + set model_pad_len [pad_32bit $model_len] + set model_pad [binary format x$model_pad_len ] + set len [expr {4+2+$model_len+$model_pad_len}] + # optional network ID to provide Netgraph mapping + if { $netid != -1 } { incr len 8 } + # convert from node name to number + if { [string is alpha [string range $node 0 0]] } { + set node [string range $node 1 end] + } + + if { $node > 0 } { incr len 8 } + # add a session number when configuring services + set session "" + set session_len 0 + set session_pad_len 0 + set session_pad "" + if { $node <= 0 && $model == "services" } { + global g_current_session + set session [format "0x%x" $g_current_session] + set session_len [string length $session] + set session_pad_len [pad_32bit $session_len] + set session_pad [binary format x$session_pad_len] + incr len [expr {2 + $session_len + $session_pad_len}] + } + # opaque data - used when custom configuring services + set opaque_len 0 + set msgop [buildStringTLV 0x50 $opaque opaque_len] + if { $opaque_len > 0 } { incr len $opaque_len } + + if { $prmsg == 1 } { + puts -nonewline ">CONF(flags=0," + if { $node > 0 } { puts -nonewline "node=$node," } + puts -nonewline "obj=$model,cflags=$flags" + if { $session != "" } { puts -nonewline ",session=$session" } + if { $netid > -1 } { puts -nonewline ",netid=$netid" } + if { $opaque_len > 0 } { puts -nonewline ",opaque=$opaque" } + puts ") request" + } + # header, node node number, node model header + set msg1 [binary format c2S {5 0} $len ] + set msg1b "" + if { $node > 0 } { set msg1b [binary format c2sI {1 4} 0 $node] } + set msg1c [binary format cc 2 $model_len] + # request flag + set msg2 [binary format c2S {3 2} $flags ] + # session number + set msg3 "" + if { $session != "" } { + set msg3 [binary format cc 0x0A $session_len] + set msg3 $msg3$session$session_pad + } + # network ID + 
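+    # TLV layout for the 32-bit network ID below: 1 byte type (0x23),
+    # 1 byte length (4), then 2 zero bytes so the value starts on a 32-bit
+    # boundary, matching the pre-padding the parsers apply to 4- and 8-byte
+    # values elsewhere in this file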
set msg4 "" + if { $netid != -1 } { + set msg4 [binary format c2sI {0x23 4} 0 0x$netid ] + } + + #catch {puts -nonewline $channel $msg1$model$model_pad$msg2$msg3$msg4$msg5} + puts -nonewline $channel $msg1$msg1b$msg1c$model$model_pad$msg2$msg3$msg4 + if { $opaque_len > 0 } { puts -nonewline $channel $msgop } + + flushChannel channel "Error: API channel was closed" +} + +# +# Configuration Message: (object, type flags, node, types, values) +# This message is more complicated to build because of the list of +# data types and values. +proc sendConfReplyMessage { channel node model types values opaque } { + global showAPI + set prmsg $showAPI + # convert from node name to number + if { [string is alpha [string range $node 0 0]] } { + set node [string range $node 1 end] + } + # add a session number when configuring services + set session "" + set session_len 0 + set session_pad_len 0 + set session_pad "" + if { $node <= 0 && $model == "services" && $opaque == "" } { + global g_current_session + set session [format "0x%x" $g_current_session] + set session_len [string length $session] + set session_pad_len [pad_32bit $session_len] + set session_pad [binary format x$session_pad_len] + incr len [expr {$session_len + $session_pad_len}] + } + + if { $prmsg == 1 } { + puts -nonewline ">CONF(flags=0," + if {$node > -1 } { puts -nonewline "node=$node," } + puts -nonewline "obj=$model,cflags=0" + if {$session != "" } { puts -nonewline "session=$session," } + if {$opaque != "" } { puts -nonewline "opaque=$opaque," } + puts "types=<$types>,values=<$values>) reply" + } + + # types (16-bit values) and values + set n 0 + set type_len [expr {[llength $types] * 2} ] + set type_data [binary format cc 4 $type_len] + set value_data "" + foreach type $types { + set t [binary format S $type] + set type_data $type_data$t + set val [lindex $values $n] + if { $val == "" } { + #puts "warning: empty value $n (type=$type)" + if { $type != 10 } { set val 0 } + } + incr n + lappend value_data $val + }; # end foreach + set value_len 0 + set value_data [join $value_data |] + set msgval [buildStringTLV 0x5 $value_data value_len] + set type_pad_len [pad_32bit $type_len] + set type_pad [binary format x$type_pad_len ] + set model_len [string length $model] + set model_pad_len [pad_32bit $model_len] + set model_pad [binary format x$model_pad_len ] + # opaque data - used when custom configuring services + set opaque_len 0 + set msgop [buildStringTLV 0x50 $opaque opaque_len] + + # 4 bytes header, model TLV + set len [expr 4+2+$model_len+$model_pad_len] + if { $node > -1 } { incr len 8 } + # session number + set msg3 "" + if { $session != "" } { + incr len [expr {2 + $session_len + $session_pad_len }] + set msg3 [binary format cc 0x0A $session_len] + set msg3 $msg3$session$session_pad + } + if { $opaque_len > 0 } { incr len $opaque_len } + # types TLV, values TLV + incr len [expr {2 + $type_len + $type_pad_len + $value_len}] + + # header, node node number, node model header + set msgh [binary format c2S {5 0} $len ] + set msgwl "" + if { $node > -1 } { set msgwl [binary format c2sI {1 4} 0 $node] } + set model_hdr [binary format cc 2 $model_len] + # no flags + set type_hdr [binary format c2S {3 2} 0 ] + set msg $msgh$msgwl$model_hdr$model$model_pad$type_hdr$type_data$type_pad + set msg $msg$msgval$msg3 + puts -nonewline $channel $msg + if { $opaque_len > 0 } { puts -nonewline $channel $msgop } + flushChannel channel "Error sending conf reply" +} + +# Event Message +proc sendEventMessage { channel type nodenum name data flags } { + 
global showAPI eventtypes + set prmsg $showAPI + + set len [expr 8] ;# event type + if {$nodenum > -1} { incr len 8 } + set name_len [string length $name] + set name_pad_len [pad_32bit $name_len] + if { $name_len > 0 } { incr len [expr {2 + $name_len + $name_pad_len}] } + set data_len [string length $data] + set data_pad_len [pad_32bit $data_len] + if { $data_len > 0 } { incr len [expr {2 + $data_len + $data_pad_len}] } + + if { $prmsg == 1 } { + puts -nonewline ">EVENT(flags=$flags," } + set msg [binary format ccS 8 $flags $len ] ;# message header + + set msg2 "" + if { $nodenum > -1 } { + if { $prmsg == 1 } { puts -nonewline "node=$nodenum," } + set msg2 [binary format c2sI {0x01 4} 0 $nodenum] + } + if { $prmsg == 1} { + set typestr "" + foreach t [array names eventtypes] { + if { $eventtypes($t) == $type } { set typestr "-$t"; break } + } + puts -nonewline "type=$type$typestr," + } + set msg3 [binary format c2sI {0x02 4} 0 $type] + set msg4 "" + set msg5 "" + if { $name_len > 0 } { + if { $prmsg == 1 } { puts -nonewline "name=$name," } + set msg4 [binary format cc 0x03 $name_len ] + set name_pad [binary format x$name_pad_len ] + set msg5 $name$name_pad + } + set msg6 "" + set msg7 "" + if { $data_len > 0 } { + if { $prmsg == 1 } { puts -nonewline "data=$data" } + set msg6 [binary format cc 0x04 $data_len ] + set data_pad [binary format x$data_pad_len ] + set msg7 $data$data_pad + } + + if { $prmsg == 1 } { puts ")" } + puts -nonewline $channel $msg$msg2$msg3$msg4$msg5$msg6$msg7 + flushChannel channel "Error sending Event type=$type" +} + + +# deploy working configuration using CORE API +# Deploys a current working configuration. It creates all the +# nodes and link as defined in configuration file. +proc deployCfgAPI { sock } { + global eid + global node_list link_list annotation_list canvas_list + global mac_byte4 mac_byte5 + global execMode + global ngnodemap + global mac_addr_start + global deployCfgAPI_lock + global eventtypes + global g_comments + + if { ![info exists deployCfgAPI_lock] } { set deployCfgAPI_lock 0 } + if { $deployCfgAPI_lock } { + puts "***error: deployCfgAPI called while deploying config" + return + } + + set deployCfgAPI_lock 1 ;# lock + + set mac_byte4 0 + set mac_byte5 0 + if { [info exists mac_addr_start] } { set mac_byte5 $mac_addr_start } + set t_start [clock seconds] + + global systype + set systype [lindex [checkOS] 0] + statgraph on [expr (2*[llength $node_list]) + [llength $link_list]] + + + sendSessionProperties $sock + + # this tells the CORE services that we are starting to send + # configuration data + # clear any existing config + sendEventMessage $sock $eventtypes(definition_state) -1 "" "" 0 + # inform CORE services about emulation servers, hook scripts, canvas info, + # and services + sendEventMessage $sock $eventtypes(configuration_state) -1 "" "" 0 + sendEmulationServerInfo $sock 0 + sendSessionOptions $sock + sendHooks $sock + sendCanvasInfo $sock + sendNodeTypeInfo $sock 0 + # send any custom service info before the node messages + sendNodeCustomServices $sock + + # send Node add messages for all emulation nodes + foreach node $node_list { + set node_id "$eid\_$node" + set type [nodeType $node] + set name [getNodeName $node] + if { $type == "pseudo" } { continue } + + statgraph inc 1 + statline "Creating node $name" + if { [[typemodel $node].layer] == "NETWORK" } { + nodeHighlights .c $node on red + } + # inform the CORE daemon of the node + sendNodeAddMessage $sock $node + pluginCapsInitialize $node "mobmodel" + writeNodeCoords $node 
[getNodeCoords $node] + } + + # send Link add messages for all network links + for { set pending_links $link_list } { $pending_links != "" } {} { + set link [lindex $pending_links 0] + set i [lsearch -exact $pending_links $link] + set pending_links [lreplace $pending_links $i $i] + statgraph inc 1 + + set lnode1 [lindex [linkPeers $link] 0] + set lnode2 [lindex [linkPeers $link] 1] + if { [nodeType $lnode2] == "router" && \ + [getNodeModel $lnode2] == "remote" } { + continue; # remote routers are ctrl. by GUI; TODO: move to daemon + } + sendLinkMessage $sock $link add + } + + # GUI-specific meta-data send via Configure Messages + if { [llength $annotation_list] > 0 } { + sendMetaData $sock $annotation_list "annotation" + } + sendMetaData $sock $canvas_list "canvas" ;# assume >= 1 canvas + # global GUI options - send as meta-data + set obj "metadata" + set values [getGlobalOptionList] + sendConfReplyMessage $sock -1 $obj "10" "{global_options=$values}" "" + if { [info exists g_comments] && $g_comments != "" } { + sendConfReplyMessage $sock -1 $obj "10" "{comments=$g_comments}" "" + } + + # status bar graph + statgraph off 0 + statline "Network topology instantiated in [expr [clock seconds] - $t_start] seconds ([llength $node_list] nodes and [llength $link_list] links)." + + # TODO: turn on tcpdump if enabled; customPostConfigCommands; + # addons 4 deployCfgHook + + # draw lines between wlan nodes + # initialization does not work earlier than this + + foreach node $node_list { + # WLAN handling: draw lines between wireless nodes + if { [nodeType $node] == "wlan" && $execMode == "interactive" } { + wlanRunMobilityScript $node + } + } + + sendTrafficScripts $sock + + # tell the CORE services that we are ready to instantiate + sendEventMessage $sock $eventtypes(instantiation_state) -1 "" "" 0 + + set deployCfgAPI_lock 0 ;# unlock + + statline "Network topology instantiated in [expr [clock seconds] - $t_start] seconds ([llength $node_list] nodes and [llength $link_list] links)." +} + +# +# emulation shutdown procedure when using the CORE API +proc shutdownSession {} { + global link_list node_list eid eventtypes execMode + + set nodecount [getNodeCount] + if { $nodecount == 0 } { + # This allows switching to edit mode without extra API messages, + # such as when file new is selected while running an existing session. + return + } + + # prepare the channel + set plugin [lindex [getEmulPlugin "*"] 0] + set sock [pluginConnect $plugin connect true] + + sendEventMessage $sock $eventtypes(datacollect_state) -1 "" "" 0 + + # shut down all links + foreach link $link_list { + + set lnode2 [lindex [linkPeers $link] 1] + if { [nodeType $lnode2] == "router" && \ + [getNodeModel $lnode2] == "remote" } { + continue; # remote routers are ctrl. 
by GUI; TODO: move to daemon + } + + sendLinkMessage $sock $link delete false + } + # shut down all nodes + foreach node $node_list { + set type [nodeType $node] + if { [[typemodel $node].layer] == "NETWORK" && $execMode != "batch" } { + nodeHighlights .c $node on red + } + sendNodeDelMessage $sock $node + pluginCapsDeinitialize $node "mobmodel" + deleteNodeCoords $node + } + + sendNodeTypeInfo $sock 1 + sendEmulationServerInfo $sock 1 +} + +# inform the CORE services about the canvas information to support +# conversion between X,Y and lat/long coordinates +proc sendCanvasInfo { sock } { + global curcanvas + + if { ![info exists curcanvas] } { return } ;# batch mode + set obj "location" + + set scale [getCanvasScale $curcanvas] + set refpt [getCanvasRefPoint $curcanvas] + set refx [lindex $refpt 0] + set refy [lindex $refpt 1] + set latitude [lindex $refpt 2] + set longitude [lindex $refpt 3] + set altitude [lindex $refpt 4] + + set types [list 2 2 10 10 10 10] + set values [list $refx $refy $latitude $longitude $altitude $scale] + + sendConfReplyMessage $sock -1 $obj $types $values "" +} + +# inform the CORE services about the default services for a node type, which +# are used when node-specific services have not been configured for a node +proc sendNodeTypeInfo { sock reset } { + global node_list + + set obj "services" + + if { $reset == 1} { + sendConfRequestMessage $sock -1 "all" 0x3 -1 "" + return + } + # build a list of node types in use + set typesinuse "" + foreach node $node_list { + set type [nodeType $node] + if { $type != "router" && $type != "OVS" } { continue } + set model [getNodeModel $node] + if { [lsearch $typesinuse $model] < 0 } { lappend typesinuse $model } + } + + foreach type $typesinuse { + # build a list of type + enabled services, all strings + set values [getNodeTypeServices $type] + set values [linsert $values 0 $type] + set types [string repeat "10 " [llength $values]] + sendConfReplyMessage $sock -1 $obj $types $values "" + # send any custom profiles for a node type; node type passed in opaque + set machine_type [getNodeTypeMachineType $type] + set values [getNodeTypeProfile $type] + if { $values != "" } { + set types [string repeat "10 " [llength $values]] + sendConfReplyMessage $sock -1 $machine_type $types $values \ + "$machine_type:$type" + } + } + +} + +# inform the CORE services about any services that have been customized for +# a particular node +proc sendNodeCustomServices { sock } { + global node_list + foreach node $node_list { + set cfgs [getCustomConfig $node] + set cfgfiles "" + foreach cfg $cfgs { + set ids [split [getConfig $cfg "custom-config-id"] :] + if { [lindex $ids 0] != "service" } { continue } + if { [llength $ids] == 3 } { + # customized service config file -- build a list + lappend cfgfiles $cfg + continue + } + set s [lindex $ids 1] + set values [getConfig $cfg "config"] + set t [string repeat "10 " [llength $values]] + sendConfReplyMessage $sock $node services $t $values "service:$s" + } + # send customized service config files after the service info + foreach cfg $cfgfiles { + set idstr [getConfig $cfg "custom-config-id"] + set ids [split $idstr :] + if { [lindex $ids 0] != "service" } { continue } + set s [lindex $ids 1] + set filename [lindex $ids 2] + set data [join [getConfig $cfg "config"] "\n"] + sendFileMessage $sock $node "service:$s" $filename "" $data \ + [string length $data] + } + } +} + +# publish hooks to the CORE services +proc sendHooks { sock } { + global g_hook_scripts + if { ![info exists g_hook_scripts] } { 
return } + foreach hook $g_hook_scripts { + set name [lindex $hook 0] + set state [lindex $hook 1] + set data [lindex $hook 2] + # TODO: modify sendFileMessage to make node number optional + sendFileMessage $sock n0 "hook:$state" $name "" $data \ + [string length $data] + } +} + +# inform the CORE services about the emulation servers that will be used +proc sendEmulationServerInfo { sock reset } { + global exec_servers + set node -1 ;# not used + set obj "broker" + + set servernames [getAssignedRemoteServers] + if { $servernames == "" } { return } ;# not using emulation servers + + if { $reset == 1} { + sendConfRequestMessage $sock $node $obj 0x3 -1 "" + return + } + + set servers "" + foreach servername $servernames { + set host [lindex $exec_servers($servername) 0] + set port [lindex $exec_servers($servername) 1] + lappend servers "$servername:$host:$port" + } + + set serversstring [join $servers ,] + + set types [list 10] + set values [list $serversstring] + + sendConfReplyMessage $sock $node $obj $types $values "" +} + +# returns the length of node_list minus any pseudo-nodes (inter-canvas nodes) +proc getNodeCount {} { + global node_list + set nodecount 0 + foreach node $node_list { + if { [nodeType $node] != "pseudo" } { incr nodecount } + } + return $nodecount +} + +# send basic properties of a session +proc sendSessionProperties { sock } { + global currentFile CORE_DATA_DIR CORE_USER + set sessionname [file tail $currentFile] + set nodecount [getNodeCount] + if { $sessionname == "" } { set sessionname "untitled" } + set tf "/tmp/thumb.jpg" + if { ![writeCanvasThumbnail .c $tf] } { + set src "$CORE_DATA_DIR/icons/normal/thumb-unknown.gif" + set tf "/tmp/thumb.gif" + if [catch { file copy $src $tf } e] { + puts -nonewline "warning: failed to copy $src to $tf\n($e)" + set tf "" + } + } + set user $CORE_USER + sendSessionMessage $sock 0 0 $sessionname $currentFile $nodecount $tf $user +} + +# send session options from global array in Config Message +proc sendSessionOptions { sock } { + if { $sock == -1 } { + set sock [lindex [getEmulPlugin "*"] 2] + } + set values [getSessionOptionsList] + set types [string repeat "10 " [llength $values]] + sendConfReplyMessage $sock -1 "session" $types $values "" +} + +# send annotations as key=value metadata in Config Message +proc sendAnnotations { sock } { + global annotation_list + + if { $sock == -1 } { + set sock [lindex [getEmulPlugin "*"] 2] + } + set values "" + foreach a $annotation_list { + global $a + set val [set $a] + lappend values "annotation $a=$val" + } + set types [string repeat "10 " [llength $values]] + sendConfReplyMessage $sock -1 "metadata" $types $values "" +} + +# send items as key=value metadata in Config Message +proc sendMetaData { sock items itemtype } { + + if { $sock == -1 } { + set sock [lindex [getEmulPlugin "*"] 2] + } + set values "" + foreach i $items { + global $i + set val [set $i] + lappend values "$itemtype $i=$val" + } + set types [string repeat "10 " [llength $values]] + sendConfReplyMessage $sock -1 "metadata" $types $values "" +} + +# send an Event message for the definition state (this clears any existing +# state), then send all node and link definitions to the CORE services +proc sendNodeLinkDefinitions { sock } { + global node_list link_list annotation_list canvas_list eventtypes + global g_comments + #sendEventMessage $sock $eventtypes(definition_state) -1 "" "" 0 + foreach node $node_list { + sendNodeAddMessage $sock $node + pluginCapsInitialize $node "mobmodel" + } + foreach link $link_list { 
sendLinkMessage $sock $link add } + # GUI-specific meta-data send via Configure Messages + sendMetaData $sock $annotation_list "annotation" + sendMetaData $sock $canvas_list "canvas" + set obj "metadata" + set values [getGlobalOptionList] + sendConfReplyMessage $sock -1 $obj "10" "{global_options=$values}" "" + if { [info exists g_comments] && $g_comments != "" } { + sendConfReplyMessage $sock -1 $obj "10" "{comments=$g_comments}" "" + } +} + +proc getNodeTypeAPI { node } { + set type [nodeType $node] + if { $type == "router" } { + set model [getNodeModel $node] + set type [getNodeTypeMachineType $model] + } + switch -exact -- "$type" { + router { return 0x0 } + netns { return 0x0 } + jail { return 0x0 } + OVS { return 0x0 } + physical { return 0x1 } + tbd { return 0x3 } + lanswitch { return 0x4 } + hub { return 0x5 } + wlan { return 0x6 } + rj45 { return 0x7 } + tunnel { return 0x8 } + ktunnel { return 0x9 } + emane { return 0xA } + default { return 0x0 } + } +} + +# send an Execute message +proc sendExecMessage { channel node cmd exec_num flags } { + global showAPI g_api_exec_num + set prmsg $showAPI + + set node_num [string range $node 1 end] + set cmd_len [string length $cmd] + if { $cmd_len > 255 } { puts "sendExecMessage error: cmd too long!"; return} + set cmd_pad_len [pad_32bit $cmd_len] + set cmd_pad [binary format x$cmd_pad_len] + + if { $exec_num == 0 } { + incr g_api_exec_num + set exec_num $g_api_exec_num + } + + # node num + exec num + command string + set len [expr {8 + 8 + 2 + $cmd_len + $cmd_pad_len}] + + if { $prmsg == 1 } {puts ">EXEC(flags=$flags,$node,n=$exec_num,cmd='$cmd')" } + + set msg [binary format ccSc2sIc2sIcc \ + 3 $flags $len \ + {1 4} 0 $node_num \ + {2 4} 0 $exec_num \ + 4 $cmd_len \ + ] + puts -nonewline $channel $msg$cmd$cmd_pad + flushChannel channel "Error sending file message" +} + +# if source file (sf) is specified, then send a message that the file source +# file should be copied to the given file name (f); otherwise, include the file +# data in this message +proc sendFileMessage { channel node type f sf data data_len } { + global showAPI + set prmsg $showAPI + + set node_num [string range $node 1 end] + + set f_len [string length $f] + set f_pad_len [pad_32bit $f_len] + set f_pad [binary format x$f_pad_len] + set type_len [string length $type] + set type_pad_len [pad_32bit $type_len] + set type_pad [binary format x$type_pad_len] + if { $sf != "" } { + set sf_len [string length $sf] + set sf_pad_len [pad_32bit $sf_len] + set sf_pad [binary format x$sf_pad_len] + set data_len 0 + set data_pad_len 0 + } else { + set sf_len 0 + set sf_pad_len 0 + set data_pad_len [pad_32bit $data_len] + set data_pad [binary format x$data_pad_len] + } + # TODO: gzip compression w/tlv type 0x11 + + # node number TLV + file name TLV + ( file src name / data TLV) + set len [expr {8 + 2 + 2 + $f_len + $f_pad_len + $sf_len + $sf_pad_len \ + + $data_len + $data_pad_len}] + # 16-bit data length + if { $data_len > 255 } { + incr len 2 + if { $data_len > 65536 } { + puts -nonewline "*** error: File Message data length too large " + puts "($data_len > 65536)" + return + } + } + if { $type_len > 0 } { incr len [expr {2 + $type_len + $type_pad_len}] } + set flags 1; # add flag + + if { $prmsg == 1 } { + puts -nonewline ">FILE(flags=$flags,$node,f=$f," + if { $type != "" } { puts -nonewline "type=$type," } + if { $sf != "" } { puts "src=$sf)" + } else { puts "data=($data_len))" } + } + + set msg [binary format ccSc2sIcc \ + 6 $flags $len \ + {1 4} 0 $node_num \ + 2 $f_len \ + ] + 
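    # editor's note: msg now holds the File message header (type 6, flags,
    # 16-bit length), the node-number TLV (0x01, pre-padded 32-bit value),
    # and the file-name TLV header (0x02); the name, its padding, and the
    # remaining type/source/data TLVs are appended below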
set msg2 "" + if { $type_len > 0 } { + set msg2 [binary format cc 0x5 $type_len] + set msg2 $msg2$type$type_pad + } + if { $sf != "" } { ;# source file name TLV + set msg3 [binary format cc 0x6 $sf_len] + puts -nonewline $channel $msg$f$f_pad$msg2$msg3$sf$sf_pad + } else { ;# file data TLV + if { $data_len > 255 } { + set msg3 [binary format ccS 0x10 0 $data_len] + } else { + set msg3 [binary format cc 0x10 $data_len] + } + puts -nonewline $channel $msg$f$f_pad$msg2$msg3$data$data_pad + } + flushChannel channel "Error sending file message" +} + +# Session Message +proc sendSessionMessage { channel flags num name sfile nodecount tf user } { + global showAPI + set prmsg $showAPI + + if { $channel == -1 } { + set pname [lindex [getEmulPlugin "*"] 0] + set channel [pluginConnect $pname connect true] + if { $channel == -1 } { return } + } + + set num_len [string length $num] + set num_pad_len [pad_32bit $num_len] + set len [expr {2 + $num_len + $num_pad_len}] + if { $num_len <= 0 } { + puts "error: sendSessionMessage requires at least one session number" + return + } + set name_len [string length $name] + set name_pad_len [pad_32bit $name_len] + if { $name_len > 0 } { incr len [expr { 2 + $name_len + $name_pad_len }] } + set sfile_len [string length $sfile] + set sfile_pad_len [pad_32bit $sfile_len] + if { $sfile_len > 0 } { + incr len [expr { 2 + $sfile_len + $sfile_pad_len }] + } + set nc_len [string length $nodecount] + set nc_pad_len [pad_32bit $nc_len] + if { $nc_len > 0 } { incr len [expr { 2 + $nc_len + $nc_pad_len }] } + set tf_len [string length $tf] + set tf_pad_len [pad_32bit $tf_len] + if { $tf_len > 0 } { incr len [expr { 2 + $tf_len + $tf_pad_len }] } + set user_len [string length $user] + set user_pad_len [pad_32bit $user_len] + if { $user_len > 0 } { incr len [expr { 2 + $user_len + $user_pad_len }] } + + if { $prmsg == 1 } { + puts -nonewline ">SESSION(flags=$flags" } + set msgh [binary format ccS 0x09 $flags $len ] ;# message header + + if { $prmsg == 1 } { puts -nonewline ",sids=$num" } + set num_hdr [binary format cc 0x01 $num_len] + set num_pad [binary format x$num_pad_len ] + set msg1 "$num_hdr$num$num_pad" + + set msg2 "" + if { $name_len > 0 } { + if { $prmsg == 1 } { puts -nonewline ",name=$name" } + # TODO: name_len > 255 + set name_hdr [binary format cc 0x02 $name_len] + set name_pad [binary format x$name_pad_len] + set msg2 "$name_hdr$name$name_pad" + } + set msg3 "" + if { $sfile_len > 0 } { + if { $prmsg == 1 } { puts -nonewline ",file=$sfile" } + # TODO: sfile_len > 255 + set sfile_hdr [binary format cc 0x03 $sfile_len] + set sfile_pad [binary format x$sfile_pad_len] + set msg3 "$sfile_hdr$sfile$sfile_pad" + } + set msg4 "" + if { $nc_len > 0 } { + if { $prmsg == 1 } { puts -nonewline ",nc=$nodecount" } + set nc_hdr [binary format cc 0x04 $nc_len] + set nc_pad [binary format x$nc_pad_len] + set msg4 "$nc_hdr$nodecount$nc_pad" + } + set msg5 "" + if { $tf_len > 0 } { + if { $prmsg == 1 } { puts -nonewline ",thumb=$tf" } + set tf_hdr [binary format cc 0x06 $tf_len] + set tf_pad [binary format x$tf_pad_len] + set msg5 "$tf_hdr$tf$tf_pad" + } + set msg6 "" + if { $user_len > 0 } { + if { $prmsg == 1 } { puts -nonewline ",user=$user" } + set user_hdr [binary format cc 0x07 $user_len] + set user_pad [binary format x$user_pad_len] + set msg6 "$user_hdr$user$user_pad" + } + + if { $prmsg == 1 } { puts ")" } + puts -nonewline $channel $msgh$msg1$msg2$msg3$msg4$msg5$msg6 + flushChannel channel "Error sending Session num=$num" +} + +# return a new execution number and record 
it in the execution request list +# for the given callback (e.g. widget) type +proc newExecCallbackRequest { type } { + global g_api_exec_num g_execRequests + incr g_api_exec_num + set exec_num $g_api_exec_num + lappend g_execRequests($type) $exec_num + return $exec_num +} + +# ask daemon to load or save an XML file based on the current session +proc xmlFileLoadSave { cmd name } { + global oper_mode eventtypes + + set plugin [lindex [getEmulPlugin "*"] 0] + set sock [pluginConnect $plugin connect true] + if { $sock == -1 || $sock == "" } { return } + + # inform daemon about nodes and links when saving in edit mode + if { $cmd == "save" && $oper_mode != "exec" } { + sendSessionProperties $sock + # this tells the CORE services that we are starting to send + # configuration data + # clear any existing config + sendEventMessage $sock $eventtypes(definition_state) -1 "" "" 0 + sendEventMessage $sock $eventtypes(configuration_state) -1 "" "" 0 + sendEmulationServerInfo $sock 0 + sendSessionOptions $sock + sendHooks $sock + sendCanvasInfo $sock + sendNodeTypeInfo $sock 0 + # send any custom service info before the node messages + sendNodeCustomServices $sock + sendNodeLinkDefinitions $sock + } elseif { $cmd == "open" } { + # reset config objects + sendNodeTypeInfo $sock 1 + } + sendEventMessage $sock $eventtypes(file_$cmd) -1 $name "" 0 +} + +############################################################################ +# +# Helper functions below here +# + +# helper function to get interface number from name +proc ifcNameToNum { ifc } { + # eth0, eth1, etc. + if {[string range $ifc 0 2] == "eth"} { + set ifnum [string range $ifc 3 end] + # l0, l1, etc. + } else { + set ifnum [string range $ifc 1 end] + } + if { $ifnum == "" } { + return -1 + } + if {![string is integer $ifnum]} { + return -1 + } + return $ifnum +} + +# +# parse the type and length from a TLV header +proc parseTLVHeader { data current_ref } { + global showAPI + set prmsg $showAPI + upvar $current_ref current + + if { [binary scan $data @${current}cc type length] != 2 } { + if { $prmsg == 1 } { puts "TLV header error" } + return "" + } + set length [expr {$length & 0xFF}]; # convert signed to unsigned + if { $length == 0 } { + if { $type == 0 } { + # prevent endless looping + if { $prmsg == 1 } { puts -nonewline "(extra padding)" } + return "" + } else { + # support for length > 255 + incr current 2 + if { [binary scan $data @${current}S length] != 1 } { + puts "error reading TLV length (type=$type)" + return "" + } + set length [expr {$length & 0xFFFF}] + if { $length == 0 } { + # zero-length string, not length > 255 + incr current -2 + } + } + } + incr current 2 + return [list $type $length] +} + +# return the binary string, and length by reference +proc buildStringTLV { type data len_ref } { + upvar $len_ref len + set data_len [string length $data] + if { $data_len > 65536 } { + puts "warning: buildStringTLV data truncated" + set data_len 65536 + set data [string range 0 65535] + } + set data_pad_len [pad_32bit $data_len] + set data_pad [binary format x$data_pad_len] + + if { $data_len == 0 } { + set len 0 + return "" + } + + if { $data_len > 255 } { + set hdr [binary format ccS $type 0 $data_len] + set hdr_len 4 + } else { + set hdr [binary format cc $type $data_len] + set hdr_len 2 + } + + set len [expr {$hdr_len + $data_len + $data_pad_len}] + + return $hdr$data$data_pad +} + +# calculate padding to 32-bit word boundary +# 32-bit and 64-bit values are pre-padded, strings and 128-bit values are +# post-padded to word boundary, 
depending on type +proc pad_32bit { len } { + # total length = 2 + len + pad + if { $len < 256 } { + set hdrsiz 2 + } else { + set hdrsiz 4 + } + # calculate padding to fill 32-bit boundary + return [expr { -($hdrsiz + $len) % 4 }] +} + +proc macToString { mac_num } { + set mac_bytes "" + # convert 64-bit integer into 12-digit hex string + set mac_num 0x[format "%.12lx" $mac_num] + while { $mac_num > 0 } { + # append 8-bit hex number to list + set uchar [format "%02x" [expr $mac_num & 0xFF]] + lappend mac_bytes $uchar + # shift off 8-bits + set mac_num [expr $mac_num >> 8] + } + + # make sure we have six hex digits + set num_zeroes [expr 6 - [llength $mac_bytes]] + while { $num_zeroes > 0 } { + lappend mac_bytes 00 + incr num_zeroes -1 + } + + # this is lreverse in tcl8.5 and later + set r {} + set i [llength $mac_bytes] + while { $i > 0 } { lappend r [lindex $mac_bytes [incr i -1]] } + + return [join $r :] +} + +proc hexdump { data } { + # read data as hex + binary scan $data H* hex + # split into pairs of hex digits + regsub -all -- {..} $hex {& } hex + return $hex +} diff --git a/gui/canvas.tcl b/gui/canvas.tcl new file mode 100644 index 00000000..11c8217b --- /dev/null +++ b/gui/canvas.tcl @@ -0,0 +1,406 @@ +# +# Copyright 2005-2008 University of Zagreb, Croatia. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +# SUCH DAMAGE. +# +# + +#****h* imunes/canvas.tcl +# NAME +# canvas.tcl -- file used for manipultaion with canvases in IMUNES +# FUNCTION +# This module is used to define all the actions used for configuring +# canvases in IMUNES. On each canvas a part of the simulation is presented +# If there is no additional canvas defined, simulation is presented on the +# defalut canvas. +# +#**** + +#****f* canvas.tcl/removeCanvas +# NAME +# removeCanvas -- remove canvas +# SYNOPSIS +# removeCanvas $canvas_id +# FUNCTION +# Removes the canvas from simulation. This function does not change the +# configuration of the nodes, i.e. nodes attached to the removed canvas +# remain attached to the same non existing canvas. 
+# INPUTS +# * canvas_id -- canvas id +#**** + +proc removeCanvas { canvas } { + global canvas_list $canvas + + set i [lsearch $canvas_list $canvas] + set canvas_list [lreplace $canvas_list $i $i] + set $canvas {} +} + +#****f* canvas.tcl/newCanvas +# NAME +# newCanvas -- craete new canvas +# SYNOPSIS +# set canvas_id [newCanvas $canvas_name] +# FUNCTION +# Creates new canvas. Returns the canvas_id of the new canvas. +# If the canvas_name parameter is empty, the name of the new canvas +# is set to CanvasN, where N represents the canvas_id of the new canvas. +# INPUTS +# * canvas_name -- canvas name +# RESULT +# * canvas_id -- canvas id +#**** + +proc newCanvas { name } { + global canvas_list + + set canvas [newObjectId canvas] + global $canvas + lappend canvas_list $canvas + set $canvas {} + if { $name != "" } { + setCanvasName $canvas $name + } else { + setCanvasName $canvas Canvas[string range $canvas 1 end] + } + + return $canvas +} + + +proc setCanvasSize { canvas x y } { + global $canvas + + set i [lsearch [set $canvas] "size *"] + if { $i >= 0 } { + set $canvas [lreplace [set $canvas] $i $i "size {$x $y}"] + } else { + set $canvas [linsert [set $canvas] 1 "size {$x $y}"] + } +} + +proc getCanvasSize { canvas } { + global $canvas g_prefs + + set entry [lrange [lsearch -inline [set $canvas] "size *"] 1 end] + set size [string trim $entry \{\}] + if { $size == "" } { + return "$g_prefs(gui_canvas_x) $g_prefs(gui_canvas_y)" + } else { + return $size + } +} + +#****f* canvas.tcl/getCanvasName +# NAME +# getCanvasName -- get canvas name +# SYNOPSIS +# set canvas_name [getCanvasName $canvas_id] +# FUNCTION +# Returns the name of the canvas. +# INPUTS +# * canvas_id -- canvas id +# RESULT +# * canvas_name -- canvas name +#**** + +proc getCanvasName { canvas } { + global $canvas + + set entry [lrange [lsearch -inline [set $canvas] "name *"] 1 end] + return [string trim $entry \{\}] +} + +#****f* canvas.tcl/setCanvasName +# NAME +# setCanvasName -- set canvas name +# SYNOPSIS +# setCanvasName $canvas_id $canvas_name +# FUNCTION +# Sets the name of the canvas. +# INPUTS +# * canvas_id -- canvas id +# * canvas_name -- canvas name +#**** + +proc setCanvasName { canvas name } { + global $canvas + + set i [lsearch [set $canvas] "name *"] + if { $i >= 0 } { + set $canvas [lreplace [set $canvas] $i $i "name {$name}"] + } else { + set $canvas [linsert [set $canvas] 1 "name {$name}"] + } +} + +# Boeing: canvas wallpaper support +proc getCanvasWallpaper { canvas } { + global $canvas + + set entry [lrange [lsearch -inline [set $canvas] "wallpaper *"] 1 end] + set entry2 [lrange [lsearch -inline \ + [set $canvas] "wallpaper-style *"] 1 end] + return [list [string trim $entry \{\}] [string trim $entry2 \{\}]] +} + +proc setCanvasWallpaper { canvas file style} { + global $canvas + + set i [lsearch [set $canvas] "wallpaper *"] + if { $i >= 0 } { + set $canvas [lreplace [set $canvas] $i $i "wallpaper {$file}"] + } else { + set $canvas [linsert [set $canvas] 1 "wallpaper {$file}"] + } + + set i [lsearch [set $canvas] "wallpaper-style *"] + if { $i >= 0 } { + set $canvas [lreplace [set $canvas] $i $i "wallpaper-style {$style}"] + } else { + set $canvas [linsert [set $canvas] 1 "wallpaper-style {$style}"] + } +} + +# Boeing: manage canvases +proc manageCanvasPopup { x y } { + global curcanvas CORE_DATA_DIR + + set w .entry1 + catch {destroy $w} + toplevel $w -takefocus 1 + + if { $x == 0 && $y == 0 } { + set screen [wm maxsize .] 
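        # no coordinates were supplied, so place the dialog at one quarter of
        # the screen size; otherwise it is offset from the given x,y below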
+ set x [expr {[lindex $screen 0] / 4}] + set y [expr {[lindex $screen 1] / 4}] + } else { + set x [expr {$x + 10}] + set y [expr {$y - 250}] + } + wm geometry $w +$x+$y + wm title $w "Manage Canvases" + wm iconname $w "Manage Canvases" + + + ttk::frame $w.name + ttk::label $w.name.lab -text "Canvas name:" + ttk::entry $w.name.ent + $w.name.ent insert 0 [getCanvasName $curcanvas] + pack $w.name.lab $w.name.ent -side left -fill x + pack $w.name -side top -padx 4 -pady 4 + + global canvas_list + ttk::frame $w.canv + listbox $w.canv.cl -bg white -yscrollcommand "$w.canv.scroll set" + ttk::scrollbar $w.canv.scroll -orient vertical -command "$w.canv.cl yview" + foreach canvas $canvas_list { + $w.canv.cl insert end [getCanvasName $canvas] + if { $canvas == $curcanvas } { + set curindex [expr {[$w.canv.cl size] - 1}] + } + } + pack $w.canv.cl -side left -pady 4 -fill both -expand true + pack $w.canv.scroll -side left -fill y + pack $w.canv -side top -fill both -expand true -padx 4 -pady 4 + $w.canv.cl selection set $curindex + $w.canv.cl see $curindex + bind $w.canv.cl "manageCanvasSwitch $w" + + ttk::frame $w.buttons2 + foreach b {up down} { + set fn "$CORE_DATA_DIR/icons/tiny/arrow.${b}.gif" + set img$b [image create photo -file $fn] + ttk::button $w.buttons2.$b -image [set img${b}] \ + -command "manageCanvasUpDown $w $b" + } + pack $w.buttons2.up $w.buttons2.down -side left -expand 1 + pack $w.buttons2 -side top -fill x -pady 2 + + # hidden list of canvas numbers + ttk::label $w.list -text $canvas_list + + ttk::frame $w.buttons + ttk::button $w.buttons.apply -text "Apply" -command "manageCanvasApply $w" + ttk::button $w.buttons.cancel -text "Cancel" -command "destroy $w" + pack $w.buttons.apply $w.buttons.cancel -side left -expand 1 + pack $w.buttons -side bottom -fill x -pady 2m + + bind $w "destroy $w" + bind $w "manageCanvasApply $w" + +} + +# Boeing: manage canvases helper +# called when a canvas in the list is double-clicked +proc manageCanvasSwitch { w } { + global canvas_list curcanvas + set i [$w.canv.cl curselection] + if {$i == ""} { return} + set i [lindex $i 0] + set item [$w.canv.cl get $i] + + foreach canvas $canvas_list { + if {[getCanvasName $canvas] == $item} { + $w.name.ent delete 0 end + $w.name.ent insert 0 $item + set curcanvas $canvas + switchCanvas none + return + } + } +} + +# manage canvases helper +# handle the move up/down buttons for the canvas selection window +proc manageCanvasUpDown { w dir } { + global canvas_list + # get the currently selected item + set i [$w.canv.cl curselection] + if {$i == ""} { return} + set i [lindex $i 0] + set item [$w.canv.cl get $i] + + if {$dir == "down" } { + set max [expr {[llength $canvas_list] - 1}] + if {$i >= $max } { return } + set newi [expr {$i + 1}] + } else { + if {$i <= 0} { return } + set newi [expr {$i - 1}] + } + + # change the position + $w.canv.cl delete $i + $w.canv.cl insert $newi $item + $w.canv.cl selection set $newi + $w.canv.cl see $newi + + # update hidden list of canvas numbers + set new_canvas_list [$w.list cget -text] + set item [lindex $new_canvas_list $i] + set new_canvas_list [lreplace $new_canvas_list $i $i] + set new_canvas_list [linsert $new_canvas_list $newi $item] + $w.list configure -text $new_canvas_list +} + +# manage canvases helper +# called when apply button is pressed - changes the order of the canvases +proc manageCanvasApply { w } { + global canvas_list curcanvas changed + # we calculated this list earlier, making life easier here + set new_canvas_list [$w.list cget -text] + if 
{$canvas_list != $new_canvas_list} { + set canvas_list $new_canvas_list + } + set newname [$w.name.ent get] + destroy $w + if { $newname != [getCanvasName $curcanvas] } { + set changed 1 + } + setCanvasName $curcanvas $newname + switchCanvas none + updateUndoLog +} + +proc setCanvasScale { canvas scale } { + global $canvas + + set i [lsearch [set $canvas] "scale *"] + if { $i >= 0 } { + set $canvas [lreplace [set $canvas] $i $i "scale $scale"] + } else { + set $canvas [linsert [set $canvas] 1 "scale $scale"] + } +} + +proc getCanvasScale { canvas } { + global $canvas g_prefs + + set entry [lrange [lsearch -inline [set $canvas] "scale *"] 1 end] + set scale [string trim $entry \{\}] + if { $scale == "" } { + if { ![info exists g_prefs(gui_canvas_scale)] } { return 150.0 } + return "$g_prefs(gui_canvas_scale)" + } else { + return $scale + } +} + +proc setCanvasRefPoint { canvas refpt } { + global $canvas + + set i [lsearch [set $canvas] "refpt *"] + if { $i >= 0 } { + set $canvas [lreplace [set $canvas] $i $i "refpt {$refpt}"] + } else { + set $canvas [linsert [set $canvas] 1 "refpt {$refpt}"] + } +} + +proc getCanvasRefPoint { canvas } { + global $canvas g_prefs DEFAULT_REFPT + + set entry [lrange [lsearch -inline [set $canvas] "refpt *"] 1 end] + set altitude [string trim $entry \{\}] + if { $altitude == "" } { + if { ![info exists g_prefs(gui_canvas_refpt)] } { + return $DEFAULT_REFPT + } + return "$g_prefs(gui_canvas_refpt)" + } else { + return $altitude + } +} + +# from http://wiki.tcl.tk/1415 (MAK) +proc canvasSee { hWnd items } { + set box [eval $hWnd bbox $items] + + if {$box == ""} { return } + + if {[string match {} [$hWnd cget -scrollregion]] } { + # People really should set -scrollregion you know... + foreach {x y x1 y1} $box break + + set x [expr round(2.5 * ($x1+$x) / [winfo width $hWnd])] + set y [expr round(2.5 * ($y1+$y) / [winfo height $hWnd])] + + $hWnd xview moveto 0 + $hWnd yview moveto 0 + $hWnd xview scroll $x units + $hWnd yview scroll $y units + } else { + # If -scrollregion is set properly, use this + foreach { x y x1 y1 } $box break + foreach { top btm } [$hWnd yview] break + foreach { left right } [$hWnd xview] break + foreach { p q xmax ymax } [$hWnd cget -scrollregion] break + + set xpos [expr (($x1+$x) / 2.0) / $xmax - ($right-$left) / 2.0] + set ypos [expr (($y1+$y) / 2.0) / $ymax - ($btm-$top) / 2.0] + + $hWnd xview moveto $xpos + $hWnd yview moveto $ypos + } +} diff --git a/gui/cfgparse.tcl b/gui/cfgparse.tcl new file mode 100644 index 00000000..41f25594 --- /dev/null +++ b/gui/cfgparse.tcl @@ -0,0 +1,1147 @@ +# +# Copyright 2005-2008 University of Zagreb, Croatia. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +# SUCH DAMAGE. +# +# This work was supported in part by the Croatian Ministry of Science +# and Technology through the research contract #IP-2003-143. +# + +#****h* imunes/cfgparse.tcl +# NAME +# cfgparse.tcl -- file used for parsing the configuration +# FUNCTION +# This module is used for parsing the configuration, i.e. reading the +# configuration from a file or a string and writing the configuration +# to a file or a string. This module also contains a function for returning +# a new ID for nodes, links and canvases. +#**** + +#****f* nodecfg.tcl/dumpputs +# NAME +# dumpputs -- puts a string to a file or a string configuration +# SYNOPSIS +# dumpputs $method $destination $string +# FUNCTION +# Puts a string to the file or appends to the string configuration (used for +# undo functions); the choice depends on the value of the method parameter. +# INPUTS +# * method -- method used. Possible values are file (if saving the string +# to the file) and string (if appending the string configuration) +# * dest -- destination used. File_id for files, and string name for string +# configuration +# * string -- the string that is inserted to a file or appended to the string +# configuration +#**** + +proc dumpputs {method dest string} { + switch -exact -- $method { + file { + puts $dest $string + } + string { + global $dest + append $dest "$string +" + } + } +} + +#****f* nodecfg.tcl/dumpCfg +# NAME +# dumpCfg -- puts the current configuration to a file or a string +# SYNOPSIS +# dumpCfg $method $destination +# FUNCTION +# Writes the working (current) configuration to a file or a string. +# INPUTS +# * method -- method used. Possible values are file (saving the current config +# to the file) and string (saving the current config in a string) +# * dest -- destination used.
File_id for files, and string name for string +# configurations +#**** + +proc dumpCfg {method dest} { + global node_list plot_list link_list canvas_list annotation_list + + global g_comments + if { [info exists g_comments] && $g_comments != "" } { + dumpputs $method $dest "comments \{" + foreach line [split $g_comments "\n"] { dumpputs $method $dest "$line" } + dumpputs $method $dest "\}" + dumpputs $method $dest "" + } + + foreach node $node_list { + global $node + upvar 0 $node lnode + dumpputs $method $dest "node $node \{" + foreach element $lnode { + if { "[lindex $element 0]" == "network-config" } { + dumpputs $method $dest " network-config \{" + foreach line [lindex $element 1] { + dumpputs $method $dest " $line" + } + dumpputs $method $dest " \}" + } elseif { "[lindex $element 0]" == "custom-config" } { + dumpputs $method $dest " custom-config \{" + foreach line [lindex $element 1] { + if { $line != {} } { + if { [catch {set str [lindex $line 0]} err] } { + puts "error loading config: $err" + puts "problem section: [lindex $element 0]" + puts "problem line: $line" + set str "" + } + if { $str == "config" } { + dumpputs $method $dest " config \{" + foreach element [lindex $line 1] { + dumpputs $method $dest " $element" + } + dumpputs $method $dest " \}" + } else { + dumpputs $method $dest " $line" + } + } + } + dumpputs $method $dest " \}" + } elseif { "[lindex $element 0]" == "ipsec-config" } { + dumpputs $method $dest " ipsec-config \{" + foreach line [lindex $element 1] { + if { $line != {} } { + dumpputs $method $dest " $line" + } + } + dumpputs $method $dest " \}" + } elseif { "[lindex $element 0]" == "custom-pre-config-commands" } { + #Boeing custom pre config commands + dumpputs $method $dest " custom-pre-config-commands \{" + foreach line [lindex $element 1] { + dumpputs $method $dest " $line" + } + dumpputs $method $dest " \}" + } elseif { "[lindex $element 0]" == "custom-post-config-commands" } { + #Boeing custom post config commands + dumpputs $method $dest " custom-post-config-commands \{" + foreach line [lindex $element 1] { + dumpputs $method $dest " $line" + } + dumpputs $method $dest " \}" + } elseif { "[lindex $element 0]" == "ine-config" } { + # Boeing: INE config support + dumpputs $method $dest " ine-config \{" + foreach line [lindex $element 1] { + dumpputs $method $dest " $line" + } + dumpputs $method $dest " \}" + # end Boeing + } else { + dumpputs $method $dest " $element" + } + } + dumpputs $method $dest "\}" + dumpputs $method $dest "" + } + + foreach obj "link annotation canvas plot" { + upvar 0 ${obj}_list obj_list + foreach elem $obj_list { + global $elem + upvar 0 $elem lelem + dumpputs $method $dest "$obj $elem \{" + foreach element $lelem { + dumpputs $method $dest " $element" + } + dumpputs $method $dest "\}" + dumpputs $method $dest "" + } + } + + global g_traffic_flows + if { [info exists g_traffic_flows] && [llength $g_traffic_flows] > 0 } { + dumpputs $method $dest "traffic \{" + foreach flow $g_traffic_flows { + dumpputs $method $dest " $flow" + } + dumpputs $method $dest "\}" + dumpputs $method $dest "" + } + + global g_hook_scripts + if { [info exists g_hook_scripts] && [llength $g_hook_scripts] > 0 } { + foreach hook $g_hook_scripts { + set name [lindex $hook 0] + set state [lindex $hook 1] + set script [lindex $hook 2] + dumpputs $method $dest "hook $state:$name \{" + # remove the final newline here because dumpputs adds a + # newline automatically + if {[string index $script end] == "\n"} { + set script [string replace $script end end] + } 
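            # the hook body is written verbatim between "hook state:name {"
            # and the closing brace; loadCfg later splits state:name apart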
+ dumpputs $method $dest $script + dumpputs $method $dest "\}" + dumpputs $method $dest "" + } + } + + dumpGlobalOptions $method $dest + + # session options + dumpputs $method $dest "option session \{" + foreach kv [getSessionOptionsList] { dumpputs $method $dest " $kv" } + dumpputs $method $dest "\}" + dumpputs $method $dest "" +} + +proc dumpGlobalOptions { method dest } { + global showIfNames showNodeLabels showLinkLabels + global showIfIPaddrs showIfIPv6addrs + global showBkgImage showGrid showAnnotations + global showAPI + global g_view_locked + global g_traffic_start_opt + global mac_addr_start + + dumpputs $method $dest "option global \{" + if {$showIfNames == 0} { + dumpputs $method $dest " interface_names no" + } else { + dumpputs $method $dest " interface_names yes" } + if {$showIfIPaddrs == 0} { + dumpputs $method $dest " ip_addresses no" + } else { + dumpputs $method $dest " ip_addresses yes" } + if {$showIfIPv6addrs == 0} { + dumpputs $method $dest " ipv6_addresses no" + } else { + dumpputs $method $dest " ipv6_addresses yes" } + if {$showNodeLabels == 0} { + dumpputs $method $dest " node_labels no" + } else { + dumpputs $method $dest " node_labels yes" } + if {$showLinkLabels == 0} { + dumpputs $method $dest " link_labels no" + } else { + dumpputs $method $dest " link_labels yes" } + if {$showAPI == 0} { + dumpputs $method $dest " show_api no" + } else { + dumpputs $method $dest " show_api yes" } + if {$showBkgImage == 0} { + dumpputs $method $dest " background_images no" + } else { + dumpputs $method $dest " background_images yes" } + if {$showAnnotations == 0} { + dumpputs $method $dest " annotations no" + } else { + dumpputs $method $dest " annotations yes" } + if {$showGrid == 0} { + dumpputs $method $dest " grid no" + } else { + dumpputs $method $dest " grid yes" } + if {$g_view_locked == 1} { + dumpputs $method $dest " locked yes" } + if { [info exists g_traffic_start_opt] } { + dumpputs $method $dest " traffic_start $g_traffic_start_opt" + } + if { [info exists mac_addr_start] && $mac_addr_start > 0 } { + dumpputs $method $dest " mac_address_start $mac_addr_start" + } + dumpputs $method $dest "\}" + dumpputs $method $dest "" +} + +# get the global options into a list of key=value pairs +proc getGlobalOptionList {} { + global tmp + set tmp "" + dumpGlobalOptions string tmp ;# put "options global {items}" into tmp + set items [lindex $tmp 2] + return [listToKeyValues $items] +} + +proc setGlobalOption { field value } { + global showIfNames showNodeLabels showLinkLabels + global showIfIPaddrs showIfIPv6addrs + global showBkgImage showGrid showAnnotations + global showAPI + global mac_addr_start + global g_traffic_start_opt + global g_view_locked + + switch -exact -- $field { + interface_names { + if { $value == "no" } { + set showIfNames 0 + } elseif { $value == "yes" } { + set showIfNames 1 + } + } + ip_addresses { + if { $value == "no" } { + set showIfIPaddrs 0 + } elseif { $value == "yes" } { + set showIfIPaddrs 1 + } + } + ipv6_addresses { + if { $value == "no" } { + set showIfIPv6addrs 0 + } elseif { $value == "yes" } { + set showIfIPv6addrs 1 + } + } + node_labels { + if { $value == "no" } { + set showNodeLabels 0 + } elseif { $value == "yes" } { + set showNodeLabels 1 + } + } + link_labels { + if { $value == "no" } { + set showLinkLabels 0 + } elseif { $value == "yes" } { + set showLinkLabels 1 + } + } + show_api { + if { $value == "no" } { + set showAPI 0 + } elseif { $value == "yes" } { + set showAPI 1 + } + } + background_images { + if { $value == "no" } { + set 
showBkgImage 0 + } elseif { $value == "yes" } { + set showBkgImage 1 + } + } + annotations { + if { $value == "no" } { + set showAnnotations 0 + } elseif { $value == "yes" } { + set showAnnotations 1 + } + } + grid { + if { $value == "no" } { + set showGrid 0 + } elseif { $value == "yes" } { + set showGrid 1 + } + } + locked { + if { $value == "yes" } { + set g_view_locked 1 + } else { + set g_view_locked 0 + } + } + mac_address_start { + set mac_addr_start $value + } + traffic_start { + set g_traffic_start_opt $value + } + } +} + +# reset global vars when opening a new file +proc cleanupGUIState {} { + global node_list link_list plot_list canvas_list annotation_list + global mac_addr_start g_comments + global g_traffic_flows g_traffic_start_opt g_hook_scripts + global g_view_locked + + set node_list {} + set link_list {} + set annotation_list {} + set plot_list {} + set canvas_list {} + set g_traffic_flows "" + set g_traffic_start_opt 0 + set g_hook_scripts "" + set g_comments "" + set g_view_locked 0 + resetSessionOptions +} + +#****f* nodecfg.tcl/loadCfg +# NAME +# loadCfg -- loads the current configuration. +# SYNOPSIS +# loadCfg $cfg +# FUNCTION +# Loads the configuration written in the cfg string to a current +# configuration. +# INPUTS +# * cfg -- string containing the new working configuration. +#**** + +proc loadCfg { cfg } { + global node_list plot_list link_list canvas_list annotation_list + global g_traffic_flows g_traffic_start_opt g_hook_scripts + global g_view_locked + global g_comments + + # maximum coordinates + set maxX 0 + set maxY 0 + set do_upgrade [upgradeOldConfig cfg] + if { $do_upgrade == "no"} { return } + + # Cleanup first + cleanupGUIState + set class "" + set object "" + foreach entry $cfg { + if {"$class" == ""} { + set class $entry + continue + } elseif {"$object" == ""} { + set object $entry + if {"$class" == "node"} { + lappend node_list $object + } elseif {"$class" == "link"} { + lappend link_list $object + } elseif {"$class" == "canvas"} { + lappend canvas_list $object + } elseif {"$class" == "plot"} { + lappend plot_list $object + } elseif {"$class" == "option"} { + # do nothing + } elseif {"$class" == "traffic"} { ;# save traffic flows + set g_traffic_flows [split [string trim $object] "\n"] + set class ""; set object ""; continue + } elseif {"$class" == "script"} { + # global_script (old config) becomes a runtime hook + set name "runtime_hook.sh" + set script [string trim $object] + lappend g_hook_scripts [list $name 4 $script] ;# 4=RUNTIME_STATE + set class ""; set object ""; continue + } elseif {"$class" == "hook"} { + continue + } elseif {"$class" == "comments"} { + set g_comments [string trim $object] + set class ""; set object ""; continue + } elseif {"$class" == "annotation"} { + lappend annotation_list $object + } else { + puts "configuration parsing error: unknown object class $class" + #exit 1 + } + # create an empty global variable named object for most objects + global $object + set $object {} + continue + } else { + set line [concat $entry] + # uses 'key=value' instead of 'key value' + if { $object == "session" } { + # 'key=value', values with space needs quoting 'key={space val}' + setSessionOptions "" [split $line "\n"] + set class "" + set object "" + continue + } + # extracts "field { value }" elements from line + if { [catch { set tmp [llength $line] } e] } { + puts "*** Error with line ('$e'):\n$line" + puts "*** Line will be skipped. This is a Tcl limitation, " + puts "*** consider using XML or fixing with whitespace." 
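                # an entry that is not a well-formed Tcl list (e.g. an
                # unbalanced brace inside a value) cannot be walked with
                # llength, so it is skipped rather than aborting the load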
+ continue + } + while {[llength $line] >= 2} { + set field [lindex $line 0] + if {"$field" == ""} { + set line [lreplace $line 0 0] + continue + } + + # consume first two list elements from line + set value [lindex $line 1] + set line [lreplace $line 0 1] + + if {"$class" == "node"} { + switch -exact -- $field { + type { + lappend $object "type $value" + } + mirror { + lappend $object "mirror $value" + } + model { + lappend $object "model $value" + } + cpu { + lappend $object "cpu {$value}" + } + interface-peer { + lappend $object "interface-peer {$value}" + } + network-config { + set cfg "" + foreach zline [split $value { +}] { + if { [string index "$zline" 0] == " " } { + set zline [string replace "$zline" 0 0] + } + lappend cfg $zline + } + set cfg [lrange $cfg 1 [expr {[llength $cfg] - 2}]] + lappend $object "network-config {$cfg}" + } + custom-enabled { + lappend $object "custom-enabled $value" + } + custom-command { + lappend $object "custom-command {$value}" + } + custom-config { + set cfg "" + set have_config 0 + set ccfg {} + foreach zline [split $value "\n"] { + if { [string index "$zline" 0] == \ + " " } { + # remove leading tab character + set zline [string replace "$zline" 0 0] + } + + # flag for config lines + if { $zline == "config \{" } { + set have_config 1 + # collect custom config lines into list + } elseif { $have_config == 1 } { + lappend ccfg $zline + # add non-config lines + } else { + lappend cfg $zline + } + } + # chop off last brace in config { } block and add it + if { $have_config } { + set ccfg [lrange $ccfg 0 \ + [expr {[llength $ccfg] - 3}]] + lappend cfg [list config $ccfg] + } + #set cfg [lrange $cfg 1 [expr {[llength $cfg] - 2}]] + lappend $object "custom-config {$cfg}" + } + ipsec-enabled { + lappend $object "ipsec-enabled $value" + } + ipsec-config { + set cfg "" + + foreach zline [split $value { +}] { + if { [string index "$zline" 0] == " " } { + set zline [string replace "$zline" 0 0] + } + lappend cfg $zline + } + set cfg [lrange $cfg 1 [expr {[llength $cfg] - 2}]] + lappend $object "ipsec-config {$cfg}" + } + iconcoords { + checkMaxCoords $value maxX maxY + lappend $object "iconcoords {$value}" + } + labelcoords { + checkMaxCoords $value maxX maxY + lappend $object "labelcoords {$value}" + } + canvas { + lappend $object "canvas $value" + } + hidden { + lappend $object "hidden $value" + } + /* { + set comment "$field $value" + foreach c $line { + lappend comment $c + # consume one element from line + set line [lreplace $line 0 0] + if { $c == "*/" } { break } + } + lappend $object "$comment" + } + + custom-pre-config-commands { + # Boeing - custom pre config commands + set cfg "" + foreach zline [split $value { +}] { + if { [string index "$zline" 0] == " " } { + set zline [string replace "$zline" 0 0] + } + lappend cfg $zline + } + set cfg [lrange $cfg 1 [expr [llength $cfg] - 2]] + lappend $object "custom-pre-config-commands {$cfg}" + } + custom-post-config-commands { + # Boeing - custom post config commands + set cfg "" + foreach zline [split $value { +}] { + if { [string index "$zline" 0] == " " } { + set zline [string replace "$zline" 0 0] + } + lappend cfg $zline + } + set cfg [lrange $cfg 1 [expr [llength $cfg] - 2]] + lappend $object "custom-post-config-commands {$cfg}" + } + custom-image { + # Boeing - custom-image + lappend $object "custom-image $value" + } + ine-config { + # Boeing - INE + set cfg "" + foreach zline [split $value { +}] { + if { [string index "$zline" 0] == " " } { + set zline [string replace "$zline" 0 0] + } + lappend cfg 
$zline + } + set cfg [lrange $cfg 1 [expr [llength $cfg] - 2]] + lappend $object "ine-config {$cfg}" + } + tunnel-peer { + # Boeing - Span tunnels + lappend $object "tunnel-peer {$value}" + } + range { + # Boeing - WLAN range + lappend $object "range $value" + } + bandwidth { + # Boeing - WLAN bandwidth + lappend $object "bandwidth $value" + } + cli-enabled { + puts "Warning: cli-enabled setting is deprecated" + } + delay { + # Boeing - WLAN delay + lappend $object "delay $value" + } + ber { + # Boeing - WLAN BER + lappend $object "ber $value" + } + location { + # Boeing - node location + lappend $object "location $value" + } + os { + # Boeing - node OS + # just ignore it, set at runtime + } + services { + lappend $object "services {$value}" + } + + default { + # Boeing - added warning + puts -nonewline "config file warning: unknown confi" + puts "guration item '$field' ignored for $object" + } + } + } elseif {"$class" == "plot"} { + switch -exact -- $field { + name { + lappend $object "name $value" + } + height { + lappend $object "height $value" + } + width { + lappend $object "width $value" + } + x { + lappend $object "x $value" + } + y { + lappend $object "y $value" + } + color { + lappend $object "color $value" + } + } + } elseif {"$class" == "link"} { + switch -exact -- $field { + nodes { + lappend $object "nodes {$value}" + } + mirror { + lappend $object "mirror $value" + } + bandwidth - + delay - + ber - + duplicate - + jitter { + if { [llength $value] > 1 } { ;# down/up-stream + lappend $object "$field {$value}" + } else { + lappend $object "$field $value" + } + } + color { + lappend $object "color $value" + } + width { + lappend $object "width $value" + } + default { + # this enables opaque data to be stored along with + # each link (any key is stored) + lappend $object "$field $value" + # Boeing - added warning + #puts -nonewline "config file warning: unknown conf" + #puts "iguration item '$field' ignored for $object" + } + } + } elseif {"$class" == "canvas"} { + switch -exact -- $field { + name { + lappend $object "name {$value}" + } + size { + lappend $object "size {$value}" + } + bkgImage { + lappend $object "wallpaper {$value}" + } + wallpaper { + lappend $object "wallpaper {$value}" + } + wallpaper-style { + lappend $object "wallpaper-style {$value}" + } + scale { + lappend $object "scale {$value}" + } + refpt { + lappend $object "refpt {$value}" + } + } + } elseif {"$class" == "option"} { + setGlobalOption $field $value + } elseif {"$class" == "annotation"} { + switch -exact -- $field { + type { + lappend $object "type $value" + } + iconcoords { + lappend $object "iconcoords {$value}" + } + color { + lappend $object "color $value" + } + border { + lappend $object "border $value" + } + label { + lappend $object "label {$value}" + } + labelcolor { + lappend $object "labelcolor $value" + } + size { + lappend $object "size $value" + } + canvas { + lappend $object "canvas $value" + } + font { + lappend $object "font {$value}" + } + fontfamily { + lappend $object "fontfamily {$value}" + } + fontsize { + lappend $object "fontsize {$value}" + } + effects { + lappend $object "effects {$value}" + } + width { + lappend $object "width $value" + } + rad { + lappend $object "rad $value" + } + } ;# end switch + } elseif {"$class" == "hook"} { + set state_name [split $object :] + if { [llength $state_name] != 2 } { + puts "invalid hook in config file" + continue + } + set state [lindex $state_name 0] + set name [lindex $state_name 1] + set lines [split $entry "\n"] + set lines [lreplace 
$lines 0 0] ;# chop extra newline + set lines [join $lines "\n"] + set hook [list $name $state $lines] + lappend g_hook_scripts $hook + set line "" ;# exit this while loop + } ;#endif class + } + } + set class "" + set object "" + } + + # + # Hack for comaptibility with old format files (no canvases) + # + if { $canvas_list == "" } { + set curcanvas [newCanvas ""] + foreach node $node_list { + setNodeCanvas $node $curcanvas + } + } + + + # auto resize canvas + set curcanvas [lindex $canvas_list 0] + set newX 0 + set newY 0 + if { $maxX > [lindex [getCanvasSize $curcanvas] 0] } { + set newX [expr {$maxX + 50}] + } + if { $maxY > [lindex [getCanvasSize $curcanvas] 1] } { + set newY [expr {$maxY + 50}] + } + if { $newX > 0 || $newY > 0 } { + if { $newX == 0 } { set newX [lindex [getCanvasSize $curcanvas] 0] } + if { $newY == 0 } { set newY [lindex [getCanvasSize $curcanvas] 1] } + setCanvasSize $curcanvas $newX $newY + } + + # extra upgrade steps + if { $do_upgrade == "yes" } { + upgradeNetworkConfigToServices + } + upgradeConfigRemoveNode0 + upgradeConfigServices + upgradeWlanConfigs +} + +#****f* nodecfg.tcl/newObjectId +# NAME +# newObjectId -- new object Id +# SYNOPSIS +# set obj_id [newObjectId $type] +# FUNCTION +# Returns the Id for a new object of the defined type. Supported types +# are node, link and canvas. The Id is in the form $mark$number. $mark is the +# first letter of the given type and $number is the first available number to +# that can be used for id. +# INPUTS +# * type -- the type of the new object. Can be node, link or canvas. +# RESULT +# * obj_id -- object Id in the form $mark$number. $mark is the +# first letter of the given type and $number is the first available number to +# that can be used for id. +#**** + +proc newObjectId { type } { + global node_list link_list annotation_list canvas_list + + set mark [string range [set type] 0 0] + set id 1 ;# start numbering at 1, not 0 + while {[lsearch [set [set type]_list] "$mark$id"] != -1} { + incr id + } + return $mark$id +} + + + +# Boeing: pick a new link id for temporary newlinks +proc newlinkId { } { + global link_list + set id [newObjectId link] + set mark "l" + set id 0 + + # alllinks contains a list of all existing and new links + set alllinks $link_list + foreach newlink [.c find withtag "newlink"] { + set newlinkname [lindex [.c gettags $newlink] 1] + lappend alllinks $newlinkname + } + + while {[lsearch $alllinks "$mark$id"] != -1 } { + incr id + } + return $mark$id +} + +# Boeing: helper fn to determine canvas size during load +proc checkMaxCoords { str maxXp maxYp } { + upvar 1 $maxXp maxX + upvar 1 $maxYp maxY + set x [lindex $str 0] + set y [lindex $str 1] + if { $x > $maxX } { + set maxX $x + } + if { $y > $maxY } { + set maxY $y + } + if { [llength $str] == 4 } { + set x [lindex $str 2] + set y [lindex $str 3] + if { $x > $maxX } { + set maxX $x + } + if { $y > $maxY } { + set maxY $y + } + } +} + +# Boeing: pick a router for OSPF +proc newRouterId { type node } { + set mark [string range [set type] 0 0] + for { set id 0 } { $node != "$mark$id" } { incr id } { + } + return "0.0.0.${id}" +} +# end Boeing + +# Boeing: load servers.conf file into exec_servers array +proc loadServersConf { } { + global CONFDIR exec_servers DEFAULT_API_PORT + set confname "$CONFDIR/servers.conf" + if { [catch { set f [open "$confname" r] } ] } { + puts "Creating a default $confname" + if { [catch { set f [open "$confname" w+] } ] } { + puts "***Warning: could not create a default $confname file." 
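+ # could not create a default file; return without (re)loading the
+ # server list. Each servers.conf line holds "name ip port" separated
+ # by whitespace; lines beginning with '#' are treated as comments.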
+ return + } + puts $f "core1 192.168.0.2 $DEFAULT_API_PORT" + puts $f "core2 192.168.0.3 $DEFAULT_API_PORT" + close $f + if { [catch { set f [open "$confname" r] } ] } { + return + } + } + + array unset exec_servers + + while { [ gets $f line ] >= 0 } { + if { [string range $line 0 0] == "#" } { continue } ;# skip comments + set l [split $line] ;# parse fields separated by whitespace + set name [lindex $l 0] + set ip [lindex $l 1] + set port [lindex $l 2] + set sock -1 + if { $name == "" } { continue } ;# blank name + # load array of servers + array set exec_servers [list $name [list $ip $port $sock]] + } + close $f +} +# end Boeing + +# Boeing: write servers.conf file from exec_servers array +proc writeServersConf { } { + global CONFDIR exec_servers + set confname "$CONFDIR/servers.conf" + if { [catch { set f [open "$confname" w] } ] } { + puts "***Warning: could not write servers file: $confname" + return + } + + set header "# servers.conf: list of CORE emulation servers for running" + set header "$header remotely." + puts $f $header + foreach server [lsort -dictionary [array names exec_servers]] { + set ip [lindex $exec_servers($server) 0] + set port [lindex $exec_servers($server) 1] + puts $f "$server $ip $port" + } + close $f +} +# end Boeing + +# display the preferences dialog +proc popupPrefs {} { + global EDITORS TERMS + + set wi .core_prefs + catch { destroy $wi } + toplevel $wi + + wm transient $wi . + wm resizable $wi 0 0 + wm title $wi "Preferences" + + global g_prefs g_prefs_old + array set g_prefs_old [array get g_prefs] + + # + # Paths + # + labelframe $wi.dirs -borderwidth 4 -text "Paths" -relief raised + frame $wi.dirs.conf + label $wi.dirs.conf.label -text "Default configuration file path:" + entry $wi.dirs.conf.entry -bg white -width 40 \ + -textvariable g_prefs(default_conf_path) + pack $wi.dirs.conf.label $wi.dirs.conf.entry -side left + pack $wi.dirs.conf -side top -anchor w -padx 4 -pady 4 + + frame $wi.dirs.mru + label $wi.dirs.mru.label -text "Number of recent files to remember:" + entry $wi.dirs.mru.num -bg white -width 3 \ + -textvariable g_prefs(num_recent) + button $wi.dirs.mru.clear -text "Clear recent files" \ + -command "addFileToMrulist \"\"" + pack $wi.dirs.mru.label $wi.dirs.mru.num $wi.dirs.mru.clear -side left + pack $wi.dirs.mru -side top -anchor w -padx 4 -pady 4 + + pack $wi.dirs -side top -fill x + + # + # Window + # + labelframe $wi.win -borderwidth 4 -text "GUI Window" -relief raised + frame $wi.win.win + checkbutton $wi.win.win.savepos -text "remember window position" \ + -variable g_prefs(gui_save_pos) + checkbutton $wi.win.win.savesiz -text "remember window size" \ + -variable g_prefs(gui_save_size) + pack $wi.win.win.savepos $wi.win.win.savesiz -side left -anchor w -padx 4 + pack $wi.win.win -side top -anchor w -padx 4 -pady 4 + + frame $wi.win.a + checkbutton $wi.win.a.snaptogrid -text "snap to grid" \ + -variable g_prefs(gui_snap_grid) + checkbutton $wi.win.a.showtooltips -text "show tooltips" \ + -variable g_prefs(gui_show_tooltips) + pack $wi.win.a.snaptogrid $wi.win.a.showtooltips \ + -side left -anchor w -padx 4 + pack $wi.win.a -side top -anchor w -padx 4 -pady 4 + + frame $wi.win.canv + label $wi.win.canv.label -text "Default canvas size:" + entry $wi.win.canv.x -bg white -width 5 -textvariable g_prefs(gui_canvas_x) + entry $wi.win.canv.y -bg white -width 5 -textvariable g_prefs(gui_canvas_y) + label $wi.win.canv.label2 -text "Default # of canvases:" + entry $wi.win.canv.num -bg white -width 5 \ + -textvariable 
g_prefs(gui_num_canvases) + pack $wi.win.canv.label $wi.win.canv.x $wi.win.canv.y \ + $wi.win.canv.label2 $wi.win.canv.num \ + -side left -anchor w -padx 4 + pack $wi.win.canv -side top -anchor w -padx 4 -pady 4 + pack $wi.win -side top -fill x + + # + # Programs + # + labelframe $wi.pr -borderwidth 4 -text "Programs" -relief raised + + frame $wi.pr.editor + label $wi.pr.editor.label -text "Text editor:" + set editors [linsert $EDITORS 0 "EDITOR"] + ttk::combobox $wi.pr.editor.combo -width 10 -exportselection 0 \ + -values $editors -textvariable g_prefs(gui_text_editor) + label $wi.pr.editor.label2 -text "Terminal program:" + set terms [linsert $TERMS 0 "TERM"] + ttk::combobox $wi.pr.editor.combo2 -width 20 -exportselection 0 \ + -values $terms -textvariable g_prefs(gui_term_prog) + pack $wi.pr.editor.label $wi.pr.editor.combo -padx 4 -pady 4 -side left + pack $wi.pr.editor.label2 $wi.pr.editor.combo2 -padx 4 -pady 4 -side left + pack $wi.pr.editor -side top -anchor w -padx 4 -pady 4 + + frame $wi.pr.3d + label $wi.pr.3d.label -text "3D GUI command:" + entry $wi.pr.3d.entry -bg white -width 40 -textvariable g_prefs(gui_3d_path) + pack $wi.pr.3d.label $wi.pr.3d.entry -side left -padx 4 -pady 4 + pack $wi.pr.3d -side top -anchor w -padx 4 -pady 4 + + pack $wi.pr -side top -fill x + + # + # Buttons at the bottom + # + frame $wi.bot -borderwidth 0 + button $wi.bot.apply -text "Save" -command "savePrefsFile; destroy $wi" + button $wi.bot.defaults -text "Load defaults" -command initDefaultPrefs + button $wi.bot.cancel -text "Cancel" -command { + global g_prefs g_prefs_old + array set g_prefs [array get g_prefs_old] + destroy .core_prefs + } + pack $wi.bot.cancel $wi.bot.defaults $wi.bot.apply -side right + pack $wi.bot -side bottom -fill x + after 100 { + catch { grab .core_prefs } + } +} + +# initialize preferences array with default values +proc initDefaultPrefs {} { + global g_prefs CONFDIR SBINDIR DEFAULT_REFPT tcl_platform + + # variable expansions must be done here + array set g_prefs [list default_conf_path "$CONFDIR/configs"] + array set g_prefs [list gui_canvas_refpt "$DEFAULT_REFPT"] + set shell "bash" + array set g_prefs [list shell $shell] + array set g_prefs [list gui_text_editor [get_text_editor true]] + array set g_prefs [list gui_term_prog [get_term_prog true]] + setDefaultAddrs ipv4 + setDefaultAddrs ipv6 + # preferences will be reordered alphabetically + array set g_prefs { + num_recent 4 + log_path "/tmp/core_logs" + gui_save_pos 0 + gui_save_size 0 + gui_snap_grid 0 + gui_show_tooltips 1 + gui_canvas_x 1000 + gui_canvas_y 750 + gui_canvas_scale 150.0 + gui_num_canvases 1 + gui_3d_path "/usr/local/bin/sdt3d.sh" + } + # add new preferences above; keep this at the end of the file +} + + diff --git a/daemon/core/gui/data/backgrounds/sample1-bg.gif b/gui/configs/sample1-bg.gif similarity index 100% rename from daemon/core/gui/data/backgrounds/sample1-bg.gif rename to gui/configs/sample1-bg.gif diff --git a/gui/configs/sample1.imn b/gui/configs/sample1.imn new file mode 100644 index 00000000..912f1e71 --- /dev/null +++ b/gui/configs/sample1.imn @@ -0,0 +1,510 @@ +node n1 { + type router + model router + network-config { + hostname n1 + ! + interface eth1 + ip address 10.0.5.1/24 + ipv6 address a:5::1/64 + ! + interface eth0 + ip address 10.0.3.2/24 + ipv6 address a:3::2/64 + ! + } + canvas c1 + iconcoords {384.0 456.0} + labelcoords {384.0 484.0} + interface-peer {eth0 n2} + interface-peer {eth1 n15} +} + +node n2 { + type router + model router + network-config { + hostname n2 + ! 
+ interface eth2 + ip address 10.0.4.1/24 + ipv6 address a:4::1/64 + ! + interface eth1 + ip address 10.0.3.1/24 + ipv6 address a:3::1/64 + ! + interface eth0 + ip address 10.0.2.2/24 + ipv6 address a:2::2/64 + ! + } + canvas c1 + iconcoords {264.0 432.0} + labelcoords {264.0 460.0} + interface-peer {eth0 n3} + interface-peer {eth1 n1} + interface-peer {eth2 n15} +} + +node n3 { + type router + model router + network-config { + hostname n3 + ! + interface eth1 + ip address 10.0.2.1/24 + ipv6 address a:2::1/64 + ! + interface eth0 + ip address 10.0.1.1/24 + ipv6 address a:1::1/64 + ! + } + canvas c1 + iconcoords {120.0 360.0} + labelcoords {120.0 388.0} + interface-peer {eth0 n4} + interface-peer {eth1 n2} +} + +node n4 { + type lanswitch + network-config { + hostname n4 + ! + } + canvas c1 + iconcoords {192.0 252.0} + labelcoords {192.0 280.0} + interface-peer {e0 n3} + interface-peer {e1 n11} + interface-peer {e2 n12} + interface-peer {e3 n13} + interface-peer {e4 n14} +} + +node n5 { + type router + model mdr + network-config { + hostname n5 + ! + interface eth0 + ipv6 address a:0::3/128 + ip address 10.0.0.5/32 + ! + interface eth1 + ip address 10.0.6.2/24 + ipv6 address a:6::2/64 + ! + } + canvas c1 + iconcoords {540.0 348.0} + labelcoords {540.0 376.0} + interface-peer {eth0 n10} + interface-peer {eth1 n15} + services {zebra OSPFv2 OSPFv3MDR IPForward} + custom-config { + custom-config-id service:zebra + custom-command zebra + config { + files=('/usr/local/etc/quagga/Quagga.conf', 'quaggaboot.sh', ) + } + } + custom-config { + custom-config-id service:zebra:/usr/local/etc/quagga/Quagga.conf + custom-command /usr/local/etc/quagga/Quagga.conf + config { + interface eth0 + ip address 10.0.0.5/32 + ipv6 address a::3/128 + ipv6 ospf6 instance-id 65 + ipv6 ospf6 hello-interval 2 + ipv6 ospf6 dead-interval 6 + ipv6 ospf6 retransmit-interval 5 + ipv6 ospf6 network manet-designated-router + ipv6 ospf6 diffhellos + ipv6 ospf6 adjacencyconnectivity uniconnected + ipv6 ospf6 lsafullness mincostlsa + ! + interface eth1 + ip address 10.0.6.2/24 + !ip ospf hello-interval 2 + !ip ospf dead-interval 6 + !ip ospf retransmit-interval 5 + !ip ospf network point-to-point + ipv6 address a:6::2/64 + ! + router ospf + router-id 10.0.0.5 + network 10.0.0.5/32 area 0 + network 10.0.6.0/24 area 0 + redistribute connected metric-type 1 + redistribute ospf6 metric-type 1 + ! + router ospf6 + router-id 10.0.0.5 + interface eth0 area 0.0.0.0 + redistribute connected + redistribute ospf + ! + + + } + } +} + +node n6 { + type router + model mdr + network-config { + hostname n6 + ! + interface eth0 + ip address 10.0.0.6/32 + ipv6 address a:0::6/128 + ! + } + canvas c1 + iconcoords {780.0 228.0} + labelcoords {780.0 252.0} + interface-peer {eth0 n10} +} + +node n7 { + type router + model mdr + network-config { + hostname n7 + ! + interface eth0 + ip address 10.0.0.7/32 + ipv6 address a:0::7/128 + ! + } + canvas c1 + iconcoords {816.0 348.0} + labelcoords {816.0 372.0} + interface-peer {eth0 n10} +} + +node n8 { + type router + model mdr + network-config { + hostname n8 + ! + interface eth0 + ip address 10.0.0.8/32 + ipv6 address a:0::8/128 + ! + } + canvas c1 + iconcoords {672.0 420.0} + labelcoords {672.0 444.0} + interface-peer {eth0 n10} +} + +node n9 { + type router + model mdr + network-config { + hostname n9 + ! + interface eth0 + ip address 10.0.0.9/32 + ipv6 address a:0::9/128 + ! 
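+ ! eth0 joins the wlan10 wireless network; the /32 and /128 host
+ ! addresses follow the convention used by the other mdr (MANET) nodes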
+ } + canvas c1 + iconcoords {672.0 96.0} + labelcoords {672.0 120.0} + interface-peer {eth0 n10} +} + +node n10 { + type wlan + network-config { + hostname wlan10 + ! + interface wireless + ip address 10.0.0.0/32 + ipv6 address a:0::0/128 + ! + mobmodel + coreapi + basic_range + ns2script + ! + } + canvas c1 + iconcoords {852.0 564.0} + labelcoords {852.0 596.0} + interface-peer {e0 n8} + interface-peer {e1 n7} + interface-peer {e2 n5} + interface-peer {e3 n6} + interface-peer {e4 n9} + custom-config { + custom-config-id basic_range + custom-command {3 3 9 9 9} + config { + range=240 + bandwidth=54000000 + jitter=0 + delay=50000 + error=0 + } + } + custom-config { + custom-config-id ns2script + custom-command {10 3 11 10 10} + config { + file=sample1.scen + refresh_ms=50 + loop=1 + autostart=5 + map= + } + } +} + +node n11 { + type router + model PC + network-config { + hostname n11 + ! + interface eth0 + ip address 10.0.1.20/24 + ipv6 address a:1::20/64 + ! + } + canvas c1 + iconcoords {192.0 156.0} + labelcoords {192.0 188.0} + interface-peer {eth0 n4} +} + +node n12 { + type router + model PC + network-config { + hostname n12 + ! + interface eth0 + ip address 10.0.1.21/24 + ipv6 address a:1::21/64 + ! + } + canvas c1 + iconcoords {264.0 156.0} + labelcoords {264.0 188.0} + interface-peer {eth0 n4} +} + +node n13 { + type router + model PC + network-config { + hostname n13 + ! + interface eth0 + ip address 10.0.1.22/24 + ipv6 address a:1::22/64 + ! + } + canvas c1 + iconcoords {336.0 156.0} + labelcoords {336.0 188.0} + interface-peer {eth0 n4} +} + +node n14 { + type router + model host + network-config { + hostname n14 + ! + interface eth0 + ip address 10.0.1.10/24 + ipv6 address a:1::10/64 + ! + } + canvas c1 + iconcoords {348.0 228.0} + labelcoords {348.0 260.0} + interface-peer {eth0 n4} +} + +node n15 { + type router + model router + network-config { + hostname n15 + ! + interface eth2 + ip address 10.0.6.1/24 + ipv6 address a:6::1/64 + ! + interface eth1 + ip address 10.0.5.2/24 + ipv6 address a:5::2/64 + ! + interface eth0 + ip address 10.0.4.2/24 + ipv6 address a:4::2/64 + ! 
+ } + canvas c1 + iconcoords {384.0 312.0} + labelcoords {384.0 340.0} + interface-peer {eth0 n2} + interface-peer {eth1 n1} + interface-peer {eth2 n5} +} + +link l1 { + nodes {n10 n8} + bandwidth 11000000 + delay 25000 +} + +link l0 { + nodes {n10 n7} + bandwidth 11000000 + delay 25000 +} + +link l2 { + nodes {n10 n5} + bandwidth 11000000 + delay 25000 +} + +link l3 { + nodes {n10 n6} + bandwidth 11000000 + delay 25000 +} + +link l4 { + nodes {n10 n9} + bandwidth 11000000 + delay 25000 +} + +link l5 { + nodes {n3 n4} + bandwidth 100000000 +} + +link l6 { + delay 25000 + nodes {n3 n2} + bandwidth 100000000 +} + +link l7 { + nodes {n2 n1} + bandwidth 100000000 +} + +link l8 { + delay 50000 + nodes {n2 n15} + bandwidth 100000000 +} + +link l9 { + nodes {n1 n15} + bandwidth 100000000 +} + +link l10 { + nodes {n15 n5} + bandwidth 100000000 +} + +link l11 { + nodes {n4 n11} + bandwidth 100000000 +} + +link l12 { + nodes {n4 n12} + bandwidth 100000000 +} + +link l13 { + nodes {n4 n13} + bandwidth 100000000 +} + +link l14 { + nodes {n4 n14} + bandwidth 100000000 +} + +annotation a0 { + iconcoords {612.0 492.0} + type text + label {wireless network} + labelcolor black + fontfamily {Arial} + fontsize {12} + effects {bold} + canvas c1 +} + +annotation a1 { + iconcoords {142.0 112.0 393.0 291.0} + type rectangle + label {} + labelcolor black + fontfamily {Arial} + fontsize {12} + color #ebebde + width 1 + border #ffffff + rad 25 + canvas c1 +} + +annotation a2 { + iconcoords {492.0 384.0} + type text + label {gateway} + labelcolor black + fontfamily {Arial} + fontsize {12} + effects {bold} + canvas c1 +} + +canvas c1 { + name {Canvas1} + wallpaper-style {upperleft} + wallpaper {sample1-bg.gif} +} + +option global { + interface_names no + ip_addresses yes + ipv6_addresses no + node_labels yes + link_labels yes + ipsec_configs yes + exec_errors no + show_api no + background_images no + annotations yes + grid no + traffic_start 0 +} + +option session { +} + diff --git a/daemon/core/gui/data/mobility/sample1.scen b/gui/configs/sample1.scen similarity index 100% rename from daemon/core/gui/data/mobility/sample1.scen rename to gui/configs/sample1.scen diff --git a/gui/configs/sample10-kitchen-sink.imn b/gui/configs/sample10-kitchen-sink.imn new file mode 100644 index 00000000..dacee547 --- /dev/null +++ b/gui/configs/sample10-kitchen-sink.imn @@ -0,0 +1,848 @@ +comments { +Kitchen Sink +============ + +Contains every type of node available in CORE, except for physical (prouter) +machine types, and nodes distributed on other emulation servers. + +To get the RJ45 node to work, a test0 interface should first be created like this: + sudo ip link add name test0 type veth peer name test0.1 + +wlan15 uses the basic range model, while wlan24 uses EMANE 802.11 + +gateway nodes n11 and n20 are customized to redistribute routing between OSPFv2 and +OSPFv3 MDR (the MANET networks) +} + +node n1 { + type router + model router + network-config { + hostname n1 + ! + interface eth2 + ip address 10.0.11.2/24 + ipv6 address 2001:11::2/64 + ! + interface eth1 + ip address 10.0.3.1/24 + ipv6 address 2001:3::1/64 + ! + interface eth0 + ip address 10.0.2.1/24 + ipv6 address 2001:2::1/64 + ! + } + canvas c1 + iconcoords {288.0 264.0} + labelcoords {288.0 292.0} + interface-peer {eth0 n3} + interface-peer {eth1 n2} + interface-peer {eth2 n20} + custom-image $CORE_DATA_DIR/icons/normal/router_red.gif +} + +node n2 { + type router + model router + network-config { + hostname n2 + ! 
+ interface eth2 + ip address 10.0.5.2/24 + ipv6 address 2001:5::2/64 + ! + interface eth1 + ip address 10.0.3.2/24 + ipv6 address 2001:3::2/64 + ! + interface eth0 + ip address 10.0.0.1/24 + ipv6 address 2001:0::1/64 + ! + } + canvas c1 + iconcoords {576.0 264.0} + labelcoords {576.0 292.0} + interface-peer {eth0 n5} + interface-peer {eth1 n1} + interface-peer {eth2 n19} +} + +node n3 { + type router + model router + network-config { + hostname n3 + ! + interface eth3 + ip address 10.0.9.1/24 + ipv6 address 2001:9::1/64 + ! + interface eth2 + ip address 10.0.4.1/24 + ipv6 address 2001:4::1/64 + ! + interface eth1 + ip address 10.0.2.2/24 + ipv6 address 2001:2::2/64 + ! + interface eth0 + ip address 10.0.1.1/24 + ipv6 address 2001:1::1/64 + ! + } + canvas c1 + iconcoords {288.0 408.0} + labelcoords {288.0 436.0} + interface-peer {eth0 n4} + interface-peer {eth1 n1} + interface-peer {eth2 n19} + interface-peer {eth3 n11} + custom-image $CORE_DATA_DIR/icons/normal/router_red.gif +} + +node n4 { + type hub + network-config { + hostname n4 + ! + } + canvas c1 + iconcoords {216.0 528.0} + labelcoords {216.0 552.0} + interface-peer {e0 n3} + interface-peer {e1 n16} + interface-peer {e2 n17} + interface-peer {e3 n18} +} + +node n5 { + type lanswitch + network-config { + hostname n5 + ! + } + canvas c1 + iconcoords {672.0 264.0} + labelcoords {672.0 288.0} + interface-peer {e0 n2} + interface-peer {e1 n6} + interface-peer {e2 n7} + interface-peer {e3 n8} + interface-peer {e4 n25} +} + +node n6 { + type router + model host + network-config { + hostname n6 + ! + interface eth0 + ip address 10.0.0.10/24 + ipv6 address 2001:0::10/64 + ! + } + canvas c1 + iconcoords {792.0 216.0} + labelcoords {792.0 248.0} + interface-peer {eth0 n5} +} + +node n7 { + type router + model host + network-config { + hostname n7 + ! + interface eth0 + ip address 10.0.0.11/24 + ipv6 address 2001:0::11/64 + ! + } + canvas c1 + iconcoords {792.0 288.0} + labelcoords {792.0 320.0} + interface-peer {eth0 n5} +} + +node n8 { + type router + model host + network-config { + hostname n8 + ! + interface eth0 + ip address 10.0.0.12/24 + ipv6 address 2001:0::12/64 + ! + } + canvas c1 + iconcoords {792.0 360.0} + labelcoords {792.0 392.0} + interface-peer {eth0 n5} +} + +node n9 { + type rj45 + network-config { + hostname test0 + ! + } + canvas c1 + iconcoords {576.0 528.0} + labelcoords {576.0 556.0} + interface-peer {0 n19} +} + +node n10 { + type tunnel + network-config { + hostname 10.250.0.91 + ! + interface e0 + ip address 10.250.0.91/24 + ! + tunnel-type + UDP + ! + tunnel-tap + off + ! + tunnel-key + 1 + ! + } + canvas c1 + iconcoords {672.0 504.0} + labelcoords {672.0 536.0} + interface-peer {e0 n19} +} + +node n11 { + type router + model mdr + network-config { + hostname n11 + ! + interface eth1 + ip address 10.0.9.2/24 + ipv6 address 2001:9::2/64 + ! + interface eth0 + ip address 10.0.8.1/32 + ipv6 address 2001:8::1/128 + ! 
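+ ! gateway into the wlan15 MANET; the customized zebra config below
+ ! redistributes routes between OSPFv2 and OSPFv3 MDR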
+ } + canvas c1 + iconcoords {288.0 624.0} + labelcoords {288.0 656.0} + interface-peer {eth0 n15} + interface-peer {eth1 n3} + custom-config { + custom-config-id service:zebra + custom-command zebra + config { + files=('/usr/local/etc/quagga/Quagga.conf', 'quaggaboot.sh', '/usr/local/etc/quagga/vtysh.conf', ) + } + } + custom-config { + custom-config-id service:zebra:/usr/local/etc/quagga/Quagga.conf + custom-command /usr/local/etc/quagga/Quagga.conf + config { + interface eth0 + ip address 10.0.8.1/32 + ipv6 address 2001:8::1/128 + ipv6 ospf6 instance-id 65 + ipv6 ospf6 hello-interval 2 + ipv6 ospf6 dead-interval 6 + ipv6 ospf6 retransmit-interval 5 + ipv6 ospf6 network manet-designated-router + ipv6 ospf6 diffhellos + ipv6 ospf6 adjacencyconnectivity uniconnected + ipv6 ospf6 lsafullness mincostlsa + ! + interface eth1 + ip address 10.0.9.2/24 + ipv6 address 2001:9::2/64 + ! + router ospf + router-id 10.0.8.1 + network 10.0.8.1/32 area 0 + network 10.0.9.0/24 area 0 + redistribute connected metric-type 1 + redistribute ospf6 metric-type 1 + ! + router ospf6 + router-id 10.0.8.1 + interface eth0 area 0.0.0.0 + redistribute connected + redistribute ospf + ! + + } + } + services {zebra OSPFv2 OSPFv3MDR IPForward} +} + +node n12 { + type router + model mdr + network-config { + hostname n12 + ! + interface eth0 + ip address 10.0.8.2/32 + ipv6 address 2001:8::2/128 + ! + } + canvas c1 + iconcoords {504.0 792.0} + labelcoords {504.0 824.0} + interface-peer {eth0 n15} +} + +node n13 { + type router + model mdr + network-config { + hostname n13 + ! + interface eth0 + ip address 10.0.8.3/32 + ipv6 address 2001:8::3/128 + ! + } + canvas c1 + iconcoords {552.0 672.0} + labelcoords {552.0 704.0} + interface-peer {eth0 n15} +} + +node n14 { + type router + model mdr + network-config { + hostname n14 + ! + interface eth0 + ip address 10.0.8.4/32 + ipv6 address 2001:8::4/128 + ! + } + canvas c1 + iconcoords {720.0 792.0} + labelcoords {720.0 824.0} + interface-peer {eth0 n15} +} + +node n15 { + type wlan + network-config { + hostname wlan15 + ! + interface wireless + ip address 10.0.8.0/32 + ipv6 address 2001:8::0/128 + ! + mobmodel + coreapi + basic_range + ! + } + custom-config { + custom-config-id basic_range + custom-command {3 3 9 9 9} + config { + range=275 + bandwidth=54000000 + jitter=0 + delay=20000 + error=0 + } + } + canvas c1 + iconcoords {120.0 768.0} + labelcoords {120.0 800.0} + interface-peer {e0 n11} + interface-peer {e1 n12} + interface-peer {e2 n13} + interface-peer {e3 n14} +} + +node n16 { + type router + model PC + network-config { + hostname n16 + ! + interface eth0 + ip address 10.0.1.20/24 + ipv6 address 2001:1::20/64 + ! + } + canvas c1 + iconcoords {96.0 456.0} + labelcoords {96.0 488.0} + interface-peer {eth0 n4} +} + +node n17 { + type router + model PC + network-config { + hostname n17 + ! + interface eth0 + ip address 10.0.1.21/24 + ipv6 address 2001:1::21/64 + ! + } + canvas c1 + iconcoords {96.0 600.0} + labelcoords {96.0 632.0} + interface-peer {eth0 n4} +} + +node n18 { + type router + model PC + network-config { + hostname n18 + ! + interface eth0 + ip address 10.0.1.22/24 + ipv6 address 2001:1::22/64 + ! + } + canvas c1 + iconcoords {96.0 528.0} + labelcoords {96.0 560.0} + interface-peer {eth0 n4} +} + +node n19 { + type router + model router + network-config { + hostname n19 + ! + interface eth3 + ip address 10.0.7.1/24 + ipv6 address 2001:7::1/64 + ! + interface eth2 + ip address 10.0.6.1/24 + ipv6 address 2001:6::1/64 + ! 
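+ ! eth2 (10.0.6.1) attaches to the RJ45 node test0 and eth3 (10.0.7.1)
+ ! to the UDP tunnel node; see the interface-peer entries below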
+ interface eth1 + ip address 10.0.5.1/24 + ipv6 address 2001:5::1/64 + ! + interface eth0 + ip address 10.0.4.2/24 + ipv6 address 2001:4::2/64 + ! + } + canvas c1 + iconcoords {576.0 408.0} + labelcoords {576.0 436.0} + interface-peer {eth0 n3} + interface-peer {eth1 n2} + interface-peer {eth2 n9} + interface-peer {eth3 n10} +} + +node n20 { + type router + model mdr + network-config { + hostname n20 + ! + interface eth1 + ip address 10.0.11.1/24 + ipv6 address 2001:11::1/64 + ! + interface eth0 + ip address 10.0.10.1/32 + ipv6 address 2001:10::1/128 + ! + } + canvas c1 + iconcoords {288.0 168.0} + labelcoords {288.0 200.0} + interface-peer {eth0 n24} + interface-peer {eth1 n1} + custom-config { + custom-config-id service:zebra + custom-command zebra + config { + files=('/usr/local/etc/quagga/Quagga.conf', 'quaggaboot.sh', '/usr/local/etc/quagga/vtysh.conf', ) + } + } + custom-config { + custom-config-id service:zebra:/usr/local/etc/quagga/Quagga.conf + custom-command /usr/local/etc/quagga/Quagga.conf + config { + interface eth0 + ip address 10.0.10.1/32 + ipv6 address 2001:10::1/128 + ipv6 ospf6 instance-id 65 + ipv6 ospf6 hello-interval 2 + ipv6 ospf6 dead-interval 6 + ipv6 ospf6 retransmit-interval 5 + ipv6 ospf6 network manet-designated-router + ipv6 ospf6 diffhellos + ipv6 ospf6 adjacencyconnectivity uniconnected + ipv6 ospf6 lsafullness mincostlsa + ! + interface eth1 + ip address 10.0.11.1/24 + ipv6 address 2001:11::1/64 + ! + router ospf + router-id 10.0.10.1 + network 10.0.10.1/32 area 0 + network 10.0.11.0/24 area 0 + redistribute connected metric-type 1 + redistribute ospf6 metric-type 1 + ! + router ospf6 + router-id 10.0.10.1 + interface eth0 area 0.0.0.0 + redistribute connected + redistribute ospf + ! + + } + } + services {zebra OSPFv2 OSPFv3MDR IPForward} +} + +node n21 { + type router + model mdr + network-config { + hostname n21 + ! + interface eth0 + ip address 10.0.10.2/32 + ipv6 address 2001:10::2/128 + ! + } + canvas c1 + iconcoords {240.0 48.0} + labelcoords {240.0 80.0} + interface-peer {eth0 n24} +} + +node n22 { + type router + model mdr + network-config { + hostname n22 + ! + interface eth0 + ip address 10.0.10.3/32 + ipv6 address 2001:10::3/128 + ! + } + canvas c1 + iconcoords {504.0 48.0} + labelcoords {504.0 80.0} + interface-peer {eth0 n24} +} + +node n23 { + type router + model mdr + network-config { + hostname n23 + ! + interface eth0 + ip address 10.0.10.4/32 + ipv6 address 2001:10::4/128 + ! + } + canvas c1 + iconcoords {144.0 168.0} + labelcoords {144.0 200.0} + interface-peer {eth0 n24} +} + +node n24 { + type wlan + network-config { + hostname wlan24 + ! + interface wireless + ip address 10.0.10.0/32 + ipv6 address 2001:10::0/128 + ! + mobmodel + coreapi + emane_ieee80211abg + ! + } + custom-config { + custom-config-id basic_range + custom-command {3 3 9 9 9} + config { + range=275 + bandwidth=54000000 + jitter=0 + delay=20000 + error=0 + } + } + canvas c1 + iconcoords {48.0 72.0} + labelcoords {48.0 104.0} + interface-peer {e0 n20} + interface-peer {e1 n21} + interface-peer {e2 n22} + interface-peer {e3 n23} +} + +node n25 { + type lanswitch + network-config { + hostname n25 + ! + } + canvas c1 + iconcoords {624.0 192.0} + labelcoords {624.0 216.0} + interface-peer {e0 n5} + interface-peer {e1 n26} +} + +node n26 { + type router + model PC + network-config { + hostname n26 + ! + interface eth0 + ip address 10.0.0.20/24 + ipv6 address 2001:0::20/64 + ! 
+ } + canvas c1 + iconcoords {720.0 144.0} + labelcoords {720.0 176.0} + interface-peer {eth0 n25} +} + +link l1 { + nodes {n2 n5} + bandwidth 0 +} + +link l2 { + delay 8000 + nodes {n3 n4} + bandwidth 1024000 +} + +link l3 { + nodes {n1 n3} + bandwidth 0 +} + +link l4 { + nodes {n1 n2} + bandwidth 0 +} + +link l5 { + nodes {n5 n6} + bandwidth 0 +} + +link l6 { + nodes {n5 n7} + bandwidth 0 +} + +link l7 { + nodes {n5 n8} + bandwidth 0 +} + +link l8 { + nodes {n3 n19} + bandwidth 0 +} + +link l9 { + nodes {n19 n2} + bandwidth 0 +} + +link l10 { + nodes {n4 n16} + bandwidth 0 +} + +link l11 { + nodes {n4 n17} + bandwidth 0 +} + +link l12 { + nodes {n4 n18} + bandwidth 0 +} + +link l13 { + nodes {n19 n9} +} + +link l14 { + nodes {n19 n10} +} + +link l15 { + nodes {n15 n11} +} + +link l16 { + nodes {n15 n12} +} + +link l17 { + nodes {n15 n13} +} + +link l18 { + nodes {n15 n14} +} + +link l19 { + nodes {n3 n11} + bandwidth 0 +} + +link l20 { + nodes {n24 n20} +} + +link l21 { + nodes {n24 n21} +} + +link l22 { + nodes {n24 n22} +} + +link l23 { + nodes {n24 n23} +} + +link l24 { + nodes {n20 n1} + bandwidth 0 +} + +link l25 { + delay 5000 + nodes {n25 n5} + bandwidth 0 +} + +link l26 { + nodes {n25 n26} + bandwidth 0 +} + +annotation a1 { + iconcoords {45.0 431.0 220.0 642.0} + type rectangle + label {} + labelcolor black + fontfamily {Arial} + fontsize {12} + color #e6f4f4 + width 0 + border black + rad 0 + canvas c1 +} + +annotation a2 { + iconcoords {642 189 821 404} + type rectangle + label {} + labelcolor black + fontfamily {Arial} + fontsize {12} + color #e6f4f4 + width 0 + border black + rad 0 + canvas c1 +} + +annotation a3 { + iconcoords {200 218 655 463} + type rectangle + label {} + labelcolor black + fontfamily {Arial} + fontsize {12} + color #f4f1f0 + width 0 + border black + rad 0 + canvas c1 +} + +annotation a4 { + iconcoords {600.0 48.0} + type text + label {Kitchen Sink Scenario} + labelcolor black + fontfamily {FreeSans} + fontsize {16} + effects {bold} + canvas c1 +} + +annotation a5 { + iconcoords {648.0 72.0} + type text + label {see scenario comments} + labelcolor black + fontfamily {FreeSans} + fontsize {12} + canvas c1 +} + +canvas c1 { + name {Canvas1} + refpt {0 0 47.5791667 -122.132322 150} + scale {150.0} + size {1000 1000} +} + +option global { + interface_names no + ip_addresses yes + ipv6_addresses yes + node_labels yes + link_labels yes + ipsec_configs yes + exec_errors yes + show_api no + background_images no + annotations yes + grid yes + traffic_start 0 +} + +option session { + enablesdt=1 +} + diff --git a/gui/configs/sample11-sdn.imn b/gui/configs/sample11-sdn.imn new file mode 100644 index 00000000..a41dbc11 --- /dev/null +++ b/gui/configs/sample11-sdn.imn @@ -0,0 +1,291 @@ +node n1 { + type router + model host + network-config { + hostname ryu1 + ! + interface eth1 + ip address 10.0.5.10/24 + ipv6 address 2001:5::10/64 + ! + interface eth0 + ip address 10.0.4.10/24 + ipv6 address 2001:4::10/64 + ! + } + canvas c1 + iconcoords {203.0 65.0} + labelcoords {203.0 97.0} + interface-peer {eth0 n2} + interface-peer {eth1 n3} +} + +node n2 { + type router + model OVS + network-config { + hostname ovs1 + ! + interface eth2 + ip address 10.0.4.1/24 + ipv6 address 2001:4::1/64 + ! + interface eth1 + ip address 10.0.1.1/24 + ipv6 address 2001:1::1/64 + ! + interface eth0 + ip address 10.0.0.1/24 + ipv6 address 2001:0::1/64 + ! 
+ } + canvas c1 + iconcoords {124.0 213.0} + labelcoords {124.0 245.0} + interface-peer {eth0 n6} + interface-peer {eth1 n4} + interface-peer {eth2 n1} +} + +node n3 { + type router + model OVS + network-config { + hostname ovs2 + ! + interface eth2 + ip address 10.0.5.1/24 + ipv6 address 2001:5::1/64 + ! + interface eth1 + ip address 10.0.3.1/24 + ipv6 address 2001:3::1/64 + ! + interface eth0 + ip address 10.0.2.1/24 + ipv6 address 2001:2::1/64 + ! + } + canvas c1 + iconcoords {299.0 220.0} + labelcoords {299.0 252.0} + interface-peer {eth0 n7} + interface-peer {eth1 n5} + interface-peer {eth2 n1} +} + +node n4 { + type router + model host + network-config { + hostname n4 + ! + interface eth0 + ip address 10.0.1.10/24 + ipv6 address 2001:1::10/64 + ! + } + canvas c1 + iconcoords {39.0 313.0} + labelcoords {39.0 345.0} + interface-peer {eth0 n2} +} + +node n5 { + type router + model host + network-config { + hostname n5 + ! + interface eth0 + ip address 10.0.3.10/24 + ipv6 address 2001:3::10/64 + ! + } + canvas c1 + iconcoords {286.0 327.0} + labelcoords {286.0 359.0} + interface-peer {eth0 n3} +} + +node n6 { + type router + model host + network-config { + hostname n6 + ! + interface eth0 + ip address 10.0.0.10/24 + ipv6 address 2001:0::10/64 + ! + } + canvas c1 + iconcoords {131.0 322.0} + labelcoords {131.0 354.0} + interface-peer {eth0 n2} +} + +node n7 { + type router + model host + network-config { + hostname n7 + ! + interface eth0 + ip address 10.0.2.10/24 + ipv6 address 2001:2::10/64 + ! + } + canvas c1 + iconcoords {373.0 328.0} + labelcoords {373.0 360.0} + interface-peer {eth0 n3} +} + +node n8 { + type router + model mdr + network-config { + hostname n8 + ! + interface eth0 + ip address 10.0.6.1/32 + ipv6 address 2001:6::1/128 + ! + } + canvas c1 + iconcoords {579.0 102.0} + labelcoords {579.0 134.0} + interface-peer {eth0 n11} +} + +node n9 { + type router + model mdr + network-config { + hostname n9 + ! + interface eth0 + ip address 10.0.6.2/32 + ipv6 address 2001:6::2/128 + ! + } + canvas c1 + iconcoords {493.0 212.0} + labelcoords {493.0 244.0} + interface-peer {eth0 n11} +} + +node n10 { + type router + model mdr + network-config { + hostname n10 + ! + interface eth0 + ip address 10.0.6.3/32 + ipv6 address 2001:6::3/128 + ! + } + canvas c1 + iconcoords {674.0 225.0} + labelcoords {674.0 257.0} + interface-peer {eth0 n11} +} + +node n11 { + type wlan + network-config { + hostname mobile-sdn + ! + interface wireless + ip address 10.0.6.0/32 + ipv6 address 2001:6::0/128 + ! + mobmodel + coreapi + basic_range + ! 
+ } + custom-config { + custom-config-id basic_range + custom-command {3 3 9 9 9} + config { + range=275 + bandwidth=54000000 + jitter=0 + delay=20000 + error=0 + } + } + canvas c1 + iconcoords {683.0 127.0} + labelcoords {683.0 159.0} + interface-peer {e0 n8} + interface-peer {e1 n9} + interface-peer {e2 n10} +} + +link l1 { + nodes {n2 n6} + bandwidth 0 +} + +link l2 { + nodes {n2 n4} + bandwidth 0 +} + +link l3 { + nodes {n3 n7} + bandwidth 0 +} + +link l4 { + nodes {n3 n5} + bandwidth 0 +} + +link l5 { + nodes {n1 n2} + bandwidth 0 +} + +link l6 { + nodes {n1 n3} + bandwidth 0 +} + +link l7 { + nodes {n11 n8} +} + +link l8 { + nodes {n11 n9} +} + +link l9 { + nodes {n11 n10} +} + +canvas c1 { + name {Canvas1} +} + +option global { + interface_names no + ip_addresses yes + ipv6_addresses no + node_labels yes + link_labels yes + show_api no + background_images no + annotations yes + grid yes + traffic_start 0 + mac_address_start 80 +} + +option session { +} + diff --git a/gui/configs/sample2-ssh.imn b/gui/configs/sample2-ssh.imn new file mode 100644 index 00000000..d79a5f3b --- /dev/null +++ b/gui/configs/sample2-ssh.imn @@ -0,0 +1,248 @@ +node n8 { + type router + model router + network-config { + hostname n8 + ! + interface eth3 + ip address 10.0.6.2/24 + ipv6 address a:6::2/64 + ! + interface eth2 + ip address 10.0.3.1/24 + ipv6 address a:3::1/64 + ! + interface eth1 + ip address 10.0.1.1/24 + ipv6 address a:1::1/64 + ! + interface eth0 + ip address 10.0.0.1/24 + ipv6 address a:0::1/64 + ! + } + canvas c1 + iconcoords {264.0 168.0} + labelcoords {264.0 196.0} + interface-peer {eth0 n1} + interface-peer {eth1 n4} + interface-peer {eth2 n7} + interface-peer {eth3 n6} +} + +node n1 { + type router + model router + network-config { + hostname n1 + ! + interface eth3 + ip address 10.0.5.1/24 + ipv6 address a:5::1/64 + ! + interface eth2 + ip address 10.0.4.2/24 + ipv6 address a:4::2/64 + ! + interface eth1 + ip address 10.0.2.1/24 + ipv6 address a:2::1/64 + ! + interface eth0 + ip address 10.0.0.2/24 + ipv6 address a:0::2/64 + ! + } + canvas c1 + iconcoords {528.0 312.0} + labelcoords {528.0 340.0} + interface-peer {eth0 n8} + interface-peer {eth1 n5} + interface-peer {eth2 n7} + interface-peer {eth3 n6} +} + +node n2 { + type router + model host + cpu {{min 0} {max 100} {weight 1}} + network-config { + hostname sshserver + ! + interface eth0 + ip address 10.0.2.10/24 + ipv6 address a:2::10/64 + ! + } + canvas c1 + iconcoords {732.0 84.0} + labelcoords {671.0 95.0} + interface-peer {eth0 n5} +} + +node n3 { + type router + model PC + cpu {{min 0} {max 100} {weight 1}} + network-config { + hostname sshclient + ! + interface eth0 + ip address 10.0.1.20/24 + ipv6 address a:1::20/64 + ! + } + canvas c1 + iconcoords {72.0 252.0} + labelcoords {86.0 295.0} + interface-peer {eth0 n4} +} + +node n4 { + type lanswitch + network-config { + hostname n4 + ! + } + canvas c1 + iconcoords {120.0 120.0} + labelcoords {120.0 148.0} + interface-peer {e0 n3} + interface-peer {e1 n8} +} + +node n5 { + type lanswitch + network-config { + hostname n5 + ! + } + canvas c1 + iconcoords {708.0 204.0} + labelcoords {708.0 232.0} + interface-peer {e0 n1} + interface-peer {e1 n2} +} + +node n6 { + type router + model router + network-config { + hostname n6 + ! + interface eth1 + ip address 10.0.6.1/24 + ipv6 address a:6::1/64 + ! + interface eth0 + ip address 10.0.5.2/24 + ipv6 address a:5::2/64 + ! 
+ } + canvas c1 + iconcoords {480.0 132.0} + labelcoords {480.0 160.0} + interface-peer {eth0 n1} + interface-peer {eth1 n8} +} + +node n7 { + type router + model router + network-config { + hostname n7 + ! + interface eth1 + ip address 10.0.4.1/24 + ipv6 address a:4::1/64 + ! + interface eth0 + ip address 10.0.3.2/24 + ipv6 address a:3::2/64 + ! + } + canvas c1 + iconcoords {312.0 348.0} + labelcoords {312.0 376.0} + interface-peer {eth0 n8} + interface-peer {eth1 n1} +} + +link l0 { + nodes {n8 n1} + bandwidth 0 +} + +link l1 { + nodes {n4 n3} + bandwidth 0 +} + +link l2 { + nodes {n4 n8} + bandwidth 0 +} + +link l3 { + nodes {n1 n5} + bandwidth 0 +} + +link l4 { + nodes {n5 n2} + bandwidth 0 +} + +link l5 { + nodes {n8 n7} + bandwidth 0 +} + +link l6 { + nodes {n7 n1} + bandwidth 0 +} + +link l7 { + nodes {n1 n6} + bandwidth 0 +} + +link l8 { + nodes {n6 n8} + bandwidth 0 +} + +annotation a0 { + iconcoords {202 75 612 405} + type rectangle + label {provider network} + labelcolor black + fontfamily {Arial} + fontsize 10 + color #f8f8d6 + width 0 + border black + rad 25 + canvas c1 +} + +canvas c1 { + name {Canvas1} +} + +option global { + interface_names no + ip_addresses yes + ipv6_addresses yes + node_labels yes + link_labels yes + ipsec_configs yes + remote_exec no + exec_errors yes + show_api no + background_images no + annotations yes + grid yes +} + diff --git a/gui/configs/sample3-bgp.imn b/gui/configs/sample3-bgp.imn new file mode 100644 index 00000000..d4a396ae --- /dev/null +++ b/gui/configs/sample3-bgp.imn @@ -0,0 +1,754 @@ +node n1 { + type router + model router + network-config { + hostname router1 + ! + interface eth2 + ip address 10.0.8.2/24 + ! + interface eth1 + ip address 10.0.6.1/24 + ! + interface eth0 + ip address 10.0.5.2/24 + ! + } + iconcoords {168.0 264.0} + labelcoords {168.0 288.0} + interface-peer {eth0 n16} + interface-peer {eth1 n2} + interface-peer {eth2 n3} + canvas c1 + services {zebra BGP IPForward} + custom-config { + custom-config-id service:zebra:/usr/local/etc/quagga/Quagga.conf + custom-command /usr/local/etc/quagga/Quagga.conf + config { + interface eth2 + ip address 10.0.8.2/24 + ! + interface eth1 + ip address 10.0.6.1/24 + ! + interface eth0 + ip address 10.0.5.2/24 + ! + router bgp 105 + bgp router-id 10.0.8.2 + redistribute connected + neighbor 10.0.6.2 remote-as 105 + neighbor 10.0.6.2 next-hop-self + neighbor 10.0.5.1 remote-as 105 + neighbor 10.0.5.1 next-hop-self + neighbor 10.0.8.1 remote-as 2901 + neighbor 10.0.8.1 next-hop-self + ! + } + } + custom-config { + custom-config-id service:zebra + custom-command zebra + config { + ('/usr/local/etc/quagga', '/var/run/quagga') + ('/usr/local/etc/quagga/Quagga.conf', 'quaggaboot.sh') + 35 + ('sh quaggaboot.sh zebra',) + ('killall zebra',) + + } + } +} + +node n2 { + type router + model router + network-config { + hostname router2 + ! + interface eth2 + ip address 10.0.9.1/24 + ! + interface eth1 + ip address 10.0.7.1/24 + ! + interface eth0 + ip address 10.0.6.2/24 + ! + } + iconcoords {312.0 168.0} + labelcoords {312.0 192.0} + interface-peer {eth0 n1} + interface-peer {eth1 n16} + interface-peer {eth2 n6} + canvas c1 + services {zebra BGP IPForward} + custom-config { + custom-config-id service:zebra:/usr/local/etc/quagga/Quagga.conf + custom-command /usr/local/etc/quagga/Quagga.conf + config { + interface eth2 + ip address 10.0.9.1/24 + ! + interface eth1 + ip address 10.0.7.1/24 + ! + interface eth0 + ip address 10.0.6.2/24 + ! 
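+ ! iBGP peers within AS 105 (10.0.6.1 and 10.0.7.2) plus a single
+ ! eBGP peer, 10.0.9.2, in AS 2902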
+ router bgp 105 + bgp router-id 10.0.8.2 + redistribute connected + neighbor 10.0.7.2 remote-as 105 + neighbor 10.0.7.2 next-hop-self + neighbor 10.0.6.1 remote-as 105 + neighbor 10.0.6.1 next-hop-self + neighbor 10.0.9.2 remote-as 2902 + neighbor 10.0.9.2 next-hop-self + ! + } + } + custom-config { + custom-config-id service:zebra + custom-command zebra + config { + ('/usr/local/etc/quagga', '/var/run/quagga') + ('/usr/local/etc/quagga/Quagga.conf', 'quaggaboot.sh') + 35 + ('sh quaggaboot.sh zebra',) + ('killall zebra',) + + } + } +} + +node n3 { + type router + model router + network-config { + hostname router3 + ! + interface eth1 + ip address 10.0.8.1/24 + ! + interface eth0 + ip address 10.0.2.1/24 + ! + } + iconcoords {96.0 408.0} + labelcoords {96.0 432.0} + interface-peer {eth0 n4} + interface-peer {eth1 n1} + canvas c1 + services {zebra BGP IPForward} + custom-config { + custom-config-id service:zebra:/usr/local/etc/quagga/Quagga.conf + custom-command /usr/local/etc/quagga/Quagga.conf + config { + interface eth1 + ip address 10.0.8.1/24 + ! + interface eth0 + ip address 10.0.2.1/24 + ! + router bgp 2901 + bgp router-id 10.0.2.1 + redistribute connected + neighbor 10.0.2.2 remote-as 2901 + neighbor 10.0.2.2 next-hop-self + neighbor 10.0.8.2 remote-as 105 + neighbor 10.0.8.2 next-hop-self + ! + } + } + custom-config { + custom-config-id service:zebra + custom-command zebra + config { + ('/usr/local/etc/quagga', '/var/run/quagga') + ('/usr/local/etc/quagga/Quagga.conf', 'quaggaboot.sh') + 35 + ('sh quaggaboot.sh zebra',) + ('killall zebra',) + + } + } +} + +node n4 { + type router + model router + network-config { + hostname router4 + ! + interface eth0 + ip address 10.0.2.2/24 + ! + interface eth1 + ip address 10.0.10.1/24 + ! + interface eth2 + ip address 10.0.0.1/24 + ! + } + iconcoords {240.0 432.0} + labelcoords {240.0 456.0} + interface-peer {eth2 n9} + interface-peer {eth0 n3} + interface-peer {eth1 n7} + canvas c1 + services {zebra BGP IPForward} + custom-config { + custom-config-id service:zebra:/usr/local/etc/quagga/Quagga.conf + custom-command /usr/local/etc/quagga/Quagga.conf + config { + interface eth0 + ip address 10.0.2.2/24 + ! + interface eth1 + ip address 10.0.10.1/24 + ! + interface eth2 + ip address 10.0.0.1/24 + ! + router bgp 2901 + bgp router-id 10.0.10.1 + redistribute connected + neighbor 10.0.2.1 remote-as 2901 + neighbor 10.0.2.1 next-hop-self + neighbor 10.0.10.2 remote-as 2902 + neighbor 10.0.10.2 next-hop-self + network 10.0.0.0 mask 255.255.255.0 + ! + } + } + custom-config { + custom-config-id service:zebra + custom-command zebra + config { + ('/usr/local/etc/quagga', '/var/run/quagga') + ('/usr/local/etc/quagga/Quagga.conf', 'quaggaboot.sh') + 35 + ('sh quaggaboot.sh zebra',) + ('killall zebra',) + + } + } +} + +node n5 { + type router + model router + network-config { + hostname router5 + ! + interface eth1 + ip address 10.0.4.1/24 + ! + interface eth0 + ip address 10.0.3.2/24 + ! + interface eth2 + ip address 10.0.1.1/24 + ! + } + iconcoords {528.0 336.0} + labelcoords {528.0 360.0} + interface-peer {eth2 n8} + interface-peer {eth0 n7} + interface-peer {eth1 n6} + canvas c1 + services {zebra BGP IPForward} + custom-config { + custom-config-id service:zebra:/usr/local/etc/quagga/Quagga.conf + custom-command /usr/local/etc/quagga/Quagga.conf + config { + interface eth1 + ip address 10.0.4.1/24 + ! + interface eth0 + ip address 10.0.3.2/24 + ! + interface eth2 + ip address 10.0.1.1/24 + ! 
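+ ! interior router of AS 2902: iBGP peers 10.0.3.1 and 10.0.4.2, with
+ ! the 10.0.1.0/24 LAN announced via a network statement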
+ router bgp 2902 + bgp router-id 10.0.4.1 + redistribute connected + neighbor 10.0.4.2 remote-as 2902 + neighbor 10.0.4.2 next-hop-self + neighbor 10.0.3.1 remote-as 2902 + neighbor 10.0.3.1 next-hop-self + network 10.0.1.0 mask 255.255.255.0 + ! + } + } + custom-config { + custom-config-id service:zebra + custom-command zebra + config { + ('/usr/local/etc/quagga', '/var/run/quagga') + ('/usr/local/etc/quagga/Quagga.conf', 'quaggaboot.sh') + 35 + ('sh quaggaboot.sh zebra',) + ('killall zebra',) + + } + } +} + +node n6 { + type router + model router + network-config { + hostname router6 + ! + interface eth1 + ip address 10.0.9.2/24 + ! + interface eth0 + ip address 10.0.4.2/24 + ! + router bgp 2902 + bgp router-id 10.0.9.2 + redistribute connected + neighbor 10.0.4.1 remote-as 2902 + neighbor 10.0.4.1 next-hop-self + neighbor 10.0.9.1 remote-as 105 + neighbor 10.0.9.1 next-hop-self + ! + } + iconcoords {624.0 240.0} + labelcoords {624.0 264.0} + interface-peer {eth0 n5} + interface-peer {eth1 n2} + canvas c1 + services {zebra BGP IPForward} + custom-config { + custom-config-id service:zebra:/usr/local/etc/quagga/Quagga.conf + custom-command /usr/local/etc/quagga/Quagga.conf + config { + interface eth1 + ip address 10.0.9.2/24 + ! + interface eth0 + ip address 10.0.4.2/24 + ! + router bgp 2902 + bgp router-id 10.0.9.2 + redistribute connected + neighbor 10.0.4.1 remote-as 2902 + neighbor 10.0.4.1 next-hop-self + neighbor 10.0.9.1 remote-as 105 + neighbor 10.0.9.1 next-hop-self + ! + } + } + custom-config { + custom-config-id service:zebra + custom-command zebra + config { + ('/usr/local/etc/quagga', '/var/run/quagga') + ('/usr/local/etc/quagga/Quagga.conf', 'quaggaboot.sh') + 35 + ('sh quaggaboot.sh zebra',) + ('killall zebra',) + + } + } +} + +node n7 { + type router + model router + network-config { + hostname router7 + ! + interface eth1 + ip address 10.0.10.2/24 + ! + interface eth0 + ip address 10.0.3.1/24 + ! + } + iconcoords {528.0 456.0} + labelcoords {528.0 480.0} + interface-peer {eth0 n5} + interface-peer {eth1 n4} + canvas c1 + services {zebra BGP IPForward} + custom-config { + custom-config-id service:zebra:/usr/local/etc/quagga/Quagga.conf + custom-command /usr/local/etc/quagga/Quagga.conf + config { + interface eth1 + ip address 10.0.10.2/24 + ! + interface eth0 + ip address 10.0.3.1/24 + ! + router bgp 2902 + bgp router-id 10.0.3.1 + redistribute connected + neighbor 10.0.3.2 remote-as 2902 + neighbor 10.0.3.2 next-hop-self + neighbor 10.0.10.1 remote-as 2901 + neighbor 10.0.10.1 next-hop-self + ! + } + } + custom-config { + custom-config-id service:zebra + custom-command zebra + config { + ('/usr/local/etc/quagga', '/var/run/quagga') + ('/usr/local/etc/quagga/Quagga.conf', 'quaggaboot.sh') + 35 + ('sh quaggaboot.sh zebra',) + ('killall zebra',) + + } + } +} + +node n8 { + type lanswitch + network-config { + hostname lanswitch8 + ! + } + iconcoords {672.0 432.0} + labelcoords {672.0 456.0} + interface-peer {e0 n5} + interface-peer {e1 n10} + interface-peer {e2 n11} + canvas c1 +} + +node n9 { + type hub + network-config { + hostname hub9 + ! + } + iconcoords {120.0 504.0} + labelcoords {120.0 528.0} + interface-peer {e0 n4} + interface-peer {e1 n15} + interface-peer {e2 n14} + interface-peer {e3 n13} + interface-peer {e4 n12} + canvas c1 +} + +node n10 { + type router + model host + network-config { + hostname host10 + ! + interface eth0 + ip address 10.0.1.10/24 + ! 
+ } + iconcoords {576.0 552.0} + labelcoords {576.0 584.0} + interface-peer {eth0 n8} + canvas c1 +} + +node n11 { + type router + model host + network-config { + hostname host11 + ! + interface eth0 + ip address 10.0.1.11/24 + ! + } + iconcoords {696.0 552.0} + labelcoords {696.0 584.0} + interface-peer {eth0 n8} + canvas c1 +} + +node n12 { + type router + model PC + network-config { + hostname pc12 + ! + interface eth0 + ip address 10.0.0.23/24 + ! + } + iconcoords {288.0 576.0} + labelcoords {288.0 608.0} + interface-peer {eth0 n9} + canvas c1 +} + +node n13 { + type router + model PC + network-config { + hostname pc13 + ! + interface eth0 + ip address 10.0.0.22/24 + ! + } + iconcoords {216.0 600.0} + labelcoords {216.0 632.0} + interface-peer {eth0 n9} + canvas c1 +} + +node n14 { + type router + model PC + network-config { + hostname pc14 + ! + interface eth0 + ip address 10.0.0.21/24 + ! + } + iconcoords {120.0 624.0} + labelcoords {120.0 656.0} + interface-peer {eth0 n9} + canvas c1 +} + +node n15 { + type router + model PC + network-config { + hostname pc15 + ! + interface eth0 + ip address 10.0.0.20/24 + ! + } + iconcoords {24.0 576.0} + labelcoords {24.0 608.0} + interface-peer {eth0 n9} + canvas c1 +} + +node n16 { + type router + model router + network-config { + hostname router0 + ! + interface eth0 + ip address 10.0.5.1/24 + ! + interface eth1 + ip address 10.0.7.2/24 + ! + } + iconcoords {120.0 120.0} + labelcoords {120.0 144.0} + interface-peer {eth0 n1} + interface-peer {eth1 n2} + canvas c1 + services {zebra BGP IPForward} + custom-config { + custom-config-id service:zebra:/usr/local/etc/quagga/Quagga.conf + custom-command /usr/local/etc/quagga/Quagga.conf + config { + interface eth0 + ip address 10.0.5.1/24 + ! + interface eth1 + ip address 10.0.7.2/24 + ! + router bgp 105 + bgp router-id 10.0.5.1 + redistribute connected + neighbor 10.0.7.1 remote-as 105 + neighbor 10.0.7.1 next-hop-self + neighbor 10.0.5.2 remote-as 105 + neighbor 10.0.5.2 next-hop-self + ! 
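+ ! router0 completes the full iBGP mesh within AS 105 (peers with
+ ! router1 and router2); no eBGP sessions are configured here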
+ } + } + custom-config { + custom-config-id service:zebra + custom-command zebra + config { + ('/usr/local/etc/quagga', '/var/run/quagga') + ('/usr/local/etc/quagga/Quagga.conf', 'quaggaboot.sh') + 35 + ('sh quaggaboot.sh zebra',) + ('killall zebra',) + + } + } +} + +link l0 { + nodes {n9 n4} + bandwidth 100000000 +} + +link l1 { + nodes {n8 n5} + bandwidth 100000000 +} + +link l2 { + nodes {n15 n9} + bandwidth 100000000 +} + +link l3 { + nodes {n14 n9} + bandwidth 100000000 +} + +link l4 { + nodes {n13 n9} + bandwidth 100000000 +} + +link l5 { + nodes {n12 n9} + bandwidth 100000000 +} + +link l6 { + nodes {n10 n8} + bandwidth 100000000 +} + +link l7 { + nodes {n11 n8} + bandwidth 100000000 +} + +link l8 { + nodes {n3 n4} + bandwidth 2048000 + delay 2500 +} + +link l9 { + nodes {n7 n5} + bandwidth 2048000 + delay 2500 +} + +link l10 { + nodes {n5 n6} + bandwidth 2048000 + delay 2500 +} + +link l11 { + nodes {n16 n1} + bandwidth 2048000 + delay 2500 +} + +link l12 { + nodes {n1 n2} + bandwidth 2048000 + delay 2500 +} + +link l13 { + nodes {n2 n16} + bandwidth 2048000 + delay 2500 +} + +link l14 { + nodes {n3 n1} + bandwidth 10000000 + delay 650000 +} + +link l15 { + nodes {n2 n6} + bandwidth 10000000 + delay 650000 +} + +link l16 { + nodes {n4 n7} + bandwidth 5000000 + delay 7500 +} + +annotation a0 { + iconcoords { 70 55 345 330 } + type oval + label {AS 105} + labelcolor #CFCFAC + fontfamily {Arial} + fontsize {12} + color #FFFFCC + width 0 + border black + canvas c1 +} + +annotation a1 { + iconcoords { 470 170 740 630 } + type oval + label {AS 2902} + labelcolor #C0C0CF + fontfamily {Arial} + fontsize {12} + color #F0F0FF + width 0 + border black + canvas c1 +} + +annotation a2 { + iconcoords { 0 355 320 660 } + type oval + label {AS 2901} + labelcolor #C0C0CF + fontfamily {Arial} + fontsize {12} + color #F0F0FF + width 0 + border black + canvas c1 +} + +annotation a10 { + type text + canvas c1 + iconcoords { 450 55 } + color #FFCCCC + fontsize {20} + label {Sample Topology 1} +} + +canvas c1 { + name {Canvas1} + size {900 706.0} +} + +option global { + interface_names yes + ip_addresses yes + ipv6_addresses yes + node_labels yes + link_labels yes + ipsec_configs yes + remote_exec no + exec_errors yes + show_api no + background_images no + annotations yes + grid yes +} + diff --git a/daemon/core/gui/data/backgrounds/sample4-bg.jpg b/gui/configs/sample4-bg.jpg similarity index 100% rename from daemon/core/gui/data/backgrounds/sample4-bg.jpg rename to gui/configs/sample4-bg.jpg diff --git a/gui/configs/sample4-nrlsmf.imn b/gui/configs/sample4-nrlsmf.imn new file mode 100644 index 00000000..165c424f --- /dev/null +++ b/gui/configs/sample4-nrlsmf.imn @@ -0,0 +1,546 @@ +comments { +Joe Macker NRL +Last updated: Sept 2010,2015(to fix mobility script) +Nov 2010 Jeff Ahrenholz - updated for new services model and renamed + (was 2groups_10nodes_smf.imn) + +This scenario is a simple SMF example for testing multicast within CORE. + +There are several dependencies for these scenarios to work; + +nrlsmf must be installed and the binary must be within the path when executing. +This should also be built along with protolib from the NRL pf.itd.nrl.navy.mil +repository or from nightly snapshots by using the Makefile.core build file. +This avoids some of the potential problems that arise with protolib call and +proper netns support in various kernel releases. For now the Makefile.core +approach patches around the problem. 
+ +This scenario will launch 10 quagga manet-ospf and smf classical flooding +router nodes. A mobility pattern can be used to cause periodic fragmentation +and coalescing among 5 groups that move together as a somewhat randomized +cluster. + +Within netns and CORE, the following nrlsmf parameters must be used: hash mode +and instance ids. +This script uses node names as instance ids and MD5 as the hash mode. +Distributed optimized relay selection is not provided in this example, but it works +in nrlsmf with both quagga manetospf-mdr and with nrlolsr or the newer nhdp code +being developed. Relays can also be manually configured if that is of some use +in a scenario. Classical flooding still provides duplicate detection in this +mode but of course has additional overhead. + +----- +Traffic testing: you can try sending your own multicast apps or use a +test tool. + +mgen is recommended as a test tool, but ping -t 5 224.225.226.227 style testing +can also be used. + +An example mgen script to source multicast from a terminal window is as follows: + +mgen event "on 1 udp dst 224.225.226.227/5000 periodic [1 500]" + +This sends 500-byte packets every second. See the mgen user's guide for the myriad +of choices/options. + +On a receive node terminal the following can work: + +mgen event "join 224.225.226.227" event "listen udp 5000" output +Without output, it will stream to stdout. +} + +node n1 { + type router + model mdr + network-config { + hostname n1 + ! + interface eth0 + ip address 10.0.0.1/32 + ipv6 address a:0::1/128 + ! + } + iconcoords {186.2364578872143 137.89039496012572} + labelcoords {186.2364578872143 161.89039496012572} + canvas c1 + interface-peer {eth0 n11} + custom-image $CORE_DATA_DIR/icons/normal/router_green.gif + services {zebra OSPFv3MDR SMF IPForward UserDefined} + custom-config { + custom-config-id service:UserDefined:custom-post-config-commands.sh + custom-command custom-post-config-commands.sh + config { + route add default dev eth0 + route add -net 224.0.0.0 netmask 224.0.0.0 dev eth0 + } + } + custom-config { + custom-config-id service:UserDefined + custom-command UserDefined + config { + files=('custom-post-config-commands.sh', ) + startidx=35 + cmdup=('sh custom-post-config-commands.sh', ) + } + } +} + +node n2 { + type router + model mdr + network-config { + hostname n2 + ! + interface eth0 + ip address 10.0.0.2/32 + ipv6 address a:0::2/128 + ! + } + iconcoords {49.97421009111123 297.31725181124926} + labelcoords {49.97421009111123 321.31725181124926} + canvas c1 + interface-peer {eth0 n11} + custom-image $CORE_DATA_DIR/icons/normal/router_green.gif + services {zebra OSPFv3MDR SMF IPForward UserDefined} + custom-config { + custom-config-id service:UserDefined:custom-post-config-commands.sh + custom-command custom-post-config-commands.sh + config { + route add default dev eth0 + route add -net 224.0.0.0 netmask 224.0.0.0 dev eth0 + } + } + custom-config { + custom-config-id service:UserDefined + custom-command UserDefined + config { + files=('custom-post-config-commands.sh', ) + startidx=35 + cmdup=('sh custom-post-config-commands.sh', ) + } + } +} + +node n3 { + type router + model mdr + network-config { + hostname n3 + ! + interface eth0 + ip address 10.0.0.3/32 + ipv6 address a:0::3/128 + !
+ } + iconcoords {176.46110847174833 328.14864514530865} + labelcoords {176.46110847174833 352.14864514530865} + canvas c1 + interface-peer {eth0 n11} + custom-image $CORE_DATA_DIR/icons/normal/router_green.gif + services {zebra OSPFv3MDR SMF IPForward UserDefined} + custom-config { + custom-config-id service:UserDefined:custom-post-config-commands.sh + custom-command custom-post-config-commands.sh + config { + route add default dev eth0 + route add -net 224.0.0.0 netmask 224.0.0.0 dev eth0 + } + } + custom-config { + custom-config-id service:UserDefined + custom-command UserDefined + config { + files=('custom-post-config-commands.sh', ) + startidx=35 + cmdup=('sh custom-post-config-commands.sh', ) + } + } +} + +node n4 { + type router + model mdr + network-config { + hostname n4 + ! + interface eth0 + ip address 10.0.0.4/32 + ipv6 address a:0::4/128 + ! + } + iconcoords {145.04062040794378 195.27962082775758} + labelcoords {145.04062040794378 219.27962082775758} + canvas c1 + interface-peer {eth0 n11} + custom-image $CORE_DATA_DIR/icons/normal/router_green.gif + services {zebra OSPFv3MDR SMF IPForward UserDefined} + custom-config { + custom-config-id service:UserDefined:custom-post-config-commands.sh + custom-command custom-post-config-commands.sh + config { + route add default dev eth0 + route add -net 224.0.0.0 netmask 224.0.0.0 dev eth0 + } + } + custom-config { + custom-config-id service:UserDefined + custom-command UserDefined + config { + files=('custom-post-config-commands.sh', ) + startidx=35 + cmdup=('sh custom-post-config-commands.sh', ) + } + } +} + +node n5 { + type router + model mdr + network-config { + hostname n5 + ! + interface eth0 + ip address 10.0.0.5/32 + ipv6 address a:0::5/128 + ! + } + iconcoords {137.9101266949479 257.51849231830334} + labelcoords {137.9101266949479 281.51849231830334} + canvas c1 + interface-peer {eth0 n11} + custom-image $CORE_DATA_DIR/icons/normal/router_green.gif + services {zebra OSPFv3MDR SMF IPForward UserDefined} + custom-config { + custom-config-id service:UserDefined:custom-post-config-commands.sh + custom-command custom-post-config-commands.sh + config { + route add default dev eth0 + route add -net 224.0.0.0 netmask 224.0.0.0 dev eth0 + } + } + custom-config { + custom-config-id service:UserDefined + custom-command UserDefined + config { + files=('custom-post-config-commands.sh', ) + startidx=35 + cmdup=('sh custom-post-config-commands.sh', ) + } + } +} + +node n6 { + type router + model mdr + network-config { + hostname n6 + ! + interface eth0 + ip address 10.0.0.6/32 + ipv6 address a:0::6/128 + ! + } + iconcoords {119.15850324229558 93.2505296351548} + labelcoords {119.15850324229558 117.2505296351548} + canvas c1 + interface-peer {eth0 n11} + custom-image $CORE_DATA_DIR/icons/normal/router_red.gif + services {zebra OSPFv3MDR SMF IPForward UserDefined} + custom-config { + custom-config-id service:UserDefined:custom-post-config-commands.sh + custom-command custom-post-config-commands.sh + config { + route add default dev eth0 + route add -net 224.0.0.0 netmask 224.0.0.0 dev eth0 + } + } + custom-config { + custom-config-id service:UserDefined + custom-command UserDefined + config { + files=('custom-post-config-commands.sh', ) + startidx=35 + cmdup=('sh custom-post-config-commands.sh', ) + } + } +} + +node n7 { + type router + model mdr + network-config { + hostname n7 + ! + interface eth0 + ip address 10.0.0.7/32 + ipv6 address a:0::7/128 + ! 
+ } + iconcoords {79.1102256826161 50.123535235375556} + labelcoords {79.1102256826161 74.12353523537556} + canvas c1 + interface-peer {eth0 n11} + custom-image $CORE_DATA_DIR/icons/normal/router_red.gif + services {zebra OSPFv3MDR SMF IPForward UserDefined} + custom-config { + custom-config-id service:UserDefined:custom-post-config-commands.sh + custom-command custom-post-config-commands.sh + config { + route add default dev eth0 + route add -net 224.0.0.0 netmask 224.0.0.0 dev eth0 + } + } + custom-config { + custom-config-id service:UserDefined + custom-command UserDefined + config { + files=('custom-post-config-commands.sh', ) + startidx=35 + cmdup=('sh custom-post-config-commands.sh', ) + } + } +} + +node n8 { + type router + model mdr + network-config { + hostname n8 + ! + interface eth0 + ip address 10.0.0.8/32 + ipv6 address a:0::8/128 + ! + } + iconcoords {159.90259315202974 8.220638318379141} + labelcoords {159.90259315202974 32.220638318379144} + canvas c1 + interface-peer {eth0 n11} + custom-image $CORE_DATA_DIR/icons/normal/router_red.gif + services {zebra OSPFv3MDR SMF IPForward UserDefined} + custom-config { + custom-config-id service:UserDefined:custom-post-config-commands.sh + custom-command custom-post-config-commands.sh + config { + route add default dev eth0 + route add -net 224.0.0.0 netmask 224.0.0.0 dev eth0 + } + } + custom-config { + custom-config-id service:UserDefined + custom-command UserDefined + config { + files=('custom-post-config-commands.sh', ) + startidx=35 + cmdup=('sh custom-post-config-commands.sh', ) + } + } +} + +node n9 { + type router + model mdr + network-config { + hostname n9 + ! + interface eth0 + ip address 10.0.0.9/32 + ipv6 address a:0::9/128 + ! + } + iconcoords {150.43010603614704 165.70781621981482} + labelcoords {150.43010603614704 189.70781621981482} + canvas c1 + interface-peer {eth0 n11} + custom-image $CORE_DATA_DIR/icons/normal/router_red.gif + services {zebra OSPFv3MDR SMF IPForward UserDefined} + custom-config { + custom-config-id service:UserDefined:custom-post-config-commands.sh + custom-command custom-post-config-commands.sh + config { + route add default dev eth0 + route add -net 224.0.0.0 netmask 224.0.0.0 dev eth0 + } + } + custom-config { + custom-config-id service:UserDefined + custom-command UserDefined + config { + files=('custom-post-config-commands.sh', ) + startidx=35 + cmdup=('sh custom-post-config-commands.sh', ) + } + } +} + +node n10 { + type router + model mdr + network-config { + hostname n10 + ! + interface eth0 + ip address 10.0.0.10/32 + ipv6 address a:0::10/128 + ! + } + iconcoords {64.19289632467826 42.49909518554088} + labelcoords {64.19289632467826 66.49909518554088} + canvas c1 + interface-peer {eth0 n11} + custom-image $CORE_DATA_DIR/icons/normal/router_red.gif + services {zebra OSPFv3MDR SMF IPForward UserDefined} + custom-config { + custom-config-id service:UserDefined:custom-post-config-commands.sh + custom-command custom-post-config-commands.sh + config { + route add default dev eth0 + route add -net 224.0.0.0 netmask 224.0.0.0 dev eth0 + } + } + custom-config { + custom-config-id service:UserDefined + custom-command UserDefined + config { + files=('custom-post-config-commands.sh', ) + startidx=35 + cmdup=('sh custom-post-config-commands.sh', ) + } + } +} + +node n11 { + type wlan + network-config { + hostname wlan11 + ! + interface wireless + ip address 10.0.0.0/32 + ipv6 address a:0::0/128 + ! + mobmodel + coreapi + basic_range + ns2script + ! 
+ } + iconcoords {0 0} + labelcoords {0 0} + canvas c1 + interface-peer {e0 n1} + interface-peer {e1 n2} + interface-peer {e2 n3} + interface-peer {e3 n4} + interface-peer {e4 n5} + interface-peer {e5 n6} + interface-peer {e6 n7} + interface-peer {e7 n8} + interface-peer {e8 n9} + interface-peer {e9 n10} + custom-config { + custom-config-id ns2script + custom-command {10 3 11 10 10} + config { + file=sample4.scen + refresh_ms=50 + loop=1 + autostart=5 + map= + } + } + custom-config { + custom-config-id basic_range + custom-command {3 3 9 9 9} + config { + range=200 + bandwidth=54000000 + jitter=0 + delay=50000 + error=0 + } + } +} + +link l1 { + nodes {n11 n1} + bandwidth 54000000 + delay 50000 +} + +link l2 { + nodes {n11 n2} + bandwidth 54000000 + delay 50000 +} + +link l3 { + nodes {n11 n3} + bandwidth 54000000 + delay 50000 +} + +link l4 { + nodes {n11 n4} + bandwidth 54000000 + delay 50000 +} + +link l5 { + nodes {n11 n5} + bandwidth 54000000 + delay 50000 +} + +link l6 { + nodes {n11 n6} + bandwidth 54000000 + delay 50000 +} + +link l7 { + nodes {n11 n7} + bandwidth 54000000 + delay 50000 +} + +link l8 { + nodes {n11 n8} + bandwidth 54000000 + delay 50000 +} + +link l9 { + nodes {n11 n9} + bandwidth 54000000 + delay 50000 +} + +link l10 { + nodes {n11 n10} + bandwidth 54000000 + delay 50000 +} + +canvas c1 { + name {Canvas1} + wallpaper-style {upperleft} + wallpaper {sample4-bg.jpg} + size {1000 750} +} + +option global { + interface_names no + ip_addresses yes + ipv6_addresses yes + node_labels yes + link_labels yes + show_api no + background_images no + annotations yes + grid no + traffic_start 0 +} + +option session { +} + diff --git a/gui/configs/sample4.scen b/gui/configs/sample4.scen new file mode 100644 index 00000000..939176e7 --- /dev/null +++ b/gui/configs/sample4.scen @@ -0,0 +1,2791 @@ +$node_(1) set X_ 196.387421 +$node_(1) set Y_ 462.134022 +$ns_ at 0.000000 "$node_(1) setdest 196.387421 462.134022 1.000000" +$ns_ at 0.000000 "$node_(1) setdest 195.956911 462.201568 0.435777" +$node_(2) set X_ 108.414716 +$node_(2) set Y_ 393.160360 +$ns_ at 0.000000 "$node_(2) setdest 108.414716 393.160360 1.000000" +$ns_ at 0.000000 "$node_(2) setdest 108.686466 392.778045 0.469055" +$node_(3) set X_ 14.254378 +$node_(3) set Y_ 541.257030 +$ns_ at 0.000000 "$node_(3) setdest 14.254378 541.257030 1.000000" +$ns_ at 0.000000 "$node_(3) setdest 14.839150 541.372844 0.596131" +$node_(4) set X_ 41.851670 +$node_(4) set Y_ 545.867138 +$ns_ at 0.000000 "$node_(4) setdest 41.851670 545.867138 1.000000" +$ns_ at 0.000000 "$node_(4) setdest 42.442273 545.926217 0.593550" +$node_(5) set X_ 182.809226 +$node_(5) set Y_ 513.055969 +$ns_ at 0.000000 "$node_(5) setdest 182.809226 513.055969 1.000000" +$ns_ at 0.000000 "$node_(5) setdest 183.335280 513.337339 0.596575" +$node_(6) set X_ 122.027997 +$node_(6) set Y_ 524.087717 +$ns_ at 0.000000 "$node_(6) setdest 122.027997 524.087717 1.000000" +$ns_ at 0.000000 "$node_(6) setdest 122.475860 524.470641 0.589248" +$node_(7) set X_ 186.692167 +$node_(7) set Y_ 453.103964 +$ns_ at 0.000000 "$node_(7) setdest 186.692167 453.103964 1.000000" +$ns_ at 0.000000 "$node_(7) setdest 186.362331 453.043815 0.335275" +$node_(8) set X_ 6.841010 +$node_(8) set Y_ 411.004614 +$ns_ at 0.000000 "$node_(8) setdest 6.841010 411.004614 1.000000" +$ns_ at 0.000000 "$node_(8) setdest 6.715910 410.970880 0.129569" +$node_(9) set X_ 180.514289 +$node_(9) set Y_ 395.901964 +$ns_ at 0.000000 "$node_(9) setdest 180.514289 395.901964 1.000000" +$ns_ at 0.000000 "$node_(9) setdest 
180.863640 396.303766 0.532438" +$node_(10) set X_ 148.853602 +$node_(10) set Y_ 357.991260 +$ns_ at 0.000000 "$node_(10) setdest 148.853602 357.991260 1.000000" +$ns_ at 0.000000 "$node_(10) setdest 148.959253 358.166829 0.204906" + +$ns_ at 1.000000 "$node_(1) setdest 194.187758 463.051431 1.962694" +$ns_ at 1.000000 "$node_(2) setdest 109.321754 390.842582 2.037058" +$ns_ at 1.000000 "$node_(3) setdest 16.393600 542.808055 2.115690" +$ns_ at 1.000000 "$node_(4) setdest 44.634359 546.049079 2.195526" +$ns_ at 1.000000 "$node_(5) setdest 184.328928 515.246522 2.152281" +$ns_ at 1.000000 "$node_(6) setdest 123.835691 526.192539 2.194099" +$ns_ at 1.000000 "$node_(7) setdest 184.877733 452.038170 1.793140" +$ns_ at 1.000000 "$node_(8) setdest 6.671562 410.117567 0.854465" +$ns_ at 1.000000 "$node_(9) setdest 181.894113 398.159435 2.122590" +$ns_ at 1.000000 "$node_(10) setdest 148.870931 359.695988 1.531708" + +$ns_ at 2.000000 "$node_(1) setdest 190.851655 464.311512 3.566145" +$ns_ at 2.000000 "$node_(2) setdest 110.783099 387.546466 3.605539" +$ns_ at 2.000000 "$node_(3) setdest 14.881262 545.183484 2.815996" +$ns_ at 2.000000 "$node_(4) setdest 48.394729 546.584695 3.798324" +$ns_ at 2.000000 "$node_(5) setdest 184.473917 519.006641 3.762914" +$ns_ at 2.000000 "$node_(6) setdest 126.386821 528.676353 3.560561" +$ns_ at 2.000000 "$node_(7) setdest 182.160868 450.006458 3.392523" +$ns_ at 2.000000 "$node_(8) setdest 8.577394 409.839307 1.926038" +$ns_ at 2.000000 "$node_(9) setdest 182.309184 401.800750 3.664895" +$ns_ at 2.000000 "$node_(10) setdest 149.229189 362.797090 3.121727" + +$ns_ at 3.000000 "$node_(1) setdest 185.998912 466.082945 5.165955" +$ns_ at 3.000000 "$node_(2) setdest 114.977824 384.500577 5.183932" +$ns_ at 3.000000 "$node_(3) setdest 12.849462 546.572220 2.461056" +$ns_ at 3.000000 "$node_(4) setdest 52.522431 543.493688 5.156767" +$ns_ at 3.000000 "$node_(5) setdest 181.227267 523.091806 5.218171" +$ns_ at 3.000000 "$node_(6) setdest 122.849749 526.513462 4.145958" +$ns_ at 3.000000 "$node_(7) setdest 178.377101 446.748671 4.993003" +$ns_ at 3.000000 "$node_(8) setdest 11.957948 411.081133 3.601427" +$ns_ at 3.000000 "$node_(9) setdest 179.694536 406.348826 5.246083" +$ns_ at 3.000000 "$node_(10) setdest 150.721736 367.286506 4.731021" + +$ns_ at 4.000000 "$node_(1) setdest 179.555767 468.148449 6.766123" +$ns_ at 4.000000 "$node_(2) setdest 121.705230 383.835205 6.760230" +$ns_ at 4.000000 "$node_(3) setdest 12.743213 545.655478 0.922878" +$ns_ at 4.000000 "$node_(4) setdest 54.718112 536.856974 6.990493" +$ns_ at 4.000000 "$node_(5) setdest 174.581069 521.988831 6.737099" +$ns_ at 4.000000 "$node_(6) setdest 117.358177 524.776868 5.759612" +$ns_ at 4.000000 "$node_(7) setdest 173.583229 442.224357 6.591709" +$ns_ at 4.000000 "$node_(8) setdest 16.085272 414.253650 5.205734" +$ns_ at 4.000000 "$node_(9) setdest 174.174132 410.497488 6.905523" +$ns_ at 4.000000 "$node_(10) setdest 152.442894 373.377115 6.329131" + +$ns_ at 5.000000 "$node_(1) setdest 184.255964 467.324102 4.771939" +$ns_ at 5.000000 "$node_(2) setdest 130.096253 384.724742 8.438041" +$ns_ at 5.000000 "$node_(3) setdest 13.104636 545.386686 0.450418" +$ns_ at 5.000000 "$node_(4) setdest 60.821055 533.607888 6.913933" +$ns_ at 5.000000 "$node_(5) setdest 176.727658 522.545813 2.217673" +$ns_ at 5.000000 "$node_(6) setdest 109.928718 524.861347 7.429940" +$ns_ at 5.000000 "$node_(7) setdest 168.331832 435.943930 8.186631" +$ns_ at 5.000000 "$node_(8) setdest 20.338476 419.569962 6.808298" +$ns_ at 
5.000000 "$node_(9) setdest 166.303912 413.725191 8.506376" +$ns_ at 5.000000 "$node_(10) setdest 153.598648 381.217946 7.925553" + +$ns_ at 6.000000 "$node_(1) setdest 190.713020 468.199291 6.516097" +$ns_ at 6.000000 "$node_(2) setdest 140.041968 386.083359 10.038083" +$ns_ at 6.000000 "$node_(3) setdest 15.010480 545.397474 1.905875" +$ns_ at 6.000000 "$node_(4) setdest 69.199175 531.278561 8.695899" +$ns_ at 6.000000 "$node_(5) setdest 180.271997 521.405130 3.723372" +$ns_ at 6.000000 "$node_(6) setdest 100.969155 523.987540 9.002072" +$ns_ at 6.000000 "$node_(7) setdest 162.840946 427.835646 9.792553" +$ns_ at 6.000000 "$node_(8) setdest 24.574187 426.845069 8.418339" +$ns_ at 6.000000 "$node_(9) setdest 156.473605 416.105660 10.114424" +$ns_ at 6.000000 "$node_(10) setdest 153.809976 390.743972 9.528370" + +$ns_ at 7.000000 "$node_(1) setdest 198.802940 468.836289 8.114961" +$ns_ at 7.000000 "$node_(2) setdest 151.520674 388.001681 11.637897" +$ns_ at 7.000000 "$node_(3) setdest 18.516288 545.400428 3.505809" +$ns_ at 7.000000 "$node_(4) setdest 79.188417 528.784398 10.295912" +$ns_ at 7.000000 "$node_(5) setdest 185.412195 520.020244 5.323489" +$ns_ at 7.000000 "$node_(6) setdest 91.080154 520.135759 10.612660" +$ns_ at 7.000000 "$node_(7) setdest 157.194066 417.952547 11.382569" +$ns_ at 7.000000 "$node_(8) setdest 29.470473 435.584536 10.017580" +$ns_ at 7.000000 "$node_(9) setdest 144.909476 418.020989 11.721670" +$ns_ at 7.000000 "$node_(10) setdest 154.204220 401.865406 11.128419" + +$ns_ at 8.000000 "$node_(1) setdest 208.501664 469.415232 9.715988" +$ns_ at 8.000000 "$node_(2) setdest 164.443430 390.863715 13.235894" +$ns_ at 8.000000 "$node_(3) setdest 23.619197 545.235409 5.105577" +$ns_ at 8.000000 "$node_(4) setdest 90.784905 526.131143 11.896146" +$ns_ at 8.000000 "$node_(5) setdest 192.096154 518.214428 6.923603" +$ns_ at 8.000000 "$node_(6) setdest 80.107574 514.731340 12.231323" +$ns_ at 8.000000 "$node_(7) setdest 153.494434 405.549908 12.942672" +$ns_ at 8.000000 "$node_(8) setdest 34.279206 446.159167 11.616658" +$ns_ at 8.000000 "$node_(9) setdest 131.868827 420.741097 13.321318" +$ns_ at 8.000000 "$node_(10) setdest 155.452554 414.535462 12.731405" + +$ns_ at 9.000000 "$node_(1) setdest 219.760670 470.533402 11.314394" +$ns_ at 9.000000 "$node_(2) setdest 178.682089 395.031168 14.836006" +$ns_ at 9.000000 "$node_(3) setdest 30.308442 544.767661 6.705579" +$ns_ at 9.000000 "$node_(4) setdest 103.964771 523.226201 13.496205" +$ns_ at 9.000000 "$node_(5) setdest 200.282657 515.840475 8.523761" +$ns_ at 9.000000 "$node_(6) setdest 69.403546 506.039069 13.788828" +$ns_ at 9.000000 "$node_(7) setdest 157.365544 391.985292 14.106179" +$ns_ at 9.000000 "$node_(8) setdest 38.646714 458.620995 13.205010" +$ns_ at 9.000000 "$node_(9) setdest 117.255049 423.747093 14.919736" +$ns_ at 9.000000 "$node_(10) setdest 155.850673 428.857711 14.327781" + +$ns_ at 10.000000 "$node_(1) setdest 232.472738 472.803312 12.913139" +$ns_ at 10.000000 "$node_(2) setdest 194.127048 400.653131 16.436339" +$ns_ at 10.000000 "$node_(3) setdest 38.581962 544.035935 8.305815" +$ns_ at 10.000000 "$node_(4) setdest 118.685377 519.880090 15.096115" +$ns_ at 10.000000 "$node_(5) setdest 210.050487 513.181447 10.123286" +$ns_ at 10.000000 "$node_(6) setdest 59.302514 494.374293 15.430420" +$ns_ at 10.000000 "$node_(7) setdest 169.643190 381.472758 16.163353" +$ns_ at 10.000000 "$node_(8) setdest 40.877385 473.255023 14.803063" +$ns_ at 10.000000 "$node_(9) setdest 100.860701 425.784585 16.520473" +$ns_ 
at 10.000000 "$node_(10) setdest 154.521863 444.723981 15.921817" + +$ns_ at 11.000000 "$node_(1) setdest 246.478612 476.598268 14.510900" +$ns_ at 11.000000 "$node_(2) setdest 210.951195 407.159256 18.038337" +$ns_ at 11.000000 "$node_(3) setdest 48.438708 543.050999 9.905834" +$ns_ at 11.000000 "$node_(4) setdest 134.861031 515.746212 16.695530" +$ns_ at 11.000000 "$node_(5) setdest 221.527307 510.799257 11.721444" +$ns_ at 11.000000 "$node_(6) setdest 49.457411 480.494289 17.017067" +$ns_ at 11.000000 "$node_(7) setdest 180.598709 367.456279 17.790028" +$ns_ at 11.000000 "$node_(8) setdest 39.538357 489.562934 16.362792" +$ns_ at 11.000000 "$node_(9) setdest 83.020566 428.952409 18.119203" +$ns_ at 11.000000 "$node_(10) setdest 151.821820 462.044995 17.530195" + +$ns_ at 12.000000 "$node_(1) setdest 261.629729 482.087033 16.114679" +$ns_ at 12.000000 "$node_(2) setdest 229.245871 414.133015 19.578776" +$ns_ at 12.000000 "$node_(3) setdest 59.881260 541.845844 11.505842" +$ns_ at 12.000000 "$node_(4) setdest 152.423948 510.621388 18.295352" +$ns_ at 12.000000 "$node_(5) setdest 234.527040 507.889458 13.321411" +$ns_ at 12.000000 "$node_(6) setdest 40.966307 463.915095 18.627091" +$ns_ at 12.000000 "$node_(7) setdest 185.609934 356.000078 12.504276" +$ns_ at 12.000000 "$node_(8) setdest 30.885561 505.096833 17.781252" +$ns_ at 12.000000 "$node_(9) setdest 63.625003 431.905828 19.619138" +$ns_ at 12.000000 "$node_(10) setdest 146.050593 480.248923 19.096860" + +$ns_ at 13.000000 "$node_(1) setdest 278.393127 487.812114 17.714064" +$ns_ at 13.000000 "$node_(2) setdest 248.189196 420.533024 19.995242" +$ns_ at 13.000000 "$node_(3) setdest 72.938784 540.726815 13.105387" +$ns_ at 13.000000 "$node_(4) setdest 171.235204 504.613101 19.747478" +$ns_ at 13.000000 "$node_(5) setdest 249.025623 504.352557 14.923758" +$ns_ at 13.000000 "$node_(6) setdest 30.880946 446.819434 19.848832" +$ns_ at 13.000000 "$node_(7) setdest 184.657559 357.512927 1.787661" +$ns_ at 13.000000 "$node_(8) setdest 14.206642 507.144149 16.804101" +$ns_ at 13.000000 "$node_(9) setdest 44.005432 428.793757 19.864857" +$ns_ at 13.000000 "$node_(10) setdest 132.025204 492.782747 18.809792" + +$ns_ at 14.000000 "$node_(1) setdest 297.118073 492.525399 19.309031" +$ns_ at 14.000000 "$node_(2) setdest 267.609451 425.285611 19.993334" +$ns_ at 14.000000 "$node_(3) setdest 87.624368 539.967665 14.705193" +$ns_ at 14.000000 "$node_(4) setdest 190.013797 497.736791 19.997979" +$ns_ at 14.000000 "$node_(5) setdest 265.067340 500.390654 16.523722" +$ns_ at 14.000000 "$node_(6) setdest 22.990420 428.699834 19.763105" +$ns_ at 14.000000 "$node_(7) setdest 182.833109 360.375049 3.394165" +$ns_ at 14.000000 "$node_(8) setdest 11.626599 492.727976 14.645227" +$ns_ at 14.000000 "$node_(9) setdest 26.668091 418.946242 19.938830" +$ns_ at 14.000000 "$node_(10) setdest 123.299921 486.526765 10.736287" + +$ns_ at 15.000000 "$node_(1) setdest 316.902786 495.404161 19.993052" +$ns_ at 15.000000 "$node_(2) setdest 287.409944 428.039581 19.991095" +$ns_ at 15.000000 "$node_(3) setdest 103.927453 539.724173 16.304903" +$ns_ at 15.000000 "$node_(4) setdest 208.447762 489.981073 19.999056" +$ns_ at 15.000000 "$node_(5) setdest 282.569771 495.688589 18.123038" +$ns_ at 15.000000 "$node_(6) setdest 32.358017 413.328574 18.000764" +$ns_ at 15.000000 "$node_(7) setdest 180.522818 364.802409 4.993892" +$ns_ at 15.000000 "$node_(8) setdest 17.250049 478.879349 14.946828" +$ns_ at 15.000000 "$node_(9) setdest 19.305071 401.972918 18.501562" +$ns_ at 15.000000 
"$node_(10) setdest 122.308127 476.484801 10.090823" + +$ns_ at 16.000000 "$node_(1) setdest 336.810376 497.321840 19.999742" +$ns_ at 16.000000 "$node_(2) setdest 307.390871 428.541832 19.987239" +$ns_ at 16.000000 "$node_(3) setdest 121.831706 539.952811 17.905713" +$ns_ at 16.000000 "$node_(4) setdest 227.120033 482.822730 19.997389" +$ns_ at 16.000000 "$node_(5) setdest 301.236586 489.579183 19.641151" +$ns_ at 16.000000 "$node_(6) setdest 46.948370 420.814042 16.398495" +$ns_ at 16.000000 "$node_(7) setdest 177.898557 370.854610 6.596658" +$ns_ at 16.000000 "$node_(8) setdest 30.672025 469.903968 16.146420" +$ns_ at 16.000000 "$node_(9) setdest 28.762169 396.296220 11.030031" +$ns_ at 16.000000 "$node_(10) setdest 119.039643 465.199922 11.748680" + +$ns_ at 17.000000 "$node_(1) setdest 356.723919 496.365486 19.936494" +$ns_ at 17.000000 "$node_(2) setdest 327.327857 427.009656 19.995774" +$ns_ at 17.000000 "$node_(3) setdest 141.310307 540.046329 19.478826" +$ns_ at 17.000000 "$node_(4) setdest 246.180029 476.772114 19.997335" +$ns_ at 17.000000 "$node_(5) setdest 320.056175 482.812486 19.999128" +$ns_ at 17.000000 "$node_(6) setdest 50.106366 436.546462 16.046245" +$ns_ at 17.000000 "$node_(7) setdest 175.044087 378.536882 8.195444" +$ns_ at 17.000000 "$node_(8) setdest 48.497193 467.760705 17.953557" +$ns_ at 17.000000 "$node_(9) setdest 38.413556 402.420929 11.430719" +$ns_ at 17.000000 "$node_(10) setdest 114.693546 452.584125 13.343422" + +$ns_ at 18.000000 "$node_(1) setdest 376.664288 496.703412 19.943231" +$ns_ at 18.000000 "$node_(2) setdest 346.945189 423.241158 19.976018" +$ns_ at 18.000000 "$node_(3) setdest 161.304062 539.571077 19.999402" +$ns_ at 18.000000 "$node_(4) setdest 265.558802 471.836704 19.997378" +$ns_ at 18.000000 "$node_(5) setdest 338.448532 474.970956 19.994209" +$ns_ at 18.000000 "$node_(6) setdest 54.288270 451.088370 15.131272" +$ns_ at 18.000000 "$node_(7) setdest 172.073073 387.868710 9.793362" +$ns_ at 18.000000 "$node_(8) setdest 67.655050 465.445201 19.297281" +$ns_ at 18.000000 "$node_(9) setdest 49.709921 408.850378 12.997910" +$ns_ at 18.000000 "$node_(10) setdest 114.878490 437.804560 14.780722" + +$ns_ at 19.000000 "$node_(1) setdest 396.221169 500.830441 19.987596" +$ns_ at 19.000000 "$node_(2) setdest 364.910034 414.653995 19.911680" +$ns_ at 19.000000 "$node_(3) setdest 181.265387 538.352550 19.998483" +$ns_ at 19.000000 "$node_(4) setdest 285.155517 467.848358 19.998454" +$ns_ at 19.000000 "$node_(5) setdest 356.023480 465.435990 19.994859" +$ns_ at 19.000000 "$node_(6) setdest 59.822992 466.650093 16.516669" +$ns_ at 19.000000 "$node_(7) setdest 170.484709 399.136223 11.378916" +$ns_ at 19.000000 "$node_(8) setdest 84.504418 457.452149 18.649130" +$ns_ at 19.000000 "$node_(9) setdest 63.410973 414.003103 14.637944" +$ns_ at 19.000000 "$node_(10) setdest 124.979075 425.189235 16.160700" + +$ns_ at 20.000000 "$node_(1) setdest 414.959731 507.703787 19.959373" +$ns_ at 20.000000 "$node_(2) setdest 380.876777 402.624609 19.991073" +$ns_ at 20.000000 "$node_(3) setdest 201.169537 536.405487 19.999156" +$ns_ at 20.000000 "$node_(4) setdest 304.943032 464.963448 19.996711" +$ns_ at 20.000000 "$node_(5) setdest 372.787924 454.543588 19.992274" +$ns_ at 20.000000 "$node_(6) setdest 70.020001 481.532051 18.040280" +$ns_ at 20.000000 "$node_(7) setdest 170.249709 412.128831 12.994733" +$ns_ at 20.000000 "$node_(8) setdest 93.638312 442.200433 17.777595" +$ns_ at 20.000000 "$node_(9) setdest 78.562641 419.829513 16.233302" +$ns_ at 20.000000 
"$node_(10) setdest 142.579167 424.108364 17.633250" + +$ns_ at 21.000000 "$node_(1) setdest 429.209520 521.348596 19.729097" +$ns_ at 21.000000 "$node_(2) setdest 397.102346 390.982114 19.970398" +$ns_ at 21.000000 "$node_(3) setdest 220.965783 533.570502 19.998213" +$ns_ at 21.000000 "$node_(4) setdest 324.852277 463.066604 19.999402" +$ns_ at 21.000000 "$node_(5) setdest 388.665021 442.382765 19.999195" +$ns_ at 21.000000 "$node_(6) setdest 86.716919 491.602850 19.498924" +$ns_ at 21.000000 "$node_(7) setdest 169.847567 426.715435 14.592147" +$ns_ at 21.000000 "$node_(8) setdest 97.710618 423.903178 18.744951" +$ns_ at 21.000000 "$node_(9) setdest 95.770736 424.404680 17.805917" +$ns_ at 21.000000 "$node_(10) setdest 161.156216 426.885868 18.783538" + +$ns_ at 22.000000 "$node_(1) setdest 430.473009 540.114895 18.808785" +$ns_ at 22.000000 "$node_(2) setdest 416.281990 386.251137 19.754515" +$ns_ at 22.000000 "$node_(3) setdest 240.636740 529.963240 19.998972" +$ns_ at 22.000000 "$node_(4) setdest 344.804967 461.695943 19.999714" +$ns_ at 22.000000 "$node_(5) setdest 405.064820 430.950640 19.991171" +$ns_ at 22.000000 "$node_(6) setdest 105.990772 495.255793 19.616967" +$ns_ at 22.000000 "$node_(7) setdest 167.385262 442.713557 16.186502" +$ns_ at 22.000000 "$node_(8) setdest 103.162186 405.249727 19.433755" +$ns_ at 22.000000 "$node_(9) setdest 115.163365 425.341594 19.415248" +$ns_ at 22.000000 "$node_(10) setdest 179.829755 431.479678 19.230293" + +$ns_ at 23.000000 "$node_(1) setdest 415.678953 543.404262 15.155331" +$ns_ at 23.000000 "$node_(2) setdest 435.187649 392.319327 19.855651" +$ns_ at 23.000000 "$node_(3) setdest 260.149758 525.579574 19.999361" +$ns_ at 23.000000 "$node_(4) setdest 364.757106 460.319210 19.999581" +$ns_ at 23.000000 "$node_(5) setdest 423.001973 422.178788 19.967143" +$ns_ at 23.000000 "$node_(6) setdest 123.055577 489.304227 18.072872" +$ns_ at 23.000000 "$node_(7) setdest 163.127378 459.989903 17.793305" +$ns_ at 23.000000 "$node_(8) setdest 100.668183 387.349777 18.072860" +$ns_ at 23.000000 "$node_(9) setdest 134.745751 421.781273 19.903410" +$ns_ at 23.000000 "$node_(10) setdest 192.254617 443.561434 17.330494" + +$ns_ at 24.000000 "$node_(1) setdest 406.160525 540.747525 9.882242" +$ns_ at 24.000000 "$node_(2) setdest 449.530999 405.814727 19.694098" +$ns_ at 24.000000 "$node_(3) setdest 279.642597 521.105699 19.999658" +$ns_ at 24.000000 "$node_(4) setdest 384.596757 457.826144 19.995677" +$ns_ at 24.000000 "$node_(5) setdest 442.473674 417.861163 19.944649" +$ns_ at 24.000000 "$node_(6) setdest 134.199736 475.485411 17.752521" +$ns_ at 24.000000 "$node_(7) setdest 160.485918 479.161017 19.352233" +$ns_ at 24.000000 "$node_(8) setdest 89.621312 373.738896 17.529673" +$ns_ at 24.000000 "$node_(9) setdest 151.033770 410.479558 19.824942" +$ns_ at 24.000000 "$node_(10) setdest 187.121532 459.497919 16.742763" + +$ns_ at 25.000000 "$node_(1) setdest 409.358404 540.925330 3.202818" +$ns_ at 25.000000 "$node_(2) setdest 458.447905 423.373342 19.693049" +$ns_ at 25.000000 "$node_(3) setdest 299.300679 517.470033 19.991455" +$ns_ at 25.000000 "$node_(4) setdest 404.207394 453.940099 19.991959" +$ns_ at 25.000000 "$node_(5) setdest 462.251199 419.968804 19.889511" +$ns_ at 25.000000 "$node_(6) setdest 137.331615 457.789340 17.971076" +$ns_ at 25.000000 "$node_(7) setdest 163.430534 498.853924 19.911840" +$ns_ at 25.000000 "$node_(8) setdest 77.657124 359.369184 18.698407" +$ns_ at 25.000000 "$node_(9) setdest 164.260780 395.480602 19.998061" +$ns_ at 
25.000000 "$node_(10) setdest 184.039441 474.459839 15.276071" + +$ns_ at 26.000000 "$node_(1) setdest 413.898194 539.190016 4.860144" +$ns_ at 26.000000 "$node_(2) setdest 472.249585 436.422368 18.993774" +$ns_ at 26.000000 "$node_(3) setdest 319.267001 516.756455 19.979069" +$ns_ at 26.000000 "$node_(4) setdest 422.915330 447.007413 19.951166" +$ns_ at 26.000000 "$node_(5) setdest 477.938097 430.823097 19.076018" +$ns_ at 26.000000 "$node_(6) setdest 140.850027 439.467490 18.656618" +$ns_ at 26.000000 "$node_(7) setdest 173.893091 515.708878 19.838210" +$ns_ at 26.000000 "$node_(8) setdest 62.077819 360.254769 15.604455" +$ns_ at 26.000000 "$node_(9) setdest 179.407140 382.455787 19.976437" +$ns_ at 26.000000 "$node_(10) setdest 185.534654 490.826466 16.434784" + +$ns_ at 27.000000 "$node_(1) setdest 419.345130 535.713495 6.461836" +$ns_ at 27.000000 "$node_(2) setdest 485.947996 449.586921 18.998734" +$ns_ at 27.000000 "$node_(3) setdest 339.044367 519.533812 19.971427" +$ns_ at 27.000000 "$node_(4) setdest 438.826119 435.000710 19.932740" +$ns_ at 27.000000 "$node_(5) setdest 473.471267 424.832381 7.472701" +$ns_ at 27.000000 "$node_(6) setdest 135.925019 421.904243 18.240706" +$ns_ at 27.000000 "$node_(7) setdest 186.435379 531.166580 19.906018" +$ns_ at 27.000000 "$node_(8) setdest 57.119411 374.294350 14.889447" +$ns_ at 27.000000 "$node_(9) setdest 192.143744 373.866180 15.362370" +$ns_ at 27.000000 "$node_(10) setdest 185.569664 508.866862 18.040430" + +$ns_ at 28.000000 "$node_(1) setdest 425.125458 530.097247 8.059430" +$ns_ at 28.000000 "$node_(2) setdest 493.049174 466.718934 18.545421" +$ns_ at 28.000000 "$node_(3) setdest 358.303469 524.898542 19.992332" +$ns_ at 28.000000 "$node_(4) setdest 450.847143 419.080495 19.948891" +$ns_ at 28.000000 "$node_(5) setdest 466.757695 428.118270 7.474565" +$ns_ at 28.000000 "$node_(6) setdest 128.600444 404.149699 19.206072" +$ns_ at 28.000000 "$node_(7) setdest 190.977325 534.505975 5.637449" +$ns_ at 28.000000 "$node_(8) setdest 50.549060 388.112799 15.300949" +$ns_ at 28.000000 "$node_(9) setdest 191.629729 374.111864 0.569712" +$ns_ at 28.000000 "$node_(10) setdest 181.834449 527.981880 19.476543" + +$ns_ at 29.000000 "$node_(1) setdest 431.663417 522.972367 9.669997" +$ns_ at 29.000000 "$node_(2) setdest 500.999975 483.730217 18.777619" +$ns_ at 29.000000 "$node_(3) setdest 378.191935 526.327594 19.939741" +$ns_ at 29.000000 "$node_(4) setdest 458.446194 400.622892 19.960678" +$ns_ at 29.000000 "$node_(5) setdest 461.699992 435.699961 9.113857" +$ns_ at 29.000000 "$node_(6) setdest 122.697249 385.435598 19.623081" +$ns_ at 29.000000 "$node_(7) setdest 183.725190 529.813555 8.637839" +$ns_ at 29.000000 "$node_(8) setdest 38.920729 400.522858 17.006694" +$ns_ at 29.000000 "$node_(9) setdest 190.433575 375.757305 2.034271" +$ns_ at 29.000000 "$node_(10) setdest 169.338208 541.367431 18.311990" + +$ns_ at 30.000000 "$node_(1) setdest 440.262619 515.700706 11.261586" +$ns_ at 30.000000 "$node_(2) setdest 500.811238 501.442061 17.712850" +$ns_ at 30.000000 "$node_(3) setdest 397.439899 521.321197 19.888392" +$ns_ at 30.000000 "$node_(4) setdest 464.984365 381.792314 19.933348" +$ns_ at 30.000000 "$node_(5) setdest 458.178246 445.894374 10.785581" +$ns_ at 30.000000 "$node_(6) setdest 117.489012 366.365374 19.768641" +$ns_ at 30.000000 "$node_(7) setdest 174.343370 525.704745 10.242113" +$ns_ at 30.000000 "$node_(8) setdest 24.090640 411.759729 18.606419" +$ns_ at 30.000000 "$node_(9) setdest 187.828459 378.293814 3.636002" +$ns_ at 
30.000000 "$node_(10) setdest 152.821188 535.712898 17.458113" + +$ns_ at 31.000000 "$node_(1) setdest 451.405438 509.290896 12.854885" +$ns_ at 31.000000 "$node_(2) setdest 491.700571 517.036414 18.060678" +$ns_ at 31.000000 "$node_(3) setdest 413.171544 509.240492 19.835022" +$ns_ at 31.000000 "$node_(4) setdest 480.918024 372.117997 18.640651" +$ns_ at 31.000000 "$node_(5) setdest 456.464076 458.133299 12.358384" +$ns_ at 31.000000 "$node_(6) setdest 116.659357 360.796271 5.630563" +$ns_ at 31.000000 "$node_(7) setdest 164.951174 518.581014 11.788168" +$ns_ at 31.000000 "$node_(8) setdest 8.936318 423.738537 19.316970" +$ns_ at 31.000000 "$node_(9) setdest 184.393707 382.243473 5.234245" +$ns_ at 31.000000 "$node_(10) setdest 139.669561 526.256345 16.198509" + +$ns_ at 32.000000 "$node_(1) setdest 464.725397 503.658293 14.461934" +$ns_ at 32.000000 "$node_(2) setdest 479.083065 531.312050 19.052435" +$ns_ at 32.000000 "$node_(3) setdest 421.035268 491.095019 19.776156" +$ns_ at 32.000000 "$node_(4) setdest 490.592449 383.745440 15.125870" +$ns_ at 32.000000 "$node_(5) setdest 456.762816 472.118321 13.988212" +$ns_ at 32.000000 "$node_(6) setdest 116.717596 368.408818 7.612770" +$ns_ at 32.000000 "$node_(7) setdest 156.422329 508.202089 13.433662" +$ns_ at 32.000000 "$node_(8) setdest 11.745791 420.358995 4.394820" +$ns_ at 32.000000 "$node_(9) setdest 180.640795 387.951388 6.831153" +$ns_ at 32.000000 "$node_(10) setdest 126.512644 516.165541 16.580976" + +$ns_ at 33.000000 "$node_(1) setdest 480.312677 499.837211 16.048799" +$ns_ at 33.000000 "$node_(2) setdest 464.103605 542.498822 18.695670" +$ns_ at 33.000000 "$node_(3) setdest 415.566812 472.559995 19.324884" +$ns_ at 33.000000 "$node_(4) setdest 492.444864 398.380771 14.752097" +$ns_ at 33.000000 "$node_(5) setdest 454.811405 487.581008 15.585336" +$ns_ at 33.000000 "$node_(6) setdest 117.946938 377.618577 9.291444" +$ns_ at 33.000000 "$node_(7) setdest 147.983933 495.750139 15.041861" +$ns_ at 33.000000 "$node_(8) setdest 16.630884 421.422038 4.999419" +$ns_ at 33.000000 "$node_(9) setdest 176.046756 395.024567 8.434160" +$ns_ at 33.000000 "$node_(10) setdest 114.664512 502.380973 18.176703" + +$ns_ at 34.000000 "$node_(1) setdest 497.919072 498.422291 17.663158" +$ns_ at 34.000000 "$node_(2) setdest 467.653826 541.046000 3.835981" +$ns_ at 34.000000 "$node_(3) setdest 405.186017 464.081358 13.403290" +$ns_ at 34.000000 "$node_(4) setdest 490.175129 414.347121 16.126873" +$ns_ at 34.000000 "$node_(5) setdest 451.792079 504.480069 17.166671" +$ns_ at 34.000000 "$node_(6) setdest 121.883147 387.777476 10.894815" +$ns_ at 34.000000 "$node_(7) setdest 138.114893 482.374086 16.622777" +$ns_ at 34.000000 "$node_(8) setdest 22.994022 423.191629 6.604618" +$ns_ at 34.000000 "$node_(9) setdest 170.044883 403.068060 10.035948" +$ns_ at 34.000000 "$node_(10) setdest 107.004140 484.410586 19.534997" + +$ns_ at 35.000000 "$node_(1) setdest 517.152845 499.275087 19.252670" +$ns_ at 35.000000 "$node_(2) setdest 469.760083 536.639542 4.883974" +$ns_ at 35.000000 "$node_(3) setdest 406.522026 464.499466 1.399905" +$ns_ at 35.000000 "$node_(4) setdest 486.192447 431.664927 17.769867" +$ns_ at 35.000000 "$node_(5) setdest 441.963945 520.294525 18.619593" +$ns_ at 35.000000 "$node_(6) setdest 124.346991 399.961894 12.431032" +$ns_ at 35.000000 "$node_(7) setdest 124.150809 470.712739 18.192929" +$ns_ at 35.000000 "$node_(8) setdest 30.684577 426.047417 8.203668" +$ns_ at 35.000000 "$node_(9) setdest 162.654904 412.053873 11.634287" +$ns_ at 
35.000000 "$node_(10) setdest 107.841118 465.759378 18.669978" + +$ns_ at 36.000000 "$node_(1) setdest 536.528998 504.082029 19.963517" +$ns_ at 36.000000 "$node_(2) setdest 474.403635 532.108135 6.488160" +$ns_ at 36.000000 "$node_(3) setdest 409.528015 464.468428 3.006149" +$ns_ at 36.000000 "$node_(4) setdest 484.427652 450.881008 19.296950" +$ns_ at 36.000000 "$node_(5) setdest 426.470273 532.788799 19.903788" +$ns_ at 36.000000 "$node_(6) setdest 120.932715 413.505237 13.967084" +$ns_ at 36.000000 "$node_(7) setdest 105.532349 464.813463 19.530707" +$ns_ at 36.000000 "$node_(8) setdest 39.253080 430.783080 9.790084" +$ns_ at 36.000000 "$node_(9) setdest 153.536221 421.645072 13.234104" +$ns_ at 36.000000 "$node_(10) setdest 117.666134 450.695452 17.984794" + +$ns_ at 37.000000 "$node_(1) setdest 554.428542 512.921944 19.963411" +$ns_ at 37.000000 "$node_(2) setdest 481.127335 527.610678 8.089206" +$ns_ at 37.000000 "$node_(3) setdest 414.102043 465.013680 4.606412" +$ns_ at 37.000000 "$node_(4) setdest 489.773407 469.940100 19.794597" +$ns_ at 37.000000 "$node_(5) setdest 408.058188 532.100718 18.424937" +$ns_ at 37.000000 "$node_(6) setdest 109.012095 423.192684 15.360592" +$ns_ at 37.000000 "$node_(7) setdest 85.728992 467.102452 19.935205" +$ns_ at 37.000000 "$node_(8) setdest 48.361002 437.630220 11.394629" +$ns_ at 37.000000 "$node_(9) setdest 142.102893 431.075459 14.820701" +$ns_ at 37.000000 "$node_(10) setdest 131.161133 437.935237 18.572509" + +$ns_ at 38.000000 "$node_(1) setdest 566.932229 527.655226 19.323866" +$ns_ at 38.000000 "$node_(2) setdest 490.148231 524.075412 9.688895" +$ns_ at 38.000000 "$node_(3) setdest 420.185075 466.255244 6.208443" +$ns_ at 38.000000 "$node_(4) setdest 501.252538 485.927117 19.681340" +$ns_ at 38.000000 "$node_(5) setdest 405.981429 521.845202 10.463677" +$ns_ at 38.000000 "$node_(6) setdest 91.954174 422.557957 17.069726" +$ns_ at 38.000000 "$node_(7) setdest 66.578736 472.830545 19.988581" +$ns_ at 38.000000 "$node_(8) setdest 58.379443 445.916629 13.001297" +$ns_ at 38.000000 "$node_(9) setdest 128.348746 440.069498 16.433785" +$ns_ at 38.000000 "$node_(10) setdest 139.114132 421.205939 18.523488" + +$ns_ at 39.000000 "$node_(1) setdest 567.143596 529.512240 1.869004" +$ns_ at 39.000000 "$node_(2) setdest 501.202329 521.792270 11.287418" +$ns_ at 39.000000 "$node_(3) setdest 427.677364 468.446739 7.806218" +$ns_ at 39.000000 "$node_(4) setdest 511.141012 502.780491 19.540167" +$ns_ at 39.000000 "$node_(5) setdest 413.374406 514.767835 10.234513" +$ns_ at 39.000000 "$node_(6) setdest 75.493177 413.507199 18.785117" +$ns_ at 39.000000 "$node_(7) setdest 46.764183 471.773954 19.842703" +$ns_ at 39.000000 "$node_(8) setdest 69.942011 454.838968 14.604833" +$ns_ at 39.000000 "$node_(9) setdest 112.291115 448.251876 18.022176" +$ns_ at 39.000000 "$node_(10) setdest 141.846200 402.561193 18.843852" + +$ns_ at 40.000000 "$node_(1) setdest 564.578377 527.996748 2.979440" +$ns_ at 40.000000 "$node_(2) setdest 514.059687 520.805131 12.895197" +$ns_ at 40.000000 "$node_(3) setdest 436.538030 471.609166 9.408100" +$ns_ at 40.000000 "$node_(4) setdest 519.473141 520.208352 19.317213" +$ns_ at 40.000000 "$node_(5) setdest 419.858457 504.879506 11.824634" +$ns_ at 40.000000 "$node_(6) setdest 87.235668 420.830481 13.838951" +$ns_ at 40.000000 "$node_(7) setdest 64.203274 476.552061 18.081820" +$ns_ at 40.000000 "$node_(8) setdest 84.829204 454.481993 14.891472" +$ns_ at 40.000000 "$node_(9) setdest 125.059425 443.474044 13.632954" +$ns_ at 40.000000 
"$node_(10) setdest 148.729503 397.886383 8.320679" + +$ns_ at 41.000000 "$node_(1) setdest 560.539286 525.794333 4.600531" +$ns_ at 41.000000 "$node_(2) setdest 528.559240 520.724740 14.499776" +$ns_ at 41.000000 "$node_(3) setdest 446.636223 475.986259 11.006019" +$ns_ at 41.000000 "$node_(4) setdest 526.125692 538.415388 19.384339" +$ns_ at 41.000000 "$node_(5) setdest 424.189640 492.212788 13.386743" +$ns_ at 41.000000 "$node_(6) setdest 102.775617 424.456016 15.957271" +$ns_ at 41.000000 "$node_(7) setdest 83.933552 478.414079 19.817945" +$ns_ at 41.000000 "$node_(8) setdest 101.442514 452.999884 16.679290" +$ns_ at 41.000000 "$node_(9) setdest 140.551926 441.300742 15.644194" +$ns_ at 41.000000 "$node_(10) setdest 158.699125 396.791989 10.029510" + +$ns_ at 42.000000 "$node_(1) setdest 554.946328 523.113767 6.202145" +$ns_ at 42.000000 "$node_(2) setdest 544.648492 520.084739 16.101976" +$ns_ at 42.000000 "$node_(3) setdest 457.867768 481.716252 12.608743" +$ns_ at 42.000000 "$node_(4) setdest 542.996006 542.160589 17.281030" +$ns_ at 42.000000 "$node_(5) setdest 427.836410 477.619041 15.042485" +$ns_ at 42.000000 "$node_(6) setdest 120.073180 427.461833 17.556783" +$ns_ at 42.000000 "$node_(7) setdest 103.908927 479.371110 19.998288" +$ns_ at 42.000000 "$node_(8) setdest 119.693903 451.998492 18.278840" +$ns_ at 42.000000 "$node_(9) setdest 157.508841 438.167173 17.244020" +$ns_ at 42.000000 "$node_(10) setdest 170.268449 395.614996 11.629039" + +$ns_ at 43.000000 "$node_(1) setdest 548.011229 519.540767 7.801405" +$ns_ at 43.000000 "$node_(2) setdest 559.640611 513.386833 16.420280" +$ns_ at 43.000000 "$node_(3) setdest 470.540438 488.142494 14.208911" +$ns_ at 43.000000 "$node_(4) setdest 557.442321 534.605141 16.302785" +$ns_ at 43.000000 "$node_(5) setdest 432.038134 461.515754 16.642426" +$ns_ at 43.000000 "$node_(6) setdest 139.110535 429.593024 19.156274" +$ns_ at 43.000000 "$node_(7) setdest 123.907121 479.374828 19.998194" +$ns_ at 43.000000 "$node_(8) setdest 139.427933 451.544210 19.739258" +$ns_ at 43.000000 "$node_(9) setdest 176.047394 434.787992 18.844013" +$ns_ at 43.000000 "$node_(10) setdest 183.465605 394.695269 13.229166" + +$ns_ at 44.000000 "$node_(1) setdest 540.245181 514.250782 9.396566" +$ns_ at 44.000000 "$node_(2) setdest 557.197334 501.679640 11.959430" +$ns_ at 44.000000 "$node_(3) setdest 484.194340 496.104414 15.805733" +$ns_ at 44.000000 "$node_(4) setdest 569.805764 524.976564 15.670489" +$ns_ at 44.000000 "$node_(5) setdest 437.187949 444.017164 18.240648" +$ns_ at 44.000000 "$node_(6) setdest 159.082551 430.599768 19.997374" +$ns_ at 44.000000 "$node_(7) setdest 143.880972 478.393510 19.997942" +$ns_ at 44.000000 "$node_(8) setdest 159.427822 451.554805 19.999892" +$ns_ at 44.000000 "$node_(9) setdest 195.825003 432.098619 19.959623" +$ns_ at 44.000000 "$node_(10) setdest 198.232049 393.331915 14.829248" + +$ns_ at 45.000000 "$node_(1) setdest 531.522089 507.546145 11.002022" +$ns_ at 45.000000 "$node_(2) setdest 547.805553 492.435892 13.177725" +$ns_ at 45.000000 "$node_(3) setdest 499.562490 504.256506 17.396455" +$ns_ at 45.000000 "$node_(4) setdest 586.131716 520.450839 16.941632" +$ns_ at 45.000000 "$node_(5) setdest 444.245357 425.600998 19.722124" +$ns_ at 45.000000 "$node_(6) setdest 179.081213 430.697528 19.998900" +$ns_ at 45.000000 "$node_(7) setdest 163.775015 476.359693 19.997735" +$ns_ at 45.000000 "$node_(8) setdest 179.427632 451.479451 19.999952" +$ns_ at 45.000000 "$node_(9) setdest 215.744509 430.326634 19.998166" +$ns_ at 
45.000000 "$node_(10) setdest 214.511122 391.127338 16.427671" + +$ns_ at 46.000000 "$node_(1) setdest 521.224593 500.285353 12.599902" +$ns_ at 46.000000 "$node_(2) setdest 537.140911 482.188971 14.789658" +$ns_ at 46.000000 "$node_(3) setdest 517.239989 511.230267 19.003350" +$ns_ at 46.000000 "$node_(4) setdest 596.737679 531.230366 15.122323" +$ns_ at 46.000000 "$node_(5) setdest 450.567683 406.626744 19.999853" +$ns_ at 46.000000 "$node_(6) setdest 199.077088 430.293213 19.999963" +$ns_ at 46.000000 "$node_(7) setdest 183.547456 473.359299 19.998794" +$ns_ at 46.000000 "$node_(8) setdest 199.426772 451.293967 19.999999" +$ns_ at 46.000000 "$node_(9) setdest 235.727172 429.545275 19.997933" +$ns_ at 46.000000 "$node_(10) setdest 232.313482 388.287908 18.027380" + +$ns_ at 47.000000 "$node_(1) setdest 509.132728 492.839445 14.200519" +$ns_ at 47.000000 "$node_(2) setdest 529.956513 467.566241 16.292323" +$ns_ at 47.000000 "$node_(3) setdest 530.443547 512.706866 13.285868" +$ns_ at 47.000000 "$node_(4) setdest 593.888061 544.610217 13.679939" +$ns_ at 47.000000 "$node_(5) setdest 455.365546 387.217155 19.993791" +$ns_ at 47.000000 "$node_(6) setdest 219.066969 429.662345 19.999833" +$ns_ at 47.000000 "$node_(7) setdest 203.164862 469.479130 19.997458" +$ns_ at 47.000000 "$node_(8) setdest 219.417361 451.789967 19.996742" +$ns_ at 47.000000 "$node_(9) setdest 255.709496 428.728816 19.998997" +$ns_ at 47.000000 "$node_(10) setdest 251.796297 386.439234 19.570326" + +$ns_ at 48.000000 "$node_(1) setdest 494.811654 486.211110 15.780621" +$ns_ at 48.000000 "$node_(2) setdest 523.405915 450.824130 17.978004" +$ns_ at 48.000000 "$node_(3) setdest 527.369831 511.982215 3.157982" +$ns_ at 48.000000 "$node_(4) setdest 596.143329 540.896760 4.344652" +$ns_ at 48.000000 "$node_(5) setdest 452.127739 368.087344 19.401882" +$ns_ at 48.000000 "$node_(6) setdest 239.040183 428.633694 19.999686" +$ns_ at 48.000000 "$node_(7) setdest 222.507492 464.407083 19.996575" +$ns_ at 48.000000 "$node_(8) setdest 239.319828 453.699710 19.993882" +$ns_ at 48.000000 "$node_(9) setdest 275.658454 427.303064 19.999843" +$ns_ at 48.000000 "$node_(10) setdest 271.767272 385.393628 19.998328" + +$ns_ at 49.000000 "$node_(1) setdest 477.936814 482.029186 17.385301" +$ns_ at 49.000000 "$node_(2) setdest 511.589591 435.283606 19.522638" +$ns_ at 49.000000 "$node_(3) setdest 522.811495 510.411820 4.821262" +$ns_ at 49.000000 "$node_(4) setdest 596.364469 534.992612 5.908287" +$ns_ at 49.000000 "$node_(5) setdest 441.999585 365.539662 10.443668" +$ns_ at 49.000000 "$node_(6) setdest 259.000403 427.373742 19.999946" +$ns_ at 49.000000 "$node_(7) setdest 241.427668 457.938519 19.995384" +$ns_ at 49.000000 "$node_(8) setdest 258.949550 457.488811 19.992080" +$ns_ at 49.000000 "$node_(9) setdest 295.601472 425.794473 19.999995" +$ns_ at 49.000000 "$node_(10) setdest 291.764641 385.099501 19.999532" + +$ns_ at 50.000000 "$node_(1) setdest 460.251786 475.213570 18.952911" +$ns_ at 50.000000 "$node_(2) setdest 494.572859 425.450831 19.653312" +$ns_ at 50.000000 "$node_(3) setdest 516.437962 509.551212 6.431374" +$ns_ at 50.000000 "$node_(4) setdest 595.905073 527.487889 7.518771" +$ns_ at 50.000000 "$node_(5) setdest 435.704871 370.756310 8.175380" +$ns_ at 50.000000 "$node_(6) setdest 278.984020 426.603522 19.998455" +$ns_ at 50.000000 "$node_(7) setdest 259.940900 450.373613 19.999189" +$ns_ at 50.000000 "$node_(8) setdest 278.151718 463.062180 19.994642" +$ns_ at 50.000000 "$node_(9) setdest 315.561187 424.532692 19.999558" +$ns_ 
at 50.000000 "$node_(10) setdest 311.760339 384.687257 19.999947" + +$ns_ at 51.000000 "$node_(1) setdest 445.750485 461.773203 19.771980" +$ns_ at 51.000000 "$node_(2) setdest 477.062237 431.160008 18.417833" +$ns_ at 51.000000 "$node_(3) setdest 508.417207 509.293695 8.024888" +$ns_ at 51.000000 "$node_(4) setdest 596.138322 518.372789 9.118084" +$ns_ at 51.000000 "$node_(5) setdest 427.847555 376.601237 9.792885" +$ns_ at 51.000000 "$node_(6) setdest 298.977580 426.115506 19.999515" +$ns_ at 51.000000 "$node_(7) setdest 278.425975 442.746479 19.996779" +$ns_ at 51.000000 "$node_(8) setdest 297.021088 469.690556 19.999713" +$ns_ at 51.000000 "$node_(9) setdest 335.550946 423.983930 19.997290" +$ns_ at 51.000000 "$node_(10) setdest 331.755079 384.949165 19.996455" + +$ns_ at 52.000000 "$node_(1) setdest 440.756113 442.613683 19.799772" +$ns_ at 52.000000 "$node_(2) setdest 463.491729 442.499863 17.684767" +$ns_ at 52.000000 "$node_(3) setdest 498.797256 509.801163 9.633327" +$ns_ at 52.000000 "$node_(4) setdest 595.872849 507.661271 10.714807" +$ns_ at 52.000000 "$node_(5) setdest 421.543150 385.787102 11.141168" +$ns_ at 52.000000 "$node_(6) setdest 318.966418 425.467482 19.999339" +$ns_ at 52.000000 "$node_(7) setdest 297.432455 436.535728 19.995493" +$ns_ at 52.000000 "$node_(8) setdest 316.154504 475.504807 19.997327" +$ns_ at 52.000000 "$node_(9) setdest 355.519975 424.976200 19.993668" +$ns_ at 52.000000 "$node_(10) setdest 351.681172 386.619992 19.996020" + +$ns_ at 53.000000 "$node_(1) setdest 439.775796 422.648723 19.989013" +$ns_ at 53.000000 "$node_(2) setdest 454.163430 457.931863 18.032299" +$ns_ at 53.000000 "$node_(3) setdest 487.566288 509.968935 11.232221" +$ns_ at 53.000000 "$node_(4) setdest 593.366650 495.626354 12.293098" +$ns_ at 53.000000 "$node_(5) setdest 426.618346 394.729658 10.282360" +$ns_ at 53.000000 "$node_(6) setdest 338.963772 425.552181 19.997533" +$ns_ at 53.000000 "$node_(7) setdest 316.854629 431.777866 19.996452" +$ns_ at 53.000000 "$node_(8) setdest 335.626595 480.039122 19.993058" +$ns_ at 53.000000 "$node_(9) setdest 375.323130 427.689354 19.988150" +$ns_ at 53.000000 "$node_(10) setdest 371.379909 390.033338 19.992278" + +$ns_ at 54.000000 "$node_(1) setdest 445.215882 403.632184 19.779365" +$ns_ at 54.000000 "$node_(2) setdest 454.059564 475.725275 17.793715" +$ns_ at 54.000000 "$node_(3) setdest 474.763417 509.199397 12.825977" +$ns_ at 54.000000 "$node_(4) setdest 586.634251 483.521630 13.850977" +$ns_ at 54.000000 "$node_(5) setdest 434.740699 390.416312 9.196607" +$ns_ at 54.000000 "$node_(6) setdest 358.922646 426.804947 19.998152" +$ns_ at 54.000000 "$node_(7) setdest 336.584130 428.536703 19.993958" +$ns_ at 54.000000 "$node_(8) setdest 355.462297 482.552656 19.994323" +$ns_ at 54.000000 "$node_(9) setdest 394.479217 433.330580 19.969455" +$ns_ at 54.000000 "$node_(10) setdest 390.644077 395.375055 19.991051" + +$ns_ at 55.000000 "$node_(1) setdest 453.489395 385.467485 19.960143" +$ns_ at 55.000000 "$node_(2) setdest 462.180023 491.875553 18.076873" +$ns_ at 55.000000 "$node_(3) setdest 460.781091 505.762029 14.398644" +$ns_ at 55.000000 "$node_(4) setdest 575.183381 473.135665 15.459324" +$ns_ at 55.000000 "$node_(5) setdest 443.385083 383.339482 11.171700" +$ns_ at 55.000000 "$node_(6) setdest 378.818295 428.840950 19.999553" +$ns_ at 55.000000 "$node_(7) setdest 356.438978 426.133796 19.999724" +$ns_ at 55.000000 "$node_(8) setdest 375.442154 483.043104 19.985875" +$ns_ at 55.000000 "$node_(9) setdest 411.603795 443.510768 19.922032" 
+$ns_ at 55.000000 "$node_(10) setdest 409.167270 402.886591 19.988293" + +$ns_ at 56.000000 "$node_(1) setdest 459.358776 366.350314 19.997896" +$ns_ at 56.000000 "$node_(2) setdest 474.439207 506.513884 19.093672" +$ns_ at 56.000000 "$node_(3) setdest 448.331346 496.551801 15.486266" +$ns_ at 56.000000 "$node_(4) setdest 559.796949 465.725065 17.078034" +$ns_ at 56.000000 "$node_(5) setdest 453.233562 375.205933 12.772907" +$ns_ at 56.000000 "$node_(6) setdest 398.640054 431.491358 19.998170" +$ns_ at 56.000000 "$node_(7) setdest 376.341269 424.293425 19.987200" +$ns_ at 56.000000 "$node_(8) setdest 395.289271 480.727854 19.981702" +$ns_ at 56.000000 "$node_(9) setdest 424.686463 458.541108 19.926549" +$ns_ at 56.000000 "$node_(10) setdest 426.588462 412.682524 19.986450" + +$ns_ at 57.000000 "$node_(1) setdest 474.147494 357.589355 17.188968" +$ns_ at 57.000000 "$node_(2) setdest 487.397435 521.211648 19.594386" +$ns_ at 57.000000 "$node_(3) setdest 449.175044 489.763733 6.840299" +$ns_ at 57.000000 "$node_(4) setdest 541.945847 460.100331 18.716289" +$ns_ at 57.000000 "$node_(5) setdest 461.910819 364.190126 14.022938" +$ns_ at 57.000000 "$node_(6) setdest 418.176323 435.711193 19.986816" +$ns_ at 57.000000 "$node_(7) setdest 395.624998 428.517124 19.740868" +$ns_ at 57.000000 "$node_(8) setdest 414.491546 475.216368 19.977584" +$ns_ at 57.000000 "$node_(9) setdest 432.332273 476.914703 19.900939" +$ns_ at 57.000000 "$node_(10) setdest 442.302058 425.016869 19.976315" + +$ns_ at 58.000000 "$node_(1) setdest 481.754901 367.368742 12.389877" +$ns_ at 58.000000 "$node_(2) setdest 501.253150 535.247087 19.722434" +$ns_ at 58.000000 "$node_(3) setdest 452.555239 490.811571 3.538882" +$ns_ at 58.000000 "$node_(4) setdest 523.200618 453.356899 19.921282" +$ns_ at 58.000000 "$node_(5) setdest 462.037992 362.225513 1.968725" +$ns_ at 58.000000 "$node_(6) setdest 436.692511 443.161055 19.958699" +$ns_ at 58.000000 "$node_(7) setdest 407.232993 444.170234 19.487570" +$ns_ at 58.000000 "$node_(8) setdest 432.402128 466.376774 19.973166" +$ns_ at 58.000000 "$node_(9) setdest 433.371712 496.829333 19.941738" +$ns_ at 58.000000 "$node_(10) setdest 455.460442 440.041839 19.972301" + +$ns_ at 59.000000 "$node_(1) setdest 490.840101 378.013714 13.994866" +$ns_ at 59.000000 "$node_(2) setdest 517.254096 544.656606 18.562578" +$ns_ at 59.000000 "$node_(3) setdest 456.805761 493.906270 5.257765" +$ns_ at 59.000000 "$node_(4) setdest 505.296597 444.480311 19.983688" +$ns_ at 59.000000 "$node_(5) setdest 459.985759 364.047842 2.744548" +$ns_ at 59.000000 "$node_(6) setdest 452.558464 455.227354 19.932988" +$ns_ at 59.000000 "$node_(7) setdest 405.781902 462.093611 17.982022" +$ns_ at 59.000000 "$node_(8) setdest 448.131072 454.084488 19.962465" +$ns_ at 59.000000 "$node_(9) setdest 431.643951 516.751552 19.996999" +$ns_ at 59.000000 "$node_(10) setdest 465.450760 457.326282 19.963928" + +$ns_ at 60.000000 "$node_(1) setdest 498.659527 391.489267 15.579922" +$ns_ at 60.000000 "$node_(2) setdest 534.755136 543.334674 17.550895" +$ns_ at 60.000000 "$node_(3) setdest 462.401977 497.870630 6.858118" +$ns_ at 60.000000 "$node_(4) setdest 488.791604 433.220713 19.979823" +$ns_ at 60.000000 "$node_(5) setdest 456.842494 367.045729 4.343667" +$ns_ at 60.000000 "$node_(6) setdest 465.331500 470.591333 19.980047" +$ns_ at 60.000000 "$node_(7) setdest 408.815556 476.318798 14.545068" +$ns_ at 60.000000 "$node_(8) setdest 460.493008 438.434407 19.943483" +$ns_ at 60.000000 "$node_(9) setdest 438.396275 534.407383 
18.902969" +$ns_ at 60.000000 "$node_(10) setdest 472.292912 476.109195 19.990319" + +$ns_ at 61.000000 "$node_(1) setdest 506.422494 406.848972 17.210003" +$ns_ at 61.000000 "$node_(2) setdest 550.122906 534.408750 17.771901" +$ns_ at 61.000000 "$node_(3) setdest 469.441169 502.553605 8.454613" +$ns_ at 61.000000 "$node_(4) setdest 473.810761 419.970774 19.999664" +$ns_ at 61.000000 "$node_(5) setdest 452.869841 371.468100 5.944690" +$ns_ at 61.000000 "$node_(6) setdest 475.369253 487.873233 19.985509" +$ns_ at 61.000000 "$node_(7) setdest 420.177106 474.634595 11.485703" +$ns_ at 61.000000 "$node_(8) setdest 468.519800 420.146652 19.971764" +$ns_ at 61.000000 "$node_(9) setdest 456.064353 537.336055 17.909162" +$ns_ at 61.000000 "$node_(10) setdest 480.398446 494.324173 19.937028" + +$ns_ at 62.000000 "$node_(1) setdest 514.243671 423.949759 18.804460" +$ns_ at 62.000000 "$node_(2) setdest 558.597008 518.822165 17.741252" +$ns_ at 62.000000 "$node_(3) setdest 478.289486 507.332608 10.056420" +$ns_ at 62.000000 "$node_(4) setdest 459.228341 406.299144 19.989007" +$ns_ at 62.000000 "$node_(5) setdest 448.065246 377.284900 7.544488" +$ns_ at 62.000000 "$node_(6) setdest 483.292150 506.217705 19.982291" +$ns_ at 62.000000 "$node_(7) setdest 431.806592 467.919195 13.429130" +$ns_ at 62.000000 "$node_(8) setdest 473.157337 400.709492 19.982741" +$ns_ at 62.000000 "$node_(9) setdest 471.569985 531.759731 16.477864" +$ns_ at 62.000000 "$node_(10) setdest 493.533931 509.329068 19.942113" + +$ns_ at 63.000000 "$node_(1) setdest 526.606177 439.546625 19.902105" +$ns_ at 63.000000 "$node_(2) setdest 563.865355 500.625921 18.943568" +$ns_ at 63.000000 "$node_(3) setdest 488.865584 512.231308 11.655519" +$ns_ at 63.000000 "$node_(4) setdest 443.896167 393.463964 19.995435" +$ns_ at 63.000000 "$node_(5) setdest 443.134296 384.956898 9.119968" +$ns_ at 63.000000 "$node_(6) setdest 483.704532 525.909424 19.696036" +$ns_ at 63.000000 "$node_(7) setdest 441.488756 456.383377 15.060524" +$ns_ at 63.000000 "$node_(8) setdest 471.609423 381.023538 19.746717" +$ns_ at 63.000000 "$node_(9) setdest 484.411309 520.929706 16.798483" +$ns_ at 63.000000 "$node_(10) setdest 509.195143 521.746549 19.986681" + +$ns_ at 64.000000 "$node_(1) setdest 540.433816 453.956062 19.970865" +$ns_ at 64.000000 "$node_(2) setdest 568.124533 481.658093 19.440142" +$ns_ at 64.000000 "$node_(3) setdest 501.350038 516.664198 13.248098" +$ns_ at 64.000000 "$node_(4) setdest 433.455434 376.647829 19.793718" +$ns_ at 64.000000 "$node_(5) setdest 440.254466 395.258128 10.696204" +$ns_ at 64.000000 "$node_(6) setdest 469.652755 537.742107 18.370216" +$ns_ at 64.000000 "$node_(7) setdest 449.511886 441.764181 16.676076" +$ns_ at 64.000000 "$node_(8) setdest 457.550576 367.766860 19.323320" +$ns_ at 64.000000 "$node_(9) setdest 498.202162 508.754862 18.396045" +$ns_ at 64.000000 "$node_(10) setdest 527.839755 528.576108 19.856093" + +$ns_ at 65.000000 "$node_(1) setdest 536.004799 446.599409 8.586997" +$ns_ at 65.000000 "$node_(2) setdest 564.983309 461.913673 19.992735" +$ns_ at 65.000000 "$node_(3) setdest 500.747238 516.306833 0.700769" +$ns_ at 65.000000 "$node_(4) setdest 438.545894 357.691580 19.627842" +$ns_ at 65.000000 "$node_(5) setdest 441.586829 385.431591 9.916452" +$ns_ at 65.000000 "$node_(6) setdest 457.118274 526.488725 16.844935" +$ns_ at 65.000000 "$node_(7) setdest 457.806607 425.470338 18.283646" +$ns_ at 65.000000 "$node_(8) setdest 439.123220 364.402269 18.732003" +$ns_ at 65.000000 "$node_(9) setdest 510.416367 
493.167415 19.802912" +$ns_ at 65.000000 "$node_(10) setdest 547.813104 529.300629 19.986485" + +$ns_ at 66.000000 "$node_(1) setdest 535.511187 436.310624 10.300619" +$ns_ at 66.000000 "$node_(2) setdest 562.492130 442.069901 19.999531" +$ns_ at 66.000000 "$node_(3) setdest 500.517193 514.277017 2.042810" +$ns_ at 66.000000 "$node_(4) setdest 445.642038 338.993091 19.999719" +$ns_ at 66.000000 "$node_(5) setdest 444.691682 374.116179 11.733655" +$ns_ at 66.000000 "$node_(6) setdest 455.508240 510.110942 16.456730" +$ns_ at 66.000000 "$node_(7) setdest 468.428808 408.858654 19.717485" +$ns_ at 66.000000 "$node_(8) setdest 419.931073 364.342807 19.192239" +$ns_ at 66.000000 "$node_(9) setdest 524.816169 479.714481 19.706235" +$ns_ at 66.000000 "$node_(10) setdest 567.683020 530.920241 19.935814" + +$ns_ at 67.000000 "$node_(1) setdest 534.977282 424.421906 11.900701" +$ns_ at 67.000000 "$node_(2) setdest 560.569724 422.162987 19.999522" +$ns_ at 67.000000 "$node_(3) setdest 500.250852 510.644079 3.642688" +$ns_ at 67.000000 "$node_(4) setdest 452.955902 320.378413 19.999972" +$ns_ at 67.000000 "$node_(5) setdest 448.073535 361.217857 13.334303" +$ns_ at 67.000000 "$node_(6) setdest 451.399100 495.313815 15.357083" +$ns_ at 67.000000 "$node_(7) setdest 481.733772 393.932468 19.995327" +$ns_ at 67.000000 "$node_(8) setdest 408.322022 365.329378 11.650896" +$ns_ at 67.000000 "$node_(9) setdest 539.158447 466.290889 19.644179" +$ns_ at 67.000000 "$node_(10) setdest 586.678542 530.162271 19.010638" + +$ns_ at 68.000000 "$node_(1) setdest 534.442950 410.931909 13.500575" +$ns_ at 68.000000 "$node_(2) setdest 558.901130 402.232965 19.999749" +$ns_ at 68.000000 "$node_(3) setdest 500.103546 505.403612 5.242537" +$ns_ at 68.000000 "$node_(4) setdest 460.080618 301.691006 19.999519" +$ns_ at 68.000000 "$node_(5) setdest 452.196525 346.864412 14.933869" +$ns_ at 68.000000 "$node_(6) setdest 438.416880 485.903560 16.034056" +$ns_ at 68.000000 "$node_(7) setdest 492.545093 377.214664 19.909034" +$ns_ at 68.000000 "$node_(8) setdest 416.839668 364.609333 8.548026" +$ns_ at 68.000000 "$node_(9) setdest 555.040544 454.694707 19.665006" +$ns_ at 68.000000 "$node_(10) setdest 586.825175 529.031950 1.139792" + +$ns_ at 69.000000 "$node_(1) setdest 534.282344 395.832941 15.099822" +$ns_ at 69.000000 "$node_(2) setdest 557.329589 382.294855 19.999950" +$ns_ at 69.000000 "$node_(3) setdest 500.199783 498.561725 6.842563" +$ns_ at 69.000000 "$node_(4) setdest 466.364502 282.705492 19.998423" +$ns_ at 69.000000 "$node_(5) setdest 457.330300 331.148052 16.533591" +$ns_ at 69.000000 "$node_(6) setdest 422.274174 478.513804 17.753744" +$ns_ at 69.000000 "$node_(7) setdest 503.862909 361.836359 19.094115" +$ns_ at 69.000000 "$node_(8) setdest 427.020554 367.160258 10.495602" +$ns_ at 69.000000 "$node_(9) setdest 571.735283 444.352269 19.638745" +$ns_ at 69.000000 "$node_(10) setdest 585.455531 526.966140 2.478608" + +$ns_ at 70.000000 "$node_(1) setdest 534.838474 379.142686 16.699517" +$ns_ at 70.000000 "$node_(2) setdest 555.525020 362.376742 19.999692" +$ns_ at 70.000000 "$node_(3) setdest 500.519465 490.124854 8.442926" +$ns_ at 70.000000 "$node_(4) setdest 471.472022 263.373599 19.995221" +$ns_ at 70.000000 "$node_(5) setdest 463.120393 313.962864 18.134383" +$ns_ at 70.000000 "$node_(6) setdest 408.213514 466.311334 18.617262" +$ns_ at 70.000000 "$node_(7) setdest 507.696506 363.297112 4.102471" +$ns_ at 70.000000 "$node_(8) setdest 439.013807 368.678079 12.088916" +$ns_ at 70.000000 "$node_(9) setdest 
589.464572 437.178961 19.125482" +$ns_ at 70.000000 "$node_(10) setdest 583.204839 523.564669 4.078679" + +$ns_ at 71.000000 "$node_(1) setdest 536.112282 360.887514 18.299561" +$ns_ at 71.000000 "$node_(2) setdest 553.523133 342.477773 19.999413" +$ns_ at 71.000000 "$node_(3) setdest 501.048110 480.096108 10.042669" +$ns_ at 71.000000 "$node_(4) setdest 475.511485 243.786853 19.998948" +$ns_ at 71.000000 "$node_(5) setdest 469.394485 295.340571 19.650802" +$ns_ at 71.000000 "$node_(6) setdest 409.735707 449.316228 17.063139" +$ns_ at 71.000000 "$node_(7) setdest 509.753088 368.161673 5.281428" +$ns_ at 71.000000 "$node_(8) setdest 452.539013 366.922685 13.638644" +$ns_ at 71.000000 "$node_(9) setdest 593.411094 420.224329 17.407888" +$ns_ at 71.000000 "$node_(10) setdest 580.200954 518.748793 5.675913" + +$ns_ at 72.000000 "$node_(1) setdest 538.243058 341.252788 19.750004" +$ns_ at 72.000000 "$node_(2) setdest 552.067476 322.531052 19.999766" +$ns_ at 72.000000 "$node_(3) setdest 502.043198 468.496468 11.642244" +$ns_ at 72.000000 "$node_(4) setdest 478.483272 224.011916 19.996990" +$ns_ at 72.000000 "$node_(5) setdest 475.540955 276.308901 19.999589" +$ns_ at 72.000000 "$node_(6) setdest 417.775607 434.844392 16.555181" +$ns_ at 72.000000 "$node_(7) setdest 510.902737 374.952179 6.887138" +$ns_ at 72.000000 "$node_(8) setdest 467.088300 362.216077 15.291629" +$ns_ at 72.000000 "$node_(9) setdest 588.386379 404.792399 16.229363" +$ns_ at 72.000000 "$node_(10) setdest 576.937804 512.244383 7.277053" + +$ns_ at 73.000000 "$node_(1) setdest 540.337760 321.362851 19.999934" +$ns_ at 73.000000 "$node_(2) setdest 550.508182 302.593413 19.998521" +$ns_ at 73.000000 "$node_(3) setdest 503.738640 455.363500 13.241955" +$ns_ at 73.000000 "$node_(4) setdest 480.607929 204.125582 19.999511" +$ns_ at 73.000000 "$node_(5) setdest 482.491192 257.559586 19.996064" +$ns_ at 73.000000 "$node_(6) setdest 424.060892 418.184577 17.806018" +$ns_ at 73.000000 "$node_(7) setdest 511.222289 383.432144 8.485983" +$ns_ at 73.000000 "$node_(8) setdest 483.575039 358.506602 16.898898" +$ns_ at 73.000000 "$node_(9) setdest 581.016072 390.553508 16.033323" +$ns_ at 73.000000 "$node_(10) setdest 573.633501 504.005362 8.876930" + +$ns_ at 74.000000 "$node_(1) setdest 542.640833 301.496124 19.999775" +$ns_ at 74.000000 "$node_(2) setdest 547.211487 282.875900 19.991211" +$ns_ at 74.000000 "$node_(3) setdest 506.228546 440.731667 14.842176" +$ns_ at 74.000000 "$node_(4) setdest 483.949062 184.414401 19.992345" +$ns_ at 74.000000 "$node_(5) setdest 490.772371 239.358717 19.996239" +$ns_ at 74.000000 "$node_(6) setdest 428.971719 399.458697 19.359101" +$ns_ at 74.000000 "$node_(7) setdest 510.977845 393.521870 10.092687" +$ns_ at 74.000000 "$node_(8) setdest 501.649850 360.807649 18.220692" +$ns_ at 74.000000 "$node_(9) setdest 574.169947 374.218269 17.711845" +$ns_ at 74.000000 "$node_(10) setdest 569.666315 494.308359 10.477138" + +$ns_ at 75.000000 "$node_(1) setdest 545.286982 281.676690 19.995301" +$ns_ at 75.000000 "$node_(2) setdest 541.579801 263.698851 19.986873" +$ns_ at 75.000000 "$node_(3) setdest 509.521933 424.622555 16.442319" +$ns_ at 75.000000 "$node_(4) setdest 489.316472 165.158057 19.990394" +$ns_ at 75.000000 "$node_(5) setdest 499.170013 221.211394 19.996143" +$ns_ at 75.000000 "$node_(6) setdest 433.995690 380.248856 19.855938" +$ns_ at 75.000000 "$node_(7) setdest 509.830491 405.143663 11.678292" +$ns_ at 75.000000 "$node_(8) setdest 514.956828 375.067165 19.504088" +$ns_ at 75.000000 "$node_(9) 
setdest 563.225009 358.537277 19.122897" +$ns_ at 75.000000 "$node_(10) setdest 564.059199 483.616863 12.072607" + +$ns_ at 76.000000 "$node_(1) setdest 550.377288 262.356101 19.979900" +$ns_ at 76.000000 "$node_(2) setdest 533.540653 245.403103 19.984051" +$ns_ at 76.000000 "$node_(3) setdest 513.154562 406.949557 18.042473" +$ns_ at 76.000000 "$node_(4) setdest 496.890661 146.658823 19.989748" +$ns_ at 76.000000 "$node_(5) setdest 507.041942 202.829163 19.996842" +$ns_ at 76.000000 "$node_(6) setdest 441.406995 362.103882 19.600192" +$ns_ at 76.000000 "$node_(7) setdest 506.263148 417.944045 13.288180" +$ns_ at 76.000000 "$node_(8) setdest 523.622151 393.089514 19.997322" +$ns_ at 76.000000 "$node_(9) setdest 562.694870 356.369564 2.231598" +$ns_ at 76.000000 "$node_(10) setdest 556.855146 471.989381 13.678331" + +$ns_ at 77.000000 "$node_(1) setdest 559.088417 244.398391 19.959036" +$ns_ at 77.000000 "$node_(2) setdest 521.840951 229.244414 19.949593" +$ns_ at 77.000000 "$node_(3) setdest 516.301733 387.623702 19.580433" +$ns_ at 77.000000 "$node_(4) setdest 505.320440 128.522356 19.999815" +$ns_ at 77.000000 "$node_(5) setdest 515.254078 184.596382 19.996837" +$ns_ at 77.000000 "$node_(6) setdest 437.257639 351.072545 11.785905" +$ns_ at 77.000000 "$node_(7) setdest 500.846086 431.809163 14.885767" +$ns_ at 77.000000 "$node_(8) setdest 529.413794 412.094108 19.867504" +$ns_ at 77.000000 "$node_(9) setdest 565.572234 358.356299 3.496619" +$ns_ at 77.000000 "$node_(10) setdest 548.808724 459.002515 15.277551" + +$ns_ at 78.000000 "$node_(1) setdest 568.615890 226.866730 19.953243" +$ns_ at 78.000000 "$node_(2) setdest 506.765408 216.107743 19.996103" +$ns_ at 78.000000 "$node_(3) setdest 518.586099 367.756661 19.997941" +$ns_ at 78.000000 "$node_(4) setdest 511.394728 109.548785 19.922183" +$ns_ at 78.000000 "$node_(5) setdest 521.978222 165.767794 19.993245" +$ns_ at 78.000000 "$node_(6) setdest 429.959797 352.164602 7.379098" +$ns_ at 78.000000 "$node_(7) setdest 494.140339 446.876872 16.492510" +$ns_ at 78.000000 "$node_(8) setdest 525.963760 430.106023 18.339351" +$ns_ at 78.000000 "$node_(9) setdest 569.000240 362.124933 5.094490" +$ns_ at 78.000000 "$node_(10) setdest 540.470647 444.327845 16.878077" + +$ns_ at 79.000000 "$node_(1) setdest 574.185687 207.679959 19.978859" +$ns_ at 79.000000 "$node_(2) setdest 491.733402 202.925804 19.993117" +$ns_ at 79.000000 "$node_(3) setdest 519.783067 347.795031 19.997485" +$ns_ at 79.000000 "$node_(4) setdest 508.990521 89.908093 19.787293" +$ns_ at 79.000000 "$node_(5) setdest 526.722730 146.349523 19.989487" +$ns_ at 79.000000 "$node_(6) setdest 421.402925 354.646309 8.909485" +$ns_ at 79.000000 "$node_(7) setdest 484.722595 462.296802 18.068430" +$ns_ at 79.000000 "$node_(8) setdest 513.980626 443.368125 17.873971" +$ns_ at 79.000000 "$node_(9) setdest 572.479374 367.825573 6.678447" +$ns_ at 79.000000 "$node_(10) setdest 529.744275 429.315243 18.450834" + +$ns_ at 80.000000 "$node_(1) setdest 576.539904 187.859407 19.959875" +$ns_ at 80.000000 "$node_(2) setdest 480.207033 186.703672 19.900119" +$ns_ at 80.000000 "$node_(3) setdest 519.812887 327.797852 19.997201" +$ns_ at 80.000000 "$node_(4) setdest 497.292409 73.848971 19.868096" +$ns_ at 80.000000 "$node_(5) setdest 528.886672 126.481962 19.985060" +$ns_ at 80.000000 "$node_(6) setdest 415.020974 363.015006 10.524466" +$ns_ at 80.000000 "$node_(7) setdest 471.817545 477.042190 19.595070" +$ns_ at 80.000000 "$node_(8) setdest 503.184441 458.887502 18.905255" +$ns_ at 80.000000 
"$node_(9) setdest 575.661628 375.491501 8.300193" +$ns_ at 80.000000 "$node_(10) setdest 514.499292 416.740217 19.762106" + +$ns_ at 81.000000 "$node_(1) setdest 574.626392 167.968199 19.983034" +$ns_ at 81.000000 "$node_(2) setdest 475.821624 167.311287 19.882063" +$ns_ at 81.000000 "$node_(3) setdest 518.646194 307.835016 19.996899" +$ns_ at 81.000000 "$node_(4) setdest 483.578324 59.304461 19.990471" +$ns_ at 81.000000 "$node_(5) setdest 527.651007 106.553198 19.967035" +$ns_ at 81.000000 "$node_(6) setdest 410.440591 374.312692 12.190882" +$ns_ at 81.000000 "$node_(7) setdest 456.292988 489.616829 19.978324" +$ns_ at 81.000000 "$node_(8) setdest 498.005915 477.169448 19.001228" +$ns_ at 81.000000 "$node_(9) setdest 579.725346 384.508368 9.890283" +$ns_ at 81.000000 "$node_(10) setdest 495.634540 410.515884 19.865074" + +$ns_ at 82.000000 "$node_(1) setdest 573.744222 148.036265 19.951446" +$ns_ at 82.000000 "$node_(2) setdest 474.558082 147.369780 19.981497" +$ns_ at 82.000000 "$node_(3) setdest 516.187480 287.990063 19.996686" +$ns_ at 82.000000 "$node_(4) setdest 474.515345 41.610136 19.880309" +$ns_ at 82.000000 "$node_(5) setdest 522.091375 87.379602 19.963374" +$ns_ at 82.000000 "$node_(6) setdest 414.780703 386.764139 13.186170" +$ns_ at 82.000000 "$node_(7) setdest 438.763875 499.134151 19.946158" +$ns_ at 82.000000 "$node_(8) setdest 500.792450 494.938482 17.986199" +$ns_ at 82.000000 "$node_(9) setdest 588.726105 391.049918 11.126794" +$ns_ at 82.000000 "$node_(10) setdest 475.806091 412.296842 19.908270" + +$ns_ at 83.000000 "$node_(1) setdest 577.404142 128.397041 19.977341" +$ns_ at 83.000000 "$node_(2) setdest 471.772322 127.583434 19.981491" +$ns_ at 83.000000 "$node_(3) setdest 512.431069 268.349770 19.996293" +$ns_ at 83.000000 "$node_(4) setdest 467.791067 22.805362 19.970865" +$ns_ at 83.000000 "$node_(5) setdest 513.352198 69.412545 19.979699" +$ns_ at 83.000000 "$node_(6) setdest 426.772346 396.441652 15.409535" +$ns_ at 83.000000 "$node_(7) setdest 419.593750 498.212291 19.192278" +$ns_ at 83.000000 "$node_(8) setdest 513.064464 506.922013 17.152474" +$ns_ at 83.000000 "$node_(9) setdest 594.729903 387.107313 7.182599" +$ns_ at 83.000000 "$node_(10) setdest 456.101092 415.641689 19.986870" + +$ns_ at 84.000000 "$node_(1) setdest 583.213841 109.262849 19.996747" +$ns_ at 84.000000 "$node_(2) setdest 476.419501 108.372979 19.764560" +$ns_ at 84.000000 "$node_(3) setdest 507.266385 249.033080 19.995211" +$ns_ at 84.000000 "$node_(4) setdest 462.492387 11.625346 12.372096" +$ns_ at 84.000000 "$node_(5) setdest 502.089366 52.902858 19.985523" +$ns_ at 84.000000 "$node_(6) setdest 437.511332 409.573265 16.963640" +$ns_ at 84.000000 "$node_(7) setdest 412.014189 481.555847 18.299914" +$ns_ at 84.000000 "$node_(8) setdest 526.654692 519.163066 18.290371" +$ns_ at 84.000000 "$node_(9) setdest 592.714197 381.218901 6.223863" +$ns_ at 84.000000 "$node_(10) setdest 436.259652 418.153011 19.999737" + +$ns_ at 85.000000 "$node_(1) setdest 587.867212 89.874667 19.938793" +$ns_ at 85.000000 "$node_(2) setdest 489.338727 93.291200 19.858662" +$ns_ at 85.000000 "$node_(3) setdest 500.466087 230.231287 19.993786" +$ns_ at 85.000000 "$node_(4) setdest 463.199929 17.989912 6.403773" +$ns_ at 85.000000 "$node_(5) setdest 487.155522 39.734993 19.910107" +$ns_ at 85.000000 "$node_(6) setdest 443.956620 426.937800 18.522117" +$ns_ at 85.000000 "$node_(7) setdest 404.982373 466.107830 16.973145" +$ns_ at 85.000000 "$node_(8) setdest 537.501281 535.276226 19.423759" +$ns_ at 85.000000 
"$node_(9) setdest 588.601285 374.648766 7.751304" +$ns_ at 85.000000 "$node_(10) setdest 417.844644 423.934655 19.301293" + +$ns_ at 86.000000 "$node_(1) setdest 583.707241 70.590453 19.727804" +$ns_ at 86.000000 "$node_(2) setdest 507.837671 86.031211 19.872553" +$ns_ at 86.000000 "$node_(3) setdest 492.487335 211.892008 19.999741" +$ns_ at 86.000000 "$node_(4) setdest 460.684485 25.836346 8.239780" +$ns_ at 86.000000 "$node_(5) setdest 468.395888 33.126211 19.889693" +$ns_ at 86.000000 "$node_(6) setdest 439.617458 445.683926 19.241766" +$ns_ at 86.000000 "$node_(7) setdest 405.686381 458.064025 8.074554" +$ns_ at 86.000000 "$node_(8) setdest 552.956635 544.737738 18.121484" +$ns_ at 86.000000 "$node_(9) setdest 579.604555 372.897661 9.165562" +$ns_ at 86.000000 "$node_(10) setdest 417.084191 434.018891 10.112869" + +$ns_ at 87.000000 "$node_(1) setdest 568.163720 58.723273 19.555844" +$ns_ at 87.000000 "$node_(2) setdest 527.597802 85.380885 19.770830" +$ns_ at 87.000000 "$node_(3) setdest 484.486710 193.561998 19.999982" +$ns_ at 87.000000 "$node_(4) setdest 458.575323 35.422524 9.815466" +$ns_ at 87.000000 "$node_(5) setdest 448.529777 33.971581 19.884089" +$ns_ at 87.000000 "$node_(6) setdest 425.906823 457.123064 17.855963" +$ns_ at 87.000000 "$node_(7) setdest 408.452236 458.872840 2.881690" +$ns_ at 87.000000 "$node_(8) setdest 568.879506 540.146677 16.571531" +$ns_ at 87.000000 "$node_(9) setdest 569.851338 377.857203 10.941769" +$ns_ at 87.000000 "$node_(10) setdest 421.850394 441.875446 9.189240" + +$ns_ at 88.000000 "$node_(1) setdest 548.683399 54.449519 19.943618" +$ns_ at 88.000000 "$node_(2) setdest 546.933768 82.546059 19.542668" +$ns_ at 88.000000 "$node_(3) setdest 476.681038 175.148773 19.999384" +$ns_ at 88.000000 "$node_(4) setdest 461.366028 46.344763 11.273125" +$ns_ at 88.000000 "$node_(5) setdest 430.847089 43.014463 19.860795" +$ns_ at 88.000000 "$node_(6) setdest 410.587500 453.395095 15.766401" +$ns_ at 88.000000 "$node_(7) setdest 412.362473 461.216708 4.558911" +$ns_ at 88.000000 "$node_(8) setdest 582.276440 530.346179 16.599024" +$ns_ at 88.000000 "$node_(9) setdest 560.884729 386.754776 12.631979" +$ns_ at 88.000000 "$node_(10) setdest 424.841637 452.326111 10.870324" + +$ns_ at 89.000000 "$node_(1) setdest 529.270019 51.276340 19.671004" +$ns_ at 89.000000 "$node_(2) setdest 565.972708 77.913315 19.594477" +$ns_ at 89.000000 "$node_(3) setdest 469.023624 156.673222 19.999549" +$ns_ at 89.000000 "$node_(4) setdest 471.973165 53.282586 12.674571" +$ns_ at 89.000000 "$node_(5) setdest 419.286989 58.968693 19.702116" +$ns_ at 89.000000 "$node_(6) setdest 404.803098 439.911564 14.671910" +$ns_ at 89.000000 "$node_(7) setdest 416.934945 465.341967 6.158349" +$ns_ at 89.000000 "$node_(8) setdest 595.673075 523.961962 14.840082" +$ns_ at 89.000000 "$node_(9) setdest 554.608120 399.466950 14.177277" +$ns_ at 89.000000 "$node_(10) setdest 428.943927 464.092911 12.461395" + +$ns_ at 90.000000 "$node_(1) setdest 510.446672 45.560687 19.671987" +$ns_ at 90.000000 "$node_(2) setdest 581.900465 67.427062 19.069739" +$ns_ at 90.000000 "$node_(3) setdest 460.852421 138.421298 19.997532" +$ns_ at 90.000000 "$node_(4) setdest 486.548028 54.559831 14.630720" +$ns_ at 90.000000 "$node_(5) setdest 417.520328 78.793399 19.903267" +$ns_ at 90.000000 "$node_(6) setdest 412.989536 428.553731 14.000647" +$ns_ at 90.000000 "$node_(7) setdest 421.706379 471.443160 7.745395" +$ns_ at 90.000000 "$node_(8) setdest 596.698144 525.418757 1.781297" +$ns_ at 90.000000 "$node_(9) setdest 
552.296746 415.092034 15.795117" +$ns_ at 90.000000 "$node_(10) setdest 437.002050 475.535203 13.994977" + +$ns_ at 91.000000 "$node_(1) setdest 491.216630 41.851459 19.584506" +$ns_ at 91.000000 "$node_(2) setdest 597.391272 59.977823 17.188841" +$ns_ at 91.000000 "$node_(3) setdest 451.041441 121.016462 19.979581" +$ns_ at 91.000000 "$node_(4) setdest 502.763761 55.445814 16.239919" +$ns_ at 91.000000 "$node_(5) setdest 420.064577 98.608541 19.977814" +$ns_ at 91.000000 "$node_(6) setdest 427.036725 421.558974 15.692359" +$ns_ at 91.000000 "$node_(7) setdest 425.224219 480.087841 9.333043" +$ns_ at 91.000000 "$node_(8) setdest 595.078325 527.548271 2.675565" +$ns_ at 91.000000 "$node_(9) setdest 555.215513 432.214111 17.369074" +$ns_ at 91.000000 "$node_(10) setdest 450.540971 483.206577 15.561245" + +$ns_ at 92.000000 "$node_(1) setdest 471.659908 39.220660 19.732878" +$ns_ at 92.000000 "$node_(2) setdest 599.260739 62.844430 3.422330" +$ns_ at 92.000000 "$node_(3) setdest 437.200521 106.710742 19.905394" +$ns_ at 92.000000 "$node_(4) setdest 520.463512 57.440346 17.811775" +$ns_ at 92.000000 "$node_(5) setdest 425.407301 117.784600 19.906430" +$ns_ at 92.000000 "$node_(6) setdest 441.106703 411.581456 17.248628" +$ns_ at 92.000000 "$node_(7) setdest 426.330437 490.960272 10.928563" +$ns_ at 92.000000 "$node_(8) setdest 592.410482 530.533599 4.003694" +$ns_ at 92.000000 "$node_(9) setdest 564.412524 448.873432 19.029397" +$ns_ at 92.000000 "$node_(10) setdest 467.267881 487.413523 17.247838" + +$ns_ at 93.000000 "$node_(1) setdest 453.466313 33.175001 19.171774" +$ns_ at 93.000000 "$node_(2) setdest 598.291343 67.449039 4.705545" +$ns_ at 93.000000 "$node_(3) setdest 420.425409 96.070528 19.865007" +$ns_ at 93.000000 "$node_(4) setdest 539.462165 61.491498 19.425773" +$ns_ at 93.000000 "$node_(5) setdest 434.020030 135.209813 19.437519" +$ns_ at 93.000000 "$node_(6) setdest 455.272226 399.073428 18.897428" +$ns_ at 93.000000 "$node_(7) setdest 427.424847 503.463387 12.550921" +$ns_ at 93.000000 "$node_(8) setdest 593.809342 529.223559 1.916510" +$ns_ at 93.000000 "$node_(9) setdest 576.244847 464.961799 19.970965" +$ns_ at 93.000000 "$node_(10) setdest 485.902916 490.396138 18.872215" + +$ns_ at 94.000000 "$node_(1) setdest 440.069927 20.181823 18.662418" +$ns_ at 94.000000 "$node_(2) setdest 596.289520 73.428855 6.305989" +$ns_ at 94.000000 "$node_(3) setdest 418.138459 85.885901 10.438235" +$ns_ at 94.000000 "$node_(4) setdest 559.121472 65.074947 19.983229" +$ns_ at 94.000000 "$node_(5) setdest 433.752323 153.363814 18.155975" +$ns_ at 94.000000 "$node_(6) setdest 468.907539 384.565199 19.910059" +$ns_ at 94.000000 "$node_(7) setdest 423.892158 517.002474 13.992382" +$ns_ at 94.000000 "$node_(8) setdest 594.496732 525.690274 3.599529" +$ns_ at 94.000000 "$node_(9) setdest 587.176402 481.519615 19.840871" +$ns_ at 94.000000 "$node_(10) setdest 505.557558 493.894481 19.963551" + +$ns_ at 95.000000 "$node_(1) setdest 427.052116 6.847278 18.635275" +$ns_ at 95.000000 "$node_(2) setdest 594.485579 81.120759 7.900607" +$ns_ at 95.000000 "$node_(3) setdest 420.911514 86.122459 2.783127" +$ns_ at 95.000000 "$node_(4) setdest 579.042638 65.882017 19.937508" +$ns_ at 95.000000 "$node_(5) setdest 419.915636 161.815294 16.213618" +$ns_ at 95.000000 "$node_(6) setdest 479.384927 367.530238 19.999139" +$ns_ at 95.000000 "$node_(7) setdest 413.636242 520.451794 10.820426" +$ns_ at 95.000000 "$node_(8) setdest 595.480416 520.598937 5.185494" +$ns_ at 95.000000 "$node_(9) setdest 588.142218 
501.154215 19.658340" +$ns_ at 95.000000 "$node_(10) setdest 525.501306 495.121901 19.981483" + +$ns_ at 96.000000 "$node_(1) setdest 410.783725 0.750524 17.373284" +$ns_ at 96.000000 "$node_(2) setdest 593.127576 90.529264 9.506006" +$ns_ at 96.000000 "$node_(3) setdest 424.469755 88.906562 4.517998" +$ns_ at 96.000000 "$node_(4) setdest 589.252016 52.033514 17.205011" +$ns_ at 96.000000 "$node_(5) setdest 406.673798 155.191050 14.806312" +$ns_ at 96.000000 "$node_(6) setdest 483.266169 352.608643 15.418108" +$ns_ at 96.000000 "$node_(7) setdest 410.018337 513.556034 7.787216" +$ns_ at 96.000000 "$node_(8) setdest 595.625065 513.990687 6.609832" +$ns_ at 96.000000 "$node_(9) setdest 576.073216 515.525770 18.767056" +$ns_ at 96.000000 "$node_(10) setdest 544.912679 490.672198 19.914850" + +$ns_ at 97.000000 "$node_(1) setdest 419.156506 3.762852 8.898179" +$ns_ at 97.000000 "$node_(2) setdest 591.679304 101.541365 11.106928" +$ns_ at 97.000000 "$node_(3) setdest 429.445594 92.463295 6.116317" +$ns_ at 97.000000 "$node_(4) setdest 580.554829 37.419012 17.006609" +$ns_ at 97.000000 "$node_(5) setdest 406.950962 153.565137 1.649367" +$ns_ at 97.000000 "$node_(6) setdest 479.856644 351.887222 3.485012" +$ns_ at 97.000000 "$node_(7) setdest 408.684567 504.276932 9.374469" +$ns_ at 97.000000 "$node_(8) setdest 590.406909 507.617288 8.237073" +$ns_ at 97.000000 "$node_(9) setdest 559.752265 523.162622 18.019294" +$ns_ at 97.000000 "$node_(10) setdest 561.124452 479.086372 19.926187" + +$ns_ at 98.000000 "$node_(1) setdest 429.488659 2.608879 10.396395" +$ns_ at 98.000000 "$node_(2) setdest 589.054917 113.968791 12.701509" +$ns_ at 98.000000 "$node_(3) setdest 436.124983 96.326552 7.716151" +$ns_ at 98.000000 "$node_(4) setdest 574.740693 23.094755 15.459253" +$ns_ at 98.000000 "$node_(5) setdest 409.384783 153.230897 2.456664" +$ns_ at 98.000000 "$node_(6) setdest 474.746734 352.016833 5.111553" +$ns_ at 98.000000 "$node_(7) setdest 413.021931 494.129886 11.035183" +$ns_ at 98.000000 "$node_(8) setdest 580.882175 505.117671 9.847266" +$ns_ at 98.000000 "$node_(9) setdest 542.784470 531.635980 18.965860" +$ns_ at 98.000000 "$node_(10) setdest 575.265149 465.030323 19.938201" + +$ns_ at 99.000000 "$node_(1) setdest 441.489099 2.335719 12.003549" +$ns_ at 99.000000 "$node_(2) setdest 586.056851 127.939810 14.289078" +$ns_ at 99.000000 "$node_(3) setdest 444.433154 100.546506 9.318461" +$ns_ at 99.000000 "$node_(4) setdest 563.604426 11.802092 15.860034" +$ns_ at 99.000000 "$node_(5) setdest 413.431025 153.540340 4.058058" +$ns_ at 99.000000 "$node_(6) setdest 468.323715 353.930297 6.701979" +$ns_ at 99.000000 "$node_(7) setdest 422.987216 486.505659 12.547341" +$ns_ at 99.000000 "$node_(8) setdest 569.510780 507.343850 11.587256" +$ns_ at 99.000000 "$node_(9) setdest 530.489038 544.913627 18.096231" +$ns_ at 99.000000 "$node_(10) setdest 581.656414 446.357042 19.736760" + +$ns_ at 100.000000 "$node_(1) setdest 454.447553 5.677176 13.382334" +$ns_ at 100.000000 "$node_(2) setdest 586.790587 143.764948 15.842139" +$ns_ at 100.000000 "$node_(3) setdest 454.264488 105.292106 10.916769" +$ns_ at 100.000000 "$node_(4) setdest 547.205392 5.747685 17.480967" +$ns_ at 100.000000 "$node_(5) setdest 419.055426 154.172769 5.659845" +$ns_ at 100.000000 "$node_(6) setdest 468.513902 353.682612 0.312280" +$ns_ at 100.000000 "$node_(7) setdest 425.531876 476.300999 10.517147" +$ns_ at 100.000000 "$node_(8) setdest 568.125928 506.009333 1.923214" +$ns_ at 100.000000 "$node_(9) setdest 534.125064 533.948308 
11.552442" +$ns_ at 100.000000 "$node_(10) setdest 574.270722 427.825799 19.948820" + +$ns_ at 101.000000 "$node_(1) setdest 463.974595 17.337902 15.057791" +$ns_ at 101.000000 "$node_(2) setdest 588.818288 160.985039 17.339062" +$ns_ at 101.000000 "$node_(3) setdest 465.953584 109.763324 12.515061" +$ns_ at 101.000000 "$node_(4) setdest 528.686907 4.225543 18.580936" +$ns_ at 101.000000 "$node_(5) setdest 426.301451 153.959775 7.249155" +$ns_ at 101.000000 "$node_(6) setdest 468.409172 351.964764 1.721038" +$ns_ at 101.000000 "$node_(7) setdest 425.627464 463.860769 12.440598" +$ns_ at 101.000000 "$node_(8) setdest 568.230938 502.795685 3.215363" +$ns_ at 101.000000 "$node_(9) setdest 534.843844 520.422103 13.545289" +$ns_ at 101.000000 "$node_(10) setdest 565.376509 409.915463 19.997179" + +$ns_ at 102.000000 "$node_(1) setdest 469.266398 33.192266 16.714186" +$ns_ at 102.000000 "$node_(2) setdest 583.250087 179.129202 18.979345" +$ns_ at 102.000000 "$node_(3) setdest 479.708421 112.845067 14.095839" +$ns_ at 102.000000 "$node_(4) setdest 513.846746 13.845484 17.685407" +$ns_ at 102.000000 "$node_(5) setdest 434.772540 151.433158 8.839861" +$ns_ at 102.000000 "$node_(6) setdest 467.957451 348.674042 3.321581" +$ns_ at 102.000000 "$node_(7) setdest 425.462374 449.821538 14.040202" +$ns_ at 102.000000 "$node_(8) setdest 568.504849 497.988118 4.815363" +$ns_ at 102.000000 "$node_(9) setdest 536.479818 505.366286 15.144440" +$ns_ at 102.000000 "$node_(10) setdest 555.303153 392.642708 19.995513" + +$ns_ at 103.000000 "$node_(1) setdest 465.730169 51.026601 18.181541" +$ns_ at 103.000000 "$node_(2) setdest 566.879047 186.735999 18.051989" +$ns_ at 103.000000 "$node_(3) setdest 495.386590 112.701027 15.678831" +$ns_ at 103.000000 "$node_(4) setdest 498.374210 23.147625 18.053509" +$ns_ at 103.000000 "$node_(5) setdest 443.971210 146.477076 10.448841" +$ns_ at 103.000000 "$node_(6) setdest 467.040158 343.838883 4.921401" +$ns_ at 103.000000 "$node_(7) setdest 424.846982 434.193648 15.640001" +$ns_ at 103.000000 "$node_(8) setdest 568.959563 491.588797 6.415456" +$ns_ at 103.000000 "$node_(9) setdest 539.291404 488.859842 16.744184" +$ns_ at 103.000000 "$node_(10) setdest 543.978114 376.161913 19.996828" + +$ns_ at 104.000000 "$node_(1) setdest 453.885584 66.862210 19.775255" +$ns_ at 104.000000 "$node_(2) setdest 550.420005 182.065219 17.108953" +$ns_ at 104.000000 "$node_(3) setdest 512.035228 108.074256 17.279588" +$ns_ at 104.000000 "$node_(4) setdest 480.422154 28.888008 18.847502" +$ns_ at 104.000000 "$node_(5) setdest 453.481156 139.084396 12.045363" +$ns_ at 104.000000 "$node_(6) setdest 465.522276 337.496223 6.521756" +$ns_ at 104.000000 "$node_(7) setdest 423.890552 416.980065 17.240133" +$ns_ at 104.000000 "$node_(8) setdest 569.408345 483.586152 8.015219" +$ns_ at 104.000000 "$node_(9) setdest 542.421361 470.785101 18.343743" +$ns_ at 104.000000 "$node_(10) setdest 532.315171 359.914903 19.999739" + +$ns_ at 105.000000 "$node_(1) setdest 441.554672 82.598803 19.992292" +$ns_ at 105.000000 "$node_(2) setdest 539.013776 170.214626 16.448058" +$ns_ at 105.000000 "$node_(3) setdest 529.234106 100.206225 18.913152" +$ns_ at 105.000000 "$node_(4) setdest 461.595790 33.254097 19.326011" +$ns_ at 105.000000 "$node_(5) setdest 463.150011 129.433979 13.660795" +$ns_ at 105.000000 "$node_(6) setdest 464.066377 329.507246 8.120554" +$ns_ at 105.000000 "$node_(7) setdest 423.478241 398.145310 18.839268" +$ns_ at 105.000000 "$node_(8) setdest 569.653858 473.974181 9.615106" +$ns_ at 105.000000 
"$node_(9) setdest 544.689190 451.144354 19.771242" +$ns_ at 105.000000 "$node_(10) setdest 521.557519 343.060211 19.995192" + +$ns_ at 106.000000 "$node_(1) setdest 426.035377 95.120154 19.940731" +$ns_ at 106.000000 "$node_(2) setdest 527.796998 156.046188 18.070991" +$ns_ at 106.000000 "$node_(3) setdest 546.577423 90.292431 19.976836" +$ns_ at 106.000000 "$node_(4) setdest 442.458573 31.949902 19.181606" +$ns_ at 106.000000 "$node_(5) setdest 473.667535 118.375737 15.261160" +$ns_ at 106.000000 "$node_(6) setdest 463.038352 319.841988 9.719776" +$ns_ at 106.000000 "$node_(7) setdest 424.022196 378.194573 19.958150" +$ns_ at 106.000000 "$node_(8) setdest 569.813158 462.759869 11.215443" +$ns_ at 106.000000 "$node_(9) setdest 547.190950 431.303404 19.998052" +$ns_ at 106.000000 "$node_(10) setdest 512.059389 325.464003 19.996025" + +$ns_ at 107.000000 "$node_(1) setdest 413.339629 110.424764 19.884997" +$ns_ at 107.000000 "$node_(2) setdest 522.889895 138.019659 18.682489" +$ns_ at 107.000000 "$node_(3) setdest 562.785458 78.657080 19.951987" +$ns_ at 107.000000 "$node_(4) setdest 424.326276 36.316554 18.650680" +$ns_ at 107.000000 "$node_(5) setdest 485.278736 106.151196 16.859994" +$ns_ at 107.000000 "$node_(6) setdest 462.686740 308.528017 11.319434" +$ns_ at 107.000000 "$node_(7) setdest 425.056084 358.221365 19.999950" +$ns_ at 107.000000 "$node_(8) setdest 570.011307 449.945947 12.815454" +$ns_ at 107.000000 "$node_(9) setdest 550.802202 411.635114 19.997070" +$ns_ at 107.000000 "$node_(10) setdest 504.100758 307.123887 19.992490" + +$ns_ at 108.000000 "$node_(1) setdest 403.484524 120.881194 14.368718" +$ns_ at 108.000000 "$node_(2) setdest 522.671723 118.777356 19.243540" +$ns_ at 108.000000 "$node_(3) setdest 566.030236 61.969468 17.000146" +$ns_ at 108.000000 "$node_(4) setdest 406.729266 43.449566 18.987750" +$ns_ at 108.000000 "$node_(5) setdest 498.299352 93.063657 18.461314" +$ns_ at 108.000000 "$node_(6) setdest 463.350139 295.626592 12.918469" +$ns_ at 108.000000 "$node_(7) setdest 425.713473 338.232633 19.999539" +$ns_ at 108.000000 "$node_(8) setdest 570.151839 435.531284 14.415348" +$ns_ at 108.000000 "$node_(9) setdest 553.849836 391.873169 19.995562" +$ns_ at 108.000000 "$node_(10) setdest 497.862638 288.126445 19.995423" + +$ns_ at 109.000000 "$node_(1) setdest 403.989736 119.896423 1.106803" +$ns_ at 109.000000 "$node_(2) setdest 528.443915 101.055487 18.638209" +$ns_ at 109.000000 "$node_(3) setdest 555.086711 59.581719 11.200986" +$ns_ at 109.000000 "$node_(4) setdest 419.658364 34.543594 15.699616" +$ns_ at 109.000000 "$node_(5) setdest 512.018250 78.744240 19.830629" +$ns_ at 109.000000 "$node_(6) setdest 465.226532 281.230082 14.518276" +$ns_ at 109.000000 "$node_(7) setdest 425.892742 318.233998 19.999438" +$ns_ at 109.000000 "$node_(8) setdest 569.845697 419.519895 16.014315" +$ns_ at 109.000000 "$node_(9) setdest 555.287187 371.932228 19.992677" +$ns_ at 109.000000 "$node_(10) setdest 493.133245 268.700773 19.993096" + +$ns_ at 110.000000 "$node_(1) setdest 405.689023 117.799747 2.698820" +$ns_ at 110.000000 "$node_(2) setdest 542.452784 89.981334 17.857360" +$ns_ at 110.000000 "$node_(3) setdest 541.878384 60.468639 13.238071" +$ns_ at 110.000000 "$node_(4) setdest 431.508634 26.710829 14.204967" +$ns_ at 110.000000 "$node_(5) setdest 525.437273 63.918876 19.996540" +$ns_ at 110.000000 "$node_(6) setdest 468.870766 265.532533 16.115008" +$ns_ at 110.000000 "$node_(7) setdest 425.465752 298.239034 19.999523" +$ns_ at 110.000000 "$node_(8) setdest 568.423672 
401.966936 17.610467" +$ns_ at 110.000000 "$node_(9) setdest 554.604727 351.953670 19.990211" +$ns_ at 110.000000 "$node_(10) setdest 490.462288 248.888314 19.991687" + +$ns_ at 111.000000 "$node_(1) setdest 409.267839 115.417933 4.298949" +$ns_ at 111.000000 "$node_(2) setdest 559.897956 86.045463 17.883655" +$ns_ at 111.000000 "$node_(3) setdest 527.059137 59.782209 14.835137" +$ns_ at 111.000000 "$node_(4) setdest 443.573833 19.915640 13.847152" +$ns_ at 111.000000 "$node_(5) setdest 538.192908 48.515225 19.999468" +$ns_ at 111.000000 "$node_(6) setdest 473.714036 248.485385 17.721809" +$ns_ at 111.000000 "$node_(7) setdest 424.698720 278.254092 19.999656" +$ns_ at 111.000000 "$node_(8) setdest 566.060462 382.899839 19.212989" +$ns_ at 111.000000 "$node_(9) setdest 551.708859 332.174554 19.989984" +$ns_ at 111.000000 "$node_(10) setdest 489.396329 228.922479 19.994270" + +$ns_ at 112.000000 "$node_(1) setdest 414.950785 113.903111 5.881374" +$ns_ at 112.000000 "$node_(2) setdest 578.124561 87.480642 18.283021" +$ns_ at 112.000000 "$node_(3) setdest 510.636699 60.626058 16.444104" +$ns_ at 112.000000 "$node_(4) setdest 457.652786 13.663710 15.404660" +$ns_ at 112.000000 "$node_(5) setdest 551.827674 34.309326 19.690465" +$ns_ at 112.000000 "$node_(6) setdest 478.228138 229.700152 19.319992" +$ns_ at 112.000000 "$node_(7) setdest 422.398639 258.396580 19.990277" +$ns_ at 112.000000 "$node_(8) setdest 565.174425 362.924990 19.994490" +$ns_ at 112.000000 "$node_(9) setdest 546.871512 312.774624 19.993929" +$ns_ at 112.000000 "$node_(10) setdest 491.447099 209.200491 19.828325" + +$ns_ at 113.000000 "$node_(1) setdest 422.446166 114.130760 7.498837" +$ns_ at 113.000000 "$node_(2) setdest 591.915195 78.408014 16.507397" +$ns_ at 113.000000 "$node_(3) setdest 492.624699 59.666840 18.037523" +$ns_ at 113.000000 "$node_(4) setdest 472.214347 5.489961 16.698779" +$ns_ at 113.000000 "$node_(5) setdest 551.486872 35.177611 0.932773" +$ns_ at 113.000000 "$node_(6) setdest 482.217820 210.104151 19.998020" +$ns_ at 113.000000 "$node_(7) setdest 417.831564 238.927314 19.997762" +$ns_ at 113.000000 "$node_(8) setdest 564.744712 342.930184 19.999424" +$ns_ at 113.000000 "$node_(9) setdest 540.209785 293.926814 19.990462" +$ns_ at 113.000000 "$node_(10) setdest 504.489346 196.354667 18.306158" + +$ns_ at 114.000000 "$node_(1) setdest 431.271998 116.305610 9.089845" +$ns_ at 114.000000 "$node_(2) setdest 588.736507 63.560605 15.183860" +$ns_ at 114.000000 "$node_(3) setdest 473.963039 54.031040 19.494096" +$ns_ at 114.000000 "$node_(4) setdest 473.434460 2.312632 3.403542" +$ns_ at 114.000000 "$node_(5) setdest 548.218838 37.090081 3.786501" +$ns_ at 114.000000 "$node_(6) setdest 484.657569 190.258834 19.994725" +$ns_ at 114.000000 "$node_(7) setdest 414.150519 219.279742 19.989426" +$ns_ at 114.000000 "$node_(8) setdest 563.955033 322.946138 19.999642" +$ns_ at 114.000000 "$node_(9) setdest 531.976806 275.701992 19.998151" +$ns_ at 114.000000 "$node_(10) setdest 520.244752 190.438199 16.829659" + +$ns_ at 115.000000 "$node_(1) setdest 440.908069 120.949190 10.696574" +$ns_ at 115.000000 "$node_(2) setdest 580.431419 50.836504 15.194645" +$ns_ at 115.000000 "$node_(3) setdest 456.876131 43.692246 19.971307" +$ns_ at 115.000000 "$node_(4) setdest 472.869595 2.467477 0.585704" +$ns_ at 115.000000 "$node_(5) setdest 543.700691 40.023340 5.386804" +$ns_ at 115.000000 "$node_(6) setdest 485.938648 170.301151 19.998756" +$ns_ at 115.000000 "$node_(7) setdest 414.076804 199.317789 19.962089" +$ns_ at 115.000000 
"$node_(8) setdest 563.190475 302.961948 19.998810" +$ns_ at 115.000000 "$node_(9) setdest 522.369871 258.168219 19.993159" +$ns_ at 115.000000 "$node_(10) setdest 534.924858 194.630099 15.266877" + +$ns_ at 116.000000 "$node_(1) setdest 451.284786 127.573697 12.310985" +$ns_ at 116.000000 "$node_(2) setdest 569.402032 38.175636 16.791217" +$ns_ at 116.000000 "$node_(3) setdest 440.770647 31.838598 19.997390" +$ns_ at 116.000000 "$node_(4) setdest 472.841751 4.274661 1.807398" +$ns_ at 116.000000 "$node_(5) setdest 537.436100 43.104717 6.981403" +$ns_ at 116.000000 "$node_(6) setdest 486.954575 150.332081 19.994896" +$ns_ at 116.000000 "$node_(7) setdest 417.830901 179.685288 19.988205" +$ns_ at 116.000000 "$node_(8) setdest 563.641683 282.971460 19.995579" +$ns_ at 116.000000 "$node_(9) setdest 511.298664 241.517648 19.995328" +$ns_ at 116.000000 "$node_(10) setdest 550.011999 194.908027 15.089700" + +$ns_ at 117.000000 "$node_(1) setdest 462.138089 136.267655 13.906081" +$ns_ at 117.000000 "$node_(2) setdest 556.634968 24.940619 18.389225" +$ns_ at 117.000000 "$node_(3) setdest 425.324448 19.136132 19.998443" +$ns_ at 117.000000 "$node_(4) setdest 473.314494 7.680851 3.438840" +$ns_ at 117.000000 "$node_(5) setdest 529.438223 46.226951 8.585708" +$ns_ at 117.000000 "$node_(6) setdest 490.129007 130.598208 19.987566" +$ns_ at 117.000000 "$node_(7) setdest 423.966373 160.668563 19.981989" +$ns_ at 117.000000 "$node_(8) setdest 565.740884 263.087161 19.994799" +$ns_ at 117.000000 "$node_(9) setdest 499.055031 225.704650 19.998936" +$ns_ at 117.000000 "$node_(10) setdest 563.557238 186.500635 15.942326" + +$ns_ at 118.000000 "$node_(1) setdest 473.241458 147.092544 15.506870" +$ns_ at 118.000000 "$node_(2) setdest 539.814754 14.774438 19.653774" +$ns_ at 118.000000 "$node_(3) setdest 409.111886 8.883379 19.182442" +$ns_ at 118.000000 "$node_(4) setdest 473.692202 12.706450 5.039772" +$ns_ at 118.000000 "$node_(5) setdest 519.739209 49.337753 10.185674" +$ns_ at 118.000000 "$node_(6) setdest 496.431453 111.642085 19.976371" +$ns_ at 118.000000 "$node_(7) setdest 432.569997 142.630498 19.984847" +$ns_ at 118.000000 "$node_(8) setdest 568.702436 243.307677 19.999970" +$ns_ at 118.000000 "$node_(9) setdest 485.443278 211.070870 19.985678" +$ns_ at 118.000000 "$node_(10) setdest 570.192106 171.160831 16.713200" + +$ns_ at 119.000000 "$node_(1) setdest 487.251601 156.862702 17.080401" +$ns_ at 119.000000 "$node_(2) setdest 520.683497 9.021841 19.977421" +$ns_ at 119.000000 "$node_(3) setdest 417.073766 12.056418 8.570864" +$ns_ at 119.000000 "$node_(4) setdest 474.036968 19.336302 6.638810" +$ns_ at 119.000000 "$node_(5) setdest 508.315867 52.234824 11.784981" +$ns_ at 119.000000 "$node_(6) setdest 506.265369 94.271345 19.961175" +$ns_ at 119.000000 "$node_(7) setdest 443.201855 125.694874 19.996294" +$ns_ at 119.000000 "$node_(8) setdest 571.628211 223.523019 19.999821" +$ns_ at 119.000000 "$node_(9) setdest 470.395480 197.901690 19.996588" +$ns_ at 119.000000 "$node_(10) setdest 581.059668 156.562270 18.199503" + +$ns_ at 120.000000 "$node_(1) setdest 504.954771 159.215359 17.858813" +$ns_ at 120.000000 "$node_(2) setdest 501.192874 10.606251 19.554916" +$ns_ at 120.000000 "$node_(3) setdest 423.571462 19.617907 9.969763" +$ns_ at 120.000000 "$node_(4) setdest 474.472309 27.560057 8.235270" +$ns_ at 120.000000 "$node_(5) setdest 495.207683 54.945946 13.385614" +$ns_ at 120.000000 "$node_(6) setdest 520.516470 80.409235 19.880946" +$ns_ at 120.000000 "$node_(7) setdest 453.570709 108.596480 
19.996705" +$ns_ at 120.000000 "$node_(8) setdest 573.762328 203.638794 19.998422" +$ns_ at 120.000000 "$node_(9) setdest 453.975818 186.542303 19.965996" +$ns_ at 120.000000 "$node_(10) setdest 592.135494 143.826225 16.878411" + +$ns_ at 121.000000 "$node_(1) setdest 509.718099 146.506082 13.572583" +$ns_ at 121.000000 "$node_(2) setdest 484.117793 19.082124 19.063023" +$ns_ at 121.000000 "$node_(3) setdest 433.058684 26.201559 11.547808" +$ns_ at 121.000000 "$node_(4) setdest 476.147484 37.254243 9.837858" +$ns_ at 121.000000 "$node_(5) setdest 480.493467 57.703401 14.970361" +$ns_ at 121.000000 "$node_(6) setdest 537.165573 83.737558 16.978527" +$ns_ at 121.000000 "$node_(7) setdest 464.612917 91.921714 19.999454" +$ns_ at 121.000000 "$node_(8) setdest 575.622097 183.725674 19.999777" +$ns_ at 121.000000 "$node_(9) setdest 435.402591 179.523497 19.855186" +$ns_ at 121.000000 "$node_(10) setdest 589.495037 140.928854 3.920047" + +$ns_ at 122.000000 "$node_(1) setdest 501.618767 135.407562 13.739589" +$ns_ at 122.000000 "$node_(2) setdest 468.354636 30.000884 19.175412" +$ns_ at 122.000000 "$node_(3) setdest 444.780251 32.212762 13.173066" +$ns_ at 122.000000 "$node_(4) setdest 478.682436 48.409195 11.439359" +$ns_ at 122.000000 "$node_(5) setdest 465.298067 64.267961 16.552753" +$ns_ at 122.000000 "$node_(6) setdest 544.950948 98.256877 16.474910" +$ns_ at 122.000000 "$node_(7) setdest 475.946542 75.443825 19.999297" +$ns_ at 122.000000 "$node_(8) setdest 576.534377 163.749996 19.996499" +$ns_ at 122.000000 "$node_(9) setdest 434.434766 179.467028 0.969471" +$ns_ at 122.000000 "$node_(10) setdest 584.521690 139.425209 5.195683" + +$ns_ at 123.000000 "$node_(1) setdest 492.075637 123.450755 15.298253" +$ns_ at 123.000000 "$node_(2) setdest 455.863068 44.093278 18.831751" +$ns_ at 123.000000 "$node_(3) setdest 458.619999 37.371581 14.769972" +$ns_ at 123.000000 "$node_(4) setdest 482.138433 60.980026 13.037244" +$ns_ at 123.000000 "$node_(5) setdest 449.568122 73.372764 18.174944" +$ns_ at 123.000000 "$node_(6) setdest 555.460624 109.423500 15.334495" +$ns_ at 123.000000 "$node_(7) setdest 488.072608 59.566099 19.978580" +$ns_ at 123.000000 "$node_(8) setdest 575.616599 143.782691 19.988386" +$ns_ at 123.000000 "$node_(9) setdest 437.345563 182.540188 4.232854" +$ns_ at 123.000000 "$node_(10) setdest 578.082071 137.243055 6.799300" + +$ns_ at 124.000000 "$node_(1) setdest 481.629922 110.164467 16.900841" +$ns_ at 124.000000 "$node_(2) setdest 442.838643 58.392410 19.341686" +$ns_ at 124.000000 "$node_(3) setdest 474.615429 40.719637 16.342070" +$ns_ at 124.000000 "$node_(4) setdest 486.172579 75.052483 14.639275" +$ns_ at 124.000000 "$node_(5) setdest 430.524277 77.428350 19.470897" +$ns_ at 124.000000 "$node_(6) setdest 570.380491 116.561320 16.539374" +$ns_ at 124.000000 "$node_(7) setdest 503.421106 46.832482 19.942953" +$ns_ at 124.000000 "$node_(8) setdest 571.969243 124.135771 19.982610" +$ns_ at 124.000000 "$node_(9) setdest 437.850553 188.097996 5.580703" +$ns_ at 124.000000 "$node_(10) setdest 570.292753 134.115544 8.393736" + +$ns_ at 125.000000 "$node_(1) setdest 468.764309 108.362457 12.991198" +$ns_ at 125.000000 "$node_(2) setdest 424.365917 65.103086 19.653874" +$ns_ at 125.000000 "$node_(3) setdest 458.785742 38.071495 16.049662" +$ns_ at 125.000000 "$node_(4) setdest 485.862028 75.256264 0.371442" +$ns_ at 125.000000 "$node_(5) setdest 410.538520 78.179722 19.999876" +$ns_ at 125.000000 "$node_(6) setdest 586.519711 124.964926 18.196017" +$ns_ at 125.000000 "$node_(7) 
setdest 522.204110 40.543630 19.807850" +$ns_ at 125.000000 "$node_(8) setdest 565.283378 105.313652 19.974308" +$ns_ at 125.000000 "$node_(9) setdest 438.236927 188.566948 0.607619" +$ns_ at 125.000000 "$node_(10) setdest 561.454881 129.437687 9.999516" + +$ns_ at 126.000000 "$node_(1) setdest 453.894330 110.665395 15.047252" +$ns_ at 126.000000 "$node_(2) setdest 404.988454 70.046829 19.998166" +$ns_ at 126.000000 "$node_(3) setdest 440.858781 37.340840 17.941845" +$ns_ at 126.000000 "$node_(4) setdest 484.107042 74.965456 1.778916" +$ns_ at 126.000000 "$node_(5) setdest 390.542704 78.440085 19.997511" +$ns_ at 126.000000 "$node_(6) setdest 592.964875 127.196911 6.820696" +$ns_ at 126.000000 "$node_(7) setdest 542.036992 42.707094 19.950533" +$ns_ at 126.000000 "$node_(8) setdest 554.346165 88.643407 19.937896" +$ns_ at 126.000000 "$node_(9) setdest 438.863799 187.609138 1.144713" +$ns_ at 126.000000 "$node_(10) setdest 550.853562 124.755186 11.589383" + +$ns_ at 127.000000 "$node_(1) setdest 437.536983 113.753231 16.646247" +$ns_ at 127.000000 "$node_(2) setdest 385.400042 74.078678 19.999042" +$ns_ at 127.000000 "$node_(3) setdest 421.408233 35.863244 19.506591" +$ns_ at 127.000000 "$node_(4) setdest 480.756885 74.527052 3.378720" +$ns_ at 127.000000 "$node_(5) setdest 370.587368 77.185177 19.994756" +$ns_ at 127.000000 "$node_(6) setdest 591.806569 125.973434 1.684805" +$ns_ at 127.000000 "$node_(7) setdest 561.938216 44.598425 19.990894" +$ns_ at 127.000000 "$node_(8) setdest 538.791423 76.225827 19.903424" +$ns_ at 127.000000 "$node_(9) setdest 439.851088 185.026858 2.764582" +$ns_ at 127.000000 "$node_(10) setdest 538.598737 119.856049 13.197813" + +$ns_ at 128.000000 "$node_(1) setdest 419.670201 117.456660 18.246569" +$ns_ at 128.000000 "$node_(2) setdest 365.724919 77.666346 19.999545" +$ns_ at 128.000000 "$node_(3) setdest 401.445644 34.670369 19.998197" +$ns_ at 128.000000 "$node_(4) setdest 475.797670 74.087926 4.978620" +$ns_ at 128.000000 "$node_(5) setdest 350.726157 74.834010 19.999892" +$ns_ at 128.000000 "$node_(6) setdest 589.270539 123.860295 3.301031" +$ns_ at 128.000000 "$node_(7) setdest 581.791247 44.803262 19.854088" +$ns_ at 128.000000 "$node_(8) setdest 520.702123 67.707079 19.994795" +$ns_ at 128.000000 "$node_(9) setdest 441.303563 180.909826 4.365734" +$ns_ at 128.000000 "$node_(10) setdest 525.473635 113.036002 14.791260" + +$ns_ at 129.000000 "$node_(1) setdest 400.190414 120.538102 19.722002" +$ns_ at 129.000000 "$node_(2) setdest 345.957172 80.701765 19.999440" +$ns_ at 129.000000 "$node_(3) setdest 381.449576 34.354000 19.998570" +$ns_ at 129.000000 "$node_(4) setdest 469.225898 73.791977 6.578432" +$ns_ at 129.000000 "$node_(5) setdest 330.797553 73.177839 19.997304" +$ns_ at 129.000000 "$node_(6) setdest 585.243076 121.062983 4.903612" +$ns_ at 129.000000 "$node_(7) setdest 588.511397 48.281694 7.567028" +$ns_ at 129.000000 "$node_(8) setdest 502.905331 58.615452 19.984581" +$ns_ at 129.000000 "$node_(9) setdest 443.695561 175.443621 5.966662" +$ns_ at 129.000000 "$node_(10) setdest 511.499269 104.458277 16.396959" + +$ns_ at 130.000000 "$node_(1) setdest 380.319010 122.788460 19.998421" +$ns_ at 130.000000 "$node_(2) setdest 326.114659 83.198800 19.999013" +$ns_ at 130.000000 "$node_(3) setdest 361.450892 34.562067 19.999767" +$ns_ at 130.000000 "$node_(4) setdest 461.047591 73.796428 8.178308" +$ns_ at 130.000000 "$node_(5) setdest 310.805689 73.087262 19.992069" +$ns_ at 130.000000 "$node_(6) setdest 580.472921 116.653619 6.495912" +$ns_ at 
130.000000 "$node_(7) setdest 586.575808 48.184730 1.938017" +$ns_ at 130.000000 "$node_(8) setdest 485.758668 48.322000 19.999080" +$ns_ at 130.000000 "$node_(9) setdest 446.859035 168.570017 7.566637" +$ns_ at 130.000000 "$node_(10) setdest 497.658476 92.981586 17.980044" + +$ns_ at 131.000000 "$node_(1) setdest 360.365366 124.127494 19.998522" +$ns_ at 131.000000 "$node_(2) setdest 306.164975 84.538799 19.994637" +$ns_ at 131.000000 "$node_(3) setdest 341.453459 34.881321 19.999981" +$ns_ at 131.000000 "$node_(4) setdest 451.277647 74.201781 9.778349" +$ns_ at 131.000000 "$node_(5) setdest 290.929481 75.205162 19.988725" +$ns_ at 131.000000 "$node_(6) setdest 575.708834 110.114534 8.090498" +$ns_ at 131.000000 "$node_(7) setdest 583.023276 48.266735 3.553478" +$ns_ at 131.000000 "$node_(8) setdest 468.711352 37.876329 19.993075" +$ns_ at 131.000000 "$node_(9) setdest 451.428019 160.631101 9.159804" +$ns_ at 131.000000 "$node_(10) setdest 485.035837 78.081245 19.528215" + +$ns_ at 132.000000 "$node_(1) setdest 340.372607 124.577953 19.997833" +$ns_ at 132.000000 "$node_(2) setdest 286.178506 84.073191 19.991892" +$ns_ at 132.000000 "$node_(3) setdest 321.454012 34.894377 19.999451" +$ns_ at 132.000000 "$node_(4) setdest 439.923459 74.951472 11.378912" +$ns_ at 132.000000 "$node_(5) setdest 271.481106 79.805860 19.985138" +$ns_ at 132.000000 "$node_(6) setdest 571.726779 101.275821 9.694308" +$ns_ at 132.000000 "$node_(7) setdest 577.934453 49.068828 5.151648" +$ns_ at 132.000000 "$node_(8) setdest 450.507813 29.624318 19.986609" +$ns_ at 132.000000 "$node_(9) setdest 457.527500 151.759105 10.766428" +$ns_ at 132.000000 "$node_(10) setdest 474.583709 61.040590 19.990771" + +$ns_ at 133.000000 "$node_(1) setdest 320.380450 124.065568 19.998722" +$ns_ at 133.000000 "$node_(2) setdest 266.383509 81.306673 19.987384" +$ns_ at 133.000000 "$node_(3) setdest 301.478147 33.972686 19.997117" +$ns_ at 133.000000 "$node_(4) setdest 426.974114 75.826178 12.978853" +$ns_ at 133.000000 "$node_(5) setdest 252.963837 87.304381 19.977914" +$ns_ at 133.000000 "$node_(6) setdest 568.310797 90.505002 11.299534" +$ns_ at 133.000000 "$node_(7) setdest 571.352734 50.591970 6.755663" +$ns_ at 133.000000 "$node_(8) setdest 430.748481 26.892605 19.947266" +$ns_ at 133.000000 "$node_(9) setdest 464.585787 141.605483 12.365900" +$ns_ at 133.000000 "$node_(10) setdest 461.707413 45.848981 19.914416" + +$ns_ at 134.000000 "$node_(1) setdest 300.449527 122.443033 19.996858" +$ns_ at 134.000000 "$node_(2) setdest 247.068943 76.153410 19.990212" +$ns_ at 134.000000 "$node_(3) setdest 281.620279 31.641423 19.994242" +$ns_ at 134.000000 "$node_(4) setdest 412.407798 76.413196 14.578139" +$ns_ at 134.000000 "$node_(5) setdest 236.182515 98.124469 19.967150" +$ns_ at 134.000000 "$node_(6) setdest 565.493969 77.924687 12.891814" +$ns_ at 134.000000 "$node_(7) setdest 563.139327 52.127120 8.355642" +$ns_ at 134.000000 "$node_(8) setdest 416.947036 15.226109 18.071719" +$ns_ at 134.000000 "$node_(9) setdest 472.330576 129.982714 13.966765" +$ns_ at 134.000000 "$node_(10) setdest 445.040272 34.848403 19.970135" + +$ns_ at 135.000000 "$node_(1) setdest 280.660058 119.566270 19.997470" +$ns_ at 135.000000 "$node_(2) setdest 228.579859 68.567935 19.984636" +$ns_ at 135.000000 "$node_(3) setdest 261.870140 28.489966 19.999992" +$ns_ at 135.000000 "$node_(4) setdest 396.230443 76.619532 16.178671" +$ns_ at 135.000000 "$node_(5) setdest 221.570082 111.764000 19.988997" +$ns_ at 135.000000 "$node_(6) setdest 565.462360 63.456976 
14.467745" +$ns_ at 135.000000 "$node_(7) setdest 553.580974 54.883009 9.947715" +$ns_ at 135.000000 "$node_(8) setdest 426.301069 2.292969 15.961330" +$ns_ at 135.000000 "$node_(9) setdest 481.564388 117.456807 15.561543" +$ns_ at 135.000000 "$node_(10) setdest 426.598641 27.230647 19.953044" + +$ns_ at 136.000000 "$node_(1) setdest 260.964022 116.093665 19.999821" +$ns_ at 136.000000 "$node_(2) setdest 211.157455 58.763950 19.991455" +$ns_ at 136.000000 "$node_(3) setdest 242.069536 25.684400 19.998378" +$ns_ at 136.000000 "$node_(4) setdest 378.451498 76.635633 17.778952" +$ns_ at 136.000000 "$node_(5) setdest 207.405609 125.883222 19.999618" +$ns_ at 136.000000 "$node_(6) setdest 570.550150 48.286694 16.000721" +$ns_ at 136.000000 "$node_(7) setdest 543.178360 59.884928 11.542685" +$ns_ at 136.000000 "$node_(8) setdest 439.426261 7.213786 14.017314" +$ns_ at 136.000000 "$node_(9) setdest 492.799836 104.479486 17.165259" +$ns_ at 136.000000 "$node_(10) setdest 416.515775 29.332216 10.299553" + +$ns_ at 137.000000 "$node_(1) setdest 241.166659 113.273096 19.997279" +$ns_ at 137.000000 "$node_(2) setdest 193.607811 49.185768 19.993288" +$ns_ at 137.000000 "$node_(3) setdest 222.148163 23.923978 19.999004" +$ns_ at 137.000000 "$node_(4) setdest 359.072806 76.657629 19.378705" +$ns_ at 137.000000 "$node_(5) setdest 192.886203 139.634471 19.997750" +$ns_ at 137.000000 "$node_(6) setdest 583.970533 42.100482 14.777547" +$ns_ at 137.000000 "$node_(7) setdest 532.296270 67.248819 13.139511" +$ns_ at 137.000000 "$node_(8) setdest 453.839916 9.148842 14.542968" +$ns_ at 137.000000 "$node_(9) setdest 505.861846 91.012627 18.760928" +$ns_ at 137.000000 "$node_(10) setdest 417.284418 30.284041 1.223431" + +$ns_ at 138.000000 "$node_(1) setdest 221.227107 111.767797 19.996292" +$ns_ at 138.000000 "$node_(2) setdest 175.271841 41.218919 19.991961" +$ns_ at 138.000000 "$node_(3) setdest 202.333247 21.224188 19.997993" +$ns_ at 138.000000 "$node_(4) setdest 339.081034 77.216611 19.999585" +$ns_ at 138.000000 "$node_(5) setdest 177.604021 152.479213 19.963278" +$ns_ at 138.000000 "$node_(6) setdest 588.327423 50.944124 9.858625" +$ns_ at 138.000000 "$node_(7) setdest 521.858823 77.648040 14.733774" +$ns_ at 138.000000 "$node_(8) setdest 470.072953 9.536007 16.237653" +$ns_ at 138.000000 "$node_(9) setdest 520.526387 77.499927 19.940959" +$ns_ at 138.000000 "$node_(10) setdest 420.076823 30.790374 2.837939" + +$ns_ at 139.000000 "$node_(1) setdest 201.240697 111.066682 19.998704" +$ns_ at 139.000000 "$node_(2) setdest 164.466411 38.985513 11.033831" +$ns_ at 139.000000 "$node_(3) setdest 184.238943 13.733066 19.583686" +$ns_ at 139.000000 "$node_(4) setdest 319.132136 78.610994 19.997570" +$ns_ at 139.000000 "$node_(5) setdest 159.330537 160.396138 19.914767" +$ns_ at 139.000000 "$node_(6) setdest 588.282286 62.792033 11.847995" +$ns_ at 139.000000 "$node_(7) setdest 512.243129 90.872745 16.350976" +$ns_ at 139.000000 "$node_(8) setdest 487.878750 9.368775 17.806582" +$ns_ at 139.000000 "$node_(9) setdest 536.534192 65.547922 19.977493" +$ns_ at 139.000000 "$node_(10) setdest 424.324084 29.536639 4.428439" + +$ns_ at 140.000000 "$node_(1) setdest 181.242347 111.274656 19.999432" +$ns_ at 140.000000 "$node_(2) setdest 175.502159 37.272010 11.167982" +$ns_ at 140.000000 "$node_(3) setdest 182.074451 1.712658 12.213731" +$ns_ at 140.000000 "$node_(4) setdest 299.283899 81.049008 19.997411" +$ns_ at 140.000000 "$node_(5) setdest 139.532044 162.682204 19.930038" +$ns_ at 140.000000 "$node_(6) setdest 
589.138402 76.155223 13.390586" +$ns_ at 140.000000 "$node_(7) setdest 501.789191 105.467392 17.952396" +$ns_ at 140.000000 "$node_(8) setdest 505.637603 16.818056 19.257951" +$ns_ at 140.000000 "$node_(9) setdest 554.813129 57.628754 19.920661" +$ns_ at 140.000000 "$node_(10) setdest 429.800533 26.943825 6.059223" + +$ns_ at 141.000000 "$node_(1) setdest 161.247046 111.473323 19.996287" +$ns_ at 141.000000 "$node_(2) setdest 185.373829 29.829709 12.362755" +$ns_ at 141.000000 "$node_(3) setdest 183.778520 1.889613 1.713232" +$ns_ at 141.000000 "$node_(4) setdest 279.550001 84.297092 19.999419" +$ns_ at 141.000000 "$node_(5) setdest 119.793534 159.666849 19.967502" +$ns_ at 141.000000 "$node_(6) setdest 592.156148 90.830024 14.981874" +$ns_ at 141.000000 "$node_(7) setdest 488.189099 119.413179 19.479412" +$ns_ at 141.000000 "$node_(8) setdest 523.321051 26.105433 19.973976" +$ns_ at 141.000000 "$node_(9) setdest 571.486632 63.791740 17.776055" +$ns_ at 141.000000 "$node_(10) setdest 436.921300 24.140901 7.652561" + +$ns_ at 142.000000 "$node_(1) setdest 141.772095 115.540184 19.895051" +$ns_ at 142.000000 "$node_(2) setdest 196.339570 29.318053 10.977671" +$ns_ at 142.000000 "$node_(3) setdest 184.812005 4.915605 3.197612" +$ns_ at 142.000000 "$node_(4) setdest 259.803815 87.459443 19.997808" +$ns_ at 142.000000 "$node_(5) setdest 100.096591 156.319135 19.979409" +$ns_ at 142.000000 "$node_(6) setdest 590.692452 107.433540 16.667908" +$ns_ at 142.000000 "$node_(7) setdest 471.797782 130.833795 19.977632" +$ns_ at 142.000000 "$node_(8) setdest 541.126366 35.173355 19.981403" +$ns_ at 142.000000 "$node_(9) setdest 567.782585 75.167477 11.963585" +$ns_ at 142.000000 "$node_(10) setdest 446.013398 22.445407 9.248835" + +$ns_ at 143.000000 "$node_(1) setdest 127.450194 128.944032 19.615810" +$ns_ at 143.000000 "$node_(2) setdest 197.147859 36.740438 7.466266" +$ns_ at 143.000000 "$node_(3) setdest 185.893987 9.604906 4.812507" +$ns_ at 143.000000 "$node_(4) setdest 239.858385 88.607005 19.978416" +$ns_ at 143.000000 "$node_(5) setdest 80.175258 157.537770 19.958571" +$ns_ at 143.000000 "$node_(6) setdest 585.791697 124.971994 18.210293" +$ns_ at 143.000000 "$node_(7) setdest 454.924266 141.549003 19.988276" +$ns_ at 143.000000 "$node_(8) setdest 556.871642 47.471369 19.978860" +$ns_ at 143.000000 "$node_(9) setdest 558.206517 84.854499 13.621287" +$ns_ at 143.000000 "$node_(10) setdest 456.841721 21.664948 10.856413" + +$ns_ at 144.000000 "$node_(1) setdest 118.524599 146.840858 19.999066" +$ns_ at 144.000000 "$node_(2) setdest 196.591811 45.826445 9.103005" +$ns_ at 144.000000 "$node_(3) setdest 187.092769 15.899568 6.407796" +$ns_ at 144.000000 "$node_(4) setdest 220.066145 86.102236 19.950103" +$ns_ at 144.000000 "$node_(5) setdest 60.964143 162.769298 19.910697" +$ns_ at 144.000000 "$node_(6) setdest 574.313128 140.914141 19.644582" +$ns_ at 144.000000 "$node_(7) setdest 441.557267 156.254138 19.872536" +$ns_ at 144.000000 "$node_(8) setdest 571.374396 61.086555 19.892289" +$ns_ at 144.000000 "$node_(9) setdest 552.626228 98.963335 15.172307" +$ns_ at 144.000000 "$node_(10) setdest 469.299904 21.561095 12.458615" + +$ns_ at 145.000000 "$node_(1) setdest 112.115498 165.696718 19.915321" +$ns_ at 145.000000 "$node_(2) setdest 190.250128 47.613148 6.588570" +$ns_ at 145.000000 "$node_(3) setdest 187.313861 23.900730 8.004216" +$ns_ at 145.000000 "$node_(4) setdest 203.065906 94.794565 19.093577" +$ns_ at 145.000000 "$node_(5) setdest 45.972008 175.619703 19.745810" +$ns_ at 145.000000 
"$node_(6) setdest 560.255115 155.135300 19.996727" +$ns_ at 145.000000 "$node_(7) setdest 438.601722 175.534770 19.505846" +$ns_ at 145.000000 "$node_(8) setdest 588.750250 69.671374 19.380903" +$ns_ at 145.000000 "$node_(9) setdest 551.878665 115.778256 16.831530" +$ns_ at 145.000000 "$node_(10) setdest 483.345939 22.123346 14.057284" + +$ns_ at 146.000000 "$node_(1) setdest 111.731159 185.496144 19.803156" +$ns_ at 146.000000 "$node_(2) setdest 183.547590 43.776632 7.722881" +$ns_ at 146.000000 "$node_(3) setdest 185.862180 33.389020 9.598699" +$ns_ at 146.000000 "$node_(4) setdest 190.626206 108.639388 18.612503" +$ns_ at 146.000000 "$node_(5) setdest 47.072918 193.458445 17.872681" +$ns_ at 146.000000 "$node_(6) setdest 545.916182 169.049201 19.980032" +$ns_ at 146.000000 "$node_(7) setdest 444.782179 193.174570 18.691190" +$ns_ at 146.000000 "$node_(8) setdest 586.255822 69.734557 2.495228" +$ns_ at 146.000000 "$node_(9) setdest 554.631147 134.007561 18.435936" +$ns_ at 146.000000 "$node_(10) setdest 498.884774 24.015570 15.653623" + +$ns_ at 147.000000 "$node_(1) setdest 111.899554 186.471049 0.989342" +$ns_ at 147.000000 "$node_(2) setdest 175.795557 38.607572 9.317360" +$ns_ at 147.000000 "$node_(3) setdest 182.523745 44.090935 11.210537" +$ns_ at 147.000000 "$node_(4) setdest 179.070997 123.870067 19.117961" +$ns_ at 147.000000 "$node_(5) setdest 62.376736 192.812895 15.317427" +$ns_ at 147.000000 "$node_(6) setdest 531.919229 183.302102 19.976483" +$ns_ at 147.000000 "$node_(7) setdest 436.724006 199.709287 10.374810" +$ns_ at 147.000000 "$node_(8) setdest 576.168467 67.370369 10.360701" +$ns_ at 147.000000 "$node_(9) setdest 562.586604 152.129905 19.791631" +$ns_ at 147.000000 "$node_(10) setdest 515.441797 28.772088 17.226708" + +$ns_ at 148.000000 "$node_(1) setdest 108.990066 180.641200 6.515540" +$ns_ at 148.000000 "$node_(2) setdest 167.327952 31.720295 10.914894" +$ns_ at 148.000000 "$node_(3) setdest 179.086940 56.426752 12.805624" +$ns_ at 148.000000 "$node_(4) setdest 163.850105 134.459779 18.542318" +$ns_ at 148.000000 "$node_(5) setdest 69.482057 179.583763 15.016508" +$ns_ at 148.000000 "$node_(6) setdest 513.623888 191.062746 19.873276" +$ns_ at 148.000000 "$node_(7) setdest 426.833193 195.609739 10.706749" +$ns_ at 148.000000 "$node_(8) setdest 564.268369 67.203121 11.901272" +$ns_ at 148.000000 "$node_(9) setdest 570.951086 170.282477 19.987006" +$ns_ at 148.000000 "$node_(10) setdest 531.755709 38.134043 18.809304" + +$ns_ at 149.000000 "$node_(1) setdest 104.753756 173.710069 8.123232" +$ns_ at 149.000000 "$node_(2) setdest 156.878825 24.944390 12.453801" +$ns_ at 149.000000 "$node_(3) setdest 176.599709 70.622921 14.412410" +$ns_ at 149.000000 "$node_(4) setdest 146.434634 142.639565 19.240778" +$ns_ at 149.000000 "$node_(5) setdest 75.707512 165.318603 15.564417" +$ns_ at 149.000000 "$node_(6) setdest 493.935495 190.646337 19.692796" +$ns_ at 149.000000 "$node_(7) setdest 417.399684 187.603159 12.373213" +$ns_ at 149.000000 "$node_(8) setdest 551.157370 70.606676 13.545571" +$ns_ at 149.000000 "$node_(9) setdest 576.243443 189.189175 19.633448" +$ns_ at 149.000000 "$node_(10) setdest 546.734747 51.323269 19.958137" + +$ns_ at 150.000000 "$node_(1) setdest 99.278520 165.679385 9.719573" +$ns_ at 150.000000 "$node_(2) setdest 143.075068 24.363392 13.815978" +$ns_ at 150.000000 "$node_(3) setdest 173.400696 86.306718 16.006723" +$ns_ at 150.000000 "$node_(4) setdest 130.649757 153.943580 19.415023" +$ns_ at 150.000000 "$node_(5) setdest 88.488112 154.041636 
17.044464" +$ns_ at 150.000000 "$node_(6) setdest 475.349957 183.326049 19.975206" +$ns_ at 150.000000 "$node_(7) setdest 414.624429 174.179041 13.707991" +$ns_ at 150.000000 "$node_(8) setdest 537.189239 76.498837 15.160021" +$ns_ at 150.000000 "$node_(9) setdest 575.465562 180.778988 8.446085" +$ns_ at 150.000000 "$node_(10) setdest 560.401937 65.884227 19.970318" + +$ns_ at 151.000000 "$node_(1) setdest 91.813160 157.170777 11.319364" +$ns_ at 151.000000 "$node_(2) setdest 138.004596 35.391633 12.138031" +$ns_ at 151.000000 "$node_(3) setdest 168.114267 103.099736 17.605447" +$ns_ at 151.000000 "$node_(4) setdest 119.996573 169.374810 18.751351" +$ns_ at 151.000000 "$node_(5) setdest 105.657672 146.388340 18.798051" +$ns_ at 151.000000 "$node_(6) setdest 459.956503 171.347805 19.504788" +$ns_ at 151.000000 "$node_(7) setdest 416.813764 158.732695 15.600731" +$ns_ at 151.000000 "$node_(8) setdest 521.788366 83.114604 16.761720" +$ns_ at 151.000000 "$node_(9) setdest 577.687876 171.392622 9.645857" +$ns_ at 151.000000 "$node_(10) setdest 569.087851 83.782994 19.894998" + +$ns_ at 152.000000 "$node_(1) setdest 82.097039 148.659628 12.916759" +$ns_ at 152.000000 "$node_(2) setdest 146.119591 45.797946 13.196382" +$ns_ at 152.000000 "$node_(3) setdest 160.937701 120.920891 19.211888" +$ns_ at 152.000000 "$node_(4) setdest 105.936747 181.378621 18.487028" +$ns_ at 152.000000 "$node_(5) setdest 124.801631 141.076851 19.867135" +$ns_ at 152.000000 "$node_(6) setdest 448.544469 155.827294 19.264495" +$ns_ at 152.000000 "$node_(7) setdest 417.880719 141.561761 17.204051" +$ns_ at 152.000000 "$node_(8) setdest 503.612706 85.135031 18.287612" +$ns_ at 152.000000 "$node_(9) setdest 582.520079 161.274967 11.212365" +$ns_ at 152.000000 "$node_(10) setdest 569.702380 103.596801 19.823334" + +$ns_ at 153.000000 "$node_(1) setdest 70.379904 140.091185 14.515835" +$ns_ at 153.000000 "$node_(2) setdest 158.134566 54.430381 14.794545" +$ns_ at 153.000000 "$node_(3) setdest 156.219850 140.328761 19.973071" +$ns_ at 153.000000 "$node_(4) setdest 88.725972 189.580626 19.065247" +$ns_ at 153.000000 "$node_(5) setdest 144.327183 138.526583 19.691395" +$ns_ at 153.000000 "$node_(6) setdest 442.382049 137.609253 19.232068" +$ns_ at 153.000000 "$node_(7) setdest 416.376540 122.816638 18.805376" +$ns_ at 153.000000 "$node_(8) setdest 483.914674 83.287628 19.784472" +$ns_ at 153.000000 "$node_(9) setdest 586.204849 149.285184 12.543222" +$ns_ at 153.000000 "$node_(10) setdest 559.010783 119.996465 19.577008" + +$ns_ at 154.000000 "$node_(1) setdest 55.997138 132.866034 16.095551" +$ns_ at 154.000000 "$node_(2) setdest 172.406373 62.536019 16.412977" +$ns_ at 154.000000 "$node_(3) setdest 151.635699 159.667002 19.874155" +$ns_ at 154.000000 "$node_(4) setdest 78.054821 195.479620 12.193096" +$ns_ at 154.000000 "$node_(5) setdest 163.592391 140.383876 19.354528" +$ns_ at 154.000000 "$node_(6) setdest 441.383428 118.459568 19.175706" +$ns_ at 154.000000 "$node_(7) setdest 412.338720 103.315957 19.914330" +$ns_ at 154.000000 "$node_(8) setdest 464.344857 79.349340 19.962160" +$ns_ at 154.000000 "$node_(9) setdest 576.816577 139.350201 13.669073" +$ns_ at 154.000000 "$node_(10) setdest 540.866973 128.225326 19.922651" + +$ns_ at 155.000000 "$node_(1) setdest 41.252307 123.224103 17.617516" +$ns_ at 155.000000 "$node_(2) setdest 186.611964 72.729002 17.484157" +$ns_ at 155.000000 "$node_(3) setdest 144.090830 165.764530 9.700768" +$ns_ at 155.000000 "$node_(4) setdest 84.150229 193.306525 6.471193" +$ns_ at 155.000000 
"$node_(5) setdest 178.718241 150.366969 18.123286" +$ns_ at 155.000000 "$node_(6) setdest 439.600336 99.066105 19.475263" +$ns_ at 155.000000 "$node_(7) setdest 407.087226 84.024775 19.993196" +$ns_ at 155.000000 "$node_(8) setdest 447.207539 69.162889 19.936185" +$ns_ at 155.000000 "$node_(9) setdest 561.438062 134.917972 16.004480" +$ns_ at 155.000000 "$node_(10) setdest 526.487380 140.831521 19.122992" + +$ns_ at 156.000000 "$node_(1) setdest 34.078752 105.954513 18.700231" +$ns_ at 156.000000 "$node_(2) setdest 187.094893 73.875178 1.243760" +$ns_ at 156.000000 "$node_(3) setdest 141.135685 162.335572 4.526658" +$ns_ at 156.000000 "$node_(4) setdest 92.326017 193.380787 8.176125" +$ns_ at 156.000000 "$node_(5) setdest 191.326981 163.978216 18.553877" +$ns_ at 156.000000 "$node_(6) setdest 441.938872 80.015861 19.193242" +$ns_ at 156.000000 "$node_(7) setdest 410.869511 64.803585 19.589788" +$ns_ at 156.000000 "$node_(8) setdest 431.974289 56.242401 19.974757" +$ns_ at 156.000000 "$node_(9) setdest 545.448883 127.932357 17.448572" +$ns_ at 156.000000 "$node_(10) setdest 515.851572 156.904956 19.273705" + +$ns_ at 157.000000 "$node_(1) setdest 43.916030 90.582641 18.250109" +$ns_ at 157.000000 "$node_(2) setdest 185.047818 73.249294 2.140618" +$ns_ at 157.000000 "$node_(3) setdest 137.743986 157.188007 6.164499" +$ns_ at 157.000000 "$node_(4) setdest 102.029528 194.793971 9.805877" +$ns_ at 157.000000 "$node_(5) setdest 189.463671 163.013794 2.098103" +$ns_ at 157.000000 "$node_(6) setdest 447.800127 61.558044 19.366087" +$ns_ at 157.000000 "$node_(7) setdest 421.274390 47.730933 19.993423" +$ns_ at 157.000000 "$node_(8) setdest 417.596276 42.348809 19.993978" +$ns_ at 157.000000 "$node_(9) setdest 533.516072 113.369140 18.827619" +$ns_ at 157.000000 "$node_(10) setdest 508.458281 174.872098 19.428817" + +$ns_ at 158.000000 "$node_(1) setdest 55.511989 78.080336 17.052094" +$ns_ at 158.000000 "$node_(2) setdest 181.442077 72.235518 3.745545" +$ns_ at 158.000000 "$node_(3) setdest 134.731133 150.035278 7.761367" +$ns_ at 158.000000 "$node_(4) setdest 113.388357 195.826949 11.405702" +$ns_ at 158.000000 "$node_(5) setdest 189.430347 155.904971 7.108901" +$ns_ at 158.000000 "$node_(6) setdest 451.172245 42.328961 19.522521" +$ns_ at 158.000000 "$node_(7) setdest 430.947390 30.485112 19.773347" +$ns_ at 158.000000 "$node_(8) setdest 416.344354 24.696906 17.696242" +$ns_ at 158.000000 "$node_(9) setdest 532.772610 95.462924 17.921644" +$ns_ at 158.000000 "$node_(10) setdest 499.709975 191.961210 19.198193" + +$ns_ at 159.000000 "$node_(1) setdest 62.113656 61.761513 17.603579" +$ns_ at 159.000000 "$node_(2) setdest 176.123023 71.889821 5.330276" +$ns_ at 159.000000 "$node_(3) setdest 131.954733 141.086339 9.369734" +$ns_ at 159.000000 "$node_(4) setdest 126.380996 196.373692 13.004139" +$ns_ at 159.000000 "$node_(5) setdest 187.683229 147.360821 8.720947" +$ns_ at 159.000000 "$node_(6) setdest 451.279335 22.737061 19.592193" +$ns_ at 159.000000 "$node_(7) setdest 436.024525 11.839659 19.324342" +$ns_ at 159.000000 "$node_(8) setdest 426.226077 23.617931 9.940454" +$ns_ at 159.000000 "$node_(9) setdest 526.035117 80.351338 16.545509" +$ns_ at 159.000000 "$node_(10) setdest 503.894359 181.756466 11.029318" + +$ns_ at 160.000000 "$node_(1) setdest 65.960718 43.172044 18.983367" +$ns_ at 160.000000 "$node_(2) setdest 169.348075 73.331064 6.926551" +$ns_ at 160.000000 "$node_(3) setdest 128.796997 130.584343 10.966459" +$ns_ at 160.000000 "$node_(4) setdest 140.930810 195.800748 14.561090" +$ns_ 
at 160.000000 "$node_(5) setdest 187.045369 137.058154 10.322393" +$ns_ at 160.000000 "$node_(6) setdest 444.402909 18.604950 8.022441" +$ns_ at 160.000000 "$node_(7) setdest 434.909325 14.365720 2.761278" +$ns_ at 160.000000 "$node_(8) setdest 424.301771 22.863179 2.067028" +$ns_ at 160.000000 "$node_(9) setdest 512.812174 79.050168 13.286808" +$ns_ at 160.000000 "$node_(10) setdest 495.740867 177.608074 9.148147" + +$ns_ at 161.000000 "$node_(1) setdest 72.042419 24.842076 19.312556" +$ns_ at 161.000000 "$node_(2) setdest 161.871350 77.405615 8.514892" +$ns_ at 161.000000 "$node_(3) setdest 126.355196 118.254232 12.569568" +$ns_ at 161.000000 "$node_(4) setdest 150.057613 187.830197 12.117269" +$ns_ at 161.000000 "$node_(5) setdest 187.048576 125.133800 11.924355" +$ns_ at 161.000000 "$node_(6) setdest 434.736032 18.300064 9.671684" +$ns_ at 161.000000 "$node_(7) setdest 431.202382 15.736051 3.952118" +$ns_ at 161.000000 "$node_(8) setdest 421.173291 24.251968 3.422882" +$ns_ at 161.000000 "$node_(9) setdest 497.627026 80.566955 15.260713" +$ns_ at 161.000000 "$node_(10) setdest 485.182776 174.616363 10.973770" + +$ns_ at 162.000000 "$node_(1) setdest 76.823348 6.990102 18.481078" +$ns_ at 162.000000 "$node_(2) setdest 155.188412 84.965774 10.090474" +$ns_ at 162.000000 "$node_(3) setdest 123.148854 104.452853 14.168934" +$ns_ at 162.000000 "$node_(4) setdest 146.530739 184.039119 5.177945" +$ns_ at 162.000000 "$node_(5) setdest 185.580284 111.692807 13.520953" +$ns_ at 162.000000 "$node_(6) setdest 423.465948 18.099012 11.271878" +$ns_ at 162.000000 "$node_(7) setdest 426.070562 17.854362 5.551829" +$ns_ at 162.000000 "$node_(8) setdest 416.609786 26.350567 5.022917" +$ns_ at 162.000000 "$node_(9) setdest 480.789422 81.428650 16.859639" +$ns_ at 162.000000 "$node_(10) setdest 473.049914 171.315733 12.573802" + +$ns_ at 163.000000 "$node_(1) setdest 74.663686 11.270534 4.794396" +$ns_ at 163.000000 "$node_(2) setdest 150.947840 95.880837 11.709870" +$ns_ at 163.000000 "$node_(3) setdest 117.765575 89.646315 15.754785" +$ns_ at 163.000000 "$node_(4) setdest 140.287067 180.992837 6.947177" +$ns_ at 163.000000 "$node_(5) setdest 184.617717 96.618690 15.104819" +$ns_ at 163.000000 "$node_(6) setdest 410.594549 18.024662 12.871613" +$ns_ at 163.000000 "$node_(7) setdest 419.556244 20.806580 7.152058" +$ns_ at 163.000000 "$node_(8) setdest 410.491787 28.885939 6.622539" +$ns_ at 163.000000 "$node_(9) setdest 462.331560 81.229685 18.458934" +$ns_ at 163.000000 "$node_(10) setdest 459.326921 167.770043 14.173654" + +$ns_ at 164.000000 "$node_(1) setdest 77.700460 16.056801 5.668364" +$ns_ at 164.000000 "$node_(2) setdest 148.502806 108.986321 13.331613" +$ns_ at 164.000000 "$node_(3) setdest 109.149818 74.597894 17.340307" +$ns_ at 164.000000 "$node_(4) setdest 131.820856 181.190189 8.468511" +$ns_ at 164.000000 "$node_(5) setdest 185.147114 79.901197 16.725873" +$ns_ at 164.000000 "$node_(6) setdest 396.123158 17.912799 14.471824" +$ns_ at 164.000000 "$node_(7) setdest 411.667762 24.597608 8.752145" +$ns_ at 164.000000 "$node_(8) setdest 402.783503 31.748758 8.222735" +$ns_ at 164.000000 "$node_(9) setdest 442.549827 79.886156 19.827305" +$ns_ at 164.000000 "$node_(10) setdest 443.980346 164.127469 15.772942" + +$ns_ at 165.000000 "$node_(1) setdest 80.751011 22.632665 7.248989" +$ns_ at 165.000000 "$node_(2) setdest 147.928686 123.907075 14.931795" +$ns_ at 165.000000 "$node_(3) setdest 96.035595 60.970960 18.912329" +$ns_ at 165.000000 "$node_(4) setdest 122.502042 185.210639 10.149105" +$ns_ 
at 165.000000 "$node_(5) setdest 185.141179 61.579353 18.321845" +$ns_ at 165.000000 "$node_(6) setdest 380.051979 17.762097 16.071885" +$ns_ at 165.000000 "$node_(7) setdest 402.404607 29.219681 10.352276" +$ns_ at 165.000000 "$node_(8) setdest 393.507649 34.981420 9.823012" +$ns_ at 165.000000 "$node_(9) setdest 422.735375 77.199676 19.995742" +$ns_ at 165.000000 "$node_(10) setdest 426.942629 160.730540 17.373052" + +$ns_ at 166.000000 "$node_(1) setdest 82.726194 31.276530 8.866665" +$ns_ at 166.000000 "$node_(2) setdest 148.987588 140.420750 16.547590" +$ns_ at 166.000000 "$node_(3) setdest 78.637722 51.317801 19.896468" +$ns_ at 166.000000 "$node_(4) setdest 111.343377 182.740860 11.428718" +$ns_ at 166.000000 "$node_(5) setdest 183.940304 41.880407 19.735515" +$ns_ at 166.000000 "$node_(6) setdest 362.380169 17.752944 17.671813" +$ns_ at 166.000000 "$node_(7) setdest 391.653384 34.440900 11.951984" +$ns_ at 166.000000 "$node_(8) setdest 382.669885 38.590161 11.422790" +$ns_ at 166.000000 "$node_(9) setdest 403.199353 72.940877 19.994837" +$ns_ at 166.000000 "$node_(10) setdest 408.274085 157.340788 18.973796" + +$ns_ at 167.000000 "$node_(1) setdest 83.836001 41.676120 10.458640" +$ns_ at 167.000000 "$node_(2) setdest 153.399183 157.898562 18.025984" +$ns_ at 167.000000 "$node_(3) setdest 58.975217 50.354423 19.686091" +$ns_ at 167.000000 "$node_(4) setdest 103.966143 171.791634 13.202619" +$ns_ at 167.000000 "$node_(5) setdest 192.412042 25.101152 18.796642" +$ns_ at 167.000000 "$node_(6) setdest 343.128857 18.513654 19.266336" +$ns_ at 167.000000 "$node_(7) setdest 379.364734 40.154703 13.552065" +$ns_ at 167.000000 "$node_(8) setdest 370.163237 42.213745 13.021007" +$ns_ at 167.000000 "$node_(9) setdest 384.109842 66.996327 19.993677" +$ns_ at 167.000000 "$node_(10) setdest 388.699756 153.273291 19.992471" + +$ns_ at 168.000000 "$node_(1) setdest 84.894126 53.682119 12.052537" +$ns_ at 168.000000 "$node_(2) setdest 167.272527 171.355308 19.327536" +$ns_ at 168.000000 "$node_(3) setdest 50.795974 63.637111 15.599033" +$ns_ at 168.000000 "$node_(4) setdest 99.748116 157.437131 14.961400" +$ns_ at 168.000000 "$node_(5) setdest 198.947067 22.026635 7.222133" +$ns_ at 168.000000 "$node_(6) setdest 323.346757 21.396871 19.991109" +$ns_ at 168.000000 "$node_(7) setdest 365.975599 47.239433 15.148014" +$ns_ at 168.000000 "$node_(8) setdest 355.878352 45.327028 14.620207" +$ns_ at 168.000000 "$node_(9) setdest 365.020316 61.038320 19.997696" +$ns_ at 168.000000 "$node_(10) setdest 369.105098 149.269829 19.999458" + +$ns_ at 169.000000 "$node_(1) setdest 89.451531 66.521653 13.624375" +$ns_ at 169.000000 "$node_(2) setdest 185.788334 169.598855 18.598931" +$ns_ at 169.000000 "$node_(3) setdest 57.023606 66.857513 7.011019" +$ns_ at 169.000000 "$node_(4) setdest 94.375213 141.785412 16.548244" +$ns_ at 169.000000 "$node_(5) setdest 197.685641 21.098308 1.566202" +$ns_ at 169.000000 "$node_(6) setdest 303.800583 25.627234 19.998722" +$ns_ at 169.000000 "$node_(7) setdest 351.976358 56.428967 16.745934" +$ns_ at 169.000000 "$node_(8) setdest 339.845401 47.794114 16.221653" +$ns_ at 169.000000 "$node_(9) setdest 345.625203 56.166910 19.997526" +$ns_ at 169.000000 "$node_(10) setdest 349.412594 145.778646 19.999576" + +$ns_ at 170.000000 "$node_(1) setdest 98.595971 78.660537 15.197805" +$ns_ at 170.000000 "$node_(2) setdest 191.824229 152.854155 17.799355" +$ns_ at 170.000000 "$node_(3) setdest 65.505872 65.288800 8.626105" +$ns_ at 170.000000 "$node_(4) setdest 84.771789 126.399680 18.136882" 
+$ns_ at 170.000000 "$node_(5) setdest 194.447590 20.748193 3.256924" +$ns_ at 170.000000 "$node_(6) setdest 284.422089 30.561769 19.996891" +$ns_ at 170.000000 "$node_(7) setdest 336.488392 66.266943 18.348375" +$ns_ at 170.000000 "$node_(8) setdest 322.137911 49.798598 17.820583" +$ns_ at 170.000000 "$node_(9) setdest 325.954317 52.576373 19.995892" +$ns_ at 170.000000 "$node_(10) setdest 329.735957 142.201287 19.999189" + +$ns_ at 171.000000 "$node_(1) setdest 112.977297 87.271027 16.761953" +$ns_ at 171.000000 "$node_(2) setdest 191.611035 136.352639 16.502893" +$ns_ at 171.000000 "$node_(3) setdest 75.600405 63.592078 10.236135" +$ns_ at 171.000000 "$node_(4) setdest 69.325659 114.455881 19.525298" +$ns_ at 171.000000 "$node_(5) setdest 189.626370 20.167736 4.856036" +$ns_ at 171.000000 "$node_(6) setdest 265.427300 36.816797 19.998184" +$ns_ at 171.000000 "$node_(7) setdest 319.125355 75.725898 19.772377" +$ns_ at 171.000000 "$node_(8) setdest 302.740361 50.564467 19.412663" +$ns_ at 171.000000 "$node_(9) setdest 306.097225 50.200000 19.998782" +$ns_ at 171.000000 "$node_(10) setdest 310.324303 137.409564 19.994322" + +$ns_ at 172.000000 "$node_(1) setdest 130.889740 91.652087 18.440426" +$ns_ at 172.000000 "$node_(2) setdest 188.195099 119.984720 16.720568" +$ns_ at 172.000000 "$node_(3) setdest 87.261624 61.566337 11.835863" +$ns_ at 172.000000 "$node_(4) setdest 50.408455 108.069384 19.966171" +$ns_ at 172.000000 "$node_(5) setdest 183.210315 19.462493 6.454698" +$ns_ at 172.000000 "$node_(6) setdest 246.804372 44.105199 19.998357" +$ns_ at 172.000000 "$node_(7) setdest 300.903681 83.960245 19.995847" +$ns_ at 172.000000 "$node_(8) setdest 282.761368 49.802834 19.993506" +$ns_ at 172.000000 "$node_(9) setdest 286.178420 48.427559 19.997508" +$ns_ at 172.000000 "$node_(10) setdest 291.419852 130.900985 19.993495" + +$ns_ at 173.000000 "$node_(1) setdest 150.117221 96.445478 19.815968" +$ns_ at 173.000000 "$node_(2) setdest 186.605797 101.739334 18.314476" +$ns_ at 173.000000 "$node_(3) setdest 100.596739 59.980711 13.429055" +$ns_ at 173.000000 "$node_(4) setdest 30.490170 107.030632 19.945353" +$ns_ at 173.000000 "$node_(5) setdest 175.384564 17.552862 8.055375" +$ns_ at 173.000000 "$node_(6) setdest 228.279753 51.641445 19.998913" +$ns_ at 173.000000 "$node_(7) setdest 282.201859 91.043224 19.998168" +$ns_ at 173.000000 "$node_(8) setdest 263.086759 46.344289 19.976279" +$ns_ at 173.000000 "$node_(9) setdest 266.189461 47.940458 19.994893" +$ns_ at 173.000000 "$node_(10) setdest 273.195059 122.679443 19.993419" + +$ns_ at 174.000000 "$node_(1) setdest 168.902350 103.226473 19.971554" +$ns_ at 174.000000 "$node_(2) setdest 184.513754 82.093305 19.757102" +$ns_ at 174.000000 "$node_(3) setdest 115.623096 60.000922 15.026371" +$ns_ at 174.000000 "$node_(4) setdest 11.378375 105.199769 19.199290" +$ns_ at 174.000000 "$node_(5) setdest 165.905684 15.739495 9.650776" +$ns_ at 174.000000 "$node_(6) setdest 210.524312 60.821660 19.988297" +$ns_ at 174.000000 "$node_(7) setdest 263.237099 97.391304 19.999006" +$ns_ at 174.000000 "$node_(8) setdest 244.452806 39.179900 19.963784" +$ns_ at 174.000000 "$node_(9) setdest 246.259591 49.492870 19.990241" +$ns_ at 174.000000 "$node_(10) setdest 255.875893 112.695630 19.990749" + +$ns_ at 175.000000 "$node_(1) setdest 179.680069 118.492693 18.687341" +$ns_ at 175.000000 "$node_(2) setdest 180.258778 62.740869 19.814682" +$ns_ at 175.000000 "$node_(3) setdest 132.021670 62.570627 16.598693" +$ns_ at 175.000000 "$node_(4) setdest 5.755446 88.484328 
17.635853" +$ns_ at 175.000000 "$node_(5) setdest 155.223912 16.670444 10.722262" +$ns_ at 175.000000 "$node_(6) setdest 194.092342 72.196532 19.984929" +$ns_ at 175.000000 "$node_(7) setdest 244.024859 102.944527 19.998711" +$ns_ at 175.000000 "$node_(8) setdest 226.039257 31.479843 19.958700" +$ns_ at 175.000000 "$node_(9) setdest 226.666973 53.441639 19.986582" +$ns_ at 175.000000 "$node_(10) setdest 239.867016 100.730485 19.986216" + +$ns_ at 176.000000 "$node_(1) setdest 178.455056 113.826735 4.824088" +$ns_ at 176.000000 "$node_(2) setdest 170.317497 46.270492 19.238045" +$ns_ at 176.000000 "$node_(3) setdest 148.852278 69.512159 18.205884" +$ns_ at 176.000000 "$node_(4) setdest 7.747696 72.189753 16.415914" +$ns_ at 176.000000 "$node_(5) setdest 155.067960 17.267330 0.616923" +$ns_ at 176.000000 "$node_(6) setdest 179.520253 85.870172 19.982848" +$ns_ at 176.000000 "$node_(7) setdest 224.758061 108.308526 19.999549" +$ns_ at 176.000000 "$node_(8) setdest 206.348654 28.206572 19.960815" +$ns_ at 176.000000 "$node_(9) setdest 207.773501 59.963121 19.987322" +$ns_ at 176.000000 "$node_(10) setdest 225.733708 86.607919 19.979922" + +$ns_ at 177.000000 "$node_(1) setdest 175.246544 108.496016 6.221826" +$ns_ at 177.000000 "$node_(2) setdest 157.726290 31.362125 19.514044" +$ns_ at 177.000000 "$node_(3) setdest 164.683417 81.103727 19.621148" +$ns_ at 177.000000 "$node_(4) setdest 17.513917 60.031501 15.594941" +$ns_ at 177.000000 "$node_(5) setdest 156.535342 17.780235 1.554439" +$ns_ at 177.000000 "$node_(6) setdest 167.122891 101.545865 19.985543" +$ns_ at 177.000000 "$node_(7) setdest 205.299075 112.923724 19.998805" +$ns_ at 177.000000 "$node_(8) setdest 186.493072 30.105946 19.946222" +$ns_ at 177.000000 "$node_(9) setdest 189.928589 68.956396 19.982989" +$ns_ at 177.000000 "$node_(10) setdest 213.261658 71.046403 19.942738" + +$ns_ at 178.000000 "$node_(1) setdest 172.424003 101.197065 7.825690" +$ns_ at 178.000000 "$node_(2) setdest 142.888376 19.133692 19.227539" +$ns_ at 178.000000 "$node_(3) setdest 174.810337 98.211073 19.880034" +$ns_ at 178.000000 "$node_(4) setdest 31.152481 49.569283 17.189195" +$ns_ at 178.000000 "$node_(5) setdest 159.699097 18.019297 3.172774" +$ns_ at 178.000000 "$node_(6) setdest 157.052647 118.802745 19.980234" +$ns_ at 178.000000 "$node_(7) setdest 185.768249 117.229326 19.999784" +$ns_ at 178.000000 "$node_(8) setdest 167.818405 37.111714 19.945525" +$ns_ at 178.000000 "$node_(9) setdest 173.871013 80.833808 19.972948" +$ns_ at 178.000000 "$node_(10) setdest 196.419365 70.738967 16.845098" + +$ns_ at 179.000000 "$node_(1) setdest 170.753363 91.918408 9.427858" +$ns_ at 179.000000 "$node_(2) setdest 125.010723 12.749372 18.983414" +$ns_ at 179.000000 "$node_(3) setdest 181.874760 116.920836 19.999033" +$ns_ at 179.000000 "$node_(4) setdest 46.551054 38.789818 18.796620" +$ns_ at 179.000000 "$node_(5) setdest 163.906434 16.048997 4.645834" +$ns_ at 179.000000 "$node_(6) setdest 150.159618 137.551322 19.975559" +$ns_ at 179.000000 "$node_(7) setdest 166.344084 121.993628 19.999919" +$ns_ at 179.000000 "$node_(8) setdest 150.738069 47.480338 19.981147" +$ns_ at 179.000000 "$node_(9) setdest 160.581359 95.722920 19.957469" +$ns_ at 179.000000 "$node_(10) setdest 195.672687 84.091040 13.372934" + +$ns_ at 180.000000 "$node_(1) setdest 170.379290 80.898702 11.026053" +$ns_ at 180.000000 "$node_(2) setdest 106.108663 11.307135 18.957001" +$ns_ at 180.000000 "$node_(3) setdest 186.831517 136.224536 19.929934" +$ns_ at 180.000000 "$node_(4) setdest 63.824239 
28.948857 19.879825" +$ns_ at 180.000000 "$node_(5) setdest 169.419179 13.556525 6.050023" +$ns_ at 180.000000 "$node_(6) setdest 147.828308 157.269197 19.855216" +$ns_ at 180.000000 "$node_(7) setdest 147.059157 127.267473 19.993046" +$ns_ at 180.000000 "$node_(8) setdest 134.966214 59.765581 19.991963" +$ns_ at 180.000000 "$node_(9) setdest 151.747848 113.601200 19.941509" +$ns_ at 180.000000 "$node_(10) setdest 189.493134 92.502152 10.437130" + +$ns_ at 181.000000 "$node_(1) setdest 171.163052 68.288192 12.634842" +$ns_ at 181.000000 "$node_(2) setdest 91.123305 20.053465 17.351060" +$ns_ at 181.000000 "$node_(3) setdest 188.637831 156.141046 19.998253" +$ns_ at 181.000000 "$node_(4) setdest 82.685943 28.747684 18.862777" +$ns_ at 181.000000 "$node_(5) setdest 172.747269 19.290485 6.629817" +$ns_ at 181.000000 "$node_(6) setdest 160.476531 170.330272 18.181563" +$ns_ at 181.000000 "$node_(7) setdest 128.509912 134.706388 19.985292" +$ns_ at 181.000000 "$node_(8) setdest 117.253969 68.951910 19.952752" +$ns_ at 181.000000 "$node_(9) setdest 145.153561 132.482364 19.999575" +$ns_ at 181.000000 "$node_(10) setdest 178.683836 98.024600 12.138302" + +$ns_ at 182.000000 "$node_(1) setdest 172.888706 54.198358 14.195116" +$ns_ at 182.000000 "$node_(2) setdest 83.890009 35.949121 17.464032" +$ns_ at 182.000000 "$node_(3) setdest 190.094980 176.021799 19.934083" +$ns_ at 182.000000 "$node_(4) setdest 96.855632 39.340425 17.691417" +$ns_ at 182.000000 "$node_(5) setdest 173.027925 27.952089 8.666149" +$ns_ at 182.000000 "$node_(6) setdest 177.244737 175.603016 17.577672" +$ns_ at 182.000000 "$node_(7) setdest 111.416146 145.032855 19.970798" +$ns_ at 182.000000 "$node_(8) setdest 97.650034 72.426294 19.909435" +$ns_ at 182.000000 "$node_(9) setdest 139.260466 151.590578 19.996310" +$ns_ at 182.000000 "$node_(10) setdest 167.962697 106.536181 13.689040" + +$ns_ at 183.000000 "$node_(1) setdest 180.428826 40.405392 15.719393" +$ns_ at 183.000000 "$node_(2) setdest 80.665629 54.207418 18.540821" +$ns_ at 183.000000 "$node_(3) setdest 196.076606 187.662698 13.087794" +$ns_ at 183.000000 "$node_(4) setdest 113.096835 43.407822 16.742771" +$ns_ at 183.000000 "$node_(5) setdest 175.004921 38.052270 10.291850" +$ns_ at 183.000000 "$node_(6) setdest 192.567746 173.044954 15.535067" +$ns_ at 183.000000 "$node_(7) setdest 94.933105 156.301721 19.966922" +$ns_ at 183.000000 "$node_(8) setdest 78.202415 68.303846 19.879749" +$ns_ at 183.000000 "$node_(9) setdest 128.466810 168.153105 19.769176" +$ns_ at 183.000000 "$node_(10) setdest 155.692441 115.659456 15.290302" + +$ns_ at 184.000000 "$node_(1) setdest 194.682139 32.290056 16.401695" +$ns_ at 184.000000 "$node_(2) setdest 71.721236 70.341996 18.447948" +$ns_ at 184.000000 "$node_(3) setdest 195.005636 184.210017 3.614967" +$ns_ at 184.000000 "$node_(4) setdest 131.373418 43.329977 18.276749" +$ns_ at 184.000000 "$node_(5) setdest 177.681189 49.641032 11.893773" +$ns_ at 184.000000 "$node_(6) setdest 192.538130 172.725928 0.320398" +$ns_ at 184.000000 "$node_(7) setdest 76.208559 163.180223 19.947993" +$ns_ at 184.000000 "$node_(8) setdest 59.386392 61.528995 19.998534" +$ns_ at 184.000000 "$node_(9) setdest 119.659162 170.816167 9.201443" +$ns_ at 184.000000 "$node_(10) setdest 139.468774 119.990598 16.791848" + +$ns_ at 185.000000 "$node_(1) setdest 195.613467 33.069178 1.214250" +$ns_ at 185.000000 "$node_(2) setdest 72.027737 89.787372 19.447792" +$ns_ at 185.000000 "$node_(3) setdest 194.672645 188.582756 4.385400" +$ns_ at 185.000000 "$node_(4) 
setdest 134.492679 47.692958 5.363338" +$ns_ at 185.000000 "$node_(5) setdest 173.964993 62.431439 13.319333" +$ns_ at 185.000000 "$node_(6) setdest 191.099665 172.527656 1.452065" +$ns_ at 185.000000 "$node_(7) setdest 56.380453 165.404161 19.952436" +$ns_ at 185.000000 "$node_(8) setdest 40.804330 54.137082 19.998335" +$ns_ at 185.000000 "$node_(9) setdest 119.805941 169.859912 0.967454" +$ns_ at 185.000000 "$node_(10) setdest 121.371407 116.477413 18.435216" + +$ns_ at 186.000000 "$node_(1) setdest 195.523460 35.547228 2.479684" +$ns_ at 186.000000 "$node_(2) setdest 72.860222 109.769952 19.999913" +$ns_ at 186.000000 "$node_(3) setdest 193.465576 194.532746 6.071194" +$ns_ at 186.000000 "$node_(4) setdest 134.472696 54.482129 6.789201" +$ns_ at 186.000000 "$node_(5) setdest 169.445995 76.675295 14.943519" +$ns_ at 186.000000 "$node_(6) setdest 188.017704 172.548818 3.082034" +$ns_ at 186.000000 "$node_(7) setdest 36.936529 161.362410 19.859555" +$ns_ at 186.000000 "$node_(8) setdest 21.592497 48.640305 19.982719" +$ns_ at 186.000000 "$node_(9) setdest 120.575903 167.393762 2.583551" +$ns_ at 186.000000 "$node_(10) setdest 104.372214 106.231584 19.848162" + +$ns_ at 187.000000 "$node_(1) setdest 195.465659 39.626432 4.079614" +$ns_ at 187.000000 "$node_(2) setdest 73.033876 129.767033 19.997835" +$ns_ at 187.000000 "$node_(3) setdest 192.328990 202.117959 7.669894" +$ns_ at 187.000000 "$node_(4) setdest 134.718626 62.867220 8.388696" +$ns_ at 187.000000 "$node_(5) setdest 165.002660 92.609977 16.542591" +$ns_ at 187.000000 "$node_(6) setdest 183.363363 172.076516 4.678243" +$ns_ at 187.000000 "$node_(7) setdest 27.040671 147.302132 17.193587" +$ns_ at 187.000000 "$node_(8) setdest 15.086590 34.481154 15.582310" +$ns_ at 187.000000 "$node_(9) setdest 122.262624 163.566603 4.182365" +$ns_ at 187.000000 "$node_(10) setdest 87.019330 96.377428 19.955625" + +$ns_ at 188.000000 "$node_(1) setdest 195.493442 45.306014 5.679650" +$ns_ at 188.000000 "$node_(2) setdest 72.088632 149.742168 19.997487" +$ns_ at 188.000000 "$node_(3) setdest 191.694603 211.364885 9.268662" +$ns_ at 188.000000 "$node_(4) setdest 135.397225 72.832879 9.988737" +$ns_ at 188.000000 "$node_(5) setdest 160.860419 110.273346 18.142567" +$ns_ at 188.000000 "$node_(6) setdest 177.331385 170.366266 6.269746" +$ns_ at 188.000000 "$node_(7) setdest 33.339932 142.144690 8.141247" +$ns_ at 188.000000 "$node_(8) setdest 25.950932 28.357513 12.471284" +$ns_ at 188.000000 "$node_(9) setdest 125.574753 158.840877 5.770848" +$ns_ at 188.000000 "$node_(10) setdest 73.723879 81.531864 19.928868" + +$ns_ at 189.000000 "$node_(1) setdest 195.501926 52.585695 7.279686" +$ns_ at 189.000000 "$node_(2) setdest 70.118723 169.643281 19.998371" +$ns_ at 189.000000 "$node_(3) setdest 191.784503 222.234607 10.870094" +$ns_ at 189.000000 "$node_(4) setdest 136.587465 84.360558 11.588963" +$ns_ at 189.000000 "$node_(5) setdest 157.134792 129.574013 19.656960" +$ns_ at 189.000000 "$node_(6) setdest 170.511875 166.445811 7.866110" +$ns_ at 189.000000 "$node_(7) setdest 42.075135 137.545000 9.872230" +$ns_ at 189.000000 "$node_(8) setdest 39.756298 24.207709 14.415582" +$ns_ at 189.000000 "$node_(9) setdest 131.266657 154.174797 7.360033" +$ns_ at 189.000000 "$node_(10) setdest 62.437001 65.038639 19.985497" + +$ns_ at 190.000000 "$node_(1) setdest 195.416493 61.464644 8.879359" +$ns_ at 190.000000 "$node_(2) setdest 67.278825 189.439428 19.998811" +$ns_ at 190.000000 "$node_(3) setdest 192.074302 234.702462 12.471222" +$ns_ at 190.000000 "$node_(4) 
setdest 138.172614 97.454006 13.189051" +$ns_ at 190.000000 "$node_(5) setdest 154.020435 149.328771 19.998742" +$ns_ at 190.000000 "$node_(6) setdest 163.520779 160.060724 9.468092" +$ns_ at 190.000000 "$node_(7) setdest 51.690561 131.290754 11.470484" +$ns_ at 190.000000 "$node_(8) setdest 55.087819 19.437284 16.056541" +$ns_ at 190.000000 "$node_(9) setdest 139.311546 150.191848 8.976865" +$ns_ at 190.000000 "$node_(10) setdest 59.419189 45.948052 19.327640" + +$ns_ at 191.000000 "$node_(1) setdest 195.032236 71.937101 10.479504" +$ns_ at 191.000000 "$node_(2) setdest 63.774126 209.129366 19.999414" +$ns_ at 191.000000 "$node_(3) setdest 192.134525 248.773197 14.070864" +$ns_ at 191.000000 "$node_(4) setdest 140.151210 112.110209 14.789156" +$ns_ at 191.000000 "$node_(5) setdest 151.528492 169.172266 19.999351" +$ns_ at 191.000000 "$node_(6) setdest 156.653106 151.371539 11.075507" +$ns_ at 191.000000 "$node_(7) setdest 61.020362 122.160481 13.054006" +$ns_ at 191.000000 "$node_(8) setdest 72.246356 15.431568 17.619908" +$ns_ at 191.000000 "$node_(9) setdest 149.037901 146.020315 10.583179" +$ns_ at 191.000000 "$node_(10) setdest 62.233515 26.981985 19.173735" + +$ns_ at 192.000000 "$node_(1) setdest 194.328858 83.995651 12.079047" +$ns_ at 192.000000 "$node_(2) setdest 60.068139 228.782066 19.999074" +$ns_ at 192.000000 "$node_(3) setdest 191.210367 264.413225 15.667308" +$ns_ at 192.000000 "$node_(4) setdest 142.034412 128.390629 16.388976" +$ns_ at 192.000000 "$node_(5) setdest 149.209226 189.037194 19.999859" +$ns_ at 192.000000 "$node_(6) setdest 149.885588 140.648727 12.679826" +$ns_ at 192.000000 "$node_(7) setdest 70.178665 110.699923 14.670341" +$ns_ at 192.000000 "$node_(8) setdest 91.480263 14.579377 19.252776" +$ns_ at 192.000000 "$node_(9) setdest 160.621424 145.650484 11.589425" +$ns_ at 192.000000 "$node_(10) setdest 71.531198 10.851366 18.618372" + +$ns_ at 193.000000 "$node_(1) setdest 193.150556 97.624358 13.679549" +$ns_ at 193.000000 "$node_(2) setdest 56.962041 248.539067 19.999673" +$ns_ at 193.000000 "$node_(3) setdest 188.728422 281.499973 17.266065" +$ns_ at 193.000000 "$node_(4) setdest 143.661611 146.305417 17.988535" +$ns_ at 193.000000 "$node_(5) setdest 146.639128 208.871240 19.999870" +$ns_ at 193.000000 "$node_(6) setdest 141.678111 128.965386 14.278065" +$ns_ at 193.000000 "$node_(7) setdest 77.526460 96.240451 16.219323" +$ns_ at 193.000000 "$node_(8) setdest 111.285642 11.901841 19.985551" +$ns_ at 193.000000 "$node_(9) setdest 162.320582 151.191100 5.795306" +$ns_ at 193.000000 "$node_(10) setdest 69.434904 14.512209 4.218556" + +$ns_ at 194.000000 "$node_(1) setdest 191.331293 112.793988 15.278331" +$ns_ at 194.000000 "$node_(2) setdest 54.045499 268.325206 19.999938" +$ns_ at 194.000000 "$node_(3) setdest 184.809887 299.958796 18.870163" +$ns_ at 194.000000 "$node_(4) setdest 144.576980 165.824228 19.540263" +$ns_ at 194.000000 "$node_(5) setdest 143.541952 228.628802 19.998844" +$ns_ at 194.000000 "$node_(6) setdest 132.011405 116.363647 15.882350" +$ns_ at 194.000000 "$node_(7) setdest 81.390315 78.832201 17.831897" +$ns_ at 194.000000 "$node_(8) setdest 130.995388 8.526521 19.996672" +$ns_ at 194.000000 "$node_(9) setdest 158.304055 156.109234 6.349844" +$ns_ at 194.000000 "$node_(10) setdest 64.357297 23.822765 10.605120" + +$ns_ at 195.000000 "$node_(1) setdest 188.282059 129.392447 16.876216" +$ns_ at 195.000000 "$node_(2) setdest 51.150472 288.114471 19.999904" +$ns_ at 195.000000 "$node_(3) setdest 179.637007 319.243200 19.966145" +$ns_ at 
195.000000 "$node_(4) setdest 144.982177 185.819869 19.999746" +$ns_ at 195.000000 "$node_(5) setdest 139.870629 248.288518 19.999576" +$ns_ at 195.000000 "$node_(6) setdest 122.921583 101.453520 17.462438" +$ns_ at 195.000000 "$node_(7) setdest 80.824768 59.414565 19.425870" +$ns_ at 195.000000 "$node_(8) setdest 150.685052 6.112369 19.837111" +$ns_ at 195.000000 "$node_(9) setdest 153.669623 162.564787 7.946831" +$ns_ at 195.000000 "$node_(10) setdest 62.963882 36.031903 12.288395" + +$ns_ at 196.000000 "$node_(1) setdest 183.957321 147.358510 18.479253" +$ns_ at 196.000000 "$node_(2) setdest 48.611087 307.952411 19.999809" +$ns_ at 196.000000 "$node_(3) setdest 172.922634 338.070779 19.989011" +$ns_ at 196.000000 "$node_(4) setdest 145.105736 205.819344 19.999857" +$ns_ at 196.000000 "$node_(5) setdest 135.716006 267.851495 19.999273" +$ns_ at 196.000000 "$node_(6) setdest 114.965812 84.111046 19.080244" +$ns_ at 196.000000 "$node_(7) setdest 77.199943 39.771797 19.974427" +$ns_ at 196.000000 "$node_(8) setdest 170.172454 10.569598 19.990641" +$ns_ at 196.000000 "$node_(9) setdest 146.820750 169.209669 9.542616" +$ns_ at 196.000000 "$node_(10) setdest 65.562221 49.704614 13.917413" + +$ns_ at 197.000000 "$node_(1) setdest 179.424231 166.672897 19.839215" +$ns_ at 197.000000 "$node_(2) setdest 46.811525 327.868842 19.997566" +$ns_ at 197.000000 "$node_(3) setdest 163.939414 355.929108 19.990452" +$ns_ at 197.000000 "$node_(4) setdest 145.006109 225.818471 19.999375" +$ns_ at 197.000000 "$node_(5) setdest 130.662166 287.197475 19.995206" +$ns_ at 197.000000 "$node_(6) setdest 106.357417 66.062345 19.996501" +$ns_ at 197.000000 "$node_(7) setdest 75.946021 20.181201 19.630684" +$ns_ at 197.000000 "$node_(8) setdest 188.099728 18.417830 19.569923" +$ns_ at 197.000000 "$node_(9) setdest 138.645810 176.738594 11.113701" +$ns_ at 197.000000 "$node_(10) setdest 71.277978 64.136289 15.522342" + +$ns_ at 198.000000 "$node_(1) setdest 175.356307 186.254820 19.999992" +$ns_ at 198.000000 "$node_(2) setdest 45.427324 347.820835 19.999951" +$ns_ at 198.000000 "$node_(3) setdest 152.416252 372.246071 19.975649" +$ns_ at 198.000000 "$node_(4) setdest 143.832197 245.780166 19.996183" +$ns_ at 198.000000 "$node_(5) setdest 123.549176 305.874927 19.986041" +$ns_ at 198.000000 "$node_(6) setdest 95.007893 49.716550 19.899666" +$ns_ at 198.000000 "$node_(7) setdest 89.910882 8.256505 18.363434" +$ns_ at 198.000000 "$node_(8) setdest 186.767229 35.321285 16.955895" +$ns_ at 198.000000 "$node_(9) setdest 140.360459 185.666801 9.091364" +$ns_ at 198.000000 "$node_(10) setdest 79.585340 79.054188 17.075010" + +$ns_ at 199.000000 "$node_(1) setdest 171.310371 205.841290 19.999985" +$ns_ at 199.000000 "$node_(2) setdest 45.246485 367.808943 19.988926" +$ns_ at 199.000000 "$node_(3) setdest 141.026187 379.353825 13.425861" +$ns_ at 199.000000 "$node_(4) setdest 141.471649 265.638334 19.997976" +$ns_ at 199.000000 "$node_(5) setdest 113.615585 323.205618 19.975712" +$ns_ at 199.000000 "$node_(6) setdest 77.769915 49.534854 17.238935" +$ns_ at 199.000000 "$node_(7) setdest 106.824065 10.366785 17.044326" +$ns_ at 199.000000 "$node_(8) setdest 175.640526 47.155431 16.243476" +$ns_ at 199.000000 "$node_(9) setdest 145.881782 184.022631 5.760929" +$ns_ at 199.000000 "$node_(10) setdest 88.755604 95.337817 18.688240" + +$ns_ at 200.000000 "$node_(1) setdest 167.224719 225.419522 19.999993" +$ns_ at 200.000000 "$node_(2) setdest 49.050666 387.354600 19.912420" +$ns_ at 200.000000 "$node_(3) setdest 144.119591 377.312601 
3.706177" +$ns_ at 200.000000 "$node_(4) setdest 138.256986 285.377161 19.998884" +$ns_ at 200.000000 "$node_(5) setdest 100.527566 338.285629 19.967548" +$ns_ at 200.000000 "$node_(6) setdest 72.091162 65.514596 16.958785" +$ns_ at 200.000000 "$node_(7) setdest 122.081158 16.304017 16.371610" +$ns_ at 200.000000 "$node_(8) setdest 160.926667 51.839992 15.441592" +$ns_ at 200.000000 "$node_(9) setdest 149.142428 177.126205 7.628401" +$ns_ at 200.000000 "$node_(10) setdest 99.613365 111.937528 19.835356" + +$ns_ at 201.000000 "$node_(1) setdest 163.241486 245.018750 19.999897" +$ns_ at 201.000000 "$node_(2) setdest 61.654558 402.610167 19.788644" +$ns_ at 201.000000 "$node_(3) setdest 149.076068 375.294812 5.351461" +$ns_ at 201.000000 "$node_(4) setdest 134.200711 304.958942 19.997488" +$ns_ at 201.000000 "$node_(5) setdest 84.538504 350.240382 19.964123" +$ns_ at 201.000000 "$node_(6) setdest 67.136324 80.376662 15.666251" +$ns_ at 201.000000 "$node_(7) setdest 139.717702 18.648063 17.791633" +$ns_ at 201.000000 "$node_(8) setdest 145.832431 59.287246 16.831446" +$ns_ at 201.000000 "$node_(9) setdest 152.552155 168.551362 9.227902" +$ns_ at 201.000000 "$node_(10) setdest 117.014782 121.404332 19.809839" + +$ns_ at 202.000000 "$node_(1) setdest 159.391115 264.644218 19.999609" +$ns_ at 202.000000 "$node_(2) setdest 78.236392 413.777419 19.991617" +$ns_ at 202.000000 "$node_(3) setdest 155.820516 373.560161 6.963950" +$ns_ at 202.000000 "$node_(4) setdest 129.372934 324.366691 19.999203" +$ns_ at 202.000000 "$node_(5) setdest 66.671342 359.206049 19.990464" +$ns_ at 202.000000 "$node_(6) setdest 59.005599 94.783951 16.543236" +$ns_ at 202.000000 "$node_(7) setdest 158.694781 17.342255 19.021952" +$ns_ at 202.000000 "$node_(8) setdest 128.180124 62.164422 17.885247" +$ns_ at 202.000000 "$node_(9) setdest 155.239978 158.052038 10.837906" +$ns_ at 202.000000 "$node_(10) setdest 136.907991 122.875448 19.947530" + +$ns_ at 203.000000 "$node_(1) setdest 156.090421 284.369430 19.999465" +$ns_ at 203.000000 "$node_(2) setdest 95.064626 424.526948 19.968521" +$ns_ at 203.000000 "$node_(3) setdest 163.970693 371.055823 8.526259" +$ns_ at 203.000000 "$node_(4) setdest 123.550868 343.497632 19.997233" +$ns_ at 203.000000 "$node_(5) setdest 47.626658 365.068247 19.926499" +$ns_ at 203.000000 "$node_(6) setdest 44.129184 104.390756 17.708711" +$ns_ at 203.000000 "$node_(7) setdest 173.699979 7.360376 18.022038" +$ns_ at 203.000000 "$node_(8) setdest 113.299302 53.211896 17.366248" +$ns_ at 203.000000 "$node_(9) setdest 158.404079 146.023580 12.437658" +$ns_ at 203.000000 "$node_(10) setdest 156.369232 118.860713 19.871034" + +$ns_ at 204.000000 "$node_(1) setdest 153.023861 304.132837 19.999901" +$ns_ at 204.000000 "$node_(2) setdest 111.823384 435.393263 19.973301" +$ns_ at 204.000000 "$node_(3) setdest 169.826488 362.901841 10.038813" +$ns_ at 204.000000 "$node_(4) setdest 116.289633 362.124419 19.992067" +$ns_ at 204.000000 "$node_(5) setdest 28.522161 370.489417 19.858773" +$ns_ at 204.000000 "$node_(6) setdest 27.051512 112.644070 18.967447" +$ns_ at 204.000000 "$node_(7) setdest 180.486356 8.534284 6.887160" +$ns_ at 204.000000 "$node_(8) setdest 98.103987 42.322359 18.694375" +$ns_ at 204.000000 "$node_(9) setdest 161.029362 132.243002 14.028416" +$ns_ at 204.000000 "$node_(10) setdest 172.443816 107.486758 19.691600" + +$ns_ at 205.000000 "$node_(1) setdest 149.277393 323.777154 19.998380" +$ns_ at 205.000000 "$node_(2) setdest 129.394110 444.945228 19.999261" +$ns_ at 205.000000 "$node_(3) 
setdest 168.110917 364.353857 2.247562" +$ns_ at 205.000000 "$node_(4) setdest 106.734376 379.674290 19.982515" +$ns_ at 205.000000 "$node_(5) setdest 24.594624 377.857992 8.349937" +$ns_ at 205.000000 "$node_(6) setdest 9.174780 116.138563 18.215078" +$ns_ at 205.000000 "$node_(7) setdest 181.039157 13.299606 4.797278" +$ns_ at 205.000000 "$node_(8) setdest 82.787587 30.297708 19.472656" +$ns_ at 205.000000 "$node_(9) setdest 162.200630 116.656242 15.630705" +$ns_ at 205.000000 "$node_(10) setdest 181.116590 90.598550 18.984958" + +$ns_ at 206.000000 "$node_(1) setdest 144.598638 343.219337 19.997231" +$ns_ at 206.000000 "$node_(2) setdest 147.113812 454.211788 19.996424" +$ns_ at 206.000000 "$node_(3) setdest 161.975039 369.257123 7.854363" +$ns_ at 206.000000 "$node_(4) setdest 94.731875 395.657510 19.988080" +$ns_ at 206.000000 "$node_(5) setdest 28.110519 380.700946 4.521493" +$ns_ at 206.000000 "$node_(6) setdest 5.389523 111.880557 5.697261" +$ns_ at 206.000000 "$node_(7) setdest 182.643028 19.505356 6.409660" +$ns_ at 206.000000 "$node_(8) setdest 66.703839 19.152674 19.567798" +$ns_ at 206.000000 "$node_(9) setdest 163.096529 99.441502 17.238037" +$ns_ at 206.000000 "$node_(10) setdest 185.879217 71.902868 19.292773" + +$ns_ at 207.000000 "$node_(1) setdest 138.750622 362.341225 19.996146" +$ns_ at 207.000000 "$node_(2) setdest 163.876030 465.070012 19.971805" +$ns_ at 207.000000 "$node_(3) setdest 153.481555 373.421602 9.459501" +$ns_ at 207.000000 "$node_(4) setdest 81.367103 410.535184 19.999058" +$ns_ at 207.000000 "$node_(5) setdest 33.182405 384.121656 6.117622" +$ns_ at 207.000000 "$node_(6) setdest 7.670427 109.002037 3.672656" +$ns_ at 207.000000 "$node_(7) setdest 183.240979 27.459965 7.977052" +$ns_ at 207.000000 "$node_(8) setdest 48.471800 15.353912 18.623584" +$ns_ at 207.000000 "$node_(9) setdest 164.150679 80.632580 18.838438" +$ns_ at 207.000000 "$node_(10) setdest 185.014933 52.993766 18.928844" + +$ns_ at 208.000000 "$node_(1) setdest 131.154711 380.829290 19.987657" +$ns_ at 208.000000 "$node_(2) setdest 180.272876 476.520635 19.999333" +$ns_ at 208.000000 "$node_(3) setdest 143.368479 377.932962 11.073692" +$ns_ at 208.000000 "$node_(4) setdest 69.391507 426.525693 19.977769" +$ns_ at 208.000000 "$node_(5) setdest 39.946435 387.849275 7.723163" +$ns_ at 208.000000 "$node_(6) setdest 10.931499 104.858445 5.272945" +$ns_ at 208.000000 "$node_(7) setdest 180.816137 36.719425 9.571701" +$ns_ at 208.000000 "$node_(8) setdest 30.381523 9.369616 19.054393" +$ns_ at 208.000000 "$node_(9) setdest 164.385587 60.684667 19.949297" +$ns_ at 208.000000 "$node_(10) setdest 174.507313 38.662418 17.770695" + +$ns_ at 209.000000 "$node_(1) setdest 121.092963 398.095887 19.984348" +$ns_ at 209.000000 "$node_(2) setdest 189.211708 490.664051 16.731375" +$ns_ at 209.000000 "$node_(3) setdest 131.945788 383.417256 12.671044" +$ns_ at 209.000000 "$node_(4) setdest 59.975120 444.153298 19.985015" +$ns_ at 209.000000 "$node_(5) setdest 48.328073 391.937141 9.325369" +$ns_ at 209.000000 "$node_(6) setdest 14.411675 98.942625 6.863568" +$ns_ at 209.000000 "$node_(7) setdest 175.728098 46.705469 11.207551" +$ns_ at 209.000000 "$node_(8) setdest 12.204122 7.859309 18.240037" +$ns_ at 209.000000 "$node_(9) setdest 159.026552 41.578356 19.843648" +$ns_ at 209.000000 "$node_(10) setdest 163.369819 23.521448 18.796083" + +$ns_ at 210.000000 "$node_(1) setdest 108.982057 413.993854 19.985480" +$ns_ at 210.000000 "$node_(2) setdest 180.764820 496.526252 10.281795" +$ns_ at 210.000000 "$node_(3) 
setdest 119.741871 390.808564 14.267692" +$ns_ at 210.000000 "$node_(4) setdest 53.047125 462.900705 19.986555" +$ns_ at 210.000000 "$node_(5) setdest 57.929229 397.148381 10.924249" +$ns_ at 210.000000 "$node_(6) setdest 16.629371 90.783736 8.454918" +$ns_ at 210.000000 "$node_(7) setdest 170.109979 58.216735 12.809079" +$ns_ at 210.000000 "$node_(8) setdest 7.612919 9.823576 4.993745" +$ns_ at 210.000000 "$node_(9) setdest 144.943214 27.647777 19.809125" +$ns_ at 210.000000 "$node_(10) setdest 151.654646 8.351943 19.166615" + +$ns_ at 211.000000 "$node_(1) setdest 94.765433 428.045068 19.988722" +$ns_ at 211.000000 "$node_(2) setdest 169.488840 501.292361 12.241876" +$ns_ at 211.000000 "$node_(3) setdest 106.907423 400.126968 15.860507" +$ns_ at 211.000000 "$node_(4) setdest 49.262673 482.502882 19.964153" +$ns_ at 211.000000 "$node_(5) setdest 68.775716 403.409497 12.523891" +$ns_ at 211.000000 "$node_(6) setdest 18.101119 80.820271 10.071578" +$ns_ at 211.000000 "$node_(7) setdest 164.220636 71.367090 14.408894" +$ns_ at 211.000000 "$node_(8) setdest 8.398181 10.582628 1.092152" +$ns_ at 211.000000 "$node_(9) setdest 126.252440 20.979851 19.844553" +$ns_ at 211.000000 "$node_(10) setdest 150.671362 7.925031 1.071962" + +$ns_ at 212.000000 "$node_(1) setdest 79.694798 440.482197 19.539862" +$ns_ at 212.000000 "$node_(2) setdest 158.086396 509.199851 13.876027" +$ns_ at 212.000000 "$node_(3) setdest 93.891593 411.784206 17.472923" +$ns_ at 212.000000 "$node_(4) setdest 51.602803 502.238394 19.873768" +$ns_ at 212.000000 "$node_(5) setdest 79.786377 412.225707 14.105326" +$ns_ at 212.000000 "$node_(6) setdest 18.082994 69.169270 11.651015" +$ns_ at 212.000000 "$node_(7) setdest 156.452151 85.357032 16.002119" +$ns_ at 212.000000 "$node_(8) setdest 10.252534 12.543807 2.699046" +$ns_ at 212.000000 "$node_(9) setdest 106.444082 18.349570 19.982227" +$ns_ at 212.000000 "$node_(10) setdest 150.561643 9.241322 1.320856" + +$ns_ at 213.000000 "$node_(1) setdest 74.331248 439.156532 5.524948" +$ns_ at 213.000000 "$node_(2) setdest 147.109484 520.059714 15.441154" +$ns_ at 213.000000 "$node_(3) setdest 78.802986 423.444845 19.069257" +$ns_ at 213.000000 "$node_(4) setdest 64.714464 515.886813 18.926040" +$ns_ at 213.000000 "$node_(5) setdest 90.367257 423.848581 15.717704" +$ns_ at 213.000000 "$node_(6) setdest 15.231940 56.211466 13.267750" +$ns_ at 213.000000 "$node_(7) setdest 146.294162 99.736602 17.605590" +$ns_ at 213.000000 "$node_(8) setdest 12.764348 16.030199 4.296992" +$ns_ at 213.000000 "$node_(9) setdest 87.094251 13.355646 19.983875" +$ns_ at 213.000000 "$node_(10) setdest 150.265932 12.167440 2.941022" + +$ns_ at 214.000000 "$node_(1) setdest 75.387413 435.963971 3.362726" +$ns_ at 214.000000 "$node_(2) setdest 134.637490 531.303365 16.791972" +$ns_ at 214.000000 "$node_(3) setdest 63.111503 435.821747 19.985253" +$ns_ at 214.000000 "$node_(4) setdest 82.741655 517.689625 18.117113" +$ns_ at 214.000000 "$node_(5) setdest 100.190863 438.094362 17.304495" +$ns_ at 214.000000 "$node_(6) setdest 11.435316 41.831751 14.872477" +$ns_ at 214.000000 "$node_(7) setdest 134.405054 114.825240 19.209838" +$ns_ at 214.000000 "$node_(8) setdest 15.786921 21.097420 5.900227" +$ns_ at 214.000000 "$node_(9) setdest 67.206233 11.386313 19.985283" +$ns_ at 214.000000 "$node_(10) setdest 150.233247 16.709509 4.542186" + +$ns_ at 215.000000 "$node_(1) setdest 78.105600 431.749429 5.015068" +$ns_ at 215.000000 "$node_(2) setdest 119.211728 527.255675 15.947976" +$ns_ at 215.000000 "$node_(3) setdest 
48.423135 449.394750 19.999364" +$ns_ at 215.000000 "$node_(4) setdest 100.521552 517.808971 17.780297" +$ns_ at 215.000000 "$node_(5) setdest 109.423660 454.614792 18.925357" +$ns_ at 215.000000 "$node_(6) setdest 16.762073 29.826767 13.133696" +$ns_ at 215.000000 "$node_(7) setdest 121.638439 130.219167 19.998986" +$ns_ at 215.000000 "$node_(8) setdest 20.223833 27.143327 7.499279" +$ns_ at 215.000000 "$node_(9) setdest 47.635785 14.277190 19.782811" +$ns_ at 215.000000 "$node_(10) setdest 149.981298 22.846536 6.142197" + +$ns_ at 216.000000 "$node_(1) setdest 82.316111 426.647880 6.614696" +$ns_ at 216.000000 "$node_(2) setdest 110.714063 514.069971 15.686717" +$ns_ at 216.000000 "$node_(3) setdest 34.234170 463.437881 19.963373" +$ns_ at 216.000000 "$node_(4) setdest 119.756878 518.623367 19.252559" +$ns_ at 216.000000 "$node_(5) setdest 118.021417 472.643823 19.974168" +$ns_ at 216.000000 "$node_(6) setdest 24.439905 33.577223 8.544883" +$ns_ at 216.000000 "$node_(7) setdest 109.264356 145.928934 19.997868" +$ns_ at 216.000000 "$node_(8) setdest 26.436807 33.784570 9.094347" +$ns_ at 216.000000 "$node_(9) setdest 34.763093 28.005283 18.819318" +$ns_ at 216.000000 "$node_(10) setdest 149.466985 30.571752 7.742318" + +$ns_ at 217.000000 "$node_(1) setdest 87.966643 420.681429 8.217484" +$ns_ at 217.000000 "$node_(2) setdest 106.139891 497.477479 17.211446" +$ns_ at 217.000000 "$node_(3) setdest 24.871837 481.006527 19.907551" +$ns_ at 217.000000 "$node_(4) setdest 139.198045 521.043816 19.591262" +$ns_ at 217.000000 "$node_(5) setdest 125.144910 491.320562 19.989115" +$ns_ at 217.000000 "$node_(6) setdest 29.906717 42.714611 10.647906" +$ns_ at 217.000000 "$node_(7) setdest 95.624683 160.464758 19.933160" +$ns_ at 217.000000 "$node_(8) setdest 35.012079 40.154365 10.682209" +$ns_ at 217.000000 "$node_(9) setdest 26.794592 44.498695 18.317469" +$ns_ at 217.000000 "$node_(10) setdest 148.822317 39.891733 9.342250" + +$ns_ at 218.000000 "$node_(1) setdest 95.567803 414.482141 9.808608" +$ns_ at 218.000000 "$node_(2) setdest 101.751650 479.151427 18.844119" +$ns_ at 218.000000 "$node_(3) setdest 21.772410 500.718610 19.954264" +$ns_ at 218.000000 "$node_(4) setdest 156.440799 529.170085 19.061711" +$ns_ at 218.000000 "$node_(5) setdest 136.804828 507.434328 19.889875" +$ns_ at 218.000000 "$node_(6) setdest 34.936287 53.901566 12.265583" +$ns_ at 218.000000 "$node_(7) setdest 77.559907 162.800620 18.215168" +$ns_ at 218.000000 "$node_(8) setdest 46.265277 45.048070 12.271219" +$ns_ at 218.000000 "$node_(9) setdest 18.369496 61.553834 19.022618" +$ns_ at 218.000000 "$node_(10) setdest 148.315037 50.819341 10.939376" + +$ns_ at 219.000000 "$node_(1) setdest 105.384019 408.666566 11.409602" +$ns_ at 219.000000 "$node_(2) setdest 105.257830 460.439311 19.037767" +$ns_ at 219.000000 "$node_(3) setdest 20.836622 520.669754 19.973078" +$ns_ at 219.000000 "$node_(4) setdest 169.726291 541.962081 18.442870" +$ns_ at 219.000000 "$node_(5) setdest 151.392428 509.951508 14.803185" +$ns_ at 219.000000 "$node_(6) setdest 39.985524 66.816926 13.867275" +$ns_ at 219.000000 "$node_(7) setdest 65.983518 149.611440 17.548997" +$ns_ at 219.000000 "$node_(8) setdest 60.002002 46.833409 13.852258" +$ns_ at 219.000000 "$node_(9) setdest 12.671632 79.346833 18.683052" +$ns_ at 219.000000 "$node_(10) setdest 149.329428 63.307247 12.529038" + +$ns_ at 220.000000 "$node_(1) setdest 117.372851 403.595980 13.017025" +$ns_ at 220.000000 "$node_(2) setdest 114.312002 443.838623 18.909280" +$ns_ at 220.000000 "$node_(3) 
setdest 32.633983 533.959274 17.770455" +$ns_ at 220.000000 "$node_(4) setdest 186.809428 545.300049 17.406194" +$ns_ at 220.000000 "$node_(5) setdest 154.136381 499.903314 10.416116" +$ns_ at 220.000000 "$node_(6) setdest 38.966285 82.245893 15.462596" +$ns_ at 220.000000 "$node_(7) setdest 74.429529 163.526860 16.278023" +$ns_ at 220.000000 "$node_(8) setdest 62.748658 50.955781 4.953592" +$ns_ at 220.000000 "$node_(9) setdest 16.722542 97.580963 18.678688" +$ns_ at 220.000000 "$node_(10) setdest 147.564335 77.337282 14.140630" + +$ns_ at 221.000000 "$node_(1) setdest 130.510749 397.211113 14.607220" +$ns_ at 221.000000 "$node_(2) setdest 126.001789 428.533431 19.258765" +$ns_ at 221.000000 "$node_(3) setdest 47.085796 524.976442 17.016055" +$ns_ at 221.000000 "$node_(4) setdest 195.470877 545.496075 8.663667" +$ns_ at 221.000000 "$node_(5) setdest 153.537843 487.668110 12.249835" +$ns_ at 221.000000 "$node_(6) setdest 37.659369 99.258980 17.063211" +$ns_ at 221.000000 "$node_(7) setdest 81.550077 180.240654 18.167363" +$ns_ at 221.000000 "$node_(8) setdest 62.598894 57.433367 6.479317" +$ns_ at 221.000000 "$node_(9) setdest 23.470822 116.374653 19.968527" +$ns_ at 221.000000 "$node_(10) setdest 144.701469 92.814988 15.740247" + +$ns_ at 222.000000 "$node_(1) setdest 145.829089 392.029894 16.170856" +$ns_ at 222.000000 "$node_(2) setdest 140.217574 415.094347 19.562657" +$ns_ at 222.000000 "$node_(3) setdest 60.241100 516.107987 15.865420" +$ns_ at 222.000000 "$node_(4) setdest 195.175480 544.049314 1.476611" +$ns_ at 222.000000 "$node_(5) setdest 153.773151 473.824698 13.845411" +$ns_ at 222.000000 "$node_(6) setdest 36.836689 117.903151 18.662312" +$ns_ at 222.000000 "$node_(7) setdest 88.469997 198.652396 19.669203" +$ns_ at 222.000000 "$node_(8) setdest 62.213896 65.503403 8.079215" +$ns_ at 222.000000 "$node_(9) setdest 31.284840 134.781911 19.997150" +$ns_ at 222.000000 "$node_(10) setdest 140.588071 109.660694 17.340642" + +$ns_ at 223.000000 "$node_(1) setdest 163.506752 392.575980 17.686096" +$ns_ at 223.000000 "$node_(2) setdest 154.063643 401.028161 19.737558" +$ns_ at 223.000000 "$node_(3) setdest 74.782464 518.041618 14.669363" +$ns_ at 223.000000 "$node_(4) setdest 194.480877 541.034819 3.093486" +$ns_ at 223.000000 "$node_(5) setdest 154.461775 458.388033 15.452018" +$ns_ at 223.000000 "$node_(6) setdest 36.889515 137.817056 19.913975" +$ns_ at 223.000000 "$node_(7) setdest 93.285752 218.049531 19.986004" +$ns_ at 223.000000 "$node_(8) setdest 61.558857 75.160595 9.679381" +$ns_ at 223.000000 "$node_(9) setdest 40.120888 152.722334 19.998363" +$ns_ at 223.000000 "$node_(10) setdest 135.370957 127.870138 18.942073" + +$ns_ at 224.000000 "$node_(1) setdest 181.923802 396.004220 18.733408" +$ns_ at 224.000000 "$node_(2) setdest 167.390779 386.339495 19.833544" +$ns_ at 224.000000 "$node_(3) setdest 88.812375 526.590208 16.429145" +$ns_ at 224.000000 "$node_(4) setdest 193.757794 536.395966 4.694870" +$ns_ at 224.000000 "$node_(5) setdest 155.534575 441.369278 17.052534" +$ns_ at 224.000000 "$node_(6) setdest 37.937125 157.787942 19.998344" +$ns_ at 224.000000 "$node_(7) setdest 95.263234 237.932451 19.981015" +$ns_ at 224.000000 "$node_(8) setdest 60.618285 86.400311 11.279002" +$ns_ at 224.000000 "$node_(9) setdest 49.705686 170.274483 19.998657" +$ns_ at 224.000000 "$node_(10) setdest 129.766491 147.053821 19.985588" + +$ns_ at 225.000000 "$node_(1) setdest 169.232343 395.048481 12.727394" +$ns_ at 225.000000 "$node_(2) setdest 178.021040 369.992559 19.499353" +$ns_ at 
225.000000 "$node_(3) setdest 105.697669 532.818640 17.997403" +$ns_ at 225.000000 "$node_(4) setdest 193.626695 530.109972 6.287361" +$ns_ at 225.000000 "$node_(5) setdest 157.030780 422.777712 18.651674" +$ns_ at 225.000000 "$node_(6) setdest 39.945670 177.684547 19.997729" +$ns_ at 225.000000 "$node_(7) setdest 93.862395 257.856020 19.972755" +$ns_ at 225.000000 "$node_(8) setdest 59.031456 99.180711 12.878534" +$ns_ at 225.000000 "$node_(9) setdest 59.205275 187.873059 19.998802" +$ns_ at 225.000000 "$node_(10) setdest 124.063931 166.223553 19.999945" + +$ns_ at 226.000000 "$node_(1) setdest 156.800156 391.440287 12.945205" +$ns_ at 226.000000 "$node_(2) setdest 181.618992 357.273651 13.218014" +$ns_ at 226.000000 "$node_(3) setdest 124.505138 538.184199 19.557865" +$ns_ at 226.000000 "$node_(4) setdest 193.643917 522.227082 7.882909" +$ns_ at 226.000000 "$node_(5) setdest 158.657327 402.933793 19.910469" +$ns_ at 226.000000 "$node_(6) setdest 43.108409 197.429911 19.997057" +$ns_ at 226.000000 "$node_(7) setdest 88.502871 277.089742 19.966486" +$ns_ at 226.000000 "$node_(8) setdest 56.808037 113.487980 14.479003" +$ns_ at 226.000000 "$node_(9) setdest 67.791820 205.933327 19.997551" +$ns_ at 226.000000 "$node_(10) setdest 117.964834 185.269423 19.998604" + +$ns_ at 227.000000 "$node_(1) setdest 142.278798 390.812500 14.534922" +$ns_ at 227.000000 "$node_(2) setdest 180.809611 361.122439 3.932972" +$ns_ at 227.000000 "$node_(3) setdest 139.353761 542.830733 15.558660" +$ns_ at 227.000000 "$node_(4) setdest 190.918757 513.212438 9.417553" +$ns_ at 227.000000 "$node_(5) setdest 158.201943 382.958722 19.980261" +$ns_ at 227.000000 "$node_(6) setdest 47.461748 216.947467 19.997164" +$ns_ at 227.000000 "$node_(7) setdest 79.883191 295.123722 19.988080" +$ns_ at 227.000000 "$node_(8) setdest 54.120836 129.341065 16.079222" +$ns_ at 227.000000 "$node_(9) setdest 75.603470 224.344458 19.999791" +$ns_ at 227.000000 "$node_(10) setdest 111.317961 204.132478 19.999894" + +$ns_ at 228.000000 "$node_(1) setdest 126.803939 394.954915 16.019702" +$ns_ at 228.000000 "$node_(2) setdest 181.453885 366.636563 5.551636" +$ns_ at 228.000000 "$node_(3) setdest 139.679950 541.809477 1.072084" +$ns_ at 228.000000 "$node_(4) setdest 182.221684 506.884617 10.755483" +$ns_ at 228.000000 "$node_(5) setdest 147.532398 367.471383 18.806830" +$ns_ at 228.000000 "$node_(6) setdest 53.185240 236.106169 19.995355" +$ns_ at 228.000000 "$node_(7) setdest 69.135376 311.964610 19.978263" +$ns_ at 228.000000 "$node_(8) setdest 50.220110 146.582020 17.676713" +$ns_ at 228.000000 "$node_(9) setdest 83.229589 242.832888 19.999493" +$ns_ at 228.000000 "$node_(10) setdest 104.487873 222.930048 19.999968" + +$ns_ at 229.000000 "$node_(1) setdest 112.719153 405.732941 17.735474" +$ns_ at 229.000000 "$node_(2) setdest 184.190891 373.250673 7.158048" +$ns_ at 229.000000 "$node_(3) setdest 138.130687 540.190583 2.240767" +$ns_ at 229.000000 "$node_(4) setdest 170.893314 511.903508 12.390368" +$ns_ at 229.000000 "$node_(5) setdest 131.769906 373.894402 17.020909" +$ns_ at 229.000000 "$node_(6) setdest 60.421096 254.745402 19.994464" +$ns_ at 229.000000 "$node_(7) setdest 55.227342 326.293985 19.969086" +$ns_ at 229.000000 "$node_(8) setdest 46.727716 165.535396 19.272448" +$ns_ at 229.000000 "$node_(9) setdest 90.276455 261.550003 19.999718" +$ns_ at 229.000000 "$node_(10) setdest 98.003508 241.846149 19.996647" + +$ns_ at 230.000000 "$node_(1) setdest 96.091336 415.488575 19.278400" +$ns_ at 230.000000 "$node_(2) setdest 183.469209 
381.617058 8.397453" +$ns_ at 230.000000 "$node_(3) setdest 135.135321 537.790149 3.838527" +$ns_ at 230.000000 "$node_(4) setdest 159.237705 520.178185 14.294178" +$ns_ at 230.000000 "$node_(5) setdest 126.003487 389.332246 16.479642" +$ns_ at 230.000000 "$node_(6) setdest 69.231636 272.693606 19.994091" +$ns_ at 230.000000 "$node_(7) setdest 39.927801 339.173368 19.998862" +$ns_ at 230.000000 "$node_(8) setdest 44.917086 185.446450 19.993211" +$ns_ at 230.000000 "$node_(9) setdest 96.877144 280.425405 19.996247" +$ns_ at 230.000000 "$node_(10) setdest 92.829898 261.161671 19.996390" + +$ns_ at 231.000000 "$node_(1) setdest 76.542960 419.096553 19.878544" +$ns_ at 231.000000 "$node_(2) setdest 176.455767 382.214395 7.038833" +$ns_ at 231.000000 "$node_(3) setdest 130.270647 535.384863 5.426828" +$ns_ at 231.000000 "$node_(4) setdest 147.767236 531.122795 15.854216" +$ns_ at 231.000000 "$node_(5) setdest 120.093704 403.690952 15.527329" +$ns_ at 231.000000 "$node_(6) setdest 79.612346 289.781035 19.993483" +$ns_ at 231.000000 "$node_(7) setdest 24.003817 350.585990 19.591355" +$ns_ at 231.000000 "$node_(8) setdest 44.961975 205.440954 19.994554" +$ns_ at 231.000000 "$node_(9) setdest 99.747344 300.145262 19.927640" +$ns_ at 231.000000 "$node_(10) setdest 89.013268 280.789285 19.995247" + +$ns_ at 232.000000 "$node_(1) setdest 57.010907 414.931045 19.971292" +$ns_ at 232.000000 "$node_(2) setdest 170.625082 375.887436 8.603912" +$ns_ at 232.000000 "$node_(3) setdest 123.401898 533.915668 7.024119" +$ns_ at 232.000000 "$node_(4) setdest 133.831007 536.334199 14.878750" +$ns_ at 232.000000 "$node_(5) setdest 112.784511 418.983603 16.949616" +$ns_ at 232.000000 "$node_(6) setdest 91.565883 305.808159 19.993893" +$ns_ at 232.000000 "$node_(7) setdest 24.991666 366.397074 15.841913" +$ns_ at 232.000000 "$node_(8) setdest 46.668910 225.364489 19.996521" +$ns_ at 232.000000 "$node_(9) setdest 95.297668 319.497789 19.857489" +$ns_ at 232.000000 "$node_(10) setdest 86.767611 300.658389 19.995606" + +$ns_ at 233.000000 "$node_(1) setdest 39.647141 405.570459 19.726149" +$ns_ at 233.000000 "$node_(2) setdest 163.274148 368.970881 10.093313" +$ns_ at 233.000000 "$node_(3) setdest 114.780291 533.380993 8.638170" +$ns_ at 233.000000 "$node_(4) setdest 131.539148 532.333883 4.610330" +$ns_ at 233.000000 "$node_(5) setdest 104.102864 435.367253 18.541710" +$ns_ at 233.000000 "$node_(6) setdest 103.303347 321.997575 19.996631" +$ns_ at 233.000000 "$node_(7) setdest 30.412033 380.296306 14.918748" +$ns_ at 233.000000 "$node_(8) setdest 49.123918 245.211852 19.998622" +$ns_ at 233.000000 "$node_(9) setdest 83.212825 335.267364 19.867635" +$ns_ at 233.000000 "$node_(10) setdest 86.025912 320.638077 19.993450" + +$ns_ at 234.000000 "$node_(1) setdest 27.316123 390.616503 19.382332" +$ns_ at 234.000000 "$node_(2) setdest 152.190026 371.188100 11.303708" +$ns_ at 234.000000 "$node_(3) setdest 104.559734 533.881666 10.232813" +$ns_ at 234.000000 "$node_(4) setdest 134.437047 528.843275 4.536757" +$ns_ at 234.000000 "$node_(5) setdest 99.490371 454.660971 19.837405" +$ns_ at 234.000000 "$node_(6) setdest 113.260598 339.320990 19.981180" +$ns_ at 234.000000 "$node_(7) setdest 34.878867 395.284625 15.639767" +$ns_ at 234.000000 "$node_(8) setdest 52.561370 264.911186 19.996995" +$ns_ at 234.000000 "$node_(9) setdest 66.066130 345.377223 19.905235" +$ns_ at 234.000000 "$node_(10) setdest 87.285070 340.587013 19.988635" + +$ns_ at 235.000000 "$node_(1) setdest 20.218846 372.865040 19.117682" +$ns_ at 235.000000 
"$node_(2) setdest 142.051256 379.923957 13.383193" +$ns_ at 235.000000 "$node_(3) setdest 92.840981 535.571717 11.839993" +$ns_ at 235.000000 "$node_(4) setdest 138.217326 524.011125 6.135160" +$ns_ at 235.000000 "$node_(5) setdest 91.141238 472.512581 19.707562" +$ns_ at 235.000000 "$node_(6) setdest 120.629818 357.902574 19.989513" +$ns_ at 235.000000 "$node_(7) setdest 37.667906 412.302677 17.245082" +$ns_ at 235.000000 "$node_(8) setdest 57.399640 284.312240 19.995243" +$ns_ at 235.000000 "$node_(9) setdest 47.135371 351.684805 19.953928" +$ns_ at 235.000000 "$node_(10) setdest 91.171326 360.189136 19.983649" + +$ns_ at 236.000000 "$node_(1) setdest 24.598928 356.149629 17.279760" +$ns_ at 236.000000 "$node_(2) setdest 133.409494 392.148552 14.970664" +$ns_ at 236.000000 "$node_(3) setdest 79.429653 536.017371 13.418731" +$ns_ at 236.000000 "$node_(4) setdest 141.644180 517.087121 7.725617" +$ns_ at 236.000000 "$node_(5) setdest 82.659254 489.792059 19.249010" +$ns_ at 236.000000 "$node_(6) setdest 124.911845 377.382671 19.945173" +$ns_ at 236.000000 "$node_(7) setdest 36.308163 431.068874 18.815394" +$ns_ at 236.000000 "$node_(8) setdest 63.770885 303.263885 19.993940" +$ns_ at 236.000000 "$node_(9) setdest 27.530214 350.712658 19.629245" +$ns_ at 236.000000 "$node_(10) setdest 98.063978 378.939773 19.977363" + +$ns_ at 237.000000 "$node_(1) setdest 40.321183 353.037350 16.027339" +$ns_ at 237.000000 "$node_(2) setdest 125.329380 406.653920 16.604033" +$ns_ at 237.000000 "$node_(3) setdest 64.843715 532.584356 14.984497" +$ns_ at 237.000000 "$node_(4) setdest 144.634350 508.245730 9.333344" +$ns_ at 237.000000 "$node_(5) setdest 82.909106 508.254419 18.464050" +$ns_ at 237.000000 "$node_(6) setdest 118.133725 394.817452 18.706002" +$ns_ at 237.000000 "$node_(7) setdest 29.233267 449.643599 19.876483" +$ns_ at 237.000000 "$node_(8) setdest 71.818285 321.564343 19.991683" +$ns_ at 237.000000 "$node_(9) setdest 29.921966 351.885394 2.663792" +$ns_ at 237.000000 "$node_(10) setdest 108.291461 396.089794 19.968090" + +$ns_ at 238.000000 "$node_(1) setdest 45.596210 362.703271 11.011628" +$ns_ at 238.000000 "$node_(2) setdest 116.291703 422.438321 18.188648" +$ns_ at 238.000000 "$node_(3) setdest 50.905662 523.641287 16.560429" +$ns_ at 238.000000 "$node_(4) setdest 147.083186 497.586315 10.937090" +$ns_ at 238.000000 "$node_(5) setdest 86.823206 527.075434 19.223704" +$ns_ at 238.000000 "$node_(6) setdest 100.295091 394.820595 17.838634" +$ns_ at 238.000000 "$node_(7) setdest 14.754694 462.701263 19.496965" +$ns_ at 238.000000 "$node_(8) setdest 82.106778 338.696777 19.984329" +$ns_ at 238.000000 "$node_(9) setdest 29.384525 352.572171 0.872069" +$ns_ at 238.000000 "$node_(10) setdest 121.581838 411.004409 19.976983" + +$ns_ at 239.000000 "$node_(1) setdest 43.050325 374.782260 12.344371" +$ns_ at 239.000000 "$node_(2) setdest 109.603123 440.967025 19.698985" +$ns_ at 239.000000 "$node_(3) setdest 40.281591 508.934509 18.142774" +$ns_ at 239.000000 "$node_(4) setdest 150.186991 485.439022 12.537557" +$ns_ at 239.000000 "$node_(5) setdest 88.363743 540.891623 13.901811" +$ns_ at 239.000000 "$node_(6) setdest 83.872867 392.609649 16.570387" +$ns_ at 239.000000 "$node_(7) setdest 2.120167 462.131925 12.647348" +$ns_ at 239.000000 "$node_(8) setdest 94.881055 354.058754 19.979301" +$ns_ at 239.000000 "$node_(9) setdest 28.147365 354.727230 2.484924" +$ns_ at 239.000000 "$node_(10) setdest 137.437686 423.128242 19.959841" + +$ns_ at 240.000000 "$node_(1) setdest 38.556092 388.011135 13.971445" 
+$ns_ at 240.000000 "$node_(2) setdest 102.994537 459.843356 19.999731" +$ns_ at 240.000000 "$node_(3) setdest 34.511155 490.212704 19.590914" +$ns_ at 240.000000 "$node_(4) setdest 153.699183 471.745840 14.136432" +$ns_ at 240.000000 "$node_(5) setdest 87.677276 538.921027 2.086741" +$ns_ at 240.000000 "$node_(6) setdest 67.844685 393.430187 16.049172" +$ns_ at 240.000000 "$node_(7) setdest 2.807388 458.762773 3.438526" +$ns_ at 240.000000 "$node_(8) setdest 109.948493 367.184167 19.982596" +$ns_ at 240.000000 "$node_(9) setdest 26.581030 358.478450 4.065102" +$ns_ at 240.000000 "$node_(10) setdest 155.787822 430.945677 19.945921" + +$ns_ at 241.000000 "$node_(1) setdest 36.249641 403.358559 15.519767" +$ns_ at 241.000000 "$node_(2) setdest 95.054734 478.197050 19.997464" +$ns_ at 241.000000 "$node_(3) setdest 38.588756 470.902314 19.736210" +$ns_ at 241.000000 "$node_(4) setdest 158.039693 456.618076 15.738147" +$ns_ at 241.000000 "$node_(5) setdest 85.810743 535.721185 3.704447" +$ns_ at 241.000000 "$node_(6) setdest 50.254348 394.635888 17.631610" +$ns_ at 241.000000 "$node_(7) setdest 5.492504 454.380668 5.139327" +$ns_ at 241.000000 "$node_(8) setdest 125.740547 379.432191 19.985071" +$ns_ at 241.000000 "$node_(9) setdest 26.686133 364.112657 5.635187" +$ns_ at 241.000000 "$node_(10) setdest 175.328309 429.760207 19.576413" + +$ns_ at 242.000000 "$node_(1) setdest 37.522192 420.480349 17.169015" +$ns_ at 242.000000 "$node_(2) setdest 90.631305 497.645680 19.945323" +$ns_ at 242.000000 "$node_(3) setdest 50.739464 455.055826 19.968748" +$ns_ at 242.000000 "$node_(4) setdest 162.672725 439.910564 17.337991" +$ns_ at 242.000000 "$node_(5) setdest 82.862827 531.310088 5.305468" +$ns_ at 242.000000 "$node_(6) setdest 31.030156 394.391413 19.225746" +$ns_ at 242.000000 "$node_(7) setdest 9.560885 449.014231 6.734268" +$ns_ at 242.000000 "$node_(8) setdest 138.863619 394.470220 19.958891" +$ns_ at 242.000000 "$node_(9) setdest 30.044622 370.532583 7.245336" +$ns_ at 242.000000 "$node_(10) setdest 184.068040 414.234527 17.816555" + +$ns_ at 243.000000 "$node_(1) setdest 37.985476 439.243636 18.769005" +$ns_ at 243.000000 "$node_(2) setdest 87.433717 517.377092 19.988827" +$ns_ at 243.000000 "$node_(3) setdest 62.449094 438.866145 19.980520" +$ns_ at 243.000000 "$node_(4) setdest 167.366807 421.571460 18.930323" +$ns_ at 243.000000 "$node_(5) setdest 79.256025 525.421283 6.905580" +$ns_ at 243.000000 "$node_(6) setdest 11.506341 396.543000 19.642013" +$ns_ at 243.000000 "$node_(7) setdest 15.539098 443.203051 8.337197" +$ns_ at 243.000000 "$node_(8) setdest 147.973211 412.218070 19.949207" +$ns_ at 243.000000 "$node_(9) setdest 36.059140 377.050285 8.868758" +$ns_ at 243.000000 "$node_(10) setdest 177.685561 398.128349 17.324694" + +$ns_ at 244.000000 "$node_(1) setdest 39.206090 459.137583 19.931358" +$ns_ at 244.000000 "$node_(2) setdest 83.154377 536.500088 19.595961" +$ns_ at 244.000000 "$node_(3) setdest 78.636311 427.281132 19.905741" +$ns_ at 244.000000 "$node_(4) setdest 170.390886 401.818715 19.982892" +$ns_ at 244.000000 "$node_(5) setdest 74.252665 518.545692 8.503374" +$ns_ at 244.000000 "$node_(6) setdest 7.973051 410.375276 14.276414" +$ns_ at 244.000000 "$node_(7) setdest 23.323807 437.026222 9.937551" +$ns_ at 244.000000 "$node_(8) setdest 153.374916 431.467556 19.993027" +$ns_ at 244.000000 "$node_(9) setdest 44.032723 383.856539 10.483469" +$ns_ at 244.000000 "$node_(10) setdest 172.718843 382.551826 16.349201" + +$ns_ at 245.000000 "$node_(1) setdest 34.618867 478.486864 
19.885606" +$ns_ at 245.000000 "$node_(2) setdest 84.300844 531.592510 5.039713" +$ns_ at 245.000000 "$node_(3) setdest 94.940590 415.825326 19.926490" +$ns_ at 245.000000 "$node_(4) setdest 172.304961 394.881002 7.196912" +$ns_ at 245.000000 "$node_(5) setdest 68.133382 510.504064 10.105117" +$ns_ at 245.000000 "$node_(6) setdest 17.059711 420.972002 13.959154" +$ns_ at 245.000000 "$node_(7) setdest 32.971174 430.706107 11.533236" +$ns_ at 245.000000 "$node_(8) setdest 157.601063 450.999458 19.983881" +$ns_ at 245.000000 "$node_(9) setdest 53.145590 391.780127 12.075909" +$ns_ at 245.000000 "$node_(10) setdest 169.004729 365.196387 17.748406" + +$ns_ at 246.000000 "$node_(1) setdest 23.314948 494.915448 19.941839" +$ns_ at 246.000000 "$node_(2) setdest 80.388414 526.333405 6.554791" +$ns_ at 246.000000 "$node_(3) setdest 105.925896 399.190915 19.934407" +$ns_ at 246.000000 "$node_(4) setdest 172.261920 400.073454 5.192630" +$ns_ at 246.000000 "$node_(5) setdest 60.662128 501.492990 11.705516" +$ns_ at 246.000000 "$node_(6) setdest 25.067644 434.285222 15.536049" +$ns_ at 246.000000 "$node_(7) setdest 44.416211 424.250797 13.140012" +$ns_ at 246.000000 "$node_(8) setdest 157.547382 470.971881 19.972495" +$ns_ at 246.000000 "$node_(9) setdest 61.816142 402.356439 13.676142" +$ns_ at 246.000000 "$node_(10) setdest 159.697284 354.841814 13.922848" + +$ns_ at 247.000000 "$node_(1) setdest 11.471594 510.982443 19.960295" +$ns_ at 247.000000 "$node_(2) setdest 73.667647 521.515333 8.269373" +$ns_ at 247.000000 "$node_(3) setdest 115.687404 381.745967 19.990329" +$ns_ at 247.000000 "$node_(4) setdest 172.542353 406.883343 6.815660" +$ns_ at 247.000000 "$node_(5) setdest 53.043729 490.595790 13.296201" +$ns_ at 247.000000 "$node_(6) setdest 35.532652 447.799237 17.092249" +$ns_ at 247.000000 "$node_(7) setdest 57.583686 417.635230 14.735946" +$ns_ at 247.000000 "$node_(8) setdest 155.807521 490.895157 19.999102" +$ns_ at 247.000000 "$node_(9) setdest 70.586313 414.874960 15.284936" +$ns_ at 247.000000 "$node_(10) setdest 150.705382 353.022498 9.174106" + +$ns_ at 248.000000 "$node_(1) setdest 6.250205 530.216206 19.929890" +$ns_ at 248.000000 "$node_(2) setdest 64.550481 517.738746 9.868400" +$ns_ at 248.000000 "$node_(3) setdest 129.575043 368.902786 18.915967" +$ns_ at 248.000000 "$node_(4) setdest 172.766820 415.295599 8.415251" +$ns_ at 248.000000 "$node_(5) setdest 45.664917 477.649441 14.901504" +$ns_ at 248.000000 "$node_(6) setdest 49.236481 460.622756 18.767994" +$ns_ at 248.000000 "$node_(7) setdest 72.542200 411.059556 16.340032" +$ns_ at 248.000000 "$node_(8) setdest 154.151119 510.826019 19.999573" +$ns_ at 248.000000 "$node_(9) setdest 80.048453 428.859710 16.885062" +$ns_ at 248.000000 "$node_(10) setdest 150.693495 353.348529 0.326248" + +$ns_ at 249.000000 "$node_(1) setdest 15.633937 545.116140 17.608591" +$ns_ at 249.000000 "$node_(2) setdest 53.897778 513.466205 11.477573" +$ns_ at 249.000000 "$node_(3) setdest 147.143605 364.268821 18.169425" +$ns_ at 249.000000 "$node_(4) setdest 172.715108 425.310616 10.015150" +$ns_ at 249.000000 "$node_(5) setdest 40.276620 462.090927 16.465147" +$ns_ at 249.000000 "$node_(6) setdest 66.591813 470.358500 19.899554" +$ns_ at 249.000000 "$node_(7) setdest 89.011277 403.945945 17.939731" +$ns_ at 249.000000 "$node_(8) setdest 153.637467 522.766334 11.951358" +$ns_ at 249.000000 "$node_(9) setdest 90.366944 444.196722 18.484999" +$ns_ at 249.000000 "$node_(10) setdest 152.055411 354.274810 1.647062" + +$ns_ at 250.000000 "$node_(1) setdest 
32.767440 547.185729 17.258045" +$ns_ at 250.000000 "$node_(2) setdest 42.327370 507.405581 13.061605" +$ns_ at 250.000000 "$node_(3) setdest 166.110136 361.566966 19.158010" +$ns_ at 250.000000 "$node_(4) setdest 171.517760 436.860424 11.611706" +$ns_ at 250.000000 "$node_(5) setdest 39.663162 444.108944 17.992445" +$ns_ at 250.000000 "$node_(6) setdest 82.640473 482.268268 19.985046" +$ns_ at 250.000000 "$node_(7) setdest 106.691634 395.709099 19.504889" +$ns_ at 250.000000 "$node_(8) setdest 156.539558 511.951326 11.197613" +$ns_ at 250.000000 "$node_(9) setdest 102.661633 459.753615 19.828673" +$ns_ at 250.000000 "$node_(10) setdest 155.016298 355.644768 3.262459" + diff --git a/gui/configs/sample5-mgen.imn b/gui/configs/sample5-mgen.imn new file mode 100644 index 00000000..d80a58a6 --- /dev/null +++ b/gui/configs/sample5-mgen.imn @@ -0,0 +1,127 @@ +node n1 { + type router + model router + network-config { + hostname n1 + ! + interface eth0 + ip address 10.0.0.2/24 + ipv6 address a:0::2/64 + ! + router ospf + router-id 10.0.0.2 + network 10.0.0.0/24 area 0 + ! + router ospf6 + router-id 10.0.0.2 + interface eth0 area 0.0.0.0 + ! + } + canvas c1 + iconcoords {312.0 120.0} + labelcoords {312.0 148.0} + interface-peer {eth0 n2} + custom-config { + custom-config-id service:UserDefined:mgen.sh + custom-command mgen.sh + config { + #!/bin/sh + SCRIPTDIR=$SESSION_DIR + LOGDIR=/var/log + if [ `uname` = "Linux" ]; then + cd $SCRIPTDIR + else + cd /tmp/e0_`hostname` + fi + ( + cat << 'EOF' + # mgen receiver script + 15.0 LISTEN UDP 5001 + EOF + ) > recv.mgn + mgen input recv.mgn output $LOGDIR/mgen.log > /dev/null 2> /dev/null < /dev/null & + } + } + custom-config { + custom-config-id service:UserDefined + custom-command UserDefined + config { + files=('mgen.sh', ) + startidx=35 + cmdup=('sh mgen.sh', ) + } + } + services {zebra OSPFv2 OSPFv3 IPForward UserDefined} +} + +node n2 { + type router + model router + network-config { + hostname n2 + ! + interface eth0 + ip address 10.0.0.1/24 + ipv6 address a:0::1/64 + ! + } + canvas c1 + iconcoords {72.0 48.0} + labelcoords {72.0 76.0} + interface-peer {eth0 n1} + custom-config { + custom-config-id service:UserDefined + custom-command UserDefined + config { + files=('mgen.sh', ) + startidx=35 + cmdup=('sh mgen.sh', ) + } + } + custom-config { + custom-config-id service:UserDefined:mgen.sh + custom-command mgen.sh + config { + #!/bin/sh + HN=`hostname` + SCRIPTDIR=$SESSION_DIR + LOGDIR=/var/log + + cd $SCRIPTDIR + ( + cat << 'EOF' + # mgen sender script: send UDP traffic to UDP port 5001 after 15 seconds + 15.0 ON 1 UDP SRC 5000 DST 10.0.0.2/5001 PERIODIC [1 4096] + EOF + ) > send_$HN.mgn + mgen input send_$HN.mgn output $LOGDIR/mgen_$HN.log > /dev/null 2> /dev/null < /dev/null & + } + } + services {zebra OSPFv2 OSPFv3 IPForward UserDefined} +} + +link l1 { + nodes {n2 n1} + bandwidth 0 +} + +canvas c1 { + name {Canvas1} +} + +option global { + interface_names no + ip_addresses yes + ipv6_addresses yes + node_labels yes + link_labels yes + show_api no + background_images no + annotations yes + grid yes + traffic_start 0 +} + +option session { +} + diff --git a/gui/configs/sample6-emane-rfpipe.imn b/gui/configs/sample6-emane-rfpipe.imn new file mode 100644 index 00000000..9e0cc045 --- /dev/null +++ b/gui/configs/sample6-emane-rfpipe.imn @@ -0,0 +1,271 @@ +node n1 { + type router + model mdr + network-config { + hostname n1 + ! + interface eth0 + ip address 10.0.0.1/32 + ipv6 address a:0::1/128 + ! 
+ } + iconcoords {263.148836492 76.94184084899999} + labelcoords {263.148836492 100.94184084899999} + canvas c1 + interface-peer {eth0 n11} +} + +node n2 { + type router + model mdr + network-config { + hostname n2 + ! + interface eth0 + ip address 10.0.0.2/32 + ipv6 address a:0::2/128 + ! + } + iconcoords {184.35166313500002 532.524009667} + labelcoords {184.35166313500002 556.524009667} + canvas c1 + interface-peer {eth0 n11} +} + +node n3 { + type router + model mdr + network-config { + hostname n3 + ! + interface eth0 + ip address 10.0.0.3/32 + ipv6 address a:0::3/128 + ! + } + iconcoords {121.17243156500001 313.104176223} + labelcoords {121.17243156500001 337.104176223} + canvas c1 + interface-peer {eth0 n11} +} + +node n4 { + type router + model mdr + network-config { + hostname n4 + ! + interface eth0 + ip address 10.0.0.4/32 + ipv6 address a:0::4/128 + ! + } + iconcoords {443.031505695 586.805480735} + labelcoords {443.031505695 610.805480735} + canvas c1 + interface-peer {eth0 n11} +} + +node n5 { + type router + model mdr + network-config { + hostname n5 + ! + interface eth0 + ip address 10.0.0.5/32 + ipv6 address a:0::5/128 + ! + } + iconcoords {548.817758443 209.207353139} + labelcoords {548.817758443 233.207353139} + canvas c1 + interface-peer {eth0 n11} +} + +node n6 { + type router + model mdr + network-config { + hostname n6 + ! + interface eth0 + ip address 10.0.0.6/32 + ipv6 address a:0::6/128 + ! + } + iconcoords {757.062318769 61.533941783} + labelcoords {757.062318769 85.533941783} + canvas c1 + interface-peer {eth0 n11} +} + +node n7 { + type router + model mdr + network-config { + hostname n7 + ! + interface eth0 + ip address 10.0.0.7/32 + ipv6 address a:0::7/128 + ! + } + iconcoords {778.142667152 489.227596061} + labelcoords {778.142667152 513.227596061} + canvas c1 + interface-peer {eth0 n11} +} + +node n8 { + type router + model mdr + network-config { + hostname n8 + ! + interface eth0 + ip address 10.0.0.8/32 + ipv6 address a:0::8/128 + ! + } + iconcoords {93.895107521 135.228007484} + labelcoords {93.895107521 159.228007484} + canvas c1 + interface-peer {eth0 n11} +} + +node n9 { + type router + model mdr + network-config { + hostname n9 + ! + interface eth0 + ip address 10.0.0.9/32 + ipv6 address a:0::9/128 + ! + } + iconcoords {528.693178831 84.9814304098} + labelcoords {528.693178831 108.9814304098} + canvas c1 + interface-peer {eth0 n11} +} + +node n10 { + type router + model mdr + network-config { + hostname n10 + ! + interface eth0 + ip address 10.0.0.10/32 + ipv6 address a:0::10/128 + ! + } + iconcoords {569.534639911 475.46828902} + labelcoords {569.534639911 499.46828902} + canvas c1 + interface-peer {eth0 n11} +} + +node n11 { + bandwidth 54000000 + type wlan + range 275 + network-config { + hostname wlan11 + ! + interface wireless + ip address 10.0.0.0/32 + ipv6 address a:0::0/128 + ! + mobmodel + coreapi + emane_rfpipe + ! 
+ } + canvas c1 + iconcoords {65.0 558.0} + labelcoords {65.0 582.0} + interface-peer {e0 n1} + interface-peer {e1 n2} + interface-peer {e2 n3} + interface-peer {e3 n4} + interface-peer {e4 n5} + interface-peer {e5 n6} + interface-peer {e6 n7} + interface-peer {e7 n8} + interface-peer {e8 n9} + interface-peer {e9 n10} +} + +link l1 { + nodes {n11 n1} + bandwidth 54000000 +} + +link l2 { + nodes {n11 n2} + bandwidth 54000000 +} + +link l3 { + nodes {n11 n3} + bandwidth 54000000 +} + +link l4 { + nodes {n11 n4} + bandwidth 54000000 +} + +link l5 { + nodes {n11 n5} + bandwidth 54000000 +} + +link l6 { + nodes {n11 n6} + bandwidth 54000000 +} + +link l7 { + nodes {n11 n7} + bandwidth 54000000 +} + +link l8 { + nodes {n11 n8} + bandwidth 54000000 +} + +link l9 { + nodes {n11 n9} + bandwidth 54000000 +} + +link l10 { + nodes {n11 n10} + bandwidth 54000000 +} + +canvas c1 { + name {Canvas1} +} + +option global { + interface_names no + ip_addresses yes + ipv6_addresses yes + node_labels yes + link_labels yes + ipsec_configs yes + remote_exec no + exec_errors yes + show_api no + background_images no + annotations yes + grid yes + traffic_start 0 +} + diff --git a/gui/configs/sample7-emane-ieee80211abg.imn b/gui/configs/sample7-emane-ieee80211abg.imn new file mode 100644 index 00000000..b1323f6f --- /dev/null +++ b/gui/configs/sample7-emane-ieee80211abg.imn @@ -0,0 +1,274 @@ +node n1 { + type router + model mdr + network-config { + hostname n1 + ! + interface eth0 + ip address 10.0.0.1/32 + ipv6 address a:0::1/128 + ! + } + iconcoords {115.14883649199999 139.941840849} + labelcoords {115.14883649199999 167.941840849} + canvas c1 + interface-peer {eth0 n11} +} + +node n2 { + type router + model mdr + network-config { + hostname n2 + ! + interface eth0 + ip address 10.0.0.2/32 + ipv6 address a:0::2/128 + ! + } + iconcoords {190.35166313500002 519.524009667} + labelcoords {190.35166313500002 547.524009667} + canvas c1 + interface-peer {eth0 n11} +} + +node n3 { + type router + model mdr + network-config { + hostname n3 + ! + interface eth0 + ip address 10.0.0.3/32 + ipv6 address a:0::3/128 + ! + } + iconcoords {142.172431565 307.104176223} + labelcoords {142.172431565 335.104176223} + canvas c1 + interface-peer {eth0 n11} +} + +node n4 { + type router + model mdr + network-config { + hostname n4 + ! + interface eth0 + ip address 10.0.0.4/32 + ipv6 address a:0::4/128 + ! + } + iconcoords {395.031505695 589.805480735} + labelcoords {395.031505695 617.805480735} + canvas c1 + interface-peer {eth0 n11} +} + +node n5 { + type router + model mdr + network-config { + hostname n5 + ! + interface eth0 + ip address 10.0.0.5/32 + ipv6 address a:0::5/128 + ! + } + iconcoords {250.817758443 27.20735313899999} + labelcoords {250.817758443 55.20735313899999} + canvas c1 + interface-peer {eth0 n11} +} + +node n6 { + type router + model mdr + network-config { + hostname n6 + ! + interface eth0 + ip address 10.0.0.6/32 + ipv6 address a:0::6/128 + ! + } + iconcoords {757.062318769 61.533941783} + labelcoords {757.062318769 89.533941783} + canvas c1 + interface-peer {eth0 n11} +} + +node n7 { + type router + model mdr + network-config { + hostname n7 + ! + interface eth0 + ip address 10.0.0.7/32 + ipv6 address a:0::7/128 + ! + } + iconcoords {909.142667152 593.227596061} + labelcoords {909.142667152 621.227596061} + canvas c1 + interface-peer {eth0 n11} +} + +node n8 { + type router + model mdr + network-config { + hostname n8 + ! + interface eth0 + ip address 10.0.0.8/32 + ipv6 address a:0::8/128 + ! 
+ } + iconcoords {351.895107521 337.228007484} + labelcoords {351.895107521 365.228007484} + canvas c1 + interface-peer {eth0 n11} +} + +node n9 { + type router + model mdr + network-config { + hostname n9 + ! + interface eth0 + ip address 10.0.0.9/32 + ipv6 address a:0::9/128 + ! + } + iconcoords {528.693178831 84.9814304098} + labelcoords {528.693178831 112.98143041} + canvas c1 + interface-peer {eth0 n11} +} + +node n10 { + type router + model mdr + network-config { + hostname n10 + ! + interface eth0 + ip address 10.0.0.10/32 + ipv6 address a:0::10/128 + ! + } + iconcoords {568.534639911 526.4682890199999} + labelcoords {568.534639911 554.4682890199999} + canvas c1 + interface-peer {eth0 n11} +} + +node n11 { + bandwidth 54000000 + type wlan + range 275 + network-config { + hostname wlan11 + ! + interface wireless + ip address 10.0.0.0/32 + ipv6 address a:0::0/128 + ! + mobmodel + coreapi + emane_ieee80211abg + ! + } + canvas c1 + iconcoords {65.0 558.0} + labelcoords {65.0 590.0} + interface-peer {e0 n1} + interface-peer {e1 n2} + interface-peer {e2 n3} + interface-peer {e3 n4} + interface-peer {e4 n5} + interface-peer {e5 n6} + interface-peer {e6 n7} + interface-peer {e7 n8} + interface-peer {e8 n9} + interface-peer {e9 n10} +} + +link l1 { + nodes {n11 n1} + bandwidth 54000000 +} + +link l2 { + nodes {n11 n2} + bandwidth 54000000 +} + +link l3 { + nodes {n11 n3} + bandwidth 54000000 +} + +link l4 { + nodes {n11 n4} + bandwidth 54000000 +} + +link l5 { + nodes {n11 n5} + bandwidth 54000000 +} + +link l6 { + nodes {n11 n6} + bandwidth 54000000 +} + +link l7 { + nodes {n11 n7} + bandwidth 54000000 +} + +link l8 { + nodes {n11 n8} + bandwidth 54000000 +} + +link l9 { + nodes {n11 n9} + bandwidth 54000000 +} + +link l10 { + nodes {n11 n10} + bandwidth 54000000 +} + +canvas c1 { + name {Canvas1} + refpt {0 0 47.5791667 -122.132322 2.0} + scale 350.0 + size {1000 750} +} + +option global { + interface_names no + ip_addresses yes + ipv6_addresses yes + node_labels yes + link_labels yes + ipsec_configs yes + remote_exec no + exec_errors yes + show_api no + background_images no + annotations yes + grid yes + traffic_start 0 +} + diff --git a/gui/configs/sample8-ipsec-service.imn b/gui/configs/sample8-ipsec-service.imn new file mode 100644 index 00000000..ba409185 --- /dev/null +++ b/gui/configs/sample8-ipsec-service.imn @@ -0,0 +1,952 @@ +comments { +Sample scenario showing IPsec service configuration. + +There are three red routers having the IPsec service enabled. The IPsec service +must be customized with the tunnel hosts (peers) and their keys, and the subnet +addresses that should be tunneled. + +For simplicity, the same keys and certificates are used in each of the three +IPsec gateways. These are written to node n1's configuration directory. Keys +can be generated using the openssl utility. + +Note that this scenario may require at patched kernel in order to work; see the +kernels subdirectory of the CORE source for kernel patches. + +The racoon keying daemon and setkey from the ipsec-tools package should also be +installed. +} + +node n1 { + type router + model router + network-config { + hostname n1 + ! + interface eth3 + ip address 192.168.6.1/24 + ipv6 address 2001:6::1/64 + ! + interface eth2 + ip address 192.168.5.1/24 + ipv6 address 2001:5::1/64 + ! + interface eth1 + ip address 192.168.1.1/24 + ipv6 address 2001:1::1/64 + ! + interface eth0 + ip address 192.168.0.1/24 + ipv6 address 2001:0::1/64 + ! 
+ } + canvas c1 + iconcoords {210.0 172.0} + labelcoords {210.0 200.0} + interface-peer {eth0 n2} + interface-peer {eth1 n3} + interface-peer {eth2 n7} + interface-peer {eth3 n8} + custom-config { + custom-config-id service:IPsec:copycerts.sh + custom-command copycerts.sh + config { + #!/bin/sh + + FILES="test1.pem test1.key ca-cert.pem" + + mkdir -p /tmp/certs + + for f in $FILES; do + cp $f /tmp/certs + done + } + } + custom-config { + custom-config-id service:IPsec:ca-cert.pem + custom-command ca-cert.pem + config { + Certificate: + Data: + Version: 3 (0x2) + Serial Number: 16615976057451940887 (0xe697ce3064d18c17) + Signature Algorithm: sha1WithRSAEncryption + Issuer: C=US, ST=WA, O=CORE CA/emailAddress=root@localhost + Validity + Not Before: Sep 9 17:18:04 2013 GMT + Not After : Sep 7 17:18:04 2023 GMT + Subject: C=US, ST=WA, O=CORE CA/emailAddress=root@localhost + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (1024 bit) + Modulus: + 00:d3:0d:ab:91:72:50:ca:10:43:8d:18:d8:92:05: + 9d:d9:aa:16:2b:d1:25:f8:be:52:48:e4:e7:7a:83: + 9b:b4:3b:26:12:fa:46:23:df:09:cb:34:ba:6f:f6: + 5e:38:9c:d4:90:ea:44:ad:65:f6:bd:85:6f:ac:9f: + 4c:83:d4:10:ab:0a:0e:cd:ba:99:1a:ae:f7:b7:e2: + c3:00:0b:c1:02:69:16:c7:55:e3:cf:4c:c3:72:77: + 10:be:da:66:ce:91:b2:cc:92:e1:a8:f0:74:fe:b9: + 03:38:fc:49:97:73:bb:40:55:1b:7d:3e:41:63:02: + b5:ad:f4:33:95:76:fd:7b:61 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Subject Key Identifier: + 9A:EF:A7:36:28:06:4A:0A:2F:F9:2E:99:BE:6F:06:E1:83:9C:A2:0E + X509v3 Authority Key Identifier: + keyid:9A:EF:A7:36:28:06:4A:0A:2F:F9:2E:99:BE:6F:06:E1:83:9C:A2:0E + + X509v3 Basic Constraints: + CA:TRUE + Signature Algorithm: sha1WithRSAEncryption + 2d:88:84:20:19:9b:97:90:2d:18:86:7d:db:6c:d0:5e:ae:c2: + 55:61:af:ca:86:5b:3b:e8:15:c5:31:de:ea:d3:7e:9e:39:61: + 2e:b4:a0:93:43:bf:a2:95:f8:b6:13:b3:2f:cb:f8:fb:72:8c: + 40:95:50:db:03:cc:f7:b8:a5:d8:fb:77:88:c4:f5:f9:65:85: + 29:c8:0c:e9:ce:c9:fa:1d:4e:b2:3f:92:dc:b5:2e:73:50:c3: + c8:3e:90:9e:9a:34:ef:fd:ed:de:74:0b:19:73:6a:95:de:90: + 3b:ee:db:b0:be:14:fd:bf:3e:c6:7b:cd:7d:3c:ba:45:3c:f1: + 46:d7 + -----BEGIN CERTIFICATE----- + MIICZDCCAc2gAwIBAgIJAOaXzjBk0YwXMA0GCSqGSIb3DQEBBQUAMEsxCzAJBgNV + BAYTAlVTMQswCQYDVQQIDAJXQTEQMA4GA1UECgwHQ09SRSBDQTEdMBsGCSqGSIb3 + DQEJARYOcm9vdEBsb2NhbGhvc3QwHhcNMTMwOTA5MTcxODA0WhcNMjMwOTA3MTcx + ODA0WjBLMQswCQYDVQQGEwJVUzELMAkGA1UECAwCV0ExEDAOBgNVBAoMB0NPUkUg + Q0ExHTAbBgkqhkiG9w0BCQEWDnJvb3RAbG9jYWxob3N0MIGfMA0GCSqGSIb3DQEB + AQUAA4GNADCBiQKBgQDTDauRclDKEEONGNiSBZ3ZqhYr0SX4vlJI5Od6g5u0OyYS + +kYj3wnLNLpv9l44nNSQ6kStZfa9hW+sn0yD1BCrCg7Nupkarve34sMAC8ECaRbH + VePPTMNydxC+2mbOkbLMkuGo8HT+uQM4/EmXc7tAVRt9PkFjArWt9DOVdv17YQID + AQABo1AwTjAdBgNVHQ4EFgQUmu+nNigGSgov+S6Zvm8G4YOcog4wHwYDVR0jBBgw + FoAUmu+nNigGSgov+S6Zvm8G4YOcog4wDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0B + AQUFAAOBgQAtiIQgGZuXkC0Yhn3bbNBersJVYa/Khls76BXFMd7q036eOWEutKCT + Q7+ilfi2E7Mvy/j7coxAlVDbA8z3uKXY+3eIxPX5ZYUpyAzpzsn6HU6yP5LctS5z + UMPIPpCemjTv/e3edAsZc2qV3pA77tuwvhT9vz7Ge819PLpFPPFG1w== + -----END CERTIFICATE----- + + } + } + custom-config { + custom-config-id service:IPsec:test1.pem + custom-command test1.pem + config { + Certificate: + Data: + Version: 3 (0x2) + Serial Number: 16098433458223693585 (0xdf691fefe5afbf11) + Signature Algorithm: sha1WithRSAEncryption + Issuer: C=US, ST=WA, O=CORE CA/emailAddress=root@localhost + Validity + Not Before: Sep 9 17:44:47 2013 GMT + Not After : Sep 7 17:44:47 2023 GMT + Subject: C=US, ST=WA, O=core-dev, CN=test1 + Subject Public Key Info: + Public Key Algorithm: 
rsaEncryption + Public-Key: (1024 bit) + Modulus: + 00:b3:26:ed:b6:eb:26:ea:c0:5a:d1:09:6f:d4:5f: + 8d:11:cc:3c:ff:d7:5e:37:e6:55:71:5c:eb:c9:e8: + f8:8e:a3:85:99:2c:3e:a2:8e:b2:1c:2f:fe:99:c6: + 0d:d3:ce:c0:ed:c1:e2:4d:bc:10:35:f6:61:02:b9: + 8f:cc:c5:80:d1:7f:c8:2e:2d:9a:32:9f:8a:bb:32: + ea:14:82:e0:6f:cb:3d:9d:d5:1c:f1:43:52:9f:49: + 79:f1:94:03:48:2c:91:51:c7:8f:32:90:a7:c2:c0: + 25:64:34:f1:c7:f2:ac:d5:96:87:a2:0a:fb:e5:b3: + 0b:90:bf:6f:08:75:5d:54:cb + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Basic Constraints: + CA:FALSE + Netscape Comment: + OpenSSL Generated Certificate + X509v3 Subject Key Identifier: + B3:EC:1A:56:77:F9:DC:0E:60:0F:B7:69:C9:DC:43:2D:09:39:A6:1C + X509v3 Authority Key Identifier: + keyid:9A:EF:A7:36:28:06:4A:0A:2F:F9:2E:99:BE:6F:06:E1:83:9C:A2:0E + + Signature Algorithm: sha1WithRSAEncryption + c5:3f:65:1f:b6:a4:33:fd:c8:04:a1:da:07:f6:e0:3b:55:b9: + 76:b7:aa:78:55:4a:59:ad:36:7f:cb:00:1c:32:cb:fe:40:72: + eb:49:27:b4:9d:5d:05:6f:30:37:1d:49:35:5e:0b:6b:5d:c5: + 07:3d:c8:63:1f:b6:46:6d:f9:c9:52:ce:1d:1f:d9:e8:02:46: + 95:18:26:39:ec:17:fe:ae:07:cf:55:25:45:1f:8a:e4:bb:f2: + 73:d2:e1:01:c3:8e:5f:eb:e4:7e:80:44:40:e6:a1:cd:85:9b: + e8:fb:16:d0:7b:4f:ad:3b:4c:eb:bd:67:02:2c:08:2b:62:f1: + c5:0a + -----BEGIN CERTIFICATE----- + MIICgTCCAeqgAwIBAgIJAN9pH+/lr78RMA0GCSqGSIb3DQEBBQUAMEsxCzAJBgNV + BAYTAlVTMQswCQYDVQQIDAJXQTEQMA4GA1UECgwHQ09SRSBDQTEdMBsGCSqGSIb3 + DQEJARYOcm9vdEBsb2NhbGhvc3QwHhcNMTMwOTA5MTc0NDQ3WhcNMjMwOTA3MTc0 + NDQ3WjA9MQswCQYDVQQGEwJVUzELMAkGA1UECAwCV0ExETAPBgNVBAoMCGNvcmUt + ZGV2MQ4wDAYDVQQDDAV0ZXN0MTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA + sybttusm6sBa0Qlv1F+NEcw8/9deN+ZVcVzryej4jqOFmSw+oo6yHC/+mcYN087A + 7cHiTbwQNfZhArmPzMWA0X/ILi2aMp+KuzLqFILgb8s9ndUc8UNSn0l58ZQDSCyR + UcePMpCnwsAlZDTxx/Ks1ZaHogr75bMLkL9vCHVdVMsCAwEAAaN7MHkwCQYDVR0T + BAIwADAsBglghkgBhvhCAQ0EHxYdT3BlblNTTCBHZW5lcmF0ZWQgQ2VydGlmaWNh + dGUwHQYDVR0OBBYEFLPsGlZ3+dwOYA+3acncQy0JOaYcMB8GA1UdIwQYMBaAFJrv + pzYoBkoKL/kumb5vBuGDnKIOMA0GCSqGSIb3DQEBBQUAA4GBAMU/ZR+2pDP9yASh + 2gf24DtVuXa3qnhVSlmtNn/LABwyy/5AcutJJ7SdXQVvMDcdSTVeC2tdxQc9yGMf + tkZt+clSzh0f2egCRpUYJjnsF/6uB89VJUUfiuS78nPS4QHDjl/r5H6AREDmoc2F + m+j7FtB7T607TOu9ZwIsCCti8cUK + -----END CERTIFICATE----- + + } + } + custom-config { + custom-config-id service:IPsec:test1.key + custom-command test1.key + config { + -----BEGIN PRIVATE KEY----- + MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBALMm7bbrJurAWtEJ + b9RfjRHMPP/XXjfmVXFc68no+I6jhZksPqKOshwv/pnGDdPOwO3B4k28EDX2YQK5 + j8zFgNF/yC4tmjKfirsy6hSC4G/LPZ3VHPFDUp9JefGUA0gskVHHjzKQp8LAJWQ0 + 8cfyrNWWh6IK++WzC5C/bwh1XVTLAgMBAAECgYB1zJIgZe04DPVqYC8lURL8cfRm + MeIlFZJ3MSdlo4fUmtddCYfB8dxRxok96cnrzRZ0/7jjblamdPQDC6rvdaqmfLFx + nJ/RVhCj6HqDMrQnv/9tnl6UQmkaYSnYvTn2GgmpqvBf9RUQk4+kjwgRgdqKxaIz + oH8j0ZxMh2DOZuzJMQJBAOJwEnbG085q2k1Qg8PQz0cpVG9QCE3sJUNs0hMPC7dk + IzknFtidlpCf6NMboJ2Nt9dzmJmKLqWb3oauyQRQA6MCQQDKin0wElLV1268IbcF + RXhkVlxcg5fDEazeNL9p1z5vmwaq0IcLtSPrIaect2hacCkfJoREhcA+f9YIpcod + lby5AkEApyXla0ofpXqYxIOPkGc96qCmlDh2uNZ9N0VH2Qu9MVW47oJdSe8h6oYv + /k2hhUvMjjzlQ0mOX28slyzEc+uAkwJAWlAsiE3zX+UjPIJwIMqcZ2lW3+3Rsyrj + gWXV4HUZIxzmeS5ouWC5NnSYT7o8ru8KdxhurDtTwMqx/sMmf9CwCQJAIDbMwwIs + XStw0y/M9+hdPUkccVoHyXKPTensyX/miAUwHZN/oadGUUOZO7XBKb1uNFv1uowU + 29bGgXa+mvb6aA== + -----END PRIVATE KEY----- + + } + } + custom-config { + custom-config-id service:IPsec:ipsec.sh + custom-command ipsec.sh + config { + #!/bin/sh + # set up static tunnel mode security assocation for service (security.py) + # -------- CUSTOMIZATION REQUIRED -------- + # + # The IPsec service builds ESP 
tunnels between the specified peers using the + # racoon IKEv2 keying daemon. You need to provide keys and the addresses of + # peers, along with subnets to tunnel. + + # directory containing the certificate and key described below + keydir=/tmp/certs + + # the name used for the "$certname.pem" x509 certificate and + # "$certname.key" RSA private key, which can be generated using openssl + certname=test1 + + # list the public-facing IP addresses, starting with the localhost and followed + # by each tunnel peer, separated with a single space + tunnelhosts="192.168.0.1AND192.168.0.2 192.168.1.1AND192.168.1.2" + + # Define T where i is the index for each tunnel peer host from + # the tunnel_hosts list above (0 is localhost). + # T is a list of IPsec tunnels with peer i, with a local subnet address + # followed by the remote subnet address: + # T="AND AND" + # For example, 192.168.0.0/24 is a local network (behind this node) to be + # tunneled and 192.168.2.0/24 is a remote network (behind peer 1) + T1="192.168.5.0/24AND192.168.8.0/24" + T2="192.168.5.0/24AND192.168.4.0/24 192.168.6.0/24AND192.168.4.0/24" + + # -------- END CUSTOMIZATION -------- + + echo "building config $PWD/ipsec.conf..." + echo "building config $PWD/ipsec.conf..." > $PWD/ipsec.log + + checkip=0 + if [ "$(dpkg -l | grep " sipcalc ")" = "" ]; then + echo "WARNING: ip validation disabled because package sipcalc not installed + " >> $PWD/ipsec.log + checkip=1 + fi + + echo "#!/usr/sbin/setkey -f + # Flush the SAD and SPD + flush; + spdflush; + + # Security policies \ + " > $PWD/ipsec.conf + i=0 + for hostpair in $tunnelhosts; do + i=`expr $i + 1` + # parse tunnel host IP + thishost=${hostpair%%AND*} + peerhost=${hostpair##*AND} + if [ $checkip = "0" ] && + [ "$(sipcalc "$thishost" "$peerhost" | grep ERR)" != "" ]; then + echo "ERROR: invalid host address $thishost or $peerhost \ + " >> $PWD/ipsec.log + fi + # parse each tunnel addresses + tunnel_list_var_name=T$i + eval tunnels="$"$tunnel_list_var_name"" + for ttunnel in $tunnels; do + lclnet=${ttunnel%%AND*} + rmtnet=${ttunnel##*AND} + if [ $checkip = "0" ] && + [ "$(sipcalc "$lclnet" "$rmtnet"| grep ERR)" != "" ]; then + echo "ERROR: invalid tunnel address $lclnet and $rmtnet \ + " >> $PWD/ipsec.log + fi + # add tunnel policies + echo " + spdadd $lclnet $rmtnet any -P out ipsec + esp/tunnel/$thishost-$peerhost/require; + spdadd $rmtnet $lclnet any -P in ipsec + esp/tunnel/$peerhost-$thishost/require; \ + " >> $PWD/ipsec.conf + done + done + + echo "building config $PWD/racoon.conf..." + if [ ! -e $keydir\/$certname.key ] || [ ! -e $keydir\/$certname.pem ]; then + echo "ERROR: missing certification files under $keydir \ + $certname.key or $certname.pem " >> $PWD/ipsec.log + fi + echo " + path certificate \"$keydir\"; + listen { + adminsock disabled; + } + remote anonymous + { + exchange_mode main; + certificate_type x509 \"$certname.pem\" \"$certname.key\"; + ca_type x509 \"ca-cert.pem\"; + my_identifier asn1dn; + peers_identifier asn1dn; + + proposal { + encryption_algorithm 3des ; + hash_algorithm sha1; + authentication_method rsasig ; + dh_group modp768; + } + } + sainfo anonymous + { + pfs_group modp768; + lifetime time 1 hour ; + encryption_algorithm 3des, blowfish 448, rijndael ; + authentication_algorithm hmac_sha1, hmac_md5 ; + compression_algorithm deflate ; + } + " > $PWD/racoon.conf + + # the setkey program is required from the ipsec-tools package + echo "running setkey -f $PWD/ipsec.conf..." 
+ setkey -f $PWD/ipsec.conf + + echo "running racoon -d -f $PWD/racoon.conf..." + racoon -d -f $PWD/racoon.conf -l racoon.log + + } + } + custom-config { + custom-config-id service:IPsec + custom-command IPsec + config { + + ('ipsec.sh', 'test1.key', 'test1.pem', 'ca-cert.pem', 'copycerts.sh', ) + 60 + ('sh copycerts.sh', 'sh ipsec.sh', ) + ('killall racoon', ) + + + } + } + services {zebra OSPFv2 OSPFv3 IPForward IPsec} + custom-image $CORE_DATA_DIR/icons/normal/router_red.gif +} + +node n2 { + type router + model router + network-config { + hostname n2 + ! + interface eth3 + ip address 192.168.8.1/24 + ipv6 address 2001:8::1/64 + ! + interface eth2 + ip address 192.168.7.1/24 + ipv6 address 2001:7::1/64 + ! + interface eth1 + ip address 192.168.2.1/24 + ipv6 address 2001:2::1/64 + ! + interface eth0 + ip address 192.168.0.2/24 + ipv6 address 2001:0::2/64 + ! + } + canvas c1 + iconcoords {455.0 173.0} + labelcoords {455.0 201.0} + interface-peer {eth0 n1} + interface-peer {eth1 n4} + interface-peer {eth2 n9} + interface-peer {eth3 n10} + custom-config { + custom-config-id service:IPsec:ipsec.sh + custom-command ipsec.sh + config { + #!/bin/sh + # set up static tunnel mode security assocation for service (security.py) + # -------- CUSTOMIZATION REQUIRED -------- + # + # The IPsec service builds ESP tunnels between the specified peers using the + # racoon IKEv2 keying daemon. You need to provide keys and the addresses of + # peers, along with subnets to tunnel. + + # directory containing the certificate and key described below + keydir=/tmp/certs + + # the name used for the "$certname.pem" x509 certificate and + # "$certname.key" RSA private key, which can be generated using openssl + certname=test1 + + # list the public-facing IP addresses, starting with the localhost and followed + # by each tunnel peer, separated with a single space + tunnelhosts="192.168.0.2AND192.168.0.1" + + # Define T where i is the index for each tunnel peer host from + # the tunnel_hosts list above (0 is localhost). + # T is a list of IPsec tunnels with peer i, with a local subnet address + # followed by the remote subnet address: + # T="AND AND" + # For example, 192.168.0.0/24 is a local network (behind this node) to be + # tunneled and 192.168.2.0/24 is a remote network (behind peer 1) + T1="192.168.8.0/24AND192.168.5.0/24" + + # -------- END CUSTOMIZATION -------- + + echo "building config $PWD/ipsec.conf..." + echo "building config $PWD/ipsec.conf..." 
> $PWD/ipsec.log + + checkip=0 + if [ "$(dpkg -l | grep " sipcalc ")" = "" ]; then + echo "WARNING: ip validation disabled because package sipcalc not installed + " >> $PWD/ipsec.log + checkip=1 + fi + + echo "#!/usr/sbin/setkey -f + # Flush the SAD and SPD + flush; + spdflush; + + # Security policies \ + " > $PWD/ipsec.conf + i=0 + for hostpair in $tunnelhosts; do + i=`expr $i + 1` + # parse tunnel host IP + thishost=${hostpair%%AND*} + peerhost=${hostpair##*AND} + if [ $checkip = "0" ] && + [ "$(sipcalc "$thishost" "$peerhost" | grep ERR)" != "" ]; then + echo "ERROR: invalid host address $thishost or $peerhost \ + " >> $PWD/ipsec.log + fi + # parse each tunnel addresses + tunnel_list_var_name=T$i + eval tunnels="$"$tunnel_list_var_name"" + for ttunnel in $tunnels; do + lclnet=${ttunnel%%AND*} + rmtnet=${ttunnel##*AND} + if [ $checkip = "0" ] && + [ "$(sipcalc "$lclnet" "$rmtnet"| grep ERR)" != "" ]; then + echo "ERROR: invalid tunnel address $lclnet and $rmtnet \ + " >> $PWD/ipsec.log + fi + # add tunnel policies + echo " + spdadd $lclnet $rmtnet any -P out ipsec + esp/tunnel/$thishost-$peerhost/require; + spdadd $rmtnet $lclnet any -P in ipsec + esp/tunnel/$peerhost-$thishost/require; \ + " >> $PWD/ipsec.conf + done + done + + echo "building config $PWD/racoon.conf..." + if [ ! -e $keydir\/$certname.key ] || [ ! -e $keydir\/$certname.pem ]; then + echo "ERROR: missing certification files under $keydir \ + $certname.key or $certname.pem " >> $PWD/ipsec.log + fi + echo " + path certificate \"$keydir\"; + listen { + adminsock disabled; + } + remote anonymous + { + exchange_mode main; + certificate_type x509 \"$certname.pem\" \"$certname.key\"; + ca_type x509 \"ca-cert.pem\"; + my_identifier asn1dn; + peers_identifier asn1dn; + + proposal { + encryption_algorithm 3des ; + hash_algorithm sha1; + authentication_method rsasig ; + dh_group modp768; + } + } + sainfo anonymous + { + pfs_group modp768; + lifetime time 1 hour ; + encryption_algorithm 3des, blowfish 448, rijndael ; + authentication_algorithm hmac_sha1, hmac_md5 ; + compression_algorithm deflate ; + } + " > $PWD/racoon.conf + + # the setkey program is required from the ipsec-tools package + echo "running setkey -f $PWD/ipsec.conf..." + setkey -f $PWD/ipsec.conf + + echo "running racoon -d -f $PWD/racoon.conf..." + racoon -d -f $PWD/racoon.conf -l racoon.log + + } + } + custom-config { + custom-config-id service:IPsec + custom-command IPsec + config { + + ('ipsec.sh', ) + 60 + ('sh ipsec.sh', ) + ('killall racoon', ) + + + } + } + services {zebra OSPFv2 OSPFv3 IPForward IPsec} + custom-image $CORE_DATA_DIR/icons/normal/router_red.gif +} + +node n3 { + type router + model router + network-config { + hostname n3 + ! + interface eth2 + ip address 192.168.4.1/24 + ipv6 address 2001:4::1/64 + ! + interface eth1 + ip address 192.168.3.1/24 + ipv6 address 2001:3::1/64 + ! + interface eth0 + ip address 192.168.1.2/24 + ipv6 address 2001:1::2/64 + ! + } + canvas c1 + iconcoords {211.0 375.0} + labelcoords {211.0 403.0} + interface-peer {eth0 n1} + interface-peer {eth1 n5} + interface-peer {eth2 n6} + custom-config { + custom-config-id service:IPsec:ipsec.sh + custom-command ipsec.sh + config { + #!/bin/sh + # set up static tunnel mode security assocation for service (security.py) + # -------- CUSTOMIZATION REQUIRED -------- + # + # The IPsec service builds ESP tunnels between the specified peers using the + # racoon IKEv2 keying daemon. You need to provide keys and the addresses of + # peers, along with subnets to tunnel. 
+ + # directory containing the certificate and key described below + keydir=/tmp/certs + + # the name used for the "$certname.pem" x509 certificate and + # "$certname.key" RSA private key, which can be generated using openssl + certname=test1 + + # list the public-facing IP addresses, starting with the localhost and followed + # by each tunnel peer, separated with a single space + tunnelhosts="192.168.1.2AND192.168.1.1" + + # Define T where i is the index for each tunnel peer host from + # the tunnel_hosts list above (0 is localhost). + # T is a list of IPsec tunnels with peer i, with a local subnet address + # followed by the remote subnet address: + # T="AND AND" + # For example, 192.168.0.0/24 is a local network (behind this node) to be + # tunneled and 192.168.2.0/24 is a remote network (behind peer 1) + T1="192.168.4.0/24AND192.168.5.0/24 192.168.4.0/24AND192.168.6.0/24" + + # -------- END CUSTOMIZATION -------- + + echo "building config $PWD/ipsec.conf..." + echo "building config $PWD/ipsec.conf..." > $PWD/ipsec.log + + checkip=0 + if [ "$(dpkg -l | grep " sipcalc ")" = "" ]; then + echo "WARNING: ip validation disabled because package sipcalc not installed + " >> $PWD/ipsec.log + checkip=1 + fi + + echo "#!/usr/sbin/setkey -f + # Flush the SAD and SPD + flush; + spdflush; + + # Security policies \ + " > $PWD/ipsec.conf + i=0 + for hostpair in $tunnelhosts; do + i=`expr $i + 1` + # parse tunnel host IP + thishost=${hostpair%%AND*} + peerhost=${hostpair##*AND} + if [ $checkip = "0" ] && + [ "$(sipcalc "$thishost" "$peerhost" | grep ERR)" != "" ]; then + echo "ERROR: invalid host address $thishost or $peerhost \ + " >> $PWD/ipsec.log + fi + # parse each tunnel addresses + tunnel_list_var_name=T$i + eval tunnels="$"$tunnel_list_var_name"" + for ttunnel in $tunnels; do + lclnet=${ttunnel%%AND*} + rmtnet=${ttunnel##*AND} + if [ $checkip = "0" ] && + [ "$(sipcalc "$lclnet" "$rmtnet"| grep ERR)" != "" ]; then + echo "ERROR: invalid tunnel address $lclnet and $rmtnet \ + " >> $PWD/ipsec.log + fi + # add tunnel policies + echo " + spdadd $lclnet $rmtnet any -P out ipsec + esp/tunnel/$thishost-$peerhost/require; + spdadd $rmtnet $lclnet any -P in ipsec + esp/tunnel/$peerhost-$thishost/require; \ + " >> $PWD/ipsec.conf + done + done + + echo "building config $PWD/racoon.conf..." + if [ ! -e $keydir\/$certname.key ] || [ ! -e $keydir\/$certname.pem ]; then + echo "ERROR: missing certification files under $keydir \ + $certname.key or $certname.pem " >> $PWD/ipsec.log + fi + echo " + path certificate \"$keydir\"; + listen { + adminsock disabled; + } + remote anonymous + { + exchange_mode main; + certificate_type x509 \"$certname.pem\" \"$certname.key\"; + ca_type x509 \"ca-cert.pem\"; + my_identifier asn1dn; + peers_identifier asn1dn; + + proposal { + encryption_algorithm 3des ; + hash_algorithm sha1; + authentication_method rsasig ; + dh_group modp768; + } + } + sainfo anonymous + { + pfs_group modp768; + lifetime time 1 hour ; + encryption_algorithm 3des, blowfish 448, rijndael ; + authentication_algorithm hmac_sha1, hmac_md5 ; + compression_algorithm deflate ; + } + " > $PWD/racoon.conf + + # the setkey program is required from the ipsec-tools package + echo "running setkey -f $PWD/ipsec.conf..." + setkey -f $PWD/ipsec.conf + + echo "running racoon -d -f $PWD/racoon.conf..." 
+ racoon -d -f $PWD/racoon.conf -l racoon.log + + } + } + custom-config { + custom-config-id service:IPsec + custom-command IPsec + config { + + ('ipsec.sh', ) + 60 + ('sh ipsec.sh', ) + ('killall racoon', ) + + + } + } + services {zebra OSPFv2 OSPFv3 IPForward IPsec} + custom-image $CORE_DATA_DIR/icons/normal/router_red.gif +} + +node n4 { + type router + model router + network-config { + hostname n4 + ! + interface eth1 + ip address 192.168.9.1/24 + ipv6 address 2001:9::1/64 + ! + interface eth0 + ip address 192.168.2.2/24 + ipv6 address 2001:2::2/64 + ! + } + canvas c1 + iconcoords {456.0 376.0} + labelcoords {456.0 404.0} + interface-peer {eth0 n2} + interface-peer {eth1 n11} +} + +node n5 { + type router + model host + network-config { + hostname n5 + ! + interface eth0 + ip address 192.168.3.10/24 + ipv6 address 2001:3::10/64 + ! + } + canvas c1 + iconcoords {50.0 472.0} + labelcoords {50.0 504.0} + interface-peer {eth0 n3} +} + +node n6 { + type router + model host + network-config { + hostname n6 + ! + interface eth0 + ip address 192.168.4.10/24 + ipv6 address 2001:4::10/64 + ! + } + canvas c1 + iconcoords {44.0 292.0} + labelcoords {44.0 324.0} + interface-peer {eth0 n3} +} + +node n7 { + type router + model host + network-config { + hostname n7 + ! + interface eth0 + ip address 192.168.5.10/24 + ipv6 address 2001:5::10/64 + ! + } + canvas c1 + iconcoords {41.0 62.0} + labelcoords {41.0 94.0} + interface-peer {eth0 n1} +} + +node n8 { + type router + model host + network-config { + hostname n8 + ! + interface eth0 + ip address 192.168.6.10/24 + ipv6 address 2001:6::10/64 + ! + } + canvas c1 + iconcoords {39.0 121.0} + labelcoords {39.0 153.0} + interface-peer {eth0 n1} +} + +node n9 { + type router + model host + network-config { + hostname n9 + ! + interface eth0 + ip address 192.168.7.10/24 + ipv6 address 2001:7::10/64 + ! + } + canvas c1 + iconcoords {653.0 69.0} + labelcoords {653.0 101.0} + interface-peer {eth0 n2} +} + +node n10 { + type router + model host + network-config { + hostname n10 + ! + interface eth0 + ip address 192.168.8.10/24 + ipv6 address 2001:8::10/64 + ! + } + canvas c1 + iconcoords {454.0 48.0} + labelcoords {484.0 59.0} + interface-peer {eth0 n2} +} + +node n11 { + type router + model host + network-config { + hostname n11 + ! + interface eth0 + ip address 192.168.9.10/24 + ipv6 address 2001:9::10/64 + ! 
+ } + canvas c1 + iconcoords {654.0 460.0} + labelcoords {654.0 492.0} + interface-peer {eth0 n4} +} + +link l1 { + nodes {n1 n2} + bandwidth 0 +} + +link l2 { + nodes {n1 n3} + bandwidth 0 +} + +link l3 { + nodes {n2 n4} + bandwidth 0 +} + +link l4 { + nodes {n3 n5} + bandwidth 0 +} + +link l5 { + nodes {n3 n6} + bandwidth 0 +} + +link l6 { + nodes {n1 n7} + bandwidth 0 +} + +link l7 { + nodes {n1 n8} + bandwidth 0 +} + +link l8 { + nodes {n2 n9} + bandwidth 0 +} + +link l9 { + nodes {n2 n10} + bandwidth 0 +} + +link l10 { + nodes {n4 n11} + bandwidth 0 +} + +annotation a1 { + iconcoords {8.0 6.0 514.0 99.0} + type rectangle + label {Tunnel 1} + labelcolor black + fontfamily {Arial} + fontsize {12} + color #ffd0d0 + width 0 + border #00ff00 + rad 22 + canvas c1 +} + +annotation a2 { + iconcoords {8.0 6.0 137.0 334.0} + type rectangle + label {Tunnel 2} + labelcolor black + fontfamily {Arial} + fontsize {12} + color #ffe1e1 + width 0 + border black + rad 23 + canvas c1 +} + +annotation a5 { + iconcoords {263.0 127.0} + type text + label {} + labelcolor black + fontfamily {Arial} + fontsize {12} + effects {underline} + canvas c1 +} + +canvas c1 { + name {Canvas1} +} + +option global { + interface_names yes + ip_addresses yes + ipv6_addresses no + node_labels yes + link_labels yes + ipsec_configs yes + exec_errors yes + show_api no + background_images no + annotations yes + grid yes + traffic_start 0 +} + diff --git a/gui/configs/sample9-vpn.imn b/gui/configs/sample9-vpn.imn new file mode 100644 index 00000000..095c2db4 --- /dev/null +++ b/gui/configs/sample9-vpn.imn @@ -0,0 +1,1015 @@ +comments { +Sample scenario showing VPNClient and VPNServer service configuration. + +This topology features an OpenVPN client and server for virtual private +networking. The client can access the private 10.0.6.0/24 network via the VPN +server. First wait until routing converges in the center routers (try using the +Adjacency Widget and wait for blue lines, meaning full adjacencies), then open +a shell on the vpnclient and try pinging the private address of the vpnserver: + + vpnclient> ping 10.0.6.1 + +You can also access the other 10.0.6.* hosts behind the server. Try running +tcpudmp on one of the center routers, e.g. the n2 eth1/10.0.5.2 interface, and +you'll see UDP packets with TLS encrypted data instead of ICMP packets. + +Keys are included as extra files in the VPNClient and VPNServer service +configuration. +} + +node n1 { + type router + model router + network-config { + hostname n1 + ! + interface eth2 + ip address 10.0.4.2/24 + ipv6 address 2001:4::2/64 + ! + interface eth1 + ip address 10.0.2.1/24 + ipv6 address 2001:2::1/64 + ! + interface eth0 + ip address 10.0.0.1/24 + ipv6 address 2001:0::1/64 + ! + } + canvas c1 + iconcoords {297.0 236.0} + labelcoords {297.0 264.0} + interface-peer {eth0 n6} + interface-peer {eth1 n2} + interface-peer {eth2 n3} +} + +node n2 { + type router + model router + network-config { + hostname n2 + ! + interface eth1 + ip address 10.0.5.2/24 + ipv6 address 2001:5::2/64 + ! + interface eth0 + ip address 10.0.2.2/24 + ipv6 address 2001:2::2/64 + ! + } + canvas c1 + iconcoords {298.0 432.0} + labelcoords {298.0 460.0} + interface-peer {eth0 n1} + interface-peer {eth1 n4} +} + +node n3 { + type router + model router + network-config { + hostname n3 + ! + interface eth1 + ip address 10.0.4.1/24 + ipv6 address 2001:4::1/64 + ! + interface eth0 + ip address 10.0.3.1/24 + ipv6 address 2001:3::1/64 + ! 
+ } + canvas c1 + iconcoords {573.0 233.0} + labelcoords {573.0 261.0} + interface-peer {eth0 n4} + interface-peer {eth1 n1} +} + +node n4 { + type router + model router + network-config { + hostname n4 + ! + interface eth2 + ip address 10.0.5.1/24 + ipv6 address 2001:5::1/64 + ! + interface eth1 + ip address 10.0.3.2/24 + ipv6 address 2001:3::2/64 + ! + interface eth0 + ip address 10.0.1.1/24 + ipv6 address 2001:1::1/64 + ! + } + canvas c1 + iconcoords {574.0 429.0} + labelcoords {574.0 457.0} + interface-peer {eth0 n5} + interface-peer {eth1 n3} + interface-peer {eth2 n2} +} + +node n5 { + type router + model host + network-config { + hostname vpnserver + ! + interface eth1 + ipv6 address 2001:6::10/64 + ip address 10.0.6.1/24 + ! + interface eth0 + ip address 10.0.1.10/24 + ipv6 address 2001:1::10/64 + ! + } + canvas c1 + iconcoords {726.0 511.0} + labelcoords {726.0 543.0} + interface-peer {eth0 n4} + interface-peer {eth1 n7} + custom-config { + custom-config-id service:VPNServer:vpnserver.pem + custom-command vpnserver.pem + config { + Certificate: + Data: + Version: 3 (0x2) + Serial Number: 1 (0x1) + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=WA, O=core-dev/emailAddress=root@localhost + Validity + Not Before: May 19 02:09:57 2015 GMT + Not After : Apr 25 02:09:57 2115 GMT + Subject: C=US, ST=WA, O=core-dev, CN=vpnserver/emailAddress=root@localhost + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:d2:88:b2:9b:32:ac:38:ca:45:e0:6b:db:1c:92: + 5d:9a:42:23:df:64:a0:3b:c2:c4:f2:3a:75:bb:d6: + 54:12:61:6e:aa:ac:0f:a6:2e:d9:3b:63:dc:3d:48: + 02:f1:36:c8:97:d3:ef:24:6f:7f:dd:b7:9a:9d:6d: + c1:c9:e2:11:49:1c:e0:67:d6:b0:b7:62:84:9a:f8: + c3:af:4f:f7:77:29:74:01:81:47:49:84:d6:1c:0c: + 36:41:42:a2:3e:92:28:83:50:7a:9c:fd:f3:66:7c: + f8:d9:c7:f6:63:d2:59:d2:fd:9a:a4:9b:75:a6:16: + ec:37:de:05:dd:05:a2:31:65:79:66:eb:b3:82:41: + af:b9:e8:4a:bc:02:d6:a1:68:49:6c:09:e1:c5:9f: + 3e:cf:52:76:d9:63:65:7a:a5:34:75:ee:ce:a3:c6: + 92:f3:0d:2f:7f:b0:b4:12:fe:44:5f:77:10:25:98: + b2:45:af:69:c8:9b:13:fc:f9:de:c6:be:b5:cf:62: + 06:01:32:71:d5:84:1d:14:b9:28:46:80:f6:98:35: + 12:e4:c8:e4:7a:94:e5:99:a3:6b:34:de:be:33:fe: + 63:21:a3:cc:c4:64:15:49:e0:9d:57:84:b9:11:e5: + 05:79:ba:22:6e:e4:24:b0:64:f0:54:13:1a:f4:73: + 8a:ff + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Basic Constraints: + CA:FALSE + Netscape Cert Type: + SSL Server + Netscape Comment: + OpenSSL Generated Server Certificate + X509v3 Subject Key Identifier: + A5:A6:80:4D:3C:CD:E2:FE:AD:32:FD:9D:B2:7C:B6:00:76:16:C0:41 + X509v3 Authority Key Identifier: + keyid:DB:D2:9C:8D:22:D9:D7:E2:38:A0:8D:6C:3B:BE:33:CE:8D:2A:BE:C8 + DirName:/C=US/ST=WA/O=core-dev/emailAddress=root@localhost + serial:CE:78:96:91:DB:9B:84:FD + + X509v3 Extended Key Usage: + TLS Web Server Authentication + X509v3 Key Usage: + Digital Signature, Key Encipherment + Signature Algorithm: sha256WithRSAEncryption + 3b:40:af:3a:c0:95:10:bc:d4:63:4a:1b:0f:9d:af:9c:27:29: + 34:c9:80:dc:c2:2d:72:40:0e:50:15:fe:b4:87:bc:59:56:de: + 81:96:1f:4f:ec:1a:44:ce:23:ba:69:b1:f5:ed:4b:1a:22:cf: + 16:17:29:f9:bb:69:12:3c:42:87:09:48:26:a2:b3:88:40:3e: + 3c:06:92:e1:65:6e:c0:62:50:55:08:5d:a0:4b:3a:0f:ff:9d: + 65:91:b9:bf:d3:69:b9:ac:27:83:2c:fd:5a:bd:58:d3:75:a0: + 70:e6:21:e9:f0:0d:19:a6:5f:2b:2d:1f:c9:fb:72:73:06:40: + 32:5a:f8:81:30:59:b7:cb:3a:a7:3e:6f:af:4c:4b:57:eb:4a: + d8:24:65:13:c3:86:fd:35:d3:6d:a0:3a:4b:63:40:9e:b4:98: + e0:a2:c7:f2:71:42:d5:08:72:95:fd:df:8f:05:e9:68:a8:f8: + 
13:db:e6:0a:ec:c2:df:29:65:33:52:57:52:e5:7e:1d:09:2c: + 56:0b:cc:d3:2d:dd:46:72:f0:cb:8b:2d:53:c4:d3:9d:63:a6: + 6e:9f:dd:1a:7c:b2:87:d1:9e:4e:a0:b2:36:85:4a:5e:89:f9: + 01:82:94:3d:3a:86:17:84:48:d4:0c:c4:25:25:54:3f:d7:65: + 6b:85:c2:44:b3:6a:f5:74:69:f4:be:b2:13:68:a0:99:82:88: + 07:23:8e:a3:67:e0:88:07:fe:fd:ba:85:f8:8a:1f:ac:1e:7d: + ac:1e:f9:d1:3d:a8:fd:d9:91:9e:b2:3d:4f:f1:b4:80:9e:0b: + aa:bb:44:6a:20:08:68:a4:45:0e:21:21:4d:d1:5f:ab:a8:96: + 5a:29:e1:0f:9a:ff:a4:58:c6:80:15:51:98:ac:3c:23:4e:9e: + 8f:a2:34:c1:f6:4c:26:f0:33:8d:db:15:b9:30:03:a7:b3:17: + 31:9f:9a:5a:e7:a1:10:5e:61:57:04:bf:9a:6f:ec:87:15:4e: + 33:2a:0c:e4:4a:b0:66:ab:04:7a:32:4d:66:44:af:d9:ad:41: + a9:b1:05:c4:7d:2a:ba:2b:bb:c9:1e:5a:ff:cd:e0:e3:54:39: + b6:be:e2:70:6c:db:e6:71:dc:27:7e:ef:e9:11:1f:cb:fa:cd: + e1:57:a9:b9:ba:d6:69:fc:c0:d7:57:b0:51:4d:c4:2a:2f:1b: + 99:fc:b7:65:11:99:fe:0b:58:4e:11:aa:06:c6:e1:53:20:c7: + 56:0a:de:a6:65:c1:a6:41:e1:7b:1d:d7:17:45:b0:e4:66:50: + 26:d8:85:c3:c3:93:2d:df:b0:35:6d:29:9a:6b:cc:cc:75:de: + cf:72:37:8b:2d:24:b2:45 + -----BEGIN CERTIFICATE----- + MIIFQTCCAymgAwIBAgIBATANBgkqhkiG9w0BAQsFADBMMQswCQYDVQQGEwJVUzEL + MAkGA1UECBMCV0ExETAPBgNVBAoTCGNvcmUtZGV2MR0wGwYJKoZIhvcNAQkBFg5y + b290QGxvY2FsaG9zdDAgFw0xNTA1MTkwMjA5NTdaGA8yMTE1MDQyNTAyMDk1N1ow + YDELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAldBMREwDwYDVQQKEwhjb3JlLWRldjES + MBAGA1UEAxMJdnBuc2VydmVyMR0wGwYJKoZIhvcNAQkBFg5yb290QGxvY2FsaG9z + dDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANKIspsyrDjKReBr2xyS + XZpCI99koDvCxPI6dbvWVBJhbqqsD6Yu2Ttj3D1IAvE2yJfT7yRvf923mp1twcni + EUkc4GfWsLdihJr4w69P93cpdAGBR0mE1hwMNkFCoj6SKINQepz982Z8+NnH9mPS + WdL9mqSbdaYW7DfeBd0FojFleWbrs4JBr7noSrwC1qFoSWwJ4cWfPs9SdtljZXql + NHXuzqPGkvMNL3+wtBL+RF93ECWYskWvacibE/z53sa+tc9iBgEycdWEHRS5KEaA + 9pg1EuTI5HqU5ZmjazTevjP+YyGjzMRkFUngnVeEuRHlBXm6Im7kJLBk8FQTGvRz + iv8CAwEAAaOCARYwggESMAkGA1UdEwQCMAAwEQYJYIZIAYb4QgEBBAQDAgZAMDMG + CWCGSAGG+EIBDQQmFiRPcGVuU1NMIEdlbmVyYXRlZCBTZXJ2ZXIgQ2VydGlmaWNh + dGUwHQYDVR0OBBYEFKWmgE08zeL+rTL9nbJ8tgB2FsBBMHwGA1UdIwR1MHOAFNvS + nI0i2dfiOKCNbDu+M86NKr7IoVCkTjBMMQswCQYDVQQGEwJVUzELMAkGA1UECBMC + V0ExETAPBgNVBAoTCGNvcmUtZGV2MR0wGwYJKoZIhvcNAQkBFg5yb290QGxvY2Fs + aG9zdIIJAM54lpHbm4T9MBMGA1UdJQQMMAoGCCsGAQUFBwMBMAsGA1UdDwQEAwIF + oDANBgkqhkiG9w0BAQsFAAOCAgEAO0CvOsCVELzUY0obD52vnCcpNMmA3MItckAO + UBX+tIe8WVbegZYfT+waRM4jummx9e1LGiLPFhcp+btpEjxChwlIJqKziEA+PAaS + 4WVuwGJQVQhdoEs6D/+dZZG5v9Npuawngyz9Wr1Y03WgcOYh6fANGaZfKy0fyfty + cwZAMlr4gTBZt8s6pz5vr0xLV+tK2CRlE8OG/TXTbaA6S2NAnrSY4KLH8nFC1Qhy + lf3fjwXpaKj4E9vmCuzC3yllM1JXUuV+HQksVgvM0y3dRnLwy4stU8TTnWOmbp/d + Gnyyh9GeTqCyNoVKXon5AYKUPTqGF4RI1AzEJSVUP9dla4XCRLNq9XRp9L6yE2ig + mYKIByOOo2fgiAf+/bqF+IofrB59rB750T2o/dmRnrI9T/G0gJ4LqrtEaiAIaKRF + DiEhTdFfq6iWWinhD5r/pFjGgBVRmKw8I06ej6I0wfZMJvAzjdsVuTADp7MXMZ+a + WuehEF5hVwS/mm/shxVOMyoM5EqwZqsEejJNZkSv2a1BqbEFxH0quiu7yR5a/83g + 41Q5tr7icGzb5nHcJ37v6REfy/rN4VepubrWafzA11ewUU3EKi8bmfy3ZRGZ/gtY + ThGqBsbhUyDHVgrepmXBpkHhex3XF0Ww5GZQJtiFw8OTLd+wNW0pmmvMzHXez3I3 + iy0kskU= + -----END CERTIFICATE----- + } + } + custom-config { + custom-config-id service:VPNServer:vpnserver.key + custom-command vpnserver.key + config { + -----BEGIN PRIVATE KEY----- + MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDSiLKbMqw4ykXg + a9sckl2aQiPfZKA7wsTyOnW71lQSYW6qrA+mLtk7Y9w9SALxNsiX0+8kb3/dt5qd + bcHJ4hFJHOBn1rC3YoSa+MOvT/d3KXQBgUdJhNYcDDZBQqI+kiiDUHqc/fNmfPjZ + x/Zj0lnS/Zqkm3WmFuw33gXdBaIxZXlm67OCQa+56Eq8AtahaElsCeHFnz7PUnbZ + Y2V6pTR17s6jxpLzDS9/sLQS/kRfdxAlmLJFr2nImxP8+d7GvrXPYgYBMnHVhB0U + uShGgPaYNRLkyOR6lOWZo2s03r4z/mMho8zEZBVJ4J1XhLkR5QV5uiJu5CSwZPBU + 
Exr0c4r/AgMBAAECggEATaktMUC09NHwisNedSCstI13TB2DWegT3EKiUWLTamBU + gVKtByE68sR4ZoacxzvtLMx555fVtATZXP8yv/TLaYvkX4l7cHo/7iabkJzP7T32 + U+PLVxxQGtKKZPJehPRHS4ExaZ3n3kN1TGiNw+7BQapZFCVgdZ75DfaxdQFx/gQD + 4htDr7SZL/jG1Sn08Cf7DRQ9U0rISecmm9nG9LV1LXMfnbdWTwvmm/z0a6wjDZ4n + Byfd5jOOI4cpgetHhAQCH+ksComY0GBf21MOsfQT/NzfuW/KFGag992Yub8tBUFa + AG+SlaNZBiLAPYfMegjCxR1jZeUCthlB2kdzYKcmyQKBgQD4lgbktGO/VKjnKnGD + SR8oJT82HvkfB16M/pSlTNOTmAAbbY9yMNj/zKPJDOhxHLhFxmXqVnvs70Ye+pfT + 3o3v0NkAfdIhzNzqbFlUA15QqELhrXHaTtpU3/nyw+/lQhoiEof9yj9vuGBy4iDf + GdP1R8G7OWqJIDXwZd3s99XujQKBgQDY0CPvCe1j43wfG8MjI/bLeDWmxjGaLWJu + IKbMepEbqcaz9x+25Ey3E35DYtfLjeAQ+qRc5T4Cj6L9iG/mS89/ShPGj91xyFzQ + qoZf4lGB0/A80aTgmDiF4FxHFzWpXKaJPS1gTfWZ9Qs7r0LP8Uwni9dhO8LGPNAr + 9Ky3jaHyuwKBgQDxml74yZpoyw+eHVJWFyuBCTJ2l4Po9HCg+I3gWtsICCOShNl2 + UqOVen91WGZSCWfP6RQEvimUDrpIQaZu9U9eVc2S/LbOwx2zebsYPG3eVqsqTDjr + xNfOxiFYIbd3Ste7ZedmcrtVCg4zmjP4olGvgx53qUYyIGxMSbV4KyhxwQKBgQCQ + ba7SSLGrrdl8O5k1Knr3tb8/tp1KUFtWc0fJxQgu/lzQe5nT0qdL+Z9NsmWAQqV1 + ihG9lDRHrnlsHNw19GBoMeeUiTeB2XACzOWwr+mN66oISbtkpeJZREkUTmC/zmld + 2LQGiEhIY9U00B5YuSv62AwEyLOKLO6bqWT47U9piwKBgQDm2+ufGU+ZBr0qpyCf + TiZFngJ1fGK0tyVrERm1y83vWzbqaW9nX5hJilqYDB3jpHARvbY56+jWVSm4ADwG + dlqEpmPHBgDRYBkdLUl50TN5vINcXQCGlA0zOHAueAUESyL5nLnO7NsK/McSRDd5 + XonR43XumudHzs3R6BwBTKv1gw== + -----END PRIVATE KEY----- + } + } + custom-config { + custom-config-id service:VPNServer:vpnserver.sh + custom-command vpnserver.sh + config { + #!/bin/sh + # custom VPN Server Configuration for service (security.py) + # -------- CUSTOMIZATION REQUIRED -------- + # + # The VPNServer service sets up the OpenVPN server for building VPN tunnels + # that allow access via TUN/TAP device to private networks. + # + # note that the IPForward and DefaultRoute services should be enabled + + # directory containing the certificate and key described below, in addition to + # a CA certificate and DH key + certdir=$SESSION_DIR/certs + keydir=$PWD + + # the name used for a "$keyname.pem" certificate and "$keyname.key" private key. + keyname=vpnserver + + # the VPN subnet address from which the client VPN IP (for the TUN/TAP) + # will be allocated + vpnsubnet=10.0.200.0 + + # public IP address of this vpn server (same as VPNClient vpnserver= setting) + vpnserver=10.0.1.10 + + # optional list of private subnets reachable behind this VPN server + # each subnet and next hop is separated by a space + # ", , ..." + privatenets="10.0.6.0,10.0.1.10" + + # optional list of VPN clients, for statically assigning IP addresses to + # clients; also, an optional client subnet can be specified for adding static + # routes via the client + # Note: VPN addresses x.x.x.0-3 are reserved + # ",, ,, ..." + #vpnclients="client1KeyFilename,10.0.200.5,10.0.0.0 client2KeyFilename,," + vpnclients="" + + # NOTE: you may need to enable the StaticRoutes service on nodes within the + # private subnet, in order to have routes back to the client. + # /sbin/ip ro add /24 via + # /sbin/ip ro add /24 via + + # -------- END CUSTOMIZATION -------- + + echo > $PWD/vpnserver.log + rm -f -r $PWD/ccd + + # validate key and certification files + check-key-file() { + if [ ! 
-e $1 ]; then + echo "ERROR: missing certification or key file: $1" >> $PWD/vpnserver.log + fi + } + + check-key-file $keydir/$keyname.key + check-key-file $keydir/$keyname.pem + check-key-file $certdir/ca-cert.pem + check-key-file $certdir/dh2048.pem + + # validate configuration IP addresses + checkip=0 + if [ "$(dpkg -l | grep sipcalc)" = "" ]; then + echo "WARNING: ip validation disabled because package sipcalc not installed\ + " >> $PWD/vpnserver.log + checkip=1 + else + if [ "$(sipcalc "$vpnsubnet" "$vpnserver" | grep ERR)" != "" ]; then + echo "ERROR: invalid vpn subnet or server address \ + $vpnsubnet or $vpnserver " >> $PWD/vpnserver.log + fi + fi + + # create client vpn ip pool file + ( + cat << EOF + EOF + )> $PWD/ippool.txt + + # create server.conf file + ( + cat << EOF + # openvpn server config + local $vpnserver + server $vpnsubnet 255.255.255.0 + push redirect-gateway def1 + EOF + )> $PWD/server.conf + + # add routes to VPN server private subnets, and push these routes to clients + for privatenet in $privatenets; do + if [ $privatenet != "" ]; then + net=${privatenet%%,*} + nexthop=${privatenet##*,} + if [ $checkip = "0" ] && + [ "$(sipcalc "$net" "$nexthop" | grep ERR)" != "" ]; then + echo "ERROR: invalid vpn server private net address \ + $net or $nexthop " >> $PWD/vpnserver.log + fi + echo push route $net 255.255.255.0 >> $PWD/server.conf + fi + done + + # allow subnet through this VPN, one route for each client subnet + for client in $vpnclients; do + if [ $client != "" ]; then + cSubnetIP=${client##*,} + cVpnIP=${client#*,} + cVpnIP=${cVpnIP%%,*} + cKeyFilename=${client%%,*} + if [ "$cSubnetIP" != "" ]; then + if [ $checkip = "0" ] && + [ "$(sipcalc "$cSubnetIP" "$cVpnIP" | grep ERR)" != "" ]; then + echo "ERROR: invalid vpn client and subnet address \ + $cSubnetIP or $cVpnIP " >> $PWD/vpnserver.log + fi + echo route $cSubnetIP 255.255.255.0 >> $PWD/server.conf + if ! test -d $PWD/ccd; then + mkdir -p $PWD/ccd + echo client-config-dir $PWD/ccd >> $PWD/server.conf + fi + if test -e $PWD/ccd/$cKeyFilename; then + echo iroute $cSubnetIP 255.255.255.0 >> $PWD/ccd/$cKeyFilename + else + echo iroute $cSubnetIP 255.255.255.0 > $PWD/ccd/$cKeyFilename + fi + fi + if [ "$cVpnIP" != "" ]; then + echo $cKeyFilename,$cVpnIP >> $PWD/ippool.txt + fi + fi + done + + ( + cat << EOF + keepalive 10 120 + ca $certdir/ca-cert.pem + cert $keydir/$keyname.pem + key $keydir/$keyname.key + dh $certdir/dh2048.pem + cipher AES-256-CBC + status /var/log/openvpn-status.log + log /var/log/openvpn-server.log + ifconfig-pool-linear + ifconfig-pool-persist $PWD/ippool.txt + port 1194 + proto udp + dev tun + verb 4 + daemon + EOF + )>> $PWD/server.conf + + # start vpn server + openvpn --config server.conf + + } + } + custom-config { + custom-config-id service:VPNServer + custom-command VPNServer + config { + + ('vpnserver.sh', 'vpnserver.key', 'vpnserver.pem', ) + 50 + ('bash vpnserver.sh', ) + ('killall openvpn', ) + ('pidof openvpn', ) + + } + } + services {IPForward DefaultRoute SSH VPNServer} +} + +node n6 { + type router + model PC + network-config { + hostname vpnclient + ! + interface eth0 + ip address 10.0.0.20/24 + ipv6 address 2001:0::20/64 + ! 
+ } + canvas c1 + iconcoords {120.0 133.0} + labelcoords {120.0 165.0} + interface-peer {eth0 n1} + custom-config { + custom-config-id service:VPNClient:vpnclient.key + custom-command vpnclient.key + config { + -----BEGIN PRIVATE KEY----- + MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC6T+1XG/EQX8pj + vGSq5QMS6G8sqozjDPInZz8hSukeJCueE9Ib/nbg9wXLST6fAVA/VsY4eGXZzQNB + vl6BxVXjjj/UkY4R+RXchKjg85fq+I37uZnzUL3S2jIalswB4LsLSGusBbTTfe8v + 0rMCPVUzojMSXhkOVJs1gzusmdNnqEg7TZ+W4Ov12jzACbRdGMA572sXvnXRPhdR + BI4CSfrsPRueWPO317oLDCECMrxzza9y65UFS3oPFvf2s6oR7FlAIEq0Z1YUQayx + LDH0kE1ZjhLxBmaqNO7hD0gCjYYX8XSHOpiq045N+OGmKcI3Th8ZsGkXKMMyQ1yN + a1c2PuehAgMBAAECggEAVVqYmPesEJxR1C9SzxfruJXTmNrpgHtF1NdwDIiNE8nu + UZUzBLAnNhj1BpSfo6iuYtYWKXi+8HEDtPLJyRnmp0Fb7L5iH8nFQilkVOpEBtmn + 8lKtPNMYo6him9vJynJyPlEHQt+6X8mp8nbMm5INnoIIc7m4MOCB2poslH5EY4/j + /WTohzXVdx3nhYAJ5v5uVi/i6jeNHfY+6sj6pBKDq82OrfxgjrtABRj3/K5UcVd0 + UBsaqXXObHxSvJkHFOSxK+qhVivuT3svZZlaYaleRB1oDmMx3uRNvtxdkhVz5zZY + zeEddu5/BQzHPpIwKR/HScpFes92fiQ639bJxIzGAQKBgQDtNoE7V2CVpvMVgTsX + 3WsqjTZZOnH8jWfWrOMPgRWmDWSA7P7SNSL7dCIftqdMv3pPxCoYVzH7Skp18HGD + 8EVRqrBUupfj7NXOWlG624qtlilWQsNwPkpjXWciQQapdAhEX+T+1/d5UrVR/mY9 + gP4M6rnT87ShcPPi/Otvr/B6SQKBgQDJEWj0wR5GyCfDA0L8ez3OzAwkiWB3K+8b + usc0Cs/+ZFOjQd4Qxahoo6lL8G17TaIcSvdrULaWsQ/oeJ40M2xzsdlwrrtr6ueL + 7q/mqsJeGLIN0h0I0i6DTUYWwzTR5CXCox967FZV46FQ8Jof3c7fsXCNUajHzcUh + zxqp+AdCmQKBgHeV5bqTxzZKrvtlZfQXBOKzw/VhuHs4kmOwTtvPGKnY0JUKZUB1 + 10fq+RUB0P+o/DFgVFRnCOSFRFqGt8NrCpcsNK7STqZyDCt2bwODkDsIm5hIGhzo + 2jmTqd2j6Ibe3xgRO/GZ0MHSB2TpmoNhFzJN1xbaInLM7ba+CLcKfHI5AoGBAIaS + b3O4uSHYnrwnt7KybXi2Gr5tb7HzJrKhfOf5AKKb1VqkIBOLpx55wzp/LVdka0aS + aixaNgp/cU0/RWtcq453jzeayvf8nYKLexFgYnyF/M3BPguEWPsqQenENtrv3tH5 + SX2FJnePxY0dq5n+Y5JV+SWsbNFliDYLniX6SimpAoGAR9ABECd9DMnhgi+WkhZs + 55YBE8P8jFuSUASBIKVmke60nDn4oot0YD74kykb6TxasGXX5lC1vCje/olNBIGM + y687YmrudDfXWxEUwlHMoec5wrHDGIGCnBRAlt4whDoCREH1H+fp/9BgAThcjv58 + NyNDfW+vGpDlkbwFvyhc4pY= + -----END PRIVATE KEY----- + } + } + custom-config { + custom-config-id service:VPNClient:vpnclient.pem + custom-command vpnclient.pem + config { + Certificate: + Data: + Version: 3 (0x2) + Serial Number: 2 (0x2) + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, ST=WA, O=core-dev/emailAddress=root@localhost + Validity + Not Before: May 19 02:09:57 2015 GMT + Not After : Apr 25 02:09:57 2115 GMT + Subject: C=US, ST=WA, O=core-dev, CN=vpnclient/emailAddress=root@localhost + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:ba:4f:ed:57:1b:f1:10:5f:ca:63:bc:64:aa:e5: + 03:12:e8:6f:2c:aa:8c:e3:0c:f2:27:67:3f:21:4a: + e9:1e:24:2b:9e:13:d2:1b:fe:76:e0:f7:05:cb:49: + 3e:9f:01:50:3f:56:c6:38:78:65:d9:cd:03:41:be: + 5e:81:c5:55:e3:8e:3f:d4:91:8e:11:f9:15:dc:84: + a8:e0:f3:97:ea:f8:8d:fb:b9:99:f3:50:bd:d2:da: + 32:1a:96:cc:01:e0:bb:0b:48:6b:ac:05:b4:d3:7d: + ef:2f:d2:b3:02:3d:55:33:a2:33:12:5e:19:0e:54: + 9b:35:83:3b:ac:99:d3:67:a8:48:3b:4d:9f:96:e0: + eb:f5:da:3c:c0:09:b4:5d:18:c0:39:ef:6b:17:be: + 75:d1:3e:17:51:04:8e:02:49:fa:ec:3d:1b:9e:58: + f3:b7:d7:ba:0b:0c:21:02:32:bc:73:cd:af:72:eb: + 95:05:4b:7a:0f:16:f7:f6:b3:aa:11:ec:59:40:20: + 4a:b4:67:56:14:41:ac:b1:2c:31:f4:90:4d:59:8e: + 12:f1:06:66:aa:34:ee:e1:0f:48:02:8d:86:17:f1: + 74:87:3a:98:aa:d3:8e:4d:f8:e1:a6:29:c2:37:4e: + 1f:19:b0:69:17:28:c3:32:43:5c:8d:6b:57:36:3e: + e7:a1 + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Basic Constraints: + CA:FALSE + X509v3 Subject Key Identifier: + 55:33:21:40:42:B6:63:7F:EA:A5:20:FA:18:21:C7:27:5B:6F:65:68 + X509v3 
Authority Key Identifier: + keyid:DB:D2:9C:8D:22:D9:D7:E2:38:A0:8D:6C:3B:BE:33:CE:8D:2A:BE:C8 + DirName:/C=US/ST=WA/O=core-dev/emailAddress=root@localhost + serial:CE:78:96:91:DB:9B:84:FD + + Signature Algorithm: sha256WithRSAEncryption + 33:e5:aa:b3:19:63:ce:24:c7:ee:2f:11:18:5b:7b:1d:6b:4c: + 71:2d:0b:ea:6f:9b:5e:43:11:45:50:a6:00:fc:19:11:50:46: + 6a:d8:1d:38:eb:9f:3a:81:09:6e:dc:ae:b3:df:71:85:e2:16: + 7e:b8:bf:4c:ec:36:97:ae:58:a6:d9:d1:64:47:cf:8b:e8:0e: + 8c:41:2e:c3:a7:32:ef:a0:90:c2:5c:e5:6f:aa:03:ca:15:7f: + fb:ef:42:b1:22:28:47:fe:0a:df:58:b5:88:5b:a6:15:f8:13: + 3f:7e:19:da:ec:3f:63:72:0b:e5:c6:94:9f:53:1c:99:60:48: + 25:b5:b4:60:9a:4a:94:ab:68:be:5a:08:67:4c:c8:b5:7e:12: + 32:2c:e9:e7:fb:d1:a2:40:a8:e6:68:0e:37:a1:48:99:17:b6: + 40:f6:50:0a:f7:16:d1:90:e6:8a:3e:b0:c7:21:aa:9a:b9:79: + 6a:69:5c:25:9b:4b:29:b7:0e:13:80:ea:e8:5c:6d:95:cd:5c: + 69:de:20:69:d7:df:20:ec:6f:7b:9f:1d:61:c2:d1:59:6f:1e: + 0a:45:01:f6:25:02:e5:be:fb:91:a9:82:08:c8:42:2a:3e:2c: + 75:bb:4e:9c:0a:b6:07:24:52:e5:4f:f5:81:45:7b:77:ca:19: + 38:56:7f:17:63:5e:1f:a4:be:03:7d:d3:48:fc:e9:43:5c:2b: + b1:d5:da:44:c0:0c:56:23:4a:7d:bf:c0:ac:c6:9c:93:6d:69: + 9a:b9:02:3e:aa:1b:27:3e:b1:c8:6a:39:96:09:1d:c0:08:c8: + 1c:a4:82:ea:d2:72:e7:e1:47:66:7f:76:ac:d5:8c:99:59:02: + 25:ee:ec:ad:76:65:0f:8a:ba:5f:a7:33:ef:8e:34:71:d2:f5: + 3c:63:b0:c4:b2:65:c2:55:2d:35:d7:13:04:9c:87:d2:76:6b: + af:37:ba:58:d2:63:e0:fb:9c:a4:3a:97:e4:e6:79:0f:ca:d4: + 07:8c:39:80:4d:5e:d4:09:3a:09:1f:16:1a:58:c0:96:58:19: + e8:f7:56:bc:bd:fd:23:f4:4b:93:eb:a4:f2:22:7d:7c:d2:f3: + 6b:5a:13:24:a6:b8:1a:33:0c:fa:cd:77:36:12:c8:c6:ac:e9: + 0f:29:1a:4f:c3:3c:92:53:8c:af:80:04:ac:9b:2a:73:af:a8: + 0f:ef:7d:9f:5e:7c:52:d3:03:2e:19:6f:25:b0:f7:17:ef:c9: + 37:b9:50:ad:60:b0:c7:d9:ba:8f:9e:93:27:ba:52:27:70:b8: + ae:2b:9a:f7:33:2a:fd:a6:51:f5:e2:42:1a:e9:e6:08:5e:62: + 75:e9:b2:1b:ca:ce:cd:d1 + -----BEGIN CERTIFICATE----- + MIIE1TCCAr2gAwIBAgIBAjANBgkqhkiG9w0BAQsFADBMMQswCQYDVQQGEwJVUzEL + MAkGA1UECBMCV0ExETAPBgNVBAoTCGNvcmUtZGV2MR0wGwYJKoZIhvcNAQkBFg5y + b290QGxvY2FsaG9zdDAgFw0xNTA1MTkwMjA5NTdaGA8yMTE1MDQyNTAyMDk1N1ow + YDELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAldBMREwDwYDVQQKEwhjb3JlLWRldjES + MBAGA1UEAxMJdnBuY2xpZW50MR0wGwYJKoZIhvcNAQkBFg5yb290QGxvY2FsaG9z + dDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALpP7Vcb8RBfymO8ZKrl + AxLobyyqjOMM8idnPyFK6R4kK54T0hv+duD3BctJPp8BUD9Wxjh4ZdnNA0G+XoHF + VeOOP9SRjhH5FdyEqODzl+r4jfu5mfNQvdLaMhqWzAHguwtIa6wFtNN97y/SswI9 + VTOiMxJeGQ5UmzWDO6yZ02eoSDtNn5bg6/XaPMAJtF0YwDnvaxe+ddE+F1EEjgJJ + +uw9G55Y87fXugsMIQIyvHPNr3LrlQVLeg8W9/azqhHsWUAgSrRnVhRBrLEsMfSQ + TVmOEvEGZqo07uEPSAKNhhfxdIc6mKrTjk344aYpwjdOHxmwaRcowzJDXI1rVzY+ + 56ECAwEAAaOBqzCBqDAJBgNVHRMEAjAAMB0GA1UdDgQWBBRVMyFAQrZjf+qlIPoY + IccnW29laDB8BgNVHSMEdTBzgBTb0pyNItnX4jigjWw7vjPOjSq+yKFQpE4wTDEL + MAkGA1UEBhMCVVMxCzAJBgNVBAgTAldBMREwDwYDVQQKEwhjb3JlLWRldjEdMBsG + CSqGSIb3DQEJARYOcm9vdEBsb2NhbGhvc3SCCQDOeJaR25uE/TANBgkqhkiG9w0B + AQsFAAOCAgEAM+WqsxljziTH7i8RGFt7HWtMcS0L6m+bXkMRRVCmAPwZEVBGatgd + OOufOoEJbtyus99xheIWfri/TOw2l65YptnRZEfPi+gOjEEuw6cy76CQwlzlb6oD + yhV/++9CsSIoR/4K31i1iFumFfgTP34Z2uw/Y3IL5caUn1McmWBIJbW0YJpKlKto + vloIZ0zItX4SMizp5/vRokCo5mgON6FImRe2QPZQCvcW0ZDmij6wxyGqmrl5amlc + JZtLKbcOE4Dq6Fxtlc1cad4gadffIOxve58dYcLRWW8eCkUB9iUC5b77kamCCMhC + Kj4sdbtOnAq2ByRS5U/1gUV7d8oZOFZ/F2NeH6S+A33TSPzpQ1wrsdXaRMAMViNK + fb/ArMack21pmrkCPqobJz6xyGo5lgkdwAjIHKSC6tJy5+FHZn92rNWMmVkCJe7s + rXZlD4q6X6cz7440cdL1PGOwxLJlwlUtNdcTBJyH0nZrrze6WNJj4PucpDqX5OZ5 + D8rUB4w5gE1e1Ak6CR8WGljAllgZ6PdWvL39I/RLk+uk8iJ9fNLza1oTJKa4GjMM + 
+s13NhLIxqzpDykaT8M8klOMr4AErJsqc6+oD+99n158UtMDLhlvJbD3F+/JN7lQ + rWCwx9m6j56TJ7pSJ3C4riua9zMq/aZR9eJCGunmCF5idemyG8rOzdE= + -----END CERTIFICATE----- + } + } + custom-config { + custom-config-id service:VPNClient:vpnclient.sh + custom-command vpnclient.sh + config { + #!/bin/sh + # custom VPN Client configuration for service (security.py) + # -------- CUSTOMIZATION REQUIRED -------- + # + # The VPNClient service builds a VPN tunnel to the specified VPN server using + # OpenVPN software and a virtual TUN/TAP device. + + # directory containing the certificate and key described below + certdir=$SESSION_DIR/certs + keydir=$PWD + + # the name used for a "$keyname.pem" certificate and "$keyname.key" private key. + keyname=vpnclient + + # the public IP address of the VPN server this client should connect with + vpnserver="10.0.1.10" + + # optional next hop for adding a static route to reach the VPN server + nexthop="" + + # --------- END CUSTOMIZATION -------- + + # validate addresses + if [ "$(dpkg -l | grep sipcalc)" = "" ]; then + echo "WARNING: ip validation disabled because package sipcalc not installed + " > $PWD/vpnclient.log + else + if [ "$(sipcalc "$vpnserver" "$nexthop" | grep ERR)" != "" ]; then + echo "ERROR: invalide address $vpnserver or $nexthop \ + " > $PWD/vpnclient.log + fi + fi + + # validate key and certification files + check-key-file() { + if [ ! -e $1 ]; then + echo "ERROR: missing certification or key file: $1" >> $PWD/vpnserver.log + fi + } + + check-key-file $keydir/$keyname.key + check-key-file $keydir/$keyname.pem + check-key-file $certdir/ca-cert.pem + check-key-file $certdir/dh2048.pem + + # if necessary, add a static route for reaching the VPN server IP via the IF + vpnservernet=${vpnserver%.*}.0/24 + if [ "$nexthop" != "" ]; then + /sbin/ip route add $vpnservernet via $nexthop + fi + + # create openvpn client.conf + ( + cat << EOF + client + dev tun + proto udp + remote $vpnserver 1194 + nobind + ca $certdir/ca-cert.pem + cert $keydir/$keyname.pem + key $keydir/$keyname.key + dh $certdir/dh2048.pem + cipher AES-256-CBC + log /var/log/openvpn-client.log + verb 4 + daemon + EOF + ) > client.conf + + openvpn --config client.conf + + } + } + custom-config { + custom-config-id service:VPNClient + custom-command VPNClient + config { + + ('vpnclient.sh', 'vpnclient.pem', 'vpnclient.key', ) + 60 + ('bash vpnclient.sh', ) + ('killall openvpn', ) + ('pidof openvpn', ) + + } + } + services {DefaultRoute VPNClient} +} + +node n7 { + type lanswitch + network-config { + hostname n7 + ! + } + canvas c1 + iconcoords {824.0 458.0} + labelcoords {824.0 482.0} + interface-peer {e0 n5} + interface-peer {e1 n8} + interface-peer {e2 n9} + interface-peer {e3 n10} +} + +node n8 { + type router + model PC + network-config { + hostname n8 + ! + interface eth0 + ip address 10.0.6.20/24 + ipv6 address 2001:6::20/64 + ! + } + canvas c1 + iconcoords {801.0 264.0} + labelcoords {801.0 296.0} + interface-peer {eth0 n7} +} + +node n9 { + type router + model PC + network-config { + hostname n9 + ! + interface eth0 + ip address 10.0.6.21/24 + ipv6 address 2001:6::21/64 + ! + } + canvas c1 + iconcoords {885.0 305.0} + labelcoords {885.0 337.0} + interface-peer {eth0 n7} +} + +node n10 { + type router + model PC + network-config { + hostname n10 + ! + interface eth0 + ip address 10.0.6.22/24 + ipv6 address 2001:6::22/64 + ! 
+ } + canvas c1 + iconcoords {954.0 353.0} + labelcoords {954.0 385.0} + interface-peer {eth0 n7} +} + +link l1 { + nodes {n6 n1} + bandwidth 0 +} + +link l2 { + nodes {n4 n5} + bandwidth 0 +} + +link l3 { + nodes {n1 n2} + bandwidth 0 +} + +link l4 { + nodes {n3 n4} + bandwidth 0 +} + +link l5 { + nodes {n3 n1} + bandwidth 0 +} + +link l6 { + nodes {n4 n2} + bandwidth 0 +} + +link l7 { + nodes {n5 n7} + bandwidth 0 +} + +link l8 { + nodes {n8 n7} + bandwidth 0 +} + +link l9 { + nodes {n9 n7} + bandwidth 0 +} + +link l10 { + nodes {n10 n7} + bandwidth 0 +} + +annotation a1 { + iconcoords {661.0 187.0 997.0 579.0} + type rectangle + label {private network} + labelcolor black + fontfamily {Arial} + fontsize 12 + color #e9e9fe + width 0 + border black + rad 25 + effects {bold} + canvas c1 +} + +canvas c1 { + name {Canvas1} +} + +hook 3:instantiation_hook.sh { +#!/bin/sh +# session hook script; write commands here to execute on the host at the +# specified state + +CERT_DIR=$SESSION_DIR/certs +mkdir $CERT_DIR + +cat > $CERT_DIR/dh2048.pem < $CERT_DIR/ca-cert.pem <] [-s] [-a address] [-p port]" + echo " []" + echo "" + echo "Launches the CORE Tcl/Tk X11 GUI or starts an imn-based emulation." + echo "" + echo " -(-h)elp show help message and exit" + echo " -(-v)ersion show version number and exit" + echo " -(-b)atch batch mode (no X11 GUI)" + echo -n " -(-c)losebatch stop and clean up a batch mode " + echo "session " + echo " -(-s)tart start in execute mode, not edit mode" + echo " -(-a)ddress connect to the specified IP address (default 127.0.0.1)" + echo " -(-p)port connect to the specified TCP port (default 4038)" + echo " (optional) load the specified imn scenario file" + echo "" + echo "With no parameters, starts the GUI in edit mode with a blank canvas." + echo "" + exit 0 + ;; +-v | --version) + exec echo "`basename $0` version @PACKAGE_VERSION@ (@PACKAGE_DATE@)" + exit 0 + ;; +esac + +SHELL=/bin/sh +export SHELL + +export LIBDIR="@CORE_LIB_DIR@" +export SBINDIR="@sbindir@" +# eval is used here to expand "~" to user's home dir +if [ x$CONFDIR = x ]; then export CONFDIR=`eval "echo @CORE_GUI_CONF_DIR@"` ; fi +export CORE_STATE_DIR="@CORE_STATE_DIR@" +export CORE_DATA_DIR="@CORE_DATA_DIR@" +export CORE_USER=`id -u -n` +export CORE_START_DIR=$PWD + +init_conf_dir() { + echo "Setting up user config area $CONFDIR, $CONFDIR/configs, and " + echo " $CONFDIR/myservices" + mkdir -p $CONFDIR + if [ $? != 0 ]; then echo "error making directory $CONFDIR!"; fi + mkdir -p $CONFDIR/configs + if [ $? != 0 ]; then + echo "error making directory $CONFDIR/configs!"; + else + cp -a $CORE_DATA_DIR/examples/configs/* $CONFDIR/configs/ + fi + mkdir -p $CONFDIR/myservices + if [ $? != 0 ]; then + echo "error making directory $CONFDIR/myservices!"; + else + cp -a $CORE_DATA_DIR/examples/myservices/* $CONFDIR/myservices/ + fi + mkdir -p $CONFDIR/myemane + if [ $? != 0 ]; then + echo "error making directory $CONFDIR/myemane!"; + else + cp -a $CORE_DATA_DIR/examples/myemane/* $CONFDIR/myemane/ + fi +} + +cd $LIBDIR + +core=$LIBDIR/core.tcl + +# locate wish8.5 binaries +WISHLIST="/usr/bin/wish8.6 /usr/local/bin/wish8.5 /usr/bin/wish8.5" +for wishbin in $WISHLIST +do + if [ -x $wishbin ] + then + WISH=$wishbin; + break; + fi; +done; + +if [ a$WISH = a ] +then + echo "CORE could not locate the Tcl/Tk binary (wish8.5)." + exit 1; +fi; + +# create /home/user/.core directory if necessary +if [ ! -e $CONFDIR ] +then + init_conf_dir +fi; + +# check for and fix write permissions on /home/user/.core directory +while [ ! 
-w $CONFDIR ]; +do + echo " CORE requires write permissions to the '$CONFDIR'" + echo " configuration directory for the user '$CORE_USER'," + echo " would you like to fix this now [Y/n]?" + read yn + if [ "z$yn" = "zn" ]; then + break + fi + echo -n " (sudo may prompt you for a password; if you do not have sudo set" + echo " up for the" + echo " user '$CORE_USER', su to root and run this command:" + echo " chown -R $CORE_USER $CONFDIR )" + sudo chown -R $U $CONFDIR + sudo chmod -R u+w $CONFDIR +done + +# GUI config directory should not be a file (old prefs) +if [ ! -d $CONFDIR ] +then + + mv $CONFDIR $CONFDIR.tmp + if [ $? != 0 ]; then echo "error moving $CONFDIR!"; exit 1; fi + init_conf_dir + echo "Old preferences file $CONFDIR has been moved to $CONFDIR/prefs.conf" + mv $CONFDIR.tmp $CONFDIR/prefs.conf + if [ $? != 0 ]; then echo "error moving $CONFDIR.tmp to $CONFDIR/prefs.conf!"; exit 1; fi +fi; + +case $1 in +-b | --batch) + TCLBIN=`echo ${WISH} | sed s/wish/tclsh/g` + exec ${TCLBIN} $core "$@" + ;; +-c | --closebatch) + TCLBIN=`echo ${WISH} | sed s/wish/tclsh/g` + exec ${TCLBIN} $core "$@" + ;; +-s) + shift + exec ${WISH} $core -- "--start" "$@" + ;; +-a) + shift + exec ${WISH} $core -- "--address" "$@" + ;; +-p) + shift + exec ${WISH} $core -- "--port" "$@" + ;; +*) + exec ${WISH} $core -- $@ + ;; +esac + +cd $CORE_START_DIR diff --git a/gui/core.tcl b/gui/core.tcl new file mode 100644 index 00000000..aad43fd4 --- /dev/null +++ b/gui/core.tcl @@ -0,0 +1,196 @@ +# +# Copyright 2004-2008 University of Zagreb, Croatia. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +# SUCH DAMAGE. +# +# This work was supported in part by Croatian Ministry of Science +# and Technology through the research contract #IP-2003-143. +# + +if {[lindex $argv 0] == "-b" || [lindex $argv 0] == "--batch"} { + set argv [lrange $argv 1 end] + set execMode batch +} elseif {[lindex $argv 0] == "-c" || [lindex $argv 0] == "--closebatch"} { + set argv [lrange $argv 1 end] + set execMode closebatch +} elseif {[lindex $argv 0] == "--addons"} { + set argv [lrange $argv 1 end] + set execMode addons +} else { + set execMode interactive +} + +set LIBDIR "" +set SBINDIR "/usr/local/sbin" +set CONFDIR "." +set CORE_DATA_DIR "." +set CORE_STATE_DIR "." 
+set CORE_START_DIR "" +set CORE_USER "" +if { [info exists env(LIBDIR)] } { + set LIBDIR $env(LIBDIR) +} +if { [info exists env(SBINDIR)] } { + set SBINDIR $env(SBINDIR) +} +if { [info exists env(CONFDIR)] } { + set CONFDIR $env(CONFDIR) +} +if { [info exists env(CORE_DATA_DIR)] } { + set CORE_DATA_DIR $env(CORE_DATA_DIR) +} +if { [info exists env(CORE_STATE_DIR)] } { + set CORE_STATE_DIR $env(CORE_STATE_DIR) +} +if { [info exists env(CORE_START_DIR)] } { + set CORE_START_DIR $env(CORE_START_DIR) +} +if { [info exists env(CORE_USER)] } { + set CORE_USER $env(CORE_USER) +} + +source "$LIBDIR/version.tcl" + +source "$LIBDIR/linkcfg.tcl" +source "$LIBDIR/nodecfg.tcl" +source "$LIBDIR/ipv4.tcl" +source "$LIBDIR/ipv6.tcl" +source "$LIBDIR/cfgparse.tcl" +source "$LIBDIR/exec.tcl" +source "$LIBDIR/canvas.tcl" + +source "$LIBDIR/editor.tcl" +source "$LIBDIR/annotations.tcl" + +source "$LIBDIR/help.tcl" +source "$LIBDIR/filemgmt.tcl" + +source "$LIBDIR/ns2imunes.tcl" + + +source "$LIBDIR/mobility.tcl" +source "$LIBDIR/api.tcl" +source "$LIBDIR/wlan.tcl" +source "$LIBDIR/wlanscript.tcl" +source "$LIBDIR/util.tcl" +source "$LIBDIR/plugins.tcl" +source "$LIBDIR/nodes.tcl" +source "$LIBDIR/services.tcl" +source "$LIBDIR/traffic.tcl" +source "$LIBDIR/exceptions.tcl" + +# +# Global variables are initialized here +# +set node_list {} +set link_list {} +set annotation_list {} +set canvas_list {} +set eid e0 +set plot_list {} +array set exec_servers {} +loadServersConf ;# populate exec_servers + +# global vars +set showAPI 0 +set mac_byte4 0 +set mac_byte5 0 +set g_mrulist {} +initDefaultPrefs +loadDotFile +loadPluginsConf +checkCommandLineAddressPort +autoConnectPlugins + +set g_abort_session 0 + +# +# Initialization should be complete now, so let's start doing something... +# + +if {$execMode == "interactive"} { + # GUI-related files + source "$LIBDIR/widget.tcl" + source "$LIBDIR/tooltips.tcl" + source "$LIBDIR/initgui.tcl" + source "$LIBDIR/topogen.tcl" + source "$LIBDIR/graph_partitioning.tcl" + source "$LIBDIR/gpgui.tcl" + source "$LIBDIR/debug.tcl" + # Load all Tcl files from the addons directory + foreach file [glob -nocomplain -directory "$LIBDIR/addons" *.tcl] { + if { [catch { if { [file isfile $file ] } { source "$file"; } } e] } { + puts "*** Error loading addon file: $file" + puts " $e" + } + } + setOperMode edit + fileOpenStartUp + foreach arg $argv { + if { $arg == "--start" } { + global currentFile + if { [file extension $currentFile] == ".xml" } { + after 100; update; # yield to other events so XML file + after 100; update; # can be loaded and received + } + startStopButton "exec"; break; + } + } +# Boeing changed elseif to catch batch and else to output error +} elseif {$execMode == "batch"} { + puts "batch execute $argv" + set sock [lindex [getEmulPlugin "*"] 2] + if { $sock == "" || $sock == "-1" || $sock == -1 } { exit.real; } + if {$argv != ""} { + global currentFile + set currentFile [argAbsPathname $argv] + set fileId [open $currentFile r] + set cfg "" + foreach entry [read $fileId] { + lappend cfg $entry + } + close $fileId + after 100 { + loadCfg $cfg + deployCfgAPI $sock + puts "Waiting to enter RUNTIME state..." + } + global vwaitdummy + vwait vwaitdummy + } +} elseif {$execMode == "closebatch"} { + global g_session_choice + set g_session_choice $argv + puts "Attempting to close session $argv ..." 
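+    # vwait on a dummy variable keeps the Tcl event loop running while the
+    # close request is processed asynchronously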
+ global vwaitdummy + vwait vwaitdummy +} elseif {$execMode == "addons"} { + # pass control to included addons code + foreach file [glob -nocomplain -directory "$LIBDIR/addons" *.tcl] { + if { [file isfile $file ] } { source "$file"; } + } + global vwaitdummy + vwait vwaitdummy +} else { + puts "ERROR: execMode is not set in core.tcl" +} + diff --git a/gui/debug.tcl b/gui/debug.tcl new file mode 100644 index 00000000..60e45cbc --- /dev/null +++ b/gui/debug.tcl @@ -0,0 +1,46 @@ +.menubar.tools add command -label "Debugger..." -command popupDebugger + +set g_last_debug_cmd "puts \"Hello world\"" + +proc popupDebugger {} { + global g_last_debug_cmd + + set wi .debugger + catch { destroy $wi } + toplevel $wi + + wm transient $wi . + wm resizable $wi 300 200 + wm title $wi "CORE Debugger" + + frame $wi.dbg -borderwidth 4 + label $wi.dbg.label1 \ + -text "Enter TCL/Tk commands below, press Run to evaluate:" + text $wi.dbg.cmd -bg white -width 100 -height 3 + + pack $wi.dbg.label1 $wi.dbg.cmd -side top -anchor w -padx 4 -pady 4 + pack $wi.dbg -side top + + $wi.dbg.cmd insert end "$g_last_debug_cmd" + + frame $wi.btn + # evaluate debugging commands entered into the text box below + button $wi.btn.exec -text "Run" -command { + global g_last_debug_cmd + set wi .debugger + set i 1 + set g_last_debug_cmd "" + while { 1 } { + set cmd [$wi.dbg.cmd get $i.0 $i.end] + set g_last_debug_cmd "$g_last_debug_cmd$cmd\n" + if { $cmd == "" } { break } + catch { eval $cmd } output + puts $output + incr i + } + } + button $wi.btn.close -text "Close" -command "destroy .debugger" + + pack $wi.btn.exec $wi.btn.close -side left -padx 4 -pady 4 + pack $wi.btn -side bottom +} diff --git a/gui/editor.tcl b/gui/editor.tcl new file mode 100644 index 00000000..fec8a499 --- /dev/null +++ b/gui/editor.tcl @@ -0,0 +1,5351 @@ +# +# Copyright 2004-2008 University of Zagreb, Croatia. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +# SUCH DAMAGE. +# +# This work was supported in part by the Croatian Ministry of Science +# and Technology through the research contract #IP-2003-143. +# + +#****h* imunes/editor.tcl +# NAME +# editor.tcl -- file used for defining functions that can be used in +# edit mode as well as all the functions which change the appearance +# of the imunes GUI. 
+# FUNCTION +# This module is used for defining all possible actions in imunes +# edit mode. It is also used for all the GUI related actions. +#**** + + +proc animateCursor {} { + global cursorState + global clock_seconds + + if { [clock seconds] == $clock_seconds } { + update + return + } + set clock_seconds [clock seconds] + if { $cursorState } { + .c config -cursor watch + set cursorState 0 + } else { + .c config -cursor pirate + set cursorState 1 + } + update +} + +#****f* editor.tcl/removeGUILink +# NAME +# removeGUILink -- remove link from GUI +# SYNOPSIS +# renoveGUILink $link_id $atomic +# FUNCTION +# Removes link from GUI. It removes standard links as well as +# split links and links connecting nodes on different canvases. +# INPUTS +# * link_id -- the link id +# * atomic -- defines if the remove was atomic action or a part +# of a composed, non-atomic action (relevant for updating log +# for undo). +#**** + +proc removeGUILink { link atomic } { + global changed + + set nodes [linkPeers $link] + set node1 [lindex $nodes 0] + set node2 [lindex $nodes 1] + if { [nodeType $node1] == "pseudo" } { + removeLink [getLinkMirror $link] + removeLink $link + removeNode [getNodeMirror $node1] + removeNode $node1 + .c delete $node1 + } elseif { [nodeType $node2] == "pseudo" } { + removeLink [getLinkMirror $link] + removeLink $link + removeNode [getNodeMirror $node2] + removeNode $node2 + .c delete $node2 + } else { + removeLink $link + } + .c delete $link + if { $atomic == "atomic" } { + set changed 1 + updateUndoLog + } +} + +#****f* editor.tcl/removeGUINode +# NAME +# removeGUINode -- remove node from GUI +# SYNOPSIS +# renoveGUINode $node_id +# FUNCTION +# Removes node from GUI. When removing a node from GUI the links +# connected to that node are also removed. +# INPUTS +# * node_id -- node id +#**** + +proc removeGUINode { node } { + set type [nodeType $node] + foreach ifc [ifcList $node] { + set peer [peerByIfc $node $ifc] + set link [lindex [.c gettags "link && $node && $peer"] 1] + removeGUILink $link non-atomic + } + if { [lsearch -exact "oval rectangle label text marker" $type] != -1 } { + deleteAnnotation .c $type $node + } elseif { $type != "pseudo" } { + removeNode $node + .c delete $node + } +} + +#****f* editor.tcl/updateUndoLog +# NAME +# updateUndoLog -- update the undo log +# SYNOPSIS +# updateUndoLog +# FUNCTION +# Updates the undo log. Writes the current configuration to the +# undolog array and updates the undolevel variable. +#**** + +proc updateUndoLog {} { + global changed undolog undolevel redolevel + + if { $changed } { + global t_undolog undolog + set t_undolog "" + dumpCfg string t_undolog + incr undolevel + set undolog($undolevel) $t_undolog + set redolevel $undolevel + updateUndoRedoMenu "" +# Boeing: XXX why is this set here? + set changed 0 + } +} + +#****f* editor.tcl/undo +# NAME +# undo -- undo function +# SYNOPSIS +# undo +# FUNCTION +# Undo the change. Reads the undolog and updates the current +# configuration. Reduces the value of undolevel. +#**** + +proc undo {} { + global undolevel undolog oper_mode + + if {$oper_mode == "edit" && $undolevel > 0} { + incr undolevel -1 + updateUndoRedoMenu "" + .c config -cursor watch + loadCfg $undolog($undolevel) + switchCanvas none + } +} + +#****f* editor.tcl/redo +# NAME +# redo +# SYNOPSIS +# redo +# FUNCTION +# Redo the change if possible (redolevel is greater than +# undolevel). Reads the configuration from undolog and +# updates the current configuration. Increases the value +# of undolevel. 
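+# Both undo and redo restore a full configuration snapshot kept in the
+# global undolog array, indexed by undolevel.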
+#**** + +proc redo {} { + global undolevel redolevel undolog oper_mode + + if {$oper_mode == "edit" && $redolevel > $undolevel} { + incr undolevel + updateUndoRedoMenu "" + .c config -cursor watch + loadCfg $undolog($undolevel) + switchCanvas none + } +} + +proc updateUndoRedoMenu { forced } { + global undolevel redolevel + + if { $forced == "" } { + if { $undolevel > 0 } { set undo "normal" } else { set undo "disabled" } + if { $redolevel > $undolevel } { set redo "normal" + } else { set redo "disabled" } + } else { + set undo $forced + set redo $forced + } + + .menubar.edit entryconfigure "Undo" -state $undo + .menubar.edit entryconfigure "Redo" -state $redo +} + +#****f* editor.tcl/redrawAll +# NAME +# redrawAll +# SYNOPSIS +# redrawAll +# FUNCTION +# Redraws all the objects on the current canvas. +#**** + + +proc redrawAll {} { + global node_list plot_list link_list annotation_list plot_list background sizex sizey grid + global curcanvas zoom + global showAnnotations showGrid + + #Call_Trace ;# debugging when things disappear + + .bottom.zoom config -text "zoom [expr {int($zoom * 100)}]%" + set e_sizex [expr {int($sizex * $zoom)}] + set e_sizey [expr {int($sizey * $zoom)}] + set border 28 + .c configure -scrollregion \ + "-$border -$border [expr {$e_sizex + $border}] \ + [expr {$e_sizey + $border}]" + + + saveRestoreWlanLinks .c save + .c delete all + set background [.c create rectangle 0 0 $e_sizex $e_sizey \ + -fill white -tags "background"] + # Boeing: wallpaper + set wallpaper [lindex [getCanvasWallpaper $curcanvas] 0] + set wallpaperStyle [lindex [getCanvasWallpaper $curcanvas] 1] + if { $wallpaper != "" } { + drawWallpaper .c $wallpaper $wallpaperStyle + } + # end Boeing + + if { $showAnnotations == 1 } { + foreach obj $annotation_list { + # fix annotations having no canvas (from old config) + if { [getNodeCanvas $obj] == "" } { setNodeCanvas $obj $curcanvas} + if { [getNodeCanvas $obj] == $curcanvas } { + drawAnnotation $obj + } + } + } + + # Grid + set e_grid [expr {int($grid * $zoom)}] + set e_grid2 [expr {$e_grid * 2}] + if { $showGrid } { + for { set x $e_grid } { $x < $e_sizex } { incr x $e_grid } { + if { [expr {$x % $e_grid2}] != 0 } { + if { $zoom > 0.5 } { + .c create line $x 1 $x $e_sizey \ + -fill gray -dash {1 7} -tags "grid" + } + } else { + .c create line $x 1 $x $e_sizey -fill gray -dash {1 3} \ + -tags "grid" + } + } + for { set y $e_grid } { $y < $e_sizey } { incr y $e_grid } { + if { [expr {$y % $e_grid2}] != 0 } { + if { $zoom > 0.5 } { + .c create line 1 $y $e_sizex $y \ + -fill gray -dash {1 7} -tags "grid" + } + } else { + .c create line 1 $y $e_sizex $y -fill gray -dash {1 3} \ + -tags "grid" + } + } + } + + .c lower -withtags background + + foreach node $node_list { + if { [getNodeCanvas $node] == $curcanvas } { + drawNode .c $node + } + } + + redrawAllThruplots + foreach link $link_list { + set nodes [linkPeers $link] + if { [getNodeCanvas [lindex $nodes 0]] != $curcanvas || + [getNodeCanvas [lindex $nodes 1]] != $curcanvas } { + continue + } + drawLink $link + redrawLink $link + updateLinkLabel $link + } + saveRestoreWlanLinks .c restore + + .c config -cursor left_ptr + + raiseAll .c +} + +#****f* editor.tcl/drawNode +# NAME +# drawNode +# SYNOPSIS +# drawNode node_id +# FUNCTION +# Draws the specified node. Draws node's image (router pc +# host lanswitch rj45 hub pseudo) and label. +# The visibility of the label depends on the showNodeLabels +# variable for all types of nodes and on invisible variable +# for pseudo nodes. 
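+# A custom icon configured for the node replaces the default image for
+# its type.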
+# INPUTS +# * node_id -- node id +#**** + +proc drawNode { c node } { + global showNodeLabels + global router pc host lanswitch rj45 hub pseudo OVS + global curcanvas zoom + global wlan + if { $c == "" } { set c .c } ;# default canvas + + set type [nodeType $node] + set coords [getNodeCoords $node] + set x [expr {[lindex $coords 0] * $zoom}] + set y [expr {[lindex $coords 1] * $zoom}] + # special handling for custom images, dummy nodes + # could move this to separate getImage function + set model "" + set cimg "" + set imgzoom $zoom + if { $zoom == 0.75 || $zoom == 1.5 } { set imgzoom 1.0 } + if { $type == "router" || $type == "OVS" } { + set model [getNodeModel $node] + set cimg [getNodeTypeImage $model normal] + } + set tmp [absPathname [getCustomImage $node]] + if { $tmp != "" } { set cimg $tmp } + if { $cimg != "" } { + # name of global variable storing the image is the filename without path + set img [file tail $cimg] + # create the variable if the image hasn't been loaded before + global [set img] + if { ![info exists $img] } { + if { [catch { + set [set img] [image create photo -file $cimg] + createScaledImages $img + } e ] } { ;# problem loading image file + puts "icon error: $e" + set cimg "" ;# fall back to default model icon + setCustomImage $node "" ;# prevent errors elsewhere + } + } + if { $cimg != "" } { ;# only if image file loaded + global $img$imgzoom + $c create image $x $y -image [set $img$imgzoom] -tags "node $node" + } + } + if { $cimg == "" } { + if { $type == "pseudo" } { + $c create image $x $y -image [set $type] -tags "node $node" + } else { + # create scaled images based on zoom level + global $type$imgzoom + $c create image $x $y -image [set $type$imgzoom] \ + -tags "node $node" + } + } + set coords [getNodeLabelCoords $node] + set x [expr {[lindex $coords 0] * $zoom}] + set y [expr {[lindex $coords 1] * $zoom}] + if { [nodeType $node] != "pseudo" } { ;# Boeing: show remote server + set loc [getNodeLocation $node] + set labelstr0 "" + if { $loc != "" } { set labelstr0 "([getNodeLocation $node]):" } + set labelstr1 [getNodeName $node]; + set labelstr2 "" + if [info exists getNodePartition] { [getNodePartition $node]; } + set l [format "%s%s\n%s" $labelstr0 $labelstr1 $labelstr2]; + set label [$c create text $x $y -fill blue \ + -text "$l" \ + -tags "nodelabel $node"] + } else { + set pnode [getNodeName $node] + set pcanvas [getNodeCanvas $pnode] + set ifc [ifcByPeer $pnode [getNodeMirror $node]] + if { $pcanvas != $curcanvas } { + set label [$c create text $x $y -fill blue \ + -text "[getNodeName $pnode]:$ifc +@[getCanvasName $pcanvas]" \ + -tags "nodelabel $node" -justify center] + } else { + set label [$c create text $x $y -fill blue \ + -text "[getNodeName $pnode]:$ifc" \ + -tags "nodelabel $node" -justify center] + } + } + if { $showNodeLabels == 0} { + $c itemconfigure $label -state hidden + } + global invisible + if { $invisible == 1 && [nodeType $node] == "pseudo" } { + $c itemconfigure $label -state hidden + } +} + +#****f* editor.tcl/drawLink +# NAME +# drawLink +# SYNOPSIS +# drawLink link_id +# FUNCTION +# Draws the specified link. An arrow is displayed for links +# connected to pseudo nodes. If the variable invisible +# is specified link connecting a pseudo node stays hidden. 
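+# Links involving a wlan node are kept hidden and represented by antenna
+# icons instead.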
+# INPUTS +# * link_id -- link id +#**** + +proc drawLink { link } { + set nodes [linkPeers $link] + set lnode1 [lindex $nodes 0] + set lnode2 [lindex $nodes 1] + set lwidth [getLinkWidth $link] + if { [getLinkMirror $link] != "" } { + set newlink [.c create line 0 0 0 0 \ + -fill [getLinkColor $link] -width $lwidth \ + -tags "link $link $lnode1 $lnode2" -arrow both] + } else { + set newlink [.c create line 0 0 0 0 \ + -fill [getLinkColor $link] -width $lwidth \ + -tags "link $link $lnode1 $lnode2"] + } + # Boeing: links between two nodes on different servers + if { [getNodeLocation $lnode1] != [getNodeLocation $lnode2]} { + .c itemconfigure $newlink -dash ","; + } + # end Boeing + # XXX Invisible pseudo-liks + global invisible + if { $invisible == 1 && [getLinkMirror $link] != "" } { + .c itemconfigure $link -state hidden + } + # Boeing: wlan links are hidden + if { [nodeType $lnode1] == "wlan" || [nodeType $lnode2] == "wlan" } { + global zoom + set imgzoom $zoom + if { $zoom == 0.75 || $zoom == 1.5 } { set imgzoom 1.0 } + global antenna$imgzoom + .c itemconfigure $link -state hidden + .c create image 0 0 -image [set antenna$imgzoom] \ + -tags "antenna $lnode2 $link" + .c create text 0 0 -tags "interface $lnode1 $link" -justify center + .c create text 0 0 -tags "interface $lnode2 $link" -justify center + .c raise interface "link || linklabel || background" + } else { + .c raise $newlink background + .c create text 0 0 -tags "linklabel $link" -justify center + .c create text 0 0 -tags "interface $lnode1 $link" -justify center + .c create text 0 0 -tags "interface $lnode2 $link" -justify center + .c raise linklabel "link || background" + .c raise interface "link || linklabel || background" + } + foreach n [list $lnode1 $lnode2] { + if { [getNodeHidden $n] } { + hideNode $n + statline "Hidden node(s) exist." + } + } +} + + +# draw a green link between wireless nodes (or other color if multiple WLANs) +# WLAN links appear on the canvas but not in the global link_list +proc drawWlanLink { node1 node2 wlan } { + global zoom defLinkWidth curcanvas + set c .c + + set wlanlink [$c find withtag "wlanlink && $node1 && $node2 && $wlan"] + if { $wlanlink != "" } { + return $wlanlink ;# already exists + } + + set color [getWlanColor $wlan] + + set xy [getNodeCoords $node1] + set x [lindex $xy 0]; set y [lindex $xy 1] + set pxy [getNodeCoords $node2] + set px [lindex $pxy 0]; set py [lindex $pxy 1] + + set wlanlink [$c create line [expr {$x*$zoom}] [expr {$y*$zoom}] \ + [expr {$px*$zoom}] [expr {$py*$zoom}] \ + -fill $color -width $defLinkWidth \ + -tags "wlanlink $node1 $node2 $wlan"] + + if { [getNodeCanvas $node1] == $curcanvas && + [getNodeCanvas $node2] == $curcanvas} { + $c itemconfigure $wlanlink -state normal + $c raise $wlanlink "background || grid || oval || rectangle" + } else { + $c itemconfigure $wlanlink -state hidden + } + + return $wlanlink +} + + +#****f* editor.tcl/chooseIfName +# NAME +# chooseIfName -- choose interface name +# SYNOPSIS +# set ifcName [chooseIfName $lnode1 $lnode2] +# FUNCTION +# Choose intreface name. 
The name can be: +# * eth -- for interface connecting pc, host and router +# * e -- for interface connecting hub and lanswitch +# INPUTS +# * link_id -- link id +# RESULT +# * ifcName -- the name of the interface +#**** + +proc chooseIfName { lnode1 lnode2 } { + global $lnode1 $lnode2 + + # TODO: just check if layer == NETWORK and return eth, LINK return e + switch -exact -- [nodeType $lnode1] { + pc { + return eth + } + host { + return eth + } + hub { + return e + } + lanswitch { + return e + } + router { + return eth + } + rj45 { + return + } + tunnel { + return e + } + ktunnel { + return + } + wlan { + return e + } + default { + return eth +# end Boeing: below + } + } +} + + +#****f* editor.tcl/listLANNodes +# NAME +# listLANNodes -- list LAN nodes +# SYNOPSIS +# set l2peers [listLANNodes $l2node $l2peers] +# FUNCTION +# Recursive function for finding all link layer nodes that are +# connected to node l2node. Returns the list of all link layer +# nodes that are on the same LAN as l2node. +# INPUTS +# * l2node -- node id of a link layer node +# * l2peers -- old link layer nodes on the same LAN +# RESULT +# * l2peers -- new link layer nodes on the same LAN +#**** + +proc listLANnodes { l2node l2peers } { + lappend l2peers $l2node + foreach ifc [ifcList $l2node] { + set peer [logicalPeerByIfc $l2node $ifc] + set type [nodeType $peer] + # Boeing + if { [ lsearch {lanswitch hub wlan} $type] != -1 } { + if { [lsearch $l2peers $peer] == -1 } { + set l2peers [listLANnodes $peer $l2peers] + } + } + } + return $l2peers +} + +#****f* editor.tcl/calcDxDy +# NAME +# calcDxDy lnode -- list LAN nodes +# SYNOPSIS +# calcDxDy $lnode +# FUNCTION +# Calculates dx and dy variables of the calling function. +# INPUTS +# * lnode -- node id of a node whose dx and dy coordinates are +# calculated +#**** + +proc calcDxDy { lnode } { + global showIfIPaddrs showIfIPv6addrs zoom + upvar dx x + upvar dy y + + if { $zoom > 1.0 } { + set x 1 + set y 1 + return + } + switch -exact -- [nodeType $lnode] { + hub { + set x [expr {1.5 / $zoom}] + set y [expr {2.6 / $zoom}] + } + lanswitch { + set x [expr {1.5 / $zoom}] + set y [expr {2.6 / $zoom}] + } + router { + set x [expr {1 / $zoom}] + set y [expr {2 / $zoom}] + } + rj45 { + set x [expr {1 / $zoom}] + set y [expr {1 / $zoom}] + } + tunnel { + set x [expr {1 / $zoom}] + set y [expr {1 / $zoom}] + } + wlan { + set x [expr {1.5 / $zoom}] + set y [expr {2.6 / $zoom}] + } + default { + set x [expr {1 / $zoom}] + set y [expr {2 / $zoom}] + } + } + return +} + +#****f* editor.tcl/updateIfcLabel +# NAME +# updateIfcLabel -- update interface label +# SYNOPSIS +# updateIfcLabel $lnode1 $lnode2 +# FUNCTION +# Updates the interface label, including interface name, +# interface state (* for interfaces that are down), IPv4 +# address and IPv6 address. +# INPUTS +# * lnode1 -- node id of a node where the interface resides +# * lnode2 -- node id of the node that is connected by this +# interface. 
+#**** +proc updateIfcLabel { lnode1 lnode2 } { + global showIfNames showIfIPaddrs showIfIPv6addrs + + set link [lindex [.c gettags "link && $lnode1 && $lnode2"] 1] + set ifc [ifcByPeer $lnode1 $lnode2] + set ifipv4addr [getIfcIPv4addr $lnode1 $ifc] + set ifipv6addr [getIfcIPv6addr $lnode1 $ifc] + if { $ifc == 0 } { + set ifc "" + } + if { [getIfcOperState $lnode1 $ifc] == "down" } { + set labelstr "*" + } else { + set labelstr "" + } + if { $showIfNames } { + set labelstr "$labelstr$ifc +" + } + if { $showIfIPaddrs && $ifipv4addr != "" } { + set labelstr "$labelstr$ifipv4addr +" + } + if { $showIfIPv6addrs && $ifipv6addr != "" } { + set labelstr "$labelstr$ifipv6addr +" + } + set labelstr \ + [string range $labelstr 0 [expr {[string length $labelstr] - 2}]] + .c itemconfigure "interface && $lnode1 && $link" \ + -text "$labelstr" + # Boeing: hide ifc label on wlans + if { [nodeType $lnode1] == "wlan" } { + .c itemconfigure "interface && $lnode1 && $link" -state hidden + } +} + + +#****f* editor.tcl/updateLinkLabel +# NAME +# updateLinkLabel -- update link label +# SYNOPSIS +# updateLinkLabel $link +# FUNCTION +# Updates the link label, including link bandwidth, link delay, +# BER and duplicate values. +# INPUTS +# * link -- link id of the link whose labels are updated. +#**** +proc updateLinkLabel { link } { + global showLinkLabels + + set bwstr [getLinkBandwidthString $link] + set delstr [getLinkDelayString $link] + set berstr [getLinkBERString $link] + set dupstr [getLinkDupString $link] + set labelstr " +" + if { "$bwstr" != "" } { + set labelstr "$labelstr$bwstr +" + } + if { "$delstr" != "" } { + set labelstr "$labelstr$delstr +" + } + if { "$berstr" != "" } { + set labelstr "$labelstr$berstr +" + } + if { "$dupstr" != "" } { + set labelstr "$labelstr$dupstr +" + } + set labelstr \ + [string range $labelstr 0 [expr {[string length $labelstr] - 2}]] + .c itemconfigure "linklabel && $link" -text "$labelstr" + if { $showLinkLabels == 0} { + .c itemconfigure "linklabel && $link" -state hidden + } +} + + +#****f* editor.tcl/redrawAllLinks +# NAME +# redrawAllLinks -- redraw all links +# SYNOPSIS +# redrawAllLinks +# FUNCTION +# Redraws all links on the current canvas. +#**** +proc redrawAllLinks {} { + global link_list curcanvas + + foreach link $link_list { + set nodes [linkPeers $link] + if { [getNodeCanvas [lindex $nodes 0]] != $curcanvas || + [getNodeCanvas [lindex $nodes 1]] != $curcanvas } { + continue + } + redrawLink $link + } +} + + +#****f* editor.tcl/redrawLink +# NAME +# redrawLink -- redraw a links +# SYNOPSIS +# redrawLink $link +# FUNCTION +# Redraws the specified link. 
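+# The link label, both interface labels and, for wireless links, the
+# antenna icon are repositioned along with the line.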
+# INPUTS +# * link -- link id +#**** +proc redrawLink { link } { + global $link + + set limages [.c find withtag "link && $link"] + set limage1 [lindex $limages 0] + set limage2 [lindex $limages 1] + set tags [.c gettags $limage1] + set link [lindex $tags 1] + set lnode1 [lindex $tags 2] + set lnode2 [lindex $tags 3] + + set coords1 [.c coords "node && $lnode1"] + set coords2 [.c coords "node && $lnode2"] + set x1 [lindex $coords1 0] + set y1 [lindex $coords1 1] + set x2 [lindex $coords2 0] + set y2 [lindex $coords2 1] + + .c coords $limage1 $x1 $y1 $x2 $y2 + .c coords $limage2 $x1 $y1 $x2 $y2 + + set lx [expr {0.5 * ($x1 + $x2)}] + set ly [expr {0.5 * ($y1 + $y2)}] + .c coords "linklabel && $link" $lx $ly + + set n [expr {sqrt (($x1 - $x2) * ($x1 - $x2) + \ + ($y1 - $y2) * ($y1 - $y2)) * 0.015}] + if { $n < 1 } { + set n 1 + } + + calcDxDy $lnode1 + set lx [expr {($x1 * ($n * $dx - 1) + $x2) / $n / $dx}] + set ly [expr {($y1 * ($n * $dy - 1) + $y2) / $n / $dy}] + .c coords "interface && $lnode1 && $link" $lx $ly + updateIfcLabel $lnode1 $lnode2 + + calcDxDy $lnode2 + set lx [expr {($x1 + $x2 * ($n * $dx - 1)) / $n / $dx}] + set ly [expr {($y1 + $y2 * ($n * $dy - 1)) / $n / $dy}] + .c coords "interface && $lnode2 && $link" $lx $ly + updateIfcLabel $lnode2 $lnode1 + # Boeing - wlan antennas + if { [nodeType $lnode1] == "wlan" } { + global zoom + set an [lsearch -exact [findWlanNodes $lnode2] $lnode1] + if { $an < 0 || $an >= 5 } { set an 0 } + set dx [expr {20 - (10*$an)}] + .c coords "antenna && $lnode2 && $link" [expr {$x2-($dx*$zoom)}] \ + [expr {$y2-(20*$zoom)}] + } +} + +# Boeing +proc redrawWlanLink { link } { + global $link + + set tags [.c gettags $link] + set lnode1 [lindex $tags 1] + set lnode2 [lindex $tags 2] + set coords1 [.c coords "node && $lnode1"] + set coords2 [.c coords "node && $lnode2"] + set x1 [lindex $coords1 0] + set y1 [lindex $coords1 1] + set x2 [lindex $coords2 0] + set y2 [lindex $coords2 1] + set lx [expr {0.5 * ($x1 + $x2)}] + set ly [expr {0.5 * ($y1 + $y2)}] + + .c coords $link $x1 $y1 $x2 $y2 + .c coords "linklabel && $lnode2 && $lnode1" $lx $ly + + return +} +# end Boeing + +#****f* editor.tcl/splitGUILink +# NAME +# splitGUILink -- splits a links +# SYNOPSIS +# splitGUILink $link +# FUNCTION +# Splits the link and draws new links and new pseudo nodes +# on the canvas. 
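+# The two pseudo nodes mirror each other and are placed at 40% and 60%
+# of the way along the original link.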
+# INPUTS +# * link -- link id +#**** +proc splitGUILink { link } { + global changed zoom + + set peer_nodes [linkPeers $link] + set new_nodes [splitLink $link pseudo] + set orig_node1 [lindex $peer_nodes 0] + set orig_node2 [lindex $peer_nodes 1] + set new_node1 [lindex $new_nodes 0] + set new_node2 [lindex $new_nodes 1] + set new_link1 [linkByPeers $orig_node1 $new_node1] + set new_link2 [linkByPeers $orig_node2 $new_node2] + setLinkMirror $new_link1 $new_link2 + setLinkMirror $new_link2 $new_link1 + setNodeMirror $new_node1 $new_node2 + setNodeMirror $new_node2 $new_node1 + setNodeName $new_node1 $orig_node2 + setNodeName $new_node2 $orig_node1 + + set x1 [lindex [getNodeCoords $orig_node1] 0] + set y1 [lindex [getNodeCoords $orig_node1] 1] + set x2 [lindex [getNodeCoords $orig_node2] 0] + set y2 [lindex [getNodeCoords $orig_node2] 1] + + setNodeCoords $new_node1 \ + "[expr {($x1 + 0.4 * ($x2 - $x1)) / $zoom}] \ + [expr {($y1 + 0.4 * ($y2 - $y1)) / $zoom}]" + setNodeCoords $new_node2 \ + "[expr {($x1 + 0.6 * ($x2 - $x1)) / $zoom}] \ + [expr {($y1 + 0.6 * ($y2 - $y1)) / $zoom}]" + setNodeLabelCoords $new_node1 [getNodeCoords $new_node1] + setNodeLabelCoords $new_node2 [getNodeCoords $new_node2] + + set changed 1 + updateUndoLog + redrawAll +} + + +#****f* editor.tcl/selectNode +# NAME +# selectNode -- select node +# SYNOPSIS +# selectNode $c $obj +# FUNCTION +# Crates the selecting box around the specified canvas +# object. +# INPUTS +# * c -- tk canvas +# * obj -- tk canvas object tag id +#**** +proc selectNode { c obj } { + set node [lindex [$c gettags $obj] 1] + if { $node == "" } { return } ;# Boeing: fix occassional error + $c addtag selected withtag "node && $node" + if { [nodeType $node] == "pseudo" } { + set bbox [$c bbox "nodelabel && $node"] + } elseif { [nodeType $node] == "rectangle" } { + $c addtag selected withtag "rectangle && $node" + set bbox [$c bbox "rectangle && $node"] + } elseif { [nodeType $node] == "text" } { + $c addtag selected withtag "text && $node" + set bbox [$c bbox "text && $node"] + } elseif { [nodeType $node] == "oval" } { + $c addtag selected withtag "oval && $node" + set bbox [$c bbox "oval && $node"] + } else { + set bbox [$c bbox "node && $node"] + } + set bx1 [expr {[lindex $bbox 0] - 2}] + set by1 [expr {[lindex $bbox 1] - 2}] + set bx2 [expr {[lindex $bbox 2] + 1}] + set by2 [expr {[lindex $bbox 3] + 1}] + $c delete -withtags "selectmark && $node" + $c create line $bx1 $by1 $bx2 $by1 $bx2 $by2 $bx1 $by2 $bx1 $by1 \ + -dash {6 4} -fill black -width 1 -tags "selectmark $node" +} + +proc selectNodes { nodelist } { + foreach node $nodelist { + selectNode .c [.c find withtag "node && $node"] + } +} + +proc selectedNodes {} { + set selected {} + foreach obj [.c find withtag "node && selected"] { + lappend selected [lindex [.c gettags $obj] 1] + } + foreach obj [.c find withtag "oval && selected"] { + lappend selected [lindex [.c gettags $obj] 1] + } + foreach obj [.c find withtag "rectangle && selected"] { + lappend selected [lindex [.c gettags $obj] 1] + } + foreach obj [.c find withtag "text && selected"] { + lappend selected [lindex [.c gettags $obj] 1] + } + return $selected +} + +proc selectedRealNodes {} { + set selected {} + foreach obj [.c find withtag "node && selected"] { + set node [lindex [.c gettags $obj] 1] + if { [getNodeMirror $node] != "" || + [nodeType $node] == "rj45" } { + continue + } + lappend selected $node + } + return $selected +} + +proc selectAdjacent {} { + global curcanvas + + set selected [selectedNodes] + set adjacent {} + 
foreach node $selected { + foreach ifc [ifcList $node] { + set peer [peerByIfc $node $ifc] + if { [getNodeMirror $peer] != "" } { + return + } + if { [lsearch $adjacent $peer] < 0 } { + lappend adjacent $peer + } + } + } + selectNodes $adjacent +} + +#****f* editor.tcl/button3link +# NAME +# button3link +# SYNOPSIS +# button3link $c $x $y +# FUNCTION +# This procedure is called when a right mouse button is +# clicked on the canvas. If there is a link on the place of +# mouse click this procedure creates and configures a popup +# menu. The options in the menu are: +# * Configure -- configure the link +# * Delete -- delete the link +# * Split -- split the link +# * Merge -- this option is active only if the link is previously +# been split, by this action the link is merged. +# INPUTS +# * c -- tk canvas +# * x -- x coordinate for popup menu +# * y -- y coordinate for popup menu +#**** +proc button3link { c x y } { + global oper_mode env eid canvas_list node_list + global curcanvas + + set link [lindex [$c gettags {link && current}] 1] + if { $link == "" } { + set link [lindex [$c gettags {linklabel && current}] 1] + if { $link == "" } { + return + } + } + + .button3menu delete 0 end + + # + # Configure link + # + .button3menu add command -label "Configure" \ + -command "popupConfigDialog $c" + + # + # Delete link + # + if { $oper_mode != "exec" } { + .button3menu add command -label "Delete" \ + -command "removeGUILink $link atomic" + } else { + .button3menu add command -label "Delete" \ + -state disabled + } + + # + # Split link + # + if { $oper_mode != "exec" && [getLinkMirror $link] == "" } { + .button3menu add command -label "Split" \ + -command "splitGUILink $link" + } else { + .button3menu add command -label "Split" \ + -state disabled + } + + # + # Merge two pseudo nodes / links + # + if { $oper_mode != "exec" && [getLinkMirror $link] != "" && + [getNodeCanvas [getNodeMirror [lindex [linkPeers $link] 1]]] == + $curcanvas } { + .button3menu add command -label "Merge" \ + -command "mergeGUINode [lindex [linkPeers $link] 1]" + } else { + .button3menu add command -label "Merge" -state disabled + } + + set x [winfo pointerx .] + set y [winfo pointery .] + tk_popup .button3menu $x $y +} + + +#****f* editor.tcl/movetoCanvas +# NAME +# movetoCanvas -- move to canvas +# SYNOPSIS +# movetoCanvas $canvas +# FUNCTION +# This procedure moves all the nodes selected in the GUI to +# the specified canvas. +# INPUTS +# * canvas -- canvas id. 
+#**** +proc movetoCanvas { canvas } { + global changed + + set selected_nodes [selectedNodes] + foreach node $selected_nodes { + setNodeCanvas $node $canvas + set changed 1 + } + foreach obj [.c find withtag "linklabel"] { + set link [lindex [.c gettags $obj] 1] + set link_peers [linkPeers $link] + set peer1 [lindex $link_peers 0] + set peer2 [lindex $link_peers 1] + set peer1_in_selected [lsearch $selected_nodes $peer1] + set peer2_in_selected [lsearch $selected_nodes $peer2] + if { ($peer1_in_selected == -1 && $peer2_in_selected != -1) || + ($peer1_in_selected != -1 && $peer2_in_selected == -1) } { + if { [nodeType $peer2] == "pseudo" } { + setNodeCanvas $peer2 $canvas + if { [getNodeCanvas [getNodeMirror $peer2]] == $canvas } { + mergeLink $link + } + continue + } + set new_nodes [splitLink $link pseudo] + set new_node1 [lindex $new_nodes 0] + set new_node2 [lindex $new_nodes 1] + setNodeMirror $new_node1 $new_node2 + setNodeMirror $new_node2 $new_node1 + setNodeName $new_node1 $peer2 + setNodeName $new_node2 $peer1 + set link1 [linkByPeers $peer1 $new_node1] + set link2 [linkByPeers $peer2 $new_node2] + setLinkMirror $link1 $link2 + setLinkMirror $link2 $link1 + } + } + updateUndoLog + redrawAll +} + + +#****f* editor.tcl/mergeGUINode +# NAME +# mergeGUINode -- merge GUI node +# SYNOPSIS +# mergeGUINode $node +# FUNCTION +# This procedure removes the specified pseudo node as well +# as it's mirror copy. Also this procedure removes the +# pseudo links and reestablish the original link between +# the non-pseudo nodes. +# INPUTS +# * node -- node id of a pseudo node. +#**** +proc mergeGUINode { node } { + set link [lindex [linkByIfc $node [ifcList $node]] 0] + mergeLink $link + redrawAll +} + + +#****f* editor.tcl/button3node +# NAME +# button3node +# SYNOPSIS +# button3node $c $x $y +# FUNCTION +# This procedure is called when a right mouse button is +# clicked on the canvas. Also called when double-clicking +# on a node during runtime. +# If there is a node on the place of +# mouse click this procedure creates and configures a popup +# menu. The options in the menu are: +# * Configure -- configure the node +# * Create link to -- create a link to any available node, +# it can be on the same canvas or on a different canvas. +# * Move to -- move to some other canvas +# * Merge -- this option is available only for pseudo nodes +# that have mirror nodes on the same canvas (Pseudo nodes +# created by splitting a link). +# * Delete -- delete the node +# * Shell window -- specifies the shell window to open in +# exec mode. This option is available only to nodes on a +# network layer +# * Ethereal -- opens a Ethereal program for the specified +# node and the specified interface. This option is available +# only for network layer nodes in exec mode. 
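+# NOTE: shift-click spawns the shell named by g_prefs(shell) and ctrl-click
+# spawns the node type's shellcmd (typically vtysh) instead of posting this
+# menu; see the button handling at the top of the procedure.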
+# INPUTS +# * c -- tk canvas +# * x -- x coordinate for popup menu +# * y -- y coordinate for popup menu +#**** +#old proc button3node { c x y } +#Boeing +proc button3node { c x y button } { + global oper_mode env eid canvas_list node_list curcanvas systype g_prefs + + set node [lindex [$c gettags {node && current}] 1] + if { $node == "" } { + set node [lindex [$c gettags {nodelabel && current}] 1] + if { $node == "" } { + return + } + } + set mirror_node [getNodeMirror $node] + + if { [$c gettags "node && $node && selected"] == "" } { + $c dtag node selected + $c delete -withtags selectmark + selectNode $c [$c find withtag "current"] + } + + # open up shells upon double-click or shift/ctrl-click + set shell $g_prefs(shell) + if { $button == "shift" || $button == "ctrl" } { + if { [nodeType $node] == "pseudo" } { + # + # Hyperlink to another canvas + # + set curcanvas [getNodeCanvas [getNodeMirror $node]] + switchCanvas none + return + } + # only open bash shells for NETWORK nodes and remote routers + if { [[typemodel $node].layer] != "NETWORK" } { + if { [typemodel $node] == "wlan" } { + wlanDoubleClick $node $button + } + return + } + if { $button == "shift" } { ;# normal bash shell + spawnShell $node $shell + } else { ;# right-click vtysh shell + set cmd [[typemodel $node].shellcmd $node] + if { $cmd != "/bin/sh" && $cmd != "" } { spawnShell $node $cmd } + } + return ;# open shell, don't post a menu + } + + # + # below here we build and post a menu + # + .button3menu delete 0 end + + # + # Configure node + # + if { [nodeType $node] != "pseudo" } { + .button3menu add command -label "Configure" \ + -command "popupConfigDialog $c" + } else { + .button3menu add command -label "Configure" \ + -command "popupConfigDialog $c" -state disabled + } + + # + # Select adjacent + # + if { [nodeType $node] != "pseudo" } { + .button3menu add command -label "Select adjacent" \ + -command "selectAdjacent" + } else { + .button3menu add command -label "Select adjacent" \ + -command "selectAdjacent" -state disabled + } + + # + # Create a new link - can be between different canvases + # + .button3menu.connect delete 0 end + if { $oper_mode == "exec" || [nodeType $node] == "pseudo" } { + #.button3menu add cascade -label "Create link to" \ + -menu .button3menu.connect -state disabled + } else { + .button3menu add cascade -label "Create link to" \ + -menu .button3menu.connect + } + destroy .button3menu.connect.selected + menu .button3menu.connect.selected -tearoff 0 + .button3menu.connect add cascade -label "Selected" \ + -menu .button3menu.connect.selected + .button3menu.connect.selected add command \ + -label "Chain" -command "P \[selectedRealNodes\]" + .button3menu.connect.selected add command \ + -label "Star" \ + -command "Kb \[lindex \[selectedRealNodes\] 0\] \ + \[lrange \[selectedNodes\] 1 end\]" + .button3menu.connect.selected add command \ + -label "Cycle" -command "C \[selectedRealNodes\]" + .button3menu.connect.selected add command \ + -label "Clique" -command "K \[selectedRealNodes\]" + .button3menu.connect add separator + foreach canvas $canvas_list { + destroy .button3menu.connect.$canvas + menu .button3menu.connect.$canvas -tearoff 0 + .button3menu.connect add cascade -label [getCanvasName $canvas] \ + -menu .button3menu.connect.$canvas + } + foreach peer_node $node_list { + set canvas [getNodeCanvas $peer_node] + if { $node != $peer_node && [nodeType $node] != "rj45" && + [lsearch {pseudo rj45} [nodeType $peer_node]] < 0 && + [ifcByLogicalPeer $node $peer_node] == "" } { + 
.button3menu.connect.$canvas add command \ + -label [getNodeName $peer_node] \ + -command "newGUILink $node $peer_node" + } elseif { [nodeType $peer_node] != "pseudo" } { + .button3menu.connect.$canvas add command \ + -label [getNodeName $peer_node] \ + -state disabled + } + } + # + # assign to emulation server + # + if { $oper_mode != "exec" } { + global exec_servers node_location + .button3menu.assign delete 0 end + .button3menu add cascade -label "Assign to" -menu .button3menu.assign + .button3menu.assign add command -label "(none)" \ + -command "assignSelection \"\"" + foreach server [lsort -dictionary [array names exec_servers]] { + .button3menu.assign add command -label "$server" \ + -command "assignSelection $server" + } + } + + # + # wlan link to all nodes + # + if { [nodeType $node] == "wlan" } { + .button3menu add command -label "Link to all routers" \ + -command "linkAllNodes $node" + set msg "Select new WLAN $node members:" + set cmd "linkSelectedNodes $node" + .button3menu add command -label "Select WLAN members..." \ + -command "popupSelectNodes \"$msg\" \"\" {$cmd}" + set state normal + if { $oper_mode != "exec" } { set state disabled } + .button3menu add command -label "Mobility script..." \ + -command "showMobilityScriptPopup $node" -state $state + } + + # + # Move to another canvas + # + .button3menu.moveto delete 0 end + if { $oper_mode != "exec" && [nodeType $node] != "pseudo" } { + .button3menu add cascade -label "Move to" \ + -menu .button3menu.moveto + .button3menu.moveto add command -label "Canvas:" -state disabled + foreach canvas $canvas_list { + if { $canvas != $curcanvas } { + .button3menu.moveto add command \ + -label [getCanvasName $canvas] \ + -command "movetoCanvas $canvas" + } else { + .button3menu.moveto add command \ + -label [getCanvasName $canvas] -state disabled + } + } + } + + # + # Merge two pseudo nodes / links + # + if { $oper_mode != "exec" && [nodeType $node] == "pseudo" && \ + [getNodeCanvas $mirror_node] == $curcanvas } { + .button3menu add command -label "Merge" \ + -command "mergeGUINode $node" + } + + # + # Delete selection + # + if { $oper_mode != "exec" } { + .button3menu add command -label "Cut" -command cutSelection + .button3menu add command -label "Copy" -command copySelection + .button3menu add command -label "Paste" -command pasteSelection + .button3menu add command -label "Delete" -command deleteSelection + } + + .button3menu add command -label "Hide" -command "hideSelected" + + # Boeing: flag used below + set execstate disabled + if { $oper_mode == "exec" } { set execstate normal } + + # + # Shell selection + # + .button3menu.shell delete 0 end + if { $oper_mode == "exec" && [[typemodel $node].layer] == "NETWORK" } { + .button3menu add cascade -label "Shell window" \ + -menu .button3menu.shell + set cmd [[typemodel $node].shellcmd $node] + if { $cmd != "/bin/sh" && $cmd != "" } { ;# typically adds vtysh + .button3menu.shell add command -label "$cmd" \ + -command "spawnShell $node $cmd" + } + .button3menu.shell add command -label "/bin/sh" \ + -command "spawnShell $node sh" + .button3menu.shell add command -label "$shell" \ + -command "spawnShell $node $shell" + } + + # + # services + # + .button3menu.services delete 0 end + if { $oper_mode == "exec" && [[typemodel $node].layer] == "NETWORK" } { + addServicesRightClickMenu .button3menu $node + } else { + .button3menu add command -label "Services..." 
-command \ + "sendConfRequestMessage -1 $node services 0x1 -1 \"\"" + } + + # + # Tcpdump, gpsd + # + if { $oper_mode == "exec" && [[typemodel $node].layer] == "NETWORK" } { + addInterfaceCommand $node .button3menu "Tcpdump" "tcpdump -n -l -i" \ + $execstate 1 + addInterfaceCommand $node .button3menu "TShark" "tshark -n -l -i" \ + $execstate 1 + addInterfaceCommand $node .button3menu "Wireshark" "wireshark -k -i" \ + $execstate 0 + # wireshark on host veth pair -- need veth pair name + #wireshark -k -i + if { [lindex $systype 0] == "Linux" } { + set name [getNodeName $node] + .button3menu add command -label "View log..." -state $execstate \ + -command "spawnShell $node \"less ../$name.log\"" + } + } + + # + # Finally post the popup menu on current pointer position + # + set x [winfo pointerx .] + set y [winfo pointery .] + + tk_popup .button3menu $x $y +} + + +#****f* editor.tcl/spawnShell +# NAME +# spawnShell -- spawn shell +# SYNOPSIS +# spawnShell $node $cmd +# FUNCTION +# This procedure spawns a new shell for a specified node. +# The shell is specified in cmd parameter. +# INPUTS +# * node -- node id of the node for which the shell +# is spawned. +# * cmd -- the path to the shell. +#**** +proc spawnShell { node cmd } { + # request an interactive terminal + set sock [lindex [getEmulPlugin $node] 2] + set flags 0x44 ;# set TTY, critical flags + set exec_num [newExecCallbackRequest shell] + sendExecMessage $sock $node $cmd $exec_num $flags +} + +# add a sub-menu to the parentmenu with the given command for each interface +proc addInterfaceCommand { node parentmenu txt cmd state isnodecmd } { + global g_current_session + set childmenu "$parentmenu.[lindex $cmd 0]" + $childmenu delete 0 end + $parentmenu add cascade -label $txt -menu $childmenu -state $state + if { ! 
$isnodecmd } { + if { $g_current_session == 0 } { set state disabled } + set ssid [shortSessionID $g_current_session] + } + foreach ifc [ifcList $node] { + set addr [lindex [getIfcIPv4addr $node $ifc] 0] + if { $addr != "" } { set addr " ($addr)" } + if { $isnodecmd } { ;# run command in a node + set icmd "spawnShell $node \"$cmd $ifc\"" + } else { ;# exec a command directly + set node_num [string range $node 1 end] + set hex [format "%x" $node_num] + set ifnum [string range $ifc 3 end] + set ifname "veth$hex\\.$ifnum\\.$ssid" + set icmd "exec $cmd $ifname &" + } + $childmenu add command -label "$ifc$addr" -state $state -command $icmd + } +} + +# Boeing: consolodate various raise statements here +proc raiseAll {c} { + $c raise rectangle background + $c raise oval "rectangle || background" + $c raise grid "oval || rectangle || background" + $c raise link "grid || oval || rectangle || background" + $c raise linklabel "link || grid || oval || rectangle || background" + $c raise newlink "linklabel || link || grid || oval || rectangle || background" + $c raise wlanlink "newlink || linklabel || link || grid || oval || rectangle || background" + $c raise antenna "wlanlink || newlink || linklabel || link || grid || oval || rectangle || background" + $c raise interface "antenna || wlanlink || newlink || linklabel || link || grid || oval || rectangle || background" + $c raise node "interface || antenna || wlanlink || newlink || linklabel || link || grid || oval || rectangle || background" + $c raise nodelabel "node || interface || antenna || wlanlink || newlink || linklabel || link || grid || oval || rectangle || background" + $c raise text "nodelabel || node || interface || antenna || wlanlink || newlink || linklabel || link || grid || oval || rectangle || background" + $c raise -cursor +} +# end Boeing + + +#****f* editor.tcl/button1 +# NAME +# button1 +# SYNOPSIS +# button1 $c $x $y $button +# FUNCTION +# This procedure is called when a left mouse button is +# clicked on the canvas. This procedure selects a new +# node or creates a new node, depending on the selected +# tool. +# INPUTS +# * c -- tk canvas +# * x -- x coordinate +# * y -- y coordinate +# * button -- the keyboard button that is pressed. 
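+#   (as used in this procedure, only the value "ctrl" is treated specially,
+#   i.e. the Control key was held while clicking)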
+#**** +proc button1 { c x y button } { + global node_list plot_list curcanvas zoom + global activetool activetoolp newlink curobj changed def_router_model + global router pc host lanswitch rj45 hub OVS + global oval rectangle text + global lastX lastY + global background selectbox + global defLinkColor defLinkWidth + global resizemode resizeobj + global wlan g_twoNodeSelect + global g_view_locked + + set x [$c canvasx $x] + set y [$c canvasy $y] + + set lastX $x + set lastY $y + + # TODO: clean this up + # - too many global variables + # - too many hardcoded cases (lanswitch, router, etc) + # - should be functionalized since lengthy if-else difficult to read + + set curobj [$c find withtag current] + set curtype [lindex [$c gettags current] 0] + + + if { $curtype == "node" || \ + $curtype == "oval" || $curtype == "rectangle" || $curtype == "text" \ + || ( $curtype == "nodelabel" && \ + [nodeType [lindex [$c gettags $curobj] 1]] == "pseudo") } { + set node [lindex [$c gettags current] 1] + set wasselected \ + [expr {[lsearch [$c find withtag "selected"] \ + [$c find withtag "node && $node"]] > -1}] + if { $button == "ctrl" } { + if { $wasselected } { + $c dtag $node selected + $c delete -withtags "selectmark && $node" + } + } elseif { !$wasselected } { + $c dtag node selected + $c delete -withtags selectmark + } + if { $activetool == "select" && !$wasselected} { + selectNode $c $curobj + } + } elseif { $curtype == "selectmark" } { + setResizeMode $c $x $y + } elseif { $activetool == "plot" } { + # plot tool: create new plot windows when clicking on a link + set link "" + set tags [$c gettags $curobj] + if { $curtype == "link" || $curtype == "linklabel" } { + set link [lindex $tags 1] + } elseif { $curtype == "interface" } { + set link [lindex $tags 2] + } + if { $link != "" } { + thruPlot $c $link $x $y 150 220 false + } + return + } elseif { $button != "ctrl" || $activetool != "select" } { + $c dtag node selected + $c delete -withtags selectmark + } + # user has clicked on a blank area or background item + if { [lsearch [.c gettags $curobj] background] != -1 || + [lsearch [.c gettags $curobj] grid] != -1 || + [lsearch [.c gettags $curobj] annotation] != -1 } { + # left mouse button pressed to create a new node + if { [lsearch {select marker link mobility twonode run stop oval \ + rectangle text} $activetool] < 0 } { + if { $g_view_locked == 1 } { return } + if { $activetoolp == "routers" } { + if {$activetool != "OVS"} { + set node [newNode router] + } else { + set node [newNode OVS]} + setNodeModel $node $activetool + } else { + set node [newNode $activetool] + } + setNodeCanvas $node $curcanvas + setNodeCoords $node "[expr {$x / $zoom}] [expr {$y / $zoom}]" + lassign [getDefaultLabelOffsets $activetool] dx dy + setNodeLabelCoords $node "[expr {$x / $zoom + $dx}] \ + [expr {$y / $zoom + $dy}]" + drawNode $c $node + selectNode $c [$c find withtag "node && $node"] + set changed 1 + # remove any existing select box + } elseif { $activetool == "select" \ + && $curtype != "node" && $curtype != "nodelabel"} { + $c config -cursor cross + set lastX $x + set lastY $y + if {$selectbox != ""} { + # We actually shouldn't get here! 
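+		# defensively discard the stale selection box before starting a new drag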
+ $c delete $selectbox + set selectbox "" + } + # begin drawing an annotation + } elseif { $activetoolp == "bgobjs" } { + set newcursor cross + if { $activetool == "text" } { set newcursor xterm } + $c config -cursor $newcursor + set lastX $x + set lastY $y + # draw with the marker + } elseif { $activetool == "marker" } { + global markersize markercolor + set newline [$c create oval $lastX $lastY $x $y \ + -width $markersize -outline $markercolor -tags "marker"] + $c raise $newline "background || link || linklabel || interface" + set lastX $x + set lastY $y + } + } else { + if {$curtype == "node" || $curtype == "nodelabel"} { + $c config -cursor fleur + } + if {$activetool == "link" && $curtype == "node"} { + $c config -cursor cross + set lastX [lindex [$c coords $curobj] 0] + set lastY [lindex [$c coords $curobj] 1] + set newlink [$c create line $lastX $lastY $x $y \ + -fill $defLinkColor -width $defLinkWidth \ + -tags "link"] + # twonode tool support + } elseif {$g_twoNodeSelect != "" && $curtype == "node"} { + set curnode [lindex [$c gettags $curobj] 1] + selectTwoNode $curnode + } elseif { $curtype == "node" } { + selectNode $c $curobj + } + # end Boeing + } + + raiseAll $c +} + +proc setResizeMode { c x y } { + set isThruplot false + set type1 notset + + if {$c == ".c"} { + set t1 [$c gettags current] + set o1 [lindex $t1 1] + set type1 [nodeType $o1] + } else { + set o1 $c + set c .c + set isThruplot true + } + #DYL + #puts "RESIZE NODETYPE = $type1" + global resizemode resizeobj + if {$type1== "oval" || $type1== "rectangle" || $isThruplot == true} { + set resizeobj $o1 + set bbox1 [$c bbox $o1] + set x1 [lindex $bbox1 0] + set y1 [lindex $bbox1 1] + set x2 [lindex $bbox1 2] + set y2 [lindex $bbox1 3] + set l 0 ;# left + set r 0 ;# right + set u 0 ;# up + set d 0 ;# down + + if { $x < [expr $x1+($x2-$x1)/8.0]} { set l 1 } + if { $x > [expr $x2-($x2-$x1)/8.0]} { set r 1 } + if { $y < [expr $y1+($y2-$y1)/8.0]} { set u 1 } + if { $y > [expr $y2-($y2-$y1)/8.0]} { set d 1 } + + if {$l==1} { + if {$u==1} { + set resizemode lu + } elseif {$d==1} { + set resizemode ld + } else { + set resizemode l + } + } elseif {$r==1} { + if {$u==1} { + set resizemode ru + } elseif {$d==1} { + set resizemode rd + } else { + set resizemode r + } + } elseif {$u==1} { + set resizemode u + } elseif {$d==1} { + set resizemode d + } else { + set resizemode false + } + } +} + + +#****f* editor.tcl/button1-motion +# NAME +# button1-motion +# SYNOPSIS +# button1-motion $c $x $y +# FUNCTION +# This procedure is called when a left mouse button is +# pressed and the mouse is moved around the canvas. +# This procedure creates new select box, moves the +# selected nodes or draws a new link. 
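+# It also draws with the marker tool, resizes annotations via their handles,
+# and ignores most drags while the view is locked (g_view_locked).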
+# INPUTS +# * c -- tk canvas +# * x -- x coordinate +# * y -- y coordinate +#**** +proc button1-motion { c x y } { + global activetool newlink changed + global lastX lastY sizex sizey selectbox background + global oper_mode newoval newrect resizemode + global zoom + global g_view_locked + global thruPlotCur thruPlotDragStart + + set x [$c canvasx $x] + set y [$c canvasy $y] + + if {$thruPlotDragStart == "dragging"} { + #puts "active tool is $activetool" + thruPlotDrag $c $thruPlotCur $x $y null true + return + } + + # fix occasional error + if { $x == "" || $y == "" || $lastX == "" || $lastY == "" } { return } + + set curobj [$c find withtag current] + set curtype [lindex [$c gettags current] 0] + + # display coordinates in the status bar + set zoomx [expr {$x / $zoom}] + set zoomy [expr {$y / $zoom}] + .bottom.textbox config -text "<$zoomx, $zoomy>" + + # prevent dragging outside of the canvas area + if { $x < 0 } { + set x 0 + } elseif { $x > $sizex } { + set x $sizex + } + if { $y < 0 } { + set y 0 + } elseif { $y > $sizey } { + set y $sizey + } + + # marker tool drawing on the canvas + if { $activetool == "marker" } { + global markersize markercolor + set dx [expr {$x-$lastX} ] + set dy [expr {$y-$lastY} ] + # this provides smoother drawing + if { $dx > $markersize || $dy > $markersize } { + set mark [$c create line $lastX $lastY $x $y \ + -width $markersize -fill $markercolor -tags "marker"] + $c raise $mark \ + "marker || background || link || linklabel || interface" + } + set mark [$c create oval $x $y $x $y \ + -width $markersize -fill $markercolor \ + -outline $markercolor -tags "marker"] + $c raise $mark "marker || background || link || linklabel || interface" + set lastX $x + set lastY $y + return + } + # disable all other mouse drags in locked mode + if { $g_view_locked == 1 } { return } + + # don't move nodelabels in exec mode, use calcx,y instead of x,y + if {$oper_mode == "exec" && $curtype == "nodelabel" } { + set node [lindex [$c gettags $curobj] 1] + set curobj [$c find withtag "node && $node"] + set curtype "node" + set coords [$c coords $curobj] + set calcx [expr {[lindex $coords 0] / $zoom}] + set calcy [expr {[lindex $coords 1] / $zoom}] + selectNode $c $curobj + } else { + set calcx $x + set calcy $y + } + # drawing a new link + if {$activetool == "link" && $newlink != ""} { + $c coords $newlink $lastX $lastY $x $y + # draw a selection box + } elseif { $activetool == "select" && \ + ( $curobj == $selectbox || $curtype == "background" || $curtype == "grid")} { + if {$selectbox == ""} { + set selectbox [$c create line \ + $lastX $lastY $x $lastY $x $y $lastX $y $lastX $lastY \ + -dash {10 4} -fill black -width 1 -tags "selectbox"] + $c raise $selectbox "background || link || linklabel || interface" + } else { + $c coords $selectbox \ + $lastX $lastY $x $lastY $x $y $lastX $y $lastX $lastY + } + # move a text annotation + } elseif { $activetool == "select" && $curtype == "text" } { + $c move $curobj [expr {$x - $lastX}] [expr {$y - $lastY}] + set changed 1 + set lastX $x + set lastY $y + $c delete [$c find withtag "selectmark"] + # move a nodelabel apart from a node (edit mode only) + } elseif { $activetool == "select" && $curtype == "nodelabel" \ + && [nodeType [lindex [$c gettags $curobj] 1]] != "pseudo" } { + $c move $curobj [expr {$x - $lastX}] [expr {$y - $lastY}] + set changed 1 + set lastX $x + set lastY $y + # actually we should check if curobj==bkgImage + # annotations + } elseif { $activetool == "oval" && \ + ( $curobj == $newoval || $curobj == $background || 
$curtype == "background" || $curtype == "grid")} { + # Draw a new oval + if {$newoval == ""} { + set newoval [$c create oval $lastX $lastY $x $y \ + -dash {10 4} -width 1 -tags "newoval"] + $c raise $newoval "background || link || linklabel || interface" + } else { + $c coords $newoval \ + $lastX $lastY $x $y + } + # actually we should check if curobj==bkgImage + } elseif { $activetool == "rectangle" && \ + ( $curobj == $newrect || $curobj == $background || $curtype == "background" || $curtype == "grid")} { + # Draw a new rectangle + if {$newrect == ""} { + set newrect [$c create rectangle $lastX $lastY $x $y \ + -outline blue \ + -dash {10 4} -width 1 -tags "newrect"] + $c raise $newrect "oval || background || link || linklabel || interface" + } else { + $c coords $newrect $lastX $lastY $x $y + } + # resizing an annotation + } elseif { $curtype == "selectmark" } { + foreach o [$c find withtag "selected"] { + set node [lindex [$c gettags $o] 1] + set tagovi [$c gettags $o] + set koord [getNodeCoords $node] + + set oldX1 [lindex $koord 0] + set oldY1 [lindex $koord 1] + set oldX2 [lindex $koord 2] + set oldY2 [lindex $koord 3] + switch -exact -- $resizemode { + lu { + set oldX1 $x + set oldY1 $y + } + ld { + set oldX1 $x + set oldY2 $y + } + l { + set oldX1 $x + } + ru { + set oldX2 $x + set oldY1 $y + } + rd { + set oldX2 $x + set oldY2 $y + } + r { + set oldX2 $x + } + u { + set oldY1 $y + } + d { + set oldY2 $y + } + } + if {$selectbox == ""} { + # Boeing: fix "bad screen distance" error + if { $oldX1 == "" || $oldX2 == "" || $oldY1 == "" || \ + $oldY2 == "" } { return } + # end Boeing + set selectbox [$c create line \ + $oldX1 $oldY1 $oldX2 $oldY1 $oldX2 $oldY2 $oldX1 \ + $oldY2 $oldX1 $oldY1 \ + -dash {10 4} -fill black -width 1 -tags "selectbox"] + $c raise $selectbox \ + "background || link || linklabel || interface" + } else { + $c coords $selectbox \ + $oldX1 $oldY1 $oldX2 $oldY1 $oldX2 $oldY2 $oldX1 \ + $oldY2 $oldX1 $oldY1 + } + } + # selected node(s) are being moved + } else { + foreach img [$c find withtag "selected"] { + set node [lindex [$c gettags $img] 1] + set newcoords [$c coords $img] ;# different than getNodeCoords + set img [$c find withtag "selectmark && $node"] + if {$curtype == "oval" || $curtype == "rectangle"} { + $c move $img [expr {($x - $lastX) / 2}] \ + [expr {($y - $lastY) / 2}] + } else { + $c move $img [expr {$x - $lastX}] [expr {$y - $lastY}] + set img [$c find withtag "node && $node"] + $c move $img [expr {$x - $lastX}] [expr {$y - $lastY}] + set img [$c find withtag "nodelabel && $node"] + $c move $img [expr {$x - $lastX}] [expr {$y - $lastY}] + set img [$c find withtag "twonode && $node"] + if {$img != "" } {; # move Two Node Tool circles around node + $c move $img [expr {$x - $lastX}] [expr {$y - $lastY}] + }; + set img [$c find withtag "rangecircles && $node"] + if {$img != "" } {; # move throughput circles around node + $c move $img [expr {$x - $lastX}] [expr {$y - $lastY}] + }; + $c addtag need_redraw withtag "link && $node" + } + if { $oper_mode == "exec" } { + set newx [expr {[lindex $newcoords 0] / $zoom}] + set newy [expr {[lindex $newcoords 1] / $zoom}] + sendNodePosMessage -1 $node -1 $newx $newy -1 0 + } + $c addtag need_redraw withtag "wlanlink && $node" + widgets_move_node $c $node 0 + } + foreach link [$c find withtag "link && need_redraw"] { + redrawLink [lindex [$c gettags $link] 1] + } + foreach wlanlink [$c find withtag "wlanlink && need_redraw"] { + redrawWlanLink $wlanlink + } + $c dtag wlanlink need_redraw + $c dtag link need_redraw 
+ set changed 1 + set lastX $x + set lastY $y + } +} + + +#****f* editor.tcl/pseudo.layer +# NAME +# pseudo.layer +# SYNOPSIS +# set layer [pseudo.layer] +# FUNCTION +# Returns the layer on which the pseudo node operates +# i.e. returns no layer. +# RESULT +# * layer -- returns an empty string +#**** +proc pseudo.layer {} { +} + + +#****f* editor.tcl/newGUILink +# NAME +# newGUILink -- new GUI link +# SYNOPSIS +# newGUILink $lnode1 $lnode2 +# FUNCTION +# This procedure is called to create a new link between +# nodes lnode1 and lnode2. Nodes can be on the same canvas +# or on different canvases. The result of this function +# is directly visible in GUI. +# INPUTS +# * lnode1 -- node id of the first node +# * lnode2 -- node id of the second node +#**** +proc newGUILink { lnode1 lnode2 } { + global changed + + set link [newLink $lnode1 $lnode2] + if { $link == "" } { + return + } + if { [getNodeCanvas $lnode1] != [getNodeCanvas $lnode2] } { + set new_nodes [splitLink $link pseudo] + set orig_nodes [linkPeers $link] + set new_node1 [lindex $new_nodes 0] + set new_node2 [lindex $new_nodes 1] + set orig_node1 [lindex $orig_nodes 0] + set orig_node2 [lindex $orig_nodes 1] + set new_link1 [linkByPeers $orig_node1 $new_node1] + set new_link2 [linkByPeers $orig_node2 $new_node2] + setNodeMirror $new_node1 $new_node2 + setNodeMirror $new_node2 $new_node1 + setNodeName $new_node1 $orig_node2 + setNodeName $new_node2 $orig_node1 + setLinkMirror $new_link1 $new_link2 + setLinkMirror $new_link2 $new_link1 + } + redrawAll + set changed 1 + updateUndoLog +} + + +#****f* editor.tcl/button1-release +# NAME +# button1-release +# SYNOPSIS +# button1-release $c $x $y +# FUNCTION +# This procedure is called when a left mouse button is +# released. +# The result of this function depends on the actions +# during the button1-motion procedure. 
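+# Completed drags are committed here: a new link is created when the drag
+# ends on another node, moved nodes are snapped to the grid when enabled,
+# and drags ending outside the canvas are rolled back from the undo log.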
+# INPUTS +# * c -- tk canvas +# * x -- x coordinate +# * y -- y coordinate +#**** +proc button1-release { c x y } { + global node_list plot_list activetool newlink curobj grid + global changed undolog undolevel redolevel selectbox + global lastX lastY sizex sizey zoom + global autorearrange_enabled + global resizemode resizeobj + set redrawNeeded 0 + global oper_mode + global g_prefs + global g_view_locked + + set x [$c canvasx $x] + set y [$c canvasy $y] + + $c config -cursor left_ptr + # place a new link between items + if {$activetool == "link" && $newlink != ""} { + if { $g_view_locked == 1 } { return } + $c delete $newlink + set newlink "" + set destobj "" + foreach obj [$c find overlapping $x $y $x $y] { + if {[lindex [$c gettags $obj] 0] == "node"} { + set destobj $obj + break + } + } + if {$destobj != "" && $curobj != "" && $destobj != $curobj} { + set lnode1 [lindex [$c gettags $curobj] 1] + set lnode2 [lindex [$c gettags $destobj] 1] + if { [ifcByLogicalPeer $lnode1 $lnode2] == "" } { + set link [newLink $lnode1 $lnode2] + if { $link != "" } { + drawLink $link + redrawLink $link + updateLinkLabel $link + set changed 1 + } + } + } + # annotations + } elseif {$activetool == "rectangle" || $activetool == "oval" } { + if { $g_view_locked == 1 } { return } + popupAnnotationDialog $c 0 "false" + # edit text annotation + } elseif {$activetool == "text" } { + if { $g_view_locked == 1 } { return } + textEnter $c $x $y + } + + if { $changed == 1 } { + set regular true + if { [lindex [$c gettags $curobj] 0] == "nodelabel" } { + set node [lindex [$c gettags $curobj] 1] + selectNode $c [$c find withtag "node && $node"] + } + set selected {} + foreach img [$c find withtag "selected"] { + set node [lindex [$c gettags $img] 1] + lappend selected $node + set coords [$c coords $img] + set x [expr {[lindex $coords 0] / $zoom}] + set y [expr {[lindex $coords 1] / $zoom}] + if { $autorearrange_enabled == 0 && $g_prefs(gui_snap_grid)} { + set dx [expr {(int($x / $grid + 0.5) * $grid - $x) * $zoom}] + set dy [expr {(int($y / $grid + 0.5) * $grid - $y) * $zoom}] + $c move $img $dx $dy + set coords [$c coords $img] + set x [expr {[lindex $coords 0] / $zoom}] + set y [expr {[lindex $coords 1] / $zoom}] + } else { + set dx 0 + set dy 0 + } + if {$x < 0 || $y < 0 || $x > $sizex || $y > $sizey} { + set regular false + } + # nodes with four coordinates + if { [lindex [$c gettags $node] 0] == "oval" || + [lindex [$c gettags $node] 0] == "rectangle" } { + set bbox [$c bbox "selectmark && $node"] + # Boeing: bbox causes annotations to grow, subtract 5 + if { [llength $bbox] > 3 } { + set x1 [lindex $bbox 0] + set y1 [lindex $bbox 1] + set x2 [expr {[lindex $bbox 2] - 5}] + set y2 [expr {[lindex $bbox 3] - 5}] + setNodeCoords $node "$x1 $y1 $x2 $y2" + set redrawNeeded 1 + if {$x1 < 0 || $y1 < 0 || $x1 > $sizex || $y1 > $sizey || \ + $x2 < 0 || $y2 < 0 || $x2 > $sizex || $y2 > $sizey} { + set regular false + } + } + # nodes with two coordinates + } else { + setNodeCoords $node "$x $y" + } + if {[$c find withtag "nodelabel && $node"] != "" } { + $c move "nodelabel && $node" $dx $dy + set coords [$c coords "nodelabel && $node"] + set x [expr {[lindex $coords 0] / $zoom}] + set y [expr {[lindex $coords 1] / $zoom}] + setNodeLabelCoords $node "$x $y" + if {$x < 0 || $y < 0 || $x > $sizex || $y > $sizey} { + set regular false + } + } + $c move "selectmark && $node" $dx $dy + $c addtag need_redraw withtag "link && $node" + set changed 1 + if { $oper_mode == "exec" } { + # send node position update using x,y stored in 
node + set xy [getNodeCoords $node] ;# read new coordinates + sendNodePosMessage -1 $node -1 [lindex $xy 0] [lindex $xy 1] \ + -1 0 + widgets_move_node $c $node 1 + } + $c addtag need_redraw withtag "wlanlink && $node" + } ;# end of: foreach img selected + if {$regular == "true"} { + # user has dragged something within the canvas boundaries + foreach link [$c find withtag "link && need_redraw"] { + redrawLink [lindex [$c gettags $link] 1] + } + } else { + # user has dragged something beyond the canvas boundaries + .c config -cursor watch + loadCfg $undolog($undolevel) + redrawAll + if {$activetool == "select" } { + selectNodes $selected + } + set changed 0 + } + $c dtag link need_redraw + nodeEnter $c + + # $changed!=1 + } elseif {$activetool == "select" } { + if {$selectbox == ""} { + set x1 $x + set y1 $y + rearrange_off + } else { + set coords [$c coords $selectbox] + set x [lindex $coords 0] + set y [lindex $coords 1] + set x1 [lindex $coords 4] + set y1 [lindex $coords 5] + $c delete $selectbox + set selectbox "" + } + + if { $resizemode == "false" } { + # select tool mouse button release while drawing select box + set enclosed {} + # fix occasional error + if { $x == "" || $y == "" || $x1 == "" || $y1 == "" } { return } + foreach obj [$c find enclosed $x $y $x1 $y1] { + set tags [$c gettags $obj] + if {[lindex $tags 0] == "node" && [lsearch $tags selected] == -1} { + lappend enclosed $obj + } + if {[lindex $tags 0] == "oval" && [lsearch $tags selected] == -1} { + lappend enclosed $obj + } + if {[lindex $tags 0] == "rectangle" && [lsearch $tags selected] == -1} { + lappend enclosed $obj + } + if {[lindex $tags 0] == "text" && [lsearch $tags selected] == -1} { + lappend enclosed $obj + } + } + foreach obj $enclosed { + selectNode $c $obj + } + } else { + # select tool resizing an object by dragging its handles + # DYL bugfix. if x,y does not change, do not resize! + # fixes a bug where the object dissappears + if { $x != $x1 || $y != $y1 } { + setNodeCoords $resizeobj "$x $y $x1 $y1" + } + set redrawNeeded 1 + set resizemode false + } + } + + if { $redrawNeeded } { + set redrawNeeded 0 + redrawAll + } else { + raiseAll $c + } + update + updateUndoLog +} + + +#****f* editor.tcl/nodeEnter +# NAME +# nodeEnter +# SYNOPSIS +# nodeEnter $c +# FUNCTION +# This procedure prints the node id, node name and +# node model (if exists), as well as all the interfaces +# of the node in the status line. +# Information is presented for the node above which is +# the mouse pointer. +# INPUTS +# * c -- tk canvas +#**** +proc nodeEnter { c } { + global activetool + + set curtags [$c gettags current] + if { [lsearch -exact "node nodelabel" [lindex $curtags 0]] < 0 } { + return ;# allow this proc to be called from button1-release + } + set node [lindex $curtags 1] + set type [nodeType $node] + set name [getNodeName $node] + set model [getNodeModel $node] + if { $model != "" } { + set line "{$node} $name ($model):" + } else { + set line "{$node} $name:" + } + if { $type != "rj45" && $type != "tunnel" } { + foreach ifc [ifcList $node] { + set line "$line $ifc:[getIfcIPv4addr $node $ifc]" + } + } + set xy [getNodeCoords $node] + set line "$line <[lindex $xy 0], [lindex $xy 1]>" + .bottom.textbox config -text "$line" + widgetObserveNode $c $node +} + + +#****f* editor.tcl/linkEnter +# NAME +# linkEnter +# SYNOPSIS +# linkEnter $c +# FUNCTION +# This procedure prints the link id, link bandwidth +# and link delay in the status line. +# Information is presented for the link above which is +# the mouse pointer. 
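+# (the status line is the .bottom.textbox widget of the main window)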
+# INPUTS +# * c -- tk canvas +#**** +proc linkEnter {c} { + global activetool link_list + + set link [lindex [$c gettags current] 1] + if { [lsearch $link_list $link] == -1 } { + return + } + set line "$link: [getLinkBandwidthString $link] [getLinkDelayString $link]" + .bottom.textbox config -text "$line" +} + + +#****f* editor.tcl/anyLeave +# NAME +# anyLeave +# SYNOPSIS +# anyLeave $c +# FUNCTION +# This procedure clears the status line. +# INPUTS +# * c -- tk canvas +#**** +proc anyLeave {c} { + global activetool + + .bottom.textbox config -text "" +# Boeing + widgetObserveNode $c "" +# nodeHighlights $c "" off "" +# end Boeing +} + + +#****f* editor.tcl/checkIntRange +# NAME +# checkIntRange -- check integer range +# SYNOPSIS +# set check [checkIntRange $str $low $high] +# FUNCTION +# This procedure checks the input string to see if it is +# an integer between the low and high value. +# INPUTS +# str -- string to check +# low -- the bottom value +# high -- the top value +# RESULT +# * check -- set to 1 if the str is string between low and high +# value, 0 otherwise. +#**** +proc checkIntRange { str low high } { + if { $str == "" } { + return 1 + } + set str [string trimleft $str 0] + if { $str == "" } { + set str 0 + } + if { ![string is integer $str] } { + return 0 + } + if { $str < $low || $str > $high } { + return 0 + } + return 1 +} + +proc checkFloatRange { str low high } { + if { $str == "" } { + return 1 + } + set str [string trimleft $str 0] + if { $str == "" } { + set str 0 + } + if { ![string is double $str] } { + return 0 + } + if { $str < $low || $str > $high } { + return 0 + } + return 1 +} + +proc checkHostname { str } { + # per RFC 952 and RFC 1123, any letter, number, or hyphen + return [regexp {^[A-Za-z0-9-]+$} $str] +} + + +#****f* editor.tcl/focusAndFlash +# NAME +# focusAndFlash -- focus and flash +# SYNOPSIS +# focusAndFlash $W $count +# FUNCTION +# This procedure sets the focus on the bad entry field +# and on this filed it provides an effect of flashing +# for approximately 1 second. +# INPUTS +# * W -- textbox field that caused the bed entry +# * count -- the parameter that causes flashes. +# It can be left blank. +#**** +proc focusAndFlash {W {count 9}} { + global badentry + + set fg black + set bg white + + if { $badentry == -1 } { + return + } else { + set badentry 1 + } + + focus -force $W + if {$count<1} { + $W configure -foreground $fg -background $bg + set badentry 0 + } else { + if {$count%2} { + $W configure -foreground $bg -background $fg + } else { + $W configure -foreground $fg -background $bg + } + after 200 [list focusAndFlash $W [expr {$count - 1}]] + } +} + + +#****f* editor.tcl/popupConfigDialog +# NAME +# popupConfigDialog -- popup Configuration Dialog Box +# SYNOPSIS +# popupConfigDialog $c +# FUNCTION +# Dynamically creates a popup dialog box for configuring +# links or nodes in IMUNES. +# INPUTS +# * c -- canvas id +#**** +proc popupConfigDialog { c } { + global activetool router_model link_color oper_mode + global badentry curcanvas + global node_location systype + global plugin_img_del + set type "" + + set wi .popup + if { [winfo exists $wi ] } { + return + } + catch {destroy $wi} + toplevel $wi + + wm transient $wi . 
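+    # the dialog contents below are generated according to the type of the
+    # clicked canvas object (node, link, or annotation)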
+ wm resizable $wi 1 1 + + set object_type "" + set tk_type [lindex [$c gettags current] 0] + set target [lindex [$c gettags current] 1] + if { [lsearch {node nodelabel interface} $tk_type] > -1 } { + set object_type node + } + if { [lsearch {link linklabel} $tk_type] > -1 } { + set object_type link + } + if { [lsearch {oval} $tk_type] > -1 } { + set object_type oval + } + if { [lsearch {rectangle} $tk_type] > -1 } { + set object_type rectangle + } + if { [lsearch {text} $tk_type] > -1 } { + set object_type text + } + if { "$object_type" == ""} { + destroy $wi + return + } + if { $object_type == "link" } { + set n0 [lindex [linkPeers $target] 0] + set n1 [lindex [linkPeers $target] 1] + # Boeing: added tunnel check + #if { [nodeType $n0] == "rj45" || [nodeType $n1] == "rj45" || \ + # [nodeType $n0] == "tunnel" || [nodeType $n1] == "tunnel" } { + # destroy $wi + # return + #} + } + $c dtag node selected + $c delete -withtags selectmark + + switch -exact -- $object_type { + node { + set type [nodeType $target] + if { $type == "pseudo" } { + # + # Hyperlink to another canvas + # + destroy $wi + set curcanvas [getNodeCanvas [getNodeMirror $target]] + switchCanvas none + return + } + set model [getNodeModel $target] + set router_model $model + wm title $wi "$type configuration" + ttk::frame $wi.ftop -borderwidth 4 + ttk::entry $wi.ftop.name -width 16 \ + -validate focus -invalidcommand "focusAndFlash %W" + if { $type == "rj45" } { + ttk::label $wi.ftop.name_label -text "Physical interface:" + } elseif { $type == "tunnel" } { + ttk::label $wi.ftop.name_label -text "IP address of tunnel peer:" + } else { + ttk::label $wi.ftop.name_label -text "Node name:" + $wi.ftop.name configure -validatecommand {checkHostname %P} + } + $wi.ftop.name insert 0 [getNodeName $target] + set img [getNodeImage $target] + ttk::button $wi.ftop.img -image $img -command "popupCustomImage $target" + + if { $type == "rj45" } { + rj45ifclist $wi $target 0 + } + # execution server + global exec_servers node_location + set node_location [getNodeLocation $target] + set servers [lsort -dictionary [array names exec_servers]] + set servers "(none) $servers" + if { $node_location == "" } { set node_location "(none)" } + eval tk_optionMenu $wi.ftop.menu node_location $servers + pack $wi.ftop.img $wi.ftop.menu $wi.ftop.name $wi.ftop.name_label \ + -side right -padx 4 -pady 4 + # end Boeing + pack $wi.ftop -side top + if { $type == "router" || $type == "OVS"} { + + ttk::frame $wi.model -borderwidth 4 + ttk::label $wi.model.label -text "Type:" + set runstate "disabled" + if { $oper_mode == "edit" } { + eval tk_optionMenu $wi.model.menu router_model \ + [getNodeTypeNames] + set runstate "normal" + } else { + tk_optionMenu $wi.model.menu router_model $model + } + # would be nice to update the image upon selection; binding to + # will not work + #tkwait variable router_model "customImageApply $wi $target" + set sock [lindex [getEmulPlugin $target] 2] + ttk::button $wi.model.services -text "Services..." 
-state $runstate \ + -command \ + "sendConfRequestMessage $sock $target services 0x1 -1 \"\"" + pack $wi.model.services $wi.model.menu $wi.model.label \ + -side right -padx 0 -pady 0 + pack $wi.model -side top + } + + if { $type == "wlan" } { + wlanConfigDialogHelper $wi $target 0 + } elseif { $type == "tunnel" } { + # + # tunnel controls + # + ttk::frame $wi.con2 + global conntap + set conntap [netconfFetchSection $target "tunnel-tap"] + if { $conntap == "" } { set conntap off } + # TODO: clean this up + ttk::radiobutton $wi.con2.dotap0 \ + -variable conntap -value off \ + -text "tunnel to another CORE emulation" + ttk::frame $wi.con2.key + ttk::label $wi.con2.key.lab -text "GRE key:" + ttk::entry $wi.con2.key.key -width 6 + ttk::radiobutton $wi.con2.dotap1 -state disabled \ + -variable conntap -value on \ + -text "tunnel to the virtual TAP interface of another system" + pack $wi.con2.key.lab $wi.con2.key.key -side left + pack $wi.con2.dotap0 -side top -anchor w + pack $wi.con2.key -side top + pack $wi.con2.dotap1 -side top -anchor w + pack $wi.con2 -side top + set key [netconfFetchSection $target "tunnel-key"] + if { $key == "" } { set key 1 } + $wi.con2.key.key insert 0 $key + + # TODO: clean this up + ttk::frame $wi.conn + ttk::label $wi.conn.label -text "Transport type:" + tk_optionMenu $wi.conn.conntype conntype "UDP" "TCP" + $wi.conn.conntype configure -state disabled + pack $wi.conn.label $wi.conn.conntype -side left -anchor w + pack $wi.conn -side top + global conntype + set conntype [netconfFetchSection $target "tunnel-type"] + if { $conntype == "" } { set conntype "UDP" } + + + # TODO: clean this up + ttk::frame $wi.linfo + ttk::label $wi.linfo.label -text "Local hook:" + ttk::entry $wi.linfo.local -state disabled + set localhook [netconfFetchSection $target "local-hook"] + if { $localhook == "" || $localhook == "(none)" } { + # automatically generate local hook name + set ifc [lindex [ifcList $target] 0] + if { $ifc != "" } { + set hname [info hostname] + set peer [peerByIfc $target $ifc] + set localhook "$hname$peer" + } else { + set localhook "(none)" + } + } + $wi.linfo.local insert 0 $localhook + pack $wi.linfo.label $wi.linfo.local -side left -anchor w + pack $wi.linfo -side top + + ttk::frame $wi.pinfo + ttk::label $wi.pinfo.label -text "Peer hook:" + ttk::entry $wi.pinfo.peer -state disabled + $wi.pinfo.peer insert 0 \ + [netconfFetchSection $target "peer-hook"] + pack $wi.pinfo.label $wi.pinfo.peer -side left -anchor w + pack $wi.pinfo -side top + } + + # interface list + if { [[typemodel $target].layer] == "NETWORK" } { + # canvas used for scrolling frames for each interface + ttk::frame $wi.ifaces + set height [expr {100 * [llength [ifcList $target]]}] + if { $height > 300 } { set height 300 } + canvas $wi.ifaces.c -height $height -highlightthickness 0 \ + -yscrollcommand "$wi.ifaces.scroll set" + scrollbar $wi.ifaces.scroll -command "$wi.ifaces.c yview" + pack $wi.ifaces.c -side left -fill both -expand 1 + pack $wi.ifaces.scroll -side right -fill y + pack $wi.ifaces -side top -fill both -expand 1 + set y 0 + + foreach ifc [lsort -ascii [ifcList $target]] { + set fr $wi.ifaces.c.if$ifc + ttk::labelframe $fr -text "Interface $ifc" + $wi.ifaces.c create window 4 $y -window $fr -anchor nw + incr y 100 + + set peer [peerByIfc $target $ifc] + if { [isEmane $peer] } { + ttk::frame $fr.opts + set caps [getCapabilities $peer "mobmodel"] + set cap [lindex $caps 0] + set cmd "sendConfRequestMessage -1 $target $cap 0x1 -1 \"\"" + ttk::button $fr.opts.cfg -command $cmd \ + -text 
"$cap options..." + pack $fr.opts.cfg -side left -padx 4 + pack $fr.opts -side top -anchor w + incr y 28 + } + + ttk::frame $fr.cfg + # + # MAC address + # + ttk::frame $fr.cfg.mac + ttk::label $fr.cfg.mac.addrl -text "MAC address" \ + -anchor w + set macaddr [getIfcMacaddr $target $ifc] + global if${ifc}_auto_mac + if { $macaddr == "" } { + set if${ifc}_auto_mac 1 + set state disabled + } else { + set if${ifc}_auto_mac 0 + set state normal + } + ttk::checkbutton $fr.cfg.mac.auto -text "auto-assign" \ + -variable if${ifc}_auto_mac \ + -command "macEntryHelper $wi $ifc" + ttk::entry $fr.cfg.mac.addrv -width 15 \ + -state $state + $fr.cfg.mac.addrv insert 0 $macaddr + pack $fr.cfg.mac.addrl $fr.cfg.mac.auto \ + $fr.cfg.mac.addrv -side left -padx 4 + pack $fr.cfg.mac -side top -anchor w + + # + # IPv4 address + # + ttk::frame $fr.cfg.ipv4 + ttk::label $fr.cfg.ipv4.addrl -text "IPv4 address" \ + -anchor w + ttk::entry $fr.cfg.ipv4.addrv -width 30 \ + -validate focus -invalidcommand "focusAndFlash %W" + $fr.cfg.ipv4.addrv insert 0 \ + [getIfcIPv4addr $target $ifc] + $fr.cfg.ipv4.addrv configure \ + -validatecommand {checkIPv4Net %P} + ttk::button $fr.cfg.ipv4.clear -image $plugin_img_del \ + -command "$fr.cfg.ipv4.addrv delete 0 end" + pack $fr.cfg.ipv4.addrl $fr.cfg.ipv4.addrv \ + $fr.cfg.ipv4.clear -side left + pack $fr.cfg.ipv4 -side top -anchor w -padx 4 + + # + # IPv6 address + # + ttk::frame $fr.cfg.ipv6 + ttk::label $fr.cfg.ipv6.addrl -text "IPv6 address" \ + -anchor w + ttk::entry $fr.cfg.ipv6.addrv -width 30 \ + -validate focus -invalidcommand "focusAndFlash %W" + $fr.cfg.ipv6.addrv insert 0 \ + [getIfcIPv6addr $target $ifc] + $fr.cfg.ipv6.addrv configure -validatecommand {checkIPv6Net %P} + ttk::button $fr.cfg.ipv6.clear -image $plugin_img_del \ + -command "$fr.cfg.ipv6.addrv delete 0 end" + pack $fr.cfg.ipv6.addrl $fr.cfg.ipv6.addrv \ + $fr.cfg.ipv6.clear -side left + pack $fr.cfg.ipv6 -side top -anchor w -padx 4 + pack $fr.cfg -side left + bind $fr.cfg <4> "$wi.ifaces.c yview scroll -1 units" + bind $fr.cfg <5> "$wi.ifaces.c yview scroll 1 units" + } ;# end foreach ifc + $wi.ifaces.c configure -scrollregion "0 0 250 $y" + # mouse wheel bindings for scrolling + foreach ctl [list $wi.ifaces.c $wi.ifaces.scroll] { + bind $ctl <4> "$wi.ifaces.c yview scroll -1 units" + bind $ctl <5> "$wi.ifaces.c yview scroll 1 units" + bind $ctl "$wi.ifaces.c yview scroll -1 units" + bind $ctl "$wi.ifaces.c yview scroll 1 units" + } + } + } + oval { + destroy $wi + annotationConfig $c $target + return + } + rectangle { + destroy $wi + annotationConfig $c $target + return + } + text { + destroy $wi + annotationConfig $c $target + return + } + link { + wm title $wi "link configuration" + ttk::frame $wi.ftop -borderwidth 6 + set nam0 [getNodeName $n0] + set nam1 [getNodeName $n1] + ttk::label $wi.ftop.name_label -justify left -text \ + "Link from $nam0 to $nam1" + pack $wi.ftop.name_label -side right + pack $wi.ftop -side top + + set spinbox [getspinbox] + global g_link_config_uni_state + set g_link_config_uni_state "bid" + + ttk::frame $wi.preset -borderwidth 4 + global link_preset_val + set link_preset_val unlimited + set linkpreMenu [tk_optionMenu $wi.preset.linkpre link_preset_val a] + # unidirectional links not always supported + if { [isUniSupported $n0 $n1] } { + set unistate normal + } else { + set unistate disabled + } + ttk::button $wi.preset.uni -text " >> " -state $unistate \ + -command "linkConfigUni $wi" + pack $wi.preset.uni $wi.preset.linkpre -side right + linkPresets $wi $linkpreMenu init 
+ pack $wi.preset -side top -anchor e + + ttk::frame $wi.unilabel -borderwidth 4 + ttk::label $wi.unilabel.updown -text "Symmetric link effects:" + pack $wi.unilabel.updown -side left -anchor w + pack $wi.unilabel -side top -anchor w + + ttk::frame $wi.bandwidth -borderwidth 4 + ttk::label $wi.bandwidth.label -anchor e -text "Bandwidth (bps):" + $spinbox $wi.bandwidth.value -justify right -width 10 \ + -validate focus -invalidcommand "focusAndFlash %W" + $wi.bandwidth.value insert 0 [getLinkBandwidth $target] + $wi.bandwidth.value configure \ + -validatecommand {checkIntRange %P 0 1000000000} \ + -from 0 -to 1000000000 -increment 1000000 + pack $wi.bandwidth.value $wi.bandwidth.label -side right + pack $wi.bandwidth -side top -anchor e + + ttk::frame $wi.delay -borderwidth 4 + ttk::label $wi.delay.label -anchor e -text "Delay (us):" + $spinbox $wi.delay.value -justify right -width 10 \ + -validate focus -invalidcommand "focusAndFlash %W" + $wi.delay.value insert 0 [getLinkDelay $target] + # 274 seconds is maximum netem delay for Linux 3.2.0-60-generic kernel + $wi.delay.value configure \ + -validatecommand {checkIntRange %P 0 274000000} \ + -from 0 -to 10000000 -increment 5 + pack $wi.delay.value $wi.delay.label -side right + pack $wi.delay -side top -anchor e + + ttk::frame $wi.jitter -borderwidth 4 + ttk::label $wi.jitter.label -anchor e -text "Jitter (us):" + $spinbox $wi.jitter.value -justify right -width 10 \ + -validate focus -invalidcommand "focusAndFlash %W" + $wi.jitter.value insert 0 [getLinkJitter $target] + $wi.jitter.value configure \ + -validatecommand {checkIntRange %P 0 10000000} \ + -from 0 -to 10000000 -increment 5 + pack $wi.jitter.value $wi.jitter.label -side right + pack $wi.jitter -side top -anchor e + + ttk::frame $wi.ber -borderwidth 4 + if { [lindex $systype 0] == "Linux" } { + set bertext "Loss (%):" + set berinc 0.1 + set bermax 100.0 + } else { ;# netgraph uses BER + set bertext "BER (1/N):" + set berinc 1000 + set bermax 10000000000000 + } + ttk::label $wi.ber.label -anchor e -text $bertext + $spinbox $wi.ber.value -justify right -width 10 \ + -validate focus -invalidcommand "focusAndFlash %W" + $wi.ber.value insert 0 [getLinkBER $target] + $wi.ber.value configure \ + -validatecommand "checkFloatRange %P 0.0 $bermax" \ + -from 0.0 -to $bermax -increment $berinc + pack $wi.ber.value $wi.ber.label -side right + pack $wi.ber -side top -anchor e + + ttk::frame $wi.dup -borderwidth 4 + ttk::label $wi.dup.label -anchor e -text "Duplicate (%):" + $spinbox $wi.dup.value -justify right -width 10 \ + -validate focus -invalidcommand "focusAndFlash %W" + $wi.dup.value insert 0 [getLinkDup $target] + $wi.dup.value configure \ + -validatecommand {checkFloatRange %P 0 50} \ + -from 0 -to 50 -increment 1 + pack $wi.dup.value $wi.dup.label -side right + pack $wi.dup -side top -anchor e + +# Boeing: jitter +# frame $wi.jitter -borderwidth 4 +# label $wi.jitter.label -anchor e -text "Jitter (us):" +# spinbox $wi.jitter.value -bg white -justify right -width 10 \ +# -validate focus -invalidcommand "focusAndFlash %W" +# $wi.jitter.value insert 0 [getLinkJitter $target] +# $wi.jitter.value configure \ +# -validatecommand {checkIntRange %P 0 10000000} \ +# -from 0 -to 10000000 -increment 5 +# pack $wi.jitter.value $wi.jitter.label -side right +# pack $wi.jitter -side top -anchor e +# end Boeing + + ttk::frame $wi.color -borderwidth 4 + ttk::label $wi.color.label -anchor e -text "Color:" + set link_color [getLinkColor $target] + tk_optionMenu $wi.color.value link_color \ + Red Green 
Blue Yellow Magenta Cyan Black + $wi.color.value configure -width 8 + pack $wi.color.value $wi.color.label -side right + pack $wi.color -side top -anchor e + + ttk::frame $wi.width -borderwidth 4 + ttk::label $wi.width.label -anchor e -text "Width:" + $spinbox $wi.width.value -justify right -width 10 \ + -validate focus -invalidcommand "focusAndFlash %W" + $wi.width.value insert 0 [getLinkWidth $target] + $wi.width.value configure \ + -validatecommand {checkIntRange %P 1 8} \ + -from 1 -to 8 -increment 1 + pack $wi.width.value $wi.width.label -side right + pack $wi.width -side top -anchor e + + # auto-expand upstream if values exist + set bw [getLinkBandwidth $target up] + set dl [getLinkDelay $target up] + set jt [getLinkJitter $target up] + set ber [getLinkBER $target up] + set dup [getLinkDup $target up] + if { $bw > 0 || $dl > 0 || $jt > 0 || $ber > 0 || $dup > 0 } { + linkConfigUni $wi + $wi.bandwidth.value2 delete 0 end + $wi.bandwidth.value2 insert 0 $bw + $wi.delay.value2 delete 0 end + $wi.delay.value2 insert 0 $dl + $wi.jitter.value2 delete 0 end + $wi.jitter.value2 insert 0 $jt + $wi.ber.value2 delete 0 end + $wi.ber.value2 insert 0 $ber + $wi.dup.value2 delete 0 end + $wi.dup.value2 insert 0 $dup + } + } + } ;# end switch + + ttk::frame $wi.butt -borderwidth 6 + # NOTE: plugins.tcl:popupCapabilityConfig may read this command option + ttk::button $wi.butt.apply -text "Apply" -command \ + "popupConfigApply $wi $object_type $target 0" + focus $wi.butt.apply + # Boeing: remove range circles upon cancel + if {$type == "wlan"} { + set cancelcmd "set badentry -1 ; destroy $wi;" + set cancelcmd "$cancelcmd updateRangeCircles $target 0" + } else { + set cancelcmd "set badentry -1 ; destroy $wi" + } + ttk::button $wi.butt.cancel -text "Cancel" -command $cancelcmd + #end Boeing + pack $wi.butt.cancel $wi.butt.apply -side right + pack $wi.butt -side bottom + bind $wi $cancelcmd +# bind $wi "popupConfigApply $wi $object_type $target 0" +} + + +proc linkConfigUni { wi } { + global g_link_config_uni_state + + set capt [lindex [$wi.preset.uni configure -text] 4] + + if { $capt == " >> " } { + set g_link_config_uni_state "uni" + $wi.preset.uni configure -text " << " + set txt "Asymmetric effects: downstream / upstream" + $wi.unilabel.updown configure -text $txt + + set spinbox [getspinbox] + if { ![winfo exists $wi.bandwidth.value2] } { + $spinbox $wi.bandwidth.value2 -justify right \ + -width 10 -validate focus -invalidcommand "focusAndFlash %W" + $wi.bandwidth.value2 configure \ + -validatecommand {checkIntRange %P 0 1000000000} \ + -from 0 -to 1000000000 -increment 1000000 + } + $wi.bandwidth.value2 delete 0 end + $wi.bandwidth.value2 insert 0 [$wi.bandwidth.value get] + pack $wi.bandwidth.value2 -side right + pack $wi.bandwidth.value2 -before $wi.bandwidth.value + + if { ![winfo exists $wi.delay.value2] } { + $spinbox $wi.delay.value2 -justify right -width 10 \ + -validate focus -invalidcommand "focusAndFlash %W" + $wi.delay.value2 configure \ + -validatecommand {checkIntRange %P 0 10000000} \ + -from 0 -to 10000000 -increment 5 + } + $wi.delay.value2 delete 0 end + $wi.delay.value2 insert 0 [$wi.delay.value get] + pack $wi.delay.value2 -side right + pack $wi.delay.value2 -before $wi.delay.value + + if { ![winfo exists $wi.jitter.value2] } { + $spinbox $wi.jitter.value2 -justify right -width 10 \ + -validate focus -invalidcommand "focusAndFlash %W" + $wi.jitter.value2 configure \ + -validatecommand {checkIntRange %P 0 10000000} \ + -from 0 -to 10000000 -increment 5 + } + $wi.jitter.value2 
delete 0 end + $wi.jitter.value2 insert 0 [$wi.jitter.value get] + pack $wi.jitter.value2 -side right + pack $wi.jitter.value2 -before $wi.jitter.value + + if { ![winfo exists $wi.ber.value2] } { + $spinbox $wi.ber.value2 -justify right -width 10 \ + -validate focus -invalidcommand "focusAndFlash %W" + $wi.ber.value2 configure \ + -validatecommand "checkFloatRange %P 0.0 100.0" \ + -from 0.0 -to 100.0 -increment 0.1 + } + $wi.ber.value2 delete 0 end + $wi.ber.value2 insert 0 [$wi.ber.value get] + pack $wi.ber.value2 -side right + pack $wi.ber.value2 -before $wi.ber.value + + if { ![winfo exists $wi.dup.value2] } { + $spinbox $wi.dup.value2 -justify right -width 10 \ + -validate focus -invalidcommand "focusAndFlash %W" + $wi.dup.value2 configure \ + -validatecommand {checkFloatRange %P 0 50} \ + -from 0 -to 50 -increment 1 + } + $wi.dup.value2 delete 0 end + $wi.dup.value2 insert 0 [$wi.dup.value get] + pack $wi.dup.value2 -side right + pack $wi.dup.value2 -before $wi.dup.value + } else { + set g_link_config_uni_state "bid" + $wi.preset.uni configure -text " >> " + $wi.unilabel.updown configure -text "Symmetric link effects:" + pack forget $wi.bandwidth.value2 + pack forget $wi.delay.value2 + pack forget $wi.jitter.value2 + pack forget $wi.ber.value2 + pack forget $wi.dup.value2 + } +} + +# unidirectional links are not always supported +proc isUniSupported { n1 n2 } { + set blacklist [list "hub" "lanswitch"] + set type1 [nodeType $n1] + set type2 [nodeType $n2] + # not yet supported for GRE tap device + if { $type1 == "tunnel" || $type2 == "tunnel" } { + return false + } + # unidirectional links are supported between two switches/hubs + if { [lsearch $blacklist $type1] != -1 && \ + [lsearch $blacklist $type2] != -1 } { + return true + } + # unidirectional links not supported between hub/switch and something else + if { [lsearch $blacklist $type1] != -1 || \ + [lsearch $blacklist $type2] != -1 } { + return false + } + # unidirectional links are supported between routers, rj45s, etc. + # WLANs not included here because they have no link dialog + return true +} + +# toggle the state of the mac address entry, and insert MAC address template +proc macEntryHelper { wi ifc } { + set fr $wi.ifaces.c.if$ifc + set ctl $fr.cfg.mac.addrv + set s normal + if { [$ctl cget -state] == $s } { set s disabled } + $ctl configure -state $s + + if { [$ctl get] == "" } { $ctl insert 0 "00:00:00:00:00:00" } +} + + +#****f* editor.tcl/popupConfigApply +# NAME +# popupConfigApply -- popup configuration apply +# SYNOPSIS +# popupConfigApply $w $object_type $target $phase +# FUNCTION +# This procedure is called when the button apply is pressed in +# popup configuration dialog box. It reads different +# configuration parameters depending on the object_type. +# INPUTS +# * w -- widget +# * object_type -- describes the object type that is currently +# configured. It can be either link or node. +# * target -- node id of the configured node or link id of the +# configured link +# * phase -- This procedure is invoked in two diffenet phases +# to enable validation of the entry that was the last made. +# When calling this function always use the phase parameter +# set to 0. +#**** +proc popupConfigApply { wi object_type target phase } { + global changed oper_mode router_model link_color badentry + global customEnabled ipsecEnabled + global eid + + $wi config -cursor watch + update + if { $phase == 0 } { + set badentry 0 + focus . 
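+	# phase 0: move focus away so any pending entry validation runs, then
+	# re-enter as phase 1 after 100 ms to read the validated values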
+ after 100 "popupConfigApply $wi $object_type $target 1" + return + } elseif { $badentry } { + $wi config -cursor left_ptr + return + } + switch -exact -- $object_type { + # + # Node + # + node { + set type [nodeType $target] + set model [getNodeModel $target] + set name [string trim [$wi.ftop.name get]] + set changed_to_remote 0 + global node_location + if { $node_location != [getNodeLocation $target] } { + if { $node_location == "(none)" } { set node_location "" } + setNodeLocation $target $node_location + set changed 1 + } + set node_location "" + if { $name != [getNodeName $target] } { + setNodeName $target $name + set changed 1 + } + if { $oper_mode == "edit" && $type == "router" && \ + $router_model != $model } { + setNodeModel $target $router_model + set changed 1 + if { $router_model == "remote" } { set changed_to_remote 1 };#Boeing + } + +# Boeing - added wlan, remote, tunnel, ktunnel items + if { $type == "wlan" } { + wlanConfigDialogHelper $wi $target 1 + } elseif { $type == "tunnel" } { + # + # apply tunnel items + # + set ipaddr "$name/24" ;# tunnel name == IP address of peer + set oldipaddr [getIfcIPv4addr $target e0] + if { $ipaddr != $oldipaddr } { + setIfcIPv4addr $target e0 $ipaddr + } + global conntype conntap + set oldconntype [netconfFetchSection $target "tunnel-type"] + if { $oldconntype != $conntype } { + netconfInsertSection $target [list "tunnel-type" $conntype] + } + set oldconntap [netconfFetchSection $target "tunnel-tap"] + if { $oldconntap != $conntap } { + netconfInsertSection $target [list "tunnel-tap" $conntap] + } + set oldkey [netconfFetchSection $target "tunnel-key"] + set key [$wi.con2.key.key get] + if { $oldkey != $key } { + netconfInsertSection $target [list "tunnel-key" $key] + } + + set oldlocal [netconfFetchSection $target "local-hook"] + set local [$wi.linfo.local get] + if { $oldlocal != $local } { + netconfInsertSection $target [list "local-hook" $local] + } + + set oldpeer [netconfFetchSection $target "peer-hook"] + set peer [$wi.pinfo.peer get] + if { $oldpeer != $peer } { + netconfInsertSection $target [list "peer-hook" $peer] + } + } elseif { $type == "ktunnel" } { + # + # apply ktunnel items + # + set oldlocal [netconfFetchSection $target "local-hook"] + set local [$wi.linfo.local get] + if { $oldlocal != $local } { + netconfInsertSection $target [list "local-hook" $local] + } +# Boeing changing to interface name for RJ45 +# } elseif { $type == "rj45" } { +# # +# # apply rj45 items +# # +# set ifcName [string trim [$wi.interface.name get]] +# puts "$ifcName\n" +# + } elseif { $type == "router" && [getNodeModel $target] == "remote" } { + if { $changed_to_remote == 0 } { + set i 1 + set remoteIP [string trim [$wi.remoteinfo.ip.text get $i.0 $i.end]] + if { $remoteIP != [router.remote.getRemoteIP $target] } { + router.remote.setRemoteIP $target $remoteIP + set changed 1 + } + set ifc [string trim [$wi.remoteinfo.ifc.text get $i.0 $i.end]] + if { $ifc != [router.remote.getCInterface $target] } { + router.remote.setCInterface $target $ifc + set changed 1 + } + set startcmd [string trim [$wi.remotecommands.start.text get $i.0 $i.end]] + if { $startcmd != [router.remote.getStartCmd $target] } { + router.remote.setStartCmd $target $startcmd + set changed 1 + } + set stopcmd [string trim [$wi.remotecommands.stop.text get $i.0 $i.end]] + if { $stopcmd != [router.remote.getStopCmd $target] } { + router.remote.setStopCmd $target $stopcmd + set changed 1 + } + } + } + + if {[[typemodel $target].layer] == "NETWORK"} { + foreach ifc [ifcList $target] { + 
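+ # Per-interface apply (sketch): each NETWORK-layer interface has a frame
+ # at $wi.ifaces.c.if$ifc whose MAC/IPv4/IPv6 entries are compared against
+ # the stored values and written back only when they differ, following the
+ # same compare-and-set pattern as this hypothetical helper:
+ #   # hypothetical illustration only, not defined in this file
+ #   proc applyIfValue { node ifc new old setcmd } {
+ #       if { $new != $old } { $setcmd $node $ifc $new; return 1 }
+ #       return 0
+ #   }
+ # A MAC left on auto (if${ifc}_auto_mac == 1) is stored as "", presumably
+ # so an address is assigned automatically.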
set fr $wi.ifaces.c.if$ifc + set macaddr [$fr.cfg.mac.addrv get] + global if${ifc}_auto_mac + if { [set if${ifc}_auto_mac] == 1 } { set macaddr "" } + set oldmacaddr [getIfcMacaddr $target $ifc] + if { $macaddr != $oldmacaddr } { + setIfcMacaddr $target $ifc $macaddr + set changed 1 + } + set ipaddr [$fr.cfg.ipv4.addrv get] + set oldipaddr [getIfcIPv4addr $target $ifc] + if { $ipaddr != $oldipaddr } { + setIfcIPv4addr $target $ifc $ipaddr + set changed 1 + } + set ipaddr [$fr.cfg.ipv6.addrv get] + set oldipaddr [getIfcIPv6addr $target $ifc] + if { $ipaddr != $oldipaddr } { + setIfcIPv6addr $target $ifc $ipaddr + set changed 1 + } + } + } + } + + link { + global g_link_config_uni_state + set mirror [getLinkMirror $target] + + if { [setIfChanged $target $mirror $wi "bandwidth" "LinkBandwidth"] } { + set changed 1 + } + if { [setIfChanged $target $mirror $wi "delay" "LinkDelay"] } { + set changed 1 + } + if { [setIfChanged $target $mirror $wi "ber" "LinkBER"] } { + set changed 1 + } + if { [setIfChanged $target $mirror $wi "dup" "LinkDup"] } { + set changed 1 + } + if { [setIfChanged $target $mirror $wi "jitter" "LinkJitter"] } { + set changed 1 + } + + if { $link_color != [getLinkColor $target] } { + setLinkColor $target $link_color + if { $mirror != "" } { + setLinkColor $mirror $link_color + } + set changed 1 + } + set width [$wi.width.value get] + if { $width != [getLinkWidth $target] } { + setLinkWidth $target $width + if { $mirror != "" } { + setLinkWidth $mirror $width + } + set changed 1 + } + if { $changed == 1 && $oper_mode == "exec" } { + execSetLinkParams $eid $target + } + } + + } + + popdownConfig $wi +} + +# helper for Link Config dialog +# ctl must exist as $wi.$ctl.value{2}, and {get,set}$procname must be valid +# returns true when value has changed, false otherwise +proc setIfChanged { target mirror wi ctl procname } { + global g_link_config_uni_state + + set val [$wi.$ctl.value get] + if { $g_link_config_uni_state == "uni" } { + set val [list $val [$wi.$ctl.value2 get]] + } + set oldval [get$procname $target] + set oldval2 [get$procname $target "up"] + if { $oldval2 != "" } { + set oldval [list $oldval $oldval2] + } + if { $val != $oldval } { + set$procname $target $val + if { $mirror != "" } { + set$procname $mirror $val + } + return true + } + return false +} + +#****f* editor.tcl/printCanvas +# NAME +# printCanvas -- print canvas +# SYNOPSIS +# printCanvas $w +# FUNCTION +# This procedure is called when the print button in +# print dialog box is pressed. +# INPUTS +# * w -- print dialog widget +#**** +proc printCanvas { w } { + global sizex sizey + + set prncmd [$w.e1 get] + destroy $w + set p [open "|$prncmd" WRONLY] + puts $p [.c postscript -height $sizey -width $sizex -x 0 -y 0 -rotate yes -pageheight 297m -pagewidth 210m] + close $p +} + + +#****f* editor.tcl/deleteSelection +# NAME +# deleteSelection -- delete selection +# SYNOPSIS +# deleteSelection +# FUNCTION +# By calling this procedure all the selected nodes in imunes will +# be deleted. 
+#**** +proc deleteSelection { } { + global changed + global background + global viewid + catch {unset viewid} + .c config -cursor watch; update + + foreach lnode [selectedNodes] { + if { $lnode != "" } { + removeGUINode $lnode + } + set changed 1 + } + + raiseAll .c + updateUndoLog + .c config -cursor left_ptr + .bottom.textbox config -text "" +} + + +proc assignSelection { server } { + global changed + .c config -cursor watch; update + + foreach node [selectedNodes] { + if { $node != "" } { + setNodeLocation $node $server + } + set changed 1 + } + + redrawAll + updateUndoLog + .c config -cursor left_ptr + .bottom.textbox config -text "" +} + + +proc align2grid {} { + global sizex sizey grid zoom changed + + set node_objects [.c find withtag node] + if { [llength $node_objects] == 0 } { + return + } + + set step [expr {$grid * 4}] + + for { set x $step } { $x <= [expr {$sizex - $step}] } { incr x $step } { + for { set y $step } { $y <= [expr {$sizey - $step}] } { incr y $step } { + if { [llength $node_objects] == 0 } { + set changed 1 + updateUndoLog + redrawAll + return + } + set node [lindex [.c gettags [lindex $node_objects 0]] 1] + set node_objects [lreplace $node_objects 0 0] + setNodeCoords $node "$x $y" + lassign [getDefaultLabelOffsets [nodeType $node]] dx dy + setNodeLabelCoords $node "[expr {$x + $dx}] [expr {$y + $dy}]" + } + } +} + +#****f* editor.tcl/rearrange +# NAME +# rearrange +# SYNOPSIS +# rearrange $mode +# FUNCTION +# This procedure rearranges the position of nodes in imunes. +# It can be used to rearrange all the nodes or only the selected +# nodes. +# INPUTS +# * mode -- when set to "selected" only the selected nodes will be +# rearranged. +#**** +proc rearrange { mode } { + global link_list autorearrange_enabled sizex sizey curcanvas zoom activetool + + set activetool select + + if { $autorearrange_enabled } { + rearrange_off + return + } + set autorearrange_enabled 1 + .bottom.mbuf config -text "autorearrange" + if { $mode == "selected" } { + .menubar.tools entryconfigure "Auto rearrange all" -state disabled + .menubar.tools entryconfigure "Auto rearrange all" -indicatoron off + .menubar.tools entryconfigure "Auto rearrange selected" -indicatoron on + set tagmatch "node && selected" + } else { + .menubar.tools entryconfigure "Auto rearrange all" -indicatoron on + .menubar.tools entryconfigure "Auto rearrange selected" -state disabled + .menubar.tools entryconfigure "Auto rearrange selected" -indicatoron off + set tagmatch "node" + } + set otime [clock clicks -milliseconds] + while { $autorearrange_enabled } { + set ntime [clock clicks -milliseconds] + if { $otime == $ntime } { + set dt 0.001 + } else { + set dt [expr {($ntime - $otime) * 0.001}] + if { $dt > 0.2 } { + set dt 0.2 + } + set otime $ntime + } + + set objects [.c find withtag $tagmatch] + set peer_objects [.c find withtag node] + foreach obj $peer_objects { + set node [lindex [.c gettags $obj] 1] + set coords [.c coords $obj] + set x [expr {[lindex $coords 0] / $zoom}] + set y [expr {[lindex $coords 1] / $zoom}] + set x_t($node) $x + set y_t($node) $y + + if { $x > 0 } { + set fx [expr {1000 / ($x * $x + 100)}] + } else { + set fx 10 + } + set dx [expr {$sizex - $x}] + if { $dx > 0 } { + set fx [expr {$fx - 1000 / ($dx * $dx + 100)}] + } else { + set fx [expr {$fx - 10}] + } + + if { $y > 0 } { + set fy [expr {1000 / ($y * $y + 100)}] + } else { + set fy 10 + } + set dy [expr {$sizey - $y}] + if { $dy > 0 } { + set fy [expr {$fy - 1000 / ($dy * $dy + 100)}] + } else { + set fy [expr {$fy - 10}] + } + 
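+ # Border repulsion (reading of the code above): each canvas edge pushes a
+ # node back toward the middle with magnitude 1000/(d*d + 100), where d is
+ # the distance to that edge, and a flat 10 once the node has crossed it.
+ # For example, 10 px from the left edge gives fx = 1000/(100 + 100) = 5,
+ # while 100 px away gives only about 0.1, so the force fades quickly.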
set fx_t($node) $fx + set fy_t($node) $fy + } + + foreach obj $objects { + set node [lindex [.c gettags $obj] 1] + set i [lsearch -exact $peer_objects $obj] + set peer_objects [lreplace $peer_objects $i $i] + set x $x_t($node) + set y $y_t($node) + foreach other_obj $peer_objects { + set other [lindex [.c gettags $other_obj] 1] + set o_x $x_t($other) + set o_y $y_t($other) + set dx [expr {$x - $o_x}] + set dy [expr {$y - $o_y}] + set d [expr {hypot($dx, $dy)}] + set d2 [expr {$d * $d}] + set p_fx [expr {1000.0 * $dx / ($d2 * $d + 100)}] + set p_fy [expr {1000.0 * $dy / ($d2 * $d + 100)}] + if {[linkByPeers $node $other] != ""} { + set p_fx [expr {$p_fx - $dx * $d2 * .0000000005}] + set p_fy [expr {$p_fy - $dy * $d2 * .0000000005}] + } + set fx_t($node) [expr {$fx_t($node) + $p_fx}] + set fy_t($node) [expr {$fy_t($node) + $p_fy}] + set fx_t($other) [expr {$fx_t($other) - $p_fx}] + set fy_t($other) [expr {$fy_t($other) - $p_fy}] + } + + foreach link $link_list { + set nodes [linkPeers $link] + if { [getNodeCanvas [lindex $nodes 0]] != $curcanvas || + [getNodeCanvas [lindex $nodes 1]] != $curcanvas || + [getLinkMirror $link] != "" } { + continue + } + set peers [linkPeers $link] + set coords0 [getNodeCoords [lindex $peers 0]] + set coords1 [getNodeCoords [lindex $peers 1]] + set o_x \ + [expr {([lindex $coords0 0] + [lindex $coords1 0]) * .5}] + set o_y \ + [expr {([lindex $coords0 1] + [lindex $coords1 1]) * .5}] + set dx [expr {$x - $o_x}] + set dy [expr {$y - $o_y}] + set d [expr {hypot($dx, $dy)}] + set d2 [expr {$d * $d}] + set fx_t($node) \ + [expr {$fx_t($node) + 500.0 * $dx / ($d2 * $d + 100)}] + set fy_t($node) \ + [expr {$fy_t($node) + 500.0 * $dy / ($d2 * $d + 100)}] + } + } + + foreach obj $objects { + set node [lindex [.c gettags $obj] 1] + if { [catch "set v_t($node)" v] } { + set vx 0.0 + set vy 0.0 + } else { + set vx [lindex $v_t($node) 0] + set vy [lindex $v_t($node) 1] + } + set vx [expr {$vx + 1000.0 * $fx_t($node) * $dt}] + set vy [expr {$vy + 1000.0 * $fy_t($node) * $dt}] + set dampk [expr {0.5 + ($vx * $vx + $vy * $vy) * 0.00001}] + set vx [expr {$vx * exp( - $dampk * $dt)}] + set vy [expr {$vy * exp( - $dampk * $dt)}] + set dx [expr {$vx * $dt}] + set dy [expr {$vy * $dt}] + set x [expr {$x_t($node) + $dx}] + set y [expr {$y_t($node) + $dy}] + set v_t($node) "$vx $vy" + + setNodeCoords $node "$x $y" + set e_dx [expr {$dx * $zoom}] + set e_dy [expr {$dy * $zoom}] + .c move $obj $e_dx $e_dy + set img [.c find withtag "selectmark && $node"] + .c move $img $e_dx $e_dy + set img [.c find withtag "nodelabel && $node"] + .c move $img $e_dx $e_dy + set x [expr {[lindex [.c coords $img] 0] / $zoom}] + set y [expr {[lindex [.c coords $img] 1] / $zoom}] + setNodeLabelCoords $node "$x $y" + .c addtag need_redraw withtag "link && $node" + } + foreach link [.c find withtag "link && need_redraw"] { + redrawLink [lindex [.c gettags $link] 1] + } + .c dtag link need_redraw + update + } + + rearrange_off + .bottom.mbuf config -text "" +} + +proc rearrange_off { } { + global autorearrange_enabled + set autorearrange_enabled 0 + .menubar.tools entryconfigure "Auto rearrange all" -state normal + .menubar.tools entryconfigure "Auto rearrange all" -indicatoron off + .menubar.tools entryconfigure "Auto rearrange selected" -state normal + .menubar.tools entryconfigure "Auto rearrange selected" -indicatoron off +} + + +#****f* editor.tcl/switchCanvas +# NAME +# switchCanvas -- switch canvas +# SYNOPSIS +# switchCanvas $direction +# FUNCTION +# This procedure switches the canvas in one of the 
defined
+# directions (previous, next, first and last).
+# INPUTS
+# * direction -- the direction of switching canvas. Can be: prev --
+# previous, next -- next, first -- first, last -- last.
+#****
+proc switchCanvas { direction } {
+ global canvas_list curcanvas
+ global sizex sizey
+
+ set i [lsearch $canvas_list $curcanvas]
+ switch -exact -- $direction {
+ prev {
+ incr i -1
+ if { $i < 0 } {
+ set curcanvas [lindex $canvas_list end]
+ } else {
+ set curcanvas [lindex $canvas_list $i]
+ }
+ }
+ next {
+ incr i
+ if { $i >= [llength $canvas_list] } {
+ set curcanvas [lindex $canvas_list 0]
+ } else {
+ set curcanvas [lindex $canvas_list $i]
+ }
+ }
+ first {
+ set curcanvas [lindex $canvas_list 0]
+ }
+ last {
+ set curcanvas [lindex $canvas_list end]
+ }
+ }
+
+ .hframe.t delete all
+ set x 0
+ foreach canvas $canvas_list {
+ set text [.hframe.t create text 0 0 \
+ -text "[getCanvasName $canvas]" -tags "text $canvas"]
+ set ox [lindex [.hframe.t bbox $text] 2]
+ set oy [lindex [.hframe.t bbox $text] 3]
+ set tab [.hframe.t create polygon $x 0 [expr {$x + 7}] 18 \
+ [expr {$x + 2 * $ox + 17}] 18 [expr {$x + 2 * $ox + 24}] 0 $x 0 \
+ -fill gray -tags "tab $canvas"]
+ set line [.hframe.t create line 0 0 $x 0 [expr {$x + 7}] 18 \
+ [expr {$x + 2 * $ox + 17}] 18 [expr {$x + 2 * $ox + 24}] 0 999 0 \
+ -fill #808080 -width 2 -tags "line $canvas"]
+ .hframe.t coords $text [expr {$x + $ox + 12}] [expr {$oy + 2}]
+ .hframe.t raise $text
+ incr x [expr {2 * $ox + 17}]
+ }
+ incr x 7
+ .hframe.t raise "$curcanvas"
+ .hframe.t itemconfigure "tab && $curcanvas" -fill #e0e0e0
+ .hframe.t configure -scrollregion "0 0 $x 18"
+ update
+ set width [lindex [.hframe.t configure -width] 4]
+ set lborder [lindex [.hframe.t bbox "tab && $curcanvas"] 0]
+ set rborder [lindex [.hframe.t bbox "tab && $curcanvas"] 2]
+ set lmargin [expr {[lindex [.hframe.t xview] 0] * $x - 1}]
+ set rmargin [expr {[lindex [.hframe.t xview] 1] * $x + 1}]
+ if { $lborder < $lmargin } {
+ .hframe.t xview moveto [expr {1.0 * ($lborder - 10) / $x}]
+ }
+ if { $rborder > $rmargin } {
+ .hframe.t xview moveto [expr {1.0 * ($rborder - $width + 10) / $x}]
+ }
+
+ set sizex [lindex [getCanvasSize $curcanvas] 0]
+ set sizey [lindex [getCanvasSize $curcanvas] 1]
+
+ redrawAll
+}
+
+proc resizeCanvasPopup {} {
+ global curcanvas
+
+ set w .canvasSizeScaleDialog
+ catch {destroy $w}
+ toplevel $w
+
+ wm transient $w .
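+ # Scale note (sketch): the Scale entry below stores meters per 100 pixels,
+ # so pixel distances convert to meters roughly as
+ # meters = pixels * scale / 100.0; e.g. with a scale of 150, a node
+ # 200 px from the reference point sits about 300 m away.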
+ wm title $w "Canvas Size and Scale" + + frame $w.buttons + pack $w.buttons -side bottom -fill x -pady 2m + button $w.buttons.print -text "Apply" -command "resizeCanvasApply $w" + button $w.buttons.cancel -text "Cancel" -command "destroy $w" + pack $w.buttons.print $w.buttons.cancel -side left -expand 1 + + set cursize [getCanvasSize $curcanvas] + set x [lindex $cursize 0] + set y [lindex $cursize 1] + set scale [getCanvasScale $curcanvas] + set refpt [getCanvasRefPoint $curcanvas] + set refx [lindex $refpt 0] + set refy [lindex $refpt 1] + set latitude [lindex $refpt 2] + set longitude [lindex $refpt 3] + set altitude [lindex $refpt 4] + + + labelframe $w.size -text "Size" + frame $w.size.pixels + pack $w.size $w.size.pixels -side top -padx 4 -pady 4 -fill x + spinbox $w.size.pixels.x -bg white -width 5 + $w.size.pixels.x insert 0 $x + $w.size.pixels.x configure -from 300 -to 5000 -increment 2 + label $w.size.pixels.label -text "W x" + spinbox $w.size.pixels.y -bg white -width 5 + $w.size.pixels.y insert 0 $y + $w.size.pixels.y configure -from 300 -to 5000 -increment 2 + label $w.size.pixels.label2 -text "H pixels" + pack $w.size.pixels.x $w.size.pixels.label $w.size.pixels.y \ + $w.size.pixels.label2 -side left -pady 2 -padx 2 -fill x + + frame $w.size.meters + pack $w.size.meters -side top -padx 4 -pady 4 -fill x + spinbox $w.size.meters.x -bg white -width 7 + $w.size.meters.x configure -from 300 -to 10000 -increment 100 + label $w.size.meters.label -text "x" + spinbox $w.size.meters.y -bg white -width 7 + $w.size.meters.y configure -from 300 -to 10000 -increment 100 + label $w.size.meters.label2 -text "meters" + pack $w.size.meters.x $w.size.meters.label $w.size.meters.y \ + $w.size.meters.label2 -side left -pady 2 -padx 2 -fill x + + labelframe $w.scale -text "Scale" + frame $w.scale.ppm + pack $w.scale $w.scale.ppm -side top -padx 4 -pady 4 -fill x + label $w.scale.ppm.label -text "100 pixels =" + entry $w.scale.ppm.metersper100 -bg white -width 10 + $w.scale.ppm.metersper100 insert 0 $scale + label $w.scale.ppm.label2 -text "meters" + pack $w.scale.ppm.label $w.scale.ppm.metersper100 \ + $w.scale.ppm.label2 -side left -pady 2 -padx 2 -fill x + + bind $w.size.pixels.x