Compare commits
2 commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
b7534f7cc5 | ||
|
|
41a78397e3 |
710 changed files with 76362 additions and 55492 deletions
|
|
@@ -11,7 +11,6 @@ insert_final_newline = true
|
|||
[*.py]
|
||||
indent_style = space
|
||||
indent_size = 4
|
||||
max_line_length = 88
|
||||
|
||||
[*.am]
|
||||
indent_style = tab
|
||||
|
|
|
|||
41
.github/workflows/daemon-checks.yml
vendored
41
.github/workflows/daemon-checks.yml
vendored
|
|
@@ -1,41 +0,0 @@
|
|||
name: Daemon Checks
|
||||
|
||||
on: [push]
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- uses: actions/checkout@v1
|
||||
- name: Set up Python 3.9
|
||||
uses: actions/setup-python@v1
|
||||
with:
|
||||
python-version: 3.9
|
||||
- name: install poetry
|
||||
run: |
|
||||
python -m pip install --upgrade pip
|
||||
pip install poetry
|
||||
cd daemon
|
||||
cp core/constants.py.in core/constants.py
|
||||
sed -i 's/required=True/required=False/g' core/emulator/coreemu.py
|
||||
poetry install
|
||||
- name: isort
|
||||
run: |
|
||||
cd daemon
|
||||
poetry run isort -c -df
|
||||
- name: black
|
||||
run: |
|
||||
cd daemon
|
||||
poetry run black --check .
|
||||
- name: flake8
|
||||
run: |
|
||||
cd daemon
|
||||
poetry run flake8
|
||||
- name: grpc
|
||||
run: |
|
||||
cd daemon/proto
|
||||
poetry run python -m grpc_tools.protoc -I . --python_out=.. --grpc_python_out=.. core/api/grpc/*.proto
|
||||
- name: test
|
||||
run: |
|
||||
cd daemon
|
||||
poetry run pytest --mock tests
|
||||
21
.github/workflows/documentation.yml
vendored
21
.github/workflows/documentation.yml
vendored
|
|
@@ -1,21 +0,0 @@
|
|||
name: documentation
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
permissions:
|
||||
contents: write
|
||||
jobs:
|
||||
deploy:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: 3.x
|
||||
- uses: actions/cache@v2
|
||||
with:
|
||||
key: ${{ github.ref }}
|
||||
path: .cache
|
||||
- run: pip install mkdocs-material
|
||||
- run: mkdocs gh-deploy --force
|
||||
18
.gitignore
vendored
18
.gitignore
vendored
|
|
@@ -8,22 +8,18 @@ Makefile
|
|||
Makefile.in
|
||||
aclocal.m4
|
||||
autom4te.cache
|
||||
/config
|
||||
config
|
||||
config.h
|
||||
config.h.in
|
||||
config.log
|
||||
config.status
|
||||
configure
|
||||
configure~
|
||||
debian
|
||||
stamp-h1
|
||||
|
||||
# python virtual environments
|
||||
venv
|
||||
|
||||
# generated protobuf files
|
||||
*_pb2.py
|
||||
*_pb2_grpc.py
|
||||
daemon/core/grpc/core_pb2.py
|
||||
daemon/core/grpc/core_pb2_grpc.py
|
||||
|
||||
# python build directory
|
||||
dist
|
||||
|
|
@@ -43,7 +39,6 @@ coverage.xml
|
|||
|
||||
# python files
|
||||
*.egg-info
|
||||
*.pyc
|
||||
|
||||
# ignore package files
|
||||
*.rpm
|
||||
|
|
@@ -59,9 +54,4 @@ coverage.xml
|
|||
# ignore built input files
|
||||
netns/setup.py
|
||||
daemon/setup.py
|
||||
|
||||
# python
|
||||
__pycache__
|
||||
|
||||
# ignore core player files
|
||||
*.core
|
||||
ns3/setup.py
|
||||
|
|
|
|||
977
CHANGELOG.md
977
CHANGELOG.md
|
|
@@ -1,977 +0,0 @@
|
|||
## 2023-08-01 CORE 9.0.3
|
||||
|
||||
* Installation
|
||||
* updated various dependencies
|
||||
* Documentation
|
||||
* improved GUI docs to include node interaction and note xhost usage
|
||||
* \#780 - fixed gRPC examples
|
||||
* \#787 - complete documentation revamp to leverage mkdocs material
|
||||
* \#790 - fixed custom emane model example
|
||||
* core-daemon
|
||||
* update type hinting to avoid deprecated imports
|
||||
* updated commands ran within docker based nodes to have proper environment variables
|
||||
* fixed issue improperly setting session options over gRPC
|
||||
* \#668 - add fedora sbin path to frr service
|
||||
* \#774 - fixed pcap configservice
|
||||
* \#805 - fixed radvd configservice template error
|
||||
* core-gui
|
||||
* update type hinting to avoid deprecated imports
|
||||
* fixed issue allowing duplicate named hook scripts
|
||||
* fixed issue joining sessions with RJ45 nodes
|
||||
* utility scripts
|
||||
* fixed issue in core-cleanup for removing devices
|
||||
|
||||
## 2023-03-02 CORE 9.0.2
|
||||
|
||||
* Installation
|
||||
* updated python dependencies, including invoke to resolve python 3.10+ issues
|
||||
* improved example dockerfiles to use less space for built images
|
||||
* Documentation
|
||||
* updated emane install instructions
|
||||
* added Docker related issues to install instructions
|
||||
* core-daemon
|
||||
* fixed issue using invalid device name in sysctl commands
|
||||
* updated PTP nodes to properly disable mac learning for their linux bridge
|
||||
* fixed issue for LXC nodes to properly use a configured image name and write it to XML
|
||||
* \#742 - fixed issue with bad wlan node id being used
|
||||
* \#744 - fixed issue not properly setting broadcast address
|
||||
* core-gui
|
||||
* fixed sample1.xml to remove SSH service
|
||||
* fixed emane demo examples
|
||||
* fixed issue displaying emane configs generally configured for a node
|
||||
|
||||
## 2022-11-28 CORE 9.0.1
|
||||
|
||||
* Installation
|
||||
* updated protobuf and grpcio-tools versions in pyproject.toml to account for bad version mix
|
||||
|
||||
## 2022-11-18 CORE 9.0.0
|
||||
|
||||
* Breaking Changes
|
||||
* removed session nodes file
|
||||
* removed session state file
|
||||
* emane now runs in one process per nem with unique control ports
|
||||
* grpc client has been refactored and updated
|
||||
* removed tcl/legacy gui, imn file support and the tlv api
|
||||
* link configuration is now different, but consistent, for wired links
|
||||
* Installation
|
||||
* added packaging for single file distribution
|
||||
* python3.9 is now the minimum required version
|
||||
* updated Dockerfile examples
|
||||
* updated various python dependencies
|
||||
* virtual environment is now installed to /opt/core/venv
|
||||
* Documentation
|
||||
* updated emane invoke task examples
|
||||
* revamped install documentation
|
||||
* added wireless node notes
|
||||
* core-gui
|
||||
* updated config services to display rendered templated and allow editing
|
||||
* fixed node icon issue when updating preferences
|
||||
* \#89 - throughput widget now works for hubs/switches
|
||||
* \#691 - fixed custom nodes to properly use config services
|
||||
* gRPC API
|
||||
* add linked call to support linking and unlinking interfaces without destroying them
|
||||
* fixed issue during start session clearing out session options
|
||||
* added call to get rendered config service files
|
||||
* removed get_node_links from links from client
|
||||
* nem id and nem port have been added to GetNode and AddLink calls
|
||||
* core-daemon
|
||||
* wired links always create two veth pairs joined by a bridge
|
||||
* node interfaces are now configured within the container to apply to outgoing traffic
|
||||
* session.add_node now uses NodeOptions, allowing for node specific options
|
||||
* fixed issue with xml reading node canvas values
|
||||
* removed Session.add_node_file
|
||||
* fixed get requirements logic
|
||||
* fixed docker/lxd node support terminal commands on remote servers
|
||||
* improved docker node command execution time using nsenter
|
||||
* new wireless node type added to support dynamic loss based on distance
|
||||
* \#513 - add and deleting distributed links during runtime is now supported
|
||||
* \#703 - fixed issue not starting emane event listening service
|
||||
|
||||
## 2022-03-21 CORE 8.2.0
|
||||
|
||||
* core-gui
|
||||
* improved failed starts to trigger runtime to allow node investigation
|
||||
* core-daemon
|
||||
* improved default service loading to use a full import path
|
||||
* updated session instantiation to always set to a runtime state
|
||||
* core-cli
|
||||
* \#672 - fixed xml loading
|
||||
* \#578 - restored json flag and added geo output to session overview
|
||||
* Documentation
|
||||
* updated emane example and documentation
|
||||
* improved table markdown
|
||||
|
||||
## 2022-02-18 CORE 8.1.0
|
||||
|
||||
* Installation
|
||||
* updated dependency versions to account for known vulnerabilities
|
||||
* GUI
|
||||
* fixed issue drawing asymmetric link configurations when joining a session
|
||||
* daemon
|
||||
* fixed issue getting templates and creating files for config services
|
||||
* added by directional support for network to network links
|
||||
* \#647 - fixed issue when creating RJ45 nodes
|
||||
* \#646 - fixed issue when creating files for Docker nodes
|
||||
* \#645 - improved wlan change updates to account for all updates with no delay
|
||||
* services
|
||||
* fixed file generation for OSPFv2 config service
|
||||
|
||||
## 2022-01-12 CORE 8.0.0
|
||||
|
||||
* Breaking Changes
|
||||
* heavily refactored gRPC client, removing some calls, adding others, all using type hinted classes representing their protobuf counterparts
|
||||
* emane adjustments to run each nem in its own process, includes adjustments to configuration, which may cause issues
|
||||
* internal daemon cleanup and refactoring, which may cause issues when a script directly driving a scenario is used
|
||||
* Installation
|
||||
* added options to allow installation without ospf mdr
|
||||
* removed tasks that are no longer needed
|
||||
* updates to properly install/remove example files
|
||||
* pipx/poetry/invoke versions are now locked to help avoid update related issues
|
||||
* install.sh is now setup.sh and is a convenience to get tool setup to run invoke
|
||||
* Documentation
|
||||
* formally added notes for Docker and LXD based node types
|
||||
* added config services
|
||||
* Updated README to have quick notes for installation
|
||||
* \#563 - update to note how to enable core service
|
||||
* Examples
|
||||
* \#598 - update to fix sample1.imn to working order
|
||||
* core-daemon
|
||||
* emane global configuration is now configurable per nem
|
||||
* fixed wlan loss to support float values
|
||||
* improved default service loading to use full core path
|
||||
* improved emane model loading to occur one time
|
||||
* fixed handling rj45 link edits from tlv api
|
||||
* fixed wlan config getting a default value for the promiscuous setting when not provided
|
||||
* ebtables usage has now been replaced with nftables
|
||||
* \#564 - logging is now using module named loggers
|
||||
* \#573 - emane processes are now created 1 to 1 with nems
|
||||
* \#608 - update lxml version
|
||||
* \#609 - update pyyaml version
|
||||
* \#623 - fixed issue with ovs mode and mac learning
|
||||
* core-gui
|
||||
* config services are now the default service type
|
||||
* legacy services are marked as deprecated
|
||||
* fix to properly load session options
|
||||
* logging is now using module named loggers
|
||||
* save as will now update the current session file name as expected
|
||||
* fix to properly clear out removed customized services
|
||||
* adding directories to a service that do not exist, is now valid
|
||||
* added flag to exit after creating gui directory from command line
|
||||
* added new options to enable/disable ip4/ip6 assignment
|
||||
* improved canvas draw order, when joining sessions
|
||||
* improved node copy/paste to avoid issues when pasting text into service config dialogs
|
||||
* each canvas will now correctly save and load their size from xml
|
||||
* gRPC API
|
||||
* session options are now returned for GetSession
|
||||
* fixed issue not properly creating the session directory during start session definition state
|
||||
* updates to separate editing a node and moving a node, new MoveNode call added, EditNode is now used for editing icons
|
||||
* Services
|
||||
* fixed default route config service
|
||||
* config services now have options for shadowing directories, including per node customization
|
||||
|
||||
## 2021-09-17 CORE 7.5.2
|
||||
|
||||
* Installation
|
||||
* \#596 - fixes issue related to installing poetry by pinning version to 1.1.7
|
||||
* updates pipx installation to pinned version 0.16.4
|
||||
* core-daemon
|
||||
* \#600 - fixes known vulnerability for pillow dependency by updating version
|
||||
|
||||
## 2021-04-15 CORE 7.5.1
|
||||
|
||||
* core-pygui
|
||||
* fixed issues creating and drawing custom nodes
|
||||
|
||||
## 2021-03-11 CORE 7.5.0
|
||||
|
||||
* core-daemon
|
||||
* fixed issue setting mobility loop value properly
|
||||
* fixed issue that some states would not properly remove session directories
|
||||
* \#560 - fixed issues with sdt integration for mobility movement and layer creation
|
||||
* core-pygui
|
||||
* added multiple canvas support
|
||||
* added support to hide nodes and restore them visually
|
||||
* update to assign full netmasks to wireless connected nodes by default
|
||||
* update to display services and action controls for nodes during runtime
|
||||
* fixed issues with custom nodes
|
||||
* fixed issue auto assigning macs, avoiding duplication
|
||||
* fixed issue joining session with different netmasks
|
||||
* fixed issues when deleting a session from the sessions dialog
|
||||
* \#550 - fixed issue not sending all service customization data
|
||||
* core-cli
|
||||
* added delete session command
|
||||
|
||||
## 2021-01-11 CORE 7.4.0
|
||||
|
||||
* Installation
|
||||
* fixed issue for automated install assuming ID_LIKE is always present in /etc/os-release
|
||||
* gRPC API
|
||||
* fixed issue stopping session and not properly going to data collect state
|
||||
* fixed issue to have start session properly create a directory before configuration state
|
||||
* core-pygui
|
||||
* fixed issue handling deletion of wired link to a switch
|
||||
* avoid saving edge metadata to xml when values are default
|
||||
* fixed issue editing node mac addresses
|
||||
* added support for configuring interface names
|
||||
* fixed issue with potential node names to allow hyphens and remove under bars
|
||||
* \#531 - fixed issue changing distributed nodes back to local
|
||||
* core-daemon
|
||||
* fixed issue to properly handle deleting links from a network to network node
|
||||
* updated xml to support writing and reading link buffer configurations
|
||||
* reverted change and removed mac learning from wlan, due to promiscuous like behavior
|
||||
* fixed issue creating control interfaces when starting services
|
||||
* fixed deadlock issue when clearing a session using sdt
|
||||
* \#116 - fixed issue for wlans handling multiple mobility scripts at once
|
||||
* \#539 - fixed issue in udp tlv api
|
||||
|
||||
## 2020-12-02 CORE 7.3.0
|
||||
|
||||
* core-daemon
|
||||
* fixed issue where emane global configuration was not being sent to core-gui
|
||||
* updated controlnet names on host to be prefixed with ctrl
|
||||
* fixed RJ45 link shutdown from core-gui causing an error
|
||||
* fixed emane external transport xml generation
|
||||
* \#517 - update to account for radvd required directory
|
||||
* \#514 - support added for session specific environment files
|
||||
* \#529 - updated to configure netem limit based on delay or user specified, requires kernel 3.3+
|
||||
* core-pygui
|
||||
* fixed issue drawing wlan/emane link options when it should not have
|
||||
* edge labels are now placed a set distance from nodes like original gui
|
||||
* link color/width are now saved to xml files
|
||||
* added support to configure buffer size for links
|
||||
* \#525 - added support for multiple wired links between the same nodes
|
||||
* \#526 - added option to hide/show links with 100% loss
|
||||
* Documentation
|
||||
* \#527 - typo in service documentation
|
||||
* \#515 - added examples to docs for using EMANE features within a CORE context
|
||||
|
||||
## 2020-09-29 CORE 7.2.1
|
||||
|
||||
* core-daemon
|
||||
* fixed issue where shutting down sessions may not have removed session directories
|
||||
* fixed issue with multiple emane interfaces on the same node not getting the right configuration
|
||||
* Installation
|
||||
* updated automated install to be a bit more robust for alternative distros
|
||||
* added force install type to try and leverage a redhat/debian like install
|
||||
* locked ospf mdr version installed to older commit to avoid issues with multiple interfaces on same node
|
||||
|
||||
## 2020-09-15 CORE 7.2.0
|
||||
|
||||
* Installation
|
||||
* locked down version of ospf-mdr installed in automated install
|
||||
* locked down version of emane to v1.2.5 in automated emane install
|
||||
* added option to install locally using the -l option
|
||||
* core-daemon
|
||||
* improve error when retrieving services that do not exist, or failed to load
|
||||
* fixed issue with writing/reading emane node interface configurations to xml
|
||||
* fixed issue with not setting the emane model when creating a node
|
||||
* added common utility method for getting a emane node interface config id in core.utils
|
||||
* fixed issue running emane on more than one interface for a node
|
||||
* fixed issue validating paths when creating emane transport xml for a node
|
||||
* fixed issue avoiding multiple calls to shutdown, if already in shutdown state
|
||||
* core-pygui
|
||||
* fixed issue configuring emane for a node interface
|
||||
* gRPC API
|
||||
* added wrapper client that can provide type hinting and a simpler interface at core.api.grpc.clientw
|
||||
* fixed issue creating sessions that default to having a very large reference scale
|
||||
* fixed issue with GetSession returning control net nodes
|
||||
|
||||
## 2020-08-21 CORE 7.1.0
|
||||
|
||||
* Installation
|
||||
* added core-python script that gets installed to help globally reference the virtual environment
|
||||
* gRPC API
|
||||
* GetSession will now return all configuration information for a session and the file it was opened from, if applicable
|
||||
* node update events will now include icon information
|
||||
* fixed issue with getting session throughputs for sessions with a high id
|
||||
* core-daemon
|
||||
* \#503 - EMANE networks will now work with mobility again
|
||||
* \#506 - fixed service dependency resolution issue
|
||||
* fixed issue sending hooks to core-gui when joining session
|
||||
* core-pygui
|
||||
* fixed issues editing hooks
|
||||
* fixed issue with cpu usage when joining a session
|
||||
* fixed mac field not being disabled during runtime when configuring a node
|
||||
* removed unlimited button from link config dialog
|
||||
* fixed issue with copy/paste links and their options
|
||||
* fixed issue with adding nodes/links and editing links during runtime
|
||||
* updated open file dialog in config dialogs to open to ~/.coregui home directory
|
||||
* fixed issue double clicking sessions dialog in invalid areas
|
||||
* added display of asymmetric link options on links
|
||||
* fixed emane config dialog display
|
||||
* fixed issue saving backgrounds in xml files
|
||||
* added view toggle for wired/wireless links
|
||||
* node events will now update icons
|
||||
|
||||
## 2020-07-28 CORE 7.0.1
|
||||
|
||||
* Bugfixes
|
||||
* \#500 - fixed issue running node commands with shell=True
|
||||
* fixed issue for poetry based install not properly vetting requirements for dataclasses dependency
|
||||
|
||||
## 2020-07-23 CORE 7.0.0
|
||||
|
||||
* Breaking Changes
|
||||
* core.emudata and core.data combined and cleaned up into core.data
|
||||
* updates to consistently use mac instead of hwaddr/mac
|
||||
* \#468 - code related to adding/editing/deleting links cleaned up
|
||||
* \#469 - usages of per all changed to loss to be consistent
|
||||
* \#470 - variables with numbered names now use numbers directly
|
||||
* \#471 - node startup is no longer embedded within its constructor
|
||||
* \#472 - code updated to refer to interfaces consistently as iface
|
||||
* \#475 - code updates changing how ip addresses are stored on interfaces
|
||||
* \#476 - executables to check for moved into own module core.executables
|
||||
* \#486 - core will now install into its own python virtual environment managed by poetry
|
||||
* core-daemon
|
||||
* updates to properly save/load distributed servers to xml
|
||||
* \#474 - added type hinting to all service files
|
||||
* \#478 - fixed typo in config service directory
|
||||
* \#479 - opening an xml file will now cycle through states like a normal session
|
||||
* \#480 - ovs configuration will now save/load from xml and display in guis
|
||||
* \#484 - changes to support adding emane links during runtime
|
||||
* core-pygui
|
||||
* fixed issue not displaying services for the default group in service dialogs
|
||||
* fixed issue starting a session when the daemon is not present
|
||||
* fixed issue attempting to open terminals for invalid nodes
|
||||
* fixed issue syncing session location
|
||||
* fixed issue joining a session with mobility, not in runtime
|
||||
* added cpu usage monitor to status bar
|
||||
* emane configurations can now be seen during runtime
|
||||
* rj45 nodes can only have one link
|
||||
* disabling throughputs will clear labels
|
||||
* improvements to custom service copy
|
||||
* link options will now be drawn on as a label
|
||||
* updates to handle runtime link events
|
||||
* \#477 - added optional details pane for a quick view of node/link details
|
||||
* \#485 - pygui fixed observer widget for invalid nodes
|
||||
* \#496 - improved alert handling
|
||||
* core-gui
|
||||
* \#493 - increased frame size to show all emane configuration options
|
||||
* gRPC API
|
||||
* added set session user rpc
|
||||
* added cpu usage stream
|
||||
* interface objects returned from get_node will now provide node_id, net_id, and net2_id data
|
||||
* peer to peer nodes will not be included in get_session calls
|
||||
* pathloss events will now throw an error when nem id not found
|
||||
* \#481 - link rpc calls will broadcast out
|
||||
* \#496 - added alert rpc call
|
||||
* Services
|
||||
* fixed issue reading files in security services
|
||||
* \#494 - add staticd to daemons list for frr services
|
||||
|
||||
## 2020-06-11 CORE 6.5.0
|
||||
* Breaking Changes
|
||||
* CoreNode.newnetif - both parameters are required and now takes an InterfaceData object as its second parameter
|
||||
* CoreNetworkBase.linkconfig - now takes a LinkOptions parameter instead of a subset of some of the options (ie bandwidth, delay, etc)
|
||||
* \#453 - Session.add_node and Session.get_node now requires the node class you expect to create/retrieve
|
||||
* \#458 - rj45 cleanup to only inherit from one class
|
||||
* Enhancements
|
||||
* fixed issues with handling bad commands for TLV execute messages
|
||||
* removed unused boot.sh from CoreNode types
|
||||
* added linkconfig to CoreNetworkBase and cleaned up function signature
|
||||
* emane position hook now saves geo position to node
|
||||
* emane pathloss support
|
||||
* core.emulator.emudata leveraged dataclass and type hinting
|
||||
* \#459 - updated transport type usage to an enum
|
||||
* \#460 - updated network policy type usage to an enum
|
||||
* Python GUI Enhancements
|
||||
* fixed issue where throughput events did not work for joined sessions
|
||||
* fixed exiting app with a toolbar picker showing
|
||||
* fixed issue with creating interfaces and reusing subnets after deletion
|
||||
* fixed issue with moving text shapes
|
||||
* fixed scaling with custom node selected
|
||||
* fixed toolbar state switching issues
|
||||
* enable/disable toolbar when running stop/start
|
||||
* marker config integrated into toolbar
|
||||
* improved color picker layout
|
||||
* shapes can now be moved while drawing shapes
|
||||
* added observers to toolbar in run mode
|
||||
* gRPC API
|
||||
* node events will now have geo positional data
|
||||
* node geo data is now returned in get_session and get_node calls
|
||||
* \#451 - added wlan link api to allow direct linking/unlinking of wireless links between nodes
|
||||
* \#462 - added streaming call for sending node position/geo changes
|
||||
* \#463 - added streaming call for emane pathloss events
|
||||
* Bugfixes
|
||||
* \#454 - fixed issue creating docker nodes, but containers are now required to have networking tools
|
||||
* \#466 - fixed issue in python gui when xml file is loading nodes with no ip4 addresses
|
||||
|
||||
## 2020-05-11 CORE 6.4.0
|
||||
* Enhancements
|
||||
* updates to core-route-monitor, allow specific session, configurable settings, and properly
|
||||
listen on all interfaces
|
||||
* install.sh now has a "-r" option to help with reinstalling from current branch and installing
|
||||
current python dependencies
|
||||
* \#202 - enable OSPFv2 fast convergence
|
||||
* \#178 - added comments to OVS service
|
||||
* Python GUI Enhancements
|
||||
* added initial documentation to help support usage
|
||||
* supports drawing multiple links for wireless connections
|
||||
* supports differentiating wireless networks with different colored links
|
||||
* implemented unlink in node context menu to delete links to other nodes
|
||||
* implemented node run tool dialog
|
||||
* implemented find node dialog
|
||||
* implemented address configuration dialog
|
||||
* implemented mac configuration dialog
|
||||
* updated link address creation to more closely mimic prior behavior
|
||||
* updated configuration to use yaml class based configs
|
||||
* implemented auto grid layout for nodes
|
||||
* fixed drawn wlan ranges during configuration
|
||||
* Bugfixes
|
||||
* no longer writes link option data for WLAN/EMANE links in XML
|
||||
* avoid configuring links for WLAN/EMANE link options in XML, due to them being written to XML prior
|
||||
* updates to allow building python docs again
|
||||
* \#431 - peer to peer node uplink link data was not using an enum properly due to code changes
|
||||
* \#432 - loading XML was not setting EMANE nodes model
|
||||
* \#435 - loading XML was not maintaining existing session options
|
||||
* \#448 - fixed issue sorting hooks being saved to XML
|
||||
|
||||
## 2020-04-13 CORE 6.3.0
|
||||
* Features
|
||||
* \#424 - added FRR IS-IS service
|
||||
* Enhancements
|
||||
* \#414 - update GUI OSPFv2 adjacency widget to work with FRR
|
||||
* \#416 - EMANE links can now be drawn for 80211 and RF Pipe models
|
||||
* \#418 #409 - code cleanup
|
||||
* \#425 - added route monitor script for SDT3D integration
|
||||
* a formal error will now be thrown when EMANE binding are not installed, but attempted to be used
|
||||
* node positions will now default to 0,0 to avoid GUI errors, when one is not provided
|
||||
* improved SDT3D integration, multiple link support and usage of custom layers
|
||||
* Python GUI Enhancements
|
||||
* enabled edit menu delete
|
||||
* cleaned up node context menu and enabled delete
|
||||
* Bugfixes
|
||||
* \#427 - fixed issue in default route service
|
||||
* \#426 - fixed issue reading ipsec template file
|
||||
* \#420 - fixed issue with TLV API udp handler
|
||||
* \#411 - allow wlan to be configured with 0 values
|
||||
* \#415 - general EMANE configuration was not being saved/loaded from XML
|
||||
|
||||
## 2020-03-16 CORE 6.2.0
|
||||
* gRPC API
|
||||
* Added call to execute python script
|
||||
* Enhancements
|
||||
* \#371 - improved coretk gui scaling
|
||||
* \#374 - display range visually for wlan in coretk gui, when configuring
|
||||
* \#377 - improved coretk error dialogs
|
||||
* \#379 - fixed issues with core converting between x,y and lon,lat for values that would cross utm zones
|
||||
* \#384 - sdt integration moved internally to core code allowing it to work for coretk gui as well
|
||||
* \#387 - coretk gui will now auto detect potential valid terminal and command to use for interacting with nodes during runtime
|
||||
* \#389 - coretk gui will now attempt to reconnect to daemon without need to restart
|
||||
* \#395 - coretk gui now has "save" and "save as" menu options
|
||||
* \#402 - coretk will now allow terminal preference to be directly edited
|
||||
* Bugfixes
|
||||
* \#375 - fixed issues with emane event monitor handling data
|
||||
* \#381 - executing a python script will now wait until completion before looking to join a new session
|
||||
* \#391 - fixed configuring node ip addresses in coretk gui
|
||||
* \#392 - fixed coretk link display when addresses are cleared out
|
||||
* \#393 - coretk gui will properly clear marker annotations when switching sessions
|
||||
* \#396 - Docker and LXC nodes will now properly save to XML
|
||||
* \#406 - WLAN bridge initialization was not ran when all nodes are disconnected
|
||||
|
||||
## 2020-02-20 CORE 6.1.0
|
||||
* New
|
||||
* config services - these services leverage a proper template engine and have configurable parameters, given enough time may replace existing services
|
||||
* core-imn-to-xml - IMN to XML utility script
|
||||
* replaced internal code for determining ip/mac address with netaddr library
|
||||
* Enhancements
|
||||
* added distributed package for built packages
|
||||
* made use of python type hinting for functions and their return values
|
||||
* updated Quagga zebra service to remove deprecated warning
|
||||
* Removed
|
||||
* removed stale ns3 code
|
||||
* CORETK GUI
|
||||
* added logging
|
||||
* improved error dialog
|
||||
* properly use global ipv6 addresses for nodes
|
||||
* disable proxy usage by default, flag available to enable
|
||||
* gRPC API
|
||||
* add_link - now returns created interface information
|
||||
* set_node_service - can now set files and directories to properly replicate previous usage
|
||||
* get_emane_event_channel - return information related to the currently used emane event channel
|
||||
* Bugfixes
|
||||
* fixed session SDT functionality back to working order, due to python3 changes
|
||||
* avoid shutting down services for nodes that are not up
|
||||
* EMANE bypass model options will now display properly in GUIs
|
||||
* XML scenarios will now properly read in custom node icons
|
||||
* \#372 - fixed mobility waypoint comparisons
|
||||
* \#370 - fixed radvd service
|
||||
* \#368 - updated frr services to properly start staticd when needed
|
||||
* \#358 - fixed systemd service install path
|
||||
* \#350 - fixed frr babel wireless configuration
|
||||
* \#354 - updated frr to reset interfaces to properly take configurations
|
||||
|
||||
## 2020-01-01 CORE 6.0.0
|
||||
* New
|
||||
* beta release of the python based tk GUI, use **coretk-gui** to try it out, plan will be to eventually sunset the old GUI once this is good enough
|
||||
* this GUI will allow us to provide enhancements and a consistent python dev environment for developers
|
||||
* Major Changes
|
||||
* python3.6+ support only, due to python2 EOL https://pyfound.blogspot.com/2019/12/python-2-sunset.html
|
||||
* distributed sessions now leverages the fabric library for sending remote SSH commands
|
||||
* Enhancements
|
||||
* changed usage of bridge-utils to using ip based bridge commands due to deprecation
|
||||
* installation.sh script to help automate a standard make install or dev install
|
||||
* when sessions are created without an id they will now always start from 1 and return the next unused id
|
||||
* gRPC is now running by default
|
||||
* Session API
|
||||
* removed **create_emane_network** and **create_wlan_network** to help force using **add_node** for all cases
|
||||
* removed **session.master** as it was only used for previous distributed sessions
|
||||
* updated **add_node** to allow providing a custom class for node creation
|
||||
* gRPC API
|
||||
* added get all services configurations
|
||||
* added get all wlan configurations
|
||||
* added start/stop session calls, provides more freedom for startup and shutdown logic
|
||||
* session events now have a session id to help differentiate which session they are coming from
|
||||
* throughput events now require a session id and responses include session id for differentiating data
|
||||
* session events can now be subscribed to with a subset of events or all
|
||||
* emane model config data now include interface ids properly
|
||||
* sessions returned from get sessions call may include file names when created from xml
|
||||
* when opening an xml the session can now be started or not
|
||||
* edit node will now broadcast the edit for others to listen to
|
||||
* all config responses will now be in the form of a mapped value of key to ConfigOption, or a list of these when retrieving all, sometimes the config response may be wrapped in a different message to include other metadata
|
||||
* Bugfixes
|
||||
* \#311 - initialize ebtables chains for wlan networks only
|
||||
* \#312 - removed sudo from init script
|
||||
* \#313 - check if interface exists before flushing, previously would log an exception that didn't matter
|
||||
* \#314 - node locations stored as floats instead of ints to avoid mobility calculations due to loss of precision
|
||||
* \#321 - python installation path will be based on distribution/python building it
|
||||
* emane options xml parsing didn't properly take into account the **emane_prefix** configuration
|
||||
* updates services that checked for ipv4/ipv6 addresses to not fail for valid ipv6 addresses with a decimal
|
||||
* Documentation
|
||||
* updated NRL links to new GitHub locations
|
||||
* updates for distributed session
|
||||
* updates to dev guide
|
||||
* updates to examples LXD/Docker setup
|
||||
* updates to FRR service documentation
|
||||
* gRPC get node service file will not throw an exception when node doesn't exist
|
||||
|
||||
## 2019-10-12 CORE 5.5.2
|
||||
* gRPC
|
||||
* Added emane_link API for linking/unlinking EMANE nodes within the GUI
|
||||
* Bugfixes
|
||||
* Fixed python3 issues when configuring WLAN nodes
|
||||
* Fixed issue due to refactoring when running distributed
|
||||
* Fixed issue when running python script from GUI
|
||||
|
||||
## 2019-10-09 CORE 5.5.1
|
||||
* Bugfix
|
||||
* Fixed issue with 5.5.0 refactoring causing issues in python2.
|
||||
* Fixed python3 issues with NRL services
|
||||
|
||||
## 2019-10-03 CORE 5.5.0
|
||||
* Documentation
|
||||
* updated dependencies for building OSPF MDR on installation page
|
||||
* added python/pip instruction on installation page
|
||||
* added ethtool dependency for CORE
|
||||
* GUI
|
||||
* removed experimental OVS node to avoid confusion and issues related to using it
|
||||
* Daemon
|
||||
* fixed core-daemon --ovs flag back to working order for running CORE using OVS bridges instead of Linux bridges
|
||||
* updated requirements.txt to refer to configparser 4.0.2, due to 4.0.1 removal by developers
|
||||
* update to fail fast for dependent executables that are not found within PATH
|
||||
* update to not load services that fail during service.on_load and move on
|
||||
* Build
|
||||
* fixed issue with configure script when using option flags
|
||||
* python install path will use the native install path for AM_PATH_PYTHON, instead of coercing to python3
|
||||
* Issues
|
||||
* \#271 - OVS node error in GUI
|
||||
* \#291 - configparser 4.0.1 issue
|
||||
* \#290 - python3 path issue when building
|
||||
|
||||
## 2019-09-23 CORE 5.4.0
|
||||
* Documentation
|
||||
* Updates to documentation dev guide
|
||||
* Improvements
|
||||
* Added support for Pipenv for development
|
||||
* Added configuration to leverage pre-commit during development
|
||||
* Added configuration to leverage isort, black, and flake8 during development
|
||||
* Added Github Actions to help verify pull requests in the same way as pre-commit
|
||||
* Issues
|
||||
* \#279 - WLAN configuration does not get set by default
|
||||
* \#272 - error installing python package futures==3.2.0
|
||||
* Pull Requests
|
||||
* \#275 - Disable MAC learning on WLAN
|
||||
* \#281 - Bumped jackson version on corefx
|
||||
|
||||
## 2019-07-05 CORE 5.3.1
|
||||
* Documentation
|
||||
* Updates to provide more information regarding several of the included services
|
||||
* Issues
|
||||
* \#252 - fixed changing wlan configurations during runtime
|
||||
* \#256 - fixed mobility waypoint comparison for python3
|
||||
* \#174 - turn tx/rx checksums off by default as they will never be valid for virtual interfaces
|
||||
* \#259 - fixes for distributed EMANE
|
||||
* \#260 - fixed issue with how execfile was being used due to it not existing within python3
|
||||
|
||||
## 2019-06-10 CORE 5.3.0
|
||||
* Enhancements
|
||||
* python 2 / 3 support
|
||||
* added new API using [gRPC](https://grpc.io/)
|
||||
* --grpc --grpc-port --grpc-address flags added to core-daemon
|
||||
* core.api.grpc.client.CoreGrpcClient, provides a convenience wrapper for leveraging the API
|
||||
* Docs
|
||||
* Updates to installation instructions for latest changes
|
||||
* Services
|
||||
* Added FRR service
|
||||
* EMANE
|
||||
* Added EMANE prefix configuration when looking for emane model manifest files
|
||||
* requires configuring **emane_prefix** in /etc/core/core.conf
|
||||
* Cleanup
|
||||
* Refactoring of the core python package structure, trying to help provide better organization and
|
||||
logical groupings
|
||||
* Issues
|
||||
* \#246 - Fixed network to network link handling when reading xml files
|
||||
* \#236 - Fixed storing/reading of link configuration values within xml files
|
||||
* \#170 - FRR Service
|
||||
* \#155 - EMANE path configuration
|
||||
* \#233 - Python 3 support
|
||||
* \#245 - Fixed bidirectional link configurations when reading from xml files
|
||||
* \#208 - gRPC API
|
||||
* Fixed link configuration dup handling when loaded from xml files
|
||||
|
||||
## 2019-06-07 CORE 5.2.2
|
||||
* Enhancements:
|
||||
* adds back in core-daemon udp support for coresendmsg, people may have depended on previously for certain scenarios
|
||||
* Bug Fixes:
|
||||
* fixes issue in GUI that would prevent moving nodes during mobility scenarios
|
||||
|
||||
## 2019-03-25 CORE 5.2.1
|
||||
* Packaging:
|
||||
* documentation no longer builds by default, must use configure flag
|
||||
* added configure flag to allow only building vcmd
|
||||
* sphinx will no longer be required when not building documentation
|
||||
* Services:
|
||||
* Added source NAT service
|
||||
* Fixed DHCP service for Ubuntu 18.04
|
||||
* BUGFIXES:
|
||||
* \#188 - properly remove session on delete TLV API call
|
||||
* \#192 - updated default gnome terminal command for nodes to be Ubuntu 18.04 compatible
|
||||
* \#193 - updates to service validation, will retry on failure and better exception logging
|
||||
* \#195 - TLV link message data fix
|
||||
* \#196 - fix to avoid clearing out default services
|
||||
* \#197 - removed wireless_link_all API from EmuSession
|
||||
* \#216 - updated default WLAN bandwidth to 54Mbps
|
||||
* \#223 - fix to saving RJ45 to session XML files
|
||||
|
||||
## 2018-05-22 CORE 5.1
|
||||
* DAEMON:
|
||||
* removed and cleared out code that is either legacy or no longer supported (Xen, BSD, Kernel patching, RPM/DEB
|
||||
specific files)
|
||||
* default nodes are now set in the node map
|
||||
* moved ns3 and netns directories to the top of the repo
|
||||
* changes to make use of fpm as the tool for building packages
|
||||
* removed usage of logzero to avoid dependency issues for built packages
|
||||
* removed daemon addons directory
|
||||
* added CoreEmu to core.emulator.coreemu to help begin serving as the basis for a more formal API for scripting
|
||||
and creating new external APIs out of
|
||||
* cleaned up logging, moved more logging to DEBUG from INFO, tried to mold INFO message to be more simple and
|
||||
informative
|
||||
* EMANE 1.0.1-1.21 supported
|
||||
* updates to leverage EMANE python bindings for dynamically parsing phy/mac manifest files
|
||||
* example custom EMANE model lives under /usr/share/core/examples/myemane/examplemodel.py
|
||||
* EMANE TDMA model now supports an option to start a TDMA schedule when running
|
||||
* fixed issues with coresendmsg script due to code refactoring
|
||||
* added make target for generating documentation "make doc"
|
||||
* Python 2.7+ is now required
|
||||
* ns3 is no longer bundled by default, but will be produced as a separate package for installation
|
||||
* GUI:
|
||||
* updated broken help links in GUI Help->About
|
||||
* Packaging:
|
||||
* fixed PYTHON_PATH to PYTHONPATH in sysv script
|
||||
* added make command to leverage FPM as the tool for creating deb/rpm packages going forward, there is documentation
|
||||
within README.md to try it out
|
||||
* TEST:
|
||||
* fixed some broken tests
|
||||
* new test cases based on CoreEmu usage
|
||||
* BUGFIXES:
|
||||
* \#142 - duplication of custom services
|
||||
* \#136 - sphinx-apidoc command not found
|
||||
* \#137 - make command fails when using distclean
|
||||
|
||||
## 2017-09-01 CORE 5.0
|
||||
* DEVELOPMENT:
|
||||
* support for editorconfig to help standardize development across IDEs, from the defined configuration file
|
||||
* support for sonarqube analysis, from the defined configuration file
|
||||
* DAEMON:
|
||||
* code cleanup and improvements to adhere to coding standards (SonarQube)
|
||||
* leverage "logzero" module to make easy usage of the standard logging module
|
||||
* improvements to documentation across the code base
|
||||
* initial work to separate the dependence on TCP API messaging from the core library (easier core scripting)
|
||||
* beta support for running core in Open vSwitch mode, leveraging Open vSwitch bridges, instead of Linux bridges
|
||||
* SERVICES:
|
||||
* added Ryu SDN controller service
|
||||
* added Open vSwitch service
|
||||
* TEST:
|
||||
* added unit/integration tests to support validating changes going forward
|
||||
* BUGFIXES:
|
||||
* merged pull requests for: #115, #110, #109, #107, #106, #105, #103, #102, #101, #96
|
||||
|
||||
## 2015-06-05 CORE 4.8
|
||||
* EMANE:
|
||||
* support for EMANE 0.9.2
|
||||
* run emane in each container when using EMANE 0.9.2
|
||||
* support using separate control networks for EMANE OTA and event traffic
|
||||
* GUI:
|
||||
* fixed an issue where the adjacency widget lines pointed to old node positions
|
||||
* fixed an issue where not all EMANE 0.9.x IEEE 802.11 MAC parameter were configurable
|
||||
* fixed an issue related to running python scripts from the GUI when using tcl/tk version 8.6
|
||||
* improved batch mode execution to display the check emulation light status
|
||||
* improved managing multiple sessions
|
||||
* improved support for using multiple canvases
|
||||
* added a reload option to the file menu to revert back to a saved scenario
|
||||
* DAEMON:
|
||||
* support exporting scenarios in NRL Network Modeling Framework 1.0 XML format
|
||||
* support importing scenarios in NRL Network Modeling Framework 1.0 XML format
|
||||
* support exporting the deployed scenario state in NRL NMF XML 1.0 format
|
||||
* improved EMANE post-startup processing to better synchronize distributed emulations
|
||||
* improved how addresses are assigned to tun/tap devices
|
||||
* added support for python state-change callbacks
|
||||
* SERVICES:
|
||||
* added mgen sink and mgen actor services
|
||||
* added olsrv2 and olsr.org services
|
||||
* added a docker service
|
||||
* BUILD:
|
||||
* improved the install/uninstall process
|
||||
* improved debian and rpm packaging
|
||||
* BUGFIXES:
|
||||
* updated the http service for ubuntu 14.04
|
||||
* improved included examples
|
||||
* shortened the length of network interface names
|
||||
* improved how the core system service manages running the core daemon
|
||||
* fixed an issue related to applying session configuration settings
|
||||
* improved detecting when a distributed emulation is already running
|
||||
* improved documentation
|
||||
|
||||
## 2014-08-06 CORE 4.7
|
||||
* EMANE:
|
||||
* support for EMANE 0.9.1
|
||||
* fix error when using Comm Effect model with loss/duplicate string values
|
||||
* enable flow control in virtual transport if enabled in the MAC model
|
||||
* fix bug #150 where EMANE event service/address port were not used
|
||||
* GUI:
|
||||
* support Tcl/Tk 8.6 when available
|
||||
* added --(a)ddress and --(p)ort arguments to core-gui command-line
|
||||
* added File > Execute XML or Python script... option
|
||||
* added File > Execute Python script with options... menu item
|
||||
* when executing Python script from GUI, run in background thread, wait for
|
||||
RUNTIME state
|
||||
* enter RUNTIME state when start button pressed with empty canvas
|
||||
* added support for asymmetric link effects
|
||||
* support link delays up to 274 seconds (netem maximum)
|
||||
* allow runtime changes of WLAN link effects
|
||||
* DAEMON:
|
||||
* set NODE_NAME, NODE_NUMBER, SESSION_SHORT in default vnoded environment
|
||||
* changed host device naming to use veth, tap prefixes; b.n.SS for bridges
|
||||
* allow parsing XML files into live running session
|
||||
* enable link effects between hub/switch and hub/switch connections
|
||||
* update MDR service to use broadcast interfaces for non-WLAN links
|
||||
* allow node class to be specified when initializing XML parser
|
||||
* save and parse canvas origin (reference point) and scale in MP XML
|
||||
* up/down control script session option
|
||||
* fix hash calculation used to determine GRE tunnel keys
|
||||
* use shell script to detach SMF on startup
|
||||
* added NRL services for mgen sink and nrlolsrv2
|
||||
* use SDT URL session option
|
||||
* added core-manage tool for addons to add/remove/check services, models,
|
||||
and custom node types
|
||||
* API:
|
||||
* implement local flag in Execute Message for running host commands
|
||||
* jitter changed to 64-bit value to align with delay in Link Message
|
||||
* added unidirectional link flag TLV to Link Message
|
||||
* added reconfigure event type for re-generating service config files
|
||||
* return errors in API with failed services
|
||||
* BUGFIXES:
|
||||
* fix HTTP service running under Ubuntu
|
||||
* fixed the following bugs: #150, 169, 188, 220, 225, 230, 231, 242, 244,
|
||||
247, 248, 250, 251
|
||||
|
||||
## 2013-09-25 CORE 4.6
|
||||
* NOTE: cored is now core-daemon, and core is now core-gui (for Debian acceptance)
|
||||
* NOTE: /etc/init.d/core is now /etc/init.d/core-daemon (for insserv compatibility)
|
||||
* EMANE:
|
||||
* don't start EMANE locally if no local NEMs
|
||||
* EMANE poststartup() to re-transmit location events during initialization
|
||||
* added debug port to EMANE options
|
||||
* added a basic EMANE 802.11 CORE Python script example
|
||||
* expose transport XML block generation to EmaneModels
|
||||
* expose NEM entry to the EmaneModel so it can be overridden by a model
|
||||
* add the control interface bridge prior to starting EMANE, as some models may
|
||||
* depend on the controlnet functionality
|
||||
* added EMANE model to CORE converter
|
||||
* parse lat/long/alt from node messages, for moving nodes using command-line
|
||||
* fix bug #196 incorrect distance when traversing UTM zones
|
||||
* GUI:
|
||||
* added Cut, Copy, and Paste options to the Edit menu
|
||||
* paste will copy selected services and take care of node and interface
|
||||
* renumbering
|
||||
* implement Edit > Find dialog for searching nodes and links
|
||||
* when copying existing file for a service, perform string replacement of:
|
||||
* "~", "%SESSION%", "%SESSION_DIR%", "%SESSION_USER%", "%NODE%", "%NODENAME%"
|
||||
* use CORE_DATA_DIR instead of LIBDIR
|
||||
* fix Adjacency Widget to work with OSPFv2 only networks
|
||||
* BUILD:
|
||||
* build/packaging improvements for inclusion on Debian
|
||||
* fix error when running scenario with a mobility script in batch mode
|
||||
* include Linux kernel patches for 3.8
|
||||
* renamed core-cleanup.sh to core-cleanup for Debian conformance
|
||||
* don't always generate man pages from Makefile; new manpages for
|
||||
coresendmsg and core-daemon
|
||||
* BUGFIXES:
|
||||
* don't auto-assign IPv4/IPv6 addresses when none received in Link Messages (session reconnect)
|
||||
* fixed lock view
|
||||
* fix GUI spinbox errors for Tk 8.5.8 (RHEL/CentOS 6.2)
|
||||
* fix broker node count for distributed session entering the RUNTIME state when
|
||||
* (non-EMANE) WLANs or GreTapBridges are involved;
|
||||
* fix "file exists" error message when distributed session number is re-used
|
||||
* and servers file is written
|
||||
* fix bug #194 configuration dialog too long, make dialog scrollable/resizable
|
||||
* allow float values for loss and duplicates percent
|
||||
* fix the following bugs: 166, 172, 177, 178, 192, 194, 196, 201, 202,
|
||||
205, 206, 210, 212, 213, 214, 221
|
||||
|
||||
## 2013-04-13 CORE 4.5
|
||||
* GUI:
|
||||
* improved behavior when starting GUI without daemon, or using File New after connection with daemon is lost
|
||||
* fix various GUI issues when reconnecting to a session
|
||||
* support 3D GUI via output to SDT3D
|
||||
* added "Execute Python script..." entry to the File Menu
|
||||
* support user-defined terminal program instead of hard-coded xterm
|
||||
* added session options for "enable RJ45s", "preserve session dir"
|
||||
* added buttons to the IP Addresses dialog for removing all/selected IPv4/IPv6
|
||||
* allow sessions with multiple canvases to enter RUNTIME state
|
||||
* added "--addons" startup mode to pass control to code included from addons dir
|
||||
* added "Locked" entry to View menu to prevent moving items
|
||||
* use currently selected node type when invoking a topology generator
|
||||
* updated throughput plots with resizing, color picker, plot labels, locked scales, and save/load plot
|
||||
configuration with imn file
|
||||
* improved session dialog
|
||||
* EMANE:
|
||||
* EMANE 0.8.1 support with backwards-compatibility for 0.7.4
|
||||
* extend CommEffect model to generate CommEffect events upon receipt of Link Messages having link effects
|
||||
* Services:
|
||||
* updated FTP service with root directory for anonymous users
|
||||
* added HTTP, PCAP, BIRD, RADVD, and Babel services
|
||||
* support copying existing files instead of always generating them
|
||||
* added "Services..." entry to node right-click menu
|
||||
* added "View" button for side-by-side comparison when copying customized config files
|
||||
* updated Quagga daemons to wait for zebra.vty VTY file before starting
|
||||
* General:
|
||||
* XML import and export
|
||||
* renamed "cored.py" to "cored", "coresendmsg.py" to "coresendmsg"
|
||||
* code reorganization and clean-up
|
||||
* updated XML export to write NetworkPlan, MotionPlan, and ServicePlan within a Scenario tag, added new
|
||||
"Save As XML..." File menu entry
|
||||
* added script_start/pause/stop options to Ns2ScriptedMobility
|
||||
* "python" source sub-directory renamed to "daemon"
|
||||
* added "cored -e" option to execute a Python script, adding its session to the active sessions list, allowing for
|
||||
GUI connection
|
||||
* support comma-separated list for custom_services_dir in core.conf file
|
||||
* updated kernel patches for Linux kernel 3.5
|
||||
* support RFC 6164-style IPv6 /127 addressing
|
||||
* ns-3:
|
||||
* integrate ns-3 node location between CORE and ns-3 simulation
|
||||
* added ns-3 random walk mobility example
|
||||
* updated ns-3 Wifi example to allow GUI connection and moving of nodes
|
||||
* fixed the following bugs: 54, 103, 111, 136, 145, 153, 157, 160, 161, 162, 164, 165, 168, 170, 171, 173, 174, 176,
|
||||
184, 190, 193
|
||||
|
||||
## 2012-09-25 CORE 4.4
|
||||
* GUI:
|
||||
* real-time bandwidth plotting tool
|
||||
* added Wireshark and tshark right-click menu items
|
||||
* X,Y coordinates shown in the status bar
|
||||
* updated GUI attribute option to link messages for changing color/width/dash
|
||||
* added sample IPsec and VPN scenarios, how many nodes script
|
||||
* added jitter parameter to WLANs
|
||||
* renamed Experiment menu to Session menu, added session options
|
||||
* use 'key=value' configuration for services, EMANE models, WLAN models, etc.
|
||||
* save only service values that have been customized
|
||||
* copy service parameters from one customized service to another
|
||||
* right-click menu to start/stop/restart each service
|
||||
* EMANE:
|
||||
* EMANE 0.7.4 support
|
||||
* added support for EMANE CommEffect model and Comm Effect controller GUI
|
||||
* added support for EMANE Raw Transport when using RJ45 devices
|
||||
* Services:
|
||||
* improved service customization; allow a service to define custom Tcl tab
|
||||
* added vtysh.conf for Quagga service to support 'write mem'
|
||||
* support scheduled events and services that start N seconds after runtime
|
||||
* added UCARP service
|
||||
* Documentation:
|
||||
* converted the CORE manual to reStructuredText using Sphinx; added Python docs
|
||||
* General:
|
||||
* Python code reorganization
|
||||
* improved cored.py thread locking
|
||||
* merged xen branch into trunk
|
||||
* added an event queue to a session with notion of time zero
|
||||
* added UDP support to cored.py
|
||||
* use UDP by default in coresendmsg.py; added '-H' option to print examples
|
||||
* enter a bash shell by default when running vcmd with no arguments
|
||||
* fixes to distributed emulation entering runtime state
|
||||
* write 'nodes' file upon session startup
|
||||
* make session number and other attributes available in environment
|
||||
* support /etc/core/environment and ~/.core/environment files
|
||||
* added Ns2ScriptedMobility model to Python, removed from the GUI
|
||||
* namespace nodes mount a private /sys
|
||||
* fixed the following bugs: 80, 81, 84, 99, 104, 109, 110, 122, 124, 131, 133, 134, 135, 137, 140, 143, 144, 146,
|
||||
147, 151, 154, 155
|
||||
|
||||
## 2012-03-07 CORE 4.3
|
||||
* EMANE 0.7.2 and 0.7.3 support
|
||||
* hook scripts: customize actions at any of six different session states
|
||||
* Check Emulation Light (CEL) exception feedback system
|
||||
* added FTP and XORP services, and service validate commands
|
||||
* services can flag when customization is required
|
||||
* Python classes to support ns-3 simulation experiments
|
||||
* write state, node X,Y position, and servers to pycore session dir
|
||||
* removed over 9,000 lines of unused GUI code
|
||||
* performance monitoring script
|
||||
* batch mode improvements and --closebatch option
|
||||
* export session to EmulationScript XML files
|
||||
* basic range model moved from GUI to Python, supports 3D coordinates
|
||||
* improved WLAN dialog with tabs
|
||||
* added PhysicalNode class for joining real nodes with emulated networks
|
||||
* fixed the following bugs: 50, 75, 76, 79, 82, 83, 85, 86, 89, 90, 92, 94, 96, 98, 100, 112, 113, 116, 119, 120
|
||||
|
||||
## 2011-08-19 CORE 4.2
|
||||
* EMANE 0.7.1 support
|
||||
* support for Bypass model, Universal PHY, logging, realtime
|
||||
* configurable MAC addresses
|
||||
* control interfaces (backchannel between node and host)
|
||||
* service customization dialog improved (tabbed)
|
||||
* new testing scripts for MDR and EMANE performance testing
|
||||
* improved upgrading of old imn files
|
||||
* new coresendmsg.py utility (deprecates libcoreapi and coreapisend)
|
||||
* new security services, custom service becomes UserDefined
|
||||
* new services and Python scripting chapters in manual
|
||||
* fixes to distributed emulation, linking tunnels/RJ45s with WLANs/hubs/switches
|
||||
* fixed the following bugs: 18, 32, 34, 38, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 52, 53, 55, 57, 58, 60, 62, 64,
|
||||
65, 66, 68, 71, 72, 74
|
||||
|
||||
## 2011-01-05 CORE 4.1
|
||||
* new icons for toolbars and nodes
|
||||
* node services introduced, node models deprecated
|
||||
* customizable node types
|
||||
* traffic flow editor with MGEN support
|
||||
* user configs moved from /etc/core/`*` to ~/.core/
|
||||
* allocate addresses from custom IPv4/IPv6 prefixes
|
||||
* distributed emulation using GRE tunnels
|
||||
* FreeBSD 8.1 now uses cored.py
|
||||
* EMANE 0.6.4 support
|
||||
* numerous bugfixes
|
||||
|
||||
## 2010-08-17 CORE 4.0
|
||||
* Python framework with Linux network namespace (netns) support (Linux netns is now the primary supported platform)
|
||||
* ability to close the GUI and later reconnect to a running session (netns only)
|
||||
* EMANE integration (netns only)
|
||||
* new topology generators, host file generator
|
||||
* user-editable Observer Widgets
|
||||
* use of /etc/core instead of /usr/local/etc/core
|
||||
* various bugfixes
|
||||
|
||||
## 2009-09-15 CORE 3.5
|
||||
|
||||
## 2009-06-23 CORE 3.4
|
||||
|
||||
## 2009-03-11 CORE 3.3
|
||||
342
Changelog
Normal file
342
Changelog
Normal file
|
|
@ -0,0 +1,342 @@
|
|||
2019-03-25 CORE 5.2.1
|
||||
* Packaging:
|
||||
- documentation no longer builds by default, must use configure flag
|
||||
- added configure flag to allow only building vcmd
|
||||
- sphinx will no longer be required when not building documentation
|
||||
* Services:
|
||||
- Added source NAT service
|
||||
- Fixed DHCP service for Ubuntu 18.04
|
||||
* BUGFIXES:
|
||||
- #188 - properly remove session on delete TLV API call
|
||||
- #192 - updated default gnome terminal command for nodes to be Ubuntu 18.04 compatible
|
||||
- #193 - updates to service validation, will retry on failure and better exception logging
|
||||
- #195 - TLV link message data fix
|
||||
- #196 - fix to avoid clearing out default services
|
||||
- #197 - removed wireless_link_all API from EmuSession
|
||||
- #216 - updated default WLAN bandwidth to 54Mbps
|
||||
- #223 - fix to saving RJ45 to session XML files
|
||||
|
||||
2018-05-22 CORE 5.1
|
||||
* DAEMON:
|
||||
- removed and cleared out code that is either legacy or no longer supported (Xen, BSD, Kernel patching, RPM/DEB specific files)
|
||||
- default nodes are now set in the node map
|
||||
- moved ns3 and netns directories to the top of the repo
|
||||
- changes to make use of fpm as the tool for building packages
|
||||
- removed usage of logzero to avoid dependency issues for built packages
|
||||
- removed daemon addons directory
|
||||
- added CoreEmu to core.emulator.coreemu to help begin serving as the basis for a more formal API for scripting and creating new external APIs out of
|
||||
- cleaned up logging, moved more logging to DEBUG from INFO, tried to mold INFO message to be more simple and informative
|
||||
- EMANE 1.0.1-1.21 supported
|
||||
- updates to leverage EMANE python bindings for dynamically parsing phy/mac manifest files
|
||||
- example custom EMANE model lives under /usr/share/core/examples/myemane/examplemodel.py
|
||||
- EMANE TDMA model now supports an option to start a TDMA schedule when running
|
||||
- fixed issues with coresendmsg script due to code refactoring
|
||||
- added make target for generating documentation "make doc"
|
||||
- Python 2.7+ is now required
|
||||
- ns3 is no longer bundled by default, but will be produced as a separate package for installation
|
||||
* GUI:
|
||||
- updated broken help links in GUI Help->About
|
||||
* Packaging:
|
||||
- fixed PYTHON_PATH to PYTHONPATH in sysv script
|
||||
- added make command to leverage FPM as the tool for creating deb/rpm packages going forward, there is documentation within README.md to try it out
|
||||
* TEST:
|
||||
- fixed some broken tests
|
||||
- new test cases based on CoreEmu usage
|
||||
* BUGFIXES:
|
||||
- #142 - duplication of custom services
|
||||
- #136 - sphinx-apidoc command not found
|
||||
- #137 - make command fails when using distclean
|
||||
|
||||
2017-09-01 CORE 5.0
|
||||
* DEVELOPMENT:
|
||||
- support for editorconfig to help standardize development across IDEs, from the defined configuration file
|
||||
- support for sonarqube analysis, from the defined configuration file
|
||||
* DAEMON:
|
||||
- code cleanup and improvements to adhere to coding standards (SonarQube)
|
||||
- leverage "logzero" module to make easy usage of the standard logging module
|
||||
- improvements to documentation across the code base
|
||||
- initial work to separate the dependence on TCP API messaging from the core library (easier core scripting)
|
||||
- beta support for running core in Open vSwitch mode, leveraging Open vSwitch bridges, instead of Linux bridges
|
||||
* SERVICES:
|
||||
- added Ryu SDN controller service
|
||||
- added Open vSwitch service
|
||||
* TEST:
|
||||
- added unit/integration tests to support validating changes going forward
|
||||
* BUGFIXES:
|
||||
- merged pull requests for: #115, #110, #109, #107, #106, #105, #103, #102, #101, #96
|
||||
|
||||
2015-06-05 CORE 4.8
|
||||
* EMANE:
|
||||
- support for EMANE 0.9.2
|
||||
- run emane in each container when using EMANE 0.9.2
|
||||
- support using separate control networks for EMANE OTA and event traffic
|
||||
* GUI:
|
||||
- fixed an issue where the adjacency widget lines pointed to old node positions
|
||||
- fixed an issue where not all EMANE 0.9.x IEEE 802.11 MAC parameter were configurable
|
||||
- fixed an issue related to running python scripts from the GUI when using tcl/tk version 8.6
|
||||
- improved batch mode execution to display the check emulation light status
|
||||
- improved managing multiple sessions
|
||||
- improved support for using multiple canvases
|
||||
- added a reload option to the file menu to revert back to a saved scenario
|
||||
* DAEMON:
|
||||
- support exporting scenarios in NRL Network Modeling Framework 1.0 XML format
|
||||
- support importing scenarios in NRL Network Modeling Framework 1.0 XML format
|
||||
- support exporting the deployed scenario state in NRL NMF XML 1.0 format
|
||||
- improved EMANE post-startup processing to better synchronize distributed emulations
|
||||
- improved how addresses are assigned to tun/tap devices
|
||||
- added support for python state-change callbacks
|
||||
* SERVICES:
|
||||
- added mgen sink and mgen actor services
|
||||
- added olsrv2 and olsr.org services
|
||||
- added a docker service
|
||||
* BUILD:
|
||||
- improved the install/uninstall process
|
||||
- improved debian and rpm packaging
|
||||
* BUGFIXES:
|
||||
- updated the http service for ubuntu 14.04
|
||||
- improved included examples
|
||||
- shortened the length of network interface names
|
||||
- improved how the core system service manages running the core daemon
|
||||
- fixed issues related to applying session configuration settings
|
||||
- improved detecting when a distributed emulation is already running
|
||||
- improved documentation
|
||||
|
||||
2014-08-06 CORE 4.7
|
||||
|
||||
* EMANE:
|
||||
- support for EMANE 0.9.1
|
||||
- fix error when using Comm Effect model with loss/duplicate string values
|
||||
- enable flow control in virtual transport if enabled in the MAC model
|
||||
- fix bug #150 where EMANE event service/address port were not used
|
||||
* GUI:
|
||||
- support Tcl/Tk 8.6 when available
|
||||
- added --(a)ddress and --(p)ort arguments to core-gui command-line
|
||||
- added File > Execute XML or Python script... option
|
||||
- added File > Execute Python script with options... menu item
|
||||
- when executing Python script from GUI, run in background thread, wait for
|
||||
RUNTIME state
|
||||
- enter RUNTIME state when start button pressed with empty canvas
|
||||
- added support for asymmetric link effects
|
||||
- support link delays up to 274 seconds (netem maximum)
|
||||
- allow runtime changes of WLAN link effects
|
||||
* DAEMON:
|
||||
- set NODE_NAME, NODE_NUMBER, SESSION_SHORT in default vnoded environment
|
||||
- changed host device naming to use veth, tap prefixes; b.n.SS for bridges
|
||||
- allow parsing XML files into live running session
|
||||
- enable link effects between hub/switch and hub/switch connections
|
||||
- update MDR service to use broadcast interfaces for non-WLAN links
|
||||
- allow node class to be specified when initializing XML parser
|
||||
- save and parse canvas origin (reference point) and scale in MP XML
|
||||
- up/down control script session option
|
||||
- fix hash calculation used to determine GRE tunnel keys
|
||||
- use shell script to detach SMF on startup
|
||||
- added NRL services for mgen sink and nrlolsrv2
|
||||
- use SDT URL session option
|
||||
- added core-manage tool for addons to add/remove/check services, models,
|
||||
and custom node types
|
||||
* API:
|
||||
- implement local flag in Execute Message for running host commands
|
||||
- jitter changed to 64-bit value to align with delay in Link Message
|
||||
- added unidirectional link flag TLV to Link Message
|
||||
- added reconfigure event type for re-generating service config files
|
||||
- return errors in API with failed services
|
||||
* BUGFIXES:
|
||||
- fix HTTP service running under Ubuntu
|
||||
- fixed the following bugs: #150, 169, 188, 220, 225, 230, 231, 242, 244,
|
||||
247, 248, 250, 251
|
||||
|
||||
2013-09-25 CORE 4.6
|
||||
|
||||
* NOTE: cored is now core-daemon, and core is now core-gui (for Debian
|
||||
acceptance)
|
||||
* NOTE: /etc/init.d/core is now /etc/init.d/core-daemon (for insserv
|
||||
compatibility)
|
||||
* EMANE:
|
||||
- don't start EMANE locally if no local NEMs
|
||||
- EMANE poststartup() to re-transmit location events during initialization
|
||||
- added debug port to EMANE options
|
||||
- added a basic EMANE 802.11 CORE Python script example
|
||||
- expose transport XML block generation to EmaneModels
|
||||
- expose NEM entry to the EmaneModel so it can be overridden by a model
|
||||
- add the control interface bridge prior to starting EMANE, as some models may
|
||||
- depend on the controlnet functionality
|
||||
- added EMANE model to CORE converter
|
||||
- parse lat/long/alt from node messages, for moving nodes using command-line
|
||||
- fix bug #196 incorrect distance when traversing UTM zones
|
||||
|
||||
* GUI:
|
||||
- added Cut, Copy, and Paste options to the Edit menu
|
||||
- paste will copy selected services and take care of node and interface
|
||||
- renumbering
|
||||
- implement Edit > Find dialog for searching nodes and links
|
||||
- when copying existing file for a service, perform string replacement of:
|
||||
- "~", "%SESSION%", "%SESSION_DIR%", "%SESSION_USER%", "%NODE%", "%NODENAME%"
|
||||
- use CORE_DATA_DIR instead of LIBDIR
|
||||
- fix Adjacency Widget to work with OSPFv2 only networks
|
||||
|
||||
* BUILD:
|
||||
- build/packaging improvements for inclusion on Debian
|
||||
- fix error when running scenario with a mobility script in batch mode
|
||||
- include Linux kernel patches for 3.8
|
||||
- renamed core-cleanup.sh to core-cleanup for Debian conformance
|
||||
- don't always generate man pages from Makefile; new manpages for
|
||||
coresendmsg and core-daemon
|
||||
|
||||
* BUGFIXES:
|
||||
- don't auto-assign IPv4/IPv6 addresses when none received in Link Messages (session reconnect)
|
||||
- fixed lock view
|
||||
- fix GUI spinbox errors for Tk 8.5.8 (RHEL/CentOS 6.2)
|
||||
- fix broker node count for distributed session entering the RUNTIME state when
|
||||
- (non-EMANE) WLANs or GreTapBridges are involved;
|
||||
- fix "file exists" error message when distributed session number is re-used
|
||||
- and servers file is written
|
||||
- fix bug #194 configuration dialog too long, make dialog scrollable/resizable
|
||||
- allow float values for loss and duplicates percent
|
||||
- fix the following bugs: 166, 172, 177, 178, 192, 194, 196, 201, 202,
|
||||
205, 206, 210, 212, 213, 214, 221
|
||||
|
||||
2013-04-13 CORE 4.5
|
||||
|
||||
* GUI:
|
||||
- improved behavior when starting GUI without daemon, or using File New after connection with daemon is lost
|
||||
- fix various GUI issues when reconnecting to a session
|
||||
- support 3D GUI via output to SDT3D
|
||||
- added "Execute Python script..." entry to the File Menu
|
||||
- support user-defined terminal program instead of hard-coded xterm
|
||||
- added session options for "enable RJ45s", "preserve session dir"
|
||||
- added buttons to the IP Addresses dialog for removing all/selected IPv4/IPv6
|
||||
- allow sessions with multiple canvases to enter RUNTIME state
|
||||
- added "--addons" startup mode to pass control to code included from addons dir
|
||||
- added "Locked" entry to View menu to prevent moving items
|
||||
- use currently selected node type when invoking a topology generator
|
||||
- updated throughput plots with resizing, color picker, plot labels, locked scales, and save/load plot configuration with imn file
|
||||
- improved session dialog
|
||||
* EMANE:
|
||||
- EMANE 0.8.1 support with backwards-compatibility for 0.7.4
|
||||
- extend CommEffect model to generate CommEffect events upon receipt of Link Messages having link effects
|
||||
* Services:
|
||||
- updated FTP service with root directory for anonymous users
|
||||
- added HTTP, PCAP, BIRD, RADVD, and Babel services
|
||||
- support copying existing files instead of always generating them
|
||||
- added "Services..." entry to node right-click menu
|
||||
- added "View" button for side-by-side comparison when copying customized config files
|
||||
- updated Quagga daemons to wait for zebra.vty VTY file before starting
|
||||
* General:
|
||||
- XML import and export
|
||||
- renamed "cored.py" to "cored", "coresendmsg.py" to "coresendmsg"
|
||||
- code reorganization and clean-up
|
||||
- updated XML export to write NetworkPlan, MotionPlan, and ServicePlan within a Scenario tag, added new "Save As XML..." File menu entry
|
||||
- added script_start/pause/stop options to Ns2ScriptedMobility
|
||||
- "python" source sub-directory renamed to "daemon"
|
||||
- added "cored -e" option to execute a Python script, adding its session to the active sessions list, allowing for GUI connection
|
||||
- support comma-separated list for custom_services_dir in core.conf file
|
||||
- updated kernel patches for Linux kernel 3.5
|
||||
- support RFC 6164-style IPv6 /127 addressing
|
||||
* ns-3:
|
||||
- integrate ns-3 node location between CORE and ns-3 simulation
|
||||
- added ns-3 random walk mobility example
|
||||
- updated ns-3 Wifi example to allow GUI connection and moving of nodes
|
||||
* fixed the following bugs: 54, 103, 111, 136, 145, 153, 157, 160, 161, 162, 164, 165, 168, 170, 171, 173, 174, 176, 184, 190, 193
|
||||
|
||||
2012-09-25 CORE 4.4
|
||||
|
||||
* GUI:
|
||||
- real-time bandwidth plotting tool
|
||||
- added Wireshark and tshark right-click menu items
|
||||
- X,Y coordinates shown in the status bar
|
||||
- updated GUI attribute option to link messages for changing color/width/dash
|
||||
- added sample IPsec and VPN scenarios, how many nodes script
|
||||
- added jitter parameter to WLANs
|
||||
- renamed Experiment menu to Session menu, added session options
|
||||
- use 'key=value' configuration for services, EMANE models, WLAN models, etc.
|
||||
- save only service values that have been customized
|
||||
- copy service parameters from one customized service to another
|
||||
- right-click menu to start/stop/restart each service
|
||||
* EMANE:
|
||||
- EMANE 0.7.4 support
|
||||
- added support for EMANE CommEffect model and Comm Effect controller GUI
|
||||
- added support for EMANE Raw Transport when using RJ45 devices
|
||||
* Services:
|
||||
- improved service customization; allow a service to define custom Tcl tab
|
||||
- added vtysh.conf for Quagga service to support 'write mem'
|
||||
- support scheduled events and services that start N seconds after runtime
|
||||
- added UCARP service
|
||||
* Documentation:
|
||||
- converted the CORE manual to reStructuredText using Sphinx; added Python docs
|
||||
* General:
|
||||
- Python code reorganization
|
||||
- improved cored.py thread locking
|
||||
- merged xen branch into trunk
|
||||
- added an event queue to a session with notion of time zero
|
||||
- added UDP support to cored.py
|
||||
- use UDP by default in coresendmsg.py; added '-H' option to print examples
|
||||
- enter a bash shell by default when running vcmd with no arguments
|
||||
- fixes to distributed emulation entering runtime state
|
||||
- write 'nodes' file upon session startup
|
||||
- make session number and other attributes available in environment
|
||||
- support /etc/core/environment and ~/.core/environment files
|
||||
- added Ns2ScriptedMobility model to Python, removed from the GUI
|
||||
- namespace nodes mount a private /sys
|
||||
|
||||
- fixed the following bugs: 80, 81, 84, 99, 104, 109, 110, 122, 124, 131, 133, 134, 135, 137, 140, 143, 144, 146, 147, 151, 154, 155
|
||||
|
||||
2012-03-07 CORE 4.3
|
||||
|
||||
* EMANE 0.7.2 and 0.7.3 support
|
||||
* hook scripts: customize actions at any of six different session states
|
||||
* Check Emulation Light (CEL) exception feedback system
|
||||
* added FTP and XORP services, and service validate commands
|
||||
* services can flag when customization is required
|
||||
* Python classes to support ns-3 simulation experiments
|
||||
* write state, node X,Y position, and servers to pycore session dir
|
||||
* removed over 9,000 lines of unused GUI code
|
||||
* performance monitoring script
|
||||
* batch mode improvements and --closebatch option
|
||||
* export session to EmulationScript XML files
|
||||
* basic range model moved from GUI to Python, supports 3D coordinates
|
||||
* improved WLAN dialog with tabs
|
||||
* added PhysicalNode class for joining real nodes with emulated networks
|
||||
* fixed the following bugs: 50, 75, 76, 79, 82, 83, 85, 86, 89, 90, 92, 94, 96, 98, 100, 112, 113, 116, 119, 120
|
||||
|
||||
2011-08-19 CORE 4.2
|
||||
|
||||
* EMANE 0.7.1 support
|
||||
- support for Bypass model, Universal PHY, logging, realtime
|
||||
* configurable MAC addresses
|
||||
* control interfaces (backchannel between node and host)
|
||||
* service customization dialog improved (tabbed)
|
||||
* new testing scripts for MDR and EMANE performance testing
|
||||
* improved upgrading of old imn files
|
||||
* new coresendmsg.py utility (deprecates libcoreapi and coreapisend)
|
||||
* new security services, custom service becomes UserDefined
|
||||
* new services and Python scripting chapters in manual
|
||||
* fixes to distributed emulation, linking tunnels/RJ45s with WLANs/hubs/switches
|
||||
* fixed the following bugs: 18, 32, 34, 38, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 52, 53, 55, 57, 58, 60, 62, 64, 65, 66, 68, 71, 72, 74
|
||||
|
||||
2011-01-05 CORE 4.1
|
||||
* new icons for toolbars and nodes
|
||||
* node services introduced, node models deprecated
|
||||
* customizable node types
|
||||
* traffic flow editor with MGEN support
|
||||
* user configs moved from /etc/core/`*` to ~/.core/
|
||||
* allocate addresses from custom IPv4/IPv6 prefixes
|
||||
* distributed emulation using GRE tunnels
|
||||
* FreeBSD 8.1 now uses cored.py
|
||||
* EMANE 0.6.4 support
|
||||
* numerous bugfixes
|
||||
|
||||
2010-08-17 CORE 4.0
|
||||
* Python framework with Linux network namespace (netns) support (Linux netns is now the primary supported platform)
|
||||
* ability to close the GUI and later reconnect to a running session (netns only)
|
||||
* EMANE integration (netns only)
|
||||
* new topology generators, host file generator
|
||||
* user-editable Observer Widgets
|
||||
* use of /etc/core instead of /usr/local/etc/core
|
||||
* various bugfixes
|
||||
|
||||
2009-09-15 CORE 3.5
|
||||
|
||||
2009-06-23 CORE 3.4
|
||||
|
||||
2009-03-11 CORE 3.3
|
||||
|
||||
126
Dockerfile
126
Dockerfile
|
|
@ -1,126 +0,0 @@
|
|||
# syntax=docker/dockerfile:1
|
||||
FROM ubuntu:22.04
|
||||
LABEL Description="CORE Docker Ubuntu Image"
|
||||
|
||||
ARG PREFIX=/usr/local
|
||||
ARG BRANCH=master
|
||||
ARG PROTOC_VERSION=3.19.6
|
||||
ARG VENV_PATH=/opt/core/venv
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
ENV PATH="$PATH:${VENV_PATH}/bin"
|
||||
WORKDIR /opt
|
||||
|
||||
# install system dependencies
|
||||
|
||||
RUN apt-get update -y && \
|
||||
apt-get install -y software-properties-common
|
||||
|
||||
RUN add-apt-repository "deb http://archive.ubuntu.com/ubuntu jammy universe"
|
||||
|
||||
RUN apt-get update -y && \
|
||||
apt-get install -y --no-install-recommends \
|
||||
automake \
|
||||
bash \
|
||||
ca-certificates \
|
||||
ethtool \
|
||||
gawk \
|
||||
gcc \
|
||||
g++ \
|
||||
iproute2 \
|
||||
iputils-ping \
|
||||
libc-dev \
|
||||
libev-dev \
|
||||
libreadline-dev \
|
||||
libtool \
|
||||
nftables \
|
||||
python3 \
|
||||
python3-pip \
|
||||
python3-tk \
|
||||
pkg-config \
|
||||
tk \
|
||||
xauth \
|
||||
xterm \
|
||||
wireshark \
|
||||
vim \
|
||||
build-essential \
|
||||
nano \
|
||||
firefox \
|
||||
net-tools \
|
||||
rsync \
|
||||
openssh-server \
|
||||
openssh-client \
|
||||
vsftpd \
|
||||
atftpd \
|
||||
atftp \
|
||||
mini-httpd \
|
||||
lynx \
|
||||
tcpdump \
|
||||
iperf \
|
||||
iperf3 \
|
||||
tshark \
|
||||
openssh-sftp-server \
|
||||
bind9 \
|
||||
bind9-utils \
|
||||
openvpn \
|
||||
isc-dhcp-server \
|
||||
isc-dhcp-client \
|
||||
whois \
|
||||
ipcalc \
|
||||
socat \
|
||||
hping3 \
|
||||
libgtk-3-0 \
|
||||
librest-0.7-0 \
|
||||
libgtk-3-common \
|
||||
dconf-gsettings-backend \
|
||||
libsoup-gnome2.4-1 \
|
||||
libsoup2.4-1 \
|
||||
dconf-service \
|
||||
x11-xserver-utils \
|
||||
ftp \
|
||||
git \
|
||||
sudo \
|
||||
wget \
|
||||
tzdata \
|
||||
libpcap-dev \
|
||||
libpcre3-dev \
|
||||
libprotobuf-dev \
|
||||
libxml2-dev \
|
||||
protobuf-compiler \
|
||||
unzip \
|
||||
uuid-dev \
|
||||
iproute2 \
|
||||
vlc \
|
||||
iputils-ping && \
|
||||
apt-get autoremove -y
|
||||
|
||||
# install core
|
||||
RUN git clone https://github.com/coreemu/core && \
|
||||
cd core && \
|
||||
git checkout ${BRANCH} && \
|
||||
./setup.sh && \
|
||||
PATH=/root/.local/bin:$PATH inv install -v -p ${PREFIX} && \
|
||||
cd /opt && \
|
||||
rm -rf ospf-mdr
|
||||
|
||||
# install emane
|
||||
RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOC_VERSION}/protoc-${PROTOC_VERSION}-linux-x86_64.zip && \
|
||||
mkdir protoc && \
|
||||
unzip protoc-${PROTOC_VERSION}-linux-x86_64.zip -d protoc && \
|
||||
git clone https://github.com/adjacentlink/emane.git && \
|
||||
cd emane && \
|
||||
./autogen.sh && \
|
||||
./configure --prefix=/usr && \
|
||||
make -j$(nproc) && \
|
||||
make install && \
|
||||
cd src/python && \
|
||||
make clean && \
|
||||
PATH=/opt/protoc/bin:$PATH make && \
|
||||
${VENV_PATH}/bin/python -m pip install . && \
|
||||
cd /opt && \
|
||||
rm -rf protoc && \
|
||||
rm -rf emane && \
|
||||
rm -f protoc-${PROTOC_VERSION}-linux-x86_64.zip
|
||||
|
||||
WORKDIR /root
|
||||
|
||||
CMD /opt/core/venv/bin/core-daemon
|
||||
20
Jenkinsfile
vendored
Normal file
20
Jenkinsfile
vendored
Normal file
|
|
@ -0,0 +1,20 @@
|
|||
pipeline {
|
||||
agent any
|
||||
stages {
|
||||
stage('build core') {
|
||||
steps {
|
||||
sh './bootstrap.sh'
|
||||
sh './configure'
|
||||
sh 'make'
|
||||
sh 'sudo make install'
|
||||
}
|
||||
}
|
||||
stage('test core') {
|
||||
steps {
|
||||
sh 'pytest daemon/tests/test_core.py'
|
||||
sh 'pytest daemon/tests/test_gui.py'
|
||||
sh 'pytest daemon/tests/test_emane.py'
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
155
Makefile.am
155
Makefile.am
|
|
@ -6,26 +6,29 @@ if WANT_DOCS
|
|||
DOCS = docs man
|
||||
endif
|
||||
|
||||
if WANT_GUI
|
||||
GUI = gui
|
||||
endif
|
||||
|
||||
if WANT_DAEMON
|
||||
DAEMON = daemon
|
||||
DAEMON = scripts daemon
|
||||
endif
|
||||
|
||||
if WANT_NETNS
|
||||
NETNS = netns
|
||||
NETNS = netns ns3
|
||||
endif
|
||||
|
||||
# keep docs last due to dependencies on binaries
|
||||
SUBDIRS = $(DAEMON) $(NETNS) $(DOCS)
|
||||
SUBDIRS = $(GUI) $(DAEMON) $(NETNS) $(DOCS)
|
||||
|
||||
ACLOCAL_AMFLAGS = -I config
|
||||
|
||||
# extra files to include with distribution tarball
|
||||
EXTRA_DIST = bootstrap.sh \
|
||||
package \
|
||||
LICENSE \
|
||||
README.md \
|
||||
ASSIGNMENT_OF_COPYRIGHT.pdf \
|
||||
CHANGELOG.md \
|
||||
Changelog \
|
||||
.version \
|
||||
.version.date
|
||||
|
||||
|
|
@ -41,117 +44,80 @@ DISTCLEANFILES = aclocal.m4 \
|
|||
MAINTAINERCLEANFILES = .version \
|
||||
.version.date
|
||||
|
||||
define fpm-distributed-deb =
|
||||
fpm -s dir -t deb -n core-distributed \
|
||||
define fpm-python =
|
||||
fpm -s python -t $1 \
|
||||
-m "$(PACKAGE_MAINTAINERS)" \
|
||||
--license "BSD" \
|
||||
--description "Common Open Research Emulator Distributed Package" \
|
||||
--url https://github.com/coreemu/core \
|
||||
--vendor "$(PACKAGE_VENDOR)" \
|
||||
-p core-distributed_VERSION_ARCH.deb \
|
||||
-v $(PACKAGE_VERSION) \
|
||||
-d "ethtool" \
|
||||
-d "procps" \
|
||||
-d "libc6 >= 2.14" \
|
||||
-d "bash >= 3.0" \
|
||||
-d "nftables" \
|
||||
-d "iproute2" \
|
||||
-d "libev4" \
|
||||
-d "openssh-server" \
|
||||
-d "xterm" \
|
||||
netns/vnoded=/usr/bin/ \
|
||||
netns/vcmd=/usr/bin/
|
||||
$2
|
||||
endef
|
||||
|
||||
define fpm-distributed-rpm =
|
||||
fpm -s dir -t rpm -n core-distributed \
|
||||
define fpm-gui =
|
||||
fpm -s dir -t $1 -n core-gui \
|
||||
-m "$(PACKAGE_MAINTAINERS)" \
|
||||
--license "BSD" \
|
||||
--description "Common Open Research Emulator Distributed Package" \
|
||||
--description "Common Open Research Emulator GUI front-end" \
|
||||
--url https://github.com/coreemu/core \
|
||||
--vendor "$(PACKAGE_VENDOR)" \
|
||||
-p core-distributed_VERSION_ARCH.rpm \
|
||||
-p core-gui_VERSION_ARCH.$1 \
|
||||
-v $(PACKAGE_VERSION) \
|
||||
-d "ethtool" \
|
||||
-d "procps-ng" \
|
||||
-d "bash >= 3.0" \
|
||||
-d "nftables" \
|
||||
-d "iproute" \
|
||||
-d "libev" \
|
||||
-d "net-tools" \
|
||||
-d "openssh-server" \
|
||||
-d "xterm" \
|
||||
netns/vnoded=/usr/bin/ \
|
||||
netns/vcmd=/usr/bin/
|
||||
endef
|
||||
|
||||
define fpm-rpm =
|
||||
fpm -s dir -t rpm -n core \
|
||||
-m "$(PACKAGE_MAINTAINERS)" \
|
||||
--license "BSD" \
|
||||
--description "core vnoded/vcmd and system dependencies" \
|
||||
--url https://github.com/coreemu/core \
|
||||
--vendor "$(PACKAGE_VENDOR)" \
|
||||
-p core_VERSION_ARCH.rpm \
|
||||
-v $(PACKAGE_VERSION) \
|
||||
--rpm-init package/core-daemon \
|
||||
--after-install package/after-install.sh \
|
||||
--after-remove package/after-remove.sh \
|
||||
-d "ethtool" \
|
||||
-d "bash" \
|
||||
-d "tcl" \
|
||||
-d "tk" \
|
||||
$2 \
|
||||
-C $(DESTDIR)
|
||||
endef
|
||||
|
||||
define fpm-daemon-rpm =
|
||||
fpm -s python -t rpm \
|
||||
-p NAME_sysv_VERSION_ARCH.rpm \
|
||||
--rpm-init scripts/core-daemon \
|
||||
--python-install-bin $(bindir) \
|
||||
--python-install-data $(prefix) \
|
||||
--python-install-lib $(pythondir) \
|
||||
-m "$(PACKAGE_MAINTAINERS)" \
|
||||
--vendor "$(PACKAGE_VENDOR)" \
|
||||
-d "procps-ng" \
|
||||
-d "bash >= 3.0" \
|
||||
-d "bridge-utils" \
|
||||
-d "ebtables" \
|
||||
-d "iproute" \
|
||||
-d "libev" \
|
||||
-d "net-tools" \
|
||||
-d "nftables" \
|
||||
netns/vnoded=/usr/bin/ \
|
||||
netns/vcmd=/usr/bin/ \
|
||||
package/etc/core.conf=/etc/core/ \
|
||||
package/etc/logging.conf=/etc/core/ \
|
||||
package/examples=/opt/core/ \
|
||||
daemon/dist/core-$(PACKAGE_VERSION)-py3-none-any.whl=/opt/core/
|
||||
-d "python >= 2.7, python < 3.0" \
|
||||
netns/setup.py daemon/setup.py
|
||||
endef
|
||||
|
||||
define fpm-deb =
|
||||
fpm -s dir -t deb -n core \
|
||||
define fpm-daemon-deb =
|
||||
fpm -s python -t deb \
|
||||
-p NAME_$1_VERSION_ARCH.deb \
|
||||
--python-install-bin $(bindir) \
|
||||
--python-install-data $(prefix) \
|
||||
--python-install-lib $(pythondir) \
|
||||
$2 $3 \
|
||||
-m "$(PACKAGE_MAINTAINERS)" \
|
||||
--license "BSD" \
|
||||
--description "core vnoded/vcmd and system dependencies" \
|
||||
--url https://github.com/coreemu/core \
|
||||
--vendor "$(PACKAGE_VENDOR)" \
|
||||
-p core_VERSION_ARCH.deb \
|
||||
-v $(PACKAGE_VERSION) \
|
||||
--deb-systemd package/core-daemon.service \
|
||||
--deb-no-default-config-files \
|
||||
--after-install package/after-install.sh \
|
||||
--after-remove package/after-remove.sh \
|
||||
-d "ethtool" \
|
||||
-d "tk" \
|
||||
-d "libtk-img" \
|
||||
-d "procps" \
|
||||
-d "libc6 >= 2.14" \
|
||||
-d "bash >= 3.0" \
|
||||
-d "bridge-utils" \
|
||||
-d "ebtables" \
|
||||
-d "iproute2" \
|
||||
-d "libev4" \
|
||||
-d "nftables" \
|
||||
netns/vnoded=/usr/bin/ \
|
||||
netns/vcmd=/usr/bin/ \
|
||||
package/etc/core.conf=/etc/core/ \
|
||||
package/etc/logging.conf=/etc/core/ \
|
||||
package/examples=/opt/core/ \
|
||||
daemon/dist/core-$(PACKAGE_VERSION)-py3-none-any.whl=/opt/core/
|
||||
-d "python (>= 2.7), python (<< 3.0)" \
|
||||
--deb-recommends quagga \
|
||||
netns/setup.py daemon/setup.py
|
||||
endef
|
||||
|
||||
.PHONY: fpm
|
||||
fpm: clean-local-fpm
|
||||
cd daemon && poetry build -f wheel
|
||||
$(call fpm-deb)
|
||||
$(call fpm-rpm)
|
||||
$(call fpm-distributed-deb)
|
||||
$(call fpm-distributed-rpm)
|
||||
$(MAKE) -C gui install DESTDIR=$(DESTDIR)
|
||||
$(call fpm-gui,rpm)
|
||||
$(call fpm-gui,deb,-d "libtk-img")
|
||||
$(call fpm-python,rpm,ns3/setup.py)
|
||||
$(call fpm-python,deb,ns3/setup.py)
|
||||
$(call fpm-daemon-rpm)
|
||||
$(call fpm-daemon-deb,sysv,--deb-init,scripts/core-daemon)
|
||||
$(call fpm-daemon-deb,systemd,--deb-systemd,scripts/core-daemon.service)
|
||||
|
||||
.PHONY: clean-local-fpm
|
||||
clean-local-fpm:
|
||||
|
|
@ -170,12 +136,24 @@ define change-files =
|
|||
$(info creating file $1 from $1.in)
|
||||
@$(SED) -e 's,[@]sbindir[@],$(sbindir),g' \
|
||||
-e 's,[@]bindir[@],$(bindir),g' \
|
||||
-e 's,[@]pythondir[@],$(pythondir),g' \
|
||||
-e 's,[@]PYTHON[@],$(PYTHON),g' \
|
||||
-e 's,[@]PACKAGE_VERSION[@],$(PACKAGE_VERSION),g' \
|
||||
-e 's,[@]PACKAGE_DATE[@],$(PACKAGE_DATE),g' \
|
||||
-e 's,[@]CORE_LIB_DIR[@],$(CORE_LIB_DIR),g' \
|
||||
-e 's,[@]CORE_STATE_DIR[@],$(CORE_STATE_DIR),g' \
|
||||
-e 's,[@]CORE_DATA_DIR[@],$(CORE_DATA_DIR),g' \
|
||||
-e 's,[@]CORE_CONF_DIR[@],$(CORE_CONF_DIR),g' \
|
||||
-e 's,[@]CORE_GUI_CONF_DIR[@],$(CORE_GUI_CONF_DIR),g' \
|
||||
-e 's,[@]brctl_path[@],$(brctl_path),g' \
|
||||
-e 's,[@]sysctl_path[@],$(sysctl_path),g' \
|
||||
-e 's,[@]ip_path[@],$(ip_path),g' \
|
||||
-e 's,[@]tc_path[@],$(tc_path),g' \
|
||||
-e 's,[@]ebtables_path[@],$(ebtables_path),g' \
|
||||
-e 's,[@]mount_path[@],$(mount_path),g' \
|
||||
-e 's,[@]umount_path[@],$(umount_path),g' \
|
||||
-e 's,[@]ovs_vs_path[@],$(ovs_vs_path),g' \
|
||||
-e 's,[@]ovs_of_path[@],$(ovs_of_path),g' \
|
||||
< $1.in > $1
|
||||
endef
|
||||
|
||||
|
|
@ -183,8 +161,13 @@ all: change-files
|
|||
|
||||
.PHONY: change-files
|
||||
change-files:
|
||||
$(call change-files,gui/core-gui)
|
||||
$(call change-files,scripts/core-daemon.service)
|
||||
$(call change-files,scripts/core-daemon)
|
||||
$(call change-files,daemon/core/constants.py)
|
||||
$(call change-files,ns3/setup.py)
|
||||
$(call change-files,netns/setup.py)
|
||||
$(call change-files,daemon/setup.py)
|
||||
|
||||
CORE_DOC_SRC = core-python-$(PACKAGE_VERSION)
|
||||
.PHONY: doc
|
||||
|
|
|
|||
118
README.md
118
README.md
|
|
@ -1,107 +1,41 @@
|
|||
# Index
|
||||
- CORE
|
||||
- Docker Setup
|
||||
- Precompiled container image
|
||||
- Build container image from source
|
||||
- Adding extra packages
|
||||
|
||||
- Useful commands
|
||||
- License
|
||||
|
||||
# CORE
|
||||
# CORE [](https://www.codacy.com/app/blakeharnden/core?utm_source=github.com&utm_medium=referral&utm_content=coreemu/core&utm_campaign=Badge_Grade)
|
||||
|
||||
CORE: Common Open Research Emulator
|
||||
|
||||
Copyright (c)2005-2022 the Boeing Company.
|
||||
Copyright (c)2005-2018 the Boeing Company.
|
||||
|
||||
See the LICENSE file included in this distribution.
|
||||
|
||||
# Docker Setup
|
||||
## About
|
||||
|
||||
Here you have 2 choices
|
||||
The Common Open Research Emulator (CORE) is a tool for emulating
|
||||
networks on one or more machines. You can connect these emulated
|
||||
networks to live networks. CORE consists of a GUI for drawing
|
||||
topologies of lightweight virtual machines, and Python modules for
|
||||
scripting network emulation.
|
||||
|
||||
## Precompiled container image
|
||||
## Documentation and Examples
|
||||
|
||||
```bash
|
||||
* Documentation hosted on GitHub
|
||||
* <http://coreemu.github.io/core/>
|
||||
* Basic Script Examples
|
||||
* [Examples](daemon/examples/api)
|
||||
* Custom Service Example
|
||||
* [sample.py](daemon/examples/myservices/sample.py)
|
||||
* Custom Emane Model Example
|
||||
* [examplemodel.py](daemon/examples/myemane/examplemodel.py)
|
||||
|
||||
# Start container
|
||||
sudo docker run -itd --name core -e DISPLAY -v /tmp/.X11-unix:/tmp/.X11-unix:rw --privileged --restart unless-stopped git.olympuslab.net/afonso/core-extra:latest
|
||||
## Support
|
||||
|
||||
```
|
||||
## Build container image from source
|
||||
We are leveraging Discord for persistent chat rooms, voice chat, and
|
||||
GitHub integration. This allows for more dynamic conversations and the
|
||||
capability to respond faster. Feel free to join us at the link below.
|
||||
<https://discord.gg/AKd7kmP>
|
||||
|
||||
```bash
|
||||
# Clone the repo
|
||||
git clone https://gitea.olympuslab.net/afonso/core-extra.git
|
||||
## Building CORE
|
||||
|
||||
# cd into the directory
|
||||
cd core-extra
|
||||
See [CORE Installation](http://coreemu.github.io/core/install.html) for detailed build instructions.
|
||||
|
||||
# build the docker image
|
||||
sudo docker build -t core-extra .
|
||||
### Running CORE
|
||||
|
||||
# start container
|
||||
sudo docker run -itd --name core -e DISPLAY -v /tmp/.X11-unix:/tmp/.X11-unix:rw --privileged --restart unless-stopped core-extra
|
||||
|
||||
```
|
||||
|
||||
### Adding extra packages
|
||||
|
||||
To add extra packages you must modify the Dockerfile and then compile the docker image.
|
||||
If you install it after starting the container it will, by docker nature, be reverted on the next boot of the container.
|
||||
|
||||
# Useful commands
|
||||
|
||||
The following fish shell functions are provided
|
||||
to help me better use core
|
||||
|
||||
THIS ONLY WORKS ON FISH, MODIFY FOR BASH OR ZSH
|
||||
|
||||
```fish
|
||||
|
||||
# RUN CORE GUI
|
||||
function core
|
||||
xhost +local:root
|
||||
sudo docker exec -it core core-gui
|
||||
end
|
||||
|
||||
# RUN BASH INSIDE THE CONTAINER
|
||||
function core-bash
|
||||
sudo docker exec -it core /bin/bash
|
||||
end
|
||||
|
||||
|
||||
# LAUNCH NODE BASH ON THE HOST MACHINE
|
||||
function launch-term --argument nodename
|
||||
sudo docker exec -it core xterm -bg black -fg white -fa 'DejaVu Sans Mono' -fs 16 -e vcmd -c /tmp/pycore.1/$nodename -- /bin/bash
|
||||
end
|
||||
|
||||
#TO RUN ANY OTHER COMMAND
|
||||
sudo docker exec -it core COMAND_GOES_HERE
|
||||
|
||||
```
|
||||
|
||||
## LICENSE
|
||||
|
||||
Copyright (c) 2005-2018, the Boeing Company.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright notice,
|
||||
this list of conditions and the following disclaimer.
|
||||
2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
||||
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
|
||||
THE POSSIBILITY OF SUCH DAMAGE.
|
||||
See [Using the CORE GUI](http://coreemu.github.io/core/usage.html) for more details on running CORE.
|
||||
|
|
|
|||
|
|
@ -1,5 +1,9 @@
|
|||
#!/bin/sh
|
||||
#
|
||||
# (c)2010-2012 the Boeing Company
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
# Bootstrap the autoconf system.
|
||||
#
|
||||
|
||||
|
|
|
|||
176
configure.ac
176
configure.ac
|
|
@ -2,7 +2,7 @@
|
|||
# Process this file with autoconf to produce a configure script.
|
||||
|
||||
# this defines the CORE version number, must be static for AC_INIT
|
||||
AC_INIT(core, 9.0.3)
|
||||
AC_INIT(core, 5.2.1, core-dev@nrl.navy.mil)
|
||||
|
||||
# autoconf and automake initialization
|
||||
AC_CONFIG_SRCDIR([netns/version.h.in])
|
||||
|
|
@ -14,7 +14,7 @@ AM_INIT_AUTOMAKE([tar-ustar])
|
|||
# define variables used for packaging and date display
|
||||
PACKAGE_DATE=m4_esyscmd_s([date +%Y%m%d])
|
||||
PACKAGE_VENDOR="CORE Developers"
|
||||
PACKAGE_MAINTAINERS="$PACKAGE_VENDOR"
|
||||
PACKAGE_MAINTAINERS="$PACKAGE_VENDOR <$PACKAGE_BUGREPORT>"
|
||||
|
||||
# core specific variables
|
||||
CORE_LIB_DIR="\${prefix}/lib/core"
|
||||
|
|
@ -30,14 +30,20 @@ AC_SUBST(CORE_CONF_DIR)
|
|||
AC_SUBST(CORE_DATA_DIR)
|
||||
AC_SUBST(CORE_STATE_DIR)
|
||||
|
||||
# documentation option
|
||||
AC_ARG_ENABLE([docs],
|
||||
[AS_HELP_STRING([--enable-docs[=ARG]],
|
||||
[build python documentation (default is no)])],
|
||||
[], [enable_docs=no])
|
||||
AC_SUBST(enable_docs)
|
||||
# CORE GUI configuration files and preferences in CORE_GUI_CONF_DIR
|
||||
# scenario files in ~/.core/configs/
|
||||
AC_ARG_WITH([guiconfdir],
|
||||
[AS_HELP_STRING([--with-guiconfdir=dir],
|
||||
[specify GUI configuration directory])],
|
||||
[CORE_GUI_CONF_DIR="$with_guiconfdir"],
|
||||
[CORE_GUI_CONF_DIR="\$\${HOME}/.core"])
|
||||
AC_SUBST(CORE_GUI_CONF_DIR)
|
||||
AC_ARG_ENABLE([gui],
|
||||
[AS_HELP_STRING([--enable-gui[=ARG]],
|
||||
[build and install the GUI (default is yes)])],
|
||||
[], [enable_gui=yes])
|
||||
AC_SUBST(enable_gui)
|
||||
|
||||
# python option
|
||||
AC_ARG_ENABLE([python],
|
||||
[AS_HELP_STRING([--enable-python[=ARG]],
|
||||
[build and install the python bindings (default is yes)])],
|
||||
|
|
@ -48,7 +54,6 @@ if test "x$enable_python" = "xyes" ; then
|
|||
else
|
||||
want_python=no
|
||||
fi
|
||||
|
||||
AC_ARG_ENABLE([daemon],
|
||||
[AS_HELP_STRING([--enable-daemon[=ARG]],
|
||||
[build and install the daemon with Python modules
|
||||
|
|
@ -83,62 +88,8 @@ if test "x$enable_daemon" = "xyes"; then
|
|||
want_python=yes
|
||||
want_linux_netns=yes
|
||||
|
||||
AM_PATH_PYTHON(3.9)
|
||||
AS_IF([$PYTHON -m grpc_tools.protoc -h &> /dev/null], [], [AC_MSG_ERROR([please install python grpcio-tools])])
|
||||
|
||||
AC_CHECK_PROG(sysctl_path, sysctl, $as_dir, no, $SEARCHPATH)
|
||||
if test "x$sysctl_path" = "xno" ; then
|
||||
AC_MSG_ERROR([Could not locate sysctl (from procps package).])
|
||||
fi
|
||||
|
||||
AC_CHECK_PROG(nftables_path, nft, $as_dir, no, $SEARCHPATH)
|
||||
if test "x$nftables_path" = "xno" ; then
|
||||
AC_MSG_ERROR([Could not locate nftables (from nftables package).])
|
||||
fi
|
||||
|
||||
AC_CHECK_PROG(ip_path, ip, $as_dir, no, $SEARCHPATH)
|
||||
if test "x$ip_path" = "xno" ; then
|
||||
AC_MSG_ERROR([Could not locate ip (from iproute package).])
|
||||
fi
|
||||
|
||||
AC_CHECK_PROG(tc_path, tc, $as_dir, no, $SEARCHPATH)
|
||||
if test "x$tc_path" = "xno" ; then
|
||||
AC_MSG_ERROR([Could not locate tc (from iproute package).])
|
||||
fi
|
||||
|
||||
AC_CHECK_PROG(ethtool_path, ethtool, $as_dir, no, $SEARCHPATH)
|
||||
if test "x$ethtool_path" = "xno" ; then
|
||||
AC_MSG_ERROR([Could not locate ethtool (from package ethtool)])
|
||||
fi
|
||||
|
||||
AC_CHECK_PROG(mount_path, mount, $as_dir, no, $SEARCHPATH)
|
||||
if test "x$mount_path" = "xno" ; then
|
||||
AC_MSG_ERROR([Could not locate mount (from package mount)])
|
||||
fi
|
||||
|
||||
AC_CHECK_PROG(umount_path, umount, $as_dir, no, $SEARCHPATH)
|
||||
if test "x$umount_path" = "xno" ; then
|
||||
AC_MSG_ERROR([Could not locate umount (from package mount)])
|
||||
fi
|
||||
|
||||
AC_CHECK_PROG(convert, convert, yes, no, $SEARCHPATH)
|
||||
if test "x$convert" = "xno" ; then
|
||||
AC_MSG_WARN([Could not locate ImageMagick convert.])
|
||||
fi
|
||||
|
||||
AC_CHECK_PROG(ovs_vs_path, ovs-vsctl, $as_dir, no, $SEARCHPATH)
|
||||
if test "x$ovs_vs_path" = "xno" ; then
|
||||
AC_MSG_WARN([Could not locate ovs-vsctl cannot use OVS mode])
|
||||
fi
|
||||
|
||||
AC_CHECK_PROG(ovs_of_path, ovs-ofctl, $as_dir, no, $SEARCHPATH)
|
||||
if test "x$ovs_of_path" = "xno" ; then
|
||||
AC_MSG_WARN([Could not locate ovs-ofctl cannot use OVS mode])
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ test "x$enable_daemon" = "xyes" || test "x$enable_vnodedonly" = "xyes" ] ; then
|
||||
want_linux_netns=yes
|
||||
# Checks for libraries.
|
||||
AC_CHECK_LIB([netgraph], [NgMkSockNode])
|
||||
|
||||
# Checks for header files.
|
||||
AC_CHECK_HEADERS([arpa/inet.h fcntl.h limits.h stdint.h stdlib.h string.h sys/ioctl.h sys/mount.h sys/socket.h sys/time.h termios.h unistd.h])
|
||||
|
|
@ -158,6 +109,54 @@ if [ test "x$enable_daemon" = "xyes" || test "x$enable_vnodedonly" = "xyes" ] ;
|
|||
AC_FUNC_REALLOC
|
||||
AC_CHECK_FUNCS([atexit dup2 gettimeofday memset socket strerror uname])
|
||||
|
||||
AM_PATH_PYTHON(2.7)
|
||||
|
||||
AC_CHECK_PROG(brctl_path, brctl, $as_dir, no, $SEARCHPATH)
|
||||
if test "x$brctl_path" = "xno" ; then
|
||||
AC_MSG_ERROR([Could not locate brctl (from bridge-utils package).])
|
||||
fi
|
||||
AC_CHECK_PROG(sysctl_path, sysctl, $as_dir, no, $SEARCHPATH)
|
||||
AC_CHECK_PROG(ebtables_path, ebtables, $as_dir, no, $SEARCHPATH)
|
||||
if test "x$ebtables_path" = "xno" ; then
|
||||
AC_MSG_ERROR([Could not locate ebtables (from ebtables package).])
|
||||
fi
|
||||
AC_CHECK_PROG(ip_path, ip, $as_dir, no, $SEARCHPATH)
|
||||
if test "x$ip_path" = "xno" ; then
|
||||
AC_MSG_ERROR([Could not locate ip (from iproute package).])
|
||||
fi
|
||||
AC_CHECK_PROG(tc_path, tc, $as_dir, no, $SEARCHPATH)
|
||||
if test "x$tc_path" = "xno" ; then
|
||||
AC_MSG_ERROR([Could not locate tc (from iproute package).])
|
||||
fi
|
||||
AC_CHECK_PROG(mount_path, mount, $as_dir, no, $SEARCHPATH)
|
||||
AC_CHECK_PROG(umount_path, umount, $as_dir, no, $SEARCHPATH)
|
||||
AC_CHECK_PROG(convert, convert, yes, no, $SEARCHPATH)
|
||||
if test "x$convert" = "xno" ; then
|
||||
AC_MSG_WARN([Could not locate ImageMagick convert.])
|
||||
fi
|
||||
AC_CHECK_PROG(ovs_vs_path, ovs-vsctl, $as_dir, no, $SEARCHPATH)
|
||||
if test "x$ovs_vs_path" = "xno" ; then
|
||||
AC_MSG_WARN([Could not locate ovs-vsctl cannot use OVS nodes])
|
||||
fi
|
||||
AC_CHECK_PROG(ovs_of_path, ovs-ofctl, $as_dir, no, $SEARCHPATH)
|
||||
if test "x$ovs_of_path" = "xno" ; then
|
||||
AC_MSG_WARN([Could not locate ovs-ofctl cannot use OVS nodes])
|
||||
fi
|
||||
|
||||
CFLAGS_save=$CFLAGS
|
||||
CPPFLAGS_save=$CPPFLAGS
|
||||
if test "x$PYTHON_INCLUDE_DIR" = "x"; then
|
||||
PYTHON_INCLUDE_DIR=`$PYTHON -c "import distutils.sysconfig; print distutils.sysconfig.get_python_inc()"`
|
||||
fi
|
||||
CFLAGS="-I$PYTHON_INCLUDE_DIR"
|
||||
CPPFLAGS="-I$PYTHON_INCLUDE_DIR"
|
||||
AC_CHECK_HEADERS([Python.h], [],
|
||||
AC_MSG_ERROR([Python bindings require Python development headers (try installing your 'python-devel' or 'python-dev' package)]))
|
||||
CFLAGS=$CFLAGS_save
|
||||
CPPFLAGS=$CPPFLAGS_save
|
||||
fi
|
||||
if [ test "x$enable_daemon" = "xyes" || test "x$enable_vnodedonly" = "xyes" ] ; then
|
||||
want_linux_netns=yes
|
||||
PKG_CHECK_MODULES(libev, libev,
|
||||
AC_MSG_RESULT([found libev using pkgconfig OK])
|
||||
AC_SUBST(libev_CFLAGS)
|
||||
|
|
@ -171,7 +170,8 @@ if [ test "x$enable_daemon" = "xyes" || test "x$enable_vnodedonly" = "xyes" ] ;
|
|||
fi
|
||||
|
||||
want_docs=no
|
||||
if [test "x$want_python" = "xyes" && test "x$enable_docs" = "xyes"] ; then
|
||||
if test "x$enable_docs" = "xyes" ; then
|
||||
|
||||
AC_CHECK_PROG(help2man, help2man, yes, no, $SEARCHPATH)
|
||||
|
||||
if test "x$help2man" = "xno" ; then
|
||||
|
|
@ -189,17 +189,37 @@ if [test "x$want_python" = "xyes" && test "x$enable_docs" = "xyes"] ; then
|
|||
# check for sphinx required during make
|
||||
AC_CHECK_PROG(sphinxapi_path, sphinx-apidoc, $as_dir, no, $SEARCHPATH)
|
||||
if test "x$sphinxapi_path" = "xno" ; then
|
||||
AC_MSG_ERROR(["Could not locate sphinx-apidoc, install python3 -m pip install sphinx"])
|
||||
AC_MSG_ERROR(["Could not location sphinx-apidoc, from the python-sphinx package"])
|
||||
want_docs=no
|
||||
fi
|
||||
AS_IF([$PYTHON -c "import sphinx_rtd_theme" &> /dev/null], [], [AC_MSG_ERROR([doc dependency missing, please install python3 -m pip install sphinx-rtd-theme])])
|
||||
fi
|
||||
|
||||
#AC_PATH_PROGS(tcl_path, [tclsh tclsh8.5 tclsh8.4], no)
|
||||
#if test "x$tcl_path" = "xno" ; then
|
||||
# AC_MSG_ERROR([Could not locate tclsh. Please install Tcl/Tk.])
|
||||
#fi
|
||||
|
||||
#AC_PATH_PROGS(wish_path, [wish wish8.5 wish8.4], no)
|
||||
#if test "x$wish_path" = "xno" ; then
|
||||
# AC_MSG_ERROR([Could not locate wish. Please install Tcl/Tk.])
|
||||
#fi
|
||||
|
||||
AC_ARG_WITH([startup],
|
||||
[AS_HELP_STRING([--with-startup=option],
|
||||
[option=systemd,suse,none to install systemd/SUSE init scripts])],
|
||||
[with_startup=$with_startup],
|
||||
[with_startup=initd])
|
||||
AC_SUBST(with_startup)
|
||||
AC_MSG_RESULT([using startup option $with_startup])
|
||||
|
||||
# Variable substitutions
|
||||
AM_CONDITIONAL(WANT_GUI, test x$enable_gui = xyes)
|
||||
AM_CONDITIONAL(WANT_DAEMON, test x$enable_daemon = xyes)
|
||||
AM_CONDITIONAL(WANT_DOCS, test x$want_docs = xyes)
|
||||
AM_CONDITIONAL(WANT_PYTHON, test x$want_python = xyes)
|
||||
AM_CONDITIONAL(WANT_NETNS, test x$want_linux_netns = xyes)
|
||||
AM_CONDITIONAL(WANT_INITD, test x$with_startup = xinitd)
|
||||
AM_CONDITIONAL(WANT_SYSTEMD, test x$with_startup = xsystemd)
|
||||
AM_CONDITIONAL(WANT_VNODEDONLY, test x$enable_vnodedonly = xyes)
|
||||
|
||||
if test $cross_compiling = no; then
|
||||
|
|
@ -210,6 +230,11 @@ fi
|
|||
|
||||
# Output files
|
||||
AC_CONFIG_FILES([Makefile
|
||||
gui/version.tcl
|
||||
gui/Makefile
|
||||
gui/icons/Makefile
|
||||
scripts/Makefile
|
||||
scripts/perf/Makefile
|
||||
man/Makefile
|
||||
docs/Makefile
|
||||
daemon/Makefile
|
||||
|
|
@ -217,7 +242,8 @@ AC_CONFIG_FILES([Makefile
|
|||
daemon/doc/conf.py
|
||||
daemon/proto/Makefile
|
||||
netns/Makefile
|
||||
netns/version.h],)
|
||||
netns/version.h
|
||||
ns3/Makefile],)
|
||||
AC_OUTPUT
|
||||
|
||||
# Summary text
|
||||
|
|
@ -231,12 +257,20 @@ Build:
|
|||
Prefix: ${prefix}
|
||||
Exec Prefix: ${exec_prefix}
|
||||
|
||||
GUI:
|
||||
GUI path: ${CORE_LIB_DIR}
|
||||
GUI config: ${CORE_GUI_CONF_DIR}
|
||||
|
||||
Daemon:
|
||||
Daemon path: ${bindir}
|
||||
Daemon config: ${CORE_CONF_DIR}
|
||||
Python: ${PYTHON}
|
||||
Python modules: ${pythondir}
|
||||
Logs: ${CORE_STATE_DIR}/log
|
||||
|
||||
Startup: ${with_startup}
|
||||
|
||||
Features to build:
|
||||
Build GUI: ${enable_gui}
|
||||
Build Daemon: ${enable_daemon}
|
||||
Documentation: ${want_docs}
|
||||
|
||||
|
|
|
|||
2
daemon/.gitignore
vendored
Normal file
2
daemon/.gitignore
vendored
Normal file
|
|
@ -0,0 +1,2 @@
|
|||
*.pyc
|
||||
build
|
||||
|
|
@ -1,23 +0,0 @@
|
|||
repos:
|
||||
- repo: local
|
||||
hooks:
|
||||
- id: isort
|
||||
name: isort
|
||||
stages: [commit]
|
||||
language: system
|
||||
entry: bash -c 'cd daemon && poetry run isort --atomic -y'
|
||||
types: [python]
|
||||
|
||||
- id: black
|
||||
name: black
|
||||
stages: [commit]
|
||||
language: system
|
||||
entry: bash -c 'cd daemon && poetry run black .'
|
||||
types: [python]
|
||||
|
||||
- id: flake8
|
||||
name: flake8
|
||||
stages: [commit]
|
||||
language: system
|
||||
entry: bash -c 'cd daemon && poetry run flake8'
|
||||
types: [python]
|
||||
|
|
@ -1,14 +1,50 @@
|
|||
# CORE
|
||||
# (c)2010-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
# Makefile for building netns components.
|
||||
#
|
||||
|
||||
SETUPPY = setup.py
|
||||
SETUPPYFLAGS = -v
|
||||
|
||||
if WANT_DOCS
|
||||
DOCS = doc
|
||||
DOCS = doc
|
||||
endif
|
||||
|
||||
SUBDIRS = proto $(DOCS)
|
||||
|
||||
SCRIPT_FILES := $(notdir $(wildcard scripts/*))
|
||||
MAN_FILES := $(notdir $(wildcard ../man/*.1))
|
||||
|
||||
# Python package build
|
||||
noinst_SCRIPTS = build
|
||||
build:
|
||||
$(PYTHON) $(SETUPPY) $(SETUPPYFLAGS) build
|
||||
|
||||
# Python package install
|
||||
install-exec-hook:
|
||||
$(PYTHON) $(SETUPPY) $(SETUPPYFLAGS) install \
|
||||
--root=/$(DESTDIR) \
|
||||
--prefix=$(prefix) \
|
||||
--install-lib=$(pythondir) \
|
||||
--single-version-externally-managed
|
||||
|
||||
# Python package uninstall
|
||||
uninstall-hook:
|
||||
rm -rf $(DESTDIR)/etc/core
|
||||
rm -rf $(DESTDIR)/$(datadir)/core
|
||||
rm -f $(addprefix $(DESTDIR)/$(datarootdir)/man/man1/, $(MAN_FILES))
|
||||
rm -f $(addprefix $(DESTDIR)/$(bindir)/,$(SCRIPT_FILES))
|
||||
rm -rf $(DESTDIR)/$(pythondir)/core-$(PACKAGE_VERSION)-py$(PYTHON_VERSION).egg-info
|
||||
rm -rf $(DESTDIR)/$(pythondir)/core
|
||||
|
||||
# Python package cleanup
|
||||
clean-local:
|
||||
-rm -rf build
|
||||
|
||||
# because we include entire directories with EXTRA_DIST, we need to clean up
|
||||
# the source control files
|
||||
dist-hook:
|
||||
|
|
@ -17,12 +53,17 @@ dist-hook:
|
|||
distclean-local:
|
||||
-rm -rf core.egg-info
|
||||
|
||||
|
||||
DISTCLEANFILES = Makefile.in
|
||||
|
||||
# files to include with distribution tarball
|
||||
EXTRA_DIST = core \
|
||||
EXTRA_DIST = $(SETUPPY) \
|
||||
core \
|
||||
data \
|
||||
doc/conf.py.in \
|
||||
examples \
|
||||
scripts \
|
||||
tests \
|
||||
test.py \
|
||||
setup.cfg \
|
||||
poetry.lock \
|
||||
pyproject.toml
|
||||
requirements.txt
|
||||
|
|
|
|||
|
|
@ -1,4 +1,30 @@
|
|||
import json
|
||||
import logging.config
|
||||
import os
|
||||
import subprocess
|
||||
|
||||
from core import constants
|
||||
|
||||
# setup default null handler
|
||||
logging.getLogger(__name__).addHandler(logging.NullHandler())
|
||||
|
||||
|
||||
def load_logging_config():
|
||||
"""
|
||||
Load CORE logging configuration file.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
log_config_path = os.path.join(constants.CORE_CONF_DIR, "logging.conf")
|
||||
with open(log_config_path, "r") as log_config_file:
|
||||
log_config = json.load(log_config_file)
|
||||
logging.config.dictConfig(log_config)
|
||||
|
||||
|
||||
class CoreCommandError(subprocess.CalledProcessError):
|
||||
"""
|
||||
Used when encountering internal CORE command errors.
|
||||
"""
|
||||
|
||||
def __str__(self):
|
||||
return "Command(%s), Status(%s):\n%s" % (self.cmd, self.returncode, self.output)
|
||||
|
|
|
|||
|
|
@ -0,0 +1,3 @@
|
|||
"""
|
||||
Contains code specific to the legacy TCP API for interacting with the TCL based GUI.
|
||||
"""
|
||||
1003
daemon/core/api/coreapi.py
Normal file
1003
daemon/core/api/coreapi.py
Normal file
File diff suppressed because it is too large
Load diff
65
daemon/core/api/dataconversion.py
Normal file
65
daemon/core/api/dataconversion.py
Normal file
|
|
@ -0,0 +1,65 @@
|
|||
"""
|
||||
Converts CORE data objects into legacy API messages.
|
||||
"""
|
||||
|
||||
from core.api import coreapi
|
||||
from core.enumerations import ConfigTlvs
|
||||
from core.enumerations import NodeTlvs
|
||||
from core.misc import structutils
|
||||
|
||||
|
||||
def convert_node(node_data):
|
||||
"""
|
||||
Convenience method for converting NodeData to a packed TLV message.
|
||||
|
||||
:param core.data.NodeData node_data: node data to convert
|
||||
:return: packed node message
|
||||
"""
|
||||
tlv_data = structutils.pack_values(coreapi.CoreNodeTlv, [
|
||||
(NodeTlvs.NUMBER, node_data.id),
|
||||
(NodeTlvs.TYPE, node_data.node_type),
|
||||
(NodeTlvs.NAME, node_data.name),
|
||||
(NodeTlvs.IP_ADDRESS, node_data.ip_address),
|
||||
(NodeTlvs.MAC_ADDRESS, node_data.mac_address),
|
||||
(NodeTlvs.IP6_ADDRESS, node_data.ip6_address),
|
||||
(NodeTlvs.MODEL, node_data.model),
|
||||
(NodeTlvs.EMULATION_ID, node_data.emulation_id),
|
||||
(NodeTlvs.EMULATION_SERVER, node_data.emulation_server),
|
||||
(NodeTlvs.SESSION, node_data.session),
|
||||
(NodeTlvs.X_POSITION, node_data.x_position),
|
||||
(NodeTlvs.Y_POSITION, node_data.y_position),
|
||||
(NodeTlvs.CANVAS, node_data.canvas),
|
||||
(NodeTlvs.NETWORK_ID, node_data.network_id),
|
||||
(NodeTlvs.SERVICES, node_data.services),
|
||||
(NodeTlvs.LATITUDE, node_data.latitude),
|
||||
(NodeTlvs.LONGITUDE, node_data.longitude),
|
||||
(NodeTlvs.ALTITUDE, node_data.altitude),
|
||||
(NodeTlvs.ICON, node_data.icon),
|
||||
(NodeTlvs.OPAQUE, node_data.opaque)
|
||||
])
|
||||
return coreapi.CoreNodeMessage.pack(node_data.message_type, tlv_data)
|
||||
|
||||
|
||||
def convert_config(config_data):
|
||||
"""
|
||||
Convenience method for converting ConfigData to a packed TLV message.
|
||||
|
||||
:param core.data.ConfigData config_data: config data to convert
|
||||
:return: packed message
|
||||
"""
|
||||
tlv_data = structutils.pack_values(coreapi.CoreConfigTlv, [
|
||||
(ConfigTlvs.NODE, config_data.node),
|
||||
(ConfigTlvs.OBJECT, config_data.object),
|
||||
(ConfigTlvs.TYPE, config_data.type),
|
||||
(ConfigTlvs.DATA_TYPES, config_data.data_types),
|
||||
(ConfigTlvs.VALUES, config_data.data_values),
|
||||
(ConfigTlvs.CAPTIONS, config_data.captions),
|
||||
(ConfigTlvs.BITMAP, config_data.bitmap),
|
||||
(ConfigTlvs.POSSIBLE_VALUES, config_data.possible_values),
|
||||
(ConfigTlvs.GROUPS, config_data.groups),
|
||||
(ConfigTlvs.SESSION, config_data.session),
|
||||
(ConfigTlvs.INTERFACE_NUMBER, config_data.interface_number),
|
||||
(ConfigTlvs.NETWORK_ID, config_data.network_id),
|
||||
(ConfigTlvs.OPAQUE, config_data.opaque),
|
||||
])
|
||||
return coreapi.CoreConfMessage.pack(config_data.message_type, tlv_data)
|
||||
File diff suppressed because it is too large
Load diff
|
|
@ -1,219 +0,0 @@
|
|||
import logging
|
||||
from collections.abc import Iterable
|
||||
from queue import Empty, Queue
|
||||
from typing import Optional
|
||||
|
||||
from core.api.grpc import core_pb2, grpcutils
|
||||
from core.api.grpc.grpcutils import convert_link_data
|
||||
from core.emulator.data import (
|
||||
ConfigData,
|
||||
EventData,
|
||||
ExceptionData,
|
||||
FileData,
|
||||
LinkData,
|
||||
NodeData,
|
||||
)
|
||||
from core.emulator.session import Session
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def handle_node_event(session: Session, node_data: NodeData) -> core_pb2.Event:
|
||||
"""
|
||||
Handle node event when there is a node event
|
||||
|
||||
:param session: session node is from
|
||||
:param node_data: node data
|
||||
:return: node event that contains node id, name, model, position, and services
|
||||
"""
|
||||
node = node_data.node
|
||||
emane_configs = grpcutils.get_emane_model_configs_dict(session)
|
||||
node_emane_configs = emane_configs.get(node.id, [])
|
||||
node_proto = grpcutils.get_node_proto(session, node, node_emane_configs)
|
||||
message_type = node_data.message_type.value
|
||||
node_event = core_pb2.NodeEvent(message_type=message_type, node=node_proto)
|
||||
return core_pb2.Event(node_event=node_event, source=node_data.source)
|
||||
|
||||
|
||||
def handle_link_event(link_data: LinkData) -> core_pb2.Event:
|
||||
"""
|
||||
Handle link event when there is a link event
|
||||
|
||||
:param link_data: link data
|
||||
:return: link event that has message type and link information
|
||||
"""
|
||||
link = convert_link_data(link_data)
|
||||
message_type = link_data.message_type.value
|
||||
link_event = core_pb2.LinkEvent(message_type=message_type, link=link)
|
||||
return core_pb2.Event(link_event=link_event, source=link_data.source)
|
||||
|
||||
|
||||
def handle_session_event(event_data: EventData) -> core_pb2.Event:
|
||||
"""
|
||||
Handle session event when there is a session event
|
||||
|
||||
:param event_data: event data
|
||||
:return: session event
|
||||
"""
|
||||
event_time = event_data.time
|
||||
if event_time is not None:
|
||||
event_time = float(event_time)
|
||||
session_event = core_pb2.SessionEvent(
|
||||
node_id=event_data.node,
|
||||
event=event_data.event_type.value,
|
||||
name=event_data.name,
|
||||
data=event_data.data,
|
||||
time=event_time,
|
||||
)
|
||||
return core_pb2.Event(session_event=session_event)
|
||||
|
||||
|
||||
def handle_config_event(config_data: ConfigData) -> core_pb2.Event:
|
||||
"""
|
||||
Handle configuration event when there is configuration event
|
||||
|
||||
:param config_data: configuration data
|
||||
:return: configuration event
|
||||
"""
|
||||
config_event = core_pb2.ConfigEvent(
|
||||
message_type=config_data.message_type,
|
||||
node_id=config_data.node,
|
||||
object=config_data.object,
|
||||
type=config_data.type,
|
||||
captions=config_data.captions,
|
||||
bitmap=config_data.bitmap,
|
||||
data_values=config_data.data_values,
|
||||
possible_values=config_data.possible_values,
|
||||
groups=config_data.groups,
|
||||
iface_id=config_data.iface_id,
|
||||
network_id=config_data.network_id,
|
||||
opaque=config_data.opaque,
|
||||
data_types=config_data.data_types,
|
||||
)
|
||||
return core_pb2.Event(config_event=config_event)
|
||||
|
||||
|
||||
def handle_exception_event(exception_data: ExceptionData) -> core_pb2.Event:
|
||||
"""
|
||||
Handle exception event when there is exception event
|
||||
|
||||
:param exception_data: exception data
|
||||
:return: exception event
|
||||
"""
|
||||
exception_event = core_pb2.ExceptionEvent(
|
||||
node_id=exception_data.node,
|
||||
level=exception_data.level.value,
|
||||
source=exception_data.source,
|
||||
date=exception_data.date,
|
||||
text=exception_data.text,
|
||||
opaque=exception_data.opaque,
|
||||
)
|
||||
return core_pb2.Event(exception_event=exception_event)
|
||||
|
||||
|
||||
def handle_file_event(file_data: FileData) -> core_pb2.Event:
|
||||
"""
|
||||
Handle file event
|
||||
|
||||
:param file_data: file data
|
||||
:return: file event
|
||||
"""
|
||||
file_event = core_pb2.FileEvent(
|
||||
message_type=file_data.message_type.value,
|
||||
node_id=file_data.node,
|
||||
name=file_data.name,
|
||||
mode=file_data.mode,
|
||||
number=file_data.number,
|
||||
type=file_data.type,
|
||||
source=file_data.source,
|
||||
data=file_data.data,
|
||||
compressed_data=file_data.compressed_data,
|
||||
)
|
||||
return core_pb2.Event(file_event=file_event)
|
||||
|
||||
|
||||
class EventStreamer:
|
||||
"""
|
||||
Processes session events to generate grpc events.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self, session: Session, event_types: Iterable[core_pb2.EventType]
|
||||
) -> None:
|
||||
"""
|
||||
Create a EventStreamer instance.
|
||||
|
||||
:param session: session to process events for
|
||||
:param event_types: types of events to process
|
||||
"""
|
||||
self.session: Session = session
|
||||
self.event_types: Iterable[core_pb2.EventType] = event_types
|
||||
self.queue: Queue = Queue()
|
||||
self.add_handlers()
|
||||
|
||||
def add_handlers(self) -> None:
|
||||
"""
|
||||
Add a session event handler for desired event types.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
if core_pb2.EventType.NODE in self.event_types:
|
||||
self.session.node_handlers.append(self.queue.put)
|
||||
if core_pb2.EventType.LINK in self.event_types:
|
||||
self.session.link_handlers.append(self.queue.put)
|
||||
if core_pb2.EventType.CONFIG in self.event_types:
|
||||
self.session.config_handlers.append(self.queue.put)
|
||||
if core_pb2.EventType.FILE in self.event_types:
|
||||
self.session.file_handlers.append(self.queue.put)
|
||||
if core_pb2.EventType.EXCEPTION in self.event_types:
|
||||
self.session.exception_handlers.append(self.queue.put)
|
||||
if core_pb2.EventType.SESSION in self.event_types:
|
||||
self.session.event_handlers.append(self.queue.put)
|
||||
|
||||
def process(self) -> Optional[core_pb2.Event]:
|
||||
"""
|
||||
Process the next event in the queue.
|
||||
|
||||
:return: grpc event, or None when invalid event or queue timeout
|
||||
"""
|
||||
event = None
|
||||
try:
|
||||
data = self.queue.get(timeout=1)
|
||||
if isinstance(data, NodeData):
|
||||
event = handle_node_event(self.session, data)
|
||||
elif isinstance(data, LinkData):
|
||||
event = handle_link_event(data)
|
||||
elif isinstance(data, EventData):
|
||||
event = handle_session_event(data)
|
||||
elif isinstance(data, ConfigData):
|
||||
event = handle_config_event(data)
|
||||
elif isinstance(data, ExceptionData):
|
||||
event = handle_exception_event(data)
|
||||
elif isinstance(data, FileData):
|
||||
event = handle_file_event(data)
|
||||
else:
|
||||
logger.error("unknown event: %s", data)
|
||||
except Empty:
|
||||
pass
|
||||
if event:
|
||||
event.session_id = self.session.id
|
||||
return event
|
||||
|
||||
def remove_handlers(self) -> None:
|
||||
"""
|
||||
Remove session event handlers for events being watched.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
if core_pb2.EventType.NODE in self.event_types:
|
||||
self.session.node_handlers.remove(self.queue.put)
|
||||
if core_pb2.EventType.LINK in self.event_types:
|
||||
self.session.link_handlers.remove(self.queue.put)
|
||||
if core_pb2.EventType.CONFIG in self.event_types:
|
||||
self.session.config_handlers.remove(self.queue.put)
|
||||
if core_pb2.EventType.FILE in self.event_types:
|
||||
self.session.file_handlers.remove(self.queue.put)
|
||||
if core_pb2.EventType.EXCEPTION in self.event_types:
|
||||
self.session.exception_handlers.remove(self.queue.put)
|
||||
if core_pb2.EventType.SESSION in self.event_types:
|
||||
self.session.event_handlers.remove(self.queue.put)
|
||||
|
|
@ -1,908 +0,0 @@
|
|||
import logging
|
||||
import time
|
||||
from pathlib import Path
|
||||
from typing import Any, Optional, Union
|
||||
|
||||
import grpc
|
||||
from grpc import ServicerContext
|
||||
|
||||
from core import utils
|
||||
from core.api.grpc import common_pb2, core_pb2, wrappers
|
||||
from core.api.grpc.configservices_pb2 import ConfigServiceConfig
|
||||
from core.api.grpc.emane_pb2 import NodeEmaneConfig
|
||||
from core.api.grpc.services_pb2 import (
|
||||
NodeServiceConfig,
|
||||
NodeServiceData,
|
||||
ServiceConfig,
|
||||
ServiceDefaults,
|
||||
)
|
||||
from core.config import ConfigurableOptions
|
||||
from core.emane.nodes import EmaneNet, EmaneOptions
|
||||
from core.emulator.data import InterfaceData, LinkData, LinkOptions
|
||||
from core.emulator.enumerations import LinkTypes, NodeTypes
|
||||
from core.emulator.links import CoreLink
|
||||
from core.emulator.session import Session
|
||||
from core.errors import CoreError
|
||||
from core.location.mobility import BasicRangeModel, Ns2ScriptedMobility
|
||||
from core.nodes.base import (
|
||||
CoreNode,
|
||||
CoreNodeBase,
|
||||
CoreNodeOptions,
|
||||
NodeBase,
|
||||
NodeOptions,
|
||||
Position,
|
||||
)
|
||||
from core.nodes.docker import DockerNode, DockerOptions
|
||||
from core.nodes.interface import CoreInterface
|
||||
from core.nodes.lxd import LxcNode, LxcOptions
|
||||
from core.nodes.network import CoreNetwork, CtrlNet, PtpNet, WlanNode
|
||||
from core.nodes.podman import PodmanNode, PodmanOptions
|
||||
from core.nodes.wireless import WirelessNode
|
||||
from core.services.coreservices import CoreService
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
WORKERS = 10
|
||||
|
||||
|
||||
class CpuUsage:
|
||||
def __init__(self) -> None:
|
||||
self.stat_file: Path = Path("/proc/stat")
|
||||
self.prev_idle: int = 0
|
||||
self.prev_total: int = 0
|
||||
|
||||
def run(self) -> float:
|
||||
lines = self.stat_file.read_text().splitlines()[0]
|
||||
values = [int(x) for x in lines.split()[1:]]
|
||||
idle = sum(values[3:5])
|
||||
non_idle = sum(values[:3] + values[5:8])
|
||||
total = idle + non_idle
|
||||
total_diff = total - self.prev_total
|
||||
idle_diff = idle - self.prev_idle
|
||||
self.prev_idle = idle
|
||||
self.prev_total = total
|
||||
return (total_diff - idle_diff) / total_diff
|
||||
|
||||
|
||||
def add_node_data(
|
||||
_class: type[NodeBase], node_proto: core_pb2.Node
|
||||
) -> tuple[Position, NodeOptions]:
|
||||
"""
|
||||
Convert node protobuf message to data for creating a node.
|
||||
|
||||
:param _class: node class to create options from
|
||||
:param node_proto: node proto message
|
||||
:return: node type, id, and options
|
||||
"""
|
||||
options = _class.create_options()
|
||||
options.icon = node_proto.icon
|
||||
options.canvas = node_proto.canvas
|
||||
if isinstance(options, CoreNodeOptions):
|
||||
options.model = node_proto.model
|
||||
options.services = node_proto.services
|
||||
options.config_services = node_proto.config_services
|
||||
if isinstance(options, EmaneOptions):
|
||||
options.emane_model = node_proto.emane
|
||||
if isinstance(options, (DockerOptions, LxcOptions, PodmanOptions)):
|
||||
options.image = node_proto.image
|
||||
position = Position()
|
||||
position.set(node_proto.position.x, node_proto.position.y)
|
||||
if node_proto.HasField("geo"):
|
||||
geo = node_proto.geo
|
||||
position.set_geo(geo.lon, geo.lat, geo.alt)
|
||||
return position, options
|
||||
|
||||
|
||||
def link_iface(iface_proto: core_pb2.Interface) -> InterfaceData:
|
||||
"""
|
||||
Create interface data from interface proto.
|
||||
|
||||
:param iface_proto: interface proto
|
||||
:return: interface data
|
||||
"""
|
||||
iface_data = None
|
||||
if iface_proto:
|
||||
name = iface_proto.name if iface_proto.name else None
|
||||
mac = iface_proto.mac if iface_proto.mac else None
|
||||
ip4 = iface_proto.ip4 if iface_proto.ip4 else None
|
||||
ip6 = iface_proto.ip6 if iface_proto.ip6 else None
|
||||
iface_data = InterfaceData(
|
||||
id=iface_proto.id,
|
||||
name=name,
|
||||
mac=mac,
|
||||
ip4=ip4,
|
||||
ip4_mask=iface_proto.ip4_mask,
|
||||
ip6=ip6,
|
||||
ip6_mask=iface_proto.ip6_mask,
|
||||
)
|
||||
return iface_data
|
||||
|
||||
|
||||
def add_link_data(
    link_proto: core_pb2.Link,
) -> tuple[InterfaceData, InterfaceData, LinkOptions]:
    """
    Convert link proto to link interfaces and options data.

    :param link_proto: link proto
    :return: link interfaces and options
    """
    iface1_data = link_iface(link_proto.iface1)
    iface2_data = link_iface(link_proto.iface2)
    options = LinkOptions()
    options_proto = link_proto.options
    if options_proto:
        # copy each link option field from the proto verbatim
        for field in (
            "delay",
            "bandwidth",
            "loss",
            "dup",
            "jitter",
            "mer",
            "burst",
            "mburst",
            "buffer",
            "unidirectional",
            "key",
        ):
            setattr(options, field, getattr(options_proto, field))
    return iface1_data, iface2_data, options
|
||||
|
||||
|
||||
def create_nodes(
    session: Session, node_protos: list[core_pb2.Node]
) -> tuple[list[NodeBase], list[Exception]]:
    """
    Create nodes using a thread pool and wait for completion.

    :param session: session to create nodes in
    :param node_protos: node proto messages
    :return: results and exceptions for created nodes
    """
    funcs = []
    for proto in node_protos:
        node_class = session.get_node_class(NodeTypes(proto.type))
        position, options = add_node_data(node_class, proto)
        # zero/empty proto values fall back to None so defaults apply
        create_args = (
            node_class,
            proto.id or None,
            proto.name or None,
            proto.server or None,
            position,
            options,
        )
        funcs.append((session.add_node, create_args, {}))
    # run node creation concurrently and record elapsed time
    start = time.monotonic()
    results, exceptions = utils.threadpool(funcs)
    logger.debug("grpc created nodes time: %s", time.monotonic() - start)
    return results, exceptions
|
||||
|
||||
|
||||
def create_links(
    session: Session, link_protos: list[core_pb2.Link]
) -> tuple[list[NodeBase], list[Exception]]:
    """
    Create links using a thread pool and wait for completion.

    :param session: session to create links in
    :param link_protos: link proto messages
    :return: results and exceptions for created links
    """
    funcs = []
    for proto in link_protos:
        iface1, iface2, options = add_link_data(proto)
        link_args = (proto.node1_id, proto.node2_id, iface1, iface2, options)
        funcs.append((session.add_link, link_args, {}))
    # run link creation concurrently and record elapsed time
    start = time.monotonic()
    results, exceptions = utils.threadpool(funcs)
    logger.debug("grpc created links time: %s", time.monotonic() - start)
    return results, exceptions
|
||||
|
||||
|
||||
def edit_links(
    session: Session, link_protos: list[core_pb2.Link]
) -> tuple[list[None], list[Exception]]:
    """
    Edit links using a thread pool and wait for completion.

    :param session: session to edit links in
    :param link_protos: link proto messages
    :return: results and exceptions for edited links
    """
    funcs = []
    for link_proto in link_protos:
        node1_id = link_proto.node1_id
        node2_id = link_proto.node2_id
        iface1, iface2, options = add_link_data(link_proto)
        # link_iface may return None (e.g. links between network nodes);
        # guard the id access to avoid an AttributeError on None
        iface1_id = iface1.id if iface1 else None
        iface2_id = iface2.id if iface2 else None
        args = (node1_id, node2_id, iface1_id, iface2_id, options)
        funcs.append((session.update_link, args, {}))
    start = time.monotonic()
    results, exceptions = utils.threadpool(funcs)
    total = time.monotonic() - start
    logger.debug("grpc edit links time: %s", total)
    return results, exceptions
|
||||
|
||||
|
||||
def convert_value(value: Any) -> str:
    """
    Convert value into string.

    :param value: value to convert, may be None
    :return: string conversion of the value, or None when value is None
    """
    return None if value is None else str(value)
|
||||
|
||||
|
||||
def convert_session_options(session: Session) -> dict[str, common_pb2.ConfigOption]:
    """
    Convert session options into config option protos.

    :param session: session to convert options for
    :return: mapping of option id to config option proto
    """
    config_options = {}
    for option in session.options.options:
        # all session options are presented under a single "Options" group
        config_options[option.id] = common_pb2.ConfigOption(
            label=option.label,
            name=option.id,
            value=session.options.get(option.id),
            type=option.type.value,
            select=option.options,
            group="Options",
        )
    return config_options
|
||||
|
||||
|
||||
def get_config_options(
    config: dict[str, str],
    configurable_options: Union[ConfigurableOptions, type[ConfigurableOptions]],
) -> dict[str, common_pb2.ConfigOption]:
    """
    Retrieve configuration options in a form that is used by the grpc server.

    :param config: configuration values
    :param configurable_options: configurable options
    :return: mapping of configuration ids to configuration options
    """
    results = {}
    for configuration in configurable_options.configurations():
        results[configuration.id] = common_pb2.ConfigOption(
            label=configuration.label,
            name=configuration.id,
            value=config.get(configuration.id, configuration.default),
            type=configuration.type.value,
            select=configuration.options,
        )
    # assign group names based on the 1-indexed option ranges each group declares
    ordered_options = list(results.values())
    for config_group in configurable_options.config_groups():
        grouped = ordered_options[config_group.start - 1 : config_group.stop]
        for option in grouped:
            option.group = config_group.name
    return results
|
||||
|
||||
|
||||
def get_node_proto(
    session: Session, node: NodeBase, emane_configs: list[NodeEmaneConfig]
) -> core_pb2.Node:
    """
    Convert CORE node to protobuf representation, gathering all node related
    configurations (wlan, wireless, mobility, service, config service, emane).

    :param session: session containing node
    :param node: node to convert
    :param emane_configs: emane configs related to node
    :return: node proto
    """
    node_type = session.get_node_type(node.__class__)
    position = core_pb2.Position(
        x=node.position.x, y=node.position.y, z=node.position.z
    )
    geo = core_pb2.Geo(
        lat=node.position.lat, lon=node.position.lon, alt=node.position.alt
    )
    services = [x.name for x in node.services]
    # directory and config services only exist for CoreNodeBase nodes
    node_dir = None
    config_services = []
    if isinstance(node, CoreNodeBase):
        node_dir = str(node.directory)
        config_services = [x for x in node.config_services]
    # control channel name only exists for CoreNode nodes
    channel = None
    if isinstance(node, CoreNode):
        channel = str(node.ctrlchnlname)
    emane_model = None
    if isinstance(node, EmaneNet):
        emane_model = node.wireless_model.name
    # container image only applies to docker/lxc/podman nodes
    image = None
    if isinstance(node, (DockerNode, LxcNode, PodmanNode)):
        image = node.image
    # check for wlan config
    wlan_config = session.mobility.get_configs(
        node.id, config_type=BasicRangeModel.name
    )
    if wlan_config:
        wlan_config = get_config_options(wlan_config, BasicRangeModel)
    # check for wireless config
    wireless_config = None
    if isinstance(node, WirelessNode):
        configs = node.get_config()
        wireless_config = {}
        for config in configs.values():
            # NOTE(review): uses the option default as the value -- confirm intended
            config_option = common_pb2.ConfigOption(
                label=config.label,
                name=config.id,
                value=config.default,
                type=config.type.value,
                select=config.options,
                group=config.group,
            )
            wireless_config[config.id] = config_option
    # check for mobility config
    mobility_config = session.mobility.get_configs(
        node.id, config_type=Ns2ScriptedMobility.name
    )
    if mobility_config:
        mobility_config = get_config_options(mobility_config, Ns2ScriptedMobility)
    # check for service configs customized for this node
    custom_services = session.services.custom_services.get(node.id)
    service_configs = {}
    if custom_services:
        for service in custom_services.values():
            service_proto = get_service_configuration(service)
            service_configs[service.name] = NodeServiceConfig(
                node_id=node.id,
                service=service.name,
                data=service_proto,
                files=service.config_data,
            )
    # check for config service configs, only those with customizations
    config_service_configs = {}
    if isinstance(node, CoreNode):
        for service in node.config_services.values():
            if not service.custom_templates and not service.custom_config:
                continue
            config_service_configs[service.name] = ConfigServiceConfig(
                node_id=node.id,
                name=service.name,
                templates=service.custom_templates,
                config=service.custom_config,
            )
    return core_pb2.Node(
        id=node.id,
        name=node.name,
        emane=emane_model,
        model=node.model,
        type=node_type.value,
        position=position,
        geo=geo,
        services=services,
        icon=node.icon,
        image=image,
        config_services=config_services,
        dir=node_dir,
        channel=channel,
        canvas=node.canvas,
        wlan_config=wlan_config,
        wireless_config=wireless_config,
        mobility_config=mobility_config,
        service_configs=service_configs,
        config_service_configs=config_service_configs,
        emane_configs=emane_configs,
    )
|
||||
|
||||
|
||||
def get_links(session: Session, node: NodeBase) -> list[core_pb2.Link]:
    """
    Retrieve a list of links for grpc to use.

    :param session: session to get links for node
    :param node: node to get links from
    :return: protobuf links
    """
    link_protos = []
    for core_link in session.link_manager.node_links(node):
        link_protos.extend(convert_core_link(core_link))
    # wireless based nodes also report their own dynamic link data
    if isinstance(node, (WlanNode, EmaneNet)):
        link_protos.extend(convert_link_data(x) for x in node.links())
    return link_protos
|
||||
|
||||
|
||||
def convert_iface(iface: CoreInterface) -> core_pb2.Interface:
    """
    Convert interface to protobuf.

    :param iface: interface to convert
    :return: protobuf interface
    """
    # network side interfaces only carry an id
    if isinstance(iface.node, CoreNetwork):
        return core_pb2.Interface(id=iface.id)
    ip4_net = iface.get_ip4()
    ip6_net = iface.get_ip6()
    return core_pb2.Interface(
        id=iface.id,
        name=iface.name,
        mac=str(iface.mac) if iface.mac else None,
        ip4=str(ip4_net.ip) if ip4_net else None,
        ip4_mask=ip4_net.prefixlen if ip4_net else None,
        ip6=str(ip6_net.ip) if ip6_net else None,
        ip6_mask=ip6_net.prefixlen if ip6_net else None,
    )
|
||||
|
||||
|
||||
def convert_core_link(core_link: CoreLink) -> list[core_pb2.Link]:
    """
    Convert core link to protobuf data.

    :param core_link: core link to convert
    :return: protobuf link data
    """
    unidirectional = core_link.is_unidirectional()
    links = [
        convert_link(
            core_link.node1,
            core_link.iface1,
            core_link.node2,
            core_link.iface2,
            core_link.iface1.options,
            unidirectional,
        )
    ]
    # unidirectional links produce a second entry for the reverse direction
    if unidirectional:
        links.append(
            convert_link(
                core_link.node2,
                core_link.iface2,
                core_link.node1,
                core_link.iface1,
                core_link.iface2.options,
                unidirectional,
            )
        )
    return links
|
||||
|
||||
|
||||
def convert_link_data(link_data: LinkData) -> core_pb2.Link:
    """
    Convert link_data into core protobuf link.

    :param link_data: link to convert
    :return: core protobuf Link
    """
    iface1_proto = (
        convert_iface_data(link_data.iface1) if link_data.iface1 is not None else None
    )
    iface2_proto = (
        convert_iface_data(link_data.iface2) if link_data.iface2 is not None else None
    )
    return core_pb2.Link(
        type=link_data.type.value,
        node1_id=link_data.node1_id,
        node2_id=link_data.node2_id,
        iface1=iface1_proto,
        iface2=iface2_proto,
        options=convert_link_options(link_data.options),
        network_id=link_data.network_id,
        label=link_data.label,
        color=link_data.color,
    )
|
||||
|
||||
|
||||
def convert_iface_data(iface_data: InterfaceData) -> core_pb2.Interface:
    """
    Convert interface data to protobuf.

    :param iface_data: interface data to convert
    :return: interface protobuf
    """
    fields = dict(
        id=iface_data.id,
        name=iface_data.name,
        mac=iface_data.mac,
        ip4=iface_data.ip4,
        ip4_mask=iface_data.ip4_mask,
        ip6=iface_data.ip6,
        ip6_mask=iface_data.ip6_mask,
    )
    return core_pb2.Interface(**fields)
|
||||
|
||||
|
||||
def convert_link_options(options: LinkOptions) -> core_pb2.LinkOptions:
    """
    Convert link options to protobuf.

    :param options: link options to convert
    :return: link options protobuf
    """
    fields = (
        "delay",
        "bandwidth",
        "loss",
        "dup",
        "jitter",
        "mer",
        "burst",
        "mburst",
        "buffer",
        "unidirectional",
        "key",
    )
    return core_pb2.LinkOptions(**{name: getattr(options, name) for name in fields})
|
||||
|
||||
|
||||
def convert_options_proto(options: core_pb2.LinkOptions) -> LinkOptions:
    """
    Convert a link options proto into link options.

    :param options: link options proto to convert
    :return: link options
    """
    fields = (
        "delay",
        "bandwidth",
        "loss",
        "dup",
        "jitter",
        "mer",
        "burst",
        "mburst",
        "buffer",
        "unidirectional",
        "key",
    )
    return LinkOptions(**{name: getattr(options, name) for name in fields})
|
||||
|
||||
|
||||
def convert_link(
    node1: NodeBase,
    iface1: Optional[CoreInterface],
    node2: NodeBase,
    iface2: Optional[CoreInterface],
    options: LinkOptions,
    unidirectional: bool,
) -> core_pb2.Link:
    """
    Convert link objects to link protobuf.

    :param node1: first node in link
    :param iface1: node1 interface
    :param node2: second node in link
    :param iface2: node2 interface
    :param options: link options
    :param unidirectional: if this link is considered unidirectional
    :return: protobuf link
    """
    iface1_proto = convert_iface(iface1) if iface1 is not None else None
    iface2_proto = convert_iface(iface2) if iface2 is not None else None
    # links touching wireless nodes carry no options
    wireless = isinstance(node1, (WlanNode, EmaneNet)) or isinstance(
        node2, (WlanNode, EmaneNet)
    )
    if wireless:
        options_proto = None
    else:
        options_proto = convert_link_options(options)
        options_proto.unidirectional = unidirectional
    return core_pb2.Link(
        type=LinkTypes.WIRED.value,
        node1_id=node1.id,
        node2_id=node2.id,
        iface1=iface1_proto,
        iface2=iface2_proto,
        options=options_proto,
        network_id=None,
        label=None,
        color=None,
    )
|
||||
|
||||
|
||||
def parse_proc_net_dev(lines: list[str]) -> dict[str, dict[str, float]]:
    """
    Parse lines of output from /proc/net/dev.

    :param lines: lines of /proc/net/dev, including the two header lines
    :return: parsed device name to rx/tx byte counts
    """
    stats = {}
    # skip the two column header lines at the top of the file
    for line in lines[2:]:
        fields = line.split()
        if not fields:
            continue
        device = fields[0].strip(":")
        stats[device] = {"rx": float(fields[1]), "tx": float(fields[9])}
    return stats
|
||||
|
||||
|
||||
def get_net_stats() -> dict[str, dict[str, float]]:
    """
    Retrieve status about the current interfaces in the system.

    :return: send and receive status of the interfaces in the system
    """
    with open("/proc/net/dev", "r") as f:
        lines = f.readlines()
    # parse_proc_net_dev skips the two header lines itself; pre-slicing
    # here as well silently dropped the first two devices
    return parse_proc_net_dev(lines)
|
||||
|
||||
|
||||
def session_location(session: Session, location: core_pb2.SessionLocation) -> None:
    """
    Set session location based on location proto.

    :param session: session for location
    :param location: location to set
    :return: nothing
    """
    loc = session.location
    loc.refxyz = (location.x, location.y, location.z)
    loc.setrefgeo(location.lat, location.lon, location.alt)
    loc.refscale = location.scale
|
||||
|
||||
|
||||
def service_configuration(session: Session, config: ServiceConfig) -> None:
    """
    Convenience method for setting a node service configuration.

    :param session: session for service configuration
    :param config: service configuration
    :return: nothing
    """
    session.services.set_service(config.node_id, config.service)
    service = session.services.get_service(config.node_id, config.service)
    # only overwrite service values that were actually provided
    for attr, values in (
        ("configs", config.files),
        ("dirs", config.directories),
        ("startup", config.startup),
        ("validate", config.validate),
        ("shutdown", config.shutdown),
    ):
        if values:
            setattr(service, attr, tuple(values))
|
||||
|
||||
|
||||
def get_service_configuration(service: CoreService) -> NodeServiceData:
    """
    Convenience for converting a service to service data proto.

    :param service: service to get proto data for
    :return: service proto data
    """
    fields = dict(
        executables=service.executables,
        dependencies=service.dependencies,
        dirs=service.dirs,
        configs=service.configs,
        startup=service.startup,
        validate=service.validate,
        validation_mode=service.validation_mode.value,
        validation_timer=service.validation_timer,
        shutdown=service.shutdown,
        meta=service.meta,
    )
    return NodeServiceData(**fields)
|
||||
|
||||
|
||||
def iface_to_proto(session: Session, iface: CoreInterface) -> core_pb2.Interface:
    """
    Convenience for converting a core interface to the protobuf representation.

    :param session: session interface belongs to
    :param iface: interface to convert
    :return: interface proto
    """
    ip4_net = iface.get_ip4()
    ip6_net = iface.get_ip6()
    # nem data only applies to interfaces attached to an emane network
    nem_id = None
    nem_port = None
    if isinstance(iface.net, EmaneNet):
        nem_id = session.emane.get_nem_id(iface)
        nem_port = session.emane.get_nem_port(iface)
    return core_pb2.Interface(
        id=iface.id,
        name=iface.name,
        mac=str(iface.mac) if iface.mac else None,
        mtu=iface.mtu,
        flow_id=iface.flow_id,
        ip4=str(ip4_net.ip) if ip4_net else None,
        ip4_mask=ip4_net.prefixlen if ip4_net else None,
        ip6=str(ip6_net.ip) if ip6_net else None,
        ip6_mask=ip6_net.prefixlen if ip6_net else None,
        nem_id=nem_id,
        nem_port=nem_port,
    )
|
||||
|
||||
|
||||
def get_nem_id(
    session: Session, node: CoreNode, iface_id: int, context: ServicerContext
) -> int:
    """
    Get nem id for a given node and interface id.

    :param session: session node belongs to
    :param node: node to get nem id for
    :param iface_id: id of interface on node to get nem id for
    :param context: request context
    :return: nem id
    """
    iface = node.ifaces.get(iface_id)
    if not iface:
        # context.abort raises, terminating the request
        context.abort(
            grpc.StatusCode.NOT_FOUND, f"{node.name} missing interface {iface_id}"
        )
    if not isinstance(iface.net, EmaneNet):
        context.abort(
            grpc.StatusCode.INVALID_ARGUMENT,
            f"{node.name} interface {iface_id} is not an EMANE network",
        )
    nem_id = session.emane.get_nem_id(iface)
    if nem_id is None:
        context.abort(
            grpc.StatusCode.INVALID_ARGUMENT,
            f"{node.name} interface {iface_id} nem id does not exist",
        )
    return nem_id
|
||||
|
||||
|
||||
def get_emane_model_configs_dict(session: Session) -> dict[int, list[NodeEmaneConfig]]:
    """
    Get emane model configuration protobuf data.

    :param session: session to get emane model configuration for
    :return: dict of emane model protobuf configurations keyed by node id
    """
    configs = {}
    for _id, model_configs in session.emane.node_configs.items():
        # config ids encode both node id and an optional interface id;
        # -1 marks a config not tied to a specific interface
        node_id, iface_id = utils.parse_iface_config_id(_id)
        if iface_id is None:
            iface_id = -1
        for model_name in model_configs:
            model_class = session.emane.get_model(model_name)
            current_config = session.emane.get_config(_id, model_name)
            config = get_config_options(current_config, model_class)
            node_config = NodeEmaneConfig(
                model=model_name, iface_id=iface_id, config=config
            )
            configs.setdefault(node_id, []).append(node_config)
    return configs
|
||||
|
||||
|
||||
def get_hooks(session: Session) -> list[core_pb2.Hook]:
    """
    Retrieve hook protobuf data for a session.

    :param session: session to get hooks for
    :return: list of hook protobufs
    """
    hooks = []
    for state, state_hooks in session.hooks.items():
        for file_name, file_data in state_hooks:
            hooks.append(
                core_pb2.Hook(state=state.value, file=file_name, data=file_data)
            )
    return hooks
|
||||
|
||||
|
||||
def get_default_services(session: Session) -> list[ServiceDefaults]:
    """
    Retrieve the default service sets for a given session.

    :param session: session to get default service sets for
    :return: list of default service sets
    """
    return [
        ServiceDefaults(model=model, services=services)
        for model, services in session.services.default_services.items()
    ]
|
||||
|
||||
|
||||
def get_mobility_node(
    session: Session, node_id: int, context: ServicerContext
) -> Union[WlanNode, EmaneNet]:
    """
    Get mobility node.

    :param session: session to get node from
    :param node_id: id of node to get
    :param context: grpc context
    :return: wlan or emane node
    """
    # try each supported mobility node type in turn
    for node_class in (WlanNode, EmaneNet):
        try:
            return session.get_node(node_id, node_class)
        except CoreError:
            pass
    # context.abort raises, terminating the request
    context.abort(grpc.StatusCode.NOT_FOUND, "node id is not for wlan or emane")
|
||||
|
||||
|
||||
def convert_session(session: Session) -> wrappers.Session:
    """
    Convert session to its wrapped version.

    :param session: session to convert
    :return: wrapped session data
    """
    # NOTE(review): annotated as wrappers.Session but builds core_pb2.Session -- confirm annotation
    emane_configs = get_emane_model_configs_dict(session)
    nodes = []
    links = []
    for _id in session.nodes:
        node = session.nodes[_id]
        # skip internal helper networks (ptp/control) not shown to users
        if not isinstance(node, (PtpNet, CtrlNet)):
            node_emane_configs = emane_configs.get(node.id, [])
            node_proto = get_node_proto(session, node, node_emane_configs)
            nodes.append(node_proto)
            # wireless based nodes also report their own dynamic link data
            if isinstance(node, (WlanNode, EmaneNet)):
                for link_data in node.links():
                    links.append(convert_link_data(link_data))
    for core_link in session.link_manager.links():
        links.extend(convert_core_link(core_link))
    default_services = get_default_services(session)
    x, y, z = session.location.refxyz
    lat, lon, alt = session.location.refgeo
    location = core_pb2.SessionLocation(
        x=x, y=y, z=z, lat=lat, lon=lon, alt=alt, scale=session.location.refscale
    )
    hooks = get_hooks(session)
    session_file = str(session.file_path) if session.file_path else None
    options = convert_session_options(session)
    servers = [
        core_pb2.Server(name=x.name, host=x.host)
        for x in session.distributed.servers.values()
    ]
    return core_pb2.Session(
        id=session.id,
        state=session.state.value,
        nodes=nodes,
        links=links,
        dir=str(session.directory),
        user=session.user,
        default_services=default_services,
        location=location,
        hooks=hooks,
        metadata=session.metadata,
        file=session_file,
        options=options,
        servers=servers,
    )
|
||||
|
||||
|
||||
def configure_node(
    session: Session, node: core_pb2.Node, core_node: NodeBase, context: ServicerContext
) -> None:
    """
    Configure a node using all provided protobuf data, applying emane,
    wlan, mobility, wireless, service, and config service configurations.

    :param session: session for node
    :param node: node protobuf data
    :param core_node: session node
    :param context: grpc context
    :return: nothing
    """
    # apply emane model configs, keyed by node and optional interface id
    for emane_config in node.emane_configs:
        _id = utils.iface_config_id(node.id, emane_config.iface_id)
        config = {k: v.value for k, v in emane_config.config.items()}
        session.emane.set_config(_id, emane_config.model, config)
    if node.wlan_config:
        config = {k: v.value for k, v in node.wlan_config.items()}
        session.mobility.set_model_config(node.id, BasicRangeModel.name, config)
    if node.mobility_config:
        config = {k: v.value for k, v in node.mobility_config.items()}
        session.mobility.set_model_config(node.id, Ns2ScriptedMobility.name, config)
    # wireless config is applied directly on the node itself
    if isinstance(core_node, WirelessNode) and node.wireless_config:
        config = {k: v.value for k, v in node.wireless_config.items()}
        core_node.set_config(config)
    # apply service configurations and any custom file content
    for service_name, service_config in node.service_configs.items():
        data = service_config.data
        config = ServiceConfig(
            node_id=node.id,
            service=service_name,
            startup=data.startup,
            validate=data.validate,
            shutdown=data.shutdown,
            files=data.configs,
            directories=data.dirs,
        )
        service_configuration(session, config)
        for file_name, file_data in service_config.files.items():
            session.services.set_service_file(
                node.id, service_name, file_name, file_data
            )
    # config service configs are only valid for CoreNode instances
    if node.config_service_configs:
        if not isinstance(core_node, CoreNode):
            # context.abort raises, terminating the request
            context.abort(
                grpc.StatusCode.INVALID_ARGUMENT,
                "invalid node type with config service configs",
            )
        for service_name, service_config in node.config_service_configs.items():
            service = core_node.config_services[service_name]
            if service_config.config:
                service.set_config(service_config.config)
            for name, template in service_config.templates.items():
                service.set_template(name, template)
|
||||
File diff suppressed because it is too large
Load diff
File diff suppressed because it is too large
Load diff
1108
daemon/core/broker.py
Normal file
1108
daemon/core/broker.py
Normal file
File diff suppressed because it is too large
Load diff
398
daemon/core/conf.py
Normal file
398
daemon/core/conf.py
Normal file
|
|
@ -0,0 +1,398 @@
|
|||
"""
|
||||
Common support for configurable CORE objects.
|
||||
"""
|
||||
|
||||
import logging
|
||||
from collections import OrderedDict
|
||||
|
||||
from core.data import ConfigData
|
||||
|
||||
|
||||
class ConfigShim(object):
    """
    Provides helper methods for converting newer configuration values into TLV compatible formats.
    """

    @classmethod
    def str_to_dict(cls, key_values):
        """
        Converts a TLV key/value string into an ordered mapping.

        :param str key_values: pipe delimited string of "key=value" pairs
        :return: ordered mapping of key/value pairs
        :rtype: OrderedDict
        """
        key_values = key_values.split("|")
        values = OrderedDict()
        for key_value in key_values:
            # split only on the first "=", values may contain their own
            key, value = key_value.split("=", 1)
            values[key] = value
        return values

    @classmethod
    def groups_to_str(cls, config_groups):
        """
        Converts configuration groups to a TLV formatted string of
        "name:start-stop" entries joined by "|".

        :param list[ConfigGroup] config_groups: configuration groups to format
        :return: TLV configuration group string
        :rtype: str
        """
        group_strings = []
        for config_group in config_groups:
            group_string = "%s:%s-%s" % (config_group.name, config_group.start, config_group.stop)
            group_strings.append(group_string)
        return "|".join(group_strings)

    @classmethod
    def config_data(cls, flags, node_id, type_flags, configurable_options, config):
        """
        Convert this class to a Config API message. Some TLVs are defined
        by the class, but node number, conf type flags, and values must
        be passed in.

        :param int flags: message flags
        :param int node_id: node id
        :param int type_flags: type flags
        :param ConfigurableOptions configurable_options: options to create config data for
        :param dict config: configuration values for options
        :return: configuration data object
        :rtype: ConfigData
        """
        # captions and key/value pairs accumulate as pipe delimited strings
        key_values = None
        captions = None
        data_types = []
        possible_values = []
        logging.debug("configurable: %s", configurable_options)
        # NOTE(review): logs the bound method object, not its result -- confirm intended
        logging.debug("configuration options: %s", configurable_options.configurations)
        logging.debug("configuration data: %s", config)
        for configuration in configurable_options.configurations():
            if not captions:
                captions = configuration.label
            else:
                captions += "|%s" % configuration.label

            data_types.append(configuration.type.value)

            options = ",".join(configuration.options)
            possible_values.append(options)

            # fall back to the option default when no value is configured
            _id = configuration.id
            config_value = config.get(_id, configuration.default)
            key_value = "%s=%s" % (_id, config_value)
            if not key_values:
                key_values = key_value
            else:
                key_values += "|%s" % key_value

        groups_str = cls.groups_to_str(configurable_options.config_groups())
        return ConfigData(
            message_type=flags,
            node=node_id,
            object=configurable_options.name,
            type=type_flags,
            data_types=tuple(data_types),
            data_values=key_values,
            captions=captions,
            possible_values="|".join(possible_values),
            bitmap=configurable_options.bitmap,
            groups=groups_str
        )
|
||||
|
||||
|
||||
class Configuration(object):
    """
    Represents a single configuration option.
    """

    def __init__(self, _id, _type, label=None, default="", options=None):
        """
        Creates a Configuration object.

        :param str _id: unique name for configuration
        :param core.enumerations.ConfigDataTypes _type: configuration data type
        :param str label: configuration label for display, defaults to the id
        :param str default: default value for configuration
        :param list options: list options if this is a configuration with a combobox
        """
        self.id = _id
        self.type = _type
        self.default = default
        # avoid a shared mutable default by creating the list here
        self.options = options if options else []
        self.label = label if label else _id

    def __str__(self):
        return "%s(id=%s, type=%s, default=%s, options=%s)" % (
            self.__class__.__name__,
            self.id,
            self.type,
            self.default,
            self.options,
        )
|
||||
|
||||
|
||||
class ConfigurableManager(object):
    """
    Provides convenience methods for storing and retrieving configuration
    options for nodes.

    Configurations are stored as a nested mapping:
    node id -> configuration type -> configuration key -> value.
    """
    # sentinel node id used when no specific node is given
    _default_node = -1
    # sentinel configuration type used when no specific type is given
    _default_type = _default_node

    def __init__(self):
        """
        Creates a ConfigurableManager object.
        """
        self.node_configurations = {}

    def nodes(self):
        """
        Retrieves the ids of all node configurations known by this manager.

        :return: list of node ids
        :rtype: list
        """
        # fix: iterate the dict directly instead of dict.iterkeys(), which
        # does not exist in Python 3 (and iterating directly works in both)
        return [node_id for node_id in self.node_configurations if node_id != self._default_node]

    def config_reset(self, node_id=None):
        """
        Clears all configurations or configuration for a specific node.

        :param int node_id: node id to clear configurations for, default is None and clears all configurations
        :return: nothing
        """
        logging.debug("resetting all configurations: %s", self.__class__.__name__)
        if not node_id:
            self.node_configurations.clear()
        elif node_id in self.node_configurations:
            self.node_configurations.pop(node_id)

    def set_config(self, _id, value, node_id=_default_node, config_type=_default_type):
        """
        Set a specific configuration value for a node and configuration type.

        :param str _id: configuration key
        :param str value: configuration value
        :param int node_id: node id to store configuration for
        :param str config_type: configuration type to store configuration for
        :return: nothing
        """
        logging.debug("setting config for node(%s) type(%s): %s=%s", node_id, config_type, _id, value)
        node_configs = self.node_configurations.setdefault(node_id, OrderedDict())
        node_type_configs = node_configs.setdefault(config_type, OrderedDict())
        node_type_configs[_id] = value

    def set_configs(self, config, node_id=_default_node, config_type=_default_type):
        """
        Set configurations for a node and configuration type.

        :param dict config: configurations to set
        :param int node_id: node id to store configuration for
        :param str config_type: configuration type to store configuration for
        :return: nothing
        """
        logging.debug("setting config for node(%s) type(%s): %s", node_id, config_type, config)
        node_configs = self.node_configurations.setdefault(node_id, OrderedDict())
        node_configs[config_type] = config

    def get_config(self, _id, node_id=_default_node, config_type=_default_type, default=None):
        """
        Retrieves a specific configuration for a node and configuration type.

        :param str _id: specific configuration to retrieve
        :param int node_id: node id to store configuration for
        :param str config_type: configuration type to store configuration for
        :param default: default value to return when value is not found
        :return: configuration value
        :rtype: str
        """
        logging.debug("getting config for node(%s) type(%s): %s", node_id, config_type, _id)
        result = default
        node_type_configs = self.get_configs(node_id, config_type)
        if node_type_configs:
            result = node_type_configs.get(_id, default)
        return result

    def get_configs(self, node_id=_default_node, config_type=_default_type):
        """
        Retrieve configurations for a node and configuration type.

        :param int node_id: node id to store configuration for
        :param str config_type: configuration type to store configuration for
        :return: configurations, or None when none are stored
        :rtype: dict
        """
        logging.debug("getting configs for node(%s) type(%s)", node_id, config_type)
        result = None
        node_configs = self.node_configurations.get(node_id)
        if node_configs:
            result = node_configs.get(config_type)
        return result

    def get_all_configs(self, node_id=_default_node):
        """
        Retrieve all current configuration types for a node.

        :param int node_id: node id to retrieve configurations for
        :return: all configuration types for a node, or None when none are stored
        :rtype: dict
        """
        logging.debug("getting all configs for node(%s)", node_id)
        return self.node_configurations.get(node_id)
|
||||
|
||||
|
||||
class ConfigGroup(object):
    """
    Groups a contiguous range of configurations under a named display tab.
    """

    def __init__(self, name, start, stop):
        """
        Creates a ConfigGroup object.

        :param str name: configuration group display name
        :param int start: configurations start index for this group
        :param int stop: configurations stop index for this group
        """
        self.stop = stop
        self.start = start
        self.name = name
|
||||
|
||||
|
||||
class ConfigurableOptions(object):
    """
    Provides a base for defining configuration options within CORE.
    """
    name = None
    bitmap = None
    options = []

    @classmethod
    def configurations(cls):
        """
        Provides the configurations for this class.

        :return: configurations
        :rtype: list[Configuration]
        """
        return cls.options

    @classmethod
    def config_groups(cls):
        """
        Defines how configurations are grouped.

        :return: configuration group definition
        :rtype: list[ConfigGroup]
        """
        option_count = len(cls.configurations())
        return [ConfigGroup("Options", 1, option_count)]

    @classmethod
    def default_values(cls):
        """
        Provides an ordered mapping of configuration keys to default values.

        :return: ordered configuration mapping default values
        :rtype: OrderedDict
        """
        defaults = ((config.id, config.default) for config in cls.configurations())
        return OrderedDict(defaults)
|
||||
|
||||
|
||||
class ModelManager(ConfigurableManager):
    """
    Helps handle setting models for nodes and managing their model configurations.
    """

    def __init__(self):
        """
        Creates a ModelManager object.
        """
        super(ModelManager, self).__init__()
        # maps model names to model classes available for assignment
        self.models = {}
        # maps node ids to the name of the model assigned for startup
        self.node_models = {}

    def set_model_config(self, node_id, model_name, config=None):
        """
        Set configuration data for a model.

        :param int node_id: node id to set model configuration for
        :param str model_name: model to set configuration for
        :param dict config: configuration data to set for model
        :return: nothing
        :raises ValueError: when model_name is not a known model
        """
        # get model class to configure
        model_class = self.models.get(model_name)
        if not model_class:
            raise ValueError("%s is an invalid model" % model_name)

        # retrieve default values and overlay provided configuration
        model_config = self.get_model_config(node_id, model_name)
        if not config:
            config = {}
        # fix: items() instead of iteritems(), which does not exist in Python 3
        for key, value in config.items():
            model_config[key] = value

        # set as node model for startup
        self.node_models[node_id] = model_name

        # set configuration
        self.set_configs(model_config, node_id=node_id, config_type=model_name)

    def get_model_config(self, node_id, model_name):
        """
        Retrieve configuration data for a model, storing defaults when unset.

        :param int node_id: node id to get model configuration for
        :param str model_name: model to get configuration for
        :return: current model configuration for node
        :rtype: dict
        :raises ValueError: when model_name is not a known model
        """
        # get model class to configure
        model_class = self.models.get(model_name)
        if not model_class:
            raise ValueError("%s is an invalid model" % model_name)

        config = self.get_configs(node_id=node_id, config_type=model_name)
        if not config:
            # set default values, when not already set
            config = model_class.default_values()
            self.set_configs(config, node_id=node_id, config_type=model_name)

        return config

    def set_model(self, node, model_class, config=None):
        """
        Set model and model configuration for node.

        :param node: node to set model for
        :param model_class: model class to set for node
        :param dict config: model configuration, None for default configuration
        :return: nothing
        """
        logging.info("setting mobility model(%s) for node(%s): %s", model_class.name, node.objid, config)
        self.set_model_config(node.objid, model_class.name, config)
        config = self.get_model_config(node.objid, model_class.name)
        node.setmodel(model_class, config)

    def get_models(self, node):
        """
        Return a list of model classes and values for a net if one has been
        configured. This is invoked when exporting a session to XML.

        :param node: network node to get models for
        :return: list of model and values tuples for the network node
        :rtype: list
        """
        all_configs = self.get_all_configs(node.objid)
        if not all_configs:
            all_configs = {}

        models = []
        # fix: items() instead of iteritems(), which does not exist in Python 3
        for model_name, config in all_configs.items():
            if model_name == ModelManager._default_node:
                continue
            model_class = self.models[model_name]
            models.append((model_class, config))

        logging.debug("models for node(%s): %s", node.objid, models)
        return models
|
||||
|
|
@ -1,385 +0,0 @@
|
|||
"""
|
||||
Common support for configurable CORE objects.
|
||||
"""
|
||||
|
||||
import logging
|
||||
from collections import OrderedDict
|
||||
from dataclasses import dataclass, field
|
||||
from typing import TYPE_CHECKING, Any, Optional, Union
|
||||
|
||||
from core.emane.nodes import EmaneNet
|
||||
from core.emulator.enumerations import ConfigDataTypes
|
||||
from core.errors import CoreConfigError
|
||||
from core.nodes.network import WlanNode
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from core.location.mobility import WirelessModel
|
||||
|
||||
WirelessModelType = type[WirelessModel]
|
||||
|
||||
_BOOL_OPTIONS: set[str] = {"0", "1"}
|
||||
|
||||
|
||||
@dataclass
class ConfigGroup:
    """
    Defines configuration group tabs used for display by ConfigurationOptions.
    """

    # display name for this group of configurations
    name: str
    # first configuration index covered by this group
    start: int
    # last configuration index covered by this group
    stop: int
|
||||
|
||||
|
||||
@dataclass
class Configuration:
    """
    Represents a configuration option, validating the default value against
    the declared type when the instance is created.
    """

    # unique key identifying this option
    id: str
    # declared data type, used to validate the default value
    type: ConfigDataTypes
    # fix: was annotated `str = None` (implicit Optional is invalid per PEP 484);
    # falls back to the id when not provided
    label: Optional[str] = None
    # default value, stored in string form regardless of declared type
    default: str = ""
    # allowed values when this option is rendered as a combobox
    options: list[str] = field(default_factory=list)
    # display group name
    group: str = "Configuration"

    def __post_init__(self) -> None:
        """
        Validates that the default value can be parsed as the declared type.

        :raises CoreConfigError: when the default is inconsistent with the type
        """
        self.label = self.label if self.label else self.id
        if self.type == ConfigDataTypes.BOOL:
            if self.default and self.default not in _BOOL_OPTIONS:
                raise CoreConfigError(
                    f"{self.id} bool value must be one of: {_BOOL_OPTIONS}: "
                    f"{self.default}"
                )
        elif self.type == ConfigDataTypes.FLOAT:
            if self.default:
                try:
                    float(self.default)
                except ValueError:
                    raise CoreConfigError(
                        f"{self.id} is not a valid float: {self.default}"
                    )
        elif self.type != ConfigDataTypes.STRING:
            # all remaining non-string types are treated as integer variants
            if self.default:
                try:
                    int(self.default)
                except ValueError:
                    raise CoreConfigError(
                        f"{self.id} is not a valid int: {self.default}"
                    )
|
||||
|
||||
|
||||
@dataclass
class ConfigBool(Configuration):
    """
    Represents a boolean configuration option.
    """

    # declared type is fixed to BOOL for this subclass
    type: ConfigDataTypes = ConfigDataTypes.BOOL
    # typed value, separate from the string-based default — usage not visible here
    value: bool = False
|
||||
|
||||
|
||||
@dataclass
class ConfigFloat(Configuration):
    """
    Represents a float configuration option.
    """

    # declared type is fixed to FLOAT for this subclass
    type: ConfigDataTypes = ConfigDataTypes.FLOAT
    # typed value, separate from the string-based default — usage not visible here
    value: float = 0.0
|
||||
|
||||
|
||||
@dataclass
class ConfigInt(Configuration):
    """
    Represents an integer configuration option.
    """

    # declared type is fixed to INT32 for this subclass
    type: ConfigDataTypes = ConfigDataTypes.INT32
    # typed value, separate from the string-based default — usage not visible here
    value: int = 0
|
||||
|
||||
|
||||
@dataclass
class ConfigString(Configuration):
    """
    Represents a string configuration option.
    """

    # declared type is fixed to STRING for this subclass
    type: ConfigDataTypes = ConfigDataTypes.STRING
    # typed value, separate from the default — usage not visible here
    value: str = ""
|
||||
|
||||
|
||||
class ConfigurableOptions:
    """
    Provides a base for defining configuration options within CORE.
    """

    name: Optional[str] = None
    options: list[Configuration] = []

    @classmethod
    def configurations(cls) -> list[Configuration]:
        """
        Provides the configurations for this class.

        :return: configurations
        """
        return cls.options

    @classmethod
    def config_groups(cls) -> list[ConfigGroup]:
        """
        Defines how configurations are grouped.

        :return: configuration group definition
        """
        option_count = len(cls.configurations())
        return [ConfigGroup("Options", 1, option_count)]

    @classmethod
    def default_values(cls) -> dict[str, str]:
        """
        Provides an ordered mapping of configuration keys to default values.

        :return: ordered configuration mapping default values
        """
        defaults = ((option.id, option.default) for option in cls.configurations())
        return OrderedDict(defaults)
|
||||
|
||||
|
||||
class ConfigurableManager:
    """
    Provides convenience methods for storing and retrieving configuration options for
    nodes.

    Storage layout: node id -> configuration type -> configuration key -> value.
    """

    _default_node: int = -1
    _default_type: int = _default_node

    def __init__(self) -> None:
        """
        Creates a ConfigurableManager object.
        """
        self.node_configurations = {}

    def nodes(self) -> list[int]:
        """
        Retrieves the ids of all node configurations known by this manager.

        :return: list of node ids
        """
        return [
            node_id
            for node_id in self.node_configurations
            if node_id != self._default_node
        ]

    def config_reset(self, node_id: int = None) -> None:
        """
        Clears all configurations or configuration for a specific node.

        :param node_id: node id to clear configurations for, default is None and clears
            all configurations
        :return: nothing
        """
        if not node_id:
            self.node_configurations.clear()
        else:
            # pop with a default so an unknown node id is a no-op
            self.node_configurations.pop(node_id, None)

    def set_config(
        self,
        _id: str,
        value: str,
        node_id: int = _default_node,
        config_type: str = _default_type,
    ) -> None:
        """
        Set a specific configuration value for a node and configuration type.

        :param _id: configuration key
        :param value: configuration value
        :param node_id: node id to store configuration for
        :param config_type: configuration type to store configuration for
        :return: nothing
        """
        configs_by_type = self.node_configurations.setdefault(node_id, OrderedDict())
        configs_by_id = configs_by_type.setdefault(config_type, OrderedDict())
        configs_by_id[_id] = value

    def set_configs(
        self,
        config: dict[str, str],
        node_id: int = _default_node,
        config_type: str = _default_type,
    ) -> None:
        """
        Set configurations for a node and configuration type.

        :param config: configurations to set
        :param node_id: node id to store configuration for
        :param config_type: configuration type to store configuration for
        :return: nothing
        """
        logger.debug(
            "setting config for node(%s) type(%s): %s", node_id, config_type, config
        )
        configs_by_type = self.node_configurations.setdefault(node_id, OrderedDict())
        configs_by_type[config_type] = config

    def get_config(
        self,
        _id: str,
        node_id: int = _default_node,
        config_type: str = _default_type,
        default: str = None,
    ) -> str:
        """
        Retrieves a specific configuration for a node and configuration type.

        :param _id: specific configuration to retrieve
        :param node_id: node id to store configuration for
        :param config_type: configuration type to store configuration for
        :param default: default value to return when value is not found
        :return: configuration value
        """
        configs_by_id = self.get_configs(node_id, config_type)
        if not configs_by_id:
            return default
        return configs_by_id.get(_id, default)

    def get_configs(
        self, node_id: int = _default_node, config_type: str = _default_type
    ) -> Optional[dict[str, str]]:
        """
        Retrieve configurations for a node and configuration type.

        :param node_id: node id to store configuration for
        :param config_type: configuration type to store configuration for
        :return: configurations
        """
        configs_by_type = self.node_configurations.get(node_id)
        if not configs_by_type:
            return None
        return configs_by_type.get(config_type)

    def get_all_configs(self, node_id: int = _default_node) -> dict[str, Any]:
        """
        Retrieve all current configuration types for a node.

        :param node_id: node id to retrieve configurations for
        :return: all configuration types for a node
        """
        return self.node_configurations.get(node_id)
|
||||
|
||||
|
||||
class ModelManager(ConfigurableManager):
    """
    Helps handle setting models for nodes and managing their model configurations.
    """

    def __init__(self) -> None:
        """
        Creates a ModelManager object.
        """
        super().__init__()
        # maps model names to model classes available for assignment
        self.models: dict[str, Any] = {}
        # maps node ids to the name of the model assigned for startup
        self.node_models: dict[int, str] = {}

    def set_model_config(
        self, node_id: int, model_name: str, config: dict[str, str] = None
    ) -> None:
        """
        Set configuration data for a model.

        :param node_id: node id to set model configuration for
        :param model_name: model to set configuration for
        :param config: configuration data to set for model
        :return: nothing
        :raises ValueError: when model_name is not a known model
        """
        # get model class to configure
        model_class = self.models.get(model_name)
        if not model_class:
            raise ValueError(f"{model_name} is an invalid model")

        # retrieve default values and overlay the provided configuration
        # (idiomatic dict.update replaces the manual key-by-key copy loop)
        model_config = self.get_model_config(node_id, model_name)
        if config:
            model_config.update(config)

        # set as node model for startup
        self.node_models[node_id] = model_name

        # set configuration
        self.set_configs(model_config, node_id=node_id, config_type=model_name)

    def get_model_config(self, node_id: int, model_name: str) -> dict[str, str]:
        """
        Retrieve configuration data for a model, storing defaults when unset.

        :param node_id: node id to get model configuration for
        :param model_name: model to get configuration for
        :return: current model configuration for node
        :raises ValueError: when model_name is not a known model
        """
        # get model class to configure
        model_class = self.models.get(model_name)
        if not model_class:
            raise ValueError(f"{model_name} is an invalid model")

        config = self.get_configs(node_id=node_id, config_type=model_name)
        if not config:
            # set default values, when not already set
            config = model_class.default_values()
            self.set_configs(config, node_id=node_id, config_type=model_name)

        return config

    def set_model(
        self,
        node: Union[WlanNode, EmaneNet],
        model_class: "WirelessModelType",
        config: dict[str, str] = None,
    ) -> None:
        """
        Set model and model configuration for node.

        :param node: node to set model for
        :param model_class: model class to set for node
        :param config: model configuration, None for default configuration
        :return: nothing
        """
        logger.debug(
            "setting model(%s) for node(%s): %s", model_class.name, node.id, config
        )
        self.set_model_config(node.id, model_class.name, config)
        config = self.get_model_config(node.id, model_class.name)
        node.setmodel(model_class, config)

    def get_models(
        self, node: Union[WlanNode, EmaneNet]
    ) -> list[tuple[type, dict[str, str]]]:
        """
        Return a list of model classes and values for a net if one has been
        configured. This is invoked when exporting a session to XML.

        :param node: network node to get models for
        :return: list of model and values tuples for the network node
        """
        all_configs = self.get_all_configs(node.id)
        if not all_configs:
            all_configs = {}

        models = []
        # idiomatic items() iteration instead of key iteration plus lookup
        for model_name, config in all_configs.items():
            # skip configurations stored under the default sentinel
            if model_name == ModelManager._default_node:
                continue
            model_class = self.models[model_name]
            models.append((model_class, config))

        logger.debug("models for node(%s): %s", node.id, models)
        return models
|
||||
|
|
@ -1,510 +0,0 @@
|
|||
import abc
|
||||
import enum
|
||||
import inspect
|
||||
import logging
|
||||
import time
|
||||
from dataclasses import dataclass
|
||||
from pathlib import Path
|
||||
from typing import Any, Optional
|
||||
|
||||
from mako import exceptions
|
||||
from mako.lookup import TemplateLookup
|
||||
from mako.template import Template
|
||||
|
||||
from core.config import Configuration
|
||||
from core.errors import CoreCommandError, CoreError
|
||||
from core.nodes.base import CoreNode
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
TEMPLATES_DIR: str = "templates"
|
||||
|
||||
|
||||
def get_template_path(file_path: Path) -> str:
    """
    Utility to convert a given file path to a valid template path format.

    Absolute paths are made relative to the filesystem root so they can be
    used as template lookup keys; relative paths pass through unchanged.

    :param file_path: file path to convert
    :return: template path
    """
    if file_path.is_absolute():
        return str(file_path.relative_to("/"))
    return str(file_path)
|
||||
|
||||
|
||||
class ConfigServiceMode(enum.Enum):
    """
    Defines how a config service's startup is validated (see ConfigService.start).
    """

    # wait on the startup commands themselves to complete successfully
    BLOCKING = 0
    # run startup without waiting, then run the service's validate commands
    NON_BLOCKING = 1
    # run startup without waiting, then wait a validation timer period
    TIMER = 2
|
||||
|
||||
|
||||
class ConfigServiceBootError(Exception):
    """
    Raised when a config service fails to start or validate.
    """

    pass
|
||||
|
||||
|
||||
class ConfigServiceTemplateError(Exception):
    """
    Raised when a template for a config service file cannot be found.
    """

    pass
|
||||
|
||||
|
||||
@dataclass
class ShadowDir:
    """
    Defines a host directory to shadow (recursively copy) into a node.
    """

    # absolute path of the directory to create within the node
    path: str
    # host source directory to copy from; defaults to path when None
    src: Optional[str] = None
    # when True, source files are treated as mako templates and rendered
    templates: bool = False
    # when True, a per-node subdirectory (named after the node) is used under src
    has_node_paths: bool = False
|
||||
|
||||
|
||||
class ConfigService(abc.ABC):
|
||||
"""
|
||||
Base class for creating configurable services.
|
||||
"""
|
||||
|
||||
# validation period in seconds, how frequent validation is attempted
|
||||
validation_period: float = 0.5
|
||||
|
||||
# time to wait in seconds for determining if service started successfully
|
||||
validation_timer: int = 5
|
||||
|
||||
# directories to shadow and copy files from
|
||||
shadow_directories: list[ShadowDir] = []
|
||||
|
||||
    def __init__(self, node: CoreNode) -> None:
        """
        Create ConfigService instance.

        :param node: node this service is assigned to
        """
        self.node: CoreNode = node
        # file-based templates are looked up in a "templates" directory next to
        # the module defining the concrete service class
        class_file = inspect.getfile(self.__class__)
        templates_path = Path(class_file).parent.joinpath(TEMPLATES_DIR)
        self.templates: TemplateLookup = TemplateLookup(directories=templates_path)
        # current configuration options for this service
        self.config: dict[str, Configuration] = {}
        # custom template overrides, keyed by file name (see set_template)
        self.custom_templates: dict[str, str] = {}
        # custom configuration overrides — presumably keyed by config id;
        # populated elsewhere, confirm with callers
        self.custom_config: dict[str, str] = {}
        # copy the class-level defaults so they cannot be mutated per-instance
        configs = self.default_configs[:]
        self._define_config(configs)
|
||||
|
||||
@staticmethod
|
||||
def clean_text(text: str) -> str:
|
||||
"""
|
||||
Returns space stripped text for string literals, while keeping space
|
||||
indentations.
|
||||
|
||||
:param text: text to clean
|
||||
:return: cleaned text
|
||||
"""
|
||||
return inspect.cleandoc(text)
|
||||
|
||||
    @property
    @abc.abstractmethod
    def name(self) -> str:
        """Unique name of this service; used in log and error messages."""
        raise NotImplementedError
|
||||
|
||||
    @property
    @abc.abstractmethod
    def group(self) -> str:
        """Group this service belongs to — presumably for display grouping; confirm with callers."""
        raise NotImplementedError
|
||||
|
||||
    @property
    @abc.abstractmethod
    def directories(self) -> list[str]:
        """Directories to create within the node for this service (see create_dirs)."""
        raise NotImplementedError
|
||||
|
||||
    @property
    @abc.abstractmethod
    def files(self) -> list[str]:
        """File paths this service generates within the node (see create_files)."""
        raise NotImplementedError
|
||||
|
||||
    @property
    @abc.abstractmethod
    def default_configs(self) -> list[Configuration]:
        """Default configuration options defined for this service."""
        raise NotImplementedError
|
||||
|
||||
    @property
    @abc.abstractmethod
    def modes(self) -> dict[str, dict[str, str]]:
        """Named sets of configuration values — NOTE(review): usage not visible here; confirm semantics with callers."""
        raise NotImplementedError
|
||||
|
||||
    @property
    @abc.abstractmethod
    def executables(self) -> list[str]:
        """Executables this service requires — presumably checked for availability; confirm with callers."""
        raise NotImplementedError
|
||||
|
||||
    @property
    @abc.abstractmethod
    def dependencies(self) -> list[str]:
        """Names of services this one depends on — presumably used for start ordering; confirm with callers."""
        raise NotImplementedError
|
||||
|
||||
    @property
    @abc.abstractmethod
    def startup(self) -> list[str]:
        """Commands run to start the service (see run_startup)."""
        raise NotImplementedError
|
||||
|
||||
    @property
    @abc.abstractmethod
    def validate(self) -> list[str]:
        """Commands used to validate the service started — run for non-blocking validation."""
        raise NotImplementedError
|
||||
|
||||
    @property
    @abc.abstractmethod
    def shutdown(self) -> list[str]:
        """Commands run to stop the service (see stop)."""
        raise NotImplementedError
|
||||
|
||||
    @property
    @abc.abstractmethod
    def validation_mode(self) -> ConfigServiceMode:
        """How service startup is validated; see ConfigServiceMode and start()."""
        raise NotImplementedError
|
||||
|
||||
    def start(self) -> None:
        """
        Creates services files/directories, runs startup, and validates based on
        validation mode.

        :return: nothing
        :raises ConfigServiceBootError: when there is an error starting service
        """
        logger.info("node(%s) service(%s) starting...", self.node.name, self.name)
        self.create_shadow_dirs()
        self.create_dirs()
        self.create_files()
        # BLOCKING mode waits on the startup commands themselves; otherwise
        # validation is performed after startup, by timer or validate commands
        wait = self.validation_mode == ConfigServiceMode.BLOCKING
        self.run_startup(wait)
        if not wait:
            if self.validation_mode == ConfigServiceMode.TIMER:
                self.wait_validation()
            else:
                self.run_validation()
|
||||
|
||||
    def stop(self) -> None:
        """
        Stop service using shutdown commands.

        Best-effort: a failing shutdown command is logged and the remaining
        commands are still attempted.

        :return: nothing
        """
        for cmd in self.shutdown:
            try:
                self.node.cmd(cmd)
            except CoreCommandError:
                logger.exception(
                    f"node({self.node.name}) service({self.name}) "
                    f"failed shutdown: {cmd}"
                )
|
||||
|
||||
    def restart(self) -> None:
        """
        Restarts service by running stop and then start.

        Note that start() recreates service directories and files.

        :return: nothing
        """
        self.stop()
        self.start()
|
||||
|
||||
    def create_shadow_dirs(self) -> None:
        """
        Creates a shadow of a host system directory recursively
        to be mapped and live within a node.

        For each configured ShadowDir: resolves the host source directory,
        validates both paths, recreates the directory tree inside the node,
        then copies (or template-renders) every file into it.

        :return: nothing
        :raises CoreError: when there is a failure creating a directory or file
        """
        for shadow_dir in self.shadow_directories:
            # setup shadow and src paths, using node unique paths when configured
            shadow_path = Path(shadow_dir.path)
            if shadow_dir.src is None:
                src_path = shadow_path
            else:
                src_path = Path(shadow_dir.src)
            if shadow_dir.has_node_paths:
                src_path = src_path / self.node.name
            # validate shadow and src paths
            if not shadow_path.is_absolute():
                raise CoreError(f"shadow dir({shadow_path}) is not absolute")
            if not src_path.is_absolute():
                raise CoreError(f"shadow source dir({src_path}) is not absolute")
            if not src_path.is_dir():
                raise CoreError(f"shadow source dir({src_path}) does not exist")
            # create root of the shadow path within node
            logger.info(
                "node(%s) creating shadow directory(%s) src(%s) node paths(%s) "
                "templates(%s)",
                self.node.name,
                shadow_path,
                src_path,
                shadow_dir.has_node_paths,
                shadow_dir.templates,
            )
            self.node.create_dir(shadow_path)
            # find all directories and files to create
            dir_paths = []
            file_paths = []
            for path in src_path.rglob("*"):
                shadow_src_path = shadow_path / path.relative_to(src_path)
                if path.is_dir():
                    dir_paths.append(shadow_src_path)
                else:
                    file_paths.append((path, shadow_src_path))
            # create all directories within node, before the files they contain
            for path in dir_paths:
                self.node.create_dir(path)
            # create all files within node, from templates when configured
            data = self.data()
            templates = TemplateLookup(directories=src_path)
            for path, dst_path in file_paths:
                if shadow_dir.templates:
                    template = templates.get_template(path.name)
                    rendered = self._render(template, data)
                    self.node.create_file(dst_path, rendered)
                else:
                    self.node.copy_file(path, dst_path)
|
||||
|
||||
    def create_dirs(self) -> None:
        """
        Creates directories for service.

        :return: nothing
        :raises CoreError: when there is a failure creating a directory
        """
        logger.debug("creating config service directories")
        # sorted so parent directories are created before their children
        for directory in sorted(self.directories):
            dir_path = Path(directory)
            try:
                self.node.create_dir(dir_path)
            except (CoreCommandError, CoreError):
                # re-raise as a single error type with service context
                raise CoreError(
                    f"node({self.node.name}) service({self.name}) "
                    f"failure to create service directory: {directory}"
                )
|
||||
|
||||
def data(self) -> dict[str, Any]:
|
||||
"""
|
||||
Returns key/value data, used when rendering file templates.
|
||||
|
||||
:return: key/value template data
|
||||
"""
|
||||
return {}
|
||||
|
||||
    def set_template(self, name: str, template: str) -> None:
        """
        Store custom template to render for a given file.

        Custom templates take precedence over file-based and text-based
        templates during rendering.

        :param name: file to store custom template for
        :param template: custom template to render
        :return: nothing
        """
        self.custom_templates[name] = template
|
||||
|
||||
    def get_text_template(self, name: str) -> str:
        """
        Retrieves text based template for files that do not have a file based template.

        The base implementation always raises; subclasses override this to
        supply templates for their files.

        :param name: name of file to get template for
        :return: template to render
        :raises CoreError: when no text template is defined for the file
        """
        raise CoreError(f"service({self.name}) unknown template({name})")
|
||||
|
||||
    def get_templates(self) -> dict[str, str]:
        """
        Retrieves mapping of file names to templates for all cases, which
        includes custom templates, file templates, and text templates.

        Resolution order per file: custom template, then file-based template,
        then text-based template.

        :return: mapping of files to templates
        :raises ConfigServiceTemplateError: when no template can be found for a file
        """
        templates = {}
        for file in self.files:
            file_path = Path(file)
            template_path = get_template_path(file_path)
            if file in self.custom_templates:
                # custom override set via set_template
                template = self.custom_templates[file]
                template = self.clean_text(template)
            elif self.templates.has_template(template_path):
                # file-based mako template found next to the service module
                template = self.templates.get_template(template_path).source
            else:
                # fall back to a subclass-provided text template
                try:
                    template = self.get_text_template(file)
                except Exception as e:
                    raise ConfigServiceTemplateError(
                        f"node({self.node.name}) service({self.name}) file({file}) "
                        f"failure getting template: {e}"
                    )
                template = self.clean_text(template)
            templates[file] = template
        return templates
|
||||
|
||||
def get_rendered_templates(self) -> dict[str, str]:
|
||||
templates = {}
|
||||
data = self.data()
|
||||
for file in sorted(self.files):
|
||||
rendered = self._get_rendered_template(file, data)
|
||||
templates[file] = rendered
|
||||
return templates
|
||||
|
||||
def _get_rendered_template(self, file: str, data: dict[str, Any]) -> str:
|
||||
file_path = Path(file)
|
||||
template_path = get_template_path(file_path)
|
||||
if file in self.custom_templates:
|
||||
text = self.custom_templates[file]
|
||||
rendered = self.render_text(text, data)
|
||||
elif self.templates.has_template(template_path):
|
||||
rendered = self.render_template(template_path, data)
|
||||
else:
|
||||
try:
|
||||
text = self.get_text_template(file)
|
||||
except Exception as e:
|
||||
raise ConfigServiceTemplateError(
|
||||
f"node({self.node.name}) service({self.name}) file({file}) "
|
||||
f"failure getting template: {e}"
|
||||
)
|
||||
rendered = self.render_text(text, data)
|
||||
return rendered
|
||||
|
||||
def create_files(self) -> None:
|
||||
"""
|
||||
Creates service files inside associated node.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
data = self.data()
|
||||
for file in sorted(self.files):
|
||||
logger.debug(
|
||||
"node(%s) service(%s) template(%s)", self.node.name, self.name, file
|
||||
)
|
||||
rendered = self._get_rendered_template(file, data)
|
||||
file_path = Path(file)
|
||||
self.node.create_file(file_path, rendered)
|
||||
|
||||
def run_startup(self, wait: bool) -> None:
|
||||
"""
|
||||
Run startup commands for service on node.
|
||||
|
||||
:param wait: wait successful command exit status when True, ignore status
|
||||
otherwise
|
||||
:return: nothing
|
||||
:raises ConfigServiceBootError: when a command that waits fails
|
||||
"""
|
||||
for cmd in self.startup:
|
||||
try:
|
||||
self.node.cmd(cmd, wait=wait)
|
||||
except CoreCommandError as e:
|
||||
raise ConfigServiceBootError(
|
||||
f"node({self.node.name}) service({self.name}) failed startup: {e}"
|
||||
)
|
||||
|
||||
def wait_validation(self) -> None:
|
||||
"""
|
||||
Waits for a period of time to consider service started successfully.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
time.sleep(self.validation_timer)
|
||||
|
||||
def run_validation(self) -> None:
|
||||
"""
|
||||
Runs validation commands for service on node.
|
||||
|
||||
:return: nothing
|
||||
:raises ConfigServiceBootError: if there is a validation failure
|
||||
"""
|
||||
start = time.monotonic()
|
||||
cmds = self.validate[:]
|
||||
index = 0
|
||||
while cmds:
|
||||
cmd = cmds[index]
|
||||
try:
|
||||
self.node.cmd(cmd)
|
||||
del cmds[index]
|
||||
index += 1
|
||||
except CoreCommandError:
|
||||
logger.debug(
|
||||
f"node({self.node.name}) service({self.name}) "
|
||||
f"validate command failed: {cmd}"
|
||||
)
|
||||
time.sleep(self.validation_period)
|
||||
|
||||
if cmds and time.monotonic() - start > self.validation_timer:
|
||||
raise ConfigServiceBootError(
|
||||
f"node({self.node.name}) service({self.name}) failed to validate"
|
||||
)
|
||||
|
||||
def _render(self, template: Template, data: dict[str, Any] = None) -> str:
|
||||
"""
|
||||
Renders template providing all associated data to template.
|
||||
|
||||
:param template: template to render
|
||||
:param data: service specific defined data for template
|
||||
:return: rendered template
|
||||
"""
|
||||
if data is None:
|
||||
data = {}
|
||||
return template.render_unicode(
|
||||
node=self.node, config=self.render_config(), **data
|
||||
)
|
||||
|
||||
def render_text(self, text: str, data: dict[str, Any] = None) -> str:
|
||||
"""
|
||||
Renders text based template providing all associated data to template.
|
||||
|
||||
:param text: text to render
|
||||
:param data: service specific defined data for template
|
||||
:return: rendered template
|
||||
"""
|
||||
text = self.clean_text(text)
|
||||
try:
|
||||
template = Template(text)
|
||||
return self._render(template, data)
|
||||
except Exception:
|
||||
raise CoreError(
|
||||
f"node({self.node.name}) service({self.name}) "
|
||||
f"{exceptions.text_error_template().render_unicode()}"
|
||||
)
|
||||
|
||||
def render_template(self, template_path: str, data: dict[str, Any] = None) -> str:
|
||||
"""
|
||||
Renders file based template providing all associated data to template.
|
||||
|
||||
:param template_path: path of file to render
|
||||
:param data: service specific defined data for template
|
||||
:return: rendered template
|
||||
"""
|
||||
try:
|
||||
template = self.templates.get_template(template_path)
|
||||
return self._render(template, data)
|
||||
except Exception:
|
||||
raise CoreError(
|
||||
f"node({self.node.name}) service({self.name}) file({template_path})"
|
||||
f"{exceptions.text_error_template().render_unicode()}"
|
||||
)
|
||||
|
||||
def _define_config(self, configs: list[Configuration]) -> None:
|
||||
"""
|
||||
Initializes default configuration data.
|
||||
|
||||
:param configs: configs to initialize
|
||||
:return: nothing
|
||||
"""
|
||||
for config in configs:
|
||||
self.config[config.id] = config
|
||||
|
||||
def render_config(self) -> dict[str, str]:
|
||||
"""
|
||||
Returns configuration data key/value pairs for rendering a template.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
if self.custom_config:
|
||||
return self.custom_config
|
||||
else:
|
||||
return {k: v.default for k, v in self.config.items()}
|
||||
|
||||
def set_config(self, data: dict[str, str]) -> None:
|
||||
"""
|
||||
Set configuration data from key/value pairs.
|
||||
|
||||
:param data: configuration key/values to set
|
||||
:return: nothing
|
||||
:raise CoreError: when an unknown configuration value is given
|
||||
"""
|
||||
for key, value in data.items():
|
||||
if key not in self.config:
|
||||
raise CoreError(f"unknown config: {key}")
|
||||
self.custom_config[key] = value
|
||||
|
|
@ -1,125 +0,0 @@
|
|||
import logging
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from core.configservice.base import ConfigService
|
||||
|
||||
|
||||
class ConfigServiceDependencies:
|
||||
"""
|
||||
Generates sets of services to start in order of their dependencies.
|
||||
"""
|
||||
|
||||
def __init__(self, services: dict[str, "ConfigService"]) -> None:
|
||||
"""
|
||||
Create a ConfigServiceDependencies instance.
|
||||
|
||||
:param services: services for determining dependency sets
|
||||
"""
|
||||
# helpers to check validity
|
||||
self.dependents: dict[str, set[str]] = {}
|
||||
self.started: set[str] = set()
|
||||
self.node_services: dict[str, "ConfigService"] = {}
|
||||
for service in services.values():
|
||||
self.node_services[service.name] = service
|
||||
for dependency in service.dependencies:
|
||||
dependents = self.dependents.setdefault(dependency, set())
|
||||
dependents.add(service.name)
|
||||
|
||||
# used to find paths
|
||||
self.path: list["ConfigService"] = []
|
||||
self.visited: set[str] = set()
|
||||
self.visiting: set[str] = set()
|
||||
|
||||
def startup_paths(self) -> list[list["ConfigService"]]:
|
||||
"""
|
||||
Find startup path sets based on service dependencies.
|
||||
|
||||
:return: lists of lists of services that can be started in parallel
|
||||
"""
|
||||
paths = []
|
||||
for name in self.node_services:
|
||||
service = self.node_services[name]
|
||||
if service.name in self.started:
|
||||
logger.debug(
|
||||
"skipping service that will already be started: %s", service.name
|
||||
)
|
||||
continue
|
||||
|
||||
path = self._start(service)
|
||||
if path:
|
||||
paths.append(path)
|
||||
|
||||
if self.started != set(self.node_services):
|
||||
raise ValueError(
|
||||
f"failure to start all services: {self.started} != "
|
||||
f"{self.node_services.keys()}"
|
||||
)
|
||||
|
||||
return paths
|
||||
|
||||
def _reset(self) -> None:
|
||||
"""
|
||||
Clear out metadata used for finding service dependency sets.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
self.path = []
|
||||
self.visited.clear()
|
||||
self.visiting.clear()
|
||||
|
||||
def _start(self, service: "ConfigService") -> list["ConfigService"]:
|
||||
"""
|
||||
Starts a oath for checking dependencies for a given service.
|
||||
|
||||
:param service: service to check dependencies for
|
||||
:return: list of config services to start in order
|
||||
"""
|
||||
logger.debug("starting service dependency check: %s", service.name)
|
||||
self._reset()
|
||||
return self._visit(service)
|
||||
|
||||
def _visit(self, current_service: "ConfigService") -> list["ConfigService"]:
|
||||
"""
|
||||
Visits a service when discovering dependency chains for service.
|
||||
|
||||
:param current_service: service being visited
|
||||
:return: list of dependent services for a visited service
|
||||
"""
|
||||
logger.debug("visiting service(%s): %s", current_service.name, self.path)
|
||||
self.visited.add(current_service.name)
|
||||
self.visiting.add(current_service.name)
|
||||
|
||||
# dive down
|
||||
for service_name in current_service.dependencies:
|
||||
if service_name not in self.node_services:
|
||||
raise ValueError(
|
||||
"required dependency was not included in node "
|
||||
f"services: {service_name}"
|
||||
)
|
||||
|
||||
if service_name in self.visiting:
|
||||
raise ValueError(
|
||||
f"cyclic dependency at service({current_service.name}): "
|
||||
f"{service_name}"
|
||||
)
|
||||
|
||||
if service_name not in self.visited:
|
||||
service = self.node_services[service_name]
|
||||
self._visit(service)
|
||||
|
||||
# add service when bottom is found
|
||||
logger.debug("adding service to startup path: %s", current_service.name)
|
||||
self.started.add(current_service.name)
|
||||
self.path.append(current_service)
|
||||
self.visiting.remove(current_service.name)
|
||||
|
||||
# rise back up
|
||||
for service_name in self.dependents.get(current_service.name, []):
|
||||
if service_name not in self.visited:
|
||||
service = self.node_services[service_name]
|
||||
self._visit(service)
|
||||
|
||||
return self.path
|
||||
|
|
@ -1,103 +0,0 @@
|
|||
import logging
|
||||
import pathlib
|
||||
import pkgutil
|
||||
from pathlib import Path
|
||||
|
||||
from core import configservices, utils
|
||||
from core.configservice.base import ConfigService
|
||||
from core.errors import CoreError
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ConfigServiceManager:
|
||||
"""
|
||||
Manager for configurable services.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
"""
|
||||
Create a ConfigServiceManager instance.
|
||||
"""
|
||||
self.services: dict[str, type[ConfigService]] = {}
|
||||
|
||||
def get_service(self, name: str) -> type[ConfigService]:
|
||||
"""
|
||||
Retrieve a service by name.
|
||||
|
||||
:param name: name of service
|
||||
:return: service class
|
||||
:raises CoreError: when service is not found
|
||||
"""
|
||||
service_class = self.services.get(name)
|
||||
if service_class is None:
|
||||
raise CoreError(f"service does not exist {name}")
|
||||
return service_class
|
||||
|
||||
def add(self, service: type[ConfigService]) -> None:
|
||||
"""
|
||||
Add service to manager, checking service requirements have been met.
|
||||
|
||||
:param service: service to add to manager
|
||||
:return: nothing
|
||||
:raises CoreError: when service is a duplicate or has unmet executables
|
||||
"""
|
||||
name = service.name
|
||||
logger.debug(
|
||||
"loading service: class(%s) name(%s)", service.__class__.__name__, name
|
||||
)
|
||||
|
||||
# avoid duplicate services
|
||||
if name in self.services:
|
||||
raise CoreError(f"duplicate service being added: {name}")
|
||||
|
||||
# validate dependent executables are present
|
||||
for executable in service.executables:
|
||||
try:
|
||||
utils.which(executable, required=True)
|
||||
except CoreError as e:
|
||||
raise CoreError(f"config service({service.name}): {e}")
|
||||
|
||||
# make service available
|
||||
self.services[name] = service
|
||||
|
||||
def load_locals(self) -> list[str]:
|
||||
"""
|
||||
Search and add config service from local core module.
|
||||
|
||||
:return: list of errors when loading services
|
||||
"""
|
||||
errors = []
|
||||
for module_info in pkgutil.walk_packages(
|
||||
configservices.__path__, f"{configservices.__name__}."
|
||||
):
|
||||
services = utils.load_module(module_info.name, ConfigService)
|
||||
for service in services:
|
||||
try:
|
||||
self.add(service)
|
||||
except CoreError as e:
|
||||
errors.append(service.name)
|
||||
logger.debug("not loading config service(%s): %s", service.name, e)
|
||||
return errors
|
||||
|
||||
def load(self, path: Path) -> list[str]:
|
||||
"""
|
||||
Search path provided for config services and add them for being managed.
|
||||
|
||||
:param path: path to search configurable services
|
||||
:return: list errors when loading services
|
||||
"""
|
||||
path = pathlib.Path(path)
|
||||
subdirs = [x for x in path.iterdir() if x.is_dir()]
|
||||
subdirs.append(path)
|
||||
service_errors = []
|
||||
for subdir in subdirs:
|
||||
logger.debug("loading config services from: %s", subdir)
|
||||
services = utils.load_classes(subdir, ConfigService)
|
||||
for service in services:
|
||||
try:
|
||||
self.add(service)
|
||||
except CoreError as e:
|
||||
service_errors.append(service.name)
|
||||
logger.debug("not loading service(%s): %s", service.name, e)
|
||||
return service_errors
|
||||
|
|
@ -1,420 +0,0 @@
|
|||
import abc
|
||||
from typing import Any
|
||||
|
||||
from core.config import Configuration
|
||||
from core.configservice.base import ConfigService, ConfigServiceMode
|
||||
from core.emane.nodes import EmaneNet
|
||||
from core.nodes.base import CoreNodeBase, NodeBase
|
||||
from core.nodes.interface import DEFAULT_MTU, CoreInterface
|
||||
from core.nodes.network import PtpNet, WlanNode
|
||||
from core.nodes.physical import Rj45Node
|
||||
from core.nodes.wireless import WirelessNode
|
||||
|
||||
GROUP: str = "FRR"
|
||||
FRR_STATE_DIR: str = "/var/run/frr"
|
||||
|
||||
|
||||
def is_wireless(node: NodeBase) -> bool:
|
||||
"""
|
||||
Check if the node is a wireless type node.
|
||||
|
||||
:param node: node to check type for
|
||||
:return: True if wireless type, False otherwise
|
||||
"""
|
||||
return isinstance(node, (WlanNode, EmaneNet, WirelessNode))
|
||||
|
||||
|
||||
def has_mtu_mismatch(iface: CoreInterface) -> bool:
|
||||
"""
|
||||
Helper to detect MTU mismatch and add the appropriate FRR
|
||||
mtu-ignore command. This is needed when e.g. a node is linked via a
|
||||
GreTap device.
|
||||
"""
|
||||
if iface.mtu != DEFAULT_MTU:
|
||||
return True
|
||||
if not iface.net:
|
||||
return False
|
||||
for iface in iface.net.get_ifaces():
|
||||
if iface.mtu != iface.mtu:
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def get_min_mtu(iface: CoreInterface) -> int:
|
||||
"""
|
||||
Helper to discover the minimum MTU of interfaces linked with the
|
||||
given interface.
|
||||
"""
|
||||
mtu = iface.mtu
|
||||
if not iface.net:
|
||||
return mtu
|
||||
for iface in iface.net.get_ifaces():
|
||||
if iface.mtu < mtu:
|
||||
mtu = iface.mtu
|
||||
return mtu
|
||||
|
||||
|
||||
def get_router_id(node: CoreNodeBase) -> str:
|
||||
"""
|
||||
Helper to return the first IPv4 address of a node as its router ID.
|
||||
"""
|
||||
for iface in node.get_ifaces(control=False):
|
||||
ip4 = iface.get_ip4()
|
||||
if ip4:
|
||||
return str(ip4.ip)
|
||||
return "0.0.0.0"
|
||||
|
||||
|
||||
def rj45_check(iface: CoreInterface) -> bool:
|
||||
"""
|
||||
Helper to detect whether interface is connected an external RJ45
|
||||
link.
|
||||
"""
|
||||
if iface.net:
|
||||
for peer_iface in iface.net.get_ifaces():
|
||||
if peer_iface == iface:
|
||||
continue
|
||||
if isinstance(peer_iface.node, Rj45Node):
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
class FRRZebra(ConfigService):
|
||||
name: str = "FRRzebra"
|
||||
group: str = GROUP
|
||||
directories: list[str] = ["/usr/local/etc/frr", "/var/run/frr", "/var/log/frr"]
|
||||
files: list[str] = [
|
||||
"/usr/local/etc/frr/frr.conf",
|
||||
"frrboot.sh",
|
||||
"/usr/local/etc/frr/vtysh.conf",
|
||||
"/usr/local/etc/frr/daemons",
|
||||
]
|
||||
executables: list[str] = ["zebra"]
|
||||
dependencies: list[str] = []
|
||||
startup: list[str] = ["bash frrboot.sh zebra"]
|
||||
validate: list[str] = ["pidof zebra"]
|
||||
shutdown: list[str] = ["killall zebra"]
|
||||
validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING
|
||||
default_configs: list[Configuration] = []
|
||||
modes: dict[str, dict[str, str]] = {}
|
||||
|
||||
def data(self) -> dict[str, Any]:
|
||||
frr_conf = self.files[0]
|
||||
frr_bin_search = self.node.session.options.get(
|
||||
"frr_bin_search", default="/usr/local/bin /usr/bin /usr/lib/frr"
|
||||
).strip('"')
|
||||
frr_sbin_search = self.node.session.options.get(
|
||||
"frr_sbin_search",
|
||||
default="/usr/local/sbin /usr/sbin /usr/lib/frr /usr/libexec/frr",
|
||||
).strip('"')
|
||||
|
||||
services = []
|
||||
want_ip4 = False
|
||||
want_ip6 = False
|
||||
for service in self.node.config_services.values():
|
||||
if self.name not in service.dependencies:
|
||||
continue
|
||||
if not isinstance(service, FrrService):
|
||||
continue
|
||||
if service.ipv4_routing:
|
||||
want_ip4 = True
|
||||
if service.ipv6_routing:
|
||||
want_ip6 = True
|
||||
services.append(service)
|
||||
|
||||
ifaces = []
|
||||
for iface in self.node.get_ifaces():
|
||||
ip4s = []
|
||||
ip6s = []
|
||||
for ip4 in iface.ip4s:
|
||||
ip4s.append(str(ip4.ip))
|
||||
for ip6 in iface.ip6s:
|
||||
ip6s.append(str(ip6.ip))
|
||||
ifaces.append((iface, ip4s, ip6s, iface.control))
|
||||
|
||||
return dict(
|
||||
frr_conf=frr_conf,
|
||||
frr_sbin_search=frr_sbin_search,
|
||||
frr_bin_search=frr_bin_search,
|
||||
frr_state_dir=FRR_STATE_DIR,
|
||||
ifaces=ifaces,
|
||||
want_ip4=want_ip4,
|
||||
want_ip6=want_ip6,
|
||||
services=services,
|
||||
)
|
||||
|
||||
|
||||
class FrrService(abc.ABC):
|
||||
group: str = GROUP
|
||||
directories: list[str] = []
|
||||
files: list[str] = []
|
||||
executables: list[str] = []
|
||||
dependencies: list[str] = ["FRRzebra"]
|
||||
startup: list[str] = []
|
||||
validate: list[str] = []
|
||||
shutdown: list[str] = []
|
||||
validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING
|
||||
default_configs: list[Configuration] = []
|
||||
modes: dict[str, dict[str, str]] = {}
|
||||
ipv4_routing: bool = False
|
||||
ipv6_routing: bool = False
|
||||
|
||||
@abc.abstractmethod
|
||||
def frr_iface_config(self, iface: CoreInterface) -> str:
|
||||
raise NotImplementedError
|
||||
|
||||
@abc.abstractmethod
|
||||
def frr_config(self) -> str:
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
class FRROspfv2(FrrService, ConfigService):
|
||||
"""
|
||||
The OSPFv2 service provides IPv4 routing for wired networks. It does
|
||||
not build its own configuration file but has hooks for adding to the
|
||||
unified frr.conf file.
|
||||
"""
|
||||
|
||||
name: str = "FRROSPFv2"
|
||||
shutdown: list[str] = ["killall ospfd"]
|
||||
validate: list[str] = ["pidof ospfd"]
|
||||
ipv4_routing: bool = True
|
||||
|
||||
def frr_config(self) -> str:
|
||||
router_id = get_router_id(self.node)
|
||||
addresses = []
|
||||
for iface in self.node.get_ifaces(control=False):
|
||||
for ip4 in iface.ip4s:
|
||||
addresses.append(str(ip4))
|
||||
data = dict(router_id=router_id, addresses=addresses)
|
||||
text = """
|
||||
router ospf
|
||||
router-id ${router_id}
|
||||
% for addr in addresses:
|
||||
network ${addr} area 0
|
||||
% endfor
|
||||
ospf opaque-lsa
|
||||
!
|
||||
"""
|
||||
return self.render_text(text, data)
|
||||
|
||||
def frr_iface_config(self, iface: CoreInterface) -> str:
|
||||
has_mtu = has_mtu_mismatch(iface)
|
||||
has_rj45 = rj45_check(iface)
|
||||
is_ptp = isinstance(iface.net, PtpNet)
|
||||
data = dict(has_mtu=has_mtu, is_ptp=is_ptp, has_rj45=has_rj45)
|
||||
text = """
|
||||
% if has_mtu:
|
||||
ip ospf mtu-ignore
|
||||
% endif
|
||||
% if has_rj45:
|
||||
<% return STOP_RENDERING %>
|
||||
% endif
|
||||
% if is_ptp:
|
||||
ip ospf network point-to-point
|
||||
% endif
|
||||
ip ospf hello-interval 2
|
||||
ip ospf dead-interval 6
|
||||
ip ospf retransmit-interval 5
|
||||
"""
|
||||
return self.render_text(text, data)
|
||||
|
||||
|
||||
class FRROspfv3(FrrService, ConfigService):
|
||||
"""
|
||||
The OSPFv3 service provides IPv6 routing for wired networks. It does
|
||||
not build its own configuration file but has hooks for adding to the
|
||||
unified frr.conf file.
|
||||
"""
|
||||
|
||||
name: str = "FRROSPFv3"
|
||||
shutdown: list[str] = ["killall ospf6d"]
|
||||
validate: list[str] = ["pidof ospf6d"]
|
||||
ipv4_routing: bool = True
|
||||
ipv6_routing: bool = True
|
||||
|
||||
def frr_config(self) -> str:
|
||||
router_id = get_router_id(self.node)
|
||||
ifnames = []
|
||||
for iface in self.node.get_ifaces(control=False):
|
||||
ifnames.append(iface.name)
|
||||
data = dict(router_id=router_id, ifnames=ifnames)
|
||||
text = """
|
||||
router ospf6
|
||||
router-id ${router_id}
|
||||
% for ifname in ifnames:
|
||||
interface ${ifname} area 0.0.0.0
|
||||
% endfor
|
||||
!
|
||||
"""
|
||||
return self.render_text(text, data)
|
||||
|
||||
def frr_iface_config(self, iface: CoreInterface) -> str:
|
||||
mtu = get_min_mtu(iface)
|
||||
if mtu < iface.mtu:
|
||||
return f"ipv6 ospf6 ifmtu {mtu}"
|
||||
else:
|
||||
return ""
|
||||
|
||||
|
||||
class FRRBgp(FrrService, ConfigService):
|
||||
"""
|
||||
The BGP service provides interdomain routing.
|
||||
Peers must be manually configured, with a full mesh for those
|
||||
having the same AS number.
|
||||
"""
|
||||
|
||||
name: str = "FRRBGP"
|
||||
shutdown: list[str] = ["killall bgpd"]
|
||||
validate: list[str] = ["pidof bgpd"]
|
||||
custom_needed: bool = True
|
||||
ipv4_routing: bool = True
|
||||
ipv6_routing: bool = True
|
||||
|
||||
def frr_config(self) -> str:
|
||||
router_id = get_router_id(self.node)
|
||||
text = f"""
|
||||
! BGP configuration
|
||||
! You should configure the AS number below
|
||||
! along with this router's peers.
|
||||
router bgp {self.node.id}
|
||||
bgp router-id {router_id}
|
||||
redistribute connected
|
||||
!neighbor 1.2.3.4 remote-as 555
|
||||
!
|
||||
"""
|
||||
return self.clean_text(text)
|
||||
|
||||
def frr_iface_config(self, iface: CoreInterface) -> str:
|
||||
return ""
|
||||
|
||||
|
||||
class FRRRip(FrrService, ConfigService):
|
||||
"""
|
||||
The RIP service provides IPv4 routing for wired networks.
|
||||
"""
|
||||
|
||||
name: str = "FRRRIP"
|
||||
shutdown: list[str] = ["killall ripd"]
|
||||
validate: list[str] = ["pidof ripd"]
|
||||
ipv4_routing: bool = True
|
||||
|
||||
def frr_config(self) -> str:
|
||||
text = """
|
||||
router rip
|
||||
redistribute static
|
||||
redistribute connected
|
||||
redistribute ospf
|
||||
network 0.0.0.0/0
|
||||
!
|
||||
"""
|
||||
return self.clean_text(text)
|
||||
|
||||
def frr_iface_config(self, iface: CoreInterface) -> str:
|
||||
return ""
|
||||
|
||||
|
||||
class FRRRipng(FrrService, ConfigService):
|
||||
"""
|
||||
The RIP NG service provides IPv6 routing for wired networks.
|
||||
"""
|
||||
|
||||
name: str = "FRRRIPNG"
|
||||
shutdown: list[str] = ["killall ripngd"]
|
||||
validate: list[str] = ["pidof ripngd"]
|
||||
ipv6_routing: bool = True
|
||||
|
||||
def frr_config(self) -> str:
|
||||
text = """
|
||||
router ripng
|
||||
redistribute static
|
||||
redistribute connected
|
||||
redistribute ospf6
|
||||
network ::/0
|
||||
!
|
||||
"""
|
||||
return self.clean_text(text)
|
||||
|
||||
def frr_iface_config(self, iface: CoreInterface) -> str:
|
||||
return ""
|
||||
|
||||
|
||||
class FRRBabel(FrrService, ConfigService):
|
||||
"""
|
||||
The Babel service provides a loop-avoiding distance-vector routing
|
||||
protocol for IPv6 and IPv4 with fast convergence properties.
|
||||
"""
|
||||
|
||||
name: str = "FRRBabel"
|
||||
shutdown: list[str] = ["killall babeld"]
|
||||
validate: list[str] = ["pidof babeld"]
|
||||
ipv6_routing: bool = True
|
||||
|
||||
def frr_config(self) -> str:
|
||||
ifnames = []
|
||||
for iface in self.node.get_ifaces(control=False):
|
||||
ifnames.append(iface.name)
|
||||
text = """
|
||||
router babel
|
||||
% for ifname in ifnames:
|
||||
network ${ifname}
|
||||
% endfor
|
||||
redistribute static
|
||||
redistribute ipv4 connected
|
||||
!
|
||||
"""
|
||||
data = dict(ifnames=ifnames)
|
||||
return self.render_text(text, data)
|
||||
|
||||
def frr_iface_config(self, iface: CoreInterface) -> str:
|
||||
if is_wireless(iface.net):
|
||||
text = """
|
||||
babel wireless
|
||||
no babel split-horizon
|
||||
"""
|
||||
else:
|
||||
text = """
|
||||
babel wired
|
||||
babel split-horizon
|
||||
"""
|
||||
return self.clean_text(text)
|
||||
|
||||
|
||||
class FRRpimd(FrrService, ConfigService):
|
||||
"""
|
||||
PIM multicast routing based on XORP.
|
||||
"""
|
||||
|
||||
name: str = "FRRpimd"
|
||||
shutdown: list[str] = ["killall pimd"]
|
||||
validate: list[str] = ["pidof pimd"]
|
||||
ipv4_routing: bool = True
|
||||
|
||||
def frr_config(self) -> str:
|
||||
ifname = "eth0"
|
||||
for iface in self.node.get_ifaces():
|
||||
if iface.name != "lo":
|
||||
ifname = iface.name
|
||||
break
|
||||
|
||||
text = f"""
|
||||
router mfea
|
||||
!
|
||||
router igmp
|
||||
!
|
||||
router pim
|
||||
!ip pim rp-address 10.0.0.1
|
||||
ip pim bsr-candidate {ifname}
|
||||
ip pim rp-candidate {ifname}
|
||||
!ip pim spt-threshold interval 10 bytes 80000
|
||||
!
|
||||
"""
|
||||
return self.clean_text(text)
|
||||
|
||||
def frr_iface_config(self, iface: CoreInterface) -> str:
|
||||
text = """
|
||||
ip mfea
|
||||
ip igmp
|
||||
ip pim
|
||||
"""
|
||||
return self.clean_text(text)
|
||||
|
|
@ -1,109 +0,0 @@
|
|||
#!/bin/sh
|
||||
# auto-generated by zebra service (frr.py)
|
||||
FRR_CONF="${frr_conf}"
|
||||
FRR_SBIN_SEARCH="${frr_sbin_search}"
|
||||
FRR_BIN_SEARCH="${frr_bin_search}"
|
||||
FRR_STATE_DIR="${frr_state_dir}"
|
||||
|
||||
searchforprog()
|
||||
{
|
||||
prog=$1
|
||||
searchpath=$@
|
||||
ret=
|
||||
for p in $searchpath; do
|
||||
if [ -x $p/$prog ]; then
|
||||
ret=$p
|
||||
break
|
||||
fi
|
||||
done
|
||||
echo $ret
|
||||
}
|
||||
|
||||
confcheck()
|
||||
{
|
||||
CONF_DIR=`dirname $FRR_CONF`
|
||||
# if /etc/frr exists, point /etc/frr/frr.conf -> CONF_DIR
|
||||
if [ "$CONF_DIR" != "/etc/frr" ] && [ -d /etc/frr ] && [ ! -e /etc/frr/frr.conf ]; then
|
||||
ln -s $CONF_DIR/frr.conf /etc/frr/frr.conf
|
||||
fi
|
||||
# if /etc/frr exists, point /etc/frr/vtysh.conf -> CONF_DIR
|
||||
if [ "$CONF_DIR" != "/etc/frr" ] && [ -d /etc/frr ] && [ ! -e /etc/frr/vtysh.conf ]; then
|
||||
ln -s $CONF_DIR/vtysh.conf /etc/frr/vtysh.conf
|
||||
fi
|
||||
}
|
||||
|
||||
bootdaemon()
|
||||
{
|
||||
FRR_SBIN_DIR=$(searchforprog $1 $FRR_SBIN_SEARCH)
|
||||
if [ "z$FRR_SBIN_DIR" = "z" ]; then
|
||||
echo "ERROR: FRR's '$1' daemon not found in search path:"
|
||||
echo " $FRR_SBIN_SEARCH"
|
||||
return 1
|
||||
fi
|
||||
|
||||
flags=""
|
||||
|
||||
if [ "$1" = "pimd" ] && \\
|
||||
grep -E -q '^[[:space:]]*router[[:space:]]+pim6[[:space:]]*$' $FRR_CONF; then
|
||||
flags="$flags -6"
|
||||
fi
|
||||
|
||||
if [ "$1" = "ospfd" ]; then
|
||||
flags="$flags --apiserver"
|
||||
fi
|
||||
|
||||
#force FRR to use CORE generated conf file
|
||||
flags="$flags -d -f $FRR_CONF"
|
||||
$FRR_SBIN_DIR/$1 $flags
|
||||
|
||||
if [ "$?" != "0" ]; then
|
||||
echo "ERROR: FRR's '$1' daemon failed to start!:"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
bootfrr()
|
||||
{
|
||||
FRR_BIN_DIR=$(searchforprog 'vtysh' $FRR_BIN_SEARCH)
|
||||
if [ "z$FRR_BIN_DIR" = "z" ]; then
|
||||
echo "ERROR: FRR's 'vtysh' program not found in search path:"
|
||||
echo " $FRR_BIN_SEARCH"
|
||||
return 1
|
||||
fi
|
||||
|
||||
# fix /var/run/frr permissions
|
||||
id -u frr 2>/dev/null >/dev/null
|
||||
if [ "$?" = "0" ]; then
|
||||
chown frr $FRR_STATE_DIR
|
||||
fi
|
||||
|
||||
bootdaemon "zebra"
|
||||
if grep -q "^ip route " $FRR_CONF; then
|
||||
bootdaemon "staticd"
|
||||
fi
|
||||
for r in rip ripng ospf6 ospf bgp babel; do
|
||||
if grep -q "^router \\<$${}{r}\\>" $FRR_CONF; then
|
||||
bootdaemon "$${}{r}d"
|
||||
fi
|
||||
done
|
||||
|
||||
if grep -E -q '^[[:space:]]*router[[:space:]]+pim6?[[:space:]]*$' $FRR_CONF; then
|
||||
bootdaemon "pimd"
|
||||
fi
|
||||
|
||||
$FRR_BIN_DIR/vtysh -b
|
||||
}
|
||||
|
||||
if [ "$1" != "zebra" ]; then
|
||||
echo "WARNING: '$1': all FRR daemons are launched by the 'zebra' service!"
|
||||
exit 1
|
||||
fi
|
||||
confcheck
|
||||
bootfrr
|
||||
|
||||
# reset interfaces
|
||||
% for iface, _, _ , _ in ifaces:
|
||||
ip link set dev ${iface.name} down
|
||||
sleep 1
|
||||
ip link set dev ${iface.name} up
|
||||
% endfor
|
||||
|
|
@ -1,60 +0,0 @@
|
|||
#
|
||||
# When activation a daemon at the first time, a config file, even if it is
|
||||
# empty, has to be present *and* be owned by the user and group "frr", else
|
||||
# the daemon will not be started by /etc/init.d/frr. The permissions should
|
||||
# be u=rw,g=r,o=.
|
||||
# When using "vtysh" such a config file is also needed. It should be owned by
|
||||
# group "frrvty" and set to ug=rw,o= though. Check /etc/pam.d/frr, too.
|
||||
#
|
||||
# The watchfrr and zebra daemons are always started.
|
||||
#
|
||||
bgpd=yes
|
||||
ospfd=yes
|
||||
ospf6d=yes
|
||||
ripd=yes
|
||||
ripngd=yes
|
||||
isisd=yes
|
||||
pimd=yes
|
||||
ldpd=yes
|
||||
nhrpd=yes
|
||||
eigrpd=yes
|
||||
babeld=yes
|
||||
sharpd=yes
|
||||
staticd=yes
|
||||
pbrd=yes
|
||||
bfdd=yes
|
||||
fabricd=yes
|
||||
|
||||
#
|
||||
# If this option is set the /etc/init.d/frr script automatically loads
|
||||
# the config via "vtysh -b" when the servers are started.
|
||||
# Check /etc/pam.d/frr if you intend to use "vtysh"!
|
||||
#
|
||||
vtysh_enable=yes
|
||||
zebra_options=" -A 127.0.0.1 -s 90000000"
|
||||
bgpd_options=" -A 127.0.0.1"
|
||||
ospfd_options=" -A 127.0.0.1"
|
||||
ospf6d_options=" -A ::1"
|
||||
ripd_options=" -A 127.0.0.1"
|
||||
ripngd_options=" -A ::1"
|
||||
isisd_options=" -A 127.0.0.1"
|
||||
pimd_options=" -A 127.0.0.1"
|
||||
ldpd_options=" -A 127.0.0.1"
|
||||
nhrpd_options=" -A 127.0.0.1"
|
||||
eigrpd_options=" -A 127.0.0.1"
|
||||
babeld_options=" -A 127.0.0.1"
|
||||
sharpd_options=" -A 127.0.0.1"
|
||||
pbrd_options=" -A 127.0.0.1"
|
||||
staticd_options="-A 127.0.0.1"
|
||||
bfdd_options=" -A 127.0.0.1"
|
||||
fabricd_options="-A 127.0.0.1"
|
||||
|
||||
# The list of daemons to watch is automatically generated by the init script.
|
||||
#watchfrr_options=""
|
||||
|
||||
# for debugging purposes, you can specify a "wrap" command to start instead
|
||||
# of starting the daemon directly, e.g. to use valgrind on ospfd:
|
||||
# ospfd_wrap="/usr/bin/valgrind"
|
||||
# or you can use "all_wrap" for all daemons, e.g. to use perf record:
|
||||
# all_wrap="/usr/bin/perf record --call-graph -"
|
||||
# the normal daemon command is added to this at the end.
|
||||
|
|
@ -1,25 +0,0 @@
|
|||
% for iface, ip4s, ip6s, is_control in ifaces:
|
||||
interface ${iface.name}
|
||||
% if want_ip4:
|
||||
% for addr in ip4s:
|
||||
ip address ${addr}
|
||||
% endfor
|
||||
% endif
|
||||
% if want_ip6:
|
||||
% for addr in ip6s:
|
||||
ipv6 address ${addr}
|
||||
% endfor
|
||||
% endif
|
||||
% if not is_control:
|
||||
% for service in services:
|
||||
% for line in service.frr_iface_config(iface).split("\n"):
|
||||
${line}
|
||||
% endfor
|
||||
% endfor
|
||||
% endif
|
||||
!
|
||||
% endfor
|
||||
|
||||
% for service in services:
|
||||
${service.frr_config()}
|
||||
% endfor
|
||||
|
|
@ -1 +0,0 @@
|
|||
service integrated-vtysh-config
|
||||
|
|
@ -1,164 +0,0 @@
|
|||
from typing import Any
|
||||
|
||||
from core import utils
|
||||
from core.config import Configuration
|
||||
from core.configservice.base import ConfigService, ConfigServiceMode
|
||||
|
||||
GROUP: str = "ProtoSvc"
|
||||
|
||||
|
||||
class MgenSinkService(ConfigService):
    """Config service that runs an MGEN traffic sink on the node."""

    name: str = "MGEN_Sink"
    group: str = GROUP
    directories: list[str] = []
    files: list[str] = ["mgensink.sh", "sink.mgen"]
    executables: list[str] = ["mgen"]
    dependencies: list[str] = []
    startup: list[str] = ["bash mgensink.sh"]
    validate: list[str] = ["pidof mgen"]
    shutdown: list[str] = ["killall mgen"]
    validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING
    default_configs: list[Configuration] = []
    modes: dict[str, dict[str, str]] = {}

    def data(self) -> dict[str, Any]:
        """Provide template data: sysctl-safe device names for all interfaces."""
        ifnames = [
            utils.sysctl_devname(iface.name) for iface in self.node.get_ifaces()
        ]
        return {"ifnames": ifnames}
|
||||
|
||||
|
||||
class NrlNhdp(ConfigService):
    """Config service for the NRL NeighborHood Discovery Protocol daemon."""

    name: str = "NHDP"
    group: str = GROUP
    directories: list[str] = []
    files: list[str] = ["nrlnhdp.sh"]
    executables: list[str] = ["nrlnhdp"]
    dependencies: list[str] = []
    startup: list[str] = ["bash nrlnhdp.sh"]
    validate: list[str] = ["pidof nrlnhdp"]
    shutdown: list[str] = ["killall nrlnhdp"]
    validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING
    default_configs: list[Configuration] = []
    modes: dict[str, dict[str, str]] = {}

    def data(self) -> dict[str, Any]:
        """Provide template data: SMF presence and non-control interface names."""
        ifnames = [iface.name for iface in self.node.get_ifaces(control=False)]
        # SMF integration is enabled when the node also runs the SMF service.
        return {"has_smf": "SMF" in self.node.config_services, "ifnames": ifnames}
|
||||
|
||||
|
||||
class NrlSmf(ConfigService):
    """Config service for NRL Simplified Multicast Forwarding (nrlsmf)."""

    name: str = "SMF"
    group: str = GROUP
    directories: list[str] = []
    files: list[str] = ["startsmf.sh"]
    executables: list[str] = ["nrlsmf", "killall"]
    dependencies: list[str] = []
    startup: list[str] = ["bash startsmf.sh"]
    validate: list[str] = ["pidof nrlsmf"]
    shutdown: list[str] = ["killall nrlsmf"]
    validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING
    default_configs: list[Configuration] = []
    modes: dict[str, dict[str, str]] = {}

    def data(self) -> dict[str, Any]:
        """Provide template data: companion services, interfaces, IPv4 prefix."""
        # The template's flooding mode depends on which companion routing
        # services run on the node.
        has_nhdp = "NHDP" in self.node.config_services
        has_olsr = "OLSR" in self.node.config_services
        ifnames = []
        ip4_prefix = None
        # NOTE(review): the scan stops at the first interface that has an
        # IPv4 address, so ifnames only covers interfaces seen up to then —
        # preserved as-is; confirm whether all names should be collected.
        for iface in self.node.get_ifaces(control=False):
            ifnames.append(iface.name)
            ip4 = iface.get_ip4()
            if ip4:
                ip4_prefix = f"{ip4.ip}/{24}"
                break
        return {
            "has_nhdp": has_nhdp,
            "has_olsr": has_olsr,
            "ifnames": ifnames,
            "ip4_prefix": ip4_prefix,
        }
|
||||
|
||||
|
||||
class NrlOlsr(ConfigService):
    """Config service for the NRL OLSR routing daemon (nrlolsrd)."""

    name: str = "OLSR"
    group: str = GROUP
    directories: list[str] = []
    files: list[str] = ["nrlolsrd.sh"]
    executables: list[str] = ["nrlolsrd"]
    dependencies: list[str] = []
    startup: list[str] = ["bash nrlolsrd.sh"]
    validate: list[str] = ["pidof nrlolsrd"]
    shutdown: list[str] = ["killall nrlolsrd"]
    validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING
    default_configs: list[Configuration] = []
    modes: dict[str, dict[str, str]] = {}

    def data(self) -> dict[str, Any]:
        """Provide template data: SMF/zebra presence and first interface name."""
        # Only the first non-control interface is handed to nrlolsrd.
        ifname = next(
            (iface.name for iface in self.node.get_ifaces(control=False)), None
        )
        return {
            "has_smf": "SMF" in self.node.config_services,
            "has_zebra": "zebra" in self.node.config_services,
            "ifname": ifname,
        }
|
||||
|
||||
|
||||
class NrlOlsrv2(ConfigService):
    """Config service for the NRL OLSRv2 routing daemon (nrlolsrv2)."""

    name: str = "OLSRv2"
    group: str = GROUP
    directories: list[str] = []
    files: list[str] = ["nrlolsrv2.sh"]
    executables: list[str] = ["nrlolsrv2"]
    dependencies: list[str] = []
    startup: list[str] = ["bash nrlolsrv2.sh"]
    validate: list[str] = ["pidof nrlolsrv2"]
    shutdown: list[str] = ["killall nrlolsrv2"]
    validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING
    default_configs: list[Configuration] = []
    modes: dict[str, dict[str, str]] = {}

    def data(self) -> dict[str, Any]:
        """Provide template data: SMF presence and non-control interface names."""
        ifnames = [iface.name for iface in self.node.get_ifaces(control=False)]
        return {"has_smf": "SMF" in self.node.config_services, "ifnames": ifnames}
|
||||
|
||||
|
||||
class OlsrOrg(ConfigService):
    """Config service for the OLSR.org routing daemon (olsrd)."""

    name: str = "OLSRORG"
    group: str = GROUP
    directories: list[str] = ["/etc/olsrd"]
    files: list[str] = ["olsrd.sh", "/etc/olsrd/olsrd.conf"]
    executables: list[str] = ["olsrd"]
    dependencies: list[str] = []
    startup: list[str] = ["bash olsrd.sh"]
    validate: list[str] = ["pidof olsrd"]
    shutdown: list[str] = ["killall olsrd"]
    validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING
    default_configs: list[Configuration] = []
    modes: dict[str, dict[str, str]] = {}

    def data(self) -> dict[str, Any]:
        """Provide template data: SMF presence and non-control interface names."""
        ifnames = [iface.name for iface in self.node.get_ifaces(control=False)]
        return {"has_smf": "SMF" in self.node.config_services, "ifnames": ifnames}
|
||||
|
||||
|
||||
class MgenActor(ConfigService):
    """Config service that starts the mgenBasicActor.py wrapper around MGEN."""

    # registered service name and display group
    name: str = "MgenActor"
    group: str = GROUP
    # no private directories; a single generated startup script
    directories: list[str] = []
    files: list[str] = ["start_mgen_actor.sh"]
    executables: list[str] = ["mgen"]
    dependencies: list[str] = []
    startup: list[str] = ["bash start_mgen_actor.sh"]
    # the actor runs under the mgen process, so validate/stop via mgen
    validate: list[str] = ["pidof mgen"]
    shutdown: list[str] = ["killall mgen"]
    # validation runs synchronously right after startup
    validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING
    default_configs: list[Configuration] = []
    modes: dict[str, dict[str, str]] = {}
|
||||
|
|
@ -1,312 +0,0 @@
|
|||
#
|
||||
# OLSR.org routing daemon config file
|
||||
# This file contains the usual options for an ETX based
|
||||
# stationary network without fisheye
|
||||
# (for other options see olsrd.conf.default.full)
|
||||
#
|
||||
# Lines starting with a # are discarded
|
||||
#
|
||||
|
||||
#### ATTENTION for IPv6 users ####
|
||||
# Because of limitations in the parser IPv6 addresses must NOT
|
||||
# begin with a ":", so please add a "0" as a prefix.
|
||||
|
||||
###########################
|
||||
### Basic configuration ###
|
||||
###########################
|
||||
# keep this settings at the beginning of your first configuration file
|
||||
|
||||
# Debug level (0-9)
|
||||
# If set to 0 the daemon runs in the background, unless "NoFork" is set to true
|
||||
# (Default is 1)
|
||||
|
||||
# DebugLevel 1
|
||||
|
||||
# IP version to use (4 or 6)
|
||||
# (Default is 4)
|
||||
|
||||
# IpVersion 4
|
||||
|
||||
#################################
|
||||
### OLSRd agent configuration ###
|
||||
#################################
|
||||
# this parameters control the settings of the routing agent which are not
|
||||
# related to the OLSR protocol and its extensions
|
||||
|
||||
# FIBMetric controls the metric value of the host-routes OLSRd sets.
|
||||
# - "flat" means that the metric value is always 2. This is the preferred value
|
||||
# because it helps the linux kernel routing to clean up older routes
|
||||
# - "correct" use the hopcount as the metric value.
|
||||
# - "approx" use the hopcount as the metric value too, but does only update the
|
||||
# hopcount if the nexthop changes too
|
||||
# (Default is "flat")
|
||||
|
||||
# FIBMetric "flat"
|
||||
|
||||
#######################################
|
||||
### Linux specific OLSRd extensions ###
|
||||
#######################################
|
||||
# these parameters are only working on linux at the moment
|
||||
|
||||
# SrcIpRoutes tells OLSRd to set the Src flag of host routes to the originator-ip
|
||||
# of the node. In addition to this an additional localhost device is created
|
||||
# to make sure the returning traffic can be received.
|
||||
# (Default is "no")
|
||||
|
||||
# SrcIpRoutes no
|
||||
|
||||
# Specify the proto tag to be used for routes olsr inserts into kernel
|
||||
# currently only implemented for linux
|
||||
# valid values under linux are 1 .. 254
|
||||
# 1 gets remapped by olsrd to 0 UNSPECIFIED (1 is reserved for ICMP redirects)
|
||||
# 2 KERNEL routes (not very wise to use)
|
||||
# 3 BOOT (should in fact not be used by routing daemons)
|
||||
# 4 STATIC
|
||||
# 8 .. 15 various routing daemons (gated, zebra, bird, & co)
|
||||
# (defaults to 0 which gets replaced by an OS-specific default value
|
||||
# under linux 3 (BOOT) (for backward compatibility)
|
||||
|
||||
# RtProto 0
|
||||
|
||||
# Activates (in IPv6 mode) the automatic use of NIIT
|
||||
# (see README-Olsr-Extensions)
|
||||
# (default is "yes")
|
||||
|
||||
# UseNiit yes
|
||||
|
||||
# Activates the smartgateway ipip tunnel feature.
|
||||
# See README-Olsr-Extensions for a description of smartgateways.
|
||||
# (default is "no")
|
||||
|
||||
# SmartGateway no
|
||||
|
||||
# Signals that the server tunnel must always be removed on shutdown,
|
||||
# irrespective of the interface up/down state during startup.
|
||||
# (default is "no")
|
||||
|
||||
# SmartGatewayAlwaysRemoveServerTunnel no
|
||||
|
||||
# Determines the maximum number of gateways that can be in use at any given
|
||||
# time. This setting is used to mitigate the effects of breaking connections
|
||||
# (due to the selection of a new gateway) on a dynamic network.
|
||||
# (default is 1)
|
||||
|
||||
# SmartGatewayUseCount 1
|
||||
|
||||
# Determines the take-down percentage for a non-current smart gateway tunnel.
|
||||
# If the cost of the current smart gateway tunnel is less than this percentage
|
||||
# of the cost of the non-current smart gateway tunnel, then the non-current smart
|
||||
# gateway tunnel is taken down because it is then presumed to be 'too expensive'.
|
||||
# This setting is only relevant when SmartGatewayUseCount is larger than 1;
|
||||
# a value of 0 will result in the tunnels not being taken down proactively.
|
||||
# (default is 0)
|
||||
|
||||
# SmartGatewayTakeDownPercentage 0
|
||||
|
||||
# Determines the policy routing script that is executed during startup and
|
||||
# shutdown of olsrd. The script is only executed when SmartGatewayUseCount
|
||||
# is set to a value larger than 1. The script must setup policy routing
|
||||
# rules such that multi-gateway mode works. A sample script is included.
|
||||
# (default is not set)
|
||||
|
||||
# SmartGatewayPolicyRoutingScript ""
|
||||
|
||||
# Determines the egress interfaces that are part of the multi-gateway setup and
|
||||
# therefore only relevant when SmartGatewayUseCount is larger than 1 (in which
|
||||
# case it must be explicitly set).
|
||||
# (default is not set)
|
||||
|
||||
# SmartGatewayEgressInterfaces ""
|
||||
|
||||
# Determines the routing tables offset for multi-gateway policy routing tables
|
||||
# See the policy routing script for an explanation.
|
||||
# (default is 90)
|
||||
|
||||
# SmartGatewayTablesOffset 90
|
||||
|
||||
# Determines the policy routing rules offset for multi-gateway policy routing
|
||||
# rules. See the policy routing script for an explanation.
|
||||
# (default is 0, which indicates that the rules and tables should be aligned and
|
||||
# puts this value at SmartGatewayTablesOffset - # egress interfaces -
|
||||
# # olsr interfaces)
|
||||
|
||||
# SmartGatewayRulesOffset 87
|
||||
|
||||
# Allows the selection of a smartgateway with NAT (only for IPv4)
|
||||
# (default is "yes")
|
||||
|
||||
# SmartGatewayAllowNAT yes
|
||||
|
||||
# Determines the period (in milliseconds) on which a new smart gateway
|
||||
# selection is performed.
|
||||
# (default is 10000 milliseconds)
|
||||
|
||||
# SmartGatewayPeriod 10000
|
||||
|
||||
# Determines the number of times the link state database must be stable
|
||||
# before a new smart gateway is selected.
|
||||
# (default is 6)
|
||||
|
||||
# SmartGatewayStableCount 6
|
||||
|
||||
# When another gateway than the current one has a cost of less than the cost
|
||||
# of the current gateway multiplied by SmartGatewayThreshold then the smart
|
||||
# gateway is switched to the other gateway. The unit is percentage.
|
||||
# (defaults to 0)
|
||||
|
||||
# SmartGatewayThreshold 0
|
||||
|
||||
# The weighing factor for the gateway uplink bandwidth (exit link, uplink).
|
||||
# See README-Olsr-Extensions for a description of smart gateways.
|
||||
# (default is 1)
|
||||
|
||||
# SmartGatewayWeightExitLinkUp 1
|
||||
|
||||
# The weighing factor for the gateway downlink bandwidth (exit link, downlink).
|
||||
# See README-Olsr-Extensions for a description of smart gateways.
|
||||
# (default is 1)
|
||||
|
||||
# SmartGatewayWeightExitLinkDown 1
|
||||
|
||||
# The weighing factor for the ETX costs.
|
||||
# See README-Olsr-Extensions for a description of smart gateways.
|
||||
# (default is 1)
|
||||
|
||||
# SmartGatewayWeightEtx 1
|
||||
|
||||
# The divider for the ETX costs.
|
||||
# See README-Olsr-Extensions for a description of smart gateways.
|
||||
# (default is 0)
|
||||
|
||||
# SmartGatewayDividerEtx 0
|
||||
|
||||
# Defines what kind of Uplink this node will publish as a
|
||||
# smartgateway. The existence of the uplink is detected by
|
||||
# a route to 0.0.0.0/0, ::ffff:0:0/96 and/or 2000::/3.
|
||||
# possible values are "none", "ipv4", "ipv6", "both"
|
||||
# (default is "both")
|
||||
|
||||
# SmartGatewayUplink "both"
|
||||
|
||||
# Specifies if the local ipv4 uplink use NAT
|
||||
# (default is "yes")
|
||||
|
||||
# SmartGatewayUplinkNAT yes
|
||||
|
||||
# Specifies the speed of the uplink in kilobit/s.
|
||||
# First parameter is upstream, second parameter is downstream
|
||||
# (default is 128/1024)
|
||||
|
||||
# SmartGatewaySpeed 128 1024
|
||||
|
||||
# Specifies the EXTERNAL ipv6 prefix of the uplink. A prefix
|
||||
# length of more than 64 is not allowed.
|
||||
# (default is 0::/0)
|
||||
|
||||
# SmartGatewayPrefix 0::/0
|
||||
|
||||
##############################
|
||||
### OLSR protocol settings ###
|
||||
##############################
|
||||
|
||||
# HNA (Host network association) allows the OLSR to announce
|
||||
# additional IPs or IP subnets to the net that are reachable
|
||||
# through this node.
|
||||
# Syntax for HNA4 is "network-address network-mask"
|
||||
# Syntax for HNA6 is "network-address prefix-length"
|
||||
# (default is no HNA)
|
||||
Hna4
|
||||
{
|
||||
# Internet gateway
|
||||
# 0.0.0.0 0.0.0.0
|
||||
# specific small networks reachable through this node
|
||||
# 15.15.0.0 255.255.255.0
|
||||
}
|
||||
Hna6
|
||||
{
|
||||
# Internet gateway
|
||||
# 0:: 0
|
||||
# specific small networks reachable through this node
|
||||
# fec0:2200:106:0:0:0:0:0 48
|
||||
}
|
||||
|
||||
################################
|
||||
### OLSR protocol extensions ###
|
||||
################################
|
||||
|
||||
# Link quality algorithm (only for lq level 2)
|
||||
# (see README-Olsr-Extensions)
|
||||
# - "etx_float", a floating point ETX with exponential aging
|
||||
# - "etx_fpm", same as ext_float, but with integer arithmetic
|
||||
# - "etx_ff" (ETX freifunk), an etx variant which use all OLSR
|
||||
# traffic (instead of only hellos) for ETX calculation
|
||||
# - "etx_ffeth", an incompatible variant of etx_ff that allows
|
||||
# ethernet links with ETX 0.1.
|
||||
# (defaults to "etx_ff")
|
||||
|
||||
# LinkQualityAlgorithm "etx_ff"
|
||||
|
||||
# Fisheye mechanism for TCs (0 means off, 1 means on)
|
||||
# (default is 1)
|
||||
|
||||
LinkQualityFishEye 0
|
||||
|
||||
#####################################
|
||||
### Example plugin configurations ###
|
||||
#####################################
|
||||
# Olsrd plugins to load
|
||||
# This must be the absolute path to the file
|
||||
# or the loader will use the following scheme:
|
||||
# - Try the paths in the LD_LIBRARY_PATH
|
||||
# environment variable.
|
||||
# - The list of libraries cached in /etc/ld.so.cache
|
||||
# - /lib, followed by /usr/lib
|
||||
#
|
||||
# the examples in this list are for linux, so check if the plugin is
|
||||
# available if you use windows.
|
||||
# each plugin should have a README file in it's lib subfolder
|
||||
|
||||
# LoadPlugin "olsrd_txtinfo.dll"
|
||||
#LoadPlugin "olsrd_txtinfo.so.0.1"
|
||||
#{
|
||||
# the default port is 2006 but you can change it like this:
|
||||
#PlParam "port" "8080"
|
||||
|
||||
# You can set a "accept" single address to allow to connect to
|
||||
# txtinfo. If no address is specified, then localhost (127.0.0.1)
|
||||
# is allowed by default. txtinfo will only use the first "accept"
|
||||
# parameter specified and will ignore the rest.
|
||||
|
||||
# to allow a specific host:
|
||||
#PlParam "accept" "172.29.44.23"
|
||||
# if you set it to 0.0.0.0, it will accept all connections
|
||||
#PlParam "accept" "0.0.0.0"
|
||||
#}
|
||||
|
||||
#############################################
|
||||
### OLSRD default interface configuration ###
|
||||
#############################################
|
||||
# the default interface section can have the same values as the following
|
||||
# interface configuration. It will allow you so set common options for all
|
||||
# interfaces.
|
||||
|
||||
InterfaceDefaults {
|
||||
Ip4Broadcast 255.255.255.255
|
||||
}
|
||||
|
||||
######################################
|
||||
### OLSRd Interfaces configuration ###
|
||||
######################################
|
||||
# multiple interfaces can be specified for a single configuration block
|
||||
# multiple configuration blocks can be specified
|
||||
|
||||
# WARNING, don't forget to insert your interface names here !
|
||||
#Interface "<OLSRd-Interface1>" "<OLSRd-Interface2>"
|
||||
#{
|
||||
# Interface Mode is used to prevent unnecessary
|
||||
# packet forwarding on switched ethernet interfaces
|
||||
# valid Modes are "mesh" and "ether"
|
||||
# (default is "mesh")
|
||||
|
||||
# Mode "mesh"
|
||||
#}
|
||||
|
|
@ -1 +0,0 @@
|
|||
mgen input sink.mgen output mgen_${node.name}.log
|
||||
|
|
@ -1,7 +0,0 @@
|
|||
<%
|
||||
ifaces = "-i " + " -i ".join(ifnames)
|
||||
smf = ""
|
||||
if has_smf:
|
||||
smf = "-flooding ecds -smfClient %s_smf" % node.name
|
||||
%>
|
||||
nrlnhdp -l /var/log/nrlnhdp.log -rpipe ${node.name}_nhdp ${smf} ${ifaces}
|
||||
|
|
@ -1,9 +0,0 @@
|
|||
<%
|
||||
smf = ""
|
||||
if has_smf:
|
||||
smf = "-flooding s-mpr -smfClient %s_smf" % node.name
|
||||
zebra = ""
|
||||
if has_zebra:
|
||||
zebra = "-z"
|
||||
%>
|
||||
nrlolsrd -i ${ifname} -l /var/log/nrlolsrd.log -rpipe ${node.name}_olsr ${smf} ${zebra}
|
||||
|
|
@ -1,7 +0,0 @@
|
|||
<%
|
||||
ifaces = "-i " + " -i ".join(ifnames)
|
||||
smf = ""
|
||||
if has_smf:
|
||||
smf = "-flooding ecds -smfClient %s_smf" % node.name
|
||||
%>
|
||||
nrlolsrv2 -l /var/log/nrlolsrv2.log -rpipe ${node.name}_olsrv2 -p olsr ${smf} ${ifaces}
|
||||
|
|
@ -1,4 +0,0 @@
|
|||
<%
|
||||
ifaces = "-i " + " -i ".join(ifnames)
|
||||
%>
|
||||
olsrd ${ifaces}
|
||||
|
|
@ -1,4 +0,0 @@
|
|||
0.0 LISTEN UDP 5000
|
||||
% for ifname in ifnames:
|
||||
0.0 Join 224.225.1.2 INTERFACE ${ifname}
|
||||
% endfor
|
||||
|
|
@ -1,3 +0,0 @@
|
|||
#!/bin/sh
|
||||
# auto-generated by MgenActor service
|
||||
mgenBasicActor.py -n ${node.name} -a 0.0.0.0 < /dev/null > /dev/null 2>&1 &
|
||||
|
|
@ -1,12 +0,0 @@
|
|||
<%
|
||||
ifaces = ",".join(ifnames)
|
||||
if has_nhdp:
|
||||
flood = "ecds"
|
||||
elif has_olsr:
|
||||
flood = "smpr"
|
||||
else:
|
||||
flood = "cf"
|
||||
%>
|
||||
#!/bin/sh
|
||||
# auto-generated by NrlSmf service
|
||||
nrlsmf instance ${node.name}_smf ${flood} ${ifaces} hash MD5 log /var/log/nrlsmf.log < /dev/null > /dev/null 2>&1 &
|
||||
|
|
@ -1,453 +0,0 @@
|
|||
import abc
|
||||
import logging
|
||||
from typing import Any
|
||||
|
||||
from core.config import Configuration
|
||||
from core.configservice.base import ConfigService, ConfigServiceMode
|
||||
from core.emane.nodes import EmaneNet
|
||||
from core.nodes.base import CoreNodeBase, NodeBase
|
||||
from core.nodes.interface import DEFAULT_MTU, CoreInterface
|
||||
from core.nodes.network import PtpNet, WlanNode
|
||||
from core.nodes.physical import Rj45Node
|
||||
from core.nodes.wireless import WirelessNode
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
GROUP: str = "Quagga"
|
||||
QUAGGA_STATE_DIR: str = "/var/run/quagga"
|
||||
|
||||
|
||||
def is_wireless(node: NodeBase) -> bool:
    """
    Determine whether a node is one of the wireless network types.

    :param node: node to inspect
    :return: True when the node is a wireless type, False otherwise
    """
    wireless_types = (WlanNode, EmaneNet, WirelessNode)
    return isinstance(node, wireless_types)
|
||||
|
||||
|
||||
def has_mtu_mismatch(iface: CoreInterface) -> bool:
    """
    Detect whether OSPF "ip ospf mtu-ignore" is needed for an interface.

    A mismatch exists when the interface itself deviates from the default
    MTU, or when any peer interface on the attached network uses a
    different MTU. This is needed when e.g. a node is linked via a
    GreTap device.

    :param iface: interface to check
    :return: True when an MTU mismatch is present, False otherwise
    """
    if iface.mtu != DEFAULT_MTU:
        return True
    if not iface.net:
        return False
    # Bug fix: the original loop shadowed ``iface`` and compared each
    # peer's MTU with itself (always equal), so peer mismatches were
    # never detected.
    return any(peer.mtu != iface.mtu for peer in iface.net.get_ifaces())
|
||||
|
||||
|
||||
def get_min_mtu(iface: CoreInterface) -> int:
    """
    Return the minimum MTU among an interface and all interfaces on its
    attached network.

    :param iface: interface to inspect
    :return: smallest MTU across the interface and its network peers
    """
    if not iface.net:
        return iface.mtu
    # Use a distinct loop name (the original shadowed the parameter) and
    # fold with min(); seeding with iface.mtu preserves the original result.
    return min([iface.mtu] + [peer.mtu for peer in iface.net.get_ifaces()])
|
||||
|
||||
|
||||
def get_router_id(node: CoreNodeBase) -> str:
    """
    Select a router ID for a node: the first IPv4 address found on a
    non-control interface, falling back to "0.0.0.0".

    :param node: node to derive the router ID from
    :return: router ID as a dotted-quad string
    """
    candidates = (
        str(ip4.ip)
        for iface in node.get_ifaces(control=False)
        if (ip4 := iface.get_ip4())
    )
    return next(candidates, "0.0.0.0")
|
||||
|
||||
|
||||
def rj45_check(iface: CoreInterface) -> bool:
    """
    Check whether an interface is attached to a network that also hosts
    an external RJ45 node.

    :param iface: interface to check
    :return: True when a peer interface belongs to an Rj45Node
    """
    if not iface.net:
        return False
    return any(
        isinstance(peer.node, Rj45Node)
        for peer in iface.net.get_ifaces()
        if peer != iface
    )
|
||||
|
||||
|
||||
class Zebra(ConfigService):
    """
    Config service for the Quagga zebra daemon. Owns the shared
    Quagga.conf/vtysh.conf files and the quaggaboot.sh launcher that the
    other Quagga-based services depend on.
    """

    name: str = "zebra"
    group: str = GROUP
    directories: list[str] = ["/usr/local/etc/quagga", "/var/run/quagga"]
    files: list[str] = [
        "/usr/local/etc/quagga/Quagga.conf",
        "quaggaboot.sh",
        "/usr/local/etc/quagga/vtysh.conf",
    ]
    executables: list[str] = ["zebra"]
    dependencies: list[str] = []
    startup: list[str] = ["bash quaggaboot.sh zebra"]
    validate: list[str] = ["pidof zebra"]
    shutdown: list[str] = ["killall zebra"]
    validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING
    default_configs: list[Configuration] = []
    modes: dict[str, dict[str, str]] = {}

    def data(self) -> dict[str, Any]:
        """
        Build the template context for Quagga.conf and quaggaboot.sh:
        binary search paths, the state directory, the dependent Quagga
        services on this node, and per-interface addresses plus each
        service's per-interface configuration lines.
        """
        # Session options may be quoted; strip surrounding double quotes.
        quagga_bin_search = self.node.session.options.get(
            "quagga_bin_search", default="/usr/local/bin /usr/bin /usr/lib/quagga"
        ).strip('"')
        quagga_sbin_search = self.node.session.options.get(
            "quagga_sbin_search", default="/usr/local/sbin /usr/sbin /usr/lib/quagga"
        ).strip('"')
        quagga_state_dir = QUAGGA_STATE_DIR
        # first entry of files is the unified Quagga.conf path
        quagga_conf = self.files[0]

        # Collect QuaggaService instances that declare a dependency on
        # zebra, tracking whether any of them want IPv4/IPv6 forwarding.
        services = []
        want_ip4 = False
        want_ip6 = False
        for service in self.node.config_services.values():
            if self.name not in service.dependencies:
                continue
            if not isinstance(service, QuaggaService):
                continue
            if service.ipv4_routing:
                want_ip4 = True
            if service.ipv6_routing:
                want_ip6 = True
            services.append(service)

        # For each interface: its IPv4/IPv6 addresses as strings and, for
        # non-control interfaces, each service's interface config split
        # into lines for the template.
        ifaces = []
        for iface in self.node.get_ifaces():
            ip4s = []
            ip6s = []
            for ip4 in iface.ip4s:
                ip4s.append(str(ip4))
            for ip6 in iface.ip6s:
                ip6s.append(str(ip6))
            configs = []
            if not iface.control:
                for service in services:
                    config = service.quagga_iface_config(iface)
                    if config:
                        configs.append(config.split("\n"))
            ifaces.append((iface, ip4s, ip6s, configs))

        return dict(
            quagga_bin_search=quagga_bin_search,
            quagga_sbin_search=quagga_sbin_search,
            quagga_state_dir=quagga_state_dir,
            quagga_conf=quagga_conf,
            ifaces=ifaces,
            want_ip4=want_ip4,
            want_ip6=want_ip6,
            services=services,
        )
|
||||
|
||||
|
||||
class QuaggaService(abc.ABC):
    """
    Abstract mixin for services that contribute stanzas to the unified
    Quagga.conf managed by the Zebra service. Subclasses also inherit
    from ConfigService and implement the two hooks below.
    """

    group: str = GROUP
    directories: list[str] = []
    files: list[str] = []
    executables: list[str] = []
    # all Quagga protocol daemons are started via zebra's boot script
    dependencies: list[str] = ["zebra"]
    startup: list[str] = []
    validate: list[str] = []
    shutdown: list[str] = []
    validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING
    default_configs: list[Configuration] = []
    modes: dict[str, dict[str, str]] = {}
    # flags read by Zebra.data() to decide IPv4/IPv6 forwarding
    ipv4_routing: bool = False
    ipv6_routing: bool = False

    @abc.abstractmethod
    def quagga_iface_config(self, iface: CoreInterface) -> str:
        """Return per-interface config lines for the given interface."""
        raise NotImplementedError

    @abc.abstractmethod
    def quagga_config(self) -> str:
        """Return the service's global router configuration block."""
        raise NotImplementedError
|
||||
|
||||
|
||||
class Ospfv2(QuaggaService, ConfigService):
    """
    The OSPFv2 service provides IPv4 routing for wired networks. It does
    not build its own configuration file but has hooks for adding to the
    unified Quagga.conf file.
    """

    name: str = "OSPFv2"
    validate: list[str] = ["pidof ospfd"]
    shutdown: list[str] = ["killall ospfd"]
    ipv4_routing: bool = True

    def quagga_iface_config(self, iface: CoreInterface) -> str:
        """Render per-interface OSPFv2 settings; skipped for RJ45 links."""
        data = dict(
            has_mtu=has_mtu_mismatch(iface),
            is_ptp=isinstance(iface.net, PtpNet),
            has_rj45=rj45_check(iface),
        )
        text = """
% if has_mtu:
ip ospf mtu-ignore
% endif
% if has_rj45:
<% return STOP_RENDERING %>
% endif
% if is_ptp:
ip ospf network point-to-point
% endif
ip ospf hello-interval 2
ip ospf dead-interval 6
ip ospf retransmit-interval 5
"""
        return self.render_text(text, data)

    def quagga_config(self) -> str:
        """Render the global "router ospf" block with all IPv4 networks."""
        addresses = [
            str(ip4)
            for iface in self.node.get_ifaces(control=False)
            for ip4 in iface.ip4s
        ]
        data = dict(router_id=get_router_id(self.node), addresses=addresses)
        text = """
router ospf
router-id ${router_id}
% for addr in addresses:
network ${addr} area 0
% endfor
!
"""
        return self.render_text(text, data)
|
||||
|
||||
|
||||
class Ospfv3(QuaggaService, ConfigService):
    """
    The OSPFv3 service provides IPv6 routing for wired networks. It does
    not build its own configuration file but has hooks for adding to the
    unified Quagga.conf file.
    """

    name: str = "OSPFv3"
    shutdown: list[str] = ["killall ospf6d"]
    validate: list[str] = ["pidof ospf6d"]
    ipv4_routing: bool = True
    ipv6_routing: bool = True

    def quagga_iface_config(self, iface: CoreInterface) -> str:
        """Emit an ifmtu override when a linked peer has a smaller MTU."""
        mtu = get_min_mtu(iface)
        if mtu >= iface.mtu:
            return ""
        return f"ipv6 ospf6 ifmtu {mtu}"

    def quagga_config(self) -> str:
        """Render the global "router ospf6" block covering all interfaces."""
        ifnames = [iface.name for iface in self.node.get_ifaces(control=False)]
        data = dict(router_id=get_router_id(self.node), ifnames=ifnames)
        text = """
router ospf6
instance-id 65
router-id ${router_id}
% for ifname in ifnames:
interface ${ifname} area 0.0.0.0
% endfor
!
"""
        return self.render_text(text, data)
|
||||
|
||||
|
||||
class Ospfv3mdr(Ospfv3):
    """
    The OSPFv3 MANET Designated Router (MDR) service provides IPv6
    routing for wireless networks. It does not build its own
    configuration file but has hooks for adding to the
    unified Quagga.conf file.
    """

    name: str = "OSPFv3MDR"

    def quagga_iface_config(self, iface: CoreInterface) -> str:
        """Extend the OSPFv3 interface config with MDR settings on wireless nets."""
        base = super().quagga_iface_config(iface)
        if not is_wireless(iface.net):
            return base
        return self.clean_text(
            f"""
{base}
ipv6 ospf6 hello-interval 2
ipv6 ospf6 dead-interval 6
ipv6 ospf6 retransmit-interval 5
ipv6 ospf6 network manet-designated-router
ipv6 ospf6 twohoprefresh 3
ipv6 ospf6 adjacencyconnectivity uniconnected
ipv6 ospf6 lsafullness mincostlsa
"""
        )
|
||||
|
||||
|
||||
class Bgp(QuaggaService, ConfigService):
    """
    The BGP service provides interdomain routing.
    Peers must be manually configured, with a full mesh for those
    having the same AS number.
    """

    name: str = "BGP"
    shutdown: list[str] = ["killall bgpd"]
    validate: list[str] = ["pidof bgpd"]
    ipv4_routing: bool = True
    ipv6_routing: bool = True

    def quagga_config(self) -> str:
        """Render a skeleton BGP block; the node id doubles as the AS number."""
        return self.clean_text(
            f"""
! BGP configuration
! You should configure the AS number below
! along with this router's peers.
router bgp {self.node.id}
bgp router-id {get_router_id(self.node)}
redistribute connected
!neighbor 1.2.3.4 remote-as 555
!
"""
        )

    def quagga_iface_config(self, iface: CoreInterface) -> str:
        """BGP adds no per-interface configuration."""
        return ""
|
||||
|
||||
|
||||
class Rip(QuaggaService, ConfigService):
    """
    The RIP service provides IPv4 routing for wired networks.
    """

    name: str = "RIP"
    shutdown: list[str] = ["killall ripd"]
    validate: list[str] = ["pidof ripd"]
    ipv4_routing: bool = True

    def quagga_config(self) -> str:
        """Render the global "router rip" block."""
        return self.clean_text(
            """
router rip
redistribute static
redistribute connected
redistribute ospf
network 0.0.0.0/0
!
"""
        )

    def quagga_iface_config(self, iface: CoreInterface) -> str:
        """RIP adds no per-interface configuration."""
        return ""
|
||||
|
||||
|
||||
class Ripng(QuaggaService, ConfigService):
    """
    The RIP NG service provides IPv6 routing for wired networks.
    """

    name: str = "RIPNG"
    shutdown: list[str] = ["killall ripngd"]
    validate: list[str] = ["pidof ripngd"]
    ipv6_routing: bool = True

    def quagga_config(self) -> str:
        """Render the global "router ripng" block."""
        return self.clean_text(
            """
router ripng
redistribute static
redistribute connected
redistribute ospf6
network ::/0
!
"""
        )

    def quagga_iface_config(self, iface: CoreInterface) -> str:
        """RIPng adds no per-interface configuration."""
        return ""
|
||||
|
||||
|
||||
class Babel(QuaggaService, ConfigService):
    """
    The Babel service provides a loop-avoiding distance-vector routing
    protocol for IPv6 and IPv4 with fast convergence properties.
    """

    name: str = "Babel"
    shutdown: list[str] = ["killall babeld"]
    validate: list[str] = ["pidof babeld"]
    ipv6_routing: bool = True

    def quagga_config(self) -> str:
        """Render the global "router babel" block over all non-control interfaces."""
        ifnames = [iface.name for iface in self.node.get_ifaces(control=False)]
        text = """
router babel
% for ifname in ifnames:
network ${ifname}
% endfor
redistribute static
redistribute connected
!
"""
        return self.render_text(text, dict(ifnames=ifnames))

    def quagga_iface_config(self, iface: CoreInterface) -> str:
        """Pick wired/wireless babel settings based on the attached network."""
        if is_wireless(iface.net):
            text = """
babel wireless
no babel split-horizon
"""
        else:
            text = """
babel wired
babel split-horizon
"""
        return self.clean_text(text)
|
||||
|
||||
|
||||
class Xpimd(QuaggaService, ConfigService):
    """
    PIM multicast routing based on XORP.
    """

    name: str = "Xpimd"
    shutdown: list[str] = ["killall xpimd"]
    validate: list[str] = ["pidof xpimd"]
    ipv4_routing: bool = True

    def quagga_config(self) -> str:
        """Return mfea/igmp/pim router blocks, using the first non-loopback
        interface as the BSR/RP candidate (falling back to eth0)."""
        ifname = next(
            (iface.name for iface in self.node.get_ifaces() if iface.name != "lo"),
            "eth0",
        )
        text = f"""
        router mfea
        !
        router igmp
        !
        router pim
          !ip pim rp-address 10.0.0.1
          ip pim bsr-candidate {ifname}
          ip pim rp-candidate {ifname}
          !ip pim spt-threshold interval 10 bytes 80000
        !
        """
        return self.clean_text(text)

    def quagga_iface_config(self, iface: CoreInterface) -> str:
        """Enable mfea and pim on every interface."""
        text = """
        ip mfea
        ip pim
        """
        return self.clean_text(text)
|
||||
|
|
@ -1,92 +0,0 @@
|
|||
#!/bin/sh
|
||||
# auto-generated by zebra service (quagga.py)
|
||||
QUAGGA_CONF="${quagga_conf}"
|
||||
QUAGGA_SBIN_SEARCH="${quagga_sbin_search}"
|
||||
QUAGGA_BIN_SEARCH="${quagga_bin_search}"
|
||||
QUAGGA_STATE_DIR="${quagga_state_dir}"
|
||||
|
||||
searchforprog()
|
||||
{
|
||||
prog=$1
|
||||
searchpath=$@
|
||||
ret=
|
||||
for p in $searchpath; do
|
||||
if [ -x $p/$prog ]; then
|
||||
ret=$p
|
||||
break
|
||||
fi
|
||||
done
|
||||
echo $ret
|
||||
}
|
||||
|
||||
confcheck()
|
||||
{
|
||||
CONF_DIR=`dirname $QUAGGA_CONF`
|
||||
# if /etc/quagga exists, point /etc/quagga/Quagga.conf -> CONF_DIR
|
||||
if [ "$CONF_DIR" != "/etc/quagga" ] && [ -d /etc/quagga ] && [ ! -e /etc/quagga/Quagga.conf ]; then
|
||||
ln -s $CONF_DIR/Quagga.conf /etc/quagga/Quagga.conf
|
||||
fi
|
||||
# if /etc/quagga exists, point /etc/quagga/vtysh.conf -> CONF_DIR
|
||||
if [ "$CONF_DIR" != "/etc/quagga" ] && [ -d /etc/quagga ] && [ ! -e /etc/quagga/vtysh.conf ]; then
|
||||
ln -s $CONF_DIR/vtysh.conf /etc/quagga/vtysh.conf
|
||||
fi
|
||||
}
|
||||
|
||||
bootdaemon()
|
||||
{
|
||||
QUAGGA_SBIN_DIR=$(searchforprog $1 $QUAGGA_SBIN_SEARCH)
|
||||
if [ "z$QUAGGA_SBIN_DIR" = "z" ]; then
|
||||
echo "ERROR: Quagga's '$1' daemon not found in search path:"
|
||||
echo " $QUAGGA_SBIN_SEARCH"
|
||||
return 1
|
||||
fi
|
||||
|
||||
flags=""
|
||||
|
||||
if [ "$1" = "xpimd" ] && \\
|
||||
grep -E -q '^[[:space:]]*router[[:space:]]+pim6[[:space:]]*$' $QUAGGA_CONF; then
|
||||
flags="$flags -6"
|
||||
fi
|
||||
|
||||
$QUAGGA_SBIN_DIR/$1 $flags -d
|
||||
if [ "$?" != "0" ]; then
|
||||
echo "ERROR: Quagga's '$1' daemon failed to start!:"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
bootquagga()
|
||||
{
|
||||
QUAGGA_BIN_DIR=$(searchforprog 'vtysh' $QUAGGA_BIN_SEARCH)
|
||||
if [ "z$QUAGGA_BIN_DIR" = "z" ]; then
|
||||
echo "ERROR: Quagga's 'vtysh' program not found in search path:"
|
||||
echo " $QUAGGA_BIN_SEARCH"
|
||||
return 1
|
||||
fi
|
||||
|
||||
# fix /var/run/quagga permissions
|
||||
id -u quagga 2>/dev/null >/dev/null
|
||||
if [ "$?" = "0" ]; then
|
||||
chown quagga $QUAGGA_STATE_DIR
|
||||
fi
|
||||
|
||||
bootdaemon "zebra"
|
||||
for r in rip ripng ospf6 ospf bgp babel; do
|
||||
if grep -q "^router \\<$${}{r}\\>" $QUAGGA_CONF; then
|
||||
bootdaemon "$${}{r}d"
|
||||
fi
|
||||
done
|
||||
|
||||
if grep -E -q '^[[:space:]]*router[[:space:]]+pim6?[[:space:]]*$' $QUAGGA_CONF; then
|
||||
bootdaemon "xpimd"
|
||||
fi
|
||||
|
||||
$QUAGGA_BIN_DIR/vtysh -b
|
||||
}
|
||||
|
||||
if [ "$1" != "zebra" ]; then
|
||||
echo "WARNING: '$1': all Quagga daemons are launched by the 'zebra' service!"
|
||||
exit 1
|
||||
fi
|
||||
confcheck
|
||||
bootquagga
|
||||
|
|
@ -1,23 +0,0 @@
|
|||
% for iface, ip4s, ip6s, configs in ifaces:
|
||||
interface ${iface.name}
|
||||
% if want_ip4:
|
||||
% for addr in ip4s:
|
||||
ip address ${addr}
|
||||
% endfor
|
||||
% endif
|
||||
% if want_ip6:
|
||||
% for addr in ip6s:
|
||||
ipv6 address ${addr}
|
||||
% endfor
|
||||
% endif
|
||||
% for config in configs:
|
||||
% for line in config:
|
||||
${line}
|
||||
% endfor
|
||||
% endfor
|
||||
!
|
||||
% endfor
|
||||
|
||||
% for service in services:
|
||||
${service.quagga_config()}
|
||||
% endfor
|
||||
|
|
@ -1 +0,0 @@
|
|||
service integrated-vtysh-config
|
||||
|
|
@ -1,104 +0,0 @@
|
|||
from typing import Any
|
||||
|
||||
from core.config import ConfigString, Configuration
|
||||
from core.configservice.base import ConfigService, ConfigServiceMode
|
||||
|
||||
GROUP_NAME: str = "Security"
|
||||
|
||||
|
||||
class VpnClient(ConfigService):
    """OpenVPN client that builds a tunnel to a VPN server (vpnclient.sh)."""

    name: str = "VPNClient"
    group: str = GROUP_NAME
    directories: list[str] = []
    files: list[str] = ["vpnclient.sh"]
    executables: list[str] = ["openvpn", "ip", "killall"]
    dependencies: list[str] = []
    startup: list[str] = ["bash vpnclient.sh"]
    validate: list[str] = ["pidof openvpn"]
    shutdown: list[str] = ["killall openvpn"]
    validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING
    # user-tunable key directory/name and server address, substituted into
    # the generated vpnclient.sh
    default_configs: list[Configuration] = [
        ConfigString(id="keydir", label="Key Dir", default="/etc/core/keys"),
        ConfigString(id="keyname", label="Key Name", default="client1"),
        ConfigString(id="server", label="Server", default="10.0.2.10"),
    ]
    modes: dict[str, dict[str, str]] = {}
|
||||
|
||||
|
||||
class VpnServer(ConfigService):
    """OpenVPN server endpoint for VPNClient tunnels (vpnserver.sh)."""

    name: str = "VPNServer"
    group: str = GROUP_NAME
    directories: list[str] = []
    files: list[str] = ["vpnserver.sh"]
    executables: list[str] = ["openvpn", "ip", "killall"]
    dependencies: list[str] = []
    startup: list[str] = ["bash vpnserver.sh"]
    validate: list[str] = ["pidof openvpn"]
    shutdown: list[str] = ["killall openvpn"]
    validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING
    # key material location/name and the VPN subnet handed to clients
    default_configs: list[Configuration] = [
        ConfigString(id="keydir", label="Key Dir", default="/etc/core/keys"),
        ConfigString(id="keyname", label="Key Name", default="server"),
        ConfigString(id="subnet", label="Subnet", default="10.0.200.0"),
    ]
    modes: dict[str, dict[str, str]] = {}

    def data(self) -> dict[str, Any]:
        """Expose the first IPv4 address found on a non-control interface
        as the server's public address (None when no interface has one)."""
        address = None
        for candidate in self.node.get_ifaces(control=False):
            candidate_ip4 = candidate.get_ip4()
            if candidate_ip4:
                address = str(candidate_ip4.ip)
                break
        return dict(address=address)
|
||||
|
||||
|
||||
class IPsec(ConfigService):
    """IPsec ESP tunnels via setkey policies and the racoon keying daemon."""

    name: str = "IPsec"
    group: str = GROUP_NAME
    directories: list[str] = []
    files: list[str] = ["ipsec.sh"]
    executables: list[str] = ["racoon", "ip", "setkey", "killall"]
    dependencies: list[str] = []
    startup: list[str] = ["bash ipsec.sh"]
    validate: list[str] = ["pidof racoon"]
    shutdown: list[str] = ["killall racoon"]
    validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING
    default_configs: list[Configuration] = []
    modes: dict[str, dict[str, str]] = {}
|
||||
|
||||
|
||||
class Firewall(ConfigService):
    """Sample iptables firewall rules; the generated firewall.sh ships with
    its rules commented out and requires user customization."""

    name: str = "Firewall"
    group: str = GROUP_NAME
    directories: list[str] = []
    files: list[str] = ["firewall.sh"]
    executables: list[str] = ["iptables"]
    dependencies: list[str] = []
    startup: list[str] = ["bash firewall.sh"]
    # no validate/shutdown commands: the script only installs rules
    validate: list[str] = []
    shutdown: list[str] = []
    validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING
    default_configs: list[Configuration] = []
    modes: dict[str, dict[str, str]] = {}
|
||||
|
||||
|
||||
class Nat(ConfigService):
    """iptables masquerading (NAT) out the node's first interface (nat.sh)."""

    name: str = "NAT"
    group: str = GROUP_NAME
    directories: list[str] = []
    files: list[str] = ["nat.sh"]
    executables: list[str] = ["iptables"]
    dependencies: list[str] = []
    startup: list[str] = ["bash nat.sh"]
    validate: list[str] = []
    shutdown: list[str] = []
    validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING
    default_configs: list[Configuration] = []
    modes: dict[str, dict[str, str]] = {}

    def data(self) -> dict[str, Any]:
        """Provide non-control interface names to the nat.sh template."""
        names = [iface.name for iface in self.node.get_ifaces(control=False)]
        return dict(ifnames=names)
|
||||
|
|
@ -1,30 +0,0 @@
|
|||
# -------- CUSTOMIZATION REQUIRED --------
|
||||
#
|
||||
# Below are sample iptables firewall rules that you can uncomment and edit.
|
||||
# You can also use ip6tables rules for IPv6.
|
||||
#
|
||||
|
||||
# start by flushing all firewall rules (so this script may be re-run)
|
||||
#iptables -F
|
||||
|
||||
# allow traffic related to established connections
|
||||
#iptables -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
|
||||
|
||||
# allow TCP packets from any source destined for 192.168.1.1
|
||||
#iptables -A INPUT -s 0/0 -i eth0 -d 192.168.1.1 -p TCP -j ACCEPT
|
||||
|
||||
# allow OpenVPN server traffic from eth0
|
||||
#iptables -A INPUT -p udp --dport 1194 -j ACCEPT
|
||||
#iptables -A INPUT -i eth0 -j DROP
|
||||
#iptables -A OUTPUT -p udp --sport 1194 -j ACCEPT
|
||||
#iptables -A OUTPUT -o eth0 -j DROP
|
||||
|
||||
# allow ICMP ping traffic
|
||||
#iptables -A OUTPUT -p icmp --icmp-type echo-request -j ACCEPT
|
||||
#iptables -A INPUT -p icmp --icmp-type echo-reply -j ACCEPT
|
||||
|
||||
# allow SSH traffic
|
||||
#iptables -A -p tcp -m state --state NEW -m tcp --dport 22 -j ACCEPT
|
||||
|
||||
# drop all other traffic coming in eth0
|
||||
#iptables -A INPUT -i eth0 -j DROP
|
||||
|
|
@ -1,114 +0,0 @@
|
|||
# -------- CUSTOMIZATION REQUIRED --------
|
||||
#
|
||||
# The IPsec service builds ESP tunnels between the specified peers using the
|
||||
# racoon IKEv2 keying daemon. You need to provide keys and the addresses of
|
||||
# peers, along with subnets to tunnel.
|
||||
|
||||
# directory containing the certificate and key described below
|
||||
keydir=/etc/core/keys
|
||||
|
||||
# the name used for the "$certname.pem" x509 certificate and
|
||||
# "$certname.key" RSA private key, which can be generated using openssl
|
||||
certname=ipsec1
|
||||
|
||||
# list the public-facing IP addresses, starting with the localhost and followed
|
||||
# by each tunnel peer, separated with a single space
|
||||
tunnelhosts="172.16.0.1AND172.16.0.2 172.16.0.1AND172.16.2.1"
|
||||
|
||||
# Define T<i> where i is the index for each tunnel peer host from
|
||||
# the tunnel_hosts list above (0 is localhost).
|
||||
# T<i> is a list of IPsec tunnels with peer i, with a local subnet address
|
||||
# followed by the remote subnet address:
|
||||
# T<i>="<local>AND<remote> <local>AND<remote>"
|
||||
# For example, 172.16.0.0/24 is a local network (behind this node) to be
|
||||
# tunneled and 172.16.2.0/24 is a remote network (behind peer 1)
|
||||
T1="172.16.3.0/24AND172.16.5.0/24"
|
||||
T2="172.16.4.0/24AND172.16.5.0/24 172.16.4.0/24AND172.16.6.0/24"
|
||||
|
||||
# -------- END CUSTOMIZATION --------
|
||||
|
||||
echo "building config $PWD/ipsec.conf..."
|
||||
echo "building config $PWD/ipsec.conf..." > $PWD/ipsec.log
|
||||
|
||||
checkip=0
|
||||
if [ "$(dpkg -l | grep " sipcalc ")" = "" ]; then
|
||||
echo "WARNING: ip validation disabled because package sipcalc not installed
|
||||
" >> $PWD/ipsec.log
|
||||
checkip=1
|
||||
fi
|
||||
|
||||
echo "#!/usr/sbin/setkey -f
|
||||
# Flush the SAD and SPD
|
||||
flush;
|
||||
spdflush;
|
||||
|
||||
# Security policies " > $PWD/ipsec.conf
|
||||
i=0
|
||||
for hostpair in $tunnelhosts; do
|
||||
i=`expr $i + 1`
|
||||
# parse tunnel host IP
|
||||
thishost=$${}{hostpair%%AND*}
|
||||
peerhost=$${}{hostpair##*AND}
|
||||
if [ $checkip = "0" ] &&
|
||||
[ "$(sipcalc "$thishost" "$peerhost" | grep ERR)" != "" ]; then
|
||||
echo "ERROR: invalid host address $thishost or $peerhost " >> $PWD/ipsec.log
|
||||
fi
|
||||
# parse each tunnel addresses
|
||||
tunnel_list_var_name=T$i
|
||||
eval tunnels="$"$tunnel_list_var_name""
|
||||
for ttunnel in $tunnels; do
|
||||
lclnet=$${}{ttunnel%%AND*}
|
||||
rmtnet=$${}{ttunnel##*AND}
|
||||
if [ $checkip = "0" ] &&
|
||||
[ "$(sipcalc "$lclnet" "$rmtnet"| grep ERR)" != "" ]; then
|
||||
echo "ERROR: invalid tunnel address $lclnet and $rmtnet " >> $PWD/ipsec.log
|
||||
fi
|
||||
# add tunnel policies
|
||||
echo "
|
||||
spdadd $lclnet $rmtnet any -P out ipsec
|
||||
esp/tunnel/$thishost-$peerhost/require;
|
||||
spdadd $rmtnet $lclnet any -P in ipsec
|
||||
esp/tunnel/$peerhost-$thishost/require; " >> $PWD/ipsec.conf
|
||||
done
|
||||
done
|
||||
|
||||
echo "building config $PWD/racoon.conf..."
|
||||
if [ ! -e $keydir\/$certname.key ] || [ ! -e $keydir\/$certname.pem ]; then
|
||||
echo "ERROR: missing certification files under $keydir $certname.key or $certname.pem " >> $PWD/ipsec.log
|
||||
fi
|
||||
echo "
|
||||
path certificate \"$keydir\";
|
||||
listen {
|
||||
adminsock disabled;
|
||||
}
|
||||
remote anonymous
|
||||
{
|
||||
exchange_mode main;
|
||||
certificate_type x509 \"$certname.pem\" \"$certname.key\";
|
||||
ca_type x509 \"ca-cert.pem\";
|
||||
my_identifier asn1dn;
|
||||
peers_identifier asn1dn;
|
||||
|
||||
proposal {
|
||||
encryption_algorithm 3des ;
|
||||
hash_algorithm sha1;
|
||||
authentication_method rsasig ;
|
||||
dh_group modp768;
|
||||
}
|
||||
}
|
||||
sainfo anonymous
|
||||
{
|
||||
pfs_group modp768;
|
||||
lifetime time 1 hour ;
|
||||
encryption_algorithm 3des, blowfish 448, rijndael ;
|
||||
authentication_algorithm hmac_sha1, hmac_md5 ;
|
||||
compression_algorithm deflate ;
|
||||
}
|
||||
" > $PWD/racoon.conf
|
||||
|
||||
# the setkey program is required from the ipsec-tools package
|
||||
echo "running setkey -f $PWD/ipsec.conf..."
|
||||
setkey -f $PWD/ipsec.conf
|
||||
|
||||
echo "running racoon -d -f $PWD/racoon.conf..."
|
||||
racoon -d -f $PWD/racoon.conf -l racoon.log
|
||||
|
|
@ -1,14 +0,0 @@
|
|||
#!/bin/sh
|
||||
# generated by security.py
|
||||
# NAT out the first interface by default
|
||||
% for index, ifname in enumerate(ifnames):
|
||||
% if index == 0:
|
||||
iptables -t nat -A POSTROUTING -o ${ifname} -j MASQUERADE
|
||||
iptables -A FORWARD -i ${ifname} -m state --state RELATED,ESTABLISHED -j ACCEPT
|
||||
iptables -A FORWARD -i ${ifname} -j DROP
|
||||
% else:
|
||||
# iptables -t nat -A POSTROUTING -o ${ifname} -j MASQUERADE
|
||||
# iptables -A FORWARD -i ${ifname} -m state --state RELATED,ESTABLISHED -j ACCEPT
|
||||
# iptables -A FORWARD -i ${ifname} -j DROP
|
||||
% endif
|
||||
% endfor
|
||||
|
|
@ -1,61 +0,0 @@
|
|||
# -------- CUSTOMIZATION REQUIRED --------
|
||||
#
|
||||
# The VPNClient service builds a VPN tunnel to the specified VPN server using
|
||||
# OpenVPN software and a virtual TUN/TAP device.
|
||||
|
||||
# directory containing the certificate and key described below
|
||||
keydir=${config["keydir"]}
|
||||
|
||||
# the name used for a "$keyname.crt" certificate and "$keyname.key" private key.
|
||||
keyname=${config["keyname"]}
|
||||
|
||||
# the public IP address of the VPN server this client should connect with
|
||||
vpnserver=${config["server"]}
|
||||
|
||||
# optional next hop for adding a static route to reach the VPN server
|
||||
#nexthop="10.0.1.1"
|
||||
|
||||
# --------- END CUSTOMIZATION --------
|
||||
|
||||
# validate addresses
|
||||
if [ "$(dpkg -l | grep " sipcalc ")" = "" ]; then
|
||||
echo "WARNING: ip validation disabled because package sipcalc not installed
|
||||
" > $PWD/vpnclient.log
|
||||
else
|
||||
if [ "$(sipcalc "$vpnserver" "$nexthop" | grep ERR)" != "" ]; then
|
||||
echo "ERROR: invalide address $vpnserver or $nexthop " > $PWD/vpnclient.log
|
||||
fi
|
||||
fi
|
||||
|
||||
# validate key and certification files
|
||||
if [ ! -e $keydir\/$keyname.key ] || [ ! -e $keydir\/$keyname.crt ] \
|
||||
|| [ ! -e $keydir\/ca.crt ] || [ ! -e $keydir\/dh1024.pem ]; then
|
||||
echo "ERROR: missing certification or key files under $keydir $keyname.key or $keyname.crt or ca.crt or dh1024.pem" >> $PWD/vpnclient.log
|
||||
fi
|
||||
|
||||
# if necessary, add a static route for reaching the VPN server IP via the IF
|
||||
vpnservernet=$${}{vpnserver%.*}.0/24
|
||||
if [ "$nexthop" != "" ]; then
|
||||
ip route add $vpnservernet via $nexthop
|
||||
fi
|
||||
|
||||
# create openvpn client.conf
|
||||
(
|
||||
cat << EOF
|
||||
client
|
||||
dev tun
|
||||
proto udp
|
||||
remote $vpnserver 1194
|
||||
nobind
|
||||
ca $keydir/ca.crt
|
||||
cert $keydir/$keyname.crt
|
||||
key $keydir/$keyname.key
|
||||
dh $keydir/dh1024.pem
|
||||
cipher AES-256-CBC
|
||||
log $PWD/openvpn-client.log
|
||||
verb 4
|
||||
daemon
|
||||
EOF
|
||||
) > client.conf
|
||||
|
||||
openvpn --config client.conf
|
||||
|
|
@ -1,147 +0,0 @@
|
|||
# -------- CUSTOMIZATION REQUIRED --------
|
||||
#
|
||||
# The VPNServer service sets up the OpenVPN server for building VPN tunnels
|
||||
# that allow access via TUN/TAP device to private networks.
|
||||
#
|
||||
# note that the IPForward and DefaultRoute services should be enabled
|
||||
|
||||
# directory containing the certificate and key described below, in addition to
|
||||
# a CA certificate and DH key
|
||||
keydir=${config["keydir"]}
|
||||
|
||||
# the name used for a "$keyname.crt" certificate and "$keyname.key" private key.
|
||||
keyname=${config["keyname"]}
|
||||
|
||||
# the VPN subnet address from which the client VPN IP (for the TUN/TAP)
|
||||
# will be allocated
|
||||
vpnsubnet=${config["subnet"]}
|
||||
|
||||
# public IP address of this vpn server (same as VPNClient vpnserver= setting)
|
||||
vpnserver=${address}
|
||||
|
||||
# optional list of private subnets reachable behind this VPN server
|
||||
# each subnet and next hop is separated by a space
|
||||
# "<subnet1>,<nexthop1> <subnet2>,<nexthop2> ..."
|
||||
#privatenets="10.0.11.0,10.0.10.1 10.0.12.0,10.0.10.1"
|
||||
|
||||
# optional list of VPN clients, for statically assigning IP addresses to
|
||||
# clients; also, an optional client subnet can be specified for adding static
|
||||
# routes via the client
|
||||
# Note: VPN addresses x.x.x.0-3 are reserved
|
||||
# "<keyname>,<vpnIP>,<subnetIP> <keyname>,<vpnIP>,<subnetIP> ..."
|
||||
#vpnclients="client1KeyFilename,10.0.200.5,10.0.0.0 client2KeyFilename,,"
|
||||
|
||||
# NOTE: you may need to enable the StaticRoutes service on nodes within the
|
||||
# private subnet, in order to have routes back to the client.
|
||||
# /sbin/ip ro add <vpnsubnet>/24 via <vpnServerRemoteInterface>
|
||||
# /sbin/ip ro add <vpnClientSubnet>/24 via <vpnServerRemoteInterface>
|
||||
|
||||
# -------- END CUSTOMIZATION --------
|
||||
|
||||
echo > $PWD/vpnserver.log
|
||||
rm -f -r $PWD/ccd
|
||||
|
||||
# validate key and certification files
|
||||
if [ ! -e $keydir\/$keyname.key ] || [ ! -e $keydir\/$keyname.crt ] \
|
||||
|| [ ! -e $keydir\/ca.crt ] || [ ! -e $keydir\/dh1024.pem ]; then
|
||||
echo "ERROR: missing certification or key files under $keydir \
|
||||
$keyname.key or $keyname.crt or ca.crt or dh1024.pem" >> $PWD/vpnserver.log
|
||||
fi
|
||||
|
||||
# validate configuration IP addresses
|
||||
checkip=0
|
||||
if [ "$(dpkg -l | grep " sipcalc ")" = "" ]; then
|
||||
echo "WARNING: ip validation disabled because package sipcalc not installed\
|
||||
" >> $PWD/vpnserver.log
|
||||
checkip=1
|
||||
else
|
||||
if [ "$(sipcalc "$vpnsubnet" "$vpnserver" | grep ERR)" != "" ]; then
|
||||
echo "ERROR: invalid vpn subnet or server address \
|
||||
$vpnsubnet or $vpnserver " >> $PWD/vpnserver.log
|
||||
fi
|
||||
fi
|
||||
|
||||
# create client vpn ip pool file
|
||||
(
|
||||
cat << EOF
|
||||
EOF
|
||||
)> $PWD/ippool.txt
|
||||
|
||||
# create server.conf file
|
||||
(
|
||||
cat << EOF
|
||||
# openvpn server config
|
||||
local $vpnserver
|
||||
server $vpnsubnet 255.255.255.0
|
||||
push "redirect-gateway def1"
|
||||
EOF
|
||||
)> $PWD/server.conf
|
||||
|
||||
# add routes to VPN server private subnets, and push these routes to clients
|
||||
for privatenet in $privatenets; do
|
||||
if [ $privatenet != "" ]; then
|
||||
net=$${}{privatenet%%,*}
|
||||
nexthop=$${}{privatenet##*,}
|
||||
if [ $checkip = "0" ] &&
|
||||
[ "$(sipcalc "$net" "$nexthop" | grep ERR)" != "" ]; then
|
||||
echo "ERROR: invalid vpn server private net address \
|
||||
$net or $nexthop " >> $PWD/vpnserver.log
|
||||
fi
|
||||
echo push route $net 255.255.255.0 >> $PWD/server.conf
|
||||
ip ro add $net/24 via $nexthop
|
||||
ip ro add $vpnsubnet/24 via $nexthop
|
||||
fi
|
||||
done
|
||||
|
||||
# allow subnet through this VPN, one route for each client subnet
|
||||
for client in $vpnclients; do
|
||||
if [ $client != "" ]; then
|
||||
cSubnetIP=$${}{client##*,}
|
||||
cVpnIP=$${}{client#*,}
|
||||
cVpnIP=$${}{cVpnIP%%,*}
|
||||
cKeyFilename=$${}{client%%,*}
|
||||
if [ "$cSubnetIP" != "" ]; then
|
||||
if [ $checkip = "0" ] &&
|
||||
[ "$(sipcalc "$cSubnetIP" "$cVpnIP" | grep ERR)" != "" ]; then
|
||||
echo "ERROR: invalid vpn client and subnet address \
|
||||
$cSubnetIP or $cVpnIP " >> $PWD/vpnserver.log
|
||||
fi
|
||||
echo route $cSubnetIP 255.255.255.0 >> $PWD/server.conf
|
||||
if ! test -d $PWD/ccd; then
|
||||
mkdir -p $PWD/ccd
|
||||
echo client-config-dir $PWD/ccd >> $PWD/server.conf
|
||||
fi
|
||||
if test -e $PWD/ccd/$cKeyFilename; then
|
||||
echo iroute $cSubnetIP 255.255.255.0 >> $PWD/ccd/$cKeyFilename
|
||||
else
|
||||
echo iroute $cSubnetIP 255.255.255.0 > $PWD/ccd/$cKeyFilename
|
||||
fi
|
||||
fi
|
||||
if [ "$cVpnIP" != "" ]; then
|
||||
echo $cKeyFilename,$cVpnIP >> $PWD/ippool.txt
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
(
|
||||
cat << EOF
|
||||
keepalive 10 120
|
||||
ca $keydir/ca.crt
|
||||
cert $keydir/$keyname.crt
|
||||
key $keydir/$keyname.key
|
||||
dh $keydir/dh1024.pem
|
||||
cipher AES-256-CBC
|
||||
status /var/log/openvpn-status.log
|
||||
log /var/log/openvpn-server.log
|
||||
ifconfig-pool-linear
|
||||
ifconfig-pool-persist $PWD/ippool.txt
|
||||
port 1194
|
||||
proto udp
|
||||
dev tun
|
||||
verb 4
|
||||
daemon
|
||||
EOF
|
||||
)>> $PWD/server.conf
|
||||
|
||||
# start vpn server
|
||||
openvpn --config server.conf
|
||||
|
|
@ -1,291 +0,0 @@
|
|||
from typing import Any
|
||||
|
||||
import netaddr
|
||||
|
||||
from core import utils
|
||||
from core.config import Configuration
|
||||
from core.configservice.base import ConfigService, ConfigServiceMode
|
||||
|
||||
GROUP_NAME = "Utility"
|
||||
|
||||
|
||||
class DefaultRouteService(ConfigService):
    """Adds default routes via the node's first interface (defaultroute.sh)."""

    name: str = "DefaultRoute"
    group: str = GROUP_NAME
    directories: list[str] = []
    files: list[str] = ["defaultroute.sh"]
    executables: list[str] = ["ip"]
    dependencies: list[str] = []
    startup: list[str] = ["bash defaultroute.sh"]
    validate: list[str] = []
    shutdown: list[str] = []
    validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING
    default_configs: list[Configuration] = []
    modes: dict[str, dict[str, str]] = {}

    def data(self) -> dict[str, Any]:
        """Gateways for the template: the first host address (net[1]) of
        every network on the node's first interface."""
        # only add default routes for linked routing nodes
        routes = []
        all_ifaces = self.node.get_ifaces()
        if all_ifaces:
            routes = [
                str(ip.cidr[1]) for ip in all_ifaces[0].ips() if ip.cidr.size > 1
            ]
        return dict(routes=routes)
|
||||
|
||||
|
||||
class DefaultMulticastRouteService(ConfigService):
    """Installs a default multicast route (224.0.0.0/4) via the node's first
    non-control interface (defaultmroute.sh)."""

    name: str = "DefaultMulticastRoute"
    group: str = GROUP_NAME
    directories: list[str] = []
    files: list[str] = ["defaultmroute.sh"]
    executables: list[str] = []
    dependencies: list[str] = []
    startup: list[str] = ["bash defaultmroute.sh"]
    validate: list[str] = []
    shutdown: list[str] = []
    validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING
    default_configs: list[Configuration] = []
    modes: dict[str, dict[str, str]] = {}

    def data(self) -> dict[str, Any]:
        """Provide the first non-control interface name (or None)."""
        ifname = next(
            (iface.name for iface in self.node.get_ifaces(control=False)), None
        )
        return dict(ifname=ifname)
|
||||
|
||||
|
||||
class StaticRouteService(ConfigService):
    """Adds example static routes; the generated staticroute.sh is meant to
    be customized by the user."""

    name: str = "StaticRoute"
    group: str = GROUP_NAME
    directories: list[str] = []
    files: list[str] = ["staticroute.sh"]
    executables: list[str] = []
    dependencies: list[str] = []
    startup: list[str] = ["bash staticroute.sh"]
    validate: list[str] = []
    shutdown: list[str] = []
    validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING
    default_configs: list[Configuration] = []
    modes: dict[str, dict[str, str]] = {}

    def data(self) -> dict[str, Any]:
        """
        Build (destination, gateway) pairs for the staticroute.sh template.

        The example destination is 3ffe:4::/64 for IPv6 addresses and
        10.9.8.0/24 for IPv4; the gateway is the first host address (ip[1])
        of the interface's network.
        """
        routes = []
        for iface in self.node.get_ifaces(control=False):
            for ip in iface.ips():
                address = str(ip.ip)
                if netaddr.valid_ipv6(address):
                    dst = "3ffe:4::/64"
                else:
                    dst = "10.9.8.0/24"
                # fix: host-only networks (/32, /128) have no ip[1]/ip[-2]
                # and would raise IndexError when indexed below
                if ip.size < 2:
                    continue
                if ip[-2] != ip[1]:
                    routes.append((dst, ip[1]))
        return dict(routes=routes)
|
||||
|
||||
|
||||
class IpForwardService(ConfigService):
    """Applies per-device sysctl settings via the generated ipforward.sh."""

    name: str = "IPForward"
    group: str = GROUP_NAME
    directories: list[str] = []
    files: list[str] = ["ipforward.sh"]
    executables: list[str] = ["sysctl"]
    dependencies: list[str] = []
    startup: list[str] = ["bash ipforward.sh"]
    validate: list[str] = []
    shutdown: list[str] = []
    validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING
    default_configs: list[Configuration] = []
    modes: dict[str, dict[str, str]] = {}

    def data(self) -> dict[str, Any]:
        """Provide sysctl-safe device names for every node interface."""
        names = [
            utils.sysctl_devname(iface.name) for iface in self.node.get_ifaces()
        ]
        return dict(devnames=names)
|
||||
|
||||
|
||||
class SshService(ConfigService):
    """Runs an SSH server (sshd) on the node."""

    name: str = "SSH"
    group: str = GROUP_NAME
    # sshd config directory and its privilege-separation state directory
    directories: list[str] = ["/etc/ssh", "/var/run/sshd"]
    files: list[str] = ["startsshd.sh", "/etc/ssh/sshd_config"]
    executables: list[str] = ["sshd"]
    dependencies: list[str] = []
    startup: list[str] = ["bash startsshd.sh"]
    validate: list[str] = []
    shutdown: list[str] = ["killall sshd"]
    validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING
    default_configs: list[Configuration] = []
    modes: dict[str, dict[str, str]] = {}

    def data(self) -> dict[str, Any]:
        """Template variables for sshd_config and startsshd.sh."""
        return dict(
            sshcfgdir=self.directories[0],
            sshstatedir=self.directories[1],
            # NOTE(review): assumes sshd helper binaries live here — confirm
            sshlibdir="/usr/lib/openssh",
        )
|
||||
|
||||
|
||||
class DhcpService(ConfigService):
    """Runs the ISC DHCP server (dhcpd) serving the node's IPv4 subnets."""

    name: str = "DHCP"
    group: str = GROUP_NAME
    directories: list[str] = ["/etc/dhcp", "/var/lib/dhcp"]
    files: list[str] = ["/etc/dhcp/dhcpd.conf"]
    executables: list[str] = ["dhcpd"]
    dependencies: list[str] = []
    # dhcpd refuses to start without an existing leases file
    startup: list[str] = ["touch /var/lib/dhcp/dhcpd.leases", "dhcpd"]
    validate: list[str] = ["pidof dhcpd"]
    shutdown: list[str] = ["killall dhcpd"]
    validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING
    default_configs: list[Configuration] = []
    modes: dict[str, dict[str, str]] = {}

    def data(self) -> dict[str, Any]:
        """
        Compute dhcpd.conf subnet parameters for each IPv4 interface network.

        Each entry is (network address, netmask, range low, range high,
        server ip); the dynamic pool spans the upper half of the subnet.
        """
        subnets = []
        for iface in self.node.get_ifaces(control=False):
            for ip4 in iface.ip4s:
                if ip4.size == 1:
                    continue
                # divide the address space in half; must be integer division —
                # true division yields a float, and indexing the network with
                # a float index raises TypeError under Python 3
                index = (ip4.size - 2) // 2
                rangelow = ip4[index]
                rangehigh = ip4[-2]
                subnets.append((ip4.cidr.ip, ip4.netmask, rangelow, rangehigh, ip4.ip))
        return dict(subnets=subnets)
|
||||
|
||||
|
||||
class DhcpClientService(ConfigService):
    """Runs dhclient on the node's interfaces (startdhcpclient.sh)."""

    name: str = "DHCPClient"
    group: str = GROUP_NAME
    directories: list[str] = []
    files: list[str] = ["startdhcpclient.sh"]
    executables: list[str] = ["dhclient"]
    dependencies: list[str] = []
    startup: list[str] = ["bash startdhcpclient.sh"]
    validate: list[str] = ["pidof dhclient"]
    shutdown: list[str] = ["killall dhclient"]
    validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING
    default_configs: list[Configuration] = []
    modes: dict[str, dict[str, str]] = {}

    def data(self) -> dict[str, Any]:
        """Provide non-control interface names to the startup template."""
        names = [iface.name for iface in self.node.get_ifaces(control=False)]
        return dict(ifnames=names)
|
||||
|
||||
|
||||
class FtpService(ConfigService):
    """Runs an FTP server using vsftpd."""

    name: str = "FTP"
    group: str = GROUP_NAME
    # vsftpd's secure chroot dir and the anonymous FTP root
    directories: list[str] = ["/var/run/vsftpd/empty", "/var/ftp"]
    files: list[str] = ["vsftpd.conf"]
    executables: list[str] = ["vsftpd"]
    dependencies: list[str] = []
    startup: list[str] = ["vsftpd ./vsftpd.conf"]
    validate: list[str] = ["pidof vsftpd"]
    shutdown: list[str] = ["killall vsftpd"]
    validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING
    default_configs: list[Configuration] = []
    modes: dict[str, dict[str, str]] = {}
|
||||
|
||||
|
||||
class PcapService(ConfigService):
    """Captures packets with tcpdump on the node's interfaces (pcap.sh)."""

    name: str = "pcap"
    group: str = GROUP_NAME
    directories: list[str] = []
    files: list[str] = ["pcap.sh"]
    executables: list[str] = ["tcpdump"]
    dependencies: list[str] = []
    startup: list[str] = ["bash pcap.sh start"]
    validate: list[str] = ["pidof tcpdump"]
    shutdown: list[str] = ["bash pcap.sh stop"]
    validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING
    default_configs: list[Configuration] = []
    modes: dict[str, dict[str, str]] = {}

    def data(self) -> dict[str, Any]:
        """Provide non-control interface names to the pcap.sh template."""
        names = [iface.name for iface in self.node.get_ifaces(control=False)]
        return dict(ifnames=names)
|
||||
|
||||
|
||||
class RadvdService(ConfigService):
    """Runs the IPv6 router advertisement daemon (radvd)."""

    name: str = "radvd"
    group: str = GROUP_NAME
    directories: list[str] = ["/etc/radvd", "/var/run/radvd"]
    files: list[str] = ["/etc/radvd/radvd.conf"]
    executables: list[str] = ["radvd"]
    dependencies: list[str] = []
    startup: list[str] = [
        "radvd -C /etc/radvd/radvd.conf -m logfile -l /var/log/radvd.log"
    ]
    validate: list[str] = ["pidof radvd"]
    shutdown: list[str] = ["pkill radvd"]
    validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING
    default_configs: list[Configuration] = []
    modes: dict[str, dict[str, str]] = {}

    def data(self) -> dict[str, Any]:
        """Build (interface name, ipv6 prefixes) pairs for radvd.conf,
        skipping interfaces that carry no IPv6 addresses."""
        results = []
        for iface in self.node.get_ifaces(control=False):
            prefixes = [str(ip6) for ip6 in iface.ip6s]
            if prefixes:
                results.append((iface.name, prefixes))
        return dict(ifaces=results)
|
||||
|
||||
|
||||
class AtdService(ConfigService):
    """Runs the atd job-scheduling daemon."""

    name: str = "atd"
    group: str = GROUP_NAME
    # atd's job queue and spool directories
    directories: list[str] = ["/var/spool/cron/atjobs", "/var/spool/cron/atspool"]
    files: list[str] = ["startatd.sh"]
    executables: list[str] = ["atd"]
    dependencies: list[str] = []
    startup: list[str] = ["bash startatd.sh"]
    validate: list[str] = ["pidof atd"]
    shutdown: list[str] = ["pkill atd"]
    validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING
    default_configs: list[Configuration] = []
    modes: dict[str, dict[str, str]] = {}
|
||||
|
||||
|
||||
class HttpService(ConfigService):
|
||||
name: str = "HTTP"
|
||||
group: str = GROUP_NAME
|
||||
directories: list[str] = [
|
||||
"/etc/apache2",
|
||||
"/var/run/apache2",
|
||||
"/var/log/apache2",
|
||||
"/run/lock",
|
||||
"/var/lock/apache2",
|
||||
"/var/www",
|
||||
]
|
||||
files: list[str] = [
|
||||
"/etc/apache2/apache2.conf",
|
||||
"/etc/apache2/envvars",
|
||||
"/var/www/index.html",
|
||||
]
|
||||
executables: list[str] = ["apache2ctl"]
|
||||
dependencies: list[str] = []
|
||||
startup: list[str] = ["chown www-data /var/lock/apache2", "apache2ctl start"]
|
||||
validate: list[str] = ["pidof apache2"]
|
||||
shutdown: list[str] = ["apache2ctl stop"]
|
||||
validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING
|
||||
default_configs: list[Configuration] = []
|
||||
modes: dict[str, dict[str, str]] = {}
|
||||
|
||||
def data(self) -> dict[str, Any]:
|
||||
ifaces = []
|
||||
for iface in self.node.get_ifaces(control=False):
|
||||
ifaces.append(iface)
|
||||
return dict(ifaces=ifaces)
|
||||
|
|
@ -1,4 +0,0 @@
|
|||
#!/bin/sh
|
||||
# auto-generated by DefaultMulticastRoute service (utility.py)
|
||||
# the first interface is chosen below; please change it as needed
|
||||
ip route add 224.0.0.0/4 dev ${ifname}
|
||||
|
|
@ -1,5 +0,0 @@
|
|||
#!/bin/sh
|
||||
# auto-generated by DefaultRoute service
|
||||
% for route in routes:
|
||||
ip route add default via ${route}
|
||||
% endfor
|
||||
|
|
@ -1,102 +0,0 @@
|
|||
# apache2.conf generated by utility.py:HttpService
|
||||
Mutex file:$APACHE_LOCK_DIR default
|
||||
|
||||
PidFile $APACHE_PID_FILE
|
||||
Timeout 300
|
||||
KeepAlive On
|
||||
MaxKeepAliveRequests 100
|
||||
KeepAliveTimeout 5
|
||||
|
||||
LoadModule mpm_worker_module /usr/lib/apache2/modules/mod_mpm_worker.so
|
||||
|
||||
<IfModule mpm_prefork_module>
|
||||
StartServers 5
|
||||
MinSpareServers 5
|
||||
MaxSpareServers 10
|
||||
MaxClients 150
|
||||
MaxRequestsPerChild 0
|
||||
</IfModule>
|
||||
|
||||
<IfModule mpm_worker_module>
|
||||
StartServers 2
|
||||
MinSpareThreads 25
|
||||
MaxSpareThreads 75
|
||||
ThreadLimit 64
|
||||
ThreadsPerChild 25
|
||||
MaxClients 150
|
||||
MaxRequestsPerChild 0
|
||||
</IfModule>
|
||||
|
||||
<IfModule mpm_event_module>
|
||||
StartServers 2
|
||||
MinSpareThreads 25
|
||||
MaxSpareThreads 75
|
||||
ThreadLimit 64
|
||||
ThreadsPerChild 25
|
||||
MaxClients 150
|
||||
MaxRequestsPerChild 0
|
||||
</IfModule>
|
||||
|
||||
User $APACHE_RUN_USER
|
||||
Group $APACHE_RUN_GROUP
|
||||
|
||||
AccessFileName .htaccess
|
||||
|
||||
<Files ~ "^\\.ht">
|
||||
Require all denied
|
||||
</Files>
|
||||
|
||||
DefaultType None
|
||||
|
||||
HostnameLookups Off
|
||||
|
||||
ErrorLog $APACHE_LOG_DIR/error.log
|
||||
LogLevel warn
|
||||
|
||||
#Include mods-enabled/*.load
|
||||
#Include mods-enabled/*.conf
|
||||
LoadModule alias_module /usr/lib/apache2/modules/mod_alias.so
|
||||
LoadModule auth_basic_module /usr/lib/apache2/modules/mod_auth_basic.so
|
||||
LoadModule authz_core_module /usr/lib/apache2/modules/mod_authz_core.so
|
||||
LoadModule authz_host_module /usr/lib/apache2/modules/mod_authz_host.so
|
||||
LoadModule authz_user_module /usr/lib/apache2/modules/mod_authz_user.so
|
||||
LoadModule autoindex_module /usr/lib/apache2/modules/mod_autoindex.so
|
||||
LoadModule dir_module /usr/lib/apache2/modules/mod_dir.so
|
||||
LoadModule env_module /usr/lib/apache2/modules/mod_env.so
|
||||
|
||||
NameVirtualHost *:80
|
||||
Listen 80
|
||||
|
||||
<IfModule mod_ssl.c>
|
||||
Listen 443
|
||||
</IfModule>
|
||||
<IfModule mod_gnutls.c>
|
||||
Listen 443
|
||||
</IfModule>
|
||||
|
||||
LogFormat "%v:%p %h %l %u %t \\"%r\\" %>s %O \\"%{Referer}i\\" \\"%{User-Agent}i\\"" vhost_combined
|
||||
LogFormat "%h %l %u %t \\"%r\\" %>s %O \\"%{Referer}i\\" \\"%{User-Agent}i\\"" combined
|
||||
LogFormat "%h %l %u %t \\"%r\\" %>s %O" common
|
||||
LogFormat "%{Referer}i -> %U" referer
|
||||
LogFormat "%{User-agent}i" agent
|
||||
|
||||
ServerTokens OS
|
||||
ServerSignature On
|
||||
TraceEnable Off
|
||||
|
||||
<VirtualHost *:80>
|
||||
ServerAdmin webmaster@localhost
|
||||
DocumentRoot /var/www
|
||||
<Directory />
|
||||
Options FollowSymLinks
|
||||
AllowOverride None
|
||||
</Directory>
|
||||
<Directory /var/www/>
|
||||
Options Indexes FollowSymLinks MultiViews
|
||||
AllowOverride None
|
||||
Require all granted
|
||||
</Directory>
|
||||
ErrorLog $APACHE_LOG_DIR/error.log
|
||||
LogLevel warn
|
||||
CustomLog $APACHE_LOG_DIR/access.log combined
|
||||
</VirtualHost>
|
||||
|
|
@ -1,10 +0,0 @@
|
|||
# this file is used by apache2ctl - generated by utility.py:HttpService
|
||||
# these settings come from a default Ubuntu apache2 installation
|
||||
export APACHE_RUN_USER=www-data
|
||||
export APACHE_RUN_GROUP=www-data
|
||||
export APACHE_PID_FILE=/var/run/apache2.pid
|
||||
export APACHE_RUN_DIR=/var/run/apache2
|
||||
export APACHE_LOCK_DIR=/var/lock/apache2
|
||||
export APACHE_LOG_DIR=/var/log/apache2
|
||||
export LANG=C
|
||||
export LANG
|
||||
|
|
@ -1,22 +0,0 @@
|
|||
# auto-generated by DHCP service (utility.py)
|
||||
# NOTE: move these option lines into the desired pool { } block(s) below
|
||||
#option domain-name "test.com";
|
||||
#option domain-name-servers 10.0.0.1;
|
||||
#option routers 10.0.0.1;
|
||||
|
||||
log-facility local6;
|
||||
|
||||
default-lease-time 600;
|
||||
max-lease-time 7200;
|
||||
|
||||
ddns-update-style none;
|
||||
|
||||
% for subnet, netmask, rangelow, rangehigh, addr in subnets:
|
||||
subnet ${subnet} netmask ${netmask} {
|
||||
pool {
|
||||
range ${rangelow} ${rangehigh};
|
||||
default-lease-time 600;
|
||||
option routers ${addr};
|
||||
}
|
||||
}
|
||||
% endfor
|
||||
|
|
@ -1,19 +0,0 @@
|
|||
# auto-generated by RADVD service (utility.py)
|
||||
% for ifname, prefixes in ifaces:
|
||||
interface ${ifname}
|
||||
{
|
||||
AdvSendAdvert on;
|
||||
MinRtrAdvInterval 3;
|
||||
MaxRtrAdvInterval 10;
|
||||
AdvDefaultPreference low;
|
||||
AdvHomeAgentFlag off;
|
||||
% for prefix in prefixes:
|
||||
prefix ${prefix}
|
||||
{
|
||||
AdvOnLink on;
|
||||
AdvAutonomous on;
|
||||
AdvRouterAddr on;
|
||||
};
|
||||
% endfor
|
||||
};
|
||||
% endfor
|
||||
|
|
@ -1,37 +0,0 @@
|
|||
# auto-generated by SSH service (utility.py)
|
||||
Port 22
|
||||
Protocol 2
|
||||
HostKey ${sshcfgdir}/ssh_host_rsa_key
|
||||
UsePrivilegeSeparation yes
|
||||
PidFile ${sshstatedir}/sshd.pid
|
||||
|
||||
KeyRegenerationInterval 3600
|
||||
ServerKeyBits 768
|
||||
|
||||
SyslogFacility AUTH
|
||||
LogLevel INFO
|
||||
|
||||
LoginGraceTime 120
|
||||
PermitRootLogin yes
|
||||
StrictModes yes
|
||||
|
||||
RSAAuthentication yes
|
||||
PubkeyAuthentication yes
|
||||
|
||||
IgnoreRhosts yes
|
||||
RhostsRSAAuthentication no
|
||||
HostbasedAuthentication no
|
||||
|
||||
PermitEmptyPasswords no
|
||||
ChallengeResponseAuthentication no
|
||||
|
||||
X11Forwarding yes
|
||||
X11DisplayOffset 10
|
||||
PrintMotd no
|
||||
PrintLastLog yes
|
||||
TCPKeepAlive yes
|
||||
|
||||
AcceptEnv LANG LC_*
|
||||
Subsystem sftp ${sshlibdir}/sftp-server
|
||||
UsePAM yes
|
||||
UseDNS no
|
||||
|
|
@ -1,17 +0,0 @@
|
|||
#!/bin/sh
|
||||
# auto-generated by IPForward service (utility.py)
|
||||
sysctl -w net.ipv4.conf.all.forwarding=1
|
||||
sysctl -w net.ipv4.conf.default.forwarding=1
|
||||
sysctl -w net.ipv6.conf.all.forwarding=1
|
||||
sysctl -w net.ipv6.conf.default.forwarding=1
|
||||
sysctl -w net.ipv4.conf.all.send_redirects=0
|
||||
sysctl -w net.ipv4.conf.default.send_redirects=0
|
||||
sysctl -w net.ipv4.conf.all.rp_filter=0
|
||||
sysctl -w net.ipv4.conf.default.rp_filter=0
|
||||
# setup forwarding for node interfaces
|
||||
% for devname in devnames:
|
||||
sysctl -w net.ipv4.conf.${devname}.forwarding=1
|
||||
sysctl -w net.ipv4.conf.${devname}.send_redirects=0
|
||||
sysctl -w net.ipv4.conf.${devname}.rp_filter=0
|
||||
sysctl -w net.ipv6.conf.${devname}.forwarding=1
|
||||
% endfor
|
||||
|
|
@ -1,11 +0,0 @@
|
|||
#!/bin/sh
|
||||
# set tcpdump options here (see 'man tcpdump' for help)
|
||||
# (-s snap length, -C limit pcap file length, -n disable name resolution)
|
||||
if [ "x$1" = "xstart" ]; then
|
||||
% for ifname in ifnames:
|
||||
tcpdump -s 12288 -C 10 -n -w ${node.name}.${ifname}.pcap -i ${ifname} > /dev/null 2>&1 &
|
||||
% endfor
|
||||
elif [ "x$1" = "xstop" ]; then
|
||||
mkdir -p $SESSION_DIR/pcap
|
||||
mv *.pcap $SESSION_DIR/pcap
|
||||
fi;
|
||||
|
|
@ -1,5 +0,0 @@
|
|||
#!/bin/sh
|
||||
echo 00001 > /var/spool/cron/atjobs/.SEQ
|
||||
chown -R daemon /var/spool/cron/*
|
||||
chmod -R 700 /var/spool/cron/*
|
||||
atd
|
||||
|
|
@ -1,8 +0,0 @@
|
|||
#!/bin/sh
|
||||
# auto-generated by DHCPClient service (utility.py)
|
||||
# uncomment this mkdir line and symlink line to enable client-side DNS\n# resolution based on the DHCP server response.
|
||||
#mkdir -p /var/run/resolvconf/interface
|
||||
% for ifname in ifnames:
|
||||
#ln -s /var/run/resolvconf/interface/${ifname}.dhclient /var/run/resolvconf/resolv.conf
|
||||
dhclient -nw -pf /var/run/dhclient-${ifname}.pid -lf /var/run/dhclient-${ifname}.lease ${ifname}
|
||||
% endfor
|
||||
|
|
@ -1,6 +0,0 @@
|
|||
#!/bin/sh
|
||||
# auto-generated by SSH service (utility.py)
|
||||
ssh-keygen -q -t rsa -N "" -f ${sshcfgdir}/ssh_host_rsa_key
|
||||
chmod 655 ${sshstatedir}
|
||||
# wait until RSA host key has been generated to launch sshd
|
||||
$(which sshd) -f ${sshcfgdir}/sshd_config
|
||||
|
|
@ -1,7 +0,0 @@
|
|||
#!/bin/sh
|
||||
# auto-generated by StaticRoute service (utility.py)
|
||||
# NOTE: this service must be customized to be of any use
|
||||
# Below are samples that you can uncomment and edit.
|
||||
% for dest, addr in routes:
|
||||
#ip route add ${dest} via ${addr}
|
||||
% endfor
|
||||
|
|
@ -1,13 +0,0 @@
|
|||
<!-- generated by utility.py:HttpService -->
|
||||
<html>
|
||||
<body>
|
||||
<h1>${node.name} web server</h1>
|
||||
<p>This is the default web page for this server.</p>
|
||||
<p>The web server software is running but no content has been added, yet.</p>
|
||||
<ul>
|
||||
% for iface in ifaces:
|
||||
<li>${iface.name} - ${iface.addrlist}</li>
|
||||
% endfor
|
||||
</ul>
|
||||
</body>
|
||||
</html>
|
||||
|
|
@ -1,12 +0,0 @@
|
|||
# vsftpd.conf auto-generated by FTP service (utility.py)
|
||||
listen=YES
|
||||
anonymous_enable=YES
|
||||
local_enable=YES
|
||||
dirmessage_enable=YES
|
||||
use_localtime=YES
|
||||
xferlog_enable=YES
|
||||
connect_from_port_20=YES
|
||||
xferlog_file=/var/log/vsftpd.log
|
||||
ftpd_banner=Welcome to the CORE FTP service
|
||||
secure_chroot_dir=/var/run/vsftpd/empty
|
||||
anon_root=/var/ftp
|
||||
|
|
@ -1,5 +1,28 @@
|
|||
from pathlib import Path
|
||||
import os
|
||||
|
||||
COREDPY_VERSION: str = "@PACKAGE_VERSION@"
|
||||
CORE_CONF_DIR: Path = Path("@CORE_CONF_DIR@")
|
||||
CORE_DATA_DIR: Path = Path("@CORE_DATA_DIR@")
|
||||
COREDPY_VERSION = "@PACKAGE_VERSION@"
|
||||
CORE_STATE_DIR = "@CORE_STATE_DIR@"
|
||||
CORE_CONF_DIR = "@CORE_CONF_DIR@"
|
||||
CORE_DATA_DIR = "@CORE_DATA_DIR@"
|
||||
QUAGGA_STATE_DIR = "@CORE_STATE_DIR@/run/quagga"
|
||||
FRR_STATE_DIR = "@CORE_STATE_DIR@/run/frr"
|
||||
|
||||
|
||||
def which(command):
|
||||
for path in os.environ["PATH"].split(os.pathsep):
|
||||
command_path = os.path.join(path, command)
|
||||
if os.path.isfile(command_path) and os.access(command_path, os.X_OK):
|
||||
return command_path
|
||||
|
||||
|
||||
VNODED_BIN = which("vnoded")
|
||||
VCMD_BIN = which("vcmd")
|
||||
BRCTL_BIN = which("brctl")
|
||||
SYSCTL_BIN = which("sysctl")
|
||||
IP_BIN = which("ip")
|
||||
TC_BIN = which("tc")
|
||||
EBTABLES_BIN = which("ebtables")
|
||||
MOUNT_BIN = which("mount")
|
||||
UMOUNT_BIN = which("umount")
|
||||
OVS_BIN = which("ovs-vsctl")
|
||||
OVS_FLOW_BIN = which("ovs-ofctl")
|
||||
|
|
|
|||
1776
daemon/core/corehandlers.py
Normal file
1776
daemon/core/corehandlers.py
Normal file
File diff suppressed because it is too large
Load diff
758
daemon/core/coreobj.py
Normal file
758
daemon/core/coreobj.py
Normal file
|
|
@ -0,0 +1,758 @@
|
|||
"""
|
||||
Defines the basic objects for CORE emulation: the PyCoreObj base class, along with PyCoreNode,
|
||||
PyCoreNet, and PyCoreNetIf.
|
||||
"""
|
||||
|
||||
import os
|
||||
import shutil
|
||||
import socket
|
||||
import threading
|
||||
from socket import AF_INET
|
||||
from socket import AF_INET6
|
||||
|
||||
from core.data import NodeData, LinkData
|
||||
from core.enumerations import LinkTypes
|
||||
from core.misc import ipaddress
|
||||
|
||||
|
||||
class Position(object):
|
||||
"""
|
||||
Helper class for Cartesian coordinate position
|
||||
"""
|
||||
|
||||
def __init__(self, x=None, y=None, z=None):
|
||||
"""
|
||||
Creates a Position instance.
|
||||
|
||||
:param x: x position
|
||||
:param y: y position
|
||||
:param z: z position
|
||||
:return:
|
||||
"""
|
||||
self.x = x
|
||||
self.y = y
|
||||
self.z = z
|
||||
|
||||
def set(self, x=None, y=None, z=None):
|
||||
"""
|
||||
Returns True if the position has actually changed.
|
||||
|
||||
:param float x: x position
|
||||
:param float y: y position
|
||||
:param float z: z position
|
||||
:return: True if position changed, False otherwise
|
||||
:rtype: bool
|
||||
"""
|
||||
if self.x == x and self.y == y and self.z == z:
|
||||
return False
|
||||
self.x = x
|
||||
self.y = y
|
||||
self.z = z
|
||||
return True
|
||||
|
||||
def get(self):
|
||||
"""
|
||||
Retrieve x,y,z position.
|
||||
|
||||
:return: x,y,z position tuple
|
||||
:rtype: tuple
|
||||
"""
|
||||
return self.x, self.y, self.z
|
||||
|
||||
|
||||
class PyCoreObj(object):
|
||||
"""
|
||||
Base class for CORE objects (nodes and networks)
|
||||
"""
|
||||
apitype = None
|
||||
|
||||
# TODO: appears start has no usage, verify and remove
|
||||
def __init__(self, session, objid=None, name=None, start=True):
|
||||
"""
|
||||
Creates a PyCoreObj instance.
|
||||
|
||||
:param core.session.Session session: CORE session object
|
||||
:param int objid: object id
|
||||
:param str name: object name
|
||||
:param bool start: start value
|
||||
:return:
|
||||
"""
|
||||
|
||||
self.session = session
|
||||
if objid is None:
|
||||
objid = session.get_object_id()
|
||||
self.objid = objid
|
||||
if name is None:
|
||||
name = "o%s" % self.objid
|
||||
self.name = name
|
||||
self.type = None
|
||||
self.server = None
|
||||
self.services = None
|
||||
# ifindex is key, PyCoreNetIf instance is value
|
||||
self._netif = {}
|
||||
self.ifindex = 0
|
||||
self.canvas = None
|
||||
self.icon = None
|
||||
self.opaque = None
|
||||
self.position = Position()
|
||||
|
||||
def startup(self):
|
||||
"""
|
||||
Each object implements its own startup method.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def shutdown(self):
|
||||
"""
|
||||
Each object implements its own shutdown method.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def setposition(self, x=None, y=None, z=None):
|
||||
"""
|
||||
Set the (x,y,z) position of the object.
|
||||
|
||||
:param float x: x position
|
||||
:param float y: y position
|
||||
:param float z: z position
|
||||
:return: True if position changed, False otherwise
|
||||
:rtype: bool
|
||||
"""
|
||||
return self.position.set(x=x, y=y, z=z)
|
||||
|
||||
def getposition(self):
|
||||
"""
|
||||
Return an (x,y,z) tuple representing this object's position.
|
||||
|
||||
:return: x,y,z position tuple
|
||||
:rtype: tuple
|
||||
"""
|
||||
return self.position.get()
|
||||
|
||||
def ifname(self, ifindex):
|
||||
"""
|
||||
Retrieve interface name for index.
|
||||
|
||||
:param int ifindex: interface index
|
||||
:return: interface name
|
||||
:rtype: str
|
||||
"""
|
||||
return self._netif[ifindex].name
|
||||
|
||||
def netifs(self, sort=False):
|
||||
"""
|
||||
Retrieve network interfaces, sorted if desired.
|
||||
|
||||
:param bool sort: boolean used to determine if interfaces should be sorted
|
||||
:return: network interfaces
|
||||
:rtype: list
|
||||
"""
|
||||
if sort:
|
||||
return map(lambda k: self._netif[k], sorted(self._netif.keys()))
|
||||
else:
|
||||
return self._netif.itervalues()
|
||||
|
||||
def numnetif(self):
|
||||
"""
|
||||
Return the attached interface count.
|
||||
|
||||
:return: number of network interfaces
|
||||
:rtype: int
|
||||
"""
|
||||
return len(self._netif)
|
||||
|
||||
def getifindex(self, netif):
|
||||
"""
|
||||
Retrieve index for an interface.
|
||||
|
||||
:param PyCoreNetIf netif: interface to get index for
|
||||
:return: interface index if found, -1 otherwise
|
||||
:rtype: int
|
||||
"""
|
||||
|
||||
for ifindex in self._netif:
|
||||
if self._netif[ifindex] is netif:
|
||||
return ifindex
|
||||
|
||||
return -1
|
||||
|
||||
def newifindex(self):
|
||||
"""
|
||||
Create a new interface index.
|
||||
|
||||
:return: interface index
|
||||
:rtype: int
|
||||
"""
|
||||
while self.ifindex in self._netif:
|
||||
self.ifindex += 1
|
||||
ifindex = self.ifindex
|
||||
self.ifindex += 1
|
||||
return ifindex
|
||||
|
||||
def data(self, message_type, lat=None, lon=None, alt=None):
|
||||
"""
|
||||
Build a data object for this node.
|
||||
|
||||
:param message_type: purpose for the data object we are creating
|
||||
:param str lat: latitude
|
||||
:param str lon: longitude
|
||||
:param str alt: altitude
|
||||
:return: node data object
|
||||
:rtype: core.data.NodeData
|
||||
"""
|
||||
if self.apitype is None:
|
||||
return None
|
||||
|
||||
x, y, _ = self.getposition()
|
||||
model = self.type
|
||||
emulation_server = self.server
|
||||
|
||||
services = self.services
|
||||
if services is not None:
|
||||
services = "|".join([service.name for service in services])
|
||||
|
||||
node_data = NodeData(
|
||||
message_type=message_type,
|
||||
id=self.objid,
|
||||
node_type=self.apitype,
|
||||
name=self.name,
|
||||
emulation_id=self.objid,
|
||||
canvas=self.canvas,
|
||||
icon=self.icon,
|
||||
opaque=self.opaque,
|
||||
x_position=x,
|
||||
y_position=y,
|
||||
latitude=lat,
|
||||
longitude=lon,
|
||||
altitude=alt,
|
||||
model=model,
|
||||
emulation_server=emulation_server,
|
||||
services=services
|
||||
)
|
||||
|
||||
return node_data
|
||||
|
||||
def all_link_data(self, flags):
|
||||
"""
|
||||
Build CORE Link data for this object. There is no default
|
||||
method for PyCoreObjs as PyCoreNodes do not implement this but
|
||||
PyCoreNets do.
|
||||
|
||||
:param flags: message flags
|
||||
:return: list of link data
|
||||
:rtype: core.data.LinkData
|
||||
"""
|
||||
return []
|
||||
|
||||
|
||||
class PyCoreNode(PyCoreObj):
|
||||
"""
|
||||
Base class for CORE nodes.
|
||||
"""
|
||||
|
||||
def __init__(self, session, objid=None, name=None, start=True):
|
||||
"""
|
||||
Create a PyCoreNode instance.
|
||||
|
||||
:param core.session.Session session: CORE session object
|
||||
:param int objid: object id
|
||||
:param str name: object name
|
||||
:param bool start: boolean for starting
|
||||
"""
|
||||
super(PyCoreNode, self).__init__(session, objid, name, start=start)
|
||||
self.services = []
|
||||
self.nodedir = None
|
||||
self.tmpnodedir = False
|
||||
|
||||
def addservice(self, service):
|
||||
"""
|
||||
Add a services to the service list.
|
||||
|
||||
:param core.service.CoreService service: service to add
|
||||
:return: nothing
|
||||
"""
|
||||
if service is not None:
|
||||
self.services.append(service)
|
||||
|
||||
def makenodedir(self):
|
||||
"""
|
||||
Create the node directory.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
if self.nodedir is None:
|
||||
self.nodedir = os.path.join(self.session.session_dir, self.name + ".conf")
|
||||
os.makedirs(self.nodedir)
|
||||
self.tmpnodedir = True
|
||||
else:
|
||||
self.tmpnodedir = False
|
||||
|
||||
def rmnodedir(self):
|
||||
"""
|
||||
Remove the node directory, unless preserve directory has been set.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
preserve = self.session.options.get_config("preservedir") == "1"
|
||||
if preserve:
|
||||
return
|
||||
|
||||
if self.tmpnodedir:
|
||||
shutil.rmtree(self.nodedir, ignore_errors=True)
|
||||
|
||||
def addnetif(self, netif, ifindex):
|
||||
"""
|
||||
Add network interface to node and set the network interface index if successful.
|
||||
|
||||
:param PyCoreNetIf netif: network interface to add
|
||||
:param int ifindex: interface index
|
||||
:return: nothing
|
||||
"""
|
||||
if ifindex in self._netif:
|
||||
raise ValueError("ifindex %s already exists" % ifindex)
|
||||
self._netif[ifindex] = netif
|
||||
# TODO: this should have probably been set ahead, seems bad to me, check for failure and fix
|
||||
netif.netindex = ifindex
|
||||
|
||||
def delnetif(self, ifindex):
|
||||
"""
|
||||
Delete a network interface
|
||||
|
||||
:param int ifindex: interface index to delete
|
||||
:return: nothing
|
||||
"""
|
||||
if ifindex not in self._netif:
|
||||
raise ValueError("ifindex %s does not exist" % ifindex)
|
||||
netif = self._netif.pop(ifindex)
|
||||
netif.shutdown()
|
||||
del netif
|
||||
|
||||
# TODO: net parameter is not used, remove
|
||||
def netif(self, ifindex, net=None):
|
||||
"""
|
||||
Retrieve network interface.
|
||||
|
||||
:param int ifindex: index of interface to retrieve
|
||||
:param PyCoreNetIf net: network node
|
||||
:return: network interface, or None if not found
|
||||
:rtype: PyCoreNetIf
|
||||
"""
|
||||
if ifindex in self._netif:
|
||||
return self._netif[ifindex]
|
||||
else:
|
||||
return None
|
||||
|
||||
def attachnet(self, ifindex, net):
|
||||
"""
|
||||
Attach a network.
|
||||
|
||||
:param int ifindex: interface of index to attach
|
||||
:param PyCoreNetIf net: network to attach
|
||||
:return:
|
||||
"""
|
||||
if ifindex not in self._netif:
|
||||
raise ValueError("ifindex %s does not exist" % ifindex)
|
||||
self._netif[ifindex].attachnet(net)
|
||||
|
||||
def detachnet(self, ifindex):
|
||||
"""
|
||||
Detach network interface.
|
||||
|
||||
:param int ifindex: interface index to detach
|
||||
:return: nothing
|
||||
"""
|
||||
if ifindex not in self._netif:
|
||||
raise ValueError("ifindex %s does not exist" % ifindex)
|
||||
self._netif[ifindex].detachnet()
|
||||
|
||||
def setposition(self, x=None, y=None, z=None):
|
||||
"""
|
||||
Set position.
|
||||
|
||||
:param x: x position
|
||||
:param y: y position
|
||||
:param z: z position
|
||||
:return: nothing
|
||||
"""
|
||||
changed = super(PyCoreNode, self).setposition(x, y, z)
|
||||
if changed:
|
||||
for netif in self.netifs(sort=True):
|
||||
netif.setposition(x, y, z)
|
||||
|
||||
def commonnets(self, obj, want_ctrl=False):
|
||||
"""
|
||||
Given another node or net object, return common networks between
|
||||
this node and that object. A list of tuples is returned, with each tuple
|
||||
consisting of (network, interface1, interface2).
|
||||
|
||||
:param obj: object to get common network with
|
||||
:param want_ctrl: flag set to determine if control network are wanted
|
||||
:return: tuples of common networks
|
||||
:rtype: list
|
||||
"""
|
||||
common = []
|
||||
for netif1 in self.netifs():
|
||||
if not want_ctrl and hasattr(netif1, "control"):
|
||||
continue
|
||||
for netif2 in obj.netifs():
|
||||
if netif1.net == netif2.net:
|
||||
common.append((netif1.net, netif1, netif2))
|
||||
|
||||
return common
|
||||
|
||||
def check_cmd(self, args):
|
||||
"""
|
||||
Runs shell command on node.
|
||||
|
||||
:param list[str]|str args: command to run
|
||||
:return: combined stdout and stderr
|
||||
:rtype: str
|
||||
:raises CoreCommandError: when a non-zero exit status occurs
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def cmd(self, args, wait=True):
|
||||
"""
|
||||
Runs shell command on node, with option to not wait for a result.
|
||||
|
||||
:param list[str]|str args: command to run
|
||||
:param bool wait: wait for command to exit, defaults to True
|
||||
:return: exit status for command
|
||||
:rtype: int
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def cmd_output(self, args):
|
||||
"""
|
||||
Runs shell command on node and get exit status and output.
|
||||
|
||||
:param list[str]|str args: command to run
|
||||
:return: exit status and combined stdout and stderr
|
||||
:rtype: tuple[int, str]
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def termcmdstring(self, sh):
|
||||
"""
|
||||
Create a terminal command string.
|
||||
|
||||
:param str sh: shell to execute command in
|
||||
:return: str
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
class PyCoreNet(PyCoreObj):
|
||||
"""
|
||||
Base class for networks
|
||||
"""
|
||||
linktype = LinkTypes.WIRED.value
|
||||
|
||||
def __init__(self, session, objid, name, start=True):
|
||||
"""
|
||||
Create a PyCoreNet instance.
|
||||
|
||||
:param core.session.Session session: CORE session object
|
||||
:param int objid: object id
|
||||
:param str name: object name
|
||||
:param bool start: should object start
|
||||
"""
|
||||
super(PyCoreNet, self).__init__(session, objid, name, start=start)
|
||||
self._linked = {}
|
||||
self._linked_lock = threading.Lock()
|
||||
|
||||
def startup(self):
|
||||
"""
|
||||
Each object implements its own startup method.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def shutdown(self):
|
||||
"""
|
||||
Each object implements its own shutdown method.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def attach(self, netif):
|
||||
"""
|
||||
Attach network interface.
|
||||
|
||||
:param PyCoreNetIf netif: network interface to attach
|
||||
:return: nothing
|
||||
"""
|
||||
i = self.newifindex()
|
||||
self._netif[i] = netif
|
||||
netif.netifi = i
|
||||
with self._linked_lock:
|
||||
self._linked[netif] = {}
|
||||
|
||||
def detach(self, netif):
|
||||
"""
|
||||
Detach network interface.
|
||||
|
||||
:param PyCoreNetIf netif: network interface to detach
|
||||
:return: nothing
|
||||
"""
|
||||
del self._netif[netif.netifi]
|
||||
netif.netifi = None
|
||||
with self._linked_lock:
|
||||
del self._linked[netif]
|
||||
|
||||
def all_link_data(self, flags):
|
||||
"""
|
||||
Build link data objects for this network. Each link object describes a link
|
||||
between this network and a node.
|
||||
"""
|
||||
all_links = []
|
||||
|
||||
# build a link message from this network node to each node having a
|
||||
# connected interface
|
||||
for netif in self.netifs(sort=True):
|
||||
if not hasattr(netif, "node"):
|
||||
continue
|
||||
otherobj = netif.node
|
||||
uni = False
|
||||
if otherobj is None:
|
||||
# two layer-2 switches/hubs linked together via linknet()
|
||||
if not hasattr(netif, "othernet"):
|
||||
continue
|
||||
otherobj = netif.othernet
|
||||
if otherobj.objid == self.objid:
|
||||
continue
|
||||
netif.swapparams('_params_up')
|
||||
upstream_params = netif.getparams()
|
||||
netif.swapparams('_params_up')
|
||||
if netif.getparams() != upstream_params:
|
||||
uni = True
|
||||
|
||||
unidirectional = 0
|
||||
if uni:
|
||||
unidirectional = 1
|
||||
|
||||
interface2_ip4 = None
|
||||
interface2_ip4_mask = None
|
||||
interface2_ip6 = None
|
||||
interface2_ip6_mask = None
|
||||
for address in netif.addrlist:
|
||||
ip, _sep, mask = address.partition("/")
|
||||
mask = int(mask)
|
||||
if ipaddress.is_ipv4_address(ip):
|
||||
family = AF_INET
|
||||
ipl = socket.inet_pton(family, ip)
|
||||
interface2_ip4 = ipaddress.IpAddress(af=family, address=ipl)
|
||||
interface2_ip4_mask = mask
|
||||
else:
|
||||
family = AF_INET6
|
||||
ipl = socket.inet_pton(family, ip)
|
||||
interface2_ip6 = ipaddress.IpAddress(af=family, address=ipl)
|
||||
interface2_ip6_mask = mask
|
||||
|
||||
link_data = LinkData(
|
||||
message_type=flags,
|
||||
node1_id=self.objid,
|
||||
node2_id=otherobj.objid,
|
||||
link_type=self.linktype,
|
||||
unidirectional=unidirectional,
|
||||
interface2_id=otherobj.getifindex(netif),
|
||||
interface2_mac=netif.hwaddr,
|
||||
interface2_ip4=interface2_ip4,
|
||||
interface2_ip4_mask=interface2_ip4_mask,
|
||||
interface2_ip6=interface2_ip6,
|
||||
interface2_ip6_mask=interface2_ip6_mask,
|
||||
delay=netif.getparam("delay"),
|
||||
bandwidth=netif.getparam("bw"),
|
||||
dup=netif.getparam("duplicate"),
|
||||
jitter=netif.getparam("jitter"),
|
||||
per=netif.getparam("loss")
|
||||
)
|
||||
|
||||
all_links.append(link_data)
|
||||
|
||||
if not uni:
|
||||
continue
|
||||
|
||||
netif.swapparams('_params_up')
|
||||
link_data = LinkData(
|
||||
message_type=0,
|
||||
node1_id=otherobj.objid,
|
||||
node2_id=self.objid,
|
||||
unidirectional=1,
|
||||
delay=netif.getparam("delay"),
|
||||
bandwidth=netif.getparam("bw"),
|
||||
dup=netif.getparam("duplicate"),
|
||||
jitter=netif.getparam("jitter"),
|
||||
per=netif.getparam("loss")
|
||||
)
|
||||
netif.swapparams('_params_up')
|
||||
|
||||
all_links.append(link_data)
|
||||
|
||||
return all_links
|
||||
|
||||
|
||||
class PyCoreNetIf(object):
|
||||
"""
|
||||
Base class for network interfaces.
|
||||
"""
|
||||
|
||||
def __init__(self, node, name, mtu):
|
||||
"""
|
||||
Creates a PyCoreNetIf instance.
|
||||
|
||||
:param core.coreobj.PyCoreNode node: node for interface
|
||||
:param str name: interface name
|
||||
:param mtu: mtu value
|
||||
"""
|
||||
|
||||
self.node = node
|
||||
self.name = name
|
||||
if not isinstance(mtu, (int, long)):
|
||||
raise ValueError
|
||||
self.mtu = mtu
|
||||
self.net = None
|
||||
self._params = {}
|
||||
self.addrlist = []
|
||||
self.hwaddr = None
|
||||
# placeholder position hook
|
||||
self.poshook = lambda a, b, c, d: None
|
||||
# used with EMANE
|
||||
self.transport_type = None
|
||||
# interface index on the network
|
||||
self.netindex = None
|
||||
# index used to find flow data
|
||||
self.flow_id = None
|
||||
|
||||
def startup(self):
|
||||
"""
|
||||
Startup method for the interface.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
pass
|
||||
|
||||
def shutdown(self):
|
||||
"""
|
||||
Shutdown method for the interface.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
pass
|
||||
|
||||
def attachnet(self, net):
|
||||
"""
|
||||
Attach network.
|
||||
|
||||
:param core.coreobj.PyCoreNet net: network to attach
|
||||
:return: nothing
|
||||
"""
|
||||
if self.net:
|
||||
self.detachnet()
|
||||
self.net = None
|
||||
|
||||
net.attach(self)
|
||||
self.net = net
|
||||
|
||||
def detachnet(self):
|
||||
"""
|
||||
Detach from a network.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
if self.net is not None:
|
||||
self.net.detach(self)
|
||||
|
||||
def addaddr(self, addr):
|
||||
"""
|
||||
Add address.
|
||||
|
||||
:param str addr: address to add
|
||||
:return: nothing
|
||||
"""
|
||||
|
||||
self.addrlist.append(addr)
|
||||
|
||||
def deladdr(self, addr):
|
||||
"""
|
||||
Delete address.
|
||||
|
||||
:param str addr: address to delete
|
||||
:return: nothing
|
||||
"""
|
||||
self.addrlist.remove(addr)
|
||||
|
||||
def sethwaddr(self, addr):
|
||||
"""
|
||||
Set hardware address.
|
||||
|
||||
:param core.misc.ipaddress.MacAddress addr: hardware address to set to.
|
||||
:return: nothing
|
||||
"""
|
||||
self.hwaddr = addr
|
||||
|
||||
def getparam(self, key):
|
||||
"""
|
||||
Retrieve a parameter from the, or None if the parameter does not exist.
|
||||
|
||||
:param key: parameter to get value for
|
||||
:return: parameter value
|
||||
"""
|
||||
return self._params.get(key)
|
||||
|
||||
def getparams(self):
|
||||
"""
|
||||
Return (key, value) pairs for parameters.
|
||||
"""
|
||||
parameters = []
|
||||
for k in sorted(self._params.keys()):
|
||||
parameters.append((k, self._params[k]))
|
||||
return parameters
|
||||
|
||||
def setparam(self, key, value):
|
||||
"""
|
||||
Set a parameter value, returns True if the parameter has changed.
|
||||
|
||||
:param key: parameter name to set
|
||||
:param value: parameter value
|
||||
:return: True if parameter changed, False otherwise
|
||||
"""
|
||||
# treat None and 0 as unchanged values
|
||||
current_value = self._params.get(key)
|
||||
if current_value == value or current_value <= 0 and value <= 0:
|
||||
return False
|
||||
|
||||
self._params[key] = value
|
||||
return True
|
||||
|
||||
def swapparams(self, name):
|
||||
"""
|
||||
Swap out parameters dict for name. If name does not exist,
|
||||
intialize it. This is for supporting separate upstream/downstream
|
||||
parameters when two layer-2 nodes are linked together.
|
||||
|
||||
:param str name: name of parameter to swap
|
||||
:return: nothing
|
||||
"""
|
||||
tmp = self._params
|
||||
if not hasattr(self, name):
|
||||
setattr(self, name, {})
|
||||
self._params = getattr(self, name)
|
||||
setattr(self, name, tmp)
|
||||
|
||||
def setposition(self, x, y, z):
|
||||
"""
|
||||
Dispatch position hook handler.
|
||||
|
||||
:param x: x position
|
||||
:param y: y position
|
||||
:param z: z position
|
||||
:return: nothing
|
||||
"""
|
||||
self.poshook(self, x, y, z)
|
||||
30
daemon/core/coreserver.py
Normal file
30
daemon/core/coreserver.py
Normal file
|
|
@ -0,0 +1,30 @@
|
|||
"""
|
||||
Defines core server for handling TCP connections.
|
||||
"""
|
||||
|
||||
import SocketServer
|
||||
|
||||
from core.emulator.coreemu import CoreEmu
|
||||
|
||||
|
||||
class CoreServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
|
||||
"""
|
||||
TCP server class, manages sessions and spawns request handlers for
|
||||
incoming connections.
|
||||
"""
|
||||
daemon_threads = True
|
||||
allow_reuse_address = True
|
||||
|
||||
def __init__(self, server_address, handler_class, config=None):
|
||||
"""
|
||||
Server class initialization takes configuration data and calls
|
||||
the SocketServer constructor
|
||||
|
||||
:param tuple[str, int] server_address: server host and port to use
|
||||
:param class handler_class: request handler
|
||||
:param dict config: configuration setting
|
||||
:return:
|
||||
"""
|
||||
self.coreemu = CoreEmu(config)
|
||||
self.config = config
|
||||
SocketServer.TCPServer.__init__(self, server_address, handler_class)
|
||||
120
daemon/core/data.py
Normal file
120
daemon/core/data.py
Normal file
|
|
@ -0,0 +1,120 @@
|
|||
"""
|
||||
CORE data objects.
|
||||
"""
|
||||
|
||||
import collections
|
||||
|
||||
ConfigData = collections.namedtuple("ConfigData", [
|
||||
"message_type",
|
||||
"node",
|
||||
"object",
|
||||
"type",
|
||||
"data_types",
|
||||
"data_values",
|
||||
"captions",
|
||||
"bitmap",
|
||||
"possible_values",
|
||||
"groups",
|
||||
"session",
|
||||
"interface_number",
|
||||
"network_id",
|
||||
"opaque"
|
||||
])
|
||||
ConfigData.__new__.__defaults__ = (None,) * len(ConfigData._fields)
|
||||
|
||||
EventData = collections.namedtuple("EventData", [
|
||||
"node",
|
||||
"event_type",
|
||||
"name",
|
||||
"data",
|
||||
"time",
|
||||
"session"
|
||||
])
|
||||
EventData.__new__.__defaults__ = (None,) * len(EventData._fields)
|
||||
|
||||
ExceptionData = collections.namedtuple("ExceptionData", [
|
||||
"node",
|
||||
"session",
|
||||
"level",
|
||||
"source",
|
||||
"date",
|
||||
"text",
|
||||
"opaque"
|
||||
])
|
||||
ExceptionData.__new__.__defaults__ = (None,) * len(ExceptionData._fields)
|
||||
|
||||
FileData = collections.namedtuple("FileData", [
|
||||
"message_type",
|
||||
"node",
|
||||
"name",
|
||||
"mode",
|
||||
"number",
|
||||
"type",
|
||||
"source",
|
||||
"session",
|
||||
"data",
|
||||
"compressed_data"
|
||||
])
|
||||
FileData.__new__.__defaults__ = (None,) * len(FileData._fields)
|
||||
|
||||
NodeData = collections.namedtuple("NodeData", [
|
||||
"message_type",
|
||||
"id",
|
||||
"node_type",
|
||||
"name",
|
||||
"ip_address",
|
||||
"mac_address",
|
||||
"ip6_address",
|
||||
"model",
|
||||
"emulation_id",
|
||||
"emulation_server",
|
||||
"session",
|
||||
"x_position",
|
||||
"y_position",
|
||||
"canvas",
|
||||
"network_id",
|
||||
"services",
|
||||
"latitude",
|
||||
"longitude",
|
||||
"altitude",
|
||||
"icon",
|
||||
"opaque"
|
||||
])
|
||||
NodeData.__new__.__defaults__ = (None,) * len(NodeData._fields)
|
||||
|
||||
LinkData = collections.namedtuple("LinkData", [
|
||||
"message_type",
|
||||
"node1_id",
|
||||
"node2_id",
|
||||
"delay",
|
||||
"bandwidth",
|
||||
"per",
|
||||
"dup",
|
||||
"jitter",
|
||||
"mer",
|
||||
"burst",
|
||||
"session",
|
||||
"mburst",
|
||||
"link_type",
|
||||
"gui_attributes",
|
||||
"unidirectional",
|
||||
"emulation_id",
|
||||
"network_id",
|
||||
"key",
|
||||
"interface1_id",
|
||||
"interface1_name",
|
||||
"interface1_ip4",
|
||||
"interface1_ip4_mask",
|
||||
"interface1_mac",
|
||||
"interface1_ip6",
|
||||
"interface1_ip6_mask",
|
||||
"interface2_id",
|
||||
"interface2_name",
|
||||
"interface2_ip4",
|
||||
"interface2_ip4_mask",
|
||||
"interface2_mac",
|
||||
"interface2_ip6",
|
||||
"interface2_ip6_mask",
|
||||
"opaque"
|
||||
])
|
||||
LinkData.__new__.__defaults__ = (None,) * len(LinkData._fields)
|
||||
42
daemon/core/emane/bypass.py
Normal file
42
daemon/core/emane/bypass.py
Normal file
|
|
@ -0,0 +1,42 @@
|
|||
"""
|
||||
EMANE Bypass model for CORE
|
||||
"""
|
||||
from core.conf import ConfigGroup
|
||||
from core.conf import Configuration
|
||||
from core.emane import emanemodel
|
||||
from core.enumerations import ConfigDataTypes
|
||||
|
||||
|
||||
class EmaneBypassModel(emanemodel.EmaneModel):
|
||||
name = "emane_bypass"
|
||||
|
||||
# values to ignore, when writing xml files
|
||||
config_ignore = {"none"}
|
||||
|
||||
# mac definitions
|
||||
mac_library = "bypassmaclayer"
|
||||
mac_config = [
|
||||
Configuration(
|
||||
_id="none",
|
||||
_type=ConfigDataTypes.BOOL,
|
||||
default="0",
|
||||
options=["True", "False"],
|
||||
label="There are no parameters for the bypass model."
|
||||
)
|
||||
]
|
||||
|
||||
# phy definitions
|
||||
phy_library = "bypassphylayer"
|
||||
phy_config = []
|
||||
|
||||
@classmethod
|
||||
def load(cls, emane_prefix):
|
||||
# ignore default logic
|
||||
pass
|
||||
|
||||
# override config groups
|
||||
@classmethod
|
||||
def config_groups(cls):
|
||||
return [
|
||||
ConfigGroup("Bypass Parameters", 1, 1),
|
||||
]
|
||||
142
daemon/core/emane/commeffect.py
Normal file
142
daemon/core/emane/commeffect.py
Normal file
|
|
@ -0,0 +1,142 @@
|
|||
"""
|
||||
commeffect.py: EMANE CommEffect model for CORE
|
||||
"""
|
||||
|
||||
import logging
|
||||
import os
|
||||
|
||||
from lxml import etree
|
||||
|
||||
from core.conf import ConfigGroup
|
||||
from core.emane import emanemanifest
|
||||
from core.emane import emanemodel
|
||||
from core.xml import emanexml
|
||||
|
||||
try:
|
||||
from emane.events.commeffectevent import CommEffectEvent
|
||||
except ImportError:
|
||||
try:
|
||||
from emanesh.events.commeffectevent import CommEffectEvent
|
||||
except ImportError:
|
||||
logging.debug("compatible emane python bindings not installed")
|
||||
|
||||
|
||||
def convert_none(x):
|
||||
"""
|
||||
Helper to use 0 for None values.
|
||||
"""
|
||||
if isinstance(x, basestring):
|
||||
x = float(x)
|
||||
if x is None:
|
||||
return 0
|
||||
else:
|
||||
return int(x)
|
||||
|
||||
|
||||
class EmaneCommEffectModel(emanemodel.EmaneModel):
|
||||
name = "emane_commeffect"
|
||||
|
||||
shim_library = "commeffectshim"
|
||||
shim_xml = "commeffectshim.xml"
|
||||
shim_defaults = {}
|
||||
config_shim = []
|
||||
|
||||
# comm effect does not need the default phy and external configurations
|
||||
phy_config = []
|
||||
external_config = []
|
||||
|
||||
@classmethod
|
||||
def load(cls, emane_prefix):
|
||||
shim_xml_path = os.path.join(emane_prefix, "share/emane/manifest", cls.shim_xml)
|
||||
cls.config_shim = emanemanifest.parse(shim_xml_path, cls.shim_defaults)
|
||||
|
||||
@classmethod
|
||||
def configurations(cls):
|
||||
return cls.config_shim
|
||||
|
||||
@classmethod
|
||||
def config_groups(cls):
|
||||
return [
|
||||
ConfigGroup("CommEffect SHIM Parameters", 1, len(cls.configurations()))
|
||||
]
|
||||
|
||||
def build_xml_files(self, config, interface=None):
|
||||
"""
|
||||
Build the necessary nem and commeffect XMLs in the given path.
|
||||
If an individual NEM has a nonstandard config, we need to build
|
||||
that file also. Otherwise the WLAN-wide
|
||||
nXXemane_commeffectnem.xml, nXXemane_commeffectshim.xml are used.
|
||||
|
||||
:param dict config: emane model configuration for the node and interface
|
||||
:param interface: interface for the emane node
|
||||
:return: nothing
|
||||
"""
|
||||
# retrieve xml names
|
||||
nem_name = emanexml.nem_file_name(self, interface)
|
||||
shim_name = emanexml.shim_file_name(self, interface)
|
||||
|
||||
# create and write nem document
|
||||
nem_element = etree.Element("nem", name="%s NEM" % self.name, type="unstructured")
|
||||
transport_type = "virtual"
|
||||
if interface and interface.transport_type == "raw":
|
||||
transport_type = "raw"
|
||||
transport_file = emanexml.transport_file_name(self.object_id, transport_type)
|
||||
etree.SubElement(nem_element, "transport", definition=transport_file)
|
||||
|
||||
# set shim configuration
|
||||
etree.SubElement(nem_element, "shim", definition=shim_name)
|
||||
|
||||
nem_file = os.path.join(self.session.session_dir, nem_name)
|
||||
emanexml.create_file(nem_element, "nem", nem_file)
|
||||
|
||||
# create and write shim document
|
||||
shim_element = etree.Element("shim", name="%s SHIM" % self.name, library=self.shim_library)
|
||||
|
||||
# append all shim options (except filterfile) to shimdoc
|
||||
for configuration in self.config_shim:
|
||||
name = configuration.id
|
||||
if name == "filterfile":
|
||||
continue
|
||||
value = config[name]
|
||||
emanexml.add_param(shim_element, name, value)
|
||||
|
||||
# empty filterfile is not allowed
|
||||
ff = config["filterfile"]
|
||||
if ff.strip() != "":
|
||||
emanexml.add_param(shim_element, "filterfile", ff)
|
||||
|
||||
shim_file = os.path.join(self.session.session_dir, shim_name)
|
||||
emanexml.create_file(shim_element, "shim", shim_file)
|
||||
|
||||
def linkconfig(self, netif, bw=None, delay=None, loss=None, duplicate=None, jitter=None, netif2=None):
|
||||
"""
|
||||
Generate CommEffect events when a Link Message is received having
|
||||
link parameters.
|
||||
"""
|
||||
service = self.session.emane.service
|
||||
if service is None:
|
||||
logging.warn("%s: EMANE event service unavailable", self.name)
|
||||
return
|
||||
|
||||
if netif is None or netif2 is None:
|
||||
logging.warn("%s: missing NEM information", self.name)
|
||||
return
|
||||
|
||||
# TODO: batch these into multiple events per transmission
|
||||
# TODO: may want to split out seconds portion of delay and jitter
|
||||
event = CommEffectEvent()
|
||||
emane_node = self.session.get_object(self.object_id)
|
||||
nemid = emane_node.getnemid(netif)
|
||||
nemid2 = emane_node.getnemid(netif2)
|
||||
mbw = bw
|
||||
logging.info("sending comm effect event")
|
||||
event.append(
|
||||
nemid,
|
||||
latency=convert_none(delay),
|
||||
jitter=convert_none(jitter),
|
||||
loss=convert_none(loss),
|
||||
duplicate=convert_none(duplicate),
|
||||
unicast=long(convert_none(bw)),
|
||||
broadcast=long(convert_none(mbw))
|
||||
)
|
||||
service.publish(nemid2, event)
|
||||
File diff suppressed because it is too large
Load diff
|
|
@ -1,10 +1,7 @@
|
|||
import logging
|
||||
from pathlib import Path
|
||||
|
||||
from core.config import Configuration
|
||||
from core.emulator.enumerations import ConfigDataTypes
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
from core.conf import Configuration
|
||||
from core.enumerations import ConfigDataTypes
|
||||
|
||||
manifest = None
|
||||
try:
|
||||
|
|
@ -13,16 +10,15 @@ except ImportError:
|
|||
try:
|
||||
from emanesh import manifest
|
||||
except ImportError:
|
||||
manifest = None
|
||||
logger.debug("compatible emane python bindings not installed")
|
||||
logging.debug("compatible emane python bindings not installed")
|
||||
|
||||
|
||||
def _type_value(config_type: str) -> ConfigDataTypes:
|
||||
def _type_value(config_type):
|
||||
"""
|
||||
Convert emane configuration type to core configuration value.
|
||||
|
||||
:param config_type: emane configuration type
|
||||
:return: core config type
|
||||
:param str config_type: emane configuration type
|
||||
:return:
|
||||
"""
|
||||
config_type = config_type.upper()
|
||||
if config_type == "DOUBLE":
|
||||
|
|
@ -32,13 +28,14 @@ def _type_value(config_type: str) -> ConfigDataTypes:
|
|||
return ConfigDataTypes[config_type]
|
||||
|
||||
|
||||
def _get_possible(config_type: str, config_regex: str) -> list[str]:
|
||||
def _get_possible(config_type, config_regex):
|
||||
"""
|
||||
Retrieve possible config value options based on emane regexes.
|
||||
|
||||
:param config_type: emane configuration type
|
||||
:param config_regex: emane configuration regex
|
||||
:param str config_type: emane configuration type
|
||||
:param str config_regex: emane configuration regex
|
||||
:return: a string listing comma delimited values, if needed, empty string otherwise
|
||||
:rtype: list
|
||||
"""
|
||||
if config_type == "bool":
|
||||
return ["On", "Off"]
|
||||
|
|
@ -50,14 +47,16 @@ def _get_possible(config_type: str, config_regex: str) -> list[str]:
|
|||
return []
|
||||
|
||||
|
||||
def _get_default(config_type_name: str, config_value: list[str]) -> str:
|
||||
def _get_default(config_type_name, config_value):
|
||||
"""
|
||||
Convert default configuration values to one used by core.
|
||||
|
||||
:param config_type_name: emane configuration type name
|
||||
:param config_value: emane configuration value list
|
||||
:param str config_type_name: emane configuration type name
|
||||
:param list config_value: emane configuration value list
|
||||
:return: default core config value
|
||||
:rtype: str
|
||||
"""
|
||||
|
||||
config_default = ""
|
||||
|
||||
if config_type_name == "bool":
|
||||
|
|
@ -73,14 +72,14 @@ def _get_default(config_type_name: str, config_value: list[str]) -> str:
|
|||
return config_default
|
||||
|
||||
|
||||
def parse(manifest_path: Path, defaults: dict[str, str]) -> list[Configuration]:
|
||||
def parse(manifest_path, defaults):
|
||||
"""
|
||||
Parses a valid emane manifest file and converts the provided configuration values
|
||||
into ones used by core.
|
||||
Parses a valid emane manifest file and converts the provided configuration values into ones used by core.
|
||||
|
||||
:param manifest_path: absolute manifest file path
|
||||
:param defaults: used to override default values for configurations
|
||||
:param str manifest_path: absolute manifest file path
|
||||
:param dict defaults: used to override default values for configurations
|
||||
:return: list of core configuration values
|
||||
:rtype: list
|
||||
"""
|
||||
|
||||
# no results when emane bindings are not present
|
||||
|
|
@ -88,7 +87,7 @@ def parse(manifest_path: Path, defaults: dict[str, str]) -> list[Configuration]:
|
|||
return []
|
||||
|
||||
# load configuration file
|
||||
manifest_file = manifest.Manifest(str(manifest_path))
|
||||
manifest_file = manifest.Manifest(manifest_path)
|
||||
manifest_configurations = manifest_file.getAllConfiguration()
|
||||
|
||||
configurations = []
|
||||
|
|
@ -116,14 +115,14 @@ def parse(manifest_path: Path, defaults: dict[str, str]) -> list[Configuration]:
|
|||
# define description and account for gui quirks
|
||||
config_descriptions = config_name
|
||||
if config_name.endswith("uri"):
|
||||
config_descriptions = f"{config_descriptions} file"
|
||||
config_descriptions = "%s file" % config_descriptions
|
||||
|
||||
configuration = Configuration(
|
||||
id=config_name,
|
||||
type=config_type_value,
|
||||
_id=config_name,
|
||||
_type=config_type_value,
|
||||
default=config_default,
|
||||
options=possible,
|
||||
label=config_descriptions,
|
||||
label=config_descriptions
|
||||
)
|
||||
configurations.append(configuration)
|
||||
|
||||
|
|
|
|||
|
|
@ -2,21 +2,15 @@
|
|||
Defines Emane Models used within CORE.
|
||||
"""
|
||||
import logging
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
import os
|
||||
|
||||
from core.config import ConfigBool, ConfigGroup, ConfigString, Configuration
|
||||
from core.conf import ConfigGroup
|
||||
from core.conf import Configuration
|
||||
from core.emane import emanemanifest
|
||||
from core.emulator.data import LinkOptions
|
||||
from core.errors import CoreError
|
||||
from core.location.mobility import WirelessModel
|
||||
from core.nodes.interface import CoreInterface
|
||||
from core.enumerations import ConfigDataTypes
|
||||
from core.mobility import WirelessModel
|
||||
from core.xml import emanexml
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
DEFAULT_DEV: str = "ctrl0"
|
||||
MANIFEST_PATH: str = "share/emane/manifest"
|
||||
|
||||
|
||||
class EmaneModel(WirelessModel):
|
||||
"""
|
||||
|
|
@ -24,151 +18,142 @@ class EmaneModel(WirelessModel):
|
|||
handling configuration messages based on the list of
|
||||
configurable parameters. Helper functions also live here.
|
||||
"""
|
||||
|
||||
# default platform configuration settings
|
||||
platform_controlport: str = "controlportendpoint"
|
||||
platform_xml: str = "nemmanager.xml"
|
||||
platform_defaults: dict[str, str] = {
|
||||
"eventservicedevice": DEFAULT_DEV,
|
||||
"eventservicegroup": "224.1.2.8:45703",
|
||||
"otamanagerdevice": DEFAULT_DEV,
|
||||
"otamanagergroup": "224.1.2.8:45702",
|
||||
}
|
||||
platform_config: list[Configuration] = []
|
||||
|
||||
# default mac configuration settings
|
||||
mac_library: Optional[str] = None
|
||||
mac_xml: Optional[str] = None
|
||||
mac_defaults: dict[str, str] = {}
|
||||
mac_config: list[Configuration] = []
|
||||
mac_library = None
|
||||
mac_xml = None
|
||||
mac_defaults = {}
|
||||
mac_config = []
|
||||
|
||||
# default phy configuration settings, using the universal model
|
||||
phy_library: Optional[str] = None
|
||||
phy_xml: str = "emanephy.xml"
|
||||
phy_defaults: dict[str, str] = {
|
||||
phy_library = None
|
||||
phy_xml = "emanephy.xml"
|
||||
phy_defaults = {
|
||||
"subid": "1",
|
||||
"propagationmodel": "2ray",
|
||||
"noisemode": "none",
|
||||
"noisemode": "none"
|
||||
}
|
||||
phy_config: list[Configuration] = []
|
||||
phy_config = []
|
||||
|
||||
# support for external configurations
|
||||
external_config: list[Configuration] = [
|
||||
ConfigBool(id="external", default="0"),
|
||||
ConfigString(id="platformendpoint", default="127.0.0.1:40001"),
|
||||
ConfigString(id="transportendpoint", default="127.0.0.1:50002"),
|
||||
external_config = [
|
||||
Configuration("external", ConfigDataTypes.BOOL, default="0"),
|
||||
Configuration("platformendpoint", ConfigDataTypes.STRING, default="127.0.0.1:40001"),
|
||||
Configuration("transportendpoint", ConfigDataTypes.STRING, default="127.0.0.1:50002")
|
||||
]
|
||||
|
||||
config_ignore: set[str] = set()
|
||||
config_ignore = set()
|
||||
|
||||
@classmethod
|
||||
def load(cls, emane_prefix: Path) -> None:
|
||||
def load(cls, emane_prefix):
|
||||
"""
|
||||
Called after being loaded within the EmaneManager. Provides configured
|
||||
emane_prefix for parsing xml files.
|
||||
Called after being loaded within the EmaneManager. Provides configured emane_prefix for
|
||||
parsing xml files.
|
||||
|
||||
:param emane_prefix: configured emane prefix path
|
||||
:param str emane_prefix: configured emane prefix path
|
||||
:return: nothing
|
||||
"""
|
||||
cls._load_platform_config(emane_prefix)
|
||||
manifest_path = "share/emane/manifest"
|
||||
# load mac configuration
|
||||
mac_xml_path = emane_prefix / MANIFEST_PATH / cls.mac_xml
|
||||
mac_xml_path = os.path.join(emane_prefix, manifest_path, cls.mac_xml)
|
||||
cls.mac_config = emanemanifest.parse(mac_xml_path, cls.mac_defaults)
|
||||
|
||||
# load phy configuration
|
||||
phy_xml_path = emane_prefix / MANIFEST_PATH / cls.phy_xml
|
||||
phy_xml_path = os.path.join(emane_prefix, manifest_path, cls.phy_xml)
|
||||
cls.phy_config = emanemanifest.parse(phy_xml_path, cls.phy_defaults)
|
||||
|
||||
@classmethod
|
||||
def _load_platform_config(cls, emane_prefix: Path) -> None:
|
||||
platform_xml_path = emane_prefix / MANIFEST_PATH / cls.platform_xml
|
||||
cls.platform_config = emanemanifest.parse(
|
||||
platform_xml_path, cls.platform_defaults
|
||||
)
|
||||
# remove controlport configuration, since core will set this directly
|
||||
controlport_index = None
|
||||
for index, configuration in enumerate(cls.platform_config):
|
||||
if configuration.id == cls.platform_controlport:
|
||||
controlport_index = index
|
||||
break
|
||||
if controlport_index is not None:
|
||||
cls.platform_config.pop(controlport_index)
|
||||
|
||||
@classmethod
|
||||
def configurations(cls) -> list[Configuration]:
|
||||
def configurations(cls):
|
||||
"""
|
||||
Returns the combination all all configurations (mac, phy, and external).
|
||||
|
||||
:return: all configurations
|
||||
:rtype: list[Configuration]
|
||||
"""
|
||||
return (
|
||||
cls.platform_config + cls.mac_config + cls.phy_config + cls.external_config
|
||||
)
|
||||
return cls.mac_config + cls.phy_config + cls.external_config
|
||||
|
||||
@classmethod
|
||||
def config_groups(cls) -> list[ConfigGroup]:
|
||||
def config_groups(cls):
|
||||
"""
|
||||
Returns the defined configuration groups.
|
||||
|
||||
:return: list of configuration groups.
|
||||
:rtype: list[ConfigGroup]
|
||||
"""
|
||||
platform_len = len(cls.platform_config)
|
||||
mac_len = len(cls.mac_config) + platform_len
|
||||
mac_len = len(cls.mac_config)
|
||||
phy_len = len(cls.phy_config) + mac_len
|
||||
config_len = len(cls.configurations())
|
||||
return [
|
||||
ConfigGroup("Platform Parameters", 1, platform_len),
|
||||
ConfigGroup("MAC Parameters", platform_len + 1, mac_len),
|
||||
ConfigGroup("MAC Parameters", 1, mac_len),
|
||||
ConfigGroup("PHY Parameters", mac_len + 1, phy_len),
|
||||
ConfigGroup("External Parameters", phy_len + 1, config_len),
|
||||
ConfigGroup("External Parameters", phy_len + 1, config_len)
|
||||
]
|
||||
|
||||
def build_xml_files(self, config: dict[str, str], iface: CoreInterface) -> None:
|
||||
def build_xml_files(self, config, interface=None):
|
||||
"""
|
||||
Builds xml files for this emane model. Creates a nem.xml file that points to
|
||||
both mac.xml and phy.xml definitions.
|
||||
Builds xml files for this emane model. Creates a nem.xml file that points to both mac.xml and phy.xml
|
||||
definitions.
|
||||
|
||||
:param config: emane model configuration for the node and interface
|
||||
:param iface: interface to run emane for
|
||||
:param dict config: emane model configuration for the node and interface
|
||||
:param interface: interface for the emane node
|
||||
:return: nothing
|
||||
"""
|
||||
# create nem, mac, and phy xml files
|
||||
emanexml.create_nem_xml(self, iface, config)
|
||||
emanexml.create_mac_xml(self, iface, config)
|
||||
emanexml.create_phy_xml(self, iface, config)
|
||||
emanexml.create_transport_xml(iface, config)
|
||||
nem_name = emanexml.nem_file_name(self, interface)
|
||||
mac_name = emanexml.mac_file_name(self, interface)
|
||||
phy_name = emanexml.phy_file_name(self, interface)
|
||||
|
||||
def post_startup(self, iface: CoreInterface) -> None:
|
||||
# check if this is external
|
||||
transport_type = "virtual"
|
||||
if interface and interface.transport_type == "raw":
|
||||
transport_type = "raw"
|
||||
transport_name = emanexml.transport_file_name(self.object_id, transport_type)
|
||||
|
||||
# create nem xml file
|
||||
nem_file = os.path.join(self.session.session_dir, nem_name)
|
||||
emanexml.create_nem_xml(self, config, nem_file, transport_name, mac_name, phy_name)
|
||||
|
||||
# create mac xml file
|
||||
mac_file = os.path.join(self.session.session_dir, mac_name)
|
||||
emanexml.create_mac_xml(self, config, mac_file)
|
||||
|
||||
# create phy xml file
|
||||
phy_file = os.path.join(self.session.session_dir, phy_name)
|
||||
emanexml.create_phy_xml(self, config, phy_file)
|
||||
|
||||
def post_startup(self):
|
||||
"""
|
||||
Logic to execute after the emane manager is finished with startup.
|
||||
|
||||
:param iface: interface for post startup
|
||||
:return: nothing
|
||||
"""
|
||||
logger.debug("emane model(%s) has no post setup tasks", self.name)
|
||||
logging.info("emane model(%s) has no post setup tasks", self.name)
|
||||
|
||||
def update(self, moved_ifaces: list[CoreInterface]) -> None:
|
||||
def update(self, moved, moved_netifs):
|
||||
"""
|
||||
Invoked from MobilityModel when nodes are moved; this causes
|
||||
emane location events to be generated for the nodes in the moved
|
||||
list, making EmaneModels compatible with Ns2ScriptedMobility.
|
||||
|
||||
:param moved_ifaces: interfaces that were moved
|
||||
:return: nothing
|
||||
:param bool moved: were nodes moved
|
||||
:param list moved_netifs: interfaces that were moved
|
||||
:return:
|
||||
"""
|
||||
try:
|
||||
self.session.emane.set_nem_positions(moved_ifaces)
|
||||
except CoreError:
|
||||
logger.exception("error during update")
|
||||
wlan = self.session.get_object(self.object_id)
|
||||
wlan.setnempositions(moved_netifs)
|
||||
except KeyError:
|
||||
logging.exception("error during update")
|
||||
|
||||
def linkconfig(
|
||||
self, iface: CoreInterface, options: LinkOptions, iface2: CoreInterface = None
|
||||
) -> None:
|
||||
def linkconfig(self, netif, bw=None, delay=None, loss=None, duplicate=None, jitter=None, netif2=None):
|
||||
"""
|
||||
Invoked when a Link Message is received. Default is unimplemented.
|
||||
|
||||
:param iface: interface one
|
||||
:param options: options for configuring link
|
||||
:param iface2: interface two
|
||||
:param core.netns.vif.Veth netif: interface one
|
||||
:param bw: bandwidth to set to
|
||||
:param delay: packet delay to set to
|
||||
:param loss: packet loss to set to
|
||||
:param duplicate: duplicate percentage to set to
|
||||
:param jitter: jitter to set to
|
||||
:param core.netns.vif.Veth netif2: interface two
|
||||
:return: nothing
|
||||
"""
|
||||
logger.warning("emane model(%s) does not support link config", self.name)
|
||||
logging.warn("emane model(%s) does not support link configuration", self.name)
|
||||
|
|
|
|||
23
daemon/core/emane/ieee80211abg.py
Normal file
23
daemon/core/emane/ieee80211abg.py
Normal file
|
|
@ -0,0 +1,23 @@
|
|||
"""
|
||||
ieee80211abg.py: EMANE IEEE 802.11abg model for CORE
|
||||
"""
|
||||
import os
|
||||
|
||||
from core.emane import emanemodel
|
||||
|
||||
|
||||
class EmaneIeee80211abgModel(emanemodel.EmaneModel):
|
||||
# model name
|
||||
name = "emane_ieee80211abg"
|
||||
|
||||
# mac configuration
|
||||
mac_library = "ieee80211abgmaclayer"
|
||||
mac_xml = "ieee80211abgmaclayer.xml"
|
||||
|
||||
@classmethod
|
||||
def load(cls, emane_prefix):
|
||||
cls.mac_defaults["pcrcurveuri"] = os.path.join(
|
||||
emane_prefix,
|
||||
"share/emane/xml/models/mac/ieee80211abg/ieee80211pcr.xml"
|
||||
)
|
||||
super(EmaneIeee80211abgModel, cls).load(emane_prefix)
|
||||
|
|
@ -1,328 +0,0 @@
|
|||
import logging
|
||||
import sched
|
||||
import threading
|
||||
import time
|
||||
from typing import TYPE_CHECKING, Optional
|
||||
|
||||
from lxml import etree
|
||||
|
||||
from core.emane.nodes import EmaneNet
|
||||
from core.emulator.data import LinkData
|
||||
from core.emulator.enumerations import LinkTypes, MessageFlags
|
||||
from core.nodes.network import CtrlNet
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
try:
|
||||
from emane import shell
|
||||
except ImportError:
|
||||
try:
|
||||
from emanesh import shell
|
||||
except ImportError:
|
||||
shell = None
|
||||
logger.debug("compatible emane python bindings not installed")
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from core.emane.emanemanager import EmaneManager
|
||||
|
||||
MAC_COMPONENT_INDEX: int = 1
|
||||
EMANE_RFPIPE: str = "rfpipemaclayer"
|
||||
EMANE_80211: str = "ieee80211abgmaclayer"
|
||||
EMANE_TDMA: str = "tdmaeventschedulerradiomodel"
|
||||
SINR_TABLE: str = "NeighborStatusTable"
|
||||
NEM_SELF: int = 65535
|
||||
|
||||
|
||||
class LossTable:
|
||||
def __init__(self, losses: dict[float, float]) -> None:
|
||||
self.losses: dict[float, float] = losses
|
||||
self.sinrs: list[float] = sorted(self.losses.keys())
|
||||
self.loss_lookup: dict[int, float] = {}
|
||||
for index, value in enumerate(self.sinrs):
|
||||
self.loss_lookup[index] = self.losses[value]
|
||||
self.mac_id: Optional[str] = None
|
||||
|
||||
def get_loss(self, sinr: float) -> float:
|
||||
index = self._get_index(sinr)
|
||||
loss = 100.0 - self.loss_lookup[index]
|
||||
return loss
|
||||
|
||||
def _get_index(self, current_sinr: float) -> int:
|
||||
for index, sinr in enumerate(self.sinrs):
|
||||
if current_sinr <= sinr:
|
||||
return index
|
||||
return len(self.sinrs) - 1
|
||||
|
||||
|
||||
class EmaneLink:
|
||||
def __init__(self, from_nem: int, to_nem: int, sinr: float) -> None:
|
||||
self.from_nem: int = from_nem
|
||||
self.to_nem: int = to_nem
|
||||
self.sinr: float = sinr
|
||||
self.last_seen: Optional[float] = None
|
||||
self.updated: bool = False
|
||||
self.touch()
|
||||
|
||||
def update(self, sinr: float) -> None:
|
||||
self.updated = self.sinr != sinr
|
||||
self.sinr = sinr
|
||||
self.touch()
|
||||
|
||||
def touch(self) -> None:
|
||||
self.last_seen = time.monotonic()
|
||||
|
||||
def is_dead(self, timeout: int) -> bool:
|
||||
return (time.monotonic() - self.last_seen) >= timeout
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return f"EmaneLink({self.from_nem}, {self.to_nem}, {self.sinr})"
|
||||
|
||||
|
||||
class EmaneClient:
|
||||
def __init__(self, address: str, port: int) -> None:
|
||||
self.address: str = address
|
||||
self.client: shell.ControlPortClient = shell.ControlPortClient(
|
||||
self.address, port
|
||||
)
|
||||
self.nems: dict[int, LossTable] = {}
|
||||
self.setup()
|
||||
|
||||
def setup(self) -> None:
|
||||
manifest = self.client.getManifest()
|
||||
for nem_id, components in manifest.items():
|
||||
# get mac config
|
||||
mac_id, _, emane_model = components[MAC_COMPONENT_INDEX]
|
||||
mac_config = self.client.getConfiguration(mac_id)
|
||||
logger.debug(
|
||||
"address(%s) nem(%s) emane(%s)", self.address, nem_id, emane_model
|
||||
)
|
||||
|
||||
# create loss table based on current configuration
|
||||
if emane_model == EMANE_80211:
|
||||
loss_table = self.handle_80211(mac_config)
|
||||
elif emane_model == EMANE_RFPIPE:
|
||||
loss_table = self.handle_rfpipe(mac_config)
|
||||
else:
|
||||
logger.warning("unknown emane link model: %s", emane_model)
|
||||
continue
|
||||
logger.info("monitoring links nem(%s) model(%s)", nem_id, emane_model)
|
||||
loss_table.mac_id = mac_id
|
||||
self.nems[nem_id] = loss_table
|
||||
|
||||
def check_links(
|
||||
self, links: dict[tuple[int, int], EmaneLink], loss_threshold: int
|
||||
) -> None:
|
||||
for from_nem, loss_table in self.nems.items():
|
||||
tables = self.client.getStatisticTable(loss_table.mac_id, (SINR_TABLE,))
|
||||
table = tables[SINR_TABLE][1:][0]
|
||||
for row in table:
|
||||
row = row
|
||||
to_nem = row[0][0]
|
||||
sinr = row[5][0]
|
||||
age = row[-1][0]
|
||||
|
||||
# exclude invalid links
|
||||
is_self = to_nem == NEM_SELF
|
||||
has_valid_age = 0 <= age <= 1
|
||||
if is_self or not has_valid_age:
|
||||
continue
|
||||
|
||||
# check if valid link loss
|
||||
link_key = (from_nem, to_nem)
|
||||
loss = loss_table.get_loss(sinr)
|
||||
if loss < loss_threshold:
|
||||
link = links.get(link_key)
|
||||
if link:
|
||||
link.update(sinr)
|
||||
else:
|
||||
link = EmaneLink(from_nem, to_nem, sinr)
|
||||
links[link_key] = link
|
||||
|
||||
def handle_tdma(self, config: dict[str, tuple]):
    """
    Log the pcr curve file configured for a TDMA mac layer.

    :param config: mac layer configuration values
    """
    pcr_file = config["pcrcurveuri"][0][0]
    logger.debug("tdma pcr: %s", pcr_file)
|
||||
|
||||
def handle_80211(self, config: dict[str, tuple]) -> LossTable:
    """
    Build a loss table for an 802.11abg mac configuration by parsing the
    configured pcr curve file and extracting the sinr/por rows for the
    currently configured unicast rate.

    :param config: mac layer configuration values
    :return: loss table mapping sinr values to por values
    """
    unicastrate = config["unicastrate"][0][0]
    pcr = config["pcrcurveuri"][0][0]
    logger.debug("80211 pcr: %s", pcr)
    tree = etree.parse(pcr)
    root = tree.getroot()
    table = root.find("table")
    losses = {}
    for rate in table.iter("datarate"):
        index = int(rate.get("index"))
        if index == unicastrate:
            for row in rate.iter("row"):
                sinr = float(row.get("sinr"))
                por = float(row.get("por"))
                losses[sinr] = por
            # datarate indexes are unique in the pcr file, stop scanning
            break
    return LossTable(losses)
|
||||
|
||||
def handle_rfpipe(self, config: dict[str, tuple]) -> LossTable:
    """
    Build a loss table for an RF-PIPE mac configuration by parsing the
    configured pcr curve file.

    :param config: mac layer configuration values
    :return: loss table mapping sinr values to por values
    """
    pcr_file = config["pcrcurveuri"][0][0]
    logger.debug("rfpipe pcr: %s", pcr_file)
    pcr_table = etree.parse(pcr_file).getroot().find("table")
    losses = {
        float(entry.get("sinr")): float(entry.get("por"))
        for entry in pcr_table.iter("row")
    }
    return LossTable(losses)
|
||||
|
||||
def stop(self) -> None:
    """
    Shut down the underlying control port client connection.

    :return: nothing
    """
    self.client.stop()
|
||||
|
||||
|
||||
class EmaneLinkMonitor:
    """
    Periodically polls EMANE control ports to discover, update, and expire
    wireless links between NEMs, broadcasting link changes to the session.
    """

    def __init__(self, emane_manager: "EmaneManager") -> None:
        """
        Create an EmaneLinkMonitor instance.

        :param emane_manager: emane manager whose nodes will be monitored
        """
        self.emane_manager: "EmaneManager" = emane_manager
        # clients connected to discovered emane control addresses
        self.clients: list[EmaneClient] = []
        # directional links discovered so far, keyed by (from_nem, to_nem)
        self.links: dict[tuple[int, int], EmaneLink] = {}
        # links confirmed in both directions, keyed by the sorted nem pair
        self.complete_links: set[tuple[int, int]] = set()
        # the following options are populated from session options in start()
        self.loss_threshold: Optional[int] = None
        self.link_interval: Optional[int] = None
        self.link_timeout: Optional[int] = None
        self.scheduler: Optional[sched.scheduler] = None
        self.running: bool = False

    def start(self) -> None:
        """
        Read monitoring options from the session, connect to emane clients,
        and begin periodic link checks on a daemon thread.

        :return: nothing
        """
        options = self.emane_manager.session.options
        self.loss_threshold = options.get_int("loss_threshold")
        self.link_interval = options.get_int("link_interval")
        self.link_timeout = options.get_int("link_timeout")
        self.initialize()
        if not self.clients:
            logger.info("no valid emane models to monitor links")
            return
        self.scheduler = sched.scheduler()
        self.scheduler.enter(0, 0, self.check_links)
        self.running = True
        thread = threading.Thread(target=self.scheduler.run, daemon=True)
        thread.start()

    def initialize(self) -> None:
        """
        Create emane clients for every discovered control address, keeping
        only clients that report at least one NEM.

        :return: nothing
        """
        addresses = self.get_addresses()
        for address, port in addresses:
            client = EmaneClient(address, port)
            if client.nems:
                self.clients.append(client)

    def get_addresses(self) -> list[tuple[str, int]]:
        """
        Collect (control address, nem port) pairs for all emane nodes.

        :return: list of control address and nem port tuples
        """
        addresses = []
        nodes = self.emane_manager.getnodes()
        for node in nodes:
            control = None
            ports = []
            for iface in node.get_ifaces():
                # control network interface supplies the address
                if isinstance(iface.net, CtrlNet):
                    ip4 = iface.get_ip4()
                    if ip4:
                        control = str(ip4.ip)
                # emane network interfaces supply the nem ports
                if isinstance(iface.net, EmaneNet):
                    port = self.emane_manager.get_nem_port(iface)
                    ports.append(port)
            if control:
                for port in ports:
                    addresses.append((control, port))
        return addresses

    def check_links(self) -> None:
        """
        Poll all clients for link data, then broadcast updates for complete
        links, deletions for dead links, and additions for newly completed
        links. Reschedules itself while the monitor is running.

        :return: nothing
        """
        # check for new links
        previous_links = set(self.links.keys())
        for client in self.clients:
            try:
                client.check_links(self.links, self.loss_threshold)
            except shell.ControlPortException:
                # only worth reporting while the monitor is supposed to run
                if self.running:
                    logger.exception("link monitor error")

        # find new links
        current_links = set(self.links.keys())
        new_links = current_links - previous_links

        # find updated and dead links
        dead_links = []
        for link_id, link in self.links.items():
            complete_id = self.get_complete_id(link_id)
            if link.is_dead(self.link_timeout):
                dead_links.append(link_id)
            elif link.updated and complete_id in self.complete_links:
                link.updated = False
                self.send_link(MessageFlags.NONE, complete_id)

        # announce dead links
        for link_id in dead_links:
            complete_id = self.get_complete_id(link_id)
            if complete_id in self.complete_links:
                self.complete_links.remove(complete_id)
                self.send_link(MessageFlags.DELETE, complete_id)
            del self.links[link_id]

        # announce new links
        for link_id in new_links:
            complete_id = self.get_complete_id(link_id)
            if complete_id in self.complete_links:
                continue
            if self.is_complete_link(link_id):
                self.complete_links.add(complete_id)
                self.send_link(MessageFlags.ADD, complete_id)

        if self.running:
            self.scheduler.enter(self.link_interval, 0, self.check_links)

    def get_complete_id(self, link_id: tuple[int, int]) -> tuple[int, int]:
        """
        Normalize a directional link id to its direction-independent form.

        :param link_id: (from_nem, to_nem) link id
        :return: link id ordered with the smaller nem id first
        """
        value1, value2 = link_id
        if value1 < value2:
            return value1, value2
        else:
            return value2, value1

    def is_complete_link(self, link_id: tuple[int, int]) -> bool:
        """
        Check if a link has been observed in both directions.

        :param link_id: (from_nem, to_nem) link id
        :return: True if both directions are currently known, False otherwise
        """
        reverse_id = link_id[1], link_id[0]
        return link_id in self.links and reverse_id in self.links

    def get_link_label(self, link_id: tuple[int, int]) -> str:
        """
        Build a display label showing the sinr for both directions of a link.

        :param link_id: link id to create a label for
        :return: label of the form "<forward sinr> / <reverse sinr>"
        """
        source_id = tuple(sorted(link_id))
        source_link = self.links[source_id]
        dest_id = link_id[::-1]
        dest_link = self.links[dest_id]
        return f"{source_link.sinr:.1f} / {dest_link.sinr:.1f}"

    def send_link(self, message_type: MessageFlags, link_id: tuple[int, int]) -> None:
        """
        Broadcast a link message for the given nem pair to the session.

        :param message_type: type of link message to broadcast
        :param link_id: link id identifying the nem pair
        :return: nothing
        """
        nem1, nem2 = link_id
        link = self.emane_manager.get_nem_link(nem1, nem2, message_type)
        if link:
            label = self.get_link_label(link_id)
            link.label = label
            self.emane_manager.session.broadcast_link(link)

    def send_message(
        self,
        message_type: MessageFlags,
        label: str,
        node1: int,
        node2: int,
        emane_id: int,
    ) -> None:
        """
        Broadcast a wireless link message between two nodes to the session.

        :param message_type: type of link message being sent
        :param label: label to display on the link
        :param node1: first node id of the link
        :param node2: second node id of the link
        :param emane_id: id of the emane network the link belongs to
        :return: nothing
        """
        color = self.emane_manager.session.get_link_color(emane_id)
        link_data = LinkData(
            message_type=message_type,
            type=LinkTypes.WIRELESS,
            label=label,
            node1_id=node1,
            node2_id=node2,
            network_id=emane_id,
            color=color,
        )
        self.emane_manager.session.broadcast_link(link_data)

    def stop(self) -> None:
        """
        Stop monitoring, shut down all clients, and clear tracked link state.

        :return: nothing
        """
        self.running = False
        for client in self.clients:
            client.stop()
        self.clients.clear()
        self.links.clear()
        self.complete_links.clear()
|
||||
|
|
@ -1,69 +0,0 @@
|
|||
import logging
|
||||
import pkgutil
|
||||
from pathlib import Path
|
||||
|
||||
from core import utils
|
||||
from core.emane import models as emane_models
|
||||
from core.emane.emanemodel import EmaneModel
|
||||
from core.errors import CoreError
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class EmaneModelManager:
    """
    Registry for discovering, loading, and retrieving EMANE model classes.
    """

    # loaded emane model classes, keyed by model name
    models: dict[str, type[EmaneModel]] = {}

    @classmethod
    def _load_models(
        cls, models: list[type[EmaneModel]], emane_prefix: Path, errors: list[str]
    ) -> None:
        """
        Attempt to load the given emane model classes, registering successes.

        :param models: emane model classes to load
        :param emane_prefix: installed emane prefix
        :param errors: list to append names of models that failed to load
        :return: nothing
        """
        for model in models:
            logger.debug("loading emane model: %s", model.name)
            try:
                model.load(emane_prefix)
                cls.models[model.name] = model
            except CoreError as e:
                errors.append(model.name)
                logger.debug("not loading emane model(%s): %s", model.name, e)

    @classmethod
    def load_locals(cls, emane_prefix: Path) -> list[str]:
        """
        Load local core emane models and make them available.

        :param emane_prefix: installed emane prefix
        :return: list of errors encountered loading emane models
        """
        errors: list[str] = []
        for module_info in pkgutil.walk_packages(
            emane_models.__path__, f"{emane_models.__name__}."
        ):
            models = utils.load_module(module_info.name, EmaneModel)
            cls._load_models(models, emane_prefix, errors)
        return errors

    @classmethod
    def load(cls, path: Path, emane_prefix: Path) -> list[str]:
        """
        Search and load custom emane models and make them available.

        :param path: path to search for custom emane models
        :param emane_prefix: installed emane prefix
        :return: list of errors encountered loading emane models
        """
        subdirs = [x for x in path.iterdir() if x.is_dir()]
        subdirs.append(path)
        errors: list[str] = []
        for subdir in subdirs:
            logger.debug("loading emane models from: %s", subdir)
            models = utils.load_classes(subdir, EmaneModel)
            cls._load_models(models, emane_prefix, errors)
        return errors

    @classmethod
    def get(cls, name: str) -> type[EmaneModel]:
        """
        Retrieve a loaded emane model class by name.

        :param name: name of the emane model to retrieve
        :return: emane model class
        :raises CoreError: when no model exists with the given name
        """
        model = cls.models.get(name)
        if model is None:
            # fixed typo in original message ("emame")
            raise CoreError(f"emane model does not exist {name}")
        return model
|
||||
|
|
@ -1,32 +0,0 @@
|
|||
"""
|
||||
EMANE Bypass model for CORE
|
||||
"""
|
||||
from pathlib import Path
|
||||
|
||||
from core.config import ConfigBool, Configuration
|
||||
from core.emane import emanemodel
|
||||
|
||||
|
||||
class EmaneBypassModel(emanemodel.EmaneModel):
    """
    EMANE bypass model, using the bypass mac and phy layers and exposing
    no real model parameters.
    """

    name: str = "emane_bypass"

    # values to ignore, when writing xml files
    config_ignore: set[str] = {"none"}

    # mac definitions
    mac_library: str = "bypassmaclayer"
    mac_config: list[Configuration] = [
        ConfigBool(
            id="none",
            default="0",
            label="There are no parameters for the bypass model.",
        )
    ]

    # phy definitions
    phy_library: str = "bypassphylayer"
    phy_config: list[Configuration] = []

    @classmethod
    def load(cls, emane_prefix: Path) -> None:
        """
        Load platform configuration; the bypass model has no manifest to parse.

        :param emane_prefix: installed emane prefix
        :return: nothing
        """
        cls._load_platform_config(emane_prefix)
|
||||
|
|
@ -1,142 +0,0 @@
|
|||
"""
|
||||
commeffect.py: EMANE CommEffect model for CORE
|
||||
"""
|
||||
|
||||
import logging
|
||||
from pathlib import Path
|
||||
|
||||
from lxml import etree
|
||||
|
||||
from core.config import ConfigGroup, Configuration
|
||||
from core.emane import emanemanifest, emanemodel
|
||||
from core.emulator.data import LinkOptions
|
||||
from core.nodes.interface import CoreInterface
|
||||
from core.xml import emanexml
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
try:
|
||||
from emane.events.commeffectevent import CommEffectEvent
|
||||
except ImportError:
|
||||
try:
|
||||
from emanesh.events.commeffectevent import CommEffectEvent
|
||||
except ImportError:
|
||||
CommEffectEvent = None
|
||||
logger.debug("compatible emane python bindings not installed")
|
||||
|
||||
|
||||
def convert_none(x: float) -> int:
    """
    Convert a value to an int, mapping None to 0.

    :param x: value to convert; may be a numeric string, a number, or None
    :return: integer value, 0 when x is None
    """
    if isinstance(x, str):
        x = float(x)
    return 0 if x is None else int(x)
|
||||
|
||||
|
||||
class EmaneCommEffectModel(emanemodel.EmaneModel):
    """
    EMANE CommEffect model, implemented with a shim layer instead of the
    usual mac/phy layer pair.
    """

    name: str = "emane_commeffect"
    # shim layer definitions
    shim_library: str = "commeffectshim"
    shim_xml: str = "commeffectshim.xml"
    shim_defaults: dict[str, str] = {}
    config_shim: list[Configuration] = []

    # comm effect does not need the default phy and external configurations
    phy_config: list[Configuration] = []
    external_config: list[Configuration] = []

    @classmethod
    def load(cls, emane_prefix: Path) -> None:
        """
        Load platform configuration and parse the shim manifest.

        :param emane_prefix: installed emane prefix
        :return: nothing
        """
        cls._load_platform_config(emane_prefix)
        shim_xml_path = emane_prefix / "share/emane/manifest" / cls.shim_xml
        cls.config_shim = emanemanifest.parse(shim_xml_path, cls.shim_defaults)

    @classmethod
    def configurations(cls) -> list[Configuration]:
        """
        Provide all configurations for this model.

        :return: combined platform and shim configurations
        """
        return cls.platform_config + cls.config_shim

    @classmethod
    def config_groups(cls) -> list[ConfigGroup]:
        """
        Provide the configuration groupings for display purposes.

        :return: platform and shim configuration groups
        """
        platform_len = len(cls.platform_config)
        return [
            ConfigGroup("Platform Parameters", 1, platform_len),
            ConfigGroup(
                "CommEffect SHIM Parameters",
                platform_len + 1,
                len(cls.configurations()),
            ),
        ]

    def build_xml_files(self, config: dict[str, str], iface: CoreInterface) -> None:
        """
        Build the necessary nem and commeffect XMLs in the given path.
        If an individual NEM has a nonstandard config, we need to build
        that file also. Otherwise the WLAN-wide
        nXXemane_commeffectnem.xml, nXXemane_commeffectshim.xml are used.

        :param config: emane model configuration for the node and interface
        :param iface: interface for the emane node
        :return: nothing
        """
        # create and write nem document
        nem_element = etree.Element("nem", name=f"{self.name} NEM", type="unstructured")
        transport_name = emanexml.transport_file_name(iface)
        etree.SubElement(nem_element, "transport", definition=transport_name)

        # set shim configuration
        nem_name = emanexml.nem_file_name(iface)
        shim_name = emanexml.shim_file_name(iface)
        etree.SubElement(nem_element, "shim", definition=shim_name)
        emanexml.create_node_file(iface.node, nem_element, "nem", nem_name)

        # create and write shim document
        shim_element = etree.Element(
            "shim", name=f"{self.name} SHIM", library=self.shim_library
        )

        # append all shim options (except filterfile) to shimdoc
        for configuration in self.config_shim:
            name = configuration.id
            if name == "filterfile":
                continue
            value = config[name]
            emanexml.add_param(shim_element, name, value)

        # empty filterfile is not allowed
        ff = config["filterfile"]
        if ff.strip() != "":
            emanexml.add_param(shim_element, "filterfile", ff)
        emanexml.create_node_file(iface.node, shim_element, "shim", shim_name)

        # create transport xml
        emanexml.create_transport_xml(iface, config)

    def linkconfig(
        self, iface: CoreInterface, options: LinkOptions, iface2: CoreInterface = None
    ) -> None:
        """
        Generate CommEffect events when a Link Message is received having
        link parameters.

        :param iface: interface link options are applied from
        :param options: link options to apply (delay, jitter, loss, dup, bandwidth)
        :param iface2: interface link options are applied to
        :return: nothing
        """
        if iface is None or iface2 is None:
            logger.warning("%s: missing NEM information", self.name)
            return
        # TODO: batch these into multiple events per transmission
        # TODO: may want to split out seconds portion of delay and jitter
        event = CommEffectEvent()
        nem1 = self.session.emane.get_nem_id(iface)
        nem2 = self.session.emane.get_nem_id(iface2)
        logger.info("sending comm effect event")
        event.append(
            nem1,
            latency=convert_none(options.delay),
            jitter=convert_none(options.jitter),
            loss=convert_none(options.loss),
            duplicate=convert_none(options.dup),
            unicast=int(convert_none(options.bandwidth)),
            broadcast=int(convert_none(options.bandwidth)),
        )
        self.session.emane.publish_event(nem2, event)
|
||||
|
|
@ -1,22 +0,0 @@
|
|||
"""
|
||||
ieee80211abg.py: EMANE IEEE 802.11abg model for CORE
|
||||
"""
|
||||
from pathlib import Path
|
||||
|
||||
from core.emane import emanemodel
|
||||
|
||||
|
||||
class EmaneIeee80211abgModel(emanemodel.EmaneModel):
    """
    EMANE IEEE 802.11abg model, using the standard mac/phy layer loading
    with an 802.11-specific pcr curve default.
    """

    # model name
    name: str = "emane_ieee80211abg"

    # mac configuration
    mac_library: str = "ieee80211abgmaclayer"
    mac_xml: str = "ieee80211abgmaclayer.xml"

    @classmethod
    def load(cls, emane_prefix: Path) -> None:
        """
        Set the default pcr curve file, then run the standard model loading.

        :param emane_prefix: installed emane prefix
        :return: nothing
        """
        cls.mac_defaults["pcrcurveuri"] = str(
            emane_prefix / "share/emane/xml/models/mac/ieee80211abg/ieee80211pcr.xml"
        )
        super().load(emane_prefix)
|
||||
|
|
@ -1,22 +0,0 @@
|
|||
"""
|
||||
rfpipe.py: EMANE RF-PIPE model for CORE
|
||||
"""
|
||||
from pathlib import Path
|
||||
|
||||
from core.emane import emanemodel
|
||||
|
||||
|
||||
class EmaneRfPipeModel(emanemodel.EmaneModel):
    """
    EMANE RF-PIPE model, using the standard mac/phy layer loading with an
    rfpipe-specific pcr curve default.
    """

    # model name
    name: str = "emane_rfpipe"

    # mac configuration
    mac_library: str = "rfpipemaclayer"
    mac_xml: str = "rfpipemaclayer.xml"

    @classmethod
    def load(cls, emane_prefix: Path) -> None:
        """
        Set the default pcr curve file, then run the standard model loading.

        :param emane_prefix: installed emane prefix
        :return: nothing
        """
        cls.mac_defaults["pcrcurveuri"] = str(
            emane_prefix / "share/emane/xml/models/mac/rfpipe/rfpipepcr.xml"
        )
        super().load(emane_prefix)
|
||||
|
|
@ -1,65 +0,0 @@
|
|||
"""
|
||||
tdma.py: EMANE TDMA model bindings for CORE
|
||||
"""
|
||||
|
||||
import logging
|
||||
from pathlib import Path
|
||||
|
||||
from core import constants, utils
|
||||
from core.config import ConfigString
|
||||
from core.emane import emanemodel
|
||||
from core.emane.nodes import EmaneNet
|
||||
from core.nodes.interface import CoreInterface
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class EmaneTdmaModel(emanemodel.EmaneModel):
    """
    EMANE TDMA event scheduler model, adding a core-managed schedule file
    option on top of the standard model loading.
    """

    # model name
    name: str = "emane_tdma"

    # mac configuration
    mac_library: str = "tdmaeventschedulerradiomodel"
    mac_xml: str = "tdmaeventschedulerradiomodel.xml"

    # add custom schedule options and ignore it when writing emane xml
    schedule_name: str = "schedule"
    default_schedule: Path = (
        constants.CORE_DATA_DIR / "examples" / "tdma" / "schedule.xml"
    )
    config_ignore: set[str] = {schedule_name}

    @classmethod
    def load(cls, emane_prefix: Path) -> None:
        """
        Set the default pcr curve file, run standard model loading, and
        prepend the core schedule file option to the mac configuration.

        :param emane_prefix: installed emane prefix
        :return: nothing
        """
        cls.mac_defaults["pcrcurveuri"] = str(
            emane_prefix
            / "share/emane/xml/models/mac/tdmaeventscheduler/tdmabasemodelpcr.xml"
        )
        super().load(emane_prefix)
        config_item = ConfigString(
            id=cls.schedule_name,
            default=str(cls.default_schedule),
            label="TDMA schedule file (core)",
        )
        cls.mac_config.insert(0, config_item)

    def post_startup(self, iface: CoreInterface) -> None:
        """
        Apply the configured TDMA schedule to the interface's NEM after the
        emane daemons are running.

        :param iface: interface to apply the schedule to
        :return: nothing
        """
        # get configured schedule
        emane_net = self.session.get_node(self.id, EmaneNet)
        config = self.session.emane.get_iface_config(emane_net, iface)
        schedule = Path(config[self.schedule_name])
        if not schedule.is_file():
            logger.error("ignoring invalid tdma schedule: %s", schedule)
            return
        # initiate tdma schedule
        nem_id = self.session.emane.get_nem_id(iface)
        if not nem_id:
            logger.error("could not find nem for interface")
            return
        service = self.session.emane.nem_service.get(nem_id)
        if service:
            device = service.device
            logger.info(
                "setting up tdma schedule: schedule(%s) device(%s)", schedule, device
            )
            utils.cmd(f"emaneevent-tdmaschedule -i {device} {schedule}")
|
||||
|
|
@ -1,26 +1,15 @@
|
|||
"""
|
||||
Provides an EMANE network node class, which has several attached NEMs that
|
||||
nodes.py: definition of an EmaneNode class for implementing configuration
|
||||
control of an EMANE emulation. An EmaneNode has several attached NEMs that
|
||||
share the same MAC+PHY model.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import time
|
||||
from dataclasses import dataclass
|
||||
from typing import TYPE_CHECKING, Callable, Optional, Union
|
||||
|
||||
from core.emulator.data import InterfaceData, LinkData, LinkOptions
|
||||
from core.emulator.distributed import DistributedServer
|
||||
from core.emulator.enumerations import MessageFlags, RegisterTlvs
|
||||
from core.errors import CoreCommandError, CoreError
|
||||
from core.nodes.base import CoreNetworkBase, CoreNode, NodeOptions
|
||||
from core.nodes.interface import CoreInterface
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from core.emane.emanemodel import EmaneModel
|
||||
from core.emulator.session import Session
|
||||
from core.location.mobility import WayPointMobility
|
||||
from core.coreobj import PyCoreNet
|
||||
from core.enumerations import LinkTypes
|
||||
from core.enumerations import NodeTypes
|
||||
from core.enumerations import RegisterTlvs
|
||||
|
||||
try:
|
||||
from emane.events import LocationEvent
|
||||
|
|
@ -28,263 +17,194 @@ except ImportError:
|
|||
try:
|
||||
from emanesh.events import LocationEvent
|
||||
except ImportError:
|
||||
LocationEvent = None
|
||||
logger.debug("compatible emane python bindings not installed")
|
||||
logging.debug("compatible emane python bindings not installed")
|
||||
|
||||
|
||||
class TunTap(CoreInterface):
|
||||
class EmaneNet(PyCoreNet):
|
||||
"""
|
||||
TUN/TAP virtual device in TAP mode
|
||||
EMANE network base class.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
_id: int,
|
||||
name: str,
|
||||
localname: str,
|
||||
use_ovs: bool,
|
||||
node: CoreNode = None,
|
||||
server: "DistributedServer" = None,
|
||||
) -> None:
|
||||
super().__init__(_id, name, localname, use_ovs, node=node, server=server)
|
||||
self.node: CoreNode = node
|
||||
|
||||
def startup(self) -> None:
|
||||
"""
|
||||
Startup logic for a tunnel tap.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
self.up = True
|
||||
|
||||
def shutdown(self) -> None:
|
||||
"""
|
||||
Shutdown functionality for a tunnel tap.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
if not self.up:
|
||||
return
|
||||
self.up = False
|
||||
|
||||
def waitfor(
|
||||
self, func: Callable[[], int], attempts: int = 10, maxretrydelay: float = 0.25
|
||||
) -> bool:
|
||||
"""
|
||||
Wait for func() to return zero with exponential backoff.
|
||||
|
||||
:param func: function to wait for a result of zero
|
||||
:param attempts: number of attempts to wait for a zero result
|
||||
:param maxretrydelay: maximum retry delay
|
||||
:return: True if wait succeeded, False otherwise
|
||||
"""
|
||||
delay = 0.01
|
||||
result = False
|
||||
for i in range(1, attempts + 1):
|
||||
r = func()
|
||||
if r == 0:
|
||||
result = True
|
||||
break
|
||||
msg = f"attempt {i} failed with nonzero exit status {r}"
|
||||
if i < attempts + 1:
|
||||
msg += ", retrying..."
|
||||
logger.info(msg)
|
||||
time.sleep(delay)
|
||||
delay += delay
|
||||
if delay > maxretrydelay:
|
||||
delay = maxretrydelay
|
||||
else:
|
||||
msg += ", giving up"
|
||||
logger.info(msg)
|
||||
return result
|
||||
|
||||
def nodedevexists(self) -> int:
|
||||
"""
|
||||
Checks if device exists.
|
||||
|
||||
:return: 0 if device exists, 1 otherwise
|
||||
"""
|
||||
try:
|
||||
self.node.node_net_client.device_show(self.name)
|
||||
return 0
|
||||
except CoreCommandError:
|
||||
return 1
|
||||
|
||||
def waitfordevicenode(self) -> None:
|
||||
"""
|
||||
Check for presence of a node device - tap device may not appear right away waits.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
logger.debug("waiting for device node: %s", self.name)
|
||||
count = 0
|
||||
while True:
|
||||
result = self.waitfor(self.nodedevexists)
|
||||
if result:
|
||||
break
|
||||
should_retry = count < 5
|
||||
is_emane_running = self.node.session.emane.emanerunning(self.node)
|
||||
if all([should_retry, is_emane_running]):
|
||||
count += 1
|
||||
else:
|
||||
raise RuntimeError("node device failed to exist")
|
||||
|
||||
def set_ips(self) -> None:
|
||||
"""
|
||||
Set interface ip addresses.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
self.waitfordevicenode()
|
||||
for ip in self.ips():
|
||||
self.node.node_net_client.create_address(self.name, str(ip))
|
||||
apitype = NodeTypes.EMANE.value
|
||||
linktype = LinkTypes.WIRELESS.value
|
||||
# icon used
|
||||
type = "wlan"
|
||||
|
||||
|
||||
@dataclass
|
||||
class EmaneOptions(NodeOptions):
|
||||
emane_model: str = None
|
||||
"""name of emane model to associate an emane network to"""
|
||||
|
||||
|
||||
class EmaneNet(CoreNetworkBase):
|
||||
class EmaneNode(EmaneNet):
|
||||
"""
|
||||
EMANE node contains NEM configuration and causes connected nodes
|
||||
to have TAP interfaces (instead of VEth). These are managed by the
|
||||
Emane controller object that exists in a session.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
session: "Session",
|
||||
_id: int = None,
|
||||
name: str = None,
|
||||
server: DistributedServer = None,
|
||||
options: EmaneOptions = None,
|
||||
) -> None:
|
||||
options = options or EmaneOptions()
|
||||
super().__init__(session, _id, name, server, options)
|
||||
self.conf: str = ""
|
||||
self.mobility: Optional[WayPointMobility] = None
|
||||
model_class = self.session.emane.get_model(options.emane_model)
|
||||
self.wireless_model: Optional["EmaneModel"] = model_class(self.session, self.id)
|
||||
if self.session.is_running():
|
||||
self.session.emane.add_node(self)
|
||||
def __init__(self, session, objid=None, name=None, start=True):
|
||||
super(EmaneNode, self).__init__(session, objid, name, start)
|
||||
self.conf = ""
|
||||
self.up = False
|
||||
self.nemidmap = {}
|
||||
self.model = None
|
||||
self.mobility = None
|
||||
|
||||
@classmethod
|
||||
def create_options(cls) -> EmaneOptions:
|
||||
return EmaneOptions()
|
||||
|
||||
def linkconfig(
|
||||
self, iface: CoreInterface, options: LinkOptions, iface2: CoreInterface = None
|
||||
) -> None:
|
||||
def linkconfig(self, netif, bw=None, delay=None, loss=None, duplicate=None, jitter=None, netif2=None):
|
||||
"""
|
||||
The CommEffect model supports link configuration.
|
||||
"""
|
||||
if not self.wireless_model:
|
||||
if not self.model:
|
||||
return
|
||||
self.wireless_model.linkconfig(iface, options, iface2)
|
||||
return self.model.linkconfig(netif=netif, bw=bw, delay=delay, loss=loss,
|
||||
duplicate=duplicate, jitter=jitter, netif2=netif2)
|
||||
|
||||
def startup(self) -> None:
|
||||
self.up = True
|
||||
def config(self, conf):
|
||||
self.conf = conf
|
||||
|
||||
def shutdown(self) -> None:
|
||||
self.up = False
|
||||
|
||||
def link(self, iface1: CoreInterface, iface2: CoreInterface) -> None:
|
||||
def shutdown(self):
|
||||
pass
|
||||
|
||||
def unlink(self, iface1: CoreInterface, iface2: CoreInterface) -> None:
|
||||
def link(self, netif1, netif2):
|
||||
pass
|
||||
|
||||
def updatemodel(self, config: dict[str, str]) -> None:
|
||||
"""
|
||||
Update configuration for the current model.
|
||||
def unlink(self, netif1, netif2):
|
||||
pass
|
||||
|
||||
:param config: configuration to update model with
|
||||
:return: nothing
|
||||
"""
|
||||
if not self.wireless_model:
|
||||
raise CoreError(f"no model set to update for node({self.name})")
|
||||
logger.info(
|
||||
"node(%s) updating model(%s): %s", self.id, self.wireless_model.name, config
|
||||
)
|
||||
self.wireless_model.update_config(config)
|
||||
def updatemodel(self, config):
|
||||
if not self.model:
|
||||
raise ValueError("no model set to update for node(%s)", self.objid)
|
||||
logging.info("node(%s) updating model(%s): %s", self.objid, self.model.name, config)
|
||||
self.model.set_configs(config, node_id=self.objid)
|
||||
|
||||
def setmodel(
|
||||
self,
|
||||
model: Union[type["EmaneModel"], type["WayPointMobility"]],
|
||||
config: dict[str, str],
|
||||
) -> None:
|
||||
def setmodel(self, model, config):
|
||||
"""
|
||||
set the EmaneModel associated with this node
|
||||
"""
|
||||
if model.config_type == RegisterTlvs.WIRELESS:
|
||||
self.wireless_model = model(session=self.session, _id=self.id)
|
||||
self.wireless_model.update_config(config)
|
||||
elif model.config_type == RegisterTlvs.MOBILITY:
|
||||
self.mobility = model(session=self.session, _id=self.id)
|
||||
logging.info("adding model: %s", model.name)
|
||||
if model.config_type == RegisterTlvs.WIRELESS.value:
|
||||
# EmaneModel really uses values from ConfigurableManager
|
||||
# when buildnemxml() is called, not during init()
|
||||
self.model = model(session=self.session, object_id=self.objid)
|
||||
self.model.update_config(config)
|
||||
elif model.config_type == RegisterTlvs.MOBILITY.value:
|
||||
self.mobility = model(session=self.session, object_id=self.objid)
|
||||
self.mobility.update_config(config)
|
||||
|
||||
def links(self, flags: MessageFlags = MessageFlags.NONE) -> list[LinkData]:
|
||||
links = []
|
||||
emane_manager = self.session.emane
|
||||
# gather current emane links
|
||||
nem_ids = set()
|
||||
for iface in self.get_ifaces():
|
||||
nem_id = emane_manager.get_nem_id(iface)
|
||||
nem_ids.add(nem_id)
|
||||
emane_links = emane_manager.link_monitor.links
|
||||
considered = set()
|
||||
for link_key in emane_links:
|
||||
considered_key = tuple(sorted(link_key))
|
||||
if considered_key in considered:
|
||||
continue
|
||||
considered.add(considered_key)
|
||||
nem1, nem2 = considered_key
|
||||
# ignore links not related to this node
|
||||
if nem1 not in nem_ids and nem2 not in nem_ids:
|
||||
continue
|
||||
# ignore incomplete links
|
||||
if (nem2, nem1) not in emane_links:
|
||||
continue
|
||||
link = emane_manager.get_nem_link(nem1, nem2, flags)
|
||||
if link:
|
||||
links.append(link)
|
||||
return links
|
||||
|
||||
def create_tuntap(self, node: CoreNode, iface_data: InterfaceData) -> CoreInterface:
|
||||
def setnemid(self, netif, nemid):
|
||||
"""
|
||||
Create a tuntap interface for the provided node.
|
||||
|
||||
:param node: node to create tuntap interface for
|
||||
:param iface_data: interface data to create interface with
|
||||
:return: created tuntap interface
|
||||
Record an interface to numerical ID mapping. The Emane controller
|
||||
object manages and assigns these IDs for all NEMs.
|
||||
"""
|
||||
with node.lock:
|
||||
if iface_data.id is not None and iface_data.id in node.ifaces:
|
||||
raise CoreError(
|
||||
f"node({self.id}) interface({iface_data.id}) already exists"
|
||||
)
|
||||
iface_id = (
|
||||
iface_data.id if iface_data.id is not None else node.next_iface_id()
|
||||
)
|
||||
name = iface_data.name if iface_data.name is not None else f"eth{iface_id}"
|
||||
session_id = self.session.short_session_id()
|
||||
localname = f"tap{node.id}.{iface_id}.{session_id}"
|
||||
iface = TunTap(iface_id, name, localname, self.session.use_ovs(), node=node)
|
||||
if iface_data.mac:
|
||||
iface.set_mac(iface_data.mac)
|
||||
for ip in iface_data.get_ips():
|
||||
iface.add_ip(ip)
|
||||
node.ifaces[iface_id] = iface
|
||||
self.attach(iface)
|
||||
if self.up:
|
||||
iface.startup()
|
||||
if self.session.is_running():
|
||||
self.session.emane.start_iface(self, iface)
|
||||
return iface
|
||||
self.nemidmap[netif] = nemid
|
||||
|
||||
def adopt_iface(self, iface: CoreInterface, name: str) -> None:
|
||||
raise CoreError(
|
||||
f"emane network({self.name}) do not support adopting interfaces"
|
||||
)
|
||||
def getnemid(self, netif):
|
||||
"""
|
||||
Given an interface, return its numerical ID.
|
||||
"""
|
||||
if netif not in self.nemidmap:
|
||||
return None
|
||||
else:
|
||||
return self.nemidmap[netif]
|
||||
|
||||
def getnemnetif(self, nemid):
|
||||
"""
|
||||
Given a numerical NEM ID, return its interface. This returns the
|
||||
first interface that matches the given NEM ID.
|
||||
"""
|
||||
for netif in self.nemidmap:
|
||||
if self.nemidmap[netif] == nemid:
|
||||
return netif
|
||||
return None
|
||||
|
||||
def netifs(self, sort=True):
|
||||
"""
|
||||
Retrieve list of linked interfaces sorted by node number.
|
||||
"""
|
||||
return sorted(self._netif.values(), key=lambda ifc: ifc.node.objid)
|
||||
|
||||
def installnetifs(self):
|
||||
"""
|
||||
Install TAP devices into their namespaces. This is done after
|
||||
EMANE daemons have been started, because that is their only chance
|
||||
to bind to the TAPs.
|
||||
"""
|
||||
if self.session.emane.genlocationevents() and self.session.emane.service is None:
|
||||
warntxt = "unable to publish EMANE events because the eventservice "
|
||||
warntxt += "Python bindings failed to load"
|
||||
logging.error(warntxt)
|
||||
|
||||
for netif in self.netifs():
|
||||
external = self.session.emane.get_config("external", self.objid, self.model.name)
|
||||
if external == "0":
|
||||
netif.setaddrs()
|
||||
|
||||
if not self.session.emane.genlocationevents():
|
||||
netif.poshook = None
|
||||
continue
|
||||
|
||||
# at this point we register location handlers for generating
|
||||
# EMANE location events
|
||||
netif.poshook = self.setnemposition
|
||||
x, y, z = netif.node.position.get()
|
||||
self.setnemposition(netif, x, y, z)
|
||||
|
||||
def deinstallnetifs(self):
    """
    Uninstall TAP devices. This invokes their shutdown method for
    any required cleanup; the device may be actually removed when
    emanetransportd terminates.
    """
    for iface in self.netifs():
        # only virtual transports own a TAP that needs shutting down
        transport = iface.transport_type.lower()
        if "virtual" in transport:
            iface.shutdown()
        iface.poshook = None
|
||||
|
||||
def setnemposition(self, netif, x, y, z):
    """
    Publish a NEM location change event using the EMANE event service.

    :param netif: interface to publish a location event for
    :param x: x position
    :param y: y position
    :param z: z position
    :return: nothing
    """
    if self.session.emane.service is None:
        logging.info("position service not available")
        return
    nemid = self.getnemid(netif)
    ifname = netif.localname
    if nemid is None:
        # lazy %-args: message is only built when the level is enabled
        logging.info("nemid for %s is unknown", ifname)
        return
    lat, lon, alt = self.session.location.getgeo(x, y, z)
    logging.info(
        "setnemposition %s (%s) x,y,z=(%d,%d,%s)(%.6f,%.6f,%.6f)",
        ifname, nemid, x, y, z, lat, lon, alt,
    )
    event = LocationEvent()
    # altitude must be an integer or a warning is printed
    # unused: yaw, pitch, roll, azimuth, elevation, velocity
    alt = int(round(alt))
    event.append(nemid, latitude=lat, longitude=lon, altitude=alt)
    self.session.emane.service.publish(0, event)
|
||||
|
||||
def setnempositions(self, moved_netifs):
    """
    Several NEMs have moved, from e.g. a WaypointMobilityModel
    calculation. Generate an EMANE Location Event having several
    entries for each netif that has moved.

    :param moved_netifs: interfaces that have moved
    :return: nothing
    """
    # truthiness check replaces len(...) == 0
    if not moved_netifs:
        return
    if self.session.emane.service is None:
        logging.info("position service not available")
        return
    event = LocationEvent()
    i = 0
    for netif in moved_netifs:
        nemid = self.getnemid(netif)
        ifname = netif.localname
        if nemid is None:
            # lazy %-args: message is only built when the level is enabled
            logging.info("nemid for %s is unknown", ifname)
            continue
        x, y, z = netif.node.getposition()
        lat, lon, alt = self.session.location.getgeo(x, y, z)
        logging.info(
            "setnempositions %d %s (%s) x,y,z=(%d,%d,%s)(%.6f,%.6f,%.6f)",
            i, ifname, nemid, x, y, z, lat, lon, alt,
        )
        # altitude must be an integer or a warning is printed
        alt = int(round(alt))
        event.append(nemid, latitude=lat, longitude=lon, altitude=alt)
        # i only counts interfaces that actually produced an entry
        i += 1
    self.session.emane.service.publish(0, event)
|
||||
|
|
|
|||
23
daemon/core/emane/rfpipe.py
Normal file
23
daemon/core/emane/rfpipe.py
Normal file
|
|
@ -0,0 +1,23 @@
|
|||
"""
|
||||
rfpipe.py: EMANE RF-PIPE model for CORE
|
||||
"""
|
||||
import os
|
||||
|
||||
from core.emane import emanemodel
|
||||
|
||||
|
||||
class EmaneRfPipeModel(emanemodel.EmaneModel):
    """
    EMANE RF-PIPE model bindings for CORE.
    """

    # model name
    name = "emane_rfpipe"

    # mac configuration
    mac_library = "rfpipemaclayer"
    mac_xml = "rfpipemaclayer.xml"

    @classmethod
    def load(cls, emane_prefix):
        """
        Register the default PCR curve for the installed emane prefix,
        then defer to the base class load.
        """
        pcr_curve = os.path.join(
            emane_prefix, "share/emane/xml/models/mac/rfpipe/rfpipepcr.xml"
        )
        cls.mac_defaults["pcrcurveuri"] = pcr_curve
        super(EmaneRfPipeModel, cls).load(emane_prefix)
|
||||
62
daemon/core/emane/tdma.py
Normal file
62
daemon/core/emane/tdma.py
Normal file
|
|
@ -0,0 +1,62 @@
|
|||
"""
|
||||
tdma.py: EMANE TDMA model bindings for CORE
|
||||
"""
|
||||
|
||||
import logging
|
||||
import os
|
||||
|
||||
from core import constants
|
||||
from core.conf import Configuration
|
||||
from core.emane import emanemodel
|
||||
from core.enumerations import ConfigDataTypes
|
||||
from core.misc import utils
|
||||
|
||||
|
||||
class EmaneTdmaModel(emanemodel.EmaneModel):
    """
    EMANE TDMA event scheduler model bindings for CORE.
    """

    # model name
    name = "emane_tdma"

    # mac configuration
    mac_library = "tdmaeventschedulerradiomodel"
    mac_xml = "tdmaeventschedulerradiomodel.xml"

    # add custom schedule options and ignore it when writing emane xml
    schedule_name = "schedule"
    default_schedule = os.path.join(
        constants.CORE_DATA_DIR, "examples", "tdma", "schedule.xml"
    )
    config_ignore = {schedule_name}

    @classmethod
    def load(cls, emane_prefix):
        """
        Register the default PCR curve, run the base class load, then
        prepend the CORE-specific schedule file option.
        """
        cls.mac_defaults["pcrcurveuri"] = os.path.join(
            emane_prefix,
            "share/emane/xml/models/mac/tdmaeventscheduler/tdmabasemodelpcr.xml",
        )
        super(EmaneTdmaModel, cls).load(emane_prefix)
        schedule_option = Configuration(
            _id=cls.schedule_name,
            _type=ConfigDataTypes.STRING,
            default=cls.default_schedule,
            label="TDMA schedule file (core)",
        )
        cls.mac_config.insert(0, schedule_option)

    def post_startup(self):
        """
        Logic to execute after the emane manager is finished with startup.

        :return: nothing
        """
        # get configured schedule
        config = self.session.emane.get_configs(
            node_id=self.object_id, config_type=self.name
        )
        if not config:
            return
        schedule = config[self.schedule_name]
        # get the set event device
        event_device = self.session.emane.event_device
        # initiate tdma schedule
        logging.info(
            "setting up tdma schedule: schedule(%s) device(%s)",
            schedule, event_device,
        )
        utils.check_cmd(["emaneevent-tdmaschedule", "-i", event_device, schedule])
|
||||
|
|
@ -1,67 +0,0 @@
|
|||
from collections.abc import Callable
|
||||
from typing import TypeVar, Union
|
||||
|
||||
from core.emulator.data import (
|
||||
ConfigData,
|
||||
EventData,
|
||||
ExceptionData,
|
||||
FileData,
|
||||
LinkData,
|
||||
NodeData,
|
||||
)
|
||||
from core.errors import CoreError
|
||||
|
||||
T = TypeVar(
|
||||
"T", bound=Union[EventData, ExceptionData, NodeData, LinkData, FileData, ConfigData]
|
||||
)
|
||||
|
||||
|
||||
class BroadcastManager:
    """
    Routes emitted data objects to handlers registered by data type.
    """

    def __init__(self) -> None:
        """
        Creates a BroadcastManager instance.
        """
        # maps a data type to the set of handlers registered for it
        self.handlers: dict[type[T], set[Callable[[T], None]]] = {}

    def send(self, data: T) -> None:
        """
        Retrieve handlers for data, and run all current handlers.

        :param data: data to provide to handlers
        :return: nothing
        """
        for handler in self.handlers.get(type(data), set()):
            handler(data)

    def add_handler(self, data_type: type[T], handler: Callable[[T], None]) -> None:
        """
        Add a handler for a given data type.

        :param data_type: type of data to add handler for
        :param handler: handler to add
        :return: nothing
        :raises CoreError: when the handler is already registered for the type
        """
        registered = self.handlers.setdefault(data_type, set())
        if handler in registered:
            raise CoreError(
                f"cannot add data({data_type}) handler({repr(handler)}), "
                f"already exists"
            )
        registered.add(handler)

    def remove_handler(self, data_type: type[T], handler: Callable[[T], None]) -> None:
        """
        Remove a handler for a given data type.

        :param data_type: type of data to remove handler for
        :param handler: handler to remove
        :return: nothing
        :raises CoreError: when the handler is not registered for the type
        """
        registered = self.handlers.get(data_type, set())
        if handler not in registered:
            raise CoreError(
                f"cannot remove data({data_type}) handler({repr(handler)}), "
                f"does not exist"
            )
        registered.remove(handler)
|
||||
|
|
@ -1,239 +0,0 @@
|
|||
import logging
|
||||
from typing import TYPE_CHECKING, Optional
|
||||
|
||||
from core import utils
|
||||
from core.emulator.data import InterfaceData
|
||||
from core.errors import CoreError
|
||||
from core.nodes.base import CoreNode
|
||||
from core.nodes.interface import DEFAULT_MTU
|
||||
from core.nodes.network import CtrlNet
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from core.emulator.session import Session
|
||||
|
||||
CTRL_NET_ID: int = 9001
|
||||
ETC_HOSTS_PATH: str = "/etc/hosts"
|
||||
|
||||
|
||||
class ControlNetManager:
    """
    Manages CORE control networks for a session: creation/removal of the
    control net bridges, per-node control interfaces, and the session's
    /etc/hosts entries.
    """

    def __init__(self, session: "Session") -> None:
        """
        Create a ControlNetManager instance.

        :param session: session this manager belongs to
        """
        self.session: "Session" = session
        # marker text used to find/replace this session's block in /etc/hosts
        self.etc_hosts_header: str = f"CORE session {self.session.id} host entries"

    def _etc_hosts_enabled(self) -> bool:
        """
        Determines if /etc/hosts should be configured.

        :return: True if /etc/hosts should be configured, False otherwise
        """
        return self.session.options.get_bool("update_etc_hosts", False)

    def _get_server_ifaces(
        self,
    ) -> tuple[None, Optional[str], Optional[str], Optional[str]]:
        """
        Retrieve control net server interfaces.

        :return: control net server interfaces
        """
        # index 0 never has a server interface; assigning one is an error
        d0 = self.session.options.get("controlnetif0")
        if d0:
            logger.error("controlnet0 cannot be assigned with a host interface")
        d1 = self.session.options.get("controlnetif1")
        d2 = self.session.options.get("controlnetif2")
        d3 = self.session.options.get("controlnetif3")
        return None, d1, d2, d3

    def _get_prefixes(
        self,
    ) -> tuple[Optional[str], Optional[str], Optional[str], Optional[str]]:
        """
        Retrieve control net prefixes.

        :return: control net prefixes
        """
        p = self.session.options.get("controlnet")
        p0 = self.session.options.get("controlnet0")
        p1 = self.session.options.get("controlnet1")
        p2 = self.session.options.get("controlnet2")
        p3 = self.session.options.get("controlnet3")
        # legacy option "controlnet" acts as a fallback for index 0
        if not p0 and p:
            p0 = p
        return p0, p1, p2, p3

    def update_etc_hosts(self) -> None:
        """
        Add the IP addresses of control interfaces to the /etc/hosts file.

        :return: nothing
        """
        if not self._etc_hosts_enabled():
            return
        # only control net 0 entries are published to /etc/hosts
        control_net = self.get_control_net(0)
        entries = ""
        for iface in control_net.get_ifaces():
            name = iface.node.name
            for ip in iface.ips():
                entries += f"{ip.ip} {name}\n"
        logger.info("adding entries to /etc/hosts")
        utils.file_munge(ETC_HOSTS_PATH, self.etc_hosts_header, entries)

    def clear_etc_hosts(self) -> None:
        """
        Clear IP addresses of control interfaces from the /etc/hosts file.

        :return: nothing
        """
        if not self._etc_hosts_enabled():
            return
        logger.info("removing /etc/hosts file entries")
        utils.file_demunge(ETC_HOSTS_PATH, self.etc_hosts_header)

    def get_control_net_index(self, dev: str) -> int:
        """
        Retrieve control net index.

        :param dev: device to get control net index for
        :return: control net index, -1 otherwise
        """
        # device names look like "ctrlN[.suffix]"; N is the index digit
        if dev[0:4] == "ctrl" and int(dev[4]) in (0, 1, 2, 3):
            index = int(dev[4])
            if index == 0:
                return index
            # indexes 1-3 are only valid when a prefix is configured
            if index < 4 and self._get_prefixes()[index] is not None:
                return index
        return -1

    def get_control_net(self, index: int) -> Optional[CtrlNet]:
        """
        Retrieve a control net based on index.

        :param index: control net index
        :return: control net when available, None otherwise
        """
        try:
            return self.session.get_node(CTRL_NET_ID + index, CtrlNet)
        except CoreError:
            return None

    def add_control_net(
        self, index: int, conf_required: bool = True
    ) -> Optional[CtrlNet]:
        """
        Create a control network bridge as necessary. The conf_reqd flag,
        when False, causes a control network bridge to be added even if
        one has not been configured.

        :param index: network index to add
        :param conf_required: flag to check if conf is required
        :return: control net node
        :raises CoreError: when the index is outside 0-3
        """
        logger.info(
            "checking to add control net index(%s) conf_required(%s)",
            index,
            conf_required,
        )
        # check for valid index
        if not (0 <= index <= 3):
            raise CoreError(f"invalid control net index({index})")
        # return any existing control net bridge
        control_net = self.get_control_net(index)
        if control_net:
            logger.info("control net index(%s) already exists", index)
            return control_net
        # retrieve prefix for current index
        index_prefix = self._get_prefixes()[index]
        if not index_prefix:
            if conf_required:
                return None
            else:
                # fall back to the built-in default prefix
                index_prefix = CtrlNet.DEFAULT_PREFIX_LIST[index]
        # retrieve valid prefix from old style values
        prefixes = index_prefix.split()
        if len(prefixes) > 1:
            # a list of per-host prefixes is provided
            try:
                # old style entries look like "host:prefix"
                prefix = prefixes[0].split(":", 1)[1]
            except IndexError:
                prefix = prefixes[0]
        else:
            prefix = prefixes[0]
        # use the updown script for control net 0 only
        updown_script = None
        if index == 0:
            updown_script = self.session.options.get("controlnet_updown_script")
        # build a new controlnet bridge
        _id = CTRL_NET_ID + index
        server_iface = self._get_server_ifaces()[index]
        logger.info(
            "adding controlnet(%s) prefix(%s) updown(%s) server interface(%s)",
            _id,
            prefix,
            updown_script,
            server_iface,
        )
        options = CtrlNet.create_options()
        options.prefix = prefix
        options.updown_script = updown_script
        options.serverintf = server_iface
        control_net = self.session.create_node(CtrlNet, False, _id, options=options)
        control_net.brname = f"ctrl{index}.{self.session.short_session_id()}"
        control_net.startup()
        return control_net

    def remove_control_net(self, index: int) -> None:
        """
        Removes control net.

        :param index: index of control net to remove
        :return: nothing
        """
        control_net = self.get_control_net(index)
        if control_net:
            logger.info("removing control net index(%s)", index)
            self.session.delete_node(control_net.id)

    def add_control_iface(self, node: CoreNode, index: int) -> None:
        """
        Adds a control net interface to a node.

        :param node: node to add control net interface to
        :param index: index of control net to add interface to
        :return: nothing
        :raises CoreError: if control net doesn't exist, interface already exists,
            or there is an error creating the interface
        """
        control_net = self.get_control_net(index)
        if not control_net:
            raise CoreError(f"control net index({index}) does not exist")
        iface_id = control_net.CTRLIF_IDX_BASE + index
        if node.ifaces.get(iface_id):
            raise CoreError(f"control iface({iface_id}) already exists")
        try:
            logger.info(
                "node(%s) adding control net index(%s) interface(%s)",
                node.name,
                index,
                iface_id,
            )
            # node id doubles as the host index within the control net prefix
            ip4 = control_net.prefix[node.id]
            ip4_mask = control_net.prefix.prefixlen
            iface_data = InterfaceData(
                id=iface_id,
                name=f"ctrl{index}",
                mac=utils.random_mac(),
                ip4=ip4,
                ip4_mask=ip4_mask,
                mtu=DEFAULT_MTU,
            )
            iface = node.create_iface(iface_data)
            control_net.attach(iface)
            # mark as control so it is excluded from normal link handling
            iface.control = True
        except ValueError:
            raise CoreError(
                f"error adding control net interface to node({node.id}), "
                f"invalid control net prefix({control_net.prefix}), "
                "a longer prefix length may be required"
            )
|
||||
|
|
@ -1,156 +1,888 @@
|
|||
import atexit
|
||||
import logging
|
||||
import os
|
||||
from pathlib import Path
|
||||
import signal
|
||||
import sys
|
||||
|
||||
from core import utils
|
||||
from core.configservice.manager import ConfigServiceManager
|
||||
from core.emane.modelmanager import EmaneModelManager
|
||||
from core.emulator.session import Session
|
||||
from core.executables import get_requirements
|
||||
from core.services.coreservices import ServiceManager
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
DEFAULT_EMANE_PREFIX: str = "/usr"
|
||||
import core.services
|
||||
from core.coreobj import PyCoreNet
|
||||
from core.coreobj import PyCoreNode
|
||||
from core.data import NodeData
|
||||
from core.emulator.emudata import LinkOptions
|
||||
from core.emulator.emudata import NodeOptions
|
||||
from core.enumerations import EventTypes
|
||||
from core.enumerations import LinkTypes
|
||||
from core.enumerations import NodeTypes
|
||||
from core.misc import nodemaps
|
||||
from core.misc import nodeutils
|
||||
from core.service import ServiceManager
|
||||
from core.session import Session
|
||||
from core.xml.corexml import CoreXmlReader, CoreXmlWriter
|
||||
|
||||
|
||||
class CoreEmu:
|
||||
def signal_handler(signal_number, _):
    """
    Handle signals and force an exit with cleanup.

    :param int signal_number: signal number
    :param _: ignored frame argument
    :return: nothing
    """
    logging.info("caught signal: %s", signal_number)
    # sys.exit(n) is exactly "raise SystemExit(n)"; atexit cleanup still runs
    raise SystemExit(signal_number)
|
||||
|
||||
|
||||
# route common termination/user signals through signal_handler so the
# process exits via sys.exit and cleanup handlers get a chance to run
signal.signal(signal.SIGHUP, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
signal.signal(signal.SIGUSR1, signal_handler)
signal.signal(signal.SIGUSR2, signal_handler)
|
||||
|
||||
|
||||
def create_interface(node, network, interface_data):
    """
    Create an interface for a node on a network using provided interface data.

    :param node: node to create interface for
    :param network: network to associate interface with
    :param core.emulator.emudata.InterfaceData interface_data: interface data
    :return: created interface
    """
    addresses = interface_data.get_addresses()
    node.newnetif(
        network,
        addrlist=addresses,
        hwaddr=interface_data.mac,
        ifindex=interface_data.id,
        ifname=interface_data.name,
    )
    # hand back the interface object that was just registered on the node
    return node.netif(interface_data.id, network)
|
||||
|
||||
|
||||
def link_config(network, interface, link_options, devname=None, interface_two=None):
    """
    Convenience method for configuring a link.

    :param network: network to configure link for
    :param interface: interface to configure
    :param core.emulator.emudata.LinkOptions link_options: data to configure link with
    :param str devname: device name, default is None
    :param interface_two: other interface associated, default is None
    :return: nothing
    """
    options = dict(
        netif=interface,
        bw=link_options.bandwidth,
        delay=link_options.delay,
        loss=link_options.per,
        duplicate=link_options.dup,
        jitter=link_options.jitter,
        netif2=interface_two,
    )
    # hacky check here, because physical and emane nodes do not conform to
    # the same linkconfig interface (they take no devname argument)
    if not nodeutils.is_node(network, [NodeTypes.EMANE, NodeTypes.PHYSICAL]):
        options["devname"] = devname
    network.linkconfig(**options)
|
||||
|
||||
|
||||
def is_net_node(node):
    """
    Convenience method for testing if a legacy core node is considered a network node.

    :param object node: object to test against
    :return: True if object is an instance of a network node, False otherwise
    :rtype: bool
    """
    return isinstance(node, PyCoreNet)
|
||||
|
||||
|
||||
def is_core_node(node):
    """
    Convenience method for testing if a legacy core node is considered a core node.

    :param object node: object to test against
    :return: True if object is an instance of a core node, False otherwise
    :rtype: bool
    """
    return isinstance(node, PyCoreNode)
|
||||
|
||||
|
||||
class IdGen(object):
    """
    Simple monotonically increasing id generator.
    """

    def __init__(self, _id=0):
        """
        :param int _id: starting value; generated ids begin at this value plus one
        """
        self.id = _id

    def next(self):
        """
        Advance and return the next id.

        :return: next id value
        :rtype: int
        """
        next_id = self.id + 1
        self.id = next_id
        return next_id
|
||||
|
||||
|
||||
class EmuSession(Session):
|
||||
def __init__(self, _id, config=None, mkdir=True):
    """
    Create an EmuSession.

    :param int _id: session id
    :param dict config: session configuration, default is None
    :param bool mkdir: passed through to Session; presumably controls
        creation of the session directory — confirm against Session.__init__
    """
    super(EmuSession, self).__init__(_id, config, mkdir)

    # object management
    self.node_id_gen = IdGen()

    # set default services
    self.services.default_services = {
        "mdr": ("zebra", "OSPFv3MDR", "IPForward"),
        "PC": ("DefaultRoute",),
        "prouter": ("zebra", "OSPFv2", "OSPFv3", "IPForward"),
        "router": ("zebra", "OSPFv2", "OSPFv3", "IPForward"),
        "host": ("DefaultRoute", "SSH"),
    }
|
||||
|
||||
def _link_nodes(self, node_one_id, node_two_id):
    """
    Convenience method for retrieving nodes within link data.

    Classifies the two endpoints into "node" and "net" slots: network-type
    objects are moved into net_one/net_two and their node slot is cleared,
    and broker tunnels may stand in for a remote endpoint.

    :param int node_one_id: node one id
    :param int node_two_id: node two id
    :return: nodes, network nodes if present, and tunnel if present
    :rtype: tuple
    """
    logging.debug("link message between node1(%s) and node2(%s)", node_one_id, node_two_id)

    # values to fill
    net_one = None
    net_two = None

    # retrieve node one
    node_one = self.get_object(node_one_id)
    node_two = self.get_object(node_two_id)

    # both node ids are provided
    tunnel = self.broker.gettunnel(node_one_id, node_two_id)
    logging.debug("tunnel between nodes: %s", tunnel)
    if nodeutils.is_node(tunnel, NodeTypes.TAP_BRIDGE):
        # tap bridge tunnel acts as the network; drop the remote endpoint
        net_one = tunnel
        if tunnel.remotenum == node_one_id:
            node_one = None
        else:
            node_two = None
    # physical node connected via gre tap tunnel
    elif tunnel:
        if tunnel.remotenum == node_one_id:
            node_one = None
        else:
            node_two = None

    # reclassify network-type endpoints into the net slots; an endpoint
    # occupies exactly one slot after this
    if is_net_node(node_one):
        if not net_one:
            net_one = node_one
        else:
            net_two = node_one
        node_one = None

    if is_net_node(node_two):
        if not net_one:
            net_one = node_two
        else:
            net_two = node_two
        node_two = None

    logging.debug("link node types n1(%s) n2(%s) net1(%s) net2(%s) tunnel(%s)",
                  node_one, node_two, net_one, net_two, tunnel)
    return node_one, node_two, net_one, net_two, tunnel
|
||||
|
||||
# TODO: this doesn't appear to ever be used, EMANE or basic wireless range
|
||||
def _link_wireless(self, objects, connect):
|
||||
"""
|
||||
Objects to deal with when connecting/disconnecting wireless links.
|
||||
|
||||
:param list objects: possible objects to deal with
|
||||
:param bool connect: link interfaces if True, unlink otherwise
|
||||
:return: nothing
|
||||
"""
|
||||
objects = [x for x in objects if x]
|
||||
if len(objects) < 2:
|
||||
raise ValueError("wireless link failure: %s", objects)
|
||||
logging.debug("handling wireless linking objects(%s) connect(%s)", objects, connect)
|
||||
common_networks = objects[0].commonnets(objects[1])
|
||||
if not common_networks:
|
||||
raise ValueError("no common network found for wireless link/unlink")
|
||||
|
||||
for common_network, interface_one, interface_two in common_networks:
|
||||
if not nodeutils.is_node(common_network, [NodeTypes.WIRELESS_LAN, NodeTypes.EMANE]):
|
||||
logging.info("skipping common network that is not wireless/emane: %s", common_network)
|
||||
continue
|
||||
|
||||
logging.info("wireless linking connect(%s): %s - %s", connect, interface_one, interface_two)
|
||||
if connect:
|
||||
common_network.link(interface_one, interface_two)
|
||||
else:
|
||||
common_network.unlink(interface_one, interface_two)
|
||||
|
||||
def add_link(self, node_one_id, node_two_id, interface_one=None, interface_two=None, link_options=LinkOptions()):
    """
    Add a link between nodes.

    :param int node_one_id: node one id
    :param int node_two_id: node two id
    :param core.emulator.emudata.InterfaceData interface_one: node one interface data, defaults to none
    :param core.emulator.emudata.InterfaceData interface_two: node two interface data, defaults to none
    :param core.emulator.emudata.LinkOptions link_options: data for creating link, defaults to no options
    :return:
    """
    # NOTE(review): the LinkOptions() default is evaluated once and shared
    # across calls — mutating it would leak between calls; confirm callers
    # never modify it
    # get node objects identified by link data
    node_one, node_two, net_one, net_two, tunnel = self._link_nodes(node_one_id, node_two_id)

    # lock both endpoints for the duration of the link change
    if node_one:
        node_one.lock.acquire()
    if node_two:
        node_two.lock.acquire()

    try:
        # wireless link
        if link_options.type == LinkTypes.WIRELESS:
            objects = [node_one, node_two, net_one, net_two]
            self._link_wireless(objects, connect=True)
        # wired link
        else:
            # 2 nodes being linked, ptp network
            if all([node_one, node_two]) and not net_one:
                logging.info("adding link for peer to peer nodes: %s - %s", node_one.name, node_two.name)
                ptp_class = nodeutils.get_node_class(NodeTypes.PEER_TO_PEER)
                # only start the bridge when the session has left definition state
                start = self.state > EventTypes.DEFINITION_STATE.value
                net_one = self.add_object(cls=ptp_class, start=start)

            # node to network
            if node_one and net_one:
                logging.info("adding link from node to network: %s - %s", node_one.name, net_one.name)
                interface = create_interface(node_one, net_one, interface_one)
                link_config(net_one, interface, link_options)

            # network to node
            if node_two and net_one:
                logging.info("adding link from network to node: %s - %s", node_two.name, net_one.name)
                interface = create_interface(node_two, net_one, interface_two)
                if not link_options.unidirectional:
                    link_config(net_one, interface, link_options)

            # network to network
            if net_one and net_two:
                logging.info("adding link from network to network: %s - %s", net_one.name, net_two.name)
                if nodeutils.is_node(net_two, NodeTypes.RJ45):
                    interface = net_two.linknet(net_one)
                else:
                    interface = net_one.linknet(net_two)

                link_config(net_one, interface, link_options)

                if not link_options.unidirectional:
                    # swap upstream params around the reverse-direction config
                    interface.swapparams("_params_up")
                    link_config(net_two, interface, link_options, devname=interface.name)
                    interface.swapparams("_params_up")

            # a tunnel node was found for the nodes
            addresses = []
            if not node_one and all([net_one, interface_one]):
                addresses.extend(interface_one.get_addresses())

            if not node_two and all([net_two, interface_two]):
                addresses.extend(interface_two.get_addresses())

            # tunnel node logic
            key = link_options.key
            if key and nodeutils.is_node(net_one, NodeTypes.TUNNEL):
                logging.info("setting tunnel key for: %s", net_one.name)
                net_one.setkey(key)
                if addresses:
                    net_one.addrconfig(addresses)
            if key and nodeutils.is_node(net_two, NodeTypes.TUNNEL):
                logging.info("setting tunnel key for: %s", net_two.name)
                net_two.setkey(key)
                if addresses:
                    net_two.addrconfig(addresses)

            # physical node connected with tunnel
            if not net_one and not net_two and (node_one or node_two):
                if node_one and nodeutils.is_node(node_one, NodeTypes.PHYSICAL):
                    logging.info("adding link for physical node: %s", node_one.name)
                    addresses = interface_one.get_addresses()
                    node_one.adoptnetif(tunnel, interface_one.id, interface_one.mac, addresses)
                    link_config(node_one, tunnel, link_options)
                elif node_two and nodeutils.is_node(node_two, NodeTypes.PHYSICAL):
                    logging.info("adding link for physical node: %s", node_two.name)
                    addresses = interface_two.get_addresses()
                    node_two.adoptnetif(tunnel, interface_two.id, interface_two.mac, addresses)
                    link_config(node_two, tunnel, link_options)
    finally:
        # always release locks, even when linking raised
        if node_one:
            node_one.lock.release()
        if node_two:
            node_two.lock.release()
|
||||
|
||||
def delete_link(self, node_one_id, node_two_id, interface_one_id, interface_two_id, link_type=LinkTypes.WIRED):
    """
    Delete a link between nodes.

    :param int node_one_id: node one id
    :param int node_two_id: node two id
    :param int interface_one_id: interface id for node one
    :param int interface_two_id: interface id for node two
    :param core.enumerations.LinkTypes link_type: link type to delete
    :return: nothing
    :raises ValueError: when the interfaces do not share a common network
    """
    # get node objects identified by link data
    node_one, node_two, net_one, net_two, _tunnel = self._link_nodes(node_one_id, node_two_id)

    # lock both endpoints for the duration of the unlink
    if node_one:
        node_one.lock.acquire()
    if node_two:
        node_two.lock.acquire()

    try:
        # wireless link
        if link_type == LinkTypes.WIRELESS:
            objects = [node_one, node_two, net_one, net_two]
            self._link_wireless(objects, connect=False)
        # wired link
        else:
            if all([node_one, node_two]):
                # TODO: fix this for the case where ifindex[1,2] are not specified
                # a wired unlink event, delete the connecting bridge
                interface_one = node_one.netif(interface_one_id)
                interface_two = node_two.netif(interface_two_id)

                # get interfaces from common network, if no network node
                # otherwise get interfaces between a node and network
                if not interface_one and not interface_two:
                    common_networks = node_one.commonnets(node_two)
                    for network, common_interface_one, common_interface_two in common_networks:
                        # prefer the known network, else take the first pair
                        if (net_one and network == net_one) or not net_one:
                            interface_one = common_interface_one
                            interface_two = common_interface_two
                            break

                if all([interface_one, interface_two]) and any([interface_one.net, interface_two.net]):
                    if interface_one.net != interface_two.net and all([interface_one.up, interface_two.up]):
                        raise ValueError("no common network found")

                    logging.info("deleting link node(%s):interface(%s) node(%s):interface(%s)",
                                 node_one.name, interface_one.name, node_two.name, interface_two.name)
                    net_one = interface_one.net
                    interface_one.detachnet()
                    interface_two.detachnet()
                    # remove the bridge itself once no interfaces remain
                    if net_one.numnetif() == 0:
                        self.delete_object(net_one.objid)
                    node_one.delnetif(interface_one.netindex)
                    node_two.delnetif(interface_two.netindex)
    finally:
        # always release locks, even when unlinking raised
        if node_one:
            node_one.lock.release()
        if node_two:
            node_two.lock.release()
|
||||
|
||||
def update_link(self, node_one_id, node_two_id, interface_one_id=None, interface_two_id=None,
                link_options=LinkOptions()):
    """
    Update link information between nodes.

    :param int node_one_id: node one id
    :param int node_two_id: node two id
    :param int interface_one_id: interface id for node one
    :param int interface_two_id: interface id for node two
    :param core.emulator.emudata.LinkOptions link_options: data to update link with
    :return: nothing
    :raises ValueError: when the link cannot be resolved or is wireless
    """
    # NOTE(review): this compares to LinkTypes.WIRELESS.value while add_link
    # compares to LinkTypes.WIRELESS — confirm which form link_options.type
    # actually carries; the shared LinkOptions() default is also evaluated once
    # get node objects identified by link data
    node_one, node_two, net_one, net_two, _tunnel = self._link_nodes(node_one_id, node_two_id)

    # lock both endpoints for the duration of the update
    if node_one:
        node_one.lock.acquire()
    if node_two:
        node_two.lock.acquire()

    try:
        # wireless link
        if link_options.type == LinkTypes.WIRELESS.value:
            raise ValueError("cannot update wireless link")
        else:
            if not node_one and not node_two:
                if net_one and net_two:
                    # modify link between nets
                    interface = net_one.getlinknetif(net_two)
                    upstream = False

                    # fall back to the reverse direction when not found
                    if not interface:
                        upstream = True
                        interface = net_two.getlinknetif(net_one)

                    if not interface:
                        raise ValueError("modify unknown link between nets")

                    if upstream:
                        # swap upstream params around the directional config
                        interface.swapparams("_params_up")
                        link_config(net_one, interface, link_options, devname=interface.name)
                        interface.swapparams("_params_up")
                    else:
                        link_config(net_one, interface, link_options)

                    if not link_options.unidirectional:
                        if upstream:
                            link_config(net_two, interface, link_options)
                        else:
                            interface.swapparams("_params_up")
                            link_config(net_two, interface, link_options, devname=interface.name)
                            interface.swapparams("_params_up")
                else:
                    raise ValueError("modify link for unknown nodes")
            elif not node_one:
                # node1 = layer 2node, node2 = layer3 node
                interface = node_two.netif(interface_two_id, net_one)
                link_config(net_one, interface, link_options)
            elif not node_two:
                # node2 = layer 2node, node1 = layer3 node
                interface = node_one.netif(interface_one_id, net_one)
                link_config(net_one, interface, link_options)
            else:
                # two layer-3 nodes: update across their common network(s)
                common_networks = node_one.commonnets(node_two)
                if not common_networks:
                    raise ValueError("no common network found")

                for net_one, interface_one, interface_two in common_networks:
                    if interface_one_id is not None and interface_one_id != node_one.getifindex(interface_one):
                        continue

                    link_config(net_one, interface_one, link_options, interface_two=interface_two)
                    if not link_options.unidirectional:
                        link_config(net_one, interface_two, link_options, interface_two=interface_one)

    finally:
        # always release locks, even when updating raised
        if node_one:
            node_one.lock.release()
        if node_two:
            node_two.lock.release()
|
||||
|
||||
def add_node(self, _type=NodeTypes.DEFAULT, _id=None, node_options=NodeOptions()):
|
||||
"""
|
||||
Add a node to the session, based on the provided node data.
|
||||
|
||||
:param core.enumerations.NodeTypes _type: type of node to create
|
||||
:param int _id: id for node, defaults to None for generated id
|
||||
:param core.emulator.emudata.NodeOptions node_options: data to create node with
|
||||
:return: created node
|
||||
"""
|
||||
|
||||
# retrieve node class for given node type
|
||||
try:
|
||||
node_class = nodeutils.get_node_class(_type)
|
||||
except KeyError:
|
||||
logging.error("invalid node type to create: %s", _type)
|
||||
return None
|
||||
|
||||
# set node start based on current session state, override and check when rj45
|
||||
start = self.state > EventTypes.DEFINITION_STATE.value
|
||||
enable_rj45 = self.options.get_config("enablerj45") == "1"
|
||||
if _type == NodeTypes.RJ45 and not enable_rj45:
|
||||
start = False
|
||||
|
||||
# determine node id
|
||||
if not _id:
|
||||
while True:
|
||||
_id = self.node_id_gen.next()
|
||||
if _id not in self.objects:
|
||||
break
|
||||
|
||||
# generate name if not provided
|
||||
name = node_options.name
|
||||
if not name:
|
||||
name = "%s%s" % (node_class.__name__, _id)
|
||||
|
||||
# create node
|
||||
logging.info("creating node(%s) id(%s) name(%s) start(%s)", node_class.__name__, _id, name, start)
|
||||
node = self.add_object(cls=node_class, objid=_id, name=name, start=start)
|
||||
|
||||
# set node attributes
|
||||
node.icon = node_options.icon
|
||||
node.canvas = node_options.canvas
|
||||
node.opaque = node_options.opaque
|
||||
|
||||
# set node position and broadcast it
|
||||
self.set_node_position(node, node_options)
|
||||
|
||||
# add services to default and physical nodes only
|
||||
if _type in [NodeTypes.DEFAULT, NodeTypes.PHYSICAL]:
|
||||
node.type = node_options.model
|
||||
logging.debug("set node type: %s", node.type)
|
||||
self.services.add_services(node, node.type, node_options.services)
|
||||
|
||||
# boot nodes if created after runtime, LcxNodes, Physical, and RJ45 are all PyCoreNodes
|
||||
is_boot_node = isinstance(node, PyCoreNode) and not nodeutils.is_node(node, NodeTypes.RJ45)
|
||||
if self.state == EventTypes.RUNTIME_STATE.value and is_boot_node:
|
||||
self.write_objects()
|
||||
self.add_remove_control_interface(node=node, remove=False)
|
||||
self.services.boot_services(node)
|
||||
|
||||
return node
|
||||
|
||||
def update_node(self, node_id, node_options):
|
||||
"""
|
||||
Update node information.
|
||||
|
||||
:param int node_id: id of node to update
|
||||
:param core.emulator.emudata.NodeOptions node_options: data to update node with
|
||||
:return: True if node updated, False otherwise
|
||||
:rtype: bool
|
||||
"""
|
||||
result = False
|
||||
try:
|
||||
# get node to update
|
||||
node = self.get_object(node_id)
|
||||
|
||||
# set node position and broadcast it
|
||||
self.set_node_position(node, node_options)
|
||||
|
||||
# update attributes
|
||||
node.canvas = node_options.canvas
|
||||
node.icon = node_options.icon
|
||||
|
||||
# set node as updated successfully
|
||||
result = True
|
||||
except KeyError:
|
||||
logging.error("failure to update node that does not exist: %s", node_id)
|
||||
|
||||
return result
|
||||
|
||||
def delete_node(self, node_id):
|
||||
"""
|
||||
Delete a node from the session and check if session should shutdown, if no nodes are left.
|
||||
|
||||
:param int node_id: id of node to delete
|
||||
:return: True if node deleted, False otherwise
|
||||
:rtype: bool
|
||||
"""
|
||||
# delete node and check for session shutdown if a node was removed
|
||||
result = self.custom_delete_object(node_id)
|
||||
if result:
|
||||
self.check_shutdown()
|
||||
return result
|
||||
|
||||
def set_node_position(self, node, node_options):
|
||||
"""
|
||||
Set position for a node, use lat/lon/alt if needed.
|
||||
|
||||
:param node: node to set position for
|
||||
:param core.emulator.emudata.NodeOptions node_options: data for node
|
||||
:return: nothing
|
||||
"""
|
||||
# extract location values
|
||||
x = node_options.x
|
||||
y = node_options.y
|
||||
lat = node_options.lat
|
||||
lon = node_options.lon
|
||||
alt = node_options.alt
|
||||
|
||||
# check if we need to generate position from lat/lon/alt
|
||||
has_empty_position = all(i is None for i in [x, y])
|
||||
has_lat_lon_alt = all(i is not None for i in [lat, lon, alt])
|
||||
using_lat_lon_alt = has_empty_position and has_lat_lon_alt
|
||||
if using_lat_lon_alt:
|
||||
x, y, _ = self.location.getxyz(lat, lon, alt)
|
||||
|
||||
# set position and broadcast
|
||||
if None not in [x, y]:
|
||||
node.setposition(x, y, None)
|
||||
|
||||
# broadcast updated location when using lat/lon/alt
|
||||
if using_lat_lon_alt:
|
||||
self.broadcast_node_location(node)
|
||||
|
||||
def broadcast_node_location(self, node):
|
||||
"""
|
||||
Broadcast node location to all listeners.
|
||||
|
||||
:param core.netns.nodes.PyCoreObj node: node to broadcast location for
|
||||
:return: nothing
|
||||
"""
|
||||
node_data = NodeData(
|
||||
message_type=0,
|
||||
id=node.objid,
|
||||
x_position=node.position.x,
|
||||
y_position=node.position.y
|
||||
)
|
||||
self.broadcast_node(node_data)
|
||||
|
||||
def start_mobility(self, node_ids=None):
|
||||
"""
|
||||
Start mobility for the provided node ids.
|
||||
|
||||
:param list[int] node_ids: nodes to start mobility for
|
||||
:return: nothing
|
||||
"""
|
||||
self.mobility.startup(node_ids)
|
||||
|
||||
def shutdown(self):
|
||||
"""
|
||||
Shutdown session.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
logging.info("session(%s) shutting down", self.id)
|
||||
self.set_state(EventTypes.DATACOLLECT_STATE, send_event=True)
|
||||
self.set_state(EventTypes.SHUTDOWN_STATE, send_event=True)
|
||||
super(EmuSession, self).shutdown()
|
||||
|
||||
def custom_delete_object(self, object_id):
|
||||
"""
|
||||
Remove an emulation object.
|
||||
|
||||
:param int object_id: object id to remove
|
||||
:return: True if object deleted, False otherwise
|
||||
"""
|
||||
result = False
|
||||
with self._objects_lock:
|
||||
if object_id in self.objects:
|
||||
obj = self.objects.pop(object_id)
|
||||
obj.shutdown()
|
||||
result = True
|
||||
return result
|
||||
|
||||
def is_active(self):
|
||||
"""
|
||||
Determine if this session is considered to be active. (Runtime or Data collect states)
|
||||
|
||||
:return: True if active, False otherwise
|
||||
"""
|
||||
result = self.state in {EventTypes.RUNTIME_STATE.value, EventTypes.DATACOLLECT_STATE.value}
|
||||
logging.info("session(%s) checking if active: %s", self.id, result)
|
||||
return result
|
||||
|
||||
def open_xml(self, file_name, start=False):
|
||||
"""
|
||||
Import a session from the EmulationScript XML format.
|
||||
|
||||
:param str file_name: xml file to load session from
|
||||
:param bool start: instantiate session if true, false otherwise
|
||||
:return: nothing
|
||||
"""
|
||||
# clear out existing session
|
||||
self.clear()
|
||||
|
||||
# write out xml file
|
||||
CoreXmlReader(self).read(file_name)
|
||||
|
||||
# start session if needed
|
||||
if start:
|
||||
self.name = os.path.basename(file_name)
|
||||
self.file_name = file_name
|
||||
self.instantiate()
|
||||
|
||||
def save_xml(self, file_name):
|
||||
"""
|
||||
Export a session to the EmulationScript XML format.
|
||||
|
||||
:param str file_name: file name to write session xml to
|
||||
:return: nothing
|
||||
"""
|
||||
CoreXmlWriter(self).write(file_name)
|
||||
|
||||
def add_hook(self, state, file_name, source_name, data):
|
||||
"""
|
||||
Store a hook from a received file message.
|
||||
|
||||
:param int state: when to run hook
|
||||
:param str file_name: file name for hook
|
||||
:param str source_name: source name
|
||||
:param data: hook data
|
||||
:return: nothing
|
||||
"""
|
||||
# hack to conform with old logic until updated
|
||||
state = ":%s" % state
|
||||
self.set_hook(state, file_name, source_name, data)
|
||||
|
||||
def add_node_file(self, node_id, source_name, file_name, data):
|
||||
"""
|
||||
Add a file to a node.
|
||||
|
||||
:param int node_id: node to add file to
|
||||
:param str source_name: source file name
|
||||
:param str file_name: file name to add
|
||||
:param str data: file data
|
||||
:return: nothing
|
||||
"""
|
||||
|
||||
node = self.get_object(node_id)
|
||||
|
||||
if source_name is not None:
|
||||
node.addfile(source_name, file_name)
|
||||
elif data is not None:
|
||||
node.nodefile(file_name, data)
|
||||
|
||||
def clear(self):
|
||||
"""
|
||||
Clear all CORE session data. (objects, hooks, broker)
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
self.delete_objects()
|
||||
self.del_hooks()
|
||||
self.broker.reset()
|
||||
self.emane.reset()
|
||||
|
||||
def start_events(self):
|
||||
"""
|
||||
Start event loop.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
self.event_loop.run()
|
||||
|
||||
def mobility_event(self, event_data):
|
||||
"""
|
||||
Handle a mobility event.
|
||||
|
||||
:param core.data.EventData event_data: event data to handle
|
||||
:return: nothing
|
||||
"""
|
||||
self.mobility.handleevent(event_data)
|
||||
|
||||
def create_wireless_node(self, _id=None, node_options=NodeOptions()):
|
||||
"""
|
||||
Create a wireless node for use within an wireless/EMANE networks.
|
||||
|
||||
:param int _id: int for node, defaults to None and will be generated
|
||||
:param core.emulator.emudata.NodeOptions node_options: options for emane node, model will always be "mdr"
|
||||
:return: new emane node
|
||||
:rtype: core.netns.nodes.CoreNode
|
||||
"""
|
||||
node_options.model = "mdr"
|
||||
return self.add_node(_type=NodeTypes.DEFAULT, _id=_id, node_options=node_options)
|
||||
|
||||
def create_emane_network(self, model, geo_reference, geo_scale=None, node_options=NodeOptions(), config=None):
|
||||
"""
|
||||
Convenience method for creating an emane network.
|
||||
|
||||
:param model: emane model to use for emane network
|
||||
:param geo_reference: geo reference point to use for emane node locations
|
||||
:param geo_scale: geo scale to use for emane node locations, defaults to 1.0
|
||||
:param core.emulator.emudata.NodeOptions node_options: options for emane node being created
|
||||
:param dict config: emane model configuration
|
||||
:return: create emane network
|
||||
"""
|
||||
# required to be set for emane to function properly
|
||||
self.location.setrefgeo(*geo_reference)
|
||||
if geo_scale:
|
||||
self.location.refscale = geo_scale
|
||||
|
||||
# create and return network
|
||||
emane_network = self.add_node(_type=NodeTypes.EMANE, node_options=node_options)
|
||||
self.emane.set_model(emane_network, model, config)
|
||||
return emane_network
|
||||
|
||||
|
||||
class CoreEmu(object):
|
||||
"""
|
||||
Provides logic for creating and configuring CORE sessions and the nodes within them.
|
||||
"""
|
||||
|
||||
def __init__(self, config: dict[str, str] = None) -> None:
|
||||
def __init__(self, config=None):
|
||||
"""
|
||||
Create a CoreEmu object.
|
||||
|
||||
:param config: configuration options
|
||||
:param dict config: configuration options
|
||||
"""
|
||||
# set umask 0
|
||||
os.umask(0)
|
||||
|
||||
# configuration
|
||||
config = config if config else {}
|
||||
self.config: dict[str, str] = config
|
||||
if not config:
|
||||
config = {}
|
||||
self.config = config
|
||||
|
||||
# session management
|
||||
self.sessions: dict[int, Session] = {}
|
||||
self.session_id_gen = IdGen(_id=59999)
|
||||
self.sessions = {}
|
||||
|
||||
# set default nodes
|
||||
node_map = nodemaps.NODES
|
||||
nodeutils.set_node_map(node_map)
|
||||
|
||||
# load services
|
||||
self.service_errors: list[str] = []
|
||||
self.service_manager: ConfigServiceManager = ConfigServiceManager()
|
||||
self._load_services()
|
||||
self.service_errors = []
|
||||
self.load_services()
|
||||
|
||||
# check and load emane
|
||||
self.has_emane: bool = False
|
||||
self._load_emane()
|
||||
# catch exit event
|
||||
atexit.register(self.shutdown)
|
||||
|
||||
# check executables exist on path
|
||||
self._validate_env()
|
||||
|
||||
def _validate_env(self) -> None:
|
||||
"""
|
||||
Validates executables CORE depends on exist on path.
|
||||
|
||||
:return: nothing
|
||||
:raises core.errors.CoreError: when an executable does not exist on path
|
||||
"""
|
||||
use_ovs = self.config.get("ovs") == "1"
|
||||
for requirement in get_requirements(use_ovs):
|
||||
utils.which(requirement, required=True)
|
||||
|
||||
def _load_services(self) -> None:
|
||||
"""
|
||||
Loads default and custom services for use within CORE.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
def load_services(self):
|
||||
# load default services
|
||||
self.service_errors = ServiceManager.load_locals()
|
||||
self.service_errors = core.services.load()
|
||||
|
||||
# load custom services
|
||||
service_paths = self.config.get("custom_services_dir")
|
||||
logger.debug("custom service paths: %s", service_paths)
|
||||
if service_paths is not None:
|
||||
for service_path in service_paths.split(","):
|
||||
service_path = Path(service_path.strip())
|
||||
logging.debug("custom service paths: %s", service_paths)
|
||||
if service_paths:
|
||||
for service_path in service_paths.split(','):
|
||||
service_path = service_path.strip()
|
||||
custom_service_errors = ServiceManager.add_services(service_path)
|
||||
self.service_errors.extend(custom_service_errors)
|
||||
# load default config services
|
||||
self.service_manager.load_locals()
|
||||
# load custom config services
|
||||
custom_dir = self.config.get("custom_config_services_dir")
|
||||
if custom_dir is not None:
|
||||
custom_dir = Path(custom_dir)
|
||||
self.service_manager.load(custom_dir)
|
||||
|
||||
def _load_emane(self) -> None:
|
||||
def update_nodes(self, node_map):
|
||||
"""
|
||||
Check if emane is installed and load models.
|
||||
Updates node map used by core.
|
||||
|
||||
:param dict node_map: node map to update existing node map with
|
||||
:return: nothing
|
||||
"""
|
||||
# check for emane
|
||||
path = utils.which("emane", required=False)
|
||||
self.has_emane = path is not None
|
||||
if not self.has_emane:
|
||||
logger.info("emane is not installed, emane functionality disabled")
|
||||
return
|
||||
# get version
|
||||
emane_version = utils.cmd("emane --version")
|
||||
logger.info("using emane: %s", emane_version)
|
||||
emane_prefix = self.config.get("emane_prefix", DEFAULT_EMANE_PREFIX)
|
||||
emane_prefix = Path(emane_prefix)
|
||||
EmaneModelManager.load_locals(emane_prefix)
|
||||
# load custom models
|
||||
custom_path = self.config.get("emane_models_dir")
|
||||
if custom_path is not None:
|
||||
logger.info("loading custom emane models: %s", custom_path)
|
||||
custom_path = Path(custom_path)
|
||||
EmaneModelManager.load(custom_path, emane_prefix)
|
||||
nodeutils.update_node_map(node_map)
|
||||
|
||||
def shutdown(self) -> None:
|
||||
def shutdown(self):
|
||||
"""
|
||||
Shutdown all CORE session.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
logger.info("shutting down all sessions")
|
||||
while self.sessions:
|
||||
_, session = self.sessions.popitem()
|
||||
logging.info("shutting down all sessions")
|
||||
sessions = self.sessions.copy()
|
||||
self.sessions.clear()
|
||||
for session in sessions.itervalues():
|
||||
session.shutdown()
|
||||
|
||||
def create_session(self, _id: int = None, _cls: type[Session] = Session) -> Session:
|
||||
def create_session(self, _id=None, master=True, _cls=EmuSession):
|
||||
"""
|
||||
Create a new CORE session.
|
||||
Create a new CORE session, set to master if running standalone.
|
||||
|
||||
:param _id: session id for new session
|
||||
:param _cls: Session class to use
|
||||
:param int _id: session id for new session
|
||||
:param bool master: sets session to master
|
||||
:param class _cls: EmuSession class to use
|
||||
:return: created session
|
||||
:rtype: EmuSession
|
||||
"""
|
||||
if not _id:
|
||||
_id = 1
|
||||
while _id in self.sessions:
|
||||
_id += 1
|
||||
session = _cls(_id, config=self.config)
|
||||
session.service_manager = self.service_manager
|
||||
logger.info("created session: %s", _id)
|
||||
self.sessions[_id] = session
|
||||
|
||||
session_id = _id
|
||||
if not session_id:
|
||||
while True:
|
||||
session_id = self.session_id_gen.next()
|
||||
if session_id not in self.sessions:
|
||||
break
|
||||
|
||||
session = _cls(session_id, config=self.config)
|
||||
logging.info("created session: %s", session_id)
|
||||
if master:
|
||||
session.master = True
|
||||
|
||||
self.sessions[session_id] = session
|
||||
return session
|
||||
|
||||
def delete_session(self, _id: int) -> bool:
|
||||
def delete_session(self, _id):
|
||||
"""
|
||||
Shutdown and delete a CORE session.
|
||||
|
||||
:param _id: session id to delete
|
||||
:param int _id: session id to delete
|
||||
:return: True if deleted, False otherwise
|
||||
:rtype: bool
|
||||
"""
|
||||
logger.info("deleting session: %s", _id)
|
||||
logging.info("deleting session: %s", _id)
|
||||
session = self.sessions.pop(_id, None)
|
||||
result = False
|
||||
if session:
|
||||
logger.info("shutting session down: %s", _id)
|
||||
session.data_collect()
|
||||
logging.info("shutting session down: %s", _id)
|
||||
session.shutdown()
|
||||
result = True
|
||||
else:
|
||||
logger.error("session to delete did not exist: %s", _id)
|
||||
logging.error("session to delete did not exist: %s", _id)
|
||||
|
||||
return result
|
||||
|
|
|
|||
|
|
@ -1,357 +0,0 @@
|
|||
"""
|
||||
CORE data objects.
|
||||
"""
|
||||
from dataclasses import dataclass, field
|
||||
from typing import TYPE_CHECKING, Any, Optional
|
||||
|
||||
import netaddr
|
||||
|
||||
from core import utils
|
||||
from core.emulator.enumerations import (
|
||||
EventTypes,
|
||||
ExceptionLevels,
|
||||
LinkTypes,
|
||||
MessageFlags,
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from core.nodes.base import CoreNode, NodeBase
|
||||
|
||||
|
||||
@dataclass
|
||||
class ConfigData:
|
||||
message_type: int = None
|
||||
node: int = None
|
||||
object: str = None
|
||||
type: int = None
|
||||
data_types: tuple[int] = None
|
||||
data_values: str = None
|
||||
captions: str = None
|
||||
bitmap: str = None
|
||||
possible_values: str = None
|
||||
groups: str = None
|
||||
session: int = None
|
||||
iface_id: int = None
|
||||
network_id: int = None
|
||||
opaque: str = None
|
||||
|
||||
|
||||
@dataclass
|
||||
class EventData:
|
||||
node: int = None
|
||||
event_type: EventTypes = None
|
||||
name: str = None
|
||||
data: str = None
|
||||
time: str = None
|
||||
session: int = None
|
||||
|
||||
|
||||
@dataclass
|
||||
class ExceptionData:
|
||||
node: int = None
|
||||
session: int = None
|
||||
level: ExceptionLevels = None
|
||||
source: str = None
|
||||
date: str = None
|
||||
text: str = None
|
||||
opaque: str = None
|
||||
|
||||
|
||||
@dataclass
|
||||
class FileData:
|
||||
message_type: MessageFlags = None
|
||||
node: int = None
|
||||
name: str = None
|
||||
mode: str = None
|
||||
number: int = None
|
||||
type: str = None
|
||||
source: str = None
|
||||
session: int = None
|
||||
data: str = None
|
||||
compressed_data: str = None
|
||||
|
||||
|
||||
@dataclass
|
||||
class NodeOptions:
|
||||
"""
|
||||
Options for creating and updating nodes within core.
|
||||
"""
|
||||
|
||||
name: str = None
|
||||
model: Optional[str] = "PC"
|
||||
canvas: int = None
|
||||
icon: str = None
|
||||
services: list[str] = field(default_factory=list)
|
||||
config_services: list[str] = field(default_factory=list)
|
||||
x: float = None
|
||||
y: float = None
|
||||
lat: float = None
|
||||
lon: float = None
|
||||
alt: float = None
|
||||
server: str = None
|
||||
image: str = None
|
||||
emane: str = None
|
||||
legacy: bool = False
|
||||
# src, dst
|
||||
binds: list[tuple[str, str]] = field(default_factory=list)
|
||||
# src, dst, unique, delete
|
||||
volumes: list[tuple[str, str, bool, bool]] = field(default_factory=list)
|
||||
|
||||
def set_position(self, x: float, y: float) -> None:
|
||||
"""
|
||||
Convenience method for setting position.
|
||||
|
||||
:param x: x position
|
||||
:param y: y position
|
||||
:return: nothing
|
||||
"""
|
||||
self.x = x
|
||||
self.y = y
|
||||
|
||||
def set_location(self, lat: float, lon: float, alt: float) -> None:
|
||||
"""
|
||||
Convenience method for setting location.
|
||||
|
||||
:param lat: latitude
|
||||
:param lon: longitude
|
||||
:param alt: altitude
|
||||
:return: nothing
|
||||
"""
|
||||
self.lat = lat
|
||||
self.lon = lon
|
||||
self.alt = alt
|
||||
|
||||
|
||||
@dataclass
|
||||
class NodeData:
|
||||
"""
|
||||
Node to broadcast.
|
||||
"""
|
||||
|
||||
node: "NodeBase"
|
||||
message_type: MessageFlags = None
|
||||
source: str = None
|
||||
|
||||
|
||||
@dataclass
|
||||
class InterfaceData:
|
||||
"""
|
||||
Convenience class for storing interface data.
|
||||
"""
|
||||
|
||||
id: int = None
|
||||
name: str = None
|
||||
mac: str = None
|
||||
ip4: str = None
|
||||
ip4_mask: int = None
|
||||
ip6: str = None
|
||||
ip6_mask: int = None
|
||||
mtu: int = None
|
||||
|
||||
def get_ips(self) -> list[str]:
|
||||
"""
|
||||
Returns a list of ip4 and ip6 addresses when present.
|
||||
|
||||
:return: list of ip addresses
|
||||
"""
|
||||
ips = []
|
||||
if self.ip4 and self.ip4_mask:
|
||||
ips.append(f"{self.ip4}/{self.ip4_mask}")
|
||||
if self.ip6 and self.ip6_mask:
|
||||
ips.append(f"{self.ip6}/{self.ip6_mask}")
|
||||
return ips
|
||||
|
||||
|
||||
@dataclass
|
||||
class LinkOptions:
|
||||
"""
|
||||
Options for creating and updating links within core.
|
||||
"""
|
||||
|
||||
delay: int = None
|
||||
bandwidth: int = None
|
||||
loss: float = None
|
||||
dup: int = None
|
||||
jitter: int = None
|
||||
mer: int = None
|
||||
burst: int = None
|
||||
mburst: int = None
|
||||
unidirectional: int = None
|
||||
key: int = None
|
||||
buffer: int = None
|
||||
|
||||
def update(self, options: "LinkOptions") -> bool:
|
||||
"""
|
||||
Updates current options with values from other options.
|
||||
|
||||
:param options: options to update with
|
||||
:return: True if any value has changed, False otherwise
|
||||
"""
|
||||
changed = False
|
||||
if options.delay is not None and 0 <= options.delay != self.delay:
|
||||
self.delay = options.delay
|
||||
changed = True
|
||||
if options.bandwidth is not None and 0 <= options.bandwidth != self.bandwidth:
|
||||
self.bandwidth = options.bandwidth
|
||||
changed = True
|
||||
if options.loss is not None and 0 <= options.loss != self.loss:
|
||||
self.loss = options.loss
|
||||
changed = True
|
||||
if options.dup is not None and 0 <= options.dup != self.dup:
|
||||
self.dup = options.dup
|
||||
changed = True
|
||||
if options.jitter is not None and 0 <= options.jitter != self.jitter:
|
||||
self.jitter = options.jitter
|
||||
changed = True
|
||||
if options.buffer is not None and 0 <= options.buffer != self.buffer:
|
||||
self.buffer = options.buffer
|
||||
changed = True
|
||||
return changed
|
||||
|
||||
def is_clear(self) -> bool:
|
||||
"""
|
||||
Checks if the current option values represent a clear state.
|
||||
|
||||
:return: True if the current values should clear, False otherwise
|
||||
"""
|
||||
clear = self.delay is None or self.delay <= 0
|
||||
clear &= self.jitter is None or self.jitter <= 0
|
||||
clear &= self.loss is None or self.loss <= 0
|
||||
clear &= self.dup is None or self.dup <= 0
|
||||
clear &= self.bandwidth is None or self.bandwidth <= 0
|
||||
clear &= self.buffer is None or self.buffer <= 0
|
||||
return clear
|
||||
|
||||
def __eq__(self, other: Any) -> bool:
|
||||
"""
|
||||
Custom logic to check if this link options is equivalent to another.
|
||||
|
||||
:param other: other object to check
|
||||
:return: True if they are both link options with the same values,
|
||||
False otherwise
|
||||
"""
|
||||
if not isinstance(other, LinkOptions):
|
||||
return False
|
||||
return (
|
||||
self.delay == other.delay
|
||||
and self.jitter == other.jitter
|
||||
and self.loss == other.loss
|
||||
and self.dup == other.dup
|
||||
and self.bandwidth == other.bandwidth
|
||||
and self.buffer == other.buffer
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class LinkData:
|
||||
"""
|
||||
Represents all data associated with a link.
|
||||
"""
|
||||
|
||||
message_type: MessageFlags = None
|
||||
type: LinkTypes = LinkTypes.WIRED
|
||||
label: str = None
|
||||
node1_id: int = None
|
||||
node2_id: int = None
|
||||
network_id: int = None
|
||||
iface1: InterfaceData = None
|
||||
iface2: InterfaceData = None
|
||||
options: LinkOptions = LinkOptions()
|
||||
color: str = None
|
||||
source: str = None
|
||||
|
||||
|
||||
class IpPrefixes:
|
||||
"""
|
||||
Convenience class to help generate IP4 and IP6 addresses for nodes within CORE.
|
||||
"""
|
||||
|
||||
def __init__(self, ip4_prefix: str = None, ip6_prefix: str = None) -> None:
|
||||
"""
|
||||
Creates an IpPrefixes object.
|
||||
|
||||
:param ip4_prefix: ip4 prefix to use for generation
|
||||
:param ip6_prefix: ip6 prefix to use for generation
|
||||
:raises ValueError: when both ip4 and ip6 prefixes have not been provided
|
||||
"""
|
||||
if not ip4_prefix and not ip6_prefix:
|
||||
raise ValueError("ip4 or ip6 must be provided")
|
||||
|
||||
self.ip4 = None
|
||||
if ip4_prefix:
|
||||
self.ip4 = netaddr.IPNetwork(ip4_prefix)
|
||||
self.ip6 = None
|
||||
if ip6_prefix:
|
||||
self.ip6 = netaddr.IPNetwork(ip6_prefix)
|
||||
|
||||
def ip4_address(self, node_id: int) -> str:
|
||||
"""
|
||||
Convenience method to return the IP4 address for a node.
|
||||
|
||||
:param node_id: node id to get IP4 address for
|
||||
:return: IP4 address or None
|
||||
"""
|
||||
if not self.ip4:
|
||||
raise ValueError("ip4 prefixes have not been set")
|
||||
return str(self.ip4[node_id])
|
||||
|
||||
def ip6_address(self, node_id: int) -> str:
|
||||
"""
|
||||
Convenience method to return the IP6 address for a node.
|
||||
|
||||
:param node_id: node id to get IP6 address for
|
||||
:return: IP4 address or None
|
||||
"""
|
||||
if not self.ip6:
|
||||
raise ValueError("ip6 prefixes have not been set")
|
||||
return str(self.ip6[node_id])
|
||||
|
||||
def gen_iface(self, node_id: int, name: str = None, mac: str = None):
|
||||
"""
|
||||
Creates interface data for linking nodes, using the nodes unique id for
|
||||
generation, along with a random mac address, unless provided.
|
||||
|
||||
:param node_id: node id to create an interface for
|
||||
:param name: name to set for interface, default is eth{id}
|
||||
:param mac: mac address to use for this interface, default is random
|
||||
generation
|
||||
:return: new interface data for the provided node
|
||||
"""
|
||||
# generate ip4 data
|
||||
ip4 = None
|
||||
ip4_mask = None
|
||||
if self.ip4:
|
||||
ip4 = self.ip4_address(node_id)
|
||||
ip4_mask = self.ip4.prefixlen
|
||||
|
||||
# generate ip6 data
|
||||
ip6 = None
|
||||
ip6_mask = None
|
||||
if self.ip6:
|
||||
ip6 = self.ip6_address(node_id)
|
||||
ip6_mask = self.ip6.prefixlen
|
||||
|
||||
# random mac
|
||||
if not mac:
|
||||
mac = utils.random_mac()
|
||||
|
||||
return InterfaceData(
|
||||
name=name, ip4=ip4, ip4_mask=ip4_mask, ip6=ip6, ip6_mask=ip6_mask, mac=mac
|
||||
)
|
||||
|
||||
def create_iface(
|
||||
self, node: "CoreNode", name: str = None, mac: str = None
|
||||
) -> InterfaceData:
|
||||
"""
|
||||
Creates interface data for linking nodes, using the nodes unique id for
|
||||
generation, along with a random mac address, unless provided.
|
||||
|
||||
:param node: node to create interface for
|
||||
:param name: name to set for interface, default is eth{id}
|
||||
:param mac: mac address to use for this interface, default is random
|
||||
generation
|
||||
:return: new interface data for the provided node
|
||||
"""
|
||||
iface_data = self.gen_iface(node.id, name, mac)
|
||||
iface_data.id = node.next_iface_id()
|
||||
return iface_data
|
||||
|
|
@ -1,266 +0,0 @@
|
|||
"""
|
||||
Defines distributed server functionality.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import os
|
||||
import threading
|
||||
from collections import OrderedDict
|
||||
from pathlib import Path
|
||||
from tempfile import NamedTemporaryFile
|
||||
from typing import TYPE_CHECKING, Callable
|
||||
|
||||
import netaddr
|
||||
from fabric import Connection
|
||||
from invoke import UnexpectedExit
|
||||
|
||||
from core import utils
|
||||
from core.emulator.links import CoreLink
|
||||
from core.errors import CoreCommandError, CoreError
|
||||
from core.executables import get_requirements
|
||||
from core.nodes.interface import GreTap
|
||||
from core.nodes.network import CoreNetwork, CtrlNet
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from core.emulator.session import Session
|
||||
|
||||
LOCK = threading.Lock()
|
||||
CMD_HIDE = True
|
||||
|
||||
|
||||
class DistributedServer:
    """
    Provides distributed server interactions over an SSH connection.
    """

    def __init__(self, name: str, host: str) -> None:
        """
        Create a DistributedServer instance.

        :param name: convenience name to associate with host
        :param host: host to connect to
        """
        self.name: str = name
        self.host: str = host
        # connect as root, since remote commands manipulate network devices
        self.conn: Connection = Connection(host, user="root")
        # serializes file transfers over the shared connection
        self.lock: threading.Lock = threading.Lock()

    def remote_cmd(
        self, cmd: str, env: dict[str, str] = None, cwd: str = None, wait: bool = True
    ) -> str:
        """
        Run command remotely using server connection.

        :param cmd: command to run
        :param env: environment for remote command, default is None
        :param cwd: directory to run command in, defaults to None, which is the
            user's home directory
        :param wait: True to wait for status, False to background process
        :return: stdout when success
        :raises CoreCommandError: when a non-zero exit status occurs
        """
        # only replace the remote environment when one was explicitly provided
        replace_env = env is not None
        if not wait:
            # background the process on the remote shell
            cmd += " &"
        logger.debug(
            "remote cmd server(%s) cwd(%s) wait(%s): %s", self.host, cwd, wait, cmd
        )
        try:
            if cwd is None:
                result = self.conn.run(
                    cmd, hide=CMD_HIDE, env=env, replace_env=replace_env
                )
            else:
                with self.conn.cd(cwd):
                    result = self.conn.run(
                        cmd, hide=CMD_HIDE, env=env, replace_env=replace_env
                    )
            return result.stdout.strip()
        except UnexpectedExit as e:
            stdout, stderr = e.streams_for_display()
            # chain the original exception to preserve the failure context
            raise CoreCommandError(e.result.exited, cmd, stdout, stderr) from e

    def remote_put(self, src_path: Path, dst_path: Path) -> None:
        """
        Push file to remote server.

        :param src_path: source file to push
        :param dst_path: destination file location
        :return: nothing
        """
        with self.lock:
            self.conn.put(str(src_path), str(dst_path))

    def remote_put_temp(self, dst_path: Path, data: str) -> None:
        """
        Remote push file contents to a remote server, using a temp file as an
        intermediate step.

        :param dst_path: file destination for data
        :param data: data to store in remote file
        :return: nothing
        """
        with self.lock:
            temp = NamedTemporaryFile(delete=False)
            try:
                temp.write(data.encode())
                temp.close()
                self.conn.put(temp.name, str(dst_path))
            finally:
                # always remove the local temp file, even if the upload fails
                temp.close()
                os.unlink(temp.name)
|
||||
|
||||
|
||||
class DistributedController:
    """
    Provides logic for dealing with remote tunnels and distributed servers.
    """

    def __init__(self, session: "Session") -> None:
        """
        Create a DistributedController instance.

        :param session: session this controller is associated with
        """
        self.session: "Session" = session
        # insertion-ordered mapping of server name to server
        self.servers: dict[str, DistributedServer] = OrderedDict()
        # tunnel key to (local tap, remote tap) pair, tracked for shutdown
        self.tunnels: dict[int, tuple[GreTap, GreTap]] = {}
        self.address: str = self.session.options.get("distributed_address")

    def add_server(self, name: str, host: str) -> None:
        """
        Add distributed server configuration.

        :param name: distributed server name
        :param host: distributed server host address
        :return: nothing
        :raises CoreError: when there is an error validating server
        """
        server = DistributedServer(name, host)
        # verify all required executables exist on the remote host
        for requirement in get_requirements(self.session.use_ovs()):
            try:
                server.remote_cmd(f"which {requirement}")
            except CoreCommandError as e:
                # chain the underlying command failure for easier debugging
                raise CoreError(
                    f"server({server.name}) failed validation for "
                    f"command({requirement})"
                ) from e
        self.servers[name] = server
        # ensure the session directory exists on the remote host
        cmd = f"mkdir -p {self.session.directory}"
        server.remote_cmd(cmd)

    def execute(self, func: Callable[[DistributedServer], None]) -> None:
        """
        Convenience for executing logic against all distributed servers.

        :param func: function to run, that takes a DistributedServer as a parameter
        :return: nothing
        """
        for server in self.servers.values():
            func(server)

    def shutdown(self) -> None:
        """
        Shutdown logic for dealing with distributed tunnels and server session
        directories.

        :return: nothing
        """
        # shutdown all tunnels
        for tunnels in self.tunnels.values():
            for tunnel in tunnels:
                tunnel.shutdown()
        # remove all remote session directories
        for server in self.servers.values():
            cmd = f"rm -rf {self.session.directory}"
            server.remote_cmd(cmd)
        # clear tunnels
        self.tunnels.clear()

    def start(self) -> None:
        """
        Start distributed network tunnels for control networks.

        :return: nothing
        """
        mtu = self.session.options.get_int("mtu")
        for node in self.session.nodes.values():
            # only control networks without an existing server interface
            if not isinstance(node, CtrlNet) or node.serverintf is not None:
                continue
            for server in self.servers.values():
                self.create_gre_tunnel(node, server, mtu, True)

    def create_gre_tunnels(self, core_link: CoreLink) -> None:
        """
        Creates gre tunnels for a core link with a ptp network connection.

        :param core_link: core link to create gre tunnel for
        :return: nothing
        :raises CoreError: when the core link has no ptp network
        """
        if not self.servers:
            return
        if not core_link.ptp:
            raise CoreError(
                "attempted to create gre tunnel for core link without a ptp network"
            )
        mtu = self.session.options.get_int("mtu")
        for server in self.servers.values():
            self.create_gre_tunnel(core_link.ptp, server, mtu, True)

    def create_gre_tunnel(
        self, node: CoreNetwork, server: DistributedServer, mtu: int, start: bool
    ) -> tuple[GreTap, GreTap]:
        """
        Create gre tunnel using a pair of gre taps between the local and remote server.

        :param node: node to create gre tunnel for
        :param server: server to create tunnel for
        :param mtu: mtu for gre taps
        :param start: True to start gre taps, False otherwise
        :return: local and remote gre taps created for tunnel
        """
        host = server.host
        # key is derived from the node id and the numeric value of the host ip
        key = self.tunnel_key(node.id, netaddr.IPAddress(host).value)
        tunnel = self.tunnels.get(key)
        # reuse an existing tunnel for this node/server pair
        if tunnel is not None:
            return tunnel
        # local to server
        logger.info("local tunnel node(%s) to remote(%s) key(%s)", node.name, host, key)
        local_tap = GreTap(self.session, host, key=key, mtu=mtu)
        if start:
            local_tap.startup()
            local_tap.net_client.set_iface_master(node.brname, local_tap.localname)
        # server to local
        logger.info(
            "remote tunnel node(%s) to local(%s) key(%s)", node.name, self.address, key
        )
        remote_tap = GreTap(self.session, self.address, key=key, server=server, mtu=mtu)
        if start:
            remote_tap.startup()
            remote_tap.net_client.set_iface_master(node.brname, remote_tap.localname)
        # save tunnels for shutdown
        tunnel = (local_tap, remote_tap)
        self.tunnels[key] = tunnel
        return tunnel

    def tunnel_key(self, node1_id: int, node2_id: int) -> int:
        """
        Compute a 32-bit key used to uniquely identify a GRE tunnel.
        The hash(n1num), hash(n2num) values are used, so node numbers may be
        None or string values (used for e.g. "ctrlnet").

        :param node1_id: node one id
        :param node2_id: node two id
        :return: tunnel key for the node pair
        """
        logger.debug("creating tunnel key for: %s, %s", node1_id, node2_id)
        # mix the session id with both node hashes, masked to 32 bits
        key = (
            (self.session.id << 16)
            ^ utils.hashkey(node1_id)
            ^ (utils.hashkey(node2_id) << 8)
        )
        return key & 0xFFFFFFFF
|
||||
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Add a link
Reference in a new issue