diff --git a/.github/workflows/daemon-checks.yml b/.github/workflows/daemon-checks.yml
index 52440467..dc169dcf 100644
--- a/.github/workflows/daemon-checks.yml
+++ b/.github/workflows/daemon-checks.yml
@@ -4,13 +4,13 @@ on: [push]
 jobs:
   build:
-    runs-on: ubuntu-18.04
+    runs-on: ubuntu-22.04
     steps:
     - uses: actions/checkout@v1
-    - name: Set up Python 3.6
+    - name: Set up Python 3.9
       uses: actions/setup-python@v1
       with:
-        python-version: 3.6
+        python-version: 3.9
     - name: install poetry
       run: |
         python -m pip install --upgrade pip
diff --git a/.gitignore b/.gitignore
index 2012df9d..beb83c13 100644
--- a/.gitignore
+++ b/.gitignore
@@ -14,6 +14,7 @@ config.h.in
 config.log
 config.status
 configure
+configure~
 debian
 stamp-h1
@@ -58,3 +59,6 @@ daemon/setup.py
 
 # python
 __pycache__
+
+# ignore core player files
+*.core
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 7a85ee34..bc3b1e53 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,46 @@
+## 2022-11-18 CORE 9.0.0
+
+* Breaking Changes
+  * removed session nodes file
+  * removed session state file
+  * emane now runs in one process per nem with unique control ports
+  * grpc client has been refactored and updated
+  * removed tcl/legacy gui, imn file support and the tlv api
+  * link configuration is now different, but consistent, for wired links
+* Installation
+  * added packaging for single file distribution
+  * python3.9 is now the minimum required version
+  * updated Dockerfile examples
+  * updated various python dependencies
+  * virtual environment is now installed to /opt/core/venv
+* Documentation
+  * updated emane invoke task examples
+  * revamped install documentation
+  * added wireless node notes
+* core-gui
+  * updated config services to display rendered templates and allow editing
+  * fixed node icon issue when updating preferences
+  * \#89 - throughput widget now works for hubs/switches
+  * \#691 - fixed custom nodes to properly use config services
+* gRPC API
+  * added linked call to support linking and unlinking interfaces without destroying them
+  * fixed issue during start session clearing out session options
+  * added call to get rendered config service files
+  * removed get_node_links call from client
+  * nem id and nem port have been added to GetNode and AddLink calls
+* core-daemon
+  * wired links always create two veth pairs joined by a bridge
+  * node interfaces are now configured within the container to apply to outgoing traffic
+  * session.add_node now uses NodeOptions, allowing for node specific options
+  * fixed issue with xml reading node canvas values
+  * removed Session.add_node_file
+  * fixed get requirements logic
+  * fixed docker/lxd node support for terminal commands on remote servers
+  * improved docker node command execution time using nsenter
+  * new wireless node type added to support dynamic loss based on distance
+  * \#513 - adding and deleting distributed links during runtime is now supported
+  * \#703 - fixed issue not starting emane event listening service
+
 ## 2022-03-21 CORE 8.2.0
 
 * core-gui
diff --git a/Dockerfile b/Dockerfile
deleted file mode 100644
index a116becb..00000000
--- a/Dockerfile
+++ /dev/null
@@ -1,100 +0,0 @@
-# syntax=docker/dockerfile:1
-FROM ubuntu:20.04
-LABEL Description="CORE Docker Image"
-
-# define variables
-ARG DEBIAN_FRONTEND=noninteractive
-ARG PREFIX=/usr/local
-ARG BRANCH=master
-ARG CORE_TARBALL=core.tar.gz
-ARG OSPF_TARBALL=ospf.tar.gz
-
-# install system dependencies
-RUN apt-get update && \
-    apt-get install -y --no-install-recommends \
-    automake \
-    bash \
-
ca-certificates \ - ethtool \ - gawk \ - gcc \ - g++ \ - iproute2 \ - iputils-ping \ - libc-dev \ - libev-dev \ - libreadline-dev \ - libtool \ - libtk-img \ - make \ - nftables \ - python3 \ - python3-pip \ - python3-tk \ - pkg-config \ - systemctl \ - tk \ - wget \ - xauth \ - xterm \ - && apt-get clean -# install python dependencies -RUN python3 -m pip install \ - grpcio==1.27.2 \ - grpcio-tools==1.27.2 \ - poetry==1.1.7 -# retrieve, build, and install core -RUN wget -q -O ${CORE_TARBALL} https://api.github.com/repos/coreemu/core/tarball/${BRANCH} && \ - tar xf ${CORE_TARBALL} && \ - cd coreemu-core* && \ - ./bootstrap.sh && \ - ./configure && \ - make -j $(nproc) && \ - make install && \ - cd daemon && \ - python3 -m poetry build -f wheel && \ - python3 -m pip install dist/* && \ - cp scripts/* ${PREFIX}/bin && \ - mkdir /etc/core && \ - cp -n data/core.conf /etc/core && \ - cp -n data/logging.conf /etc/core && \ - mkdir -p ${PREFIX}/share/core && \ - cp -r examples ${PREFIX}/share/core && \ - echo '\ -[Unit]\n\ -Description=Common Open Research Emulator Service\n\ -After=network.target\n\ -\n\ -[Service]\n\ -Type=simple\n\ -ExecStart=/usr/local/bin/core-daemon\n\ -TasksMax=infinity\n\ -\n\ -[Install]\n\ -WantedBy=multi-user.target\ -' > /lib/systemd/system/core-daemon.service && \ - cd ../.. && \ - rm ${CORE_TARBALL} && \ - rm -rf coreemu-core* -# retrieve, build, and install ospf mdr -RUN wget -q -O ${OSPF_TARBALL} https://github.com/USNavalResearchLaboratory/ospf-mdr/tarball/master && \ - tar xf ${OSPF_TARBALL} && \ - cd USNavalResearchLaboratory-ospf-mdr* && \ - ./bootstrap.sh && \ - ./configure --disable-doc --enable-user=root --enable-group=root \ - --with-cflags=-ggdb --sysconfdir=/usr/local/etc/quagga --enable-vtysh \ - --localstatedir=/var/run/quagga && \ - make -j $(nproc) && \ - make install && \ - cd .. && \ - rm ${OSPF_TARBALL} && \ - rm -rf USNavalResearchLaboratory-ospf-mdr* -# retrieve and install emane packages -RUN wget -q https://adjacentlink.com/downloads/emane/emane-1.2.7-release-1.ubuntu-20_04.amd64.tar.gz && \ - tar xf emane*.tar.gz && \ - cd emane-1.2.7-release-1/debs/ubuntu-20_04/amd64 && \ - apt-get install -y ./emane*.deb ./python3-emane_*.deb && \ - cd ../../../.. 
&& \ - rm emane-1.2.7-release-1.ubuntu-20_04.amd64.tar.gz && \ - rm -rf emane-1.2.7-release-1 -CMD ["systemctl", "start", "core-daemon"] diff --git a/Makefile.am b/Makefile.am index bd15cf09..2b5f29e2 100644 --- a/Makefile.am +++ b/Makefile.am @@ -6,10 +6,6 @@ if WANT_DOCS DOCS = docs man endif -if WANT_GUI - GUI = gui -endif - if WANT_DAEMON DAEMON = daemon endif @@ -19,12 +15,13 @@ if WANT_NETNS endif # keep docs last due to dependencies on binaries -SUBDIRS = $(GUI) $(DAEMON) $(NETNS) $(DOCS) +SUBDIRS = $(DAEMON) $(NETNS) $(DOCS) ACLOCAL_AMFLAGS = -I config # extra files to include with distribution tarball EXTRA_DIST = bootstrap.sh \ + package \ LICENSE \ README.md \ ASSIGNMENT_OF_COPYRIGHT.pdf \ @@ -51,7 +48,7 @@ fpm -s dir -t deb -n core-distributed \ --description "Common Open Research Emulator Distributed Package" \ --url https://github.com/coreemu/core \ --vendor "$(PACKAGE_VENDOR)" \ - -p core_distributed_VERSION_ARCH.deb \ + -p core-distributed_VERSION_ARCH.deb \ -v $(PACKAGE_VERSION) \ -d "ethtool" \ -d "procps" \ @@ -62,7 +59,8 @@ fpm -s dir -t deb -n core-distributed \ -d "libev4" \ -d "openssh-server" \ -d "xterm" \ - -C $(DESTDIR) + netns/vnoded=/usr/bin/ \ + netns/vcmd=/usr/bin/ endef define fpm-distributed-rpm = @@ -72,7 +70,7 @@ fpm -s dir -t rpm -n core-distributed \ --description "Common Open Research Emulator Distributed Package" \ --url https://github.com/coreemu/core \ --vendor "$(PACKAGE_VENDOR)" \ - -p core_distributed_VERSION_ARCH.rpm \ + -p core-distributed_VERSION_ARCH.rpm \ -v $(PACKAGE_VERSION) \ -d "ethtool" \ -d "procps-ng" \ @@ -83,12 +81,75 @@ fpm -s dir -t rpm -n core-distributed \ -d "net-tools" \ -d "openssh-server" \ -d "xterm" \ - -C $(DESTDIR) + netns/vnoded=/usr/bin/ \ + netns/vcmd=/usr/bin/ endef -.PHONY: fpm-distributed -fpm-distributed: clean-local-fpm - $(MAKE) -C netns install DESTDIR=$(DESTDIR) +define fpm-rpm = +fpm -s dir -t rpm -n core \ + -m "$(PACKAGE_MAINTAINERS)" \ + --license "BSD" \ + --description "core vnoded/vcmd and system dependencies" \ + --url https://github.com/coreemu/core \ + --vendor "$(PACKAGE_VENDOR)" \ + -p core_VERSION_ARCH.rpm \ + -v $(PACKAGE_VERSION) \ + --rpm-init package/core-daemon \ + --after-install package/after-install.sh \ + --after-remove package/after-remove.sh \ + -d "ethtool" \ + -d "tk" \ + -d "procps-ng" \ + -d "bash >= 3.0" \ + -d "ebtables" \ + -d "iproute" \ + -d "libev" \ + -d "net-tools" \ + -d "nftables" \ + netns/vnoded=/usr/bin/ \ + netns/vcmd=/usr/bin/ \ + package/etc/core.conf=/etc/core/ \ + package/etc/logging.conf=/etc/core/ \ + package/examples=/opt/core/ \ + daemon/dist/core-$(PACKAGE_VERSION)-py3-none-any.whl=/opt/core/ +endef + +define fpm-deb = +fpm -s dir -t deb -n core \ + -m "$(PACKAGE_MAINTAINERS)" \ + --license "BSD" \ + --description "core vnoded/vcmd and system dependencies" \ + --url https://github.com/coreemu/core \ + --vendor "$(PACKAGE_VENDOR)" \ + -p core_VERSION_ARCH.deb \ + -v $(PACKAGE_VERSION) \ + --deb-systemd package/core-daemon.service \ + --deb-no-default-config-files \ + --after-install package/after-install.sh \ + --after-remove package/after-remove.sh \ + -d "ethtool" \ + -d "tk" \ + -d "libtk-img" \ + -d "procps" \ + -d "libc6 >= 2.14" \ + -d "bash >= 3.0" \ + -d "ebtables" \ + -d "iproute2" \ + -d "libev4" \ + -d "nftables" \ + netns/vnoded=/usr/bin/ \ + netns/vcmd=/usr/bin/ \ + package/etc/core.conf=/etc/core/ \ + package/etc/logging.conf=/etc/core/ \ + package/examples=/opt/core/ \ + daemon/dist/core-$(PACKAGE_VERSION)-py3-none-any.whl=/opt/core/ +endef + 
+.PHONY: fpm +fpm: clean-local-fpm + cd daemon && poetry build -f wheel + $(call fpm-deb) + $(call fpm-rpm) $(call fpm-distributed-deb) $(call fpm-distributed-rpm) @@ -115,7 +176,6 @@ $(info creating file $1 from $1.in) -e 's,[@]CORE_STATE_DIR[@],$(CORE_STATE_DIR),g' \ -e 's,[@]CORE_DATA_DIR[@],$(CORE_DATA_DIR),g' \ -e 's,[@]CORE_CONF_DIR[@],$(CORE_CONF_DIR),g' \ - -e 's,[@]CORE_GUI_CONF_DIR[@],$(CORE_GUI_CONF_DIR),g' \ < $1.in > $1 endef @@ -123,7 +183,6 @@ all: change-files .PHONY: change-files change-files: - $(call change-files,gui/core-gui-legacy) $(call change-files,daemon/core/constants.py) $(call change-files,netns/setup.py) diff --git a/README.md b/README.md index 8dbe4e56..b0aa133f 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,4 @@ # CORE - CORE: Common Open Research Emulator Copyright (c)2005-2022 the Boeing Company. @@ -7,7 +6,6 @@ Copyright (c)2005-2022 the Boeing Company. See the LICENSE file included in this distribution. ## About - The Common Open Research Emulator (CORE) is a tool for emulating networks on one or more machines. You can connect these emulated networks to live networks. CORE consists of a GUI for drawing @@ -15,12 +13,34 @@ topologies of lightweight virtual machines, and Python modules for scripting network emulation. ## Quick Start +Requires Python 3.9+. More detailed instructions and install options can be found +[here](https://coreemu.github.io/core/install.html). -The following should get you up and running on Ubuntu 18+ and CentOS 7+ -from a clean install, it will prompt you for sudo password. This would +### Package Install +Grab the latest deb/rpm from [releases](https://github.com/coreemu/core/releases). + +This will install vnoded/vcmd, system dependencies, and CORE within a python +virtual environment at `/opt/core/venv`. +```shell +sudo install -y ./ +``` + +Then install OSPF MDR from source: +```shell +git clone https://github.com/USNavalResearchLaboratory/ospf-mdr.git +cd ospf-mdr +./bootstrap.sh +./configure --disable-doc --enable-user=root --enable-group=root \ + --with-cflags=-ggdb --sysconfdir=/usr/local/etc/quagga --enable-vtysh \ + --localstatedir=/var/run/quagga +make -j$(nproc) +sudo make install +``` + +### Script Install +The following should get you up and running on Ubuntu 22.04. This would install CORE into a python3 virtual environment and install [OSPF MDR](https://github.com/USNavalResearchLaboratory/ospf-mdr) from source. -For more detailed installation see [here](https://coreemu.github.io/core/install.html). ```shell git clone https://github.com/coreemu/core.git @@ -36,7 +56,6 @@ inv install -p /usr ``` ## Documentation & Support - We are leveraging GitHub hosted documentation and Discord for persistent chat rooms. This allows for more dynamic conversations and the capability to respond faster. Feel free to join us at the link below. diff --git a/bootstrap.sh b/bootstrap.sh index ab3d741c..25fdecfd 100755 --- a/bootstrap.sh +++ b/bootstrap.sh @@ -1,9 +1,5 @@ #!/bin/sh # -# (c)2010-2012 the Boeing Company -# -# author: Jeff Ahrenholz -# # Bootstrap the autoconf system. # diff --git a/configure.ac b/configure.ac index 78980b56..872aca17 100644 --- a/configure.ac +++ b/configure.ac @@ -2,7 +2,7 @@ # Process this file with autoconf to produce a configure script. 
# this defines the CORE version number, must be static for AC_INIT -AC_INIT(core, 8.2.0) +AC_INIT(core, 9.0.0) # autoconf and automake initialization AC_CONFIG_SRCDIR([netns/version.h.in]) @@ -30,25 +30,14 @@ AC_SUBST(CORE_CONF_DIR) AC_SUBST(CORE_DATA_DIR) AC_SUBST(CORE_STATE_DIR) -# CORE GUI configuration files and preferences in CORE_GUI_CONF_DIR -# scenario files in ~/.core/configs/ -AC_ARG_WITH([guiconfdir], - [AS_HELP_STRING([--with-guiconfdir=dir], - [specify GUI configuration directory])], - [CORE_GUI_CONF_DIR="$with_guiconfdir"], - [CORE_GUI_CONF_DIR="\$\${HOME}/.core"]) -AC_SUBST(CORE_GUI_CONF_DIR) -AC_ARG_ENABLE([gui], - [AS_HELP_STRING([--enable-gui[=ARG]], - [build and install the GUI (default is yes)])], - [], [enable_gui=yes]) -AC_SUBST(enable_gui) +# documentation option AC_ARG_ENABLE([docs], [AS_HELP_STRING([--enable-docs[=ARG]], [build python documentation (default is no)])], [], [enable_docs=no]) AC_SUBST(enable_docs) +# python option AC_ARG_ENABLE([python], [AS_HELP_STRING([--enable-python[=ARG]], [build and install the python bindings (default is yes)])], @@ -94,28 +83,7 @@ if test "x$enable_daemon" = "xyes"; then want_python=yes want_linux_netns=yes - # Checks for libraries. - AC_CHECK_LIB([netgraph], [NgMkSockNode]) - - # Checks for header files. - AC_CHECK_HEADERS([arpa/inet.h fcntl.h limits.h stdint.h stdlib.h string.h sys/ioctl.h sys/mount.h sys/socket.h sys/time.h termios.h unistd.h]) - - # Checks for typedefs, structures, and compiler characteristics. - AC_C_INLINE - AC_TYPE_INT32_T - AC_TYPE_PID_T - AC_TYPE_SIZE_T - AC_TYPE_SSIZE_T - AC_TYPE_UINT32_T - AC_TYPE_UINT8_T - - # Checks for library functions. - AC_FUNC_FORK - AC_FUNC_MALLOC - AC_FUNC_REALLOC - AC_CHECK_FUNCS([atexit dup2 gettimeofday memset socket strerror uname]) - - AM_PATH_PYTHON(3.6) + AM_PATH_PYTHON(3.9) AS_IF([$PYTHON -m grpc_tools.protoc -h &> /dev/null], [], [AC_MSG_ERROR([please install python grpcio-tools])]) AC_CHECK_PROG(sysctl_path, sysctl, $as_dir, no, $SEARCHPATH) @@ -171,6 +139,25 @@ fi if [ test "x$enable_daemon" = "xyes" || test "x$enable_vnodedonly" = "xyes" ] ; then want_linux_netns=yes + + # Checks for header files. + AC_CHECK_HEADERS([arpa/inet.h fcntl.h limits.h stdint.h stdlib.h string.h sys/ioctl.h sys/mount.h sys/socket.h sys/time.h termios.h unistd.h]) + + # Checks for typedefs, structures, and compiler characteristics. + AC_C_INLINE + AC_TYPE_INT32_T + AC_TYPE_PID_T + AC_TYPE_SIZE_T + AC_TYPE_SSIZE_T + AC_TYPE_UINT32_T + AC_TYPE_UINT8_T + + # Checks for library functions. 
+ AC_FUNC_FORK + AC_FUNC_MALLOC + AC_FUNC_REALLOC + AC_CHECK_FUNCS([atexit dup2 gettimeofday memset socket strerror uname]) + PKG_CHECK_MODULES(libev, libev, AC_MSG_RESULT([found libev using pkgconfig OK]) AC_SUBST(libev_CFLAGS) @@ -209,7 +196,6 @@ if [test "x$want_python" = "xyes" && test "x$enable_docs" = "xyes"] ; then fi # Variable substitutions -AM_CONDITIONAL(WANT_GUI, test x$enable_gui = xyes) AM_CONDITIONAL(WANT_DAEMON, test x$enable_daemon = xyes) AM_CONDITIONAL(WANT_DOCS, test x$want_docs = xyes) AM_CONDITIONAL(WANT_PYTHON, test x$want_python = xyes) @@ -224,9 +210,6 @@ fi # Output files AC_CONFIG_FILES([Makefile - gui/version.tcl - gui/Makefile - gui/icons/Makefile man/Makefile docs/Makefile daemon/Makefile @@ -248,17 +231,12 @@ Build: Prefix: ${prefix} Exec Prefix: ${exec_prefix} -GUI: - GUI path: ${CORE_LIB_DIR} - GUI config: ${CORE_GUI_CONF_DIR} - Daemon: Daemon path: ${bindir} Daemon config: ${CORE_CONF_DIR} Python: ${PYTHON} Features to build: - Build GUI: ${enable_gui} Build Daemon: ${enable_daemon} Documentation: ${want_docs} diff --git a/daemon/Makefile.am b/daemon/Makefile.am index 7528dc01..2585ea1a 100644 --- a/daemon/Makefile.am +++ b/daemon/Makefile.am @@ -1,8 +1,4 @@ # CORE -# (c)2010-2012 the Boeing Company. -# See the LICENSE file included in this distribution. -# -# author: Jeff Ahrenholz # # Makefile for building netns components. # @@ -25,10 +21,7 @@ DISTCLEANFILES = Makefile.in # files to include with distribution tarball EXTRA_DIST = core \ - data \ doc/conf.py.in \ - examples \ - scripts \ tests \ setup.cfg \ poetry.lock \ diff --git a/daemon/core/api/grpc/client.py b/daemon/core/api/grpc/client.py index e2e1e729..4ea3332a 100644 --- a/daemon/core/api/grpc/client.py +++ b/daemon/core/api/grpc/client.py @@ -14,9 +14,17 @@ import grpc from core.api.grpc import core_pb2, core_pb2_grpc, emane_pb2, wrappers from core.api.grpc.configservices_pb2 import ( GetConfigServiceDefaultsRequest, + GetConfigServiceRenderedRequest, GetNodeConfigServiceRequest, ) -from core.api.grpc.core_pb2 import ExecuteScriptRequest, GetConfigRequest +from core.api.grpc.core_pb2 import ( + ExecuteScriptRequest, + GetConfigRequest, + GetWirelessConfigRequest, + LinkedRequest, + WirelessConfigRequest, + WirelessLinkedRequest, +) from core.api.grpc.emane_pb2 import ( EmaneLinkRequest, GetEmaneEventChannelRequest, @@ -43,17 +51,19 @@ from core.api.grpc.wlan_pb2 import ( WlanConfig, WlanLinkRequest, ) +from core.api.grpc.wrappers import LinkOptions from core.emulator.data import IpPrefixes from core.errors import CoreError +from core.utils import SetQueue logger = logging.getLogger(__name__) class MoveNodesStreamer: - def __init__(self, session_id: int = None, source: str = None) -> None: - self.session_id = session_id - self.source = source - self.queue: Queue = Queue() + def __init__(self, session_id: int, source: str = None) -> None: + self.session_id: int = session_id + self.source: Optional[str] = source + self.queue: SetQueue = SetQueue() def send_position(self, node_id: int, x: float, y: float) -> None: position = wrappers.Position(x=x, y=y) @@ -563,23 +573,6 @@ class CoreGrpcClient: response = self.stub.GetNodeTerminal(request) return response.terminal - def get_node_links(self, session_id: int, node_id: int) -> List[wrappers.Link]: - """ - Get current links for a node. 
- - :param session_id: session id - :param node_id: node id - :return: list of links - :raises grpc.RpcError: when session or node doesn't exist - """ - request = core_pb2.GetNodeLinksRequest(session_id=session_id, node_id=node_id) - response = self.stub.GetNodeLinks(request) - links = [] - for link_proto in response.links: - link = wrappers.Link.from_proto(link_proto) - links.append(link) - return links - def add_link( self, session_id: int, link: wrappers.Link, source: str = None ) -> Tuple[bool, wrappers.Interface, wrappers.Interface]: @@ -741,9 +734,9 @@ class CoreGrpcClient: :raises grpc.RpcError: when session doesn't exist """ defaults = [] - for node_type in service_defaults: - services = service_defaults[node_type] - default = ServiceDefaults(node_type=node_type, services=services) + for model in service_defaults: + services = service_defaults[model] + default = ServiceDefaults(model=model, services=services) defaults.append(default) request = SetServiceDefaultsRequest(session_id=session_id, defaults=defaults) response = self.stub.SetServiceDefaults(request) @@ -987,6 +980,23 @@ class CoreGrpcClient: response = self.stub.GetNodeConfigService(request) return dict(response.config) + def get_config_service_rendered( + self, session_id: int, node_id: int, name: str + ) -> Dict[str, str]: + """ + Retrieve the rendered config service files for a node. + + :param session_id: id of session + :param node_id: id of node + :param name: name of service + :return: dict mapping names of files to rendered data + """ + request = GetConfigServiceRenderedRequest( + session_id=session_id, node_id=node_id, name=name + ) + response = self.stub.GetConfigServiceRendered(request) + return dict(response.rendered) + def get_emane_event_channel( self, session_id: int, nem_id: int ) -> wrappers.EmaneEventChannel: @@ -1049,6 +1059,81 @@ class CoreGrpcClient: """ self.stub.EmanePathlosses(streamer.iter()) + def linked( + self, + session_id: int, + node1_id: int, + node2_id: int, + iface1_id: int, + iface2_id: int, + linked: bool, + ) -> None: + """ + Link or unlink an existing core wired link. 
+ + :param session_id: session containing the link + :param node1_id: first node in link + :param node2_id: second node in link + :param iface1_id: node1 interface + :param iface2_id: node2 interface + :param linked: True to connect link, False to disconnect + :return: nothing + """ + request = LinkedRequest( + session_id=session_id, + node1_id=node1_id, + node2_id=node2_id, + iface1_id=iface1_id, + iface2_id=iface2_id, + linked=linked, + ) + self.stub.Linked(request) + + def wireless_linked( + self, + session_id: int, + wireless_id: int, + node1_id: int, + node2_id: int, + linked: bool, + ) -> None: + request = WirelessLinkedRequest( + session_id=session_id, + wireless_id=wireless_id, + node1_id=node1_id, + node2_id=node2_id, + linked=linked, + ) + self.stub.WirelessLinked(request) + + def wireless_config( + self, + session_id: int, + wireless_id: int, + node1_id: int, + node2_id: int, + options1: LinkOptions, + options2: LinkOptions = None, + ) -> None: + if options2 is None: + options2 = options1 + request = WirelessConfigRequest( + session_id=session_id, + wireless_id=wireless_id, + node1_id=node1_id, + node2_id=node2_id, + options1=options1.to_proto(), + options2=options2.to_proto(), + ) + self.stub.WirelessConfig(request) + + def get_wireless_config( + self, session_id: int, node_id: int + ) -> Dict[str, wrappers.ConfigOption]: + request = GetWirelessConfigRequest(session_id=session_id, node_id=node_id) + response = self.stub.GetWirelessConfig(request) + return wrappers.ConfigOption.from_dict(response.config) + def connect(self) -> None: """ Open connection to server, must be closed manually. diff --git a/daemon/core/api/grpc/events.py b/daemon/core/api/grpc/events.py index b319a978..1a716364 100644 --- a/daemon/core/api/grpc/events.py +++ b/daemon/core/api/grpc/events.py @@ -3,7 +3,7 @@ from queue import Empty, Queue from typing import Iterable, Optional from core.api.grpc import core_pb2 -from core.api.grpc.grpcutils import convert_link +from core.api.grpc.grpcutils import convert_link_data from core.emulator.data import ( ConfigData, EventData, @@ -33,7 +33,7 @@ def handle_node_event(node_data: NodeData) -> core_pb2.Event: node_proto = core_pb2.Node( id=node.id, name=node.name, - model=node.type, + model=node.model, icon=node.icon, position=position, geo=geo, @@ -51,7 +51,7 @@ def handle_link_event(link_data: LinkData) -> core_pb2.Event: :param link_data: link data :return: link event that has message type and link information """ - link = convert_link(link_data) + link = convert_link_data(link_data) message_type = link_data.message_type.value link_event = core_pb2.LinkEvent(message_type=message_type, link=link) return core_pb2.Event(link_event=link_event, source=link_data.source) diff --git a/daemon/core/api/grpc/grpcutils.py b/daemon/core/api/grpc/grpcutils.py index c585a135..5d8d7ceb 100644 --- a/daemon/core/api/grpc/grpcutils.py +++ b/daemon/core/api/grpc/grpcutils.py @@ -1,7 +1,7 @@ import logging import time from pathlib import Path -from typing import Any, Dict, List, Tuple, Type, Union +from typing import Any, Dict, List, Optional, Tuple, Type, Union import grpc from grpc import ServicerContext @@ -17,17 +17,26 @@ from core.api.grpc.services_pb2 import ( ServiceDefaults, ) from core.config import ConfigurableOptions -from core.emane.nodes import EmaneNet -from core.emulator.data import InterfaceData, LinkData, LinkOptions, NodeOptions +from core.emane.nodes import EmaneNet, EmaneOptions +from core.emulator.data import InterfaceData, LinkData, LinkOptions from 
core.emulator.enumerations import LinkTypes, NodeTypes +from core.emulator.links import CoreLink from core.emulator.session import Session from core.errors import CoreError from core.location.mobility import BasicRangeModel, Ns2ScriptedMobility -from core.nodes.base import CoreNode, CoreNodeBase, NodeBase -from core.nodes.docker import DockerNode +from core.nodes.base import ( + CoreNode, + CoreNodeBase, + CoreNodeOptions, + NodeBase, + NodeOptions, + Position, +) +from core.nodes.docker import DockerNode, DockerOptions from core.nodes.interface import CoreInterface from core.nodes.lxd import LxcNode -from core.nodes.network import CtrlNet, PtpNet, WlanNode +from core.nodes.network import CoreNetwork, CtrlNet, PtpNet, WlanNode +from core.nodes.wireless import WirelessNode from core.services.coreservices import CoreService logger = logging.getLogger(__name__) @@ -53,34 +62,33 @@ class CpuUsage: return (total_diff - idle_diff) / total_diff -def add_node_data(node_proto: core_pb2.Node) -> Tuple[NodeTypes, int, NodeOptions]: +def add_node_data( + _class: Type[NodeBase], node_proto: core_pb2.Node +) -> Tuple[Position, NodeOptions]: """ Convert node protobuf message to data for creating a node. + :param _class: node class to create options from :param node_proto: node proto message :return: node type, id, and options """ - _id = node_proto.id - _type = NodeTypes(node_proto.type) - options = NodeOptions( - name=node_proto.name, - model=node_proto.model, - icon=node_proto.icon, - image=node_proto.image, - services=node_proto.services, - config_services=node_proto.config_services, - canvas=node_proto.canvas, - ) - if node_proto.emane: - options.emane = node_proto.emane - if node_proto.server: - options.server = node_proto.server - position = node_proto.position - options.set_position(position.x, position.y) + options = _class.create_options() + options.icon = node_proto.icon + options.canvas = node_proto.canvas + if isinstance(options, CoreNodeOptions): + options.model = node_proto.model + options.services = node_proto.services + options.config_services = node_proto.config_services + if isinstance(options, EmaneOptions): + options.emane_model = node_proto.emane + if isinstance(options, DockerOptions): + options.image = node_proto.image + position = Position() + position.set(node_proto.position.x, node_proto.position.y) if node_proto.HasField("geo"): geo = node_proto.geo - options.set_location(geo.lat, geo.lon, geo.alt) - return _type, _id, options + position.set_geo(geo.lon, geo.lat, geo.alt) + return position, options def link_iface(iface_proto: core_pb2.Interface) -> InterfaceData: @@ -110,7 +118,7 @@ def link_iface(iface_proto: core_pb2.Interface) -> InterfaceData: def add_link_data( link_proto: core_pb2.Link -) -> Tuple[InterfaceData, InterfaceData, LinkOptions, LinkTypes]: +) -> Tuple[InterfaceData, InterfaceData, LinkOptions]: """ Convert link proto to link interfaces and options data. 
@@ -119,7 +127,6 @@ def add_link_data( """ iface1_data = link_iface(link_proto.iface1) iface2_data = link_iface(link_proto.iface2) - link_type = LinkTypes(link_proto.type) options = LinkOptions() options_proto = link_proto.options if options_proto: @@ -134,7 +141,7 @@ def add_link_data( options.buffer = options_proto.buffer options.unidirectional = options_proto.unidirectional options.key = options_proto.key - return iface1_data, iface2_data, options, link_type + return iface1_data, iface2_data, options def create_nodes( @@ -149,9 +156,17 @@ def create_nodes( """ funcs = [] for node_proto in node_protos: - _type, _id, options = add_node_data(node_proto) + _type = NodeTypes(node_proto.type) _class = session.get_node_class(_type) - args = (_class, _id, options) + position, options = add_node_data(_class, node_proto) + args = ( + _class, + node_proto.id or None, + node_proto.name or None, + node_proto.server or None, + position, + options, + ) funcs.append((session.add_node, args, {})) start = time.monotonic() results, exceptions = utils.threadpool(funcs) @@ -174,8 +189,8 @@ def create_links( for link_proto in link_protos: node1_id = link_proto.node1_id node2_id = link_proto.node2_id - iface1, iface2, options, link_type = add_link_data(link_proto) - args = (node1_id, node2_id, iface1, iface2, options, link_type) + iface1, iface2, options = add_link_data(link_proto) + args = (node1_id, node2_id, iface1, iface2, options) funcs.append((session.add_link, args, {})) start = time.monotonic() results, exceptions = utils.threadpool(funcs) @@ -198,8 +213,8 @@ def edit_links( for link_proto in link_protos: node1_id = link_proto.node1_id node2_id = link_proto.node2_id - iface1, iface2, options, link_type = add_link_data(link_proto) - args = (node1_id, node2_id, iface1.id, iface2.id, options, link_type) + iface1, iface2, options = add_link_data(link_proto) + args = (node1_id, node2_id, iface1.id, iface2.id, options) funcs.append((session.update_link, args, {})) start = time.monotonic() results, exceptions = utils.threadpool(funcs) @@ -220,6 +235,22 @@ def convert_value(value: Any) -> str: return value +def convert_session_options(session: Session) -> Dict[str, common_pb2.ConfigOption]: + config_options = {} + for option in session.options.options: + value = session.options.get(option.id) + config_option = common_pb2.ConfigOption( + label=option.label, + name=option.id, + value=value, + type=option.type.value, + select=option.options, + group="Options", + ) + config_options[option.id] = config_option + return config_options + + def get_config_options( config: Dict[str, str], configurable_options: Union[ConfigurableOptions, Type[ConfigurableOptions]], @@ -270,7 +301,6 @@ def get_node_proto( lat=node.position.lat, lon=node.position.lon, alt=node.position.alt ) services = [x.name for x in node.services] - model = node.type node_dir = None config_services = [] if isinstance(node, CoreNodeBase): @@ -281,7 +311,7 @@ def get_node_proto( channel = str(node.ctrlchnlname) emane_model = None if isinstance(node, EmaneNet): - emane_model = node.model.name + emane_model = node.wireless_model.name image = None if isinstance(node, (DockerNode, LxcNode)): image = node.image @@ -291,6 +321,21 @@ def get_node_proto( ) if wlan_config: wlan_config = get_config_options(wlan_config, BasicRangeModel) + # check for wireless config + wireless_config = None + if isinstance(node, WirelessNode): + configs = node.get_config() + wireless_config = {} + for config in configs.values(): + config_option = common_pb2.ConfigOption( + 
label=config.label, + name=config.id, + value=config.default, + type=config.type.value, + select=config.options, + group=config.group, + ) + wireless_config[config.id] = config_option # check for mobility config mobility_config = session.mobility.get_configs( node.id, config_type=Ns2ScriptedMobility.name @@ -325,7 +370,7 @@ def get_node_proto( id=node.id, name=node.name, emane=emane_model, - model=model, + model=node.model, type=node_type.value, position=position, geo=geo, @@ -337,6 +382,7 @@ def get_node_proto( channel=channel, canvas=node.canvas, wlan_config=wlan_config, + wireless_config=wireless_config, mobility_config=mobility_config, service_configs=service_configs, config_service_configs=config_service_configs, @@ -344,61 +390,84 @@ def get_node_proto( ) -def get_links(node: NodeBase): +def get_links(session: Session, node: NodeBase) -> List[core_pb2.Link]: """ Retrieve a list of links for grpc to use. + :param session: session to get links for node :param node: node to get links from :return: protobuf links """ + link_protos = [] + for core_link in session.link_manager.node_links(node): + link_protos.extend(convert_core_link(core_link)) + if isinstance(node, (WlanNode, EmaneNet)): + for link_data in node.links(): + link_protos.append(convert_link_data(link_data)) + return link_protos + + +def convert_iface(iface: CoreInterface) -> core_pb2.Interface: + """ + Convert interface to protobuf. + + :param iface: interface to convert + :return: protobuf interface + """ + if isinstance(iface.node, CoreNetwork): + return core_pb2.Interface(id=iface.id) + else: + ip4 = iface.get_ip4() + ip4_mask = ip4.prefixlen if ip4 else None + ip4 = str(ip4.ip) if ip4 else None + ip6 = iface.get_ip6() + ip6_mask = ip6.prefixlen if ip6 else None + ip6 = str(ip6.ip) if ip6 else None + mac = str(iface.mac) if iface.mac else None + return core_pb2.Interface( + id=iface.id, + name=iface.name, + mac=mac, + ip4=ip4, + ip4_mask=ip4_mask, + ip6=ip6, + ip6_mask=ip6_mask, + ) + + +def convert_core_link(core_link: CoreLink) -> List[core_pb2.Link]: + """ + Convert core link to protobuf data. 
+ + :param core_link: core link to convert + :return: protobuf link data + """ links = [] - for link in node.links(): - link_proto = convert_link(link) - links.append(link_proto) + node1, iface1 = core_link.node1, core_link.iface1 + node2, iface2 = core_link.node2, core_link.iface2 + unidirectional = core_link.is_unidirectional() + link = convert_link(node1, iface1, node2, iface2, iface1.options, unidirectional) + links.append(link) + if unidirectional: + link = convert_link( + node2, iface2, node1, iface1, iface2.options, unidirectional + ) + links.append(link) return links -def convert_iface(iface_data: InterfaceData) -> core_pb2.Interface: - return core_pb2.Interface( - id=iface_data.id, - name=iface_data.name, - mac=iface_data.mac, - ip4=iface_data.ip4, - ip4_mask=iface_data.ip4_mask, - ip6=iface_data.ip6, - ip6_mask=iface_data.ip6_mask, - ) - - -def convert_link_options(options_data: LinkOptions) -> core_pb2.LinkOptions: - return core_pb2.LinkOptions( - jitter=options_data.jitter, - key=options_data.key, - mburst=options_data.mburst, - mer=options_data.mer, - loss=options_data.loss, - bandwidth=options_data.bandwidth, - burst=options_data.burst, - delay=options_data.delay, - dup=options_data.dup, - buffer=options_data.buffer, - unidirectional=options_data.unidirectional, - ) - - -def convert_link(link_data: LinkData) -> core_pb2.Link: +def convert_link_data(link_data: LinkData) -> core_pb2.Link: """ Convert link_data into core protobuf link. - :param link_data: link to convert :return: core protobuf Link """ iface1 = None if link_data.iface1 is not None: - iface1 = convert_iface(link_data.iface1) + iface1 = convert_iface_data(link_data.iface1) iface2 = None if link_data.iface2 is not None: - iface2 = convert_iface(link_data.iface2) + iface2 = convert_iface_data(link_data.iface2) options = convert_link_options(link_data.options) return core_pb2.Link( type=link_data.type.value, @@ -413,6 +482,123 @@ def convert_link(link_data: LinkData) -> core_pb2.Link: ) +def convert_iface_data(iface_data: InterfaceData) -> core_pb2.Interface: + """ + Convert interface data to protobuf. + + :param iface_data: interface data to convert + :return: interface protobuf + """ + return core_pb2.Interface( + id=iface_data.id, + name=iface_data.name, + mac=iface_data.mac, + ip4=iface_data.ip4, + ip4_mask=iface_data.ip4_mask, + ip6=iface_data.ip6, + ip6_mask=iface_data.ip6_mask, + ) + + +def convert_link_options(options: LinkOptions) -> core_pb2.LinkOptions: + """ + Convert link options to protobuf. + + :param options: link options to convert + :return: link options protobuf + """ + return core_pb2.LinkOptions( + jitter=options.jitter, + key=options.key, + mburst=options.mburst, + mer=options.mer, + loss=options.loss, + bandwidth=options.bandwidth, + burst=options.burst, + delay=options.delay, + dup=options.dup, + buffer=options.buffer, + unidirectional=options.unidirectional, + ) + + +def convert_options_proto(options: core_pb2.LinkOptions) -> LinkOptions: + return LinkOptions( + delay=options.delay, + bandwidth=options.bandwidth, + loss=options.loss, + dup=options.dup, + jitter=options.jitter, + mer=options.mer, + burst=options.burst, + mburst=options.mburst, + buffer=options.buffer, + unidirectional=options.unidirectional, + key=options.key, + ) + + +def convert_link( + node1: NodeBase, + iface1: Optional[CoreInterface], + node2: NodeBase, + iface2: Optional[CoreInterface], + options: LinkOptions, + unidirectional: bool, +) -> core_pb2.Link: + """ + Convert link objects to link protobuf. 
+ + :param node1: first node in link + :param iface1: node1 interface + :param node2: second node in link + :param iface2: node2 interface + :param options: link options + :param unidirectional: if this link is considered unidirectional + :return: protobuf link + """ + if iface1 is not None: + iface1 = convert_iface(iface1) + if iface2 is not None: + iface2 = convert_iface(iface2) + is_node1_wireless = isinstance(node1, (WlanNode, EmaneNet)) + is_node2_wireless = isinstance(node2, (WlanNode, EmaneNet)) + if not (is_node1_wireless or is_node2_wireless): + options = convert_link_options(options) + options.unidirectional = unidirectional + else: + options = None + return core_pb2.Link( + type=LinkTypes.WIRED.value, + node1_id=node1.id, + node2_id=node2.id, + iface1=iface1, + iface2=iface2, + options=options, + network_id=None, + label=None, + color=None, + ) + + +def parse_proc_net_dev(lines: List[str]) -> Dict[str, Any]: + """ + Parse lines of output from /proc/net/dev. + + :param lines: lines of /proc/net/dev + :return: parsed device to tx/rx values + """ + stats = {} + for line in lines[2:]: + line = line.strip() + if not line: + continue + line = line.split() + line[0] = line[0].strip(":") + stats[line[0]] = {"rx": float(line[1]), "tx": float(line[9])} + return stats + + def get_net_stats() -> Dict[str, Dict]: """ Retrieve status about the current interfaces in the system @@ -420,18 +606,8 @@ def get_net_stats() -> Dict[str, Dict]: :return: send and receive status of the interfaces in the system """ with open("/proc/net/dev", "r") as f: - data = f.readlines()[2:] - - stats = {} - for line in data: - line = line.strip() - if not line: - continue - line = line.split() - line[0] = line[0].strip(":") - stats[line[0]] = {"rx": float(line[1]), "tx": float(line[9])} - - return stats + lines = f.readlines()[2:] + return parse_proc_net_dev(lines) def session_location(session: Session, location: core_pb2.SessionLocation) -> None: @@ -490,39 +666,14 @@ def get_service_configuration(service: CoreService) -> NodeServiceData: ) -def iface_to_data(iface: CoreInterface) -> InterfaceData: - ip4 = iface.get_ip4() - ip4_addr = str(ip4.ip) if ip4 else None - ip4_mask = ip4.prefixlen if ip4 else None - ip6 = iface.get_ip6() - ip6_addr = str(ip6.ip) if ip6 else None - ip6_mask = ip6.prefixlen if ip6 else None - return InterfaceData( - id=iface.node_id, - name=iface.name, - mac=str(iface.mac), - ip4=ip4_addr, - ip4_mask=ip4_mask, - ip6=ip6_addr, - ip6_mask=ip6_mask, - ) - - -def iface_to_proto(node_id: int, iface: CoreInterface) -> core_pb2.Interface: +def iface_to_proto(session: Session, iface: CoreInterface) -> core_pb2.Interface: """ Convenience for converting a core interface to the protobuf representation. 
- :param node_id: id of node to convert interface for + :param session: session interface belongs to :param iface: interface to convert :return: interface proto """ - if iface.node and iface.node.id == node_id: - _id = iface.node_id - else: - _id = iface.net_id - net_id = iface.net.id if iface.net else None - node_id = iface.node.id if iface.node else None - net2_id = iface.othernet.id if iface.othernet else None ip4_net = iface.get_ip4() ip4 = str(ip4_net.ip) if ip4_net else None ip4_mask = ip4_net.prefixlen if ip4_net else None @@ -530,11 +681,13 @@ def iface_to_proto(node_id: int, iface: CoreInterface) -> core_pb2.Interface: ip6 = str(ip6_net.ip) if ip6_net else None ip6_mask = ip6_net.prefixlen if ip6_net else None mac = str(iface.mac) if iface.mac else None + nem_id = None + nem_port = None + if isinstance(iface.net, EmaneNet): + nem_id = session.emane.get_nem_id(iface) + nem_port = session.emane.get_nem_port(iface) return core_pb2.Interface( - id=_id, - net_id=net_id, - net2_id=net2_id, - node_id=node_id, + id=iface.id, name=iface.name, mac=mac, mtu=iface.mtu, @@ -543,6 +696,8 @@ def iface_to_proto(node_id: int, iface: CoreInterface) -> core_pb2.Interface: ip4_mask=ip4_mask, ip6=ip6, ip6_mask=ip6_mask, + nem_id=nem_id, + nem_port=nem_port, ) @@ -574,6 +729,12 @@ def get_nem_id( def get_emane_model_configs_dict(session: Session) -> Dict[int, List[NodeEmaneConfig]]: + """ + Get emane model configuration protobuf data. + + :param session: session to get emane model configuration for + :return: dict of emane model protobuf configurations + """ configs = {} for _id, model_configs in session.emane.node_configs.items(): for model_name in model_configs: @@ -591,6 +752,12 @@ def get_emane_model_configs_dict(session: Session) -> Dict[int, List[NodeEmaneCo def get_hooks(session: Session) -> List[core_pb2.Hook]: + """ + Retrieve hook protobuf data for a session. + + :param session: session to get hooks for + :return: list of hook protobufs + """ hooks = [] for state in session.hooks: state_hooks = session.hooks[state] @@ -601,9 +768,15 @@ def get_hooks(session: Session) -> List[core_pb2.Hook]: def get_default_services(session: Session) -> List[ServiceDefaults]: + """ + Retrieve the default service sets for a given session. + + :param session: session to get default service sets for + :return: list of default service sets + """ default_services = [] - for name, services in session.services.default_services.items(): - default_service = ServiceDefaults(node_type=name, services=services) + for model, services in session.services.default_services.items(): + default_service = ServiceDefaults(model=model, services=services) default_services.append(default_service) return default_services @@ -611,6 +784,14 @@ def get_default_services(session: Session) -> List[ServiceDefaults]: def get_mobility_node( session: Session, node_id: int, context: ServicerContext ) -> Union[WlanNode, EmaneNet]: + """ + Get mobility node. + + :param session: session to get node from + :param node_id: id of node to get + :param context: grpc context + :return: wlan or emane node + """ try: return session.get_node(node_id, WlanNode) except CoreError: @@ -621,17 +802,26 @@ def get_mobility_node( def convert_session(session: Session) -> wrappers.Session: - links = [] - nodes = [] + """ + Convert session to its wrapped version. 
+ + :param session: session to convert + :return: wrapped session data + """ emane_configs = get_emane_model_configs_dict(session) + nodes = [] + links = [] for _id in session.nodes: node = session.nodes[_id] if not isinstance(node, (PtpNet, CtrlNet)): node_emane_configs = emane_configs.get(node.id, []) node_proto = get_node_proto(session, node, node_emane_configs) nodes.append(node_proto) - node_links = get_links(node) - links.extend(node_links) + if isinstance(node, (WlanNode, EmaneNet)): + for link_data in node.links(): + links.append(convert_link_data(link_data)) + for core_link in session.link_manager.links(): + links.extend(convert_core_link(core_link)) default_services = get_default_services(session) x, y, z = session.location.refxyz lat, lon, alt = session.location.refgeo @@ -640,7 +830,7 @@ def convert_session(session: Session) -> wrappers.Session: ) hooks = get_hooks(session) session_file = str(session.file_path) if session.file_path else None - options = get_config_options(session.options.get_configs(), session.options) + options = convert_session_options(session) servers = [ core_pb2.Server(name=x.name, host=x.host) for x in session.distributed.servers.values() @@ -665,6 +855,15 @@ def convert_session(session: Session) -> wrappers.Session: def configure_node( session: Session, node: core_pb2.Node, core_node: NodeBase, context: ServicerContext ) -> None: + """ + Configure a node using all provided protobuf data. + + :param session: session for node + :param node: node protobuf data + :param core_node: session node + :param context: grpc context + :return: nothing + """ for emane_config in node.emane_configs: _id = utils.iface_config_id(node.id, emane_config.iface_id) config = {k: v.value for k, v in emane_config.config.items()} @@ -675,6 +874,9 @@ def configure_node( if node.mobility_config: config = {k: v.value for k, v in node.mobility_config.items()} session.mobility.set_model_config(node.id, Ns2ScriptedMobility.name, config) + if isinstance(core_node, WirelessNode) and node.wireless_config: + config = {k: v.value for k, v in node.wireless_config.items()} + core_node.set_config(config) for service_name, service_config in node.service_configs.items(): data = service_config.data config = ServiceConfig( diff --git a/daemon/core/api/grpc/server.py b/daemon/core/api/grpc/server.py index 060bc4b6..47615b29 100644 --- a/daemon/core/api/grpc/server.py +++ b/daemon/core/api/grpc/server.py @@ -1,7 +1,8 @@ -import atexit import logging import os import re +import signal +import sys import tempfile import time from concurrent import futures @@ -23,10 +24,22 @@ from core.api.grpc.configservices_pb2 import ( ConfigService, GetConfigServiceDefaultsRequest, GetConfigServiceDefaultsResponse, + GetConfigServiceRenderedRequest, + GetConfigServiceRenderedResponse, GetNodeConfigServiceRequest, GetNodeConfigServiceResponse, ) -from core.api.grpc.core_pb2 import ExecuteScriptResponse +from core.api.grpc.core_pb2 import ( + ExecuteScriptResponse, + GetWirelessConfigRequest, + GetWirelessConfigResponse, + LinkedRequest, + LinkedResponse, + WirelessConfigRequest, + WirelessConfigResponse, + WirelessLinkedRequest, + WirelessLinkedResponse, +) from core.api.grpc.emane_pb2 import ( EmaneLinkRequest, EmaneLinkResponse, @@ -79,19 +92,20 @@ from core.emulator.data import InterfaceData, LinkData, LinkOptions from core.emulator.enumerations import ( EventTypes, ExceptionLevels, - LinkTypes, MessageFlags, + NodeTypes, ) from core.emulator.session import NT, Session from core.errors import CoreCommandError, 
CoreError from core.location.mobility import BasicRangeModel, Ns2ScriptedMobility from core.nodes.base import CoreNode, NodeBase -from core.nodes.network import WlanNode +from core.nodes.network import CoreNetwork, WlanNode +from core.nodes.wireless import WirelessNode from core.services.coreservices import ServiceManager logger = logging.getLogger(__name__) _ONE_DAY_IN_SECONDS: int = 60 * 60 * 24 -_INTERFACE_REGEX: Pattern = re.compile(r"veth(?P[0-9a-fA-F]+)") +_INTERFACE_REGEX: Pattern = re.compile(r"beth(?P[0-9a-fA-F]+)") _MAX_WORKERS = 1000 @@ -107,11 +121,20 @@ class CoreGrpcServer(core_pb2_grpc.CoreApiServicer): self.coreemu: CoreEmu = coreemu self.running: bool = True self.server: Optional[grpc.Server] = None - atexit.register(self._exit_handler) + # catch signals + signal.signal(signal.SIGHUP, self._signal_handler) + signal.signal(signal.SIGINT, self._signal_handler) + signal.signal(signal.SIGTERM, self._signal_handler) + signal.signal(signal.SIGUSR1, self._signal_handler) + signal.signal(signal.SIGUSR2, self._signal_handler) - def _exit_handler(self) -> None: - logger.debug("catching exit, stop running") + def _signal_handler(self, signal_number: int, _) -> None: + logger.info("caught signal: %s", signal_number) + self.coreemu.shutdown() self.running = False + if self.server: + self.server.stop(None) + sys.exit(signal_number) def _is_running(self, context) -> bool: return self.running and context.is_active() @@ -248,18 +271,18 @@ class CoreGrpcServer(core_pb2_grpc.CoreApiServicer): # clear previous state and setup for creation session.clear() + session.directory.mkdir(exist_ok=True) if request.definition: state = EventTypes.DEFINITION_STATE else: state = EventTypes.CONFIGURATION_STATE - session.directory.mkdir(exist_ok=True) session.set_state(state) - session.user = request.session.user + if request.session.user: + session.set_user(request.session.user) # session options - session.options.config_reset() for option in request.session.options.values(): - session.options.set_config(option.name, option.value) + session.options.set(option.name, option.value) session.metadata = dict(request.session.metadata) # add servers @@ -378,11 +401,11 @@ class CoreGrpcServer(core_pb2_grpc.CoreApiServicer): self, request: core_pb2.GetSessionsRequest, context: ServicerContext ) -> core_pb2.GetSessionsResponse: """ - Delete the session + Get all currently known session overviews. 
- :param request: get-session request + :param request: get sessions request :param context: context object - :return: a delete-session response + :return: a get sessions response """ logger.debug("get sessions: %s", request) sessions = [] @@ -469,7 +492,6 @@ class CoreGrpcServer(core_pb2_grpc.CoreApiServicer): while self._is_running(context): now = time.monotonic() stats = get_net_stats() - # calculate average if last_check is not None: interval = now - last_check @@ -486,7 +508,7 @@ class CoreGrpcServer(core_pb2_grpc.CoreApiServicer): (current_rxtx["tx"] - previous_rxtx["tx"]) * 8.0 / interval ) throughput = rx_kbps + tx_kbps - if key.startswith("veth"): + if key.startswith("beth"): key = key.split(".") node_id = _INTERFACE_REGEX.search(key[0]).group("node") node_id = int(node_id, base=16) @@ -512,7 +534,6 @@ class CoreGrpcServer(core_pb2_grpc.CoreApiServicer): bridge_throughput.throughput = throughput except ValueError: pass - yield throughputs_event last_check = now @@ -540,9 +561,17 @@ class CoreGrpcServer(core_pb2_grpc.CoreApiServicer): """ logger.debug("add node: %s", request) session = self.get_session(request.session_id, context) - _type, _id, options = grpcutils.add_node_data(request.node) + _type = NodeTypes(request.node.type) _class = session.get_node_class(_type) - node = session.add_node(_class, _id, options) + position, options = grpcutils.add_node_data(_class, request.node) + node = session.add_node( + _class, + request.node.id or None, + request.node.name or None, + request.node.server or None, + position, + options, + ) grpcutils.configure_node(session, request.node, node, context) source = request.source if request.source else None session.broadcast_node(node, MessageFlags.ADD, source) @@ -564,12 +593,12 @@ class CoreGrpcServer(core_pb2_grpc.CoreApiServicer): ifaces = [] for iface_id in node.ifaces: iface = node.ifaces[iface_id] - iface_proto = grpcutils.iface_to_proto(request.node_id, iface) + iface_proto = grpcutils.iface_to_proto(session, iface) ifaces.append(iface_proto) emane_configs = grpcutils.get_emane_model_configs_dict(session) node_emane_configs = emane_configs.get(node.id, []) node_proto = grpcutils.get_node_proto(session, node, node_emane_configs) - links = get_links(node) + links = get_links(session, node) return core_pb2.GetNodeResponse(node=node_proto, ifaces=ifaces, links=links) def MoveNode( @@ -705,18 +734,22 @@ class CoreGrpcServer(core_pb2_grpc.CoreApiServicer): node2_id = request.link.node2_id self.get_node(session, node1_id, context, NodeBase) self.get_node(session, node2_id, context, NodeBase) - iface1_data, iface2_data, options, link_type = grpcutils.add_link_data( - request.link - ) + iface1_data, iface2_data, options = grpcutils.add_link_data(request.link) node1_iface, node2_iface = session.add_link( - node1_id, node2_id, iface1_data, iface2_data, options, link_type + node1_id, node2_id, iface1_data, iface2_data, options ) iface1_data = None if node1_iface: - iface1_data = grpcutils.iface_to_data(node1_iface) + if isinstance(node1_iface.node, CoreNetwork): + iface1_data = InterfaceData(id=node1_iface.id) + else: + iface1_data = node1_iface.get_data() iface2_data = None if node2_iface: - iface2_data = grpcutils.iface_to_data(node2_iface) + if isinstance(node2_iface.node, CoreNetwork): + iface2_data = InterfaceData(id=node2_iface.id) + else: + iface2_data = node2_iface.get_data() source = request.source if request.source else None link_data = LinkData( message_type=MessageFlags.ADD, @@ -731,9 +764,9 @@ class 
CoreGrpcServer(core_pb2_grpc.CoreApiServicer): iface1_proto = None iface2_proto = None if node1_iface: - iface1_proto = grpcutils.iface_to_proto(node1_id, node1_iface) + iface1_proto = grpcutils.iface_to_proto(session, node1_iface) if node2_iface: - iface2_proto = grpcutils.iface_to_proto(node2_id, node2_iface) + iface2_proto = grpcutils.iface_to_proto(session, node2_iface) return core_pb2.AddLinkResponse( result=True, iface1=iface1_proto, iface2=iface2_proto ) @@ -912,7 +945,7 @@ class CoreGrpcServer(core_pb2_grpc.CoreApiServicer): session.services.default_services.clear() for service_defaults in request.defaults: session.services.default_services[ - service_defaults.node_type + service_defaults.model ] = service_defaults.services return SetServiceDefaultsResponse(result=True) @@ -1163,7 +1196,8 @@ class CoreGrpcServer(core_pb2_grpc.CoreApiServicer): self, request: core_pb2.GetInterfacesRequest, context: ServicerContext ) -> core_pb2.GetInterfacesResponse: """ - Retrieve all the interfaces of the system including bridges, virtual ethernet, and loopback + Retrieve all the interfaces of the system including bridges, virtual ethernet, + and loopback. :param request: get-interfaces request :param context: context object @@ -1188,32 +1222,9 @@ class CoreGrpcServer(core_pb2_grpc.CoreApiServicer): """ logger.debug("emane link: %s", request) session = self.get_session(request.session_id, context) - nem1 = request.nem1 - iface1 = session.emane.get_iface(nem1) - if not iface1: - context.abort(grpc.StatusCode.NOT_FOUND, f"nem one {nem1} not found") - node1 = iface1.node - - nem2 = request.nem2 - iface2 = session.emane.get_iface(nem2) - if not iface2: - context.abort(grpc.StatusCode.NOT_FOUND, f"nem two {nem2} not found") - node2 = iface2.node - - if iface1.net == iface2.net: - if request.linked: - flag = MessageFlags.ADD - else: - flag = MessageFlags.DELETE - color = session.get_link_color(iface1.net.id) - link = LinkData( - message_type=flag, - type=LinkTypes.WIRELESS, - node1_id=node1.id, - node2_id=node2.id, - network_id=iface1.net.id, - color=color, - ) + flag = MessageFlags.ADD if request.linked else MessageFlags.DELETE + link = session.emane.get_nem_link(request.nem1, request.nem2, flag) + if link: session.broadcast_link(link) return EmaneLinkResponse(result=True) else: @@ -1240,6 +1251,27 @@ class CoreGrpcServer(core_pb2_grpc.CoreApiServicer): config = {x.id: x.default for x in service.default_configs} return GetNodeConfigServiceResponse(config=config) + def GetConfigServiceRendered( + self, request: GetConfigServiceRenderedRequest, context: ServicerContext + ) -> GetConfigServiceRenderedResponse: + """ + Retrieves the rendered file data for a given config service on a node. 
+ + :param request: config service render request + :param context: grpc context + :return: rendered config service files + """ + session = self.get_session(request.session_id, context) + node = self.get_node(session, request.node_id, context, CoreNode) + self.validate_service(request.name, context) + service = node.config_services.get(request.name) + if not service: + context.abort( + grpc.StatusCode.NOT_FOUND, f"unknown node service {request.name}" + ) + rendered = service.get_rendered_templates() + return GetConfigServiceRenderedResponse(rendered=rendered) + def GetConfigServiceDefaults( self, request: GetConfigServiceDefaultsRequest, context: ServicerContext ) -> GetConfigServiceDefaultsResponse: @@ -1299,18 +1331,21 @@ class CoreGrpcServer(core_pb2_grpc.CoreApiServicer): ) -> WlanLinkResponse: session = self.get_session(request.session_id, context) wlan = self.get_node(session, request.wlan, context, WlanNode) - if not isinstance(wlan.model, BasicRangeModel): + if not isinstance(wlan.wireless_model, BasicRangeModel): context.abort( grpc.StatusCode.NOT_FOUND, - f"wlan node {request.wlan} does not using BasicRangeModel", + f"wlan node {request.wlan} is not using BasicRangeModel", ) node1 = self.get_node(session, request.node1_id, context, CoreNode) node2 = self.get_node(session, request.node2_id, context, CoreNode) node1_iface, node2_iface = None, None - for net, iface1, iface2 in node1.commonnets(node2): - if net == wlan: - node1_iface = iface1 - node2_iface = iface2 + for iface in node1.get_ifaces(control=False): + if iface.net == wlan: + node1_iface = iface + break + for iface in node2.get_ifaces(control=False): + if iface.net == wlan: + node2_iface = iface break result = False if node1_iface and node2_iface: @@ -1318,7 +1353,9 @@ class CoreGrpcServer(core_pb2_grpc.CoreApiServicer): wlan.link(node1_iface, node2_iface) else: wlan.unlink(node1_iface, node2_iface) - wlan.model.sendlinkmsg(node1_iface, node2_iface, unlink=not request.linked) + wlan.wireless_model.sendlinkmsg( + node1_iface, node2_iface, unlink=not request.linked + ) result = True return WlanLinkResponse(result=result) @@ -1335,3 +1372,60 @@ class CoreGrpcServer(core_pb2_grpc.CoreApiServicer): nem2 = grpcutils.get_nem_id(session, node2, request.iface2_id, context) session.emane.publish_pathloss(nem1, nem2, request.rx1, request.rx2) return EmanePathlossesResponse() + + def Linked( + self, request: LinkedRequest, context: ServicerContext + ) -> LinkedResponse: + session = self.get_session(request.session_id, context) + session.linked( + request.node1_id, + request.node2_id, + request.iface1_id, + request.iface2_id, + request.linked, + ) + return LinkedResponse() + + def WirelessLinked( + self, request: WirelessLinkedRequest, context: ServicerContext + ) -> WirelessLinkedResponse: + session = self.get_session(request.session_id, context) + wireless = self.get_node(session, request.wireless_id, context, WirelessNode) + wireless.link_control(request.node1_id, request.node2_id, request.linked) + return WirelessLinkedResponse() + + def WirelessConfig( + self, request: WirelessConfigRequest, context: ServicerContext + ) -> WirelessConfigResponse: + session = self.get_session(request.session_id, context) + wireless = self.get_node(session, request.wireless_id, context, WirelessNode) + options1 = request.options1 + options2 = options1 + if request.HasField("options2"): + options2 = request.options2 + options1 = grpcutils.convert_options_proto(options1) + options2 = grpcutils.convert_options_proto(options2) + 
wireless.link_config(request.node1_id, request.node2_id, options1, options2) + return WirelessConfigResponse() + + def GetWirelessConfig( + self, request: GetWirelessConfigRequest, context: ServicerContext + ) -> GetWirelessConfigResponse: + session = self.get_session(request.session_id, context) + try: + wireless = session.get_node(request.node_id, WirelessNode) + configs = wireless.get_config() + except CoreError: + configs = {x.id: x for x in WirelessNode.options} + config_options = {} + for config in configs.values(): + config_option = common_pb2.ConfigOption( + label=config.label, + name=config.id, + value=config.default, + type=config.type.value, + select=config.options, + group=config.group, + ) + config_options[config.id] = config_option + return GetWirelessConfigResponse(config=config_options) diff --git a/daemon/core/api/grpc/wrappers.py b/daemon/core/api/grpc/wrappers.py index 94a1598c..d3167a98 100644 --- a/daemon/core/api/grpc/wrappers.py +++ b/daemon/core/api/grpc/wrappers.py @@ -67,6 +67,7 @@ class NodeType(Enum): CONTROL_NET = 13 DOCKER = 15 LXC = 16 + WIRELESS = 17 class LinkType(Enum): @@ -209,12 +210,12 @@ class Service: @dataclass class ServiceDefault: - node_type: str + model: str services: List[str] @classmethod def from_proto(cls, proto: services_pb2.ServiceDefaults) -> "ServiceDefault": - return ServiceDefault(node_type=proto.node_type, services=list(proto.services)) + return ServiceDefault(model=proto.model, services=list(proto.services)) @dataclass @@ -480,6 +481,8 @@ class Interface: mtu: int = None node_id: int = None net2_id: int = None + nem_id: int = None + nem_port: int = None @classmethod def from_proto(cls, proto: core_pb2.Interface) -> "Interface": @@ -496,6 +499,8 @@ class Interface: mtu=proto.mtu, node_id=proto.node_id, net2_id=proto.net2_id, + nem_id=proto.nem_id, + nem_port=proto.nem_port, ) def to_proto(self) -> core_pb2.Interface: @@ -736,6 +741,7 @@ class Node: Tuple[str, Optional[int]], Dict[str, ConfigOption] ] = field(default_factory=dict, repr=False) wlan_config: Dict[str, ConfigOption] = field(default_factory=dict, repr=False) + wireless_config: Dict[str, ConfigOption] = field(default_factory=dict, repr=False) mobility_config: Dict[str, ConfigOption] = field(default_factory=dict, repr=False) service_configs: Dict[str, NodeServiceData] = field( default_factory=dict, repr=False @@ -770,7 +776,7 @@ class Node: id=proto.id, name=proto.name, type=NodeType(proto.type), - model=proto.model, + model=proto.model or None, position=Position.from_proto(proto.position), services=set(proto.services), config_services=set(proto.config_services), @@ -788,6 +794,7 @@ class Node: service_file_configs=service_file_configs, config_service_configs=config_service_configs, emane_model_configs=emane_configs, + wireless_config=ConfigOption.from_dict(proto.wireless_config), ) def to_proto(self) -> core_pb2.Node: @@ -839,6 +846,7 @@ class Node: service_configs=service_configs, config_service_configs=config_service_configs, emane_configs=emane_configs, + wireless_config={k: v.to_proto() for k, v in self.wireless_config.items()}, ) def set_wlan(self, config: Dict[str, str]) -> None: @@ -883,9 +891,7 @@ class Session: def from_proto(cls, proto: core_pb2.Session) -> "Session": nodes: Dict[int, Node] = {x.id: Node.from_proto(x) for x in proto.nodes} links = [Link.from_proto(x) for x in proto.links] - default_services = { - x.node_type: set(x.services) for x in proto.default_services - } + default_services = {x.model: set(x.services) for x in proto.default_services} hooks = 
{x.file: Hook.from_proto(x) for x in proto.hooks} file_path = Path(proto.file) if proto.file else None options = ConfigOption.from_dict(proto.options) @@ -913,9 +919,9 @@ class Session: options = {k: v.to_proto() for k, v in self.options.items()} servers = [x.to_proto() for x in self.servers] default_services = [] - for node_type, services in self.default_services.items(): + for model, services in self.default_services.items(): default_service = services_pb2.ServiceDefaults( - node_type=node_type, services=services + model=model, services=services ) default_services.append(default_service) file = str(self.file) if self.file else None @@ -1102,7 +1108,6 @@ class ConfigEvent: data_types=list(proto.data_types), data_values=proto.data_values, captions=proto.captions, - bitmap=proto.bitmap, possible_values=proto.possible_values, groups=proto.groups, iface_id=proto.iface_id, @@ -1194,13 +1199,13 @@ class EmanePathlossesRequest: ) -@dataclass +@dataclass(frozen=True) class MoveNodesRequest: session_id: int node_id: int - source: str = None - position: Position = None - geo: Geo = None + source: str = field(compare=False, default=None) + position: Position = field(compare=False, default=None) + geo: Geo = field(compare=False, default=None) def to_proto(self) -> core_pb2.MoveNodesRequest: position = self.position.to_proto() if self.position else None diff --git a/daemon/core/api/tlv/coreapi.py b/daemon/core/api/tlv/coreapi.py deleted file mode 100644 index 756b623c..00000000 --- a/daemon/core/api/tlv/coreapi.py +++ /dev/null @@ -1,1016 +0,0 @@ -""" -Uses coreapi_data for message and TLV types, and defines TLV data -types and objects used for parsing and building CORE API messages. - -CORE API messaging is leveraged for communication with the GUI. -""" - -import binascii -import socket -import struct -from enum import Enum - -import netaddr - -from core.api.tlv import structutils -from core.api.tlv.enumerations import ( - ConfigTlvs, - EventTlvs, - ExceptionTlvs, - ExecuteTlvs, - FileTlvs, - InterfaceTlvs, - LinkTlvs, - MessageTypes, - NodeTlvs, - SessionTlvs, -) -from core.emulator.enumerations import MessageFlags, RegisterTlvs - - -class CoreTlvData: - """ - Helper base class used for packing and unpacking values using struct. - """ - - # format string for packing data - data_format = None - # python data type for the data - data_type = None - # pad length for data after packing - pad_len = None - - @classmethod - def pack(cls, value): - """ - Convenience method for packing data using the struct module. - - :param value: value to pack - :return: length of data and the packed data itself - :rtype: tuple - """ - data = struct.pack(cls.data_format, value) - length = len(data) - cls.pad_len - return length, data - - @classmethod - def unpack(cls, data): - """ - Convenience method for unpacking data using the struct module. - - :param data: data to unpack - :return: the value of the unpacked data - """ - return struct.unpack(cls.data_format, data)[0] - - @classmethod - def pack_string(cls, value): - """ - Convenience method for packing data from a string representation. - - :param str value: value to pack - :return: length of data and the packed data itself - :rtype: tuple - """ - return cls.pack(cls.from_string(value)) - - @classmethod - def from_string(cls, value): - """ - Retrieve the value type from a string representation. 
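Two wrapper changes above are worth calling out together: `ServiceDefault` is now keyed by `model` rather than `node_type`, and `MoveNodesRequest` is a frozen dataclass whose payload fields are excluded from comparison, so requests targeting the same node in the same session compare (and hash) equal. Interfaces additionally expose `nem_id`/`nem_port` when EMANE is involved. A small sketch of the dataclass behavior; the deduplication use is an inference, and the field values are placeholders:

```python
from core.api.grpc.wrappers import MoveNodesRequest, Position, ServiceDefault

# default services are now grouped by model name
defaults = ServiceDefault(model="PC", services=["DefaultRoute"])

# frozen + compare=False payload fields: equality and hashing only consider
# (session_id, node_id), so repeated moves of one node can be deduplicated
r1 = MoveNodesRequest(session_id=1, node_id=5, position=Position(x=10, y=10))
r2 = MoveNodesRequest(session_id=1, node_id=5, position=Position(x=99, y=99))
assert r1 == r2
assert len({r1, r2}) == 1
```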
- - :param str value: value to get a data type from - :return: value parse from string representation - """ - return cls.data_type(value) - - -class CoreTlvDataObj(CoreTlvData): - """ - Helper class for packing custom object data. - """ - - @classmethod - def pack(cls, value): - """ - Convenience method for packing custom object data. - - :param value: custom object to pack - :return: length of data and the packed data itself - :rtype: tuple - """ - value = cls.get_value(value) - return super().pack(value) - - @classmethod - def unpack(cls, data): - """ - Convenience method for unpacking custom object data. - - :param data: data to unpack custom object from - :return: unpacked custom object - """ - data = super().unpack(data) - return cls.new_obj(data) - - @staticmethod - def get_value(obj): - """ - Method that will be used to retrieve the data to pack from a custom object. - - :param obj: custom object to get data to pack - :return: data value to pack - """ - raise NotImplementedError - - @staticmethod - def new_obj(obj): - """ - Method for retrieving data to unpack from an object. - - :param obj: object to get unpack data from - :return: value of unpacked data - """ - raise NotImplementedError - - -class CoreTlvDataUint16(CoreTlvData): - """ - Helper class for packing uint16 data. - """ - - data_format = "!H" - data_type = int - pad_len = 0 - - -class CoreTlvDataUint32(CoreTlvData): - """ - Helper class for packing uint32 data. - """ - - data_format = "!2xI" - data_type = int - pad_len = 2 - - -class CoreTlvDataUint64(CoreTlvData): - """ - Helper class for packing uint64 data. - """ - - data_format = "!2xQ" - data_type = int - pad_len = 2 - - -class CoreTlvDataString(CoreTlvData): - """ - Helper class for packing string data. - """ - - data_type = str - - @classmethod - def pack(cls, value): - """ - Convenience method for packing string data. - - :param str value: string to pack - :return: length of data packed and the packed data - :rtype: tuple - """ - if not isinstance(value, str): - raise ValueError(f"value not a string: {type(value)}") - value = value.encode("utf-8") - - if len(value) < 256: - header_len = CoreTlv.header_len - else: - header_len = CoreTlv.long_header_len - - pad_len = -(header_len + len(value)) % 4 - return len(value), value + b"\0" * pad_len - - @classmethod - def unpack(cls, data): - """ - Convenience method for unpacking string data. - - :param str data: unpack string data - :return: unpacked string data - """ - return data.rstrip(b"\0").decode("utf-8") - - -class CoreTlvDataUint16List(CoreTlvData): - """ - List of unsigned 16-bit values. - """ - - data_type = tuple - data_format = "!H" - - @classmethod - def pack(cls, values): - """ - Convenience method for packing a uint 16 list. - - :param list values: unint 16 list to pack - :return: length of data packed and the packed data - :rtype: tuple - """ - if not isinstance(values, tuple): - raise ValueError(f"value not a tuple: {values}") - - data = b"" - for value in values: - data += struct.pack(cls.data_format, value) - - pad_len = -(CoreTlv.header_len + len(data)) % 4 - return len(data), data + b"\0" * pad_len - - @classmethod - def unpack(cls, data): - """ - Convenience method for unpacking a uint 16 list. 
- - :param data: data to unpack - :return: unpacked data - """ - size = int(len(data) / 2) - data_format = f"!{size}H" - return struct.unpack(data_format, data) - - @classmethod - def from_string(cls, value): - """ - Retrieves a unint 16 list from a string - - :param str value: string representation of a uint 16 list - :return: uint 16 list - :rtype: list - """ - return tuple(int(x) for x in value.split()) - - -class CoreTlvDataIpv4Addr(CoreTlvDataObj): - """ - Utility class for packing/unpacking Ipv4 addresses. - """ - - data_type = str - data_format = "!2x4s" - pad_len = 2 - - @staticmethod - def get_value(obj): - """ - Retrieve Ipv4 address value from object. - - :param str obj: ip address to get value from - :return: packed address - :rtype: bytes - """ - return socket.inet_pton(socket.AF_INET, obj) - - @staticmethod - def new_obj(value): - """ - Retrieve Ipv4 address from a string representation. - - :param bytes value: value to get Ipv4 address from - :return: Ipv4 address - :rtype: str - """ - return socket.inet_ntop(socket.AF_INET, value) - - -class CoreTlvDataIPv6Addr(CoreTlvDataObj): - """ - Utility class for packing/unpacking Ipv6 addresses. - """ - - data_format = "!16s2x" - data_type = str - pad_len = 2 - - @staticmethod - def get_value(obj): - """ - Retrieve Ipv6 address value from object. - - :param str obj: ip address to get value from - :return: packed address - :rtype: bytes - """ - return socket.inet_pton(socket.AF_INET6, obj) - - @staticmethod - def new_obj(value): - """ - Retrieve Ipv6 address from a string representation. - - :param bytes value: value to get Ipv4 address from - :return: Ipv4 address - :rtype: str - """ - return socket.inet_ntop(socket.AF_INET6, value) - - -class CoreTlvDataMacAddr(CoreTlvDataObj): - """ - Utility class for packing/unpacking mac addresses. - """ - - data_format = "!2x8s" - data_type = str - pad_len = 2 - - @staticmethod - def get_value(obj): - """ - Retrieve Ipv6 address value from object. - - :param str obj: mac address to get value from - :return: packed mac address - :rtype: bytes - """ - # extend to 64 bits - return b"\0\0" + netaddr.EUI(obj).packed - - @staticmethod - def new_obj(value): - """ - Retrieve mac address from a string representation. - - :param bytes value: value to get Ipv4 address from - :return: mac address - :rtype: str - """ - # only use 48 bits - value = binascii.hexlify(value[2:]).decode() - mac = netaddr.EUI(value, dialect=netaddr.mac_unix_expanded) - return str(mac) - - -class CoreTlv: - """ - Base class for representing CORE TLVs. - """ - - header_format = "!BB" - header_len = struct.calcsize(header_format) - - long_header_format = "!BBH" - long_header_len = struct.calcsize(long_header_format) - - tlv_type_map = Enum - tlv_data_class_map = {} - - def __init__(self, tlv_type, tlv_data): - """ - Create a CoreTlv instance. - - :param int tlv_type: tlv type - :param tlv_data: data to unpack - :return: unpacked data - """ - self.tlv_type = tlv_type - if tlv_data: - try: - self.value = self.tlv_data_class_map[self.tlv_type].unpack(tlv_data) - except KeyError: - self.value = tlv_data - else: - self.value = None - - @classmethod - def unpack(cls, data): - """ - Parse data and return unpacked class. 
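The address helpers being deleted here pack MACs as eight bytes: two zero bytes of padding followed by the 48-bit EUI, and unpack by hexlifying the trailing six bytes. A standalone round trip of that encoding, mirroring `CoreTlvDataMacAddr` outside of the TLV classes:

```python
import binascii

import netaddr

# pack: extend the 48-bit EUI to 64 bits with two leading zero bytes
packed = b"\0\0" + netaddr.EUI("00:11:22:33:44:55").packed
assert len(packed) == 8

# unpack: drop the padding, hexlify, and normalize to unix-expanded form
raw = binascii.hexlify(packed[2:]).decode()
mac = netaddr.EUI(raw, dialect=netaddr.mac_unix_expanded)
assert str(mac) == "00:11:22:33:44:55"
```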
- - :param data: data to unpack - :return: unpacked data class - """ - tlv_type, tlv_len = struct.unpack(cls.header_format, data[: cls.header_len]) - header_len = cls.header_len - if tlv_len == 0: - tlv_type, _zero, tlv_len = struct.unpack( - cls.long_header_format, data[: cls.long_header_len] - ) - header_len = cls.long_header_len - tlv_size = header_len + tlv_len - # for 32-bit alignment - tlv_size += -tlv_size % 4 - return cls(tlv_type, data[header_len:tlv_size]), data[tlv_size:] - - @classmethod - def pack(cls, tlv_type, value): - """ - Pack a TLV value, based on type. - - :param int tlv_type: type of data to pack - :param value: data to pack - :return: header and packed data - """ - tlv_len, tlv_data = cls.tlv_data_class_map[tlv_type].pack(value) - if tlv_len < 256: - hdr = struct.pack(cls.header_format, tlv_type, tlv_len) - else: - hdr = struct.pack(cls.long_header_format, tlv_type, 0, tlv_len) - return hdr + tlv_data - - @classmethod - def pack_string(cls, tlv_type, value): - """ - Pack data type from a string representation - - :param int tlv_type: type of data to pack - :param str value: string representation of data - :return: header and packed data - """ - return cls.pack(tlv_type, cls.tlv_data_class_map[tlv_type].from_string(value)) - - def type_str(self): - """ - Retrieve type string for this data type. - - :return: data type name - :rtype: str - """ - try: - return self.tlv_type_map(self.tlv_type).name - except ValueError: - return f"unknown tlv type: {self.tlv_type}" - - def __str__(self): - """ - String representation of this data type. - - :return: string representation - :rtype: str - """ - return f"{self.__class__.__name__} " - - -class CoreNodeTlv(CoreTlv): - """ - Class for representing CORE Node TLVs. - """ - - tlv_type_map = NodeTlvs - tlv_data_class_map = { - NodeTlvs.NUMBER.value: CoreTlvDataUint32, - NodeTlvs.TYPE.value: CoreTlvDataUint32, - NodeTlvs.NAME.value: CoreTlvDataString, - NodeTlvs.IP_ADDRESS.value: CoreTlvDataIpv4Addr, - NodeTlvs.MAC_ADDRESS.value: CoreTlvDataMacAddr, - NodeTlvs.IP6_ADDRESS.value: CoreTlvDataIPv6Addr, - NodeTlvs.MODEL.value: CoreTlvDataString, - NodeTlvs.EMULATION_SERVER.value: CoreTlvDataString, - NodeTlvs.SESSION.value: CoreTlvDataString, - NodeTlvs.X_POSITION.value: CoreTlvDataUint16, - NodeTlvs.Y_POSITION.value: CoreTlvDataUint16, - NodeTlvs.CANVAS.value: CoreTlvDataUint16, - NodeTlvs.EMULATION_ID.value: CoreTlvDataUint32, - NodeTlvs.NETWORK_ID.value: CoreTlvDataUint32, - NodeTlvs.SERVICES.value: CoreTlvDataString, - NodeTlvs.LATITUDE.value: CoreTlvDataString, - NodeTlvs.LONGITUDE.value: CoreTlvDataString, - NodeTlvs.ALTITUDE.value: CoreTlvDataString, - NodeTlvs.ICON.value: CoreTlvDataString, - NodeTlvs.OPAQUE.value: CoreTlvDataString, - } - - -class CoreLinkTlv(CoreTlv): - """ - Class for representing CORE link TLVs. 
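For readers skimming these deletions: every TLV on the legacy wire format is a one-byte type, a one-byte length (values of 256 bytes or more switch to the long `!BBH` header with the short length zeroed), and a payload padded so the next TLV starts on a 32-bit boundary. A standalone illustration of the short-header framing, with an arbitrary type id and value:

```python
import struct

tlv_type, value = 3, b"n1"                 # arbitrary type id and payload
header = struct.pack("!BB", tlv_type, len(value))
tlv = header + value
# pad to the next 32-bit boundary, as CoreTlv.unpack expects
tlv += b"\0" * (-len(tlv) % 4)
assert len(tlv) % 4 == 0

# a reader recovers the type/length and then skips the aligned size
rtype, rlen = struct.unpack("!BB", tlv[:2])
assert (rtype, rlen) == (tlv_type, len(value))
```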
- """ - - tlv_type_map = LinkTlvs - tlv_data_class_map = { - LinkTlvs.N1_NUMBER.value: CoreTlvDataUint32, - LinkTlvs.N2_NUMBER.value: CoreTlvDataUint32, - LinkTlvs.DELAY.value: CoreTlvDataUint64, - LinkTlvs.BANDWIDTH.value: CoreTlvDataUint64, - LinkTlvs.LOSS.value: CoreTlvDataString, - LinkTlvs.DUP.value: CoreTlvDataString, - LinkTlvs.JITTER.value: CoreTlvDataUint64, - LinkTlvs.MER.value: CoreTlvDataUint16, - LinkTlvs.BURST.value: CoreTlvDataUint16, - LinkTlvs.SESSION.value: CoreTlvDataString, - LinkTlvs.MBURST.value: CoreTlvDataUint16, - LinkTlvs.TYPE.value: CoreTlvDataUint32, - LinkTlvs.GUI_ATTRIBUTES.value: CoreTlvDataString, - LinkTlvs.UNIDIRECTIONAL.value: CoreTlvDataUint16, - LinkTlvs.EMULATION_ID.value: CoreTlvDataUint32, - LinkTlvs.NETWORK_ID.value: CoreTlvDataUint32, - LinkTlvs.KEY.value: CoreTlvDataUint32, - LinkTlvs.IFACE1_NUMBER.value: CoreTlvDataUint16, - LinkTlvs.IFACE1_IP4.value: CoreTlvDataIpv4Addr, - LinkTlvs.IFACE1_IP4_MASK.value: CoreTlvDataUint16, - LinkTlvs.IFACE1_MAC.value: CoreTlvDataMacAddr, - LinkTlvs.IFACE1_IP6.value: CoreTlvDataIPv6Addr, - LinkTlvs.IFACE1_IP6_MASK.value: CoreTlvDataUint16, - LinkTlvs.IFACE2_NUMBER.value: CoreTlvDataUint16, - LinkTlvs.IFACE2_IP4.value: CoreTlvDataIpv4Addr, - LinkTlvs.IFACE2_IP4_MASK.value: CoreTlvDataUint16, - LinkTlvs.IFACE2_MAC.value: CoreTlvDataMacAddr, - LinkTlvs.IFACE2_IP6.value: CoreTlvDataIPv6Addr, - LinkTlvs.IFACE2_IP6_MASK.value: CoreTlvDataUint16, - LinkTlvs.IFACE1_NAME.value: CoreTlvDataString, - LinkTlvs.IFACE2_NAME.value: CoreTlvDataString, - LinkTlvs.OPAQUE.value: CoreTlvDataString, - } - - -class CoreExecuteTlv(CoreTlv): - """ - Class for representing CORE execute TLVs. - """ - - tlv_type_map = ExecuteTlvs - tlv_data_class_map = { - ExecuteTlvs.NODE.value: CoreTlvDataUint32, - ExecuteTlvs.NUMBER.value: CoreTlvDataUint32, - ExecuteTlvs.TIME.value: CoreTlvDataUint32, - ExecuteTlvs.COMMAND.value: CoreTlvDataString, - ExecuteTlvs.RESULT.value: CoreTlvDataString, - ExecuteTlvs.STATUS.value: CoreTlvDataUint32, - ExecuteTlvs.SESSION.value: CoreTlvDataString, - } - - -class CoreRegisterTlv(CoreTlv): - """ - Class for representing CORE register TLVs. - """ - - tlv_type_map = RegisterTlvs - tlv_data_class_map = { - RegisterTlvs.WIRELESS.value: CoreTlvDataString, - RegisterTlvs.MOBILITY.value: CoreTlvDataString, - RegisterTlvs.UTILITY.value: CoreTlvDataString, - RegisterTlvs.EXECUTE_SERVER.value: CoreTlvDataString, - RegisterTlvs.GUI.value: CoreTlvDataString, - RegisterTlvs.EMULATION_SERVER.value: CoreTlvDataString, - RegisterTlvs.SESSION.value: CoreTlvDataString, - } - - -class CoreConfigTlv(CoreTlv): - """ - Class for representing CORE configuration TLVs. - """ - - tlv_type_map = ConfigTlvs - tlv_data_class_map = { - ConfigTlvs.NODE.value: CoreTlvDataUint32, - ConfigTlvs.OBJECT.value: CoreTlvDataString, - ConfigTlvs.TYPE.value: CoreTlvDataUint16, - ConfigTlvs.DATA_TYPES.value: CoreTlvDataUint16List, - ConfigTlvs.VALUES.value: CoreTlvDataString, - ConfigTlvs.CAPTIONS.value: CoreTlvDataString, - ConfigTlvs.BITMAP.value: CoreTlvDataString, - ConfigTlvs.POSSIBLE_VALUES.value: CoreTlvDataString, - ConfigTlvs.GROUPS.value: CoreTlvDataString, - ConfigTlvs.SESSION.value: CoreTlvDataString, - ConfigTlvs.IFACE_ID.value: CoreTlvDataUint16, - ConfigTlvs.NETWORK_ID.value: CoreTlvDataUint32, - ConfigTlvs.OPAQUE.value: CoreTlvDataString, - } - - -class CoreFileTlv(CoreTlv): - """ - Class for representing CORE file TLVs. 
- """ - - tlv_type_map = FileTlvs - tlv_data_class_map = { - FileTlvs.NODE.value: CoreTlvDataUint32, - FileTlvs.NAME.value: CoreTlvDataString, - FileTlvs.MODE.value: CoreTlvDataString, - FileTlvs.NUMBER.value: CoreTlvDataUint16, - FileTlvs.TYPE.value: CoreTlvDataString, - FileTlvs.SOURCE_NAME.value: CoreTlvDataString, - FileTlvs.SESSION.value: CoreTlvDataString, - FileTlvs.DATA.value: CoreTlvDataString, - FileTlvs.COMPRESSED_DATA.value: CoreTlvDataString, - } - - -class CoreInterfaceTlv(CoreTlv): - """ - Class for representing CORE interface TLVs. - """ - - tlv_type_map = InterfaceTlvs - tlv_data_class_map = { - InterfaceTlvs.NODE.value: CoreTlvDataUint32, - InterfaceTlvs.NUMBER.value: CoreTlvDataUint16, - InterfaceTlvs.NAME.value: CoreTlvDataString, - InterfaceTlvs.IP_ADDRESS.value: CoreTlvDataIpv4Addr, - InterfaceTlvs.MASK.value: CoreTlvDataUint16, - InterfaceTlvs.MAC_ADDRESS.value: CoreTlvDataMacAddr, - InterfaceTlvs.IP6_ADDRESS.value: CoreTlvDataIPv6Addr, - InterfaceTlvs.IP6_MASK.value: CoreTlvDataUint16, - InterfaceTlvs.TYPE.value: CoreTlvDataUint16, - InterfaceTlvs.SESSION.value: CoreTlvDataString, - InterfaceTlvs.STATE.value: CoreTlvDataUint16, - InterfaceTlvs.EMULATION_ID.value: CoreTlvDataUint32, - InterfaceTlvs.NETWORK_ID.value: CoreTlvDataUint32, - } - - -class CoreEventTlv(CoreTlv): - """ - Class for representing CORE event TLVs. - """ - - tlv_type_map = EventTlvs - tlv_data_class_map = { - EventTlvs.NODE.value: CoreTlvDataUint32, - EventTlvs.TYPE.value: CoreTlvDataUint32, - EventTlvs.NAME.value: CoreTlvDataString, - EventTlvs.DATA.value: CoreTlvDataString, - EventTlvs.TIME.value: CoreTlvDataString, - EventTlvs.SESSION.value: CoreTlvDataString, - } - - -class CoreSessionTlv(CoreTlv): - """ - Class for representing CORE session TLVs. - """ - - tlv_type_map = SessionTlvs - tlv_data_class_map = { - SessionTlvs.NUMBER.value: CoreTlvDataString, - SessionTlvs.NAME.value: CoreTlvDataString, - SessionTlvs.FILE.value: CoreTlvDataString, - SessionTlvs.NODE_COUNT.value: CoreTlvDataString, - SessionTlvs.DATE.value: CoreTlvDataString, - SessionTlvs.THUMB.value: CoreTlvDataString, - SessionTlvs.USER.value: CoreTlvDataString, - SessionTlvs.OPAQUE.value: CoreTlvDataString, - } - - -class CoreExceptionTlv(CoreTlv): - """ - Class for representing CORE exception TLVs. - """ - - tlv_type_map = ExceptionTlvs - tlv_data_class_map = { - ExceptionTlvs.NODE.value: CoreTlvDataUint32, - ExceptionTlvs.SESSION.value: CoreTlvDataString, - ExceptionTlvs.LEVEL.value: CoreTlvDataUint16, - ExceptionTlvs.SOURCE.value: CoreTlvDataString, - ExceptionTlvs.DATE.value: CoreTlvDataString, - ExceptionTlvs.TEXT.value: CoreTlvDataString, - ExceptionTlvs.OPAQUE.value: CoreTlvDataString, - } - - -class CoreMessage: - """ - Base class for representing CORE messages. - """ - - header_format = "!BBH" - header_len = struct.calcsize(header_format) - message_type = None - flag_map = MessageFlags - tlv_class = CoreTlv - - def __init__(self, flags, hdr, data): - self.raw_message = hdr + data - self.flags = flags - self.tlv_data = {} - self.parse_data(data) - - @classmethod - def unpack_header(cls, data): - """ - parse data and return (message_type, message_flags, message_len). 
- - :param str data: data to parse - :return: unpacked tuple - :rtype: tuple - """ - message_type, message_flags, message_len = struct.unpack( - cls.header_format, data[: cls.header_len] - ) - return message_type, message_flags, message_len - - @classmethod - def create(cls, flags, values): - tlv_data = structutils.pack_values(cls.tlv_class, values) - packed = cls.pack(flags, tlv_data) - header_data = packed[: cls.header_len] - return cls(flags, header_data, tlv_data) - - @classmethod - def pack(cls, message_flags, tlv_data): - """ - Pack CORE message data. - - :param message_flags: message flags to pack with data - :param tlv_data: data to get length from for packing - :return: combined header and tlv data - """ - header = struct.pack( - cls.header_format, cls.message_type, message_flags, len(tlv_data) - ) - return header + tlv_data - - def add_tlv_data(self, key, value): - """ - Add TLV data into the data map. - - :param int key: key to store TLV data - :param value: data to associate with key - :return: nothing - """ - if key in self.tlv_data: - raise KeyError(f"key already exists: {key} (val={value})") - - self.tlv_data[key] = value - - def get_tlv(self, tlv_type): - """ - Retrieve TLV data from data map. - - :param int tlv_type: type of data to retrieve - :return: TLV type data - """ - return self.tlv_data.get(tlv_type) - - def parse_data(self, data): - """ - Parse data while possible and adding TLV data to the data map. - - :param data: data to parse for TLV data - :return: nothing - """ - while data: - tlv, data = self.tlv_class.unpack(data) - self.add_tlv_data(tlv.tlv_type, tlv.value) - - def pack_tlv_data(self): - """ - Opposite of parse_data(). Return packed TLV data using self.tlv_data dict. Used by repack(). - - :return: packed data - :rtype: str - """ - keys = sorted(self.tlv_data.keys()) - tlv_data = b"" - for key in keys: - value = self.tlv_data[key] - tlv_data += self.tlv_class.pack(key, value) - return tlv_data - - def repack(self): - """ - Invoke after updating self.tlv_data[] to rebuild self.raw_message. - Useful for modifying a message that has been parsed, before - sending the raw data again. - - :return: nothing - """ - tlv_data = self.pack_tlv_data() - self.raw_message = self.pack(self.flags, tlv_data) - - def type_str(self): - """ - Retrieve data of the message type. - - :return: name of message type - :rtype: str - """ - try: - return MessageTypes(self.message_type).name - except ValueError: - return f"unknown message type: {self.message_type}" - - def flag_str(self): - """ - Retrieve message flag string. - - :return: message flag string - :rtype: str - """ - message_flags = [] - flag = 1 - - while True: - if self.flags & flag: - try: - message_flags.append(self.flag_map(flag).name) - except ValueError: - message_flags.append(f"0x{flag:x}") - flag <<= 1 - if not (self.flags & ~(flag - 1)): - break - - message_flags = " | ".join(message_flags) - return f"0x{self.flags:x} <{message_flags}>" - - def __str__(self): - """ - Retrieve string representation of the message. - - :return: string representation - :rtype: str - """ - result = f"{self.__class__.__name__} " - - for key in self.tlv_data: - value = self.tlv_data[key] - try: - tlv_type = self.tlv_class.tlv_type_map(key).name - except ValueError: - tlv_type = f"tlv type {key}" - - result += f"\n {tlv_type}: {value}" - - return result - - def node_numbers(self): - """ - Return a list of node numbers included in this message. 
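The message framing mirrors the TLV framing one level up: a `!BBH` header carrying the message type, flags, and the length of the already-packed TLV payload. A quick round trip of the `pack`/`unpack_header` logic using literal values (the numeric type and flag here are illustrative, not taken from the enums):

```python
import struct

header_format = "!BBH"
tlv_data = b"\x03\x02n1\x00\x00"                # some pre-packed, padded TLVs
header = struct.pack(header_format, 1, 0x01, len(tlv_data))
message = header + tlv_data

msg_type, msg_flags, msg_len = struct.unpack(header_format, message[:4])
assert (msg_type, msg_flags, msg_len) == (1, 0x01, len(tlv_data))
assert message[4 : 4 + msg_len] == tlv_data
```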
- """ - number1 = None - number2 = None - - # not all messages have node numbers - if self.message_type == MessageTypes.NODE.value: - number1 = self.get_tlv(NodeTlvs.NUMBER.value) - elif self.message_type == MessageTypes.LINK.value: - number1 = self.get_tlv(LinkTlvs.N1_NUMBER.value) - number2 = self.get_tlv(LinkTlvs.N2_NUMBER.value) - elif self.message_type == MessageTypes.EXECUTE.value: - number1 = self.get_tlv(ExecuteTlvs.NODE.value) - elif self.message_type == MessageTypes.CONFIG.value: - number1 = self.get_tlv(ConfigTlvs.NODE.value) - elif self.message_type == MessageTypes.FILE.value: - number1 = self.get_tlv(FileTlvs.NODE.value) - elif self.message_type == MessageTypes.INTERFACE.value: - number1 = self.get_tlv(InterfaceTlvs.NODE.value) - elif self.message_type == MessageTypes.EVENT.value: - number1 = self.get_tlv(EventTlvs.NODE.value) - - result = [] - - if number1: - result.append(number1) - - if number2: - result.append(number2) - - return result - - def session_numbers(self): - """ - Return a list of session numbers included in this message. - """ - result = [] - - if self.message_type == MessageTypes.SESSION.value: - sessions = self.get_tlv(SessionTlvs.NUMBER.value) - elif self.message_type == MessageTypes.EXCEPTION.value: - sessions = self.get_tlv(ExceptionTlvs.SESSION.value) - else: - # All other messages share TLV number 0xA for the session number(s). - sessions = self.get_tlv(NodeTlvs.SESSION.value) - - if sessions: - for session_id in sessions.split("|"): - result.append(int(session_id)) - - return result - - -class CoreNodeMessage(CoreMessage): - """ - CORE node message class. - """ - - message_type = MessageTypes.NODE.value - tlv_class = CoreNodeTlv - - -class CoreLinkMessage(CoreMessage): - """ - CORE link message class. - """ - - message_type = MessageTypes.LINK.value - tlv_class = CoreLinkTlv - - -class CoreExecMessage(CoreMessage): - """ - CORE execute message class. - """ - - message_type = MessageTypes.EXECUTE.value - tlv_class = CoreExecuteTlv - - -class CoreRegMessage(CoreMessage): - """ - CORE register message class. - """ - - message_type = MessageTypes.REGISTER.value - tlv_class = CoreRegisterTlv - - -class CoreConfMessage(CoreMessage): - """ - CORE configuration message class. - """ - - message_type = MessageTypes.CONFIG.value - tlv_class = CoreConfigTlv - - -class CoreFileMessage(CoreMessage): - """ - CORE file message class. - """ - - message_type = MessageTypes.FILE.value - tlv_class = CoreFileTlv - - -class CoreIfaceMessage(CoreMessage): - """ - CORE interface message class. - """ - - message_type = MessageTypes.INTERFACE.value - tlv_class = CoreInterfaceTlv - - -class CoreEventMessage(CoreMessage): - """ - CORE event message class. - """ - - message_type = MessageTypes.EVENT.value - tlv_class = CoreEventTlv - - -class CoreSessionMessage(CoreMessage): - """ - CORE session message class. - """ - - message_type = MessageTypes.SESSION.value - tlv_class = CoreSessionTlv - - -class CoreExceptionMessage(CoreMessage): - """ - CORE exception message class. 
- """ - - message_type = MessageTypes.EXCEPTION.value - tlv_class = CoreExceptionTlv - - -# map used to translate enumerated message type values to message class objects -CLASS_MAP = { - MessageTypes.NODE.value: CoreNodeMessage, - MessageTypes.LINK.value: CoreLinkMessage, - MessageTypes.EXECUTE.value: CoreExecMessage, - MessageTypes.REGISTER.value: CoreRegMessage, - MessageTypes.CONFIG.value: CoreConfMessage, - MessageTypes.FILE.value: CoreFileMessage, - MessageTypes.INTERFACE.value: CoreIfaceMessage, - MessageTypes.EVENT.value: CoreEventMessage, - MessageTypes.SESSION.value: CoreSessionMessage, - MessageTypes.EXCEPTION.value: CoreExceptionMessage, -} - - -def str_to_list(value): - """ - Helper to convert pipe-delimited string ("a|b|c") into a list (a, b, c). - - :param str value: string to convert - :return: converted list - :rtype: list - """ - - if value is None: - return None - - return value.split("|") diff --git a/daemon/core/api/tlv/corehandlers.py b/daemon/core/api/tlv/corehandlers.py deleted file mode 100644 index 1937aea8..00000000 --- a/daemon/core/api/tlv/corehandlers.py +++ /dev/null @@ -1,2051 +0,0 @@ -""" -socket server request handlers leveraged by core servers. -""" - -import logging -import shlex -import shutil -import socketserver -import sys -import threading -import time -from itertools import repeat -from pathlib import Path -from queue import Empty, Queue -from typing import Optional - -from core import utils -from core.api.tlv import coreapi, dataconversion, structutils -from core.api.tlv.dataconversion import ConfigShim -from core.api.tlv.enumerations import ( - ConfigFlags, - ConfigTlvs, - EventTlvs, - ExceptionTlvs, - ExecuteTlvs, - FileTlvs, - LinkTlvs, - MessageTypes, - NodeTlvs, - SessionTlvs, -) -from core.emane.modelmanager import EmaneModelManager -from core.emulator.data import ( - ConfigData, - EventData, - ExceptionData, - FileData, - InterfaceData, - LinkOptions, - NodeOptions, -) -from core.emulator.enumerations import ( - ConfigDataTypes, - EventTypes, - ExceptionLevels, - LinkTypes, - MessageFlags, - NodeTypes, - RegisterTlvs, -) -from core.emulator.session import Session -from core.errors import CoreCommandError, CoreError -from core.location.mobility import BasicRangeModel -from core.nodes.base import CoreNode, CoreNodeBase, NodeBase -from core.nodes.network import WlanNode -from core.nodes.physical import Rj45Node -from core.services.coreservices import ServiceManager, ServiceShim - -logger = logging.getLogger(__name__) - - -class CoreHandler(socketserver.BaseRequestHandler): - """ - The CoreHandler class uses the RequestHandler class for servicing requests. - """ - - session_clients = {} - - def __init__(self, request, client_address, server): - """ - Create a CoreRequestHandler instance. 
- - :param request: request object - :param str client_address: client address - :param CoreServer server: core server instance - """ - self.done = False - self.message_handlers = { - MessageTypes.NODE.value: self.handle_node_message, - MessageTypes.LINK.value: self.handle_link_message, - MessageTypes.EXECUTE.value: self.handle_execute_message, - MessageTypes.REGISTER.value: self.handle_register_message, - MessageTypes.CONFIG.value: self.handle_config_message, - MessageTypes.FILE.value: self.handle_file_message, - MessageTypes.INTERFACE.value: self.handle_iface_message, - MessageTypes.EVENT.value: self.handle_event_message, - MessageTypes.SESSION.value: self.handle_session_message, - } - self.message_queue = Queue() - self.node_status_request = {} - self._shutdown_lock = threading.Lock() - self._sessions_lock = threading.Lock() - - self.handler_threads = [] - thread = threading.Thread(target=self.handler_thread, daemon=True) - thread.start() - self.handler_threads.append(thread) - - self.session: Optional[Session] = None - self.coreemu = server.coreemu - utils.close_onexec(request.fileno()) - socketserver.BaseRequestHandler.__init__(self, request, client_address, server) - - def setup(self): - """ - Client has connected, set up a new connection. - - :return: nothing - """ - logger.debug("new TCP connection: %s", self.client_address) - - def finish(self): - """ - Client has disconnected, end this request handler and disconnect - from the session. Shutdown sessions that are not running. - - :return: nothing - """ - logger.debug("finishing request handler") - logger.debug("remaining message queue size: %s", self.message_queue.qsize()) - - # give some time for message queue to deplete - timeout = 10 - wait = 0 - while not self.message_queue.empty(): - logger.debug("waiting for message queue to empty: %s seconds", wait) - time.sleep(1) - wait += 1 - if wait == timeout: - logger.warning("queue failed to be empty, finishing request handler") - break - - logger.info("client disconnected: notifying threads") - self.done = True - for thread in self.handler_threads: - logger.info("waiting for thread: %s", thread.getName()) - thread.join(timeout) - if thread.is_alive(): - logger.warning( - "joining %s failed: still alive after %s sec", - thread.getName(), - timeout, - ) - - logger.info("connection closed: %s", self.client_address) - if self.session: - # remove client from session broker and shutdown if there are no clients - self.remove_session_handlers() - clients = self.session_clients[self.session.id] - clients.remove(self) - if not clients and not self.session.is_active(): - logger.info( - "no session clients left and not active, initiating shutdown" - ) - self.coreemu.delete_session(self.session.id) - - return socketserver.BaseRequestHandler.finish(self) - - def session_message(self, flags=0): - """ - Build CORE API Sessions message based on current session info. 
- - :param int flags: message flags - :return: session message - """ - id_list = [] - name_list = [] - file_list = [] - node_count_list = [] - date_list = [] - thumb_list = [] - num_sessions = 0 - with self._sessions_lock: - for _id in self.coreemu.sessions: - session = self.coreemu.sessions[_id] - num_sessions += 1 - id_list.append(str(_id)) - name = session.name - if not name: - name = "" - name_list.append(name) - file_name = str(session.file_path) if session.file_path else "" - file_list.append(str(file_name)) - node_count_list.append(str(session.get_node_count())) - date_list.append(time.ctime(session.state_time)) - thumb = str(session.thumbnail) if session.thumbnail else "" - thumb_list.append(thumb) - session_ids = "|".join(id_list) - names = "|".join(name_list) - files = "|".join(file_list) - node_counts = "|".join(node_count_list) - dates = "|".join(date_list) - thumbs = "|".join(thumb_list) - if num_sessions > 0: - tlv_data = b"" - if len(session_ids) > 0: - tlv_data += coreapi.CoreSessionTlv.pack( - SessionTlvs.NUMBER.value, session_ids - ) - if len(names) > 0: - tlv_data += coreapi.CoreSessionTlv.pack(SessionTlvs.NAME.value, names) - if len(files) > 0: - tlv_data += coreapi.CoreSessionTlv.pack(SessionTlvs.FILE.value, files) - if len(node_counts) > 0: - tlv_data += coreapi.CoreSessionTlv.pack( - SessionTlvs.NODE_COUNT.value, node_counts - ) - if len(dates) > 0: - tlv_data += coreapi.CoreSessionTlv.pack(SessionTlvs.DATE.value, dates) - if len(thumbs) > 0: - tlv_data += coreapi.CoreSessionTlv.pack(SessionTlvs.THUMB.value, thumbs) - message = coreapi.CoreSessionMessage.pack(flags, tlv_data) - else: - message = None - return message - - def handle_broadcast_event(self, event_data): - """ - Callback to handle an event broadcast out from a session. - - :param core.emulator.data.EventData event_data: event data to handle - :return: nothing - """ - logger.debug("handling broadcast event: %s", event_data) - - tlv_data = structutils.pack_values( - coreapi.CoreEventTlv, - [ - (EventTlvs.NODE, event_data.node), - (EventTlvs.TYPE, event_data.event_type.value), - (EventTlvs.NAME, event_data.name), - (EventTlvs.DATA, event_data.data), - (EventTlvs.TIME, event_data.time), - (EventTlvs.SESSION, event_data.session), - ], - ) - message = coreapi.CoreEventMessage.pack(0, tlv_data) - - try: - self.sendall(message) - except IOError: - logger.exception("error sending event message") - - def handle_broadcast_file(self, file_data): - """ - Callback to handle a file broadcast out from a session. - - :param core.emulator.data.FileData file_data: file data to handle - :return: nothing - """ - logger.debug("handling broadcast file: %s", file_data) - - tlv_data = structutils.pack_values( - coreapi.CoreFileTlv, - [ - (FileTlvs.NODE, file_data.node), - (FileTlvs.NAME, file_data.name), - (FileTlvs.MODE, file_data.mode), - (FileTlvs.NUMBER, file_data.number), - (FileTlvs.TYPE, file_data.type), - (FileTlvs.SOURCE_NAME, file_data.source), - (FileTlvs.SESSION, file_data.session), - (FileTlvs.DATA, file_data.data), - (FileTlvs.COMPRESSED_DATA, file_data.compressed_data), - ], - ) - message = coreapi.CoreFileMessage.pack(file_data.message_type.value, tlv_data) - - try: - self.sendall(message) - except IOError: - logger.exception("error sending file message") - - def handle_broadcast_config(self, config_data): - """ - Callback to handle a config broadcast out from a session. 
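The `session_message` builder above serializes every known session as parallel pipe-delimited lists, one Session TLV per field, and the `session_numbers` parser on the receiving side simply splits them back apart. The encoding itself is trivial, but worth seeing in isolation:

```python
# one entry per session, empty string for sessions without a name
ids = "|".join(str(i) for i in [1, 2, 3])          # "1|2|3"
names = "|".join(["alpha", "", "bravo"])           # "alpha||bravo"

# the receiving side reverses the join
session_ids = [int(x) for x in ids.split("|")]
assert session_ids == [1, 2, 3]
```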
- - :param core.emulator.data.ConfigData config_data: config data to handle - :return: nothing - """ - logger.debug("handling broadcast config: %s", config_data) - message = dataconversion.convert_config(config_data) - try: - self.sendall(message) - except IOError: - logger.exception("error sending config message") - - def handle_broadcast_exception(self, exception_data): - """ - Callback to handle an exception broadcast out from a session. - - :param core.emulator.data.ExceptionData exception_data: exception data to handle - :return: nothing - """ - logger.debug("handling broadcast exception: %s", exception_data) - tlv_data = structutils.pack_values( - coreapi.CoreExceptionTlv, - [ - (ExceptionTlvs.NODE, exception_data.node), - (ExceptionTlvs.SESSION, str(exception_data.session)), - (ExceptionTlvs.LEVEL, exception_data.level.value), - (ExceptionTlvs.SOURCE, exception_data.source), - (ExceptionTlvs.DATE, exception_data.date), - (ExceptionTlvs.TEXT, exception_data.text), - ], - ) - message = coreapi.CoreExceptionMessage.pack(0, tlv_data) - - try: - self.sendall(message) - except IOError: - logger.exception("error sending exception message") - - def handle_broadcast_node(self, node_data): - """ - Callback to handle an node broadcast out from a session. - - :param core.emulator.data.NodeData node_data: node data to handle - :return: nothing - """ - logger.debug("handling broadcast node: %s", node_data) - message = dataconversion.convert_node(node_data) - try: - self.sendall(message) - except IOError: - logger.exception("error sending node message") - - def handle_broadcast_link(self, link_data): - """ - Callback to handle an link broadcast out from a session. - - :param core.emulator.data.LinkData link_data: link data to handle - :return: nothing - """ - logger.debug("handling broadcast link: %s", link_data) - options_data = link_data.options - loss = "" - if options_data.loss is not None: - loss = str(options_data.loss) - dup = "" - if options_data.dup is not None: - dup = str(options_data.dup) - iface1 = link_data.iface1 - if iface1 is None: - iface1 = InterfaceData() - iface2 = link_data.iface2 - if iface2 is None: - iface2 = InterfaceData() - - tlv_data = structutils.pack_values( - coreapi.CoreLinkTlv, - [ - (LinkTlvs.N1_NUMBER, link_data.node1_id), - (LinkTlvs.N2_NUMBER, link_data.node2_id), - (LinkTlvs.DELAY, options_data.delay), - (LinkTlvs.BANDWIDTH, options_data.bandwidth), - (LinkTlvs.LOSS, loss), - (LinkTlvs.DUP, dup), - (LinkTlvs.JITTER, options_data.jitter), - (LinkTlvs.MER, options_data.mer), - (LinkTlvs.BURST, options_data.burst), - (LinkTlvs.MBURST, options_data.mburst), - (LinkTlvs.TYPE, link_data.type.value), - (LinkTlvs.UNIDIRECTIONAL, options_data.unidirectional), - (LinkTlvs.NETWORK_ID, link_data.network_id), - (LinkTlvs.KEY, options_data.key), - (LinkTlvs.IFACE1_NUMBER, iface1.id), - (LinkTlvs.IFACE1_IP4, iface1.ip4), - (LinkTlvs.IFACE1_IP4_MASK, iface1.ip4_mask), - (LinkTlvs.IFACE1_MAC, iface1.mac), - (LinkTlvs.IFACE1_IP6, iface1.ip6), - (LinkTlvs.IFACE1_IP6_MASK, iface1.ip6_mask), - (LinkTlvs.IFACE2_NUMBER, iface2.id), - (LinkTlvs.IFACE2_IP4, iface2.ip4), - (LinkTlvs.IFACE2_IP4_MASK, iface2.ip4_mask), - (LinkTlvs.IFACE2_MAC, iface2.mac), - (LinkTlvs.IFACE2_IP6, iface2.ip6), - (LinkTlvs.IFACE2_IP6_MASK, iface2.ip6_mask), - ], - ) - - message = coreapi.CoreLinkMessage.pack(link_data.message_type.value, tlv_data) - - try: - self.sendall(message) - except IOError: - logger.exception("error sending Event Message") - - def register(self): - """ - Return a Register Message - 
- :return: register message data - """ - logger.info( - "GUI has connected to session %d at %s", self.session.id, time.ctime() - ) - tlv_data = b"" - tlv_data += coreapi.CoreRegisterTlv.pack( - RegisterTlvs.EXECUTE_SERVER.value, "core-daemon" - ) - tlv_data += coreapi.CoreRegisterTlv.pack( - RegisterTlvs.EMULATION_SERVER.value, "core-daemon" - ) - tlv_data += coreapi.CoreRegisterTlv.pack(RegisterTlvs.UTILITY.value, "broker") - tlv_data += coreapi.CoreRegisterTlv.pack( - self.session.location.config_type.value, self.session.location.name - ) - tlv_data += coreapi.CoreRegisterTlv.pack( - self.session.mobility.config_type.value, self.session.mobility.name - ) - for model_name in self.session.mobility.models: - model_class = self.session.mobility.models[model_name] - tlv_data += coreapi.CoreRegisterTlv.pack( - model_class.config_type.value, model_class.name - ) - tlv_data += coreapi.CoreRegisterTlv.pack( - self.session.services.config_type.value, self.session.services.name - ) - tlv_data += coreapi.CoreRegisterTlv.pack( - self.session.emane.config_type.value, self.session.emane.name - ) - for model_name, model_class in EmaneModelManager.models.items(): - tlv_data += coreapi.CoreRegisterTlv.pack( - model_class.config_type.value, model_class.name - ) - tlv_data += coreapi.CoreRegisterTlv.pack( - self.session.options.config_type.value, self.session.options.name - ) - tlv_data += coreapi.CoreRegisterTlv.pack(RegisterTlvs.UTILITY.value, "metadata") - - return coreapi.CoreRegMessage.pack(MessageFlags.ADD.value, tlv_data) - - def sendall(self, data): - """ - Send raw data to the other end of this TCP connection - using socket"s sendall(). - - :param data: data to send over request socket - :return: data sent - """ - return self.request.sendall(data) - - def receive_message(self): - """ - Receive data and return a CORE API message object. - - :return: received message - :rtype: core.api.tlv.coreapi.CoreMessage - """ - try: - header = self.request.recv(coreapi.CoreMessage.header_len) - except IOError as e: - raise IOError(f"error receiving header ({e})") - - if len(header) != coreapi.CoreMessage.header_len: - if len(header) == 0: - raise EOFError("client disconnected") - else: - raise IOError("invalid message header size") - - message_type, message_flags, message_len = coreapi.CoreMessage.unpack_header( - header - ) - if message_len == 0: - logger.warning("received message with no data") - - data = b"" - while len(data) < message_len: - data += self.request.recv(message_len - len(data)) - if len(data) > message_len: - error_message = f"received message length does not match received data ({len(data)} != {message_len})" - logger.error(error_message) - raise IOError(error_message) - - try: - message_class = coreapi.CLASS_MAP[message_type] - message = message_class(message_flags, header, data) - except KeyError: - message = coreapi.CoreMessage(message_flags, header, data) - message.message_type = message_type - logger.exception("unimplemented core message type: %s", message.type_str()) - - return message - - def queue_message(self, message): - """ - Queue an API message for later processing. 
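`receive_message` above implements classic length-prefixed framing: read the fixed four-byte header, then loop until exactly `message_len` payload bytes have arrived, since a TCP `recv` may return short reads. A minimal sketch of that loop against an in-memory stream instead of a socket:

```python
import io
import struct

header_format = "!BBH"
stream = io.BytesIO(struct.pack(header_format, 1, 0, 4) + b"abcd")

header = stream.read(4)
msg_type, msg_flags, msg_len = struct.unpack(header_format, header)
data = b""
while len(data) < msg_len:
    chunk = stream.read(msg_len - len(data))
    if not chunk:
        raise EOFError("peer disconnected")
    data += chunk
assert data == b"abcd"
```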
- - :param message: message to queue - :return: nothing - """ - logger.debug( - "queueing msg (queuedtimes = %s): type %s", - message.queuedtimes, - MessageTypes(message.message_type), - ) - self.message_queue.put(message) - - def handler_thread(self): - """ - CORE API message handling loop that is spawned for each server - thread; get CORE API messages from the incoming message queue, - and call handlemsg() for processing. - - :return: nothing - """ - while not self.done: - try: - message = self.message_queue.get(timeout=1) - self.handle_message(message) - except Empty: - pass - - def handle_message(self, message): - """ - Handle an incoming message; dispatch based on message type, - optionally sending replies. - - :param message: message to handle - :return: nothing - """ - logger.debug( - "%s handling message:\n%s", threading.currentThread().getName(), message - ) - if message.message_type not in self.message_handlers: - logger.error("no handler for message type: %s", message.type_str()) - return - - message_handler = self.message_handlers[message.message_type] - try: - # TODO: this needs to be removed, make use of the broadcast message methods - replies = message_handler(message) - self.dispatch_replies(replies, message) - except Exception as e: - self.send_exception(ExceptionLevels.ERROR, "corehandler", str(e)) - logger.exception( - "%s: exception while handling message: %s", - threading.currentThread().getName(), - message, - ) - - def dispatch_replies(self, replies, message): - """ - Dispatch replies by CORE to message msg previously received from the client. - - :param list replies: reply messages to dispatch - :param message: message for replies - :return: nothing - """ - for reply in replies: - message_type, message_flags, message_length = coreapi.CoreMessage.unpack_header( - reply - ) - try: - reply_message = coreapi.CLASS_MAP[message_type]( - message_flags, - reply[: coreapi.CoreMessage.header_len], - reply[coreapi.CoreMessage.header_len :], - ) - except KeyError: - # multiple TLVs of same type cause KeyError exception - reply_message = f"CoreMessage (type {message_type} flags {message_flags} length {message_length})" - - logger.debug("sending reply:\n%s", reply_message) - - try: - self.sendall(reply) - except IOError: - logger.exception("error dispatching reply") - - def handle(self): - """ - Handle a new connection request from a client. Dispatch to the - recvmsg() method for receiving data into CORE API messages, and - add them to an incoming message queue. 
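The handler pairs a per-connection message queue with a worker thread that polls it on a one-second timeout, so the loop can notice the `done` flag between messages instead of blocking forever. A stripped-down sketch of that pattern outside the handler class:

```python
import queue
import threading

messages: "queue.Queue[str]" = queue.Queue()
done = False


def worker() -> None:
    while not done:
        try:
            message = messages.get(timeout=1)
        except queue.Empty:
            continue
        # stand-in for handle_message dispatching on message type
        print("handling", message)


threading.Thread(target=worker, daemon=True).start()
messages.put("node message")
```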
- - :return: nothing - """ - # use port as session id - port = self.request.getpeername()[1] - - # TODO: add shutdown handler for session - self.session = self.coreemu.create_session(port) - logger.debug("created new session for client: %s", self.session.id) - clients = self.session_clients.setdefault(self.session.id, []) - clients.append(self) - - # add handlers for various data - self.add_session_handlers() - - # set initial session state - self.session.set_state(EventTypes.DEFINITION_STATE) - - while True: - try: - message = self.receive_message() - except EOFError: - logger.info("client disconnected") - break - except IOError: - logger.exception("error receiving message") - break - - message.queuedtimes = 0 - self.queue_message(message) - - # delay is required for brief connections, allow session joining - if message.message_type == MessageTypes.SESSION.value: - time.sleep(0.125) - - # broadcast node/link messages to other connected clients - if message.message_type not in [ - MessageTypes.NODE.value, - MessageTypes.LINK.value, - ]: - continue - - clients = self.session_clients[self.session.id] - for client in clients: - if client == self: - continue - - logger.debug("BROADCAST TO OTHER CLIENT: %s", client) - client.sendall(message.raw_message) - - def send_exception(self, level, source, text, node=None): - """ - Sends an exception for display within the GUI. - - :param core.emulator.enumerations.ExceptionLevel level: level for exception - :param str source: source where exception came from - :param str text: details about exception - :param int node: node id, if related to a specific node - :return: nothing - """ - exception_data = ExceptionData( - session=self.session.id, - node=node, - date=time.ctime(), - level=level, - source=source, - text=text, - ) - self.handle_broadcast_exception(exception_data) - - def add_session_handlers(self): - logger.debug("adding session broadcast handlers") - self.session.event_handlers.append(self.handle_broadcast_event) - self.session.exception_handlers.append(self.handle_broadcast_exception) - self.session.node_handlers.append(self.handle_broadcast_node) - self.session.link_handlers.append(self.handle_broadcast_link) - self.session.file_handlers.append(self.handle_broadcast_file) - self.session.config_handlers.append(self.handle_broadcast_config) - - def remove_session_handlers(self): - logger.debug("removing session broadcast handlers") - self.session.event_handlers.remove(self.handle_broadcast_event) - self.session.exception_handlers.remove(self.handle_broadcast_exception) - self.session.node_handlers.remove(self.handle_broadcast_node) - self.session.link_handlers.remove(self.handle_broadcast_link) - self.session.file_handlers.remove(self.handle_broadcast_file) - self.session.config_handlers.remove(self.handle_broadcast_config) - - def handle_node_message(self, message): - """ - Node Message handler - - :param core.api.tlv.coreapi.CoreNodeMessage message: node message - :return: replies to node message - """ - replies = [] - if ( - message.flags & MessageFlags.ADD.value - and message.flags & MessageFlags.DELETE.value - ): - logger.warning("ignoring invalid message: add and delete flag both set") - return () - - _class = CoreNode - node_type_value = message.get_tlv(NodeTlvs.TYPE.value) - if node_type_value is not None: - node_type = NodeTypes(node_type_value) - _class = self.session.get_node_class(node_type) - - node_id = message.get_tlv(NodeTlvs.NUMBER.value) - - options = NodeOptions( - name=message.get_tlv(NodeTlvs.NAME.value), - 
model=message.get_tlv(NodeTlvs.MODEL.value), - legacy=True, - ) - options.set_position( - x=message.get_tlv(NodeTlvs.X_POSITION.value), - y=message.get_tlv(NodeTlvs.Y_POSITION.value), - ) - - lat = message.get_tlv(NodeTlvs.LATITUDE.value) - if lat is not None: - lat = float(lat) - lon = message.get_tlv(NodeTlvs.LONGITUDE.value) - if lon is not None: - lon = float(lon) - alt = message.get_tlv(NodeTlvs.ALTITUDE.value) - if alt is not None: - alt = float(alt) - options.set_location(lat=lat, lon=lon, alt=alt) - - options.icon = message.get_tlv(NodeTlvs.ICON.value) - options.canvas = message.get_tlv(NodeTlvs.CANVAS.value) - options.server = message.get_tlv(NodeTlvs.EMULATION_SERVER.value) - - services = message.get_tlv(NodeTlvs.SERVICES.value) - if services: - options.services = services.split("|") - - if message.flags & MessageFlags.ADD.value: - node = self.session.add_node(_class, node_id, options) - has_geo = all( - i is not None for i in [options.lon, options.lat, options.alt] - ) - if has_geo: - self.session.broadcast_node(node) - if message.flags & MessageFlags.STRING.value: - self.node_status_request[node.id] = True - if self.session.state == EventTypes.RUNTIME_STATE: - self.send_node_emulation_id(node.id) - elif message.flags & MessageFlags.DELETE.value: - with self._shutdown_lock: - result = self.session.delete_node(node_id) - if result and self.session.get_node_count() == 0: - self.session.set_state(EventTypes.SHUTDOWN_STATE) - self.session.delete_nodes() - self.session.distributed.shutdown() - self.session.sdt.shutdown() - - # if we deleted a node broadcast out its removal - if result and message.flags & MessageFlags.STRING.value: - tlvdata = b"" - tlvdata += coreapi.CoreNodeTlv.pack(NodeTlvs.NUMBER.value, node_id) - flags = MessageFlags.DELETE.value | MessageFlags.LOCAL.value - replies.append(coreapi.CoreNodeMessage.pack(flags, tlvdata)) - # node update - else: - node = self.session.get_node(node_id, NodeBase) - node.icon = options.icon - has_geo = all( - i is not None for i in [options.lon, options.lat, options.alt] - ) - if has_geo: - self.session.set_node_geo(node, options.lon, options.lat, options.alt) - self.session.broadcast_node(node) - else: - self.session.set_node_pos(node, options.x, options.y) - - return replies - - def handle_link_message(self, message): - """ - Link Message handler - - :param core.api.tlv.coreapi.CoreLinkMessage message: link message to handle - :return: link message replies - """ - node1_id = message.get_tlv(LinkTlvs.N1_NUMBER.value) - node2_id = message.get_tlv(LinkTlvs.N2_NUMBER.value) - iface1_data = InterfaceData( - id=message.get_tlv(LinkTlvs.IFACE1_NUMBER.value), - name=message.get_tlv(LinkTlvs.IFACE1_NAME.value), - mac=message.get_tlv(LinkTlvs.IFACE1_MAC.value), - ip4=message.get_tlv(LinkTlvs.IFACE1_IP4.value), - ip4_mask=message.get_tlv(LinkTlvs.IFACE1_IP4_MASK.value), - ip6=message.get_tlv(LinkTlvs.IFACE1_IP6.value), - ip6_mask=message.get_tlv(LinkTlvs.IFACE1_IP6_MASK.value), - ) - iface2_data = InterfaceData( - id=message.get_tlv(LinkTlvs.IFACE2_NUMBER.value), - name=message.get_tlv(LinkTlvs.IFACE2_NAME.value), - mac=message.get_tlv(LinkTlvs.IFACE2_MAC.value), - ip4=message.get_tlv(LinkTlvs.IFACE2_IP4.value), - ip4_mask=message.get_tlv(LinkTlvs.IFACE2_IP4_MASK.value), - ip6=message.get_tlv(LinkTlvs.IFACE2_IP6.value), - ip6_mask=message.get_tlv(LinkTlvs.IFACE2_IP6_MASK.value), - ) - link_type = LinkTypes.WIRED - link_type_value = message.get_tlv(LinkTlvs.TYPE.value) - if link_type_value is not None: - link_type = LinkTypes(link_type_value) - 
options = LinkOptions() - options.delay = message.get_tlv(LinkTlvs.DELAY.value) - options.bandwidth = message.get_tlv(LinkTlvs.BANDWIDTH.value) - options.jitter = message.get_tlv(LinkTlvs.JITTER.value) - options.mer = message.get_tlv(LinkTlvs.MER.value) - options.burst = message.get_tlv(LinkTlvs.BURST.value) - options.mburst = message.get_tlv(LinkTlvs.MBURST.value) - options.unidirectional = message.get_tlv(LinkTlvs.UNIDIRECTIONAL.value) - options.key = message.get_tlv(LinkTlvs.KEY.value) - loss = message.get_tlv(LinkTlvs.LOSS.value) - dup = message.get_tlv(LinkTlvs.DUP.value) - if loss is not None: - options.loss = float(loss) - if dup is not None: - options.dup = int(dup) - - # fix for rj45 nodes missing iface id - node1 = self.session.get_node(node1_id, NodeBase) - node2 = self.session.get_node(node2_id, NodeBase) - if isinstance(node1, Rj45Node) and iface1_data.id is None: - iface1_data.id = 0 - if isinstance(node2, Rj45Node) and iface2_data.id is None: - iface2_data.id = 0 - - if message.flags & MessageFlags.ADD.value: - self.session.add_link( - node1_id, node2_id, iface1_data, iface2_data, options, link_type - ) - elif message.flags & MessageFlags.DELETE.value: - if isinstance(node1, Rj45Node): - iface1_data.id = node1.iface_id - if isinstance(node2, Rj45Node): - iface2_data.id = node2.iface_id - self.session.delete_link( - node1_id, node2_id, iface1_data.id, iface2_data.id, link_type - ) - else: - self.session.update_link( - node1_id, node2_id, iface1_data.id, iface2_data.id, options, link_type - ) - return () - - def handle_execute_message(self, message): - """ - Execute Message handler - - :param core.api.tlv.coreapi.CoreExecMessage message: execute message to handle - :return: reply messages - """ - node_id = message.get_tlv(ExecuteTlvs.NODE.value) - execute_num = message.get_tlv(ExecuteTlvs.NUMBER.value) - execute_time = message.get_tlv(ExecuteTlvs.TIME.value) - command = message.get_tlv(ExecuteTlvs.COMMAND.value) - - # local flag indicates command executed locally, not on a node - if node_id is None and not message.flags & MessageFlags.LOCAL.value: - raise ValueError("Execute Message is missing node number.") - - if execute_num is None: - raise ValueError("Execute Message is missing execution number.") - - if execute_time is not None: - self.session.add_event( - float(execute_time), node_id=node_id, name=None, data=command - ) - return () - - try: - node = self.session.get_node(node_id, CoreNodeBase) - - # build common TLV items for reply - tlv_data = b"" - if node_id is not None: - tlv_data += coreapi.CoreExecuteTlv.pack(ExecuteTlvs.NODE.value, node_id) - tlv_data += coreapi.CoreExecuteTlv.pack( - ExecuteTlvs.NUMBER.value, execute_num - ) - tlv_data += coreapi.CoreExecuteTlv.pack(ExecuteTlvs.COMMAND.value, command) - - if message.flags & MessageFlags.TTY.value: - if node_id is None: - raise NotImplementedError - # echo back exec message with cmd for spawning interactive terminal - if command == "bash": - command = "/bin/bash" - res = node.termcmdstring(command) - tlv_data += coreapi.CoreExecuteTlv.pack(ExecuteTlvs.RESULT.value, res) - reply = coreapi.CoreExecMessage.pack(MessageFlags.TTY.value, tlv_data) - return (reply,) - else: - # execute command and send a response - if ( - message.flags & MessageFlags.STRING.value - or message.flags & MessageFlags.TEXT.value - ): - if message.flags & MessageFlags.LOCAL.value: - try: - res = utils.cmd(command) - status = 0 - except CoreCommandError as e: - res = e.stderr - status = e.returncode - else: - try: - res = node.cmd(command) - 
status = 0 - except CoreCommandError as e: - res = e.stderr - status = e.returncode - if message.flags & MessageFlags.TEXT.value: - tlv_data += coreapi.CoreExecuteTlv.pack( - ExecuteTlvs.RESULT.value, res - ) - if message.flags & MessageFlags.STRING.value: - tlv_data += coreapi.CoreExecuteTlv.pack( - ExecuteTlvs.STATUS.value, status - ) - reply = coreapi.CoreExecMessage.pack(0, tlv_data) - return (reply,) - # execute the command with no response - else: - if message.flags & MessageFlags.LOCAL.value: - utils.mute_detach(command) - else: - node.cmd(command, wait=False) - except CoreError: - logger.exception("error getting object: %s", node_id) - # XXX wait and queue this message to try again later - # XXX maybe this should be done differently - if not message.flags & MessageFlags.LOCAL.value: - time.sleep(0.125) - self.queue_message(message) - - return () - - def handle_register_message(self, message): - """ - Register Message Handler - - :param core.api.tlv.coreapi.CoreRegMessage message: register message to handle - :return: reply messages - """ - replies = [] - - # execute a Python script or XML file - execute_server = message.get_tlv(RegisterTlvs.EXECUTE_SERVER.value) - if execute_server: - try: - logger.info("executing: %s", execute_server) - old_session_ids = set() - if message.flags & MessageFlags.STRING.value: - old_session_ids = set(self.coreemu.sessions.keys()) - sys.argv = shlex.split(execute_server) - file_path = Path(sys.argv[0]) - if file_path.suffix == ".xml": - session = self.coreemu.create_session() - try: - session.open_xml(file_path) - except Exception: - self.coreemu.delete_session(session.id) - raise - else: - utils.execute_script(self.coreemu, file_path, execute_server) - - if message.flags & MessageFlags.STRING.value: - new_session_ids = set(self.coreemu.sessions.keys()) - new_sid = new_session_ids.difference(old_session_ids) - try: - sid = new_sid.pop() - logger.info("executed: %s as session %d", execute_server, sid) - except KeyError: - logger.info( - "executed %s with unknown session ID", execute_server - ) - return replies - - logger.debug("checking session %d for RUNTIME state", sid) - session = self.coreemu.sessions.get(sid) - retries = 10 - # wait for session to enter RUNTIME state, to prevent GUI from - # connecting while nodes are still being instantiated - while session.state != EventTypes.RUNTIME_STATE: - logger.debug( - "waiting for session %d to enter RUNTIME state", sid - ) - time.sleep(1) - retries -= 1 - if retries <= 0: - logger.debug("session %d did not enter RUNTIME state", sid) - return replies - - tlv_data = coreapi.CoreRegisterTlv.pack( - RegisterTlvs.EXECUTE_SERVER.value, execute_server - ) - tlv_data += coreapi.CoreRegisterTlv.pack( - RegisterTlvs.SESSION.value, str(sid) - ) - message = coreapi.CoreRegMessage.pack(0, tlv_data) - replies.append(message) - except Exception as e: - logger.exception("error executing: %s", execute_server) - tlv_data = coreapi.CoreExceptionTlv.pack(ExceptionTlvs.LEVEL.value, 2) - tlv_data += coreapi.CoreExceptionTlv.pack( - ExceptionTlvs.TEXT.value, str(e) - ) - message = coreapi.CoreExceptionMessage.pack(0, tlv_data) - replies.append(message) - - return replies - - gui = message.get_tlv(RegisterTlvs.GUI.value) - if gui is None: - logger.debug("ignoring Register message") - else: - # register capabilities with the GUI - replies.append(self.register()) - replies.append(self.session_message()) - - return replies - - def handle_config_message(self, message): - """ - Configuration Message handler - - :param 
core.api.tlv.coreapi.CoreConfMessage message: configuration message to handle - :return: reply messages - """ - # convert config message to standard config data object - config_data = ConfigData( - node=message.get_tlv(ConfigTlvs.NODE.value), - object=message.get_tlv(ConfigTlvs.OBJECT.value), - type=message.get_tlv(ConfigTlvs.TYPE.value), - data_types=message.get_tlv(ConfigTlvs.DATA_TYPES.value), - data_values=message.get_tlv(ConfigTlvs.VALUES.value), - captions=message.get_tlv(ConfigTlvs.CAPTIONS.value), - bitmap=message.get_tlv(ConfigTlvs.BITMAP.value), - possible_values=message.get_tlv(ConfigTlvs.POSSIBLE_VALUES.value), - groups=message.get_tlv(ConfigTlvs.GROUPS.value), - session=message.get_tlv(ConfigTlvs.SESSION.value), - iface_id=message.get_tlv(ConfigTlvs.IFACE_ID.value), - network_id=message.get_tlv(ConfigTlvs.NETWORK_ID.value), - opaque=message.get_tlv(ConfigTlvs.OPAQUE.value), - ) - logger.debug( - "configuration message for %s node %s", config_data.object, config_data.node - ) - message_type = ConfigFlags(config_data.type) - - replies = [] - - # handle session configuration - if config_data.object == "all": - replies = self.handle_config_all(message_type, config_data) - elif config_data.object == self.session.options.name: - replies = self.handle_config_session(message_type, config_data) - elif config_data.object == self.session.location.name: - self.handle_config_location(message_type, config_data) - elif config_data.object == "metadata": - replies = self.handle_config_metadata(message_type, config_data) - elif config_data.object == "broker": - self.handle_config_broker(message_type, config_data) - elif config_data.object == self.session.services.name: - replies = self.handle_config_services(message_type, config_data) - elif config_data.object == self.session.mobility.name: - self.handle_config_mobility(message_type, config_data) - elif config_data.object in self.session.mobility.models: - replies = self.handle_config_mobility_models(message_type, config_data) - elif config_data.object in EmaneModelManager.models: - replies = self.handle_config_emane_models(message_type, config_data) - else: - raise Exception("no handler for configuration: %s", config_data.object) - - for reply in replies: - self.handle_broadcast_config(reply) - - return [] - - def handle_config_all(self, message_type, config_data): - replies = [] - - if message_type == ConfigFlags.RESET: - node_id = config_data.node - if node_id is not None: - self.session.mobility.config_reset(node_id) - self.session.emane.config_reset(node_id) - else: - self.session.location.reset() - self.session.services.reset() - self.session.mobility.config_reset() - self.session.emane.config_reset() - else: - raise Exception(f"cant handle config all: {message_type}") - - return replies - - def handle_config_session(self, message_type, config_data): - replies = [] - if message_type == ConfigFlags.REQUEST: - type_flags = ConfigFlags.NONE.value - config = self.session.options.get_configs() - config_response = ConfigShim.config_data( - 0, None, type_flags, self.session.options, config - ) - replies.append(config_response) - elif message_type != ConfigFlags.RESET and config_data.data_values: - values = ConfigShim.str_to_dict(config_data.data_values) - for key in values: - value = values[key] - self.session.options.set_config(key, value) - return replies - - def handle_config_location(self, message_type, config_data): - if message_type == ConfigFlags.RESET: - self.session.location.reset() - else: - if not config_data.data_values: - 
logger.warning("location data missing") - else: - values = [float(x) for x in config_data.data_values.split("|")] - - # Cartesian coordinate reference point - refx, refy = values[0], values[1] - refz = 0.0 - lat, lon, alt = values[2], values[3], values[4] - # xyz point - self.session.location.refxyz = (refx, refy, refz) - # geographic reference point - self.session.location.setrefgeo(lat, lon, alt) - self.session.location.refscale = values[5] - logger.info( - "location configured: %s = %s scale=%s", - self.session.location.refxyz, - self.session.location.refgeo, - self.session.location.refscale, - ) - - def handle_config_metadata(self, message_type, config_data): - replies = [] - if message_type == ConfigFlags.REQUEST: - node_id = config_data.node - metadata_configs = self.session.metadata - if metadata_configs is None: - metadata_configs = {} - data_values = "|".join( - [f"{x}={metadata_configs[x]}" for x in metadata_configs] - ) - data_types = tuple(ConfigDataTypes.STRING.value for _ in metadata_configs) - config_response = ConfigData( - message_type=0, - node=node_id, - object="metadata", - type=ConfigFlags.NONE.value, - data_types=data_types, - data_values=data_values, - ) - replies.append(config_response) - elif message_type != ConfigFlags.RESET and config_data.data_values: - values = ConfigShim.str_to_dict(config_data.data_values) - for key in values: - value = values[key] - self.session.metadata[key] = value - return replies - - def handle_config_broker(self, message_type, config_data): - if message_type not in [ConfigFlags.REQUEST, ConfigFlags.RESET]: - if not config_data.data_values: - logger.info("emulation server data missing") - else: - values = config_data.data_values.split("|") - - # string of "server:ip:port,server:ip:port,..." - server_strings = values[0] - server_list = server_strings.split(",") - - for server in server_list: - server_items = server.split(":") - name, host, _ = server_items[:3] - self.session.distributed.add_server(name, host) - elif message_type == ConfigFlags.RESET: - self.session.distributed.shutdown() - - def handle_config_services(self, message_type, config_data): - replies = [] - node_id = config_data.node - opaque = config_data.opaque - - if message_type == ConfigFlags.REQUEST: - session_id = config_data.session - opaque = config_data.opaque - - logger.debug( - "configuration request: node(%s) session(%s) opaque(%s)", - node_id, - session_id, - opaque, - ) - - # send back a list of available services - if opaque is None: - type_flag = ConfigFlags.NONE.value - data_types = tuple( - repeat(ConfigDataTypes.BOOL.value, len(ServiceManager.services)) - ) - - # sort groups by name and map services to groups - groups = set() - group_map = {} - for name in ServiceManager.services: - service_name = ServiceManager.services[name] - group = service_name.group - groups.add(group) - group_map.setdefault(group, []).append(service_name) - groups = sorted(groups, key=lambda x: x.lower()) - - # define tlv values in proper order - captions = [] - possible_values = [] - values = [] - group_strings = [] - start_index = 1 - logger.debug("sorted groups: %s", groups) - for group in groups: - services = sorted(group_map[group], key=lambda x: x.name.lower()) - logger.debug("sorted services for group(%s): %s", group, services) - end_index = start_index + len(services) - 1 - group_strings.append(f"{group}:{start_index}-{end_index}") - start_index += len(services) - for service_name in services: - captions.append(service_name.name) - values.append("0") - if 
service_name.custom_needed: - possible_values.append("1") - else: - possible_values.append("") - - # format for tlv - captions = "|".join(captions) - possible_values = "|".join(possible_values) - values = "|".join(values) - groups = "|".join(group_strings) - # send back the properties for this service - else: - if not node_id: - return replies - - node = self.session.get_node(node_id, CoreNodeBase) - if node is None: - logger.warning( - "request to configure service for unknown node %s", node_id - ) - return replies - - services = ServiceShim.servicesfromopaque(opaque) - if not services: - return replies - - servicesstring = opaque.split(":") - if len(servicesstring) == 3: - # a file request: e.g. "service:zebra:quagga.conf" - file_name = servicesstring[2] - service_name = services[0] - file_data = self.session.services.get_service_file( - node, service_name, file_name - ) - self.session.broadcast_file(file_data) - # short circuit this request early to avoid returning response below - return replies - - # the first service in the list is the one being configured - service_name = services[0] - # send back: - # dirs, configs, startindex, startup, shutdown, metadata, config - type_flag = ConfigFlags.UPDATE.value - data_types = tuple( - repeat(ConfigDataTypes.STRING.value, len(ServiceShim.keys)) - ) - service = self.session.services.get_service( - node_id, service_name, default_service=True - ) - values = ServiceShim.tovaluelist(node, service) - captions = None - possible_values = None - groups = None - - config_response = ConfigData( - message_type=0, - node=node_id, - object=self.session.services.name, - type=type_flag, - data_types=data_types, - data_values=values, - captions=captions, - possible_values=possible_values, - groups=groups, - session=session_id, - opaque=opaque, - ) - replies.append(config_response) - elif message_type == ConfigFlags.RESET: - self.session.services.reset() - else: - data_types = config_data.data_types - values = config_data.data_values - - error_message = "services config message that I don't know how to handle" - if values is None: - logger.error(error_message) - else: - if opaque is None: - values = values.split("|") - # store default services for a node type in self.defaultservices[] - if ( - data_types is None - or data_types[0] != ConfigDataTypes.STRING.value - ): - logger.info(error_message) - return None - key = values.pop(0) - self.session.services.default_services[key] = values - logger.debug("default services for type %s set to %s", key, values) - elif node_id: - services = ServiceShim.servicesfromopaque(opaque) - if services: - service_name = services[0] - - # set custom service for node - self.session.services.set_service(node_id, service_name) - - # set custom values for custom service - service = self.session.services.get_service( - node_id, service_name - ) - if not service: - raise ValueError( - "custom service(%s) for node(%s) does not exist", - service_name, - node_id, - ) - - values = ConfigShim.str_to_dict(values) - for name in values: - value = values[name] - ServiceShim.setvalue(service, name, value) - - return replies - - def handle_config_mobility(self, message_type, _): - if message_type == ConfigFlags.RESET: - self.session.mobility.reset() - - def handle_config_mobility_models(self, message_type, config_data): - replies = [] - node_id = config_data.node - object_name = config_data.object - iface_id = config_data.iface_id - values_str = config_data.data_values - - node_id = utils.iface_config_id(node_id, iface_id) - logger.debug( - 
"received configure message for %s nodenum: %s", object_name, node_id - ) - if message_type == ConfigFlags.REQUEST: - logger.info("replying to configure request for model: %s", object_name) - typeflags = ConfigFlags.NONE.value - - model_class = self.session.mobility.models.get(object_name) - if not model_class: - logger.warning("model class does not exist: %s", object_name) - return [] - - config = self.session.mobility.get_model_config(node_id, object_name) - config_response = ConfigShim.config_data( - 0, node_id, typeflags, model_class, config - ) - replies.append(config_response) - elif message_type != ConfigFlags.RESET: - # store the configuration values for later use, when the node - if not object_name: - logger.warning("no configuration object for node: %s", node_id) - return [] - - parsed_config = {} - if values_str: - parsed_config = ConfigShim.str_to_dict(values_str) - - self.session.mobility.set_model_config(node_id, object_name, parsed_config) - if self.session.state == EventTypes.RUNTIME_STATE and parsed_config: - try: - node = self.session.get_node(node_id, WlanNode) - if object_name == BasicRangeModel.name: - node.updatemodel(parsed_config) - except CoreError: - logger.error( - "skipping mobility configuration for unknown node: %s", node_id - ) - - return replies - - def handle_config_emane_models(self, message_type, config_data): - replies = [] - node_id = config_data.node - object_name = config_data.object - iface_id = config_data.iface_id - values_str = config_data.data_values - - node_id = utils.iface_config_id(node_id, iface_id) - logger.debug( - "received configure message for %s nodenum: %s", object_name, node_id - ) - if message_type == ConfigFlags.REQUEST: - logger.info("replying to configure request for model: %s", object_name) - typeflags = ConfigFlags.NONE.value - - model_class = self.session.emane.get_model(object_name) - if not model_class: - logger.warning("model class does not exist: %s", object_name) - return [] - - config = self.session.emane.get_config(node_id, object_name) - config_response = ConfigShim.config_data( - 0, node_id, typeflags, model_class, config - ) - replies.append(config_response) - elif message_type != ConfigFlags.RESET: - # store the configuration values for later use, when the node - if not object_name: - logger.warning("no configuration object for node: %s", node_id) - return [] - parsed_config = {} - if values_str: - parsed_config = ConfigShim.str_to_dict(values_str) - self.session.emane.node_models[node_id] = object_name - self.session.emane.set_config(node_id, object_name, parsed_config) - - return replies - - def handle_file_message(self, message): - """ - File Message handler - - :param core.api.tlv.coreapi.CoreFileMessage message: file message to handle - :return: reply messages - """ - if message.flags & MessageFlags.ADD.value: - node_id = message.get_tlv(FileTlvs.NODE.value) - file_name = message.get_tlv(FileTlvs.NAME.value) - file_type = message.get_tlv(FileTlvs.TYPE.value) - src_path = message.get_tlv(FileTlvs.SOURCE_NAME.value) - if src_path: - src_path = Path(src_path) - data = message.get_tlv(FileTlvs.DATA.value) - compressed_data = message.get_tlv(FileTlvs.COMPRESSED_DATA.value) - - if compressed_data: - logger.warning("Compressed file data not implemented for File message.") - return () - - if src_path and data: - logger.warning( - "ignoring invalid File message: source and data TLVs are both present" - ) - return () - - # some File Messages store custom files in services, - # prior to node creation - if file_type is not 
None: - if file_type.startswith("service:"): - _, service_name = file_type.split(":")[:2] - self.session.services.set_service_file( - node_id, service_name, file_name, data - ) - return () - elif file_type.startswith("hook:"): - _, state = file_type.split(":")[:2] - if not state.isdigit(): - logger.error("error setting hook having state '%s'", state) - return () - state = int(state) - state = EventTypes(state) - self.session.add_hook(state, file_name, data, src_path) - return () - - # writing a file to the host - if node_id is None: - if src_path is not None: - shutil.copy2(src_path, file_name) - else: - with file_name.open("w") as f: - f.write(data) - return () - - file_path = Path(file_name) - self.session.add_node_file(node_id, src_path, file_path, data) - else: - raise NotImplementedError - - return () - - def handle_iface_message(self, message): - """ - Interface Message handler. - - :param message: interface message to handle - :return: reply messages - """ - logger.info("ignoring Interface message") - return () - - def handle_event_message(self, message): - """ - Event Message handler - - :param core.api.tlv.coreapi.CoreEventMessage message: event message to handle - :return: reply messages - :raises core.CoreError: when event type <= SHUTDOWN_STATE and not a known node id - """ - event_type_value = message.get_tlv(EventTlvs.TYPE.value) - event_type = EventTypes(event_type_value) - event_data = EventData( - node=message.get_tlv(EventTlvs.NODE.value), - event_type=event_type, - name=message.get_tlv(EventTlvs.NAME.value), - data=message.get_tlv(EventTlvs.DATA.value), - time=message.get_tlv(EventTlvs.TIME.value), - session=message.get_tlv(EventTlvs.SESSION.value), - ) - - if event_data.event_type is None: - raise NotImplementedError("Event message missing event type") - node_id = event_data.node - - logger.debug("handling event %s at %s", event_type.name, time.ctime()) - if event_type.value <= EventTypes.SHUTDOWN_STATE.value: - if node_id is not None: - node = self.session.get_node(node_id, NodeBase) - - # configure mobility models for WLAN added during runtime - if event_type == EventTypes.INSTANTIATION_STATE and isinstance( - node, WlanNode - ): - self.session.start_mobility(node_ids=[node.id]) - return () - - logger.warning( - "dropping unhandled event message for node: %s", node.name - ) - return () - - if event_type == EventTypes.DEFINITION_STATE: - self.session.set_state(event_type) - # clear all session objects in order to receive new definitions - self.session.clear() - elif event_type == EventTypes.CONFIGURATION_STATE: - self.session.set_state(event_type) - elif event_type == EventTypes.INSTANTIATION_STATE: - self.session.set_state(event_type) - if len(self.handler_threads) > 1: - # TODO: sync handler threads here before continuing - time.sleep(2.0) # XXX - # done receiving node/link configuration, ready to instantiate - self.session.instantiate() - - # after booting nodes attempt to send emulation id for nodes - # waiting on status - for _id in self.session.nodes: - self.send_node_emulation_id(_id) - elif event_type == EventTypes.RUNTIME_STATE: - self.session.set_state(event_type) - logger.warning("Unexpected event message: RUNTIME state received") - elif event_type == EventTypes.DATACOLLECT_STATE: - self.session.data_collect() - elif event_type == EventTypes.SHUTDOWN_STATE: - self.session.set_state(event_type) - logger.warning("Unexpected event message: SHUTDOWN state received") - elif event_type in { - EventTypes.START, - EventTypes.STOP, - EventTypes.RESTART, - 
EventTypes.PAUSE, - EventTypes.RECONFIGURE, - }: - handled = False - name = event_data.name - if name: - # TODO: register system for event message handlers, - # like confobjs - if name.startswith("service:"): - self.handle_service_event(event_data) - handled = True - elif name.startswith("mobility:"): - self.session.mobility_event(event_data) - handled = True - if not handled: - logger.warning( - "unhandled event message: event type %s, name %s ", - event_type.name, - name, - ) - elif event_type == EventTypes.FILE_OPEN: - file_path = Path(event_data.name) - self.session.open_xml(file_path, start=False) - self.send_objects() - return () - elif event_type == EventTypes.FILE_SAVE: - file_path = Path(event_data.name) - self.session.save_xml(file_path) - elif event_type == EventTypes.SCHEDULED: - etime = event_data.time - node_id = event_data.node - name = event_data.name - data = event_data.data - if etime is None: - logger.warning("Event message scheduled event missing start time") - return () - if message.flags & MessageFlags.ADD.value: - self.session.add_event( - float(etime), node_id=node_id, name=name, data=data - ) - else: - raise NotImplementedError - - return () - - def handle_service_event(self, event_data): - """ - Handle an Event Message used to start, stop, restart, or validate - a service on a given node. - - :param core.emulator.enumerations.EventData event_data: event data to handle - :return: nothing - """ - event_type = event_data.event_type - node_id = event_data.node - name = event_data.name - - try: - node = self.session.get_node(node_id, CoreNodeBase) - except CoreError: - logger.warning( - "ignoring event for service '%s', unknown node '%s'", name, node_id - ) - return - - fail = "" - unknown = [] - services = ServiceShim.servicesfromopaque(name) - for service_name in services: - service = self.session.services.get_service( - node_id, service_name, default_service=True - ) - if not service: - unknown.append(service_name) - continue - - if event_type in [EventTypes.STOP, EventTypes.RESTART]: - status = self.session.services.stop_service(node, service) - if status: - fail += f"Stop {service.name}," - if event_type in [EventTypes.START, EventTypes.RESTART]: - status = self.session.services.startup_service(node, service) - if status: - fail += f"Start ({service.name})," - if event_type == EventTypes.PAUSE: - status = self.session.services.validate_service(node, service) - if status: - fail += f"{service.name}," - if event_type == EventTypes.RECONFIGURE: - self.session.services.service_reconfigure(node, service) - - fail_data = "" - if len(fail) > 0: - fail_data += f"Fail:{fail}" - unknown_data = "" - num = len(unknown) - if num > 0: - for u in unknown: - unknown_data += u - if num > 1: - unknown_data += ", " - num -= 1 - logger.warning("Event requested for unknown service(s): %s", unknown_data) - unknown_data = f"Unknown:{unknown_data}" - - event_data = EventData( - node=node_id, - event_type=event_type, - name=name, - data=fail_data + ";" + unknown_data, - time=str(time.monotonic()), - ) - - self.session.broadcast_event(event_data) - - def handle_session_message(self, message): - """ - Session Message handler - - :param core.api.tlv.coreapi.CoreSessionMessage message: session message to handle - :return: reply messages - """ - session_id_str = message.get_tlv(SessionTlvs.NUMBER.value) - session_ids = coreapi.str_to_list(session_id_str) - name_str = message.get_tlv(SessionTlvs.NAME.value) - names = coreapi.str_to_list(name_str) - file_str = 
message.get_tlv(SessionTlvs.FILE.value) - files = coreapi.str_to_list(file_str) - thumb = message.get_tlv(SessionTlvs.THUMB.value) - user = message.get_tlv(SessionTlvs.USER.value) - logger.debug( - "SESSION message flags=0x%x sessions=%s", message.flags, session_id_str - ) - - if message.flags == 0: - for index, session_id in enumerate(session_ids): - session_id = int(session_id) - if session_id == 0: - session = self.session - else: - session = self.coreemu.sessions.get(session_id) - if session is None: - logger.warning("session %s not found", session_id) - continue - if names is not None: - session.name = names[index] - if files is not None: - session.file_path = Path(files[index]) - if thumb: - thumb = Path(thumb) - session.set_thumbnail(thumb) - if user: - session.set_user(user) - elif ( - message.flags & MessageFlags.STRING.value - and not message.flags & MessageFlags.ADD.value - ): - # status request flag: send list of sessions - return (self.session_message(),) - else: - # handle ADD or DEL flags - for session_id in session_ids: - session_id = int(session_id) - session = self.coreemu.sessions.get(session_id) - - if session is None: - logger.info( - "session %s not found (flags=0x%x)", session_id, message.flags - ) - continue - - if message.flags & MessageFlags.ADD.value: - # connect to the first session that exists - logger.info("request to connect to session %s", session_id) - - # remove client from session broker and shutdown if needed - self.remove_session_handlers() - clients = self.session_clients[self.session.id] - clients.remove(self) - if not clients and not self.session.is_active(): - self.coreemu.delete_session(self.session.id) - - # set session to join - self.session = session - - # add client to session broker - clients = self.session_clients.setdefault(self.session.id, []) - clients.append(self) - - # add broadcast handlers - logger.info("adding session broadcast handlers") - self.add_session_handlers() - - if user: - self.session.set_user(user) - - if message.flags & MessageFlags.STRING.value: - self.send_objects() - elif message.flags & MessageFlags.DELETE.value: - # shut down the specified session(s) - logger.info("request to terminate session %s", session_id) - self.coreemu.delete_session(session_id) - else: - logger.warning("unhandled session flags for session %s", session_id) - - return () - - def send_node_emulation_id(self, node_id): - """ - Node emulation id to send. - - :param int node_id: node id to send - :return: nothing - """ - if node_id in self.node_status_request: - tlv_data = b"" - tlv_data += coreapi.CoreNodeTlv.pack(NodeTlvs.NUMBER.value, node_id) - tlv_data += coreapi.CoreNodeTlv.pack(NodeTlvs.EMULATION_ID.value, node_id) - reply = coreapi.CoreNodeMessage.pack( - MessageFlags.ADD.value | MessageFlags.LOCAL.value, tlv_data - ) - - try: - self.sendall(reply) - except IOError: - logger.exception("error sending node emulation id message: %s", node_id) - - del self.node_status_request[node_id] - - def send_objects(self): - """ - Return API messages that describe the current session. 
- """ - # find all nodes and links - all_links = [] - with self.session.nodes_lock: - for node_id in self.session.nodes: - node = self.session.nodes[node_id] - self.session.broadcast_node(node, MessageFlags.ADD) - links = node.links(flags=MessageFlags.ADD) - all_links.extend(links) - - for link in all_links: - self.session.broadcast_link(link) - - # send mobility model info - for node_id in self.session.mobility.nodes(): - mobility_configs = self.session.mobility.get_all_configs(node_id) - for model_name in mobility_configs: - config = mobility_configs[model_name] - model_class = self.session.mobility.models[model_name] - logger.debug( - "mobility config: node(%s) class(%s) values(%s)", - node_id, - model_class, - config, - ) - config_data = ConfigShim.config_data( - 0, node_id, ConfigFlags.UPDATE.value, model_class, config - ) - self.session.broadcast_config(config_data) - - # send emane model configs - for node_id, model_configs in self.session.emane.node_configs.items(): - for model_name, config in model_configs.items(): - model_class = self.session.emane.get_model(model_name) - logger.debug( - "emane config: node(%s) class(%s) values(%s)", - node_id, - model_class, - config, - ) - config_data = ConfigShim.config_data( - 0, node_id, ConfigFlags.UPDATE.value, model_class, config - ) - self.session.broadcast_config(config_data) - - # service customizations - service_configs = self.session.services.all_configs() - for node_id, service in service_configs: - opaque = f"service:{service.name}" - data_types = tuple( - repeat(ConfigDataTypes.STRING.value, len(ServiceShim.keys)) - ) - node = self.session.get_node(node_id, CoreNodeBase) - values = ServiceShim.tovaluelist(node, service) - config_data = ConfigData( - message_type=0, - node=node_id, - object=self.session.services.name, - type=ConfigFlags.UPDATE.value, - data_types=data_types, - data_values=values, - session=self.session.id, - opaque=opaque, - ) - self.session.broadcast_config(config_data) - - for file_name, config_data in self.session.services.all_files(service): - file_data = FileData( - message_type=MessageFlags.ADD, - node=node_id, - name=str(file_name), - type=opaque, - data=str(config_data), - ) - self.session.broadcast_file(file_data) - - # TODO: send location info - - # send hook scripts - for state in sorted(self.session.hooks): - for file_name, config_data in self.session.hooks[state]: - file_data = FileData( - message_type=MessageFlags.ADD, - name=str(file_name), - type=f"hook:{state.value}", - data=str(config_data), - ) - self.session.broadcast_file(file_data) - - # send session configuration - session_config = self.session.options.get_configs() - config_data = ConfigShim.config_data( - 0, None, ConfigFlags.UPDATE.value, self.session.options, session_config - ) - self.session.broadcast_config(config_data) - - # send session metadata - metadata_configs = self.session.metadata - if metadata_configs: - data_values = "|".join( - [f"{x}={metadata_configs[x]}" for x in metadata_configs] - ) - data_types = tuple( - ConfigDataTypes.STRING.value for _ in self.session.metadata - ) - config_data = ConfigData( - message_type=0, - object="metadata", - type=ConfigFlags.NONE.value, - data_types=data_types, - data_values=data_values, - ) - self.session.broadcast_config(config_data) - - node_count = self.session.get_node_count() - logger.info( - "informed GUI about %d nodes and %d links", node_count, len(all_links) - ) - - -class CoreUdpHandler(CoreHandler): - def __init__(self, request, client_address, server): - self.message_handlers = 
{ - MessageTypes.NODE.value: self.handle_node_message, - MessageTypes.LINK.value: self.handle_link_message, - MessageTypes.EXECUTE.value: self.handle_execute_message, - MessageTypes.REGISTER.value: self.handle_register_message, - MessageTypes.CONFIG.value: self.handle_config_message, - MessageTypes.FILE.value: self.handle_file_message, - MessageTypes.INTERFACE.value: self.handle_iface_message, - MessageTypes.EVENT.value: self.handle_event_message, - MessageTypes.SESSION.value: self.handle_session_message, - } - self.session = None - self.coreemu = server.mainserver.coreemu - self.tcp_handler = server.RequestHandlerClass - socketserver.BaseRequestHandler.__init__(self, request, client_address, server) - - def setup(self): - """ - Client has connected, set up a new connection. - :return: nothing - """ - pass - - def receive_message(self): - data = self.request[0] - header = data[: coreapi.CoreMessage.header_len] - if len(header) < coreapi.CoreMessage.header_len: - raise IOError(f"error receiving header (received {len(header)} bytes)") - - message_type, message_flags, message_len = coreapi.CoreMessage.unpack_header( - header - ) - if message_len == 0: - logger.warning("received message with no data") - return - - if len(data) != coreapi.CoreMessage.header_len + message_len: - logger.error( - "received message length does not match received data (%s != %s)", - len(data), - coreapi.CoreMessage.header_len + message_len, - ) - raise IOError - - try: - message_class = coreapi.CLASS_MAP[message_type] - message = message_class( - message_flags, header, data[coreapi.CoreMessage.header_len :] - ) - return message - except KeyError: - message = coreapi.CoreMessage( - message_flags, header, data[coreapi.CoreMessage.header_len :] - ) - message.msgtype = message_type - logger.exception("unimplemented core message type: %s", message.type_str()) - - def handle(self): - message = self.receive_message() - sessions = message.session_numbers() - message.queuedtimes = 0 - if sessions: - for session_id in sessions: - session = self.server.mainserver.coreemu.sessions.get(session_id) - if session: - logger.debug("session handling message: %s", session.id) - self.session = session - self.handle_message(message) - self.broadcast(message) - else: - logger.error( - "session %d in %s message not found.", - session_id, - message.type_str(), - ) - else: - # no session specified, find an existing one - session = None - node_count = 0 - for session_id in self.server.mainserver.coreemu.sessions: - current_session = self.server.mainserver.coreemu.sessions[session_id] - current_node_count = current_session.get_node_count() - if ( - current_session.state == EventTypes.RUNTIME_STATE - and current_node_count > node_count - ): - node_count = current_node_count - session = current_session - - if session or message.message_type == MessageTypes.REGISTER.value: - self.session = session - self.handle_message(message) - self.broadcast(message) - else: - logger.error( - "no active session, dropping %s message.", message.type_str() - ) - - def broadcast(self, message): - if not isinstance(message, (coreapi.CoreNodeMessage, coreapi.CoreLinkMessage)): - return - - clients = self.tcp_handler.session_clients.get(self.session.id, []) - for client in clients: - try: - client.sendall(message.raw_message) - except IOError: - logger.error("error broadcasting") - - def finish(self): - return socketserver.BaseRequestHandler.finish(self) - - def queuemsg(self, msg): - """ - UDP handlers are short-lived and do not have message queues. 
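For reference, receive_message above splits each datagram into a fixed-size header and a TLV payload, then dispatches on the unpacked message type. A standalone sketch of that split, assuming the historical 4-byte "!BBH" layout (type, flags, payload length); the layout and helper name are assumptions for illustration, not taken from this diff:

```python
import struct

# Assumed TLV message header: 1-byte type, 1-byte flags, 2-byte payload
# length, network byte order ("!BBH", 4 bytes total).
HEADER_FORMAT = "!BBH"
HEADER_LEN = struct.calcsize(HEADER_FORMAT)  # 4


def parse_header(data: bytes) -> tuple:
    """Return (message_type, flags, payload_length) for one datagram."""
    if len(data) < HEADER_LEN:
        raise IOError(f"error receiving header (received {len(data)} bytes)")
    return struct.unpack(HEADER_FORMAT, data[:HEADER_LEN])


# A NODE message (type 0x01) carrying the ADD flag (0x01) and no payload.
assert parse_header(bytes([0x01, 0x01, 0x00, 0x00])) == (1, 1, 0)
```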
- - :param bytes msg: message to queue - :return: - """ - raise Exception( - f"Unable to queue {msg} message for later processing using UDP!" - ) - - def sendall(self, data): - """ - Use sendto() on the connectionless UDP socket. - - :param data: - :return: - """ - self.request[1].sendto(data, self.client_address) diff --git a/daemon/core/api/tlv/coreserver.py b/daemon/core/api/tlv/coreserver.py deleted file mode 100644 index c51e8023..00000000 --- a/daemon/core/api/tlv/coreserver.py +++ /dev/null @@ -1,60 +0,0 @@ -""" -Defines core server for handling TCP connections. -""" - -import socketserver - -from core.emulator.coreemu import CoreEmu - - -class CoreServer(socketserver.ThreadingMixIn, socketserver.TCPServer): - """ - TCP server class, manages sessions and spawns request handlers for - incoming connections. - """ - - daemon_threads = True - allow_reuse_address = True - - def __init__(self, server_address, handler_class, config=None): - """ - Server class initialization takes configuration data and calls - the socketserver constructor. - - :param tuple[str, int] server_address: server host and port to use - :param class handler_class: request handler - :param dict config: configuration setting - """ - self.coreemu = CoreEmu(config) - self.config = config - socketserver.TCPServer.__init__(self, server_address, handler_class) - - -class CoreUdpServer(socketserver.ThreadingMixIn, socketserver.UDPServer): - """ - UDP server class, manages sessions and spawns request handlers for - incoming connections. - """ - - daemon_threads = True - allow_reuse_address = True - - def __init__(self, server_address, handler_class, mainserver): - """ - Server class initialization takes configuration data and calls - the SocketServer constructor - - :param server_address: - :param class handler_class: request handler - :param mainserver: - """ - self.mainserver = mainserver - socketserver.UDPServer.__init__(self, server_address, handler_class) - - def start(self): - """ - Thread target to run concurrently with the TCP server. - - :return: nothing - """ - self.serve_forever() diff --git a/daemon/core/api/tlv/dataconversion.py b/daemon/core/api/tlv/dataconversion.py deleted file mode 100644 index d625a615..00000000 --- a/daemon/core/api/tlv/dataconversion.py +++ /dev/null @@ -1,178 +0,0 @@ -""" -Converts CORE data objects into legacy API messages. -""" -import logging -from collections import OrderedDict -from typing import Dict, List - -from core.api.tlv import coreapi, structutils -from core.api.tlv.enumerations import ConfigTlvs, NodeTlvs -from core.config import ConfigGroup, ConfigurableOptions -from core.emulator.data import ConfigData, NodeData - -logger = logging.getLogger(__name__) - - -def convert_node(node_data: NodeData): - """ - Convenience method for converting NodeData to a packed TLV message. 
- - :param core.emulator.data.NodeData node_data: node data to convert - :return: packed node message - """ - node = node_data.node - services = None - if node.services is not None: - services = "|".join([x.name for x in node.services]) - server = None - if node.server is not None: - server = node.server.name - tlv_data = structutils.pack_values( - coreapi.CoreNodeTlv, - [ - (NodeTlvs.NUMBER, node.id), - (NodeTlvs.TYPE, node.apitype.value), - (NodeTlvs.NAME, node.name), - (NodeTlvs.MODEL, node.type), - (NodeTlvs.EMULATION_SERVER, server), - (NodeTlvs.X_POSITION, int(node.position.x)), - (NodeTlvs.Y_POSITION, int(node.position.y)), - (NodeTlvs.CANVAS, node.canvas), - (NodeTlvs.SERVICES, services), - (NodeTlvs.LATITUDE, str(node.position.lat)), - (NodeTlvs.LONGITUDE, str(node.position.lon)), - (NodeTlvs.ALTITUDE, str(node.position.alt)), - (NodeTlvs.ICON, node.icon), - ], - ) - return coreapi.CoreNodeMessage.pack(node_data.message_type.value, tlv_data) - - -def convert_config(config_data): - """ - Convenience method for converting ConfigData to a packed TLV message. - - :param core.emulator.data.ConfigData config_data: config data to convert - :return: packed message - """ - session = None - if config_data.session is not None: - session = str(config_data.session) - tlv_data = structutils.pack_values( - coreapi.CoreConfigTlv, - [ - (ConfigTlvs.NODE, config_data.node), - (ConfigTlvs.OBJECT, config_data.object), - (ConfigTlvs.TYPE, config_data.type), - (ConfigTlvs.DATA_TYPES, config_data.data_types), - (ConfigTlvs.VALUES, config_data.data_values), - (ConfigTlvs.CAPTIONS, config_data.captions), - (ConfigTlvs.BITMAP, config_data.bitmap), - (ConfigTlvs.POSSIBLE_VALUES, config_data.possible_values), - (ConfigTlvs.GROUPS, config_data.groups), - (ConfigTlvs.SESSION, session), - (ConfigTlvs.IFACE_ID, config_data.iface_id), - (ConfigTlvs.NETWORK_ID, config_data.network_id), - (ConfigTlvs.OPAQUE, config_data.opaque), - ], - ) - return coreapi.CoreConfMessage.pack(config_data.message_type, tlv_data) - - -class ConfigShim: - """ - Provides helper methods for converting newer configuration values into TLV - compatible formats. - """ - - @classmethod - def str_to_dict(cls, key_values: str) -> Dict[str, str]: - """ - Converts a TLV key/value string into an ordered mapping. - - :param key_values: - :return: ordered mapping of key/value pairs - """ - key_values = key_values.split("|") - values = OrderedDict() - for key_value in key_values: - key, value = key_value.split("=", 1) - values[key] = value - return values - - @classmethod - def groups_to_str(cls, config_groups: List[ConfigGroup]) -> str: - """ - Converts configuration groups to a TLV formatted string. - - :param config_groups: configuration groups to format - :return: TLV configuration group string - """ - group_strings = [] - for config_group in config_groups: - group_string = ( - f"{config_group.name}:{config_group.start}-{config_group.stop}" - ) - group_strings.append(group_string) - return "|".join(group_strings) - - @classmethod - def config_data( - cls, - flags: int, - node_id: int, - type_flags: int, - configurable_options: ConfigurableOptions, - config: Dict[str, str], - ) -> ConfigData: - """ - Convert this class to a Config API message. Some TLVs are defined - by the class, but node number, conf type flags, and values must - be passed in. 
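ConfigShim.str_to_dict above converts the pipe-delimited `key=value` strings used by the TLV API into an ordered mapping. A self-contained mirror of that conversion, kept independent of the removed core.api.tlv modules, showing the expected input and output:

```python
from collections import OrderedDict
from typing import Dict


def str_to_dict(key_values: str) -> Dict[str, str]:
    """Mirror of ConfigShim.str_to_dict: 'a=1|b=2' -> {'a': '1', 'b': '2'}."""
    values = OrderedDict()
    for key_value in key_values.split("|"):
        key, value = key_value.split("=", 1)
        values[key] = value
    return values


assert str_to_dict("delay=2000|bandwidth=54000000") == {
    "delay": "2000",
    "bandwidth": "54000000",
}
```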
- - :param flags: message flags - :param node_id: node id - :param type_flags: type flags - :param configurable_options: options to create config data for - :param config: configuration values for options - :return: configuration data object - """ - key_values = None - captions = None - data_types = [] - possible_values = [] - logger.debug("configurable: %s", configurable_options) - logger.debug("configuration options: %s", configurable_options.configurations) - logger.debug("configuration data: %s", config) - for configuration in configurable_options.configurations(): - if not captions: - captions = configuration.label - else: - captions += f"|{configuration.label}" - - data_types.append(configuration.type.value) - - options = ",".join(configuration.options) - possible_values.append(options) - - _id = configuration.id - config_value = config.get(_id, configuration.default) - key_value = f"{_id}={config_value}" - if not key_values: - key_values = key_value - else: - key_values += f"|{key_value}" - - groups_str = cls.groups_to_str(configurable_options.config_groups()) - return ConfigData( - message_type=flags, - node=node_id, - object=configurable_options.name, - type=type_flags, - data_types=tuple(data_types), - data_values=key_values, - captions=captions, - possible_values="|".join(possible_values), - bitmap=configurable_options.bitmap, - groups=groups_str, - ) diff --git a/daemon/core/api/tlv/enumerations.py b/daemon/core/api/tlv/enumerations.py deleted file mode 100644 index f2b35703..00000000 --- a/daemon/core/api/tlv/enumerations.py +++ /dev/null @@ -1,212 +0,0 @@ -""" -Enumerations specific to the CORE TLV API. -""" -from enum import Enum - -CORE_API_PORT = 4038 - - -class MessageTypes(Enum): - """ - CORE message types. - """ - - NODE = 0x01 - LINK = 0x02 - EXECUTE = 0x03 - REGISTER = 0x04 - CONFIG = 0x05 - FILE = 0x06 - INTERFACE = 0x07 - EVENT = 0x08 - SESSION = 0x09 - EXCEPTION = 0x0A - - -class NodeTlvs(Enum): - """ - Node type, length, value enumerations. - """ - - NUMBER = 0x01 - TYPE = 0x02 - NAME = 0x03 - IP_ADDRESS = 0x04 - MAC_ADDRESS = 0x05 - IP6_ADDRESS = 0x06 - MODEL = 0x07 - EMULATION_SERVER = 0x08 - SESSION = 0x0A - X_POSITION = 0x20 - Y_POSITION = 0x21 - CANVAS = 0x22 - EMULATION_ID = 0x23 - NETWORK_ID = 0x24 - SERVICES = 0x25 - LATITUDE = 0x30 - LONGITUDE = 0x31 - ALTITUDE = 0x32 - ICON = 0x42 - OPAQUE = 0x50 - - -class LinkTlvs(Enum): - """ - Link type, length, value enumerations. - """ - - N1_NUMBER = 0x01 - N2_NUMBER = 0x02 - DELAY = 0x03 - BANDWIDTH = 0x04 - LOSS = 0x05 - DUP = 0x06 - JITTER = 0x07 - MER = 0x08 - BURST = 0x09 - SESSION = 0x0A - MBURST = 0x10 - TYPE = 0x20 - GUI_ATTRIBUTES = 0x21 - UNIDIRECTIONAL = 0x22 - EMULATION_ID = 0x23 - NETWORK_ID = 0x24 - KEY = 0x25 - IFACE1_NUMBER = 0x30 - IFACE1_IP4 = 0x31 - IFACE1_IP4_MASK = 0x32 - IFACE1_MAC = 0x33 - IFACE1_IP6 = 0x34 - IFACE1_IP6_MASK = 0x35 - IFACE2_NUMBER = 0x36 - IFACE2_IP4 = 0x37 - IFACE2_IP4_MASK = 0x38 - IFACE2_MAC = 0x39 - IFACE2_IP6 = 0x40 - IFACE2_IP6_MASK = 0x41 - IFACE1_NAME = 0x42 - IFACE2_NAME = 0x43 - OPAQUE = 0x50 - - -class ExecuteTlvs(Enum): - """ - Execute type, length, value enumerations. - """ - - NODE = 0x01 - NUMBER = 0x02 - TIME = 0x03 - COMMAND = 0x04 - RESULT = 0x05 - STATUS = 0x06 - SESSION = 0x0A - - -class ConfigTlvs(Enum): - """ - Configuration type, length, value enumerations. 
- """ - - NODE = 0x01 - OBJECT = 0x02 - TYPE = 0x03 - DATA_TYPES = 0x04 - VALUES = 0x05 - CAPTIONS = 0x06 - BITMAP = 0x07 - POSSIBLE_VALUES = 0x08 - GROUPS = 0x09 - SESSION = 0x0A - IFACE_ID = 0x0B - NETWORK_ID = 0x24 - OPAQUE = 0x50 - - -class ConfigFlags(Enum): - """ - Configuration flags. - """ - - NONE = 0x00 - REQUEST = 0x01 - UPDATE = 0x02 - RESET = 0x03 - - -class FileTlvs(Enum): - """ - File type, length, value enumerations. - """ - - NODE = 0x01 - NAME = 0x02 - MODE = 0x03 - NUMBER = 0x04 - TYPE = 0x05 - SOURCE_NAME = 0x06 - SESSION = 0x0A - DATA = 0x10 - COMPRESSED_DATA = 0x11 - - -class InterfaceTlvs(Enum): - """ - Interface type, length, value enumerations. - """ - - NODE = 0x01 - NUMBER = 0x02 - NAME = 0x03 - IP_ADDRESS = 0x04 - MASK = 0x05 - MAC_ADDRESS = 0x06 - IP6_ADDRESS = 0x07 - IP6_MASK = 0x08 - TYPE = 0x09 - SESSION = 0x0A - STATE = 0x0B - EMULATION_ID = 0x23 - NETWORK_ID = 0x24 - - -class EventTlvs(Enum): - """ - Event type, length, value enumerations. - """ - - NODE = 0x01 - TYPE = 0x02 - NAME = 0x03 - DATA = 0x04 - TIME = 0x05 - SESSION = 0x0A - - -class SessionTlvs(Enum): - """ - Session type, length, value enumerations. - """ - - NUMBER = 0x01 - NAME = 0x02 - FILE = 0x03 - NODE_COUNT = 0x04 - DATE = 0x05 - THUMB = 0x06 - USER = 0x07 - OPAQUE = 0x0A - - -class ExceptionTlvs(Enum): - """ - Exception type, length, value enumerations. - """ - - NODE = 0x01 - SESSION = 0x02 - LEVEL = 0x03 - SOURCE = 0x04 - DATE = 0x05 - TEXT = 0x06 - OPAQUE = 0x0A diff --git a/daemon/core/api/tlv/structutils.py b/daemon/core/api/tlv/structutils.py deleted file mode 100644 index d67f388e..00000000 --- a/daemon/core/api/tlv/structutils.py +++ /dev/null @@ -1,45 +0,0 @@ -""" -Utilities for working with python struct data. -""" - -import logging - -logger = logging.getLogger(__name__) - - -def pack_values(clazz, packers): - """ - Pack values for a given legacy class. 
- - :param class clazz: class that will provide a pack method - :param list packers: a list of tuples that are used to pack values and transform them - :return: packed data string of all values - """ - - # iterate through tuples of values to pack - logger.debug("packing: %s", packers) - data = b"" - for packer in packers: - # check if a transformer was provided for valid values - transformer = None - if len(packer) == 2: - tlv_type, value = packer - elif len(packer) == 3: - tlv_type, value, transformer = packer - else: - raise RuntimeError("packer had more than 3 arguments") - - # only pack actual values and avoid packing empty strings - # protobuf defaults to empty strings and does no imply a value to set - if value is None or (isinstance(value, str) and not value): - continue - - # transform values as needed - if transformer: - value = transformer(value) - - # pack and add to existing data - logger.debug("packing: %s - %s type(%s)", tlv_type, value, type(value)) - data += clazz.pack(tlv_type.value, value) - - return data diff --git a/daemon/core/config.py b/daemon/core/config.py index b705e8b6..ae40627e 100644 --- a/daemon/core/config.py +++ b/daemon/core/config.py @@ -44,6 +44,7 @@ class Configuration: label: str = None default: str = "" options: List[str] = field(default_factory=list) + group: str = "Configuration" def __post_init__(self) -> None: self.label = self.label if self.label else self.id @@ -78,6 +79,7 @@ class ConfigBool(Configuration): """ type: ConfigDataTypes = ConfigDataTypes.BOOL + value: bool = False @dataclass @@ -87,6 +89,7 @@ class ConfigFloat(Configuration): """ type: ConfigDataTypes = ConfigDataTypes.FLOAT + value: float = 0.0 @dataclass @@ -96,6 +99,7 @@ class ConfigInt(Configuration): """ type: ConfigDataTypes = ConfigDataTypes.INT32 + value: int = 0 @dataclass @@ -105,6 +109,7 @@ class ConfigString(Configuration): """ type: ConfigDataTypes = ConfigDataTypes.STRING + value: str = "" class ConfigurableOptions: @@ -113,7 +118,6 @@ class ConfigurableOptions: """ name: Optional[str] = None - bitmap: Optional[str] = None options: List[Configuration] = [] @classmethod diff --git a/daemon/core/configservice/base.py b/daemon/core/configservice/base.py index e74b0567..3d61edcc 100644 --- a/daemon/core/configservice/base.py +++ b/daemon/core/configservice/base.py @@ -331,6 +331,33 @@ class ConfigService(abc.ABC): templates[file] = template return templates + def get_rendered_templates(self) -> Dict[str, str]: + templates = {} + data = self.data() + for file in sorted(self.files): + rendered = self._get_rendered_template(file, data) + templates[file] = rendered + return templates + + def _get_rendered_template(self, file: str, data: Dict[str, Any]) -> str: + file_path = Path(file) + template_path = get_template_path(file_path) + if file in self.custom_templates: + text = self.custom_templates[file] + rendered = self.render_text(text, data) + elif self.templates.has_template(template_path): + rendered = self.render_template(template_path, data) + else: + try: + text = self.get_text_template(file) + except Exception as e: + raise ConfigServiceTemplateError( + f"node({self.node.name}) service({self.name}) file({file}) " + f"failure getting template: {e}" + ) + rendered = self.render_text(text, data) + return rendered + def create_files(self) -> None: """ Creates service files inside associated node. 
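The get_rendered_templates/_get_rendered_template additions above centralize how a config service resolves each file: a user-supplied custom template wins, then an on-disk Mako template, then the service's built-in text template. A minimal sketch of that resolution order, substituting plain str.format for Mako so it stays self-contained; the function and argument names here are illustrative, not the real ConfigService API:

```python
from typing import Dict


def render(text: str, data: Dict[str, str]) -> str:
    # stand-in for Mako rendering
    return text.format(**data)


def get_rendered_template(file: str,
                          custom_templates: Dict[str, str],
                          file_templates: Dict[str, str],
                          default_text: str,
                          data: Dict[str, str]) -> str:
    """Resolution order mirrored from _get_rendered_template above."""
    if file in custom_templates:
        return render(custom_templates[file], data)
    if file in file_templates:
        return render(file_templates[file], data)
    return render(default_text, data)


rendered = get_rendered_template(
    "zebra.conf",
    custom_templates={},
    file_templates={"zebra.conf": "hostname {name}"},
    default_text="! empty",
    data={"name": "n1"},
)
assert rendered == "hostname n1"
```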
@@ -342,22 +369,8 @@ class ConfigService(abc.ABC): logger.debug( "node(%s) service(%s) template(%s)", self.node.name, self.name, file ) + rendered = self._get_rendered_template(file, data) file_path = Path(file) - template_path = get_template_path(file_path) - if file in self.custom_templates: - text = self.custom_templates[file] - rendered = self.render_text(text, data) - elif self.templates.has_template(template_path): - rendered = self.render_template(template_path, data) - else: - try: - text = self.get_text_template(file) - except Exception as e: - raise ConfigServiceTemplateError( - f"node({self.node.name}) service({self.name}) file({file}) " - f"failure getting template: {e}" - ) - rendered = self.render_text(text, data) self.node.create_file(file_path, rendered) def run_startup(self, wait: bool) -> None: @@ -459,7 +472,7 @@ class ConfigService(abc.ABC): except Exception: raise CoreError( f"node({self.node.name}) service({self.name}) file({template_path})" - f"{exceptions.text_error_template().render_template()}" + f"{exceptions.text_error_template().render_unicode()}" ) def _define_config(self, configs: List[Configuration]) -> None: diff --git a/daemon/core/configservices/frrservices/services.py b/daemon/core/configservices/frrservices/services.py index f09428ca..7ed965be 100644 --- a/daemon/core/configservices/frrservices/services.py +++ b/daemon/core/configservices/frrservices/services.py @@ -4,14 +4,26 @@ from typing import Any, Dict, List from core.config import Configuration from core.configservice.base import ConfigService, ConfigServiceMode from core.emane.nodes import EmaneNet -from core.nodes.base import CoreNodeBase +from core.nodes.base import CoreNodeBase, NodeBase from core.nodes.interface import DEFAULT_MTU, CoreInterface -from core.nodes.network import WlanNode +from core.nodes.network import PtpNet, WlanNode +from core.nodes.physical import Rj45Node +from core.nodes.wireless import WirelessNode GROUP: str = "FRR" FRR_STATE_DIR: str = "/var/run/frr" +def is_wireless(node: NodeBase) -> bool: + """ + Check if the node is a wireless type node. + + :param node: node to check type for + :return: True if wireless type, False otherwise + """ + return isinstance(node, (WlanNode, EmaneNet, WirelessNode)) + + def has_mtu_mismatch(iface: CoreInterface) -> bool: """ Helper to detect MTU mismatch and add the appropriate FRR @@ -53,6 +65,20 @@ def get_router_id(node: CoreNodeBase) -> str: return "0.0.0.0" +def rj45_check(iface: CoreInterface) -> bool: + """ + Helper to detect whether interface is connected an external RJ45 + link. 
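rj45_check, whose body follows in the hunk below, scans the peer interfaces on the same network and reports whether any of them belongs to an RJ45 node; the OSPFv2 template then stops emitting per-interface settings for such externally bridged links. A self-contained mirror with stub node/interface classes (not the real core.nodes types):

```python
from dataclasses import dataclass, field
from typing import List, Optional


@dataclass
class Node:
    name: str


class Rj45Node(Node):
    pass


@dataclass
class Net:
    ifaces: List["Iface"] = field(default_factory=list)

    def get_ifaces(self) -> List["Iface"]:
        return self.ifaces


@dataclass
class Iface:
    node: Node
    net: Optional[Net] = None


def rj45_check(iface: Iface) -> bool:
    """True if any peer interface on the same net belongs to an RJ45 node."""
    if iface.net:
        for peer_iface in iface.net.get_ifaces():
            if peer_iface is iface:
                continue
            if isinstance(peer_iface.node, Rj45Node):
                return True
    return False


net = Net()
router = Iface(Node("n1"), net)
external = Iface(Rj45Node("eth0"), net)
net.ifaces = [router, external]
assert rj45_check(router) is True
assert rj45_check(external) is False
```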
+ """ + if iface.net: + for peer_iface in iface.net.get_ifaces(): + if peer_iface == iface: + continue + if isinstance(peer_iface.node, Rj45Node): + return True + return False + + class FRRZebra(ConfigService): name: str = "FRRzebra" group: str = GROUP @@ -74,10 +100,10 @@ class FRRZebra(ConfigService): def data(self) -> Dict[str, Any]: frr_conf = self.files[0] - frr_bin_search = self.node.session.options.get_config( + frr_bin_search = self.node.session.options.get( "frr_bin_search", default="/usr/local/bin /usr/bin /usr/lib/frr" ).strip('"') - frr_sbin_search = self.node.session.options.get_config( + frr_sbin_search = self.node.session.options.get( "frr_sbin_search", default="/usr/local/sbin /usr/sbin /usr/lib/frr" ).strip('"') @@ -158,7 +184,7 @@ class FRROspfv2(FrrService, ConfigService): addresses = [] for iface in self.node.get_ifaces(control=False): for ip4 in iface.ip4s: - addresses.append(str(ip4.ip)) + addresses.append(str(ip4)) data = dict(router_id=router_id, addresses=addresses) text = """ router ospf @@ -166,15 +192,31 @@ class FRROspfv2(FrrService, ConfigService): % for addr in addresses: network ${addr} area 0 % endfor + ospf opaque-lsa ! """ return self.render_text(text, data) def frr_iface_config(self, iface: CoreInterface) -> str: - if has_mtu_mismatch(iface): - return "ip ospf mtu-ignore" - else: - return "" + has_mtu = has_mtu_mismatch(iface) + has_rj45 = rj45_check(iface) + is_ptp = isinstance(iface.net, PtpNet) + data = dict(has_mtu=has_mtu, is_ptp=is_ptp, has_rj45=has_rj45) + text = """ + % if has_mtu: + ip ospf mtu-ignore + % endif + % if has_rj45: + <% return STOP_RENDERING %> + % endif + % if is_ptp: + ip ospf network point-to-point + % endif + ip ospf hello-interval 2 + ip ospf dead-interval 6 + ip ospf retransmit-interval 5 + """ + return self.render_text(text, data) class FRROspfv3(FrrService, ConfigService): @@ -324,7 +366,7 @@ class FRRBabel(FrrService, ConfigService): return self.render_text(text, data) def frr_iface_config(self, iface: CoreInterface) -> str: - if isinstance(iface.net, (WlanNode, EmaneNet)): + if is_wireless(iface.net): text = """ babel wireless no babel split-horizon diff --git a/daemon/core/configservices/frrservices/templates/frrboot.sh b/daemon/core/configservices/frrservices/templates/frrboot.sh index db47b6d1..c1c11d28 100644 --- a/daemon/core/configservices/frrservices/templates/frrboot.sh +++ b/daemon/core/configservices/frrservices/templates/frrboot.sh @@ -48,6 +48,10 @@ bootdaemon() flags="$flags -6" fi + if [ "$1" = "ospfd" ]; then + flags="$flags --apiserver" + fi + #force FRR to use CORE generated conf file flags="$flags -d -f $FRR_CONF" $FRR_SBIN_DIR/$1 $flags diff --git a/daemon/core/configservices/nrlservices/services.py b/daemon/core/configservices/nrlservices/services.py index 3f911aef..ba9ef29c 100644 --- a/daemon/core/configservices/nrlservices/services.py +++ b/daemon/core/configservices/nrlservices/services.py @@ -66,7 +66,6 @@ class NrlSmf(ConfigService): modes: Dict[str, Dict[str, str]] = {} def data(self) -> Dict[str, Any]: - has_arouted = "arouted" in self.node.config_services has_nhdp = "NHDP" in self.node.config_services has_olsr = "OLSR" in self.node.config_services ifnames = [] @@ -78,11 +77,7 @@ class NrlSmf(ConfigService): ip4_prefix = f"{ip4.ip}/{24}" break return dict( - has_arouted=has_arouted, - has_nhdp=has_nhdp, - has_olsr=has_olsr, - ifnames=ifnames, - ip4_prefix=ip4_prefix, + has_nhdp=has_nhdp, has_olsr=has_olsr, ifnames=ifnames, ip4_prefix=ip4_prefix ) @@ -167,27 +162,3 @@ class 
MgenActor(ConfigService): validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING default_configs: List[Configuration] = [] modes: Dict[str, Dict[str, str]] = {} - - -class Arouted(ConfigService): - name: str = "arouted" - group: str = GROUP - directories: List[str] = [] - files: List[str] = ["startarouted.sh"] - executables: List[str] = ["arouted"] - dependencies: List[str] = [] - startup: List[str] = ["bash startarouted.sh"] - validate: List[str] = ["pidof arouted"] - shutdown: List[str] = ["pkill arouted"] - validation_mode: ConfigServiceMode = ConfigServiceMode.BLOCKING - default_configs: List[Configuration] = [] - modes: Dict[str, Dict[str, str]] = {} - - def data(self) -> Dict[str, Any]: - ip4_prefix = None - for iface in self.node.get_ifaces(control=False): - ip4 = iface.get_ip4() - if ip4: - ip4_prefix = f"{ip4.ip}/{24}" - break - return dict(ip4_prefix=ip4_prefix) diff --git a/daemon/core/configservices/nrlservices/templates/startarouted.sh b/daemon/core/configservices/nrlservices/templates/startarouted.sh deleted file mode 100644 index 20bcc45e..00000000 --- a/daemon/core/configservices/nrlservices/templates/startarouted.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/sh -for f in "/tmp/${node.name}_smf"; do - count=1 - until [ -e "$f" ]; do - if [ $count -eq 10 ]; then - echo "ERROR: nrlmsf pipe not found: $f" >&2 - exit 1 - fi - sleep 0.1 - count=$(($count + 1)) - done -done - -ip route add ${ip4_prefix} dev lo -arouted instance ${node.name}_smf tap ${node.name}_tap stability 10 2>&1 > /var/log/arouted.log & diff --git a/daemon/core/configservices/nrlservices/templates/startsmf.sh b/daemon/core/configservices/nrlservices/templates/startsmf.sh index 921568de..458b3ee9 100644 --- a/daemon/core/configservices/nrlservices/templates/startsmf.sh +++ b/daemon/core/configservices/nrlservices/templates/startsmf.sh @@ -1,8 +1,5 @@ <% ifaces = ",".join(ifnames) - arouted = "" - if has_arouted: - arouted = "tap %s_tap unicast %s push lo,%s resequence on" % (node.name, ip4_prefix, ifnames[0]) if has_nhdp: flood = "ecds" elif has_olsr: @@ -12,4 +9,4 @@ %> #!/bin/sh # auto-generated by NrlSmf service -nrlsmf instance ${node.name}_smf ${ifaces} ${arouted} ${flood} hash MD5 log /var/log/nrlsmf.log < /dev/null > /dev/null 2>&1 & +nrlsmf instance ${node.name}_smf ${flood} ${ifaces} hash MD5 log /var/log/nrlsmf.log < /dev/null > /dev/null 2>&1 & diff --git a/daemon/core/configservices/quaggaservices/services.py b/daemon/core/configservices/quaggaservices/services.py index a4ee157d..8aa85807 100644 --- a/daemon/core/configservices/quaggaservices/services.py +++ b/daemon/core/configservices/quaggaservices/services.py @@ -5,16 +5,27 @@ from typing import Any, Dict, List from core.config import Configuration from core.configservice.base import ConfigService, ConfigServiceMode from core.emane.nodes import EmaneNet -from core.nodes.base import CoreNodeBase +from core.nodes.base import CoreNodeBase, NodeBase from core.nodes.interface import DEFAULT_MTU, CoreInterface from core.nodes.network import PtpNet, WlanNode from core.nodes.physical import Rj45Node +from core.nodes.wireless import WirelessNode logger = logging.getLogger(__name__) GROUP: str = "Quagga" QUAGGA_STATE_DIR: str = "/var/run/quagga" +def is_wireless(node: NodeBase) -> bool: + """ + Check if the node is a wireless type node. 
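Elsewhere in this diff, the FRR, Quagga, and EMANE code switches from session.options.get_config/get_config_int to the shorter get/get_int accessors. A toy stand-in, not the real options class, just to show the call shape with defaults as used in the Zebra data() methods:

```python
from typing import Dict, Optional


class ToyOptions:
    """Illustrative stand-in for session.options with the renamed accessors."""

    def __init__(self, values: Dict[str, str]):
        self._values = values

    def get(self, name: str, default: Optional[str] = None) -> Optional[str]:
        return self._values.get(name, default)

    def get_int(self, name: str, default: Optional[int] = None) -> Optional[int]:
        value = self._values.get(name)
        return int(value) if value is not None else default


options = ToyOptions({"emane_platform_port": "8100"})
search = options.get("quagga_bin_search", default="/usr/local/bin /usr/bin /usr/lib/quagga")
assert search.strip('"') == "/usr/local/bin /usr/bin /usr/lib/quagga"
assert options.get_int("emane_platform_port", 8100) == 8100
```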
+ + :param node: node to check type for + :return: True if wireless type, False otherwise + """ + return isinstance(node, (WlanNode, EmaneNet, WirelessNode)) + + def has_mtu_mismatch(iface: CoreInterface) -> bool: """ Helper to detect MTU mismatch and add the appropriate OSPF @@ -89,10 +100,10 @@ class Zebra(ConfigService): modes: Dict[str, Dict[str, str]] = {} def data(self) -> Dict[str, Any]: - quagga_bin_search = self.node.session.options.get_config( + quagga_bin_search = self.node.session.options.get( "quagga_bin_search", default="/usr/local/bin /usr/bin /usr/lib/quagga" ).strip('"') - quagga_sbin_search = self.node.session.options.get_config( + quagga_sbin_search = self.node.session.options.get( "quagga_sbin_search", default="/usr/local/sbin /usr/sbin /usr/lib/quagga" ).strip('"') quagga_state_dir = QUAGGA_STATE_DIR @@ -265,7 +276,7 @@ class Ospfv3mdr(Ospfv3): def quagga_iface_config(self, iface: CoreInterface) -> str: config = super().quagga_iface_config(iface) - if isinstance(iface.net, (WlanNode, EmaneNet)): + if is_wireless(iface.net): config = self.clean_text( f""" {config} @@ -295,9 +306,6 @@ class Bgp(QuaggaService, ConfigService): ipv6_routing: bool = True def quagga_config(self) -> str: - return "" - - def quagga_iface_config(self, iface: CoreInterface) -> str: router_id = get_router_id(self.node) text = f""" ! BGP configuration @@ -311,6 +319,9 @@ class Bgp(QuaggaService, ConfigService): """ return self.clean_text(text) + def quagga_iface_config(self, iface: CoreInterface) -> str: + return "" + class Rip(QuaggaService, ConfigService): """ @@ -390,7 +401,7 @@ class Babel(QuaggaService, ConfigService): return self.render_text(text, data) def quagga_iface_config(self, iface: CoreInterface) -> str: - if isinstance(iface.net, (WlanNode, EmaneNet)): + if is_wireless(iface.net): text = """ babel wireless no babel split-horizon diff --git a/daemon/core/emane/emanemanager.py b/daemon/core/emane/emanemanager.py index 4ffed725..294ae528 100644 --- a/daemon/core/emane/emanemanager.py +++ b/daemon/core/emane/emanemanager.py @@ -12,12 +12,12 @@ from core import utils from core.emane.emanemodel import EmaneModel from core.emane.linkmonitor import EmaneLinkMonitor from core.emane.modelmanager import EmaneModelManager -from core.emane.nodes import EmaneNet +from core.emane.nodes import EmaneNet, TunTap from core.emulator.data import LinkData from core.emulator.enumerations import LinkTypes, MessageFlags, RegisterTlvs from core.errors import CoreCommandError, CoreError -from core.nodes.base import CoreNetworkBase, CoreNode, NodeBase -from core.nodes.interface import CoreInterface, TunTap +from core.nodes.base import CoreNode, NodeBase +from core.nodes.interface import CoreInterface from core.xml import emanexml logger = logging.getLogger(__name__) @@ -45,8 +45,6 @@ except ImportError: EventServiceException = None logger.debug("compatible emane python bindings not installed") -DEFAULT_EMANE_PREFIX = "/usr" -DEFAULT_DEV = "ctrl0" DEFAULT_LOG_LEVEL: int = 3 @@ -133,10 +131,10 @@ class EmaneManager: self._emane_nets: Dict[int, EmaneNet] = {} self._emane_node_lock: threading.Lock = threading.Lock() # port numbers are allocated from these counters - self.platformport: int = self.session.options.get_config_int( + self.platformport: int = self.session.options.get_int( "emane_platform_port", 8100 ) - self.transformport: int = self.session.options.get_config_int( + self.transformport: int = self.session.options.get_int( "emane_transform_port", 8200 ) self.doeventloop: bool = False @@ -153,7 +151,7 @@ 
class EmaneManager: self.nem_service: Dict[int, EmaneEventService] = {} def next_nem_id(self, iface: CoreInterface) -> int: - nem_id = self.session.options.get_config_int("nem_id_start") + nem_id = self.session.options.get_int("nem_id_start") while nem_id in self.nems_to_ifaces: nem_id += 1 self.nems_to_ifaces[nem_id] = iface @@ -223,12 +221,10 @@ class EmaneManager: :param iface: interface running emane :return: net, node, or interface model configuration """ - model_name = emane_net.model.name - config = None + model_name = emane_net.wireless_model.name # try to retrieve interface specific configuration - if iface.node_id is not None: - key = utils.iface_config_id(iface.node.id, iface.node_id) - config = self.get_config(key, model_name, default=False) + key = utils.iface_config_id(iface.node.id, iface.id) + config = self.get_config(key, model_name, default=False) # attempt to retrieve node specific config, when iface config is not present if not config: config = self.get_config(iface.node.id, model_name, default=False) @@ -239,7 +235,7 @@ class EmaneManager: config = self.get_config(emane_net.id, model_name, default=False) # return default config values, when a config is not present if not config: - config = emane_net.model.default_values() + config = emane_net.wireless_model.default_values() return config def config_reset(self, node_id: int = None) -> None: @@ -272,7 +268,8 @@ class EmaneManager: nodes = set() for emane_net in self._emane_nets.values(): for iface in emane_net.get_ifaces(): - nodes.add(iface.node) + if isinstance(iface.node, CoreNode): + nodes.add(iface.node) return nodes def setup(self) -> EmaneState: @@ -323,7 +320,7 @@ class EmaneManager: for emane_net, iface in self.get_ifaces(): self.start_iface(emane_net, iface) - def start_iface(self, emane_net: EmaneNet, iface: CoreInterface) -> None: + def start_iface(self, emane_net: EmaneNet, iface: TunTap) -> None: nem_id = self.next_nem_id(iface) nem_port = self.get_nem_port(iface) logger.info( @@ -338,10 +335,10 @@ class EmaneManager: self.start_daemon(iface) self.install_iface(iface, config) - def get_ifaces(self) -> List[Tuple[EmaneNet, CoreInterface]]: + def get_ifaces(self) -> List[Tuple[EmaneNet, TunTap]]: ifaces = [] for emane_net in self._emane_nets.values(): - if not emane_net.model: + if not emane_net.wireless_model: logger.error("emane net(%s) has no model", emane_net.name) continue for iface in emane_net.get_ifaces(): @@ -352,8 +349,9 @@ class EmaneManager: iface.name, ) continue - ifaces.append((emane_net, iface)) - return sorted(ifaces, key=lambda x: (x[1].node.id, x[1].node_id)) + if isinstance(iface, TunTap): + ifaces.append((emane_net, iface)) + return sorted(ifaces, key=lambda x: (x[1].node.id, x[1].id)) def setup_control_channels( self, nem_id: int, iface: CoreInterface, config: Dict[str, str] @@ -384,6 +382,8 @@ class EmaneManager: service = EmaneEventService( self, event_net.brname, eventgroup, int(eventport) ) + if self.doeventmonitor(): + service.start() self.services[event_net.brname] = service self.nem_service[nem_id] = service except EventServiceException: @@ -484,7 +484,7 @@ class EmaneManager: logger.exception("error writing to emane nem file") def links_enabled(self) -> bool: - return self.session.options.get_config_int("link_enabled") == 1 + return self.session.options.get_int("link_enabled") == 1 def poststartup(self) -> None: """ @@ -498,7 +498,7 @@ class EmaneManager: "post startup for emane node: %s - %s", emane_net.id, emane_net.name ) for iface in emane_net.get_ifaces(): - 
emane_net.model.post_startup(iface) + emane_net.wireless_model.post_startup(iface) if events_enabled: iface.setposition() @@ -550,9 +550,11 @@ class EmaneManager: emane_net = self._emane_nets[node_id] logger.debug("checking emane model for node: %s", node_id) # skip nodes that already have a model set - if emane_net.model: + if emane_net.wireless_model: logger.debug( - "node(%s) already has model(%s)", emane_net.id, emane_net.model.name + "node(%s) already has model(%s)", + emane_net.id, + emane_net.wireless_model.name, ) continue # set model configured for node, due to legacy messaging configuration @@ -602,8 +604,8 @@ class EmaneManager: """ node = iface.node loglevel = str(DEFAULT_LOG_LEVEL) - cfgloglevel = self.session.options.get_config_int("emane_log_level") - realtime = self.session.options.get_config_bool("emane_realtime", default=True) + cfgloglevel = self.session.options.get_int("emane_log_level", 2) + realtime = self.session.options.get_bool("emane_realtime", True) if cfgloglevel: logger.info("setting user-defined emane log level: %d", cfgloglevel) loglevel = str(cfgloglevel) @@ -622,9 +624,9 @@ class EmaneManager: args = f"{emanecmd} -f {log_file} {platform_xml}" node.host_cmd(args, cwd=self.session.directory) - def install_iface(self, iface: CoreInterface, config: Dict[str, str]) -> None: + def install_iface(self, iface: TunTap, config: Dict[str, str]) -> None: external = config.get("external", "0") - if isinstance(iface, TunTap) and external == "0": + if external == "0": iface.set_ips() # at this point we register location handlers for generating # EMANE location events @@ -636,20 +638,13 @@ class EmaneManager: """ Returns boolean whether or not EMANE events will be monitored. """ - # this support must be explicitly turned on; by default, CORE will - # generate the EMANE events when nodes are moved - return self.session.options.get_config_bool("emane_event_monitor") + return self.session.options.get_bool("emane_event_monitor", False) def genlocationevents(self) -> bool: """ Returns boolean whether or not EMANE events will be generated. 
""" - # By default, CORE generates EMANE location events when nodes - # are moved; this can be explicitly disabled in core.conf - tmp = self.session.options.get_config_bool("emane_event_generate") - if tmp is None: - tmp = not self.doeventmonitor() - return tmp + return self.session.options.get_bool("emane_event_generate", True) def handlelocationevent(self, rxnemid: int, eid: int, data: str) -> None: """ @@ -732,9 +727,6 @@ class EmaneManager: self.session.broadcast_node(node) return True - def is_emane_net(self, net: Optional[CoreNetworkBase]) -> bool: - return isinstance(net, EmaneNet) - def emanerunning(self, node: CoreNode) -> bool: """ Return True if an EMANE process associated with the given node is running, diff --git a/daemon/core/emane/linkmonitor.py b/daemon/core/emane/linkmonitor.py index 9b18bae2..5ed6d49d 100644 --- a/daemon/core/emane/linkmonitor.py +++ b/daemon/core/emane/linkmonitor.py @@ -190,9 +190,9 @@ class EmaneLinkMonitor: def start(self) -> None: options = self.emane_manager.session.options - self.loss_threshold = options.get_config_int("loss_threshold") - self.link_interval = options.get_config_int("link_interval") - self.link_timeout = options.get_config_int("link_timeout") + self.loss_threshold = options.get_int("loss_threshold") + self.link_interval = options.get_int("link_interval") + self.link_timeout = options.get_int("link_timeout") self.initialize() if not self.clients: logger.info("no valid emane models to monitor links") diff --git a/daemon/core/emane/nodes.py b/daemon/core/emane/nodes.py index 76a93767..1a0b6e75 100644 --- a/daemon/core/emane/nodes.py +++ b/daemon/core/emane/nodes.py @@ -4,19 +4,15 @@ share the same MAC+PHY model. """ import logging -from typing import TYPE_CHECKING, Dict, List, Optional, Type +import time +from dataclasses import dataclass +from typing import TYPE_CHECKING, Callable, Dict, List, Optional, Type, Union from core.emulator.data import InterfaceData, LinkData, LinkOptions from core.emulator.distributed import DistributedServer -from core.emulator.enumerations import ( - EventTypes, - LinkTypes, - MessageFlags, - NodeTypes, - RegisterTlvs, -) -from core.errors import CoreError -from core.nodes.base import CoreNetworkBase, CoreNode +from core.emulator.enumerations import EventTypes, MessageFlags, RegisterTlvs +from core.errors import CoreCommandError, CoreError +from core.nodes.base import CoreNetworkBase, CoreNode, NodeOptions from core.nodes.interface import CoreInterface logger = logging.getLogger(__name__) @@ -24,10 +20,7 @@ logger = logging.getLogger(__name__) if TYPE_CHECKING: from core.emane.emanemodel import EmaneModel from core.emulator.session import Session - from core.location.mobility import WirelessModel, WayPointMobility - - OptionalEmaneModel = Optional[EmaneModel] - WirelessModelType = Type[WirelessModel] + from core.location.mobility import WayPointMobility try: from emane.events import LocationEvent @@ -39,6 +32,120 @@ except ImportError: logger.debug("compatible emane python bindings not installed") +class TunTap(CoreInterface): + """ + TUN/TAP virtual device in TAP mode + """ + + def __init__( + self, + _id: int, + name: str, + localname: str, + use_ovs: bool, + node: CoreNode = None, + server: "DistributedServer" = None, + ) -> None: + super().__init__(_id, name, localname, use_ovs, node=node, server=server) + self.node: CoreNode = node + + def startup(self) -> None: + """ + Startup logic for a tunnel tap. 
+ + :return: nothing + """ + self.up = True + + def shutdown(self) -> None: + """ + Shutdown functionality for a tunnel tap. + + :return: nothing + """ + if not self.up: + return + self.up = False + + def waitfor( + self, func: Callable[[], int], attempts: int = 10, maxretrydelay: float = 0.25 + ) -> bool: + """ + Wait for func() to return zero with exponential backoff. + + :param func: function to wait for a result of zero + :param attempts: number of attempts to wait for a zero result + :param maxretrydelay: maximum retry delay + :return: True if wait succeeded, False otherwise + """ + delay = 0.01 + result = False + for i in range(1, attempts + 1): + r = func() + if r == 0: + result = True + break + msg = f"attempt {i} failed with nonzero exit status {r}" + if i < attempts + 1: + msg += ", retrying..." + logger.info(msg) + time.sleep(delay) + delay += delay + if delay > maxretrydelay: + delay = maxretrydelay + else: + msg += ", giving up" + logger.info(msg) + return result + + def nodedevexists(self) -> int: + """ + Checks if device exists. + + :return: 0 if device exists, 1 otherwise + """ + try: + self.node.node_net_client.device_show(self.name) + return 0 + except CoreCommandError: + return 1 + + def waitfordevicenode(self) -> None: + """ + Check for presence of a node device - tap device may not appear right away waits. + + :return: nothing + """ + logger.debug("waiting for device node: %s", self.name) + count = 0 + while True: + result = self.waitfor(self.nodedevexists) + if result: + break + should_retry = count < 5 + is_emane_running = self.node.session.emane.emanerunning(self.node) + if all([should_retry, is_emane_running]): + count += 1 + else: + raise RuntimeError("node device failed to exist") + + def set_ips(self) -> None: + """ + Set interface ip addresses. + + :return: nothing + """ + self.waitfordevicenode() + for ip in self.ips(): + self.node.node_net_client.create_address(self.name, str(ip)) + + +@dataclass +class EmaneOptions(NodeOptions): + emane_model: str = None + """name of emane model to associate an emane network to""" + + class EmaneNet(CoreNetworkBase): """ EMANE node contains NEM configuration and causes connected nodes @@ -46,22 +153,26 @@ class EmaneNet(CoreNetworkBase): Emane controller object that exists in a session. """ - apitype: NodeTypes = NodeTypes.EMANE - linktype: LinkTypes = LinkTypes.WIRED - type: str = "wlan" - has_custom_iface: bool = True - def __init__( self, session: "Session", _id: int = None, name: str = None, server: DistributedServer = None, + options: EmaneOptions = None, ) -> None: - super().__init__(session, _id, name, server) + options = options or EmaneOptions() + super().__init__(session, _id, name, server, options) self.conf: str = "" - self.model: "OptionalEmaneModel" = None self.mobility: Optional[WayPointMobility] = None + model_class = self.session.emane.get_model(options.emane_model) + self.wireless_model: Optional["EmaneModel"] = model_class(self.session, self.id) + if self.session.state == EventTypes.RUNTIME_STATE: + self.session.emane.add_node(self) + + @classmethod + def create_options(cls) -> EmaneOptions: + return EmaneOptions() def linkconfig( self, iface: CoreInterface, options: LinkOptions, iface2: CoreInterface = None @@ -69,18 +180,15 @@ class EmaneNet(CoreNetworkBase): """ The CommEffect model supports link configuration. 
""" - if not self.model: + if not self.wireless_model: return - self.model.linkconfig(iface, options, iface2) - - def config(self, conf: str) -> None: - self.conf = conf + self.wireless_model.linkconfig(iface, options, iface2) def startup(self) -> None: - pass + self.up = True def shutdown(self) -> None: - pass + self.up = False def link(self, iface1: CoreInterface, iface2: CoreInterface) -> None: pass @@ -88,30 +196,37 @@ class EmaneNet(CoreNetworkBase): def unlink(self, iface1: CoreInterface, iface2: CoreInterface) -> None: pass - def linknet(self, net: "CoreNetworkBase") -> CoreInterface: - raise CoreError("emane networks cannot be linked to other networks") - def updatemodel(self, config: Dict[str, str]) -> None: - if not self.model: - raise CoreError(f"no model set to update for node({self.name})") - logger.info("node(%s) updating model(%s): %s", self.id, self.model.name, config) - self.model.update_config(config) + """ + Update configuration for the current model. - def setmodel(self, model: "WirelessModelType", config: Dict[str, str]) -> None: + :param config: configuration to update model with + :return: nothing + """ + if not self.wireless_model: + raise CoreError(f"no model set to update for node({self.name})") + logger.info( + "node(%s) updating model(%s): %s", self.id, self.wireless_model.name, config + ) + self.wireless_model.update_config(config) + + def setmodel( + self, + model: Union[Type["EmaneModel"], Type["WayPointMobility"]], + config: Dict[str, str], + ) -> None: """ set the EmaneModel associated with this node """ if model.config_type == RegisterTlvs.WIRELESS: - # EmaneModel really uses values from ConfigurableManager - # when buildnemxml() is called, not during init() - self.model = model(session=self.session, _id=self.id) - self.model.update_config(config) + self.wireless_model = model(session=self.session, _id=self.id) + self.wireless_model.update_config(config) elif model.config_type == RegisterTlvs.MOBILITY: self.mobility = model(session=self.session, _id=self.id) self.mobility.update_config(config) def links(self, flags: MessageFlags = MessageFlags.NONE) -> List[LinkData]: - links = super().links(flags) + links = [] emane_manager = self.session.emane # gather current emane links nem_ids = set() @@ -132,22 +247,44 @@ class EmaneNet(CoreNetworkBase): # ignore incomplete links if (nem2, nem1) not in emane_links: continue - link = emane_manager.get_nem_link(nem1, nem2) + link = emane_manager.get_nem_link(nem1, nem2, flags) if link: links.append(link) return links - def custom_iface(self, node: CoreNode, iface_data: InterfaceData) -> CoreInterface: - # TUN/TAP is not ready for addressing yet; the device may - # take some time to appear, and installing it into a - # namespace after it has been bound removes addressing; - # save addresses with the interface now - iface_id = node.newtuntap(iface_data.id, iface_data.name) - node.attachnet(iface_id, self) - iface = node.get_iface(iface_id) - iface.set_mac(iface_data.mac) - for ip in iface_data.get_ips(): - iface.add_ip(ip) + def create_tuntap(self, node: CoreNode, iface_data: InterfaceData) -> CoreInterface: + """ + Create a tuntap interface for the provided node. 
+ + :param node: node to create tuntap interface for + :param iface_data: interface data to create interface with + :return: created tuntap interface + """ + with node.lock: + if iface_data.id is not None and iface_data.id in node.ifaces: + raise CoreError( + f"node({self.id}) interface({iface_data.id}) already exists" + ) + iface_id = ( + iface_data.id if iface_data.id is not None else node.next_iface_id() + ) + name = iface_data.name if iface_data.name is not None else f"eth{iface_id}" + session_id = self.session.short_session_id() + localname = f"tap{node.id}.{iface_id}.{session_id}" + iface = TunTap(iface_id, name, localname, self.session.use_ovs(), node=node) + if iface_data.mac: + iface.set_mac(iface_data.mac) + for ip in iface_data.get_ips(): + iface.add_ip(ip) + node.ifaces[iface_id] = iface + self.attach(iface) + if self.up: + iface.startup() if self.session.state == EventTypes.RUNTIME_STATE: self.session.emane.start_iface(self, iface) return iface + + def adopt_iface(self, iface: CoreInterface, name: str) -> None: + raise CoreError( + f"emane network({self.name}) do not support adopting interfaces" + ) diff --git a/daemon/core/emulator/coreemu.py b/daemon/core/emulator/coreemu.py index 9baadb54..a4b0be6a 100644 --- a/daemon/core/emulator/coreemu.py +++ b/daemon/core/emulator/coreemu.py @@ -1,8 +1,5 @@ -import atexit import logging import os -import signal -import sys from pathlib import Path from typing import Dict, List, Type @@ -18,25 +15,6 @@ logger = logging.getLogger(__name__) DEFAULT_EMANE_PREFIX: str = "/usr" -def signal_handler(signal_number: int, _) -> None: - """ - Handle signals and force an exit with cleanup. - - :param signal_number: signal number - :param _: ignored - :return: nothing - """ - logger.info("caught signal: %s", signal_number) - sys.exit(signal_number) - - -signal.signal(signal.SIGHUP, signal_handler) -signal.signal(signal.SIGINT, signal_handler) -signal.signal(signal.SIGTERM, signal_handler) -signal.signal(signal.SIGUSR1, signal_handler) -signal.signal(signal.SIGUSR2, signal_handler) - - class CoreEmu: """ Provides logic for creating and configuring CORE sessions and the nodes within them. @@ -70,9 +48,6 @@ class CoreEmu: # check executables exist on path self._validate_env() - # catch exit event - atexit.register(self.shutdown) - def _validate_env(self) -> None: """ Validates executables CORE depends on exist on path. 
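Note that CoreEmu no longer installs signal handlers or an atexit hook, so a script driving the daemon API is now responsible for its own cleanup. A minimal sketch of that pattern, assuming only the CoreEmu entry points shown in this diff:

import signal
import sys

from core.emulator.coreemu import CoreEmu

coreemu = CoreEmu()

def cleanup(signum, frame):
    # shut down all sessions explicitly; CoreEmu no longer does this on exit
    coreemu.shutdown()
    sys.exit(signum)

signal.signal(signal.SIGINT, cleanup)
signal.signal(signal.SIGTERM, cleanup)

session = coreemu.create_session()
try:
    ...  # build and run the scenario here
finally:
    coreemu.shutdown()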
@@ -140,10 +115,8 @@ class CoreEmu: :return: nothing """ logger.info("shutting down all sessions") - sessions = self.sessions.copy() - self.sessions.clear() - for _id in sessions: - session = sessions[_id] + while self.sessions: + _, session = self.sessions.popitem() session.shutdown() def create_session(self, _id: int = None, _cls: Type[Session] = Session) -> Session: diff --git a/daemon/core/emulator/data.py b/daemon/core/emulator/data.py index 28dcb813..de5b3559 100644 --- a/daemon/core/emulator/data.py +++ b/daemon/core/emulator/data.py @@ -92,6 +92,10 @@ class NodeOptions: image: str = None emane: str = None legacy: bool = False + # src, dst + binds: List[Tuple[str, str]] = field(default_factory=list) + # src, dst, unique, delete + volumes: List[Tuple[str, str, bool, bool]] = field(default_factory=list) def set_position(self, x: float, y: float) -> None: """ diff --git a/daemon/core/emulator/distributed.py b/daemon/core/emulator/distributed.py index 2b4830ad..1d09ce1e 100644 --- a/daemon/core/emulator/distributed.py +++ b/daemon/core/emulator/distributed.py @@ -15,6 +15,7 @@ from fabric import Connection from invoke import UnexpectedExit from core import utils +from core.emulator.links import CoreLink from core.errors import CoreCommandError, CoreError from core.executables import get_requirements from core.nodes.interface import GreTap @@ -124,9 +125,7 @@ class DistributedController: self.session: "Session" = session self.servers: Dict[str, DistributedServer] = OrderedDict() self.tunnels: Dict[int, Tuple[GreTap, GreTap]] = {} - self.address: str = self.session.options.get_config( - "distributed_address", default=None - ) + self.address: str = self.session.options.get("distributed_address") def add_server(self, name: str, host: str) -> None: """ @@ -183,21 +182,36 @@ class DistributedController: def start(self) -> None: """ - Start distributed network tunnels. + Start distributed network tunnels for control networks. :return: nothing """ - mtu = self.session.options.get_config_int("mtu") + mtu = self.session.options.get_int("mtu") for node_id in self.session.nodes: node = self.session.nodes[node_id] - if not isinstance(node, CoreNetwork): - continue - if isinstance(node, CtrlNet) and node.serverintf is not None: + if not isinstance(node, CtrlNet) or node.serverintf is not None: continue for name in self.servers: server = self.servers[name] self.create_gre_tunnel(node, server, mtu, True) + def create_gre_tunnels(self, core_link: CoreLink) -> None: + """ + Creates gre tunnels for a core link with a ptp network connection. + + :param core_link: core link to create gre tunnel for + :return: nothing + """ + if not self.servers: + return + if not core_link.ptp: + raise CoreError( + "attempted to create gre tunnel for core link without a ptp network" + ) + mtu = self.session.options.get_int("mtu") + for server in self.servers.values(): + self.create_gre_tunnel(core_link.ptp, server, mtu, True) + def create_gre_tunnel( self, node: CoreNetwork, server: DistributedServer, mtu: int, start: bool ) -> Tuple[GreTap, GreTap]: diff --git a/daemon/core/emulator/enumerations.py b/daemon/core/emulator/enumerations.py index 83e7bffd..e04d382b 100644 --- a/daemon/core/emulator/enumerations.py +++ b/daemon/core/emulator/enumerations.py @@ -20,6 +20,17 @@ class MessageFlags(Enum): TTY = 0x40 +class ConfigFlags(Enum): + """ + Configuration flags. + """ + + NONE = 0x00 + REQUEST = 0x01 + UPDATE = 0x02 + RESET = 0x03 + + class NodeTypes(Enum): """ Node types. 
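A short illustration of how one might populate the new bind and volume fields added to the NodeOptions dataclass above; the tuple layouts follow the inline comments, and the paths and names here are made-up examples:

from core.emulator.data import NodeOptions

# hypothetical host paths; binds map a host directory into the node, while
# volumes additionally carry the unique and delete flags
options = NodeOptions(
    image="ubuntu:22.04",
    binds=[("/opt/configs/n1", "/etc/app")],            # (src, dst)
    volumes=[("n1-data", "/var/lib/app", True, True)],  # (src, dst, unique, delete)
)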
@@ -38,6 +49,7 @@ class NodeTypes(Enum): CONTROL_NET = 13 DOCKER = 15 LXC = 16 + WIRELESS = 17 class LinkTypes(Enum): diff --git a/daemon/core/emulator/links.py b/daemon/core/emulator/links.py new file mode 100644 index 00000000..22f75b98 --- /dev/null +++ b/daemon/core/emulator/links.py @@ -0,0 +1,256 @@ +""" +Provides functionality for maintaining information about known links +for a session. +""" + +import logging +from dataclasses import dataclass +from typing import Dict, Optional, Tuple, ValuesView + +from core.emulator.data import LinkData, LinkOptions +from core.emulator.enumerations import LinkTypes, MessageFlags +from core.errors import CoreError +from core.nodes.base import NodeBase +from core.nodes.interface import CoreInterface +from core.nodes.network import PtpNet + +logger = logging.getLogger(__name__) +LinkKeyType = Tuple[int, Optional[int], int, Optional[int]] + + +def create_key( + node1: NodeBase, + iface1: Optional[CoreInterface], + node2: NodeBase, + iface2: Optional[CoreInterface], +) -> LinkKeyType: + """ + Creates a unique key for tracking links. + + :param node1: first node in link + :param iface1: node1 interface + :param node2: second node in link + :param iface2: node2 interface + :return: link key + """ + iface1_id = iface1.id if iface1 else None + iface2_id = iface2.id if iface2 else None + if node1.id < node2.id: + return node1.id, iface1_id, node2.id, iface2_id + else: + return node2.id, iface2_id, node1.id, iface1_id + + +@dataclass +class CoreLink: + """ + Provides a core link data structure. + """ + + node1: NodeBase + iface1: Optional[CoreInterface] + node2: NodeBase + iface2: Optional[CoreInterface] + ptp: PtpNet = None + label: str = None + color: str = None + + def key(self) -> LinkKeyType: + """ + Retrieve the key for this link. + + :return: link key + """ + return create_key(self.node1, self.iface1, self.node2, self.iface2) + + def is_unidirectional(self) -> bool: + """ + Checks if this link is considered unidirectional, due to current + iface configurations. + + :return: True if unidirectional, False otherwise + """ + unidirectional = False + if self.iface1 and self.iface2: + unidirectional = self.iface1.options != self.iface2.options + return unidirectional + + def options(self) -> LinkOptions: + """ + Retrieve the options for this link. + + :return: options for this link + """ + if self.is_unidirectional(): + options = self.iface1.options + else: + if self.iface1: + options = self.iface1.options + else: + options = self.iface2.options + return options + + def get_data(self, message_type: MessageFlags, source: str = None) -> LinkData: + """ + Create link data for this link. + + :param message_type: link data message type + :param source: source for this data + :return: link data + """ + iface1_data = self.iface1.get_data() if self.iface1 else None + iface2_data = self.iface2.get_data() if self.iface2 else None + return LinkData( + message_type=message_type, + type=LinkTypes.WIRED, + node1_id=self.node1.id, + node2_id=self.node2.id, + iface1=iface1_data, + iface2=iface2_data, + options=self.options(), + label=self.label, + color=self.color, + source=source, + ) + + def get_data_unidirectional(self, source: str = None) -> LinkData: + """ + Create other unidirectional link data. 
+ + :param source: source for this data + :return: unidirectional link data + """ + iface1_data = self.iface1.get_data() if self.iface1 else None + iface2_data = self.iface2.get_data() if self.iface2 else None + return LinkData( + message_type=MessageFlags.NONE, + type=LinkTypes.WIRED, + node1_id=self.node2.id, + node2_id=self.node1.id, + iface1=iface2_data, + iface2=iface1_data, + options=self.iface2.options, + label=self.label, + color=self.color, + source=source, + ) + + +class LinkManager: + """ + Provides core link management. + """ + + def __init__(self) -> None: + """ + Create a LinkManager instance. + """ + self._links: Dict[LinkKeyType, CoreLink] = {} + self._node_links: Dict[int, Dict[LinkKeyType, CoreLink]] = {} + + def add(self, core_link: CoreLink) -> None: + """ + Add a core link to be tracked. + + :param core_link: link to track + :return: nothing + """ + node1, iface1 = core_link.node1, core_link.iface1 + node2, iface2 = core_link.node2, core_link.iface2 + if core_link.key() in self._links: + raise CoreError( + f"node1({node1.name}) iface1({iface1.id}) " + f"node2({node2.name}) iface2({iface2.id}) link already exists" + ) + logger.info( + "adding link from node(%s:%s) to node(%s:%s)", + node1.name, + iface1.name if iface1 else None, + node2.name, + iface2.name if iface2 else None, + ) + self._links[core_link.key()] = core_link + node1_links = self._node_links.setdefault(node1.id, {}) + node1_links[core_link.key()] = core_link + node2_links = self._node_links.setdefault(node2.id, {}) + node2_links[core_link.key()] = core_link + + def delete( + self, + node1: NodeBase, + iface1: Optional[CoreInterface], + node2: NodeBase, + iface2: Optional[CoreInterface], + ) -> CoreLink: + """ + Remove a link from being tracked. + + :param node1: first node in link + :param iface1: node1 interface + :param node2: second node in link + :param iface2: node2 interface + :return: removed core link + """ + key = create_key(node1, iface1, node2, iface2) + if key not in self._links: + raise CoreError( + f"node1({node1.name}) iface1({iface1.id}) " + f"node2({node2.name}) iface2({iface2.id}) is not linked" + ) + logger.info( + "deleting link from node(%s:%s) to node(%s:%s)", + node1.name, + iface1.name if iface1 else None, + node2.name, + iface2.name if iface2 else None, + ) + node1_links = self._node_links[node1.id] + node1_links.pop(key) + node2_links = self._node_links[node2.id] + node2_links.pop(key) + return self._links.pop(key) + + def reset(self) -> None: + """ + Resets and clears all tracking information. + + :return: nothing + """ + self._links.clear() + self._node_links.clear() + + def get_link( + self, + node1: NodeBase, + iface1: Optional[CoreInterface], + node2: NodeBase, + iface2: Optional[CoreInterface], + ) -> Optional[CoreLink]: + """ + Retrieve a link for provided values. + + :param node1: first node in link + :param iface1: interface for node1 + :param node2: second node in link + :param iface2: interface for node2 + :return: core link if present, None otherwise + """ + key = create_key(node1, iface1, node2, iface2) + return self._links.get(key) + + def links(self) -> ValuesView[CoreLink]: + """ + Retrieve all known links + + :return: iterator for all known links + """ + return self._links.values() + + def node_links(self, node: NodeBase) -> ValuesView[CoreLink]: + """ + Retrieve all links for a given node. 
+ + :param node: node to get links for + :return: node links + """ + return self._node_links.get(node.id, {}).values() diff --git a/daemon/core/emulator/session.py b/daemon/core/emulator/session.py index 219555d5..436f80c1 100644 --- a/daemon/core/emulator/session.py +++ b/daemon/core/emulator/session.py @@ -14,7 +14,7 @@ import tempfile import threading import time from pathlib import Path -from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Type, TypeVar, Union +from typing import Callable, Dict, List, Optional, Set, Tuple, Type, TypeVar, Union from core import constants, utils from core.configservice.manager import ConfigServiceManager @@ -29,22 +29,21 @@ from core.emulator.data import ( LinkData, LinkOptions, NodeData, - NodeOptions, ) from core.emulator.distributed import DistributedController from core.emulator.enumerations import ( EventTypes, ExceptionLevels, - LinkTypes, MessageFlags, NodeTypes, ) +from core.emulator.links import CoreLink, LinkManager from core.emulator.sessionconfig import SessionConfig from core.errors import CoreError from core.location.event import EventLoop from core.location.geo import GeoLocation from core.location.mobility import BasicRangeModel, MobilityManager -from core.nodes.base import CoreNetworkBase, CoreNode, CoreNodeBase, NodeBase +from core.nodes.base import CoreNode, CoreNodeBase, NodeBase, NodeOptions, Position from core.nodes.docker import DockerNode from core.nodes.interface import DEFAULT_MTU, CoreInterface from core.nodes.lxd import LxcNode @@ -58,6 +57,7 @@ from core.nodes.network import ( WlanNode, ) from core.nodes.physical import PhysicalNode, Rj45Node +from core.nodes.wireless import WirelessNode from core.plugins.sdt import Sdt from core.services.coreservices import CoreServices from core.xml import corexml, corexmldeployment @@ -80,12 +80,18 @@ NODES: Dict[NodeTypes, Type[NodeBase]] = { NodeTypes.CONTROL_NET: CtrlNet, NodeTypes.DOCKER: DockerNode, NodeTypes.LXC: LxcNode, + NodeTypes.WIRELESS: WirelessNode, } NODES_TYPE: Dict[Type[NodeBase], NodeTypes] = {NODES[x]: x for x in NODES} CONTAINER_NODES: Set[Type[NodeBase]] = {DockerNode, LxcNode} CTRL_NET_ID: int = 9001 LINK_COLORS: List[str] = ["green", "blue", "orange", "purple", "turquoise"] NT: TypeVar = TypeVar("NT", bound=NodeBase) +WIRELESS_TYPE: Tuple[Type[WlanNode], Type[EmaneNet], Type[WirelessNode]] = ( + WlanNode, + EmaneNet, + WirelessNode, +) class Session: @@ -119,7 +125,8 @@ class Session: # dict of nodes: all nodes and nets self.nodes: Dict[int, NodeBase] = {} - self.nodes_lock = threading.Lock() + self.nodes_lock: threading.Lock = threading.Lock() + self.link_manager: LinkManager = LinkManager() # states and hooks handlers self.state: EventTypes = EventTypes.DEFINITION_STATE @@ -139,12 +146,7 @@ class Session: self.config_handlers: List[Callable[[ConfigData], None]] = [] # session options/metadata - self.options: SessionConfig = SessionConfig() - if not config: - config = {} - for key in config: - value = config[key] - self.options.set_config(key, value) + self.options: SessionConfig = SessionConfig(config) self.metadata: Dict[str, str] = {} # distributed support and logic @@ -187,42 +189,47 @@ class Session: raise CoreError(f"invalid node class: {_class}") return node_type - def _link_wireless( - self, node1: CoreNodeBase, node2: CoreNodeBase, connect: bool + def use_ovs(self) -> bool: + return self.options.get_int("ovs") == 1 + + def linked( + self, node1_id: int, node2_id: int, iface1_id: int, iface2_id: int, linked: bool ) -> None: """ - Objects to 
deal with when connecting/disconnecting wireless links. + Links or unlinks wired core link interfaces from being connected to the same + bridge. - :param node1: node one for wireless link - :param node2: node two for wireless link - :param connect: link interfaces if True, unlink otherwise + :param node1_id: first node in link + :param node2_id: second node in link + :param iface1_id: node1 interface + :param iface2_id: node2 interface + :param linked: True if interfaces should be connected, False for disconnected :return: nothing - :raises core.CoreError: when objects to link is less than 2, or no common - networks are found """ + node1 = self.get_node(node1_id, NodeBase) + node2 = self.get_node(node2_id, NodeBase) logger.info( - "handling wireless linking node1(%s) node2(%s): %s", + "link node(%s):interface(%s) node(%s):interface(%s) linked(%s)", node1.name, + iface1_id, node2.name, - connect, + iface2_id, + linked, ) - common_networks = node1.commonnets(node1) - if not common_networks: - raise CoreError("no common network found for wireless link/unlink") - for common_network, iface1, iface2 in common_networks: - if not isinstance(common_network, (WlanNode, EmaneNet)): - logger.info( - "skipping common network that is not wireless/emane: %s", - common_network, - ) - continue - if connect: - common_network.link(iface1, iface2) - else: - common_network.unlink(iface1, iface2) - - def use_ovs(self) -> bool: - return self.options.get_config("ovs") == "1" + iface1 = node1.get_iface(iface1_id) + iface2 = node2.get_iface(iface2_id) + core_link = self.link_manager.get_link(node1, iface1, node2, iface2) + if not core_link: + raise CoreError( + f"there is no link for node({node1.name}):interface({iface1_id}) " + f"node({node2.name}):interface({iface2_id})" + ) + if linked: + core_link.ptp.attach(iface1) + core_link.ptp.attach(iface2) + else: + core_link.ptp.detach(iface1) + core_link.ptp.detach(iface2) def add_link( self, @@ -231,8 +238,7 @@ class Session: iface1_data: InterfaceData = None, iface2_data: InterfaceData = None, options: LinkOptions = None, - link_type: LinkTypes = LinkTypes.WIRED, - ) -> Tuple[CoreInterface, CoreInterface]: + ) -> Tuple[Optional[CoreInterface], Optional[CoreInterface]]: """ Add a link between nodes. 
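The new Session.linked call above toggles whether the two interfaces of an existing wired link are attached to their shared ptp bridge, without tearing the link down. A minimal sketch, with node and interface ids assumed:

# temporarily take the wired link between nodes 1 and 2 down at the bridge
session.linked(node1_id=1, node2_id=2, iface1_id=0, iface2_id=0, linked=False)
# ...later, reattach both interfaces to the same ptp bridge
session.linked(node1_id=1, node2_id=2, iface1_id=0, iface2_id=0, linked=True)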
@@ -244,89 +250,129 @@ class Session: data, defaults to none :param options: data for creating link, defaults to no options - :param link_type: type of link to add :return: tuple of created core interfaces, depending on link """ - if not options: - options = LinkOptions() - node1 = self.get_node(node1_id, NodeBase) - node2 = self.get_node(node2_id, NodeBase) - iface1 = None - iface2 = None + options = options if options else LinkOptions() # set mtu - mtu = self.options.get_config_int("mtu") or DEFAULT_MTU + mtu = self.options.get_int("mtu") or DEFAULT_MTU if iface1_data: iface1_data.mtu = mtu if iface2_data: iface2_data.mtu = mtu - # wireless link - if link_type == LinkTypes.WIRELESS: - if isinstance(node1, CoreNodeBase) and isinstance(node2, CoreNodeBase): - self._link_wireless(node1, node2, connect=True) - else: - raise CoreError( - f"cannot wireless link node1({type(node1)}) node2({type(node2)})" - ) - # wired link + node1 = self.get_node(node1_id, NodeBase) + node2 = self.get_node(node2_id, NodeBase) + # check for invalid linking + if ( + isinstance(node1, WIRELESS_TYPE) + and isinstance(node2, WIRELESS_TYPE) + or isinstance(node1, WIRELESS_TYPE) + and not isinstance(node2, CoreNodeBase) + or not isinstance(node1, CoreNodeBase) + and isinstance(node2, WIRELESS_TYPE) + ): + raise CoreError(f"cannot link node({type(node1)}) node({type(node2)})") + # custom links + iface1 = None + iface2 = None + if isinstance(node1, (WlanNode, WirelessNode)): + iface2 = self._add_wlan_link(node2, iface2_data, node1) + elif isinstance(node2, (WlanNode, WirelessNode)): + iface1 = self._add_wlan_link(node1, iface1_data, node2) + elif isinstance(node1, EmaneNet) and isinstance(node2, CoreNode): + iface2 = self._add_emane_link(node2, iface2_data, node1) + elif isinstance(node2, EmaneNet) and isinstance(node1, CoreNode): + iface1 = self._add_emane_link(node1, iface1_data, node2) else: - # peer to peer link - if isinstance(node1, CoreNodeBase) and isinstance(node2, CoreNodeBase): - logger.info("linking ptp: %s - %s", node1.name, node2.name) - start = self.state.should_start() - ptp = self.create_node(PtpNet, start) - iface1 = node1.new_iface(ptp, iface1_data) - iface2 = node2.new_iface(ptp, iface2_data) - iface1.config(options) - if not options.unidirectional: - iface2.config(options) - # link node to net - elif isinstance(node1, CoreNodeBase) and isinstance(node2, CoreNetworkBase): - logger.info("linking node to net: %s - %s", node1.name, node2.name) - iface1 = node1.new_iface(node2, iface1_data) - if not isinstance(node2, (EmaneNet, WlanNode)): - iface1.config(options) - # link net to node - elif isinstance(node2, CoreNodeBase) and isinstance(node1, CoreNetworkBase): - logger.info("linking net to node: %s - %s", node1.name, node2.name) - iface2 = node2.new_iface(node1, iface2_data) - wireless_net = isinstance(node1, (EmaneNet, WlanNode)) - if not options.unidirectional and not wireless_net: - iface2.config(options) - # network to network - elif isinstance(node1, CoreNetworkBase) and isinstance( - node2, CoreNetworkBase - ): - logger.info( - "linking network to network: %s - %s", node1.name, node2.name - ) - iface1 = node1.linknet(node2) - use_local = iface1.net == node1 - iface1.config(options, use_local=use_local) - if not options.unidirectional: - iface1.config(options, use_local=not use_local) - else: - raise CoreError( - f"cannot link node1({type(node1)}) node2({type(node2)})" - ) - - # configure tunnel nodes - key = options.key - if isinstance(node1, TunnelNode): - logger.info("setting tunnel key for: 
%s", node1.name) - node1.setkey(key, iface1_data) - if isinstance(node2, TunnelNode): - logger.info("setting tunnel key for: %s", node2.name) - node2.setkey(key, iface2_data) + iface1, iface2 = self._add_wired_link( + node1, node2, iface1_data, iface2_data, options + ) + # configure tunnel nodes + key = options.key + if isinstance(node1, TunnelNode): + logger.info("setting tunnel key for: %s", node1.name) + node1.setkey(key, iface1_data) + if isinstance(node2, TunnelNode): + logger.info("setting tunnel key for: %s", node2.name) + node2.setkey(key, iface2_data) self.sdt.add_link(node1_id, node2_id) return iface1, iface2 - def delete_link( + def _add_wlan_link( self, - node1_id: int, - node2_id: int, - iface1_id: int = None, - iface2_id: int = None, - link_type: LinkTypes = LinkTypes.WIRED, + node: NodeBase, + iface_data: InterfaceData, + net: Union[WlanNode, WirelessNode], + ) -> CoreInterface: + """ + Create a wlan link. + + :param node: node to link to wlan network + :param iface_data: data to create interface with + :param net: wlan network to link to + :return: interface created for node + """ + # create interface + iface = node.create_iface(iface_data) + # attach to wlan + net.attach(iface) + # track link + core_link = CoreLink(node, iface, net, None) + self.link_manager.add(core_link) + return iface + + def _add_emane_link( + self, node: CoreNode, iface_data: InterfaceData, net: EmaneNet + ) -> CoreInterface: + """ + Create am emane link. + + :param node: node to link to emane network + :param iface_data: data to create interface with + :param net: emane network to link to + :return: interface created for node + """ + # create iface tuntap + iface = net.create_tuntap(node, iface_data) + # track link + core_link = CoreLink(node, iface, net, None) + self.link_manager.add(core_link) + return iface + + def _add_wired_link( + self, + node1: NodeBase, + node2: NodeBase, + iface1_data: InterfaceData = None, + iface2_data: InterfaceData = None, + options: LinkOptions = None, + ) -> Tuple[CoreInterface, CoreInterface]: + """ + Create a wired link between two nodes. + + :param node1: first node to be linked + :param node2: second node to be linked + :param iface1_data: data to create interface for node1 + :param iface2_data: data to create interface for node2 + :param options: options to configure interfaces with + :return: interfaces created for both nodes + """ + # create interfaces + iface1 = node1.create_iface(iface1_data, options) + iface2 = node2.create_iface(iface2_data, options) + # join and attach to ptp bridge + ptp = self.create_node(PtpNet, self.state.should_start()) + ptp.attach(iface1) + ptp.attach(iface2) + # track link + core_link = CoreLink(node1, iface1, node2, iface2, ptp) + self.link_manager.add(core_link) + # setup link for gre tunnels if needed + if ptp.up: + self.distributed.create_gre_tunnels(core_link) + return iface1, iface2 + + def delete_link( + self, node1_id: int, node2_id: int, iface1_id: int = None, iface2_id: int = None ) -> None: """ Delete a link between nodes. 
@@ -335,63 +381,38 @@ class Session: :param node2_id: node two id :param iface1_id: interface id for node one :param iface2_id: interface id for node two - :param link_type: link type to delete :return: nothing :raises core.CoreError: when no common network is found for link being deleted """ node1 = self.get_node(node1_id, NodeBase) node2 = self.get_node(node2_id, NodeBase) logger.info( - "deleting link(%s) node(%s):interface(%s) node(%s):interface(%s)", - link_type.name, + "deleting link node(%s):interface(%s) node(%s):interface(%s)", node1.name, iface1_id, node2.name, iface2_id, ) - - # wireless link - if link_type == LinkTypes.WIRELESS: - if isinstance(node1, CoreNodeBase) and isinstance(node2, CoreNodeBase): - self._link_wireless(node1, node2, connect=False) - else: - raise CoreError( - "cannot delete wireless link " - f"node1({type(node1)}) node2({type(node2)})" - ) - # wired link + iface1 = None + iface2 = None + if isinstance(node1, (WlanNode, WirelessNode)): + iface2 = node2.delete_iface(iface2_id) + node1.detach(iface2) + elif isinstance(node2, (WlanNode, WirelessNode)): + iface1 = node1.delete_iface(iface1_id) + node2.detach(iface1) + elif isinstance(node1, EmaneNet): + iface2 = node2.delete_iface(iface2_id) + node1.detach(iface2) + elif isinstance(node2, EmaneNet): + iface1 = node1.delete_iface(iface1_id) + node2.detach(iface1) else: - if isinstance(node1, CoreNodeBase) and isinstance(node2, CoreNodeBase): - iface1 = node1.get_iface(iface1_id) - iface2 = node2.get_iface(iface2_id) - if iface1.net != iface2.net: - raise CoreError( - f"node1({node1.name}) node2({node2.name}) " - "not connected to same net" - ) - ptp = iface1.net - node1.delete_iface(iface1_id) - node2.delete_iface(iface2_id) - self.delete_node(ptp.id) - elif isinstance(node1, CoreNodeBase) and isinstance(node2, CoreNetworkBase): - node1.delete_iface(iface1_id) - elif isinstance(node2, CoreNodeBase) and isinstance(node1, CoreNetworkBase): - node2.delete_iface(iface2_id) - elif isinstance(node1, CoreNetworkBase) and isinstance( - node2, CoreNetworkBase - ): - iface1 = node1.get_linked_iface(node2) - if iface1: - node1.detach(iface1) - iface1.shutdown() - iface2 = node2.get_linked_iface(node1) - if iface2: - node2.detach(iface2) - iface2.shutdown() - if not iface1 and not iface2: - raise CoreError( - f"node1({node1.name}) and node2({node2.name}) are not connected" - ) + iface1 = node1.delete_iface(iface1_id) + iface2 = node2.delete_iface(iface2_id) + core_link = self.link_manager.delete(node1, iface1, node2, iface2) + if core_link.ptp: + self.delete_node(core_link.ptp.id) self.sdt.delete_link(node1_id, node2_id) def update_link( @@ -401,7 +422,6 @@ class Session: iface1_id: int = None, iface2_id: int = None, options: LinkOptions = None, - link_type: LinkTypes = LinkTypes.WIRED, ) -> None: """ Update link information between nodes. 
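The link_type parameter is gone from delete_link and update_link; both calls are now keyed purely by node and interface ids, as sketched below with assumed ids:

from core.emulator.data import LinkOptions

# adjust loss on a running wired link; the options now apply per interface
session.update_link(node1.id, node2.id, iface1_id=0, iface2_id=0,
                    options=LinkOptions(loss=25.0))
# remove the link entirely; its backing ptp bridge node is deleted as well
session.delete_link(node1.id, node2.id, iface1_id=0, iface2_id=0)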
@@ -411,7 +431,6 @@ class Session: :param iface1_id: interface id for node one :param iface2_id: interface id for node two :param options: data to update link with - :param link_type: type of link to update :return: nothing :raises core.CoreError: when updating a wireless type link, when there is a unknown link between networks @@ -421,72 +440,26 @@ class Session: node1 = self.get_node(node1_id, NodeBase) node2 = self.get_node(node2_id, NodeBase) logger.info( - "update link(%s) node(%s):interface(%s) node(%s):interface(%s)", - link_type.name, + "update link node(%s):interface(%s) node(%s):interface(%s)", node1.name, iface1_id, node2.name, iface2_id, ) - - # wireless link - if link_type == LinkTypes.WIRELESS: - raise CoreError("cannot update wireless link") - else: - if isinstance(node1, CoreNodeBase) and isinstance(node2, CoreNodeBase): - iface1 = node1.ifaces.get(iface1_id) - if not iface1: - raise CoreError( - f"node({node1.name}) missing interface({iface1_id})" - ) - iface2 = node2.ifaces.get(iface2_id) - if not iface2: - raise CoreError( - f"node({node2.name}) missing interface({iface2_id})" - ) - if iface1.net != iface2.net: - raise CoreError( - f"node1({node1.name}) node2({node2.name}) " - "not connected to same net" - ) - iface1.config(options) - if not options.unidirectional: - iface2.config(options) - elif isinstance(node1, CoreNodeBase) and isinstance(node2, CoreNetworkBase): - iface = node1.get_iface(iface1_id) - if iface.net != node2: - raise CoreError( - f"node1({node1.name}) iface1({iface1_id})" - f" is not linked to node1({node2.name})" - ) - iface.config(options) - elif isinstance(node2, CoreNodeBase) and isinstance(node1, CoreNetworkBase): - iface = node2.get_iface(iface2_id) - if iface.net != node1: - raise CoreError( - f"node2({node2.name}) iface2({iface2_id})" - f" is not linked to node1({node1.name})" - ) - iface.config(options) - elif isinstance(node1, CoreNetworkBase) and isinstance( - node2, CoreNetworkBase - ): - iface = node1.get_linked_iface(node2) - if not iface: - iface = node2.get_linked_iface(node1) - if iface: - use_local = iface.net == node1 - iface.config(options, use_local=use_local) - if not options.unidirectional: - iface.config(options, use_local=not use_local) - else: - raise CoreError( - f"node1({node1.name}) and node2({node2.name}) are not linked" - ) - else: - raise CoreError( - f"cannot update link node1({type(node1)}) node2({type(node2)})" - ) + iface1 = node1.get_iface(iface1_id) if iface1_id is not None else None + iface2 = node2.get_iface(iface2_id) if iface2_id is not None else None + core_link = self.link_manager.get_link(node1, iface1, node2, iface2) + if not core_link: + raise CoreError( + f"there is no link for node({node1.name}):interface({iface1_id}) " + f"node({node2.name}):interface({iface2_id})" + ) + if iface1: + iface1.options.update(options) + iface1.set_config() + if iface2 and not options.unidirectional: + iface2.options.update(options) + iface2.set_config() def next_node_id(self) -> int: """ @@ -502,103 +475,55 @@ class Session: return _id def add_node( - self, _class: Type[NT], _id: int = None, options: NodeOptions = None + self, + _class: Type[NT], + _id: int = None, + name: str = None, + server: str = None, + position: Position = None, + options: NodeOptions = None, ) -> NT: """ Add a node to the session, based on the provided node data. 
:param _class: node class to create :param _id: id for node, defaults to None for generated id - :param options: data to create node with + :param name: name to assign to node + :param server: distributed server for node, if desired + :param position: geo or x/y/z position to set + :param options: options to create node with :return: created node :raises core.CoreError: when an invalid node type is given """ # set node start based on current session state, override and check when rj45 start = self.state.should_start() - enable_rj45 = self.options.get_config("enablerj45") == "1" + enable_rj45 = self.options.get_int("enablerj45") == 1 if _class == Rj45Node and not enable_rj45: start = False - - # determine node id - if not _id: - _id = self.next_node_id() - - # generate name if not provided - if not options: - options = NodeOptions() - options.set_position(0, 0) - name = options.name - if not name: - name = f"{_class.__name__}{_id}" - + # generate options if not provided + options = options if options else _class.create_options() # verify distributed server - server = self.distributed.servers.get(options.server) - if options.server is not None and server is None: - raise CoreError(f"invalid distributed server: {options.server}") - + dist_server = None + if server is not None: + dist_server = self.distributed.servers.get(server) + if not dist_server: + raise CoreError(f"invalid distributed server: {server}") # create node - logger.info( - "creating node(%s) id(%s) name(%s) start(%s)", - _class.__name__, - _id, - name, - start, - ) - kwargs = dict(_id=_id, name=name, server=server) - if _class in CONTAINER_NODES: - kwargs["image"] = options.image - node = self.create_node(_class, start, **kwargs) - - # set node attributes - node.icon = options.icon - node.canvas = options.canvas - - # set node position and broadcast it - has_geo = all(i is not None for i in [options.lon, options.lat, options.alt]) - if has_geo: - self.set_node_geo(node, options.lon, options.lat, options.alt) + node = self.create_node(_class, start, _id, name, dist_server, options) + # set node position + position = position or Position() + if position.has_geo(): + self.set_node_geo(node, position.lon, position.lat, position.alt) else: - self.set_node_pos(node, options.x, options.y) - - # add services to needed nodes - if isinstance(node, (CoreNode, PhysicalNode)): - node.type = options.model - if options.legacy or options.services: - logger.debug("set node type: %s", node.type) - self.services.add_services(node, node.type, options.services) - - # add config services - config_services = options.config_services - if not options.legacy and not config_services and not node.services: - config_services = self.services.default_services.get(node.type, []) - logger.info("setting node config services: %s", config_services) - for name in config_services: - service_class = self.service_manager.get_service(name) - node.add_config_service(service_class) - - # set network mtu, if configured - mtu = self.options.get_config_int("mtu") - if isinstance(node, CoreNetworkBase) and mtu > 0: - node.mtu = mtu - - # ensure default emane configuration - if isinstance(node, EmaneNet) and options.emane: - model_class = self.emane.get_model(options.emane) - node.model = model_class(self, node.id) - if self.state == EventTypes.RUNTIME_STATE: - self.emane.add_node(node) - - # set default wlan config if needed + self.set_node_pos(node, position.x, position.y) + # setup default wlan if isinstance(node, WlanNode): - self.mobility.set_model_config(_id, 
BasicRangeModel.name) - - # boot nodes after runtime CoreNodes and PhysicalNodes - is_boot_node = isinstance(node, (CoreNode, PhysicalNode)) - if self.state == EventTypes.RUNTIME_STATE and is_boot_node: - self.write_nodes() + self.mobility.set_model_config(self.id, BasicRangeModel.name) + # boot core nodes after runtime + is_runtime = self.state == EventTypes.RUNTIME_STATE + if is_runtime and isinstance(node, CoreNode): self.add_remove_control_iface(node, remove=False) self.boot_node(node) - self.sdt.add_node(node) return node @@ -618,26 +543,6 @@ class Session: node.position.set_geo(lon, lat, alt) self.sdt.edit_node(node, lon, lat, alt) - def start_mobility(self, node_ids: List[int] = None) -> None: - """ - Start mobility for the provided node ids. - - :param node_ids: nodes to start mobility for - :return: nothing - """ - self.mobility.startup(node_ids) - - def is_active(self) -> bool: - """ - Determine if this session is considered to be active. - (Runtime or Data collect states) - - :return: True if active, False otherwise - """ - result = self.state in {EventTypes.RUNTIME_STATE, EventTypes.DATACOLLECT_STATE} - logger.info("session(%s) checking if active: %s", self.id, result) - return result - def open_xml(self, file_path: Path, start: bool = False) -> None: """ Import a session from the EmulationScript XML format. @@ -693,28 +598,6 @@ class Session: logger.info("immediately running new state hook") self.run_hook(hook) - def add_node_file( - self, - node_id: int, - src_path: Optional[Path], - file_path: Path, - data: Optional[str], - ) -> None: - """ - Add a file to a node. - - :param node_id: node to add file to - :param src_path: source file path - :param file_path: file path to add - :param data: file data - :return: nothing - """ - node = self.get_node(node_id, CoreNode) - if src_path is not None: - node.copy_file(src_path, file_path) - elif data is not None: - node.create_file(file_path, data) - def clear(self) -> None: """ Clear all CORE session data. (nodes, hooks, etc) @@ -723,6 +606,7 @@ class Session: """ self.emane.shutdown() self.delete_nodes() + self.link_manager.reset() self.distributed.shutdown() self.hooks.clear() self.emane.reset() @@ -732,23 +616,6 @@ class Session: self.mobility.config_reset() self.link_colors.clear() - def start_events(self) -> None: - """ - Start event loop. - - :return: nothing - """ - self.event_loop.run() - - def mobility_event(self, event_data: EventData) -> None: - """ - Handle a mobility event. - - :param event_data: event data to handle - :return: nothing - """ - self.mobility.handleevent(event_data) - def set_location(self, lat: float, lon: float, alt: float, scale: float) -> None: """ Set session geospatial location. 
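A sketch of the reworked add_node signature above, which now takes name, server, position, and a node-type specific options object; the coordinates and the emane model name are placeholders:

from core.emane.nodes import EmaneNet, EmaneOptions
from core.nodes.base import CoreNode, Position

# plain container node at an x/y canvas position
node1 = session.add_node(CoreNode, name="n1", position=Position(x=100, y=100))

# emane network; the model is now chosen via EmaneOptions at creation time
emane_net = session.add_node(
    EmaneNet,
    name="emane1",
    position=Position(x=300, y=100),
    options=EmaneOptions(emane_model="emane_rfpipe"),
)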
@@ -776,7 +643,7 @@ class Session: # shutdown sdt self.sdt.shutdown() # remove this sessions working directory - preserve = self.options.get_config("preservedir") == "1" + preserve = self.options.get_int("preservedir") == 1 if not preserve: shutil.rmtree(self.directory, ignore_errors=True) @@ -814,8 +681,6 @@ class Session: :param source: source of broadcast, None by default :return: nothing """ - if not node.apitype: - return node_data = NodeData(node=node, message_type=message_type, source=source) for handler in self.node_handlers: handler(node_data) @@ -863,27 +728,12 @@ class Session: self.state = state self.state_time = time.monotonic() logger.info("changing session(%s) to state %s", self.id, state.name) - self.write_state(state) self.run_hooks(state) self.run_state_hooks(state) if send_event: event_data = EventData(event_type=state, time=str(time.monotonic())) self.broadcast_event(event_data) - def write_state(self, state: EventTypes) -> None: - """ - Write the state to a state file in the session dir. - - :param state: state to write to file - :return: nothing - """ - state_file = self.directory / "state" - try: - with state_file.open("w") as f: - f.write(f"{state.value} {state.name}\n") - except IOError: - logger.exception("error writing state file: %s", state.name) - def run_hooks(self, state: EventTypes) -> None: """ Run hook scripts upon changing states. If hooks is not specified, run all hooks @@ -1007,15 +857,14 @@ class Session: env["SESSION_STATE"] = str(self.state) # try reading and merging optional environments from: # /etc/core/environment - # /home/user/.core/environment + # /home/user/.coregui/environment # /tmp/pycore./environment core_env_path = constants.CORE_CONF_DIR / "environment" session_env_path = self.directory / "environment" if self.user: user_home_path = Path(f"~{self.user}").expanduser() - user_env1 = user_home_path / ".core" / "environment" - user_env2 = user_home_path / ".coregui" / "environment" - paths = [core_env_path, user_env1, user_env2, session_env_path] + user_env = user_home_path / ".coregui" / "environment" + paths = [core_env_path, user_env, session_env_path] else: paths = [core_env_path, session_env_path] for path in paths: @@ -1026,21 +875,6 @@ class Session: logger.exception("error reading environment file: %s", path) return env - def set_thumbnail(self, thumb_file: Path) -> None: - """ - Set the thumbnail filename. Move files from /tmp to session dir. - - :param thumb_file: tumbnail file to set for session - :return: nothing - """ - if not thumb_file.is_file(): - logger.error("thumbnail file to set does not exist: %s", thumb_file) - self.thumbnail = None - return - dst_path = self.directory / thumb_file.name - shutil.copy(thumb_file, dst_path) - self.thumbnail = dst_path - def set_user(self, user: str) -> None: """ Set the username for this session. 
Update the permissions of the @@ -1049,34 +883,48 @@ class Session: :param user: user to give write permissions to for the session directory :return: nothing """ - if user: - try: - uid = pwd.getpwnam(user).pw_uid - gid = self.directory.stat().st_gid - os.chown(self.directory, uid, gid) - except IOError: - logger.exception("failed to set permission on %s", self.directory) self.user = user + try: + uid = pwd.getpwnam(user).pw_uid + gid = self.directory.stat().st_gid + os.chown(self.directory, uid, gid) + except IOError: + logger.exception("failed to set permission on %s", self.directory) def create_node( - self, _class: Type[NT], start: bool, *args: Any, **kwargs: Any + self, + _class: Type[NT], + start: bool, + _id: int = None, + name: str = None, + server: str = None, + options: NodeOptions = None, ) -> NT: """ Create an emulation node. :param _class: node class to create :param start: True to start node, False otherwise - :param args: list of arguments for the class to create - :param kwargs: dictionary of arguments for the class to create + :param _id: id for node, defaults to None for generated id + :param name: name to assign to node + :param server: distributed server for node, if desired + :param options: options to create node with :return: the created node instance :raises core.CoreError: when id of the node to create already exists """ with self.nodes_lock: - node = _class(self, *args, **kwargs) + node = _class(self, _id=_id, name=name, server=server, options=options) if node.id in self.nodes: node.shutdown() raise CoreError(f"duplicate node id {node.id} for {node.name}") self.nodes[node.id] = node + logger.info( + "created node(%s) id(%s) name(%s) start(%s)", + _class.__name__, + node.id, + node.name, + start, + ) if start: node.startup() return node @@ -1133,20 +981,6 @@ class Session: for node_id in nodes_ids: self.sdt.delete_node(node_id) - def write_nodes(self) -> None: - """ - Write nodes to a 'nodes' file in the session dir. 
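
A minimal sketch of the new keyword-based create_node() call shown above, assuming an existing Session instance named session; the node name and model are illustrative values:

    from core.errors import CoreError
    from core.nodes.base import CoreNode

    options = CoreNode.create_options()   # returns a CoreNodeOptions dataclass
    options.model = "router"
    node = session.create_node(CoreNode, start=False, name="n1", options=options)
    try:
        session.create_node(CoreNode, start=False, _id=node.id)
    except CoreError:
        pass                              # duplicate node ids are rejected
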
- The 'nodes' file lists: number, name, api-type, class-type - """ - file_path = self.directory / "nodes" - try: - with self.nodes_lock: - with file_path.open("w") as f: - for _id, node in self.nodes.items(): - f.write(f"{_id} {node.name} {node.apitype} {type(node)}\n") - except IOError: - logger.exception("error writing nodes file") - def exception( self, level: ExceptionLevels, source: str, text: str, node_id: int = None ) -> None: @@ -1180,8 +1014,6 @@ class Session: if self.state == EventTypes.RUNTIME_STATE: logger.warning("ignoring instantiate, already in runtime state") return [] - # write current nodes out to session directory file - self.write_nodes() # create control net interfaces and network tunnels # which need to exist for emane to sync on location events # in distributed scenarios @@ -1194,6 +1026,10 @@ class Session: # boot node services and then start mobility exceptions = self.boot_nodes() if not exceptions: + # complete wireless node + for node in self.nodes.values(): + if isinstance(node, WirelessNode): + node.post_startup() self.mobility.startup() # notify listeners that instantiation is complete event = EventData(event_type=EventTypes.INSTANTIATION_COMPLETE) @@ -1293,7 +1129,7 @@ class Session: funcs = [] start = time.monotonic() for node in self.nodes.values(): - if isinstance(node, (CoreNode, PhysicalNode)): + if isinstance(node, CoreNode): self.add_remove_control_iface(node, remove=False) funcs.append((self.boot_node, (node,), {})) results, exceptions = utils.threadpool(funcs) @@ -1309,11 +1145,11 @@ class Session: :return: control net prefix list """ - p = self.options.get_config("controlnet") - p0 = self.options.get_config("controlnet0") - p1 = self.options.get_config("controlnet1") - p2 = self.options.get_config("controlnet2") - p3 = self.options.get_config("controlnet3") + p = self.options.get("controlnet") + p0 = self.options.get("controlnet0") + p1 = self.options.get("controlnet1") + p2 = self.options.get("controlnet2") + p3 = self.options.get("controlnet3") if not p0 and p: p0 = p return [p0, p1, p2, p3] @@ -1324,12 +1160,12 @@ class Session: :return: list of control net server interfaces """ - d0 = self.options.get_config("controlnetif0") + d0 = self.options.get("controlnetif0") if d0: logger.error("controlnet0 cannot be assigned with a host interface") - d1 = self.options.get_config("controlnetif1") - d2 = self.options.get_config("controlnetif2") - d3 = self.options.get_config("controlnetif3") + d1 = self.options.get("controlnetif1") + d2 = self.options.get("controlnetif2") + d3 = self.options.get("controlnetif3") return [None, d1, d2, d3] def get_control_net_index(self, dev: str) -> int: @@ -1404,9 +1240,8 @@ class Session: # use the updown script for control net 0 only. 
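
The control network settings read above all come from the session options defined in sessionconfig.py further down; a minimal sketch of setting them before instantiation, assuming an existing Session named session (the option ids are taken from the code above, the values are illustrative):

    session.options.set("controlnet", "172.16.0.0/24")   # fallback prefix for control net 0
    session.options.set("controlnetif1", "eth1")         # host interface for control net 1
    session.options.set("controlnet_updown_script", "/usr/local/etc/core/ctrlnet.sh")
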
updown_script = None - if net_index == 0: - updown_script = self.options.get_config("controlnet_updown_script") + updown_script = self.options.get("controlnet_updown_script") or None if not updown_script: logger.debug("controlnet updown script not configured") @@ -1429,21 +1264,18 @@ class Session: updown_script, server_iface, ) - control_net = self.create_node( - CtrlNet, - start=False, - prefix=prefix, - _id=_id, - updown_script=updown_script, - serverintf=server_iface, - ) + options = CtrlNet.create_options() + options.prefix = prefix + options.updown_script = updown_script + options.serverintf = server_iface + control_net = self.create_node(CtrlNet, False, _id, options=options) control_net.brname = f"ctrl{net_index}.{self.short_session_id()}" control_net.startup() return control_net def add_remove_control_iface( self, - node: Union[CoreNode, PhysicalNode], + node: CoreNode, net_index: int = 0, remove: bool = False, conf_required: bool = True, @@ -1480,7 +1312,8 @@ class Session: ip4_mask=ip4_mask, mtu=DEFAULT_MTU, ) - iface = node.new_iface(control_net, iface_data) + iface = node.create_iface(iface_data) + control_net.attach(iface) iface.control = True except ValueError: msg = f"Control interface not added to node {node.id}. " @@ -1498,7 +1331,7 @@ class Session: :param remove: flag to check if it should be removed :return: nothing """ - if not self.options.get_config_bool("update_etc_hosts", default=False): + if not self.options.get_bool("update_etc_hosts", False): return try: diff --git a/daemon/core/emulator/sessionconfig.py b/daemon/core/emulator/sessionconfig.py index 028d4e66..ead9e9e5 100644 --- a/daemon/core/emulator/sessionconfig.py +++ b/daemon/core/emulator/sessionconfig.py @@ -1,23 +1,15 @@ -from typing import Any, List +from typing import Dict, List, Optional -from core.config import ( - ConfigBool, - ConfigInt, - ConfigString, - ConfigurableManager, - ConfigurableOptions, - Configuration, -) -from core.emulator.enumerations import RegisterTlvs +from core.config import ConfigBool, ConfigInt, ConfigString, Configuration +from core.errors import CoreError from core.plugins.sdt import Sdt -class SessionConfig(ConfigurableManager, ConfigurableOptions): +class SessionConfig: """ Provides session configuration. """ - name: str = "session" options: List[Configuration] = [ ConfigString(id="controlnet", label="Control Network"), ConfigString(id="controlnet0", label="Control Network 0"), @@ -42,34 +34,54 @@ class SessionConfig(ConfigurableManager, ConfigurableOptions): ConfigInt(id="link_timeout", default="4", label="EMANE Link Timeout (sec)"), ConfigInt(id="mtu", default="0", label="MTU for All Devices"), ] - config_type: RegisterTlvs = RegisterTlvs.UTILITY - def __init__(self) -> None: - super().__init__() - self.set_configs(self.default_values()) - - def get_config( - self, - _id: str, - node_id: int = ConfigurableManager._default_node, - config_type: str = ConfigurableManager._default_type, - default: Any = None, - ) -> str: + def __init__(self, config: Dict[str, str] = None) -> None: """ - Retrieves a specific configuration for a node and configuration type. + Create a SessionConfig instance. 
- :param _id: specific configuration to retrieve - :param node_id: node id to store configuration for - :param config_type: configuration type to store configuration for - :param default: default value to return when value is not found - :return: configuration value + :param config: configuration to initialize with """ - value = super().get_config(_id, node_id, config_type, default) - if value == "": - value = default - return value + self._config: Dict[str, str] = {x.id: x.default for x in self.options} + self._config.update(config or {}) - def get_config_bool(self, name: str, default: Any = None) -> bool: + def update(self, config: Dict[str, str]) -> None: + """ + Update current configuration with provided values. + + :param config: configuration to update with + :return: nothing + """ + self._config.update(config) + + def set(self, name: str, value: str) -> None: + """ + Set a configuration value. + + :param name: name of configuration to set + :param value: value to set + :return: nothing + """ + self._config[name] = value + + def get(self, name: str, default: str = None) -> Optional[str]: + """ + Retrieve configuration value. + + :param name: name of configuration to get + :param default: value to return as default + :return: return found configuration value or default + """ + return self._config.get(name, default) + + def all(self) -> Dict[str, str]: + """ + Retrieve all configuration options. + + :return: configuration value dict + """ + return self._config + + def get_bool(self, name: str, default: bool = None) -> bool: """ Get configuration value as a boolean. @@ -77,12 +89,15 @@ class SessionConfig(ConfigurableManager, ConfigurableOptions): :param default: default value if not found :return: boolean for configuration value """ - value = self.get_config(name) + value = self._config.get(name) + if value is None and default is None: + raise CoreError(f"missing session options for {name}") if value is None: return default - return value.lower() == "true" + else: + return value.lower() == "true" - def get_config_int(self, name: str, default: Any = None) -> int: + def get_int(self, name: str, default: int = None) -> int: """ Get configuration value as int. @@ -90,17 +105,10 @@ class SessionConfig(ConfigurableManager, ConfigurableOptions): :param default: default value if not found :return: int for configuration value """ - value = self.get_config(name, default=default) - if value is not None: - value = int(value) - return value - - def config_reset(self, node_id: int = None) -> None: - """ - Clear prior configuration files and reset to default values. 
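
A minimal usage sketch of the new dict-backed SessionConfig API (get_int() just below follows the same pattern as get_bool()); mtu and controlnet come from the defaults above, the other option names are illustrative:

    from core.emulator.sessionconfig import SessionConfig
    from core.errors import CoreError

    config = SessionConfig({"controlnet": "172.16.0.0/24"})
    config.set("preservedir", "1")

    config.get("controlnet")                  # "172.16.0.0/24"
    config.get_int("mtu")                     # 0, taken from the option defaults
    config.get_int("preservedir")             # 1
    config.get("not_an_option", "fallback")   # "fallback"; get() never raises
    try:
        config.get_bool("not_an_option")      # undefined and no default -> CoreError
    except CoreError:
        pass
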
- - :param node_id: node id to store configuration for - :return: nothing - """ - super().config_reset(node_id) - self.set_configs(self.default_values()) + value = self._config.get(name) + if value is None and default is None: + raise CoreError(f"missing session options for {name}") + if value is None: + return default + else: + return int(value) diff --git a/daemon/core/executables.py b/daemon/core/executables.py index 3d0e80f6..95c97378 100644 --- a/daemon/core/executables.py +++ b/daemon/core/executables.py @@ -1,30 +1,31 @@ from typing import List BASH: str = "bash" -VNODED: str = "vnoded" -VCMD: str = "vcmd" -SYSCTL: str = "sysctl" -IP: str = "ip" ETHTOOL: str = "ethtool" -TC: str = "tc" +IP: str = "ip" MOUNT: str = "mount" -UMOUNT: str = "umount" -OVS_VSCTL: str = "ovs-vsctl" -TEST: str = "test" NFTABLES: str = "nft" +OVS_VSCTL: str = "ovs-vsctl" +SYSCTL: str = "sysctl" +TC: str = "tc" +TEST: str = "test" +UMOUNT: str = "umount" +VCMD: str = "vcmd" +VNODED: str = "vnoded" COMMON_REQUIREMENTS: List[str] = [ BASH, - NFTABLES, ETHTOOL, IP, MOUNT, + NFTABLES, SYSCTL, TC, - UMOUNT, TEST, + UMOUNT, + VCMD, + VNODED, ] -VCMD_REQUIREMENTS: List[str] = [VNODED, VCMD] OVS_REQUIREMENTS: List[str] = [OVS_VSCTL] @@ -38,6 +39,4 @@ def get_requirements(use_ovs: bool) -> List[str]: requirements = COMMON_REQUIREMENTS if use_ovs: requirements += OVS_REQUIREMENTS - else: - requirements += VCMD_REQUIREMENTS return requirements diff --git a/daemon/core/gui/coreclient.py b/daemon/core/gui/coreclient.py index 4905ac8a..6f098118 100644 --- a/daemon/core/gui/coreclient.py +++ b/daemon/core/gui/coreclient.py @@ -70,6 +70,9 @@ class CoreClient: self.session: Optional[Session] = None self.user = getpass.getuser() + # menu options + self.show_throughputs: tk.BooleanVar = tk.BooleanVar(value=False) + # global service settings self.services: Dict[str, Set[str]] = {} self.config_services_groups: Dict[str, Set[str]] = {} @@ -242,9 +245,10 @@ class CoreClient: logger.warning("unknown node event: %s", event) def enable_throughputs(self) -> None: - self.handling_throughputs = self.client.throughputs( - self.session.id, self.handle_throughputs - ) + if not self.handling_throughputs: + self.handling_throughputs = self.client.throughputs( + self.session.id, self.handle_throughputs + ) def cancel_throughputs(self) -> None: if self.handling_throughputs: @@ -404,9 +408,11 @@ class CoreClient: for edge in self.links.values(): link = edge.link if not definition: - if link.iface1 and not link.iface1.mac: + node1 = self.session.nodes[link.node1_id] + node2 = self.session.nodes[link.node2_id] + if nutils.is_container(node1) and link.iface1 and not link.iface1.mac: link.iface1.mac = self.ifaces_manager.next_mac() - if link.iface2 and not link.iface2.mac: + if nutils.is_container(node2) and link.iface2 and not link.iface2.mac: link.iface2.mac = self.ifaces_manager.next_mac() links.append(link) if edge.asymmetric_link: @@ -429,13 +435,15 @@ class CoreClient: definition, result, ) + if self.show_throughputs.get(): + self.enable_throughputs() except grpc.RpcError as e: self.app.show_grpc_exception("Start Session Error", e) return result, exceptions def stop_session(self, session_id: int = None) -> bool: - if not session_id: - session_id = self.session.id + session_id = session_id or self.session.id + self.cancel_throughputs() result = False try: result = self.client.stop_session(session_id) @@ -665,10 +673,10 @@ class CoreClient: self.links[edge.token] = edge src_node = edge.src.core_node dst_node = edge.dst.core_node - if 
nutils.is_container(src_node): + if edge.link.iface1: src_iface_id = edge.link.iface1.id self.iface_to_edge[(src_node.id, src_iface_id)] = edge - if nutils.is_container(dst_node): + if edge.link.iface2: dst_iface_id = edge.link.iface2.id self.iface_to_edge[(dst_node.id, dst_iface_id)] = edge @@ -741,6 +749,9 @@ class CoreClient: configs.append(config) return configs + def get_config_service_rendered(self, node_id: int, name: str) -> Dict[str, str]: + return self.client.get_config_service_rendered(self.session.id, node_id, name) + def get_config_service_configs_proto( self ) -> List[configservices_pb2.ConfigServiceConfig]: @@ -774,6 +785,9 @@ class CoreClient: ) return config + def get_wireless_config(self, node_id: int) -> Dict[str, ConfigOption]: + return self.client.get_wireless_config(self.session.id, node_id) + def get_mobility_config(self, node_id: int) -> Dict[str, ConfigOption]: config = self.client.get_mobility_config(self.session.id, node_id) logger.debug( diff --git a/daemon/core/gui/data/icons/wireless.png b/daemon/core/gui/data/icons/wireless.png new file mode 100644 index 00000000..2b42b8dd Binary files /dev/null and b/daemon/core/gui/data/icons/wireless.png differ diff --git a/daemon/core/gui/dialogs/configserviceconfig.py b/daemon/core/gui/dialogs/configserviceconfig.py index 870f9639..62c0bfc5 100644 --- a/daemon/core/gui/dialogs/configserviceconfig.py +++ b/daemon/core/gui/dialogs/configserviceconfig.py @@ -34,10 +34,10 @@ class ConfigServiceConfigDialog(Dialog): self.core: "CoreClient" = app.core self.node: Node = node self.service_name: str = service_name - self.radiovar: tk.IntVar = tk.IntVar() - self.radiovar.set(2) + self.radiovar: tk.IntVar = tk.IntVar(value=2) self.directories: List[str] = [] self.templates: List[str] = [] + self.rendered: Dict[str, str] = {} self.dependencies: List[str] = [] self.executables: List[str] = [] self.startup_commands: List[str] = [] @@ -48,10 +48,9 @@ class ConfigServiceConfigDialog(Dialog): self.default_shutdown: List[str] = [] self.validation_mode: Optional[ServiceValidationMode] = None self.validation_time: Optional[int] = None - self.validation_period: tk.StringVar = tk.StringVar() + self.validation_period: tk.DoubleVar = tk.DoubleVar() self.modes: List[str] = [] self.mode_configs: Dict[str, Dict[str, str]] = {} - self.notebook: Optional[ttk.Notebook] = None self.templates_combobox: Optional[ttk.Combobox] = None self.modes_combobox: Optional[ttk.Combobox] = None @@ -61,6 +60,7 @@ class ConfigServiceConfigDialog(Dialog): self.validation_time_entry: Optional[ttk.Entry] = None self.validation_mode_entry: Optional[ttk.Entry] = None self.template_text: Optional[CodeText] = None + self.rendered_text: Optional[CodeText] = None self.validation_period_entry: Optional[ttk.Entry] = None self.original_service_files: Dict[str, str] = {} self.temp_service_files: Dict[str, str] = {} @@ -87,7 +87,6 @@ class ConfigServiceConfigDialog(Dialog): self.validation_mode = service.validation_mode self.validation_time = service.validation_timer self.validation_period.set(service.validation_period) - defaults = self.core.client.get_config_service_defaults(self.service_name) self.original_service_files = defaults.templates self.temp_service_files = dict(self.original_service_files) @@ -95,6 +94,9 @@ class ConfigServiceConfigDialog(Dialog): self.mode_configs = defaults.modes self.config = ConfigOption.from_dict(defaults.config) self.default_config = {x.name: x.value for x in self.config.values()} + self.rendered = self.core.get_config_service_rendered( + 
self.node.id, self.service_name + ) service_config = self.node.config_service_configs.get(self.service_name) if service_config: for key, value in service_config.config.items(): @@ -110,7 +112,6 @@ class ConfigServiceConfigDialog(Dialog): def draw(self) -> None: self.top.columnconfigure(0, weight=1) self.top.rowconfigure(0, weight=1) - # draw notebook self.notebook = ttk.Notebook(self.top) self.notebook.grid(sticky=tk.NSEW, pady=PADY) @@ -125,6 +126,7 @@ class ConfigServiceConfigDialog(Dialog): tab = ttk.Frame(self.notebook, padding=FRAME_PAD) tab.grid(sticky=tk.NSEW) tab.columnconfigure(0, weight=1) + tab.rowconfigure(2, weight=1) self.notebook.add(tab, text="Directories/Files") label = ttk.Label( @@ -137,33 +139,54 @@ class ConfigServiceConfigDialog(Dialog): frame.columnconfigure(1, weight=1) label = ttk.Label(frame, text="Directories") label.grid(row=0, column=0, sticky=tk.W, padx=PADX) - directories_combobox = ttk.Combobox( - frame, values=self.directories, state="readonly" - ) + state = "readonly" if self.directories else tk.DISABLED + directories_combobox = ttk.Combobox(frame, values=self.directories, state=state) directories_combobox.grid(row=0, column=1, sticky=tk.EW, pady=PADY) if self.directories: directories_combobox.current(0) - - label = ttk.Label(frame, text="Templates") + label = ttk.Label(frame, text="Files") label.grid(row=1, column=0, sticky=tk.W, padx=PADX) + state = "readonly" if self.templates else tk.DISABLED self.templates_combobox = ttk.Combobox( - frame, values=self.templates, state="readonly" + frame, values=self.templates, state=state ) self.templates_combobox.bind( "<>", self.handle_template_changed ) self.templates_combobox.grid(row=1, column=1, sticky=tk.EW, pady=PADY) - - self.template_text = CodeText(tab) + # draw file template tab + notebook = ttk.Notebook(tab) + notebook.rowconfigure(0, weight=1) + notebook.columnconfigure(0, weight=1) + notebook.grid(sticky=tk.NSEW, pady=PADY) + # draw rendered file tab + rendered_tab = ttk.Frame(notebook, padding=FRAME_PAD) + rendered_tab.grid(sticky=tk.NSEW) + rendered_tab.rowconfigure(0, weight=1) + rendered_tab.columnconfigure(0, weight=1) + notebook.add(rendered_tab, text="Rendered") + self.rendered_text = CodeText(rendered_tab) + self.rendered_text.grid(sticky=tk.NSEW) + self.rendered_text.text.bind("", self.update_template_file_data) + # draw template file tab + template_tab = ttk.Frame(notebook, padding=FRAME_PAD) + template_tab.grid(sticky=tk.NSEW) + template_tab.rowconfigure(0, weight=1) + template_tab.columnconfigure(0, weight=1) + notebook.add(template_tab, text="Template") + self.template_text = CodeText(template_tab) self.template_text.grid(sticky=tk.NSEW) - tab.rowconfigure(self.template_text.grid_info()["row"], weight=1) + self.template_text.text.bind("", self.update_template_file_data) if self.templates: self.templates_combobox.current(0) - self.template_text.text.delete(1.0, "end") - self.template_text.text.insert( - "end", self.temp_service_files[self.templates[0]] - ) - self.template_text.text.bind("", self.update_template_file_data) + template_name = self.templates[0] + temp_data = self.temp_service_files[template_name] + self.template_text.set_text(temp_data) + rendered_data = self.rendered[template_name] + self.rendered_text.set_text(rendered_data) + else: + self.template_text.text.configure(state=tk.DISABLED) + self.rendered_text.text.configure(state=tk.DISABLED) def draw_tab_config(self) -> None: tab = ttk.Frame(self.notebook, padding=FRAME_PAD) @@ -243,7 +266,7 @@ class 
ConfigServiceConfigDialog(Dialog): label = ttk.Label(frame, text="Validation Time") label.grid(row=0, column=0, sticky=tk.W, padx=PADX) self.validation_time_entry = ttk.Entry(frame) - self.validation_time_entry.insert("end", self.validation_time) + self.validation_time_entry.insert("end", str(self.validation_time)) self.validation_time_entry.config(state=tk.DISABLED) self.validation_time_entry.grid(row=0, column=1, sticky=tk.EW, pady=PADY) @@ -323,9 +346,11 @@ class ConfigServiceConfigDialog(Dialog): self.destroy() def handle_template_changed(self, event: tk.Event) -> None: - template = self.templates_combobox.get() - self.template_text.text.delete(1.0, "end") - self.template_text.text.insert("end", self.temp_service_files[template]) + template_name = self.templates_combobox.get() + temp_data = self.temp_service_files[template_name] + self.template_text.set_text(temp_data) + rendered = self.rendered[template_name] + self.rendered_text.set_text(rendered) def handle_mode_changed(self, event: tk.Event) -> None: mode = self.modes_combobox.get() @@ -333,10 +358,13 @@ class ConfigServiceConfigDialog(Dialog): logger.info("mode config: %s", config) self.config_frame.set_values(config) - def update_template_file_data(self, event: tk.Event) -> None: - scrolledtext = event.widget + def update_template_file_data(self, _event: tk.Event) -> None: template = self.templates_combobox.get() - self.temp_service_files[template] = scrolledtext.get(1.0, "end") + self.temp_service_files[template] = self.rendered_text.get_text() + if self.rendered[template] != self.temp_service_files[template]: + self.modified_files.add(template) + return + self.temp_service_files[template] = self.template_text.get_text() if self.temp_service_files[template] != self.original_service_files[template]: self.modified_files.add(template) else: @@ -351,14 +379,24 @@ class ConfigServiceConfigDialog(Dialog): return has_custom_templates or has_custom_config def click_defaults(self) -> None: + # clear all saved state data + self.modified_files.clear() self.node.config_service_configs.pop(self.service_name, None) + self.temp_service_files = dict(self.original_service_files) + # reset session definition and retrieve default rendered templates + self.core.start_session(definition=True) + self.rendered = self.core.get_config_service_rendered( + self.node.id, self.service_name + ) logger.info( "cleared config service config: %s", self.node.config_service_configs ) - self.temp_service_files = dict(self.original_service_files) - filename = self.templates_combobox.get() - self.template_text.text.delete(1.0, "end") - self.template_text.text.insert("end", self.temp_service_files[filename]) + # reset current selected file data and config data, if present + template_name = self.templates_combobox.get() + temp_data = self.temp_service_files[template_name] + self.template_text.set_text(temp_data) + rendered_data = self.rendered[template_name] + self.rendered_text.set_text(rendered_data) if self.config_frame: logger.info("resetting defaults: %s", self.default_config) self.config_frame.set_values(self.default_config) diff --git a/daemon/core/gui/dialogs/customnodes.py b/daemon/core/gui/dialogs/customnodes.py index 065cc43e..d6dac44a 100644 --- a/daemon/core/gui/dialogs/customnodes.py +++ b/daemon/core/gui/dialogs/customnodes.py @@ -23,7 +23,7 @@ class ServicesSelectDialog(Dialog): def __init__( self, master: tk.BaseWidget, app: "Application", current_services: Set[str] ) -> None: - super().__init__(app, "Node Services", master=master) + 
super().__init__(app, "Node Config Services", master=master) self.groups: Optional[ListboxScroll] = None self.services: Optional[CheckboxList] = None self.current: Optional[ListboxScroll] = None @@ -45,7 +45,7 @@ class ServicesSelectDialog(Dialog): label_frame.columnconfigure(0, weight=1) self.groups = ListboxScroll(label_frame) self.groups.grid(sticky=tk.NSEW) - for group in sorted(self.app.core.services): + for group in sorted(self.app.core.config_services_groups): self.groups.listbox.insert(tk.END, group) self.groups.listbox.bind("<>", self.handle_group_change) self.groups.listbox.selection_set(0) @@ -86,7 +86,7 @@ class ServicesSelectDialog(Dialog): index = selection[0] group = self.groups.listbox.get(index) self.services.clear() - for name in sorted(self.app.core.services[group]): + for name in sorted(self.app.core.config_services_groups[group]): checked = name in self.current_services self.services.add(name, checked) @@ -147,7 +147,7 @@ class CustomNodesDialog(Dialog): frame, text="Icon", compound=tk.LEFT, command=self.click_icon ) self.image_button.grid(sticky=tk.EW, pady=PADY) - button = ttk.Button(frame, text="Services", command=self.click_services) + button = ttk.Button(frame, text="Config Services", command=self.click_services) button.grid(sticky=tk.EW) def draw_node_buttons(self) -> None: diff --git a/daemon/core/gui/dialogs/nodeconfig.py b/daemon/core/gui/dialogs/nodeconfig.py index ee0d7b81..c9ca67f5 100644 --- a/daemon/core/gui/dialogs/nodeconfig.py +++ b/daemon/core/gui/dialogs/nodeconfig.py @@ -230,13 +230,8 @@ class NodeConfigDialog(Dialog): if nutils.is_model(self.node): label = ttk.Label(frame, text="Type") label.grid(row=row, column=0, sticky=tk.EW, padx=PADX, pady=PADY) - combobox = ttk.Combobox( - frame, - textvariable=self.type, - values=list(nutils.NODE_MODELS), - state=combo_state, - ) - combobox.grid(row=row, column=1, sticky=tk.EW) + entry = ttk.Entry(frame, textvariable=self.type, state=tk.DISABLED) + entry.grid(row=row, column=1, sticky=tk.EW) row += 1 # container image field @@ -275,7 +270,7 @@ class NodeConfigDialog(Dialog): ifaces_scroll.listbox.bind("<>", self.iface_select) # interfaces - if self.canvas_node.ifaces: + if nutils.is_container(self.node): self.draw_ifaces() self.draw_spacer() diff --git a/daemon/core/gui/dialogs/wirelessconfig.py b/daemon/core/gui/dialogs/wirelessconfig.py new file mode 100644 index 00000000..97e37b5f --- /dev/null +++ b/daemon/core/gui/dialogs/wirelessconfig.py @@ -0,0 +1,55 @@ +import tkinter as tk +from tkinter import ttk +from typing import TYPE_CHECKING, Dict, Optional + +import grpc + +from core.api.grpc.wrappers import ConfigOption, Node +from core.gui.dialogs.dialog import Dialog +from core.gui.themes import PADX, PADY +from core.gui.widgets import ConfigFrame + +if TYPE_CHECKING: + from core.gui.app import Application + from core.gui.graph.node import CanvasNode + + +class WirelessConfigDialog(Dialog): + def __init__(self, app: "Application", canvas_node: "CanvasNode"): + super().__init__(app, f"Wireless Configuration - {canvas_node.core_node.name}") + self.node: Node = canvas_node.core_node + self.config_frame: Optional[ConfigFrame] = None + self.config: Dict[str, ConfigOption] = {} + try: + config = self.node.wireless_config + if not config: + config = self.app.core.get_wireless_config(self.node.id) + self.config: Dict[str, ConfigOption] = config + self.draw() + except grpc.RpcError as e: + self.app.show_grpc_exception("Wireless Config Error", e) + self.has_error: bool = True + self.destroy() + + def draw(self) -> 
None: + self.top.columnconfigure(0, weight=1) + self.top.rowconfigure(0, weight=1) + self.config_frame = ConfigFrame(self.top, self.app, self.config) + self.config_frame.draw_config() + self.config_frame.grid(sticky=tk.NSEW, pady=PADY) + self.draw_buttons() + + def draw_buttons(self) -> None: + frame = ttk.Frame(self.top) + frame.grid(sticky=tk.EW) + for i in range(2): + frame.columnconfigure(i, weight=1) + button = ttk.Button(frame, text="Apply", command=self.click_apply) + button.grid(row=0, column=0, padx=PADX, sticky=tk.EW) + button = ttk.Button(frame, text="Cancel", command=self.destroy) + button.grid(row=0, column=1, sticky=tk.EW) + + def click_apply(self) -> None: + self.config_frame.parse_config() + self.node.wireless_config = self.config + self.destroy() diff --git a/daemon/core/gui/graph/edges.py b/daemon/core/gui/graph/edges.py index 405ef658..82fd0b97 100644 --- a/daemon/core/gui/graph/edges.py +++ b/daemon/core/gui/graph/edges.py @@ -416,6 +416,8 @@ class Edge: self.src_label2 = None self.dst_label = None self.dst_label2 = None + if self.dst: + self.arc_common_edges() def hide(self) -> None: self.hidden = True @@ -507,6 +509,7 @@ class CanvasWirelessEdge(Edge): if self.src.hidden or self.dst.hidden: self.hide() self.set_binding() + self.arc_common_edges() def set_binding(self) -> None: self.src.canvas.tag_bind(self.id, "", self.show_info) @@ -758,6 +761,4 @@ class CanvasEdge(Edge): self.src.delete_antenna() self.app.core.deleted_canvas_edges([self]) super().delete() - if self.dst: - self.arc_common_edges() self.manager.edges.pop(self.token, None) diff --git a/daemon/core/gui/graph/node.py b/daemon/core/gui/graph/node.py index 1de7319f..b3d0aae9 100644 --- a/daemon/core/gui/graph/node.py +++ b/daemon/core/gui/graph/node.py @@ -16,6 +16,7 @@ from core.gui.dialogs.mobilityconfig import MobilityConfigDialog from core.gui.dialogs.nodeconfig import NodeConfigDialog from core.gui.dialogs.nodeconfigservice import NodeConfigServiceDialog from core.gui.dialogs.nodeservice import NodeServiceDialog +from core.gui.dialogs.wirelessconfig import WirelessConfigDialog from core.gui.dialogs.wlanconfig import WlanConfigDialog from core.gui.frames.node import NodeInfoFrame from core.gui.graph import tags @@ -219,6 +220,7 @@ class CanvasNode: # clear existing menu self.context.delete(0, tk.END) is_wlan = self.core_node.type == NodeType.WIRELESS_LAN + is_wireless = self.core_node.type == NodeType.WIRELESS is_emane = self.core_node.type == NodeType.EMANE is_mobility = is_wlan or is_emane if self.app.core.is_runtime(): @@ -231,6 +233,10 @@ class CanvasNode: self.context.add_command( label="WLAN Config", command=self.show_wlan_config ) + if is_wireless: + self.context.add_command( + label="Wireless Config", command=self.show_wireless_config + ) if is_mobility and self.core_node.id in self.app.core.mobility_players: self.context.add_command( label="Mobility Player", command=self.show_mobility_player @@ -268,6 +274,10 @@ class CanvasNode: self.context.add_command( label="WLAN Config", command=self.show_wlan_config ) + if is_wireless: + self.context.add_command( + label="Wireless Config", command=self.show_wireless_config + ) if is_mobility: self.context.add_command( label="Mobility Config", command=self.show_mobility_config @@ -298,7 +308,10 @@ class CanvasNode: other_iface = edge.other_iface(self) label = other_node.core_node.name if other_iface: - label = f"{label}:{other_iface.name}" + iface_label = other_iface.id + if other_iface.name: + iface_label = other_iface.name + label = 
f"{label}:{iface_label}" func_unlink = functools.partial(self.click_unlink, edge) unlink_menu.add_command(label=label, command=func_unlink) themes.style_menu(unlink_menu) @@ -343,6 +356,10 @@ class CanvasNode: dialog = NodeConfigDialog(self.app, self) dialog.show() + def show_wireless_config(self) -> None: + dialog = WirelessConfigDialog(self.app, self) + dialog.show() + def show_wlan_config(self) -> None: dialog = WlanConfigDialog(self.app, self) if not dialog.has_error: diff --git a/daemon/core/gui/images.py b/daemon/core/gui/images.py index 7c5897e5..aed4cfcc 100644 --- a/daemon/core/gui/images.py +++ b/daemon/core/gui/images.py @@ -53,6 +53,7 @@ class ImageEnum(Enum): LINK = "link" HUB = "hub" WLAN = "wlan" + WIRELESS = "wireless" EMANE = "emane" RJ45 = "rj45" TUNNEL = "tunnel" @@ -92,14 +93,15 @@ TYPE_MAP: Dict[Tuple[NodeType, str], ImageEnum] = { (NodeType.DEFAULT, "host"): ImageEnum.HOST, (NodeType.DEFAULT, "mdr"): ImageEnum.MDR, (NodeType.DEFAULT, "prouter"): ImageEnum.PROUTER, - (NodeType.HUB, ""): ImageEnum.HUB, - (NodeType.SWITCH, ""): ImageEnum.SWITCH, - (NodeType.WIRELESS_LAN, ""): ImageEnum.WLAN, - (NodeType.EMANE, ""): ImageEnum.EMANE, - (NodeType.RJ45, ""): ImageEnum.RJ45, - (NodeType.TUNNEL, ""): ImageEnum.TUNNEL, - (NodeType.DOCKER, ""): ImageEnum.DOCKER, - (NodeType.LXC, ""): ImageEnum.LXC, + (NodeType.HUB, None): ImageEnum.HUB, + (NodeType.SWITCH, None): ImageEnum.SWITCH, + (NodeType.WIRELESS_LAN, None): ImageEnum.WLAN, + (NodeType.WIRELESS, None): ImageEnum.WIRELESS, + (NodeType.EMANE, None): ImageEnum.EMANE, + (NodeType.RJ45, None): ImageEnum.RJ45, + (NodeType.TUNNEL, None): ImageEnum.TUNNEL, + (NodeType.DOCKER, None): ImageEnum.DOCKER, + (NodeType.LXC, None): ImageEnum.LXC, } diff --git a/daemon/core/gui/interface.py b/daemon/core/gui/interface.py index d4d09443..83fba104 100644 --- a/daemon/core/gui/interface.py +++ b/daemon/core/gui/interface.py @@ -241,10 +241,10 @@ class InterfaceManager: dst_node = edge.dst.core_node self.determine_subnets(edge.src, edge.dst) src_iface = None - if nutils.is_container(src_node): + if nutils.is_iface_node(src_node): src_iface = self.create_iface(edge.src, edge.linked_wireless) dst_iface = None - if nutils.is_container(dst_node): + if nutils.is_iface_node(dst_node): dst_iface = self.create_iface(edge.dst, edge.linked_wireless) link = Link( type=LinkType.WIRED, @@ -258,22 +258,26 @@ class InterfaceManager: def create_iface(self, canvas_node: CanvasNode, wireless_link: bool) -> Interface: node = canvas_node.core_node - ip4, ip6 = self.get_ips(node) - if wireless_link: - ip4_mask = WIRELESS_IP4_MASK - ip6_mask = WIRELESS_IP6_MASK + if nutils.is_bridge(node): + iface_id = canvas_node.next_iface_id() + iface = Interface(id=iface_id) else: - ip4_mask = IP4_MASK - ip6_mask = IP6_MASK - iface_id = canvas_node.next_iface_id() - name = f"eth{iface_id}" - iface = Interface( - id=iface_id, - name=name, - ip4=ip4, - ip4_mask=ip4_mask, - ip6=ip6, - ip6_mask=ip6_mask, - ) + ip4, ip6 = self.get_ips(node) + if wireless_link: + ip4_mask = WIRELESS_IP4_MASK + ip6_mask = WIRELESS_IP6_MASK + else: + ip4_mask = IP4_MASK + ip6_mask = IP6_MASK + iface_id = canvas_node.next_iface_id() + name = f"eth{iface_id}" + iface = Interface( + id=iface_id, + name=name, + ip4=ip4, + ip4_mask=ip4_mask, + ip6=ip6, + ip6_mask=ip6_mask, + ) logger.info("create node(%s) interface(%s)", node.name, iface) return iface diff --git a/daemon/core/gui/menubar.py b/daemon/core/gui/menubar.py index e2df2f92..16e57cb6 100644 --- a/daemon/core/gui/menubar.py +++ 
b/daemon/core/gui/menubar.py @@ -235,7 +235,11 @@ class Menubar(tk.Menu): menu.add_command( label="Configure Throughput", command=self.click_config_throughput ) - menu.add_checkbutton(label="Enable Throughput?", command=self.click_throughput) + menu.add_checkbutton( + label="Enable Throughput?", + command=self.click_throughput, + variable=self.core.show_throughputs, + ) widget_menu.add_cascade(label="Throughput", menu=menu) def draw_widgets_menu(self) -> None: @@ -393,7 +397,7 @@ class Menubar(tk.Menu): dialog.show() def click_throughput(self) -> None: - if not self.core.handling_throughputs: + if self.core.show_throughputs.get(): self.core.enable_throughputs() else: self.core.cancel_throughputs() diff --git a/daemon/core/gui/nodeutils.py b/daemon/core/gui/nodeutils.py index 537cedf2..0357f23d 100644 --- a/daemon/core/gui/nodeutils.py +++ b/daemon/core/gui/nodeutils.py @@ -18,12 +18,16 @@ NETWORK_NODES: List["NodeDraw"] = [] NODE_ICONS = {} CONTAINER_NODES: Set[NodeType] = {NodeType.DEFAULT, NodeType.DOCKER, NodeType.LXC} IMAGE_NODES: Set[NodeType] = {NodeType.DOCKER, NodeType.LXC} -WIRELESS_NODES: Set[NodeType] = {NodeType.WIRELESS_LAN, NodeType.EMANE} +WIRELESS_NODES: Set[NodeType] = { + NodeType.WIRELESS_LAN, + NodeType.EMANE, + NodeType.WIRELESS, +} RJ45_NODES: Set[NodeType] = {NodeType.RJ45} BRIDGE_NODES: Set[NodeType] = {NodeType.HUB, NodeType.SWITCH} IGNORE_NODES: Set[NodeType] = {NodeType.CONTROL_NET} MOBILITY_NODES: Set[NodeType] = {NodeType.WIRELESS_LAN, NodeType.EMANE} -NODE_MODELS: Set[str] = {"router", "host", "PC", "mdr", "prouter"} +NODE_MODELS: Set[str] = {"router", "PC", "mdr", "prouter"} ROUTER_NODES: Set[str] = {"router", "mdr"} ANTENNA_ICON: Optional[PhotoImage] = None @@ -46,6 +50,7 @@ def setup() -> None: (ImageEnum.HUB, NodeType.HUB, "Hub"), (ImageEnum.SWITCH, NodeType.SWITCH, "Switch"), (ImageEnum.WLAN, NodeType.WIRELESS_LAN, "WLAN"), + (ImageEnum.WIRELESS, NodeType.WIRELESS, "Wireless"), (ImageEnum.EMANE, NodeType.EMANE, "EMANE"), (ImageEnum.RJ45, NodeType.RJ45, "RJ45"), (ImageEnum.TUNNEL, NodeType.TUNNEL, "Tunnel"), @@ -97,6 +102,10 @@ def is_custom(node: Node) -> bool: return is_model(node) and node.model not in NODE_MODELS +def is_iface_node(node: Node) -> bool: + return is_container(node) or is_bridge(node) + + def get_custom_services(gui_config: GuiConfig, name: str) -> List[str]: for custom_node in gui_config.nodes: if custom_node.name == name: @@ -114,7 +123,7 @@ def _get_custom_file(config: GuiConfig, name: str) -> Optional[str]: def get_icon(node: Node, app: "Application") -> PhotoImage: scale = app.app_scale image = None - # node icon was overriden with a specific value + # node icon was overridden with a specific value if node.icon: try: image = images.from_file(node.icon, width=images.NODE_SIZE, scale=scale) diff --git a/daemon/core/gui/widgets.py b/daemon/core/gui/widgets.py index 1f6cd637..7dfd2666 100644 --- a/daemon/core/gui/widgets.py +++ b/daemon/core/gui/widgets.py @@ -257,6 +257,13 @@ class CodeText(ttk.Frame): yscrollbar.grid(row=0, column=1, sticky=tk.NS) self.text.configure(yscrollcommand=yscrollbar.set) + def get_text(self) -> str: + return self.text.get(1.0, tk.END) + + def set_text(self, text: str) -> None: + self.text.delete(1.0, tk.END) + self.text.insert(tk.END, text.rstrip()) + class Spinbox(ttk.Entry): def __init__(self, master: tk.BaseWidget = None, **kwargs: Any) -> None: diff --git a/daemon/core/location/mobility.py b/daemon/core/location/mobility.py index ebcb8fe4..28040650 100644 --- a/daemon/core/location/mobility.py +++ 
b/daemon/core/location/mobility.py @@ -225,7 +225,6 @@ class WirelessModel(ConfigurableOptions): """ config_type: RegisterTlvs = RegisterTlvs.WIRELESS - bitmap: str = None position_callback: Callable[[CoreInterface], None] = None def __init__(self, session: "Session", _id: int) -> None: @@ -321,7 +320,8 @@ class BasicRangeModel(WirelessModel): loss=self.loss, jitter=self.jitter, ) - iface.config(options) + iface.options.update(options) + iface.set_config() def get_position(self, iface: CoreInterface) -> Tuple[float, float, float]: """ @@ -627,7 +627,7 @@ class WayPointMobility(WirelessModel): moved_ifaces.append(iface) # calculate all ranges after moving nodes; this saves calculations - self.net.model.update(moved_ifaces) + self.net.wireless_model.update(moved_ifaces) # TODO: check session state self.session.event_loop.add_event(0.001 * self.refresh_ms, self.runround) @@ -705,7 +705,7 @@ class WayPointMobility(WirelessModel): x, y, z = self.initial[node.id].coords self.setnodeposition(node, x, y, z) moved_ifaces.append(iface) - self.net.model.update(moved_ifaces) + self.net.wireless_model.update(moved_ifaces) def addwaypoint( self, diff --git a/daemon/core/nodes/base.py b/daemon/core/nodes/base.py index 34bf6601..d3adf5c3 100644 --- a/daemon/core/nodes/base.py +++ b/daemon/core/nodes/base.py @@ -3,8 +3,10 @@ Defines the base logic for nodes used within core. """ import abc import logging +import shlex import shutil import threading +from dataclasses import dataclass, field from pathlib import Path from threading import RLock from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Type, Union @@ -13,11 +15,10 @@ import netaddr from core import utils from core.configservice.dependencies import ConfigServiceDependencies -from core.emulator.data import InterfaceData, LinkData -from core.emulator.enumerations import LinkTypes, MessageFlags, NodeTypes +from core.emulator.data import InterfaceData, LinkOptions from core.errors import CoreCommandError, CoreError from core.executables import BASH, MOUNT, TEST, VCMD, VNODED -from core.nodes.interface import DEFAULT_MTU, CoreInterface, TunTap, Veth +from core.nodes.interface import DEFAULT_MTU, CoreInterface from core.nodes.netclient import LinuxNetClient, get_net_client logger = logging.getLogger(__name__) @@ -34,19 +35,106 @@ if TYPE_CHECKING: PRIVATE_DIRS: List[Path] = [Path("/var/run"), Path("/var/log")] +@dataclass +class Position: + """ + Helper class for Cartesian coordinate position + """ + + x: float = 0.0 + y: float = 0.0 + z: float = 0.0 + lon: float = None + lat: float = None + alt: float = None + + def set(self, x: float = None, y: float = None, z: float = None) -> bool: + """ + Returns True if the position has actually changed. + + :param x: x position + :param y: y position + :param z: z position + :return: True if position changed, False otherwise + """ + if self.x == x and self.y == y and self.z == z: + return False + self.x = x + self.y = y + self.z = z + return True + + def get(self) -> Tuple[float, float, float]: + """ + Retrieve x,y,z position. + + :return: x,y,z position tuple + """ + return self.x, self.y, self.z + + def has_geo(self) -> bool: + return all(x is not None for x in [self.lon, self.lat, self.alt]) + + def set_geo(self, lon: float, lat: float, alt: float) -> None: + """ + Set geo position lon, lat, alt. 
+ + :param lon: longitude value + :param lat: latitude value + :param alt: altitude value + :return: nothing + """ + self.lon = lon + self.lat = lat + self.alt = alt + + def get_geo(self) -> Tuple[float, float, float]: + """ + Retrieve current geo position lon, lat, alt. + + :return: lon, lat, alt position tuple + """ + return self.lon, self.lat, self.alt + + +@dataclass +class NodeOptions: + """ + Base options for configuring a node. + """ + + canvas: int = None + """id of canvas for display within gui""" + icon: str = None + """custom icon for display, None for default""" + + +@dataclass +class CoreNodeOptions(NodeOptions): + model: str = "PC" + """model is used for providing a default set of services""" + services: List[str] = field(default_factory=list) + """services to start within node""" + config_services: List[str] = field(default_factory=list) + """config services to start within node""" + directory: Path = None + """directory to define node, defaults to path under the session directory""" + legacy: bool = False + """legacy nodes default to standard services""" + + class NodeBase(abc.ABC): """ Base class for CORE nodes (nodes and networks) """ - apitype: Optional[NodeTypes] = None - def __init__( self, session: "Session", _id: int = None, name: str = None, server: "DistributedServer" = None, + options: NodeOptions = None, ) -> None: """ Creates a NodeBase instance. @@ -56,27 +144,29 @@ class NodeBase(abc.ABC): :param name: object name :param server: remote server node will run on, default is None for localhost + :param options: options to create node with """ - self.session: "Session" = session - if _id is None: - _id = session.next_node_id() - self.id: int = _id - if name is None: - name = f"o{self.id}" - self.name: str = name + self.id: int = _id if _id is not None else self.session.next_node_id() + self.name: str = name or f"{self.__class__.__name__}{self.id}" self.server: "DistributedServer" = server - self.type: Optional[str] = None + self.model: Optional[str] = None self.services: CoreServices = [] self.ifaces: Dict[int, CoreInterface] = {} self.iface_id: int = 0 - self.canvas: Optional[int] = None - self.icon: Optional[str] = None self.position: Position = Position() self.up: bool = False + self.lock: RLock = RLock() self.net_client: LinuxNetClient = get_net_client( self.session.use_ovs(), self.host_cmd ) + options = options if options else NodeOptions() + self.canvas: Optional[int] = options.canvas + self.icon: Optional[str] = options.icon + + @classmethod + def create_options(cls) -> NodeOptions: + return NodeOptions() @abc.abstractmethod def startup(self) -> None: @@ -96,6 +186,18 @@ class NodeBase(abc.ABC): """ raise NotImplementedError + @abc.abstractmethod + def adopt_iface(self, iface: CoreInterface, name: str) -> None: + """ + Adopt an interface, placing within network namespacing for containers + and setting to bridge masters for network like nodes. + + :param iface: interface to adopt + :param name: proper name to use for interface + :return: nothing + """ + raise NotImplementedError + def host_cmd( self, args: str, @@ -120,6 +222,19 @@ class NodeBase(abc.ABC): else: return self.server.remote_cmd(args, env, cwd, wait) + def cmd(self, args: str, wait: bool = True, shell: bool = False) -> str: + """ + Runs a command that is in the context of a node, default is to run a standard + host command. 
+ + :param args: command to run + :param wait: True to wait for status, False otherwise + :param shell: True to use shell, False otherwise + :return: combined stdout and stderr + :raises CoreCommandError: when a non-zero exit status occurs + """ + return self.host_cmd(args, wait=wait, shell=shell) + def setposition(self, x: float = None, y: float = None, z: float = None) -> bool: """ Set the (x,y,z) position of the object. @@ -139,6 +254,71 @@ class NodeBase(abc.ABC): """ return self.position.get() + def create_iface( + self, iface_data: InterfaceData = None, options: LinkOptions = None + ) -> CoreInterface: + """ + Creates an interface and adopts it to a node. + + :param iface_data: data to create interface with + :param options: options to create interface with + :return: created interface + """ + with self.lock: + if iface_data and iface_data.id is not None: + if iface_data.id in self.ifaces: + raise CoreError( + f"node({self.id}) interface({iface_data.id}) already exists" + ) + iface_id = iface_data.id + else: + iface_id = self.next_iface_id() + mtu = DEFAULT_MTU + if iface_data and iface_data.mtu is not None: + mtu = iface_data.mtu + unique_name = f"{self.id}.{iface_id}.{self.session.short_session_id()}" + name = f"veth{unique_name}" + localname = f"beth{unique_name}" + iface = CoreInterface( + iface_id, + name, + localname, + self.session.use_ovs(), + mtu, + self, + self.server, + ) + if iface_data: + if iface_data.mac: + iface.set_mac(iface_data.mac) + for ip in iface_data.get_ips(): + iface.add_ip(ip) + if iface_data.name: + name = iface_data.name + if options: + iface.options.update(options) + self.ifaces[iface_id] = iface + if self.up: + iface.startup() + self.adopt_iface(iface, name) + else: + iface.name = name + return iface + + def delete_iface(self, iface_id: int) -> CoreInterface: + """ + Delete an interface. + + :param iface_id: interface id to delete + :return: the removed interface + """ + if iface_id not in self.ifaces: + raise CoreError(f"node({self.name}) interface({iface_id}) does not exist") + iface = self.ifaces.pop(iface_id) + logger.info("node(%s) removing interface(%s)", self.name, iface.name) + iface.shutdown() + return iface + def get_iface(self, iface_id: int) -> CoreInterface: """ Retrieve interface based on id. @@ -191,15 +371,6 @@ class NodeBase(abc.ABC): self.iface_id += 1 return iface_id - def links(self, flags: MessageFlags = MessageFlags.NONE) -> List[LinkData]: - """ - Build link data for this node. - - :param flags: message flags - :return: list of link data - """ - return [] - class CoreNodeBase(NodeBase): """ @@ -212,6 +383,7 @@ class CoreNodeBase(NodeBase): _id: int = None, name: str = None, server: "DistributedServer" = None, + options: NodeOptions = None, ) -> None: """ Create a CoreNodeBase instance. 
@@ -222,19 +394,11 @@ class CoreNodeBase(NodeBase): :param server: remote server node will run on, default is None for localhost """ - super().__init__(session, _id, name, server) + super().__init__(session, _id, name, server, options) self.config_services: Dict[str, "ConfigService"] = {} self.directory: Optional[Path] = None self.tmpnodedir: bool = False - @abc.abstractmethod - def startup(self) -> None: - raise NotImplementedError - - @abc.abstractmethod - def shutdown(self) -> None: - raise NotImplementedError - @abc.abstractmethod def create_dir(self, dir_path: Path) -> None: """ @@ -270,19 +434,6 @@ class CoreNodeBase(NodeBase): """ raise NotImplementedError - @abc.abstractmethod - def cmd(self, args: str, wait: bool = True, shell: bool = False) -> str: - """ - Runs a command within a node container. - - :param args: command to run - :param wait: True to wait for status, False otherwise - :param shell: True to use shell, False otherwise - :return: combined stdout and stderr - :raises CoreCommandError: when a non-zero exit status occurs - """ - raise NotImplementedError - @abc.abstractmethod def termcmdstring(self, sh: str) -> str: """ @@ -293,19 +444,6 @@ class CoreNodeBase(NodeBase): """ raise NotImplementedError - @abc.abstractmethod - def new_iface( - self, net: "CoreNetworkBase", iface_data: InterfaceData - ) -> CoreInterface: - """ - Create a new interface. - - :param net: network to associate with - :param iface_data: interface data for new interface - :return: interface index - """ - raise NotImplementedError - @abc.abstractmethod def path_exists(self, path: str) -> bool: """ @@ -318,7 +456,7 @@ class CoreNodeBase(NodeBase): def host_path(self, path: Path, is_dir: bool = False) -> Path: """ - Return the name of a node"s file on the host filesystem. + Return the name of a node's file on the host filesystem. :param path: path to translate to host path :param is_dir: True if path is a directory path, False otherwise @@ -387,60 +525,12 @@ class CoreNodeBase(NodeBase): :return: nothing """ - preserve = self.session.options.get_config("preservedir") == "1" + preserve = self.session.options.get_int("preservedir") == 1 if preserve: return if self.tmpnodedir: self.host_cmd(f"rm -rf {self.directory}") - def add_iface(self, iface: CoreInterface, iface_id: int) -> None: - """ - Add network interface to node and set the network interface index if successful. - - :param iface: network interface to add - :param iface_id: interface id - :return: nothing - """ - if iface_id in self.ifaces: - raise CoreError(f"interface({iface_id}) already exists") - self.ifaces[iface_id] = iface - iface.node_id = iface_id - - def delete_iface(self, iface_id: int) -> None: - """ - Delete a network interface - - :param iface_id: interface index to delete - :return: nothing - """ - if iface_id not in self.ifaces: - raise CoreError(f"node({self.name}) interface({iface_id}) does not exist") - iface = self.ifaces.pop(iface_id) - logger.info("node(%s) removing interface(%s)", self.name, iface.name) - iface.detachnet() - iface.shutdown() - - def attachnet(self, iface_id: int, net: "CoreNetworkBase") -> None: - """ - Attach a network. - - :param iface_id: interface of index to attach - :param net: network to attach - :return: nothing - """ - iface = self.get_iface(iface_id) - iface.attachnet(net) - - def detachnet(self, iface_id: int) -> None: - """ - Detach network interface. 
- - :param iface_id: interface id to detach - :return: nothing - """ - iface = self.get_iface(iface_id) - iface.detachnet() - def setposition(self, x: float = None, y: float = None, z: float = None) -> None: """ Set position. @@ -455,40 +545,19 @@ class CoreNodeBase(NodeBase): for iface in self.get_ifaces(): iface.setposition() - def commonnets( - self, node: "CoreNodeBase", want_ctrl: bool = False - ) -> List[Tuple["CoreNetworkBase", CoreInterface, CoreInterface]]: - """ - Given another node or net object, return common networks between - this node and that object. A list of tuples is returned, with each tuple - consisting of (network, interface1, interface2). - - :param node: node to get common network with - :param want_ctrl: flag set to determine if control network are wanted - :return: tuples of common networks - """ - common = [] - for iface1 in self.get_ifaces(control=want_ctrl): - for iface2 in node.get_ifaces(): - if iface1.net == iface2.net: - common.append((iface1.net, iface1, iface2)) - return common - class CoreNode(CoreNodeBase): """ Provides standard core node logic. """ - apitype: NodeTypes = NodeTypes.DEFAULT - def __init__( self, session: "Session", _id: int = None, name: str = None, - directory: Path = None, server: "DistributedServer" = None, + options: CoreNodeOptions = None, ) -> None: """ Create a CoreNode instance. @@ -496,19 +565,37 @@ class CoreNode(CoreNodeBase): :param session: core session instance :param _id: object id :param name: object name - :param directory: node directory :param server: remote server node will run on, default is None for localhost + :param options: options to create node with """ - super().__init__(session, _id, name, server) - self.directory: Optional[Path] = directory + options = options or CoreNodeOptions() + super().__init__(session, _id, name, server, options) + self.directory: Optional[Path] = options.directory self.ctrlchnlname: Path = self.session.directory / self.name self.pid: Optional[int] = None - self.lock: RLock = RLock() self._mounts: List[Tuple[Path, Path]] = [] self.node_net_client: LinuxNetClient = self.create_node_net_client( self.session.use_ovs() ) + options = options or CoreNodeOptions() + self.model: Optional[str] = options.model + # setup services + if options.legacy or options.services: + logger.debug("set node type: %s", self.model) + self.session.services.add_services(self, self.model, options.services) + # add config services + config_services = options.config_services + if not options.legacy and not config_services and not options.services: + config_services = self.session.services.default_services.get(self.model, []) + logger.info("setting node config services: %s", config_services) + for name in config_services: + service_class = self.session.service_manager.get_service(name) + self.add_config_service(service_class) + + @classmethod + def create_options(cls) -> CoreNodeOptions: + return CoreNodeOptions() def create_node_net_client(self, use_ovs: bool) -> LinuxNetClient: """ @@ -585,6 +672,10 @@ class CoreNode(CoreNodeBase): self._mounts = [] # shutdown all interfaces for iface in self.get_ifaces(): + try: + self.node_net_client.device_flush(iface.name) + except CoreCommandError: + pass iface.shutdown() # kill node process if present try: @@ -604,7 +695,7 @@ class CoreNode(CoreNodeBase): finally: self.rmnodedir() - def _create_cmd(self, args: str, shell: bool = False) -> str: + def create_cmd(self, args: str, shell: bool = False) -> str: """ Create command used to run commands within the context of a node. 
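
A minimal sketch of how CoreNodeOptions selects services for a CoreNode, assuming an existing Session named session; the legacy service names are illustrative:

    from core.nodes.base import CoreNode

    modern = CoreNode.create_options()
    modern.model = "router"                 # default config services come from the model
    router = session.create_node(CoreNode, start=False, name="r1", options=modern)

    legacy = CoreNode.create_options()
    legacy.legacy = True                    # fall back to the legacy service framework
    legacy.services = ["zebra", "OSPFv3MDR"]
    old_style = session.create_node(CoreNode, start=False, name="r2", options=legacy)
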
@@ -613,7 +704,7 @@ class CoreNode(CoreNodeBase): :return: node command """ if shell: - args = f'{BASH} -c "{args}"' + args = f"{BASH} -c {shlex.quote(args)}" return f"{VCMD} -c {self.ctrlchnlname} -- {args}" def cmd(self, args: str, wait: bool = True, shell: bool = False) -> str: @@ -627,7 +718,7 @@ class CoreNode(CoreNodeBase): :return: combined stdout and stderr :raises CoreCommandError: when a non-zero exit status occurs """ - args = self._create_cmd(args, shell) + args = self.create_cmd(args, shell) if self.server is None: return utils.cmd(args, wait=wait, shell=shell) else: @@ -653,7 +744,7 @@ class CoreNode(CoreNodeBase): :param sh: shell to execute command in :return: str """ - terminal = self._create_cmd(sh) + terminal = self.create_cmd(sh) if self.server is None: return terminal else: @@ -691,150 +782,6 @@ class CoreNode(CoreNodeBase): self.cmd(f"{MOUNT} -n --bind {src_path} {target_path}") self._mounts.append((src_path, target_path)) - def next_iface_id(self) -> int: - """ - Retrieve a new interface index. - - :return: new interface index - """ - with self.lock: - return super().next_iface_id() - - def newveth(self, iface_id: int = None, ifname: str = None, mtu: int = None) -> int: - """ - Create a new interface. - - :param iface_id: id for the new interface - :param ifname: name for the new interface - :param mtu: mtu for interface - :return: nothing - """ - with self.lock: - mtu = mtu if mtu is not None else DEFAULT_MTU - iface_id = iface_id if iface_id is not None else self.next_iface_id() - ifname = ifname if ifname is not None else f"eth{iface_id}" - sessionid = self.session.short_session_id() - try: - suffix = f"{self.id:x}.{iface_id}.{sessionid}" - except TypeError: - suffix = f"{self.id}.{iface_id}.{sessionid}" - localname = f"veth{suffix}" - name = f"{localname}p" - veth = Veth(self.session, name, localname, mtu, self.server, self) - veth.adopt_node(iface_id, ifname, self.up) - return iface_id - - def newtuntap(self, iface_id: int = None, ifname: str = None) -> int: - """ - Create a new tunnel tap. - - :param iface_id: interface id - :param ifname: interface name - :return: interface index - """ - with self.lock: - iface_id = iface_id if iface_id is not None else self.next_iface_id() - ifname = ifname if ifname is not None else f"eth{iface_id}" - sessionid = self.session.short_session_id() - localname = f"tap{self.id}.{iface_id}.{sessionid}" - name = ifname - tuntap = TunTap(self.session, name, localname, node=self) - if self.up: - tuntap.startup() - try: - self.add_iface(tuntap, iface_id) - except CoreError as e: - tuntap.shutdown() - raise e - return iface_id - - def set_mac(self, iface_id: int, mac: str) -> None: - """ - Set hardware address for an interface. - - :param iface_id: id of interface to set hardware address for - :param mac: mac address to set - :return: nothing - :raises CoreCommandError: when a non-zero exit status occurs - """ - iface = self.get_iface(iface_id) - iface.set_mac(mac) - if self.up: - self.node_net_client.device_mac(iface.name, str(iface.mac)) - - def add_ip(self, iface_id: int, ip: str) -> None: - """ - Add an ip address to an interface in the format "10.0.0.1/24". 
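
A minimal illustration of the quoting change in create_cmd(): with shell=True the whole command line is handed to bash as one safely quoted argument, so embedded quotes and operators survive; node.cmd() is the normal entry point:

    import shlex

    args = 'echo "hello world" && ls /tmp'
    print(f"bash -c {shlex.quote(args)}")
    # bash -c 'echo "hello world" && ls /tmp'

    # reached through the node API in practice, e.g.:
    # output = node.cmd('echo "hello world" && ls /tmp', shell=True)
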
- - :param iface_id: id of interface to add address to - :param ip: address to add to interface - :return: nothing - :raises CoreError: when ip address provided is invalid - :raises CoreCommandError: when a non-zero exit status occurs - """ - iface = self.get_iface(iface_id) - iface.add_ip(ip) - if self.up: - # ipv4 check - broadcast = None - if netaddr.valid_ipv4(ip): - broadcast = "+" - self.node_net_client.create_address(iface.name, ip, broadcast) - - def remove_ip(self, iface_id: int, ip: str) -> None: - """ - Remove an ip address from an interface in the format "10.0.0.1/24". - - :param iface_id: id of interface to delete address from - :param ip: ip address to remove from interface - :return: nothing - :raises CoreError: when ip address provided is invalid - :raises CoreCommandError: when a non-zero exit status occurs - """ - iface = self.get_iface(iface_id) - iface.remove_ip(ip) - if self.up: - self.node_net_client.delete_address(iface.name, ip) - - def ifup(self, iface_id: int) -> None: - """ - Bring an interface up. - - :param iface_id: index of interface to bring up - :return: nothing - """ - if self.up: - iface = self.get_iface(iface_id) - self.node_net_client.device_up(iface.name) - - def new_iface( - self, net: "CoreNetworkBase", iface_data: InterfaceData - ) -> CoreInterface: - """ - Create a new network interface. - - :param net: network to associate with - :param iface_data: interface data for new interface - :return: interface index - """ - with self.lock: - if net.has_custom_iface: - return net.custom_iface(self, iface_data) - else: - iface_id = iface_data.id - if iface_id is not None and iface_id in self.ifaces: - raise CoreError( - f"node({self.name}) already has interface({iface_id})" - ) - iface_id = self.newveth(iface_id, iface_data.name, iface_data.mtu) - self.attachnet(iface_id, net) - if iface_data.mac: - self.set_mac(iface_id, iface_data.mac) - for ip in iface_data.get_ips(): - self.add_ip(iface_id, ip) - self.ifup(iface_id) - return self.get_iface(iface_id) - def _find_parent_path(self, path: Path) -> Optional[Path]: """ Check if there is a mounted parent directory created for this node. @@ -910,21 +857,62 @@ class CoreNode(CoreNodeBase): if mode is not None: self.host_cmd(f"chmod {mode:o} {host_path}") + def adopt_iface(self, iface: CoreInterface, name: str) -> None: + """ + Adopt interface to the network namespace of the node and setting + the proper name provided. + + :param iface: interface to adopt + :param name: proper name for interface + :return: nothing + """ + # TODO: container, checksums off (container only?) + # TODO: container, get flow id (container only?) 
+ # validate iface belongs to node and get id + iface_id = self.get_iface_id(iface) + if iface_id == -1: + raise CoreError(f"adopting unknown iface({iface.name})") + # add iface to container namespace + self.net_client.device_ns(iface.name, str(self.pid)) + # use default iface name for container, if a unique name was not provided + if iface.name == name: + name = f"eth{iface_id}" + self.node_net_client.device_name(iface.name, name) + iface.name = name + # turn checksums off + self.node_net_client.checksums_off(iface.name) + # retrieve flow id for container + iface.flow_id = self.node_net_client.get_ifindex(iface.name) + logger.debug("interface flow index: %s - %s", iface.name, iface.flow_id) + # set mac address + if iface.mac: + self.node_net_client.device_mac(iface.name, str(iface.mac)) + logger.debug("interface mac: %s - %s", iface.name, iface.mac) + # set all addresses + for ip in iface.ips(): + # ipv4 check + broadcast = None + if netaddr.valid_ipv4(ip): + broadcast = "+" + self.node_net_client.create_address(iface.name, str(ip), broadcast) + # configure iface options + iface.set_config() + # set iface up + self.node_net_client.device_up(iface.name) + class CoreNetworkBase(NodeBase): """ Base class for networks """ - linktype: LinkTypes = LinkTypes.WIRED - has_custom_iface: bool = False - def __init__( self, session: "Session", _id: int, name: str, server: "DistributedServer" = None, + options: NodeOptions = None, ) -> None: """ Create a CoreNetworkBase instance. @@ -934,64 +922,15 @@ class CoreNetworkBase(NodeBase): :param name: object name :param server: remote server node will run on, default is None for localhost + :param options: options to create node with """ - super().__init__(session, _id, name, server) - self.mtu: int = DEFAULT_MTU + super().__init__(session, _id, name, server, options) + mtu = self.session.options.get_int("mtu") + self.mtu: int = mtu if mtu > 0 else DEFAULT_MTU self.brname: Optional[str] = None self.linked: Dict[CoreInterface, Dict[CoreInterface, bool]] = {} self.linked_lock: threading.Lock = threading.Lock() - @abc.abstractmethod - def startup(self) -> None: - """ - Each object implements its own startup method. - - :return: nothing - """ - raise NotImplementedError - - @abc.abstractmethod - def shutdown(self) -> None: - """ - Each object implements its own shutdown method. - - :return: nothing - """ - raise NotImplementedError - - @abc.abstractmethod - def linknet(self, net: "CoreNetworkBase") -> CoreInterface: - """ - Link network to another. - - :param net: network to link with - :return: created interface - """ - raise NotImplementedError - - @abc.abstractmethod - def custom_iface(self, node: CoreNode, iface_data: InterfaceData) -> CoreInterface: - """ - Defines custom logic for creating an interface, if required. - - :param node: node to create interface for - :param iface_data: data for creating interface - :return: created interface - """ - raise NotImplementedError - - def get_linked_iface(self, net: "CoreNetworkBase") -> Optional[CoreInterface]: - """ - Return the interface that links this net with another net. - - :param net: interface to get link for - :return: interface the provided network is linked to - """ - for iface in self.get_ifaces(): - if iface.othernet == net: - return iface - return None - def attach(self, iface: CoreInterface) -> None: """ Attach network interface. 
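adopt_iface moves the already-created veth peer into the node's namespace, renames it, disables checksums, assigns addresses, and brings it up. A rough sketch of the equivalent command sequence, assuming the net clients wrap iproute2 and ethtool; the pid, interface names, ethtool flags, and address below are invented for illustration:

```python
pid = 12345                    # container/namespace pid, invented
host_side = "veth3a.0.abcd"    # node-facing veth peer created on the host
iface_id = 0
name = f"eth{iface_id}"        # default name when no unique name was provided

host_cmds = [
    f"ip link set {host_side} netns {pid}",                 # device_ns
]
in_node_cmds = [
    f"ip link set {host_side} name {name}",                 # device_name
    f"ethtool -K {name} rx off tx off",                     # checksums_off (assumed flags)
    f"ip address add 10.0.0.1/24 broadcast + dev {name}",   # create_address, ipv4 broadcast "+"
    f"ip link set {name} up",                               # device_up
]
print("\n".join(host_cmds + in_node_cmds))
```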
@@ -999,9 +938,10 @@ class CoreNetworkBase(NodeBase): :param iface: network interface to attach :return: nothing """ - i = self.next_iface_id() - self.ifaces[i] = iface - iface.net_id = i + iface_id = self.next_iface_id() + self.ifaces[iface_id] = iface + iface.net = self + iface.net_id = iface_id with self.linked_lock: self.linked[iface] = {} @@ -1013,118 +953,7 @@ class CoreNetworkBase(NodeBase): :return: nothing """ del self.ifaces[iface.net_id] + iface.net = None iface.net_id = None with self.linked_lock: del self.linked[iface] - - def links(self, flags: MessageFlags = MessageFlags.NONE) -> List[LinkData]: - """ - Build link data objects for this network. Each link object describes a link - between this network and a node. - - :param flags: message type - :return: list of link data - """ - all_links = [] - # build a link message from this network node to each node having a - # connected interface - for iface in self.get_ifaces(): - unidirectional = 0 - linked_node = iface.node - if linked_node is None: - # two layer-2 switches/hubs linked together - if not iface.othernet: - continue - linked_node = iface.othernet - if linked_node.id == self.id: - continue - if iface.local_options != iface.options: - unidirectional = 1 - iface_data = iface.get_data() - link_data = LinkData( - message_type=flags, - type=self.linktype, - node1_id=self.id, - node2_id=linked_node.id, - iface2=iface_data, - options=iface.local_options, - ) - link_data.options.unidirectional = unidirectional - all_links.append(link_data) - if unidirectional: - link_data = LinkData( - message_type=MessageFlags.NONE, - type=self.linktype, - node1_id=linked_node.id, - node2_id=self.id, - options=iface.options, - ) - link_data.options.unidirectional = unidirectional - all_links.append(link_data) - return all_links - - -class Position: - """ - Helper class for Cartesian coordinate position - """ - - def __init__(self, x: float = None, y: float = None, z: float = None) -> None: - """ - Creates a Position instance. - - :param x: x position - :param y: y position - :param z: z position - """ - self.x: float = x - self.y: float = y - self.z: float = z - self.lon: Optional[float] = None - self.lat: Optional[float] = None - self.alt: Optional[float] = None - - def set(self, x: float = None, y: float = None, z: float = None) -> bool: - """ - Returns True if the position has actually changed. - - :param x: x position - :param y: y position - :param z: z position - :return: True if position changed, False otherwise - """ - if self.x == x and self.y == y and self.z == z: - return False - self.x = x - self.y = y - self.z = z - return True - - def get(self) -> Tuple[float, float, float]: - """ - Retrieve x,y,z position. - - :return: x,y,z position tuple - """ - return self.x, self.y, self.z - - def set_geo(self, lon: float, lat: float, alt: float) -> None: - """ - Set geo position lon, lat, alt. - - :param lon: longitude value - :param lat: latitude value - :param alt: altitude value - :return: nothing - """ - self.lon = lon - self.lat = lat - self.alt = alt - - def get_geo(self) -> Tuple[float, float, float]: - """ - Retrieve current geo position lon, lat, alt. 
- - :return: lon, lat, alt position tuple - """ - return self.lon, self.lat, self.alt diff --git a/daemon/core/nodes/docker.py b/daemon/core/nodes/docker.py index d5e928de..45d2b892 100644 --- a/daemon/core/nodes/docker.py +++ b/daemon/core/nodes/docker.py @@ -1,112 +1,114 @@ import json import logging +import shlex +from dataclasses import dataclass, field from pathlib import Path from tempfile import NamedTemporaryFile -from typing import TYPE_CHECKING, Callable, Dict, Optional +from typing import TYPE_CHECKING, Dict, List, Tuple -from core import utils from core.emulator.distributed import DistributedServer -from core.emulator.enumerations import NodeTypes -from core.errors import CoreCommandError -from core.nodes.base import CoreNode -from core.nodes.netclient import LinuxNetClient, get_net_client +from core.errors import CoreCommandError, CoreError +from core.executables import BASH +from core.nodes.base import CoreNode, CoreNodeOptions logger = logging.getLogger(__name__) if TYPE_CHECKING: from core.emulator.session import Session +DOCKER: str = "docker" -class DockerClient: - def __init__(self, name: str, image: str, run: Callable[..., str]) -> None: - self.name: str = name - self.image: str = image - self.run: Callable[..., str] = run - self.pid: Optional[str] = None - def create_container(self) -> str: - self.run( - f"docker run -td --init --net=none --hostname {self.name} " - f"--name {self.name} --sysctl net.ipv6.conf.all.disable_ipv6=0 " - f"--privileged {self.image} /bin/bash" - ) - self.pid = self.get_pid() - return self.pid +@dataclass +class DockerOptions(CoreNodeOptions): + image: str = "ubuntu" + """image used when creating container""" + binds: List[Tuple[str, str]] = field(default_factory=list) + """bind mount source and destinations to setup within container""" + volumes: List[Tuple[str, str, bool, bool]] = field(default_factory=list) + """ + volume mount source, destination, unique, delete to setup within container - def get_info(self) -> Dict: - args = f"docker inspect {self.name}" - output = self.run(args) - data = json.loads(output) - if not data: - raise CoreCommandError(1, args, f"docker({self.name}) not present") - return data[0] + unique is True for node unique volume naming + delete is True for deleting volume mount during shutdown + """ - def is_alive(self) -> bool: - try: - data = self.get_info() - return data["State"]["Running"] - except CoreCommandError: - return False - def stop_container(self) -> None: - self.run(f"docker rm -f {self.name}") - - def check_cmd(self, cmd: str, wait: bool = True, shell: bool = False) -> str: - logger.info("docker cmd output: %s", cmd) - return utils.cmd(f"docker exec {self.name} {cmd}", wait=wait, shell=shell) - - def create_ns_cmd(self, cmd: str) -> str: - return f"nsenter -t {self.pid} -a {cmd}" - - def get_pid(self) -> str: - args = f"docker inspect -f '{{{{.State.Pid}}}}' {self.name}" - output = self.run(args) - self.pid = output - logger.debug("node(%s) pid: %s", self.name, self.pid) - return output - - def copy_file(self, src_path: Path, dst_path: Path) -> str: - args = f"docker cp {src_path} {self.name}:{dst_path}" - return self.run(args) +@dataclass +class DockerVolume: + src: str + """volume mount name""" + dst: str + """volume mount destination directory""" + unique: bool = True + """True to create a node unique prefixed name for this volume""" + delete: bool = True + """True to delete the volume during shutdown""" + path: str = None + """path to the volume on the host""" class DockerNode(CoreNode): - apitype = 
NodeTypes.DOCKER + """ + Provides logic for creating a Docker based node. + """ def __init__( self, session: "Session", _id: int = None, name: str = None, - directory: str = None, server: DistributedServer = None, - image: str = None, + options: DockerOptions = None, ) -> None: """ Create a DockerNode instance. :param session: core session instance - :param _id: object id - :param name: object name - :param directory: node directory + :param _id: node id + :param name: node name :param server: remote server node will run on, default is None for localhost - :param image: image to start container with + :param options: options for creating node """ - if image is None: - image = "ubuntu" - self.image: str = image - super().__init__(session, _id, name, directory, server) + options = options or DockerOptions() + super().__init__(session, _id, name, server, options) + self.image: str = options.image + self.binds: List[Tuple[str, str]] = options.binds + self.volumes: Dict[str, DockerVolume] = {} + for src, dst, unique, delete in options.volumes: + src_name = self._unique_name(src) if unique else src + self.volumes[src] = DockerVolume(src_name, dst, unique, delete) - def create_node_net_client(self, use_ovs: bool) -> LinuxNetClient: + @classmethod + def create_options(cls) -> DockerOptions: """ - Create node network client for running network commands within the nodes - container. + Return default creation options, which can be used during node creation. - :param use_ovs: True for OVS bridges, False for Linux bridges - :return:node network client + :return: docker options """ - return get_net_client(use_ovs, self.nsenter_cmd) + return DockerOptions() + + def create_cmd(self, args: str, shell: bool = False) -> str: + """ + Create command used to run commands within the context of a node. + + :param args: command arguments + :param shell: True to run shell like, False otherwise + :return: node command + """ + if shell: + args = f"{BASH} -c {shlex.quote(args)}" + return f"nsenter -t {self.pid} -m -u -i -p -n {args}" + + def _unique_name(self, name: str) -> str: + """ + Creates a session/node unique prefixed name for the provided input. + + :param name: name to make unique + :return: unique session/node prefixed name + """ + return f"{self.session.id}.{self.id}.{name}" def alive(self) -> bool: """ @@ -114,22 +116,52 @@ class DockerNode(CoreNode): :return: True if node is alive, False otherwise """ - return self.client.is_alive() + try: + running = self.host_cmd( + f"{DOCKER} inspect -f '{{{{.State.Running}}}}' {self.name}" + ) + return json.loads(running) + except CoreCommandError: + return False def startup(self) -> None: """ - Start a new namespace node by invoking the vnoded process that - allocates a new namespace. Bring up the loopback device and set - the hostname. + Create a docker container instance for the specified image. 
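With DockerClient gone, liveness is checked by formatting docker inspect output and parsing it as JSON, since docker prints true or false. A standalone sketch using subprocess in place of host_cmd:

```python
import json
import subprocess


def docker_running(name: str) -> bool:
    # docker inspect -f '{{.State.Running}}' prints "true" or "false"
    try:
        output = subprocess.check_output(
            ["docker", "inspect", "-f", "{{.State.Running}}", name], text=True
        )
        return json.loads(output.strip())
    except subprocess.CalledProcessError:
        # container does not exist (or inspect failed), treat as not alive
        return False


# print(docker_running("n1"))  # requires a docker daemon and a container named "n1"
```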
:return: nothing """ with self.lock: if self.up: - raise ValueError("starting a node that is already up") + raise CoreError(f"starting node({self.name}) that is already up") self.makenodedir() - self.client = DockerClient(self.name, self.image, self.host_cmd) - self.pid = self.client.create_container() + binds = "" + for src, dst in self.binds: + binds += f"--mount type=bind,source={src},target={dst} " + volumes = "" + for volume in self.volumes.values(): + volumes += ( + f"--mount type=volume," f"source={volume.src},target={volume.dst} " + ) + hostname = self.name.replace("_", "-") + self.host_cmd( + f"{DOCKER} run -td --init --net=none --hostname {hostname} " + f"--name {self.name} --sysctl net.ipv6.conf.all.disable_ipv6=0 " + f"{binds} {volumes} " + f"--privileged {self.image} tail -f /dev/null" + ) + self.pid = self.host_cmd( + f"{DOCKER} inspect -f '{{{{.State.Pid}}}}' {self.name}" + ) + for src, dst in self.binds: + link_path = self.host_path(Path(dst), True) + self.host_cmd(f"ln -s {src} {link_path}") + for volume in self.volumes.values(): + volume.path = self.host_cmd( + f"{DOCKER} volume inspect -f '{{{{.Mountpoint}}}}' {volume.src}" + ) + link_path = self.host_path(Path(volume.dst), True) + self.host_cmd(f"ln -s {volume.path} {link_path}") + logger.debug("node(%s) pid: %s", self.name, self.pid) self.up = True def shutdown(self) -> None: @@ -141,20 +173,14 @@ class DockerNode(CoreNode): # nothing to do if node is not up if not self.up: return - with self.lock: self.ifaces.clear() - self.client.stop_container() + self.host_cmd(f"{DOCKER} rm -f {self.name}") + for volume in self.volumes.values(): + if volume.delete: + self.host_cmd(f"{DOCKER} volume rm {volume.src}") self.up = False - def nsenter_cmd(self, args: str, wait: bool = True, shell: bool = False) -> str: - if self.server is None: - args = self.client.create_ns_cmd(args) - return utils.cmd(args, wait=wait, shell=shell) - else: - args = self.client.create_ns_cmd(args) - return self.server.remote_cmd(args, wait=wait) - def termcmdstring(self, sh: str = "/bin/sh") -> str: """ Create a terminal command string. 
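Startup now assembles the docker run invocation directly, including any configured bind and volume mounts, and keeps the container alive with tail -f /dev/null instead of a shell. An illustrative assembly with made-up mount values (the flags mirror the hunk above):

```python
name = "n1"
image = "ubuntu"
binds = [("/opt/shared", "/shared")]     # hypothetical bind mounts
volumes = [("1234.1.data", "/data")]     # hypothetical named volume mounts

mounts = ""
for src, dst in binds:
    mounts += f"--mount type=bind,source={src},target={dst} "
for src, dst in volumes:
    mounts += f"--mount type=volume,source={src},target={dst} "

hostname = name.replace("_", "-")  # docker rejects underscores in hostnames
cmd = (
    f"docker run -td --init --net=none --hostname {hostname} "
    f"--name {name} --sysctl net.ipv6.conf.all.disable_ipv6=0 "
    f"{mounts}--privileged {image} tail -f /dev/null"
)
print(cmd)
```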
@@ -162,7 +188,11 @@ class DockerNode(CoreNode): :param sh: shell to execute command in :return: str """ - return f"docker exec -it {self.name} bash" + terminal = f"{DOCKER} exec -it {self.name} {sh}" + if self.server is None: + return terminal + else: + return f"ssh -X -f {self.server.host} xterm -e {terminal}" def create_dir(self, dir_path: Path) -> None: """ @@ -172,8 +202,7 @@ class DockerNode(CoreNode): :return: nothing """ logger.debug("creating node dir: %s", dir_path) - args = f"mkdir -p {dir_path}" - self.cmd(args) + self.cmd(f"mkdir -p {dir_path}") def mount(self, src_path: str, target_path: str) -> None: """ @@ -206,7 +235,7 @@ class DockerNode(CoreNode): self.cmd(f"mkdir -m {0o755:o} -p {directory}") if self.server is not None: self.server.remote_put(temp_path, temp_path) - self.client.copy_file(temp_path, file_path) + self.host_cmd(f"{DOCKER} cp {temp_path} {self.name}:{file_path}") self.cmd(f"chmod {mode:o} {file_path}") if self.server is not None: self.host_cmd(f"rm -f {temp_path}") @@ -231,6 +260,6 @@ class DockerNode(CoreNode): temp_path = Path(temp.name) src_path = temp_path self.server.remote_put(src_path, temp_path) - self.client.copy_file(src_path, dst_path) + self.host_cmd(f"{DOCKER} cp {src_path} {self.name}:{dst_path}") if mode is not None: self.cmd(f"chmod {mode:o} {dst_path}") diff --git a/daemon/core/nodes/interface.py b/daemon/core/nodes/interface.py index 70eb679f..bb90653f 100644 --- a/daemon/core/nodes/interface.py +++ b/daemon/core/nodes/interface.py @@ -4,7 +4,6 @@ virtual ethernet classes that implement the interfaces available under Linux. import logging import math -import time from pathlib import Path from typing import TYPE_CHECKING, Callable, Dict, List, Optional @@ -20,11 +19,12 @@ from core.nodes.netclient import LinuxNetClient, get_net_client logger = logging.getLogger(__name__) if TYPE_CHECKING: - from core.emulator.distributed import DistributedServer from core.emulator.session import Session - from core.nodes.base import CoreNetworkBase, CoreNode + from core.emulator.distributed import DistributedServer + from core.nodes.base import CoreNetworkBase, CoreNode, NodeBase DEFAULT_MTU: int = 1500 +IFACE_NAME_LENGTH: int = 15 def tc_clear_cmd(name: str) -> str: @@ -78,35 +78,42 @@ class CoreInterface: def __init__( self, - session: "Session", + _id: int, name: str, localname: str, + use_ovs: bool, mtu: int = DEFAULT_MTU, + node: "NodeBase" = None, server: "DistributedServer" = None, - node: "CoreNode" = None, ) -> None: """ Creates a CoreInterface instance. 
- :param session: core session instance + :param _id: interface id for associated node :param name: interface name :param localname: interface local name + :param use_ovs: True to use ovs, False otherwise :param mtu: mtu value + :param node: node associated with this interface :param server: remote server node will run on, default is None for localhost - :param node: node for interface """ - if len(name) >= 16: - raise CoreError(f"interface name ({name}) too long, max 16") - if len(localname) >= 16: - raise CoreError(f"interface local name ({localname}) too long, max 16") - self.session: "Session" = session - self.node: Optional["CoreNode"] = node + if len(name) >= IFACE_NAME_LENGTH: + raise CoreError( + f"interface name ({name}) too long, max {IFACE_NAME_LENGTH}" + ) + if len(localname) >= IFACE_NAME_LENGTH: + raise CoreError( + f"interface local name ({localname}) too long, max {IFACE_NAME_LENGTH}" + ) + self.id: int = _id + self.node: Optional["NodeBase"] = node + # id of interface for network, used by wlan/emane + self.net_id: Optional[int] = None self.name: str = name self.localname: str = localname self.up: bool = False self.mtu: int = mtu self.net: Optional[CoreNetworkBase] = None - self.othernet: Optional[CoreNetworkBase] = None self.ip4s: List[netaddr.IPNetwork] = [] self.ip6s: List[netaddr.IPNetwork] = [] self.mac: Optional[netaddr.EUI] = None @@ -114,20 +121,12 @@ class CoreInterface: self.poshook: Callable[[CoreInterface], None] = lambda x: None # used with EMANE self.transport_type: TransportType = TransportType.VIRTUAL - # id of interface for node - self.node_id: Optional[int] = None - # id of interface for network - self.net_id: Optional[int] = None # id used to find flow data self.flow_id: Optional[int] = None self.server: Optional["DistributedServer"] = server - self.net_client: LinuxNetClient = get_net_client( - self.session.use_ovs(), self.host_cmd - ) + self.net_client: LinuxNetClient = get_net_client(use_ovs, self.host_cmd) self.control: bool = False # configuration data - self.has_local_netem: bool = False - self.local_options: LinkOptions = LinkOptions() self.has_netem: bool = False self.options: LinkOptions = LinkOptions() @@ -161,7 +160,13 @@ class CoreInterface: :return: nothing """ - pass + self.net_client.create_veth(self.localname, self.name) + if self.mtu > 0: + self.net_client.set_mtu(self.name, self.mtu) + self.net_client.set_mtu(self.localname, self.mtu) + self.net_client.device_up(self.name) + self.net_client.device_up(self.localname) + self.up = True def shutdown(self) -> None: """ @@ -169,29 +174,14 @@ class CoreInterface: :return: nothing """ - pass - - def attachnet(self, net: "CoreNetworkBase") -> None: - """ - Attach network. - - :param net: network to attach - :return: nothing - """ - if self.net: - self.detachnet() - self.net = None - net.attach(self) - self.net = net - - def detachnet(self) -> None: - """ - Detach from a network. - - :return: nothing - """ - if self.net is not None: - self.net.detach(self) + if not self.up: + return + if self.localname: + try: + self.net_client.delete_device(self.localname) + except CoreCommandError: + pass + self.up = False def add_ip(self, ip: str) -> None: """ @@ -303,41 +293,24 @@ class CoreInterface: """ return self.transport_type == TransportType.VIRTUAL - def config(self, options: LinkOptions, use_local: bool = True) -> None: - """ - Configure interface using tc based on existing state and provided - link options. 
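CoreInterface.startup now owns the veth creation that previously lived in the Veth subclass. Assuming the net client maps onto ip(8), the calls correspond roughly to the commands below; names, MTU, and the create_veth argument order are illustrative assumptions:

```python
localname = "veth3a.0.abcd"   # host-facing end
name = "veth3a.0.abcdp"       # end later adopted by the node
mtu = 1500

commands = [
    f"ip link add name {localname} type veth peer name {name}",  # create_veth
    f"ip link set {name} mtu {mtu}",                             # set_mtu
    f"ip link set {localname} mtu {mtu}",
    f"ip link set {name} up",                                    # device_up
    f"ip link set {localname} up",
]
print("\n".join(commands))
```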
- - :param options: options to configure with - :param use_local: True to use localname for device, False for name - :return: nothing - """ - # determine name, options, and if anything has changed - name = self.localname if use_local else self.name - current_options = self.local_options if use_local else self.options - changed = current_options.update(options) - # nothing more to do when nothing has changed or not up - if not changed or not self.up: - return + def set_config(self) -> None: # clear current settings - if current_options.is_clear(): - clear_local_netem = use_local and self.has_local_netem - clear_netem = not use_local and self.has_netem - if clear_local_netem or clear_netem: - cmd = tc_clear_cmd(name) - self.host_cmd(cmd) - if use_local: - self.has_local_netem = False + if self.options.is_clear(): + if self.has_netem: + cmd = tc_clear_cmd(self.name) + if self.node: + self.node.cmd(cmd) else: - self.has_netem = False + self.host_cmd(cmd) + self.has_netem = False # set updated settings else: - cmd = tc_cmd(name, current_options, self.mtu) - self.host_cmd(cmd) - if use_local: - self.has_local_netem = True + cmd = tc_cmd(self.name, self.options, self.mtu) + if self.node: + self.node.cmd(cmd) else: - self.has_netem = True + self.host_cmd(cmd) + self.has_netem = True def get_data(self) -> InterfaceData: """ @@ -345,231 +318,22 @@ class CoreInterface: :return: interface data """ - if self.node: - iface_id = self.node.get_iface_id(self) - else: - iface_id = self.othernet.get_iface_id(self) - data = InterfaceData( - id=iface_id, name=self.name, mac=str(self.mac) if self.mac else None - ) ip4 = self.get_ip4() - if ip4: - data.ip4 = str(ip4.ip) - data.ip4_mask = ip4.prefixlen + ip4_addr = str(ip4.ip) if ip4 else None + ip4_mask = ip4.prefixlen if ip4 else None ip6 = self.get_ip6() - if ip6: - data.ip6 = str(ip6.ip) - data.ip6_mask = ip6.prefixlen - return data - - -class Veth(CoreInterface): - """ - Provides virtual ethernet functionality for core nodes. - """ - - def adopt_node(self, iface_id: int, name: str, start: bool) -> None: - """ - Adopt this interface to the provided node, configuring and associating - with the node as needed. - - :param iface_id: interface id for node - :param name: name of interface fo rnode - :param start: True to start interface, False otherwise - :return: nothing - """ - if start: - self.startup() - self.net_client.device_ns(self.name, str(self.node.pid)) - self.node.node_net_client.checksums_off(self.name) - self.flow_id = self.node.node_net_client.get_ifindex(self.name) - logger.debug("interface flow index: %s - %s", self.name, self.flow_id) - mac = self.node.node_net_client.get_mac(self.name) - logger.debug("interface mac: %s - %s", self.name, mac) - self.set_mac(mac) - self.node.node_net_client.device_name(self.name, name) - self.name = name - try: - self.node.add_iface(self, iface_id) - except CoreError as e: - self.shutdown() - raise e - - def startup(self) -> None: - """ - Interface startup logic. - - :return: nothing - :raises CoreCommandError: when there is a command exception - """ - self.net_client.create_veth(self.localname, self.name) - if self.mtu > 0: - self.net_client.set_mtu(self.name, self.mtu) - self.net_client.set_mtu(self.localname, self.mtu) - self.net_client.device_up(self.localname) - self.up = True - - def shutdown(self) -> None: - """ - Interface shutdown logic. 
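set_config collapses the old local/remote split into a single options/has_netem pair and runs the resulting tc command inside the node when one is attached. A hedged sketch of that decision flow; FakeOptions and the tc strings are simplified stand-ins, not CORE's LinkOptions, tc_cmd, or tc_clear_cmd:

```python
class FakeOptions:
    # stand-in for LinkOptions with just two knobs
    def __init__(self, delay=None, loss=None):
        self.delay = delay  # microseconds
        self.loss = loss    # percent

    def is_clear(self):
        return self.delay is None and self.loss is None


def set_config(name, options, has_netem, run):
    """Return the new has_netem state after applying or clearing settings."""
    if options.is_clear():
        if has_netem:
            run(f"tc qdisc delete dev {name} root")  # stands in for tc_clear_cmd
        return False
    netem = "netem"
    if options.delay is not None:
        netem += f" delay {options.delay}us"
    if options.loss is not None:
        netem += f" loss {options.loss}%"
    run(f"tc qdisc replace dev {name} root {netem}")  # stands in for tc_cmd
    return True


# commands are printed here; the real code picks node.cmd() or host_cmd()
state = set_config("eth0", FakeOptions(delay=20000, loss=1.0), False, print)
set_config("eth0", FakeOptions(), state, print)
```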
- - :return: nothing - """ - if not self.up: - return - if self.node: - try: - self.node.node_net_client.device_flush(self.name) - except CoreCommandError: - pass - if self.localname: - try: - self.net_client.delete_device(self.localname) - except CoreCommandError: - pass - self.up = False - - -class TunTap(CoreInterface): - """ - TUN/TAP virtual device in TAP mode - """ - - def startup(self) -> None: - """ - Startup logic for a tunnel tap. - - :return: nothing - """ - # TODO: more sophisticated TAP creation here - # Debian does not support -p (tap) option, RedHat does. - # For now, this is disabled to allow the TAP to be created by another - # system (e.g. EMANE"s emanetransportd) - # check_call(["tunctl", "-t", self.name]) - # self.install() - self.up = True - - def shutdown(self) -> None: - """ - Shutdown functionality for a tunnel tap. - - :return: nothing - """ - if not self.up: - return - try: - self.node.node_net_client.device_flush(self.name) - except CoreCommandError: - logger.exception("error shutting down tunnel tap") - self.up = False - - def waitfor( - self, func: Callable[[], int], attempts: int = 10, maxretrydelay: float = 0.25 - ) -> bool: - """ - Wait for func() to return zero with exponential backoff. - - :param func: function to wait for a result of zero - :param attempts: number of attempts to wait for a zero result - :param maxretrydelay: maximum retry delay - :return: True if wait succeeded, False otherwise - """ - delay = 0.01 - result = False - for i in range(1, attempts + 1): - r = func() - if r == 0: - result = True - break - msg = f"attempt {i} failed with nonzero exit status {r}" - if i < attempts + 1: - msg += ", retrying..." - logger.info(msg) - time.sleep(delay) - delay += delay - if delay > maxretrydelay: - delay = maxretrydelay - else: - msg += ", giving up" - logger.info(msg) - - return result - - def waitfordevicelocal(self) -> None: - """ - Check for presence of a local device - tap device may not - appear right away waits - - :return: wait for device local response - """ - logger.debug("waiting for device local: %s", self.localname) - - def localdevexists(): - try: - self.net_client.device_show(self.localname) - return 0 - except CoreCommandError: - return 1 - - self.waitfor(localdevexists) - - def waitfordevicenode(self) -> None: - """ - Check for presence of a node device - tap device may not appear right away waits. - - :return: nothing - """ - logger.debug("waiting for device node: %s", self.name) - - def nodedevexists(): - try: - self.node.node_net_client.device_show(self.name) - return 0 - except CoreCommandError: - return 1 - - count = 0 - while True: - result = self.waitfor(nodedevexists) - if result: - break - - # TODO: emane specific code - # check if this is an EMANE interface; if so, continue - # waiting if EMANE is still running - should_retry = count < 5 - is_emane = self.session.emane.is_emane_net(self.net) - is_emane_running = self.session.emane.emanerunning(self.node) - if all([should_retry, is_emane, is_emane_running]): - count += 1 - else: - raise RuntimeError("node device failed to exist") - - def install(self) -> None: - """ - Install this TAP into its namespace. This is not done from the - startup() method but called at a later time when a userspace - program (running on the host) has had a chance to open the socket - end of the TAP. 
- - :return: nothing - :raises CoreCommandError: when there is a command exception - """ - self.waitfordevicelocal() - netns = str(self.node.pid) - self.net_client.device_ns(self.localname, netns) - self.node.node_net_client.device_name(self.localname, self.name) - self.node.node_net_client.device_up(self.name) - - def set_ips(self) -> None: - """ - Set interface ip addresses. - - :return: nothing - """ - self.waitfordevicenode() - for ip in self.ips(): - self.node.node_net_client.create_address(self.name, str(ip)) + ip6_addr = str(ip6.ip) if ip6 else None + ip6_mask = ip6.prefixlen if ip6 else None + mac = str(self.mac) if self.mac else None + return InterfaceData( + id=self.id, + name=self.name, + mac=mac, + ip4=ip4_addr, + ip4_mask=ip4_mask, + ip6=ip6_addr, + ip6_mask=ip6_mask, + ) class GreTap(CoreInterface): @@ -594,7 +358,7 @@ class GreTap(CoreInterface): """ Creates a GreTap instance. - :param session: core session instance + :param session: session for this gre tap :param remoteip: remote address :param key: gre tap key :param node: related core node @@ -612,7 +376,7 @@ class GreTap(CoreInterface): sessionid = session.short_session_id() localname = f"gt.{self.id}.{sessionid}" name = f"{localname}p" - super().__init__(session, name, localname, mtu, server, node) + super().__init__(0, name, localname, session.use_ovs(), mtu, node, server) self.transport_type: TransportType = TransportType.RAW self.remote_ip: str = remoteip self.ttl: int = ttl diff --git a/daemon/core/nodes/lxd.py b/daemon/core/nodes/lxd.py index 54fc8341..01bd2db7 100644 --- a/daemon/core/nodes/lxd.py +++ b/daemon/core/nodes/lxd.py @@ -1,15 +1,17 @@ import json import logging +import shlex import time +from dataclasses import dataclass, field from pathlib import Path from tempfile import NamedTemporaryFile -from typing import TYPE_CHECKING, Callable, Dict, Optional +from typing import TYPE_CHECKING, Dict, List, Tuple -from core import utils +from core.emulator.data import InterfaceData, LinkOptions from core.emulator.distributed import DistributedServer -from core.emulator.enumerations import NodeTypes from core.errors import CoreCommandError -from core.nodes.base import CoreNode +from core.executables import BASH +from core.nodes.base import CoreNode, CoreNodeOptions from core.nodes.interface import CoreInterface logger = logging.getLogger(__name__) @@ -18,65 +20,29 @@ if TYPE_CHECKING: from core.emulator.session import Session -class LxdClient: - def __init__(self, name: str, image: str, run: Callable[..., str]) -> None: - self.name: str = name - self.image: str = image - self.run: Callable[..., str] = run - self.pid: Optional[int] = None +@dataclass +class LxcOptions(CoreNodeOptions): + image: str = "ubuntu" + """image used when creating container""" + binds: List[Tuple[str, str]] = field(default_factory=list) + """bind mount source and destinations to setup within container""" + volumes: List[Tuple[str, str, bool, bool]] = field(default_factory=list) + """ + volume mount source, destination, unique, delete to setup within container - def create_container(self) -> int: - self.run(f"lxc launch {self.image} {self.name}") - data = self.get_info() - self.pid = data["state"]["pid"] - return self.pid - - def get_info(self) -> Dict: - args = f"lxc list {self.name} --format json" - output = self.run(args) - data = json.loads(output) - if not data: - raise CoreCommandError(1, args, f"LXC({self.name}) not present") - return data[0] - - def is_alive(self) -> bool: - try: - data = self.get_info() - return 
data["state"]["status"] == "Running" - except CoreCommandError: - return False - - def stop_container(self) -> None: - self.run(f"lxc delete --force {self.name}") - - def create_cmd(self, cmd: str) -> str: - return f"lxc exec -nT {self.name} -- {cmd}" - - def create_ns_cmd(self, cmd: str) -> str: - return f"nsenter -t {self.pid} -m -u -i -p -n {cmd}" - - def check_cmd(self, cmd: str, wait: bool = True, shell: bool = False) -> str: - args = self.create_cmd(cmd) - return utils.cmd(args, wait=wait, shell=shell) - - def copy_file(self, src_path: Path, dst_path: Path) -> None: - if not str(dst_path).startswith("/"): - dst_path = Path("/root/") / dst_path - args = f"lxc file push {src_path} {self.name}/{dst_path}" - self.run(args) + unique is True for node unique volume naming + delete is True for deleting volume mount during shutdown + """ class LxcNode(CoreNode): - apitype = NodeTypes.LXC - def __init__( self, session: "Session", _id: int = None, name: str = None, - directory: str = None, server: DistributedServer = None, - image: str = None, + options: LxcOptions = None, ) -> None: """ Create a LxcNode instance. @@ -84,15 +50,37 @@ class LxcNode(CoreNode): :param session: core session instance :param _id: object id :param name: object name - :param directory: node directory :param server: remote server node will run on, default is None for localhost - :param image: image to start container with + :param options: option to create node with """ - if image is None: - image = "ubuntu" - self.image: str = image - super().__init__(session, _id, name, directory, server) + options = options or LxcOptions() + super().__init__(session, _id, name, server, options) + self.image: str = options.image + + @classmethod + def create_options(cls) -> LxcOptions: + return LxcOptions() + + def create_cmd(self, args: str, shell: bool = False) -> str: + """ + Create command used to run commands within the context of a node. 
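Both container node types now build their in-node commands with nsenter against the container's init pid rather than docker exec or lxc exec. The wrapper is small enough to sketch on its own; the pid, bash path, and commands here are made up:

```python
import shlex

BASH = "/bin/bash"  # placeholder for the BASH executable constant


def create_cmd(pid: int, args: str, shell: bool = False) -> str:
    if shell:
        args = f"{BASH} -c {shlex.quote(args)}"
    return f"nsenter -t {pid} -m -u -i -p -n {args}"


print(create_cmd(4242, "ip addr show"))
print(create_cmd(4242, "echo $HOSTNAME", shell=True))
```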
+ + :param args: command arguments + :param shell: True to run shell like, False otherwise + :return: node command + """ + if shell: + args = f"{BASH} -c {shlex.quote(args)}" + return f"nsenter -t {self.pid} -m -u -i -p -n {args}" + + def _get_info(self) -> Dict: + args = f"lxc list {self.name} --format json" + output = self.host_cmd(args) + data = json.loads(output) + if not data: + raise CoreCommandError(1, args, f"LXC({self.name}) not present") + return data[0] def alive(self) -> bool: """ @@ -100,7 +88,11 @@ class LxcNode(CoreNode): :return: True if node is alive, False otherwise """ - return self.client.is_alive() + try: + data = self._get_info() + return data["state"]["status"] == "Running" + except CoreCommandError: + return False def startup(self) -> None: """ @@ -112,8 +104,9 @@ class LxcNode(CoreNode): if self.up: raise ValueError("starting a node that is already up") self.makenodedir() - self.client = LxdClient(self.name, self.image, self.host_cmd) - self.pid = self.client.create_container() + self.host_cmd(f"lxc launch {self.image} {self.name}") + data = self._get_info() + self.pid = data["state"]["pid"] self.up = True def shutdown(self) -> None: @@ -125,10 +118,9 @@ class LxcNode(CoreNode): # nothing to do if node is not up if not self.up: return - with self.lock: self.ifaces.clear() - self.client.stop_container() + self.host_cmd(f"lxc delete --force {self.name}") self.up = False def termcmdstring(self, sh: str = "/bin/sh") -> str: @@ -138,7 +130,11 @@ class LxcNode(CoreNode): :param sh: shell to execute command in :return: str """ - return f"lxc exec {self.name} -- {sh}" + terminal = f"lxc exec {self.name} -- {sh}" + if self.server is None: + return terminal + else: + return f"ssh -X -f {self.server.host} xterm -e {terminal}" def create_dir(self, dir_path: Path) -> None: """ @@ -182,7 +178,9 @@ class LxcNode(CoreNode): self.cmd(f"mkdir -m {0o755:o} -p {directory}") if self.server is not None: self.server.remote_put(temp_path, temp_path) - self.client.copy_file(temp_path, file_path) + if not str(file_path).startswith("/"): + file_path = Path("/root/") / file_path + self.host_cmd(f"lxc file push {temp_path} {self.name}/{file_path}") self.cmd(f"chmod {mode:o} {file_path}") if self.server is not None: self.host_cmd(f"rm -f {temp_path}") @@ -208,11 +206,16 @@ class LxcNode(CoreNode): temp_path = Path(temp.name) src_path = temp_path self.server.remote_put(src_path, temp_path) - self.client.copy_file(src_path, dst_path) + if not str(dst_path).startswith("/"): + dst_path = Path("/root/") / dst_path + self.host_cmd(f"lxc file push {src_path} {self.name}/{dst_path}") if mode is not None: self.cmd(f"chmod {mode:o} {dst_path}") - def add_iface(self, iface: CoreInterface, iface_id: int) -> None: - super().add_iface(iface, iface_id) + def create_iface( + self, iface_data: InterfaceData = None, options: LinkOptions = None + ) -> CoreInterface: + iface = super().create_iface(iface_data, options) # adding small delay to allow time for adding addresses to work correctly time.sleep(0.5) + return iface diff --git a/daemon/core/nodes/netclient.py b/daemon/core/nodes/netclient.py index 09cf94ec..e0a409f4 100644 --- a/daemon/core/nodes/netclient.py +++ b/daemon/core/nodes/netclient.py @@ -28,6 +28,7 @@ class LinuxNetClient: :param name: name for hostname :return: nothing """ + name = name.replace("_", "-") self.run(f"hostname {name}") def create_route(self, route: str, device: str) -> None: diff --git a/daemon/core/nodes/network.py b/daemon/core/nodes/network.py index 262d422c..2b52cbdf 100644 --- 
a/daemon/core/nodes/network.py +++ b/daemon/core/nodes/network.py @@ -4,26 +4,19 @@ Defines network nodes used within core. import logging import threading -from collections import OrderedDict +from dataclasses import dataclass from pathlib import Path -from queue import Queue from typing import TYPE_CHECKING, Dict, List, Optional, Type import netaddr from core import utils from core.emulator.data import InterfaceData, LinkData -from core.emulator.enumerations import ( - LinkTypes, - MessageFlags, - NetworkPolicy, - NodeTypes, - RegisterTlvs, -) +from core.emulator.enumerations import MessageFlags, NetworkPolicy, RegisterTlvs from core.errors import CoreCommandError, CoreError from core.executables import NFTABLES -from core.nodes.base import CoreNetworkBase, CoreNode -from core.nodes.interface import CoreInterface, GreTap, Veth +from core.nodes.base import CoreNetworkBase, NodeOptions +from core.nodes.interface import CoreInterface, GreTap from core.nodes.netclient import get_net_client logger = logging.getLogger(__name__) @@ -33,27 +26,9 @@ if TYPE_CHECKING: from core.emulator.session import Session from core.location.mobility import WirelessModel, WayPointMobility - WirelessModelType = Type[WirelessModel] - LEARNING_DISABLED: int = 0 -class SetQueue(Queue): - """ - Set backed queue to avoid duplicate submissions. - """ - - def _init(self, maxsize): - self.queue: OrderedDict = OrderedDict() - - def _put(self, item): - self.queue[item] = None - - def _get(self): - key, _ = self.queue.popitem(last=False) - return key - - class NftablesQueue: """ Helper class for queuing up nftables commands into rate-limited @@ -78,7 +53,7 @@ class NftablesQueue: # list of pending nftables commands self.cmds: List[str] = [] # list of WLANs requiring update - self.updates: SetQueue = SetQueue() + self.updates: utils.SetQueue = utils.SetQueue() def start(self) -> None: """ @@ -206,6 +181,12 @@ class NftablesQueue: nft_queue: NftablesQueue = NftablesQueue() +@dataclass +class NetworkOptions(NodeOptions): + policy: NetworkPolicy = None + """allows overriding the network policy, otherwise uses class defined default""" + + class CoreNetwork(CoreNetworkBase): """ Provides linux bridge network functionality for core nodes. @@ -219,28 +200,29 @@ class CoreNetwork(CoreNetworkBase): _id: int = None, name: str = None, server: "DistributedServer" = None, - policy: NetworkPolicy = None, + options: NetworkOptions = None, ) -> None: """ - Creates a LxBrNet instance. + Creates a CoreNetwork instance. 
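The SetQueue removed here moves to core.utils (the nftables queue now refers to utils.SetQueue). Its behavior, as shown in the deleted lines, amounts to an OrderedDict-backed Queue whose pending items are unique; a sketch without the type hints:

```python
from collections import OrderedDict
from queue import Queue


class SetQueue(Queue):
    """Queue whose pending items are unique; re-adding a queued item is a no-op."""

    def _init(self, maxsize):
        self.queue = OrderedDict()

    def _put(self, item):
        self.queue[item] = None

    def _get(self):
        key, _ = self.queue.popitem(last=False)
        return key


q = SetQueue()
q.put("wlan1")
q.put("wlan1")    # duplicate collapses into the existing entry
print(q.qsize())  # 1
print(q.get())    # wlan1
```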
:param session: core session instance :param _id: object id :param name: object name :param server: remote server node will run on, default is None for localhost - :param policy: network policy + :param options: options to create node with """ - super().__init__(session, _id, name, server) - if name is None: - name = str(self.id) - if policy is not None: - self.policy: NetworkPolicy = policy - self.name: Optional[str] = name + options = options or NetworkOptions() + super().__init__(session, _id, name, server, options) + self.policy: NetworkPolicy = options.policy if options.policy else self.policy sessionid = self.session.short_session_id() self.brname: str = f"b.{self.id}.{sessionid}" self.has_nftables_chain: bool = False + @classmethod + def create_options(cls) -> NetworkOptions: + return NetworkOptions() + def host_cmd( self, args: str, @@ -280,6 +262,17 @@ class CoreNetwork(CoreNetworkBase): self.up = True nft_queue.start() + def adopt_iface(self, iface: CoreInterface, name: str) -> None: + """ + Adopt interface and set it to use this bridge as master. + + :param iface: interface to adpopt + :param name: formal name for interface + :return: nothing + """ + iface.net_client.set_iface_master(self.brname, iface.name) + iface.set_config() + def shutdown(self) -> None: """ Linux bridge shutdown logic. @@ -309,9 +302,9 @@ class CoreNetwork(CoreNetworkBase): :param iface: network interface to attach :return: nothing """ + super().attach(iface) if self.up: iface.net_client.set_iface_master(self.brname, iface.localname) - super().attach(iface) def detach(self, iface: CoreInterface) -> None: """ @@ -320,9 +313,9 @@ class CoreNetwork(CoreNetworkBase): :param iface: network interface to detach :return: nothing """ + super().detach(iface) if self.up: iface.net_client.delete_iface(self.brname, iface.localname) - super().detach(iface) def is_linked(self, iface1: CoreInterface, iface2: CoreInterface) -> bool: """ @@ -378,67 +371,6 @@ class CoreNetwork(CoreNetworkBase): self.linked[iface1][iface2] = True nft_queue.update(self) - def linknet(self, net: CoreNetworkBase) -> CoreInterface: - """ - Link this bridge with another by creating a veth pair and installing - each device into each bridge. - - :param net: network to link with - :return: created interface - """ - sessionid = self.session.short_session_id() - try: - _id = f"{self.id:x}" - except TypeError: - _id = str(self.id) - try: - net_id = f"{net.id:x}" - except TypeError: - net_id = str(net.id) - localname = f"veth{_id}.{net_id}.{sessionid}" - name = f"veth{net_id}.{_id}.{sessionid}" - iface = Veth(self.session, name, localname) - if self.up: - iface.startup() - self.attach(iface) - if net.up and net.brname: - iface.net_client.set_iface_master(net.brname, iface.name) - i = net.next_iface_id() - net.ifaces[i] = iface - with net.linked_lock: - net.linked[iface] = {} - iface.net = self - iface.othernet = net - return iface - - def get_linked_iface(self, net: CoreNetworkBase) -> Optional[CoreInterface]: - """ - Return the interface of that links this net with another net - (that were linked using linknet()). - - :param net: interface to get link for - :return: interface the provided network is linked to - """ - for iface in self.get_ifaces(): - if iface.othernet == net: - return iface - return None - - def add_ips(self, ips: List[str]) -> None: - """ - Add ip addresses on the bridge in the format "10.0.0.1/24". 
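adopt_iface on a CoreNetwork enslaves the adopted interface to the session-scoped bridge. Assuming set_iface_master wraps ip link set ... master, and that bridge creation (not shown in this hunk) uses ip link add ... type bridge, the commands look roughly like this; the names are invented:

```python
session_short_id = "abcd"   # invented short session id
net_id = 5
brname = f"b.{net_id}.{session_short_id}"   # matches the brname format above
iface_localname = f"veth{net_id}.1.{session_short_id}"

commands = [
    f"ip link add name {brname} type bridge",          # bridge creation (assumed)
    f"ip link set {brname} up",
    f"ip link set {iface_localname} master {brname}",  # set_iface_master (assumed)
]
print("\n".join(commands))
```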
- - :param ips: ip address to add - :return: nothing - """ - if not self.up: - return - for ip in ips: - self.net_client.create_address(self.brname, ip) - - def custom_iface(self, node: CoreNode, iface_data: InterfaceData) -> CoreInterface: - raise CoreError(f"{type(self).__name__} does not support, custom interfaces") - class GreTapBridge(CoreNetwork): """ @@ -558,6 +490,20 @@ class GreTapBridge(CoreNetwork): self.add_ips(ips) +@dataclass +class CtrlNetOptions(NetworkOptions): + prefix: str = None + """ip4 network prefix to use for generating an address""" + updown_script: str = None + """script to execute during startup and shutdown""" + serverintf: str = None + """used to associate an interface with the control network bridge""" + assign_address: bool = True + """used to determine if a specific address should be assign using hostid""" + hostid: int = None + """used with assign address to """ + + class CtrlNet(CoreNetwork): """ Control network functionality. @@ -576,36 +522,32 @@ class CtrlNet(CoreNetwork): def __init__( self, session: "Session", - prefix: str, _id: int = None, name: str = None, - hostid: int = None, server: "DistributedServer" = None, - assign_address: bool = True, - updown_script: str = None, - serverintf: str = None, + options: CtrlNetOptions = None, ) -> None: """ Creates a CtrlNet instance. :param session: core session instance :param _id: node id - :param name: node namee - :param prefix: control network ipv4 prefix - :param hostid: host id + :param name: node name :param server: remote server node will run on, default is None for localhost - :param assign_address: assigned address - :param updown_script: updown script - :param serverintf: server interface - :return: + :param options: node options for creation """ - self.prefix: netaddr.IPNetwork = netaddr.IPNetwork(prefix).cidr - self.hostid: Optional[int] = hostid - self.assign_address: bool = assign_address - self.updown_script: Optional[str] = updown_script - self.serverintf: Optional[str] = serverintf - super().__init__(session, _id, name, server) + options = options or CtrlNetOptions() + super().__init__(session, _id, name, server, options) + self.prefix: netaddr.IPNetwork = netaddr.IPNetwork(options.prefix).cidr + self.hostid: Optional[int] = options.hostid + self.assign_address: bool = options.assign_address + self.updown_script: Optional[str] = options.updown_script + self.serverintf: Optional[str] = options.serverintf + + @classmethod + def create_options(cls) -> CtrlNetOptions: + return CtrlNetOptions() def add_addresses(self, index: int) -> None: """ @@ -686,15 +628,6 @@ class CtrlNet(CoreNetwork): super().shutdown() - def links(self, flags: MessageFlags = MessageFlags.NONE) -> List[LinkData]: - """ - Do not include CtrlNet in link messages describing this session. - - :param flags: message flags - :return: list of link data - """ - return [] - class PtpNet(CoreNetwork): """ @@ -714,59 +647,13 @@ class PtpNet(CoreNetwork): raise CoreError("ptp links support at most 2 network interfaces") super().attach(iface) - def links(self, flags: MessageFlags = MessageFlags.NONE) -> List[LinkData]: - """ - Build CORE API TLVs for a point-to-point link. One Link message - describes this network. 
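CtrlNet now pulls its settings from CtrlNetOptions and normalizes the prefix with netaddr (already a CORE dependency). A small sketch of the prefix handling; how hostid maps to a host index is an assumption here, since add_addresses is not shown in this hunk:

```python
import netaddr  # third-party, already used throughout these modules

prefix = netaddr.IPNetwork("172.16.0.0/24").cidr   # CtrlNetOptions.prefix normalized
hostid = 2                                         # invented host id
address = f"{prefix[hostid]}/{prefix.prefixlen}"   # assumed index-based assignment
print(prefix)    # 172.16.0.0/24
print(address)   # 172.16.0.2/24
```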
- - :param flags: message flags - :return: list of link data - """ - all_links = [] - if len(self.ifaces) != 2: - return all_links - ifaces = self.get_ifaces() - iface1 = ifaces[0] - iface2 = ifaces[1] - unidirectional = 0 if iface1.local_options == iface2.local_options else 1 - iface1_data = iface1.get_data() - iface2_data = iface2.get_data() - link_data = LinkData( - message_type=flags, - type=self.linktype, - node1_id=iface1.node.id, - node2_id=iface2.node.id, - iface1=iface1_data, - iface2=iface2_data, - options=iface1.local_options, - ) - link_data.options.unidirectional = unidirectional - all_links.append(link_data) - # build a 2nd link message for the upstream link parameters - # (swap if1 and if2) - if unidirectional: - link_data = LinkData( - message_type=MessageFlags.NONE, - type=self.linktype, - node1_id=iface2.node.id, - node2_id=iface1.node.id, - iface1=InterfaceData(id=iface2_data.id), - iface2=InterfaceData(id=iface1_data.id), - options=iface2.local_options, - ) - link_data.options.unidirectional = unidirectional - all_links.append(link_data) - return all_links - class SwitchNode(CoreNetwork): """ Provides switch functionality within a core node. """ - apitype: NodeTypes = NodeTypes.SWITCH policy: NetworkPolicy = NetworkPolicy.ACCEPT - type: str = "lanswitch" class HubNode(CoreNetwork): @@ -775,9 +662,7 @@ class HubNode(CoreNetwork): ports by turning off MAC address learning. """ - apitype: NodeTypes = NodeTypes.HUB policy: NetworkPolicy = NetworkPolicy.ACCEPT - type: str = "hub" def startup(self) -> None: """ @@ -794,10 +679,7 @@ class WlanNode(CoreNetwork): Provides wireless lan functionality within a core node. """ - apitype: NodeTypes = NodeTypes.WIRELESS_LAN - linktype: LinkTypes = LinkTypes.WIRED policy: NetworkPolicy = NetworkPolicy.DROP - type: str = "wlan" def __init__( self, @@ -805,7 +687,7 @@ class WlanNode(CoreNetwork): _id: int = None, name: str = None, server: "DistributedServer" = None, - policy: NetworkPolicy = None, + options: NetworkOptions = None, ) -> None: """ Create a WlanNode instance. @@ -815,11 +697,11 @@ class WlanNode(CoreNetwork): :param name: node name :param server: remote server node will run on, default is None for localhost - :param policy: wlan policy + :param options: options to create node with """ - super().__init__(session, _id, name, server, policy) + super().__init__(session, _id, name, server, options) # wireless and mobility models (BasicRangeModel, Ns2WaypointMobility) - self.model: Optional[WirelessModel] = None + self.wireless_model: Optional[WirelessModel] = None self.mobility: Optional[WayPointMobility] = None def startup(self) -> None: @@ -839,27 +721,27 @@ class WlanNode(CoreNetwork): :return: nothing """ super().attach(iface) - if self.model: - iface.poshook = self.model.position_callback + if self.wireless_model: + iface.poshook = self.wireless_model.position_callback iface.setposition() - def setmodel(self, model: "WirelessModelType", config: Dict[str, str]): + def setmodel(self, wireless_model: Type["WirelessModel"], config: Dict[str, str]): """ Sets the mobility and wireless model. 
- :param model: wireless model to set to + :param wireless_model: wireless model to set to :param config: configuration for model being set :return: nothing """ - logger.debug("node(%s) setting model: %s", self.name, model.name) - if model.config_type == RegisterTlvs.WIRELESS: - self.model = model(session=self.session, _id=self.id) + logger.debug("node(%s) setting model: %s", self.name, wireless_model.name) + if wireless_model.config_type == RegisterTlvs.WIRELESS: + self.wireless_model = wireless_model(session=self.session, _id=self.id) for iface in self.get_ifaces(): - iface.poshook = self.model.position_callback + iface.poshook = self.wireless_model.position_callback iface.setposition() self.updatemodel(config) - elif model.config_type == RegisterTlvs.MOBILITY: - self.mobility = model(session=self.session, _id=self.id) + elif wireless_model.config_type == RegisterTlvs.MOBILITY: + self.mobility = wireless_model(session=self.session, _id=self.id) self.mobility.update_config(config) def update_mobility(self, config: Dict[str, str]) -> None: @@ -868,12 +750,12 @@ class WlanNode(CoreNetwork): self.mobility.update_config(config) def updatemodel(self, config: Dict[str, str]) -> None: - if not self.model: + if not self.wireless_model: raise CoreError(f"no model set to update for node({self.name})") logger.debug( - "node(%s) updating model(%s): %s", self.id, self.model.name, config + "node(%s) updating model(%s): %s", self.id, self.wireless_model.name, config ) - self.model.update_config(config) + self.wireless_model.update_config(config) for iface in self.get_ifaces(): iface.setposition() @@ -884,10 +766,10 @@ class WlanNode(CoreNetwork): :param flags: message flags :return: list of link data """ - links = super().links(flags) - if self.model: - links.extend(self.model.links(flags)) - return links + if self.wireless_model: + return self.wireless_model.links(flags) + else: + return [] class TunnelNode(GreTapBridge): @@ -895,6 +777,4 @@ class TunnelNode(GreTapBridge): Provides tunnel functionality in a core node. """ - apitype: NodeTypes = NodeTypes.TUNNEL policy: NetworkPolicy = NetworkPolicy.ACCEPT - type: str = "tunnel" diff --git a/daemon/core/nodes/physical.py b/daemon/core/nodes/physical.py index 0a686da8..8ab13f20 100644 --- a/daemon/core/nodes/physical.py +++ b/daemon/core/nodes/physical.py @@ -3,17 +3,18 @@ PhysicalNode class for including real systems in the emulated network. 
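The wlan refactor renames model to wireless_model and keeps the dispatch on the registered config type: wireless models hook interface position callbacks, mobility models only receive configuration. A simplified stand-in sketch of that dispatch (the enum and model class below are fakes, and the position-callback wiring is omitted):

```python
from enum import Enum
from typing import Dict, Optional, Type


class RegisterTlvs(Enum):  # stand-in for the real enum
    WIRELESS = 1
    MOBILITY = 2


class FakeRangeModel:
    name = "basic_range"
    config_type = RegisterTlvs.WIRELESS

    def __init__(self, **kwargs) -> None:
        self.config: Dict[str, str] = {}

    def update_config(self, config: Dict[str, str]) -> None:
        self.config.update(config)


class Wlan:
    def __init__(self) -> None:
        self.wireless_model: Optional[FakeRangeModel] = None
        self.mobility = None

    def setmodel(self, model_class: Type, config: Dict[str, str]) -> None:
        instance = model_class()
        if model_class.config_type == RegisterTlvs.WIRELESS:
            self.wireless_model = instance          # position callbacks omitted
        elif model_class.config_type == RegisterTlvs.MOBILITY:
            self.mobility = instance
        instance.update_config(config)


w = Wlan()
w.setmodel(FakeRangeModel, {"range": "275"})
print(w.wireless_model.name, w.wireless_model.config)
```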
""" import logging -import threading from pathlib import Path from typing import TYPE_CHECKING, List, Optional, Tuple -from core.emulator.data import InterfaceData +import netaddr + +from core.emulator.data import InterfaceData, LinkOptions from core.emulator.distributed import DistributedServer -from core.emulator.enumerations import NodeTypes, TransportType +from core.emulator.enumerations import TransportType from core.errors import CoreCommandError, CoreError -from core.executables import MOUNT, TEST, UMOUNT -from core.nodes.base import CoreNetworkBase, CoreNodeBase -from core.nodes.interface import DEFAULT_MTU, CoreInterface +from core.executables import BASH, TEST, UMOUNT +from core.nodes.base import CoreNode, CoreNodeBase, CoreNodeOptions, NodeOptions +from core.nodes.interface import CoreInterface logger = logging.getLogger(__name__) @@ -21,201 +22,19 @@ if TYPE_CHECKING: from core.emulator.session import Session -class PhysicalNode(CoreNodeBase): - def __init__( - self, - session: "Session", - _id: int = None, - name: str = None, - directory: Path = None, - server: DistributedServer = None, - ) -> None: - super().__init__(session, _id, name, server) - if not self.server: - raise CoreError("physical nodes must be assigned to a remote server") - self.directory: Optional[Path] = directory - self.lock: threading.RLock = threading.RLock() - self._mounts: List[Tuple[Path, Path]] = [] - - def startup(self) -> None: - with self.lock: - self.makenodedir() - self.up = True - - def shutdown(self) -> None: - if not self.up: - return - with self.lock: - while self._mounts: - _, target_path = self._mounts.pop(-1) - self.umount(target_path) - for iface in self.get_ifaces(): - iface.shutdown() - self.rmnodedir() - - def path_exists(self, path: str) -> bool: - """ - Determines if a file or directory path exists. - - :param path: path to file or directory - :return: True if path exists, False otherwise - """ - try: - self.host_cmd(f"{TEST} -e {path}") - return True - except CoreCommandError: - return False - - def termcmdstring(self, sh: str = "/bin/sh") -> str: - """ - Create a terminal command string. - - :param sh: shell to execute command in - :return: str - """ - return sh - - def set_mac(self, iface_id: int, mac: str) -> None: - """ - Set mac address for an interface. - - :param iface_id: index of interface to set hardware address for - :param mac: mac address to set - :return: nothing - :raises CoreCommandError: when a non-zero exit status occurs - """ - iface = self.get_iface(iface_id) - iface.set_mac(mac) - if self.up: - self.net_client.device_mac(iface.name, str(iface.mac)) - - def add_ip(self, iface_id: int, ip: str) -> None: - """ - Add an ip address to an interface in the format "10.0.0.1/24". - - :param iface_id: id of interface to add address to - :param ip: address to add to interface - :return: nothing - :raises CoreError: when ip address provided is invalid - :raises CoreCommandError: when a non-zero exit status occurs - """ - iface = self.get_iface(iface_id) - iface.add_ip(ip) - if self.up: - self.net_client.create_address(iface.name, ip) - - def remove_ip(self, iface_id: int, ip: str) -> None: - """ - Remove an ip address from an interface in the format "10.0.0.1/24". 
- - :param iface_id: id of interface to delete address from - :param ip: ip address to remove from interface - :return: nothing - :raises CoreError: when ip address provided is invalid - :raises CoreCommandError: when a non-zero exit status occurs - """ - iface = self.get_iface(iface_id) - iface.remove_ip(ip) - if self.up: - self.net_client.delete_address(iface.name, ip) - - def adopt_iface( - self, iface: CoreInterface, iface_id: int, mac: str, ips: List[str] - ) -> None: - """ - When a link message is received linking this node to another part of - the emulation, no new interface is created; instead, adopt the - GreTap interface as the node interface. - """ - iface.name = f"gt{iface_id}" - iface.node = self - self.add_iface(iface, iface_id) - # use a more reasonable name, e.g. "gt0" instead of "gt.56286.150" - if self.up: - self.net_client.device_down(iface.localname) - self.net_client.device_name(iface.localname, iface.name) - iface.localname = iface.name - if mac: - self.set_mac(iface_id, mac) - for ip in ips: - self.add_ip(iface_id, ip) - if self.up: - self.net_client.device_up(iface.localname) - - def next_iface_id(self) -> int: - with self.lock: - while self.iface_id in self.ifaces: - self.iface_id += 1 - iface_id = self.iface_id - self.iface_id += 1 - return iface_id - - def new_iface( - self, net: CoreNetworkBase, iface_data: InterfaceData - ) -> CoreInterface: - logger.info("creating interface") - ips = iface_data.get_ips() - iface_id = iface_data.id - if iface_id is None: - iface_id = self.next_iface_id() - name = iface_data.name - if name is None: - name = f"gt{iface_id}" - _, remote_tap = self.session.distributed.create_gre_tunnel( - net, self.server, iface_data.mtu, self.up - ) - self.adopt_iface(remote_tap, iface_id, iface_data.mac, ips) - return remote_tap - - def privatedir(self, dir_path: Path) -> None: - if not str(dir_path).startswith("/"): - raise CoreError(f"private directory path not fully qualified: {dir_path}") - host_path = self.host_path(dir_path, is_dir=True) - self.host_cmd(f"mkdir -p {host_path}") - self.mount(host_path, dir_path) - - def mount(self, src_path: Path, target_path: Path) -> None: - logger.debug("node(%s) mounting: %s at %s", self.name, src_path, target_path) - self.cmd(f"mkdir -p {target_path}") - self.host_cmd(f"{MOUNT} --bind {src_path} {target_path}", cwd=self.directory) - self._mounts.append((src_path, target_path)) - - def umount(self, target_path: Path) -> None: - logger.info("unmounting '%s'", target_path) - try: - self.host_cmd(f"{UMOUNT} -l {target_path}", cwd=self.directory) - except CoreCommandError: - logger.exception("unmounting failed for %s", target_path) - - def cmd(self, args: str, wait: bool = True, shell: bool = False) -> str: - return self.host_cmd(args, wait=wait) - - def create_dir(self, dir_path: Path) -> None: - raise CoreError("physical node does not support creating directories") - - def create_file(self, file_path: Path, contents: str, mode: int = 0o644) -> None: - raise CoreError("physical node does not support creating files") - - def copy_file(self, src_path: Path, dst_path: Path, mode: int = None) -> None: - raise CoreError("physical node does not support copying files") - - class Rj45Node(CoreNodeBase): """ RJ45Node is a physical interface on the host linked to the emulated network. 
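The privatedir/mount/umount helpers above bind-mount a host directory into place, remember it, and lazily unmount everything in reverse order at shutdown; the new PhysicalNode further down keeps the same `umount -l` cleanup. A rough standalone sketch of that bookkeeping using subprocess, with paths purely illustrative:

```python
import subprocess
from pathlib import Path
from typing import List, Tuple

# every bind mount is remembered so shutdown can undo them newest-first
mounts: List[Tuple[Path, Path]] = []


def mount(src_path: Path, target_path: Path) -> None:
    subprocess.run(["mkdir", "-p", str(target_path)], check=True)
    subprocess.run(["mount", "--bind", str(src_path), str(target_path)], check=True)
    mounts.append((src_path, target_path))


def shutdown() -> None:
    while mounts:
        _, target_path = mounts.pop(-1)
        # -l detaches lazily, so a busy mount does not block cleanup
        subprocess.run(["umount", "-l", str(target_path)], check=False)
```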
""" - apitype: NodeTypes = NodeTypes.RJ45 - type: str = "rj45" - def __init__( self, session: "Session", _id: int = None, name: str = None, - mtu: int = DEFAULT_MTU, server: DistributedServer = None, + options: NodeOptions = None, ) -> None: """ Create an RJ45Node instance. @@ -223,17 +42,15 @@ class Rj45Node(CoreNodeBase): :param session: core session instance :param _id: node id :param name: node name - :param mtu: rj45 mtu :param server: remote server node will run on, default is None for localhost + :param options: option to create node with """ - super().__init__(session, _id, name, server) + super().__init__(session, _id, name, server, options) self.iface: CoreInterface = CoreInterface( - session, name, name, mtu, server, self + self.iface_id, name, name, session.use_ovs(), node=self, server=server ) self.iface.transport_type = TransportType.RAW - self.lock: threading.RLock = threading.RLock() - self.iface_id: Optional[int] = None self.old_up: bool = False self.old_addrs: List[Tuple[str, Optional[str]]] = [] @@ -245,7 +62,7 @@ class Rj45Node(CoreNodeBase): :raises CoreCommandError: when there is a command exception """ # interface will also be marked up during net.attach() - self.savestate() + self.save_state() self.net_client.device_up(self.iface.localname) self.up = True @@ -266,7 +83,7 @@ class Rj45Node(CoreNodeBase): except CoreCommandError: pass self.up = False - self.restorestate() + self.restore_state() def path_exists(self, path: str) -> bool: """ @@ -281,33 +98,28 @@ class Rj45Node(CoreNodeBase): except CoreCommandError: return False - def new_iface( - self, net: CoreNetworkBase, iface_data: InterfaceData + def create_iface( + self, iface_data: InterfaceData = None, options: LinkOptions = None ) -> CoreInterface: - """ - This is called when linking with another node. Since this node - represents an interface, we do not create another object here, - but attach ourselves to the given network. 
- - :param net: new network instance - :param iface_data: interface data for new interface - :return: interface index - :raises ValueError: when an interface has already been created, one max - """ with self.lock: - iface_id = iface_data.id - if iface_id is None: - iface_id = 0 - if self.iface.net is not None: + if self.iface.id in self.ifaces: raise CoreError( - f"RJ45({self.name}) nodes support at most 1 network interface" + f"rj45({self.name}) nodes support at most 1 network interface" ) - self.ifaces[iface_id] = self.iface - self.iface_id = iface_id - self.iface.attachnet(net) + if iface_data and iface_data.mtu is not None: + self.iface.mtu = iface_data.mtu + self.iface.ip4s.clear() + self.iface.ip6s.clear() for ip in iface_data.get_ips(): - self.add_ip(ip) - return self.iface + self.iface.add_ip(ip) + self.ifaces[self.iface.id] = self.iface + if self.up: + for ip in self.iface.ips(): + self.net_client.create_address(self.iface.name, str(ip)) + return self.iface + + def adopt_iface(self, iface: CoreInterface, name: str) -> None: + raise CoreError(f"rj45({self.name}) does not support adopt interface") def delete_iface(self, iface_id: int) -> None: """ @@ -318,16 +130,10 @@ class Rj45Node(CoreNodeBase): """ self.get_iface(iface_id) self.ifaces.pop(iface_id) - if self.iface.net is None: - raise CoreError( - f"RJ45({self.name}) is not currently connected to a network" - ) - self.iface.detachnet() - self.iface.net = None self.shutdown() def get_iface(self, iface_id: int) -> CoreInterface: - if iface_id != self.iface_id or iface_id not in self.ifaces: + if iface_id not in self.ifaces: raise CoreError(f"node({self.name}) interface({iface_id}) does not exist") return self.iface @@ -341,42 +147,17 @@ class Rj45Node(CoreNodeBase): """ if iface is not self.iface: raise CoreError(f"node({self.name}) does not have interface({iface.name})") - return self.iface_id + return self.iface.id - def add_ip(self, ip: str) -> None: - """ - Add an ip address to an interface in the format "10.0.0.1/24". - - :param ip: address to add to interface - :return: nothing - :raises CoreError: when ip address provided is invalid - :raises CoreCommandError: when a non-zero exit status occurs - """ - self.iface.add_ip(ip) - if self.up: - self.net_client.create_address(self.name, ip) - - def remove_ip(self, ip: str) -> None: - """ - Remove an ip address from an interface in the format "10.0.0.1/24". - - :param ip: ip address to remove from interface - :return: nothing - :raises CoreError: when ip address provided is invalid - :raises CoreCommandError: when a non-zero exit status occurs - """ - self.iface.remove_ip(ip) - if self.up: - self.net_client.delete_address(self.name, ip) - - def savestate(self) -> None: + def save_state(self) -> None: """ Save the addresses and other interface state before using the - interface for emulation purposes. TODO: save/restore the PROMISC flag + interface for emulation purposes. :return: nothing :raises CoreCommandError: when there is a command exception """ + # TODO: save/restore the PROMISC flag self.old_up = False self.old_addrs: List[Tuple[str, Optional[str]]] = [] localname = self.iface.localname @@ -397,7 +178,7 @@ class Rj45Node(CoreNodeBase): self.old_addrs.append((items[1], None)) logger.info("saved rj45 state: addrs(%s) up(%s)", self.old_addrs, self.old_up) - def restorestate(self) -> None: + def restore_state(self) -> None: """ Restore the addresses and other interface state after using it. 
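save_state/restore_state above snapshot the host interface's addresses (and up/down state) before the device is pulled into the emulation, then put them back afterwards. A rough standalone equivalent for the address part using the `ip` command directly; the real code goes through the node's net client, and the parsing here is a simplified assumption about `ip -o` one-line output:

```python
import subprocess
from typing import List


def save_addresses(device: str) -> List[str]:
    # `ip -o` prints one record per line, e.g. "2: eth0 inet 10.0.0.5/24 brd ..."
    output = subprocess.check_output(
        ["ip", "-o", "addr", "show", "dev", device]
    ).decode()
    addresses = []
    for line in output.splitlines():
        fields = line.split()
        if len(fields) >= 4 and fields[2] in ("inet", "inet6"):
            addresses.append(fields[3])
    return addresses


def restore_addresses(device: str, addresses: List[str]) -> None:
    for address in addresses:
        subprocess.run(["ip", "addr", "add", address, "dev", device], check=False)
```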
@@ -437,3 +218,69 @@ class Rj45Node(CoreNodeBase): def copy_file(self, src_path: Path, dst_path: Path, mode: int = None) -> None: raise CoreError("rj45 does not support copying files") + + +class PhysicalNode(CoreNode): + def __init__( + self, + session: "Session", + _id: int = None, + name: str = None, + server: DistributedServer = None, + options: CoreNodeOptions = None, + ) -> None: + if not self.server: + raise CoreError("physical nodes must be assigned to a remote server") + super().__init__(session, _id, name, server, options) + + def startup(self) -> None: + with self.lock: + self.makenodedir() + self.up = True + + def shutdown(self) -> None: + if not self.up: + return + with self.lock: + while self._mounts: + _, target_path = self._mounts.pop(-1) + self.umount(target_path) + for iface in self.get_ifaces(): + iface.shutdown() + self.rmnodedir() + + def create_cmd(self, args: str, shell: bool = False) -> str: + if shell: + args = f'{BASH} -c "{args}"' + return args + + def adopt_iface(self, iface: CoreInterface, name: str) -> None: + # validate iface belongs to node and get id + iface_id = self.get_iface_id(iface) + if iface_id == -1: + raise CoreError(f"adopting unknown iface({iface.name})") + # turn checksums off + self.node_net_client.checksums_off(iface.name) + # retrieve flow id for container + iface.flow_id = self.node_net_client.get_ifindex(iface.name) + logger.debug("interface flow index: %s - %s", iface.name, iface.flow_id) + if iface.mac: + self.net_client.device_mac(iface.name, str(iface.mac)) + # set all addresses + for ip in iface.ips(): + # ipv4 check + broadcast = None + if netaddr.valid_ipv4(ip): + broadcast = "+" + self.node_net_client.create_address(iface.name, str(ip), broadcast) + # configure iface options + iface.set_config() + # set iface up + self.net_client.device_up(iface.name) + + def umount(self, target_path: Path) -> None: + logger.info("unmounting '%s'", target_path) + try: + self.host_cmd(f"{UMOUNT} -l {target_path}", cwd=self.directory) + except CoreCommandError: + logger.exception("unmounting failed for %s", target_path) diff --git a/daemon/core/nodes/wireless.py b/daemon/core/nodes/wireless.py new file mode 100644 index 00000000..ef37db35 --- /dev/null +++ b/daemon/core/nodes/wireless.py @@ -0,0 +1,345 @@ +""" +Defines a wireless node that allows programmatic link connectivity and +configuration between pairs of nodes. 
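Since the point of this module is scriptable connectivity between pairs of nodes, here is a short sketch of driving it over the gRPC API, using the client calls the player further below relies on (wireless_linked, wireless_config); the session, network, and node ids are illustrative and a running session is assumed:

```python
from core.api.grpc.client import CoreGrpcClient
from core.api.grpc.wrappers import LinkOptions

core = CoreGrpcClient()
core.connect()
session_id = core.get_sessions()[0].id  # assumes at least one running session
wireless_id, node1_id, node2_id = 5, 1, 2  # illustrative ids

# force the pair apart, then back together with degraded link settings
core.wireless_linked(session_id, wireless_id, node1_id, node2_id, False)
core.wireless_linked(session_id, wireless_id, node1_id, node2_id, True)
options = LinkOptions(loss=25.0, delay=10000)
core.wireless_config(session_id, wireless_id, node1_id, node2_id, options, options)
```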
+""" +import copy +import logging +import math +import secrets +from dataclasses import dataclass +from typing import TYPE_CHECKING, Dict, List, Set, Tuple + +from core.config import ConfigBool, ConfigFloat, ConfigInt, Configuration +from core.emulator.data import LinkData, LinkOptions +from core.emulator.enumerations import LinkTypes, MessageFlags +from core.errors import CoreError +from core.executables import NFTABLES +from core.nodes.base import CoreNetworkBase, NodeOptions +from core.nodes.interface import CoreInterface + +if TYPE_CHECKING: + from core.emulator.session import Session + from core.emulator.distributed import DistributedServer + +logger = logging.getLogger(__name__) +CONFIG_ENABLED: bool = True +CONFIG_RANGE: float = 400.0 +CONFIG_LOSS_RANGE: float = 300.0 +CONFIG_LOSS_FACTOR: float = 1.0 +CONFIG_LOSS: float = 0.0 +CONFIG_DELAY: int = 5000 +CONFIG_BANDWIDTH: int = 54_000_000 +CONFIG_JITTER: int = 0 +KEY_ENABLED: str = "movement" +KEY_RANGE: str = "max-range" +KEY_BANDWIDTH: str = "bandwidth" +KEY_DELAY: str = "delay" +KEY_JITTER: str = "jitter" +KEY_LOSS_RANGE: str = "loss-range" +KEY_LOSS_FACTOR: str = "loss-factor" +KEY_LOSS: str = "loss" + + +def calc_distance( + point1: Tuple[float, float, float], point2: Tuple[float, float, float] +) -> float: + a = point1[0] - point2[0] + b = point1[1] - point2[1] + c = 0 + if point1[2] is not None and point2[2] is not None: + c = point1[2] - point2[2] + return math.hypot(math.hypot(a, b), c) + + +def get_key(node1_id: int, node2_id: int) -> Tuple[int, int]: + return (node1_id, node2_id) if node1_id < node2_id else (node2_id, node1_id) + + +@dataclass +class WirelessLink: + bridge1: str + bridge2: str + iface: CoreInterface + linked: bool + label: str = None + + +class WirelessNode(CoreNetworkBase): + options: List[Configuration] = [ + ConfigBool( + id=KEY_ENABLED, default="1" if CONFIG_ENABLED else "0", label="Enabled?" 
+ ), + ConfigFloat( + id=KEY_RANGE, default=str(CONFIG_RANGE), label="Max Range (pixels)" + ), + ConfigInt( + id=KEY_BANDWIDTH, default=str(CONFIG_BANDWIDTH), label="Bandwidth (bps)" + ), + ConfigInt(id=KEY_DELAY, default=str(CONFIG_DELAY), label="Delay (usec)"), + ConfigInt(id=KEY_JITTER, default=str(CONFIG_JITTER), label="Jitter (usec)"), + ConfigFloat( + id=KEY_LOSS_RANGE, + default=str(CONFIG_LOSS_RANGE), + label="Loss Start Range (pixels)", + ), + ConfigFloat( + id=KEY_LOSS_FACTOR, default=str(CONFIG_LOSS_FACTOR), label="Loss Factor" + ), + ConfigFloat(id=KEY_LOSS, default=str(CONFIG_LOSS), label="Loss Initial"), + ] + devices: Set[str] = set() + + @classmethod + def add_device(cls) -> str: + while True: + name = f"we{secrets.token_hex(6)}" + if name not in cls.devices: + cls.devices.add(name) + break + return name + + @classmethod + def delete_device(cls, name: str) -> None: + cls.devices.discard(name) + + def __init__( + self, + session: "Session", + _id: int, + name: str, + server: "DistributedServer" = None, + options: NodeOptions = None, + ): + super().__init__(session, _id, name, server, options) + self.bridges: Dict[int, Tuple[CoreInterface, str]] = {} + self.links: Dict[Tuple[int, int], WirelessLink] = {} + self.position_enabled: bool = CONFIG_ENABLED + self.bandwidth: int = CONFIG_BANDWIDTH + self.delay: int = CONFIG_DELAY + self.jitter: int = CONFIG_JITTER + self.max_range: float = CONFIG_RANGE + self.loss_initial: float = CONFIG_LOSS + self.loss_range: float = CONFIG_LOSS_RANGE + self.loss_factor: float = CONFIG_LOSS_FACTOR + + def startup(self) -> None: + if self.up: + return + self.up = True + + def shutdown(self) -> None: + while self.bridges: + _, (_, bridge_name) = self.bridges.popitem() + self.net_client.delete_bridge(bridge_name) + self.host_cmd(f"{NFTABLES} delete table bridge {bridge_name}") + while self.links: + _, link = self.links.popitem() + link.iface.shutdown() + self.up = False + + def attach(self, iface: CoreInterface) -> None: + super().attach(iface) + logging.info("attaching node(%s) iface(%s)", iface.node.name, iface.name) + if self.up: + # create node unique bridge + bridge_name = f"wb{iface.node.id}.{self.id}.{self.session.id}" + self.net_client.create_bridge(bridge_name) + # setup initial bridge rules + self.host_cmd(f'{NFTABLES} "add table bridge {bridge_name}"') + self.host_cmd( + f"{NFTABLES} " + f"'add chain bridge {bridge_name} forward {{type filter hook " + f"forward priority -1; policy drop;}}'" + ) + self.host_cmd( + f"{NFTABLES} " + f"'add rule bridge {bridge_name} forward " + f"ibriport != {bridge_name} accept'" + ) + # associate node iface with bridge + iface.net_client.set_iface_master(bridge_name, iface.localname) + # assign position callback, when enabled + if self.position_enabled: + iface.poshook = self.position_callback + # save created bridge + self.bridges[iface.node.id] = (iface, bridge_name) + + def post_startup(self) -> None: + routes = {} + for node_id, (iface, bridge_name) in self.bridges.items(): + for onode_id, (oiface, obridge_name) in self.bridges.items(): + if node_id == onode_id: + continue + if node_id < onode_id: + node1, node2 = iface.node, oiface.node + bridge1, bridge2 = bridge_name, obridge_name + else: + node1, node2 = oiface.node, iface.node + bridge1, bridge2 = obridge_name, bridge_name + key = (node1.id, node2.id) + if key in self.links: + continue + # create node to node link + name1 = self.add_device() + name2 = self.add_device() + link_iface = CoreInterface(0, name1, name2, self.session.use_ovs()) + 
link_iface.startup() + link = WirelessLink(bridge1, bridge2, link_iface, False) + self.links[key] = link + # track bridge routes + node1_routes = routes.setdefault(node1.id, set()) + node1_routes.add(name1) + node2_routes = routes.setdefault(node2.id, set()) + node2_routes.add(name2) + if self.position_enabled: + link.linked = True + # assign ifaces to respective bridges + self.net_client.set_iface_master(bridge1, link_iface.name) + self.net_client.set_iface_master(bridge2, link_iface.localname) + # calculate link data + self.calc_link(iface, oiface) + for node_id, ifaces in routes.items(): + iface, bridge_name = self.bridges[node_id] + ifaces = ",".join(ifaces) + # out routes + self.host_cmd( + f"{NFTABLES} " + f'"add rule bridge {bridge_name} forward ' + f"iif {iface.localname} oif {{{ifaces}}} " + f'accept"' + ) + # in routes + self.host_cmd( + f"{NFTABLES} " + f'"add rule bridge {bridge_name} forward ' + f"iif {{{ifaces}}} oif {iface.localname} " + f'accept"' + ) + + def link_control(self, node1_id: int, node2_id: int, linked: bool) -> None: + key = get_key(node1_id, node2_id) + link = self.links.get(key) + if not link: + raise CoreError(f"invalid node links node1({node1_id}) node2({node2_id})") + bridge1, bridge2 = link.bridge1, link.bridge2 + iface = link.iface + if not link.linked and linked: + link.linked = True + self.net_client.set_iface_master(bridge1, iface.name) + self.net_client.set_iface_master(bridge2, iface.localname) + self.send_link(key[0], key[1], MessageFlags.ADD, link.label) + elif link.linked and not linked: + link.linked = False + self.net_client.delete_iface(bridge1, iface.name) + self.net_client.delete_iface(bridge2, iface.localname) + self.send_link(key[0], key[1], MessageFlags.DELETE, link.label) + + def link_config( + self, node1_id: int, node2_id: int, options1: LinkOptions, options2: LinkOptions + ) -> None: + key = get_key(node1_id, node2_id) + link = self.links.get(key) + if not link: + raise CoreError(f"invalid node links node1({node1_id}) node2({node2_id})") + iface = link.iface + has_netem = iface.has_netem + iface.options.update(options1) + iface.set_config() + name, localname = iface.name, iface.localname + iface.name, iface.localname = localname, name + iface.options.update(options2) + iface.has_netem = has_netem + iface.set_config() + iface.name, iface.localname = name, localname + if options1 == options2: + link.label = f"{options1.loss:.2f}%/{options1.delay}us" + else: + link.label = ( + f"({options1.loss:.2f}%/{options1.delay}us) " + f"({options2.loss:.2f}%/{options2.delay}us)" + ) + self.send_link(key[0], key[1], MessageFlags.NONE, link.label) + + def send_link( + self, + node1_id: int, + node2_id: int, + message_type: MessageFlags, + label: str = None, + ) -> None: + """ + Broadcasts out a wireless link/unlink message. 
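The nested quoting in the host_cmd calls above obscures the actual rule set: each per-node bridge gets a default-drop forward chain, and traffic is only accepted over the pair interfaces created for that node. A small sketch that merely assembles the same command strings for inspection; the bridge and interface names follow the patterns used above but are otherwise made up:

```python
# condensed view of the nftables commands issued per bridge; names illustrative
nft = "nft"
bridge = "wb1.5.32100"  # wb{node_id}.{net_id}.{session_id}
peer_ifaces = ["weaaaaaaaaaaaa", "webbbbbbbbbbbb"]  # veth ends toward other nodes
node_iface = "veth1.0.32100"  # the node's own attachment (name illustrative)

setup = [
    f"{nft} add table bridge {bridge}",
    # default-drop forward chain, so only explicitly accepted traffic passes
    f"{nft} 'add chain bridge {bridge} forward "
    f"{{type filter hook forward priority -1; policy drop;}}'",
    f"{nft} 'add rule bridge {bridge} forward ibriport != {bridge} accept'",
]
ifaces = ",".join(peer_ifaces)
routes = [
    # node traffic may leave only over the pair links created for it ...
    f"{nft} 'add rule bridge {bridge} forward iif {node_iface} oif {{{ifaces}}} accept'",
    # ... and only those pair links may deliver traffic back to the node
    f"{nft} 'add rule bridge {bridge} forward iif {{{ifaces}}} oif {node_iface} accept'",
]
print("\n".join(setup + routes))
```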
+ + :param node1_id: first node in link + :param node2_id: second node in link + :param message_type: type of link message to send + :param label: label to display for link + :return: nothing + """ + color = self.session.get_link_color(self.id) + link_data = LinkData( + message_type=message_type, + type=LinkTypes.WIRELESS, + node1_id=node1_id, + node2_id=node2_id, + network_id=self.id, + color=color, + label=label, + ) + self.session.broadcast_link(link_data) + + def position_callback(self, iface: CoreInterface) -> None: + for oiface, bridge_name in self.bridges.values(): + if iface == oiface: + continue + self.calc_link(iface, oiface) + + def calc_link(self, iface1: CoreInterface, iface2: CoreInterface) -> None: + key = get_key(iface1.node.id, iface2.node.id) + link = self.links.get(key) + point1 = iface1.node.position.get() + point2 = iface2.node.position.get() + distance = calc_distance(point1, point2) + if distance >= self.max_range: + if link.linked: + self.link_control(iface1.node.id, iface2.node.id, False) + else: + if not link.linked: + self.link_control(iface1.node.id, iface2.node.id, True) + loss_distance = max(distance - self.loss_range, 0.0) + max_distance = max(self.max_range - self.loss_range, 0.0) + loss = min((loss_distance / max_distance) * 100.0 * self.loss_factor, 100.0) + loss = max(self.loss_initial, loss) + options = LinkOptions( + loss=loss, + delay=self.delay, + bandwidth=self.bandwidth, + jitter=self.jitter, + ) + self.link_config(iface1.node.id, iface2.node.id, options, options) + + def adopt_iface(self, iface: CoreInterface, name: str) -> None: + raise CoreError(f"{type(self)} does not support adopt interface") + + def get_config(self) -> Dict[str, Configuration]: + config = {x.id: x for x in copy.copy(self.options)} + config[KEY_ENABLED].default = "1" if self.position_enabled else "0" + config[KEY_RANGE].default = str(self.max_range) + config[KEY_LOSS_RANGE].default = str(self.loss_range) + config[KEY_LOSS_FACTOR].default = str(self.loss_factor) + config[KEY_LOSS].default = str(self.loss_initial) + config[KEY_BANDWIDTH].default = str(self.bandwidth) + config[KEY_DELAY].default = str(self.delay) + config[KEY_JITTER].default = str(self.jitter) + return config + + def set_config(self, config: Dict[str, str]) -> None: + logger.info("wireless config: %s", config) + self.position_enabled = config[KEY_ENABLED] == "1" + self.max_range = float(config[KEY_RANGE]) + self.loss_range = float(config[KEY_LOSS_RANGE]) + self.loss_factor = float(config[KEY_LOSS_FACTOR]) + self.loss_initial = float(config[KEY_LOSS]) + self.bandwidth = int(config[KEY_BANDWIDTH]) + self.delay = int(config[KEY_DELAY]) + self.jitter = int(config[KEY_JITTER]) diff --git a/daemon/core/player.py b/daemon/core/player.py new file mode 100644 index 00000000..6ba0d602 --- /dev/null +++ b/daemon/core/player.py @@ -0,0 +1,450 @@ +import ast +import csv +import enum +import logging +import sched +from pathlib import Path +from threading import Thread +from typing import IO, Callable, Dict, Optional + +import grpc + +from core.api.grpc.client import CoreGrpcClient, MoveNodesStreamer +from core.api.grpc.wrappers import LinkOptions + +logger = logging.getLogger(__name__) + + +@enum.unique +class PlayerEvents(enum.Enum): + """ + Provides event types for processing file events. 
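calc_link above turns node separation into a loss percentage using the configured max range, loss start range, and loss factor. A standalone sketch of that math with the module defaults (400/300/1.0), handy for sanity-checking what loss a given distance should produce:

```python
import math

MAX_RANGE = 400.0    # CONFIG_RANGE
LOSS_RANGE = 300.0   # CONFIG_LOSS_RANGE
LOSS_FACTOR = 1.0    # CONFIG_LOSS_FACTOR
LOSS_INITIAL = 0.0   # CONFIG_LOSS


def calc_distance(point1, point2) -> float:
    a = point1[0] - point2[0]
    b = point1[1] - point2[1]
    c = 0
    if point1[2] is not None and point2[2] is not None:
        c = point1[2] - point2[2]
    return math.hypot(math.hypot(a, b), c)


def calc_loss(distance: float) -> float:
    loss_distance = max(distance - LOSS_RANGE, 0.0)
    max_distance = max(MAX_RANGE - LOSS_RANGE, 0.0)
    loss = min((loss_distance / max_distance) * 100.0 * LOSS_FACTOR, 100.0)
    return max(LOSS_INITIAL, loss)


# nodes 350 pixels apart: past the loss start range (300) but inside the
# max range (400), so the pair stays linked at 50% loss
print(calc_loss(calc_distance((0, 0, None), (350, 0, None))))  # 50.0
```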
+ """ + + XY = enum.auto() + GEO = enum.auto() + CMD = enum.auto() + WLINK = enum.auto() + WILINK = enum.auto() + WICONFIG = enum.auto() + + @classmethod + def get(cls, value: str) -> Optional["PlayerEvents"]: + """ + Retrieves a valid event type from read input. + + :param value: value to get event type for + :return: valid event type, None otherwise + """ + event = None + try: + event = cls[value] + except KeyError: + pass + return event + + +class CorePlayerWriter: + """ + Provides conveniences for programatically creating a core file for playback. + """ + + def __init__(self, file_path: str): + """ + Create a CorePlayerWriter instance. + + :param file_path: path to create core file + """ + self._time: float = 0.0 + self._file_path: str = file_path + self._file: Optional[IO] = None + self._csv_file: Optional[csv.writer] = None + + def open(self) -> None: + """ + Opens the provided file path for writing and csv creation. + + :return: nothing + """ + logger.info("core player write file(%s)", self._file_path) + self._file = open(self._file_path, "w", newline="") + self._csv_file = csv.writer(self._file, quoting=csv.QUOTE_MINIMAL) + + def close(self) -> None: + """ + Closes the file being written to. + + :return: nothing + """ + if self._file: + self._file.close() + + def update(self, delay: float) -> None: + """ + Update and move the current play time forward by delay amount. + + :param delay: amount to move time forward by + :return: nothing + """ + self._time += delay + + def write_xy(self, node_id: int, x: float, y: float) -> None: + """ + Write a node xy movement event. + + :param node_id: id of node to move + :param x: x position + :param y: y position + :return: nothing + """ + self._csv_file.writerow([self._time, PlayerEvents.XY.name, node_id, x, y]) + + def write_geo(self, node_id: int, lon: float, lat: float, alt: float) -> None: + """ + Write a node geo movement event. + + :param node_id: id of node to move + :param lon: longitude position + :param lat: latitude position + :param alt: altitude position + :return: nothing + """ + self._csv_file.writerow( + [self._time, PlayerEvents.GEO.name, node_id, lon, lat, alt] + ) + + def write_cmd(self, node_id: int, wait: bool, shell: bool, cmd: str) -> None: + """ + Write a node command event. + + :param node_id: id of node to run command on + :param wait: should command wait for successful execution + :param shell: should command run under shell context + :param cmd: command to run + :return: nothing + """ + self._csv_file.writerow( + [self._time, PlayerEvents.CMD.name, node_id, wait, shell, f"'{cmd}'"] + ) + + def write_wlan_link( + self, wireless_id: int, node1_id: int, node2_id: int, linked: bool + ) -> None: + """ + Write a wlan link event. + + :param wireless_id: id of wlan network for link + :param node1_id: first node connected to wlan + :param node2_id: second node connected to wlan + :param linked: True if nodes are linked, False otherwise + :return: nothing + """ + self._csv_file.writerow( + [ + self._time, + PlayerEvents.WLINK.name, + wireless_id, + node1_id, + node2_id, + linked, + ] + ) + + def write_wireless_link( + self, wireless_id: int, node1_id: int, node2_id: int, linked: bool + ) -> None: + """ + Write a wireless link event. 
+ + :param wireless_id: id of wireless network for link + :param node1_id: first node connected to wireless + :param node2_id: second node connected to wireless + :param linked: True if nodes are linked, False otherwise + :return: nothing + """ + self._csv_file.writerow( + [ + self._time, + PlayerEvents.WILINK.name, + wireless_id, + node1_id, + node2_id, + linked, + ] + ) + + def write_wireless_config( + self, + wireless_id: int, + node1_id: int, + node2_id: int, + loss1: float, + delay1: int, + loss2: float = None, + delay2: float = None, + ) -> None: + """ + Write a wireless link config event. + + :param wireless_id: id of wireless network for link + :param node1_id: first node connected to wireless + :param node2_id: second node connected to wireless + :param loss1: loss for the first interface + :param delay1: delay for the first interface + :param loss2: loss for the second interface, defaults to first interface loss + :param delay2: delay for second interface, defaults to first interface delay + :return: nothing + """ + loss2 = loss2 if loss2 is not None else loss1 + delay2 = delay2 if delay2 is not None else delay1 + self._csv_file.writerow( + [ + self._time, + PlayerEvents.WICONFIG.name, + wireless_id, + node1_id, + node2_id, + loss1, + delay1, + loss2, + delay2, + ] + ) + + +class CorePlayer: + """ + Provides core player functionality for reading a file with timed events + and playing them out. + """ + + def __init__(self, file_path: Path): + """ + Creates a CorePlayer instance. + + :param file_path: file to play path + """ + self.file_path: Path = file_path + self.core: CoreGrpcClient = CoreGrpcClient() + self.session_id: Optional[int] = None + self.node_streamer: Optional[MoveNodesStreamer] = None + self.node_streamer_thread: Optional[Thread] = None + self.scheduler: sched.scheduler = sched.scheduler() + self.handlers: Dict[PlayerEvents, Callable] = { + PlayerEvents.XY: self.handle_xy, + PlayerEvents.GEO: self.handle_geo, + PlayerEvents.CMD: self.handle_cmd, + PlayerEvents.WLINK: self.handle_wlink, + PlayerEvents.WILINK: self.handle_wireless_link, + PlayerEvents.WICONFIG: self.handle_wireless_config, + } + + def init(self, session_id: Optional[int]) -> bool: + """ + Initialize core connections, settings to or retrieving session to use. + Also setup node streamer for xy/geo movements. + + :param session_id: session id to use, None for default session + :return: True if init was successful, False otherwise + """ + self.core.connect() + try: + if session_id is None: + sessions = self.core.get_sessions() + if len(sessions): + session_id = sessions[0].id + if session_id is None: + logger.error("no core sessions found") + return False + self.session_id = session_id + logger.info("playing to session(%s)", self.session_id) + self.node_streamer = MoveNodesStreamer(self.session_id) + self.node_streamer_thread = Thread( + target=self.core.move_nodes, args=(self.node_streamer,), daemon=True + ) + self.node_streamer_thread.start() + except grpc.RpcError as e: + logger.error("core is not running: %s", e.details()) + return False + return True + + def start(self) -> None: + """ + Starts playing file, reading the csv data line by line, then handling + each line event type. Delay is tracked and calculated, while processing, + to ensure we wait for the event time to be active. 
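Playing such a file back is just init plus start; a minimal sketch, assuming the file from the writer example above exists and a session is running:

```python
from pathlib import Path

from core.player import CorePlayer

player = CorePlayer(Path("/tmp/demo.core"))
# init connects over gRPC, resolves the session, and starts the node
# movement stream; it returns False when no session is available
if player.init(session_id=None):
    player.start()
```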
+ + :return: nothing + """ + current_time = 0.0 + with self.file_path.open("r", newline="") as f: + for row in csv.reader(f): + # determine delay + input_time = float(row[0]) + delay = input_time - current_time + current_time = input_time + # determine event + event_value = row[1] + event = PlayerEvents.get(event_value) + if not event: + logger.error("unknown event type: %s", ",".join(row)) + continue + # get args and event functions + args = tuple(ast.literal_eval(x) for x in row[2:]) + event_func = self.handlers.get(event) + if not event_func: + logger.error("unknown event type handler: %s", ",".join(row)) + continue + logger.info( + "processing line time(%s) event(%s) args(%s)", + input_time, + event.name, + args, + ) + # schedule and run event + self.scheduler.enter(delay, 1, event_func, argument=args) + self.scheduler.run() + self.stop() + + def stop(self) -> None: + """ + Stop and cleanup playback. + + :return: nothing + """ + logger.info("stopping playback, cleaning up") + self.node_streamer.stop() + self.node_streamer_thread.join() + self.node_streamer_thread = None + + def handle_xy(self, node_id: int, x: float, y: float) -> None: + """ + Handle node xy movement event. + + :param node_id: id of node to move + :param x: x position + :param y: y position + :return: nothing + """ + logger.debug("handling xy node(%s) x(%s) y(%s)", node_id, x, y) + self.node_streamer.send_position(node_id, x, y) + + def handle_geo(self, node_id: int, lon: float, lat: float, alt: float) -> None: + """ + Handle node geo movement event. + + :param node_id: id of node to move + :param lon: longitude position + :param lat: latitude position + :param alt: altitude position + :return: nothing + """ + logger.debug( + "handling geo node(%s) lon(%s) lat(%s) alt(%s)", node_id, lon, lat, alt + ) + self.node_streamer.send_geo(node_id, lon, lat, alt) + + def handle_cmd(self, node_id: int, wait: bool, shell: bool, cmd: str) -> None: + """ + Handle node command event. + + :param node_id: id of node to run command + :param wait: True to wait for successful command, False otherwise + :param shell: True to run command in shell context, False otherwise + :param cmd: command to run + :return: nothing + """ + logger.debug( + "handling cmd node(%s) wait(%s) shell(%s) cmd(%s)", + node_id, + wait, + shell, + cmd, + ) + status, output = self.core.node_command( + self.session_id, node_id, cmd, wait, shell + ) + logger.info("cmd result(%s): %s", status, output) + + def handle_wlink( + self, net_id: int, node1_id: int, node2_id: int, linked: bool + ) -> None: + """ + Handle wlan link event. + + :param net_id: id of wlan network + :param node1_id: first node in link + :param node2_id: second node in link + :param linked: True if linked, Flase otherwise + :return: nothing + """ + logger.debug( + "handling wlink node1(%s) node2(%s) net(%s) linked(%s)", + node1_id, + node2_id, + net_id, + linked, + ) + self.core.wlan_link(self.session_id, net_id, node1_id, node2_id, linked) + + def handle_wireless_link( + self, wireless_id: int, node1_id: int, node2_id: int, linked: bool + ) -> None: + """ + Handle wireless link event. 
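The handlers above are thin wrappers over the gRPC client, so the same operations can be issued directly without a player file; a sketch using the calls shown above, with illustrative ids:

```python
from core.api.grpc.client import CoreGrpcClient

core = CoreGrpcClient()
core.connect()
session_id = core.get_sessions()[0].id  # assumes a session exists

# run a command on node 1, waiting for completion, outside a shell
status, output = core.node_command(session_id, 1, "ping -c 1 10.0.0.2", True, False)
print(status, output)

# detach nodes 1 and 2 on wlan 5, then re-link them
core.wlan_link(session_id, 5, 1, 2, False)
core.wlan_link(session_id, 5, 1, 2, True)
```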
+ + :param wireless_id: id of wireless network + :param node1_id: first node in link + :param node2_id: second node in link + :param linked: True if linked, Flase otherwise + :return: nothing + """ + logger.debug( + "handling link wireless(%s) node1(%s) node2(%s) linked(%s)", + wireless_id, + node1_id, + node2_id, + linked, + ) + self.core.wireless_linked( + self.session_id, wireless_id, node1_id, node2_id, linked + ) + + def handle_wireless_config( + self, + wireless_id: int, + node1_id: int, + node2_id: int, + loss1: float, + delay1: int, + loss2: float, + delay2: int, + ) -> None: + """ + Handle wireless config event. + + :param wireless_id: id of wireless network + :param node1_id: first node in link + :param node2_id: second node in link + :param loss1: first interface loss + :param delay1: first interface delay + :param loss2: second interface loss + :param delay2: second interface delay + :return: nothing + """ + logger.debug( + "handling config wireless(%s) node1(%s) node2(%s) " + "options1(%s/%s) options2(%s/%s)", + wireless_id, + node1_id, + node2_id, + loss1, + delay1, + loss2, + delay2, + ) + options1 = LinkOptions(loss=loss1, delay=delay1) + options2 = LinkOptions(loss=loss2, delay=delay2) + self.core.wireless_config( + self.session_id, wireless_id, node1_id, node2_id, options1, options2 + ) diff --git a/daemon/core/plugins/sdt.py b/daemon/core/plugins/sdt.py index 575f9257..48a6cdf0 100644 --- a/daemon/core/plugins/sdt.py +++ b/daemon/core/plugins/sdt.py @@ -4,22 +4,46 @@ sdt.py: Scripted Display Tool (SDT3D) helper import logging import socket -from typing import IO, TYPE_CHECKING, Dict, List, Optional, Set, Tuple +from pathlib import Path +from typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple, Type from urllib.parse import urlparse -from core.constants import CORE_CONF_DIR, CORE_DATA_DIR +from core.constants import CORE_CONF_DIR from core.emane.nodes import EmaneNet from core.emulator.data import LinkData, NodeData from core.emulator.enumerations import EventTypes, MessageFlags from core.errors import CoreError -from core.nodes.base import CoreNetworkBase, NodeBase -from core.nodes.network import WlanNode +from core.nodes.base import CoreNode, NodeBase +from core.nodes.network import HubNode, SwitchNode, TunnelNode, WlanNode +from core.nodes.physical import Rj45Node +from core.nodes.wireless import WirelessNode logger = logging.getLogger(__name__) if TYPE_CHECKING: from core.emulator.session import Session +LOCAL_ICONS_PATH: Path = Path(__file__).parent.parent / "gui" / "data" / "icons" +CORE_LAYER: str = "CORE" +NODE_LAYER: str = "CORE::Nodes" +LINK_LAYER: str = "CORE::Links" +WIRED_LINK_LAYER: str = f"{LINK_LAYER}::wired" +CORE_LAYERS: List[str] = [CORE_LAYER, LINK_LAYER, NODE_LAYER, WIRED_LINK_LAYER] +DEFAULT_LINK_COLOR: str = "red" +NODE_TYPES: Dict[Type[NodeBase], str] = { + HubNode: "hub", + SwitchNode: "lanswitch", + TunnelNode: "tunnel", + WlanNode: "wlan", + EmaneNet: "emane", + WirelessNode: "wireless", + Rj45Node: "rj45", +} + + +def is_wireless(node: NodeBase) -> bool: + return isinstance(node, (WlanNode, EmaneNet, WirelessNode)) + def get_link_id(node1_id: int, node2_id: int, network_id: int) -> str: link_id = f"{node1_id}-{node2_id}" @@ -28,14 +52,6 @@ def get_link_id(node1_id: int, node2_id: int, network_id: int) -> str: return link_id -CORE_LAYER: str = "CORE" -NODE_LAYER: str = "CORE::Nodes" -LINK_LAYER: str = "CORE::Links" -WIRED_LINK_LAYER: str = f"{LINK_LAYER}::wired" -CORE_LAYERS: List[str] = [CORE_LAYER, LINK_LAYER, NODE_LAYER, 
WIRED_LINK_LAYER] -DEFAULT_LINK_COLOR: str = "red" - - class Sdt: """ Helper class for exporting session objects to NRL"s SDT3D. @@ -48,16 +64,18 @@ class Sdt: DEFAULT_ALT: int = 2500 # TODO: read in user"s nodes.conf here; below are default node types from the GUI DEFAULT_SPRITES: Dict[str, str] = [ - ("router", "router.gif"), - ("host", "host.gif"), - ("PC", "pc.gif"), - ("mdr", "mdr.gif"), - ("prouter", "router_green.gif"), - ("hub", "hub.gif"), - ("lanswitch", "lanswitch.gif"), - ("wlan", "wlan.gif"), - ("rj45", "rj45.gif"), - ("tunnel", "tunnel.gif"), + ("router", "router.png"), + ("host", "host.png"), + ("PC", "pc.png"), + ("mdr", "mdr.png"), + ("prouter", "prouter.png"), + ("hub", "hub.png"), + ("lanswitch", "lanswitch.png"), + ("wlan", "wlan.png"), + ("emane", "emane.png"), + ("wireless", "wireless.png"), + ("rj45", "rj45.png"), + ("tunnel", "tunnel.png"), ] def __init__(self, session: "Session") -> None: @@ -67,7 +85,7 @@ class Sdt: :param session: session this manager is tied to """ self.session: "Session" = session - self.sock: Optional[IO] = None + self.sock: Optional[socket.socket] = None self.connected: bool = False self.url: str = self.DEFAULT_SDT_URL self.address: Optional[Tuple[Optional[str], Optional[int]]] = None @@ -83,7 +101,7 @@ class Sdt: :return: True if enabled, False otherwise """ - return self.session.options.get_config("enablesdt") == "1" + return self.session.options.get_int("enablesdt") == 1 def seturl(self) -> None: """ @@ -92,7 +110,7 @@ class Sdt: :return: nothing """ - url = self.session.options.get_config("stdurl", default=self.DEFAULT_SDT_URL) + url = self.session.options.get("stdurl", self.DEFAULT_SDT_URL) self.url = urlparse(url) self.address = (self.url.hostname, self.url.port) self.protocol = self.url.scheme @@ -140,7 +158,7 @@ class Sdt: :return: initialize command status """ - if not self.cmd(f'path "{CORE_DATA_DIR}/icons/normal"'): + if not self.cmd(f'path "{LOCAL_ICONS_PATH.absolute()}"'): return False # send node type to icon mappings for node_type, icon in self.DEFAULT_SPRITES: @@ -162,7 +180,6 @@ class Sdt: logger.error("error closing socket") finally: self.sock = None - self.connected = False def shutdown(self) -> None: @@ -190,7 +207,6 @@ class Sdt: """ if self.sock is None: return False - try: cmd = f"{cmdstr}\n".encode() logger.debug("sdt cmd: %s", cmd) @@ -210,26 +226,23 @@ class Sdt: :return: nothing """ - nets = [] - # create layers for layer in CORE_LAYERS: self.cmd(f"layer {layer}") - with self.session.nodes_lock: - for node_id in self.session.nodes: - node = self.session.nodes[node_id] - if isinstance(node, CoreNetworkBase): + nets = [] + for node in self.session.nodes.values(): + if isinstance(node, (EmaneNet, WlanNode)): nets.append(node) if not isinstance(node, NodeBase): continue self.add_node(node) - + for link in self.session.link_manager.links(): + if is_wireless(link.node1) or is_wireless(link.node2): + continue + link_data = link.get_data(MessageFlags.ADD) + self.handle_link_update(link_data) for net in nets: - all_links = net.links(flags=MessageFlags.ADD) - for link_data in all_links: - is_wireless = isinstance(net, (WlanNode, EmaneNet)) - if is_wireless and link_data.node1_id == net.id: - continue + for link_data in net.links(MessageFlags.ADD): self.handle_link_update(link_data) def get_node_position(self, node: NodeBase) -> Optional[str]: @@ -258,13 +271,14 @@ class Sdt: pos = self.get_node_position(node) if not pos: return - node_type = node.type - if node_type is None: - node_type = type(node).type + if isinstance(node, 
CoreNode): + node_type = node.model + else: + node_type = NODE_TYPES.get(type(node), "PC") icon = node.icon if icon: node_type = node.name - icon = icon.replace("$CORE_DATA_DIR", str(CORE_DATA_DIR)) + icon = icon.replace("$CORE_DATA_DIR", str(LOCAL_ICONS_PATH.absolute())) icon = icon.replace("$CORE_CONF_DIR", str(CORE_CONF_DIR)) self.cmd(f"sprite {node_type} image {icon}") self.cmd( @@ -341,7 +355,7 @@ class Sdt: result = False try: node = self.session.get_node(node_id, NodeBase) - result = isinstance(node, (WlanNode, EmaneNet)) + result = isinstance(node, (WlanNode, EmaneNet, WirelessNode)) except CoreError: pass return result diff --git a/daemon/core/api/tlv/__init__.py b/daemon/core/scripts/__init__.py similarity index 100% rename from daemon/core/api/tlv/__init__.py rename to daemon/core/scripts/__init__.py diff --git a/daemon/core/scripts/cleanup.py b/daemon/core/scripts/cleanup.py new file mode 100755 index 00000000..3606fc13 --- /dev/null +++ b/daemon/core/scripts/cleanup.py @@ -0,0 +1,104 @@ +import argparse +import os +import subprocess +import sys +import time + + +def check_root() -> None: + if os.geteuid() != 0: + print("permission denied, run this script as root") + sys.exit(1) + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser( + description="helps cleanup lingering core processes and files", + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) + parser.add_argument( + "-d", "--daemon", action="store_true", help="also kill core-daemon" + ) + return parser.parse_args() + + +def cleanup_daemon() -> None: + print("killing core-daemon process ... ", end="") + result = subprocess.call("pkill -9 core-daemon", shell=True) + if result: + print("not found") + else: + print("done") + + +def cleanup_nodes() -> None: + print("killing vnoded processes ... ", end="") + result = subprocess.call("pkill -KILL vnoded", shell=True) + if result: + print("none found") + else: + time.sleep(1) + print("done") + + +def cleanup_emane() -> None: + print("killing emane processes ... ", end="") + result = subprocess.call("pkill emane", shell=True) + if result: + print("none found") + else: + print("done") + + +def cleanup_sessions() -> None: + print("removing session directories ... 
", end="") + result = subprocess.call("rm -rf /tmp/pycore*", shell=True) + if result: + print("none found") + else: + print("done") + + +def cleanup_interfaces() -> None: + print("cleaning up devices") + output = subprocess.check_output("ip -o -br link show", shell=True) + lines = output.decode().strip().split("\n") + for line in lines: + values = line.split() + name = values[0] + if ( + name.startswith("veth") + or name.startswith("beth") + or name.startswith("gt.") + or name.startswith("b.") + or name.startswith("ctrl") + ): + result = subprocess.call(f"ip link delete {name}", shell=True) + if result: + print(f"failed to remove {name}") + else: + print(f"removed {name}") + if name.startswith("b."): + result = subprocess.call( + f"nft delete table bridge {name}", + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + shell=True, + ) + if not result: + print(f"cleared nft rules for {name}") + + +def main() -> None: + check_root() + args = parse_args() + if args.daemon: + cleanup_daemon() + cleanup_nodes() + cleanup_emane() + cleanup_interfaces() + cleanup_sessions() + + +if __name__ == "__main__": + main() diff --git a/daemon/scripts/core-cli b/daemon/core/scripts/cli.py similarity index 88% rename from daemon/scripts/core-cli rename to daemon/core/scripts/cli.py index 6f2c1f5b..31ad086e 100755 --- a/daemon/scripts/core-cli +++ b/daemon/core/scripts/cli.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python3 import json import sys from argparse import ( @@ -28,11 +27,13 @@ from core.api.grpc.wrappers import ( Position, ) -NODE_TYPES = [x for x in NodeType if x != NodeType.PEER_TO_PEER] +NODE_TYPES = [x.name for x in NodeType if x != NodeType.PEER_TO_PEER] def protobuf_to_json(message: Any) -> Dict[str, Any]: - return MessageToDict(message, including_default_value_fields=True, preserving_proto_field_name=True) + return MessageToDict( + message, including_default_value_fields=True, preserving_proto_field_name=True + ) def print_json(data: Any) -> None: @@ -122,18 +123,15 @@ def get_current_session(core: CoreGrpcClient, session_id: Optional[int]) -> int: return sessions[0].id -def create_iface(iface_id: int, mac: str, ip4_net: IPNetwork, ip6_net: IPNetwork) -> Interface: +def create_iface( + iface_id: int, mac: str, ip4_net: IPNetwork, ip6_net: IPNetwork +) -> Interface: ip4 = str(ip4_net.ip) if ip4_net else None ip4_mask = ip4_net.prefixlen if ip4_net else None ip6 = str(ip6_net.ip) if ip6_net else None ip6_mask = ip6_net.prefixlen if ip6_net else None return Interface( - id=iface_id, - mac=mac, - ip4=ip4, - ip4_mask=ip4_mask, - ip6=ip6, - ip6_mask=ip6_mask, + id=iface_id, mac=mac, ip4=ip4, ip4_mask=ip4_mask, ip6=ip6, ip6_mask=ip6_mask ) @@ -216,12 +214,14 @@ def query_session(core: CoreGrpcClient, args: Namespace) -> None: for node in session.nodes.values(): xy_pos = f"{int(node.position.x)},{int(node.position.y)}" geo_pos = f"{node.geo.lon:.7f},{node.geo.lat:.7f},{node.geo.alt:f}" - print(f"{node.id:<7} | {node.name[:7]:<7} | {node.type.name[:7]:<7} | {xy_pos:<9} | {geo_pos}") + print( + f"{node.id:<7} | {node.name[:7]:<7} | {node.type.name[:7]:<7} | {xy_pos:<9} | {geo_pos}" + ) print("\nLinks") for link in session.links: n1 = session.nodes[link.node1_id].name n2 = session.nodes[link.node2_id].name - print(f"Node | ", end="") + print("Node | ", end="") print_iface_header() print(f"{n1:<6} | ", end="") if link.iface1: @@ -248,7 +248,9 @@ def query_node(core: CoreGrpcClient, args: Namespace) -> None: print("ID | Name | Type | XY | Geo") xy_pos = f"{int(node.position.x)},{int(node.position.y)}" 
geo_pos = f"{node.geo.lon:.7f},{node.geo.lat:.7f},{node.geo.alt:f}" - print(f"{node.id:<7} | {node.name[:7]:<7} | {node.type.name[:7]:<7} | {xy_pos:<9} | {geo_pos}") + print( + f"{node.id:<7} | {node.name[:7]:<7} | {node.type.name[:7]:<7} | {xy_pos:<9} | {geo_pos}" + ) if ifaces: print("Interfaces") print("Connected To | ", end="") @@ -348,10 +350,14 @@ def add_link(core: CoreGrpcClient, args: Namespace) -> None: session_id = get_current_session(core, args.session) iface1 = None if args.iface1_id is not None: - iface1 = create_iface(args.iface1_id, args.iface1_mac, args.iface1_ip4, args.iface1_ip6) + iface1 = create_iface( + args.iface1_id, args.iface1_mac, args.iface1_ip4, args.iface1_ip6 + ) iface2 = None if args.iface2_id is not None: - iface2 = create_iface(args.iface2_id, args.iface2_mac, args.iface2_ip4, args.iface2_ip6) + iface2 = create_iface( + args.iface2_id, args.iface2_mac, args.iface2_ip4, args.iface2_ip6 + ) options = LinkOptions( bandwidth=args.bandwidth, loss=args.loss, @@ -432,13 +438,17 @@ def setup_node_parser(parent) -> None: add_parser.add_argument( "-t", "--type", choices=NODE_TYPES, default="DEFAULT", help="type of node" ) - add_parser.add_argument("-m", "--model", help="used to determine services, optional") + add_parser.add_argument( + "-m", "--model", help="used to determine services, optional" + ) group = add_parser.add_mutually_exclusive_group(required=True) group.add_argument("-p", "--pos", type=position_type, help="x,y position") group.add_argument("-g", "--geo", type=geo_type, help="lon,lat,alt position") add_parser.add_argument("-ic", "--icon", help="icon to use, optional") add_parser.add_argument("-im", "--image", help="container image, optional") - add_parser.add_argument("-e", "--emane", help="emane model, only required for emane nodes") + add_parser.add_argument( + "-e", "--emane", help="emane model, only required for emane nodes" + ) add_parser.set_defaults(func=add_node) edit_parser = subparsers.add_parser("edit", help="edit a node") @@ -449,7 +459,9 @@ def setup_node_parser(parent) -> None: move_parser = subparsers.add_parser("move", help="move a node") move_parser.formatter_class = ArgumentDefaultsHelpFormatter - move_parser.add_argument("-i", "--id", type=int, help="id to use, optional", required=True) + move_parser.add_argument( + "-i", "--id", type=int, help="id to use, optional", required=True + ) group = move_parser.add_mutually_exclusive_group(required=True) group.add_argument("-p", "--pos", type=position_type, help="x,y position") group.add_argument("-g", "--geo", type=geo_type, help="lon,lat,alt position") @@ -474,19 +486,33 @@ def setup_link_parser(parent) -> None: add_parser.add_argument("-n1", "--node1", type=int, help="node1 id", required=True) add_parser.add_argument("-n2", "--node2", type=int, help="node2 id", required=True) add_parser.add_argument("-i1-i", "--iface1-id", type=int, help="node1 interface id") - add_parser.add_argument("-i1-m", "--iface1-mac", type=mac_type, help="node1 interface mac") - add_parser.add_argument("-i1-4", "--iface1-ip4", type=ip4_type, help="node1 interface ip4") - add_parser.add_argument("-i1-6", "--iface1-ip6", type=ip6_type, help="node1 interface ip6") + add_parser.add_argument( + "-i1-m", "--iface1-mac", type=mac_type, help="node1 interface mac" + ) + add_parser.add_argument( + "-i1-4", "--iface1-ip4", type=ip4_type, help="node1 interface ip4" + ) + add_parser.add_argument( + "-i1-6", "--iface1-ip6", type=ip6_type, help="node1 interface ip6" + ) add_parser.add_argument("-i2-i", "--iface2-id", type=int, 
help="node2 interface id") - add_parser.add_argument("-i2-m", "--iface2-mac", type=mac_type, help="node2 interface mac") - add_parser.add_argument("-i2-4", "--iface2-ip4", type=ip4_type, help="node2 interface ip4") - add_parser.add_argument("-i2-6", "--iface2-ip6", type=ip6_type, help="node2 interface ip6") + add_parser.add_argument( + "-i2-m", "--iface2-mac", type=mac_type, help="node2 interface mac" + ) + add_parser.add_argument( + "-i2-4", "--iface2-ip4", type=ip4_type, help="node2 interface ip4" + ) + add_parser.add_argument( + "-i2-6", "--iface2-ip6", type=ip6_type, help="node2 interface ip6" + ) add_parser.add_argument("-b", "--bandwidth", type=int, help="bandwidth (bps)") add_parser.add_argument("-l", "--loss", type=float, help="loss (%%)") add_parser.add_argument("-j", "--jitter", type=int, help="jitter (us)") add_parser.add_argument("-de", "--delay", type=int, help="delay (us)") add_parser.add_argument("-du", "--duplicate", type=int, help="duplicate (%%)") - add_parser.add_argument("-u", "--uni", action="store_true", help="is link unidirectional?") + add_parser.add_argument( + "-u", "--uni", action="store_true", help="is link unidirectional?" + ) add_parser.set_defaults(func=add_link) edit_parser = subparsers.add_parser("edit", help="edit a link") @@ -507,8 +533,12 @@ def setup_link_parser(parent) -> None: delete_parser = subparsers.add_parser("delete", help="delete a link") delete_parser.formatter_class = ArgumentDefaultsHelpFormatter - delete_parser.add_argument("-n1", "--node1", type=int, help="node1 id", required=True) - delete_parser.add_argument("-n2", "--node2", type=int, help="node1 id", required=True) + delete_parser.add_argument( + "-n1", "--node1", type=int, help="node1 id", required=True + ) + delete_parser.add_argument( + "-n2", "--node2", type=int, help="node1 id", required=True + ) delete_parser.add_argument("-i1", "--iface1", type=int, help="node1 interface id") delete_parser.add_argument("-i2", "--iface2", type=int, help="node2 interface id") delete_parser.set_defaults(func=delete_link) @@ -526,20 +556,28 @@ def setup_query_parser(parent) -> None: session_parser = subparsers.add_parser("session", help="query session") session_parser.formatter_class = ArgumentDefaultsHelpFormatter - session_parser.add_argument("-i", "--id", type=int, help="session to query", required=True) + session_parser.add_argument( + "-i", "--id", type=int, help="session to query", required=True + ) session_parser.set_defaults(func=query_session) node_parser = subparsers.add_parser("node", help="query node") node_parser.formatter_class = ArgumentDefaultsHelpFormatter - node_parser.add_argument("-i", "--id", type=int, help="session to query", required=True) - node_parser.add_argument("-n", "--node", type=int, help="node to query", required=True) + node_parser.add_argument( + "-i", "--id", type=int, help="session to query", required=True + ) + node_parser.add_argument( + "-n", "--node", type=int, help="node to query", required=True + ) node_parser.set_defaults(func=query_node) def setup_xml_parser(parent) -> None: parser = parent.add_parser("xml", help="open session xml") parser.formatter_class = ArgumentDefaultsHelpFormatter - parser.add_argument("-f", "--file", type=file_type, help="xml file to open", required=True) + parser.add_argument( + "-f", "--file", type=file_type, help="xml file to open", required=True + ) parser.add_argument("-s", "--start", action="store_true", help="start the session?") parser.set_defaults(func=open_xml) diff --git a/daemon/scripts/core-daemon 
b/daemon/core/scripts/daemon.py similarity index 52% rename from daemon/scripts/core-daemon rename to daemon/core/scripts/daemon.py index 0ff4ca77..6b9caa54 100755 --- a/daemon/scripts/core-daemon +++ b/daemon/core/scripts/daemon.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python3 """ core-daemon: the CORE daemon is a server process that receives CORE API messages and instantiates emulated nodes and networks within the kernel. Various @@ -8,19 +7,15 @@ message handlers are defined and some support for sending messages. import argparse import logging import os -import sys -import threading import time from configparser import ConfigParser from pathlib import Path from core import constants from core.api.grpc.server import CoreGrpcServer -from core.api.tlv.corehandlers import CoreHandler, CoreUdpHandler -from core.api.tlv.coreserver import CoreServer, CoreUdpServer -from core.api.tlv.enumerations import CORE_API_PORT from core.constants import CORE_CONF_DIR, COREDPY_VERSION -from core.utils import close_onexec, load_logging_config +from core.emulator.coreemu import CoreEmu +from core.utils import load_logging_config logger = logging.getLogger(__name__) @@ -34,20 +29,6 @@ def banner(): logger.info("CORE daemon v.%s started %s", constants.COREDPY_VERSION, time.ctime()) -def start_udp(mainserver, server_address): - """ - Start a thread running a UDP server on the same host,port for - connectionless requests. - - :param CoreServer mainserver: main core tcp server to piggy back off of - :param server_address: - :return: CoreUdpServer - """ - mainserver.udpserver = CoreUdpServer(server_address, CoreUdpHandler, mainserver) - mainserver.udpthread = threading.Thread(target=mainserver.udpserver.start, daemon=True) - mainserver.udpthread.start() - - def cored(cfg): """ Start the CoreServer object and enter the server loop. 
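With the TLV listeners gone, the daemon's runtime configuration reduces to the gRPC address/port plus logging. A minimal sketch of the ConfigParser-with-defaults pattern that get_merged_config below uses, reading the usual /etc/core/core.conf location; the path and values are illustrative:

```python
from configparser import ConfigParser

defaults = {"grpcport": "50051", "grpcaddress": "localhost"}
cfg = ConfigParser(defaults)
cfg.read("/etc/core/core.conf")  # typical location; missing files are ignored
if not cfg.has_section("core-daemon"):
    cfg.add_section("core-daemon")
config = dict(cfg.items("core-daemon"))
print(config["grpcaddress"], config["grpcport"])
```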
@@ -55,34 +36,13 @@ def cored(cfg): :param dict cfg: core configuration :return: nothing """ - host = cfg["listenaddr"] - port = int(cfg["port"]) - if host == "" or host is None: - host = "localhost" - - try: - address = (host, port) - server = CoreServer(address, CoreHandler, cfg) - except: - logger.exception("error starting main server on: %s:%s", host, port) - sys.exit(1) - # initialize grpc api - grpc_server = CoreGrpcServer(server.coreemu) + coreemu = CoreEmu(cfg) + grpc_server = CoreGrpcServer(coreemu) address_config = cfg["grpcaddress"] port_config = cfg["grpcport"] grpc_address = f"{address_config}:{port_config}" - grpc_thread = threading.Thread(target=grpc_server.listen, args=(grpc_address,), daemon=True) - grpc_thread.start() - - # start udp server - start_udp(server, address) - - # close handlers - close_onexec(server.fileno()) - - logger.info("CORE TLV API TCP/UDP listening on: %s:%s", host, port) - server.serve_forever() + grpc_server.listen(grpc_address) def get_merged_config(filename): @@ -98,49 +58,55 @@ def get_merged_config(filename): default_grpc_port = "50051" default_address = "localhost" defaults = { - "port": str(CORE_API_PORT), - "listenaddr": default_address, "grpcport": default_grpc_port, "grpcaddress": default_address, - "logfile": default_log + "logfile": default_log, } - parser = argparse.ArgumentParser( - description=f"CORE daemon v.{COREDPY_VERSION} instantiates Linux network namespace nodes.") - parser.add_argument("-f", "--configfile", dest="configfile", - help=f"read config from specified file; default = {filename}") - parser.add_argument("-p", "--port", dest="port", type=int, - help=f"port number to listen on; default = {CORE_API_PORT}") - parser.add_argument("--ovs", action="store_true", help="enable experimental ovs mode, default is false") - parser.add_argument("--grpc-port", dest="grpcport", - help=f"grpc port to listen on; default {default_grpc_port}") - parser.add_argument("--grpc-address", dest="grpcaddress", - help=f"grpc address to listen on; default {default_address}") - parser.add_argument("-l", "--logfile", help=f"core logging configuration; default {default_log}") - + description=f"CORE daemon v.{COREDPY_VERSION} instantiates Linux network namespace nodes." 
+ ) + parser.add_argument( + "-f", + "--configfile", + dest="configfile", + help=f"read config from specified file; default = {filename}", + ) + parser.add_argument( + "--ovs", + action="store_true", + help="enable experimental ovs mode, default is false", + ) + parser.add_argument( + "--grpc-port", + dest="grpcport", + help=f"grpc port to listen on; default {default_grpc_port}", + ) + parser.add_argument( + "--grpc-address", + dest="grpcaddress", + help=f"grpc address to listen on; default {default_address}", + ) + parser.add_argument( + "-l", "--logfile", help=f"core logging configuration; default {default_log}" + ) # parse command line options args = parser.parse_args() - # convert ovs to internal format args.ovs = "1" if args.ovs else "0" - # read the config file if args.configfile is not None: filename = args.configfile del args.configfile cfg = ConfigParser(defaults) cfg.read(filename) - section = "core-daemon" if not cfg.has_section(section): cfg.add_section(section) - # merge argparse with configparser for opt in vars(args): val = getattr(args, opt) if val is not None: cfg.set(section, opt, str(val)) - return dict(cfg.items(section)) diff --git a/daemon/scripts/core-gui b/daemon/core/scripts/gui.py similarity index 60% rename from daemon/scripts/core-gui rename to daemon/core/scripts/gui.py index ff7795a3..9c0560b2 100755 --- a/daemon/scripts/core-gui +++ b/daemon/core/scripts/gui.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python3 import argparse import logging from logging.handlers import TimedRotatingFileHandler @@ -9,12 +8,19 @@ from core.gui.app import Application def main() -> None: # parse flags - parser = argparse.ArgumentParser(description=f"CORE Python GUI") - parser.add_argument("-l", "--level", choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"], default="INFO", - help="logging level") + parser = argparse.ArgumentParser(description="CORE Python GUI") + parser.add_argument( + "-l", + "--level", + choices=["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"], + default="INFO", + help="logging level", + ) parser.add_argument("-p", "--proxy", action="store_true", help="enable proxy") parser.add_argument("-s", "--session", type=int, help="session id to join") - parser.add_argument("--create-dir", action="store_true", help="create gui directory and exit") + parser.add_argument( + "--create-dir", action="store_true", help="create gui directory and exit" + ) args = parser.parse_args() # check home directory exists and create if necessary @@ -25,9 +31,13 @@ def main() -> None: # setup logging log_format = "%(asctime)s - %(levelname)s - %(module)s:%(funcName)s - %(message)s" stream_handler = logging.StreamHandler() - file_handler = TimedRotatingFileHandler(filename=appconfig.LOG_PATH, when="D", backupCount=5) + file_handler = TimedRotatingFileHandler( + filename=appconfig.LOG_PATH, when="D", backupCount=5 + ) log_level = logging.getLevelName(args.level) - logging.basicConfig(level=log_level, format=log_format, handlers=[stream_handler, file_handler]) + logging.basicConfig( + level=log_level, format=log_format, handlers=[stream_handler, file_handler] + ) logging.getLogger("PIL").setLevel(logging.ERROR) # start app diff --git a/daemon/core/scripts/player.py b/daemon/core/scripts/player.py new file mode 100755 index 00000000..07728939 --- /dev/null +++ b/daemon/core/scripts/player.py @@ -0,0 +1,51 @@ +import argparse +import logging +import sys +from pathlib import Path + +from core.player import CorePlayer + +logger = logging.getLogger(__name__) + + +def path_type(value: str) -> Path: + 
file_path = Path(value) + if not file_path.is_file(): + raise argparse.ArgumentTypeError(f"file does not exist: {value}") + return file_path + + +def parse_args() -> argparse.Namespace: + """ + Setup and parse command line arguments. + + :return: parsed arguments + """ + parser = argparse.ArgumentParser( + description="core player runs files that can move nodes and send commands", + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) + parser.add_argument( + "-f", "--file", required=True, type=path_type, help="core file to play" + ) + parser.add_argument( + "-s", + "--session", + type=int, + help="session to play to, first found session otherwise", + ) + return parser.parse_args() + + +def main() -> None: + logging.basicConfig(level=logging.INFO) + args = parse_args() + player = CorePlayer(args.file) + result = player.init(args.session) + if not result: + sys.exit(1) + player.start() + + +if __name__ == "__main__": + main() diff --git a/daemon/scripts/core-route-monitor b/daemon/core/scripts/routemonitor.py similarity index 97% rename from daemon/scripts/core-route-monitor rename to daemon/core/scripts/routemonitor.py index bc61f6fa..2ebfdfad 100755 --- a/daemon/scripts/core-route-monitor +++ b/daemon/core/scripts/routemonitor.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python3 import argparse import enum import select @@ -60,15 +59,15 @@ class SdtClient: class RouterMonitor: def __init__( - self, - session: int, - src: str, - dst: str, - pkt: str, - rate: int, - dead: int, - sdt_host: str, - sdt_port: int, + self, + session: int, + src: str, + dst: str, + pkt: str, + rate: int, + dead: int, + sdt_host: str, + sdt_port: int, ) -> None: self.queue = Queue() self.core = CoreGrpcClient() diff --git a/daemon/scripts/core-service-update b/daemon/core/scripts/serviceupdate.py similarity index 50% rename from daemon/scripts/core-service-update rename to daemon/core/scripts/serviceupdate.py index d0ca863f..50ada96d 100755 --- a/daemon/scripts/core-service-update +++ b/daemon/core/scripts/serviceupdate.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python3 import argparse import re from io import TextIOWrapper @@ -6,9 +5,15 @@ from io import TextIOWrapper def parse_args() -> argparse.Namespace: parser = argparse.ArgumentParser( - description=f"Helps transition older CORE services to work with newer versions") - parser.add_argument("-f", "--file", dest="file", type=argparse.FileType("r"), - help=f"service file to update") + description="Helps transition older CORE services to work with newer versions" + ) + parser.add_argument( + "-f", + "--file", + dest="file", + type=argparse.FileType("r"), + help="service file to update", + ) return parser.parse_args() @@ -20,17 +25,32 @@ def update_service(service_file: TextIOWrapper) -> None: # rename dirs to directories line = re.sub(r"^(\s+)dirs", r"\1directories", line) # fix import states for service - line = re.sub(r"^.+import.+CoreService.+$", - r"from core.services.coreservices import CoreService", line) + line = re.sub( + r"^.+import.+CoreService.+$", + r"from core.services.coreservices import CoreService", + line, + ) # fix method signatures - line = re.sub(r"def generateconfig\(cls, node, filename, services\)", - r"def generate_config(cls, node, filename)", line) - line = re.sub(r"def getvalidate\(cls, node, services\)", - r"def get_validate(cls, node)", line) - line = re.sub(r"def getstartup\(cls, node, services\)", - r"def get_startup(cls, node)", line) - line = re.sub(r"def getconfigfilenames\(cls, nodenum, services\)", - r"def get_configs(cls, node)", line) + 
line = re.sub( + r"def generateconfig\(cls, node, filename, services\)", + r"def generate_config(cls, node, filename)", + line, + ) + line = re.sub( + r"def getvalidate\(cls, node, services\)", + r"def get_validate(cls, node)", + line, + ) + line = re.sub( + r"def getstartup\(cls, node, services\)", + r"def get_startup(cls, node)", + line, + ) + line = re.sub( + r"def getconfigfilenames\(cls, nodenum, services\)", + r"def get_configs(cls, node)", + line, + ) # remove unwanted lines if re.search(r"addservice\(", line): continue diff --git a/daemon/core/services/coreservices.py b/daemon/core/services/coreservices.py index 8d611d4b..6e52b5d6 100644 --- a/daemon/core/services/coreservices.py +++ b/daemon/core/services/coreservices.py @@ -109,114 +109,6 @@ class ServiceDependencies: return self.boot_paths -class ServiceShim: - keys: List[str] = [ - "dirs", - "files", - "startidx", - "cmdup", - "cmddown", - "cmdval", - "meta", - "starttime", - ] - - @classmethod - def tovaluelist(cls, node: CoreNode, service: "CoreService") -> str: - """ - Convert service properties into a string list of key=value pairs, - separated by "|". - - :param node: node to get value list for - :param service: service to get value list for - :return: value list string - """ - start_time = 0 - start_index = 0 - valmap = [ - service.dirs, - service.configs, - start_index, - service.startup, - service.shutdown, - service.validate, - service.meta, - start_time, - ] - if not service.custom: - valmap[1] = service.get_configs(node) - valmap[3] = service.get_startup(node) - vals = ["%s=%s" % (x, y) for x, y in zip(cls.keys, valmap)] - return "|".join(vals) - - @classmethod - def fromvaluelist(cls, service: "CoreService", values: List[str]) -> None: - """ - Convert list of values into properties for this instantiated - (customized) service. - - :param service: service to get value list for - :param values: value list to set properties from - :return: nothing - """ - # TODO: support empty value? e.g. override default meta with '' - for key in cls.keys: - try: - cls.setvalue(service, key, values[cls.keys.index(key)]) - except IndexError: - # old config does not need to have new keys - logger.exception("error indexing into key") - - @classmethod - def setvalue(cls, service: "CoreService", key: str, value: str) -> None: - """ - Set values for this service. - - :param service: service to get value list for - :param key: key to set value for - :param value: value of key to set - :return: nothing - """ - if key not in cls.keys: - raise ValueError("key `%s` not in `%s`" % (key, cls.keys)) - # this handles data conversion to int, string, and tuples - if value: - if key == "startidx": - value = int(value) - elif key == "starttime": - value = float(value) - elif key == "meta": - value = str(value) - else: - value = utils.make_tuple_fromstr(value, str) - - if key == "dirs": - service.dirs = value - elif key == "files": - service.configs = value - elif key == "cmdup": - service.startup = value - elif key == "cmddown": - service.shutdown = value - elif key == "cmdval": - service.validate = value - elif key == "meta": - service.meta = value - - @classmethod - def servicesfromopaque(cls, opaque: str) -> List[str]: - """ - Build a list of services from an opaque data string. - - :param opaque: opaque data string - :return: services - """ - servicesstring = opaque.split(":") - if servicesstring[0] != "service": - return [] - return servicesstring[1].split(",") - - class ServiceManager: """ Manages services available for CORE nodes to use. 
@@ -342,26 +234,6 @@ class CoreServices: """ self.custom_services.clear() - def get_default_services(self, node_type: str) -> List[Type["CoreService"]]: - """ - Get the list of default services that should be enabled for a - node for the given node type. - - :param node_type: node type to get default services for - :return: default services - """ - logger.debug("getting default services for type: %s", node_type) - results = [] - defaults = self.default_services.get(node_type, []) - for name in defaults: - logger.debug("checking for service with service manager: %s", name) - service = ServiceManager.get(name) - if not service: - logger.warning("default service %s is unknown", name) - else: - results.append(service) - return results - def get_service( self, node_id: int, service_name: str, default_service: bool = False ) -> "CoreService": @@ -401,21 +273,21 @@ class CoreServices: node_services[service.name] = service def add_services( - self, node: CoreNode, node_type: str, services: List[str] = None + self, node: CoreNode, model: str, services: List[str] = None ) -> None: """ Add services to a node. :param node: node to add services to - :param node_type: node type to add services to + :param model: node model type to add services for :param services: names of services to add to node :return: nothing """ if not services: logger.info( - "using default services for node(%s) type(%s)", node.name, node_type + "using default services for node(%s) type(%s)", node.name, model ) - services = self.default_services.get(node_type, []) + services = self.default_services.get(model, []) logger.info("setting services for node(%s): %s", node.name, services) for service_name in services: service = self.get_service(node.id, service_name, default_service=True) diff --git a/daemon/core/services/frr.py b/daemon/core/services/frr.py index 9029daec..5fbacf42 100644 --- a/daemon/core/services/frr.py +++ b/daemon/core/services/frr.py @@ -7,15 +7,26 @@ from typing import Optional, Tuple import netaddr from core.emane.nodes import EmaneNet -from core.nodes.base import CoreNode +from core.nodes.base import CoreNode, NodeBase from core.nodes.interface import DEFAULT_MTU, CoreInterface from core.nodes.network import PtpNet, WlanNode from core.nodes.physical import Rj45Node +from core.nodes.wireless import WirelessNode from core.services.coreservices import CoreService FRR_STATE_DIR: str = "/var/run/frr" +def is_wireless(node: NodeBase) -> bool: + """ + Check if the node is a wireless type node. + + :param node: node to check type for + :return: True if wireless type, False otherwise + """ + return isinstance(node, (WlanNode, EmaneNet, WirelessNode)) + + class FRRZebra(CoreService): name: str = "FRRzebra" group: str = "FRR" @@ -127,11 +138,11 @@ class FRRZebra(CoreService): """ Generate a shell script used to boot the FRR daemons. 
""" - frr_bin_search = node.session.options.get_config( - "frr_bin_search", default='"/usr/local/bin /usr/bin /usr/lib/frr"' + frr_bin_search = node.session.options.get( + "frr_bin_search", '"/usr/local/bin /usr/bin /usr/lib/frr"' ) - frr_sbin_search = node.session.options.get_config( - "frr_sbin_search", default='"/usr/local/sbin /usr/sbin /usr/lib/frr"' + frr_sbin_search = node.session.options.get( + "frr_sbin_search", '"/usr/local/sbin /usr/sbin /usr/lib/frr"' ) cfg = """\ #!/bin/sh @@ -184,6 +195,10 @@ bootdaemon() flags="$flags -6" fi + if [ "$1" = "ospfd" ]; then + flags="$flags --apiserver" + fi + #force FRR to use CORE generated conf file flags="$flags -d -f $FRR_CONF" $FRR_SBIN_DIR/$1 $flags @@ -414,12 +429,25 @@ class FRROspfv2(FrrService): for iface in node.get_ifaces(control=False): for ip4 in iface.ip4s: cfg += f" network {ip4} area 0\n" + cfg += " ospf opaque-lsa\n" cfg += "!\n" return cfg @classmethod def generate_frr_iface_config(cls, node: CoreNode, iface: CoreInterface) -> str: - return cls.mtu_check(iface) + cfg = cls.mtu_check(iface) + # external RJ45 connections will use default OSPF timers + if cls.rj45check(iface): + return cfg + cfg += cls.ptp_check(iface) + return ( + cfg + + """\ + ip ospf hello-interval 2 + ip ospf dead-interval 6 + ip ospf retransmit-interval 5 +""" + ) class FRROspfv3(FrrService): @@ -485,18 +513,6 @@ class FRROspfv3(FrrService): @classmethod def generate_frr_iface_config(cls, node: CoreNode, iface: CoreInterface) -> str: return cls.mtu_check(iface) - # cfg = cls.mtucheck(ifc) - # external RJ45 connections will use default OSPF timers - # if cls.rj45check(ifc): - # return cfg - # cfg += cls.ptpcheck(ifc) - # return cfg + """\ - - -# ipv6 ospf6 hello-interval 2 -# ipv6 ospf6 dead-interval 6 -# ipv6 ospf6 retransmit-interval 5 -# """ class FRRBgp(FrrService): @@ -593,7 +609,7 @@ class FRRBabel(FrrService): @classmethod def generate_frr_iface_config(cls, node: CoreNode, iface: CoreInterface) -> str: - if iface.net and isinstance(iface.net, (EmaneNet, WlanNode)): + if is_wireless(iface.net): return " babel wireless\n no babel split-horizon\n" else: return " babel wired\n babel split-horizon\n" diff --git a/daemon/core/services/nrl.py b/daemon/core/services/nrl.py index 91e053b2..9ef4e1d8 100644 --- a/daemon/core/services/nrl.py +++ b/daemon/core/services/nrl.py @@ -118,12 +118,6 @@ class NrlSmf(NrlService): ifaces = node.get_ifaces(control=False) if len(ifaces) == 0: return "" - - if "arouted" in servicenames: - comments += "# arouted service is enabled\n" - cmd += " tap %s_tap" % (node.name,) - cmd += " unicast %s" % cls.firstipv4prefix(node, 24) - cmd += " push lo,%s resequence on" % ifaces[0].name if len(ifaces) > 0: if "NHDP" in servicenames: comments += "# NHDP service is enabled\n" @@ -586,46 +580,3 @@ class MgenActor(NrlService): return "" cfg += comments + cmd + " < /dev/null > /dev/null 2>&1 &\n\n" return cfg - - -class Arouted(NrlService): - """ - Adaptive Routing - """ - - name: str = "arouted" - executables: Tuple[str, ...] = ("arouted",) - configs: Tuple[str, ...] = ("startarouted.sh",) - startup: Tuple[str, ...] = ("bash startarouted.sh",) - shutdown: Tuple[str, ...] = ("pkill arouted",) - validate: Tuple[str, ...] = ("pidof arouted",) - - @classmethod - def generate_config(cls, node: CoreNode, filename: str) -> str: - """ - Return the Quagga.conf or quaggaboot.sh file contents. 
- """ - cfg = ( - """ -#!/bin/sh -for f in "/tmp/%s_smf"; do - count=1 - until [ -e "$f" ]; do - if [ $count -eq 10 ]; then - echo "ERROR: nrlmsf pipe not found: $f" >&2 - exit 1 - fi - sleep 0.1 - count=$(($count + 1)) - done -done - -""" - % node.name - ) - cfg += "ip route add %s dev lo\n" % cls.firstipv4prefix(node, 24) - cfg += "arouted instance %s_smf tap %s_tap" % (node.name, node.name) - # seconds to consider a new route valid - cfg += " stability 10" - cfg += " 2>&1 > /var/log/arouted.log &\n\n" - return cfg diff --git a/daemon/core/services/quagga.py b/daemon/core/services/quagga.py index fa71feee..a2f06bec 100644 --- a/daemon/core/services/quagga.py +++ b/daemon/core/services/quagga.py @@ -6,16 +6,26 @@ from typing import Optional, Tuple import netaddr from core.emane.nodes import EmaneNet -from core.emulator.enumerations import LinkTypes -from core.nodes.base import CoreNode +from core.nodes.base import CoreNode, NodeBase from core.nodes.interface import DEFAULT_MTU, CoreInterface from core.nodes.network import PtpNet, WlanNode from core.nodes.physical import Rj45Node +from core.nodes.wireless import WirelessNode from core.services.coreservices import CoreService QUAGGA_STATE_DIR: str = "/var/run/quagga" +def is_wireless(node: NodeBase) -> bool: + """ + Check if the node is a wireless type node. + + :param node: node to check type for + :return: True if wireless type, False otherwise + """ + return isinstance(node, (WlanNode, EmaneNet, WirelessNode)) + + class Zebra(CoreService): name: str = "zebra" group: str = "Quagga" @@ -124,11 +134,11 @@ class Zebra(CoreService): """ Generate a shell script used to boot the Quagga daemons. """ - quagga_bin_search = node.session.options.get_config( - "quagga_bin_search", default='"/usr/local/bin /usr/bin /usr/lib/quagga"' + quagga_bin_search = node.session.options.get( + "quagga_bin_search", '"/usr/local/bin /usr/bin /usr/lib/quagga"' ) - quagga_sbin_search = node.session.options.get_config( - "quagga_sbin_search", default='"/usr/local/sbin /usr/sbin /usr/lib/quagga"' + quagga_sbin_search = node.session.options.get( + "quagga_sbin_search", '"/usr/local/sbin /usr/sbin /usr/lib/quagga"' ) return """\ #!/bin/sh @@ -431,7 +441,7 @@ class Ospfv3mdr(Ospfv3): @classmethod def generate_quagga_iface_config(cls, node: CoreNode, iface: CoreInterface) -> str: cfg = cls.mtu_check(iface) - if iface.net is not None and isinstance(iface.net, (WlanNode, EmaneNet)): + if is_wireless(iface.net): return ( cfg + """\ @@ -542,7 +552,7 @@ class Babel(QuaggaService): @classmethod def generate_quagga_iface_config(cls, node: CoreNode, iface: CoreInterface) -> str: - if iface.net and iface.net.linktype == LinkTypes.WIRELESS: + if is_wireless(iface.net): return " babel wireless\n no babel split-horizon\n" else: return " babel wired\n babel split-horizon\n" diff --git a/daemon/core/services/ucarp.py b/daemon/core/services/ucarp.py index 522eeaf6..aa0d9a1a 100644 --- a/daemon/core/services/ucarp.py +++ b/daemon/core/services/ucarp.py @@ -44,9 +44,7 @@ class Ucarp(CoreService): """ Returns configuration file text. 
""" - ucarp_bin = node.session.options.get_config( - "ucarp_bin", default="/usr/sbin/ucarp" - ) + ucarp_bin = node.session.options.get("ucarp_bin", "/usr/sbin/ucarp") return """\ #!/bin/sh # Location of UCARP executable diff --git a/daemon/core/utils.py b/daemon/core/utils.py index c9604f08..244590f8 100644 --- a/daemon/core/utils.py +++ b/daemon/core/utils.py @@ -16,7 +16,9 @@ import shlex import shutil import sys import threading +from collections import OrderedDict from pathlib import Path +from queue import Queue from subprocess import PIPE, STDOUT, Popen from typing import ( TYPE_CHECKING, @@ -214,8 +216,7 @@ def cmd( shell: bool = False, ) -> str: """ - Execute a command on the host and return a tuple containing the exit status and - result string. stderr output is folded into the stdout result string. + Execute a command on the host and returns the combined stderr stdout output. :param args: command arguments :param env: environment to run command with @@ -248,6 +249,25 @@ def cmd( raise CoreCommandError(1, input_args, "", e.strerror) +def run_cmds(args: List[str], wait: bool = True, shell: bool = False) -> List[str]: + """ + Execute a series of commands on the host and returns a list of the combined stderr + stdout output. + + :param args: command arguments + :param wait: True to wait for status, False otherwise + :param shell: True to use shell, False otherwise + :return: combined stdout and stderr + :raises CoreCommandError: when there is a non-zero exit status or the file to + execute is not found + """ + outputs = [] + for arg in args: + output = cmd(arg, wait=wait, shell=shell) + outputs.append(output) + return outputs + + def file_munge(pathname: str, header: str, text: str) -> None: """ Insert text at the end of a file, surrounded by header comments. @@ -405,6 +425,101 @@ def load_logging_config(config_path: Path) -> None: logging.config.dictConfig(log_config) +def run_cmds_threaded( + nodes: List["CoreNode"], + cmds: List[str], + wait: bool = True, + shell: bool = False, + workers: int = None, +) -> Tuple[Dict[int, List[str]], List[Exception]]: + """ + Run a set of commands in order across a provided set of nodes. Each node will + run the commands within the context of a threadpool. 
+ + :param nodes: nodes to run commands in + :param cmds: commands to run in nodes + :param wait: True to wait for status, False otherwise + :param shell: True to run shell like, False otherwise + :param workers: number of workers for threadpool, uses library default otherwise + :return: tuple including dict of node id to list of command output and a list of + exceptions if any + """ + + def _node_cmds( + _target: "CoreNode", _cmds: List[str], _wait: bool, _shell: bool + ) -> List[str]: + outputs = [] + for _cmd in _cmds: + output = _target.cmd(_cmd, wait=_wait, shell=_shell) + outputs.append(output) + return outputs + + with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as executor: + futures = [] + node_mappings = {} + for node in nodes: + future = executor.submit(_node_cmds, node, cmds, wait, shell) + node_mappings[future] = node + futures.append(future) + outputs = {} + exceptions = [] + for future in concurrent.futures.as_completed(futures): + try: + result = future.result() + node = node_mappings[future] + outputs[node.id] = result + except Exception as e: + logger.exception("thread pool exception") + exceptions.append(e) + return outputs, exceptions + + +def run_cmds_mp( + nodes: List["CoreNode"], + cmds: List[str], + wait: bool = True, + shell: bool = False, + workers: int = None, +) -> Tuple[Dict[int, List[str]], List[Exception]]: + """ + Run a set of commands in order across a provided set of nodes. Each node will + run the commands within the context of a process pool. This will not work + for distributed nodes and throws an exception when encountered. + + :param nodes: nodes to run commands in + :param cmds: commands to run in nodes + :param wait: True to wait for status, False otherwise + :param shell: True to run shell like, False otherwise + :param workers: number of workers for threadpool, uses library default otherwise + :return: tuple including dict of node id to list of command output and a list of + exceptions if any + :raises CoreError: when a distributed node is provided as input + """ + with concurrent.futures.ProcessPoolExecutor(max_workers=workers) as executor: + futures = [] + node_mapping = {} + for node in nodes: + node_cmds = [node.create_cmd(x) for x in cmds] + if node.server: + raise CoreError( + f"{node.name} uses a distributed server and not supported" + ) + future = executor.submit(run_cmds, node_cmds, wait=wait, shell=shell) + node_mapping[future] = node + futures.append(future) + exceptions = [] + outputs = {} + for future in concurrent.futures.as_completed(futures): + try: + result = future.result() + node = node_mapping[future] + outputs[node.id] = result + except Exception as e: + logger.exception("thread pool exception") + exceptions.append(e) + return outputs, exceptions + + def threadpool( funcs: List[Tuple[Callable, Iterable[Any], Dict[Any, Any]]], workers: int = 10 ) -> Tuple[List[Any], List[Exception]]: @@ -474,3 +589,19 @@ def parse_iface_config_id(config_id: int) -> Tuple[int, Optional[int]]: iface_id = config_id % IFACE_CONFIG_FACTOR node_id = config_id // IFACE_CONFIG_FACTOR return node_id, iface_id + + +class SetQueue(Queue): + """ + Set backed queue to avoid duplicate submissions. 
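+
+    Duplicate puts collapse into a single queued entry, since the backing
+    OrderedDict stores one key per item, and items are retrieved in first-put
+    order.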
+ """ + + def _init(self, maxsize): + self.queue: OrderedDict = OrderedDict() + + def _put(self, item): + self.queue[item] = None + + def _get(self): + key, _ = self.queue.popitem(last=False) + return key diff --git a/daemon/core/xml/corexml.py b/daemon/core/xml/corexml.py index 647300fc..a483a8ee 100644 --- a/daemon/core/xml/corexml.py +++ b/daemon/core/xml/corexml.py @@ -1,20 +1,23 @@ import logging from pathlib import Path -from typing import TYPE_CHECKING, Any, Dict, Generic, List, Optional, Type, TypeVar +from typing import TYPE_CHECKING, Any, Dict, Generic, Optional, Type, TypeVar from lxml import etree import core.nodes.base import core.nodes.physical from core import utils -from core.emane.nodes import EmaneNet -from core.emulator.data import InterfaceData, LinkData, LinkOptions, NodeOptions +from core.config import Configuration +from core.emane.nodes import EmaneNet, EmaneOptions +from core.emulator.data import InterfaceData, LinkOptions from core.emulator.enumerations import EventTypes, NodeTypes from core.errors import CoreXmlError -from core.nodes.base import CoreNodeBase, NodeBase -from core.nodes.docker import DockerNode +from core.nodes.base import CoreNodeBase, CoreNodeOptions, NodeBase, Position +from core.nodes.docker import DockerNode, DockerOptions +from core.nodes.interface import CoreInterface from core.nodes.lxd import LxcNode -from core.nodes.network import CtrlNet, GreTapBridge, WlanNode +from core.nodes.network import CtrlNet, GreTapBridge, PtpNet, WlanNode +from core.nodes.wireless import WirelessNode from core.services.coreservices import CoreService logger = logging.getLogger(__name__) @@ -209,7 +212,7 @@ class ServiceElement: class DeviceElement(NodeElement): def __init__(self, session: "Session", node: NodeBase) -> None: super().__init__(session, node, "device") - add_attribute(self.element, "type", node.type) + add_attribute(self.element, "type", node.model) self.add_class() self.add_services() @@ -242,21 +245,31 @@ class DeviceElement(NodeElement): class NetworkElement(NodeElement): def __init__(self, session: "Session", node: NodeBase) -> None: super().__init__(session, node, "network") - if isinstance(self.node, (WlanNode, EmaneNet)): - if self.node.model: - add_attribute(self.element, "model", self.node.model.name) + if isinstance(self.node, WlanNode): + if self.node.wireless_model: + add_attribute(self.element, "model", self.node.wireless_model.name) + if self.node.mobility: + add_attribute(self.element, "mobility", self.node.mobility.name) + if isinstance(self.node, EmaneNet): + if self.node.wireless_model: + add_attribute(self.element, "model", self.node.wireless_model.name) if self.node.mobility: add_attribute(self.element, "mobility", self.node.mobility.name) if isinstance(self.node, GreTapBridge): add_attribute(self.element, "grekey", self.node.grekey) + if isinstance(self.node, WirelessNode): + config = self.node.get_config() + self.add_wireless_config(config) self.add_type() def add_type(self) -> None: - if self.node.apitype: - node_type = self.node.apitype.name - else: - node_type = self.node.__class__.__name__ - add_attribute(self.element, "type", node_type) + node_type = self.session.get_node_type(type(self.node)) + add_attribute(self.element, "type", node_type.name) + + def add_wireless_config(self, config: Dict[str, Configuration]) -> None: + wireless_element = etree.SubElement(self.element, "wireless") + for config_item in config.values(): + add_configuration(wireless_element, config_item.id, config_item.default) class CoreXmlWriter: @@ 
-269,8 +282,8 @@ class CoreXmlWriter: def write_session(self) -> None: # generate xml content - links = self.write_nodes() - self.write_links(links) + self.write_nodes() + self.write_links() self.write_mobility_configs() self.write_emane_configs() self.write_service_configs() @@ -334,16 +347,9 @@ class CoreXmlWriter: def write_session_options(self) -> None: option_elements = etree.Element("session_options") - options_config = self.session.options.get_configs() - if not options_config: - return - - default_options = self.session.options.default_values() - for _id in default_options: - default_value = default_options[_id] - value = options_config.get(_id, default_value) - add_configuration(option_elements, _id, value) - + for option in self.session.options.options: + value = self.session.options.get(option.id) + add_configuration(option_elements, option.id, value) if option_elements.getchildren(): self.scenario.append(option_elements) @@ -439,52 +445,48 @@ class CoreXmlWriter: self.scenario.append(service_configurations) def write_default_services(self) -> None: - node_types = etree.Element("default_services") - for node_type in self.session.services.default_services: - services = self.session.services.default_services[node_type] - node_type = etree.SubElement(node_types, "node", type=node_type) + models = etree.Element("default_services") + for model in self.session.services.default_services: + services = self.session.services.default_services[model] + model = etree.SubElement(models, "node", type=model) for service in services: - etree.SubElement(node_type, "service", name=service) + etree.SubElement(model, "service", name=service) + if models.getchildren(): + self.scenario.append(models) - if node_types.getchildren(): - self.scenario.append(node_types) - - def write_nodes(self) -> List[LinkData]: - links = [] - for node_id in self.session.nodes: - node = self.session.nodes[node_id] + def write_nodes(self) -> None: + for node in self.session.nodes.values(): # network node is_network_or_rj45 = isinstance( node, (core.nodes.base.CoreNetworkBase, core.nodes.physical.Rj45Node) ) is_controlnet = isinstance(node, CtrlNet) - if is_network_or_rj45 and not is_controlnet: + is_ptp = isinstance(node, PtpNet) + if is_network_or_rj45 and not (is_controlnet or is_ptp): self.write_network(node) # device node elif isinstance(node, core.nodes.base.CoreNodeBase): self.write_device(node) - # add known links - links.extend(node.links()) - return links - def write_network(self, node: NodeBase) -> None: - # ignore p2p and other nodes that are not part of the api - if not node.apitype: - return - network = NetworkElement(self.session, node) self.networks.append(network.element) - def write_links(self, links: List[LinkData]) -> None: + def write_links(self) -> None: link_elements = etree.Element("links") - # add link data - for link_data in links: - # skip basic range links - if link_data.iface1 is None and link_data.iface2 is None: - continue - link_element = self.create_link_element(link_data) + for core_link in self.session.link_manager.links(): + node1, iface1 = core_link.node1, core_link.iface1 + node2, iface2 = core_link.node2, core_link.iface2 + unidirectional = core_link.is_unidirectional() + link_element = self.create_link_element( + node1, iface1, node2, iface2, core_link.options(), unidirectional + ) link_elements.append(link_element) + if unidirectional: + link_element = self.create_link_element( + node2, iface2, node1, iface1, iface2.options, unidirectional + ) + link_elements.append(link_element) if 
link_elements.getchildren(): self.scenario.append(link_elements) @@ -493,67 +495,71 @@ class CoreXmlWriter: self.devices.append(device.element) def create_iface_element( - self, element_name: str, node_id: int, iface_data: InterfaceData + self, element_name: str, iface: CoreInterface ) -> etree.Element: iface_element = etree.Element(element_name) - node = self.session.get_node(node_id, NodeBase) - if isinstance(node, CoreNodeBase): - iface = node.get_iface(iface_data.id) - # check if emane interface - if isinstance(iface.net, EmaneNet): - nem_id = self.session.emane.get_nem_id(iface) - add_attribute(iface_element, "nem", nem_id) - add_attribute(iface_element, "id", iface_data.id) - add_attribute(iface_element, "name", iface_data.name) - add_attribute(iface_element, "mac", iface_data.mac) - add_attribute(iface_element, "ip4", iface_data.ip4) - add_attribute(iface_element, "ip4_mask", iface_data.ip4_mask) - add_attribute(iface_element, "ip6", iface_data.ip6) - add_attribute(iface_element, "ip6_mask", iface_data.ip6_mask) + # check if interface if connected to emane + if isinstance(iface.node, CoreNodeBase) and isinstance(iface.net, EmaneNet): + nem_id = self.session.emane.get_nem_id(iface) + add_attribute(iface_element, "nem", nem_id) + ip4 = iface.get_ip4() + ip4_mask = None + if ip4: + ip4_mask = ip4.prefixlen + ip4 = str(ip4.ip) + ip6 = iface.get_ip6() + ip6_mask = None + if ip6: + ip6_mask = ip6.prefixlen + ip6 = str(ip6.ip) + add_attribute(iface_element, "id", iface.id) + add_attribute(iface_element, "name", iface.name) + add_attribute(iface_element, "mac", iface.mac) + add_attribute(iface_element, "ip4", ip4) + add_attribute(iface_element, "ip4_mask", ip4_mask) + add_attribute(iface_element, "ip6", ip6) + add_attribute(iface_element, "ip6_mask", ip6_mask) return iface_element - def create_link_element(self, link_data: LinkData) -> etree.Element: + def create_link_element( + self, + node1: NodeBase, + iface1: Optional[CoreInterface], + node2: NodeBase, + iface2: Optional[CoreInterface], + options: LinkOptions, + unidirectional: bool, + ) -> etree.Element: link_element = etree.Element("link") - add_attribute(link_element, "node1", link_data.node1_id) - add_attribute(link_element, "node2", link_data.node2_id) - + add_attribute(link_element, "node1", node1.id) + add_attribute(link_element, "node2", node2.id) # check for interface one - if link_data.iface1 is not None: - iface1 = self.create_iface_element( - "iface1", link_data.node1_id, link_data.iface1 - ) + if iface1 is not None: + iface1 = self.create_iface_element("iface1", iface1) link_element.append(iface1) - # check for interface two - if link_data.iface2 is not None: - iface2 = self.create_iface_element( - "iface2", link_data.node2_id, link_data.iface2 - ) + if iface2 is not None: + iface2 = self.create_iface_element("iface2", iface2) link_element.append(iface2) - # check for options, don't write for emane/wlan links - node1 = self.session.get_node(link_data.node1_id, NodeBase) - node2 = self.session.get_node(link_data.node2_id, NodeBase) - is_node1_wireless = isinstance(node1, (WlanNode, EmaneNet)) - is_node2_wireless = isinstance(node2, (WlanNode, EmaneNet)) - if not any([is_node1_wireless, is_node2_wireless]): - options_data = link_data.options - options = etree.Element("options") - add_attribute(options, "delay", options_data.delay) - add_attribute(options, "bandwidth", options_data.bandwidth) - add_attribute(options, "loss", options_data.loss) - add_attribute(options, "dup", options_data.dup) - add_attribute(options, "jitter", 
options_data.jitter) - add_attribute(options, "mer", options_data.mer) - add_attribute(options, "burst", options_data.burst) - add_attribute(options, "mburst", options_data.mburst) - add_attribute(options, "unidirectional", options_data.unidirectional) - add_attribute(options, "network_id", link_data.network_id) - add_attribute(options, "key", options_data.key) - add_attribute(options, "buffer", options_data.buffer) - if options.items(): - link_element.append(options) - + is_node1_wireless = isinstance(node1, (WlanNode, EmaneNet, WirelessNode)) + is_node2_wireless = isinstance(node2, (WlanNode, EmaneNet, WirelessNode)) + if not (is_node1_wireless or is_node2_wireless): + unidirectional = 1 if unidirectional else 0 + options_element = etree.Element("options") + add_attribute(options_element, "delay", options.delay) + add_attribute(options_element, "bandwidth", options.bandwidth) + add_attribute(options_element, "loss", options.loss) + add_attribute(options_element, "dup", options.dup) + add_attribute(options_element, "jitter", options.jitter) + add_attribute(options_element, "mer", options.mer) + add_attribute(options_element, "burst", options.burst) + add_attribute(options_element, "mburst", options.mburst) + add_attribute(options_element, "unidirectional", unidirectional) + add_attribute(options_element, "key", options.key) + add_attribute(options_element, "buffer", options.buffer) + if options_element.items(): + link_element.append(options_element) return link_element @@ -586,14 +592,12 @@ class CoreXmlReader: return for node in default_services.iterchildren(): - node_type = node.get("type") + model = node.get("type") services = [] for service in node.iterchildren(): services.append(service.get("name")) - logger.info( - "reading default services for nodes(%s): %s", node_type, services - ) - self.session.services.default_services[node_type] = services + logger.info("reading default services for nodes(%s): %s", model, services) + self.session.services.default_services[model] = services def read_session_metadata(self) -> None: session_metadata = self.scenario.find("session_metadata") @@ -618,8 +622,7 @@ class CoreXmlReader: value = configuration.get("value") xml_config[name] = value logger.info("reading session options: %s", xml_config) - config = self.session.options.get_configs() - config.update(xml_config) + self.session.options.update(xml_config) def read_session_hooks(self) -> None: session_hooks = self.scenario.find("session_hooks") @@ -799,71 +802,85 @@ class CoreXmlReader: clazz = device_element.get("class") image = device_element.get("image") server = device_element.get("server") - options = NodeOptions( - name=name, model=model, image=image, icon=icon, server=server - ) + canvas = get_int(device_element, "canvas") node_type = NodeTypes.DEFAULT if clazz == "docker": node_type = NodeTypes.DOCKER elif clazz == "lxc": node_type = NodeTypes.LXC _class = self.session.get_node_class(node_type) - - service_elements = device_element.find("services") - if service_elements is not None: - options.services = [x.get("name") for x in service_elements.iterchildren()] - - config_service_elements = device_element.find("configservices") - if config_service_elements is not None: - options.config_services = [ - x.get("name") for x in config_service_elements.iterchildren() - ] - + options = _class.create_options() + options.icon = icon + options.canvas = canvas + # check for special options + if isinstance(options, CoreNodeOptions): + options.model = model + service_elements = 
device_element.find("services") + if service_elements is not None: + options.services.extend( + x.get("name") for x in service_elements.iterchildren() + ) + config_service_elements = device_element.find("configservices") + if config_service_elements is not None: + options.config_services.extend( + x.get("name") for x in config_service_elements.iterchildren() + ) + if isinstance(options, DockerOptions): + options.image = image + # get position information position_element = device_element.find("position") + position = None if position_element is not None: + position = Position() x = get_float(position_element, "x") y = get_float(position_element, "y") if all([x, y]): - options.set_position(x, y) - + position.set(x, y) lat = get_float(position_element, "lat") lon = get_float(position_element, "lon") alt = get_float(position_element, "alt") if all([lat, lon, alt]): - options.set_location(lat, lon, alt) - + position.set_geo(lon, lat, alt) logger.info("reading node id(%s) model(%s) name(%s)", node_id, model, name) - self.session.add_node(_class, node_id, options) + self.session.add_node(_class, node_id, name, server, position, options) def read_network(self, network_element: etree.Element) -> None: node_id = get_int(network_element, "id") name = network_element.get("name") + server = network_element.get("server") node_type = NodeTypes[network_element.get("type")] _class = self.session.get_node_class(node_type) - icon = network_element.get("icon") - server = network_element.get("server") - options = NodeOptions(name=name, icon=icon, server=server) - if node_type == NodeTypes.EMANE: - model = network_element.get("model") - options.emane = model - + options = _class.create_options() + options.canvas = get_int(network_element, "canvas") + options.icon = network_element.get("icon") + if isinstance(options, EmaneOptions): + options.emane_model = network_element.get("model") position_element = network_element.find("position") + position = None if position_element is not None: + position = Position() x = get_float(position_element, "x") y = get_float(position_element, "y") if all([x, y]): - options.set_position(x, y) - + position.set(x, y) lat = get_float(position_element, "lat") lon = get_float(position_element, "lon") alt = get_float(position_element, "alt") if all([lat, lon, alt]): - options.set_location(lat, lon, alt) - + position.set_geo(lon, lat, alt) logger.info( "reading node id(%s) node_type(%s) name(%s)", node_id, node_type, name ) - self.session.add_node(_class, node_id, options) + node = self.session.add_node(_class, node_id, name, server, position, options) + if isinstance(node, WirelessNode): + wireless_element = network_element.find("wireless") + if wireless_element: + config = {} + for config_element in wireless_element.iterchildren(): + name = config_element.get("name") + value = config_element.get("value") + config[name] = value + node.set_config(config) def read_configservice_configs(self) -> None: configservice_configs = self.scenario.find("configservice_configurations") diff --git a/daemon/core/xml/emanexml.py b/daemon/core/xml/emanexml.py index c45259f7..91d8ce28 100644 --- a/daemon/core/xml/emanexml.py +++ b/daemon/core/xml/emanexml.py @@ -162,12 +162,14 @@ def build_platform_xml( """ # create top level platform element platform_element = etree.Element("platform") - for configuration in emane_net.model.platform_config: + for configuration in emane_net.wireless_model.platform_config: name = configuration.id value = config[configuration.id] add_param(platform_element, name, value) 
add_param( - platform_element, emane_net.model.platform_controlport, f"0.0.0.0:{nem_port}" + platform_element, + emane_net.wireless_model.platform_controlport, + f"0.0.0.0:{nem_port}", ) # build nem xml @@ -177,7 +179,7 @@ def build_platform_xml( ) # create model based xml files - emane_net.model.build_xml_files(config, iface) + emane_net.wireless_model.build_xml_files(config, iface) # check if this is an external transport if is_external(config): diff --git a/daemon/doc/Makefile.am b/daemon/doc/Makefile.am index e46f7d32..9ce90bfa 100644 --- a/daemon/doc/Makefile.am +++ b/daemon/doc/Makefile.am @@ -1,8 +1,4 @@ # CORE -# (c)2012 the Boeing Company. -# See the LICENSE file included in this distribution. -# -# author: Jeff Ahrenholz # # Builds html and pdf documentation using Sphinx. # diff --git a/daemon/poetry.lock b/daemon/poetry.lock index 9055a0b5..158c87dc 100644 --- a/daemon/poetry.lock +++ b/daemon/poetry.lock @@ -8,7 +8,7 @@ python-versions = "*" [[package]] name = "atomicwrites" -version = "1.4.0" +version = "1.4.1" description = "Atomic file writes." category = "dev" optional = false @@ -16,30 +16,26 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" [[package]] name = "attrs" -version = "21.4.0" +version = "22.1.0" description = "Classes Without Boilerplate" category = "dev" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +python-versions = ">=3.5" [package.extras] -dev = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins", "zope.interface", "furo", "sphinx", "sphinx-notfound-page", "pre-commit", "cloudpickle"] -docs = ["furo", "sphinx", "zope.interface", "sphinx-notfound-page"] -tests = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins", "zope.interface", "cloudpickle"] -tests_no_zope = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins", "cloudpickle"] +dev = ["cloudpickle", "coverage[toml] (>=5.0.2)", "furo", "hypothesis", "mypy (>=0.900,!=0.940)", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "sphinx", "sphinx-notfound-page", "zope.interface"] +docs = ["furo", "sphinx", "sphinx-notfound-page", "zope.interface"] +tests = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy (>=0.900,!=0.940)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "zope.interface"] +tests_no_zope = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy (>=0.900,!=0.940)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins"] [[package]] name = "bcrypt" -version = "3.2.0" +version = "4.0.1" description = "Modern password hashing for your software and your servers" category = "main" optional = false python-versions = ">=3.6" -[package.dependencies] -cffi = ">=1.1" -six = ">=1.4.1" - [package.extras] tests = ["pytest (>=3.2.1,!=3.3.0)"] typecheck = ["mypy"] @@ -61,9 +57,17 @@ toml = ">=0.9.4" [package.extras] d = ["aiohttp (>=3.3.2)", "aiohttp-cors"] +[[package]] +name = "certifi" +version = "2022.9.24" +description = "Python package for providing Mozilla's CA Bundle." +category = "main" +optional = false +python-versions = ">=3.6" + [[package]] name = "cffi" -version = "1.15.0" +version = "1.15.1" description = "Foreign Function Interface for Python calling C code." 
category = "main" optional = false @@ -82,7 +86,7 @@ python-versions = ">=3.6" [[package]] name = "click" -version = "8.0.3" +version = "8.0.4" description = "Composable command line interface toolkit" category = "dev" optional = false @@ -90,11 +94,10 @@ python-versions = ">=3.6" [package.dependencies] colorama = {version = "*", markers = "platform_system == \"Windows\""} -importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} [[package]] name = "colorama" -version = "0.4.4" +version = "0.4.5" description = "Cross-platform colored terminal text." category = "dev" optional = false @@ -102,7 +105,7 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" [[package]] name = "cryptography" -version = "36.0.1" +version = "38.0.1" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." category = "main" optional = false @@ -113,23 +116,15 @@ cffi = ">=1.12" [package.extras] docs = ["sphinx (>=1.6.5,!=1.8.0,!=3.1.0,!=3.1.1)", "sphinx-rtd-theme"] -docstest = ["pyenchant (>=1.6.11)", "twine (>=1.12.0)", "sphinxcontrib-spelling (>=4.0.1)"] +docstest = ["pyenchant (>=1.6.11)", "sphinxcontrib-spelling (>=4.0.1)", "twine (>=1.12.0)"] pep8test = ["black", "flake8", "flake8-import-order", "pep8-naming"] -sdist = ["setuptools_rust (>=0.11.4)"] +sdist = ["setuptools-rust (>=0.11.4)"] ssh = ["bcrypt (>=3.1.5)"] -test = ["pytest (>=6.2.0)", "pytest-cov", "pytest-subtests", "pytest-xdist", "pretend", "iso8601", "pytz", "hypothesis (>=1.11.4,!=3.79.2)"] - -[[package]] -name = "dataclasses" -version = "0.8" -description = "A backport of the dataclasses module for Python 3.6" -category = "main" -optional = false -python-versions = ">=3.6, <3.7" +test = ["hypothesis (>=1.11.4,!=3.79.2)", "iso8601", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-subtests", "pytest-xdist", "pytz"] [[package]] name = "distlib" -version = "0.3.4" +version = "0.3.6" description = "Distribution utilities" category = "dev" optional = false @@ -137,7 +132,7 @@ python-versions = "*" [[package]] name = "fabric" -version = "2.5.0" +version = "2.7.1" description = "High level SSH command execution" category = "main" optional = false @@ -146,6 +141,7 @@ python-versions = "*" [package.dependencies] invoke = ">=1.3,<2.0" paramiko = ">=2.4" +pathlib2 = "*" [package.extras] pytest = ["mock (>=2.0.0,<3.0)", "pytest (>=3.2.5,<4.0)"] @@ -172,33 +168,36 @@ optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7" [package.dependencies] -importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} mccabe = ">=0.6.0,<0.7.0" pycodestyle = ">=2.6.0a1,<2.7.0" pyflakes = ">=2.2.0,<2.3.0" [[package]] name = "grpcio" -version = "1.27.2" +version = "1.49.1" description = "HTTP/2-based RPC framework" category = "main" optional = false -python-versions = "*" +python-versions = ">=3.7" [package.dependencies] six = ">=1.5.2" +[package.extras] +protobuf = ["grpcio-tools (>=1.49.1)"] + [[package]] name = "grpcio-tools" -version = "1.27.2" +version = "1.43.0" description = "Protobuf code generator for gRPC" category = "dev" optional = false -python-versions = "*" +python-versions = ">=3.6" [package.dependencies] -grpcio = ">=1.27.2" -protobuf = ">=3.5.0.post1" +grpcio = ">=1.43.0" +protobuf = ">=3.5.0.post1,<4.0dev" +setuptools = "*" [[package]] name = "identify" @@ -212,36 +211,12 @@ python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7" license = ["editdistance"] [[package]] -name = "importlib-metadata" 
-version = "4.8.3" -description = "Read metadata from Python packages" +name = "iniconfig" +version = "1.1.1" +description = "iniconfig: brain-dead simple config-ini parsing" category = "dev" optional = false -python-versions = ">=3.6" - -[package.dependencies] -typing-extensions = {version = ">=3.6.4", markers = "python_version < \"3.8\""} -zipp = ">=0.5" - -[package.extras] -docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)"] -perf = ["ipython"] -testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "packaging", "pep517", "pyfakefs", "flufl.flake8", "pytest-perf (>=0.9.2)", "pytest-black (>=0.3.7)", "pytest-mypy", "importlib-resources (>=1.3)"] - -[[package]] -name = "importlib-resources" -version = "5.4.0" -description = "Read resources from Python packages" -category = "dev" -optional = false -python-versions = ">=3.6" - -[package.dependencies] -zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""} - -[package.extras] -docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)"] -testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "pytest-black (>=0.3.7)", "pytest-mypy"] +python-versions = "*" [[package]] name = "invoke" @@ -262,12 +237,12 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" [package.extras] pipfile = ["pipreqs", "requirementslib"] pyproject = ["toml"] -requirements = ["pipreqs", "pip-api"] +requirements = ["pip-api", "pipreqs"] xdg_home = ["appdirs (>=1.4.0)"] [[package]] name = "lxml" -version = "4.6.5" +version = "4.9.1" description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API." category = "main" optional = false @@ -276,26 +251,27 @@ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, != 3.4.*" [package.extras] cssselect = ["cssselect (>=0.7)"] html5 = ["html5lib"] -htmlsoup = ["beautifulsoup4"] +htmlsoup = ["BeautifulSoup4"] source = ["Cython (>=0.29.7)"] [[package]] -name = "mako" -version = "1.1.3" -description = "A super-fast templating language that borrows the best ideas from the existing templating languages." +name = "Mako" +version = "1.2.3" +description = "A super-fast templating language that borrows the best ideas from the existing templating languages." category = "main" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +python-versions = ">=3.7" [package.dependencies] MarkupSafe = ">=0.9.2" [package.extras] -babel = ["babel"] +babel = ["Babel"] lingua = ["lingua"] +testing = ["pytest"] [[package]] -name = "markupsafe" +name = "MarkupSafe" version = "2.0.1" description = "Safely add untrusted strings to HTML/XML markup." 
category = "main" @@ -319,18 +295,10 @@ optional = false python-versions = ">=3.6" [package.extras] -build = ["twine", "wheel", "blurb"] +build = ["blurb", "twine", "wheel"] docs = ["sphinx"] test = ["pytest", "pytest-cov"] -[[package]] -name = "more-itertools" -version = "8.12.0" -description = "More routines for operating on iterables, beyond itertools" -category = "dev" -optional = false -python-versions = ">=3.5" - [[package]] name = "netaddr" version = "0.7.19" @@ -360,7 +328,7 @@ pyparsing = ">=2.0.2,<3.0.5 || >3.0.5" [[package]] name = "paramiko" -version = "2.9.2" +version = "2.11.0" description = "SSH2 protocol library" category = "main" optional = false @@ -370,20 +338,36 @@ python-versions = "*" bcrypt = ">=3.1.3" cryptography = ">=2.5" pynacl = ">=1.0.1" +six = "*" [package.extras] -all = ["pyasn1 (>=0.1.7)", "pynacl (>=1.0.1)", "bcrypt (>=3.1.3)", "invoke (>=1.3)", "gssapi (>=1.4.1)", "pywin32 (>=2.1.8)"] -ed25519 = ["pynacl (>=1.0.1)", "bcrypt (>=3.1.3)"] -gssapi = ["pyasn1 (>=0.1.7)", "gssapi (>=1.4.1)", "pywin32 (>=2.1.8)"] +all = ["bcrypt (>=3.1.3)", "gssapi (>=1.4.1)", "invoke (>=1.3)", "pyasn1 (>=0.1.7)", "pynacl (>=1.0.1)", "pywin32 (>=2.1.8)"] +ed25519 = ["bcrypt (>=3.1.3)", "pynacl (>=1.0.1)"] +gssapi = ["gssapi (>=1.4.1)", "pyasn1 (>=0.1.7)", "pywin32 (>=2.1.8)"] invoke = ["invoke (>=1.3)"] [[package]] -name = "pillow" -version = "8.3.2" +name = "pathlib2" +version = "2.3.7.post1" +description = "Object-oriented filesystem paths" +category = "main" +optional = false +python-versions = "*" + +[package.dependencies] +six = "*" + +[[package]] +name = "Pillow" +version = "9.2.0" description = "Python Imaging Library (Fork)" category = "main" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" + +[package.extras] +docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-issues (>=3.0.1)", "sphinx-removed-in", "sphinxext-opengraph"] +tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"] [[package]] name = "platformdirs" @@ -399,17 +383,15 @@ test = ["appdirs (==1.4.4)", "pytest (>=6)", "pytest-cov (>=2.7)", "pytest-mock [[package]] name = "pluggy" -version = "0.13.1" +version = "1.0.0" description = "plugin and hook calling mechanisms for python" category = "dev" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" - -[package.dependencies] -importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""} +python-versions = ">=3.6" [package.extras] dev = ["pre-commit", "tox"] +testing = ["pytest", "pytest-benchmark"] [[package]] name = "pre-commit" @@ -422,8 +404,6 @@ python-versions = ">=3.6" [package.dependencies] cfgv = ">=2.0.0" identify = ">=1.0.0" -importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} -importlib-resources = {version = "*", markers = "python_version < \"3.7\""} nodeenv = ">=0.11.1" pyyaml = ">=5.1" toml = "*" @@ -431,7 +411,7 @@ virtualenv = ">=15.2" [[package]] name = "protobuf" -version = "3.19.4" +version = "3.19.5" description = "Protocol Buffers" category = "main" optional = false @@ -470,7 +450,7 @@ optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" [[package]] -name = "pynacl" +name = "PyNaCl" version = "1.5.0" description = "Python binding to the Networking and Cryptography (NaCl) library" category = "main" @@ -481,8 +461,8 @@ python-versions = ">=3.6" cffi = ">=1.4.1" [package.extras] -docs = ["sphinx (>=1.6.5)", 
"sphinx-rtd-theme"] -tests = ["pytest (>=3.2.1,!=3.3.0)", "hypothesis (>=3.27.0)"] +docs = ["sphinx (>=1.6.5)", "sphinx_rtd_theme"] +tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"] [[package]] name = "pyparsing" @@ -497,43 +477,56 @@ diagrams = ["jinja2", "railroad-diagrams"] [[package]] name = "pyproj" -version = "2.6.1.post1" +version = "3.3.1" description = "Python interface to PROJ (cartographic projections and coordinate transformations library)" category = "main" optional = false -python-versions = ">=3.5" +python-versions = ">=3.8" + +[package.dependencies] +certifi = "*" [[package]] name = "pytest" -version = "5.4.3" +version = "6.2.5" description = "pytest: simple powerful testing with Python" category = "dev" optional = false -python-versions = ">=3.5" +python-versions = ">=3.6" [package.dependencies] atomicwrites = {version = ">=1.0", markers = "sys_platform == \"win32\""} -attrs = ">=17.4.0" +attrs = ">=19.2.0" colorama = {version = "*", markers = "sys_platform == \"win32\""} -importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""} -more-itertools = ">=4.0.0" +iniconfig = "*" packaging = "*" -pluggy = ">=0.12,<1.0" -py = ">=1.5.0" -wcwidth = "*" +pluggy = ">=0.12,<2.0" +py = ">=1.8.2" +toml = "*" [package.extras] -checkqa-mypy = ["mypy (==v0.761)"] testing = ["argcomplete", "hypothesis (>=3.56)", "mock", "nose", "requests", "xmlschema"] [[package]] -name = "pyyaml" +name = "PyYAML" version = "5.4" description = "YAML parser and emitter for Python" category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" +[[package]] +name = "setuptools" +version = "59.6.0" +description = "Easily download, build, install, upgrade, and uninstall Python packages" +category = "dev" +optional = false +python-versions = ">=3.6" + +[package.extras] +docs = ["furo", "jaraco.packaging (>=8.2)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx", "sphinx-inline-tabs", "sphinxcontrib-towncrier"] +testing = ["flake8-2020", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "mock", "paver", "pip (>=19.1)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.0.1)", "pytest-flake8", "pytest-mypy", "pytest-virtualenv (>=1.2.7)", "pytest-xdist", "sphinx", "virtualenv (>=13.0.0)", "wheel"] + [[package]] name = "six" version = "1.16.0" @@ -550,58 +543,27 @@ category = "dev" optional = false python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" -[[package]] -name = "typing-extensions" -version = "4.1.1" -description = "Backported and Experimental Type Hints for Python 3.6+" -category = "dev" -optional = false -python-versions = ">=3.6" - [[package]] name = "virtualenv" -version = "20.13.1" +version = "20.16.5" description = "Virtual Python Environment builder" category = "dev" optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" - -[package.dependencies] -distlib = ">=0.3.1,<1" -filelock = ">=3.2,<4" -importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""} -importlib-resources = {version = ">=1.0", markers = "python_version < \"3.7\""} -platformdirs = ">=2,<3" -six = ">=1.9.0,<2" - -[package.extras] -docs = ["proselint (>=0.10.2)", "sphinx (>=3)", "sphinx-argparse (>=0.2.5)", "sphinx-rtd-theme (>=0.4.3)", "towncrier (>=21.3)"] -testing = ["coverage (>=4)", "coverage-enable-subprocess (>=1)", "flaky (>=3)", "pytest (>=4)", "pytest-env (>=0.6.2)", "pytest-freezegun (>=0.4.1)", 
"pytest-mock (>=2)", "pytest-randomly (>=1)", "pytest-timeout (>=1)", "packaging (>=20.0)"] - -[[package]] -name = "wcwidth" -version = "0.2.5" -description = "Measures the displayed width of unicode strings in a terminal" -category = "dev" -optional = false -python-versions = "*" - -[[package]] -name = "zipp" -version = "3.6.0" -description = "Backport of pathlib-compatible object wrapper for zip files" -category = "dev" -optional = false python-versions = ">=3.6" +[package.dependencies] +distlib = ">=0.3.5,<1" +filelock = ">=3.4.1,<4" +platformdirs = ">=2.4,<3" + [package.extras] -docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)"] -testing = ["pytest (>=4.6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "jaraco.itertools", "func-timeout", "pytest-black (>=0.3.7)", "pytest-mypy"] +docs = ["proselint (>=0.13)", "sphinx (>=5.1.1)", "sphinx-argparse (>=0.3.1)", "sphinx-rtd-theme (>=1)", "towncrier (>=21.9)"] +testing = ["coverage (>=6.2)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=21.3)", "pytest (>=7.0.1)", "pytest-env (>=0.6.2)", "pytest-freezegun (>=0.4.2)", "pytest-mock (>=3.6.1)", "pytest-randomly (>=3.10.3)", "pytest-timeout (>=2.1)"] [metadata] lock-version = "1.1" -python-versions = "^3.6" -content-hash = "64ea28583e46b32b3aa2be3627ee8f68c1bbf36622ec6f575062d5059745a6f9" +python-versions = "^3.9" +content-hash = "4d32d2fc7c11e6fc3b61cd16c4f13bb3ce264db289e75e26f9ee50bdf4d4e1c7" [metadata.files] appdirs = [ @@ -609,123 +571,156 @@ appdirs = [ {file = "appdirs-1.4.4.tar.gz", hash = "sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41"}, ] atomicwrites = [ - {file = "atomicwrites-1.4.0-py2.py3-none-any.whl", hash = "sha256:6d1784dea7c0c8d4a5172b6c620f40b6e4cbfdf96d783691f2e1302a7b88e197"}, - {file = "atomicwrites-1.4.0.tar.gz", hash = "sha256:ae70396ad1a434f9c7046fd2dd196fc04b12f9e91ffb859164193be8b6168a7a"}, + {file = "atomicwrites-1.4.1.tar.gz", hash = "sha256:81b2c9071a49367a7f770170e5eec8cb66567cfbbc8c73d20ce5ca4a8d71cf11"}, ] attrs = [ - {file = "attrs-21.4.0-py2.py3-none-any.whl", hash = "sha256:2d27e3784d7a565d36ab851fe94887c5eccd6a463168875832a1be79c82828b4"}, - {file = "attrs-21.4.0.tar.gz", hash = "sha256:626ba8234211db98e869df76230a137c4c40a12d72445c45d5f5b716f076e2fd"}, + {file = "attrs-22.1.0-py2.py3-none-any.whl", hash = "sha256:86efa402f67bf2df34f51a335487cf46b1ec130d02b8d39fd248abfd30da551c"}, + {file = "attrs-22.1.0.tar.gz", hash = "sha256:29adc2665447e5191d0e7c568fde78b21f9672d344281d0c6e1ab085429b22b6"}, ] bcrypt = [ - {file = "bcrypt-3.2.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:c95d4cbebffafcdd28bd28bb4e25b31c50f6da605c81ffd9ad8a3d1b2ab7b1b6"}, - {file = "bcrypt-3.2.0-cp36-abi3-manylinux1_x86_64.whl", hash = "sha256:63d4e3ff96188e5898779b6057878fecf3f11cfe6ec3b313ea09955d587ec7a7"}, - {file = "bcrypt-3.2.0-cp36-abi3-manylinux2010_x86_64.whl", hash = "sha256:cd1ea2ff3038509ea95f687256c46b79f5fc382ad0aa3664d200047546d511d1"}, - {file = "bcrypt-3.2.0-cp36-abi3-manylinux2014_aarch64.whl", hash = "sha256:cdcdcb3972027f83fe24a48b1e90ea4b584d35f1cc279d76de6fc4b13376239d"}, - {file = "bcrypt-3.2.0-cp36-abi3-win32.whl", hash = "sha256:a67fb841b35c28a59cebed05fbd3e80eea26e6d75851f0574a9273c80f3e9b55"}, - {file = "bcrypt-3.2.0-cp36-abi3-win_amd64.whl", hash = "sha256:81fec756feff5b6818ea7ab031205e1d323d8943d237303baca2c5f9c7846f34"}, - {file = "bcrypt-3.2.0.tar.gz", hash = "sha256:5b93c1726e50a93a033c36e5ca7fdcd29a5c7395af50a6892f5d9e7c6cfbfb29"}, + {file = 
"bcrypt-4.0.1-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:b1023030aec778185a6c16cf70f359cbb6e0c289fd564a7cfa29e727a1c38f8f"}, + {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:08d2947c490093a11416df18043c27abe3921558d2c03e2076ccb28a116cb6d0"}, + {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0eaa47d4661c326bfc9d08d16debbc4edf78778e6aaba29c1bc7ce67214d4410"}, + {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ae88eca3024bb34bb3430f964beab71226e761f51b912de5133470b649d82344"}, + {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:a522427293d77e1c29e303fc282e2d71864579527a04ddcfda6d4f8396c6c36a"}, + {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:fbdaec13c5105f0c4e5c52614d04f0bca5f5af007910daa8b6b12095edaa67b3"}, + {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:ca3204d00d3cb2dfed07f2d74a25f12fc12f73e606fcaa6975d1f7ae69cacbb2"}, + {file = "bcrypt-4.0.1-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:089098effa1bc35dc055366740a067a2fc76987e8ec75349eb9484061c54f535"}, + {file = "bcrypt-4.0.1-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:e9a51bbfe7e9802b5f3508687758b564069ba937748ad7b9e890086290d2f79e"}, + {file = "bcrypt-4.0.1-cp36-abi3-win32.whl", hash = "sha256:2caffdae059e06ac23fce178d31b4a702f2a3264c20bfb5ff541b338194d8fab"}, + {file = "bcrypt-4.0.1-cp36-abi3-win_amd64.whl", hash = "sha256:8a68f4341daf7522fe8d73874de8906f3a339048ba406be6ddc1b3ccb16fc0d9"}, + {file = "bcrypt-4.0.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf4fa8b2ca74381bb5442c089350f09a3f17797829d958fad058d6e44d9eb83c"}, + {file = "bcrypt-4.0.1-pp37-pypy37_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:67a97e1c405b24f19d08890e7ae0c4f7ce1e56a712a016746c8b2d7732d65d4b"}, + {file = "bcrypt-4.0.1-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b3b85202d95dd568efcb35b53936c5e3b3600c7cdcc6115ba461df3a8e89f38d"}, + {file = "bcrypt-4.0.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cbb03eec97496166b704ed663a53680ab57c5084b2fc98ef23291987b525cb7d"}, + {file = "bcrypt-4.0.1-pp38-pypy38_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:5ad4d32a28b80c5fa6671ccfb43676e8c1cc232887759d1cd7b6f56ea4355215"}, + {file = "bcrypt-4.0.1-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b57adba8a1444faf784394de3436233728a1ecaeb6e07e8c22c8848f179b893c"}, + {file = "bcrypt-4.0.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:705b2cea8a9ed3d55b4491887ceadb0106acf7c6387699fca771af56b1cdeeda"}, + {file = "bcrypt-4.0.1-pp39-pypy39_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:2b3ac11cf45161628f1f3733263e63194f22664bf4d0c0f3ab34099c02134665"}, + {file = "bcrypt-4.0.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:3100851841186c25f127731b9fa11909ab7b1df6fc4b9f8353f4f1fd952fbf71"}, + {file = "bcrypt-4.0.1.tar.gz", hash = "sha256:27d375903ac8261cfe4047f6709d16f7d18d39b1ec92aaf72af989552a650ebd"}, ] black = [ {file = "black-19.3b0-py36-none-any.whl", hash = "sha256:09a9dcb7c46ed496a9850b76e4e825d6049ecd38b611f1224857a79bd985a8cf"}, {file = "black-19.3b0.tar.gz", hash = "sha256:68950ffd4d9169716bcb8719a56c07a2f4485354fec061cdd5910aa07369731c"}, ] +certifi = [ + {file = "certifi-2022.9.24-py3-none-any.whl", hash = 
"sha256:90c1a32f1d68f940488354e36370f6cca89f0f106db09518524c88d6ed83f382"}, + {file = "certifi-2022.9.24.tar.gz", hash = "sha256:0d9c601124e5a6ba9712dbc60d9c53c21e34f5f641fe83002317394311bdce14"}, +] cffi = [ - {file = "cffi-1.15.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:c2502a1a03b6312837279c8c1bd3ebedf6c12c4228ddbad40912d671ccc8a962"}, - {file = "cffi-1.15.0-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:23cfe892bd5dd8941608f93348c0737e369e51c100d03718f108bf1add7bd6d0"}, - {file = "cffi-1.15.0-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:41d45de54cd277a7878919867c0f08b0cf817605e4eb94093e7516505d3c8d14"}, - {file = "cffi-1.15.0-cp27-cp27m-win32.whl", hash = "sha256:4a306fa632e8f0928956a41fa8e1d6243c71e7eb59ffbd165fc0b41e316b2474"}, - {file = "cffi-1.15.0-cp27-cp27m-win_amd64.whl", hash = "sha256:e7022a66d9b55e93e1a845d8c9eba2a1bebd4966cd8bfc25d9cd07d515b33fa6"}, - {file = "cffi-1.15.0-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:14cd121ea63ecdae71efa69c15c5543a4b5fbcd0bbe2aad864baca0063cecf27"}, - {file = "cffi-1.15.0-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:d4d692a89c5cf08a8557fdeb329b82e7bf609aadfaed6c0d79f5a449a3c7c023"}, - {file = "cffi-1.15.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0104fb5ae2391d46a4cb082abdd5c69ea4eab79d8d44eaaf79f1b1fd806ee4c2"}, - {file = "cffi-1.15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:91ec59c33514b7c7559a6acda53bbfe1b283949c34fe7440bcf917f96ac0723e"}, - {file = "cffi-1.15.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:f5c7150ad32ba43a07c4479f40241756145a1f03b43480e058cfd862bf5041c7"}, - {file = "cffi-1.15.0-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:00c878c90cb53ccfaae6b8bc18ad05d2036553e6d9d1d9dbcf323bbe83854ca3"}, - {file = "cffi-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:abb9a20a72ac4e0fdb50dae135ba5e77880518e742077ced47eb1499e29a443c"}, - {file = "cffi-1.15.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a5263e363c27b653a90078143adb3d076c1a748ec9ecc78ea2fb916f9b861962"}, - {file = "cffi-1.15.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f54a64f8b0c8ff0b64d18aa76675262e1700f3995182267998c31ae974fbc382"}, - {file = "cffi-1.15.0-cp310-cp310-win32.whl", hash = "sha256:c21c9e3896c23007803a875460fb786118f0cdd4434359577ea25eb556e34c55"}, - {file = "cffi-1.15.0-cp310-cp310-win_amd64.whl", hash = "sha256:5e069f72d497312b24fcc02073d70cb989045d1c91cbd53979366077959933e0"}, - {file = "cffi-1.15.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:64d4ec9f448dfe041705426000cc13e34e6e5bb13736e9fd62e34a0b0c41566e"}, - {file = "cffi-1.15.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2756c88cbb94231c7a147402476be2c4df2f6078099a6f4a480d239a8817ae39"}, - {file = "cffi-1.15.0-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b96a311ac60a3f6be21d2572e46ce67f09abcf4d09344c49274eb9e0bf345fc"}, - {file = "cffi-1.15.0-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75e4024375654472cc27e91cbe9eaa08567f7fbdf822638be2814ce059f58032"}, - {file = "cffi-1.15.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:59888172256cac5629e60e72e86598027aca6bf01fa2465bdb676d37636573e8"}, - {file = "cffi-1.15.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:27c219baf94952ae9d50ec19651a687b826792055353d07648a5695413e0c605"}, - {file = 
"cffi-1.15.0-cp36-cp36m-win32.whl", hash = "sha256:4958391dbd6249d7ad855b9ca88fae690783a6be9e86df65865058ed81fc860e"}, - {file = "cffi-1.15.0-cp36-cp36m-win_amd64.whl", hash = "sha256:f6f824dc3bce0edab5f427efcfb1d63ee75b6fcb7282900ccaf925be84efb0fc"}, - {file = "cffi-1.15.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:06c48159c1abed75c2e721b1715c379fa3200c7784271b3c46df01383b593636"}, - {file = "cffi-1.15.0-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:c2051981a968d7de9dd2d7b87bcb9c939c74a34626a6e2f8181455dd49ed69e4"}, - {file = "cffi-1.15.0-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:fd8a250edc26254fe5b33be00402e6d287f562b6a5b2152dec302fa15bb3e997"}, - {file = "cffi-1.15.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:91d77d2a782be4274da750752bb1650a97bfd8f291022b379bb8e01c66b4e96b"}, - {file = "cffi-1.15.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:45db3a33139e9c8f7c09234b5784a5e33d31fd6907800b316decad50af323ff2"}, - {file = "cffi-1.15.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:263cc3d821c4ab2213cbe8cd8b355a7f72a8324577dc865ef98487c1aeee2bc7"}, - {file = "cffi-1.15.0-cp37-cp37m-win32.whl", hash = "sha256:17771976e82e9f94976180f76468546834d22a7cc404b17c22df2a2c81db0c66"}, - {file = "cffi-1.15.0-cp37-cp37m-win_amd64.whl", hash = "sha256:3415c89f9204ee60cd09b235810be700e993e343a408693e80ce7f6a40108029"}, - {file = "cffi-1.15.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4238e6dab5d6a8ba812de994bbb0a79bddbdf80994e4ce802b6f6f3142fcc880"}, - {file = "cffi-1.15.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:0808014eb713677ec1292301ea4c81ad277b6cdf2fdd90fd540af98c0b101d20"}, - {file = "cffi-1.15.0-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:57e9ac9ccc3101fac9d6014fba037473e4358ef4e89f8e181f8951a2c0162024"}, - {file = "cffi-1.15.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b6c2ea03845c9f501ed1313e78de148cd3f6cad741a75d43a29b43da27f2e1e"}, - {file = "cffi-1.15.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:10dffb601ccfb65262a27233ac273d552ddc4d8ae1bf93b21c94b8511bffe728"}, - {file = "cffi-1.15.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:786902fb9ba7433aae840e0ed609f45c7bcd4e225ebb9c753aa39725bb3e6ad6"}, - {file = "cffi-1.15.0-cp38-cp38-win32.whl", hash = "sha256:da5db4e883f1ce37f55c667e5c0de439df76ac4cb55964655906306918e7363c"}, - {file = "cffi-1.15.0-cp38-cp38-win_amd64.whl", hash = "sha256:181dee03b1170ff1969489acf1c26533710231c58f95534e3edac87fff06c443"}, - {file = "cffi-1.15.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:45e8636704eacc432a206ac7345a5d3d2c62d95a507ec70d62f23cd91770482a"}, - {file = "cffi-1.15.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:31fb708d9d7c3f49a60f04cf5b119aeefe5644daba1cd2a0fe389b674fd1de37"}, - {file = "cffi-1.15.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6dc2737a3674b3e344847c8686cf29e500584ccad76204efea14f451d4cc669a"}, - {file = "cffi-1.15.0-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:74fdfdbfdc48d3f47148976f49fab3251e550a8720bebc99bf1483f5bfb5db3e"}, - {file = "cffi-1.15.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ffaa5c925128e29efbde7301d8ecaf35c8c60ffbcd6a1ffd3a552177c8e5e796"}, - {file = 
"cffi-1.15.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3f7d084648d77af029acb79a0ff49a0ad7e9d09057a9bf46596dac9514dc07df"}, - {file = "cffi-1.15.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ef1f279350da2c586a69d32fc8733092fd32cc8ac95139a00377841f59a3f8d8"}, - {file = "cffi-1.15.0-cp39-cp39-win32.whl", hash = "sha256:2a23af14f408d53d5e6cd4e3d9a24ff9e05906ad574822a10563efcef137979a"}, - {file = "cffi-1.15.0-cp39-cp39-win_amd64.whl", hash = "sha256:3773c4d81e6e818df2efbc7dd77325ca0dcb688116050fb2b3011218eda36139"}, - {file = "cffi-1.15.0.tar.gz", hash = "sha256:920f0d66a896c2d99f0adbb391f990a84091179542c205fa53ce5787aff87954"}, + {file = "cffi-1.15.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2"}, + {file = "cffi-1.15.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2"}, + {file = "cffi-1.15.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914"}, + {file = "cffi-1.15.1-cp27-cp27m-win32.whl", hash = "sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3"}, + {file = "cffi-1.15.1-cp27-cp27m-win_amd64.whl", hash = "sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e"}, + {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162"}, + {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b"}, + {file = "cffi-1.15.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21"}, + {file = "cffi-1.15.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4"}, + {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01"}, + {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e"}, + {file = "cffi-1.15.1-cp310-cp310-win32.whl", hash = "sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2"}, + {file = "cffi-1.15.1-cp310-cp310-win_amd64.whl", hash = "sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d"}, + {file = "cffi-1.15.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac"}, + {file = 
"cffi-1.15.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c"}, + {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef"}, + {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8"}, + {file = "cffi-1.15.1-cp311-cp311-win32.whl", hash = "sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d"}, + {file = "cffi-1.15.1-cp311-cp311-win_amd64.whl", hash = "sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104"}, + {file = "cffi-1.15.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e"}, + {file = "cffi-1.15.1-cp36-cp36m-win32.whl", hash = "sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf"}, + {file = "cffi-1.15.1-cp36-cp36m-win_amd64.whl", hash = "sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497"}, + {file = "cffi-1.15.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426"}, + {file = "cffi-1.15.1-cp37-cp37m-win32.whl", hash = "sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9"}, + {file = "cffi-1.15.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045"}, + {file = "cffi-1.15.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192"}, + {file = "cffi-1.15.1-cp38-cp38-win32.whl", hash = "sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314"}, + {file = "cffi-1.15.1-cp38-cp38-win_amd64.whl", hash = "sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5"}, + {file = "cffi-1.15.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585"}, + {file = "cffi-1.15.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27"}, + {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76"}, + {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3"}, + {file = "cffi-1.15.1-cp39-cp39-win32.whl", hash = "sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee"}, + {file = "cffi-1.15.1-cp39-cp39-win_amd64.whl", hash = "sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c"}, + {file = "cffi-1.15.1.tar.gz", hash = "sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9"}, ] cfgv = [ {file = "cfgv-3.0.0-py2.py3-none-any.whl", hash = "sha256:f22b426ed59cd2ab2b54ff96608d846c33dfb8766a67f0b4a6ce130ce244414f"}, {file = 
"cfgv-3.0.0.tar.gz", hash = "sha256:04b093b14ddf9fd4d17c53ebfd55582d27b76ed30050193c14e560770c5360eb"}, ] click = [ - {file = "click-8.0.3-py3-none-any.whl", hash = "sha256:353f466495adaeb40b6b5f592f9f91cb22372351c84caeb068132442a4518ef3"}, - {file = "click-8.0.3.tar.gz", hash = "sha256:410e932b050f5eed773c4cda94de75971c89cdb3155a72a0831139a79e5ecb5b"}, + {file = "click-8.0.4-py3-none-any.whl", hash = "sha256:6a7a62563bbfabfda3a38f3023a1db4a35978c0abd76f6c9605ecd6554d6d9b1"}, + {file = "click-8.0.4.tar.gz", hash = "sha256:8458d7b1287c5fb128c90e23381cf99dcde74beaf6c7ff6384ce84d6fe090adb"}, ] colorama = [ - {file = "colorama-0.4.4-py2.py3-none-any.whl", hash = "sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2"}, - {file = "colorama-0.4.4.tar.gz", hash = "sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b"}, + {file = "colorama-0.4.5-py2.py3-none-any.whl", hash = "sha256:854bf444933e37f5824ae7bfc1e98d5bce2ebe4160d46b5edf346a89358e99da"}, + {file = "colorama-0.4.5.tar.gz", hash = "sha256:e6c6b4334fc50988a639d9b98aa429a0b57da6e17b9a44f0451f930b6967b7a4"}, ] cryptography = [ - {file = "cryptography-36.0.1-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:73bc2d3f2444bcfeac67dd130ff2ea598ea5f20b40e36d19821b4df8c9c5037b"}, - {file = "cryptography-36.0.1-cp36-abi3-macosx_10_10_x86_64.whl", hash = "sha256:2d87cdcb378d3cfed944dac30596da1968f88fb96d7fc34fdae30a99054b2e31"}, - {file = "cryptography-36.0.1-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:74d6c7e80609c0f4c2434b97b80c7f8fdfaa072ca4baab7e239a15d6d70ed73a"}, - {file = "cryptography-36.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:6c0c021f35b421ebf5976abf2daacc47e235f8b6082d3396a2fe3ccd537ab173"}, - {file = "cryptography-36.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d59a9d55027a8b88fd9fd2826c4392bd487d74bf628bb9d39beecc62a644c12"}, - {file = "cryptography-36.0.1-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0a817b961b46894c5ca8a66b599c745b9a3d9f822725221f0e0fe49dc043a3a3"}, - {file = "cryptography-36.0.1-cp36-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:94ae132f0e40fe48f310bba63f477f14a43116f05ddb69d6fa31e93f05848ae2"}, - {file = "cryptography-36.0.1-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:7be0eec337359c155df191d6ae00a5e8bbb63933883f4f5dffc439dac5348c3f"}, - {file = "cryptography-36.0.1-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:e0344c14c9cb89e76eb6a060e67980c9e35b3f36691e15e1b7a9e58a0a6c6dc3"}, - {file = "cryptography-36.0.1-cp36-abi3-win32.whl", hash = "sha256:4caa4b893d8fad33cf1964d3e51842cd78ba87401ab1d2e44556826df849a8ca"}, - {file = "cryptography-36.0.1-cp36-abi3-win_amd64.whl", hash = "sha256:391432971a66cfaf94b21c24ab465a4cc3e8bf4a939c1ca5c3e3a6e0abebdbcf"}, - {file = "cryptography-36.0.1-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:bb5829d027ff82aa872d76158919045a7c1e91fbf241aec32cb07956e9ebd3c9"}, - {file = "cryptography-36.0.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ebc15b1c22e55c4d5566e3ca4db8689470a0ca2babef8e3a9ee057a8b82ce4b1"}, - {file = "cryptography-36.0.1-pp37-pypy37_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:596f3cd67e1b950bc372c33f1a28a0692080625592ea6392987dba7f09f17a94"}, - {file = "cryptography-36.0.1-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = 
"sha256:30ee1eb3ebe1644d1c3f183d115a8c04e4e603ed6ce8e394ed39eea4a98469ac"}, - {file = "cryptography-36.0.1-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ec63da4e7e4a5f924b90af42eddf20b698a70e58d86a72d943857c4c6045b3ee"}, - {file = "cryptography-36.0.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca238ceb7ba0bdf6ce88c1b74a87bffcee5afbfa1e41e173b1ceb095b39add46"}, - {file = "cryptography-36.0.1-pp38-pypy38_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:ca28641954f767f9822c24e927ad894d45d5a1e501767599647259cbf030b903"}, - {file = "cryptography-36.0.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:39bdf8e70eee6b1c7b289ec6e5d84d49a6bfa11f8b8646b5b3dfe41219153316"}, - {file = "cryptography-36.0.1.tar.gz", hash = "sha256:53e5c1dc3d7a953de055d77bef2ff607ceef7a2aac0353b5d630ab67f7423638"}, -] -dataclasses = [ - {file = "dataclasses-0.8-py3-none-any.whl", hash = "sha256:0201d89fa866f68c8ebd9d08ee6ff50c0b255f8ec63a71c16fda7af82bb887bf"}, - {file = "dataclasses-0.8.tar.gz", hash = "sha256:8479067f342acf957dc82ec415d355ab5edb7e7646b90dc6e2fd1d96ad084c97"}, + {file = "cryptography-38.0.1-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:10d1f29d6292fc95acb597bacefd5b9e812099d75a6469004fd38ba5471a977f"}, + {file = "cryptography-38.0.1-cp36-abi3-macosx_10_10_x86_64.whl", hash = "sha256:3fc26e22840b77326a764ceb5f02ca2d342305fba08f002a8c1f139540cdfaad"}, + {file = "cryptography-38.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:3b72c360427889b40f36dc214630e688c2fe03e16c162ef0aa41da7ab1455153"}, + {file = "cryptography-38.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:194044c6b89a2f9f169df475cc167f6157eb9151cc69af8a2a163481d45cc407"}, + {file = "cryptography-38.0.1-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca9f6784ea96b55ff41708b92c3f6aeaebde4c560308e5fbbd3173fbc466e94e"}, + {file = "cryptography-38.0.1-cp36-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:16fa61e7481f4b77ef53991075de29fc5bacb582a1244046d2e8b4bb72ef66d0"}, + {file = "cryptography-38.0.1-cp36-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:d4ef6cc305394ed669d4d9eebf10d3a101059bdcf2669c366ec1d14e4fb227bd"}, + {file = "cryptography-38.0.1-cp36-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:3261725c0ef84e7592597606f6583385fed2a5ec3909f43bc475ade9729a41d6"}, + {file = "cryptography-38.0.1-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:0297ffc478bdd237f5ca3a7dc96fc0d315670bfa099c04dc3a4a2172008a405a"}, + {file = "cryptography-38.0.1-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:89ed49784ba88c221756ff4d4755dbc03b3c8d2c5103f6d6b4f83a0fb1e85294"}, + {file = "cryptography-38.0.1-cp36-abi3-win32.whl", hash = "sha256:ac7e48f7e7261207d750fa7e55eac2d45f720027d5703cd9007e9b37bbb59ac0"}, + {file = "cryptography-38.0.1-cp36-abi3-win_amd64.whl", hash = "sha256:ad7353f6ddf285aeadfaf79e5a6829110106ff8189391704c1d8801aa0bae45a"}, + {file = "cryptography-38.0.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:896dd3a66959d3a5ddcfc140a53391f69ff1e8f25d93f0e2e7830c6de90ceb9d"}, + {file = "cryptography-38.0.1-pp37-pypy37_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:d3971e2749a723e9084dd507584e2a2761f78ad2c638aa31e80bc7a15c9db4f9"}, + {file = "cryptography-38.0.1-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:79473cf8a5cbc471979bd9378c9f425384980fcf2ab6534b18ed7d0d9843987d"}, + {file = 
"cryptography-38.0.1-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:d9e69ae01f99abe6ad646947bba8941e896cb3aa805be2597a0400e0764b5818"}, + {file = "cryptography-38.0.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5067ee7f2bce36b11d0e334abcd1ccf8c541fc0bbdaf57cdd511fdee53e879b6"}, + {file = "cryptography-38.0.1-pp38-pypy38_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:3e3a2599e640927089f932295a9a247fc40a5bdf69b0484532f530471a382750"}, + {file = "cryptography-38.0.1-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c2e5856248a416767322c8668ef1845ad46ee62629266f84a8f007a317141013"}, + {file = "cryptography-38.0.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:64760ba5331e3f1794d0bcaabc0d0c39e8c60bf67d09c93dc0e54189dfd7cfe5"}, + {file = "cryptography-38.0.1-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b6c9b706316d7b5a137c35e14f4103e2115b088c412140fdbd5f87c73284df61"}, + {file = "cryptography-38.0.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b0163a849b6f315bf52815e238bc2b2346604413fa7c1601eea84bcddb5fb9ac"}, + {file = "cryptography-38.0.1-pp39-pypy39_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:d1a5bd52d684e49a36582193e0b89ff267704cd4025abefb9e26803adeb3e5fb"}, + {file = "cryptography-38.0.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:765fa194a0f3372d83005ab83ab35d7c5526c4e22951e46059b8ac678b44fa5a"}, + {file = "cryptography-38.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:52e7bee800ec869b4031093875279f1ff2ed12c1e2f74923e8f49c916afd1d3b"}, + {file = "cryptography-38.0.1.tar.gz", hash = "sha256:1db3d807a14931fa317f96435695d9ec386be7b84b618cc61cfa5d08b0ae33d7"}, ] distlib = [ - {file = "distlib-0.3.4-py2.py3-none-any.whl", hash = "sha256:6564fe0a8f51e734df6333d08b8b94d4ea8ee6b99b5ed50613f731fd4089f34b"}, - {file = "distlib-0.3.4.zip", hash = "sha256:e4b58818180336dc9c529bfb9a0b58728ffc09ad92027a3f30b7cd91e3458579"}, + {file = "distlib-0.3.6-py2.py3-none-any.whl", hash = "sha256:f35c4b692542ca110de7ef0bea44d73981caeb34ca0b9b6b2e6d7790dda8f80e"}, + {file = "distlib-0.3.6.tar.gz", hash = "sha256:14bad2d9b04d3a36127ac97f30b12a19268f211063d8f8ee4f47108896e11b46"}, ] fabric = [ - {file = "fabric-2.5.0-py2.py3-none-any.whl", hash = "sha256:160331934ea60036604928e792fa8e9f813266b098ef5562aa82b88527740389"}, - {file = "fabric-2.5.0.tar.gz", hash = "sha256:24842d7d51556adcabd885ac3cf5e1df73fc622a1708bf3667bf5927576cdfa6"}, + {file = "fabric-2.7.1-py2.py3-none-any.whl", hash = "sha256:7610362318ef2d391cc65d4befb684393975d889ed5720f23499394ec0e136fa"}, + {file = "fabric-2.7.1.tar.gz", hash = "sha256:76f8fef59cf2061dbd849bbce4fe49bdd820884385004b0ca59136ac3db129e4"}, ] filelock = [ {file = "filelock-3.4.1-py3-none-any.whl", hash = "sha256:a4bc51381e01502a30e9f06dd4fa19a1712eab852b6fb0f84fd7cce0793d8ca3"}, @@ -736,106 +731,105 @@ flake8 = [ {file = "flake8-3.8.2.tar.gz", hash = "sha256:c69ac1668e434d37a2d2880b3ca9aafd54b3a10a3ac1ab101d22f29e29cf8634"}, ] grpcio = [ - {file = "grpcio-1.27.2-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:dbec0a3a154dbf2eb85b38abaddf24964fa1c059ee0a4ad55d6f39211b1a4bca"}, - {file = "grpcio-1.27.2-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:1ef949b15a1f5f30651532a9b54edf3bd7c0b699a10931505fa2c80b2d395942"}, - {file = "grpcio-1.27.2-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:ed123037896a8db6709b8ad5acc0ed435453726ea0b63361d12de369624c2ab5"}, - {file = "grpcio-1.27.2-cp27-cp27m-manylinux2010_i686.whl", hash = 
"sha256:f9d632ce9fd485119c968ec6a7a343de698c5e014d17602ae2f110f1b05925ed"}, - {file = "grpcio-1.27.2-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:80c3d1ce8820dd819d1c9d6b63b6f445148480a831173b572a9174a55e7abd47"}, - {file = "grpcio-1.27.2-cp27-cp27m-win32.whl", hash = "sha256:07f82aefb4a56c7e1e52b78afb77d446847d27120a838a1a0489260182096045"}, - {file = "grpcio-1.27.2-cp27-cp27m-win_amd64.whl", hash = "sha256:28f27c64dd699b8b10f70da5f9320c1cffcaefca7dd76275b44571bd097f276c"}, - {file = "grpcio-1.27.2-cp27-cp27mu-linux_armv7l.whl", hash = "sha256:a25b84e10018875a0f294a7649d07c43e8bc3e6a821714e39e5cd607a36386d7"}, - {file = "grpcio-1.27.2-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:57949756a3ce1f096fa2b00f812755f5ab2effeccedb19feeb7d0deafa3d1de7"}, - {file = "grpcio-1.27.2-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:f3614dabd2cc8741850597b418bcf644d4f60e73615906c3acc407b78ff720b3"}, - {file = "grpcio-1.27.2-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:25c77692ea8c0929d4ad400ea9c3dcbcc4936cee84e437e0ef80da58fa73d88a"}, - {file = "grpcio-1.27.2-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:5dab393ab96b2ce4012823b2f2ed4ee907150424d2f02b97bd6f8dd8f17cc866"}, - {file = "grpcio-1.27.2-cp35-cp35m-linux_armv7l.whl", hash = "sha256:bb2987eb3af9bcf46019be39b82c120c3d35639a95bc4ee2d08f36ecdf469345"}, - {file = "grpcio-1.27.2-cp35-cp35m-macosx_10_7_intel.whl", hash = "sha256:6f328a3faaf81a2546a3022b3dfc137cc6d50d81082dbc0c94d1678943f05df3"}, - {file = "grpcio-1.27.2-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:5ebc13451246de82f130e8ee7e723e8d7ae1827f14b7b0218867667b1b12c88d"}, - {file = "grpcio-1.27.2-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:355bd7d7ce5ff2917d217f0e8ddac568cb7403e1ce1639b35a924db7d13a39b6"}, - {file = "grpcio-1.27.2-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:d1e5563e3b7f844dbc48d709c9e4a75647e11d0387cc1fa0c861d3e9d34bc844"}, - {file = "grpcio-1.27.2-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:1ec8fc865d8da6d0713e2092a27eee344cd54628b2c2065a0e77fff94df4ae00"}, - {file = "grpcio-1.27.2-cp35-cp35m-win32.whl", hash = "sha256:706e2dea3de33b0d8884c4d35ecd5911b4ff04d0697c4138096666ce983671a6"}, - {file = "grpcio-1.27.2-cp35-cp35m-win_amd64.whl", hash = "sha256:d18b4c8cacbb141979bb44355ee5813dd4d307e9d79b3a36d66eca7e0a203df8"}, - {file = "grpcio-1.27.2-cp36-cp36m-linux_armv7l.whl", hash = "sha256:02aef8ef1a5ac5f0836b543e462eb421df6048a7974211a906148053b8055ea6"}, - {file = "grpcio-1.27.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:b78af4d42985ab3143d9882d0006f48d12f1bc4ba88e78f23762777c3ee64571"}, - {file = "grpcio-1.27.2-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:9c0669ba9aebad540fb05a33beb7e659ea6e5ca35833fc5229c20f057db760e8"}, - {file = "grpcio-1.27.2-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:68a149a0482d0bc697aac702ec6efb9d380e0afebf9484db5b7e634146528371"}, - {file = "grpcio-1.27.2-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:a71138366d57901597bfcc52af7f076ab61c046f409c7b429011cd68de8f9fe6"}, - {file = "grpcio-1.27.2-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:9e9cfe55dc7ac2aa47e0fd3285ff829685f96803197042c9d2f0fb44e4b39b2c"}, - {file = "grpcio-1.27.2-cp36-cp36m-win32.whl", hash = "sha256:d22c897b65b1408509099f1c3334bd3704f5e4eb7c0486c57d0e212f71cb8f54"}, - {file = "grpcio-1.27.2-cp36-cp36m-win_amd64.whl", hash = "sha256:c59b9280284b791377b3524c8e39ca7b74ae2881ba1a6c51b36f4f1bb94cee49"}, - {file = "grpcio-1.27.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = 
"sha256:6e545908bcc2ae28e5b190ce3170f92d0438cf26a82b269611390114de0106eb"}, - {file = "grpcio-1.27.2-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:6db7ded10b82592c472eeeba34b9f12d7b0ab1e2dcad12f081b08ebdea78d7d6"}, - {file = "grpcio-1.27.2-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:4d3b6e66f32528bf43ca2297caca768280a8e068820b1c3dca0fcf9f03c7d6f1"}, - {file = "grpcio-1.27.2-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:586d931736912865c9790c60ca2db29e8dc4eace160d5a79fec3e58df79a9386"}, - {file = "grpcio-1.27.2-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:c03ce53690fe492845e14f4ab7e67d5a429a06db99b226b5c7caa23081c1e2bb"}, - {file = "grpcio-1.27.2-cp37-cp37m-win32.whl", hash = "sha256:209927e65395feb449783943d62a3036982f871d7f4045fadb90b2d82b153ea8"}, - {file = "grpcio-1.27.2-cp37-cp37m-win_amd64.whl", hash = "sha256:9713578f187fb1c4d00ac554fe1edcc6b3ddd62f5d4eb578b81261115802df8e"}, - {file = "grpcio-1.27.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b4efde5524579a9ce0459ca35a57a48ca878a4973514b8bb88cb80d7c9d34c85"}, - {file = "grpcio-1.27.2-cp38-cp38-manylinux1_i686.whl", hash = "sha256:fb62996c61eeff56b59ab8abfcaa0859ec2223392c03d6085048b576b567459b"}, - {file = "grpcio-1.27.2-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:a22daaf30037b8e59d6968c76fe0f7ff062c976c7a026e92fbefc4c4bf3fc5a4"}, - {file = "grpcio-1.27.2-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:4a0a33ada3f6f94f855f92460896ef08c798dcc5f17d9364d1735c5adc9d7e4a"}, - {file = "grpcio-1.27.2-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:8111b61eee12d7af5c58f82f2c97c2664677a05df9225ef5cbc2f25398c8c454"}, - {file = "grpcio-1.27.2-cp38-cp38-win32.whl", hash = "sha256:5121fa96c79fc0ec81825091d0be5c16865f834f41b31da40b08ee60552f9961"}, - {file = "grpcio-1.27.2-cp38-cp38-win_amd64.whl", hash = "sha256:1cff47297ee614e7ef66243dc34a776883ab6da9ca129ea114a802c5e58af5c1"}, - {file = "grpcio-1.27.2.tar.gz", hash = "sha256:5ae532b93cf9ce5a2a549b74a2c35e3b690b171ece9358519b3039c7b84c887e"}, + {file = "grpcio-1.49.1-cp310-cp310-linux_armv7l.whl", hash = "sha256:fd86040232e805b8e6378b2348c928490ee595b058ce9aaa27ed8e4b0f172b20"}, + {file = "grpcio-1.49.1-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:6fd0c9cede9552bf00f8c5791d257d5bf3790d7057b26c59df08be5e7a1e021d"}, + {file = "grpcio-1.49.1-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:d0d402e158d4e84e49c158cb5204119d55e1baf363ee98d6cb5dce321c3a065d"}, + {file = "grpcio-1.49.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:822ceec743d42a627e64ea266059a62d214c5a3cdfcd0d7fe2b7a8e4e82527c7"}, + {file = "grpcio-1.49.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2106d9c16527f0a85e2eea6e6b91a74fc99579c60dd810d8690843ea02bc0f5f"}, + {file = "grpcio-1.49.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:52dd02b7e7868233c571b49bc38ebd347c3bb1ff8907bb0cb74cb5f00c790afc"}, + {file = "grpcio-1.49.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:120fecba2ec5d14b5a15d11063b39783fda8dc8d24addd83196acb6582cabd9b"}, + {file = "grpcio-1.49.1-cp310-cp310-win32.whl", hash = "sha256:f1a3b88e3c53c1a6e6bed635ec1bbb92201bb6a1f2db186179f7f3f244829788"}, + {file = "grpcio-1.49.1-cp310-cp310-win_amd64.whl", hash = "sha256:a7d0017b92d3850abea87c1bdec6ea41104e71c77bca44c3e17f175c6700af62"}, + {file = "grpcio-1.49.1-cp311-cp311-linux_armv7l.whl", hash = "sha256:9fb17ff8c0d56099ac6ebfa84f670c5a62228d6b5c695cf21c02160c2ac1446b"}, + {file = "grpcio-1.49.1-cp311-cp311-macosx_10_10_x86_64.whl", hash = 
"sha256:075f2d06e3db6b48a2157a1bcd52d6cbdca980dd18988fe6afdb41795d51625f"}, + {file = "grpcio-1.49.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:46d93a1b4572b461a227f1db6b8d35a88952db1c47e5fadcf8b8a2f0e1dd9201"}, + {file = "grpcio-1.49.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dc79b2b37d779ac42341ddef40ad5bf0966a64af412c89fc2b062e3ddabb093f"}, + {file = "grpcio-1.49.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:5f8b3a971c7820ea9878f3fd70086240a36aeee15d1b7e9ecbc2743b0e785568"}, + {file = "grpcio-1.49.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:49b301740cf5bc8fed4fee4c877570189ae3951432d79fa8e524b09353659811"}, + {file = "grpcio-1.49.1-cp311-cp311-win32.whl", hash = "sha256:1c66a25afc6c71d357867b341da594a5587db5849b48f4b7d5908d236bb62ede"}, + {file = "grpcio-1.49.1-cp311-cp311-win_amd64.whl", hash = "sha256:6b6c3a95d27846f4145d6967899b3ab25fffc6ae99544415e1adcacef84842d2"}, + {file = "grpcio-1.49.1-cp37-cp37m-linux_armv7l.whl", hash = "sha256:1cc400c8a2173d1c042997d98a9563e12d9bb3fb6ad36b7f355bc77c7663b8af"}, + {file = "grpcio-1.49.1-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:34f736bd4d0deae90015c0e383885b431444fe6b6c591dea288173df20603146"}, + {file = "grpcio-1.49.1-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:196082b9c89ebf0961dcd77cb114bed8171964c8e3063b9da2fb33536a6938ed"}, + {file = "grpcio-1.49.1-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8c9f89c42749890618cd3c2464e1fbf88446e3d2f67f1e334c8e5db2f3272bbd"}, + {file = "grpcio-1.49.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64419cb8a5b612cdb1550c2fd4acbb7d4fb263556cf4625f25522337e461509e"}, + {file = "grpcio-1.49.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:8a5272061826e6164f96e3255405ef6f73b88fd3e8bef464c7d061af8585ac62"}, + {file = "grpcio-1.49.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ea9d0172445241ad7cb49577314e39d0af2c5267395b3561d7ced5d70458a9f3"}, + {file = "grpcio-1.49.1-cp37-cp37m-win32.whl", hash = "sha256:2070e87d95991473244c72d96d13596c751cb35558e11f5df5414981e7ed2492"}, + {file = "grpcio-1.49.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4fcedcab49baaa9db4a2d240ac81f2d57eb0052b1c6a9501b46b8ae912720fbf"}, + {file = "grpcio-1.49.1-cp38-cp38-linux_armv7l.whl", hash = "sha256:afbb3475cf7f4f7d380c2ca37ee826e51974f3e2665613996a91d6a58583a534"}, + {file = "grpcio-1.49.1-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:a4f9ba141380abde6c3adc1727f21529137a2552002243fa87c41a07e528245c"}, + {file = "grpcio-1.49.1-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:cf0a1fb18a7204b9c44623dfbd1465b363236ce70c7a4ed30402f9f60d8b743b"}, + {file = "grpcio-1.49.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:17bb6fe72784b630728c6cff9c9d10ccc3b6d04e85da6e0a7b27fb1d135fac62"}, + {file = "grpcio-1.49.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18305d5a082d1593b005a895c10041f833b16788e88b02bb81061f5ebcc465df"}, + {file = "grpcio-1.49.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:b6a1b39e59ac5a3067794a0e498911cf2e37e4b19ee9e9977dc5e7051714f13f"}, + {file = "grpcio-1.49.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:0e20d59aafc086b1cc68400463bddda6e41d3e5ed30851d1e2e0f6a2e7e342d3"}, + {file = "grpcio-1.49.1-cp38-cp38-win32.whl", hash = "sha256:e1e83233d4680863a421f3ee4a7a9b80d33cd27ee9ed7593bc93f6128302d3f2"}, + {file = "grpcio-1.49.1-cp38-cp38-win_amd64.whl", hash = 
"sha256:221d42c654d2a41fa31323216279c73ed17d92f533bc140a3390cc1bd78bf63c"}, + {file = "grpcio-1.49.1-cp39-cp39-linux_armv7l.whl", hash = "sha256:fa9e6e61391e99708ac87fc3436f6b7b9c6b845dc4639b406e5e61901e1aacde"}, + {file = "grpcio-1.49.1-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:9b449e966ef518ce9c860d21f8afe0b0f055220d95bc710301752ac1db96dd6a"}, + {file = "grpcio-1.49.1-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:aa34d2ad9f24e47fa9a3172801c676e4037d862247e39030165fe83821a7aafd"}, + {file = "grpcio-1.49.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5207f4eed1b775d264fcfe379d8541e1c43b878f2b63c0698f8f5c56c40f3d68"}, + {file = "grpcio-1.49.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b24a74651438d45619ac67004638856f76cc13d78b7478f2457754cbcb1c8ad"}, + {file = "grpcio-1.49.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:fe763781669790dc8b9618e7e677c839c87eae6cf28b655ee1fa69ae04eea03f"}, + {file = "grpcio-1.49.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2f2ff7ba0f8f431f32d4b4bc3a3713426949d3533b08466c4ff1b2b475932ca8"}, + {file = "grpcio-1.49.1-cp39-cp39-win32.whl", hash = "sha256:08ff74aec8ff457a89b97152d36cb811dcc1d17cd5a92a65933524e363327394"}, + {file = "grpcio-1.49.1-cp39-cp39-win_amd64.whl", hash = "sha256:274ffbb39717918c514b35176510ae9be06e1d93121e84d50b350861dcb9a705"}, + {file = "grpcio-1.49.1.tar.gz", hash = "sha256:d4725fc9ec8e8822906ae26bb26f5546891aa7fbc3443de970cc556d43a5c99f"}, ] grpcio-tools = [ - {file = "grpcio-tools-1.27.2.tar.gz", hash = "sha256:845a51305af9fc7f9e2078edaec9a759153195f6cf1fbb12b1fa6f077e56b260"}, - {file = "grpcio_tools-1.27.2-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:7a2d5fb558ac153a326e742ebfd7020eb781c43d3ffd920abd42b2e6c6fdfb37"}, - {file = "grpcio_tools-1.27.2-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:99961156a36aae4a402d6b14c1e7efde642794b3ddbf32c51db0cb3a199e8b11"}, - {file = "grpcio_tools-1.27.2-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:069826dd02ce1886444cf4519c4fe1b05ac9ef41491f26e97400640531db47f6"}, - {file = "grpcio_tools-1.27.2-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:fae91f30dc050a8d0b32d20dc700e6092f0bd2138d83e9570fff3f0372c1b27e"}, - {file = "grpcio_tools-1.27.2-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:a14dc7a36c845991d908a7179502ca47bcba5ae1817c4426ce68cf2c97b20ad9"}, - {file = "grpcio_tools-1.27.2-cp27-cp27m-win32.whl", hash = "sha256:d1a5e5fa47ba9557a7d3b31605631805adc66cdba9d95b5d10dfc52cca1fed53"}, - {file = "grpcio_tools-1.27.2-cp27-cp27m-win_amd64.whl", hash = "sha256:7b54b283ec83190680903a9037376dc915e1f03852a2d574ba4d981b7a1fd3d0"}, - {file = "grpcio_tools-1.27.2-cp27-cp27mu-linux_armv7l.whl", hash = "sha256:4698c6b6a57f73b14d91a542c69ff33a2da8729691b7060a5d7f6383624d045e"}, - {file = "grpcio_tools-1.27.2-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:87e8ca2c2d2d3e09b2a2bed5d740d7b3e64028dafb7d6be543b77eec85590736"}, - {file = "grpcio_tools-1.27.2-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:bd7f59ff1252a3db8a143b13ea1c1e93d4b8cf4b852eb48b22ef1e6942f62a84"}, - {file = "grpcio_tools-1.27.2-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:a8f892378b0b02526635b806f59141abbb429d19bec56e869e04f396502c9651"}, - {file = "grpcio_tools-1.27.2-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:69c4a63919b9007e845d9f8980becd2f89d808a4a431ca32b9723ee37b521cb1"}, - {file = "grpcio_tools-1.27.2-cp35-cp35m-linux_armv7l.whl", hash = 
"sha256:dcbc06556f3713a9348c4fce02d05d91e678fc320fb2bcf0ddf8e4bb11d17867"}, - {file = "grpcio_tools-1.27.2-cp35-cp35m-macosx_10_9_intel.whl", hash = "sha256:16dc3fad04fe18d50777c56af7b2d9b9984cd1cfc71184646eb431196d1645c6"}, - {file = "grpcio_tools-1.27.2-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:1de5a273eaffeb3d126a63345e9e848ea7db740762f700eb8b5d84c5e3e7687d"}, - {file = "grpcio_tools-1.27.2-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:6016c07d6566e3109a3c032cf3861902d66501ecc08a5a84c47e43027302f367"}, - {file = "grpcio_tools-1.27.2-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:915a695bc112517af48126ee0ecdb6aff05ed33f3eeef28f0d076f1f6b52ef5e"}, - {file = "grpcio_tools-1.27.2-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:ea4b3ad696d976d5eac74ec8df9a2c692113e455446ee38d5b3bd87f8e034fa6"}, - {file = "grpcio_tools-1.27.2-cp35-cp35m-win32.whl", hash = "sha256:a140bf853edb2b5e8692fe94869e3e34077d7599170c113d07a58286c604f4fe"}, - {file = "grpcio_tools-1.27.2-cp35-cp35m-win_amd64.whl", hash = "sha256:77e25c241e33b75612f2aa62985f746c6f6803ec4e452da508bb7f8d90a69db4"}, - {file = "grpcio_tools-1.27.2-cp36-cp36m-linux_armv7l.whl", hash = "sha256:5fd7efc2fd3370bd2c72dc58f31a407a5dff5498befa145da211b2e8c6a52c63"}, - {file = "grpcio_tools-1.27.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:9ba88c2d99bcaf7b9cb720925e3290d73b2367d238c5779363fd5598b2dc98c7"}, - {file = "grpcio_tools-1.27.2-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:b56caecc16307b088a431a4038c3b3bb7d0e7f9988cbd0e9fa04ac937455ea38"}, - {file = "grpcio_tools-1.27.2-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:f8514453411d72cc3cf7d481f2b6057e5b7436736d0cd39ee2b2f72088bbf497"}, - {file = "grpcio_tools-1.27.2-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:c1bb8f47d58e9f7c4825abfe01e6b85eda53c8b31d2267ca4cddf3c4d0829b80"}, - {file = "grpcio_tools-1.27.2-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:e17b2e0936b04ced99769e26111e1e86ba81619d1b2691b1364f795e45560953"}, - {file = "grpcio_tools-1.27.2-cp36-cp36m-win32.whl", hash = "sha256:520b7dafddd0f82cb7e4f6e9c6ba1049aa804d0e207870def9fe7f94d1e14090"}, - {file = "grpcio_tools-1.27.2-cp36-cp36m-win_amd64.whl", hash = "sha256:ee50b0cf0d28748ef9f941894eb50fc464bd61b8e96aaf80c5056bea9b80d580"}, - {file = "grpcio_tools-1.27.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:627c91923df75091d8c4d244af38d5ab7ed8d786d480751d6c2b9267fbb92fe0"}, - {file = "grpcio_tools-1.27.2-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:ef624b6134aef737b3daa4fb7e806cb8c5749efecd0b1fa9ce4f7e060c7a0221"}, - {file = "grpcio_tools-1.27.2-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:e6932518db389ede8bf06b4119bbd3e17f42d4626e72dec2b8955b20ec732cb6"}, - {file = "grpcio_tools-1.27.2-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:43a1573400527a23e4174d88604fde7a9d9a69bf9473c21936b7f409858f8ebb"}, - {file = "grpcio_tools-1.27.2-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:57f8b9e2c7f55cd45f6dd930d6de61deb42d3eb7f9788137fbc7155cf724132a"}, - {file = "grpcio_tools-1.27.2-cp37-cp37m-win32.whl", hash = "sha256:2ca280af2cae1a014a238057bd3c0a254527569a6a9169a01c07f0590081d530"}, - {file = "grpcio_tools-1.27.2-cp37-cp37m-win_amd64.whl", hash = "sha256:59fbeb5bb9a7b94eb61642ac2cee1db5233b8094ca76fc56d4e0c6c20b5dd85f"}, - {file = "grpcio_tools-1.27.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:00c5080cfb197ed20ecf0d0ff2d07f1fc9c42c724cad21c40ff2d048de5712b1"}, - {file = "grpcio_tools-1.27.2-cp38-cp38-manylinux1_i686.whl", hash = 
"sha256:f5450aa904e720f9c6407b59e96a8951ed6a95463f49444b6d2594b067d39588"}, - {file = "grpcio_tools-1.27.2-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:aaa5ae26883c3d58d1a4323981f96b941fa09bb8f0f368d97c6225585280cf04"}, - {file = "grpcio_tools-1.27.2-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:1266b577abe7c720fd16a83d0a4999a192e87c4a98fc9f97e0b99b106b3e155f"}, - {file = "grpcio_tools-1.27.2-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:a3d2aec4b09c8e59fee8b0d1ed668d09e8c48b738f03f5d8401d7eb409111c47"}, - {file = "grpcio_tools-1.27.2-cp38-cp38-win32.whl", hash = "sha256:8e7738a4b93842bca1158cde81a3587c9b7111823e40a1ddf73292ca9d58e08b"}, - {file = "grpcio_tools-1.27.2-cp38-cp38-win_amd64.whl", hash = "sha256:84724458c86ff9b14c29b49e321f34d80445b379f4cd4d0494c694b49b1d6f88"}, + {file = "grpcio-tools-1.43.0.tar.gz", hash = "sha256:f42f1d713096808b1b0472dd2a3749b712d13f0092dab9442d9c096446e860b2"}, + {file = "grpcio_tools-1.43.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:766771ef5b60ebcba0a3bdb302dd92fda988552eb8508451ff6d97371eac38e5"}, + {file = "grpcio_tools-1.43.0-cp310-cp310-macosx_10_10_universal2.whl", hash = "sha256:178a881db5de0f89abf3aeeb260ecfd1116cc31f88fb600a45fb5b19c3323b33"}, + {file = "grpcio_tools-1.43.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:019f55929e963214471825c7a4cdab7a57069109d5621b24e4db7b428b5fe47d"}, + {file = "grpcio_tools-1.43.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e6c0e1d1b47554c580882d392b739df91a55b6a8ec696b2b2e1bbc127d63df2c"}, + {file = "grpcio_tools-1.43.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c5c80098fa69593b828d119973744de03c3f9a6935df8a02e4329a39b7072f5"}, + {file = "grpcio_tools-1.43.0-cp310-cp310-win32.whl", hash = "sha256:53f7dcaa4218df1b64b39d0fc7236a8270e8ab2db4ab8cd1d2fda0e6d4544946"}, + {file = "grpcio_tools-1.43.0-cp310-cp310-win_amd64.whl", hash = "sha256:5be6d402b0cafef20ba3abb3baa37444961d9a9c4a6434d3d7c1f082f7697deb"}, + {file = "grpcio_tools-1.43.0-cp36-cp36m-linux_armv7l.whl", hash = "sha256:8953fdebef6905d7ff13a5a376b21b6fecd808d18bf4f0d3990ffe4a215d56eb"}, + {file = "grpcio_tools-1.43.0-cp36-cp36m-macosx_10_10_x86_64.whl", hash = "sha256:18870dcc8369ac4c37213e6796d8dc20494ea770670204f5e573f88e69eaaf0b"}, + {file = "grpcio_tools-1.43.0-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:010a4be6a2fccbd6741a4809c5da7f2e39a1e9e227745e6b495be567638bbeb9"}, + {file = "grpcio_tools-1.43.0-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:426f16b6b14d533ce61249a18fbcd1a23a4fa0c71a6d7ab347b1c7f862847bb8"}, + {file = "grpcio_tools-1.43.0-cp36-cp36m-manylinux_2_17_aarch64.whl", hash = "sha256:f974cb0bea88bac892c3ed16da92c6ac88cff0fea17f24bf0e1892eb4d27cd00"}, + {file = "grpcio_tools-1.43.0-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:55c2e604536e06248e2f81e549737fb3a180c8117832e494a0a8a81fbde44837"}, + {file = "grpcio_tools-1.43.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f97f9ffa49348fb24692751d2d4455ef2968bd07fe536d65597caaec14222629"}, + {file = "grpcio_tools-1.43.0-cp36-cp36m-win32.whl", hash = "sha256:6eaf97414237b8670ae9fa623879a26eabcc4c635b550c79a81e17eb600d6ae3"}, + {file = "grpcio_tools-1.43.0-cp36-cp36m-win_amd64.whl", hash = "sha256:04f100c1f6a7c72c537760c33582f6970070bd6fa6676b529bccfa31cc58bc79"}, + {file = "grpcio_tools-1.43.0-cp37-cp37m-linux_armv7l.whl", hash = "sha256:9dbb6d1f58f26d88ae689f1b49de84cfaf4786c81c01b9001d3ceea178116a07"}, + {file = 
"grpcio_tools-1.43.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:63862a441a77f6326ea9fe4bb005882f0e363441a5968d9cf8621c34d3dadc2b"}, + {file = "grpcio_tools-1.43.0-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:6dea0cb2e79b67593553ed8662f70e4310599fa8850fc0e056b19fcb63572b7f"}, + {file = "grpcio_tools-1.43.0-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:3eb4aa5b0e578c3d9d9da8e37a2ef73654287a498b8081543acd0db7f0ec1a9c"}, + {file = "grpcio_tools-1.43.0-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:09464c6b17663088144b7e6ea10e9465efdcee03d4b2ffefab39a799bd8360f8"}, + {file = "grpcio_tools-1.43.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2458d6b0404f83d95aef00cec01f310d30e9719564a25be50e39b259f6a2da5d"}, + {file = "grpcio_tools-1.43.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e9bb5da437364b7dcd2d3c6850747081ecbec0ba645c96c6d471f7e21fdcadb"}, + {file = "grpcio_tools-1.43.0-cp37-cp37m-win32.whl", hash = "sha256:2737f749a6ab965748629e619b35f3e1cbe5820fc79e34c88f73cb99efc71dde"}, + {file = "grpcio_tools-1.43.0-cp37-cp37m-win_amd64.whl", hash = "sha256:c39cbe7b902bb92f9afaa035091f5e2b8be35acbac501fec8cb6a0be7d7cdbbd"}, + {file = "grpcio_tools-1.43.0-cp38-cp38-linux_armv7l.whl", hash = "sha256:05550ba473cff7c09e905fcfb2263fd1f7600389660194ec022b5d5a3802534b"}, + {file = "grpcio_tools-1.43.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:ce13a922db8f5f95c5041d3a4cbf04d942b353f0cba9b251a674f69a31a2d3a6"}, + {file = "grpcio_tools-1.43.0-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:f19d40690c97365c1c1bde81474e6f496d7ab76f87e6d2889c72ad01bac98f2d"}, + {file = "grpcio_tools-1.43.0-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:ba3da574eb08fcaed541b3fc97ce217360fd86d954fa9ad6a604803d57a2e049"}, + {file = "grpcio_tools-1.43.0-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:efd1eb5880001f5189cfa3a774675cc9bbc8cc51586a3e90fe796394ac8626b8"}, + {file = "grpcio_tools-1.43.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:234c7a5af653357df5c616e013173eddda6193146c8ab38f3108c4784f66be26"}, + {file = "grpcio_tools-1.43.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d7e3662f62d410b3f81823b5fa0f79c6e0e250977a1058e4131867b85138a661"}, + {file = "grpcio_tools-1.43.0-cp38-cp38-win32.whl", hash = "sha256:5f2e584d7644ef924e9e042fa151a3bb9f7c28ef1ae260ee6c9cb327982b5e94"}, + {file = "grpcio_tools-1.43.0-cp38-cp38-win_amd64.whl", hash = "sha256:98dcb5b756855110fb661ccd6a93a716610b7efcd5720a3aec01358a1a892c30"}, + {file = "grpcio_tools-1.43.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:61ef6cb6ccf9b9c27bb85fffc5338194bcf444df502196c2ad0ff8df4706d41e"}, + {file = "grpcio_tools-1.43.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:1def9b68ac9e62674929bc6590a33d89635f1cf16016657d9e16a69f41aa5c36"}, + {file = "grpcio_tools-1.43.0-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:b68cc0c95a0f8c757e8d69b5fa46111d5c9d887ae62af28f827649b1d1b70fe1"}, + {file = "grpcio_tools-1.43.0-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:e956b5c3b586d7b27eae49fb06f544a26288596fe12e22ffec768109717276d1"}, + {file = "grpcio_tools-1.43.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:671e61bbc91d8d568f12c3654bb5a91fce9f3fdfd5ec2cfc60c2d3a840449aa6"}, + {file = "grpcio_tools-1.43.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d7173ed19854d1066bce9bdc09f735ca9c13e74a25d47a1cc5d1fe803b53bffb"}, + {file = 
"grpcio_tools-1.43.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1adb0dbcc1c10b86dcda910b8f56e39210e401bcee923dba166ba923a5f4696a"}, + {file = "grpcio_tools-1.43.0-cp39-cp39-win32.whl", hash = "sha256:ebfb94ddb454a6dc3a505d9531dc81c948e6364e181b8795bfad3f3f479974dc"}, + {file = "grpcio_tools-1.43.0-cp39-cp39-win_amd64.whl", hash = "sha256:d21928b680e6e29538688cffbf53f3d5a53cff0ec8f0c33139641700045bdf1a"}, ] identify = [ {file = "identify-1.6.2-py2.py3-none-any.whl", hash = "sha256:8f9879b5b7cca553878d31548a419ec2f227d3328da92fe8202bc5e546d5cbc3"}, {file = "identify-1.6.2.tar.gz", hash = "sha256:1c2014f6985ed02e62b2e6955578acf069cb2c54859e17853be474bfe7e13bed"}, ] -importlib-metadata = [ - {file = "importlib_metadata-4.8.3-py3-none-any.whl", hash = "sha256:65a9576a5b2d58ca44d133c42a241905cc45e34d2c06fd5ba2bafa221e5d7b5e"}, - {file = "importlib_metadata-4.8.3.tar.gz", hash = "sha256:766abffff765960fcc18003801f7044eb6755ffae4521c8e8ce8e83b9c9b0668"}, -] -importlib-resources = [ - {file = "importlib_resources-5.4.0-py3-none-any.whl", hash = "sha256:33a95faed5fc19b4bc16b29a6eeae248a3fe69dd55d4d229d2b480e23eeaad45"}, - {file = "importlib_resources-5.4.0.tar.gz", hash = "sha256:d756e2f85dd4de2ba89be0b21dba2a3bbec2e871a42a3a16719258a11f87506b"}, +iniconfig = [ + {file = "iniconfig-1.1.1-py2.py3-none-any.whl", hash = "sha256:011e24c64b7f47f6ebd835bb12a743f2fbe9a26d4cecaa7f53bc4f35ee9da8b3"}, + {file = "iniconfig-1.1.1.tar.gz", hash = "sha256:bc3af051d7d14b2ee5ef9969666def0cd1a000e121eaea580d4a313df4b37f32"}, ] invoke = [ {file = "invoke-1.4.1-py2-none-any.whl", hash = "sha256:93e12876d88130c8e0d7fd6618dd5387d6b36da55ad541481dfa5e001656f134"}, @@ -847,72 +841,82 @@ isort = [ {file = "isort-4.3.21.tar.gz", hash = "sha256:54da7e92468955c4fceacd0c86bd0ec997b0e1ee80d97f67c35a78b719dccab1"}, ] lxml = [ - {file = "lxml-4.6.5-cp27-cp27m-macosx_10_14_x86_64.whl", hash = "sha256:abcf7daa5ebcc89328326254f6dd6d566adb483d4d00178892afd386ab389de2"}, - {file = "lxml-4.6.5-cp27-cp27m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3884476a90d415be79adfa4e0e393048630d0d5bcd5757c4c07d8b4b00a1096b"}, - {file = "lxml-4.6.5-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:add017c5bd6b9ec3a5f09248396b6ee2ce61c5621f087eb2269c813cd8813808"}, - {file = "lxml-4.6.5-cp27-cp27m-win32.whl", hash = "sha256:a702005e447d712375433ed0499cb6e1503fadd6c96a47f51d707b4d37b76d3c"}, - {file = "lxml-4.6.5-cp27-cp27m-win_amd64.whl", hash = "sha256:da07c7e7fc9a3f40446b78c54dbba8bfd5c9100dfecb21b65bfe3f57844f5e71"}, - {file = "lxml-4.6.5-cp27-cp27mu-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a708c291900c40a7ecf23f1d2384ed0bc0604e24094dd13417c7e7f8f7a50d93"}, - {file = "lxml-4.6.5-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:f33d8efb42e4fc2b31b3b4527940b25cdebb3026fb56a80c1c1c11a4271d2352"}, - {file = "lxml-4.6.5-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:f6befb83bca720b71d6bd6326a3b26e9496ae6649e26585de024890fe50f49b8"}, - {file = "lxml-4.6.5-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:59d77bfa3bea13caee95bc0d3f1c518b15049b97dd61ea8b3d71ce677a67f808"}, - {file = "lxml-4.6.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:68a851176c931e2b3de6214347b767451243eeed3bea34c172127bbb5bf6c210"}, - {file = "lxml-4.6.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = 
"sha256:a7790a273225b0c46e5f859c1327f0f659896cc72eaa537d23aa3ad9ff2a1cc1"}, - {file = "lxml-4.6.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6548fc551de15f310dd0564751d9dc3d405278d45ea9b2b369ed1eccf142e1f5"}, - {file = "lxml-4.6.5-cp310-cp310-win32.whl", hash = "sha256:dc8a0dbb2a10ae8bb609584f5c504789f0f3d0d81840da4849102ec84289f952"}, - {file = "lxml-4.6.5-cp310-cp310-win_amd64.whl", hash = "sha256:1ccbfe5d17835db906f2bab6f15b34194db1a5b07929cba3cf45a96dbfbfefc0"}, - {file = "lxml-4.6.5-cp35-cp35m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ca9a40497f7e97a2a961c04fa8a6f23d790b0521350a8b455759d786b0bcb203"}, - {file = "lxml-4.6.5-cp35-cp35m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:e5b4b0d9440046ead3bd425eb2b852499241ee0cef1ae151038e4f87ede888c4"}, - {file = "lxml-4.6.5-cp35-cp35m-win32.whl", hash = "sha256:87f8f7df70b90fbe7b49969f07b347e3f978f8bd1046bb8ecae659921869202b"}, - {file = "lxml-4.6.5-cp35-cp35m-win_amd64.whl", hash = "sha256:ce52aad32ec6e46d1a91ff8b8014a91538800dd533914bfc4a82f5018d971408"}, - {file = "lxml-4.6.5-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:8021eeff7fabde21b9858ed058a8250ad230cede91764d598c2466b0ba70db8b"}, - {file = "lxml-4.6.5-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:cab343b265e38d4e00649cbbad9278b734c5715f9bcbb72c85a1f99b1a58e19a"}, - {file = "lxml-4.6.5-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:3534d7c468c044f6aef3c0aff541db2826986a29ea73f2ca831f5d5284d9b570"}, - {file = "lxml-4.6.5-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bdb98f4c9e8a1735efddfaa995b0c96559792da15d56b76428bdfc29f77c4cdb"}, - {file = "lxml-4.6.5-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:5ea121cb66d7e5cb396b4c3ca90471252b94e01809805cfe3e4e44be2db3a99c"}, - {file = "lxml-4.6.5-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:121fc6f71c692b49af6c963b84ab7084402624ffbe605287da362f8af0668ea3"}, - {file = "lxml-4.6.5-cp36-cp36m-win32.whl", hash = "sha256:1a2a7659b8eb93c6daee350a0d844994d49245a0f6c05c747f619386fb90ba04"}, - {file = "lxml-4.6.5-cp36-cp36m-win_amd64.whl", hash = "sha256:2f77556266a8fe5428b8759fbfc4bd70be1d1d9c9b25d2a414f6a0c0b0f09120"}, - {file = "lxml-4.6.5-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:558485218ee06458643b929765ac1eb04519ca3d1e2dcc288517de864c747c33"}, - {file = "lxml-4.6.5-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:ba0006799f21d83c3717fe20e2707a10bbc296475155aadf4f5850f6659b96b9"}, - {file = "lxml-4.6.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:916d457ad84e05b7db52700bad0a15c56e0c3000dcaf1263b2fb7a56fe148996"}, - {file = "lxml-4.6.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:c580c2a61d8297a6e47f4d01f066517dbb019be98032880d19ece7f337a9401d"}, - {file = "lxml-4.6.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a21b78af7e2e13bec6bea12fc33bc05730197674f3e5402ce214d07026ccfebd"}, - {file = "lxml-4.6.5-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:46515773570a33eae13e451c8fcf440222ef24bd3b26f40774dd0bd8b6db15b2"}, - {file = "lxml-4.6.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:124f09614f999551ac65e5b9875981ce4b66ac4b8e2ba9284572f741935df3d9"}, - {file = "lxml-4.6.5-cp37-cp37m-win32.whl", hash = 
"sha256:b4015baed99d046c760f09a4c59d234d8f398a454380c3cf0b859aba97136090"}, - {file = "lxml-4.6.5-cp37-cp37m-win_amd64.whl", hash = "sha256:12ae2339d32a2b15010972e1e2467345b7bf962e155671239fba74c229564b7f"}, - {file = "lxml-4.6.5-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:76b6c296e4f7a1a8a128aec42d128646897f9ae9a700ef6839cdc9b3900db9b5"}, - {file = "lxml-4.6.5-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:534032a5ceb34bba1da193b7d386ac575127cc39338379f39a164b10d97ade89"}, - {file = "lxml-4.6.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:60aeb14ff9022d2687ef98ce55f6342944c40d00916452bb90899a191802137a"}, - {file = "lxml-4.6.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:9801bcd52ac9c795a7d81ea67471a42cffe532e46cfb750cd5713befc5c019c0"}, - {file = "lxml-4.6.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3b95fb7e6f9c2f53db88f4642231fc2b8907d854e614710996a96f1f32018d5c"}, - {file = "lxml-4.6.5-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:642eb4cabd997c9b949a994f9643cd8ae00cf4ca8c5cd9c273962296fadf1c44"}, - {file = "lxml-4.6.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:af4139172ff0263d269abdcc641e944c9de4b5d660894a3ec7e9f9db63b56ac9"}, - {file = "lxml-4.6.5-cp38-cp38-win32.whl", hash = "sha256:57cf05466917e08f90e323f025b96f493f92c0344694f5702579ab4b7e2eb10d"}, - {file = "lxml-4.6.5-cp38-cp38-win_amd64.whl", hash = "sha256:4f415624cf8b065796649a5e4621773dc5c9ea574a944c76a7f8a6d3d2906b41"}, - {file = "lxml-4.6.5-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:7679bb6e4d9a3978a46ab19a3560e8d2b7265ef3c88152e7fdc130d649789887"}, - {file = "lxml-4.6.5-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:c34234a1bc9e466c104372af74d11a9f98338a3f72fae22b80485171a64e0144"}, - {file = "lxml-4.6.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:4b9390bf973e3907d967b75be199cf1978ca8443183cf1e78ad80ad8be9cf242"}, - {file = "lxml-4.6.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:fcc849b28f584ed1dbf277291ded5c32bb3476a37032df4a1d523b55faa5f944"}, - {file = "lxml-4.6.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:46f21f2600d001af10e847df9eb3b832e8a439f696c04891bcb8a8cedd859af9"}, - {file = "lxml-4.6.5-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:99cf827f5a783038eb313beee6533dddb8bdb086d7269c5c144c1c952d142ace"}, - {file = "lxml-4.6.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:925174cafb0f1179a7fd38da90302555d7445e34c9ece68019e53c946be7f542"}, - {file = "lxml-4.6.5-cp39-cp39-win32.whl", hash = "sha256:12d8d6fe3ddef629ac1349fa89a638b296a34b6529573f5055d1cb4e5245f73b"}, - {file = "lxml-4.6.5-cp39-cp39-win_amd64.whl", hash = "sha256:a52e8f317336a44836475e9c802f51c2dc38d612eaa76532cb1d17690338b63b"}, - {file = "lxml-4.6.5-pp37-pypy37_pp73-macosx_10_14_x86_64.whl", hash = "sha256:11ae552a78612620afd15625be9f1b82e3cc2e634f90d6b11709b10a100cba59"}, - {file = "lxml-4.6.5-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:473701599665d874919d05bb33b56180447b3a9da8d52d6d9799f381ce23f95c"}, - {file = "lxml-4.6.5-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = 
"sha256:7f00cc64b49d2ef19ddae898a3def9dd8fda9c3d27c8a174c2889ee757918e71"}, - {file = "lxml-4.6.5-pp38-pypy38_pp73-macosx_10_14_x86_64.whl", hash = "sha256:73e8614258404b2689a26cb5d002512b8bc4dfa18aca86382f68f959aee9b0c8"}, - {file = "lxml-4.6.5-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:ff44de36772b05c2eb74f2b4b6d1ae29b8f41ed5506310ce1258d44826ee38c1"}, - {file = "lxml-4.6.5-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:5d5254c815c186744c8f922e2ce861a2bdeabc06520b4b30b2f7d9767791ce6e"}, - {file = "lxml-4.6.5.tar.gz", hash = "sha256:6e84edecc3a82f90d44ddee2ee2a2630d4994b8471816e226d2b771cda7ac4ca"}, + {file = "lxml-4.9.1-cp27-cp27m-macosx_10_15_x86_64.whl", hash = "sha256:98cafc618614d72b02185ac583c6f7796202062c41d2eeecdf07820bad3295ed"}, + {file = "lxml-4.9.1-cp27-cp27m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c62e8dd9754b7debda0c5ba59d34509c4688f853588d75b53c3791983faa96fc"}, + {file = "lxml-4.9.1-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:21fb3d24ab430fc538a96e9fbb9b150029914805d551deeac7d7822f64631dfc"}, + {file = "lxml-4.9.1-cp27-cp27m-win32.whl", hash = "sha256:86e92728ef3fc842c50a5cb1d5ba2bc66db7da08a7af53fb3da79e202d1b2cd3"}, + {file = "lxml-4.9.1-cp27-cp27m-win_amd64.whl", hash = "sha256:4cfbe42c686f33944e12f45a27d25a492cc0e43e1dc1da5d6a87cbcaf2e95627"}, + {file = "lxml-4.9.1-cp27-cp27mu-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:dad7b164905d3e534883281c050180afcf1e230c3d4a54e8038aa5cfcf312b84"}, + {file = "lxml-4.9.1-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a614e4afed58c14254e67862456d212c4dcceebab2eaa44d627c2ca04bf86837"}, + {file = "lxml-4.9.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:f9ced82717c7ec65a67667bb05865ffe38af0e835cdd78728f1209c8fffe0cad"}, + {file = "lxml-4.9.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:d9fc0bf3ff86c17348dfc5d322f627d78273eba545db865c3cd14b3f19e57fa5"}, + {file = "lxml-4.9.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:e5f66bdf0976ec667fc4594d2812a00b07ed14d1b44259d19a41ae3fff99f2b8"}, + {file = "lxml-4.9.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:fe17d10b97fdf58155f858606bddb4e037b805a60ae023c009f760d8361a4eb8"}, + {file = "lxml-4.9.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8caf4d16b31961e964c62194ea3e26a0e9561cdf72eecb1781458b67ec83423d"}, + {file = "lxml-4.9.1-cp310-cp310-win32.whl", hash = "sha256:4780677767dd52b99f0af1f123bc2c22873d30b474aa0e2fc3fe5e02217687c7"}, + {file = "lxml-4.9.1-cp310-cp310-win_amd64.whl", hash = "sha256:b122a188cd292c4d2fcd78d04f863b789ef43aa129b233d7c9004de08693728b"}, + {file = "lxml-4.9.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:be9eb06489bc975c38706902cbc6888f39e946b81383abc2838d186f0e8b6a9d"}, + {file = "lxml-4.9.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:f1be258c4d3dc609e654a1dc59d37b17d7fef05df912c01fc2e15eb43a9735f3"}, + {file = "lxml-4.9.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:927a9dd016d6033bc12e0bf5dee1dde140235fc8d0d51099353c76081c03dc29"}, + {file = "lxml-4.9.1-cp35-cp35m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9232b09f5efee6a495a99ae6824881940d6447debe272ea400c02e3b68aad85d"}, + {file = 
"lxml-4.9.1-cp35-cp35m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:04da965dfebb5dac2619cb90fcf93efdb35b3c6994fea58a157a834f2f94b318"}, + {file = "lxml-4.9.1-cp35-cp35m-win32.whl", hash = "sha256:4d5bae0a37af799207140652a700f21a85946f107a199bcb06720b13a4f1f0b7"}, + {file = "lxml-4.9.1-cp35-cp35m-win_amd64.whl", hash = "sha256:4878e667ebabe9b65e785ac8da4d48886fe81193a84bbe49f12acff8f7a383a4"}, + {file = "lxml-4.9.1-cp36-cp36m-macosx_10_15_x86_64.whl", hash = "sha256:1355755b62c28950f9ce123c7a41460ed9743c699905cbe664a5bcc5c9c7c7fb"}, + {file = "lxml-4.9.1-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:bcaa1c495ce623966d9fc8a187da80082334236a2a1c7e141763ffaf7a405067"}, + {file = "lxml-4.9.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6eafc048ea3f1b3c136c71a86db393be36b5b3d9c87b1c25204e7d397cee9536"}, + {file = "lxml-4.9.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:13c90064b224e10c14dcdf8086688d3f0e612db53766e7478d7754703295c7c8"}, + {file = "lxml-4.9.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:206a51077773c6c5d2ce1991327cda719063a47adc02bd703c56a662cdb6c58b"}, + {file = "lxml-4.9.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:e8f0c9d65da595cfe91713bc1222af9ecabd37971762cb830dea2fc3b3bb2acf"}, + {file = "lxml-4.9.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:8f0a4d179c9a941eb80c3a63cdb495e539e064f8054230844dcf2fcb812b71d3"}, + {file = "lxml-4.9.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:830c88747dce8a3e7525defa68afd742b4580df6aa2fdd6f0855481e3994d391"}, + {file = "lxml-4.9.1-cp36-cp36m-win32.whl", hash = "sha256:1e1cf47774373777936c5aabad489fef7b1c087dcd1f426b621fda9dcc12994e"}, + {file = "lxml-4.9.1-cp36-cp36m-win_amd64.whl", hash = "sha256:5974895115737a74a00b321e339b9c3f45c20275d226398ae79ac008d908bff7"}, + {file = "lxml-4.9.1-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:1423631e3d51008871299525b541413c9b6c6423593e89f9c4cfbe8460afc0a2"}, + {file = "lxml-4.9.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:2aaf6a0a6465d39b5ca69688fce82d20088c1838534982996ec46633dc7ad6cc"}, + {file = "lxml-4.9.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:9f36de4cd0c262dd9927886cc2305aa3f2210db437aa4fed3fb4940b8bf4592c"}, + {file = "lxml-4.9.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:ae06c1e4bc60ee076292e582a7512f304abdf6c70db59b56745cca1684f875a4"}, + {file = "lxml-4.9.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:57e4d637258703d14171b54203fd6822fda218c6c2658a7d30816b10995f29f3"}, + {file = "lxml-4.9.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6d279033bf614953c3fc4a0aa9ac33a21e8044ca72d4fa8b9273fe75359d5cca"}, + {file = "lxml-4.9.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:a60f90bba4c37962cbf210f0188ecca87daafdf60271f4c6948606e4dabf8785"}, + {file = "lxml-4.9.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:6ca2264f341dd81e41f3fffecec6e446aa2121e0b8d026fb5130e02de1402785"}, + {file = "lxml-4.9.1-cp37-cp37m-win32.whl", hash = "sha256:27e590352c76156f50f538dbcebd1925317a0f70540f7dc8c97d2931c595783a"}, + {file = "lxml-4.9.1-cp37-cp37m-win_amd64.whl", hash = "sha256:eea5d6443b093e1545ad0210e6cf27f920482bfcf5c77cdc8596aec73523bb7e"}, + {file = 
"lxml-4.9.1-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:f05251bbc2145349b8d0b77c0d4e5f3b228418807b1ee27cefb11f69ed3d233b"}, + {file = "lxml-4.9.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:487c8e61d7acc50b8be82bda8c8d21d20e133c3cbf41bd8ad7eb1aaeb3f07c97"}, + {file = "lxml-4.9.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:8d1a92d8e90b286d491e5626af53afef2ba04da33e82e30744795c71880eaa21"}, + {file = "lxml-4.9.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:b570da8cd0012f4af9fa76a5635cd31f707473e65a5a335b186069d5c7121ff2"}, + {file = "lxml-4.9.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5ef87fca280fb15342726bd5f980f6faf8b84a5287fcc2d4962ea8af88b35130"}, + {file = "lxml-4.9.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:93e414e3206779ef41e5ff2448067213febf260ba747fc65389a3ddaa3fb8715"}, + {file = "lxml-4.9.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6653071f4f9bac46fbc30f3c7838b0e9063ee335908c5d61fb7a4a86c8fd2036"}, + {file = "lxml-4.9.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:32a73c53783becdb7eaf75a2a1525ea8e49379fb7248c3eeefb9412123536387"}, + {file = "lxml-4.9.1-cp38-cp38-win32.whl", hash = "sha256:1a7c59c6ffd6ef5db362b798f350e24ab2cfa5700d53ac6681918f314a4d3b94"}, + {file = "lxml-4.9.1-cp38-cp38-win_amd64.whl", hash = "sha256:1436cf0063bba7888e43f1ba8d58824f085410ea2025befe81150aceb123e345"}, + {file = "lxml-4.9.1-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:4beea0f31491bc086991b97517b9683e5cfb369205dac0148ef685ac12a20a67"}, + {file = "lxml-4.9.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:41fb58868b816c202e8881fd0f179a4644ce6e7cbbb248ef0283a34b73ec73bb"}, + {file = "lxml-4.9.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:bd34f6d1810d9354dc7e35158aa6cc33456be7706df4420819af6ed966e85448"}, + {file = "lxml-4.9.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:edffbe3c510d8f4bf8640e02ca019e48a9b72357318383ca60e3330c23aaffc7"}, + {file = "lxml-4.9.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6d949f53ad4fc7cf02c44d6678e7ff05ec5f5552b235b9e136bd52e9bf730b91"}, + {file = "lxml-4.9.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:079b68f197c796e42aa80b1f739f058dcee796dc725cc9a1be0cdb08fc45b000"}, + {file = "lxml-4.9.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9c3a88d20e4fe4a2a4a84bf439a5ac9c9aba400b85244c63a1ab7088f85d9d25"}, + {file = "lxml-4.9.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4e285b5f2bf321fc0857b491b5028c5f276ec0c873b985d58d7748ece1d770dd"}, + {file = "lxml-4.9.1-cp39-cp39-win32.whl", hash = "sha256:ef72013e20dd5ba86a8ae1aed7f56f31d3374189aa8b433e7b12ad182c0d2dfb"}, + {file = "lxml-4.9.1-cp39-cp39-win_amd64.whl", hash = "sha256:10d2017f9150248563bb579cd0d07c61c58da85c922b780060dcc9a3aa9f432d"}, + {file = "lxml-4.9.1-pp37-pypy37_pp73-macosx_10_15_x86_64.whl", hash = "sha256:0538747a9d7827ce3e16a8fdd201a99e661c7dee3c96c885d8ecba3c35d1032c"}, + {file = "lxml-4.9.1-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:0645e934e940107e2fdbe7c5b6fb8ec6232444260752598bc4d09511bd056c0b"}, + {file = "lxml-4.9.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = 
"sha256:6daa662aba22ef3258934105be2dd9afa5bb45748f4f702a3b39a5bf53a1f4dc"}, + {file = "lxml-4.9.1-pp38-pypy38_pp73-macosx_10_15_x86_64.whl", hash = "sha256:603a464c2e67d8a546ddaa206d98e3246e5db05594b97db844c2f0a1af37cf5b"}, + {file = "lxml-4.9.1-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:c4b2e0559b68455c085fb0f6178e9752c4be3bba104d6e881eb5573b399d1eb2"}, + {file = "lxml-4.9.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:0f3f0059891d3254c7b5fb935330d6db38d6519ecd238ca4fce93c234b4a0f73"}, + {file = "lxml-4.9.1-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:c852b1530083a620cb0de5f3cd6826f19862bafeaf77586f1aef326e49d95f0c"}, + {file = "lxml-4.9.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:287605bede6bd36e930577c5925fcea17cb30453d96a7b4c63c14a257118dbb9"}, + {file = "lxml-4.9.1.tar.gz", hash = "sha256:fe749b052bb7233fe5d072fcb549221a8cb1a16725c47c37e42b0b9cb3ff2c3f"}, ] -mako = [ - {file = "Mako-1.1.3-py2.py3-none-any.whl", hash = "sha256:93729a258e4ff0747c876bd9e20df1b9758028946e976324ccd2d68245c7b6a9"}, - {file = "Mako-1.1.3.tar.gz", hash = "sha256:8195c8c1400ceb53496064314c6736719c6f25e7479cd24c77be3d9361cddc27"}, +Mako = [ + {file = "Mako-1.2.3-py3-none-any.whl", hash = "sha256:c413a086e38cd885088d5e165305ee8eed04e8b3f8f62df343480da0a385735f"}, + {file = "Mako-1.2.3.tar.gz", hash = "sha256:7fde96466fcfeedb0eed94f187f20b23d85e4cb41444be0e542e2c8c65c396cd"}, ] -markupsafe = [ +MarkupSafe = [ {file = "MarkupSafe-2.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d8446c54dc28c01e5a2dbac5a25f071f6653e6e40f3a8818e8b45d790fe6ef53"}, {file = "MarkupSafe-2.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:36bc903cbb393720fad60fc28c10de6acf10dc6cc883f3e24ee4012371399a38"}, {file = "MarkupSafe-2.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2d7d807855b419fc2ed3e631034685db6079889a1f01d5d9dac950f764da3dad"}, @@ -991,10 +995,6 @@ mock = [ {file = "mock-4.0.2-py3-none-any.whl", hash = "sha256:3f9b2c0196c60d21838f307f5825a7b86b678cedc58ab9e50a8988187b4d81e0"}, {file = "mock-4.0.2.tar.gz", hash = "sha256:dd33eb70232b6118298d516bbcecd26704689c386594f0f3c4f13867b2c56f72"}, ] -more-itertools = [ - {file = "more-itertools-8.12.0.tar.gz", hash = "sha256:7dc6ad46f05f545f900dd59e8dfb4e84a4827b97b3cfecb175ea0c7d247f6064"}, - {file = "more_itertools-8.12.0-py3-none-any.whl", hash = "sha256:43e6dd9942dffd72661a2c4ef383ad7da1e6a3e968a927ad7a6083ab410a688b"}, -] netaddr = [ {file = "netaddr-0.7.19-py2.py3-none-any.whl", hash = "sha256:56b3558bd71f3f6999e4c52e349f38660e54a7a8a9943335f73dfc96883e08ca"}, {file = "netaddr-0.7.19.tar.gz", hash = "sha256:38aeec7cdd035081d3a4c306394b19d677623bf76fa0913f6695127c7753aefd"}, @@ -1008,103 +1008,111 @@ packaging = [ {file = "packaging-21.3.tar.gz", hash = "sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb"}, ] paramiko = [ - {file = "paramiko-2.9.2-py2.py3-none-any.whl", hash = "sha256:04097dbd96871691cdb34c13db1883066b8a13a0df2afd4cb0a92221f51c2603"}, - {file = "paramiko-2.9.2.tar.gz", hash = "sha256:944a9e5dbdd413ab6c7951ea46b0ab40713235a9c4c5ca81cfe45c6f14fa677b"}, + {file = "paramiko-2.11.0-py2.py3-none-any.whl", hash = "sha256:655f25dc8baf763277b933dfcea101d636581df8d6b9774d1fb653426b72c270"}, + {file = "paramiko-2.11.0.tar.gz", hash = 
"sha256:003e6bee7c034c21fbb051bf83dc0a9ee4106204dd3c53054c71452cc4ec3938"}, ] -pillow = [ - {file = "Pillow-8.3.2-cp310-cp310-macosx_10_10_universal2.whl", hash = "sha256:c691b26283c3a31594683217d746f1dad59a7ae1d4cfc24626d7a064a11197d4"}, - {file = "Pillow-8.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f514c2717012859ccb349c97862568fdc0479aad85b0270d6b5a6509dbc142e2"}, - {file = "Pillow-8.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:be25cb93442c6d2f8702c599b51184bd3ccd83adebd08886b682173e09ef0c3f"}, - {file = "Pillow-8.3.2-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d675a876b295afa114ca8bf42d7f86b5fb1298e1b6bb9a24405a3f6c8338811c"}, - {file = "Pillow-8.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:59697568a0455764a094585b2551fd76bfd6b959c9f92d4bdec9d0e14616303a"}, - {file = "Pillow-8.3.2-cp310-cp310-win32.whl", hash = "sha256:2d5e9dc0bf1b5d9048a94c48d0813b6c96fccfa4ccf276d9c36308840f40c228"}, - {file = "Pillow-8.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:11c27e74bab423eb3c9232d97553111cc0be81b74b47165f07ebfdd29d825875"}, - {file = "Pillow-8.3.2-cp36-cp36m-macosx_10_10_x86_64.whl", hash = "sha256:11eb7f98165d56042545c9e6db3ce394ed8b45089a67124298f0473b29cb60b2"}, - {file = "Pillow-8.3.2-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f23b2d3079522fdf3c09de6517f625f7a964f916c956527bed805ac043799b8"}, - {file = "Pillow-8.3.2-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:19ec4cfe4b961edc249b0e04b5618666c23a83bc35842dea2bfd5dfa0157f81b"}, - {file = "Pillow-8.3.2-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e5a31c07cea5edbaeb4bdba6f2b87db7d3dc0f446f379d907e51cc70ea375629"}, - {file = "Pillow-8.3.2-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:15ccb81a6ffc57ea0137f9f3ac2737ffa1d11f786244d719639df17476d399a7"}, - {file = "Pillow-8.3.2-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:8f284dc1695caf71a74f24993b7c7473d77bc760be45f776a2c2f4e04c170550"}, - {file = "Pillow-8.3.2-cp36-cp36m-win32.whl", hash = "sha256:4abc247b31a98f29e5224f2d31ef15f86a71f79c7f4d2ac345a5d551d6393073"}, - {file = "Pillow-8.3.2-cp36-cp36m-win_amd64.whl", hash = "sha256:a048dad5ed6ad1fad338c02c609b862dfaa921fcd065d747194a6805f91f2196"}, - {file = "Pillow-8.3.2-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:06d1adaa284696785375fa80a6a8eb309be722cf4ef8949518beb34487a3df71"}, - {file = "Pillow-8.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd24054aaf21e70a51e2a2a5ed1183560d3a69e6f9594a4bfe360a46f94eba83"}, - {file = "Pillow-8.3.2-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:27a330bf7014ee034046db43ccbb05c766aa9e70b8d6c5260bfc38d73103b0ba"}, - {file = "Pillow-8.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13654b521fb98abdecec105ea3fb5ba863d1548c9b58831dd5105bb3873569f1"}, - {file = "Pillow-8.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a1bd983c565f92779be456ece2479840ec39d386007cd4ae83382646293d681b"}, - {file = "Pillow-8.3.2-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:4326ea1e2722f3dc00ed77c36d3b5354b8fb7399fb59230249ea6d59cbed90da"}, - {file = "Pillow-8.3.2-cp37-cp37m-win32.whl", hash = "sha256:085a90a99404b859a4b6c3daa42afde17cb3ad3115e44a75f0d7b4a32f06a6c9"}, - {file = "Pillow-8.3.2-cp37-cp37m-win_amd64.whl", hash = 
"sha256:18a07a683805d32826c09acfce44a90bf474e6a66ce482b1c7fcd3757d588df3"}, - {file = "Pillow-8.3.2-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:4e59e99fd680e2b8b11bbd463f3c9450ab799305d5f2bafb74fefba6ac058616"}, - {file = "Pillow-8.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4d89a2e9219a526401015153c0e9dd48319ea6ab9fe3b066a20aa9aee23d9fd3"}, - {file = "Pillow-8.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:56fd98c8294f57636084f4b076b75f86c57b2a63a8410c0cd172bc93695ee979"}, - {file = "Pillow-8.3.2-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2b11c9d310a3522b0fd3c35667914271f570576a0e387701f370eb39d45f08a4"}, - {file = "Pillow-8.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0412516dcc9de9b0a1e0ae25a280015809de8270f134cc2c1e32c4eeb397cf30"}, - {file = "Pillow-8.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bcb04ff12e79b28be6c9988f275e7ab69f01cc2ba319fb3114f87817bb7c74b6"}, - {file = "Pillow-8.3.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:0b9911ec70731711c3b6ebcde26caea620cbdd9dcb73c67b0730c8817f24711b"}, - {file = "Pillow-8.3.2-cp38-cp38-win32.whl", hash = "sha256:ce2e5e04bb86da6187f96d7bab3f93a7877830981b37f0287dd6479e27a10341"}, - {file = "Pillow-8.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:35d27687f027ad25a8d0ef45dd5208ef044c588003cdcedf05afb00dbc5c2deb"}, - {file = "Pillow-8.3.2-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:04835e68ef12904bc3e1fd002b33eea0779320d4346082bd5b24bec12ad9c3e9"}, - {file = "Pillow-8.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:10e00f7336780ca7d3653cf3ac26f068fa11b5a96894ea29a64d3dc4b810d630"}, - {file = "Pillow-8.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cde7a4d3687f21cffdf5bb171172070bb95e02af448c4c8b2f223d783214056"}, - {file = "Pillow-8.3.2-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1c3ff00110835bdda2b1e2b07f4a2548a39744bb7de5946dc8e95517c4fb2ca6"}, - {file = "Pillow-8.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:35d409030bf3bd05fa66fb5fdedc39c521b397f61ad04309c90444e893d05f7d"}, - {file = "Pillow-8.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6bff50ba9891be0a004ef48828e012babaaf7da204d81ab9be37480b9020a82b"}, - {file = "Pillow-8.3.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:7dbfbc0020aa1d9bc1b0b8bcf255a7d73f4ad0336f8fd2533fcc54a4ccfb9441"}, - {file = "Pillow-8.3.2-cp39-cp39-win32.whl", hash = "sha256:963ebdc5365d748185fdb06daf2ac758116deecb2277ec5ae98139f93844bc09"}, - {file = "Pillow-8.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:cc9d0dec711c914ed500f1d0d3822868760954dce98dfb0b7382a854aee55d19"}, - {file = "Pillow-8.3.2-pp36-pypy36_pp73-macosx_10_10_x86_64.whl", hash = "sha256:2c661542c6f71dfd9dc82d9d29a8386287e82813b0375b3a02983feac69ef864"}, - {file = "Pillow-8.3.2-pp36-pypy36_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:548794f99ff52a73a156771a0402f5e1c35285bd981046a502d7e4793e8facaa"}, - {file = "Pillow-8.3.2-pp36-pypy36_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:8b68f565a4175e12e68ca900af8910e8fe48aaa48fd3ca853494f384e11c8bcd"}, - {file = "Pillow-8.3.2-pp36-pypy36_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:838eb85de6d9307c19c655c726f8d13b8b646f144ca6b3771fa62b711ebf7624"}, - {file = "Pillow-8.3.2-pp36-pypy36_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", 
hash = "sha256:feb5db446e96bfecfec078b943cc07744cc759893cef045aa8b8b6d6aaa8274e"}, - {file = "Pillow-8.3.2-pp37-pypy37_pp73-macosx_10_10_x86_64.whl", hash = "sha256:fc0db32f7223b094964e71729c0361f93db43664dd1ec86d3df217853cedda87"}, - {file = "Pillow-8.3.2-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:fd4fd83aa912d7b89b4b4a1580d30e2a4242f3936882a3f433586e5ab97ed0d5"}, - {file = "Pillow-8.3.2-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:d0c8ebbfd439c37624db98f3877d9ed12c137cadd99dde2d2eae0dab0bbfc355"}, - {file = "Pillow-8.3.2-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6cb3dd7f23b044b0737317f892d399f9e2f0b3a02b22b2c692851fb8120d82c6"}, - {file = "Pillow-8.3.2-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a66566f8a22561fc1a88dc87606c69b84fa9ce724f99522cf922c801ec68f5c1"}, - {file = "Pillow-8.3.2-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:ce651ca46d0202c302a535d3047c55a0131a720cf554a578fc1b8a2aff0e7d96"}, - {file = "Pillow-8.3.2.tar.gz", hash = "sha256:dde3f3ed8d00c72631bc19cbfff8ad3b6215062a5eed402381ad365f82f0c18c"}, +pathlib2 = [ + {file = "pathlib2-2.3.7.post1-py2.py3-none-any.whl", hash = "sha256:5266a0fd000452f1b3467d782f079a4343c63aaa119221fbdc4e39577489ca5b"}, + {file = "pathlib2-2.3.7.post1.tar.gz", hash = "sha256:9fe0edad898b83c0c3e199c842b27ed216645d2e177757b2dd67384d4113c641"}, +] +Pillow = [ + {file = "Pillow-9.2.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:a9c9bc489f8ab30906d7a85afac4b4944a572a7432e00698a7239f44a44e6efb"}, + {file = "Pillow-9.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:510cef4a3f401c246cfd8227b300828715dd055463cdca6176c2e4036df8bd4f"}, + {file = "Pillow-9.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7888310f6214f19ab2b6df90f3f06afa3df7ef7355fc025e78a3044737fab1f5"}, + {file = "Pillow-9.2.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:831e648102c82f152e14c1a0938689dbb22480c548c8d4b8b248b3e50967b88c"}, + {file = "Pillow-9.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1cc1d2451e8a3b4bfdb9caf745b58e6c7a77d2e469159b0d527a4554d73694d1"}, + {file = "Pillow-9.2.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:136659638f61a251e8ed3b331fc6ccd124590eeff539de57c5f80ef3a9594e58"}, + {file = "Pillow-9.2.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:6e8c66f70fb539301e064f6478d7453e820d8a2c631da948a23384865cd95544"}, + {file = "Pillow-9.2.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:37ff6b522a26d0538b753f0b4e8e164fdada12db6c6f00f62145d732d8a3152e"}, + {file = "Pillow-9.2.0-cp310-cp310-win32.whl", hash = "sha256:c79698d4cd9318d9481d89a77e2d3fcaeff5486be641e60a4b49f3d2ecca4e28"}, + {file = "Pillow-9.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:254164c57bab4b459f14c64e93df11eff5ded575192c294a0c49270f22c5d93d"}, + {file = "Pillow-9.2.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:adabc0bce035467fb537ef3e5e74f2847c8af217ee0be0455d4fec8adc0462fc"}, + {file = "Pillow-9.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:336b9036127eab855beec9662ac3ea13a4544a523ae273cbf108b228ecac8437"}, + {file = "Pillow-9.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50dff9cc21826d2977ef2d2a205504034e3a4563ca6f5db739b0d1026658e004"}, + {file = "Pillow-9.2.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:cb6259196a589123d755380b65127ddc60f4c64b21fc3bb46ce3a6ea663659b0"}, + {file = "Pillow-9.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b0554af24df2bf96618dac71ddada02420f946be943b181108cac55a7a2dcd4"}, + {file = "Pillow-9.2.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:15928f824870535c85dbf949c09d6ae7d3d6ac2d6efec80f3227f73eefba741c"}, + {file = "Pillow-9.2.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:bdd0de2d64688ecae88dd8935012c4a72681e5df632af903a1dca8c5e7aa871a"}, + {file = "Pillow-9.2.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d5b87da55a08acb586bad5c3aa3b86505f559b84f39035b233d5bf844b0834b1"}, + {file = "Pillow-9.2.0-cp311-cp311-win32.whl", hash = "sha256:b6d5e92df2b77665e07ddb2e4dbd6d644b78e4c0d2e9272a852627cdba0d75cf"}, + {file = "Pillow-9.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:6bf088c1ce160f50ea40764f825ec9b72ed9da25346216b91361eef8ad1b8f8c"}, + {file = "Pillow-9.2.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:2c58b24e3a63efd22554c676d81b0e57f80e0a7d3a5874a7e14ce90ec40d3069"}, + {file = "Pillow-9.2.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eef7592281f7c174d3d6cbfbb7ee5984a671fcd77e3fc78e973d492e9bf0eb3f"}, + {file = "Pillow-9.2.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dcd7b9c7139dc8258d164b55696ecd16c04607f1cc33ba7af86613881ffe4ac8"}, + {file = "Pillow-9.2.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a138441e95562b3c078746a22f8fca8ff1c22c014f856278bdbdd89ca36cff1b"}, + {file = "Pillow-9.2.0-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:93689632949aff41199090eff5474f3990b6823404e45d66a5d44304e9cdc467"}, + {file = "Pillow-9.2.0-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:f3fac744f9b540148fa7715a435d2283b71f68bfb6d4aae24482a890aed18b59"}, + {file = "Pillow-9.2.0-cp37-cp37m-win32.whl", hash = "sha256:fa768eff5f9f958270b081bb33581b4b569faabf8774726b283edb06617101dc"}, + {file = "Pillow-9.2.0-cp37-cp37m-win_amd64.whl", hash = "sha256:69bd1a15d7ba3694631e00df8de65a8cb031911ca11f44929c97fe05eb9b6c1d"}, + {file = "Pillow-9.2.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:030e3460861488e249731c3e7ab59b07c7853838ff3b8e16aac9561bb345da14"}, + {file = "Pillow-9.2.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:74a04183e6e64930b667d321524e3c5361094bb4af9083db5c301db64cd341f3"}, + {file = "Pillow-9.2.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2d33a11f601213dcd5718109c09a52c2a1c893e7461f0be2d6febc2879ec2402"}, + {file = "Pillow-9.2.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1fd6f5e3c0e4697fa7eb45b6e93996299f3feee73a3175fa451f49a74d092b9f"}, + {file = "Pillow-9.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a647c0d4478b995c5e54615a2e5360ccedd2f85e70ab57fbe817ca613d5e63b8"}, + {file = "Pillow-9.2.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:4134d3f1ba5f15027ff5c04296f13328fecd46921424084516bdb1b2548e66ff"}, + {file = "Pillow-9.2.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:bc431b065722a5ad1dfb4df354fb9333b7a582a5ee39a90e6ffff688d72f27a1"}, + {file = "Pillow-9.2.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:1536ad017a9f789430fb6b8be8bf99d2f214c76502becc196c6f2d9a75b01b76"}, + {file = "Pillow-9.2.0-cp38-cp38-win32.whl", hash = "sha256:2ad0d4df0f5ef2247e27fc790d5c9b5a0af8ade9ba340db4a73bb1a4a3e5fb4f"}, + {file = "Pillow-9.2.0-cp38-cp38-win_amd64.whl", hash = 
"sha256:ec52c351b35ca269cb1f8069d610fc45c5bd38c3e91f9ab4cbbf0aebc136d9c8"}, + {file = "Pillow-9.2.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:0ed2c4ef2451de908c90436d6e8092e13a43992f1860275b4d8082667fbb2ffc"}, + {file = "Pillow-9.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4ad2f835e0ad81d1689f1b7e3fbac7b01bb8777d5a985c8962bedee0cc6d43da"}, + {file = "Pillow-9.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ea98f633d45f7e815db648fd7ff0f19e328302ac36427343e4432c84432e7ff4"}, + {file = "Pillow-9.2.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7761afe0126d046974a01e030ae7529ed0ca6a196de3ec6937c11df0df1bc91c"}, + {file = "Pillow-9.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a54614049a18a2d6fe156e68e188da02a046a4a93cf24f373bffd977e943421"}, + {file = "Pillow-9.2.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:5aed7dde98403cd91d86a1115c78d8145c83078e864c1de1064f52e6feb61b20"}, + {file = "Pillow-9.2.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:13b725463f32df1bfeacbf3dd197fb358ae8ebcd8c5548faa75126ea425ccb60"}, + {file = "Pillow-9.2.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:808add66ea764ed97d44dda1ac4f2cfec4c1867d9efb16a33d158be79f32b8a4"}, + {file = "Pillow-9.2.0-cp39-cp39-win32.whl", hash = "sha256:337a74fd2f291c607d220c793a8135273c4c2ab001b03e601c36766005f36885"}, + {file = "Pillow-9.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:fac2d65901fb0fdf20363fbd345c01958a742f2dc62a8dd4495af66e3ff502a4"}, + {file = "Pillow-9.2.0-pp37-pypy37_pp73-macosx_10_10_x86_64.whl", hash = "sha256:ad2277b185ebce47a63f4dc6302e30f05762b688f8dc3de55dbae4651872cdf3"}, + {file = "Pillow-9.2.0-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7c7b502bc34f6e32ba022b4a209638f9e097d7a9098104ae420eb8186217ebbb"}, + {file = "Pillow-9.2.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d1f14f5f691f55e1b47f824ca4fdcb4b19b4323fe43cc7bb105988cad7496be"}, + {file = "Pillow-9.2.0-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:dfe4c1fedfde4e2fbc009d5ad420647f7730d719786388b7de0999bf32c0d9fd"}, + {file = "Pillow-9.2.0-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:f07f1f00e22b231dd3d9b9208692042e29792d6bd4f6639415d2f23158a80013"}, + {file = "Pillow-9.2.0-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1802f34298f5ba11d55e5bb09c31997dc0c6aed919658dfdf0198a2fe75d5490"}, + {file = "Pillow-9.2.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17d4cafe22f050b46d983b71c707162d63d796a1235cdf8b9d7a112e97b15bac"}, + {file = "Pillow-9.2.0-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:96b5e6874431df16aee0c1ba237574cb6dff1dcb173798faa6a9d8b399a05d0e"}, + {file = "Pillow-9.2.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:0030fdbd926fb85844b8b92e2f9449ba89607231d3dd597a21ae72dc7fe26927"}, + {file = "Pillow-9.2.0.tar.gz", hash = "sha256:75e636fd3e0fb872693f23ccb8a5ff2cd578801251f3a4f6854c6a5d437d3c04"}, ] platformdirs = [ {file = "platformdirs-2.4.0-py3-none-any.whl", hash = "sha256:8868bbe3c3c80d42f20156f22e7131d2fb321f5bc86a2a345375c6481a67021d"}, {file = "platformdirs-2.4.0.tar.gz", hash = "sha256:367a5e80b3d04d2428ffa76d33f124cf11e8fff2acdaa9b43d545f5c7d661ef2"}, ] pluggy = [ - {file = "pluggy-0.13.1-py2.py3-none-any.whl", hash = "sha256:966c145cd83c96502c3c3868f50408687b38434af77734af1e9ca461a4081d2d"}, - {file = "pluggy-0.13.1.tar.gz", hash = 
"sha256:15b2acde666561e1298d71b523007ed7364de07029219b604cf808bfa1c765b0"}, + {file = "pluggy-1.0.0-py2.py3-none-any.whl", hash = "sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3"}, + {file = "pluggy-1.0.0.tar.gz", hash = "sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159"}, ] pre-commit = [ {file = "pre_commit-2.1.1-py2.py3-none-any.whl", hash = "sha256:09ebe467f43ce24377f8c2f200fe3cd2570d328eb2ce0568c8e96ce19da45fa6"}, {file = "pre_commit-2.1.1.tar.gz", hash = "sha256:f8d555e31e2051892c7f7b3ad9f620bd2c09271d87e9eedb2ad831737d6211eb"}, ] protobuf = [ - {file = "protobuf-3.19.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f51d5a9f137f7a2cec2d326a74b6e3fc79d635d69ffe1b036d39fc7d75430d37"}, - {file = "protobuf-3.19.4-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:09297b7972da685ce269ec52af761743714996b4381c085205914c41fcab59fb"}, - {file = "protobuf-3.19.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:072fbc78d705d3edc7ccac58a62c4c8e0cec856987da7df8aca86e647be4e35c"}, - {file = "protobuf-3.19.4-cp310-cp310-win32.whl", hash = "sha256:7bb03bc2873a2842e5ebb4801f5c7ff1bfbdf426f85d0172f7644fcda0671ae0"}, - {file = "protobuf-3.19.4-cp310-cp310-win_amd64.whl", hash = "sha256:f358aa33e03b7a84e0d91270a4d4d8f5df6921abe99a377828839e8ed0c04e07"}, - {file = "protobuf-3.19.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:1c91ef4110fdd2c590effb5dca8fdbdcb3bf563eece99287019c4204f53d81a4"}, - {file = "protobuf-3.19.4-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c438268eebb8cf039552897d78f402d734a404f1360592fef55297285f7f953f"}, - {file = "protobuf-3.19.4-cp36-cp36m-win32.whl", hash = "sha256:835a9c949dc193953c319603b2961c5c8f4327957fe23d914ca80d982665e8ee"}, - {file = "protobuf-3.19.4-cp36-cp36m-win_amd64.whl", hash = "sha256:4276cdec4447bd5015453e41bdc0c0c1234eda08420b7c9a18b8d647add51e4b"}, - {file = "protobuf-3.19.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:6cbc312be5e71869d9d5ea25147cdf652a6781cf4d906497ca7690b7b9b5df13"}, - {file = "protobuf-3.19.4-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:54a1473077f3b616779ce31f477351a45b4fef8c9fd7892d6d87e287a38df368"}, - {file = "protobuf-3.19.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:435bb78b37fc386f9275a7035fe4fb1364484e38980d0dd91bc834a02c5ec909"}, - {file = "protobuf-3.19.4-cp37-cp37m-win32.whl", hash = "sha256:16f519de1313f1b7139ad70772e7db515b1420d208cb16c6d7858ea989fc64a9"}, - {file = "protobuf-3.19.4-cp37-cp37m-win_amd64.whl", hash = "sha256:cdc076c03381f5c1d9bb1abdcc5503d9ca8b53cf0a9d31a9f6754ec9e6c8af0f"}, - {file = "protobuf-3.19.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:69da7d39e39942bd52848438462674c463e23963a1fdaa84d88df7fbd7e749b2"}, - {file = "protobuf-3.19.4-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:48ed3877fa43e22bcacc852ca76d4775741f9709dd9575881a373bd3e85e54b2"}, - {file = "protobuf-3.19.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd95d1dfb9c4f4563e6093a9aa19d9c186bf98fa54da5252531cc0d3a07977e7"}, - {file = "protobuf-3.19.4-cp38-cp38-win32.whl", hash = "sha256:b38057450a0c566cbd04890a40edf916db890f2818e8682221611d78dc32ae26"}, - {file = "protobuf-3.19.4-cp38-cp38-win_amd64.whl", hash = "sha256:7ca7da9c339ca8890d66958f5462beabd611eca6c958691a8fe6eccbd1eb0c6e"}, - {file = "protobuf-3.19.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:36cecbabbda242915529b8ff364f2263cd4de7c46bbe361418b5ed859677ba58"}, - {file = 
"protobuf-3.19.4-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:c1068287025f8ea025103e37d62ffd63fec8e9e636246b89c341aeda8a67c934"}, - {file = "protobuf-3.19.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96bd766831596d6014ca88d86dc8fe0fb2e428c0b02432fd9db3943202bf8c5e"}, - {file = "protobuf-3.19.4-cp39-cp39-win32.whl", hash = "sha256:84123274d982b9e248a143dadd1b9815049f4477dc783bf84efe6250eb4b836a"}, - {file = "protobuf-3.19.4-cp39-cp39-win_amd64.whl", hash = "sha256:3112b58aac3bac9c8be2b60a9daf6b558ca3f7681c130dcdd788ade7c9ffbdca"}, - {file = "protobuf-3.19.4-py2.py3-none-any.whl", hash = "sha256:8961c3a78ebfcd000920c9060a262f082f29838682b1f7201889300c1fbe0616"}, - {file = "protobuf-3.19.4.tar.gz", hash = "sha256:9df0c10adf3e83015ced42a9a7bd64e13d06c4cf45c340d2c63020ea04499d0a"}, + {file = "protobuf-3.19.5-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:f2b599a21c9a32e171ec29a2ac54e03297736c578698e11b099d031f79da114b"}, + {file = "protobuf-3.19.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f976234e20ab2785f54224bcdafa027674e23663b132fa3ca0caa291a6cfbde7"}, + {file = "protobuf-3.19.5-cp310-cp310-win32.whl", hash = "sha256:4ee2af7051d3b10c8a4fe6fd1a2c69f201fea36aeee7086cf202a692e1b99ee1"}, + {file = "protobuf-3.19.5-cp310-cp310-win_amd64.whl", hash = "sha256:dca2284378a5f2a86ffed35c6ac147d14c48b525eefcd1083e5a9ce28dfa8657"}, + {file = "protobuf-3.19.5-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:c0f80876a8ff0ae7064084ed094eb86497bd5a3812e6fc96a05318b92301674e"}, + {file = "protobuf-3.19.5-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c4160b601220627f7e91154e572baf5e161a9c3f445a8242d536ee3d0b7b17c"}, + {file = "protobuf-3.19.5-cp36-cp36m-win32.whl", hash = "sha256:f2bde37667b18c2b5280df83bc799204394a5d2d774e4deaf9de0eb741df6833"}, + {file = "protobuf-3.19.5-cp36-cp36m-win_amd64.whl", hash = "sha256:1867f93b06a183f87696871bb8d1e99ee71dbb69d468ce1f0cc8bf3d30f982f3"}, + {file = "protobuf-3.19.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a89aa0c042e61e11ade320b802d6db4ee5391d8d973e46d3a48172c1597789f8"}, + {file = "protobuf-3.19.5-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:f9cebda093c2f6bfed88f1c17cdade09d4d96096421b344026feee236532d4de"}, + {file = "protobuf-3.19.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67efb5d20618020aa9596e17bfc37ca068c28ec0c1507d9507f73c93d46c9855"}, + {file = "protobuf-3.19.5-cp37-cp37m-win32.whl", hash = "sha256:950abd6c00e7b51f87ae8b18a0ce4d69fea217f62f171426e77de5061f6d9850"}, + {file = "protobuf-3.19.5-cp37-cp37m-win_amd64.whl", hash = "sha256:d3973a2d58aefc7d1230725c2447ce7f86a71cbc094b86a77c6ee1505ac7cdb1"}, + {file = "protobuf-3.19.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9e1d74032f56ff25f417cfe84c8147047732e5059137ca42efad20cbbd25f5e0"}, + {file = "protobuf-3.19.5-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:d249519ba5ecf5dd6b18150c9b6bcde510b273714b696f3923ff8308fc11ae49"}, + {file = "protobuf-3.19.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f957ef53e872d58a0afd3bf6d80d48535d28c99b40e75e6634cbc33ea42fd54"}, + {file = "protobuf-3.19.5-cp38-cp38-win32.whl", hash = "sha256:5470f892961af464ae6eaf0f3099e2c1190ae8c7f36f174b89491281341f79ca"}, + {file = "protobuf-3.19.5-cp38-cp38-win_amd64.whl", hash = "sha256:c44e3282cff74ad18c7e8a0375f407f69ee50c2116364b44492a196293e08b21"}, + {file = "protobuf-3.19.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:66d14b5b90090353efe75c9fb1bf65ef7267383034688d255b500822e37d5c2f"}, + {file = "protobuf-3.19.5-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:f4f909f4dde413dec435a44b0894956d55bb928ded7d6e3c726556ca4c796e84"}, + {file = "protobuf-3.19.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5266c36cc0af3bb3dbf44f199d225b33da66a9a5c3bdc2b14865ad10eddf0e37"}, + {file = "protobuf-3.19.5-cp39-cp39-win32.whl", hash = "sha256:6a02172b9650f819d01fb8e224fc69b0706458fc1ab4f1c669281243c71c1a5e"}, + {file = "protobuf-3.19.5-cp39-cp39-win_amd64.whl", hash = "sha256:696e6cfab94cc15a14946f2bf72719dced087d437adbd994fff34f38986628bc"}, + {file = "protobuf-3.19.5-py2.py3-none-any.whl", hash = "sha256:9e42b1cf2ecd8a1bd161239e693f22035ba99905ae6d7efeac8a0546c7ec1a27"}, + {file = "protobuf-3.19.5.tar.gz", hash = "sha256:e63b0b3c42e51c94add62b010366cd4979cb6d5f06158bcae8faac4c294f91e1"}, ] py = [ {file = "py-1.11.0-py2.py3-none-any.whl", hash = "sha256:607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378"}, @@ -1122,7 +1130,7 @@ pyflakes = [ {file = "pyflakes-2.2.0-py2.py3-none-any.whl", hash = "sha256:0d94e0e05a19e57a99444b6ddcf9a6eb2e5c68d3ca1e98e90707af8152c90a92"}, {file = "pyflakes-2.2.0.tar.gz", hash = "sha256:35b2d75ee967ea93b55750aa9edbbf72813e06a66ba54438df2cfac9e3c27fc8"}, ] -pynacl = [ +PyNaCl = [ {file = "PyNaCl-1.5.0-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:401002a4aaa07c9414132aaed7f6836ff98f59277a234704ff66878c2ee4a0d1"}, {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:52cb72a79269189d4e0dc537556f4740f7f0a9ec41c1322598799b0bdad4ef92"}, {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a36d4a9dda1f19ce6e03c9a784a2921a4b726b02e1c736600ca9c22029474394"}, @@ -1139,35 +1147,37 @@ pyparsing = [ {file = "pyparsing-3.0.7.tar.gz", hash = "sha256:18ee9022775d270c55187733956460083db60b37d0d0fb357445f3094eed3eea"}, ] pyproj = [ - {file = "pyproj-2.6.1.post1-cp35-cp35m-macosx_10_6_intel.whl", hash = "sha256:457ad3856014ac26af1d86def6dc8cf69c1fa377b6e2fd6e97912d51cf66bdbe"}, - {file = "pyproj-2.6.1.post1-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:6f3f36440ea61f5f6da4e6beb365dddcbe159815450001d9fb753545affa45ff"}, - {file = "pyproj-2.6.1.post1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:6a212d0e5c7efa33d039f0c8b0a489e2204fcd28b56206567852ad7f5f2a653e"}, - {file = "pyproj-2.6.1.post1-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:451a3d1c563b672458029ebc04acbb3266cd8b3025268eb871a9176dc3638911"}, - {file = "pyproj-2.6.1.post1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:e015f900b4b84e908f8035ab16ebf02d67389c1c216c17a2196fc2e515c00762"}, - {file = "pyproj-2.6.1.post1-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:a13e5731b3a360ee7fbd1e9199ec9203fafcece8ebd0b1351f16d0a90cad6828"}, - {file = "pyproj-2.6.1.post1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:33c1c2968a4f4f87d517c4275a18b557e5c13907cf2609371fadea8463c3ba05"}, - {file = "pyproj-2.6.1.post1-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:3fef83a01c1e86dd9fa99d8214f749837cfafc34d9d6230b4b0a998fa7a68a1a"}, - {file = "pyproj-2.6.1.post1-cp36-cp36m-win32.whl", hash = "sha256:a6ac4861979cd05a0f5400fefa41d26c0269a5fb8237618aef7c998907db39e1"}, - {file = "pyproj-2.6.1.post1-cp36-cp36m-win_amd64.whl", hash = "sha256:cbf6ccf990860b06c5262ff97c4b78e1d07883981635cd53a6aa438a68d92945"}, - {file = "pyproj-2.6.1.post1-cp37-cp37m-macosx_10_9_x86_64.whl", 
hash = "sha256:adacb67a9f71fb54ca1b887a6ab20f32dd536fcdf2acec84a19e25ad768f7965"}, - {file = "pyproj-2.6.1.post1-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:e50d5d20b87758acf8f13f39a3b3eb21d5ef32339d2bc8cdeb8092416e0051df"}, - {file = "pyproj-2.6.1.post1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:2518d1606e2229b82318e704b40290e02a2a52d77b40cdcb2978973d6fc27b20"}, - {file = "pyproj-2.6.1.post1-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:33a5d1cfbb40a019422eb80709a0e270704390ecde7278fdc0b88f3647c56a39"}, - {file = "pyproj-2.6.1.post1-cp37-cp37m-win32.whl", hash = "sha256:daf2998e3f5bcdd579a18faf009f37f53538e9b7d0a252581a610297d31e8536"}, - {file = "pyproj-2.6.1.post1-cp37-cp37m-win_amd64.whl", hash = "sha256:a8b7c8accdc61dac8e91acab7c1f7b4590d1e102f2ee9b1f1e6399fad225958e"}, - {file = "pyproj-2.6.1.post1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9f097e8f341a162438918e908be86d105a28194ff6224633b2e9616c5031153f"}, - {file = "pyproj-2.6.1.post1-cp38-cp38-manylinux1_i686.whl", hash = "sha256:d90a5d1fdd066b0e9b22409b0f5e81933469918fa04c2cf7f9a76ce84cb29dad"}, - {file = "pyproj-2.6.1.post1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:f5a8015c74ec8f6508aebf493b58ba20ccb4da8168bf05f0c2a37faccb518da9"}, - {file = "pyproj-2.6.1.post1-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:d87836be6b720fb4d9c112136aa47621b6ca09a554e645c1081561eb8e2fa1f4"}, - {file = "pyproj-2.6.1.post1-cp38-cp38-win32.whl", hash = "sha256:bc2f3a15d065e206d63edd2cc4739aa0a35c05338ee276ab1dc72f56f1944bda"}, - {file = "pyproj-2.6.1.post1-cp38-cp38-win_amd64.whl", hash = "sha256:93cbad7b699e8e80def7de80c350617f35e6a0b82862f8ce3c014657c25fdb3c"}, - {file = "pyproj-2.6.1.post1.tar.gz", hash = "sha256:4f5b02b4abbd41610397c635b275a8ee4a2b5bc72a75572b98ac6ae7befa471e"}, + {file = "pyproj-3.3.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:473961faef7a9fd723c5d432f65220ea6ab3854e606bf84b4d409a75a4261c78"}, + {file = "pyproj-3.3.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:07c9d8d7ec009bbac09e233cfc725601586fe06880e5538a3a44eaf560ba3a62"}, + {file = "pyproj-3.3.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2fef9c1e339f25c57f6ae0558b5ab1bbdf7994529a30d8d7504fc6302ea51c03"}, + {file = "pyproj-3.3.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:140fa649fedd04f680a39f8ad339799a55cb1c49f6a84e1b32b97e49646647aa"}, + {file = "pyproj-3.3.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b59c08aea13ee428cf8a919212d55c036cc94784805ed77c8f31a4d1f541058c"}, + {file = "pyproj-3.3.1-cp310-cp310-win32.whl", hash = "sha256:1adc9ccd1bf04998493b6a2e87e60656c75ab790653b36cfe351e9ef214828ed"}, + {file = "pyproj-3.3.1-cp310-cp310-win_amd64.whl", hash = "sha256:42eea10afc750fccd1c5c4ba56de29ab791ab4d83c1f7db72705566282ac5396"}, + {file = "pyproj-3.3.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:531ea36519fa7b581466d4b6ab32f66ae4dadd9499d726352f71ee5e19c3d1c5"}, + {file = "pyproj-3.3.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:67025e37598a6bbed2c9c6c9e4c911f6dd39315d3e1148ead935a5c4d64309d5"}, + {file = "pyproj-3.3.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aed1a3c0cd4182425f91b48d5db39f459bc2fe0d88017ead6425a1bc85faee33"}, + {file = "pyproj-3.3.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3cc4771403db54494e1e55bca8e6d33cde322f8cf0ed39f1557ff109c66d2cd1"}, + {file = "pyproj-3.3.1-cp38-cp38-win32.whl", hash = 
"sha256:c99f7b5757a28040a2dd4a28c9805fdf13eef79a796f4a566ab5cb362d10630d"}, + {file = "pyproj-3.3.1-cp38-cp38-win_amd64.whl", hash = "sha256:5dac03d4338a4c8bd0f69144c527474f517b4cbd7d2d8c532cd8937799723248"}, + {file = "pyproj-3.3.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:56b0f9ee2c5b2520b18db30a393a7b86130cf527ddbb8c96e7f3c837474a9d79"}, + {file = "pyproj-3.3.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f1032e5dfb50eae06382bcc7b9011b994f7104d932fe91bd83a722275e30e8ce"}, + {file = "pyproj-3.3.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f92d8f6514516124abb714dce912b20867831162cfff9fae2678ef07b6fcf0f"}, + {file = "pyproj-3.3.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1ef1bfbe2dcc558c7a98e2f1836abdcd630390f3160724a6f4f5c818b2be0ad5"}, + {file = "pyproj-3.3.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ca5f32b56210429b367ca4f9a57ffe67975c487af82e179a24370879a3daf68"}, + {file = "pyproj-3.3.1-cp39-cp39-win32.whl", hash = "sha256:aba199704c824fb84ab64927e7bc9ef71e603e483130ec0f7e09e97259b8f61f"}, + {file = "pyproj-3.3.1-cp39-cp39-win_amd64.whl", hash = "sha256:120d45ed73144c65e9677dc73ba8a531c495d179dd9f9f0471ac5acc02d7ac4b"}, + {file = "pyproj-3.3.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:52efb681647dfac185cc655a709bc0caaf910031a0390f816f5fc8ce150cbedc"}, + {file = "pyproj-3.3.1-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5ab0d6e38fda7c13726afacaf62e9f9dd858089d67910471758afd9cb24e0ecd"}, + {file = "pyproj-3.3.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45487942c19c5a8b09c91964ea3201f4e094518e34743cae373889a36e3d9260"}, + {file = "pyproj-3.3.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:797ad5655d484feac14b0fbb4a4efeaac0cf780a223046e2465494c767fd1c3b"}, + {file = "pyproj-3.3.1.tar.gz", hash = "sha256:b3d8e14d91cc95fb3dbc03a9d0588ac58326803eefa5bbb0978d109de3304fbe"}, ] pytest = [ - {file = "pytest-5.4.3-py3-none-any.whl", hash = "sha256:5c0db86b698e8f170ba4582a492248919255fcd4c79b1ee64ace34301fb589a1"}, - {file = "pytest-5.4.3.tar.gz", hash = "sha256:7979331bfcba207414f5e1263b5a0f8f521d0f457318836a7355531ed1a4c7d8"}, + {file = "pytest-6.2.5-py3-none-any.whl", hash = "sha256:7310f8d27bc79ced999e760ca304d69f6ba6c6649c0b60fb0e04a4a77cacc134"}, + {file = "pytest-6.2.5.tar.gz", hash = "sha256:131b36680866a76e6781d13f101efb86cf674ebb9762eb70d3082b6f29889e89"}, ] -pyyaml = [ +PyYAML = [ {file = "PyYAML-5.4-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:f7a21e3d99aa3095ef0553e7ceba36fb693998fbb1226f1392ce33681047465f"}, {file = "PyYAML-5.4-cp27-cp27m-win32.whl", hash = "sha256:52bf0930903818e600ae6c2901f748bc4869c0c406056f679ab9614e5d21a166"}, {file = "PyYAML-5.4-cp27-cp27m-win_amd64.whl", hash = "sha256:a36a48a51e5471513a5aea920cdad84cbd56d70a5057cca3499a637496ea379c"}, @@ -1190,6 +1200,10 @@ pyyaml = [ {file = "PyYAML-5.4-cp39-cp39-win_amd64.whl", hash = "sha256:8bf38641b4713d77da19e91f8b5296b832e4db87338d6aeffe422d42f1ca896d"}, {file = "PyYAML-5.4.tar.gz", hash = "sha256:3c49e39ac034fd64fd576d63bb4db53cda89b362768a67f07749d55f128ac18a"}, ] +setuptools = [ + {file = "setuptools-59.6.0-py3-none-any.whl", hash = "sha256:4ce92f1e1f8f01233ee9952c04f6b81d1e02939d6e1b488428154974a4d0783e"}, + {file = "setuptools-59.6.0.tar.gz", hash = "sha256:22c7348c6d2976a52632c67f7ab0cdf40147db7789f9aed18734643fe9cf3373"}, +] six = [ {file = "six-1.16.0-py2.py3-none-any.whl", hash = 
"sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, @@ -1198,19 +1212,7 @@ toml = [ {file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"}, {file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"}, ] -typing-extensions = [ - {file = "typing_extensions-4.1.1-py3-none-any.whl", hash = "sha256:21c85e0fe4b9a155d0799430b0ad741cdce7e359660ccbd8b530613e8df88ce2"}, - {file = "typing_extensions-4.1.1.tar.gz", hash = "sha256:1a9462dcc3347a79b1f1c0271fbe79e844580bb598bafa1ed208b94da3cdcd42"}, -] virtualenv = [ - {file = "virtualenv-20.13.1-py2.py3-none-any.whl", hash = "sha256:45e1d053cad4cd453181ae877c4ffc053546ae99e7dd049b9ff1d9be7491abf7"}, - {file = "virtualenv-20.13.1.tar.gz", hash = "sha256:e0621bcbf4160e4e1030f05065c8834b4e93f4fcc223255db2a823440aca9c14"}, -] -wcwidth = [ - {file = "wcwidth-0.2.5-py2.py3-none-any.whl", hash = "sha256:beb4802a9cebb9144e99086eff703a642a13d6a0052920003a230f3294bbe784"}, - {file = "wcwidth-0.2.5.tar.gz", hash = "sha256:c4d647b99872929fdb7bdcaa4fbe7f01413ed3d98077df798530e5b04f116c83"}, -] -zipp = [ - {file = "zipp-3.6.0-py3-none-any.whl", hash = "sha256:9fe5ea21568a0a70e50f273397638d39b03353731e6cbbb3fd8502a33fec40bc"}, - {file = "zipp-3.6.0.tar.gz", hash = "sha256:71c644c5369f4a6e07636f0aa966270449561fcea2e3d6747b8d23efaa9d7832"}, + {file = "virtualenv-20.16.5-py3-none-any.whl", hash = "sha256:d07dfc5df5e4e0dbc92862350ad87a36ed505b978f6c39609dc489eadd5b0d27"}, + {file = "virtualenv-20.16.5.tar.gz", hash = "sha256:227ea1b9994fdc5ea31977ba3383ef296d7472ea85be9d6732e42a91c04e80da"}, ] diff --git a/daemon/proto/core/api/grpc/configservices.proto b/daemon/proto/core/api/grpc/configservices.proto index 189a2892..28e00bcb 100644 --- a/daemon/proto/core/api/grpc/configservices.proto +++ b/daemon/proto/core/api/grpc/configservices.proto @@ -58,3 +58,13 @@ message GetNodeConfigServiceRequest { message GetNodeConfigServiceResponse { map config = 1; } + +message GetConfigServiceRenderedRequest { + int32 session_id = 1; + int32 node_id = 2; + string name = 3; +} + +message GetConfigServiceRenderedResponse { + map rendered = 1; +} diff --git a/daemon/proto/core/api/grpc/core.proto b/daemon/proto/core/api/grpc/core.proto index f0cb242d..d2f024da 100644 --- a/daemon/proto/core/api/grpc/core.proto +++ b/daemon/proto/core/api/grpc/core.proto @@ -61,6 +61,8 @@ service CoreApi { } rpc DeleteLink (DeleteLinkRequest) returns (DeleteLinkResponse) { } + rpc Linked (LinkedRequest) returns (LinkedResponse) { + } // mobility rpc rpc GetMobilityConfig (mobility.GetMobilityConfigRequest) returns (mobility.GetMobilityConfigResponse) { @@ -89,6 +91,8 @@ service CoreApi { } rpc ConfigServiceAction (services.ServiceActionRequest) returns (services.ServiceActionResponse) { } + rpc GetConfigServiceRendered (configservices.GetConfigServiceRenderedRequest) returns (configservices.GetConfigServiceRenderedResponse) { + } // wlan rpc rpc GetWlanConfig (wlan.GetWlanConfigRequest) returns (wlan.GetWlanConfigResponse) { @@ -98,6 +102,14 @@ service CoreApi { rpc WlanLink (wlan.WlanLinkRequest) returns (wlan.WlanLinkResponse) { } + // wireless rpc + rpc WirelessLinked (WirelessLinkedRequest) returns (WirelessLinkedResponse) { + } + rpc WirelessConfig (WirelessConfigRequest) returns (WirelessConfigResponse) { + } + rpc GetWirelessConfig 
(GetWirelessConfigRequest) returns (GetWirelessConfigResponse) { + } + // emane rpc rpc GetEmaneModelConfig (emane.GetEmaneModelConfigRequest) returns (emane.GetEmaneModelConfigResponse) { } @@ -280,12 +292,11 @@ message ConfigEvent { repeated int32 data_types = 5; string data_values = 6; string captions = 7; - string bitmap = 8; - string possible_values = 9; - string groups = 10; - int32 iface_id = 11; - int32 network_id = 12; - string opaque = 13; + string possible_values = 8; + string groups = 9; + int32 iface_id = 10; + int32 network_id = 11; + string opaque = 12; } message ExceptionEvent { @@ -615,6 +626,7 @@ message Node { map service_configs = 18; map config_service_configs= 19; repeated emane.NodeEmaneConfig emane_configs = 20; + map wireless_config = 21; } message Link { @@ -656,6 +668,8 @@ message Interface { int32 mtu = 10; int32 node_id = 11; int32 net2_id = 12; + int32 nem_id = 13; + int32 nem_port = 14; } message SessionLocation { @@ -684,3 +698,47 @@ message Server { string name = 1; string host = 2; } + +message LinkedRequest { + int32 session_id = 1; + int32 node1_id = 2; + int32 node2_id = 3; + int32 iface1_id = 4; + int32 iface2_id = 5; + bool linked = 6; +} + +message LinkedResponse { +} + +message WirelessLinkedRequest { + int32 session_id = 1; + int32 wireless_id = 2; + int32 node1_id = 3; + int32 node2_id = 4; + bool linked = 5; +} + +message WirelessLinkedResponse { +} + +message WirelessConfigRequest { + int32 session_id = 1; + int32 wireless_id = 2; + int32 node1_id = 3; + int32 node2_id = 4; + LinkOptions options1 = 5; + LinkOptions options2 = 6; +} + +message WirelessConfigResponse { +} + +message GetWirelessConfigRequest { + int32 session_id = 1; + int32 node_id = 2; +} + +message GetWirelessConfigResponse { + map config = 1; +} diff --git a/daemon/proto/core/api/grpc/services.proto b/daemon/proto/core/api/grpc/services.proto index dc451c40..1b430f99 100644 --- a/daemon/proto/core/api/grpc/services.proto +++ b/daemon/proto/core/api/grpc/services.proto @@ -37,7 +37,7 @@ message ServiceAction { } message ServiceDefaults { - string node_type = 1; + string model = 1; repeated string services = 2; } diff --git a/daemon/pyproject.toml b/daemon/pyproject.toml index 9e11d3f6..92fb7c4f 100644 --- a/daemon/pyproject.toml +++ b/daemon/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "core" -version = "8.2.0" +version = "9.0.0" description = "CORE Common Open Research Emulator" authors = ["Boeing Research and Technology"] license = "BSD-2-Clause" @@ -14,29 +14,38 @@ include = [ ] exclude = ["core/constants.py.in"] +[tool.poetry.scripts] +core-daemon = "core.scripts.daemon:main" +core-cli = "core.scripts.cli:main" +core-gui = "core.scripts.gui:main" +core-player = "core.scripts.player:main" +core-route-monitor = "core.scripts.routemonitor:main" +core-service-update = "core.scripts.serviceupdate:main" +core-cleanup = "core.scripts.cleanup:main" [tool.poetry.dependencies] -python = "^3.6" -dataclasses = { version = "*", python = "~3.6" } -fabric = "2.5.0" -grpcio = "1.27.2" +python = "^3.9" +fabric = "2.7.1" +grpcio = "1.49.1" invoke = "1.4.1" -lxml = "4.6.5" -mako = "1.1.3" +lxml = "4.9.1" netaddr = "0.7.19" -pillow = "8.3.2" -protobuf = "3.19.4" -pyproj = "2.6.1.post1" +protobuf = "3.19.5" +pyproj = "3.3.1" pyyaml = "5.4" +Pillow = "9.2.0" +Mako = "1.2.3" [tool.poetry.dev-dependencies] black = "==19.3b0" flake8 = "3.8.2" -grpcio-tools = "1.27.2" +grpcio-tools = "1.43.0" isort = "4.3.21" mock = "4.0.2" pre-commit = "2.1.1" -pytest = "5.4.3" + 
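Illustrative sketch (not part of the patch): the Linked rpc and LinkedRequest message added to core.proto above allow a client to bring an existing link up or down without deleting it. The snippet drives the rpc through the raw generated stub; the generated module and stub names (core_pb2, core_pb2_grpc.CoreApiStub), the localhost:50051 address, and the session/node/interface ids are assumptions for the example, and the wrapped CoreGrpcClient may expose a friendlier call for the same rpc.

    import grpc

    from core.api.grpc import core_pb2, core_pb2_grpc

    # connect to a running core-daemon (address and port are assumed defaults)
    channel = grpc.insecure_channel("localhost:50051")
    stub = core_pb2_grpc.CoreApiStub(channel)

    # linked=False takes the wired link between iface 0 of nodes 1 and 2 down
    # without destroying it; sending the same request with linked=True restores it
    request = core_pb2.LinkedRequest(
        session_id=1,
        node1_id=1,
        node2_id=2,
        iface1_id=0,
        iface2_id=0,
        linked=False,
    )
    stub.Linked(request)
    channel.close()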
+[tool.poetry.group.dev.dependencies] +pytest = "6.2.5" [tool.isort] skip_glob = "*_pb2*.py,doc,build" diff --git a/daemon/scripts/core-cleanup b/daemon/scripts/core-cleanup deleted file mode 100755 index ced76634..00000000 --- a/daemon/scripts/core-cleanup +++ /dev/null @@ -1,71 +0,0 @@ -#!/bin/sh - -if [ "z$1" = "z-h" -o "z$1" = "z--help" ]; then - echo "usage: $0 [-d [-l]]" - echo -n " Clean up all CORE namespaces processes, bridges, interfaces, " - echo "and session\n directories. Options:" - echo " -h show this help message and exit" - echo " -d also kill the Python daemon" - echo " -l remove the core-daemon.log file" - exit 0 -fi - -if [ `id -u` != 0 ]; then - echo "Permission denied. Re-run this script as root." - exit 1 -fi - -PATH="/sbin:/bin:/usr/sbin:/usr/bin" -export PATH - -if [ "z$1" = "z-d" ]; then - pypids=`pidof python3 python` - for p in $pypids; do - grep -q core-daemon /proc/$p/cmdline - if [ $? = 0 ]; then - echo "cleaning up core-daemon process: $p" - kill -9 $p - fi - done -fi - -if [ "z$2" = "z-l" ]; then - rm -f /var/log/core-daemon.log -fi - -kaopts="-v" -killall --help 2>&1 | grep -q namespace -if [ $? = 0 ]; then - kaopts="$kaopts --ns 0" -fi - -vnodedpids=`pidof vnoded` -if [ "z$vnodedpids" != "z" ]; then - echo "cleaning up old vnoded processes: $vnodedpids" - killall $kaopts -KILL vnoded - # pause for 1 second for interfaces to disappear - sleep 1 -fi -killall -q emane -killall -q emanetransportd -killall -q emaneeventservice - -if [ -d /sys/class/net ]; then - ifcommand="ls -1 /sys/class/net" -else - ifcommand="ip -o link show | sed -r -e 's/[0-9]+: ([^[:space:]]+): .*/\1/'" -fi - -eval "$ifcommand" | awk ' - /^veth[0-9]+\./ {print "removing interface " $1; system("ip link del " $1);} - /tmp\./ {print "removing interface " $1; system("ip link del " $1);} - /gt\./ {print "removing interface " $1; system("ip link del " $1);} - /b\./ {print "removing bridge " $1; system("ip link set " $1 " down; ip link del " $1);} - /ctrl[0-9]+\./ {print "removing bridge " $1; system("ip link set " $1 " down; ip link del " $1);} -' - -nft list ruleset | awk ' - $3 ~ /^b\./ {print "removing nftables " $3; system("nft delete table bridge " $3);} -' - -rm -rf /tmp/pycore* diff --git a/daemon/scripts/core-imn-to-xml b/daemon/scripts/core-imn-to-xml deleted file mode 100755 index c11533a4..00000000 --- a/daemon/scripts/core-imn-to-xml +++ /dev/null @@ -1,70 +0,0 @@ -#!/usr/bin/env python3 -import argparse -import re -import sys -from pathlib import Path - -from core import utils -from core.api.grpc.client import CoreGrpcClient -from core.errors import CoreCommandError - -if __name__ == "__main__": - # parse flags - parser = argparse.ArgumentParser(description="Converts CORE imn files to xml") - parser.add_argument("-f", "--file", dest="file", help="imn file to convert") - parser.add_argument( - "-d", "--dest", dest="dest", default=None, help="destination for xml file, defaults to same location as imn" - ) - args = parser.parse_args() - - # validate provided file exists - imn_file = Path(args.file) - if not imn_file.exists(): - print(f"{args.file} does not exist") - sys.exit(1) - - # validate destination - if args.dest is not None: - dest = Path(args.dest) - if not dest.exists() or not dest.is_dir(): - print(f"{dest.resolve()} does not exist or is not a directory") - sys.exit(1) - xml_file = Path(dest, imn_file.with_suffix(".xml").name) - else: - xml_file = Path(imn_file.with_suffix(".xml").name) - - # validate xml file - if xml_file.exists(): - print(f"{xml_file.resolve()} already 
exists") - sys.exit(1) - - # run provided imn using core-gui batch mode - try: - print(f"running {imn_file.resolve()} in batch mode") - output = utils.cmd(f"core-gui --batch {imn_file.resolve()}") - last_line = output.split("\n")[-1].strip() - - # check for active session - if last_line == "Another session is active.": - print("need to restart core-daemon or shutdown previous batch session") - sys.exit(1) - - # parse session id - m = re.search(r"Session id is (\d+)\.", last_line) - if not m: - print(f"failed to find session id: {output}") - sys.exit(1) - session_id = int(m.group(1)) - print(f"created session {session_id}") - - # save xml and delete session - client = CoreGrpcClient() - with client.context_connect(): - print(f"saving xml {xml_file.resolve()}") - client.save_xml(session_id, str(xml_file)) - - print(f"deleting session {session_id}") - client.delete_session(session_id) - except CoreCommandError as e: - print(f"core-gui batch failed for {imn_file.resolve()}: {e}") - sys.exit(1) diff --git a/daemon/scripts/core-manage b/daemon/scripts/core-manage deleted file mode 100755 index 5587c9ae..00000000 --- a/daemon/scripts/core-manage +++ /dev/null @@ -1,247 +0,0 @@ -#!/usr/bin/env python3 -""" -core-manage: Helper tool to add, remove, or check for services, models, and -node types in a CORE installation. -""" - -import ast -import optparse -import os -import re -import sys - -from core import services -from core.constants import CORE_CONF_DIR - - -class FileUpdater: - """ - Helper class for changing configuration files. - """ - actions = ("add", "remove", "check") - targets = ("service", "model", "nodetype") - - def __init__(self, action, target, data, options): - """ - """ - self.action = action - self.target = target - self.data = data - self.options = options - self.verbose = options.verbose - self.search, self.filename = self.get_filename(target) - - def process(self): - """ Invoke update_file() using a helper method depending on target. - """ - if self.verbose: - txt = "Updating" - if self.action == "check": - txt = "Checking" - sys.stdout.write(f"{txt} file: {self.filename}\n") - - if self.target == "service": - r = self.update_file(fn=self.update_services) - elif self.target == "model": - r = self.update_file(fn=self.update_emane_models) - elif self.target == "nodetype": - r = self.update_nodes_conf() - - if self.verbose: - txt = "" - if not r: - txt = "NOT " - if self.action == "check": - sys.stdout.write(f"String {txt} found.\n") - else: - sys.stdout.write(f"File {txt} updated.\n") - - return r - - def update_services(self, line): - """ Modify the __init__.py file having this format: - __all__ = ["quagga", "nrl", "xorp", "bird", ] - Returns True or False when "check" is the action, a modified line - otherwise. - """ - line = line.strip("\n") - key, valstr = line.split("= ") - vals = ast.literal_eval(valstr) - r = self.update_keyvals(key, vals) - if self.action == "check": - return r - valstr = str(r) - return "= ".join([key, valstr]) + "\n" - - def update_emane_models(self, line): - """ Modify the core.conf file having this format: - emane_models = RfPipe, Ieee80211abg, CommEffect, Bypass - Returns True or False when "check" is the action, a modified line - otherwise. 
- """ - line = line.strip("\n") - key, valstr = line.split("= ") - vals = valstr.split(", ") - r = self.update_keyvals(key, vals) - if self.action == "check": - return r - valstr = ", ".join(r) - return "= ".join([key, valstr]) + "\n" - - def update_keyvals(self, key, vals): - """ Perform self.action on (key, vals). - Returns True or False when "check" is the action, a modified line - otherwise. - """ - if self.action == "check": - if self.data in vals: - return True - else: - return False - elif self.action == "add": - if self.data not in vals: - vals.append(self.data) - elif self.action == "remove": - try: - vals.remove(self.data) - except ValueError: - pass - return vals - - def get_filename(self, target): - """ Return search string and filename based on target. - """ - if target == "service": - filename = os.path.abspath(services.__file__) - search = "__all__ =" - elif target == "model": - filename = os.path.join(CORE_CONF_DIR, "core.conf") - search = "emane_models =" - elif target == "nodetype": - if self.options.userpath is None: - raise ValueError("missing user path") - filename = os.path.join(self.options.userpath, "nodes.conf") - search = self.data - else: - raise ValueError("unknown target") - if not os.path.exists(filename): - raise ValueError(f"file {filename} does not exist") - return search, filename - - def update_file(self, fn=None): - """ Open a file and search for self.search, invoking the supplied - function on the matching line. Write file changes if necessary. - Returns True if the file has changed (or action is "check" and the - search string is found), False otherwise. - """ - changed = False - output = "" # this accumulates output, assumes input is small - with open(self.filename, "r") as f: - for line in f: - if line[:len(self.search)] == self.search: - r = fn(line) # line may be modified by fn() here - if self.action == "check": - return r - else: - if line != r: - changed = True - line = r - output += line - if changed: - with open(self.filename, "w") as f: - f.write(output) - - return changed - - def update_nodes_conf(self): - """ Add/remove/check entries from nodes.conf. This file - contains a Tcl-formatted array of node types. The array index must be - properly set for new entries. Uses self.{action, filename, search, - data} variables as input and returns the same value as update_file(). 
- """ - changed = False - output = "" # this accumulates output, assumes input is small - with open(self.filename, "r") as f: - for line in f: - # make sure data is not added twice - if line.find(self.search) >= 0: - if self.action == "check": - return True - elif self.action == "add": - return False - elif self.action == "remove": - changed = True - continue - else: - output += line - - if self.action == "add": - index = int(re.match("^\d+", line).group(0)) - output += str(index + 1) + " " + self.data + "\n" - changed = True - if changed: - with open(self.filename, "w") as f: - f.write(output) - - return changed - - -def main(): - actions = ", ".join(FileUpdater.actions) - targets = ", ".join(FileUpdater.targets) - usagestr = "usage: %prog [-h] [options] \n" - usagestr += "\nHelper tool to add, remove, or check for " - usagestr += "services, models, and node types\nin a CORE installation.\n" - usagestr += "\nExamples:\n %prog add service newrouting" - usagestr += "\n %prog -v check model RfPipe" - usagestr += "\n %prog --userpath=\"$HOME/.core\" add nodetype \"{ftp ftp.gif ftp.gif {DefaultRoute FTP} netns {FTP server} }\" \n" - usagestr += f"\nArguments:\n should be one of: {actions}" - usagestr += f"\n should be one of: {targets}" - usagestr += f"\n is the text to {actions}" - parser = optparse.OptionParser(usage=usagestr) - parser.set_defaults(userpath=None, verbose=False, ) - - parser.add_option("--userpath", dest="userpath", type="string", - help="use the specified user path (e.g. \"$HOME/.core" \ - "\") to access nodes.conf") - parser.add_option("-v", "--verbose", dest="verbose", action="store_true", - help="be verbose when performing action") - - def usage(msg=None, err=0): - sys.stdout.write("\n") - if msg: - sys.stdout.write(msg + "\n\n") - parser.print_help() - sys.exit(err) - - (options, args) = parser.parse_args() - - if len(args) != 3: - usage("Missing required arguments!", 1) - - action = args[0] - if action not in FileUpdater.actions: - usage(f"invalid action {action}", 1) - - target = args[1] - if target not in FileUpdater.targets: - usage(f"invalid target {target}", 1) - - if target == "nodetype" and not options.userpath: - usage(f"user path option required for this target ({target})") - - data = args[2] - - try: - up = FileUpdater(action, target, data, options) - r = up.process() - except Exception as e: - sys.stderr.write(f"Exception: {e}\n") - sys.exit(1) - if not r: - sys.exit(1) - sys.exit(0) - - -if __name__ == "__main__": - main() diff --git a/daemon/scripts/coresendmsg b/daemon/scripts/coresendmsg deleted file mode 100755 index 13e20b5c..00000000 --- a/daemon/scripts/coresendmsg +++ /dev/null @@ -1,279 +0,0 @@ -#!/usr/bin/env python3 -""" -coresendmsg: utility for generating CORE messages -""" - -import optparse -import os -import socket -import sys - -from core.api.tlv import coreapi -from core.api.tlv.enumerations import CORE_API_PORT, MessageTypes, SessionTlvs -from core.emulator.enumerations import MessageFlags - - -def print_available_tlvs(t, tlv_class): - """ - Print a TLV list. - """ - print(f"TLVs available for {t} message:") - for tlv in sorted([tlv for tlv in tlv_class.tlv_type_map], key=lambda x: x.name): - print(tlv.name.lower()) - - -def print_examples(name): - """ - Print example usage of this script. 
- """ - examples = [ - ("node number=3 x_position=125 y_position=525", - "move node number 3 to x,y=(125,525)"), - ("node number=4 icon=/usr/local/share/core/icons/normal/router_red.gif", - "change node number 4\"s icon to red"), - ("node flags=add number=5 type=0 name=\"n5\" x_position=500 y_position=500", - "add a new router node n5"), - ("link n1_number=2 n2_number=3 delay=15000", - "set a 15ms delay on the link between n2 and n3"), - ("link n1_number=2 n2_number=3 gui_attributes=\"color=blue\"", - "change the color of the link between n2 and n3"), - ("link flags=add n1_number=4 n2_number=5 interface1_ip4=\"10.0.3.2\" " - "interface1_ip4_mask=24 interface2_ip4=\"10.0.3.1\" interface2_ip4_mask=24", - "link node n5 with n4 using the given interface addresses"), - ("execute flags=string,text node=1 number=1000 command=\"uname -a\" -l", - "run a command on node 1 and wait for the result"), - ("execute node=2 number=1001 command=\"killall ospfd\"", - "run a command on node 2 and ignore the result"), - ("file flags=add node=1 name=\"/var/log/test.log\" data=\"hello world.\"", - "write a test.log file on node 1 with the given contents"), - ("file flags=add node=2 name=\"test.log\" source_name=\"./test.log\"", - "move a test.log file from host to node 2"), - ] - print(f"Example {name} invocations:") - for cmd, descr in examples: - print(f" {name} {cmd}\n\t\t{descr}") - - -def receive_message(sock): - """ - Retrieve a message from a socket and return the CoreMessage object or - None upon disconnect. Socket data beyond the first message is dropped. - """ - try: - # large receive buffer used for UDP sockets, instead of just receiving - # the 4-byte header - data = sock.recv(4096) - msghdr = data[:coreapi.CoreMessage.header_len] - except KeyboardInterrupt: - print("CTRL+C pressed") - sys.exit(1) - - if len(msghdr) == 0: - return None - - msgdata = None - msgtype, msgflags, msglen = coreapi.CoreMessage.unpack_header(msghdr) - - if msglen: - msgdata = data[coreapi.CoreMessage.header_len:] - try: - msgcls = coreapi.CLASS_MAP[msgtype] - except KeyError: - msg = coreapi.CoreMessage(msgflags, msghdr, msgdata) - msg.message_type = msgtype - print(f"unimplemented CORE message type: {msg.type_str()}") - return msg - if len(data) > msglen + coreapi.CoreMessage.header_len: - data_size = len(data) - (msglen + coreapi.CoreMessage.header_len) - print(f"received a message of type {msgtype}, dropping {data_size} bytes of extra data") - return msgcls(msgflags, msghdr, msgdata) - - -def connect_to_session(sock, requested): - """ - Use Session Messages to retrieve the current list of sessions and - connect to the first one. 
- """ - # request the session list - tlvdata = coreapi.CoreSessionTlv.pack(SessionTlvs.NUMBER.value, "") - flags = MessageFlags.STRING.value - smsg = coreapi.CoreSessionMessage.pack(flags, tlvdata) - sock.sendall(smsg) - - print("waiting for session list...") - smsgreply = receive_message(sock) - if smsgreply is None: - print("disconnected") - return False - - sessstr = smsgreply.get_tlv(SessionTlvs.NUMBER.value) - if sessstr is None: - print("missing session numbers") - return False - - # join the first session (that is not our own connection) - tmp, localport = sock.getsockname() - sessions = sessstr.split("|") - sessions.remove(str(localport)) - if len(sessions) == 0: - print("no sessions to join") - return False - - if not requested: - session = sessions[0] - elif requested in sessions: - session = requested - else: - print("requested session not found!") - return False - - print(f"joining session: {session}") - tlvdata = coreapi.CoreSessionTlv.pack(SessionTlvs.NUMBER.value, session) - flags = MessageFlags.ADD.value - smsg = coreapi.CoreSessionMessage.pack(flags, tlvdata) - sock.sendall(smsg) - return True - - -def receive_response(sock, opt): - """ - Receive and print a CORE message from the given socket. - """ - print("waiting for response...") - msg = receive_message(sock) - if msg is None: - print(f"disconnected from {opt.address}:{opt.port}") - sys.exit(0) - print(f"received message: {msg}") - - -def main(): - """ - Parse command-line arguments to build and send a CORE message. - """ - types = [message_type.name.lower() for message_type in MessageTypes] - flags = [flag.name.lower() for flag in MessageFlags] - types_usage = " ".join(types) - flags_usage = " ".join(flags) - usagestr = ( - "usage: %prog [-h|-H] [options] [message-type] [flags=flags] " - "[message-TLVs]\n\n" - f"Supported message types:\n {types_usage}\n" - f"Supported message flags (flags=f1,f2,...):\n {flags_usage}" - ) - parser = optparse.OptionParser(usage=usagestr) - default_address = "localhost" - default_session = None - default_tcp = False - parser.set_defaults( - port=CORE_API_PORT, - address=default_address, - session=default_session, - listen=False, - examples=False, - tlvs=False, - tcp=default_tcp - ) - parser.add_option("-H", dest="examples", action="store_true", - help="show example usage help message and exit") - parser.add_option("-p", "--port", dest="port", type=int, - help=f"TCP port to connect to, default: {CORE_API_PORT}") - parser.add_option("-a", "--address", dest="address", type=str, - help=f"Address to connect to, default: {default_address}") - parser.add_option("-s", "--session", dest="session", type=str, - help=f"Session to join, default: {default_session}") - parser.add_option("-l", "--listen", dest="listen", action="store_true", - help="Listen for a response message and print it.") - parser.add_option("-t", "--list-tlvs", dest="tlvs", action="store_true", - help="List TLVs for the specified message type.") - parser.add_option("--tcp", dest="tcp", action="store_true", - help=f"Use TCP instead of UDP and connect to a session default: {default_tcp}") - - def usage(msg=None, err=0): - print() - if msg: - print(f"{msg}\n") - parser.print_help() - sys.exit(err) - - # parse command line opt - opt, args = parser.parse_args() - if opt.examples: - print_examples(os.path.basename(sys.argv[0])) - sys.exit(0) - if len(args) == 0: - usage("Please specify a message type to send.") - - # given a message type t, determine the message and TLV classes - t = args.pop(0) - t = t.lower() - if t not in types: - 
usage(f"Unknown message type requested: {t}") - message_type = MessageTypes[t.upper()] - msg_cls = coreapi.CLASS_MAP[message_type.value] - tlv_cls = msg_cls.tlv_class - - # list TLV types for this message type - if opt.tlvs: - print_available_tlvs(t, tlv_cls) - sys.exit(0) - - # build a message consisting of TLVs from "type=value" arguments - flagstr = "" - tlvdata = b"" - for a in args: - typevalue = a.split("=") - if len(typevalue) < 2: - usage(f"Use \"type=value\" syntax instead of \"{a}\".") - tlv_typestr = typevalue[0].lower() - tlv_valstr = "=".join(typevalue[1:]) - if tlv_typestr == "flags": - flagstr = tlv_valstr - continue - try: - tlv_type = tlv_cls.tlv_type_map[tlv_typestr.upper()] - tlvdata += tlv_cls.pack_string(tlv_type.value, tlv_valstr) - except KeyError: - usage(f"Unknown TLV: \"{tlv_typestr}\"") - - flags = 0 - for f in flagstr.split(","): - if f == "": - continue - try: - flag_enum = MessageFlags[f.upper()] - n = flag_enum.value - flags |= n - except KeyError: - usage(f"Invalid flag \"{f}\".") - - msg = msg_cls.pack(flags, tlvdata) - - if opt.tcp: - protocol = socket.SOCK_STREAM - else: - protocol = socket.SOCK_DGRAM - - sock = socket.socket(socket.AF_INET, protocol) - sock.setblocking(True) - - try: - sock.connect((opt.address, opt.port)) - except Exception as e: - print(f"Error connecting to {opt.address}:{opt.port}:\n\t{e}") - sys.exit(1) - - if opt.tcp and not connect_to_session(sock, opt.session): - print("warning: continuing without joining a session!") - - sock.sendall(msg) - if opt.listen: - receive_response(sock, opt) - if opt.tcp: - sock.shutdown(socket.SHUT_RDWR) - sock.close() - sys.exit(0) - - -if __name__ == "__main__": - main() diff --git a/daemon/tests/conftest.py b/daemon/tests/conftest.py index 98552540..b668fb07 100644 --- a/daemon/tests/conftest.py +++ b/daemon/tests/conftest.py @@ -7,11 +7,9 @@ import time import mock import pytest -from mock.mock import MagicMock from core.api.grpc.client import InterfaceHelper from core.api.grpc.server import CoreGrpcServer -from core.api.tlv.corehandlers import CoreHandler from core.emulator.coreemu import CoreEmu from core.emulator.data import IpPrefixes from core.emulator.distributed import DistributedServer @@ -61,8 +59,6 @@ def patcher(request): LinuxNetClient, "get_mac", return_value="00:00:00:00:00:00" ) patch_manager.patch_obj(CoreNode, "create_file") - patch_manager.patch_obj(Session, "write_state") - patch_manager.patch_obj(Session, "write_nodes") yield patch_manager patch_manager.shutdown() @@ -104,17 +100,6 @@ def module_grpc(global_coreemu): grpc_server.server.stop(None) -@pytest.fixture(scope="module") -def module_coretlv(patcher, global_coreemu, global_session): - request_mock = MagicMock() - request_mock.fileno = MagicMock(return_value=1) - server = MockServer(global_coreemu) - request_handler = CoreHandler(request_mock, "", server) - request_handler.session = global_session - request_handler.add_session_handlers() - yield request_handler - - @pytest.fixture def grpc_server(module_grpc): yield module_grpc @@ -130,16 +115,6 @@ def session(global_session): global_session.clear() -@pytest.fixture -def coretlv(module_coretlv): - session = module_coretlv.session - session.set_state(EventTypes.CONFIGURATION_STATE) - coreemu = module_coretlv.coreemu - coreemu.sessions[session.id] = session - yield module_coretlv - coreemu.shutdown() - - def pytest_addoption(parser): parser.addoption("--distributed", help="distributed server address") parser.addoption("--mock", action="store_true", help="run without 
mocking") diff --git a/daemon/tests/emane/test_emane.py b/daemon/tests/emane/test_emane.py index 5cb14bdc..2ddb1a5d 100644 --- a/daemon/tests/emane/test_emane.py +++ b/daemon/tests/emane/test_emane.py @@ -16,10 +16,10 @@ from core.emane.models.ieee80211abg import EmaneIeee80211abgModel from core.emane.models.rfpipe import EmaneRfPipeModel from core.emane.models.tdma import EmaneTdmaModel from core.emane.nodes import EmaneNet -from core.emulator.data import IpPrefixes, NodeOptions +from core.emulator.data import IpPrefixes from core.emulator.session import Session from core.errors import CoreCommandError, CoreError -from core.nodes.base import CoreNode +from core.nodes.base import CoreNode, Position _EMANE_MODELS = [ EmaneIeee80211abgModel, @@ -53,19 +53,22 @@ class TestEmane: """ # create emane node for networking the core nodes session.set_location(47.57917, -122.13232, 2.00000, 1.0) - options = NodeOptions() - options.set_position(80, 50) - options.emane = EmaneIeee80211abgModel.name - emane_net1 = session.add_node(EmaneNet, options=options) - options.emane = EmaneRfPipeModel.name - emane_net2 = session.add_node(EmaneNet, options=options) + options = EmaneNet.create_options() + options.emane_model = EmaneIeee80211abgModel.name + position = Position(x=80, y=50) + emane_net1 = session.add_node(EmaneNet, position=position, options=options) + options = EmaneNet.create_options() + options.emane_model = EmaneRfPipeModel.name + position = Position(x=80, y=50) + emane_net2 = session.add_node(EmaneNet, position=position, options=options) # create nodes - options = NodeOptions(model="mdr") - options.set_position(150, 150) - node1 = session.add_node(CoreNode, options=options) - options.set_position(300, 150) - node2 = session.add_node(CoreNode, options=options) + options = CoreNode.create_options() + options.model = "mdr" + position = Position(x=150, y=150) + node1 = session.add_node(CoreNode, position=position, options=options) + position = Position(x=300, y=150) + node2 = session.add_node(CoreNode, position=position, options=options) # create interfaces ip_prefix1 = IpPrefixes("10.0.0.0/24") @@ -100,9 +103,10 @@ class TestEmane: # create emane node for networking the core nodes session.set_location(47.57917, -122.13232, 2.00000, 1.0) - options = NodeOptions(emane=model.name) - options.set_position(80, 50) - emane_network = session.add_node(EmaneNet, options=options) + options = EmaneNet.create_options() + options.emane_model = model.name + position = Position(x=80, y=50) + emane_network = session.add_node(EmaneNet, position=position, options=options) # configure tdma if model == EmaneTdmaModel: @@ -111,11 +115,12 @@ class TestEmane: ) # create nodes - options = NodeOptions(model="mdr") - options.set_position(150, 150) - node1 = session.add_node(CoreNode, options=options) - options.set_position(300, 150) - node2 = session.add_node(CoreNode, options=options) + options = CoreNode.create_options() + options.model = "mdr" + position = Position(x=150, y=150) + node1 = session.add_node(CoreNode, position=position, options=options) + position = Position(x=300, y=150) + node2 = session.add_node(CoreNode, position=position, options=options) for i, node in enumerate([node1, node2]): node.setposition(x=150 * (i + 1), y=150) @@ -141,9 +146,10 @@ class TestEmane: """ # create emane node for networking the core nodes session.set_location(47.57917, -122.13232, 2.00000, 1.0) - options = NodeOptions(emane=EmaneIeee80211abgModel.name) - options.set_position(80, 50) - emane_network = session.add_node(EmaneNet, 
options=options) + options = EmaneNet.create_options() + options.emane_model = EmaneIeee80211abgModel.name + position = Position(x=80, y=50) + emane_network = session.add_node(EmaneNet, position=position, options=options) config_key = "txpower" config_value = "10" session.emane.set_config( @@ -151,11 +157,12 @@ class TestEmane: ) # create nodes - options = NodeOptions(model="mdr") - options.set_position(150, 150) - node1 = session.add_node(CoreNode, options=options) - options.set_position(300, 150) - node2 = session.add_node(CoreNode, options=options) + options = CoreNode.create_options() + options.model = "mdr" + position = Position(x=150, y=150) + node1 = session.add_node(CoreNode, position=position, options=options) + position = Position(x=300, y=150) + node2 = session.add_node(CoreNode, position=position, options=options) for i, node in enumerate([node1, node2]): node.setposition(x=150 * (i + 1), y=150) @@ -205,14 +212,17 @@ class TestEmane: self, session: Session, tmpdir: TemporaryFile, ip_prefixes: IpPrefixes ): # create nodes - options = NodeOptions(model="mdr", x=50, y=50) - node1 = session.add_node(CoreNode, options=options) + options = CoreNode.create_options() + options.model = "mdr" + position = Position(x=50, y=50) + node1 = session.add_node(CoreNode, position=position, options=options) iface1_data = ip_prefixes.create_iface(node1) - node2 = session.add_node(CoreNode, options=options) + node2 = session.add_node(CoreNode, position=position, options=options) iface2_data = ip_prefixes.create_iface(node2) # create emane node - options = NodeOptions(model=None, emane=EmaneRfPipeModel.name) + options = EmaneNet.create_options() + options.emane_model = EmaneRfPipeModel.name emane_node = session.add_node(EmaneNet, options=options) # create links @@ -255,11 +265,7 @@ class TestEmane: assert session.get_node(node1.id, CoreNode) assert session.get_node(node2.id, CoreNode) assert session.get_node(emane_node.id, EmaneNet) - links = [] - for node_id in session.nodes: - node = session.nodes[node_id] - links += node.links() - assert len(links) == 2 + assert len(session.link_manager.links()) == 2 config = session.emane.get_config(node1.id, EmaneRfPipeModel.name) assert config["datarate"] == datarate @@ -267,14 +273,17 @@ class TestEmane: self, session: Session, tmpdir: TemporaryFile, ip_prefixes: IpPrefixes ): # create nodes - options = NodeOptions(model="mdr", x=50, y=50) - node1 = session.add_node(CoreNode, options=options) + options = CoreNode.create_options() + options.model = "mdr" + position = Position(x=50, y=50) + node1 = session.add_node(CoreNode, position=position, options=options) iface1_data = ip_prefixes.create_iface(node1) - node2 = session.add_node(CoreNode, options=options) + node2 = session.add_node(CoreNode, position=position, options=options) iface2_data = ip_prefixes.create_iface(node2) # create emane node - options = NodeOptions(model=None, emane=EmaneRfPipeModel.name) + options = EmaneNet.create_options() + options.emane_model = EmaneRfPipeModel.name emane_node = session.add_node(EmaneNet, options=options) # create links @@ -318,10 +327,6 @@ class TestEmane: assert session.get_node(node1.id, CoreNode) assert session.get_node(node2.id, CoreNode) assert session.get_node(emane_node.id, EmaneNet) - links = [] - for node_id in session.nodes: - node = session.nodes[node_id] - links += node.links() - assert len(links) == 2 + assert len(session.link_manager.links()) == 2 config = session.emane.get_config(config_id, EmaneRfPipeModel.name) assert config["datarate"] == datarate diff 
--git a/daemon/tests/test_core.py b/daemon/tests/test_core.py index 3fbd91cb..919e4478 100644 --- a/daemon/tests/test_core.py +++ b/daemon/tests/test_core.py @@ -8,8 +8,7 @@ from typing import List, Type import pytest -from core.emulator.data import IpPrefixes, NodeOptions -from core.emulator.enumerations import MessageFlags +from core.emulator.data import IpPrefixes from core.emulator.session import Session from core.errors import CoreCommandError from core.location.mobility import BasicRangeModel, Ns2ScriptedMobility @@ -63,44 +62,6 @@ class TestCore: status = ping(node1, node2, ip_prefixes) assert not status - def test_iface(self, session: Session, ip_prefixes: IpPrefixes): - """ - Test interface methods. - - :param session: session for test - :param ip_prefixes: generates ip addresses for nodes - """ - - # create ptp - ptp_node = session.add_node(PtpNet) - - # create nodes - node1 = session.add_node(CoreNode) - node2 = session.add_node(CoreNode) - - # link nodes to ptp net - for node in [node1, node2]: - iface = ip_prefixes.create_iface(node) - session.add_link(node.id, ptp_node.id, iface1_data=iface) - - # instantiate session - session.instantiate() - - # check link data gets generated - assert ptp_node.links(MessageFlags.ADD) - - # check common nets exist between linked nodes - assert node1.commonnets(node2) - assert node2.commonnets(node1) - - # check we can retrieve interface id - assert 0 in node1.ifaces - assert 0 in node2.ifaces - - # delete interface and test that if no longer exists - node1.delete_iface(0) - assert 0 not in node1.ifaces - def test_wlan_ping(self, session: Session, ip_prefixes: IpPrefixes): """ Test basic wlan network. @@ -114,8 +75,8 @@ class TestCore: session.mobility.set_model(wlan_node, BasicRangeModel) # create nodes - options = NodeOptions(model="mdr") - options.set_position(0, 0) + options = CoreNode.create_options() + options.model = "mdr" node1 = session.add_node(CoreNode, options=options) node2 = session.add_node(CoreNode, options=options) @@ -144,8 +105,8 @@ class TestCore: session.mobility.set_model(wlan_node, BasicRangeModel) # create nodes - options = NodeOptions(model="mdr") - options.set_position(0, 0) + options = CoreNode.create_options() + options.model = "mdr" node1 = session.add_node(CoreNode, options=options) node2 = session.add_node(CoreNode, options=options) diff --git a/daemon/tests/test_distributed.py b/daemon/tests/test_distributed.py index 01362cae..3a9d43fb 100644 --- a/daemon/tests/test_distributed.py +++ b/daemon/tests/test_distributed.py @@ -1,4 +1,3 @@ -from core.emulator.data import NodeOptions from core.emulator.session import Session from core.nodes.base import CoreNode from core.nodes.network import HubNode @@ -12,8 +11,7 @@ class TestDistributed: # when session.distributed.add_server(server_name, host) - options = NodeOptions(server=server_name) - node = session.add_node(CoreNode, options=options) + node = session.add_node(CoreNode, server=server_name) session.instantiate() # then @@ -29,12 +27,13 @@ class TestDistributed: # when session.distributed.add_server(server_name, host) - options = NodeOptions(server=server_name) - node = session.add_node(HubNode, options=options) + node1 = session.add_node(HubNode) + node2 = session.add_node(HubNode, server=server_name) + session.add_link(node1.id, node2.id) session.instantiate() # then - assert node.server is not None - assert node.server.name == server_name - assert node.server.host == host - assert len(session.distributed.tunnels) > 0 + assert node2.server is not None + assert 
node2.server.name == server_name + assert node2.server.host == host + assert len(session.distributed.tunnels) == 1 diff --git a/daemon/tests/test_grpc.py b/daemon/tests/test_grpc.py index e56322ad..9aed3395 100644 --- a/daemon/tests/test_grpc.py +++ b/daemon/tests/test_grpc.py @@ -8,7 +8,7 @@ import grpc import pytest from mock import patch -from core.api.grpc import core_pb2, wrappers +from core.api.grpc import wrappers from core.api.grpc.client import CoreGrpcClient, InterfaceHelper, MoveNodesStreamer from core.api.grpc.server import CoreGrpcServer from core.api.grpc.wrappers import ( @@ -22,6 +22,7 @@ from core.api.grpc.wrappers import ( Link, LinkOptions, MobilityAction, + MoveNodesRequest, Node, NodeServiceData, NodeType, @@ -31,12 +32,10 @@ from core.api.grpc.wrappers import ( SessionLocation, SessionState, ) -from core.api.tlv.dataconversion import ConfigShim -from core.api.tlv.enumerations import ConfigFlags from core.emane.models.ieee80211abg import EmaneIeee80211abgModel from core.emane.nodes import EmaneNet -from core.emulator.data import EventData, IpPrefixes, NodeData, NodeOptions -from core.emulator.enumerations import EventTypes, ExceptionLevels +from core.emulator.data import EventData, IpPrefixes, NodeData +from core.emulator.enumerations import EventTypes, ExceptionLevels, MessageFlags from core.errors import CoreError from core.location.mobility import BasicRangeModel, Ns2ScriptedMobility from core.nodes.base import CoreNode @@ -163,7 +162,7 @@ class TestGrpc: real_node1, service_name, service_file ) assert service_file.data == service_file_data - assert option_value == real_session.options.get_config(option_key) + assert option_value == real_session.options.get(option_key) @pytest.mark.parametrize("session_id", [None, 6013]) def test_create_session( @@ -351,8 +350,7 @@ class TestGrpc: client = CoreGrpcClient() session = grpc_server.coreemu.create_session() session.set_state(EventTypes.CONFIGURATION_STATE) - options = NodeOptions(model="Host") - node = session.add_node(CoreNode, options=options) + node = session.add_node(CoreNode) session.instantiate() expected_output = "hello world" expected_status = 0 @@ -370,8 +368,7 @@ class TestGrpc: client = CoreGrpcClient() session = grpc_server.coreemu.create_session() session.set_state(EventTypes.CONFIGURATION_STATE) - options = NodeOptions(model="Host") - node = session.add_node(CoreNode, options=options) + node = session.add_node(CoreNode) session.instantiate() # then @@ -415,7 +412,7 @@ class TestGrpc: session = grpc_server.coreemu.create_session() switch = session.add_node(SwitchNode) node = session.add_node(CoreNode) - assert len(switch.links()) == 0 + assert len(session.link_manager.links()) == 0 iface = InterfaceHelper("10.0.0.0/24").create_iface(node.id, 0) link = Link(node.id, switch.id, iface1=iface) @@ -425,7 +422,7 @@ class TestGrpc: # then assert result is True - assert len(switch.links()) == 1 + assert len(session.link_manager.links()) == 1 assert iface1.id == iface.id assert iface1.ip4 == iface.ip4 @@ -445,13 +442,14 @@ class TestGrpc: # given client = CoreGrpcClient() session = grpc_server.coreemu.create_session() + session.set_state(EventTypes.CONFIGURATION_STATE) switch = session.add_node(SwitchNode) node = session.add_node(CoreNode) - iface = ip_prefixes.create_iface(node) - session.add_link(node.id, switch.id, iface) + iface_data = ip_prefixes.create_iface(node) + iface, _ = session.add_link(node.id, switch.id, iface_data) + session.instantiate() options = LinkOptions(bandwidth=30000) - link = 
switch.links()[0] - assert options.bandwidth != link.options.bandwidth + assert iface.options.bandwidth != options.bandwidth link = Link(node.id, switch.id, iface1=Interface(id=iface.id), options=options) # then @@ -460,8 +458,7 @@ class TestGrpc: # then assert result is True - link = switch.links()[0] - assert options.bandwidth == link.options.bandwidth + assert options.bandwidth == iface.options.bandwidth def test_delete_link(self, grpc_server: CoreGrpcServer, ip_prefixes: IpPrefixes): # given @@ -472,13 +469,7 @@ class TestGrpc: node2 = session.add_node(CoreNode) iface2 = ip_prefixes.create_iface(node2) session.add_link(node1.id, node2.id, iface1, iface2) - link_node = None - for node_id in session.nodes: - node = session.nodes[node_id] - if node.id not in {node1.id, node2.id}: - link_node = node - break - assert len(link_node.links()) == 1 + assert len(session.link_manager.links()) == 1 link = Link( node1.id, node2.id, @@ -492,7 +483,7 @@ class TestGrpc: # then assert result is True - assert len(link_node.links()) == 0 + assert len(session.link_manager.links()) == 0 def test_get_wlan_config(self, grpc_server: CoreGrpcServer): # given @@ -537,14 +528,15 @@ class TestGrpc: assert result is True config = session.mobility.get_model_config(wlan.id, BasicRangeModel.name) assert config[range_key] == range_value - assert wlan.model.range == int(range_value) + assert wlan.wireless_model.range == int(range_value) def test_set_emane_model_config(self, grpc_server: CoreGrpcServer): # given client = CoreGrpcClient() session = grpc_server.coreemu.create_session() session.set_location(47.57917, -122.13232, 2.00000, 1.0) - options = NodeOptions(emane=EmaneIeee80211abgModel.name) + options = EmaneNet.create_options() + options.emane_model = EmaneIeee80211abgModel.name emane_network = session.add_node(EmaneNet, options=options) session.emane.node_models[emane_network.id] = EmaneIeee80211abgModel.name config_key = "bandwidth" @@ -574,7 +566,8 @@ class TestGrpc: client = CoreGrpcClient() session = grpc_server.coreemu.create_session() session.set_location(47.57917, -122.13232, 2.00000, 1.0) - options = NodeOptions(emane=EmaneIeee80211abgModel.name) + options = EmaneNet.create_options() + options.emane_model = EmaneIeee80211abgModel.name emane_network = session.add_node(EmaneNet, options=options) session.emane.node_models[emane_network.id] = EmaneIeee80211abgModel.name @@ -651,16 +644,16 @@ class TestGrpc: # given client = CoreGrpcClient() session = grpc_server.coreemu.create_session() - node_type = "test" + model = "test" services = ["SSH"] # then with client.context_connect(): - result = client.set_service_defaults(session.id, {node_type: services}) + result = client.set_service_defaults(session.id, {model: services}) # then assert result is True - assert session.services.default_services[node_type] == services + assert session.services.default_services[model] == services def test_get_node_service(self, grpc_server: CoreGrpcServer): # given @@ -694,7 +687,8 @@ class TestGrpc: # given client = CoreGrpcClient() session = grpc_server.coreemu.create_session() - options = NodeOptions(legacy=True) + options = CoreNode.create_options() + options.legacy = True node = session.add_node(CoreNode, options=options) service_name = "DefaultRoute" @@ -757,9 +751,11 @@ class TestGrpc: session = grpc_server.coreemu.create_session() wlan = session.add_node(WlanNode) node = session.add_node(CoreNode) - iface = ip_prefixes.create_iface(node) - session.add_link(node.id, wlan.id, iface) - link_data = wlan.links()[0] + 
iface_data = ip_prefixes.create_iface(node) + session.add_link(node.id, wlan.id, iface_data) + core_link = list(session.link_manager.links())[0] + link_data = core_link.get_data(MessageFlags.ADD) + queue = Queue() def handle_event(event: Event) -> None: @@ -820,30 +816,6 @@ class TestGrpc: # then queue.get(timeout=5) - def test_config_events(self, grpc_server: CoreGrpcServer): - # given - client = CoreGrpcClient() - session = grpc_server.coreemu.create_session() - queue = Queue() - - def handle_event(event: Event) -> None: - assert event.session_id == session.id - assert event.config_event is not None - queue.put(event) - - # then - with client.context_connect(): - client.events(session.id, handle_event) - time.sleep(0.1) - session_config = session.options.get_configs() - config_data = ConfigShim.config_data( - 0, None, ConfigFlags.UPDATE.value, session.options, session_config - ) - session.broadcast_config(config_data) - - # then - queue.get(timeout=5) - def test_exception_events(self, grpc_server: CoreGrpcServer): # given client = CoreGrpcClient() @@ -950,7 +922,7 @@ class TestGrpc: client = CoreGrpcClient() session = grpc_server.coreemu.create_session() streamer = MoveNodesStreamer(session.id) - request = core_pb2.MoveNodesRequest() + request = MoveNodesRequest(session.id + 1, 1) streamer.send(request) streamer.stop() @@ -958,3 +930,27 @@ class TestGrpc: with pytest.raises(grpc.RpcError): with client.context_connect(): client.move_nodes(streamer) + + def test_wlan_link(self, grpc_server: CoreGrpcServer, ip_prefixes: IpPrefixes): + # given + client = CoreGrpcClient() + session = grpc_server.coreemu.create_session() + session.set_state(EventTypes.CONFIGURATION_STATE) + wlan = session.add_node(WlanNode) + node1 = session.add_node(CoreNode) + node2 = session.add_node(CoreNode) + iface1_data = ip_prefixes.create_iface(node1) + iface2_data = ip_prefixes.create_iface(node2) + session.add_link(node1.id, wlan.id, iface1_data) + session.add_link(node2.id, wlan.id, iface2_data) + session.instantiate() + assert len(session.link_manager.links()) == 2 + + # when + with client.context_connect(): + result1 = client.wlan_link(session.id, wlan.id, node1.id, node2.id, True) + result2 = client.wlan_link(session.id, wlan.id, node1.id, node2.id, False) + + # then + assert result1 is True + assert result2 is True diff --git a/daemon/tests/test_gui.py b/daemon/tests/test_gui.py deleted file mode 100644 index b14f1fb1..00000000 --- a/daemon/tests/test_gui.py +++ /dev/null @@ -1,941 +0,0 @@ -""" -Tests for testing tlv message handling. 
-""" -import time -from pathlib import Path -from typing import Optional - -import mock -import netaddr -import pytest -from mock import MagicMock - -from core.api.tlv import coreapi -from core.api.tlv.corehandlers import CoreHandler -from core.api.tlv.enumerations import ( - ConfigFlags, - ConfigTlvs, - EventTlvs, - ExecuteTlvs, - FileTlvs, - LinkTlvs, - NodeTlvs, - SessionTlvs, -) -from core.emane.models.ieee80211abg import EmaneIeee80211abgModel -from core.emulator.enumerations import EventTypes, MessageFlags, NodeTypes, RegisterTlvs -from core.errors import CoreError -from core.location.mobility import BasicRangeModel -from core.nodes.base import CoreNode, NodeBase -from core.nodes.network import SwitchNode, WlanNode - - -def dict_to_str(values) -> str: - return "|".join(f"{x}={values[x]}" for x in values) - - -class TestGui: - @pytest.mark.parametrize( - "node_type, model", - [ - (NodeTypes.DEFAULT, "PC"), - (NodeTypes.EMANE, None), - (NodeTypes.HUB, None), - (NodeTypes.SWITCH, None), - (NodeTypes.WIRELESS_LAN, None), - (NodeTypes.TUNNEL, None), - ], - ) - def test_node_add( - self, coretlv: CoreHandler, node_type: NodeTypes, model: Optional[str] - ): - node_id = 1 - name = "node1" - message = coreapi.CoreNodeMessage.create( - MessageFlags.ADD.value, - [ - (NodeTlvs.NUMBER, node_id), - (NodeTlvs.TYPE, node_type.value), - (NodeTlvs.NAME, name), - (NodeTlvs.X_POSITION, 0), - (NodeTlvs.Y_POSITION, 0), - (NodeTlvs.MODEL, model), - ], - ) - - coretlv.handle_message(message) - node = coretlv.session.get_node(node_id, NodeBase) - assert node - assert node.name == name - - def test_node_update(self, coretlv: CoreHandler): - node_id = 1 - coretlv.session.add_node(CoreNode, _id=node_id) - x = 50 - y = 100 - message = coreapi.CoreNodeMessage.create( - 0, - [ - (NodeTlvs.NUMBER, node_id), - (NodeTlvs.X_POSITION, x), - (NodeTlvs.Y_POSITION, y), - ], - ) - - coretlv.handle_message(message) - - node = coretlv.session.get_node(node_id, NodeBase) - assert node is not None - assert node.position.x == x - assert node.position.y == y - - def test_node_delete(self, coretlv: CoreHandler): - node_id = 1 - coretlv.session.add_node(CoreNode, _id=node_id) - message = coreapi.CoreNodeMessage.create( - MessageFlags.DELETE.value, [(NodeTlvs.NUMBER, node_id)] - ) - - coretlv.handle_message(message) - - with pytest.raises(CoreError): - coretlv.session.get_node(node_id, NodeBase) - - def test_link_add_node_to_net(self, coretlv: CoreHandler): - node1_id = 1 - coretlv.session.add_node(CoreNode, _id=node1_id) - switch_id = 2 - coretlv.session.add_node(SwitchNode, _id=switch_id) - ip_prefix = netaddr.IPNetwork("10.0.0.0/24") - iface1_ip4 = str(ip_prefix[node1_id]) - message = coreapi.CoreLinkMessage.create( - MessageFlags.ADD.value, - [ - (LinkTlvs.N1_NUMBER, node1_id), - (LinkTlvs.N2_NUMBER, switch_id), - (LinkTlvs.IFACE1_NUMBER, 0), - (LinkTlvs.IFACE1_IP4, iface1_ip4), - (LinkTlvs.IFACE1_IP4_MASK, 24), - ], - ) - - coretlv.handle_message(message) - - switch_node = coretlv.session.get_node(switch_id, SwitchNode) - all_links = switch_node.links() - assert len(all_links) == 1 - - def test_link_add_net_to_node(self, coretlv: CoreHandler): - node1_id = 1 - coretlv.session.add_node(CoreNode, _id=node1_id) - switch_id = 2 - coretlv.session.add_node(SwitchNode, _id=switch_id) - ip_prefix = netaddr.IPNetwork("10.0.0.0/24") - iface2_ip4 = str(ip_prefix[node1_id]) - message = coreapi.CoreLinkMessage.create( - MessageFlags.ADD.value, - [ - (LinkTlvs.N1_NUMBER, switch_id), - (LinkTlvs.N2_NUMBER, node1_id), - 
(LinkTlvs.IFACE2_NUMBER, 0), - (LinkTlvs.IFACE2_IP4, iface2_ip4), - (LinkTlvs.IFACE2_IP4_MASK, 24), - ], - ) - - coretlv.handle_message(message) - - switch_node = coretlv.session.get_node(switch_id, SwitchNode) - all_links = switch_node.links() - assert len(all_links) == 1 - - def test_link_add_node_to_node(self, coretlv: CoreHandler): - node1_id = 1 - coretlv.session.add_node(CoreNode, _id=node1_id) - node2_id = 2 - coretlv.session.add_node(CoreNode, _id=node2_id) - ip_prefix = netaddr.IPNetwork("10.0.0.0/24") - iface1_ip4 = str(ip_prefix[node1_id]) - iface2_ip4 = str(ip_prefix[node2_id]) - message = coreapi.CoreLinkMessage.create( - MessageFlags.ADD.value, - [ - (LinkTlvs.N1_NUMBER, node1_id), - (LinkTlvs.N2_NUMBER, node2_id), - (LinkTlvs.IFACE1_NUMBER, 0), - (LinkTlvs.IFACE1_IP4, iface1_ip4), - (LinkTlvs.IFACE1_IP4_MASK, 24), - (LinkTlvs.IFACE2_NUMBER, 0), - (LinkTlvs.IFACE2_IP4, iface2_ip4), - (LinkTlvs.IFACE2_IP4_MASK, 24), - ], - ) - - coretlv.handle_message(message) - - all_links = [] - for node_id in coretlv.session.nodes: - node = coretlv.session.nodes[node_id] - all_links += node.links() - assert len(all_links) == 1 - - def test_link_update(self, coretlv: CoreHandler): - node1_id = 1 - coretlv.session.add_node(CoreNode, _id=node1_id) - switch_id = 2 - coretlv.session.add_node(SwitchNode, _id=switch_id) - ip_prefix = netaddr.IPNetwork("10.0.0.0/24") - iface1_ip4 = str(ip_prefix[node1_id]) - message = coreapi.CoreLinkMessage.create( - MessageFlags.ADD.value, - [ - (LinkTlvs.N1_NUMBER, node1_id), - (LinkTlvs.N2_NUMBER, switch_id), - (LinkTlvs.IFACE1_NUMBER, 0), - (LinkTlvs.IFACE1_IP4, iface1_ip4), - (LinkTlvs.IFACE1_IP4_MASK, 24), - ], - ) - coretlv.handle_message(message) - switch_node = coretlv.session.get_node(switch_id, SwitchNode) - all_links = switch_node.links() - assert len(all_links) == 1 - link = all_links[0] - assert link.options.bandwidth is None - - bandwidth = 50000 - message = coreapi.CoreLinkMessage.create( - 0, - [ - (LinkTlvs.N1_NUMBER, node1_id), - (LinkTlvs.N2_NUMBER, switch_id), - (LinkTlvs.IFACE1_NUMBER, 0), - (LinkTlvs.BANDWIDTH, bandwidth), - ], - ) - coretlv.handle_message(message) - - switch_node = coretlv.session.get_node(switch_id, SwitchNode) - all_links = switch_node.links() - assert len(all_links) == 1 - link = all_links[0] - assert link.options.bandwidth == bandwidth - - def test_link_delete_node_to_node(self, coretlv: CoreHandler): - node1_id = 1 - coretlv.session.add_node(CoreNode, _id=node1_id) - node2_id = 2 - coretlv.session.add_node(CoreNode, _id=node2_id) - ip_prefix = netaddr.IPNetwork("10.0.0.0/24") - iface1_ip4 = str(ip_prefix[node1_id]) - iface2_ip4 = str(ip_prefix[node2_id]) - message = coreapi.CoreLinkMessage.create( - MessageFlags.ADD.value, - [ - (LinkTlvs.N1_NUMBER, node1_id), - (LinkTlvs.N2_NUMBER, node2_id), - (LinkTlvs.IFACE1_NUMBER, 0), - (LinkTlvs.IFACE1_IP4, iface1_ip4), - (LinkTlvs.IFACE1_IP4_MASK, 24), - (LinkTlvs.IFACE2_IP4, iface2_ip4), - (LinkTlvs.IFACE2_IP4_MASK, 24), - ], - ) - coretlv.handle_message(message) - all_links = [] - for node_id in coretlv.session.nodes: - node = coretlv.session.nodes[node_id] - all_links += node.links() - assert len(all_links) == 1 - - message = coreapi.CoreLinkMessage.create( - MessageFlags.DELETE.value, - [ - (LinkTlvs.N1_NUMBER, node1_id), - (LinkTlvs.N2_NUMBER, node2_id), - (LinkTlvs.IFACE1_NUMBER, 0), - (LinkTlvs.IFACE2_NUMBER, 0), - ], - ) - coretlv.handle_message(message) - - all_links = [] - for node_id in coretlv.session.nodes: - node = coretlv.session.nodes[node_id] - all_links += 
node.links() - assert len(all_links) == 0 - - def test_link_delete_node_to_net(self, coretlv: CoreHandler): - node1_id = 1 - coretlv.session.add_node(CoreNode, _id=node1_id) - switch_id = 2 - coretlv.session.add_node(SwitchNode, _id=switch_id) - ip_prefix = netaddr.IPNetwork("10.0.0.0/24") - iface1_ip4 = str(ip_prefix[node1_id]) - message = coreapi.CoreLinkMessage.create( - MessageFlags.ADD.value, - [ - (LinkTlvs.N1_NUMBER, node1_id), - (LinkTlvs.N2_NUMBER, switch_id), - (LinkTlvs.IFACE1_NUMBER, 0), - (LinkTlvs.IFACE1_IP4, iface1_ip4), - (LinkTlvs.IFACE1_IP4_MASK, 24), - ], - ) - coretlv.handle_message(message) - switch_node = coretlv.session.get_node(switch_id, SwitchNode) - all_links = switch_node.links() - assert len(all_links) == 1 - - message = coreapi.CoreLinkMessage.create( - MessageFlags.DELETE.value, - [ - (LinkTlvs.N1_NUMBER, node1_id), - (LinkTlvs.N2_NUMBER, switch_id), - (LinkTlvs.IFACE1_NUMBER, 0), - ], - ) - coretlv.handle_message(message) - - switch_node = coretlv.session.get_node(switch_id, SwitchNode) - all_links = switch_node.links() - assert len(all_links) == 0 - - def test_link_delete_net_to_node(self, coretlv: CoreHandler): - node1_id = 1 - coretlv.session.add_node(CoreNode, _id=node1_id) - switch_id = 2 - coretlv.session.add_node(SwitchNode, _id=switch_id) - ip_prefix = netaddr.IPNetwork("10.0.0.0/24") - iface1_ip4 = str(ip_prefix[node1_id]) - message = coreapi.CoreLinkMessage.create( - MessageFlags.ADD.value, - [ - (LinkTlvs.N1_NUMBER, node1_id), - (LinkTlvs.N2_NUMBER, switch_id), - (LinkTlvs.IFACE1_NUMBER, 0), - (LinkTlvs.IFACE1_IP4, iface1_ip4), - (LinkTlvs.IFACE1_IP4_MASK, 24), - ], - ) - coretlv.handle_message(message) - switch_node = coretlv.session.get_node(switch_id, SwitchNode) - all_links = switch_node.links() - assert len(all_links) == 1 - - message = coreapi.CoreLinkMessage.create( - MessageFlags.DELETE.value, - [ - (LinkTlvs.N1_NUMBER, switch_id), - (LinkTlvs.N2_NUMBER, node1_id), - (LinkTlvs.IFACE2_NUMBER, 0), - ], - ) - coretlv.handle_message(message) - - switch_node = coretlv.session.get_node(switch_id, SwitchNode) - all_links = switch_node.links() - assert len(all_links) == 0 - - def test_session_update(self, coretlv: CoreHandler): - session_id = coretlv.session.id - name = "test" - message = coreapi.CoreSessionMessage.create( - 0, [(SessionTlvs.NUMBER, str(session_id)), (SessionTlvs.NAME, name)] - ) - - coretlv.handle_message(message) - - assert coretlv.session.name == name - - def test_session_query(self, coretlv: CoreHandler): - coretlv.dispatch_replies = mock.MagicMock() - message = coreapi.CoreSessionMessage.create(MessageFlags.STRING.value, []) - - coretlv.handle_message(message) - - args, _ = coretlv.dispatch_replies.call_args - replies = args[0] - assert len(replies) == 1 - - def test_session_join(self, coretlv: CoreHandler): - coretlv.dispatch_replies = mock.MagicMock() - session_id = coretlv.session.id - message = coreapi.CoreSessionMessage.create( - MessageFlags.ADD.value, [(SessionTlvs.NUMBER, str(session_id))] - ) - - coretlv.handle_message(message) - - assert coretlv.session.id == session_id - - def test_session_delete(self, coretlv: CoreHandler): - assert len(coretlv.coreemu.sessions) == 1 - session_id = coretlv.session.id - message = coreapi.CoreSessionMessage.create( - MessageFlags.DELETE.value, [(SessionTlvs.NUMBER, str(session_id))] - ) - - coretlv.handle_message(message) - - assert len(coretlv.coreemu.sessions) == 0 - - def test_file_hook_add(self, coretlv: CoreHandler): - state = EventTypes.DATACOLLECT_STATE - assert 
coretlv.session.hooks.get(state) is None - file_name = "test.sh" - file_data = "echo hello" - message = coreapi.CoreFileMessage.create( - MessageFlags.ADD.value, - [ - (FileTlvs.TYPE, f"hook:{state.value}"), - (FileTlvs.NAME, file_name), - (FileTlvs.DATA, file_data), - ], - ) - - coretlv.handle_message(message) - - hooks = coretlv.session.hooks.get(state) - assert len(hooks) == 1 - name, data = hooks[0] - assert file_name == name - assert file_data == data - - def test_file_service_file_set(self, coretlv: CoreHandler): - node = coretlv.session.add_node(CoreNode) - service = "DefaultRoute" - file_name = "defaultroute.sh" - file_data = "echo hello" - message = coreapi.CoreFileMessage.create( - MessageFlags.ADD.value, - [ - (FileTlvs.NODE, node.id), - (FileTlvs.TYPE, f"service:{service}"), - (FileTlvs.NAME, file_name), - (FileTlvs.DATA, file_data), - ], - ) - - coretlv.handle_message(message) - - service_file = coretlv.session.services.get_service_file( - node, service, file_name - ) - assert file_data == service_file.data - - def test_file_node_file_copy(self, request, coretlv: CoreHandler): - file_path = Path("/var/log/test/node.log") - node = coretlv.session.add_node(CoreNode) - node.makenodedir() - file_data = "echo hello" - message = coreapi.CoreFileMessage.create( - MessageFlags.ADD.value, - [ - (FileTlvs.NODE, node.id), - (FileTlvs.NAME, str(file_path)), - (FileTlvs.DATA, file_data), - ], - ) - - coretlv.handle_message(message) - - if not request.config.getoption("mock"): - expected_path = node.directory / "var.log/test" / file_path.name - assert expected_path.exists() - - def test_exec_node_tty(self, coretlv: CoreHandler): - coretlv.dispatch_replies = mock.MagicMock() - node = coretlv.session.add_node(CoreNode) - message = coreapi.CoreExecMessage.create( - MessageFlags.TTY.value, - [ - (ExecuteTlvs.NODE, node.id), - (ExecuteTlvs.NUMBER, 1), - (ExecuteTlvs.COMMAND, "bash"), - ], - ) - - coretlv.handle_message(message) - - args, _ = coretlv.dispatch_replies.call_args - replies = args[0] - assert len(replies) == 1 - - def test_exec_local_command(self, request, coretlv: CoreHandler): - if request.config.getoption("mock"): - pytest.skip("mocking calls") - - coretlv.dispatch_replies = mock.MagicMock() - node = coretlv.session.add_node(CoreNode) - cmd = "echo hello" - message = coreapi.CoreExecMessage.create( - MessageFlags.TEXT.value | MessageFlags.LOCAL.value, - [ - (ExecuteTlvs.NODE, node.id), - (ExecuteTlvs.NUMBER, 1), - (ExecuteTlvs.COMMAND, cmd), - ], - ) - - coretlv.handle_message(message) - - args, _ = coretlv.dispatch_replies.call_args - replies = args[0] - assert len(replies) == 1 - - def test_exec_node_command(self, coretlv: CoreHandler): - coretlv.dispatch_replies = mock.MagicMock() - node = coretlv.session.add_node(CoreNode) - cmd = "echo hello" - message = coreapi.CoreExecMessage.create( - MessageFlags.TEXT.value, - [ - (ExecuteTlvs.NODE, node.id), - (ExecuteTlvs.NUMBER, 1), - (ExecuteTlvs.COMMAND, cmd), - ], - ) - node.cmd = MagicMock(return_value="hello") - - coretlv.handle_message(message) - - node.cmd.assert_called_with(cmd) - - @pytest.mark.parametrize( - "state", - [ - EventTypes.SHUTDOWN_STATE, - EventTypes.RUNTIME_STATE, - EventTypes.DATACOLLECT_STATE, - EventTypes.CONFIGURATION_STATE, - EventTypes.DEFINITION_STATE, - ], - ) - def test_event_state(self, coretlv: CoreHandler, state: EventTypes): - message = coreapi.CoreEventMessage.create(0, [(EventTlvs.TYPE, state.value)]) - - coretlv.handle_message(message) - - assert coretlv.session.state == state - - def 
test_event_schedule(self, coretlv: CoreHandler): - coretlv.session.add_event = mock.MagicMock() - node = coretlv.session.add_node(CoreNode) - message = coreapi.CoreEventMessage.create( - MessageFlags.ADD.value, - [ - (EventTlvs.TYPE, EventTypes.SCHEDULED.value), - (EventTlvs.TIME, str(time.monotonic() + 100)), - (EventTlvs.NODE, node.id), - (EventTlvs.NAME, "event"), - (EventTlvs.DATA, "data"), - ], - ) - - coretlv.handle_message(message) - - coretlv.session.add_event.assert_called_once() - - def test_event_save_xml(self, coretlv: CoreHandler, tmpdir): - xml_file = tmpdir.join("coretlv.session.xml") - file_path = xml_file.strpath - coretlv.session.add_node(CoreNode) - message = coreapi.CoreEventMessage.create( - 0, - [(EventTlvs.TYPE, EventTypes.FILE_SAVE.value), (EventTlvs.NAME, file_path)], - ) - coretlv.handle_message(message) - assert Path(file_path).exists() - - def test_event_open_xml(self, coretlv: CoreHandler, tmpdir): - xml_file = tmpdir.join("coretlv.session.xml") - file_path = Path(xml_file.strpath) - node = coretlv.session.add_node(CoreNode) - coretlv.session.save_xml(file_path) - coretlv.session.delete_node(node.id) - message = coreapi.CoreEventMessage.create( - 0, - [ - (EventTlvs.TYPE, EventTypes.FILE_OPEN.value), - (EventTlvs.NAME, str(file_path)), - ], - ) - - coretlv.handle_message(message) - assert coretlv.session.get_node(node.id, NodeBase) - - @pytest.mark.parametrize( - "state", - [ - EventTypes.START, - EventTypes.STOP, - EventTypes.RESTART, - EventTypes.PAUSE, - EventTypes.RECONFIGURE, - ], - ) - def test_event_service(self, coretlv: CoreHandler, state: EventTypes): - coretlv.session.broadcast_event = mock.MagicMock() - node = coretlv.session.add_node(CoreNode) - message = coreapi.CoreEventMessage.create( - 0, - [ - (EventTlvs.TYPE, state.value), - (EventTlvs.NODE, node.id), - (EventTlvs.NAME, "service:DefaultRoute"), - ], - ) - - coretlv.handle_message(message) - - coretlv.session.broadcast_event.assert_called_once() - - @pytest.mark.parametrize( - "state", - [ - EventTypes.START, - EventTypes.STOP, - EventTypes.RESTART, - EventTypes.PAUSE, - EventTypes.RECONFIGURE, - ], - ) - def test_event_mobility(self, coretlv: CoreHandler, state: EventTypes): - message = coreapi.CoreEventMessage.create( - 0, [(EventTlvs.TYPE, state.value), (EventTlvs.NAME, "mobility:ns2script")] - ) - - coretlv.handle_message(message) - - def test_register_gui(self, coretlv: CoreHandler): - message = coreapi.CoreRegMessage.create(0, [(RegisterTlvs.GUI, "gui")]) - coretlv.handle_message(message) - - def test_register_xml(self, coretlv: CoreHandler, tmpdir): - xml_file = tmpdir.join("coretlv.session.xml") - file_path = xml_file.strpath - node = coretlv.session.add_node(CoreNode) - coretlv.session.save_xml(file_path) - coretlv.session.delete_node(node.id) - message = coreapi.CoreRegMessage.create( - 0, [(RegisterTlvs.EXECUTE_SERVER, file_path)] - ) - coretlv.session.instantiate() - - coretlv.handle_message(message) - - assert coretlv.coreemu.sessions[1].get_node(node.id, CoreNode) - - def test_register_python(self, coretlv: CoreHandler, tmpdir): - xml_file = tmpdir.join("test.py") - file_path = xml_file.strpath - with open(file_path, "w") as f: - f.write("from core.nodes.base import CoreNode\n") - f.write("coreemu = globals()['coreemu']\n") - f.write(f"session = coreemu.sessions[{coretlv.session.id}]\n") - f.write("session.add_node(CoreNode)\n") - message = coreapi.CoreRegMessage.create( - 0, [(RegisterTlvs.EXECUTE_SERVER, file_path)] - ) - coretlv.session.instantiate() - - 
coretlv.handle_message(message) - - assert len(coretlv.session.nodes) == 1 - - def test_config_all(self, coretlv: CoreHandler): - message = coreapi.CoreConfMessage.create( - MessageFlags.ADD.value, - [(ConfigTlvs.OBJECT, "all"), (ConfigTlvs.TYPE, ConfigFlags.RESET.value)], - ) - coretlv.session.location.refxyz = (10, 10, 10) - - coretlv.handle_message(message) - - assert coretlv.session.location.refxyz == (0, 0, 0) - - def test_config_options_request(self, coretlv: CoreHandler): - message = coreapi.CoreConfMessage.create( - 0, - [ - (ConfigTlvs.OBJECT, "session"), - (ConfigTlvs.TYPE, ConfigFlags.REQUEST.value), - ], - ) - coretlv.handle_broadcast_config = mock.MagicMock() - - coretlv.handle_message(message) - - coretlv.handle_broadcast_config.assert_called_once() - - def test_config_options_update(self, coretlv: CoreHandler): - test_key = "test" - test_value = "test" - values = {test_key: test_value} - message = coreapi.CoreConfMessage.create( - 0, - [ - (ConfigTlvs.OBJECT, "session"), - (ConfigTlvs.TYPE, ConfigFlags.UPDATE.value), - (ConfigTlvs.VALUES, dict_to_str(values)), - ], - ) - - coretlv.handle_message(message) - - assert coretlv.session.options.get_config(test_key) == test_value - - def test_config_location_reset(self, coretlv: CoreHandler): - message = coreapi.CoreConfMessage.create( - 0, - [ - (ConfigTlvs.OBJECT, "location"), - (ConfigTlvs.TYPE, ConfigFlags.RESET.value), - ], - ) - coretlv.session.location.refxyz = (10, 10, 10) - - coretlv.handle_message(message) - - assert coretlv.session.location.refxyz == (0, 0, 0) - - def test_config_location_update(self, coretlv: CoreHandler): - message = coreapi.CoreConfMessage.create( - 0, - [ - (ConfigTlvs.OBJECT, "location"), - (ConfigTlvs.TYPE, ConfigFlags.UPDATE.value), - (ConfigTlvs.VALUES, "10|10|70|50|0|0.5"), - ], - ) - - coretlv.handle_message(message) - - assert coretlv.session.location.refxyz == (10, 10, 0.0) - assert coretlv.session.location.refgeo == (70, 50, 0) - assert coretlv.session.location.refscale == 0.5 - - def test_config_metadata_request(self, coretlv: CoreHandler): - message = coreapi.CoreConfMessage.create( - 0, - [ - (ConfigTlvs.OBJECT, "metadata"), - (ConfigTlvs.TYPE, ConfigFlags.REQUEST.value), - ], - ) - coretlv.handle_broadcast_config = mock.MagicMock() - - coretlv.handle_message(message) - - coretlv.handle_broadcast_config.assert_called_once() - - def test_config_metadata_update(self, coretlv: CoreHandler): - test_key = "test" - test_value = "test" - values = {test_key: test_value} - message = coreapi.CoreConfMessage.create( - 0, - [ - (ConfigTlvs.OBJECT, "metadata"), - (ConfigTlvs.TYPE, ConfigFlags.UPDATE.value), - (ConfigTlvs.VALUES, dict_to_str(values)), - ], - ) - - coretlv.handle_message(message) - - assert coretlv.session.metadata[test_key] == test_value - - def test_config_broker_request(self, coretlv: CoreHandler): - server = "test" - host = "10.0.0.1" - port = 50000 - message = coreapi.CoreConfMessage.create( - 0, - [ - (ConfigTlvs.OBJECT, "broker"), - (ConfigTlvs.TYPE, ConfigFlags.UPDATE.value), - (ConfigTlvs.VALUES, f"{server}:{host}:{port}"), - ], - ) - coretlv.session.distributed.add_server = mock.MagicMock() - - coretlv.handle_message(message) - - coretlv.session.distributed.add_server.assert_called_once_with(server, host) - - def test_config_services_request_all(self, coretlv: CoreHandler): - message = coreapi.CoreConfMessage.create( - 0, - [ - (ConfigTlvs.OBJECT, "services"), - (ConfigTlvs.TYPE, ConfigFlags.REQUEST.value), - ], - ) - coretlv.handle_broadcast_config = mock.MagicMock() - - 
coretlv.handle_message(message) - - coretlv.handle_broadcast_config.assert_called_once() - - def test_config_services_request_specific(self, coretlv: CoreHandler): - node = coretlv.session.add_node(CoreNode) - message = coreapi.CoreConfMessage.create( - 0, - [ - (ConfigTlvs.NODE, node.id), - (ConfigTlvs.OBJECT, "services"), - (ConfigTlvs.TYPE, ConfigFlags.REQUEST.value), - (ConfigTlvs.OPAQUE, "service:DefaultRoute"), - ], - ) - coretlv.handle_broadcast_config = mock.MagicMock() - - coretlv.handle_message(message) - - coretlv.handle_broadcast_config.assert_called_once() - - def test_config_services_request_specific_file(self, coretlv: CoreHandler): - node = coretlv.session.add_node(CoreNode) - message = coreapi.CoreConfMessage.create( - 0, - [ - (ConfigTlvs.NODE, node.id), - (ConfigTlvs.OBJECT, "services"), - (ConfigTlvs.TYPE, ConfigFlags.REQUEST.value), - (ConfigTlvs.OPAQUE, "service:DefaultRoute:defaultroute.sh"), - ], - ) - coretlv.session.broadcast_file = mock.MagicMock() - - coretlv.handle_message(message) - - coretlv.session.broadcast_file.assert_called_once() - - def test_config_services_reset(self, coretlv: CoreHandler): - node = coretlv.session.add_node(CoreNode) - service = "DefaultRoute" - coretlv.session.services.set_service(node.id, service) - message = coreapi.CoreConfMessage.create( - 0, - [ - (ConfigTlvs.OBJECT, "services"), - (ConfigTlvs.TYPE, ConfigFlags.RESET.value), - ], - ) - assert coretlv.session.services.get_service(node.id, service) is not None - - coretlv.handle_message(message) - - assert coretlv.session.services.get_service(node.id, service) is None - - def test_config_services_set(self, coretlv: CoreHandler): - node = coretlv.session.add_node(CoreNode) - service = "DefaultRoute" - values = {"meta": "metadata"} - message = coreapi.CoreConfMessage.create( - 0, - [ - (ConfigTlvs.NODE, node.id), - (ConfigTlvs.OBJECT, "services"), - (ConfigTlvs.TYPE, ConfigFlags.UPDATE.value), - (ConfigTlvs.OPAQUE, f"service:{service}"), - (ConfigTlvs.VALUES, dict_to_str(values)), - ], - ) - assert coretlv.session.services.get_service(node.id, service) is None - - coretlv.handle_message(message) - - assert coretlv.session.services.get_service(node.id, service) is not None - - def test_config_mobility_reset(self, coretlv: CoreHandler): - wlan = coretlv.session.add_node(WlanNode) - message = coreapi.CoreConfMessage.create( - 0, - [ - (ConfigTlvs.OBJECT, "MobilityManager"), - (ConfigTlvs.TYPE, ConfigFlags.RESET.value), - ], - ) - coretlv.session.mobility.set_model_config(wlan.id, BasicRangeModel.name, {}) - assert len(coretlv.session.mobility.node_configurations) == 1 - - coretlv.handle_message(message) - - assert len(coretlv.session.mobility.node_configurations) == 0 - - def test_config_mobility_model_request(self, coretlv: CoreHandler): - wlan = coretlv.session.add_node(WlanNode) - message = coreapi.CoreConfMessage.create( - 0, - [ - (ConfigTlvs.NODE, wlan.id), - (ConfigTlvs.OBJECT, BasicRangeModel.name), - (ConfigTlvs.TYPE, ConfigFlags.REQUEST.value), - ], - ) - coretlv.handle_broadcast_config = mock.MagicMock() - - coretlv.handle_message(message) - - coretlv.handle_broadcast_config.assert_called_once() - - def test_config_mobility_model_update(self, coretlv: CoreHandler): - wlan = coretlv.session.add_node(WlanNode) - config_key = "range" - config_value = "1000" - values = {config_key: config_value} - message = coreapi.CoreConfMessage.create( - 0, - [ - (ConfigTlvs.NODE, wlan.id), - (ConfigTlvs.OBJECT, BasicRangeModel.name), - (ConfigTlvs.TYPE, ConfigFlags.UPDATE.value), - 
(ConfigTlvs.VALUES, dict_to_str(values)), - ], - ) - - coretlv.handle_message(message) - - config = coretlv.session.mobility.get_model_config( - wlan.id, BasicRangeModel.name - ) - assert config[config_key] == config_value - - def test_config_emane_model_request(self, coretlv: CoreHandler): - wlan = coretlv.session.add_node(WlanNode) - message = coreapi.CoreConfMessage.create( - 0, - [ - (ConfigTlvs.NODE, wlan.id), - (ConfigTlvs.OBJECT, EmaneIeee80211abgModel.name), - (ConfigTlvs.TYPE, ConfigFlags.REQUEST.value), - ], - ) - coretlv.handle_broadcast_config = mock.MagicMock() - - coretlv.handle_message(message) - - coretlv.handle_broadcast_config.assert_called_once() - - def test_config_emane_model_update(self, coretlv: CoreHandler): - wlan = coretlv.session.add_node(WlanNode) - config_key = "distance" - config_value = "50051" - values = {config_key: config_value} - message = coreapi.CoreConfMessage.create( - 0, - [ - (ConfigTlvs.NODE, wlan.id), - (ConfigTlvs.OBJECT, EmaneIeee80211abgModel.name), - (ConfigTlvs.TYPE, ConfigFlags.UPDATE.value), - (ConfigTlvs.VALUES, dict_to_str(values)), - ], - ) - - coretlv.handle_message(message) - - config = coretlv.session.emane.get_config(wlan.id, EmaneIeee80211abgModel.name) - assert config[config_key] == config_value diff --git a/daemon/tests/test_links.py b/daemon/tests/test_links.py index 791eb77a..eea88fb3 100644 --- a/daemon/tests/test_links.py +++ b/daemon/tests/test_links.py @@ -46,14 +46,17 @@ class TestLinks: ) # then + assert len(session.link_manager.links()) == 1 assert node1.get_iface(iface1_data.id) assert node2.get_iface(iface2_data.id) assert iface1 is not None + assert iface1.options == LINK_OPTIONS + assert iface1.has_netem + assert node1.get_iface(iface1_data.id) assert iface2 is not None - assert iface1.local_options == LINK_OPTIONS - assert iface1.has_local_netem - assert iface2.local_options == LINK_OPTIONS - assert iface2.has_local_netem + assert iface2.options == LINK_OPTIONS + assert iface2.has_netem + assert node1.get_iface(iface1_data.id) def test_add_node_to_net(self, session: Session, ip_prefixes: IpPrefixes): # given @@ -62,16 +65,20 @@ class TestLinks: iface1_data = ip_prefixes.create_iface(node1) # when - iface, _ = session.add_link( + iface1, iface2 = session.add_link( node1.id, node2.id, iface1_data=iface1_data, options=LINK_OPTIONS ) # then - assert node2.links() + assert len(session.link_manager.links()) == 1 + assert iface1 is not None + assert iface1.options == LINK_OPTIONS + assert iface1.has_netem assert node1.get_iface(iface1_data.id) - assert iface is not None - assert iface.local_options == LINK_OPTIONS - assert iface.has_local_netem + assert iface2 is not None + assert iface2.options == LINK_OPTIONS + assert iface2.has_netem + assert node2.get_iface(iface1_data.id) def test_add_net_to_node(self, session: Session, ip_prefixes: IpPrefixes): # given @@ -80,32 +87,37 @@ class TestLinks: iface2_data = ip_prefixes.create_iface(node2) # when - _, iface = session.add_link( + iface1, iface2 = session.add_link( node1.id, node2.id, iface2_data=iface2_data, options=LINK_OPTIONS ) # then - assert node1.links() - assert node2.get_iface(iface2_data.id) - assert iface is not None - assert iface.local_options == LINK_OPTIONS - assert iface.has_local_netem + assert len(session.link_manager.links()) == 1 + assert iface1 is not None + assert iface1.options == LINK_OPTIONS + assert iface1.has_netem + assert node1.get_iface(iface1.id) + assert iface2 is not None + assert iface2.options == LINK_OPTIONS + assert iface2.has_netem + 
assert node2.get_iface(iface2.id) - def test_add_net_to_net(self, session): + def test_add_net_to_net(self, session: Session): # given node1 = session.add_node(SwitchNode) node2 = session.add_node(SwitchNode) # when - iface, _ = session.add_link(node1.id, node2.id, options=LINK_OPTIONS) + iface1, iface2 = session.add_link(node1.id, node2.id, options=LINK_OPTIONS) # then - assert node1.links() - assert iface is not None - assert iface.local_options == LINK_OPTIONS - assert iface.options == LINK_OPTIONS - assert iface.has_local_netem - assert iface.has_netem + assert len(session.link_manager.links()) == 1 + assert iface1 is not None + assert iface1.options == LINK_OPTIONS + assert iface1.has_netem + assert iface2 is not None + assert iface2.options == LINK_OPTIONS + assert iface2.has_netem def test_add_node_to_node_uni(self, session: Session, ip_prefixes: IpPrefixes): # given @@ -141,48 +153,52 @@ class TestLinks: ) # then + assert len(session.link_manager.links()) == 1 assert node1.get_iface(iface1_data.id) assert node2.get_iface(iface2_data.id) assert iface1 is not None + assert iface1.options == link_options1 + assert iface1.has_netem assert iface2 is not None - assert iface1.local_options == link_options1 - assert iface1.has_local_netem - assert iface2.local_options == link_options2 - assert iface2.has_local_netem + assert iface2.options == link_options2 + assert iface2.has_netem def test_update_node_to_net(self, session: Session, ip_prefixes: IpPrefixes): # given node1 = session.add_node(CoreNode) node2 = session.add_node(SwitchNode) iface1_data = ip_prefixes.create_iface(node1) - iface1, _ = session.add_link(node1.id, node2.id, iface1_data) - assert iface1.local_options != LINK_OPTIONS + iface1, iface2 = session.add_link(node1.id, node2.id, iface1_data) + assert len(session.link_manager.links()) == 1 + assert iface1.options != LINK_OPTIONS + assert iface2.options != LINK_OPTIONS # when - session.update_link( - node1.id, node2.id, iface1_id=iface1_data.id, options=LINK_OPTIONS - ) + session.update_link(node1.id, node2.id, iface1.id, iface2.id, LINK_OPTIONS) # then - assert iface1.local_options == LINK_OPTIONS - assert iface1.has_local_netem + assert iface1.options == LINK_OPTIONS + assert iface1.has_netem + assert iface2.options == LINK_OPTIONS + assert iface2.has_netem def test_update_net_to_node(self, session: Session, ip_prefixes: IpPrefixes): # given node1 = session.add_node(SwitchNode) node2 = session.add_node(CoreNode) iface2_data = ip_prefixes.create_iface(node2) - _, iface2 = session.add_link(node1.id, node2.id, iface2_data=iface2_data) - assert iface2.local_options != LINK_OPTIONS + iface1, iface2 = session.add_link(node1.id, node2.id, iface2_data=iface2_data) + assert iface1.options != LINK_OPTIONS + assert iface2.options != LINK_OPTIONS # when - session.update_link( - node1.id, node2.id, iface2_id=iface2_data.id, options=LINK_OPTIONS - ) + session.update_link(node1.id, node2.id, iface1.id, iface2.id, LINK_OPTIONS) # then - assert iface2.local_options == LINK_OPTIONS - assert iface2.has_local_netem + assert iface1.options == LINK_OPTIONS + assert iface1.has_netem + assert iface2.options == LINK_OPTIONS + assert iface2.has_netem def test_update_ptp(self, session: Session, ip_prefixes: IpPrefixes): # given @@ -191,55 +207,68 @@ class TestLinks: iface1_data = ip_prefixes.create_iface(node1) iface2_data = ip_prefixes.create_iface(node2) iface1, iface2 = session.add_link(node1.id, node2.id, iface1_data, iface2_data) - assert iface1.local_options != LINK_OPTIONS - assert 
iface2.local_options != LINK_OPTIONS + assert iface1.options != LINK_OPTIONS + assert iface2.options != LINK_OPTIONS # when - session.update_link( - node1.id, node2.id, iface1_data.id, iface2_data.id, LINK_OPTIONS - ) + session.update_link(node1.id, node2.id, iface1.id, iface2.id, LINK_OPTIONS) # then - assert iface1.local_options == LINK_OPTIONS - assert iface1.has_local_netem - assert iface2.local_options == LINK_OPTIONS - assert iface2.has_local_netem + assert iface1.options == LINK_OPTIONS + assert iface1.has_netem + assert iface2.options == LINK_OPTIONS + assert iface2.has_netem def test_update_net_to_net(self, session: Session, ip_prefixes: IpPrefixes): # given node1 = session.add_node(SwitchNode) node2 = session.add_node(SwitchNode) - iface1, _ = session.add_link(node1.id, node2.id) - assert iface1.local_options != LINK_OPTIONS + iface1, iface2 = session.add_link(node1.id, node2.id) + assert iface1.options != LINK_OPTIONS + assert iface2.options != LINK_OPTIONS # when - session.update_link(node1.id, node2.id, options=LINK_OPTIONS) + session.update_link(node1.id, node2.id, iface1.id, iface2.id, LINK_OPTIONS) # then - assert iface1.local_options == LINK_OPTIONS - assert iface1.has_local_netem assert iface1.options == LINK_OPTIONS assert iface1.has_netem + assert iface2.options == LINK_OPTIONS + assert iface2.has_netem + + def test_update_error(self, session: Session, ip_prefixes: IpPrefixes): + # given + node1 = session.add_node(CoreNode) + node2 = session.add_node(CoreNode) + iface1_data = ip_prefixes.create_iface(node1) + iface2_data = ip_prefixes.create_iface(node2) + iface1, iface2 = session.add_link(node1.id, node2.id, iface1_data, iface2_data) + assert iface1.options != LINK_OPTIONS + assert iface2.options != LINK_OPTIONS + + # when + with pytest.raises(CoreError): + session.delete_link(node1.id, INVALID_ID, iface1.id, iface2.id) def test_clear_net_to_net(self, session: Session, ip_prefixes: IpPrefixes): # given node1 = session.add_node(SwitchNode) node2 = session.add_node(SwitchNode) - iface1, _ = session.add_link(node1.id, node2.id, options=LINK_OPTIONS) - assert iface1.local_options == LINK_OPTIONS - assert iface1.has_local_netem + iface1, iface2 = session.add_link(node1.id, node2.id, options=LINK_OPTIONS) assert iface1.options == LINK_OPTIONS assert iface1.has_netem + assert iface2.options == LINK_OPTIONS + assert iface2.has_netem # when options = LinkOptions(delay=0, bandwidth=0, loss=0.0, dup=0, jitter=0, buffer=0) - session.update_link(node1.id, node2.id, options=options) + session.update_link(node1.id, node2.id, iface1.id, iface2.id, options) # then - assert iface1.local_options.is_clear() - assert not iface1.has_local_netem assert iface1.options.is_clear() assert not iface1.has_netem + assert iface2.options.is_clear() + assert not iface2.has_netem def test_delete_node_to_node(self, session: Session, ip_prefixes: IpPrefixes): # given @@ -247,82 +276,100 @@ class TestLinks: node2 = session.add_node(CoreNode) iface1_data = ip_prefixes.create_iface(node1) iface2_data = ip_prefixes.create_iface(node2) - session.add_link(node1.id, node2.id, iface1_data, iface2_data) - assert node1.get_iface(iface1_data.id) - assert node2.get_iface(iface2_data.id) + iface1, iface2 = session.add_link(node1.id, node2.id, iface1_data, iface2_data) + assert len(session.link_manager.links()) == 1 + assert node1.get_iface(iface1.id) + assert node2.get_iface(iface2.id) # when - session.delete_link(node1.id, node2.id, iface1_data.id, iface2_data.id) + session.delete_link(node1.id, node2.id, iface1.id, 
iface2.id) # then - assert iface1_data.id not in node1.ifaces - assert iface2_data.id not in node2.ifaces + assert len(session.link_manager.links()) == 0 + assert iface1.id not in node1.ifaces + assert iface2.id not in node2.ifaces def test_delete_node_to_net(self, session: Session, ip_prefixes: IpPrefixes): # given node1 = session.add_node(CoreNode) node2 = session.add_node(SwitchNode) iface1_data = ip_prefixes.create_iface(node1) - session.add_link(node1.id, node2.id, iface1_data) - assert node1.get_iface(iface1_data.id) + iface1, iface2 = session.add_link(node1.id, node2.id, iface1_data) + assert len(session.link_manager.links()) == 1 + assert node1.get_iface(iface1.id) + assert node2.get_iface(iface2.id) # when - session.delete_link(node1.id, node2.id, iface1_id=iface1_data.id) + session.delete_link(node1.id, node2.id, iface1.id, iface2.id) # then - assert iface1_data.id not in node1.ifaces + assert len(session.link_manager.links()) == 0 + assert iface1.id not in node1.ifaces + assert iface2.id not in node2.ifaces def test_delete_net_to_node(self, session: Session, ip_prefixes: IpPrefixes): # given node1 = session.add_node(SwitchNode) node2 = session.add_node(CoreNode) iface2_data = ip_prefixes.create_iface(node2) - session.add_link(node1.id, node2.id, iface2_data=iface2_data) - assert node2.get_iface(iface2_data.id) + iface1, iface2 = session.add_link(node1.id, node2.id, iface2_data=iface2_data) + assert len(session.link_manager.links()) == 1 + assert node1.get_iface(iface1.id) + assert node2.get_iface(iface2.id) # when - session.delete_link(node1.id, node2.id, iface2_id=iface2_data.id) + session.delete_link(node1.id, node2.id, iface1.id, iface2.id) # then - assert iface2_data.id not in node2.ifaces + assert len(session.link_manager.links()) == 0 + assert iface1.id not in node1.ifaces + assert iface2.id not in node2.ifaces def test_delete_net_to_net(self, session: Session, ip_prefixes: IpPrefixes): # given node1 = session.add_node(SwitchNode) node2 = session.add_node(SwitchNode) - session.add_link(node1.id, node2.id) - assert node1.get_linked_iface(node2) + iface1, iface2 = session.add_link(node1.id, node2.id) + assert len(session.link_manager.links()) == 1 + assert node1.get_iface(iface1.id) + assert node2.get_iface(iface2.id) # when - session.delete_link(node1.id, node2.id) + session.delete_link(node1.id, node2.id, iface1.id, iface2.id) # then - assert not node1.get_linked_iface(node2) + assert len(session.link_manager.links()) == 0 + assert iface1.id not in node1.ifaces + assert iface2.id not in node2.ifaces def test_delete_node_error(self, session: Session, ip_prefixes: IpPrefixes): # given node1 = session.add_node(SwitchNode) node2 = session.add_node(SwitchNode) - session.add_link(node1.id, node2.id) - assert node1.get_linked_iface(node2) + iface1, iface2 = session.add_link(node1.id, node2.id) + assert len(session.link_manager.links()) == 1 + assert node1.get_iface(iface1.id) + assert node2.get_iface(iface2.id) # when with pytest.raises(CoreError): - session.delete_link(node1.id, INVALID_ID) + session.delete_link(node1.id, INVALID_ID, iface1.id, iface2.id) with pytest.raises(CoreError): - session.delete_link(INVALID_ID, node2.id) + session.delete_link(INVALID_ID, node2.id, iface1.id, iface2.id) def test_delete_net_to_net_error(self, session: Session, ip_prefixes: IpPrefixes): # given node1 = session.add_node(SwitchNode) node2 = session.add_node(SwitchNode) node3 = session.add_node(SwitchNode) - session.add_link(node1.id, node2.id) - assert node1.get_linked_iface(node2) + iface1, 
iface2 = session.add_link(node1.id, node2.id) + assert len(session.link_manager.links()) == 1 + assert node1.get_iface(iface1.id) + assert node2.get_iface(iface2.id) # when with pytest.raises(CoreError): - session.delete_link(node1.id, node3.id) + session.delete_link(node1.id, node3.id, iface1.id, iface2.id) def test_delete_node_to_net_error(self, session: Session, ip_prefixes: IpPrefixes): # given @@ -330,12 +377,14 @@ class TestLinks: node2 = session.add_node(SwitchNode) node3 = session.add_node(SwitchNode) iface1_data = ip_prefixes.create_iface(node1) - iface1, _ = session.add_link(node1.id, node2.id, iface1_data) - assert iface1 + iface1, iface2 = session.add_link(node1.id, node2.id, iface1_data) + assert len(session.link_manager.links()) == 1 + assert node1.get_iface(iface1.id) + assert node2.get_iface(iface2.id) # when with pytest.raises(CoreError): - session.delete_link(node1.id, node3.id) + session.delete_link(node1.id, node3.id, iface1.id, iface2.id) def test_delete_net_to_node_error(self, session: Session, ip_prefixes: IpPrefixes): # given @@ -343,12 +392,14 @@ class TestLinks: node2 = session.add_node(CoreNode) node3 = session.add_node(SwitchNode) iface2_data = ip_prefixes.create_iface(node2) - _, iface2 = session.add_link(node1.id, node2.id, iface2_data=iface2_data) - assert iface2 + iface1, iface2 = session.add_link(node1.id, node2.id, iface2_data=iface2_data) + assert len(session.link_manager.links()) == 1 + assert node1.get_iface(iface1.id) + assert node2.get_iface(iface2.id) # when with pytest.raises(CoreError): - session.delete_link(node1.id, node3.id) + session.delete_link(node1.id, node3.id, iface1.id, iface2.id) def test_delete_node_to_node_error(self, session: Session, ip_prefixes: IpPrefixes): # given @@ -358,9 +409,10 @@ class TestLinks: iface1_data = ip_prefixes.create_iface(node1) iface2_data = ip_prefixes.create_iface(node2) iface1, iface2 = session.add_link(node1.id, node2.id, iface1_data, iface2_data) - assert iface1 - assert iface2 + assert len(session.link_manager.links()) == 1 + assert node1.get_iface(iface1.id) + assert node2.get_iface(iface2.id) # when with pytest.raises(CoreError): - session.delete_link(node1.id, node3.id) + session.delete_link(node1.id, node3.id, iface1.id, iface2.id) diff --git a/daemon/tests/test_nodes.py b/daemon/tests/test_nodes.py index 3f0fbab1..bb76bb4e 100644 --- a/daemon/tests/test_nodes.py +++ b/daemon/tests/test_nodes.py @@ -1,6 +1,6 @@ import pytest -from core.emulator.data import InterfaceData, NodeOptions +from core.emulator.data import InterfaceData from core.emulator.session import Session from core.errors import CoreError from core.nodes.base import CoreNode @@ -14,7 +14,8 @@ class TestNodes: @pytest.mark.parametrize("model", MODELS) def test_node_add(self, session: Session, model: str): # given - options = NodeOptions(model=model) + options = CoreNode.create_options() + options.model = model # when node = session.add_node(CoreNode, options=options) @@ -60,6 +61,40 @@ class TestNodes: with pytest.raises(CoreError): session.get_node(node.id, CoreNode) + def test_node_add_iface(self, session: Session): + # given + node = session.add_node(CoreNode) + + # when + iface = node.create_iface() + + # then + assert iface.id in node.ifaces + + def test_node_get_iface(self, session: Session): + # given + node = session.add_node(CoreNode) + iface = node.create_iface() + assert iface.id in node.ifaces + + # when + iface2 = node.get_iface(iface.id) + + # then + assert iface == iface2 + + def test_node_delete_iface(self, session: 
Session): + # given + node = session.add_node(CoreNode) + iface = node.create_iface() + assert iface.id in node.ifaces + + # when + node.delete_iface(iface.id) + + # then + assert iface.id not in node.ifaces + @pytest.mark.parametrize( "mac,expected", [ @@ -70,12 +105,11 @@ class TestNodes: def test_node_set_mac(self, session: Session, mac: str, expected: str): # given node = session.add_node(CoreNode) - switch = session.add_node(SwitchNode) iface_data = InterfaceData() - iface = node.new_iface(switch, iface_data) + iface = node.create_iface(iface_data) # when - node.set_mac(iface.node_id, mac) + iface.set_mac(mac) # then assert str(iface.mac) == expected @@ -86,13 +120,12 @@ class TestNodes: def test_node_set_mac_exception(self, session: Session, mac: str): # given node = session.add_node(CoreNode) - switch = session.add_node(SwitchNode) iface_data = InterfaceData() - iface = node.new_iface(switch, iface_data) + iface = node.create_iface(iface_data) # when with pytest.raises(CoreError): - node.set_mac(iface.node_id, mac) + iface.set_mac(mac) @pytest.mark.parametrize( "ip,expected,is_ip6", @@ -106,12 +139,11 @@ class TestNodes: def test_node_add_ip(self, session: Session, ip: str, expected: str, is_ip6: bool): # given node = session.add_node(CoreNode) - switch = session.add_node(SwitchNode) iface_data = InterfaceData() - iface = node.new_iface(switch, iface_data) + iface = node.create_iface(iface_data) # when - node.add_ip(iface.node_id, ip) + iface.add_ip(ip) # then if is_ip6: @@ -122,14 +154,13 @@ class TestNodes: def test_node_add_ip_exception(self, session): # given node = session.add_node(CoreNode) - switch = session.add_node(SwitchNode) iface_data = InterfaceData() - iface = node.new_iface(switch, iface_data) + iface = node.create_iface(iface_data) ip = "256.168.0.1/24" # when with pytest.raises(CoreError): - node.add_ip(iface.node_id, ip) + iface.add_ip(ip) @pytest.mark.parametrize("net_type", NET_TYPES) def test_net(self, session, net_type): diff --git a/daemon/tests/test_services.py b/daemon/tests/test_services.py index bbccaaac..69234e3a 100644 --- a/daemon/tests/test_services.py +++ b/daemon/tests/test_services.py @@ -53,7 +53,7 @@ class TestServices: total_service = len(node.services) # when - session.services.add_services(node, node.type, [SERVICE_ONE, SERVICE_TWO]) + session.services.add_services(node, node.model, [SERVICE_ONE, SERVICE_TWO]) # then assert node.services diff --git a/daemon/tests/test_xml.py b/daemon/tests/test_xml.py index 653e77f6..6841da8e 100644 --- a/daemon/tests/test_xml.py +++ b/daemon/tests/test_xml.py @@ -4,13 +4,13 @@ from xml.etree import ElementTree import pytest -from core.emulator.data import IpPrefixes, LinkOptions, NodeOptions +from core.emulator.data import IpPrefixes, LinkOptions from core.emulator.enumerations import EventTypes from core.emulator.session import Session from core.errors import CoreError from core.location.mobility import BasicRangeModel from core.nodes.base import CoreNode -from core.nodes.network import PtpNet, SwitchNode, WlanNode +from core.nodes.network import SwitchNode, WlanNode from core.services.utility import SshService @@ -65,25 +65,18 @@ class TestXml: :param tmpdir: tmpdir to create data in :param ip_prefixes: generates ip addresses for nodes """ - # create ptp - ptp_node = session.add_node(PtpNet) - # create nodes node1 = session.add_node(CoreNode) node2 = session.add_node(CoreNode) - # link nodes to ptp net - for node in [node1, node2]: - iface_data = ip_prefixes.create_iface(node) - session.add_link(node.id, 
ptp_node.id, iface1_data=iface_data) + # link nodes + iface1_data = ip_prefixes.create_iface(node1) + iface2_data = ip_prefixes.create_iface(node2) + session.add_link(node1.id, node2.id, iface1_data, iface2_data) # instantiate session session.instantiate() - # get ids for nodes - node1_id = node1.id - node2_id = node2.id - # save xml xml_file = tmpdir.join("session.xml") file_path = Path(xml_file.strpath) @@ -98,16 +91,19 @@ class TestXml: # verify nodes have been removed from session with pytest.raises(CoreError): - assert not session.get_node(node1_id, CoreNode) + assert not session.get_node(node1.id, CoreNode) with pytest.raises(CoreError): - assert not session.get_node(node2_id, CoreNode) + assert not session.get_node(node2.id, CoreNode) + # verify no links are known + assert len(session.link_manager.links()) == 0 # load saved xml session.open_xml(file_path, start=True) # verify nodes have been recreated - assert session.get_node(node1_id, CoreNode) - assert session.get_node(node2_id, CoreNode) + assert session.get_node(node1.id, CoreNode) + assert session.get_node(node2.id, CoreNode) + assert len(session.link_manager.links()) == 1 def test_xml_ptp_services( self, session: Session, tmpdir: TemporaryFile, ip_prefixes: IpPrefixes @@ -119,18 +115,14 @@ class TestXml: :param tmpdir: tmpdir to create data in :param ip_prefixes: generates ip addresses for nodes """ - # create ptp - ptp_node = session.add_node(PtpNet) - # create nodes - options = NodeOptions(model="host") - node1 = session.add_node(CoreNode, options=options) + node1 = session.add_node(CoreNode) node2 = session.add_node(CoreNode) # link nodes to ptp net - for node in [node1, node2]: - iface_data = ip_prefixes.create_iface(node) - session.add_link(node.id, ptp_node.id, iface1_data=iface_data) + iface1_data = ip_prefixes.create_iface(node1) + iface2_data = ip_prefixes.create_iface(node2) + session.add_link(node1.id, node2.id, iface1_data, iface2_data) # set custom values for node service session.services.set_service(node1.id, SshService.name) @@ -143,10 +135,6 @@ class TestXml: # instantiate session session.instantiate() - # get ids for nodes - node1_id = node1.id - node2_id = node2.id - # save xml xml_file = tmpdir.join("session.xml") file_path = Path(xml_file.strpath) @@ -161,9 +149,9 @@ class TestXml: # verify nodes have been removed from session with pytest.raises(CoreError): - assert not session.get_node(node1_id, CoreNode) + assert not session.get_node(node1.id, CoreNode) with pytest.raises(CoreError): - assert not session.get_node(node2_id, CoreNode) + assert not session.get_node(node2.id, CoreNode) # load saved xml session.open_xml(file_path, start=True) @@ -172,8 +160,8 @@ class TestXml: service = session.services.get_service(node1.id, SshService.name) # verify nodes have been recreated - assert session.get_node(node1_id, CoreNode) - assert session.get_node(node2_id, CoreNode) + assert session.get_node(node1.id, CoreNode) + assert session.get_node(node2.id, CoreNode) assert service.config_data.get(service_file) == file_data def test_xml_mobility( @@ -187,28 +175,23 @@ class TestXml: :param ip_prefixes: generates ip addresses for nodes """ # create wlan - wlan_node = session.add_node(WlanNode) - session.mobility.set_model(wlan_node, BasicRangeModel, {"test": "1"}) + wlan = session.add_node(WlanNode) + session.mobility.set_model(wlan, BasicRangeModel, {"test": "1"}) # create nodes - options = NodeOptions(model="mdr") - options.set_position(0, 0) + options = CoreNode.create_options() + options.model = "mdr" node1 = 
session.add_node(CoreNode, options=options) node2 = session.add_node(CoreNode, options=options) # link nodes for node in [node1, node2]: iface_data = ip_prefixes.create_iface(node) - session.add_link(node.id, wlan_node.id, iface1_data=iface_data) + session.add_link(node.id, wlan.id, iface1_data=iface_data) # instantiate session session.instantiate() - # get ids for nodes - wlan_id = wlan_node.id - node1_id = node1.id - node2_id = node2.id - # save xml xml_file = tmpdir.join("session.xml") file_path = Path(xml_file.strpath) @@ -223,20 +206,20 @@ class TestXml: # verify nodes have been removed from session with pytest.raises(CoreError): - assert not session.get_node(node1_id, CoreNode) + assert not session.get_node(node1.id, CoreNode) with pytest.raises(CoreError): - assert not session.get_node(node2_id, CoreNode) + assert not session.get_node(node2.id, CoreNode) # load saved xml session.open_xml(file_path, start=True) # retrieve configuration we set originally - value = str(session.mobility.get_config("test", wlan_id, BasicRangeModel.name)) + value = str(session.mobility.get_config("test", wlan.id, BasicRangeModel.name)) # verify nodes and configuration were restored - assert session.get_node(node1_id, CoreNode) - assert session.get_node(node2_id, CoreNode) - assert session.get_node(wlan_id, WlanNode) + assert session.get_node(node1.id, CoreNode) + assert session.get_node(node2.id, CoreNode) + assert session.get_node(wlan.id, WlanNode) assert value == "1" def test_network_to_network(self, session: Session, tmpdir: TemporaryFile): @@ -256,10 +239,6 @@ class TestXml: # instantiate session session.instantiate() - # get ids for nodes - node1_id = switch1.id - node2_id = switch2.id - # save xml xml_file = tmpdir.join("session.xml") file_path = Path(xml_file.strpath) @@ -274,19 +253,19 @@ class TestXml: # verify nodes have been removed from session with pytest.raises(CoreError): - assert not session.get_node(node1_id, SwitchNode) + assert not session.get_node(switch1.id, SwitchNode) with pytest.raises(CoreError): - assert not session.get_node(node2_id, SwitchNode) + assert not session.get_node(switch2.id, SwitchNode) # load saved xml session.open_xml(file_path, start=True) # verify nodes have been recreated - switch1 = session.get_node(node1_id, SwitchNode) - switch2 = session.get_node(node2_id, SwitchNode) + switch1 = session.get_node(switch1.id, SwitchNode) + switch2 = session.get_node(switch2.id, SwitchNode) assert switch1 assert switch2 - assert len(switch1.links() + switch2.links()) == 1 + assert len(session.link_manager.links()) == 1 def test_link_options( self, session: Session, tmpdir: TemporaryFile, ip_prefixes: IpPrefixes @@ -316,10 +295,6 @@ class TestXml: # instantiate session session.instantiate() - # get ids for nodes - node1_id = node1.id - node2_id = switch.id - # save xml xml_file = tmpdir.join("session.xml") file_path = Path(xml_file.strpath) @@ -334,27 +309,25 @@ class TestXml: # verify nodes have been removed from session with pytest.raises(CoreError): - assert not session.get_node(node1_id, CoreNode) + assert not session.get_node(node1.id, CoreNode) with pytest.raises(CoreError): - assert not session.get_node(node2_id, SwitchNode) + assert not session.get_node(switch.id, SwitchNode) # load saved xml session.open_xml(file_path, start=True) # verify nodes have been recreated - assert session.get_node(node1_id, CoreNode) - assert session.get_node(node2_id, SwitchNode) - links = [] - for node_id in session.nodes: - node = session.nodes[node_id] - links += node.links() - link = 
links[0] - assert options.loss == link.options.loss - assert options.bandwidth == link.options.bandwidth - assert options.jitter == link.options.jitter - assert options.delay == link.options.delay - assert options.dup == link.options.dup - assert options.buffer == link.options.buffer + assert session.get_node(node1.id, CoreNode) + assert session.get_node(switch.id, SwitchNode) + assert len(session.link_manager.links()) == 1 + link = list(session.link_manager.links())[0] + link_options = link.options() + assert options.loss == link_options.loss + assert options.bandwidth == link_options.bandwidth + assert options.jitter == link_options.jitter + assert options.delay == link_options.delay + assert options.dup == link_options.dup + assert options.buffer == link_options.buffer def test_link_options_ptp( self, session: Session, tmpdir: TemporaryFile, ip_prefixes: IpPrefixes @@ -385,10 +358,6 @@ class TestXml: # instantiate session session.instantiate() - # get ids for nodes - node1_id = node1.id - node2_id = node2.id - # save xml xml_file = tmpdir.join("session.xml") file_path = Path(xml_file.strpath) @@ -403,27 +372,25 @@ class TestXml: # verify nodes have been removed from session with pytest.raises(CoreError): - assert not session.get_node(node1_id, CoreNode) + assert not session.get_node(node1.id, CoreNode) with pytest.raises(CoreError): - assert not session.get_node(node2_id, CoreNode) + assert not session.get_node(node2.id, CoreNode) # load saved xml session.open_xml(file_path, start=True) # verify nodes have been recreated - assert session.get_node(node1_id, CoreNode) - assert session.get_node(node2_id, CoreNode) - links = [] - for node_id in session.nodes: - node = session.nodes[node_id] - links += node.links() - link = links[0] - assert options.loss == link.options.loss - assert options.bandwidth == link.options.bandwidth - assert options.jitter == link.options.jitter - assert options.delay == link.options.delay - assert options.dup == link.options.dup - assert options.buffer == link.options.buffer + assert session.get_node(node1.id, CoreNode) + assert session.get_node(node2.id, CoreNode) + assert len(session.link_manager.links()) == 1 + link = list(session.link_manager.links())[0] + link_options = link.options() + assert options.loss == link_options.loss + assert options.bandwidth == link_options.bandwidth + assert options.jitter == link_options.jitter + assert options.delay == link_options.delay + assert options.dup == link_options.dup + assert options.buffer == link_options.buffer def test_link_options_bidirectional( self, session: Session, tmpdir: TemporaryFile, ip_prefixes: IpPrefixes @@ -450,7 +417,9 @@ class TestXml: options1.dup = 5 options1.jitter = 5 options1.buffer = 50 - session.add_link(node1.id, node2.id, iface1_data, iface2_data, options1) + iface1, iface2 = session.add_link( + node1.id, node2.id, iface1_data, iface2_data, options1 + ) options2 = LinkOptions() options2.unidirectional = 1 options2.bandwidth = 10000 @@ -459,17 +428,11 @@ class TestXml: options2.dup = 10 options2.jitter = 10 options2.buffer = 100 - session.update_link( - node2.id, node1.id, iface2_data.id, iface1_data.id, options2 - ) + session.update_link(node2.id, node1.id, iface2.id, iface1.id, options2) # instantiate session session.instantiate() - # get ids for nodes - node1_id = node1.id - node2_id = node2.id - # save xml xml_file = tmpdir.join("session.xml") file_path = Path(xml_file.strpath) @@ -484,32 +447,26 @@ class TestXml: # verify nodes have been removed from session with 
pytest.raises(CoreError): - assert not session.get_node(node1_id, CoreNode) + assert not session.get_node(node1.id, CoreNode) with pytest.raises(CoreError): - assert not session.get_node(node2_id, CoreNode) + assert not session.get_node(node2.id, CoreNode) # load saved xml session.open_xml(file_path, start=True) # verify nodes have been recreated - assert session.get_node(node1_id, CoreNode) - assert session.get_node(node2_id, CoreNode) - links = [] - for node_id in session.nodes: - node = session.nodes[node_id] - links += node.links() - assert len(links) == 2 - link1 = links[0] - link2 = links[1] - assert options1.bandwidth == link1.options.bandwidth - assert options1.delay == link1.options.delay - assert options1.loss == link1.options.loss - assert options1.dup == link1.options.dup - assert options1.jitter == link1.options.jitter - assert options1.buffer == link1.options.buffer - assert options2.bandwidth == link2.options.bandwidth - assert options2.delay == link2.options.delay - assert options2.loss == link2.options.loss - assert options2.dup == link2.options.dup - assert options2.jitter == link2.options.jitter - assert options2.buffer == link2.options.buffer + assert session.get_node(node1.id, CoreNode) + assert session.get_node(node2.id, CoreNode) + assert len(session.link_manager.links()) == 1 + assert options1.bandwidth == iface1.options.bandwidth + assert options1.delay == iface1.options.delay + assert options1.loss == iface1.options.loss + assert options1.dup == iface1.options.dup + assert options1.jitter == iface1.options.jitter + assert options1.buffer == iface1.options.buffer + assert options2.bandwidth == iface2.options.bandwidth + assert options2.delay == iface2.options.delay + assert options2.loss == iface2.options.loss + assert options2.dup == iface2.options.dup + assert options2.jitter == iface2.options.jitter + assert options2.buffer == iface2.options.buffer diff --git a/dockerfiles/Dockerfile.centos b/dockerfiles/Dockerfile.centos new file mode 100644 index 00000000..738f649b --- /dev/null +++ b/dockerfiles/Dockerfile.centos @@ -0,0 +1,61 @@ +# syntax=docker/dockerfile:1 +FROM centos:7 +LABEL Description="CORE Docker CentOS Image" + +# define variables +ARG PREFIX=/usr +ARG BRANCH=master + +# define environment +ENV DEBIAN_FRONTEND=noninteractive +ENV LANG en_US.UTF-8 + +# install basic dependencies +RUN yum -y update && \ + yum install -y git sudo wget tzdata unzip + +# install python3.9 +WORKDIR /opt +RUN wget https://www.python.org/ftp/python/3.9.15/Python-3.9.15.tgz +RUN tar xf Python-3.9.15.tgz +RUN yum install -y make && yum-builddep -y python3 +RUN cd Python-3.9.15 && \ + ./configure --enable-optimizations --with-ensurepip=install && \ + make -j$(nproc) altinstall +RUN python3.9 -m pip install --upgrade pip + +# install core +WORKDIR /opt +RUN git clone https://github.com/coreemu/core +WORKDIR /opt/core +RUN git checkout ${BRANCH} +RUN PYTHON=/usr/local/bin/python3.9 ./setup.sh +RUN . /root/.bashrc && PYTHON=/usr/local/bin/python3.9 inv install -v -p ${PREFIX} --no-python +ENV PATH "$PATH:/opt/core/venv/bin" + +# install emane +RUN yum install -y libpcap-devel libpcre3-devel libxml2-devel protobuf-devel unzip uuid-devel +WORKDIR /opt +RUN wget -q https://adjacentlink.com/downloads/emane/emane-1.3.3-release-1.el7.x86_64.tar.gz && \ + tar xf emane-1.3.3-release-1.el7.x86_64.tar.gz && \ + cd emane-1.3.3-release-1/rpms/el7/x86_64 && \ + yum install -y epel-release && \ + yum install -y ./openstatistic*.rpm ./emane*.rpm ./python3-emane_*.rpm && \ + cd ../../../.. 
&& \ + rm emane-1.3.3-release-1.el7.x86_64.tar.gz && \ + rm -rf emane-1.3.3-release-1 +RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v3.7.1/protoc-3.7.1-linux-x86_64.zip && \ + mkdir protoc && \ + unzip protoc-3.7.1-linux-x86_64.zip -d protoc +RUN git clone https://github.com/adjacentlink/emane.git +RUN PATH=/opt/protoc/bin:$PATH && \ + cd emane && \ + git checkout v1.3.3 && \ + ./autogen.sh && \ + PYTHON=/opt/core/venv/bin/python ./configure --prefix=/usr && \ + cd src/python && \ + make && \ + /opt/core/venv/bin/python -m pip install . + +# run daemon +CMD ["core-daemon"] diff --git a/dockerfiles/Dockerfile.centos-package b/dockerfiles/Dockerfile.centos-package new file mode 100644 index 00000000..227720af --- /dev/null +++ b/dockerfiles/Dockerfile.centos-package @@ -0,0 +1,58 @@ +# syntax=docker/dockerfile:1 +FROM centos:7 +LABEL Description="CORE CentOS Image" + +# install basic dependencies +RUN yum update -y && yum install -y wget + +# install python3.9 +WORKDIR /opt +RUN wget https://www.python.org/ftp/python/3.9.15/Python-3.9.15.tgz +RUN tar xf Python-3.9.15.tgz +RUN yum install -y make && yum-builddep -y python3 +RUN cd Python-3.9.15 && \ + ./configure --enable-optimizations --with-ensurepip=install && \ + make -j$(nproc) altinstall +RUN python3.9 -m pip install --upgrade pip + +# install core +WORKDIR /opt/core +COPY core_*.rpm . +RUN PYTHON=/usr/local/bin/python3.9 yum install -y ./core_*.rpm +ENV PATH "$PATH:/opt/core/venv/bin" + +# install ospf mdr +RUN yum install -y automake gawk git libreadline-devel libtool pkg-config +WORKDIR /opt +RUN git clone https://github.com/USNavalResearchLaboratory/ospf-mdr.git +RUN cd ospf-mdr && \ + ./bootstrap.sh && \ + ./configure --disable-doc --enable-user=root --enable-group=root \ + --with-cflags=-ggdb --sysconfdir=/usr/local/etc/quagga --enable-vtysh \ + --localstatedir=/var/run/quagga && \ + make -j$(nproc) && \ + make install + +# install emane +RUN yum install -y libpcap-devel libpcre3-devel libxml2-devel protobuf-devel unzip uuid-devel +WORKDIR /opt +RUN wget -q https://adjacentlink.com/downloads/emane/emane-1.3.3-release-1.el7.x86_64.tar.gz && \ + tar xf emane-1.3.3-release-1.el7.x86_64.tar.gz && \ + cd emane-1.3.3-release-1/rpms/el7/x86_64 && \ + yum install -y epel-release && \ + yum install -y ./openstatistic*.rpm ./emane*.rpm ./python3-emane_*.rpm && \ + cd ../../../.. && \ + rm emane-1.3.3-release-1.el7.x86_64.tar.gz && \ + rm -rf emane-1.3.3-release-1 +RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v3.7.1/protoc-3.7.1-linux-x86_64.zip && \ + mkdir protoc && \ + unzip protoc-3.7.1-linux-x86_64.zip -d protoc +RUN git clone https://github.com/adjacentlink/emane.git +RUN PATH=/opt/protoc/bin:$PATH && \ + cd emane && \ + git checkout v1.3.3 && \ + ./autogen.sh && \ + PYTHON=/opt/core/venv/bin/python ./configure --prefix=/usr && \ + cd src/python && \ + make && \ + /opt/core/venv/bin/python -m pip install . 
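The package-based Dockerfiles above expect a prebuilt CORE package to be copied into the build context before building. As a rough sketch (the image tag `core-centos` and the run flags are illustrative assumptions, not part of the repository), building and running the CentOS package image might look like:

```shell
# build, assuming a core_*.rpm sits in the build context
docker build -t core-centos -f dockerfiles/Dockerfile.centos-package .

# run privileged so CORE can create namespaces and bridges,
# invoking the daemon directly as the container command
docker run -it --rm --privileged core-centos core-daemon
```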
diff --git a/dockerfiles/Dockerfile.ubuntu b/dockerfiles/Dockerfile.ubuntu new file mode 100644 index 00000000..3e8ccaac --- /dev/null +++ b/dockerfiles/Dockerfile.ubuntu @@ -0,0 +1,37 @@ +# syntax=docker/dockerfile:1 +FROM ubuntu:22.04 +LABEL Description="CORE Docker Ubuntu Image" + +# define variables +ARG PREFIX=/usr/local +ARG BRANCH=master + +# define environment +ENV DEBIAN_FRONTEND=noninteractive + +# install basic dependencies +RUN apt-get update && \ + apt-get install -y git sudo wget tzdata + +# install core +WORKDIR /opt +RUN git clone https://github.com/coreemu/core +WORKDIR /opt/core +RUN git checkout ${BRANCH} +RUN NO_SYSTEM=1 ./setup.sh +RUN . /root/.bashrc && inv install -v -p ${PREFIX} +ENV PATH "$PATH:/opt/core/venv/bin" + +# install emane +RUN apt-get install -y libpcap-dev libpcre3-dev libprotobuf-dev libxml2-dev protobuf-compiler uuid-dev +WORKDIR /opt +RUN git clone https://github.com/adjacentlink/emane.git +RUN cd emane && \ + ./autogen.sh && \ + ./configure --prefix=/usr && \ + make -j$(nproc) && \ + make install +RUN /opt/core/venv/bin/python -m pip install emane/src/python + +# run daemon +CMD ["core-daemon"] diff --git a/dockerfiles/Dockerfile.ubuntu-package b/dockerfiles/Dockerfile.ubuntu-package new file mode 100644 index 00000000..92933e91 --- /dev/null +++ b/dockerfiles/Dockerfile.ubuntu-package @@ -0,0 +1,39 @@ +# syntax=docker/dockerfile:1 +FROM ubuntu:22.04 +LABEL Description="CORE Docker Ubuntu Image" + +# define environment +ENV DEBIAN_FRONTEND=noninteractive + +# install basic dependencies +RUN apt-get update && apt-get install -y python3 python3-tk python3-pip python3-venv +RUN python3 -m pip install --upgrade pip + +# install core +WORKDIR /opt/core +COPY core_*.deb . +RUN apt-get install -y ./core_*.deb +ENV PATH "$PATH:/opt/core/venv/bin" + +# install ospf mdr +RUN apt-get install -y automake gawk git libreadline-dev libtool pkg-config +WORKDIR /opt +RUN git clone https://github.com/USNavalResearchLaboratory/ospf-mdr.git +RUN cd ospf-mdr && \ + ./bootstrap.sh && \ + ./configure --disable-doc --enable-user=root --enable-group=root \ + --with-cflags=-ggdb --sysconfdir=/usr/local/etc/quagga --enable-vtysh \ + --localstatedir=/var/run/quagga && \ + make -j$(nproc) && \ + make install + +# install emane +RUN apt-get install -y libpcap-dev libpcre3-dev libprotobuf-dev libxml2-dev protobuf-compiler uuid-dev +WORKDIR /opt +RUN git clone https://github.com/adjacentlink/emane.git +RUN cd emane && \ + ./autogen.sh && \ + ./configure --prefix=/usr && \ + make -j$(nproc) && \ + make install +RUN /opt/core/venv/bin/python -m pip install emane/src/python diff --git a/docs/architecture.md b/docs/architecture.md index ceaf7cc2..410b37ac 100644 --- a/docs/architecture.md +++ b/docs/architecture.md @@ -10,17 +10,14 @@ * Nodes are created using Linux namespaces * Links are created using Linux bridges and virtual ethernet peers * Packets sent over links are manipulated using traffic control - * Controlled via the CORE GUI - * Provides both a custom TLV API and gRPC API - * Python program that leverages a small C binary for node creation + * Provides gRPC API * core-gui - * GUI and daemon communicate over the custom TLV API + * GUI and daemon communicate over gRPC API * Drag and drop creation for nodes and links * Can launch terminals for emulated nodes in running sessions * Can save/open scenario files to recreate previous sessions - * TCL/TK program -* coresendmsg - * Command line utility for sending TLV API messages to the core-daemon +* vnoded + * Command line 
utility for creating CORE node namespaces * vcmd * Command line utility for sending shell commands to nodes @@ -55,21 +52,10 @@ Nftables provides Ethernet frame filtering on Linux bridges. Wireless networks a emulated by controlling which interfaces can send and receive with nftables rules. -## Prior Work - -The Tcl/Tk CORE GUI was originally derived from the open source -[IMUNES](http://imunes.net) project from the University of Zagreb as a custom -project within Boeing Research and Technology's Network Technology research -group in 2004. Since then they have developed the CORE framework to use Linux -namespacing, have developed a Python framework, and made numerous user and -kernel-space developments, such as support for wireless networks, IPsec, -distribute emulation, simulation integration, and more. The IMUNES project -also consists of userspace and kernel components. - ## Open Source Project and Resources CORE has been released by Boeing to the open source community under the BSD license. If you find CORE useful for your work, please contribute back to the project. Contributions can be as simple as reporting a bug, dropping a line of -encouragement or technical suggestions to the mailing lists, or can also -include submitting patches or maintaining aspects of the tool. +encouragement, or can also include submitting patches or maintaining aspects +of the tool. diff --git a/docs/devguide.md b/docs/devguide.md index f8524da4..fe25e306 100644 --- a/docs/devguide.md +++ b/docs/devguide.md @@ -5,7 +5,7 @@ ## Repository Overview -The CORE source consists of several different programming languages for +The CORE source consists of several programming languages for historical reasons. Current development focuses on the Python modules and daemon. Here is a brief description of the source directories. @@ -13,7 +13,6 @@ daemon. Here is a brief description of the source directories. 
|-----------|--------------------------------------------------------------------------------------| | daemon | Python CORE daemon/gui code that handles receiving API calls and creating containers | | docs | Markdown Documentation currently hosted on GitHub | -| gui | Tcl/Tk GUI | | man | Template files for creating man pages for various CORE command line utilities | | netns | C program for creating CORE containers | @@ -58,7 +57,7 @@ sudo core-daemon # run python gui core-pygui -# run tcl gui +# run gui core-gui # run mocked unit tests diff --git a/docs/diagrams/architecture.plantuml b/docs/diagrams/architecture.plantuml index a43494d5..403886d9 100644 --- a/docs/diagrams/architecture.plantuml +++ b/docs/diagrams/architecture.plantuml @@ -1,5 +1,5 @@ @startuml -skinparam { +skinparam { RoundCorner 8 ComponentStyle uml2 ComponentBorderColor #Black @@ -9,7 +9,6 @@ skinparam { package User { component "core-gui" as gui #DeepSkyBlue - component "coresendmsg" #DeepSkyBlue component "python scripts" as scripts #DeepSkyBlue component vcmd #DeepSkyBlue } @@ -19,11 +18,11 @@ package Server { package Python { component core #LightSteelBlue } -package "Linux System" { +package "Linux System" { component nodes #SpringGreen [ nodes (linux namespaces) - ] + ] component links #SpringGreen [ links (bridging and traffic manipulation) @@ -31,19 +30,15 @@ package "Linux System" { } package API { - interface TLV as tlv interface gRPC as grpc } -gui <..> tlv -coresendmsg <..> tlv -scripts <..> tlv +gui <..> grpc scripts <..> grpc -tlv -- daemon grpc -- daemon scripts -- core daemon - core core <..> nodes core <..> links vcmd <..> nodes -@enduml \ No newline at end of file +@enduml diff --git a/docs/diagrams/workflow.plantuml b/docs/diagrams/workflow.plantuml index 9aa1c04f..cff943ad 100644 --- a/docs/diagrams/workflow.plantuml +++ b/docs/diagrams/workflow.plantuml @@ -1,11 +1,11 @@ @startuml -skinparam { +skinparam { RoundCorner 8 StateBorderColor #Black StateBackgroundColor #LightSteelBlue } -Definition: Session XML/IMN +Definition: Session XML Definition: GUI Drawing Definition: Scripts @@ -37,4 +37,4 @@ Configuration -> Instantiation Instantiation -> Runtime Runtime -> Datacollect Datacollect -> Shutdown -@enduml \ No newline at end of file +@enduml diff --git a/docs/distributed.md b/docs/distributed.md index 2d46ac96..65429f03 100644 --- a/docs/distributed.md +++ b/docs/distributed.md @@ -124,7 +124,7 @@ connect_kwargs: {"key_filename": "/home/user/.ssh/core"} Within the core-gui navigate to menu option: -**Session -> Emulation servers...** +**Session -> Servers...** Within the dialog box presented, add or modify an existing server if present to use the name, address, and port for the a server you plan to use. @@ -132,12 +132,6 @@ to use the name, address, and port for the a server you plan to use. Server configurations are loaded and written to in a configuration file for the GUI. -**~/.core/servers.conf** -```conf -# name address port -server2 192.168.0.2 4038 -``` - ## Assigning Nodes The user needs to assign nodes to emulation servers in the scenario. Making no diff --git a/docs/emane.md b/docs/emane.md index d4f4e42c..dfa30897 100644 --- a/docs/emane.md +++ b/docs/emane.md @@ -80,7 +80,7 @@ EMANE. 
An example emane section from the **core.conf** file is shown below: emane_platform_port = 8101 emane_transform_port = 8201 emane_event_monitor = False -#emane_models_dir = /home/username/.core/myemane +#emane_models_dir = /home//.coregui/custom_emane # EMANE log level range [0,4] default: 2 emane_log_level = 2 emane_realtime = True @@ -242,7 +242,7 @@ directory to find the generated EMANE xml files. One easy way to view this information is by double-clicking one of the virtual nodes and listing the files in the shell. -![](static/single-pc-emane.png) +![](static/emane-single-pc.png) ## Distributed EMANE @@ -277,7 +277,7 @@ it will be emulated locally. Using the EMANE node configuration dialog. You can change the EMANE model being used, along with changing any configuration setting from their defaults. -![](static/distributed-emane-configuration.png) +![](static/emane-configuration.png) > **NOTE:** Here is a quick checklist for distributed emulation with EMANE. @@ -304,5 +304,3 @@ Double-clicking on a node during runtime will cause the GUI to attempt to SSH to the emulation server for that node and run an interactive shell. The public key SSH configuration should be tested with all emulation servers prior to starting the emulation. - -![](static/distributed-emane-network.png) diff --git a/docs/emane/antenna.md b/docs/emane/antenna.md index 20c98304..c8a86eaa 100644 --- a/docs/emane/antenna.md +++ b/docs/emane/antenna.md @@ -335,7 +335,7 @@ Create `/tmp/emane/blockageaft.xml` with the following contents. ## Run Demo 1. Select `Open...` within the GUI 1. Load `emane-demo-antenna.xml` -1. Click ![Start Button](../static/gui/start.gif) +1. Click ![Start Button](../static/gui/start.png) 1. After startup completes, double click n1 to bring up the nodes terminal ## Example Demo diff --git a/docs/emane/eel.md b/docs/emane/eel.md index 0f41c357..ca094542 100644 --- a/docs/emane/eel.md +++ b/docs/emane/eel.md @@ -11,7 +11,7 @@ for more specifics. ## Run Demo 1. Select `Open...` within the GUI 1. Load `emane-demo-eel.xml` -1. Click ![Start Button](../static/gui/start.gif) +1. Click ![Start Button](../static/gui/start.png) 1. After startup completes, double click n1 to bring up the nodes terminal ## Example Demo diff --git a/docs/emane/files.md b/docs/emane/files.md index 62729ac8..c9bc35e8 100644 --- a/docs/emane/files.md +++ b/docs/emane/files.md @@ -12,7 +12,7 @@ may provide more helpful details. ## Run Demo 1. Select `Open...` within the GUI 1. Load `emane-demo-files.xml` -1. Click ![Start Button](../static/gui/start.gif) +1. Click ![Start Button](../static/gui/start.png) 1. After startup completes, double click n1 to bring up the nodes terminal ## Example Demo @@ -21,14 +21,14 @@ case we are running the RF Pipe model. 
### Generated Files -|Name|Description| -|---|---| -|\-platform.xml|configuration file for the emulator instances| -|\-nem.xml|configuration for creating a NEM| -|\-mac.xml|configuration for defining a NEMs MAC layer| -|\-phy.xml|configuration for defining a NEMs PHY layer| -|\-trans-virtual.xml|configuration when a virtual transport is being used| -|\-trans.xml|configuration when a raw transport is being used| +| Name | Description | +|-------------------------------------|------------------------------------------------------| +| \-platform.xml | configuration file for the emulator instances | +| \-nem.xml | configuration for creating a NEM | +| \-mac.xml | configuration for defining a NEMs MAC layer | +| \-phy.xml | configuration for defining a NEMs PHY layer | +| \-trans-virtual.xml | configuration when a virtual transport is being used | +| \-trans.xml | configuration when a raw transport is being used | ### Listing File Below are the files within n1 after starting the demo session. diff --git a/docs/emane/gpsd.md b/docs/emane/gpsd.md index 06c44198..f20cc8fe 100644 --- a/docs/emane/gpsd.md +++ b/docs/emane/gpsd.md @@ -13,7 +13,7 @@ may provide more helpful details. ## Run Demo 1. Select `Open...` within the GUI 1. Load `emane-demo-gpsd.xml` -1. Click ![Start Button](../static/gui/start.gif) +1. Click ![Start Button](../static/gui/start.png) 1. After startup completes, double click n1 to bring up the nodes terminal ## Example Demo diff --git a/docs/emane/precomputed.md b/docs/emane/precomputed.md index f8064c97..53da75eb 100644 --- a/docs/emane/precomputed.md +++ b/docs/emane/precomputed.md @@ -11,7 +11,7 @@ for more specifics. ## Run Demo 1. Select `Open...` within the GUI 1. Load `emane-demo-precomputed.xml` -1. Click ![Start Button](../static/gui/start.gif) +1. Click ![Start Button](../static/gui/start.png) 1. After startup completes, double click n1 to bring up the nodes terminal ## Example Demo diff --git a/docs/gui.md b/docs/gui.md index d59f30c5..9914e30a 100644 --- a/docs/gui.md +++ b/docs/gui.md @@ -1,12 +1,10 @@ -# Using the CORE GUI +# CORE GUI * Table of Contents {:toc} -The following image shows the CORE GUI: -![](static/core_screenshot.png) - +![](static/core-gui.png) ## Overview @@ -28,45 +26,54 @@ Beyond installing CORE, you must have the CORE daemon running. This is done on the command line with either systemd or sysv. ```shell -# systemd +# systemd service sudo systemctl daemon-reload sudo systemctl start core-daemon -# sysv -sudo service core-daemon start -``` - -You can also invoke the daemon directly from the command line, which can be -useful if you'd like to see the logging output directly. - -```shell # direct invocation sudo core-daemon ``` +## GUI Files + +The GUI will create a directory in your home directory on first run called +~/.coregui. This directory will help layout various files that the GUI may use. 
+ +* .coregui/ + * backgrounds/ + * place backgrounds used for display in the GUI + * custom_emane/ + * place to keep custom emane models to use with the core-daemon + * custom_services/ + * place to keep custom services to use with the core-daemon + * icons/ + * icons the GUI uses along with customs icons desired + * mobility/ + * place to keep custom mobility files + * scripts/ + * place to keep core related scripts + * xmls/ + * place to keep saved session xml files + * gui.log + * log file when running the gui, look here when issues occur for exceptions etc + * config.yaml + * configuration file used to save/load various gui related settings (custom nodes, layouts, addresses, etc) + ## Modes of Operation The CORE GUI has two primary modes of operation, **Edit** and **Execute** -modes. Running the GUI, by typing **core-gui** with no options, starts in -Edit mode. Nodes are drawn on a blank canvas using the toolbar on the left +modes. Running the GUI, by typing **core-pygui** with no options, starts in +Edit mode. Nodes are drawn on a blank canvas using the toolbar on the left and configured from right-click menus or by double-clicking them. The GUI does not need to be run as root. -Once editing is complete, pressing the green **Start** button (or choosing -**Execute** from the **Session** menu) instantiates the topology within the -Linux kernel and enters Execute mode. In execute mode, the user can interact -with the running emulated machines by double-clicking or right-clicking on -them. The editing toolbar disappears and is replaced by an execute toolbar, -which provides tools while running the emulation. Pressing the red **Stop** -button (or choosing **Terminate** from the **Session** menu) will destroy -the running emulation and return CORE to Edit mode. - -CORE can be started directly in Execute mode by specifying **--start** and a -topology file on the command line: - -```shell -core-gui --start ~/.core/configs/myfile.imn -``` +Once editing is complete, pressing the green **Start** button instantiates +the topology and enters Execute mode. In execute mode, +the user can interact with the running emulated machines by double-clicking or +right-clicking on them. The editing toolbar disappears and is replaced by an +execute toolbar, which provides tools while running the emulation. Pressing +the red **Stop** button will destroy the running emulation and return CORE +to Edit mode. Once the emulation is running, the GUI can be closed, and a prompt will appear asking if the emulation should be terminated. The emulation may be left @@ -74,11 +81,22 @@ running and the GUI can reconnect to an existing session at a later time. The GUI can be run as a normal user on Linux. -The GUI can be connected to a different address or TCP port using the -**--address** and/or **--port** options. The defaults are shown below. +The GUI currently provides the following options on startup. ```shell -core-gui --address 127.0.0.1 --port 4038 +usage: core-gui [-h] [-l {DEBUG,INFO,WARNING,ERROR,CRITICAL}] [-p] + [-s SESSION] [--create-dir] + +CORE Python GUI + +optional arguments: + -h, --help show this help message and exit + -l {DEBUG,INFO,WARNING,ERROR,CRITICAL}, --level {DEBUG,INFO,WARNING,ERROR,CRITICAL} + logging level + -p, --proxy enable proxy + -s SESSION, --session SESSION + session id to join + --create-dir create gui directory and exit ``` ## Toolbar @@ -95,22 +113,21 @@ sub-menus, which appear when you click on their group icon. 
| Icon | Name | Description | |----------------------------|----------------|----------------------------------------------------------------------------------------| -| ![](static/gui/select.gif) | Selection Tool | Tool for selecting, moving, configuring nodes. | -| ![](static/gui/start.gif) | Start Button | Starts Execute mode, instantiates the emulation. | -| ![](static/gui/link.gif) | Link | Allows network links to be drawn between two nodes by clicking and dragging the mouse. | +| ![](static/gui/select.png) | Selection Tool | Tool for selecting, moving, configuring nodes. | +| ![](static/gui/start.png) | Start Button | Starts Execute mode, instantiates the emulation. | +| ![](static/gui/link.png) | Link | Allows network links to be drawn between two nodes by clicking and dragging the mouse. | ### CORE Nodes These nodes will create a new node container and run associated services. -| Icon | Name | Description | -|-----------------------------------------|---------|------------------------------------------------------------------------------| -| ![](static/gui/router.gif) | Router | Runs Quagga OSPFv2 and OSPFv3 routing to forward packets. | -| ![](static/gui/host.gif) | Host | Emulated server machine having a default route, runs SSH server. | -| ![](static/gui/pc.gif) | PC | Basic emulated machine having a default route, runs no processes by default. | -| ![](static/gui/mdr.gif) | MDR | Runs Quagga OSPFv3 MDR routing for MANET-optimized routing. | -| ![](static/gui/router_green.gif) | PRouter | Physical router represents a real testbed machine. | -| ![](static/gui/document-properties.gif) | Edit | Bring up the custom node dialog. | +| Icon | Name | Description | +|----------------------------|---------|------------------------------------------------------------------------------| +| ![](static/gui/router.png) | Router | Runs Quagga OSPFv2 and OSPFv3 routing to forward packets. | +| ![](static/gui/host.png) | Host | Emulated server machine having a default route, runs SSH server. | +| ![](static/gui/pc.png) | PC | Basic emulated machine having a default route, runs no processes by default. | +| ![](static/gui/mdr.png) | MDR | Runs Quagga OSPFv3 MDR routing for MANET-optimized routing. | +| ![](static/gui/router.png) | PRouter | Physical router represents a real testbed machine. | ### Network Nodes @@ -119,20 +136,20 @@ purpose described below. | Icon | Name | Description | |-------------------------------|--------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| ![](static/gui/hub.gif) | Hub | Ethernet hub forwards incoming packets to every connected node. | -| ![](static/gui/lanswitch.gif) | Switch | Ethernet switch intelligently forwards incoming packets to attached hosts using an Ethernet address hash table. | -| ![](static/gui/wlan.gif) | Wireless LAN | When routers are connected to this WLAN node, they join a wireless network and an antenna is drawn instead of a connecting line; the WLAN node typically controls connectivity between attached wireless nodes based on the distance between them. | -| ![](static/gui/rj45.gif) | RJ45 | RJ45 Physical Interface Tool, emulated nodes can be linked to real physical interfaces; using this tool, real networks and devices can be physically connected to the live-running emulation. 
| -| ![](static/gui/tunnel.gif) | Tunnel | Tool allows connecting together more than one CORE emulation using GRE tunnels. | +| ![](static/gui/hub.png) | Hub | Ethernet hub forwards incoming packets to every connected node. | +| ![](static/gui/lanswitch.png) | Switch | Ethernet switch intelligently forwards incoming packets to attached hosts using an Ethernet address hash table. | +| ![](static/gui/wlan.png) | Wireless LAN | When routers are connected to this WLAN node, they join a wireless network and an antenna is drawn instead of a connecting line; the WLAN node typically controls connectivity between attached wireless nodes based on the distance between them. | +| ![](static/gui/rj45.png) | RJ45 | RJ45 Physical Interface Tool, emulated nodes can be linked to real physical interfaces; using this tool, real networks and devices can be physically connected to the live-running emulation. | +| ![](static/gui/tunnel.png) | Tunnel | Tool allows connecting together more than one CORE emulation using GRE tunnels. | ### Annotation Tools | Icon | Name | Description | |-------------------------------|-----------|---------------------------------------------------------------------| -| ![](static/gui/marker.gif) | Marker | For drawing marks on the canvas. | -| ![](static/gui/oval.gif) | Oval | For drawing circles on the canvas that appear in the background. | -| ![](static/gui/rectangle.gif) | Rectangle | For drawing rectangles on the canvas that appear in the background. | -| ![](static/gui/text.gif) | Text | For placing text captions on the canvas. | +| ![](static/gui/marker.png) | Marker | For drawing marks on the canvas. | +| ![](static/gui/oval.png) | Oval | For drawing circles on the canvas that appear in the background. | +| ![](static/gui/rectangle.png) | Rectangle | For drawing rectangles on the canvas that appear in the background. | +| ![](static/gui/text.png) | Text | For placing text captions on the canvas. | ### Execution Toolbar @@ -140,14 +157,12 @@ When the Start button is pressed, CORE switches to Execute mode, and the Edit toolbar on the left of the CORE window is replaced with the Execution toolbar Below are the items on this toolbar, starting from the top. -| Icon | Name | Description | -|-----------------------------|-----------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| ![](static/gui/select.gif) | Selection Tool | In Execute mode, the Selection Tool can be used for moving nodes around the canvas, and double-clicking on a node will open a shell window for that node; right-clicking on a node invokes a pop-up menu of run-time options for that node. | -| ![](static/gui/stop.gif) | Stop Button | Stops Execute mode, terminates the emulation, returns CORE to edit mode. | -| ![](static/gui/observe.gif) | Observer Widgets Tool | Clicking on this magnifying glass icon invokes a menu for easily selecting an Observer Widget. 
The icon has a darker gray background when an Observer Widget is active, during which time moving the mouse over a node will pop up an information display for that node. | -| ![](static/gui/marker.gif) | Marker | For drawing freehand lines on the canvas, useful during demonstrations; markings are not saved. | -| ![](static/gui/twonode.gif) | Two-node Tool | Click to choose a starting and ending node, and run a one-time *traceroute* between those nodes or a continuous *ping -R* between nodes. The output is displayed in real time in a results box, while the IP addresses are parsed and the complete network path is highlighted on the CORE display. | -| ![](static/gui/run.gif) | Run Tool | This tool allows easily running a command on all or a subset of all nodes. A list box allows selecting any of the nodes. A text entry box allows entering any command. The command should return immediately, otherwise the display will block awaiting response. The *ping* command, for example, with no parameters, is not a good idea. The result of each command is displayed in a results box. The first occurrence of the special text "NODE" will be replaced with the node name. The command will not be attempted to run on nodes that are not routers, PCs, or hosts, even if they are selected. | +| Icon | Name | Description | +|----------------------------|----------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| ![](static/gui/stop.png) | Stop Button | Stops Execute mode, terminates the emulation, returns CORE to edit mode. | +| ![](static/gui/select.png) | Selection Tool | In Execute mode, the Selection Tool can be used for moving nodes around the canvas, and double-clicking on a node will open a shell window for that node; right-clicking on a node invokes a pop-up menu of run-time options for that node. | +| ![](static/gui/marker.png) | Marker | For drawing freehand lines on the canvas, useful during demonstrations; markings are not saved. | +| ![](static/gui/run.png) | Run Tool | This tool allows easily running a command on all or a subset of all nodes. A list box allows selecting any of the nodes. A text entry box allows entering any command. The command should return immediately, otherwise the display will block awaiting response. The *ping* command, for example, with no parameters, is not a good idea. The result of each command is displayed in a results box. The first occurrence of the special text "NODE" will be replaced with the node name. The command will not be attempted to run on nodes that are not routers, PCs, or hosts, even if they are selected. | ## Menu @@ -157,98 +172,61 @@ menu, by clicking the dashed line at the top. ### File Menu -The File menu contains options for manipulating the **.imn** Configuration -Files. Generally, these menu items should not be used in Execute mode. +The File menu contains options for saving and opening saved sessions. 
-| Option | Description | -|------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| New | This starts a new file with an empty canvas. | -| Open | Invokes the File Open dialog box for selecting a new **.imn** or XML file to open. You can change the default path used for this dialog in the Preferences Dialog. | -| Save | Saves the current topology. If you have not yet specified a file name, the Save As dialog box is invoked. | -| Save As XML | Invokes the Save As dialog box for selecting a new **.xml** file for saving the current configuration in the XML file. | -| Save As imn | Invokes the Save As dialog box for selecting a new **.imn** topology file for saving the current configuration. Files are saved in the *IMUNES network configuration* file. | -| Export Python script | Prints Python snippets to the console, for inclusion in a CORE Python script. | -| Execute XML or Python script | Invokes a File Open dialog box for selecting an XML file to run or a Python script to run and automatically connect to. If a Python script, the script must create a new CORE Session and add this session to the daemon's list of sessions in order for this to work. | -| Execute Python script with options | Invokes a File Open dialog box for selecting a Python script to run and automatically connect to. After a selection is made, a Python Script Options dialog box is invoked to allow for command-line options to be added. The Python script must create a new CORE Session and add this session to the daemon's list of sessions in order for this to work. | -| Open current file in editor | This opens the current topology file in the **vim** text editor. First you need to save the file. Once the file has been edited with a text editor, you will need to reload the file to see your changes. The text editor can be changed from the Preferences Dialog. | -| Print | This uses the Tcl/Tk postscript command to print the current canvas to a printer. A dialog is invoked where you can specify a printing command, the default being **lpr**. The postscript output is piped to the print command. | -| Save screenshot | Saves the current canvas as a postscript graphic file. | -| Recently used files | Above the Quit menu command is a list of recently use files, if any have been opened. You can clear this list in the Preferences dialog box. You can specify the number of files to keep in this list from the Preferences dialog. Click on one of the file names listed to open that configuration file. | -| Quit | The Quit command should be used to exit the CORE GUI. CORE may prompt for termination if you are currently in Execute mode. Preferences and the recently-used files list are saved. | +| Option | Description | +|-----------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| New Session | This starts a new session with an empty canvas. | +| Save | Saves the current topology. 
If you have not yet specified a file name, the Save As dialog box is invoked. | +| Save As               | Invokes the Save As dialog box for selecting a new **.xml** file for saving the current configuration in the XML file. | +| Open                  | Invokes the File Open dialog box for selecting a new XML file to open. | +| Recently used files   | Above the Quit menu command is a list of recently used files, if any have been opened. You can clear this list in the Preferences dialog box. You can specify the number of files to keep in this list from the Preferences dialog. Click on one of the file names listed to open that configuration file. | +| Execute Python Script | Invokes a File Open dialog box for selecting a Python script to run and automatically connect to. After a selection is made, a Python Script Options dialog box is invoked to allow for command-line options to be added. The Python script must create a new CORE Session and add this session to the daemon's list of sessions in order for this to work. | +| Quit                  | The Quit command should be used to exit the CORE GUI. CORE may prompt for termination if you are currently in Execute mode. Preferences and the recently-used files list are saved. | ### Edit Menu -| Option | Description | -|------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Undo | Attempts to undo the last edit in edit mode. | -| Redo | Attempts to redo an edit that has been undone. | -| Cut, Copy, Paste | Used to cut, copy, and paste a selection. When nodes are pasted, their node numbers are automatically incremented, and existing links are preserved with new IP addresses assigned. Services and their customizations are copied to the new node, but care should be taken as node IP addresses have changed with possibly old addresses remaining in any custom service configurations. Annotations may also be copied and pasted. | -| Select All | Selects all items on the canvas. Selected items can be moved as a group. | -| Select Adjacent | Select all nodes that are linked to the already selected node(s). For wireless nodes this simply selects the WLAN node(s) that the wireless node belongs to. You can use this by clicking on a node and pressing CTRL+N to select the adjacent nodes. | -| Find... | Invokes the *Find* dialog box. The Find dialog can be used to search for nodes by name or number. Results are listed in a table that includes the node or link location and details such as IP addresses or link parameters. Clicking on a result will focus the canvas on that node or link, switching canvases if necessary. | -| Clear marker | Clears any annotations drawn with the marker tool. Also clears any markings used to indicate a node's status. | -| Preferences... | Invokes the Preferences dialog box.
| +| Option | Description | +|--------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Preferences | Invokes the Preferences dialog box. | +| Custom Nodes | Custom node creation dialog box. | +| Undo | (Disabled) Attempts to undo the last edit in edit mode. | +| Redo | (Disabled) Attempts to redo an edit that has been undone. | +| Cut, Copy, Paste, Delete | Used to cut, copy, paste, and delete a selection. When nodes are pasted, their node numbers are automatically incremented, and existing links are preserved with new IP addresses assigned. Services and their customizations are copied to the new node, but care should be taken as node IP addresses have changed with possibly old addresses remaining in any custom service configurations. Annotations may also be copied and pasted. | ### Canvas Menu -The canvas menu provides commands for adding, removing, changing, and switching -to different editing canvases. +The canvas menu provides commands related to the editing canvas. -| Option | Description | -|-----------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| New | Creates a new empty canvas at the right of all existing canvases. | -| Manage... | Invokes the *Manage Canvases* dialog box, where canvases may be renamed and reordered, and you can easily switch to one of the canvases by selecting it. | -| Delete | Deletes the current canvas and all items that it contains. | -| Size/scale... | Invokes a Canvas Size and Scale dialog that allows configuring the canvas size, scale, and geographic reference point. The size controls allow changing the width and height of the current canvas, in pixels or meters. The scale allows specifying how many meters are equivalent to 100 pixels. The reference point controls specify the latitude, longitude, and altitude reference point used to convert between geographic and Cartesian coordinate systems. By clicking the *Save as default* option, all new canvases will be created with these properties. The default canvas size can also be changed in the Preferences dialog box. | -| Wallpaper... | Used for setting the canvas background image. | -| Previous, Next, First, Last | Used for switching the active canvas to the first, last, or adjacent canvas. 
| +| Option | Description | +|------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Size/scale | Invokes a Canvas Size and Scale dialog that allows configuring the canvas size, scale, and geographic reference point. The size controls allow changing the width and height of the current canvas, in pixels or meters. The scale allows specifying how many meters are equivalent to 100 pixels. The reference point controls specify the latitude, longitude, and altitude reference point used to convert between geographic and Cartesian coordinate systems. By clicking the *Save as default* option, all new canvases will be created with these properties. The default canvas size can also be changed in the Preferences dialog box. | +| Wallpaper | Used for setting the canvas background image. | ### View Menu -The View menu features items for controlling what is displayed on the drawing -canvas. +The View menu features items for toggling on and off their display on the canvas. -| Option | Description | -|-------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Show | Opens a submenu of items that can be displayed or hidden, such as interface names, addresses, and labels. Use these options to help declutter the display. These options are generally saved in the topology files, so scenarios have a more consistent look when copied from one computer to another. | -| Show hidden nodes | Reveal nodes that have been hidden. Nodes are hidden by selecting one or more nodes, right-clicking one and choosing *hide*. | -| Locked | Toggles locked view; when the view is locked, nodes cannot be moved around on the canvas with the mouse. This could be useful when sharing the topology with someone and you do not expect them to change things. | -| 3D GUI... | Launches a 3D GUI by running the command defined under Preferences, *3D GUI command*. This is typically a script that runs the SDT3D display. SDT is the Scripted Display Tool from NRL that is based on NASA's Java-based WorldWind virtual globe software. | -| Zoom In | Magnifies the display. You can also zoom in by clicking *zoom 100%* label in the status bar, or by pressing the **+** (plus) key. | -| Zoom Out | Reduces the size of the display. You can also zoom out by right-clicking *zoom 100%* label in the status bar or by pressing the **-** (minus) key. | +| Option | Description | +|-----------------|-----------------------------------| +| Interface Names | Display interface names on links. | +| IPv4 Addresses | Display IPv4 addresses on links. | +| IPv6 Addresses | Display IPv6 addresses on links. | +| Node Labels | Display node names. | +| Link Labels | Display link labels. | +| Annotations | Display annotations. 
| +| Canvas Grid | Display the canvas grid. | ### Tools Menu The tools menu lists different utility functions. -| Option | Description | -|------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Autorearrange all | Automatically arranges all nodes on the canvas. Nodes having a greater number of links are moved to the center. This mode can continue to run while placing nodes. To turn off this autorearrange mode, click on a blank area of the canvas with the select tool, or choose this menu option again. | -| Autorearrange selected | Automatically arranges the selected nodes on the canvas. | -| Align to grid | Moves nodes into a grid formation, starting with the smallest-numbered node in the upper-left corner of the canvas, arranging nodes in vertical columns. | -| Traffic... | Invokes the CORE Traffic Flows dialog box, which allows configuring, starting, and stopping MGEN traffic flows for the emulation. | -| IP addresses... | Invokes the IP Addresses dialog box for configuring which IPv4/IPv6 prefixes are used when automatically addressing new interfaces. | -| MAC addresses... | Invokes the MAC Addresses dialog box for configuring the starting number used as the lowest byte when generating each interface MAC address. This value should be changed when tunneling between CORE emulations to prevent MAC address conflicts. | -| Build hosts file... | Invokes the Build hosts File dialog box for generating **/etc/hosts** file entries based on IP addresses used in the emulation. | -| Renumber nodes... | Invokes the Renumber Nodes dialog box, which allows swapping one node number with another in a few clicks. | -| Experimental... | Menu of experimental options, such as a tool to convert ns-2 scripts to IMUNES imn topologies, supporting only basic ns-2 functionality, and a tool for automatically dividing up a topology into partitions. | -| Topology generator | Opens a submenu of topologies to generate. You can first select the type of node that the topology should consist of, or routers will be chosen by default. Nodes may be randomly placed, aligned in grids, or various other topology patterns. All of the supported patterns are listed in the table below. | -| Debugger... | Opens the CORE Debugger window for executing arbitrary Tcl/Tk commands. | - -#### Topology Generator - -| Pattern | Description | -|----------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Random | Nodes are randomly placed about the canvas, but are not linked together. This can be used in conjunction with a WLAN node to quickly create a wireless network. | -| Grid | Nodes are placed in horizontal rows starting in the upper-left corner, evenly spaced to the right; nodes are not linked to each other. | -| Connected Grid | Nodes are placed in an N x M (width and height) rectangular grid, and each node is linked to the node above, below, left and right of itself. | -| Chain | Nodes are linked together one after the other in a chain. | -| Star | One node is placed in the center with N nodes surrounding it in a circular pattern, with each node linked to the center node. 
| -| Cycle | Nodes are arranged in a circular pattern with every node connected to its neighbor to form a closed circular path. | -| Wheel | The wheel pattern links nodes in a combination of both Star and Cycle patterns. | -| Cube | Generate a cube graph of nodes. | -| Clique | Creates a clique graph of nodes, where every node is connected to every other node. | -| Bipartite | Creates a bipartite graph of nodes, having two disjoint sets of vertices. | +| Option | Description | +|---------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Find | Display find dialog used for highlighting a node on the canvas. | +| Auto Grid | Automatically layout nodes in a grid. | +| IP addresses | Invokes the IP Addresses dialog box for configuring which IPv4/IPv6 prefixes are used when automatically addressing new interfaces. | +| MAC addresses | Invokes the MAC Addresses dialog box for configuring the starting number used as the lowest byte when generating each interface MAC address. This value should be changed when tunneling between CORE emulations to prevent MAC address conflicts. | ### Widgets Menu @@ -275,30 +253,29 @@ Here are some standard widgets: Only half of the line is drawn because each router may be in a different adjacency state with respect to the other. * **Throughput** - displays the kilobits-per-second throughput above each link, - using statistics gathered from the ng_pipe Netgraph node that implements each - link. If the throughput exceeds a certain threshold, the link will become - highlighted. For wireless nodes which broadcast data to all nodes in range, - the throughput rate is displayed next to the node and the node will become - circled if the threshold is exceeded. + using statistics gathered from each link. If the throughput exceeds a certain + threshold, the link will become highlighted. For wireless nodes which broadcast + data to all nodes in range, the throughput rate is displayed next to the node and + the node will become circled if the threshold is exceeded. #### Observer Widgets -These Widgets are available from the *Observer Widgets* submenu of the -*Widgets* menu, and from the Widgets Tool on the toolbar. Only one Observer Widget may +These Widgets are available from the **Observer Widgets** submenu of the +**Widgets** menu, and from the Widgets Tool on the toolbar. Only one Observer Widget may be used at a time. Mouse over a node while the session is running to pop up an informational display about that node. Available Observer Widgets include IPv4 and IPv6 routing tables, socket information, list of running processes, and OSPFv2/v3 neighbor information. -Observer Widgets may be edited by the user and rearranged. Choosing *Edit...* -from the Observer Widget menu will invoke the Observer Widgets dialog. A list -of Observer Widgets is displayed along with up and down arrows for rearranging -the list. Controls are available for renaming each widget, for changing the -command that is run during mouse over, and for adding and deleting items from -the list. Note that specified commands should return immediately to avoid -delays in the GUI display. Changes are saved to a **widgets.conf** file in -the CORE configuration directory. +Observer Widgets may be edited by the user and rearranged. 
Choosing +**Widgets->Observer Widgets->Edit Observers** from the Observer Widget menu will +invoke the Observer Widgets dialog. A list of Observer Widgets is displayed along +with up and down arrows for rearranging the list. Controls are available for +renaming each widget, for changing the command that is run during mouse over, and +for adding and deleting items from the list. Note that specified commands should +return immediately to avoid delays in the GUI display. Changes are saved to a +**config.yaml** file in the CORE configuration directory. ### Session Menu @@ -306,28 +283,23 @@ The Session Menu has entries for starting, stopping, and managing sessions, in addition to global options such as node types, comments, hooks, servers, and options. -| Option | Description | -|----------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Start or Stop | This starts or stops the emulation, performing the same function as the green Start or red Stop button. | -| Change sessions... | Invokes the CORE Sessions dialog box containing a list of active CORE sessions in the daemon. Basic session information such as name, node count, start time, and a thumbnail are displayed. This dialog allows connecting to different sessions, shutting them down, or starting a new session. | -| Node types... | Invokes the CORE Node Types dialog, performing the same function as the Edit button on the Network-Layer Nodes toolbar. | -| Comments... | Invokes the CORE Session Comments window where optional text comments may be specified. These comments are saved at the top of the configuration file, and can be useful for describing the topology or how to use the network. | -| Hooks... | Invokes the CORE Session Hooks window where scripts may be configured for a particular session state. The session states are defined in the [table](#session-states) below. The top of the window has a list of configured hooks, and buttons on the bottom left allow adding, editing, and removing hook scripts. The new or edit button will open a hook script editing window. A hook script is a shell script invoked on the host (not within a virtual node). | -| Reset node positions | If you have moved nodes around using the mouse or by using a mobility module, choosing this item will reset all nodes to their original position on the canvas. The node locations are remembered when you first press the Start button. | -| Emulation servers... | Invokes the CORE emulation servers dialog for configuring. | -| Change Sessions... | Invokes the Sessions dialog for switching between different running sessions. This dialog is presented during startup when one or more sessions are already running. | -| Options... | Presents per-session options, such as the IPv4 prefix to be used, if any, for a control network the ability to preserve the session directory; and an on/off switch for SDT3D support. 
| +| Option | Description | +|----------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| Sessions | Invokes the CORE Sessions dialog box containing a list of active CORE sessions in the daemon. Basic session information such as name, node count, start time, and a thumbnail are displayed. This dialog allows connecting to different sessions, shutting them down, or starting a new session. | +| Servers | Invokes the CORE emulation servers dialog for configuring. | +| Options | Presents per-session options, such as the IPv4 prefix to be used, if any, for a control network the ability to preserve the session directory; and an on/off switch for SDT3D support. | +| Hooks | Invokes the CORE Session Hooks window where scripts may be configured for a particular session state. The session states are defined in the [table](#session-states) below. The top of the window has a list of configured hooks, and buttons on the bottom left allow adding, editing, and removing hook scripts. The new or edit button will open a hook script editing window. A hook script is a shell script invoked on the host (not within a virtual node). | #### Session States | State | Description | |---------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| definition | Used by the GUI to tell the backend to clear any state. | -| configuration | When the user presses the *Start* button, node, link, and other configuration data is sent to the backend. This state is also reached when the user customizes a service. | -| instantiation | After configuration data has been sent, just before the nodes are created. | -| runtime | All nodes and networks have been built and are running. (This is the same state at which the previously-named *global experiment script* was run.) | -| datacollect | The user has pressed the *Stop* button, but before services have been stopped and nodes have been shut down. This is a good time to collect log files and other data from the nodes. | -| shutdown | All nodes and networks have been shut down and destroyed. | +| Definition | Used by the GUI to tell the backend to clear any state. | +| Configuration | When the user presses the *Start* button, node, link, and other configuration data is sent to the backend. This state is also reached when the user customizes a service. | +| Instantiation | After configuration data has been sent, just before the nodes are created. | +| Runtime | All nodes and networks have been built and are running. (This is the same state at which the previously-named *global experiment script* was run.) | +| Datacollect | The user has pressed the *Stop* button, but before services have been stopped and nodes have been shut down. This is a good time to collect log files and other data from the nodes. | +| Shutdown | All nodes and networks have been shut down and destroyed. | ### Help Menu @@ -341,13 +313,13 @@ and options. CORE's emulated networks run in real time, so they can be connected to live physical networks. 
The RJ45 tool and the Tunnel tool help with connecting to -the real world. These tools are available from the *Link-layer nodes* menu. +the real world. These tools are available from the **Link-layer nodes** menu. When connecting two or more CORE emulations together, MAC address collisions should be avoided. CORE automatically assigns MAC addresses to interfaces when the emulation is started, starting with **00:00:00:aa:00:00** and incrementing the bottom byte. The starting byte should be changed on the second CORE machine -using the *MAC addresses...* option from the *Tools* menu. +using the **Tools->MAC Addresses** option the menu. ### RJ45 Tool @@ -360,8 +332,8 @@ connection. When the physical interface is assigned to CORE, it may not be used for anything else. Another consideration is that the computer or network that you are connecting to must be co-located with the CORE machine. -To place an RJ45 connection, click on the *Link-layer nodes* toolbar and select -the *RJ45 Tool* from the submenu. Click on the canvas near the node you want to +To place an RJ45 connection, click on the **Link-layer nodes** toolbar and select +the **RJ45 Tool** from the submenu. Click on the canvas near the node you want to connect to. This could be a router, hub, switch, or WLAN, for example. Now click on the *Link Tool* and draw a link between the RJ45 and the other node. The RJ45 node will display "UNASSIGNED". Double-click the RJ45 node to assign a @@ -459,7 +431,7 @@ firewall is not blocking the GRE traffic. The host machine that runs the CORE GUI and/or daemon is not necessarily accessible from a node. Running an X11 application on a node, for example, requires some channel of communication for the application to connect with -the X server for graphical display. There are several different ways to +the X server for graphical display. There are different ways to connect from the node to the host and vice versa. #### Control Network @@ -476,12 +448,6 @@ the node, and SSH with X11 forwarding can be used from the host to the node. ssh -X 172.16.0.5 xclock ``` -Note that the **coresendmsg** utility can be used for a node to send -messages to the CORE daemon running on the host (if the **listenaddr = 0.0.0.0** -is set in the **/etc/core/core.conf** file) to interact with the running -emulation. For example, a node may move itself or other nodes, or change -its icon based on some node state. - #### Other Methods There are still other ways to connect a host with a node. The RJ45 Tool @@ -509,11 +475,11 @@ the node linked to the RJ45 may have the address **10.0.1.1**. ### Wired Networks -Wired networks are created using the *Link Tool* to draw a link between two +Wired networks are created using the **Link Tool** to draw a link between two nodes. This automatically draws a red line representing an Ethernet link and creates new interfaces on network-layer nodes. -Double-click on the link to invoke the *link configuration* dialog box. Here +Double-click on the link to invoke the **link configuration** dialog box. Here you can change the Bandwidth, Delay, Loss, and Duplicate rate parameters for that link. You can also modify the color and width of the link, affecting its display. @@ -529,32 +495,43 @@ The wireless LAN (WLAN) is covered in the next section. ### Wireless Networks -The wireless LAN node allows you to build wireless networks where moving nodes -around affects the connectivity between them. 
Connection between a pair of nodes is stronger -when the nodes are closer while connection is weaker when the nodes are further away. -The wireless LAN, or WLAN, node appears as a small cloud. The WLAN offers -several levels of wireless emulation fidelity, depending on your modeling needs. +Wireless networks allow moving nodes around to impact the connectivity between them. The connection between a +pair of nodes is stronger when the nodes are closer and weaker when the nodes are further away. +CORE offers several levels of wireless emulation fidelity, depending on modeling needs and available +hardware. -The WLAN tool can be extended with plug-ins for different levels of wireless -fidelity. The basic on/off range is the default setting available on all -platforms. Other plug-ins offer higher fidelity at the expense of greater -complexity and CPU usage. The availability of certain plug-ins varies depending -on platform. See the table below for a brief overview of wireless model types. +* WLAN Node + * uses set bandwidth, delay, and loss + * links are enabled or disabled based on a set range + * uses the least CPU when moving, but nothing extra when not moving +* Wireless Node + * uses set bandwidth, delay, and initial loss + * loss dynamically changes based on distance between nodes, which can be configured with range parameters + * links are enabled or disabled based on a set range + * uses more CPU to calculate loss for every movement, but nothing extra when not moving +* EMANE Node + * uses a physical layer model to account for signal propagation, antenna profile effects and interference + sources in order to provide a realistic environment for wireless experimentation + * uses the most CPU for every packet, as complex calculations are used for fidelity + * See [Wiki](https://github.com/adjacentlink/emane/wiki) for details on general EMANE usage + * See [CORE EMANE](emane.md) for details on using EMANE in CORE -| Model | Type | Supported Platform(s) | Fidelity | Description | |-------|---------|-----------------------|----------|-------------------------------------------------------------------------------| -| Basic | on/off | Linux | Low | Ethernet bridging with nftables | -| EMANE | Plug-in | Linux | High | TAP device connected to EMANE emulator with pluggable MAC and PHY radio types | +| Model | Type | Supported Platform(s) | Fidelity | Description | +|----------|--------|-----------------------|----------|-------------------------------------------------------------------------------| +| WLAN | On/Off | Linux | Low | Ethernet bridging with nftables | +| Wireless | On/Off | Linux | Medium | Ethernet bridging with nftables | +| EMANE | RF | Linux | High | TAP device connected to EMANE emulator with pluggable MAC and PHY radio types | + +#### Example WLAN Network Setup To quickly build a wireless network, you can first place several router nodes onto the canvas. If you have the Quagga MDR software installed, it is -recommended that you use the *mdr* node type for reduced routing overhead. Next -choose the *wireless LAN* from the *Link-layer nodes* submenu. First set the +recommended that you use the **mdr** node type for reduced routing overhead. Next +choose the **WLAN** from the **Link-layer nodes** submenu. First set the desired WLAN parameters by double-clicking the cloud icon. Then you can link -all of the routers by right-clicking on the WLAN and choosing *Link to all -routers*. +all selected nodes by right-clicking on the WLAN and choosing **Link to Selected**.
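+
+The same WLAN topology can also be created programmatically. The sketch below uses the gRPC client that ships with the core-daemon; the wrapper names shown (`CoreGrpcClient`, `InterfaceHelper`, `Position`, `NodeType`, `add_node`, `add_link`, `start_session`) and the `model="mdr"` option are assumptions based on the refactored gRPC API, so consult [gRPC API](grpc.md) for the exact calls.
+
+```python
+from core.api.grpc import client
+from core.api.grpc.wrappers import NodeType, Position
+
+# helper for generating interface addresses; the prefixes here are arbitrary examples
+iface_helper = client.InterfaceHelper(ip4_prefix="10.0.0.0/24", ip6_prefix="2001::/64")
+
+# connect to the locally running core-daemon and create a new session
+core = client.CoreGrpcClient()
+core.connect()
+session = core.create_session()
+
+# add a WLAN node and two routers using the assumed "mdr" model
+wlan = session.add_node(1, _type=NodeType.WIRELESS_LAN, position=Position(x=200, y=100))
+node1 = session.add_node(2, model="mdr", position=Position(x=100, y=200))
+node2 = session.add_node(3, model="mdr", position=Position(x=300, y=200))
+
+# link each router to the WLAN, creating an interface on the router side only
+session.add_link(node1=node1, node2=wlan, iface1=iface_helper.create_iface(node1.id, 0))
+session.add_link(node1=node2, node2=wlan, iface1=iface_helper.create_iface(node2.id, 0))
+
+# instantiate the topology, equivalent to pressing the Start button
+core.start_session(session)
+```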
Linking a router to the WLAN causes a small antenna to appear, but no red link line is drawn. Routers can have multiple wireless links and both wireless and @@ -564,28 +541,24 @@ enables OSPFv3 with MANET extensions. This is a Boeing-developed extension to Quagga's OSPFv3 that reduces flooding overhead and optimizes the flooding procedure for mobile ad-hoc (MANET) networks. -The default configuration of the WLAN is set to use the basic range model, -using the *Basic* tab in the WLAN configuration dialog. Having this model +The default configuration of the WLAN is set to use the basic range model. Having this model selected causes **core-daemon** to calculate the distance between nodes based on screen pixels. A numeric range in screen pixels is set for the wireless -network using the *Range* slider. When two wireless nodes are within range of +network using the **Range** slider. When two wireless nodes are within range of each other, a green line is drawn between them and they are linked. Two wireless nodes that are farther than the range pixels apart are not linked. During Execute mode, users may move wireless nodes around by clicking and dragging them, and wireless links will be dynamically made or broken. -The *EMANE* tab lists available EMANE models to use for wireless networking. -See the [EMANE](emane.md) chapter for details on using EMANE. - ### Mobility Scripting CORE has a few ways to script mobility. -| Option | Description | -|--------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| ns-2 script | The script specifies either absolute positions or waypoints with a velocity. Locations are given with Cartesian coordinates. | -| CORE API | An external entity can move nodes by sending CORE API Node messages with updated X,Y coordinates; the **coresendmsg** utility allows a shell script to generate these messages. | -| EMANE events | See [EMANE](emane.md) for details on using EMANE scripts to move nodes around. Location information is typically given as latitude, longitude, and altitude. | +| Option | Description | +|--------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------| +| ns-2 script | The script specifies either absolute positions or waypoints with a velocity. Locations are given with Cartesian coordinates. | +| gRPC API | An external entity can move nodes by leveraging the gRPC API | +| EMANE events | See [EMANE](emane.md) for details on using EMANE scripts to move nodes around. Location information is typically given as latitude, longitude, and altitude. | For the first method, you can create a mobility script using a text editor, or using a tool such as [BonnMotion](http://net.cs.uni-bonn.de/wg/cs/applications/bonnmotion/), and associate the script with one of the wireless @@ -604,7 +577,7 @@ bm NSFile -f sample When the Execute mode is started and one of the WLAN nodes has a mobility script, a mobility script window will appear. This window contains controls for starting, stopping, and resetting the running time for the mobility script. The -*loop* checkbox causes the script to play continuously. The *resolution* text +**loop** checkbox causes the script to play continuously. 
The **resolution** text box contains the number of milliseconds between each timer event; lower values
cause the mobility to appear smoother but consumes greater CPU time.
@@ -628,79 +601,28 @@ accurate.
Examples mobility scripts (and their associated topology files) can be found in the **configs/** directory.
-## Multiple Canvases
+## Alerts
-CORE supports multiple canvases for organizing emulated nodes. Nodes running on
-different canvases may be linked together.
+The alerts button is located in the bottom right-hand corner
+of the status bar in the CORE GUI. This will change colors to indicate one or
+more problems with the running emulation. Clicking on the alerts button will invoke the
+alerts dialog.
-To create a new canvas, choose *New* from the *Canvas* menu. A new canvas tab
-appears in the bottom left corner. Clicking on a canvas tab switches to that
-canvas. Double-click on one of the tabs to invoke the *Manage Canvases* dialog
-box. Here, canvases may be renamed and reordered, and you can easily switch to
-one of the canvases by selecting it.
-
-Each canvas maintains its own set of nodes and annotations. To link between
-canvases, select a node and right-click on it, choose *Create link to*, choose
-the target canvas from the list, and from that submenu the desired node. A
-pseudo-link will be drawn, representing the link between the two nodes on
-different canvases. Double-clicking on the label at the end of the arrow will
-jump to the canvas that it links.
-
-## Check Emulation Light (CEL)
-
-The |cel| Check Emulation Light, or CEL, is located in the bottom right-hand corner
-of the status bar in the CORE GUI. This is a yellow icon that indicates one or
-more problems with the running emulation. Clicking on the CEL will invoke the
-CEL dialog.
-
-The Check Emulation Light dialog contains a list of exceptions received from
-the CORE daemon. An exception has a time, severity level, optional node number,
-and source. When the CEL is blinking, this indicates one or more fatal
-exceptions. An exception with a fatal severity level indicates that one or more
+The alerts dialog contains a list of alerts received from
+the CORE daemon. An alert has a time, severity level, optional node number,
+and source. When the alerts button is red, this indicates one or more fatal
+exceptions. An alert with a fatal severity level indicates that one or more
of the basic pieces of emulation could not be created, such as failure to create
a bridge or namespace, or the failure to launch EMANE processes for an EMANE-based network.
-Clicking on an exception displays details for that
-exception. If a node number is specified, that node is highlighted on the
-canvas when the exception is selected. The exception source is a text string
+Clicking on an alert displays details for that
+exception. The exception source is a text string
to help trace where the exception occurred; "service:UserDefined" for example,
would appear for a failed validation command with the UserDefined service.
-Buttons are available at the bottom of the dialog for clearing the exception
-list and for viewing the CORE daemon and node log files.
-
-> **NOTE:** In batch mode, exceptions received from the CORE daemon are displayed on the console.
-
-## Configuration Files
-
-Configurations are saved to **xml** or **.imn** topology files using
-the *File* menu. You
-can easily edit these files with a text editor.
-Any time you edit the topology
-file, you will need to stop the emulation if it were running and reload the
-file.
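+
+When tracking down a fatal alert, the daemon logs can provide more detail than the dialog itself. A hedged
+example, assuming core-daemon was started through the systemd service described in the install documentation:
+
+```shell
+# tail recent core-daemon output via systemd's journal
+sudo journalctl -u core-daemon -e
+```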
- -The **.imn** file format comes from IMUNES, and is -basically Tcl lists of nodes, links, etc. -Tabs and spacing in the topology files are important. The file starts by -listing every node, then links, annotations, canvases, and options. Each entity -has a block contained in braces. The first block is indented by four spaces. -Within the **network-config** block (and any *custom-*-config* block), the -indentation is one tab character. - -> **NOTE:** There are several topology examples included with CORE in - the **configs/** directory. - This directory can be found in **~/.core/configs**, or - installed to the filesystem - under **/usr[/local]/share/examples/configs**. - -> **NOTE:** When using the **.imn** file format, file paths for things like custom - icons may contain the special variables **$CORE_DATA_DIR** or **$CONFDIR** which - will be substituted with **/usr/share/core** or **~/.core/configs**. - -> **NOTE:** Feel free to edit the files directly using your favorite text editor. +A button is available at the bottom of the dialog for clearing the exception +list. ## Customizing your Topology's Look @@ -723,12 +645,3 @@ A background image for the canvas may be set using the *Wallpaper...* option from the *Canvas* menu. The image may be centered, tiled, or scaled to fit the canvas size. An existing terrain, map, or network diagram could be used as a background, for example, with CORE nodes drawn on top. - -## Preferences - -The *Preferences* Dialog can be accessed from the **Edit_Menu**. There are -numerous defaults that can be set with this dialog, which are stored in the -**~/.core/prefs.conf** preferences file. - - - diff --git a/docs/index.md b/docs/index.md index 2321fa54..e17a2e3b 100644 --- a/docs/index.md +++ b/docs/index.md @@ -23,8 +23,7 @@ networking scenarios, security studies, and increasing the size of physical test | [Installation](install.md) | How to install CORE and its requirements | | [Architecture](architecture.md) | Overview of the architecture | | [Node Types](nodetypes.md) | Overview of node types supported within CORE | -| [Python GUI](pygui.md) | How to use the default python based GUI | -| [Legacy GUI (deprecated)](gui.md) | How to use the deprecated Tcl based GUI | +| [GUI](gui.md) | How to use the GUI | | [Python API](python.md) | Covers how to control core directly using python | | [gRPC API](grpc.md) | Covers how control core using gRPC | | [Distributed](distributed.md) | Details for running CORE across multiple servers | @@ -34,17 +33,3 @@ networking scenarios, security studies, and increasing the size of physical test | [EMANE](emane.md) | Overview of EMANE integration and integrating custom EMANE models | | [Performance](performance.md) | Notes on performance when using CORE | | [Developers Guide](devguide.md) | Overview on how to contribute to CORE | - -## Credits - -The CORE project was derived from the open source IMUNES project from the University of Zagreb in 2004. In 2006, -changes for CORE were released back to that project, some items of which were adopted. Marko Zec is the -primary developer from the University of Zagreb responsible for the IMUNES (GUI) and VirtNet (kernel) projects. Ana -Kukec and Miljenko Mikuc are known contributors. - -Jeff Ahrenholz has been the primary Boeing developer of CORE, and has written this manual. Tom Goff designed the -Python framework and has made significant contributions. Claudiu Danilov, Rod Santiago, Kevin Larson, Gary Pei, -Phil Spagnolo, and Ian Chakeres have contributed code to CORE. 
Dan Mackley helped develop the CORE API, originally to -interface with a simulator. Jae Kim and Tom Henderson have supervised the project and provided direction. - -Copyright (c) 2005-2020, the Boeing Company. diff --git a/docs/install.md b/docs/install.md index 2f6dbce3..0fadea30 100644 --- a/docs/install.md +++ b/docs/install.md @@ -2,13 +2,16 @@ * Table of Contents {:toc} -## Overview -CORE provides a script to help automate the installation of dependencies, -build and install, and either generate a CORE specific python virtual environment -or build and install a python wheel. - > **WARNING:** if Docker is installed, the default iptable rules will block CORE traffic +## Overview +CORE currently supports and provides the following install options, with the package +option being preferred. + +* [Package based install (rpm/deb)](#package-based-install) +* [Script based install](#script-based-install) +* [Dockerfile based install](#dockerfile-based-install) + ### Requirements Any computer capable of running Linux should be able to run CORE. Since the physical machine will be hosting numerous containers, as a general rule you should select a machine having as much RAM and CPU resources as possible. @@ -21,73 +24,41 @@ containers, as a general rule you should select a machine having as much RAM and Plan is to support recent Ubuntu and CentOS LTS releases. Verified: -* Ubuntu - 18.04, 20.04 -* CentOS - 7.8, 8.0 - -> **NOTE:** CentOS 8 does not have the netem kernel mod available by default - -CentOS 8 Enabled netem: -```shell -sudo yum update -# restart into updated kernel -sudo yum install -y kernel-modules-extra -sudo modprobe sch_netem -``` - -### Tools Used -The following tools will be leveraged during installation: - -| Tool | Description | -|---------------------------------------------|-----------------------------------------------------------------------| -| [pip](https://pip.pypa.io/en/stable/) | used to install pipx | -| [pipx](https://pipxproject.github.io/pipx/) | used to install standalone python tools (invoke, poetry) | -| [invoke](http://www.pyinvoke.org/) | used to run provided tasks (install, uninstall, reinstall, etc) | -| [poetry](https://python-poetry.org/) | used to install python virtual environment or building a python wheel | +* Ubuntu - 18.04, 20.04, 22.04 +* CentOS - 7.8 ### Files -The following is a list of files that would be installed after running the automated installation. +The following is a list of files that would be installed after installation. 
-> **NOTE:** the default install prefix is /usr/local, but can be changed as noted below
-
-* executable files
-    * `/bin/{core-daemon, core-gui, vcmd, vnoded, etc}`
-* tcl/tk gui files
-    * `/lib/core`
-    * `/share/core/icons`
-* example imn files
-    * `/share/core/examples`
+* executables
+    * `/bin/{vcmd, vnode}`
+    * can be adjusted using the script based install; the package install will use /usr
* python files
-    * poetry virtual env
-        * `cd /daemon && poetry env info`
-        * `~/.cache/pypoetry/virtualenvs/`
-    * local python install
-        * default install path for python3 installation of a wheel
+    * virtual environment `/opt/core/venv`
+    * local install will be local to the python version used
        * `python3 -c "import core; print(core.__file__)"`
+    * scripts {core-daemon, core-cleanup, etc}
+        * virtualenv `/opt/core/venv/bin`
+        * local `/usr/local/bin`
* configuration files
    * `/etc/core/{core.conf, logging.conf}`
-* ospf mdr repository files
+* ospf mdr repository files when using script based install
    * `/../ospf-mdr`
-* emane repository files
-    * `/../emane`
-### Installed Executables
-After the installation complete it will have installed the following scripts.
+### Installed Scripts
+The following python scripts are provided.
| Name | Description |
|---------------------|------------------------------------------------------------------------------|
| core-cleanup | tool to help removed lingering core created containers, bridges, directories |
| core-cli | tool to query, open xml files, and send commands using gRPC |
-| core-daemon | runs the backed core server providing TLV and gRPC APIs |
-| core-gui | runs the legacy tcl/tk based GUI |
-| core-imn-to-xml | tool to help automate converting a .imn file to .xml format |
-| core-manage | tool to add, remove, or check for services, models, and node types |
-| core-pygui | runs the new python/tk based GUI |
+| core-daemon | runs the backend core server providing a gRPC API |
+| core-gui | starts the GUI |
| core-python | provides a convenience for running the core python virtual environment |
| core-route-monitor | tool to help monitor traffic across nodes and feed that to SDT |
| core-service-update | tool to update automate modifying a legacy service to match current naming |
-| coresendmsg | tool to send TLV API commands from command line |
-## Upgrading from Older Release
+### Upgrading from Older Release
Please make sure to uninstall any previous installations of CORE cleanly
before proceeding to install.
@@ -114,36 +85,81 @@ sudo yum remove core
sudo apt remove core
```
-## Automated Install
-First we will need to clone and navigate to the CORE repo.
+## Package Based Install
+
+Starting with 9.0.0 there are pre-built rpm/deb packages. You can retrieve the
+rpm/deb package from the [releases](https://github.com/coreemu/core/releases) page.
+
+The built packages will require and install system level dependencies, as well as running
+a post install script to install the provided CORE python wheel. A similar uninstall script
+is run when uninstalling and requires the same options that were given during the install.
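+
+Once the package install completes (see the examples below), a quick sanity check can confirm where things
+landed. This is a hedged example: the package name **core** matches the removal commands shown above, and
+/opt/core/venv is the default virtual environment location.
+
+```shell
+# confirm the package is installed (use the tool matching your distro)
+dpkg -s core    # deb based systems
+rpm -qi core    # rpm based systems
+# confirm the CORE python wheel is importable from the default virtual environment
+/opt/core/venv/bin/python -c "import core; print(core.__file__)"
+```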
+ +> **NOTE:** PYTHON defaults to python3 for installs below, CORE requires python3.9+, pip, +> tk compatibility for python gui, and venv for virtual environments + +Examples for install: ```shell -# clone CORE repo -git clone https://github.com/coreemu/core.git -cd core -# install dependencies to run installation task -./setup.sh -# run the following or open a new terminal -source ~/.bashrc -# Ubuntu -inv install -# CentOS -inv install -p /usr +# recommended to upgrade to the latest version of pip before installation +# in python, can help avoid building from source issues +sudo -m pip install --upgrade pip +# install vcmd/vnoded, system dependencies, +# and core python into a venv located at /opt/core/venv +sudo install -y ./ +# disable the venv and install to python directly +sudo NO_VENV=1 install -y ./ +# change python executable used to install for venv or direct installations +sudo PYTHON=python3.9 install -y ./ +# disable venv and change python executable +sudo NO_VENV=1 PYTHON=python3.9 install -y ./ +# skip installing the python portion entirely, as you plan to carry this out yourself +# core python wheel is located at /opt/core/core--py3-none-any.whl +sudo NO_PYTHON=1 install -y ./ +# install python wheel into python of your choosing +sudo -m pip install /opt/core/core--py3-none-any.whl ``` -First you can use `setup.sh` as a convenience to install tooling for running invoke tasks: +Example for removal, requires using the same options as install: +```shell +# remove a standard install +sudo remove core +# remove a local install +sudo NO_VENV=1 remove core +# remove install using alternative python +sudo PYTHON=python3.9 remove core +# remove install using alternative python and local install +sudo NO_VENV=1 PYTHON=python3.9 remove core +# remove install and skip python uninstall +sudo NO_PYTHON=1 remove core +``` -> **NOTE:** `setup.sh` will attempt to determine your OS by way of `/etc/os-release`, currently it supports -> attempts to install OSs that are debian/redhat like (yum/apt). +### Installing OSPF MDR +You will need to manually install OSPF MDR for routing nodes, since this is not +provided by the package. -* python3, pip, venv -* pipx 0.16.4 via pip -* invoke 1.4.1 via pipx -* poetry 1.1.12 via pipx +```shell +git clone https://github.com/USNavalResearchLaboratory/ospf-mdr.git +cd ospf-mdr +./bootstrap.sh +./configure --disable-doc --enable-user=root --enable-group=root \ + --with-cflags=-ggdb --sysconfdir=/usr/local/etc/quagga --enable-vtysh \ + --localstatedir=/var/run/quagga +make -j$(nproc) +sudo make install +``` -Then you can run `inv install `: +When done see [Post Install](#post-install). + +## Script Based Install +The script based installation will install system level dependencies, python library and +dependencies, as well as dependencies for building CORE. + +The script based install also automatically builds and installs OSPF MDR, used by default +on routing nodes. This can optionally be skipped. 
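+
+Whether OSPF MDR is built manually as shown above or by the script based install, a quick hedged check can
+confirm the routing pieces are in place; the paths below assume the default /usr/local prefix:
+
+```shell
+# quagga daemons such as zebra and ospf6d are typically installed under sbin
+ls /usr/local/sbin | grep -E "zebra|ospf6d"
+# vtysh was enabled via --enable-vtysh and lands under bin
+ls /usr/local/bin | grep vtysh
+```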
+
+Installation will carry out the following steps:
* installs system dependencies for building core
+* builds vcmd/vnoded and python grpc files
* installs core into poetry managed virtual environment or locally, if flag is passed
-* installs scripts pointing to appropriate python location based on install type
* installs systemd service pointing to appropriate python location based on install type
* clone/build/install working version of [OPSF MDR](https://github.com/USNavalResearchLaboratory/ospf-mdr)
@@ -153,9 +169,39 @@ Then you can run `inv install `:
> **NOTE:** provide a prefix that will be found on path when running as sudo,
> if the default prefix /usr/local will not be valid
-```shell
-inv -h install
+The following tools will be leveraged during installation:
+| Tool | Description |
+|---------------------------------------------|-----------------------------------------------------------------------|
+| [pip](https://pip.pypa.io/en/stable/) | used to install pipx |
+| [pipx](https://pipxproject.github.io/pipx/) | used to install standalone python tools (invoke, poetry) |
+| [invoke](http://www.pyinvoke.org/) | used to run provided tasks (install, uninstall, reinstall, etc) |
+| [poetry](https://python-poetry.org/) | used to install python virtual environment or building a python wheel |
+
+First we will need to clone and navigate to the CORE repo.
+```shell
+# clone CORE repo
+git clone https://github.com/coreemu/core.git
+cd core
+
+# install dependencies to run installation task
+./setup.sh
+# skip installing system packages, due to using python built from source
+NO_SYSTEM=1 ./setup.sh
+
+# run the following or open a new terminal
+source ~/.bashrc
+
+# Ubuntu
+inv install
+# CentOS
+inv install -p /usr
+# optionally skip python system packages
+inv install --no-python
+# optionally skip installing ospf mdr
+inv install --no-ospf
+
+# install command options
Usage: inv[oke] [--core-opts] install [--options] [other tasks here ...]
Docstring:
@@ -165,35 +211,99 @@ Options:
-d, --dev install development mode
-i STRING, --install-type=STRING used to force an install type, can be one of the following (redhat, debian)
-l, --local determines if core will install to local system, default is False
+ -n, --no-python avoid installing python system dependencies
-o, --[no-]ospf disable ospf installation
-p STRING, --prefix=STRING prefix where scripts are installed, default is /usr/local
- -v, --verbose enable verbose
-
-# install core to virtual environment
-./install.sh -p
-
-# install core locally
-./install.sh -p -l
+ -v, --verbose
```
-After installation has completed you should be able to run `core-daemon` and `core-gui`.
+When done see [Post Install](#post-install).
-## Using Invoke Tasks
-The invoke tool installed by way of pipx provides conveniences for running
-CORE tasks to help ensure usage of the create python virtual environment.
+### Unsupported Linux Distribution
+For unsupported OSs you could attempt to do the following to translate
+an installation to your use case.
+
+* make sure you have python3.9+ with venv support
+* make sure you have python3 invoke available to leverage `/tasks.py`
```shell
-inv --list
+# this will print the commands that would be ran for a given installation
# type without actually running them, they may help in being used as
# the basis for translating to your OS
+inv install --dry -v -p -i
```
-Available tasks:
+## Dockerfile Based Install
+You can leverage one of the provided Dockerfiles to run and launch CORE within a Docker container.
-  install              install core, poetry, scripts, service, and ospf mdr
-  install-emane        install emane python bindings into the core virtual environment
-  reinstall            run the uninstall task, get latest from specified branch, and run install task
-  test                 run core tests
-  test-emane           run core emane tests
-  test-mock            run core tests using mock to avoid running as sudo
-  uninstall            uninstall core, scripts, service, virtual environment, and clean build directory
+Since CORE nodes will leverage software available within the system for a given use case,
+make sure to update and build the Dockerfile with desired software.
+
+```shell
+# clone core
+git clone https://github.com/coreemu/core.git
+cd core
+# build image
+sudo docker build -t core -f Dockerfile. .
+# start container
+sudo docker run -itd --name core -e DISPLAY -v /tmp/.X11-unix:/tmp/.X11-unix:rw --privileged core
+# enable xhost access to the root user
+xhost +local:root
+# launch core-gui
+sudo docker exec -it core core-gui
+```
+
+When done see [Post Install](#post-install).
+
+## Installing EMANE
+> **NOTE:** installing EMANE for the virtual environment is known to work for 1.21+
+
+The recommended way to install EMANE is using prebuilt packages; otherwise,
+you can follow their instructions for installing from source. Installation
+information can be found [here](https://github.com/adjacentlink/emane/wiki/Install).
+
+There is an invoke task to help install the EMANE bindings into the CORE virtual
+environment, when needed. An example of running the task is below; the version
+provided should match the version of the packages installed.
+```shell
+cd
+# example version tag v1.3.3
+inv install-emane -e
+```
+
+## Post Install
+After installation completes you are now ready to run CORE.
+
+### Resolving Path Issues
+One problem you may run into when running CORE, whether using the virtual environment or a local
+install, is issues related to your PATH.
+
+To add support for your user to run scripts from the virtual environment:
+```shell
+# can add to ~/.bashrc
+export PATH=$PATH:/opt/core/venv/bin
+```
+
+This will not solve the path issue when running as sudo, so you can do either
+of the following to compensate.
+```shell
+# run command passing in the right PATH to pickup from the user running the command
+sudo env PATH=$PATH core-daemon
+
+# add an alias to ~/.bashrc or something similar
+alias sudop='sudo env PATH=$PATH'
+# now you can run commands like so
+sudop core-daemon
+```
+
+### Running CORE
+The following assumes you have resolved PATH issues and set up the `sudop` alias.
+
+```shell
+# in one terminal run the server daemon using the alias above
+sudop core-daemon
+# in another terminal run the gui client
+core-gui
```
### Enabling Service
@@ -204,65 +314,3 @@ service, run the following commands.
sudo systemctl enable core-daemon
sudo systemctl start core-daemon
```
-
-### Unsupported Linux Distribution
-For unsupported OSs you could attempt to do the following to translate
-an installation to your use case.
-
-* make sure you have python3.6+ with venv support
-* make sure you have python3 invoke available to leverage `/tasks.py`
-
-```shell
-# this will print the commands that would be ran for a given installation
-# type without actually running them, they may help in being used as
-# the basis for translating to your OS
-inv install --dry -v -p -i
-```
-
-## Dockerfile Install
-You can leverage the provided Dockerfile, to run and launch CORE within a Docker container.
- -```shell -# clone core -git clone https://github.com/coreemu/core.git -cd core -# build image -sudo docker build -t core . -# start container -sudo docker run -itd --name core -e DISPLAY -v /tmp/.X11-unix:/tmp/.X11-unix:rw --privileged core -# enable xhost access to the root user -xhost +local:root -# launch core-gui -sudo docker exec -it core core-gui -``` - -## Running User Scripts -If you create your own python scripts to run CORE directly or using the gRPC/TLV -APIs you will need to make sure you are running them within context of the -installed virtual environment. To help support this CORE provides the `core-python` -executable. This executable will allow you to enter CORE's python virtual -environment interpreter or to run a script within it. - -For installations installed to a virtual environment: -```shell -core-python