Merge branch 'develop' into coretk-create-node
commit 0e036ddca9
91 changed files with 2788 additions and 5520 deletions
20 Jenkinsfile (vendored)
@@ -1,20 +0,0 @@
-pipeline {
-    agent any
-    stages {
-        stage('build core') {
-            steps {
-                sh './bootstrap.sh'
-                sh './configure'
-                sh 'make'
-                sh 'sudo make install'
-            }
-        }
-        stage('test core') {
-            steps {
-                sh 'pytest daemon/tests/test_core.py'
-                sh 'pytest daemon/tests/test_gui.py'
-                sh 'pytest daemon/tests/test_emane.py'
-            }
-        }
-    }
-}
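Note: the deleted Jenkinsfile drove the test suite with pytest. For anyone scripting the same run outside Jenkins, a small sketch of the programmatic equivalent (not part of the repo; the test paths are the ones shown above):

    # programmatic equivalent of the removed `sh 'pytest daemon/tests/...'` steps
    import pytest

    exit_code = pytest.main(
        [
            "daemon/tests/test_core.py",
            "daemon/tests/test_gui.py",
            "daemon/tests/test_emane.py",
        ]
    )
    raise SystemExit(exit_code)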
13 Makefile.am
@@ -44,15 +44,6 @@ DISTCLEANFILES = aclocal.m4 \
 MAINTAINERCLEANFILES = .version \
 	.version.date
 
-if PYTHON3
-PYTHON_DEB_DEP = python3 >= 3.6
-PYTHON_RPM_DEP = python3 >= 3.6
-else
-PYTHON_DEB_DEP = python (>= 2.7), python (<< 3.0)
-PYTHON_RPM_DEP = python >= 2.7, python < 3.0
-endif
-
 define fpm-rpm =
 fpm -s dir -t rpm -n core \
 	-m "$(PACKAGE_MAINTAINERS)" \

@@ -74,7 +65,7 @@ fpm -s dir -t rpm -n core \
 	-d "iproute" \
 	-d "libev" \
 	-d "net-tools" \
-	-d "$(PYTHON_RPM_DEP)" \
+	-d "python3 >= 3.6" \
 	-C $(DESTDIR)
 endef
 

@@ -101,7 +92,7 @@ fpm -s dir -t deb -n core \
 	-d "ebtables" \
 	-d "iproute2" \
 	-d "libev4" \
-	-d "$(PYTHON_DEB_DEP)" \
+	-d "python3 >= 3.6" \
 	-C $(DESTDIR)
 endef
 
@@ -19,7 +19,7 @@ scripting network emulation.
 * Documentation hosted on GitHub
   * <http://coreemu.github.io/core/>
 * Basic Script Examples
-  * [Examples](daemon/examples/api)
+  * [Examples](daemon/examples/python)
 * Custom Service Example
   * [sample.py](daemon/examples/myservices/sample.py)
 * Custom Emane Model Example
37 configure.ac
@@ -2,7 +2,7 @@
 # Process this file with autoconf to produce a configure script.
 
 # this defines the CORE version number, must be static for AC_INIT
-AC_INIT(core, 5.4.0)
+AC_INIT(core, 5.5.2)
 
 # autoconf and automake initialization
 AC_CONFIG_SRCDIR([netns/version.h.in])

@@ -55,12 +55,6 @@ else
 	want_python=no
 fi
 
-AC_ARG_ENABLE([python3],
-	[AS_HELP_STRING([--enable-python3],
-	[sets python3 flag for building packages])],
-	[enable_python3=yes], [enable_python3=no])
-AM_CONDITIONAL([PYTHON3], [test "x$enable_python3" == "xyes"])
-
 AC_ARG_ENABLE([daemon],
 	[AS_HELP_STRING([--enable-daemon[=ARG]],
 	[build and install the daemon with Python modules

@@ -116,39 +110,62 @@ if test "x$enable_daemon" = "xyes"; then
 	AC_FUNC_REALLOC
 	AC_CHECK_FUNCS([atexit dup2 gettimeofday memset socket strerror uname])
 
-	AM_PATH_PYTHON(2.7)
+	AM_PATH_PYTHON(3.6)
+	AS_IF([$PYTHON -m grpc_tools.protoc -h &> /dev/null], [], [AC_MSG_ERROR([please install python grpcio-tools])])
 
 	AC_CHECK_PROG(brctl_path, brctl, $as_dir, no, $SEARCHPATH)
 	if test "x$brctl_path" = "xno" ; then
 		AC_MSG_ERROR([Could not locate brctl (from bridge-utils package).])
 	fi
 
 	AC_CHECK_PROG(sysctl_path, sysctl, $as_dir, no, $SEARCHPATH)
 	if test "x$sysctl_path" = "xno" ; then
 		AC_MSG_ERROR([Could not locate sysctl (from procps package).])
 	fi
 
 	AC_CHECK_PROG(ebtables_path, ebtables, $as_dir, no, $SEARCHPATH)
 	if test "x$ebtables_path" = "xno" ; then
 		AC_MSG_ERROR([Could not locate ebtables (from ebtables package).])
 	fi
 
 	AC_CHECK_PROG(ip_path, ip, $as_dir, no, $SEARCHPATH)
 	if test "x$ip_path" = "xno" ; then
 		AC_MSG_ERROR([Could not locate ip (from iproute package).])
 	fi
 
 	AC_CHECK_PROG(tc_path, tc, $as_dir, no, $SEARCHPATH)
 	if test "x$tc_path" = "xno" ; then
 		AC_MSG_ERROR([Could not locate tc (from iproute package).])
 	fi
 
 	AC_CHECK_PROG(ethtool_path, ethtool, $as_dir, no, $SEARCHPATH)
 	if test "x$ethtool_path" = "xno" ; then
 		AC_MSG_ERROR([Could not locate ethtool (from package ethtool)])
 	fi
 
 	AC_CHECK_PROG(mount_path, mount, $as_dir, no, $SEARCHPATH)
 	if test "x$mount_path" = "xno" ; then
 		AC_MSG_ERROR([Could not locate mount (from package mount)])
 	fi
 
 	AC_CHECK_PROG(umount_path, umount, $as_dir, no, $SEARCHPATH)
 	if test "x$umount_path" = "xno" ; then
 		AC_MSG_ERROR([Could not locate umount (from package mount)])
 	fi
 
 	AC_CHECK_PROG(convert, convert, yes, no, $SEARCHPATH)
 	if test "x$convert" = "xno" ; then
 		AC_MSG_WARN([Could not locate ImageMagick convert.])
 	fi
 
 	AC_CHECK_PROG(ovs_vs_path, ovs-vsctl, $as_dir, no, $SEARCHPATH)
 	if test "x$ovs_vs_path" = "xno" ; then
-		AC_MSG_WARN([Could not locate ovs-vsctl cannot use OVS nodes])
+		AC_MSG_WARN([Could not locate ovs-vsctl cannot use OVS mode])
 	fi
 
 	AC_CHECK_PROG(ovs_of_path, ovs-ofctl, $as_dir, no, $SEARCHPATH)
 	if test "x$ovs_of_path" = "xno" ; then
-		AC_MSG_WARN([Could not locate ovs-ofctl cannot use OVS nodes])
+		AC_MSG_WARN([Could not locate ovs-ofctl cannot use OVS mode])
 	fi
 
 	CFLAGS_save=$CFLAGS
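Note: the configure change above replaces the Python 2.7 requirement with Python 3.6 and adds a hard check that python grpcio-tools is installed. A rough Python illustration of that new check (a sketch, not part of the build; only the module name and error message come from the diff):

    import subprocess
    import sys

    # mirror the AS_IF([$PYTHON -m grpc_tools.protoc -h ...]) test from configure.ac
    result = subprocess.run(
        [sys.executable, "-m", "grpc_tools.protoc", "-h"],
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
    )
    if result.returncode != 0:
        sys.exit("please install python grpcio-tools")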
@@ -14,8 +14,6 @@ if WANT_DOCS
 DOCS = doc
 endif
 
-PYTHONLIBDIR=$(subst site-packages,dist-packages,$(pythondir))
-
 SUBDIRS = proto $(DOCS)
 
 SCRIPT_FILES := $(notdir $(wildcard scripts/*))

@@ -31,7 +29,7 @@ install-exec-hook:
 	$(PYTHON) $(SETUPPY) $(SETUPPYFLAGS) install \
 		--root=/$(DESTDIR) \
 		--prefix=$(prefix) \
-		--install-lib=$(PYTHONLIBDIR) \
+		--install-lib=$(pythondir) \
 		--single-version-externally-managed
 
 # Python package uninstall

@@ -40,8 +38,8 @@ uninstall-hook:
 	rm -rf $(DESTDIR)/$(datadir)/core
 	rm -f $(addprefix $(DESTDIR)/$(datarootdir)/man/man1/, $(MAN_FILES))
 	rm -f $(addprefix $(DESTDIR)/$(bindir)/,$(SCRIPT_FILES))
-	rm -rf $(DESTDIR)/$(PYTHONLIBDIR)/core-$(PACKAGE_VERSION)-py$(PYTHON_VERSION).egg-info
-	rm -rf $(DESTDIR)/$(PYTHONLIBDIR)/core
+	rm -rf $(DESTDIR)/$(pythondir)/core-$(PACKAGE_VERSION)-py$(PYTHON_VERSION).egg-info
+	rm -rf $(DESTDIR)/$(pythondir)/core
 
 # Python package cleanup
 clean-local:
425 daemon/Pipfile.lock (generated)
Generated lockfile; only the dependency changes are listed here, the per-package sha256 "hashes" lists are omitted.

[default]
  removed: configparser (==4.0.2), enum34 (==1.1.6), future (==0.17.1)
  added:   bcrypt (==3.1.7), cffi (==1.13.1), cryptography (==2.8), fabric (==2.5.0),
           invoke (==1.3.0), paramiko (==2.6.0), protobuf (==3.10.0), pycparser (==2.19),
           pynacl (==1.3.0)
  updated: grpcio ==1.23.0 -> ==1.24.1
  context: core (editable, path "."), lxml (==4.4.1), six

[dev-packages]
  updated: attrs ==19.1.0 -> ==19.3.0, grpcio ==1.23.0 -> ==1.24.1,
           grpcio-tools ==1.23.0 -> ==1.24.1, importlib-metadata ==0.22 -> ==0.23,
           packaging ==19.1 -> ==19.2, protobuf ==3.9.1 -> ==3.10.0,
           pytest ==5.1.2 -> ==5.2.1, virtualenv ==16.7.5 -> ==16.7.6
  context: black, identify, importlib-resources, pluggy, py, pyyaml, wcwidth
@@ -1,41 +1,7 @@
-import json
-import logging.config
-import os
-import subprocess
-
-from core import constants
-
 # setup default null handler
 logging.getLogger(__name__).addHandler(logging.NullHandler())
 
-
-def load_logging_config(config_path=None):
-    """
-    Load CORE logging configuration file.
-
-    :param str config_path: path to logging config file,
-        when None defaults to /etc/core/logging.conf
-    :return: nothing
-    """
-    if not config_path:
-        config_path = os.path.join(constants.CORE_CONF_DIR, "logging.conf")
-    with open(config_path, "r") as log_config_file:
-        log_config = json.load(log_config_file)
-        logging.config.dictConfig(log_config)
-
-
-class CoreCommandError(subprocess.CalledProcessError):
-    """
-    Used when encountering internal CORE command errors.
-    """
-
-    def __str__(self):
-        return "Command(%s), Status(%s):\n%s" % (self.cmd, self.returncode, self.output)
-
-
-class CoreError(Exception):
-    """
-    Used for errors when dealing with CoreEmu and Sessions.
-    """
-
-    pass
+# disable paramiko logging
+logging.getLogger("paramiko").setLevel(logging.WARNING)
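Note: the error classes removed from the package __init__ above are not gone; later hunks in this commit import them from core.errors instead. A minimal sketch of the import change callers need, assuming only the imports visible elsewhere in this diff:

    # before this commit (as in the removed handler import line):
    #   from core import CoreError, utils
    # after this commit (as in the updated gRPC server and handler imports):
    from core import utils
    from core.errors import CoreCommandError, CoreError

    try:
        raise CoreError("session not found")
    except CoreError as e:
        print(f"core error: {e}")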
@@ -276,6 +276,22 @@ class CoreGrpcClient(object):
         request = core_pb2.SetSessionStateRequest(session_id=session_id, state=state)
         return self.stub.SetSessionState(request)
 
+    def add_session_server(self, session_id, name, host):
+        """
+        Add distributed session server.
+
+        :param int session_id: id of session
+        :param str name: name of server to add
+        :param str host: host address to connect to
+        :return: response with result of success or failure
+        :rtype: core_pb2.AddSessionServerResponse
+        :raises grpc.RpcError: when session doesn't exist
+        """
+        request = core_pb2.AddSessionServerRequest(
+            session_id=session_id, name=name, host=host
+        )
+        return self.stub.AddSessionServer(request)
+
     def events(self, session_id, handler):
         """
         Listen for session events.

@@ -326,19 +342,20 @@ class CoreGrpcClient(object):
         request = core_pb2.GetNodeRequest(session_id=session_id, node_id=node_id)
         return self.stub.GetNode(request)
 
-    def edit_node(self, session_id, node_id, position):
+    def edit_node(self, session_id, node_id, position, icon=None):
         """
         Edit a node, currently only changes position.
 
         :param int session_id: session id
         :param int node_id: node id
         :param core_pb2.Position position: position to set node to
+        :param str icon: path to icon for gui to use for node
         :return: response with result of success or failure
         :rtype: core_pb2.EditNodeResponse
         :raises grpc.RpcError: when session or node doesn't exist
         """
         request = core_pb2.EditNodeRequest(
-            session_id=session_id, node_id=node_id, position=position
+            session_id=session_id, node_id=node_id, position=position, icon=icon
         )
         return self.stub.EditNode(request)
 

@@ -864,6 +881,21 @@ class CoreGrpcClient(object):
         request = core_pb2.OpenXmlRequest(data=data)
         return self.stub.OpenXml(request)
 
+    def emane_link(self, session_id, nem_one, nem_two, linked):
+        """
+        Helps broadcast wireless link/unlink between EMANE nodes.
+
+        :param int session_id: session id
+        :param int nem_one:
+        :param int nem_two:
+        :param bool linked: True to link, False to unlink
+        :return: core_pb2.EmaneLinkResponse
+        """
+        request = core_pb2.EmaneLinkRequest(
+            session_id=session_id, nem_one=nem_one, nem_two=nem_two, linked=linked
+        )
+        return self.stub.EmaneLink(request)
+
     def connect(self):
         """
         Open connection to server, must be closed manually.
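Note: the client methods added above follow the existing request/stub pattern. A minimal, hypothetical usage sketch; the core.api.grpc.client module path, session and node ids, server name, host address, and icon path are all assumptions, and only calls visible in this diff are used:

    from core.api.grpc import client, core_pb2  # module path assumed, not shown in the diff

    core = client.CoreGrpcClient()
    core.connect()  # per the docstring above, the connection must be closed manually

    # register a distributed emulation server with an existing session (id assumed)
    core.add_session_server(1, "server1", "192.168.0.10")

    # move a node and suggest an icon for the GUI to use
    position = core_pb2.Position(x=100, y=150)
    core.edit_node(1, 1, position, icon="/tmp/router.png")

    # ask the daemon to broadcast a wireless link between two EMANE NEMs
    core.emane_link(1, nem_one=1, nem_two=2, linked=True)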
@@ -4,15 +4,13 @@ import os
 import re
 import tempfile
 import time
-from builtins import int
 from concurrent import futures
 from queue import Empty, Queue
 
 import grpc
 
-from core import CoreError
 from core.api.grpc import core_pb2, core_pb2_grpc
-from core.emane.nodes import EmaneNode
+from core.emane.nodes import EmaneNet
 from core.emulator.data import (
     ConfigData,
     EventData,

@@ -22,7 +20,8 @@ from core.emulator.data import (
     NodeData,
 )
 from core.emulator.emudata import InterfaceData, LinkOptions, NodeOptions
-from core.emulator.enumerations import EventTypes, LinkTypes, NodeTypes
+from core.emulator.enumerations import EventTypes, LinkTypes, MessageFlags, NodeTypes
+from core.errors import CoreCommandError, CoreError
 from core.location.mobility import BasicRangeModel, Ns2ScriptedMobility
 from core.nodes.base import CoreNetworkBase
 from core.nodes.docker import DockerNode

@@ -248,9 +247,7 @@ class CoreGrpcServer(core_pb2_grpc.CoreApiServicer):
         """
         session = self.coreemu.sessions.get(session_id)
         if not session:
-            context.abort(
-                grpc.StatusCode.NOT_FOUND, "session {} not found".format(session_id)
-            )
+            context.abort(grpc.StatusCode.NOT_FOUND, f"session {session_id} not found")
         return session
 
     def get_node(self, session, node_id, context):

@@ -266,9 +263,7 @@ class CoreGrpcServer(core_pb2_grpc.CoreApiServicer):
         try:
             return session.get_node(node_id)
         except CoreError:
-            context.abort(
-                grpc.StatusCode.NOT_FOUND, "node {} not found".format(node_id)
-            )
+            context.abort(grpc.StatusCode.NOT_FOUND, f"node {node_id} not found")
 
     def CreateSession(self, request, context):
         """

@@ -456,7 +451,7 @@ class CoreGrpcServer(core_pb2_grpc.CoreApiServicer):
             services = [x.name for x in services]
 
             emane_model = None
-            if isinstance(node, EmaneNode):
+            if isinstance(node, EmaneNet):
                 emane_model = node.model.name
 
             node_proto = core_pb2.Node(

@@ -478,6 +473,20 @@ class CoreGrpcServer(core_pb2_grpc.CoreApiServicer):
         session_proto = core_pb2.Session(state=session.state, nodes=nodes, links=links)
         return core_pb2.GetSessionResponse(session=session_proto)
 
+    def AddSessionServer(self, request, context):
+        """
+        Add distributed server to a session.
+
+        :param core.api.grpc.core_pb2.AddSessionServerRequest request: get-session
+            request
+        :param grpc.ServicerContext context: context object
+        :return: add session server response
+        :rtype: core.api.grpc.core_bp2.AddSessionServerResponse
+        """
+        session = self.get_session(request.session_id, context)
+        session.distributed.add_server(request.name, request.host)
+        return core_pb2.AddSessionServerResponse(result=True)
+
     def Events(self, request, context):
         session = self.get_session(request.session_id, context)
         queue = Queue()

@@ -766,6 +775,8 @@ class CoreGrpcServer(core_pb2_grpc.CoreApiServicer):
         node_options.opaque = node_proto.opaque
         node_options.image = node_proto.image
         node_options.services = node_proto.services
+        if node_proto.server:
+            node_options.emulation_server = node_proto.server
 
         position = node_proto.position
         node_options.set_position(position.x, position.y)

@@ -809,10 +820,13 @@ class CoreGrpcServer(core_pb2_grpc.CoreApiServicer):
             interfaces.append(interface_proto)
 
         emane_model = None
-        if isinstance(node, EmaneNode):
+        if isinstance(node, EmaneNet):
             emane_model = node.model.name
 
-        services = [x.name for x in getattr(node, "services", [])]
+        services = []
+        if node.services:
+            services = [x.name for x in node.services]
 
         position = core_pb2.Position(
             x=node.position.x, y=node.position.y, z=node.position.z
         )

@@ -842,8 +856,9 @@ class CoreGrpcServer(core_pb2_grpc.CoreApiServicer):
         """
         logging.debug("edit node: %s", request)
         session = self.get_session(request.session_id, context)
-        node_id = request.node_id
+        node = self.get_node(session, request.node_id, context)
         node_options = NodeOptions()
+        node_options.icon = request.icon
         x = request.position.x
         y = request.position.y
         node_options.set_position(x, y)

@@ -853,7 +868,9 @@ class CoreGrpcServer(core_pb2_grpc.CoreApiServicer):
             node_options.set_location(lat, lon, alt)
         result = True
         try:
-            session.update_node(node_id, node_options)
+            session.update_node(node.id, node_options)
+            node_data = node.data(0)
+            session.broadcast_node(node_data)
         except CoreError:
             result = False
         return core_pb2.EditNodeResponse(result=result)

@@ -882,7 +899,10 @@ class CoreGrpcServer(core_pb2_grpc.CoreApiServicer):
         logging.debug("sending node command: %s", request)
         session = self.get_session(request.session_id, context)
         node = self.get_node(session, request.node_id, context)
-        _, output = node.cmd_output(request.command)
+        try:
+            output = node.cmd(request.command)
+        except CoreCommandError as e:
+            output = e.stderr
         return core_pb2.NodeCommandResponse(output=output)
 
     def GetNodeTerminal(self, request, context):

@@ -1557,3 +1577,43 @@ class CoreGrpcServer(core_pb2_grpc.CoreApiServicer):
                 continue
             interfaces.append(interface)
         return core_pb2.GetInterfacesResponse(interfaces=interfaces)
+
+    def EmaneLink(self, request, context):
+        """
+        Helps broadcast wireless link/unlink between EMANE nodes.
+
+        :param core.api.grpc.core_pb2.EmaneLinkRequest request: get-interfaces request
+        :param grpc.ServicerContext context: context object
+        :return: emane link response with success status
+        :rtype: core.api.grpc.core_pb2.EmaneLinkResponse
+        """
+        logging.debug("emane link: %s", request)
+        session = self.get_session(request.session_id, context)
+        nem_one = request.nem_one
+        emane_one, netif = session.emane.nemlookup(nem_one)
+        if not emane_one or not netif:
+            context.abort(grpc.StatusCode.NOT_FOUND, f"nem one {nem_one} not found")
+        node_one = netif.node
+
+        nem_two = request.nem_two
+        emane_two, netif = session.emane.nemlookup(nem_two)
+        if not emane_two or not netif:
+            context.abort(grpc.StatusCode.NOT_FOUND, f"nem two {nem_two} not found")
+        node_two = netif.node
+
+        if emane_one.id == emane_two.id:
+            if request.linked:
+                flag = MessageFlags.ADD.value
+            else:
+                flag = MessageFlags.DELETE.value
+            link = LinkData(
+                message_type=flag,
+                link_type=LinkTypes.WIRELESS.value,
+                node1_id=node_one.id,
+                node2_id=node_two.id,
+                network_id=emane_one.id,
+            )
+            session.broadcast_link(link)
+            return core_pb2.EmaneLinkResponse(result=True)
+        else:
+            return core_pb2.EmaneLinkResponse(result=False)
File diff suppressed because it is too large.
@@ -9,13 +9,10 @@ import socket
 import struct
 from enum import Enum
 
-from past.builtins import basestring
-
 from core.api.tlv import structutils
 from core.emulator.enumerations import (
     ConfigTlvs,
     EventTlvs,
-    EventTypes,
     ExceptionTlvs,
     ExecuteTlvs,
     FileTlvs,

@@ -182,8 +179,8 @@ class CoreTlvDataString(CoreTlvData):
         :return: length of data packed and the packed data
         :rtype: tuple
         """
-        if not isinstance(value, basestring):
-            raise ValueError("value not a string: %s" % type(value))
+        if not isinstance(value, str):
+            raise ValueError(f"value not a string: {type(value)}")
         value = value.encode("utf-8")
 
         if len(value) < 256:

@@ -223,7 +220,7 @@ class CoreTlvDataUint16List(CoreTlvData):
         :rtype: tuple
         """
         if not isinstance(values, tuple):
-            raise ValueError("value not a tuple: %s" % values)
+            raise ValueError(f"value not a tuple: {values}")
 
         data = b""
         for value in values:

@@ -240,7 +237,8 @@ class CoreTlvDataUint16List(CoreTlvData):
         :param data: data to unpack
         :return: unpacked data
         """
-        data_format = "!%dH" % (len(data) / 2)
+        size = int(len(data) / 2)
+        data_format = f"!{size}H"
         return struct.unpack(data_format, data)
 
     @classmethod

@@ -438,7 +436,7 @@ class CoreTlv(object):
         try:
             return self.tlv_type_map(self.tlv_type).name
         except ValueError:
-            return "unknown tlv type: %s" % str(self.tlv_type)
+            return f"unknown tlv type: {self.tlv_type}"
 
     def __str__(self):
         """

@@ -447,11 +445,7 @@ class CoreTlv(object):
         :return: string representation
         :rtype: str
         """
-        return "%s <tlvtype = %s, value = %s>" % (
-            self.__class__.__name__,
-            self.type_str(),
-            self.value,
-        )
+        return f"{self.__class__.__name__} <tlvtype = {self.type_str()}, value = {self.value}>"
 
 
 class CoreNodeTlv(CoreTlv):

@@ -737,7 +731,7 @@ class CoreMessage(object):
         :return: nothing
         """
         if key in self.tlv_data:
-            raise KeyError("key already exists: %s (val=%s)" % (key, value))
+            raise KeyError(f"key already exists: {key} (val={value})")
 
         self.tlv_data[key] = value
 

@@ -796,7 +790,7 @@ class CoreMessage(object):
         try:
             return MessageTypes(self.message_type).name
         except ValueError:
-            return "unknown message type: %s" % str(self.message_type)
+            return f"unknown message type: {self.message_type}"
 
     def flag_str(self):
         """

@@ -813,12 +807,13 @@ class CoreMessage(object):
             try:
                 message_flags.append(self.flag_map(flag).name)
             except ValueError:
-                message_flags.append("0x%x" % flag)
+                message_flags.append(f"0x{flag:x}")
             flag <<= 1
             if not (self.flags & ~(flag - 1)):
                 break
 
-        return "0x%x <%s>" % (self.flags, " | ".join(message_flags))
+        message_flags = " | ".join(message_flags)
+        return f"0x{self.flags:x} <{message_flags}>"
 
     def __str__(self):
         """

@@ -827,20 +822,16 @@ class CoreMessage(object):
         :return: string representation
         :rtype: str
         """
-        result = "%s <msgtype = %s, flags = %s>" % (
-            self.__class__.__name__,
-            self.type_str(),
-            self.flag_str(),
-        )
+        result = f"{self.__class__.__name__} <msgtype = {self.type_str()}, flags = {self.flag_str()}>"
 
         for key in self.tlv_data:
             value = self.tlv_data[key]
             try:
                 tlv_type = self.tlv_class.tlv_type_map(key).name
             except ValueError:
-                tlv_type = "tlv type %s" % key
+                tlv_type = f"tlv type {key}"
 
-            result += "\n %s: %s" % (tlv_type, value)
+            result += f"\n {tlv_type}: {value}"
 
         return result
 

@@ -1017,20 +1008,3 @@ def str_to_list(value):
         return None
 
     return value.split("|")
-
-
-def state_name(value):
-    """
-    Helper to convert state number into state name using event types.
-
-    :param int value: state value to derive name from
-    :return: state name
-    :rtype: str
-    """
-
-    try:
-        value = EventTypes(value).name
-    except ValueError:
-        value = "unknown"
-
-    return value
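Note: most of the TLV changes above are mechanical conversions from %-formatting to f-strings; the two forms produce identical strings. A small self-checking illustration (the flag values are made up):

    # equivalence of the old and new formatting used in flag_str()
    flag = 0x2a
    assert "0x%x" % flag == f"0x{flag:x}" == "0x2a"

    flags, joined = 0x3, "ADD | DELETE"
    assert "0x%x <%s>" % (flags, joined) == f"0x{flags:x} <{joined}>"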
@@ -10,11 +10,10 @@ import socketserver
 import sys
 import threading
 import time
-from builtins import range
 from itertools import repeat
 from queue import Empty, Queue
 
-from core import CoreError, utils
+from core import utils
 from core.api.tlv import coreapi, dataconversion, structutils
 from core.config import ConfigShim
 from core.emulator.data import ConfigData, EventData, ExceptionData, FileData

@@ -37,6 +36,7 @@ from core.emulator.enumerations import (
     RegisterTlvs,
     SessionTlvs,
 )
+from core.errors import CoreCommandError, CoreError
 from core.location.mobility import BasicRangeModel
 from core.nodes.network import WlanNode
 from core.services.coreservices import ServiceManager, ServiceShim

@@ -75,7 +75,7 @@ class CoreHandler(socketserver.BaseRequestHandler):
         self.handler_threads = []
         num_threads = int(server.config["numthreads"])
         if num_threads < 1:
-            raise ValueError("invalid number of threads: %s" % num_threads)
+            raise ValueError(f"invalid number of threads: {num_threads}")
 
         logging.debug("launching core server handler threads: %s", num_threads)
         for _ in range(num_threads):

@@ -85,6 +85,7 @@ class CoreHandler(socketserver.BaseRequestHandler):
 
         self.master = False
         self.session = None
+        self.session_clients = {}
 
         # core emulator
         self.coreemu = server.coreemu

@@ -137,8 +138,9 @@ class CoreHandler(socketserver.BaseRequestHandler):
         if self.session:
             # remove client from session broker and shutdown if there are no clients
             self.remove_session_handlers()
-            self.session.broker.session_clients.remove(self)
-            if not self.session.broker.session_clients and not self.session.is_active():
+            clients = self.session_clients[self.session.id]
+            clients.remove(self)
+            if not clients and not self.session.is_active():
                 logging.info(
                     "no session clients left and not active, initiating shutdown"
                 )

@@ -406,9 +408,7 @@ class CoreHandler(socketserver.BaseRequestHandler):
         tlv_data += coreapi.CoreRegisterTlv.pack(
             RegisterTlvs.EMULATION_SERVER.value, "core-daemon"
         )
-        tlv_data += coreapi.CoreRegisterTlv.pack(
-            self.session.broker.config_type, self.session.broker.name
-        )
+        tlv_data += coreapi.CoreRegisterTlv.pack(RegisterTlvs.UTILITY.value, "broker")
         tlv_data += coreapi.CoreRegisterTlv.pack(
             self.session.location.config_type, self.session.location.name
         )

@@ -460,7 +460,7 @@ class CoreHandler(socketserver.BaseRequestHandler):
         try:
             header = self.request.recv(coreapi.CoreMessage.header_len)
         except IOError as e:
-            raise IOError("error receiving header (%s)" % e)
+            raise IOError(f"error receiving header ({e})")
 
         if len(header) != coreapi.CoreMessage.header_len:
             if len(header) == 0:

@@ -478,10 +478,7 @@ class CoreHandler(socketserver.BaseRequestHandler):
         while len(data) < message_len:
             data += self.request.recv(message_len - len(data))
             if len(data) > message_len:
-                error_message = (
-                    "received message length does not match received data (%s != %s)"
-                    % (len(data), message_len)
-                )
+                error_message = f"received message length does not match received data ({len(data)} != {message_len})"
                 logging.error(error_message)
                 raise IOError(error_message)
 

@@ -532,10 +529,6 @@ class CoreHandler(socketserver.BaseRequestHandler):
         :param message: message to handle
         :return: nothing
         """
-        if self.session and self.session.broker.handle_message(message):
-            logging.debug("message not being handled locally")
-            return
-
         logging.debug(
             "%s handling message:\n%s", threading.currentThread().getName(), message
         )

@@ -577,11 +570,7 @@ class CoreHandler(socketserver.BaseRequestHandler):
             )
         except KeyError:
             # multiple TLVs of same type cause KeyError exception
-            reply_message = "CoreMessage (type %d flags %d length %d)" % (
-                message_type,
-                message_flags,
-                message_length,
-            )
+            reply_message = f"CoreMessage (type {message_type} flags {message_flags} length {message_length})"
 
         logging.debug("sending reply:\n%s", reply_message)
 

@@ -605,12 +594,11 @@ class CoreHandler(socketserver.BaseRequestHandler):
             self.session = self.coreemu.create_session(port, master=False)
             logging.debug("created new session for client: %s", self.session.id)
 
-            # TODO: hack to associate this handler with this sessions broker for broadcasting
-            # TODO: broker needs to be pulled out of session to the server/handler level
             if self.master:
                 logging.debug("session set to master")
|
||||
self.session.master = True
|
||||
self.session.broker.session_clients.append(self)
|
||||
clients = self.session_clients.setdefault(self.session.id, [])
|
||||
clients.append(self)
|
||||
|
||||
# add handlers for various data
|
||||
self.add_session_handlers()
|
||||
|
@ -642,7 +630,8 @@ class CoreHandler(socketserver.BaseRequestHandler):
|
|||
]:
|
||||
continue
|
||||
|
||||
for client in self.session.broker.session_clients:
|
||||
clients = self.session_clients[self.session.id]
|
||||
for client in clients:
|
||||
if client == self:
|
||||
continue
|
||||
|
||||
|
@ -733,6 +722,7 @@ class CoreHandler(socketserver.BaseRequestHandler):
|
|||
node_options.icon = message.get_tlv(NodeTlvs.ICON.value)
|
||||
node_options.canvas = message.get_tlv(NodeTlvs.CANVAS.value)
|
||||
node_options.opaque = message.get_tlv(NodeTlvs.OPAQUE.value)
|
||||
node_options.emulation_server = message.get_tlv(NodeTlvs.EMULATION_SERVER.value)
|
||||
|
||||
services = message.get_tlv(NodeTlvs.SERVICES.value)
|
||||
if services:
|
||||
|
@ -886,11 +876,20 @@ class CoreHandler(socketserver.BaseRequestHandler):
|
|||
message.flags & MessageFlags.STRING.value
|
||||
or message.flags & MessageFlags.TEXT.value
|
||||
):
|
||||
# shlex.split() handles quotes within the string
|
||||
if message.flags & MessageFlags.LOCAL.value:
|
||||
status, res = utils.cmd_output(command)
|
||||
try:
|
||||
res = utils.cmd(command)
|
||||
status = 0
|
||||
except CoreCommandError as e:
|
||||
res = e.stderr
|
||||
status = e.returncode
|
||||
else:
|
||||
status, res = node.cmd_output(command)
|
||||
try:
|
||||
res = node.cmd(command)
|
||||
status = 0
|
||||
except CoreCommandError as e:
|
||||
res = e.stderr
|
||||
status = e.returncode
|
||||
logging.info(
|
||||
"done exec cmd=%s with status=%d res=(%d bytes)",
|
||||
command,
|
||||
|
@ -993,7 +992,7 @@ class CoreHandler(socketserver.BaseRequestHandler):
|
|||
RegisterTlvs.EXECUTE_SERVER.value, execute_server
|
||||
)
|
||||
tlv_data += coreapi.CoreRegisterTlv.pack(
|
||||
RegisterTlvs.SESSION.value, "%s" % sid
|
||||
RegisterTlvs.SESSION.value, str(sid)
|
||||
)
|
||||
message = coreapi.CoreRegMessage.pack(0, tlv_data)
|
||||
replies.append(message)
|
||||
|
@ -1017,8 +1016,9 @@ class CoreHandler(socketserver.BaseRequestHandler):
|
|||
|
||||
# find the session containing this client and set the session to master
|
||||
for _id in self.coreemu.sessions:
|
||||
session = self.coreemu.sessions[_id]
|
||||
if self in session.broker.session_clients:
|
||||
clients = self.session_clients.get(_id, [])
|
||||
if self in clients:
|
||||
session = self.coreemu.sessions[_id]
|
||||
logging.debug("setting session to master: %s", session.id)
|
||||
session.master = True
|
||||
break
|
||||
|
@ -1067,7 +1067,7 @@ class CoreHandler(socketserver.BaseRequestHandler):
|
|||
self.handle_config_location(message_type, config_data)
|
||||
elif config_data.object == self.session.metadata.name:
|
||||
replies = self.handle_config_metadata(message_type, config_data)
|
||||
elif config_data.object == self.session.broker.name:
|
||||
elif config_data.object == "broker":
|
||||
self.handle_config_broker(message_type, config_data)
|
||||
elif config_data.object == self.session.services.name:
|
||||
replies = self.handle_config_services(message_type, config_data)
|
||||
|
@ -1097,7 +1097,7 @@ class CoreHandler(socketserver.BaseRequestHandler):
|
|||
self.session.mobility.config_reset(node_id)
|
||||
self.session.emane.config_reset(node_id)
|
||||
else:
|
||||
raise Exception("cant handle config all: %s" % message_type)
|
||||
raise Exception(f"cant handle config all: {message_type}")
|
||||
|
||||
return replies
|
||||
|
||||
|
@ -1151,7 +1151,7 @@ class CoreHandler(socketserver.BaseRequestHandler):
|
|||
if metadata_configs is None:
|
||||
metadata_configs = {}
|
||||
data_values = "|".join(
|
||||
["%s=%s" % (x, metadata_configs[x]) for x in metadata_configs]
|
||||
[f"{x}={metadata_configs[x]}" for x in metadata_configs]
|
||||
)
|
||||
data_types = tuple(ConfigDataTypes.STRING.value for _ in metadata_configs)
|
||||
config_response = ConfigData(
|
||||
|
@ -1172,7 +1172,6 @@ class CoreHandler(socketserver.BaseRequestHandler):
|
|||
|
||||
def handle_config_broker(self, message_type, config_data):
|
||||
if message_type not in [ConfigFlags.REQUEST, ConfigFlags.RESET]:
|
||||
session_id = config_data.session
|
||||
if not config_data.data_values:
|
||||
logging.info("emulation server data missing")
|
||||
else:
|
||||
|
@ -1184,29 +1183,10 @@ class CoreHandler(socketserver.BaseRequestHandler):
|
|||
|
||||
for server in server_list:
|
||||
server_items = server.split(":")
|
||||
name, host, port = server_items[:3]
|
||||
|
||||
if host == "":
|
||||
host = None
|
||||
|
||||
if port == "":
|
||||
port = None
|
||||
else:
|
||||
port = int(port)
|
||||
|
||||
if session_id is not None:
|
||||
# receive session ID and my IP from master
|
||||
self.session.broker.session_id_master = int(
|
||||
session_id.split("|")[0]
|
||||
)
|
||||
self.session.broker.myip = host
|
||||
host = None
|
||||
port = None
|
||||
|
||||
# this connects to the server immediately; maybe we should wait
|
||||
# or spin off a new "client" thread here
|
||||
self.session.broker.addserver(name, host, port)
|
||||
self.session.broker.setupserver(name)
|
||||
name, host, _ = server_items[:3]
|
||||
self.session.distributed.add_server(name, host)
|
||||
elif message_type == ConfigFlags.RESET:
|
||||
self.session.distributed.shutdown()
|
||||
|
||||
def handle_config_services(self, message_type, config_data):
|
||||
replies = []
|
||||
|
@ -1247,12 +1227,12 @@ class CoreHandler(socketserver.BaseRequestHandler):
|
|||
values = []
|
||||
group_strings = []
|
||||
start_index = 1
|
||||
logging.info("sorted groups: %s", groups)
|
||||
logging.debug("sorted groups: %s", groups)
|
||||
for group in groups:
|
||||
services = sorted(group_map[group], key=lambda x: x.name.lower())
|
||||
logging.info("sorted services for group(%s): %s", group, services)
|
||||
logging.debug("sorted services for group(%s): %s", group, services)
|
||||
end_index = start_index + len(services) - 1
|
||||
group_strings.append("%s:%s-%s" % (group, start_index, end_index))
|
||||
group_strings.append(f"{group}:{start_index}-{end_index}")
|
||||
start_index += len(services)
|
||||
for service_name in services:
|
||||
captions.append(service_name.name)
|
||||
|
@ -1415,7 +1395,7 @@ class CoreHandler(socketserver.BaseRequestHandler):
|
|||
parsed_config = ConfigShim.str_to_dict(values_str)
|
||||
|
||||
self.session.mobility.set_model_config(node_id, object_name, parsed_config)
|
||||
if self.session.state == EventTypes.RUNTIME_STATE.value:
|
||||
if self.session.state == EventTypes.RUNTIME_STATE.value and parsed_config:
|
||||
try:
|
||||
node = self.session.get_node(node_id)
|
||||
if object_name == BasicRangeModel.name:
|
||||
|
@ -1727,24 +1707,24 @@ class CoreHandler(socketserver.BaseRequestHandler):
|
|||
):
|
||||
status = self.session.services.stop_service(node, service)
|
||||
if status:
|
||||
fail += "Stop %s," % service.name
|
||||
fail += f"Stop {service.name},"
|
||||
if (
|
||||
event_type == EventTypes.START.value
|
||||
or event_type == EventTypes.RESTART.value
|
||||
):
|
||||
status = self.session.services.startup_service(node, service)
|
||||
if status:
|
||||
fail += "Start %s(%s)," % service.name
|
||||
fail += f"Start ({service.name}),"
|
||||
if event_type == EventTypes.PAUSE.value:
|
||||
status = self.session.services.validate_service(node, service)
|
||||
if status:
|
||||
fail += "%s," % service.name
|
||||
fail += f"{service.name},"
|
||||
if event_type == EventTypes.RECONFIGURE.value:
|
||||
self.session.services.service_reconfigure(node, service)
|
||||
|
||||
fail_data = ""
|
||||
if len(fail) > 0:
|
||||
fail_data += "Fail:" + fail
|
||||
fail_data += f"Fail:{fail}"
|
||||
unknown_data = ""
|
||||
num = len(unknown)
|
||||
if num > 0:
|
||||
|
@ -1754,14 +1734,14 @@ class CoreHandler(socketserver.BaseRequestHandler):
|
|||
unknown_data += ", "
|
||||
num -= 1
|
||||
logging.warning("Event requested for unknown service(s): %s", unknown_data)
|
||||
unknown_data = "Unknown:" + unknown_data
|
||||
unknown_data = f"Unknown:{unknown_data}"
|
||||
|
||||
event_data = EventData(
|
||||
node=node_id,
|
||||
event_type=event_type,
|
||||
name=name,
|
||||
data=fail_data + ";" + unknown_data,
|
||||
time="%s" % time.time(),
|
||||
time=str(time.time()),
|
||||
)
|
||||
|
||||
self.session.broadcast_event(event_data)
|
||||
|
@ -1782,7 +1762,7 @@ class CoreHandler(socketserver.BaseRequestHandler):
|
|||
thumb = message.get_tlv(SessionTlvs.THUMB.value)
|
||||
user = message.get_tlv(SessionTlvs.USER.value)
|
||||
logging.debug(
|
||||
"SESSION message flags=0x%x sessions=%s" % (message.flags, session_id_str)
|
||||
"SESSION message flags=0x%x sessions=%s", message.flags, session_id_str
|
||||
)
|
||||
|
||||
if message.flags == 0:
|
||||
|
@ -1832,11 +1812,9 @@ class CoreHandler(socketserver.BaseRequestHandler):
|
|||
|
||||
# remove client from session broker and shutdown if needed
|
||||
self.remove_session_handlers()
|
||||
self.session.broker.session_clients.remove(self)
|
||||
if (
|
||||
not self.session.broker.session_clients
|
||||
and not self.session.is_active()
|
||||
):
|
||||
clients = self.session_clients[self.session.id]
|
||||
clients.remove(self)
|
||||
if not clients and not self.session.is_active():
|
||||
self.coreemu.delete_session(self.session.id)
|
||||
|
||||
# set session to join
|
||||
|
@ -1845,7 +1823,8 @@ class CoreHandler(socketserver.BaseRequestHandler):
|
|||
# add client to session broker and set master if needed
|
||||
if self.master:
|
||||
self.session.master = True
|
||||
self.session.broker.session_clients.append(self)
|
||||
clients = self.session_clients.setdefault(self.session.id, [])
|
||||
clients.append(self)
|
||||
|
||||
# add broadcast handlers
|
||||
logging.info("adding session broadcast handlers")
|
||||
|
@ -1954,7 +1933,7 @@ class CoreHandler(socketserver.BaseRequestHandler):
|
|||
# service customizations
|
||||
service_configs = self.session.services.all_configs()
|
||||
for node_id, service in service_configs:
|
||||
opaque = "service:%s" % service.name
|
||||
opaque = f"service:{service.name}"
|
||||
data_types = tuple(
|
||||
repeat(ConfigDataTypes.STRING.value, len(ServiceShim.keys))
|
||||
)
|
||||
|
@ -1990,7 +1969,7 @@ class CoreHandler(socketserver.BaseRequestHandler):
|
|||
file_data = FileData(
|
||||
message_type=MessageFlags.ADD.value,
|
||||
name=str(file_name),
|
||||
type="hook:%s" % state,
|
||||
type=f"hook:{state}",
|
||||
data=str(config_data),
|
||||
)
|
||||
self.session.broadcast_file(file_data)
|
||||
|
@ -2006,7 +1985,7 @@ class CoreHandler(socketserver.BaseRequestHandler):
|
|||
metadata_configs = self.session.metadata.get_configs()
|
||||
if metadata_configs:
|
||||
data_values = "|".join(
|
||||
["%s=%s" % (x, metadata_configs[x]) for x in metadata_configs]
|
||||
[f"{x}={metadata_configs[x]}" for x in metadata_configs]
|
||||
)
|
||||
data_types = tuple(
|
||||
ConfigDataTypes.STRING.value
|
||||
|
@ -2041,6 +2020,7 @@ class CoreUdpHandler(CoreHandler):
|
|||
}
|
||||
self.master = False
|
||||
self.session = None
|
||||
self.coreemu = server.mainserver.coreemu
|
||||
socketserver.BaseRequestHandler.__init__(self, request, client_address, server)
|
||||
|
||||
def setup(self):
|
||||
|
@ -2054,7 +2034,7 @@ class CoreUdpHandler(CoreHandler):
|
|||
data = self.request[0]
|
||||
header = data[: coreapi.CoreMessage.header_len]
|
||||
if len(header) < coreapi.CoreMessage.header_len:
|
||||
raise IOError("error receiving header (received %d bytes)" % len(header))
|
||||
raise IOError(f"error receiving header (received {len(header)} bytes)")
|
||||
|
||||
message_type, message_flags, message_len = coreapi.CoreMessage.unpack_header(
|
||||
header
|
||||
|
@ -2095,6 +2075,7 @@ class CoreUdpHandler(CoreHandler):
|
|||
logging.debug("session handling message: %s", session.session_id)
|
||||
self.session = session
|
||||
self.handle_message(message)
|
||||
self.session.sdt.handle_distributed(message)
|
||||
self.broadcast(message)
|
||||
else:
|
||||
logging.error(
|
||||
|
@ -2119,6 +2100,7 @@ class CoreUdpHandler(CoreHandler):
|
|||
if session or message.message_type == MessageTypes.REGISTER.value:
|
||||
self.session = session
|
||||
self.handle_message(message)
|
||||
self.session.sdt.handle_distributed(message)
|
||||
self.broadcast(message)
|
||||
else:
|
||||
logging.error(
|
||||
|
@ -2129,7 +2111,8 @@ class CoreUdpHandler(CoreHandler):
|
|||
if not isinstance(message, (coreapi.CoreNodeMessage, coreapi.CoreLinkMessage)):
|
||||
return
|
||||
|
||||
for client in self.session.broker.session_clients:
|
||||
clients = self.session_clients[self.session.id]
|
||||
for client in clients:
|
||||
try:
|
||||
client.sendall(message.raw_message)
|
||||
except IOError:
|
||||
|
@ -2146,7 +2129,7 @@ class CoreUdpHandler(CoreHandler):
|
|||
:return:
|
||||
"""
|
||||
raise Exception(
|
||||
"Unable to queue %s message for later processing using UDP!" % msg
|
||||
f"Unable to queue {msg} message for later processing using UDP!"
|
||||
)
|
||||
|
||||
def sendall(self, data):
|
||||
|
|
|
@ -4,8 +4,6 @@ Utilities for working with python struct data.
|
|||
|
||||
import logging
|
||||
|
||||
from past.builtins import basestring
|
||||
|
||||
|
||||
def pack_values(clazz, packers):
|
||||
"""
|
||||
|
@ -31,7 +29,7 @@ def pack_values(clazz, packers):
|
|||
|
||||
# only pack actual values and avoid packing empty strings
|
||||
# protobuf defaults to empty strings and does not imply a value to set
|
||||
if value is None or (isinstance(value, basestring) and not value):
|
||||
if value is None or (isinstance(value, str) and not value):
|
||||
continue
|
||||
|
||||
# transform values as needed
|
||||
|
|
|
@@ -40,10 +40,8 @@ class ConfigShim(object):
"""
group_strings = []
for config_group in config_groups:
group_string = "%s:%s-%s" % (
config_group.name,
config_group.start,
config_group.stop,
group_string = (
f"{config_group.name}:{config_group.start}-{config_group.stop}"
)
group_strings.append(group_string)
return "|".join(group_strings)
@ -74,7 +72,7 @@ class ConfigShim(object):
|
|||
if not captions:
|
||||
captions = configuration.label
|
||||
else:
|
||||
captions += "|%s" % configuration.label
|
||||
captions += f"|{configuration.label}"
|
||||
|
||||
data_types.append(configuration.type.value)
|
||||
|
||||
|
@ -83,11 +81,11 @@ class ConfigShim(object):
|
|||
|
||||
_id = configuration.id
|
||||
config_value = config.get(_id, configuration.default)
|
||||
key_value = "%s=%s" % (_id, config_value)
|
||||
key_value = f"{_id}={config_value}"
|
||||
if not key_values:
|
||||
key_values = key_value
|
||||
else:
|
||||
key_values += "|%s" % key_value
|
||||
key_values += f"|{key_value}"
|
||||
|
||||
groups_str = cls.groups_to_str(configurable_options.config_groups())
|
||||
return ConfigData(
|
||||
|
@ -130,13 +128,7 @@ class Configuration(object):
|
|||
self.label = label
|
||||
|
||||
def __str__(self):
|
||||
return "%s(id=%s, type=%s, default=%s, options=%s)" % (
|
||||
self.__class__.__name__,
|
||||
self.id,
|
||||
self.type,
|
||||
self.default,
|
||||
self.options,
|
||||
)
|
||||
return f"{self.__class__.__name__}(id={self.id}, type={self.type}, default={self.default}, options={self.options})"
|
||||
|
||||
|
||||
class ConfigurableManager(object):
|
||||
|
@ -333,7 +325,7 @@ class ModelManager(ConfigurableManager):
|
|||
# get model class to configure
|
||||
model_class = self.models.get(model_name)
|
||||
if not model_class:
|
||||
raise ValueError("%s is an invalid model" % model_name)
|
||||
raise ValueError(f"{model_name} is an invalid model")
|
||||
|
||||
# retrieve default values
|
||||
model_config = self.get_model_config(node_id, model_name)
|
||||
|
@ -361,7 +353,7 @@ class ModelManager(ConfigurableManager):
|
|||
# get model class to configure
|
||||
model_class = self.models.get(model_name)
|
||||
if not model_class:
|
||||
raise ValueError("%s is an invalid model" % model_name)
|
||||
raise ValueError(f"{model_name} is an invalid model")
|
||||
|
||||
config = self.get_configs(node_id=node_id, config_type=model_name)
|
||||
if not config:
|
||||
|
|
|
@@ -1,29 +1,20 @@
import os
from core.utils import which
COREDPY_VERSION = "@PACKAGE_VERSION@"
CORE_STATE_DIR = "@CORE_STATE_DIR@"
COREDPY_VERSION = "@PACKAGE_VERSION@"
CORE_CONF_DIR = "@CORE_CONF_DIR@"
CORE_DATA_DIR = "@CORE_DATA_DIR@"
QUAGGA_STATE_DIR = "@CORE_STATE_DIR@/run/quagga"
FRR_STATE_DIR = "@CORE_STATE_DIR@/run/frr"
def which(command):
for path in os.environ["PATH"].split(os.pathsep):
command_path = os.path.join(path, command)
if os.path.isfile(command_path) and os.access(command_path, os.X_OK):
return command_path
VNODED_BIN = which("vnoded")
VCMD_BIN = which("vcmd")
BRCTL_BIN = which("brctl")
SYSCTL_BIN = which("sysctl")
IP_BIN = which("ip")
ETHTOOL_BIN = which("ethtool")
TC_BIN = which("tc")
EBTABLES_BIN = which("ebtables")
MOUNT_BIN = which("mount")
UMOUNT_BIN = which("umount")
OVS_BIN = which("ovs-vsctl")
OVS_FLOW_BIN = which("ovs-ofctl")
VNODED_BIN = which("vnoded", required=True)
VCMD_BIN = which("vcmd", required=True)
BRCTL_BIN = which("brctl", required=True)
SYSCTL_BIN = which("sysctl", required=True)
IP_BIN = which("ip", required=True)
ETHTOOL_BIN = which("ethtool", required=True)
TC_BIN = which("tc", required=True)
EBTABLES_BIN = which("ebtables", required=True)
MOUNT_BIN = which("mount", required=True)
UMOUNT_BIN = which("umount", required=True)
OVS_BIN = which("ovs-vsctl", required=False)
OVS_FLOW_BIN = which("ovs-ofctl", required=False)
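The constants module now delegates to core.utils.which with a required flag rather than the local helper removed above. That helper's body is not part of this diff, so the following is only a plausible sketch of the behavior the flag implies: missing required tools fail loudly, optional ones may resolve to None.

    import shutil

    class CoreError(Exception):
        """Stand-in for the error raised when a required tool is missing."""

    def which(command, required):
        # locate an executable on PATH; fail hard only when it is required
        found = shutil.which(command)
        if found is None and required:
            raise CoreError(f"required executable not found: {command}")
        return found

    IP_BIN = which("ip", required=True)
    OVS_BIN = which("ovs-vsctl", required=False)  # optional tools may be None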
@ -4,10 +4,8 @@ commeffect.py: EMANE CommEffect model for CORE
|
|||
|
||||
import logging
|
||||
import os
|
||||
from builtins import int
|
||||
|
||||
from lxml import etree
|
||||
from past.builtins import basestring
|
||||
|
||||
from core.config import ConfigGroup
|
||||
from core.emane import emanemanifest, emanemodel
|
||||
|
@ -26,7 +24,7 @@ def convert_none(x):
|
|||
"""
|
||||
Helper to use 0 for None values.
|
||||
"""
|
||||
if isinstance(x, basestring):
|
||||
if isinstance(x, str):
|
||||
x = float(x)
|
||||
if x is None:
|
||||
return 0
|
||||
|
@ -75,9 +73,7 @@ class EmaneCommEffectModel(emanemodel.EmaneModel):
|
|||
shim_name = emanexml.shim_file_name(self, interface)
|
||||
|
||||
# create and write nem document
|
||||
nem_element = etree.Element(
|
||||
"nem", name="%s NEM" % self.name, type="unstructured"
|
||||
)
|
||||
nem_element = etree.Element("nem", name=f"{self.name} NEM", type="unstructured")
|
||||
transport_type = "virtual"
|
||||
if interface and interface.transport_type == "raw":
|
||||
transport_type = "raw"
|
||||
|
@ -92,7 +88,7 @@ class EmaneCommEffectModel(emanemodel.EmaneModel):
|
|||
|
||||
# create and write shim document
|
||||
shim_element = etree.Element(
|
||||
"shim", name="%s SHIM" % self.name, library=self.shim_library
|
||||
"shim", name=f"{self.name} SHIM", library=self.shim_library
|
||||
)
|
||||
|
||||
# append all shim options (except filterfile) to shimdoc
|
||||
|
|
|
@ -2,30 +2,22 @@
|
|||
emane.py: definition of an Emane class for implementing configuration control of an EMANE emulation.
|
||||
"""
|
||||
|
||||
import copy
|
||||
import logging
|
||||
import os
|
||||
import threading
|
||||
|
||||
from core import CoreCommandError, CoreError, constants, utils
|
||||
from core.api.tlv import coreapi, dataconversion
|
||||
from core.config import ConfigGroup, ConfigShim, Configuration, ModelManager
|
||||
from core import utils
|
||||
from core.config import ConfigGroup, Configuration, ModelManager
|
||||
from core.emane import emanemanifest
|
||||
from core.emane.bypass import EmaneBypassModel
|
||||
from core.emane.commeffect import EmaneCommEffectModel
|
||||
from core.emane.emanemodel import EmaneModel
|
||||
from core.emane.ieee80211abg import EmaneIeee80211abgModel
|
||||
from core.emane.nodes import EmaneNode
|
||||
from core.emane.nodes import EmaneNet
|
||||
from core.emane.rfpipe import EmaneRfPipeModel
|
||||
from core.emane.tdma import EmaneTdmaModel
|
||||
from core.emulator.enumerations import (
|
||||
ConfigDataTypes,
|
||||
ConfigFlags,
|
||||
ConfigTlvs,
|
||||
MessageFlags,
|
||||
MessageTypes,
|
||||
RegisterTlvs,
|
||||
)
|
||||
from core.emulator.enumerations import ConfigDataTypes, RegisterTlvs
|
||||
from core.errors import CoreCommandError, CoreError
|
||||
from core.xml import emanexml
|
||||
|
||||
try:
|
||||
|
@ -53,8 +45,8 @@ DEFAULT_EMANE_PREFIX = "/usr"
|
|||
class EmaneManager(ModelManager):
|
||||
"""
|
||||
EMANE controller object. Lives in a Session instance and is used for
|
||||
building EMANE config files from all of the EmaneNode objects in this
|
||||
emulation, and for controlling the EMANE daemons.
|
||||
building EMANE config files for all EMANE networks in this emulation, and for
|
||||
controlling the EMANE daemons.
|
||||
"""
|
||||
|
||||
name = "emane"
|
||||
|
@ -72,10 +64,8 @@ class EmaneManager(ModelManager):
|
|||
"""
|
||||
super(EmaneManager, self).__init__()
|
||||
self.session = session
|
||||
self._emane_nodes = {}
|
||||
self._emane_nets = {}
|
||||
self._emane_node_lock = threading.Lock()
|
||||
self._ifccounts = {}
|
||||
self._ifccountslock = threading.Lock()
|
||||
# port numbers are allocated from these counters
|
||||
self.platformport = self.session.options.get_config_int(
|
||||
"emane_platform_port", 8100
|
||||
|
@ -90,7 +80,6 @@ class EmaneManager(ModelManager):
|
|||
self.emane_config = EmaneGlobalModel(session)
|
||||
self.set_configs(self.emane_config.default_values())
|
||||
|
||||
session.broker.handlers.add(self.handledistributed)
|
||||
self.service = None
|
||||
self.event_device = None
|
||||
self.emane_check()
|
||||
|
@ -150,8 +139,10 @@ class EmaneManager(ModelManager):
|
|||
"""
|
||||
try:
|
||||
# check for emane
|
||||
emane_version = utils.check_cmd(["emane", "--version"])
|
||||
args = "emane --version"
|
||||
emane_version = utils.cmd(args)
|
||||
logging.info("using EMANE: %s", emane_version)
|
||||
self.session.distributed.execute(lambda x: x.remote_cmd(args))
|
||||
|
||||
# load default emane models
|
||||
self.load_models(EMANE_MODELS)
|
||||
|
@ -226,38 +217,39 @@ class EmaneManager(ModelManager):
|
|||
emane_model.load(emane_prefix)
|
||||
self.models[emane_model.name] = emane_model
|
||||
|
||||
def add_node(self, emane_node):
|
||||
def add_node(self, emane_net):
|
||||
"""
|
||||
Add a new EmaneNode object to this Emane controller object
|
||||
Add EMANE network object to this manager.
|
||||
|
||||
:param core.emane.nodes.EmaneNode emane_node: emane node to add
|
||||
:param core.emane.nodes.EmaneNet emane_net: emane node to add
|
||||
:return: nothing
|
||||
"""
|
||||
with self._emane_node_lock:
|
||||
if emane_node.id in self._emane_nodes:
|
||||
if emane_net.id in self._emane_nets:
|
||||
raise KeyError(
|
||||
"non-unique EMANE object id %s for %s" % (emane_node.id, emane_node)
|
||||
f"non-unique EMANE object id {emane_net.id} for {emane_net}"
|
||||
)
|
||||
self._emane_nodes[emane_node.id] = emane_node
|
||||
self._emane_nets[emane_net.id] = emane_net
|
||||
|
||||
def getnodes(self):
|
||||
"""
|
||||
Return a set of CoreNodes that are linked to an EmaneNode,
|
||||
Return a set of CoreNodes that are linked to an EMANE network,
|
||||
e.g. containers having one or more radio interfaces.
|
||||
"""
|
||||
# assumes self._objslock already held
|
||||
nodes = set()
|
||||
for emane_node in self._emane_nodes.values():
|
||||
for netif in emane_node.netifs():
|
||||
for emane_net in self._emane_nets.values():
|
||||
for netif in emane_net.netifs():
|
||||
nodes.add(netif.node)
|
||||
return nodes
|
||||
|
||||
def setup(self):
|
||||
"""
|
||||
Populate self._objs with EmaneNodes; perform distributed setup;
|
||||
associate models with EmaneNodes from self.config. Returns
|
||||
Emane.(SUCCESS, NOT_NEEDED, NOT_READY) in order to delay session
|
||||
instantiation.
|
||||
Setup duties for EMANE manager.
|
||||
|
||||
:return: SUCCESS, NOT_NEEDED, NOT_READY in order to delay session
|
||||
instantiation
|
||||
:rtype: int
|
||||
"""
|
||||
logging.debug("emane setup")
|
||||
|
||||
|
@ -265,18 +257,17 @@ class EmaneManager(ModelManager):
|
|||
with self.session._nodes_lock:
|
||||
for node_id in self.session.nodes:
|
||||
node = self.session.nodes[node_id]
|
||||
if isinstance(node, EmaneNode):
|
||||
if isinstance(node, EmaneNet):
|
||||
logging.debug(
|
||||
"adding emane node: id(%s) name(%s)", node.id, node.name
|
||||
)
|
||||
self.add_node(node)
|
||||
|
||||
if not self._emane_nodes:
|
||||
if not self._emane_nets:
|
||||
logging.debug("no emane nodes in session")
|
||||
return EmaneManager.NOT_NEEDED
|
||||
|
||||
# control network bridge required for EMANE 0.9.2
|
||||
# - needs to be configured before checkdistributed() for distributed
|
||||
# - needs to exist when eventservice binds to it (initeventservice)
|
||||
if self.session.master:
|
||||
otadev = self.get_config("otamanagerdevice")
|
||||
|
@ -291,10 +282,9 @@ class EmaneManager(ModelManager):
|
|||
)
|
||||
return EmaneManager.NOT_READY
|
||||
|
||||
ctrlnet = self.session.add_remove_control_net(
|
||||
self.session.add_remove_control_net(
|
||||
net_index=netidx, remove=False, conf_required=False
|
||||
)
|
||||
self.distributedctrlnet(ctrlnet)
|
||||
eventdev = self.get_config("eventservicedevice")
|
||||
logging.debug("emane event service device: eventdev(%s)", eventdev)
|
||||
if eventdev != otadev:
|
||||
|
@ -307,27 +297,21 @@ class EmaneManager(ModelManager):
|
|||
)
|
||||
return EmaneManager.NOT_READY
|
||||
|
||||
ctrlnet = self.session.add_remove_control_net(
|
||||
self.session.add_remove_control_net(
|
||||
net_index=netidx, remove=False, conf_required=False
|
||||
)
|
||||
self.distributedctrlnet(ctrlnet)
|
||||
|
||||
if self.checkdistributed():
|
||||
# we are slave, but haven't received a platformid yet
|
||||
platform_id_start = "platform_id_start"
|
||||
default_values = self.emane_config.default_values()
|
||||
value = self.get_config(platform_id_start)
|
||||
if value == default_values[platform_id_start]:
|
||||
return EmaneManager.NOT_READY
|
||||
|
||||
self.check_node_models()
|
||||
return EmaneManager.SUCCESS
|
||||
|
||||
def startup(self):
|
||||
"""
|
||||
After all the EmaneNode objects have been added, build XML files
|
||||
and start the daemons. Returns Emane.(SUCCESS, NOT_NEEDED, or
|
||||
NOT_READY) which is used to delay session instantiation.
|
||||
After all the EMANE networks have been added, build XML files
|
||||
and start the daemons.
|
||||
|
||||
:return: SUCCESS, NOT_NEEDED, NOT_READY in order to delay session
|
||||
instantiation
|
||||
:rtype: int
|
||||
"""
|
||||
self.reset()
|
||||
r = self.setup()
|
||||
|
@ -346,8 +330,8 @@ class EmaneManager(ModelManager):
|
|||
self.startdaemons()
|
||||
self.installnetifs()
|
||||
|
||||
for node_id in self._emane_nodes:
|
||||
emane_node = self._emane_nodes[node_id]
|
||||
for node_id in self._emane_nets:
|
||||
emane_node = self._emane_nets[node_id]
|
||||
for netif in emane_node.netifs():
|
||||
nems.append(
|
||||
(netif.node.name, netif.name, emane_node.getnemid(netif))
|
||||
|
@ -358,7 +342,7 @@ class EmaneManager(ModelManager):
|
|||
try:
|
||||
with open(emane_nems_filename, "w") as f:
|
||||
for nodename, ifname, nemid in nems:
|
||||
f.write("%s %s %s\n" % (nodename, ifname, nemid))
|
||||
f.write(f"{nodename} {ifname} {nemid}\n")
|
||||
except IOError:
|
||||
logging.exception("Error writing EMANE NEMs file: %s")
|
||||
|
||||
|
@ -372,8 +356,8 @@ class EmaneManager(ModelManager):
|
|||
return
|
||||
|
||||
with self._emane_node_lock:
|
||||
for key in sorted(self._emane_nodes.keys()):
|
||||
emane_node = self._emane_nodes[key]
|
||||
for key in sorted(self._emane_nets.keys()):
|
||||
emane_node = self._emane_nets[key]
|
||||
logging.debug(
|
||||
"post startup for emane node: %s - %s",
|
||||
emane_node.id,
|
||||
|
@ -386,11 +370,11 @@ class EmaneManager(ModelManager):
|
|||
|
||||
def reset(self):
|
||||
"""
|
||||
remove all EmaneNode objects from the dictionary,
|
||||
reset port numbers and nem id counters
|
||||
Remove all EMANE networks from the dictionary, reset port numbers and
|
||||
nem id counters
|
||||
"""
|
||||
with self._emane_node_lock:
|
||||
self._emane_nodes.clear()
|
||||
self._emane_nets.clear()
|
||||
|
||||
# don't clear self._ifccounts here; NEM counts are needed for buildxml
|
||||
self.platformport = self.session.options.get_config_int(
|
||||
|
@ -404,103 +388,14 @@ class EmaneManager(ModelManager):
|
|||
"""
|
||||
stop all EMANE daemons
|
||||
"""
|
||||
with self._ifccountslock:
|
||||
self._ifccounts.clear()
|
||||
|
||||
with self._emane_node_lock:
|
||||
if not self._emane_nodes:
|
||||
if not self._emane_nets:
|
||||
return
|
||||
logging.info("stopping EMANE daemons.")
|
||||
self.deinstallnetifs()
|
||||
self.stopdaemons()
|
||||
self.stopeventmonitor()
|
||||
|
||||
def handledistributed(self, message):
|
||||
"""
|
||||
Broker handler for processing CORE API messages as they are
|
||||
received. This is used to snoop the Link add messages to get NEM
|
||||
counts of NEMs that exist on other servers.
|
||||
"""
|
||||
if (
|
||||
message.message_type == MessageTypes.LINK.value
|
||||
and message.flags & MessageFlags.ADD.value
|
||||
):
|
||||
nn = message.node_numbers()
|
||||
# first node is always link layer node in Link add message
|
||||
if nn[0] in self.session.broker.network_nodes:
|
||||
serverlist = self.session.broker.getserversbynode(nn[1])
|
||||
for server in serverlist:
|
||||
with self._ifccountslock:
|
||||
if server not in self._ifccounts:
|
||||
self._ifccounts[server] = 1
|
||||
else:
|
||||
self._ifccounts[server] += 1
|
||||
|
||||
def checkdistributed(self):
|
||||
"""
|
||||
Check for EMANE nodes that exist on multiple emulation servers and
|
||||
coordinate the NEM id and port number space.
|
||||
If we are the master EMANE node, return False so initialization will
|
||||
proceed as normal; otherwise slaves return True here and
|
||||
initialization is deferred.
|
||||
"""
|
||||
# check with the session if we are the "master" Emane object?
|
||||
master = False
|
||||
|
||||
with self._emane_node_lock:
|
||||
if self._emane_nodes:
|
||||
master = self.session.master
|
||||
logging.info("emane check distributed as master: %s.", master)
|
||||
|
||||
# we are not the master Emane object, wait for nem id and ports
|
||||
if not master:
|
||||
return True
|
||||
|
||||
nemcount = 0
|
||||
with self._emane_node_lock:
|
||||
for key in self._emane_nodes:
|
||||
emane_node = self._emane_nodes[key]
|
||||
nemcount += emane_node.numnetif()
|
||||
|
||||
nemid = int(self.get_config("nem_id_start"))
|
||||
nemid += nemcount
|
||||
|
||||
platformid = int(self.get_config("platform_id_start"))
|
||||
|
||||
# build an ordered list of servers so platform ID is deterministic
|
||||
servers = []
|
||||
for key in sorted(self._emane_nodes):
|
||||
for server in self.session.broker.getserversbynode(key):
|
||||
if server not in servers:
|
||||
servers.append(server)
|
||||
|
||||
servers.sort(key=lambda x: x.name)
|
||||
for server in servers:
|
||||
if server.name == "localhost":
|
||||
continue
|
||||
|
||||
if server.sock is None:
|
||||
continue
|
||||
|
||||
platformid += 1
|
||||
|
||||
# create temporary config for updating distributed nodes
|
||||
typeflags = ConfigFlags.UPDATE.value
|
||||
config = copy.deepcopy(self.get_configs())
|
||||
config["platform_id_start"] = str(platformid)
|
||||
config["nem_id_start"] = str(nemid)
|
||||
config_data = ConfigShim.config_data(
|
||||
0, None, typeflags, self.emane_config, config
|
||||
)
|
||||
message = dataconversion.convert_config(config_data)
|
||||
server.sock.send(message)
|
||||
# increment nemid for next server by number of interfaces
|
||||
with self._ifccountslock:
|
||||
if server in self._ifccounts:
|
||||
nemid += self._ifccounts[server]
|
||||
|
||||
return False
|
||||
|
||||
def buildxml(self):
|
||||
"""
|
||||
Build XML files required to run EMANE on each node.
|
||||
|
@ -517,59 +412,12 @@ class EmaneManager(ModelManager):
|
|||
self.buildnemxml()
|
||||
self.buildeventservicexml()
|
||||
|
||||
# TODO: remove need for tlv messaging
|
||||
def distributedctrlnet(self, ctrlnet):
|
||||
"""
|
||||
Distributed EMANE requires multiple control network prefixes to
|
||||
be configured. This generates configuration for slave control nets
|
||||
using the default list of prefixes.
|
||||
"""
|
||||
# slave server
|
||||
session = self.session
|
||||
if not session.master:
|
||||
return
|
||||
|
||||
# not distributed
|
||||
servers = session.broker.getservernames()
|
||||
if len(servers) < 2:
|
||||
return
|
||||
|
||||
# normal Config messaging will distribute controlnets
|
||||
prefix = session.options.get_config("controlnet", default="")
|
||||
prefixes = prefix.split()
|
||||
if len(prefixes) < len(servers):
|
||||
logging.info(
|
||||
"setting up default controlnet prefixes for distributed (%d configured)",
|
||||
len(prefixes),
|
||||
)
|
||||
prefix = ctrlnet.DEFAULT_PREFIX_LIST[0]
|
||||
prefixes = prefix.split()
|
||||
servers.remove("localhost")
|
||||
servers.insert(0, "localhost")
|
||||
prefix = " ".join("%s:%s" % (s, prefixes[i]) for i, s in enumerate(servers))
|
||||
|
||||
# this generates a config message having controlnet prefix assignments
|
||||
logging.info("setting up controlnet prefixes for distributed: %s", prefix)
|
||||
vals = "controlnet=%s" % prefix
|
||||
tlvdata = b""
|
||||
tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.OBJECT.value, "session")
|
||||
tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.TYPE.value, 0)
|
||||
tlvdata += coreapi.CoreConfigTlv.pack(ConfigTlvs.VALUES.value, vals)
|
||||
rawmsg = coreapi.CoreConfMessage.pack(0, tlvdata)
|
||||
msghdr = rawmsg[: coreapi.CoreMessage.header_len]
|
||||
msg = coreapi.CoreConfMessage(
|
||||
flags=0, hdr=msghdr, data=rawmsg[coreapi.CoreMessage.header_len :]
|
||||
)
|
||||
logging.debug("sending controlnet message:\n%s", msg)
|
||||
self.session.broker.handle_message(msg)
|
||||
|
||||
def check_node_models(self):
|
||||
"""
|
||||
Associate EmaneModel classes with EmaneNode nodes. The model
|
||||
configurations are stored in self.configs.
|
||||
Associate EMANE model classes with EMANE network nodes.
|
||||
"""
|
||||
for node_id in self._emane_nodes:
|
||||
emane_node = self._emane_nodes[node_id]
|
||||
for node_id in self._emane_nets:
|
||||
emane_node = self._emane_nets[node_id]
|
||||
logging.debug("checking emane model for node: %s", node_id)
|
||||
|
||||
# skip nodes that already have a model set
|
||||
|
@ -595,13 +443,13 @@ class EmaneManager(ModelManager):
|
|||
def nemlookup(self, nemid):
|
||||
"""
|
||||
Look for the given numerical NEM ID and return the first matching
|
||||
EmaneNode and NEM interface.
|
||||
EMANE network and NEM interface.
|
||||
"""
|
||||
emane_node = None
|
||||
netif = None
|
||||
|
||||
for node_id in self._emane_nodes:
|
||||
emane_node = self._emane_nodes[node_id]
|
||||
for node_id in self._emane_nets:
|
||||
emane_node = self._emane_nets[node_id]
|
||||
netif = emane_node.getnemnetif(nemid)
|
||||
if netif is not None:
|
||||
break
|
||||
|
@ -615,8 +463,8 @@ class EmaneManager(ModelManager):
|
|||
Return the number of NEMs emulated locally.
|
||||
"""
|
||||
count = 0
|
||||
for node_id in self._emane_nodes:
|
||||
emane_node = self._emane_nodes[node_id]
|
||||
for node_id in self._emane_nets:
|
||||
emane_node = self._emane_nets[node_id]
|
||||
count += len(emane_node.netifs())
|
||||
return count
|
||||
|
||||
|
@ -628,28 +476,19 @@ class EmaneManager(ModelManager):
|
|||
platform_xmls = {}
|
||||
|
||||
# assume self._objslock is already held here
|
||||
for key in sorted(self._emane_nodes.keys()):
|
||||
emane_node = self._emane_nodes[key]
|
||||
for key in sorted(self._emane_nets.keys()):
|
||||
emane_node = self._emane_nets[key]
|
||||
nemid = emanexml.build_node_platform_xml(
|
||||
self, ctrlnet, emane_node, nemid, platform_xmls
|
||||
)
|
||||
|
||||
def buildnemxml(self):
|
||||
"""
|
||||
Builds the xxxnem.xml, xxxmac.xml, and xxxphy.xml files which
|
||||
are defined on a per-EmaneNode basis.
|
||||
Builds the nem, mac, and phy xml files for each EMANE network.
|
||||
"""
|
||||
for key in sorted(self._emane_nodes.keys()):
|
||||
emane_node = self._emane_nodes[key]
|
||||
emanexml.build_xml_files(self, emane_node)
|
||||
|
||||
def buildtransportxml(self):
|
||||
"""
|
||||
Calls emanegentransportxml using a platform.xml file to build the transportdaemon*.xml.
|
||||
"""
|
||||
utils.check_cmd(
|
||||
["emanegentransportxml", "platform.xml"], cwd=self.session.session_dir
|
||||
)
|
||||
for key in sorted(self._emane_nets):
|
||||
emane_net = self._emane_nets[key]
|
||||
emanexml.build_xml_files(self, emane_net)
|
||||
|
||||
def buildeventservicexml(self):
|
||||
"""
|
||||
|
@ -676,8 +515,12 @@ class EmaneManager(ModelManager):
|
|||
return
|
||||
|
||||
dev = self.get_config("eventservicedevice")
|
||||
|
||||
emanexml.create_event_service_xml(group, port, dev, self.session.session_dir)
|
||||
self.session.distributed.execute(
|
||||
lambda x: emanexml.create_event_service_xml(
|
||||
group, port, dev, self.session.session_dir, x
|
||||
)
|
||||
)
|
||||
|
||||
def startdaemons(self):
|
||||
"""
|
||||
|
@ -692,9 +535,9 @@ class EmaneManager(ModelManager):
|
|||
logging.info("setting user-defined EMANE log level: %d", cfgloglevel)
|
||||
loglevel = str(cfgloglevel)
|
||||
|
||||
emanecmd = ["emane", "-d", "-l", loglevel]
|
||||
emanecmd = f"emane -d -l {loglevel}"
|
||||
if realtime:
|
||||
emanecmd += ("-r",)
|
||||
emanecmd += " -r"
|
||||
|
||||
otagroup, _otaport = self.get_config("otamanagergroup").split(":")
|
||||
otadev = self.get_config("otamanagerdevice")
|
||||
|
@@ -730,21 +573,17 @@ class EmaneManager(ModelManager):
)
# multicast route is needed for OTA data
args = [constants.IP_BIN, "route", "add", otagroup, "dev", otadev]
node.network_cmd(args)
node.node_net_client.create_route(otagroup, otadev)
# multicast route is also needed for event data if on control network
if eventservicenetidx >= 0 and eventgroup != otagroup:
args = [constants.IP_BIN, "route", "add", eventgroup, "dev", eventdev]
node.network_cmd(args)
node.node_net_client.create_route(eventgroup, eventdev)
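The node_net_client.create_route call replaces the explicit ip invocation on the removed lines. Roughly, it should be equivalent to the following, sketched here with subprocess since the net client implementation is not shown in this diff:

    import subprocess

    def create_route(route, device):
        # roughly equivalent to the removed call:
        #   ip route add <route> dev <device>
        subprocess.run(["ip", "route", "add", route, "dev", device], check=True)

    # e.g. the OTA multicast group onto the control network device
    # create_route("224.0.0.1", "ctrl0net")   # illustrative values, needs root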
# start emane
args = emanecmd + [
"-f",
os.path.join(path, "emane%d.log" % n),
os.path.join(path, "platform%d.xml" % n),
]
output = node.check_cmd(args)
log_file = os.path.join(path, f"emane{n}.log")
platform_xml = os.path.join(path, f"platform{n}.xml")
args = f"{emanecmd} -f {log_file} {platform_xml}"
output = node.cmd(args)
logging.info("node(%s) emane daemon running: %s", node.name, args)
logging.info("node(%s) emane daemon output: %s", node.name, output)
@ -752,17 +591,21 @@ class EmaneManager(ModelManager):
|
|||
return
|
||||
|
||||
path = self.session.session_dir
|
||||
emanecmd += ["-f", os.path.join(path, "emane.log")]
|
||||
args = emanecmd + [os.path.join(path, "platform.xml")]
|
||||
utils.check_cmd(args, cwd=path)
|
||||
logging.info("host emane daemon running: %s", args)
|
||||
log_file = os.path.join(path, "emane.log")
|
||||
platform_xml = os.path.join(path, "platform.xml")
|
||||
emanecmd += f" -f {log_file} {platform_xml}"
|
||||
utils.cmd(emanecmd, cwd=path)
|
||||
self.session.distributed.execute(lambda x: x.remote_cmd(emanecmd, cwd=path))
|
||||
logging.info("host emane daemon running: %s", emanecmd)
|
||||
|
||||
def stopdaemons(self):
|
||||
"""
|
||||
Kill the appropriate EMANE daemons.
|
||||
"""
|
||||
# TODO: we may want to improve this if we had the PIDs from the specific EMANE daemons that we"ve started
|
||||
args = ["killall", "-q", "emane"]
|
||||
# TODO: we may want to improve this if we had the PIDs from the specific EMANE
|
||||
# daemons that we"ve started
|
||||
kill_emaned = "killall -q emane"
|
||||
kill_transortd = "killall -q emanetransportd"
|
||||
stop_emane_on_host = False
|
||||
for node in self.getnodes():
|
||||
if hasattr(node, "transport_type") and node.transport_type == "raw":
|
||||
|
@ -770,13 +613,15 @@ class EmaneManager(ModelManager):
|
|||
continue
|
||||
|
||||
if node.up:
|
||||
node.cmd(args, wait=False)
|
||||
node.cmd(kill_emaned, wait=False)
|
||||
# TODO: RJ45 node
|
||||
|
||||
if stop_emane_on_host:
|
||||
try:
|
||||
utils.check_cmd(args)
|
||||
utils.check_cmd(["killall", "-q", "emanetransportd"])
|
||||
utils.cmd(kill_emaned)
|
||||
utils.cmd(kill_transortd)
|
||||
self.session.distributed.execute(lambda x: x.remote_cmd(kill_emaned))
|
||||
self.session.distributed.execute(lambda x: x.remote_cmd(kill_transortd))
|
||||
except CoreCommandError:
|
||||
logging.exception("error shutting down emane daemons")
|
||||
|
||||
|
@ -785,8 +630,8 @@ class EmaneManager(ModelManager):
|
|||
Install TUN/TAP virtual interfaces into their proper namespaces
|
||||
now that the EMANE daemons are running.
|
||||
"""
|
||||
for key in sorted(self._emane_nodes.keys()):
|
||||
emane_node = self._emane_nodes[key]
|
||||
for key in sorted(self._emane_nets.keys()):
|
||||
emane_node = self._emane_nets[key]
|
||||
logging.info("emane install netifs for node: %d", key)
|
||||
emane_node.installnetifs()
|
||||
|
||||
|
@ -794,8 +639,8 @@ class EmaneManager(ModelManager):
|
|||
"""
|
||||
Uninstall TUN/TAP virtual interfaces.
|
||||
"""
|
||||
for key in sorted(self._emane_nodes.keys()):
|
||||
emane_node = self._emane_nodes[key]
|
||||
for key in sorted(self._emane_nets.keys()):
|
||||
emane_node = self._emane_nets[key]
|
||||
emane_node.deinstallnetifs()
|
||||
|
||||
def doeventmonitor(self):
|
||||
|
@ -951,7 +796,7 @@ class EmaneManager(ModelManager):
|
|||
node = self.session.get_node(n)
|
||||
except CoreError:
|
||||
logging.exception(
|
||||
"location event NEM %s has no corresponding node %s" % (nemid, n)
|
||||
"location event NEM %s has no corresponding node %s", nemid, n
|
||||
)
|
||||
return False
|
||||
|
||||
|
@@ -963,11 +808,17 @@ class EmaneManager(ModelManager):
def emanerunning(self, node):
"""
Return True if an EMANE process associated with the given node is running, False otherwise.
Return True if an EMANE process associated with the given node is running,
False otherwise.
"""
args = ["pkill", "-0", "-x", "emane"]
status = node.cmd(args)
return status == 0
args = "pkill -0 -x emane"
try:
node.cmd(args)
result = True
except CoreCommandError:
result = False
return result
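The same probe pattern works for any command whose exit status is the answer; a standalone sketch using subprocess in place of node.cmd:

    import subprocess

    def process_running(name):
        # pkill -0 only tests for the process; a non-zero exit means not found
        try:
            subprocess.run(
                ["pkill", "-0", "-x", name],
                check=True,
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL,
            )
            return True
        except subprocess.CalledProcessError:
            return False

    print(process_running("emane"))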
class EmaneGlobalModel(EmaneModel):
|
||||
|
|
|
@ -115,7 +115,7 @@ def parse(manifest_path, defaults):
|
|||
# define description and account for gui quirks
|
||||
config_descriptions = config_name
|
||||
if config_name.endswith("uri"):
|
||||
config_descriptions = "%s file" % config_descriptions
|
||||
config_descriptions = f"{config_descriptions} file"
|
||||
|
||||
configuration = Configuration(
|
||||
_id=config_name,
|
||||
|
|
|
@ -4,10 +4,10 @@ Defines Emane Models used within CORE.
|
|||
import logging
|
||||
import os
|
||||
|
||||
from core import CoreError
|
||||
from core.config import ConfigGroup, Configuration
|
||||
from core.emane import emanemanifest
|
||||
from core.emulator.enumerations import ConfigDataTypes
|
||||
from core.errors import CoreError
|
||||
from core.location.mobility import WirelessModel
|
||||
from core.xml import emanexml
|
||||
|
||||
|
@ -102,6 +102,11 @@ class EmaneModel(WirelessModel):
|
|||
mac_name = emanexml.mac_file_name(self, interface)
|
||||
phy_name = emanexml.phy_file_name(self, interface)
|
||||
|
||||
# remote server for file
|
||||
server = None
|
||||
if interface is not None:
|
||||
server = interface.node.server
|
||||
|
||||
# check if this is external
|
||||
transport_type = "virtual"
|
||||
if interface and interface.transport_type == "raw":
|
||||
|
@ -111,16 +116,16 @@ class EmaneModel(WirelessModel):
|
|||
# create nem xml file
|
||||
nem_file = os.path.join(self.session.session_dir, nem_name)
|
||||
emanexml.create_nem_xml(
|
||||
self, config, nem_file, transport_name, mac_name, phy_name
|
||||
self, config, nem_file, transport_name, mac_name, phy_name, server
|
||||
)
|
||||
|
||||
# create mac xml file
|
||||
mac_file = os.path.join(self.session.session_dir, mac_name)
|
||||
emanexml.create_mac_xml(self, config, mac_file)
|
||||
emanexml.create_mac_xml(self, config, mac_file, server)
|
||||
|
||||
# create phy xml file
|
||||
phy_file = os.path.join(self.session.session_dir, phy_name)
|
||||
emanexml.create_phy_xml(self, config, phy_file)
|
||||
emanexml.create_phy_xml(self, config, phy_file, server)
|
||||
|
||||
def post_startup(self):
|
||||
"""
|
||||
|
|
|
@ -1,6 +1,5 @@
|
|||
"""
|
||||
nodes.py: definition of an EmaneNode class for implementing configuration
|
||||
control of an EMANE emulation. An EmaneNode has several attached NEMs that
|
||||
Provides an EMANE network node class, which has several attached NEMs that
|
||||
share the same MAC+PHY model.
|
||||
"""
|
||||
|
||||
|
@ -19,26 +18,19 @@ except ImportError:
|
|||
|
||||
|
||||
class EmaneNet(CoreNetworkBase):
|
||||
"""
|
||||
EMANE network base class.
|
||||
"""
|
||||
|
||||
apitype = NodeTypes.EMANE.value
|
||||
linktype = LinkTypes.WIRELESS.value
|
||||
type = "wlan"
|
||||
|
||||
|
||||
class EmaneNode(EmaneNet):
|
||||
"""
|
||||
EMANE node contains NEM configuration and causes connected nodes
|
||||
to have TAP interfaces (instead of VEth). These are managed by the
|
||||
Emane controller object that exists in a session.
|
||||
"""
|
||||
|
||||
apitype = NodeTypes.EMANE.value
|
||||
linktype = LinkTypes.WIRELESS.value
|
||||
type = "wlan"
|
||||
is_emane = True
|
||||
|
||||
def __init__(self, session, _id=None, name=None, start=True):
|
||||
super(EmaneNode, self).__init__(session, _id, name, start)
|
||||
def __init__(self, session, _id=None, name=None, start=True, server=None):
|
||||
super(EmaneNet, self).__init__(session, _id, name, start, server)
|
||||
self.conf = ""
|
||||
self.up = False
|
||||
self.nemidmap = {}
|
||||
|
@ -218,7 +210,7 @@ class EmaneNode(EmaneNet):
|
|||
nemid = self.getnemid(netif)
|
||||
ifname = netif.localname
|
||||
if nemid is None:
|
||||
logging.info("nemid for %s is unknown" % ifname)
|
||||
logging.info("nemid for %s is unknown", ifname)
|
||||
continue
|
||||
x, y, z = netif.node.getposition()
|
||||
lat, lon, alt = self.session.location.getgeo(x, y, z)
|
||||
|
|
|
@ -62,4 +62,5 @@ class EmaneTdmaModel(emanemodel.EmaneModel):
|
|||
logging.info(
|
||||
"setting up tdma schedule: schedule(%s) device(%s)", schedule, event_device
|
||||
)
|
||||
utils.check_cmd(["emaneevent-tdmaschedule", "-i", event_device, schedule])
|
||||
args = f"emaneevent-tdmaschedule -i {event_device} {schedule}"
|
||||
utils.cmd(args)
|
||||
|
|
|
@@ -85,12 +85,13 @@ class CoreEmu(object):
session = sessions[_id]
session.shutdown()
def create_session(self, _id=None, master=True):
def create_session(self, _id=None, master=True, _cls=Session):
"""
Create a new CORE session, set to master if running standalone.

:param int _id: session id for new session
:param bool master: sets session to master
:param class _cls: Session class to use
:return: created session
:rtype: EmuSession
"""

@@ -100,7 +101,7 @@ class CoreEmu(object):
if _id not in self.sessions:
break
session = Session(_id, config=self.config)
session = _cls(_id, config=self.config)
logging.info("created session: %s", _id)
if master:
session.master = True
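The new _cls parameter lets callers swap in their own Session subclass. A hedged usage sketch: CustomSession is hypothetical, and the import paths are assumed from the current module layout.

    from core.emulator.coreemu import CoreEmu
    from core.emulator.session import Session

    class CustomSession(Session):
        """Hypothetical subclass adding project-specific behavior."""

    coreemu = CoreEmu()
    session = coreemu.create_session(_cls=CustomSession)  # defaults to Session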
daemon/core/emulator/distributed.py (new file, 251 lines)
|
@ -0,0 +1,251 @@
|
|||
"""
|
||||
Defines distributed server functionality.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import os
|
||||
import threading
|
||||
from collections import OrderedDict
|
||||
from tempfile import NamedTemporaryFile
|
||||
|
||||
from fabric import Connection
|
||||
from invoke import UnexpectedExit
|
||||
|
||||
from core import utils
|
||||
from core.errors import CoreCommandError
|
||||
from core.nodes.interface import GreTap
|
||||
from core.nodes.ipaddress import IpAddress
|
||||
from core.nodes.network import CoreNetwork, CtrlNet
|
||||
|
||||
LOCK = threading.Lock()
|
||||
|
||||
|
||||
class DistributedServer(object):
|
||||
"""
|
||||
Provides distributed server interactions.
|
||||
"""
|
||||
|
||||
def __init__(self, name, host):
|
||||
"""
|
||||
Create a DistributedServer instance.
|
||||
|
||||
:param str name: convenience name to associate with host
|
||||
:param str host: host to connect to
|
||||
"""
|
||||
self.name = name
|
||||
self.host = host
|
||||
self.conn = Connection(host, user="root")
|
||||
self.lock = threading.Lock()
|
||||
|
||||
    def remote_cmd(self, cmd, env=None, cwd=None, wait=True):
        """
        Run command remotely using server connection.

        :param str cmd: command to run
        :param dict env: environment for remote command, default is None
        :param str cwd: directory to run command in, defaults to None, which is the
            user's home directory
        :param bool wait: True to wait for status, False to background process
        :return: stdout when success
        :rtype: str
        :raises CoreCommandError: when a non-zero exit status occurs
        """
        replace_env = env is not None
        if not wait:
            cmd += " &"
        logging.info(
            "remote cmd server(%s) cwd(%s) wait(%s): %s", self.host, cwd, wait, cmd
        )
        try:
            if cwd is None:
                result = self.conn.run(
                    cmd, hide=False, env=env, replace_env=replace_env
                )
            else:
                with self.conn.cd(cwd):
                    result = self.conn.run(
                        cmd, hide=False, env=env, replace_env=replace_env
                    )
            return result.stdout.strip()
        except UnexpectedExit as e:
            stdout, stderr = e.streams_for_display()
            raise CoreCommandError(e.result.exited, cmd, stdout, stderr)
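A short usage sketch for the new server abstraction. The host address is a placeholder, and the call assumes password-less root SSH access to the remote machine, as implied by Connection(host, user="root") above:

    from core.emulator.distributed import DistributedServer
    from core.errors import CoreCommandError

    server = DistributedServer("core2", "192.168.0.2")  # placeholder name/host
    try:
        uptime = server.remote_cmd("uptime")
        print(uptime)
    except CoreCommandError as e:
        print(f"command failed ({e.returncode}): {e.stderr}")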
def remote_put(self, source, destination):
|
||||
"""
|
||||
Push file to remote server.
|
||||
|
||||
:param str source: source file to push
|
||||
:param str destination: destination file location
|
||||
:return: nothing
|
||||
"""
|
||||
with self.lock:
|
||||
self.conn.put(source, destination)
|
||||
|
||||
def remote_put_temp(self, destination, data):
|
||||
"""
|
||||
Remote push file contents to a remote server, using a temp file as an
|
||||
intermediate step.
|
||||
|
||||
:param str destination: file destination for data
|
||||
:param str data: data to store in remote file
|
||||
:return: nothing
|
||||
"""
|
||||
with self.lock:
|
||||
temp = NamedTemporaryFile(delete=False)
|
||||
temp.write(data.encode("utf-8"))
|
||||
temp.close()
|
||||
self.conn.put(temp.name, destination)
|
||||
os.unlink(temp.name)
|
||||
|
||||
|
||||
class DistributedController(object):
|
||||
"""
|
||||
Provides logic for dealing with remote tunnels and distributed servers.
|
||||
"""
|
||||
|
||||
def __init__(self, session):
|
||||
"""
|
||||
Create
|
||||
|
||||
:param session:
|
||||
"""
|
||||
self.session = session
|
||||
self.servers = OrderedDict()
|
||||
self.tunnels = {}
|
||||
self.address = self.session.options.get_config(
|
||||
"distributed_address", default=None
|
||||
)
|
||||
|
||||
    def add_server(self, name, host):
        """
        Add distributed server configuration.

        :param str name: distributed server name
        :param str host: distributed server host address
        :return: nothing
        """
        server = DistributedServer(name, host)
        self.servers[name] = server
        cmd = f"mkdir -p {self.session.session_dir}"
        server.remote_cmd(cmd)

    def execute(self, func):
        """
        Convenience for executing logic against all distributed servers.

        :param func: function to run, that takes a DistributedServer as a parameter
        :return: nothing
        """
        for name in self.servers:
            server = self.servers[name]
            func(server)
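add_server registers a host and prepares its remote session directory, while execute fans a callable out to every registered server. A hedged sketch wiring the two together (the session object, server name, and host address are placeholders):

    def register_and_probe(session, name, host):
        # register a remote emulation server and verify it is reachable
        session.distributed.add_server(name, host)
        session.distributed.execute(lambda server: server.remote_cmd("hostname"))

    # register_and_probe(session, "core2", "192.168.0.2")  # placeholder values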
def shutdown(self):
|
||||
"""
|
||||
Shutdown logic for dealing with distributed tunnels and server session
|
||||
directories.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
# shutdown all tunnels
|
||||
for key in self.tunnels:
|
||||
tunnels = self.tunnels[key]
|
||||
for tunnel in tunnels:
|
||||
tunnel.shutdown()
|
||||
|
||||
# remove all remote session directories
|
||||
for name in self.servers:
|
||||
server = self.servers[name]
|
||||
cmd = f"rm -rf {self.session.session_dir}"
|
||||
server.remote_cmd(cmd)
|
||||
|
||||
# clear tunnels
|
||||
self.tunnels.clear()
|
||||
|
||||
def start(self):
|
||||
"""
|
||||
Start distributed network tunnels.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
for node_id in self.session.nodes:
|
||||
node = self.session.nodes[node_id]
|
||||
|
||||
if not isinstance(node, CoreNetwork):
|
||||
continue
|
||||
|
||||
if isinstance(node, CtrlNet) and node.serverintf is not None:
|
||||
continue
|
||||
|
||||
for name in self.servers:
|
||||
server = self.servers[name]
|
||||
self.create_gre_tunnel(node, server)
|
||||
|
||||
def create_gre_tunnel(self, node, server):
|
||||
"""
|
||||
Create gre tunnel using a pair of gre taps between the local and remote server.
|
||||
|
||||
|
||||
:param core.nodes.network.CoreNetwork node: node to create gre tunnel for
|
||||
:param core.emulator.distributed.DistributedServer server: server to create
|
||||
tunnel for
|
||||
:return: local and remote gre taps created for tunnel
|
||||
:rtype: tuple
|
||||
"""
|
||||
host = server.host
|
||||
key = self.tunnel_key(node.id, IpAddress.to_int(host))
|
||||
tunnel = self.tunnels.get(key)
|
||||
if tunnel is not None:
|
||||
return tunnel
|
||||
|
||||
# local to server
|
||||
logging.info(
|
||||
"local tunnel node(%s) to remote(%s) key(%s)", node.name, host, key
|
||||
)
|
||||
local_tap = GreTap(session=self.session, remoteip=host, key=key)
|
||||
local_tap.net_client.create_interface(node.brname, local_tap.localname)
|
||||
|
||||
# server to local
|
||||
logging.info(
|
||||
"remote tunnel node(%s) to local(%s) key(%s)", node.name, self.address, key
|
||||
)
|
||||
remote_tap = GreTap(
|
||||
session=self.session, remoteip=self.address, key=key, server=server
|
||||
)
|
||||
remote_tap.net_client.create_interface(node.brname, remote_tap.localname)
|
||||
|
||||
# save tunnels for shutdown
|
||||
tunnel = (local_tap, remote_tap)
|
||||
self.tunnels[key] = tunnel
|
||||
return tunnel
|
||||
|
||||
    def tunnel_key(self, n1_id, n2_id):
        """
        Compute a 32-bit key used to uniquely identify a GRE tunnel.
        The hash(n1num), hash(n2num) values are used, so node numbers may be
        None or string values (used for e.g. "ctrlnet").

        :param int n1_id: node one id
        :param int n2_id: node two id
        :return: tunnel key for the node pair
        :rtype: int
        """
        logging.debug("creating tunnel key for: %s, %s", n1_id, n2_id)
        key = (
            (self.session.id << 16) ^ utils.hashkey(n1_id) ^ (utils.hashkey(n2_id) << 8)
        )
        return key & 0xFFFFFFFF
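# Illustrative worked example, not from the change itself: deriving a tunnel key.
# The ids are placeholders; utils.hashkey is the same helper used above.
from core import utils

session_id = 45000                 # example session id
n1_id = 2                          # a network node id
n2_id = 167837698                  # e.g. IpAddress.to_int("10.1.0.2")
key = (session_id << 16) ^ utils.hashkey(n1_id) ^ (utils.hashkey(n2_id) << 8)
key &= 0xFFFFFFFF                  # clamped to 32 bits, matching the method above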
|
||||
|
||||
def get_tunnel(self, n1_id, n2_id):
|
||||
"""
|
||||
Return the GreTap between two nodes if it exists.
|
||||
|
||||
:param int n1_id: node one id
|
||||
:param int n2_id: node two id
|
||||
:return: gre tap between nodes or None
|
||||
"""
|
||||
key = self.tunnel_key(n1_id, n2_id)
|
||||
logging.debug("checking for tunnel key(%s) in: %s", key, self.tunnels)
|
||||
return self.tunnels.get(key)
|
|
@ -1,4 +1,4 @@
|
|||
from core.emane.nodes import EmaneNode
|
||||
from core.emane.nodes import EmaneNet
|
||||
from core.emulator.enumerations import LinkTypes
|
||||
from core.nodes.ipaddress import Ipv4Prefix, Ipv6Prefix, MacAddress
|
||||
from core.nodes.physical import PhysicalNode
|
||||
|
@ -53,8 +53,9 @@ def link_config(network, interface, link_options, devname=None, interface_two=No
|
|||
"netif2": interface_two,
|
||||
}
|
||||
|
||||
# hacky check here, because physical and emane nodes do not conform to the same linkconfig interface
|
||||
if not isinstance(network, (EmaneNode, PhysicalNode)):
|
||||
# hacky check here, because physical and emane nodes do not conform to the same
|
||||
# linkconfig interface
|
||||
if not isinstance(network, (EmaneNet, PhysicalNode)):
|
||||
config["devname"] = devname
|
||||
|
||||
network.linkconfig(**config)
|
||||
|
@ -70,7 +71,8 @@ class NodeOptions(object):
|
|||
Create a NodeOptions object.
|
||||
|
||||
:param str name: name of node, defaults to node class name postfix with its id
|
||||
:param str model: defines services for default and physical nodes, defaults to "router"
|
||||
:param str model: defines services for default and physical nodes, defaults to
|
||||
"router"
|
||||
:param str image: image to use for docker nodes
|
||||
"""
|
||||
self.name = name
|
||||
|
@ -122,7 +124,8 @@ class LinkOptions(object):
|
|||
"""
|
||||
Create a LinkOptions object.
|
||||
|
||||
:param core.emulator.enumerations.LinkTypes _type: type of link, defaults to wired
|
||||
:param core.emulator.enumerations.LinkTypes _type: type of link, defaults to
|
||||
wired
|
||||
"""
|
||||
self.type = _type
|
||||
self.session = None
|
||||
|
@ -191,12 +194,13 @@ class IpPrefixes(object):
|
|||
|
||||
def create_interface(self, node, name=None, mac=None):
|
||||
"""
|
||||
Creates interface data for linking nodes, using the nodes unique id for generation, along with a random
|
||||
mac address, unless provided.
|
||||
Creates interface data for linking nodes, using the nodes unique id for
|
||||
generation, along with a random mac address, unless provided.
|
||||
|
||||
:param core.nodes.base.CoreNode node: node to create interface for
|
||||
:param str name: name to set for interface, default is eth{id}
|
||||
:param str mac: mac address to use for this interface, default is random generation
|
||||
:param str mac: mac address to use for this interface, default is random
|
||||
generation
|
||||
:return: new interface data for the provided node
|
||||
:rtype: InterfaceData
|
||||
"""
|
||||
|
@ -280,7 +284,7 @@ class InterfaceData(object):
|
|||
:return: ip4 string or None
|
||||
"""
|
||||
if self.has_ip4():
|
||||
return "%s/%s" % (self.ip4, self.ip4_mask)
|
||||
return f"{self.ip4}/{self.ip4_mask}"
|
||||
else:
|
||||
return None
|
||||
|
||||
|
@ -291,7 +295,7 @@ class InterfaceData(object):
|
|||
:return: ip4 string or None
|
||||
"""
|
||||
if self.has_ip6():
|
||||
return "%s/%s" % (self.ip6, self.ip6_mask)
|
||||
return f"{self.ip6}/{self.ip6_mask}"
|
||||
else:
|
||||
return None
|
||||
|
||||
|
|
|
@ -14,12 +14,11 @@ import threading
|
|||
import time
|
||||
from multiprocessing.pool import ThreadPool
|
||||
|
||||
from core import CoreError, constants, utils
|
||||
from core.api.tlv import coreapi
|
||||
from core.api.tlv.broker import CoreBroker
|
||||
from core import constants, utils
|
||||
from core.emane.emanemanager import EmaneManager
|
||||
from core.emane.nodes import EmaneNet, EmaneNode
|
||||
from core.emane.nodes import EmaneNet
|
||||
from core.emulator.data import EventData, ExceptionData, NodeData
|
||||
from core.emulator.distributed import DistributedController
|
||||
from core.emulator.emudata import (
|
||||
IdGen,
|
||||
LinkOptions,
|
||||
|
@ -29,6 +28,7 @@ from core.emulator.emudata import (
|
|||
)
|
||||
from core.emulator.enumerations import EventTypes, ExceptionLevels, LinkTypes, NodeTypes
|
||||
from core.emulator.sessionconfig import SessionConfig, SessionMetaData
|
||||
from core.errors import CoreError
|
||||
from core.location.corelocation import CoreLocation
|
||||
from core.location.event import EventLoop
|
||||
from core.location.mobility import MobilityManager
|
||||
|
@ -62,8 +62,8 @@ NODES = {
|
|||
NodeTypes.RJ45: Rj45Node,
|
||||
NodeTypes.TUNNEL: TunnelNode,
|
||||
NodeTypes.KTUNNEL: None,
|
||||
NodeTypes.EMANE: EmaneNode,
|
||||
NodeTypes.EMANE_NET: EmaneNet,
|
||||
NodeTypes.EMANE: EmaneNet,
|
||||
NodeTypes.EMANE_NET: None,
|
||||
NodeTypes.TAP_BRIDGE: GreTapBridge,
|
||||
NodeTypes.PEER_TO_PEER: PtpNet,
|
||||
NodeTypes.CONTROL_NET: CtrlNet,
|
||||
|
@ -71,6 +71,7 @@ NODES = {
|
|||
NodeTypes.LXC: LxcNode,
|
||||
}
|
||||
NODES_TYPE = {NODES[x]: x for x in NODES}
|
||||
CTRL_NET_ID = 9001
|
||||
|
||||
|
||||
class Session(object):
|
||||
|
@ -90,7 +91,7 @@ class Session(object):
|
|||
self.master = False
|
||||
|
||||
# define and create session directory when desired
|
||||
self.session_dir = os.path.join(tempfile.gettempdir(), "pycore.%s" % self.id)
|
||||
self.session_dir = os.path.join(tempfile.gettempdir(), f"pycore.{self.id}")
|
||||
if mkdir:
|
||||
os.mkdir(self.session_dir)
|
||||
|
||||
|
@ -135,8 +136,10 @@ class Session(object):
|
|||
self.options.set_config(key, value)
|
||||
self.metadata = SessionMetaData()
|
||||
|
||||
# distributed support and logic
|
||||
self.distributed = DistributedController(self)
|
||||
|
||||
# initialize session feature helpers
|
||||
self.broker = CoreBroker(session=self)
|
||||
self.location = CoreLocation()
|
||||
self.mobility = MobilityManager(session=self)
|
||||
self.services = CoreServices(session=self)
|
||||
|
@ -147,7 +150,7 @@ class Session(object):
|
|||
self.services.default_services = {
|
||||
"mdr": ("zebra", "OSPFv3MDR", "IPForward"),
|
||||
"PC": ("DefaultRoute",),
|
||||
"prouter": ("zebra", "OSPFv2", "OSPFv3", "IPForward"),
|
||||
"prouter": (),
|
||||
"router": ("zebra", "OSPFv2", "OSPFv3", "IPForward"),
|
||||
"host": ("DefaultRoute", "SSH"),
|
||||
}
|
||||
|
@ -162,7 +165,7 @@ class Session(object):
|
|||
"""
|
||||
node_class = NODES.get(_type)
|
||||
if node_class is None:
|
||||
raise CoreError("invalid node type: %s" % _type)
|
||||
raise CoreError(f"invalid node type: {_type}")
|
||||
return node_class
|
||||
|
||||
@classmethod
|
||||
|
@ -176,7 +179,7 @@ class Session(object):
|
|||
"""
|
||||
node_type = NODES_TYPE.get(_class)
|
||||
if node_type is None:
|
||||
raise CoreError("invalid node class: %s" % _class)
|
||||
raise CoreError(f"invalid node class: {_class}")
|
||||
return node_type
|
||||
|
||||
def _link_nodes(self, node_one_id, node_two_id):
|
||||
|
@ -201,7 +204,7 @@ class Session(object):
|
|||
node_two = self.get_node(node_two_id)
|
||||
|
||||
# both node ids are provided
|
||||
tunnel = self.broker.gettunnel(node_one_id, node_two_id)
|
||||
tunnel = self.distributed.get_tunnel(node_one_id, node_two_id)
|
||||
logging.debug("tunnel between nodes: %s", tunnel)
|
||||
if isinstance(tunnel, GreTapBridge):
|
||||
net_one = tunnel
|
||||
|
@ -252,7 +255,7 @@ class Session(object):
|
|||
"""
|
||||
objects = [x for x in objects if x]
|
||||
if len(objects) < 2:
|
||||
raise CoreError("wireless link failure: %s" % objects)
|
||||
raise CoreError(f"wireless link failure: {objects}")
|
||||
logging.debug(
|
||||
"handling wireless linking objects(%s) connect(%s)", objects, connect
|
||||
)
|
||||
|
@ -261,7 +264,7 @@ class Session(object):
|
|||
raise CoreError("no common network found for wireless link/unlink")
|
||||
|
||||
for common_network, interface_one, interface_two in common_networks:
|
||||
if not isinstance(common_network, (WlanNode, EmaneNode)):
|
||||
if not isinstance(common_network, (WlanNode, EmaneNet)):
|
||||
logging.info(
|
||||
"skipping common network that is not wireless/emane: %s",
|
||||
common_network,
|
||||
|
@ -663,7 +666,14 @@ class Session(object):
|
|||
node_options = NodeOptions()
|
||||
name = node_options.name
|
||||
if not name:
|
||||
name = "%s%s" % (node_class.__name__, _id)
|
||||
name = f"{node_class.__name__}{_id}"
|
||||
|
||||
# verify distributed server
|
||||
server = self.distributed.servers.get(node_options.emulation_server)
|
||||
if node_options.emulation_server is not None and server is None:
|
||||
raise CoreError(
|
||||
f"invalid distributed server: {node_options.emulation_server}"
|
||||
)
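# Illustrative sketch, not from the change itself: how the check above is reached.
# The add_node signature is assumed from its use in this module; values are placeholders.
options = NodeOptions(name="n1")
options.emulation_server = "core2"              # must match a name given to add_server()
node = session.add_node(node_options=options)   # raises CoreError if "core2" is unknown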
|
||||
|
||||
# create node
|
||||
logging.info(
|
||||
|
@ -680,9 +690,12 @@ class Session(object):
|
|||
name=name,
|
||||
start=start,
|
||||
image=node_options.image,
|
||||
server=server,
|
||||
)
|
||||
else:
|
||||
node = self.create_node(cls=node_class, _id=_id, name=name, start=start)
|
||||
node = self.create_node(
|
||||
cls=node_class, _id=_id, name=name, start=start, server=server
|
||||
)
|
||||
|
||||
# set node attributes
|
||||
node.icon = node_options.icon
|
||||
|
@ -842,7 +855,7 @@ class Session(object):
|
|||
:return: nothing
|
||||
"""
|
||||
# hack to conform with old logic until updated
|
||||
state = ":%s" % state
|
||||
state = f":{state}"
|
||||
self.set_hook(state, file_name, source_name, data)
|
||||
|
||||
def add_node_file(self, node_id, source_name, file_name, data):
|
||||
|
@ -865,13 +878,13 @@ class Session(object):
|
|||
|
||||
def clear(self):
|
||||
"""
|
||||
Clear all CORE session data. (objects, hooks, broker)
|
||||
Clear all CORE session data. (nodes, hooks, etc)
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
self.delete_nodes()
|
||||
self.distributed.shutdown()
|
||||
self.del_hooks()
|
||||
self.broker.reset()
|
||||
self.emane.reset()
|
||||
|
||||
def start_events(self):
|
||||
|
@ -945,11 +958,11 @@ class Session(object):
|
|||
|
||||
# shutdown/cleanup feature helpers
|
||||
self.emane.shutdown()
|
||||
self.broker.shutdown()
|
||||
self.sdt.shutdown()
|
||||
|
||||
# delete all current nodes
|
||||
# remove and shutdown all nodes and tunnels
|
||||
self.delete_nodes()
|
||||
self.distributed.shutdown()
|
||||
|
||||
# remove this sessions working directory
|
||||
preserve = self.options.get_config("preservedir") == "1"
|
||||
|
@ -1054,7 +1067,7 @@ class Session(object):
|
|||
self.run_state_hooks(state_value)
|
||||
|
||||
if send_event:
|
||||
event_data = EventData(event_type=state_value, time="%s" % time.time())
|
||||
event_data = EventData(event_type=state_value, time=str(time.time()))
|
||||
self.broadcast_event(event_data)
|
||||
|
||||
def write_state(self, state):
|
||||
|
@ -1066,7 +1079,7 @@ class Session(object):
|
|||
"""
|
||||
try:
|
||||
state_file = open(self._state_file, "w")
|
||||
state_file.write("%d %s\n" % (state, coreapi.state_name(state)))
|
||||
state_file.write(f"{state} {EventTypes(self.state).name}\n")
|
||||
state_file.close()
|
||||
except IOError:
|
||||
logging.exception("error writing state file: %s", state)
|
||||
|
@ -1183,9 +1196,9 @@ class Session(object):
|
|||
try:
|
||||
hook(state)
|
||||
except Exception:
|
||||
message = "exception occured when running %s state hook: %s" % (
|
||||
coreapi.state_name(state),
|
||||
hook,
|
||||
state_name = EventTypes(self.state).name
|
||||
message = (
|
||||
f"exception occured when running {state_name} state hook: {hook}"
|
||||
)
|
||||
logging.exception(message)
|
||||
self.exception(
|
||||
|
@ -1228,12 +1241,12 @@ class Session(object):
|
|||
"""
|
||||
if state == EventTypes.RUNTIME_STATE.value:
|
||||
self.emane.poststartup()
|
||||
xml_file_version = self.options.get_config("xmlfilever")
|
||||
if xml_file_version in ("1.0",):
|
||||
xml_file_name = os.path.join(self.session_dir, "session-deployed.xml")
|
||||
xml_writer = corexml.CoreXmlWriter(self)
|
||||
corexmldeployment.CoreXmlDeployment(self, xml_writer.scenario)
|
||||
xml_writer.write(xml_file_name)
|
||||
|
||||
# create session deployed xml
|
||||
xml_file_name = os.path.join(self.session_dir, "session-deployed.xml")
|
||||
xml_writer = corexml.CoreXmlWriter(self)
|
||||
corexmldeployment.CoreXmlDeployment(self, xml_writer.scenario)
|
||||
xml_writer.write(xml_file_name)
|
||||
|
||||
def get_environment(self, state=True):
|
||||
"""
|
||||
|
@ -1246,16 +1259,16 @@ class Session(object):
|
|||
:rtype: dict
|
||||
"""
|
||||
env = os.environ.copy()
|
||||
env["SESSION"] = "%s" % self.id
|
||||
env["SESSION_SHORT"] = "%s" % self.short_session_id()
|
||||
env["SESSION_DIR"] = "%s" % self.session_dir
|
||||
env["SESSION_NAME"] = "%s" % self.name
|
||||
env["SESSION_FILENAME"] = "%s" % self.file_name
|
||||
env["SESSION_USER"] = "%s" % self.user
|
||||
env["SESSION_NODE_COUNT"] = "%s" % self.get_node_count()
|
||||
env["SESSION"] = str(self.id)
|
||||
env["SESSION_SHORT"] = self.short_session_id()
|
||||
env["SESSION_DIR"] = self.session_dir
|
||||
env["SESSION_NAME"] = str(self.name)
|
||||
env["SESSION_FILENAME"] = str(self.file_name)
|
||||
env["SESSION_USER"] = str(self.user)
|
||||
env["SESSION_NODE_COUNT"] = str(self.get_node_count())
|
||||
|
||||
if state:
|
||||
env["SESSION_STATE"] = "%s" % self.state
|
||||
env["SESSION_STATE"] = str(self.state)
|
||||
|
||||
# attempt to read and add environment config file
|
||||
environment_config_file = os.path.join(constants.CORE_CONF_DIR, "environment")
|
||||
|
@ -1344,7 +1357,7 @@ class Session(object):
|
|||
with self._nodes_lock:
|
||||
if node.id in self.nodes:
|
||||
node.shutdown()
|
||||
raise CoreError("duplicate node id %s for %s" % (node.id, node.name))
|
||||
raise CoreError(f"duplicate node id {node.id} for {node.name}")
|
||||
self.nodes[node.id] = node
|
||||
|
||||
return node
|
||||
|
@ -1359,7 +1372,7 @@ class Session(object):
|
|||
:raises core.CoreError: when node does not exist
|
||||
"""
|
||||
if _id not in self.nodes:
|
||||
raise CoreError("unknown node id %s" % _id)
|
||||
raise CoreError(f"unknown node id {_id}")
|
||||
return self.nodes[_id]
|
||||
|
||||
def delete_node(self, _id):
|
||||
|
@ -1404,9 +1417,7 @@ class Session(object):
|
|||
with open(file_path, "w") as f:
|
||||
for _id in self.nodes.keys():
|
||||
node = self.nodes[_id]
|
||||
f.write(
|
||||
"%s %s %s %s\n" % (_id, node.name, node.apitype, type(node))
|
||||
)
|
||||
f.write(f"{_id} {node.name} {node.apitype} {type(node)}\n")
|
||||
except IOError:
|
||||
logging.exception("error writing nodes file")
|
||||
|
||||
|
@ -1455,11 +1466,13 @@ class Session(object):
|
|||
# write current nodes out to session directory file
|
||||
self.write_nodes()
|
||||
|
||||
# create control net interfaces and broker network tunnels
|
||||
# create control net interfaces and network tunnels
|
||||
# which need to exist for emane to sync on location events
|
||||
# in distributed scenarios
|
||||
self.add_remove_control_interface(node=None, remove=False)
|
||||
self.broker.startup()
|
||||
|
||||
# initialize distributed tunnels
|
||||
self.distributed.start()
|
||||
|
||||
# instantiate will be invoked again upon Emane configure
|
||||
if self.emane.startup() == self.emane.NOT_READY:
|
||||
|
@ -1469,9 +1482,6 @@ class Session(object):
|
|||
self.boot_nodes()
|
||||
self.mobility.startup()
|
||||
|
||||
# set broker local instantiation to complete
|
||||
self.broker.local_instantiation_complete()
|
||||
|
||||
# notify listeners that instantiation is complete
|
||||
event = EventData(event_type=EventTypes.INSTANTIATION_COMPLETE.value)
|
||||
self.broadcast_event(event)
|
||||
|
@ -1509,21 +1519,16 @@ class Session(object):
|
|||
have entered runtime (time=0).
|
||||
"""
|
||||
# this is called from instantiate() after receiving an event message
|
||||
# for the instantiation state, and from the broker when distributed
|
||||
# nodes have been started
|
||||
# for the instantiation state
|
||||
logging.debug(
|
||||
"session(%s) checking if not in runtime state, current state: %s",
|
||||
self.id,
|
||||
coreapi.state_name(self.state),
|
||||
EventTypes(self.state).name,
|
||||
)
|
||||
if self.state == EventTypes.RUNTIME_STATE.value:
|
||||
logging.info("valid runtime state found, returning")
|
||||
return
|
||||
|
||||
# check to verify that all nodes and networks are running
|
||||
if not self.broker.instantiation_complete():
|
||||
return
|
||||
|
||||
# start event loop and set to runtime
|
||||
self.event_loop.run()
|
||||
self.set_state(EventTypes.RUNTIME_STATE, send_event=True)
|
||||
|
@ -1579,7 +1584,7 @@ class Session(object):
|
|||
interface names, where length may be limited.
|
||||
"""
|
||||
ssid = (self.id >> 8) ^ (self.id & ((1 << 8) - 1))
|
||||
return "%x" % ssid
|
||||
return f"{ssid:x}"
|
||||
|
||||
def boot_nodes(self):
|
||||
"""
|
||||
|
@ -1663,9 +1668,7 @@ class Session(object):
|
|||
return -1
|
||||
|
||||
def get_control_net(self, net_index):
|
||||
# TODO: all nodes use an integer id and now this wants to use a string
|
||||
_id = "ctrl%dnet" % net_index
|
||||
return self.get_node(_id)
|
||||
return self.get_node(CTRL_NET_ID + net_index)
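# Illustrative sketch, not from the change itself: control networks are now looked up by
# integer id derived from CTRL_NET_ID (9001) instead of string ids such as "ctrl0net".
control_net = session.get_control_net(0)   # resolves node id 9001 + 0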
|
||||
|
||||
def add_remove_control_net(self, net_index, remove=False, conf_required=True):
|
||||
"""
|
||||
|
@ -1712,7 +1715,7 @@ class Session(object):
|
|||
return None
|
||||
|
||||
# build a new controlnet bridge
|
||||
_id = "ctrl%dnet" % net_index
|
||||
_id = CTRL_NET_ID + net_index
|
||||
|
||||
# use the updown script for control net 0 only.
|
||||
updown_script = None
|
||||
|
@ -1720,7 +1723,7 @@ class Session(object):
|
|||
if net_index == 0:
|
||||
updown_script = self.options.get_config("controlnet_updown_script")
|
||||
if not updown_script:
|
||||
logging.warning("controlnet updown script not configured")
|
||||
logging.debug("controlnet updown script not configured")
|
||||
|
||||
prefixes = prefix_spec.split()
|
||||
if len(prefixes) > 1:
|
||||
|
@ -1733,42 +1736,23 @@ class Session(object):
|
|||
except IndexError:
|
||||
# no server name. possibly only one server
|
||||
prefix = prefixes[0]
|
||||
else:
|
||||
# slave servers have their name and localhost in the serverlist
|
||||
servers = self.broker.getservernames()
|
||||
servers.remove("localhost")
|
||||
prefix = None
|
||||
|
||||
for server_prefix in prefixes:
|
||||
try:
|
||||
# split each entry into server and prefix
|
||||
server, p = server_prefix.split(":")
|
||||
except ValueError:
|
||||
server = ""
|
||||
p = None
|
||||
|
||||
if server == servers[0]:
|
||||
# the server name in the list matches this server
|
||||
prefix = p
|
||||
break
|
||||
|
||||
if not prefix:
|
||||
logging.error(
|
||||
"control network prefix not found for server: %s", servers[0]
|
||||
)
|
||||
assign_address = False
|
||||
try:
|
||||
prefix = prefixes[0].split(":", 1)[1]
|
||||
except IndexError:
|
||||
prefix = prefixes[0]
|
||||
# len(prefixes) == 1
|
||||
else:
|
||||
# TODO: can we get the server name from the servers.conf or from the node assignments?
|
||||
# TODO: can we get the server name from the servers.conf or from the node
|
||||
# assignments?
|
||||
# with one prefix, only master gets a ctrlnet address
|
||||
assign_address = self.master
|
||||
prefix = prefixes[0]
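# Illustrative worked example, not from the change itself: the simplified prefix parsing
# above. Values are placeholders.
prefixes = "core2:172.16.1.0/24 172.16.2.0/24".split()
try:
    prefix = prefixes[0].split(":", 1)[1]   # "172.16.1.0/24", server name stripped
except IndexError:
    prefix = prefixes[0]                    # entry had no "name:" part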
|
||||
|
||||
logging.info("controlnet prefix: %s - %s", type(prefix), prefix)
|
||||
logging.info(
|
||||
"controlnet(%s) prefix(%s) assign(%s) updown(%s) serverintf(%s)",
|
||||
_id,
|
||||
prefix,
|
||||
assign_address,
|
||||
updown_script,
|
||||
server_interface,
|
||||
)
|
||||
control_net = self.create_node(
|
||||
cls=CtrlNet,
|
||||
_id=_id,
|
||||
|
@ -1778,13 +1762,6 @@ class Session(object):
|
|||
serverintf=server_interface,
|
||||
)
|
||||
|
||||
# tunnels between controlnets will be built with Broker.addnettunnels()
|
||||
# TODO: potentially remove documentation saying node ids are ints
|
||||
# TODO: need to move broker code out of the session object
|
||||
self.broker.addnet(_id)
|
||||
for server in self.broker.getservers():
|
||||
self.broker.addnodemap(server, _id)
|
||||
|
||||
return control_net
|
||||
|
||||
def add_remove_control_interface(
|
||||
|
@ -1817,13 +1794,12 @@ class Session(object):
|
|||
control_ip = node.id
|
||||
|
||||
try:
|
||||
addrlist = [
|
||||
"%s/%s"
|
||||
% (control_net.prefix.addr(control_ip), control_net.prefix.prefixlen)
|
||||
]
|
||||
address = control_net.prefix.addr(control_ip)
|
||||
prefix = control_net.prefix.prefixlen
|
||||
addrlist = [f"{address}/{prefix}"]
|
||||
except ValueError:
|
||||
msg = "Control interface not added to node %s. " % node.id
|
||||
msg += "Invalid control network prefix (%s). " % control_net.prefix
|
||||
msg = f"Control interface not added to node {node.id}. "
|
||||
msg += f"Invalid control network prefix ({control_net.prefix}). "
|
||||
msg += "A longer prefix length may be required for this many nodes."
|
||||
logging.exception(msg)
|
||||
return
|
||||
|
@ -1831,7 +1807,7 @@ class Session(object):
|
|||
interface1 = node.newnetif(
|
||||
net=control_net,
|
||||
ifindex=control_net.CTRLIF_IDX_BASE + net_index,
|
||||
ifname="ctrl%d" % net_index,
|
||||
ifname=f"ctrl{net_index}",
|
||||
hwaddr=MacAddress.random(),
|
||||
addrlist=addrlist,
|
||||
)
|
||||
|
@ -1854,7 +1830,7 @@ class Session(object):
|
|||
logging.exception("error retrieving control net node")
|
||||
return
|
||||
|
||||
header = "CORE session %s host entries" % self.id
|
||||
header = f"CORE session {self.id} host entries"
|
||||
if remove:
|
||||
logging.info("Removing /etc/hosts file entries.")
|
||||
utils.file_demunge("/etc/hosts", header)
|
||||
|
@ -1864,9 +1840,10 @@ class Session(object):
|
|||
for interface in control_net.netifs():
|
||||
name = interface.node.name
|
||||
for address in interface.addrlist:
|
||||
entries.append("%s %s" % (address.split("/")[0], name))
|
||||
address = address.split("/")[0]
|
||||
entries.append(f"{address} {name}")
|
||||
|
||||
logging.info("Adding %d /etc/hosts file entries." % len(entries))
|
||||
logging.info("Adding %d /etc/hosts file entries.", len(entries))
|
||||
|
||||
utils.file_munge("/etc/hosts", header, "\n".join(entries) + "\n")
|
||||
|
||||
|
@ -1917,7 +1894,8 @@ class Session(object):
|
|||
data,
|
||||
)
|
||||
|
||||
# TODO: if data is None, this blows up, but this ties into how event functions are ran, need to clean that up
|
||||
# TODO: if data is None, this blows up, but this ties into how event functions
|
||||
# are ran, need to clean that up
|
||||
def run_event(self, node_id=None, name=None, data=None):
|
||||
"""
|
||||
Run a scheduled event, executing commands in the data string.
|
||||
|
|
daemon/core/errors.py (new file, 24 lines)
|
@ -0,0 +1,24 @@
|
|||
"""
|
||||
Provides CORE specific errors.
|
||||
"""
|
||||
import subprocess
|
||||
|
||||
|
||||
class CoreCommandError(subprocess.CalledProcessError):
|
||||
"""
|
||||
Used when encountering internal CORE command errors.
|
||||
"""
|
||||
|
||||
def __str__(self):
|
||||
return (
|
||||
f"Command({self.cmd}), Status({self.returncode}):\n"
|
||||
f"stdout: {self.output}\nstderr: {self.stderr}"
|
||||
)
|
||||
|
||||
|
||||
class CoreError(Exception):
|
||||
"""
|
||||
Used for errors when dealing with CoreEmu and Sessions.
|
||||
"""
|
||||
|
||||
pass
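# Illustrative sketch, not from the change itself: how these errors behave. Values are
# placeholders; CoreCommandError reuses subprocess.CalledProcessError's constructor.
from core.errors import CoreCommandError, CoreError

try:
    raise CoreCommandError(1, "ip link show missing0", "", "Device does not exist")
except CoreCommandError as e:
    print(e)   # Command(ip link show missing0), Status(1): plus stdout/stderr

try:
    raise CoreError("invalid node type: 42")
except CoreError as e:
    print(e)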
|
|
@ -128,8 +128,6 @@ class CoreLocation(object):
|
|||
z,
|
||||
)
|
||||
lat, lon = self.refgeo[:2]
|
||||
# self.info("getgeo(%s,%s,%s) e=%s n=%s zone=%s lat,lon,alt=" \
|
||||
# "%.3f,%.3f,%.3f" % (x, y, z, e, n, zone, lat, lon, alt))
|
||||
return lat, lon, alt
|
||||
|
||||
def getxyz(self, lat, lon, alt):
|
||||
|
|
|
@ -5,8 +5,7 @@ event.py: event loop implementation using a heap queue and threads.
|
|||
import heapq
|
||||
import threading
|
||||
import time
|
||||
|
||||
from past.builtins import cmp
|
||||
from functools import total_ordering
|
||||
|
||||
|
||||
class Timer(threading.Thread):
|
||||
|
@ -70,6 +69,7 @@ class Timer(threading.Thread):
|
|||
self.finished.set()
|
||||
|
||||
|
||||
@total_ordering
|
||||
class Event(object):
|
||||
"""
|
||||
Provides event objects that can be used within the EventLoop class.
|
||||
|
@ -92,18 +92,11 @@ class Event(object):
|
|||
self.kwds = kwds
|
||||
self.canceled = False
|
||||
|
||||
def __cmp__(self, other):
|
||||
"""
|
||||
Comparison function.
|
||||
|
||||
:param Event other: event to compare with
|
||||
:return: comparison result
|
||||
:rtype: int
|
||||
"""
|
||||
tmp = cmp(self.time, other.time)
|
||||
if tmp == 0:
|
||||
tmp = cmp(self.eventnum, other.eventnum)
|
||||
return tmp
|
||||
    def __lt__(self, other):
        result = self.time < other.time
        if result:
            result = self.eventnum < other.eventnum
        return result
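# Illustrative standalone example, not the Event class itself: functools.total_ordering
# derives the remaining rich comparisons from __eq__ and a single ordering method.
from functools import total_ordering

@total_ordering
class Item:
    def __init__(self, time, eventnum):
        self.time = time
        self.eventnum = eventnum

    def __eq__(self, other):
        return (self.time, self.eventnum) == (other.time, other.eventnum)

    def __lt__(self, other):
        # order by time, break ties with the event number
        return (self.time, self.eventnum) < (other.time, other.eventnum)

assert Item(1.0, 2) <= Item(1.0, 3)   # <= is generated by total_ordering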
|
||||
|
||||
def run(self):
|
||||
"""
|
||||
|
|
|
@ -8,10 +8,9 @@ import math
|
|||
import os
|
||||
import threading
|
||||
import time
|
||||
from builtins import int
|
||||
from functools import total_ordering
|
||||
|
||||
from core import CoreError, utils
|
||||
from core import utils
|
||||
from core.config import ConfigGroup, ConfigurableOptions, Configuration, ModelManager
|
||||
from core.emulator.data import EventData, LinkData
|
||||
from core.emulator.enumerations import (
|
||||
|
@ -19,12 +18,9 @@ from core.emulator.enumerations import (
|
|||
EventTypes,
|
||||
LinkTypes,
|
||||
MessageFlags,
|
||||
MessageTypes,
|
||||
NodeTlvs,
|
||||
RegisterTlvs,
|
||||
)
|
||||
from core.nodes.base import CoreNodeBase
|
||||
from core.nodes.ipaddress import IpAddress
|
||||
from core.errors import CoreError
|
||||
|
||||
|
||||
class MobilityManager(ModelManager):
|
||||
|
@ -47,11 +43,6 @@ class MobilityManager(ModelManager):
|
|||
self.models[BasicRangeModel.name] = BasicRangeModel
|
||||
self.models[Ns2ScriptedMobility.name] = Ns2ScriptedMobility
|
||||
|
||||
# dummy node objects for tracking position of nodes on other servers
|
||||
self.phys = {}
|
||||
self.physnets = {}
|
||||
self.session.broker.handlers.add(self.physnodehandlelink)
|
||||
|
||||
def reset(self):
|
||||
"""
|
||||
Clear out all current configurations.
|
||||
|
@ -92,9 +83,6 @@ class MobilityManager(ModelManager):
|
|||
model_class = self.models[model_name]
|
||||
self.set_model(node, model_class, config)
|
||||
|
||||
if self.session.master:
|
||||
self.installphysnodes(node)
|
||||
|
||||
if node.mobility:
|
||||
self.session.event_loop.add_event(0.0, node.mobility.startup)
|
||||
|
||||
|
@ -177,15 +165,16 @@ class MobilityManager(ModelManager):
|
|||
elif model.state == model.STATE_PAUSED:
|
||||
event_type = EventTypes.PAUSE.value
|
||||
|
||||
data = "start=%d" % int(model.lasttime - model.timezero)
|
||||
data += " end=%d" % int(model.endtime)
|
||||
start_time = int(model.lasttime - model.timezero)
|
||||
end_time = int(model.endtime)
|
||||
data = f"start={start_time} end={end_time}"
|
||||
|
||||
event_data = EventData(
|
||||
node=model.id,
|
||||
event_type=event_type,
|
||||
name="mobility:%s" % model.name,
|
||||
name=f"mobility:{model.name}",
|
||||
data=data,
|
||||
time="%s" % time.time(),
|
||||
time=str(time.time()),
|
||||
)
|
||||
|
||||
self.session.broadcast_event(event_data)
|
||||
|
@ -208,87 +197,6 @@ class MobilityManager(ModelManager):
|
|||
if node.model:
|
||||
node.model.update(moved, moved_netifs)
|
||||
|
||||
def addphys(self, netnum, node):
|
||||
"""
|
||||
Keep track of PhysicalNodes and which network they belong to.
|
||||
|
||||
:param int netnum: network number
|
||||
:param core.coreobj.PyCoreNode node: node to add physical network to
|
||||
:return: nothing
|
||||
"""
|
||||
node_id = node.id
|
||||
self.phys[node_id] = node
|
||||
if netnum not in self.physnets:
|
||||
self.physnets[netnum] = [node_id]
|
||||
else:
|
||||
self.physnets[netnum].append(node_id)
|
||||
|
||||
# TODO: remove need for handling old style message
|
||||
|
||||
def physnodehandlelink(self, message):
|
||||
"""
|
||||
Broker handler. Snoop Link add messages to get
|
||||
node numbers of PhyiscalNodes and their nets.
|
||||
Physical nodes exist only on other servers, but a shadow object is
|
||||
created here for tracking node position.
|
||||
|
||||
:param message: link message to handle
|
||||
:return: nothing
|
||||
"""
|
||||
if (
|
||||
message.message_type == MessageTypes.LINK.value
|
||||
and message.flags & MessageFlags.ADD.value
|
||||
):
|
||||
nn = message.node_numbers()
|
||||
# first node is always link layer node in Link add message
|
||||
if nn[0] not in self.session.broker.network_nodes:
|
||||
return
|
||||
if nn[1] in self.session.broker.physical_nodes:
|
||||
# record the fact that this PhysicalNode is linked to a net
|
||||
dummy = CoreNodeBase(
|
||||
session=self.session, _id=nn[1], name="n%d" % nn[1], start=False
|
||||
)
|
||||
self.addphys(nn[0], dummy)
|
||||
|
||||
# TODO: remove need to handling old style messages
|
||||
def physnodeupdateposition(self, message):
|
||||
"""
|
||||
Snoop node messages belonging to physical nodes. The dummy object
|
||||
in self.phys[] records the node position.
|
||||
|
||||
:param message: message to handle
|
||||
:return: nothing
|
||||
"""
|
||||
nodenum = message.node_numbers()[0]
|
||||
try:
|
||||
dummy = self.phys[nodenum]
|
||||
nodexpos = message.get_tlv(NodeTlvs.X_POSITION.value)
|
||||
nodeypos = message.get_tlv(NodeTlvs.Y_POSITION.value)
|
||||
dummy.setposition(nodexpos, nodeypos, None)
|
||||
except KeyError:
|
||||
logging.exception("error retrieving physical node: %s", nodenum)
|
||||
|
||||
def installphysnodes(self, net):
|
||||
"""
|
||||
After installing a mobility model on a net, include any physical
|
||||
nodes that we have recorded. Use the GreTap tunnel to the physical node
|
||||
as the node's interface.
|
||||
|
||||
:param net: network to install
|
||||
:return: nothing
|
||||
"""
|
||||
node_ids = self.physnets.get(net.id, [])
|
||||
for node_id in node_ids:
|
||||
node = self.phys[node_id]
|
||||
# TODO: fix this bad logic, relating to depending on a break to get a valid server
|
||||
for server in self.session.broker.getserversbynode(node_id):
|
||||
break
|
||||
netif = self.session.broker.gettunnel(net.id, IpAddress.to_int(server.host))
|
||||
node.addnetif(netif, 0)
|
||||
netif.node = node
|
||||
x, y, z = netif.node.position.get()
|
||||
netif.poshook(netif, x, y, z)
|
||||
|
||||
|
||||
class WirelessModel(ConfigurableOptions):
|
||||
"""
|
||||
|
@ -425,7 +333,7 @@ class BasicRangeModel(WirelessModel):
|
|||
self.delay = int(config["delay"])
|
||||
if self.delay == 0:
|
||||
self.delay = None
|
||||
self.loss = int(config["error"])
|
||||
self.loss = int(float(config["error"]))
|
||||
if self.loss == 0:
|
||||
self.loss = None
|
||||
self.jitter = int(config["jitter"])
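# Illustrative note, not from the change itself: why the error value above is now parsed
# through float() first. Values are placeholders.
assert int(float("25.0")) == 25   # decimal strings are accepted
try:
    int("25.0")                   # the previous parsing path
except ValueError:
    pass                          # "25.0" is not a valid integer literal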
|
||||
|
@ -1084,7 +992,7 @@ class Ns2ScriptedMobility(WayPointMobility):
|
|||
"ns-2 scripted mobility failed to load file: %s", self.file
|
||||
)
|
||||
return
|
||||
logging.info("reading ns-2 script file: %s" % filename)
|
||||
logging.info("reading ns-2 script file: %s", filename)
|
||||
ln = 0
|
||||
ix = iy = iz = None
|
||||
inodenum = None
|
||||
|
@ -1205,7 +1113,7 @@ class Ns2ScriptedMobility(WayPointMobility):
|
|||
:return: nothing
|
||||
"""
|
||||
if self.autostart == "":
|
||||
logging.info("not auto-starting ns-2 script for %s" % self.wlan.name)
|
||||
logging.info("not auto-starting ns-2 script for %s", self.wlan.name)
|
||||
return
|
||||
try:
|
||||
t = float(self.autostart)
|
||||
|
@ -1217,9 +1125,7 @@ class Ns2ScriptedMobility(WayPointMobility):
|
|||
)
|
||||
return
|
||||
self.movenodesinitial()
|
||||
logging.info(
|
||||
"scheduling ns-2 script for %s autostart at %s" % (self.wlan.name, t)
|
||||
)
|
||||
logging.info("scheduling ns-2 script for %s autostart at %s", self.wlan.name, t)
|
||||
self.state = self.STATE_RUNNING
|
||||
self.session.event_loop.add_event(t, self.run)
|
||||
|
||||
|
@ -1280,7 +1186,7 @@ class Ns2ScriptedMobility(WayPointMobility):
|
|||
if filename is None or filename == "":
|
||||
return
|
||||
filename = self.findfile(filename)
|
||||
args = ["/bin/sh", filename, typestr]
|
||||
utils.check_cmd(
|
||||
args = f"/bin/sh {filename} {typestr}"
|
||||
utils.cmd(
|
||||
args, cwd=self.session.session_dir, env=self.session.get_environment()
|
||||
)
|
||||
|
|
|
@ -2,29 +2,24 @@
|
|||
Defines the base logic for nodes used within core.
|
||||
"""
|
||||
|
||||
import errno
|
||||
import logging
|
||||
import os
|
||||
import random
|
||||
import shutil
|
||||
import signal
|
||||
import socket
|
||||
import string
|
||||
import threading
|
||||
from builtins import range
|
||||
from socket import AF_INET, AF_INET6
|
||||
|
||||
from core import CoreCommandError, constants, utils
|
||||
from core import utils
|
||||
from core.constants import MOUNT_BIN, VNODED_BIN
|
||||
from core.emulator.data import LinkData, NodeData
|
||||
from core.emulator.enumerations import LinkTypes, NodeTypes
|
||||
from core.errors import CoreCommandError
|
||||
from core.nodes import client, ipaddress
|
||||
from core.nodes.interface import CoreInterface, TunTap, Veth
|
||||
from core.nodes.netclient import LinuxNetClient, OvsNetClient
|
||||
from core.nodes.interface import TunTap, Veth
|
||||
from core.nodes.netclient import get_net_client
|
||||
|
||||
_DEFAULT_MTU = 1500
|
||||
|
||||
utils.check_executables([constants.IP_BIN])
|
||||
|
||||
|
||||
class NodeBase(object):
|
||||
"""
|
||||
|
@ -34,7 +29,7 @@ class NodeBase(object):
|
|||
apitype = None
|
||||
|
||||
# TODO: appears start has no usage, verify and remove
|
||||
def __init__(self, session, _id=None, name=None, start=True):
|
||||
def __init__(self, session, _id=None, name=None, start=True, server=None):
|
||||
"""
|
||||
Creates a PyCoreObj instance.
|
||||
|
||||
|
@ -42,7 +37,8 @@ class NodeBase(object):
|
|||
:param int _id: id
|
||||
:param str name: object name
|
||||
:param bool start: start value
|
||||
:return:
|
||||
:param core.emulator.distributed.DistributedServer server: remote server node
|
||||
will run on, default is None for localhost
|
||||
"""
|
||||
|
||||
self.session = session
|
||||
|
@ -50,12 +46,13 @@ class NodeBase(object):
|
|||
_id = session.get_node_id()
|
||||
self.id = _id
|
||||
if name is None:
|
||||
name = "o%s" % self.id
|
||||
name = f"o{self.id}"
|
||||
self.name = name
|
||||
self.server = server
|
||||
|
||||
self.type = None
|
||||
self.server = None
|
||||
self.services = None
|
||||
# ifindex is key, PyCoreNetIf instance is value
|
||||
# ifindex is key, CoreInterface instance is value
|
||||
self._netif = {}
|
||||
self.ifindex = 0
|
||||
self.canvas = None
|
||||
|
@ -63,6 +60,9 @@ class NodeBase(object):
|
|||
self.opaque = None
|
||||
self.position = Position()
|
||||
|
||||
use_ovs = session.options.get_config("ovs") == "True"
|
||||
self.net_client = get_net_client(use_ovs, self.host_cmd)
|
||||
|
||||
def startup(self):
|
||||
"""
|
||||
Each object implements its own startup method.
|
||||
|
@ -79,6 +79,24 @@ class NodeBase(object):
|
|||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
    def host_cmd(self, args, env=None, cwd=None, wait=True, shell=False):
        """
        Runs a command on the host system or distributed server.

        :param str args: command to run
        :param dict env: environment to run command with
        :param str cwd: directory to run command in
        :param bool wait: True to wait for status, False otherwise
        :param bool shell: True to use shell, False otherwise
        :return: combined stdout and stderr
        :rtype: str
        :raises CoreCommandError: when a non-zero exit status occurs
        """
        if self.server is None:
            return utils.cmd(args, env, cwd, wait, shell)
        else:
            return self.server.remote_cmd(args, env, cwd, wait)
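# Illustrative sketch, not from the change itself: host_cmd() transparently picks the
# local host or the node's distributed server. "node" is an existing NodeBase subclass
# instance; the commands are placeholders.
node.host_cmd(f"mkdir -p {node.session.session_dir}")   # utils.cmd locally, remote_cmd otherwise
kernel = node.host_cmd("uname -r").strip()              # combined output is returned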
|
||||
|
||||
def setposition(self, x=None, y=None, z=None):
|
||||
"""
|
||||
Set the (x,y,z) position of the object.
|
||||
|
@ -176,7 +194,9 @@ class NodeBase(object):
|
|||
|
||||
x, y, _ = self.getposition()
|
||||
model = self.type
|
||||
emulation_server = self.server
|
||||
emulation_server = None
|
||||
if self.server is not None:
|
||||
emulation_server = self.server.name
|
||||
|
||||
services = self.services
|
||||
if services is not None:
|
||||
|
@ -221,7 +241,7 @@ class CoreNodeBase(NodeBase):
|
|||
Base class for CORE nodes.
|
||||
"""
|
||||
|
||||
def __init__(self, session, _id=None, name=None, start=True):
|
||||
def __init__(self, session, _id=None, name=None, start=True, server=None):
|
||||
"""
|
||||
Create a CoreNodeBase instance.
|
||||
|
||||
|
@ -229,8 +249,10 @@ class CoreNodeBase(NodeBase):
|
|||
:param int _id: object id
|
||||
:param str name: object name
|
||||
:param bool start: boolean for starting
|
||||
:param core.emulator.distributed.DistributedServer server: remote server node
|
||||
will run on, default is None for localhost
|
||||
"""
|
||||
super(CoreNodeBase, self).__init__(session, _id, name, start=start)
|
||||
super(CoreNodeBase, self).__init__(session, _id, name, start, server)
|
||||
self.services = []
|
||||
self.nodedir = None
|
||||
self.tmpnodedir = False
|
||||
|
@ -243,7 +265,7 @@ class CoreNodeBase(NodeBase):
|
|||
"""
|
||||
if self.nodedir is None:
|
||||
self.nodedir = os.path.join(self.session.session_dir, self.name + ".conf")
|
||||
os.makedirs(self.nodedir)
|
||||
self.host_cmd(f"mkdir -p {self.nodedir}")
|
||||
self.tmpnodedir = True
|
||||
else:
|
||||
self.tmpnodedir = False
|
||||
|
@ -259,7 +281,7 @@ class CoreNodeBase(NodeBase):
|
|||
return
|
||||
|
||||
if self.tmpnodedir:
|
||||
shutil.rmtree(self.nodedir, ignore_errors=True)
|
||||
self.host_cmd(f"rm -rf {self.nodedir}")
|
||||
|
||||
def addnetif(self, netif, ifindex):
|
||||
"""
|
||||
|
@ -270,7 +292,7 @@ class CoreNodeBase(NodeBase):
|
|||
:return: nothing
|
||||
"""
|
||||
if ifindex in self._netif:
|
||||
raise ValueError("ifindex %s already exists" % ifindex)
|
||||
raise ValueError(f"ifindex {ifindex} already exists")
|
||||
self._netif[ifindex] = netif
|
||||
# TODO: this should have probably been set ahead, seems bad to me, check for failure and fix
|
||||
netif.netindex = ifindex
|
||||
|
@ -283,7 +305,7 @@ class CoreNodeBase(NodeBase):
|
|||
:return: nothing
|
||||
"""
|
||||
if ifindex not in self._netif:
|
||||
raise ValueError("ifindex %s does not exist" % ifindex)
|
||||
raise ValueError(f"ifindex {ifindex} does not exist")
|
||||
netif = self._netif.pop(ifindex)
|
||||
netif.shutdown()
|
||||
del netif
|
||||
|
@ -308,11 +330,11 @@ class CoreNodeBase(NodeBase):
|
|||
Attach a network.
|
||||
|
||||
:param int ifindex: interface of index to attach
|
||||
:param core.nodes.interface.CoreInterface net: network to attach
|
||||
:param core.nodes.base.CoreNetworkBase net: network to attach
|
||||
:return: nothing
|
||||
"""
|
||||
if ifindex not in self._netif:
|
||||
raise ValueError("ifindex %s does not exist" % ifindex)
|
||||
raise ValueError(f"ifindex {ifindex} does not exist")
|
||||
self._netif[ifindex].attachnet(net)
|
||||
|
||||
def detachnet(self, ifindex):
|
||||
|
@ -323,7 +345,7 @@ class CoreNodeBase(NodeBase):
|
|||
:return: nothing
|
||||
"""
|
||||
if ifindex not in self._netif:
|
||||
raise ValueError("ifindex %s does not exist" % ifindex)
|
||||
raise ValueError(f"ifindex {ifindex} does not exist")
|
||||
self._netif[ifindex].detachnet()
|
||||
|
||||
def setposition(self, x=None, y=None, z=None):
|
||||
|
@ -361,38 +383,18 @@ class CoreNodeBase(NodeBase):
|
|||
|
||||
return common
|
||||
|
||||
def check_cmd(self, args):
|
||||
def cmd(self, args, wait=True):
|
||||
"""
|
||||
Runs shell command on node.
|
||||
Runs a command within a node container.
|
||||
|
||||
:param list[str]|str args: command to run
|
||||
:param str args: command to run
|
||||
:param bool wait: True to wait for status, False otherwise
|
||||
:return: combined stdout and stderr
|
||||
:rtype: str
|
||||
:raises CoreCommandError: when a non-zero exit status occurs
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def cmd(self, args, wait=True):
|
||||
"""
|
||||
Runs shell command on node, with option to not wait for a result.
|
||||
|
||||
:param list[str]|str args: command to run
|
||||
:param bool wait: wait for command to exit, defaults to True
|
||||
:return: exit status for command
|
||||
:rtype: int
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def cmd_output(self, args):
|
||||
"""
|
||||
Runs shell command on node and get exit status and output.
|
||||
|
||||
:param list[str]|str args: command to run
|
||||
:return: exit status and combined stdout and stderr
|
||||
:rtype: tuple[int, str]
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def termcmdstring(self, sh):
|
||||
"""
|
||||
Create a terminal command string.
|
||||
|
@ -412,7 +414,14 @@ class CoreNode(CoreNodeBase):
|
|||
valid_address_types = {"inet", "inet6", "inet6link"}
|
||||
|
||||
def __init__(
|
||||
self, session, _id=None, name=None, nodedir=None, bootsh="boot.sh", start=True
|
||||
self,
|
||||
session,
|
||||
_id=None,
|
||||
name=None,
|
||||
nodedir=None,
|
||||
bootsh="boot.sh",
|
||||
start=True,
|
||||
server=None,
|
||||
):
|
||||
"""
|
||||
Create a CoreNode instance.
|
||||
|
@ -423,8 +432,10 @@ class CoreNode(CoreNodeBase):
|
|||
:param str nodedir: node directory
|
||||
:param str bootsh: boot shell to use
|
||||
:param bool start: start flag
|
||||
:param core.emulator.distributed.DistributedServer server: remote server node
|
||||
will run on, default is None for localhost
|
||||
"""
|
||||
super(CoreNode, self).__init__(session, _id, name, start)
|
||||
super(CoreNode, self).__init__(session, _id, name, start, server)
|
||||
self.nodedir = nodedir
|
||||
self.ctrlchnlname = os.path.abspath(
|
||||
os.path.join(self.session.session_dir, self.name)
|
||||
|
@ -435,9 +446,23 @@ class CoreNode(CoreNodeBase):
|
|||
self.lock = threading.RLock()
|
||||
self._mounts = []
|
||||
self.bootsh = bootsh
|
||||
|
||||
use_ovs = session.options.get_config("ovs") == "True"
|
||||
self.node_net_client = self.create_node_net_client(use_ovs)
|
||||
|
||||
if start:
|
||||
self.startup()
|
||||
|
||||
def create_node_net_client(self, use_ovs):
|
||||
"""
|
||||
Create node network client for running network commands within the nodes
|
||||
container.
|
||||
|
||||
:param bool use_ovs: True for OVS bridges, False for Linux bridges
|
||||
:return:node network client
|
||||
"""
|
||||
return get_net_client(use_ovs, self.cmd)
|
||||
|
||||
def alive(self):
|
||||
"""
|
||||
Check if the node is alive.
|
||||
|
@ -446,8 +471,8 @@ class CoreNode(CoreNodeBase):
|
|||
:rtype: bool
|
||||
"""
|
||||
try:
|
||||
os.kill(self.pid, 0)
|
||||
except OSError:
|
||||
self.host_cmd(f"kill -0 {self.pid}")
|
||||
except CoreCommandError:
|
||||
return False
|
||||
|
||||
return True
|
||||
|
@ -466,35 +491,30 @@ class CoreNode(CoreNodeBase):
|
|||
raise ValueError("starting a node that is already up")
|
||||
|
||||
# create a new namespace for this node using vnoded
|
||||
vnoded = [
|
||||
constants.VNODED_BIN,
|
||||
"-v",
|
||||
"-c",
|
||||
self.ctrlchnlname,
|
||||
"-l",
|
||||
self.ctrlchnlname + ".log",
|
||||
"-p",
|
||||
self.ctrlchnlname + ".pid",
|
||||
]
|
||||
vnoded = (
|
||||
f"{VNODED_BIN} -v -c {self.ctrlchnlname} -l {self.ctrlchnlname}.log "
|
||||
f"-p {self.ctrlchnlname}.pid"
|
||||
)
|
||||
if self.nodedir:
|
||||
vnoded += ["-C", self.nodedir]
|
||||
vnoded += f" -C {self.nodedir}"
|
||||
env = self.session.get_environment(state=False)
|
||||
env["NODE_NUMBER"] = str(self.id)
|
||||
env["NODE_NAME"] = str(self.name)
|
||||
|
||||
output = utils.check_cmd(vnoded, env=env)
|
||||
output = self.host_cmd(vnoded, env=env)
|
||||
self.pid = int(output)
|
||||
logging.debug("node(%s) pid: %s", self.name, self.pid)
|
||||
|
||||
# create vnode client
|
||||
self.client = client.VnodeClient(self.name, self.ctrlchnlname)
|
||||
|
||||
# bring up the loopback interface
|
||||
logging.debug("bringing up loopback interface")
|
||||
self.network_cmd([constants.IP_BIN, "link", "set", "lo", "up"])
|
||||
self.node_net_client.device_up("lo")
|
||||
|
||||
# set hostname for node
|
||||
logging.debug("setting hostname: %s", self.name)
|
||||
self.network_cmd(["hostname", self.name])
|
||||
self.node_net_client.set_hostname(self.name)
|
||||
|
||||
# mark node as up
|
||||
self.up = True
|
||||
|
@ -523,21 +543,17 @@ class CoreNode(CoreNodeBase):
|
|||
for netif in self.netifs():
|
||||
netif.shutdown()
|
||||
|
||||
# attempt to kill node process and wait for termination of children
|
||||
# kill node process if present
|
||||
try:
|
||||
os.kill(self.pid, signal.SIGTERM)
|
||||
os.waitpid(self.pid, 0)
|
||||
except OSError as e:
|
||||
if e.errno != 10:
|
||||
logging.exception("error killing process")
|
||||
self.host_cmd(f"kill -9 {self.pid}")
|
||||
except CoreCommandError:
|
||||
logging.exception("error killing process")
|
||||
|
||||
# remove node directory if present
|
||||
try:
|
||||
os.unlink(self.ctrlchnlname)
|
||||
except OSError as e:
|
||||
# no such file or directory
|
||||
if e.errno != errno.ENOENT:
|
||||
logging.exception("error removing node directory")
|
||||
self.host_cmd(f"rm -rf {self.ctrlchnlname}")
|
||||
except CoreCommandError:
|
||||
logging.exception("error removing node directory")
|
||||
|
||||
# clear interface data, close client, and mark self and not up
|
||||
self._netif.clear()
|
||||
|
@ -550,46 +566,20 @@ class CoreNode(CoreNodeBase):
|
|||
|
||||
def cmd(self, args, wait=True):
|
||||
"""
|
||||
Runs shell command on node, with option to not wait for a result.
|
||||
Runs a command that is used to configure and setup the network within a
|
||||
node.
|
||||
|
||||
:param list[str]|str args: command to run
|
||||
:param bool wait: wait for command to exit, defaults to True
|
||||
:return: exit status for command
|
||||
:rtype: int
|
||||
"""
|
||||
return self.client.cmd(args, wait)
|
||||
|
||||
def cmd_output(self, args):
|
||||
"""
|
||||
Runs shell command on node and get exit status and output.
|
||||
|
||||
:param list[str]|str args: command to run
|
||||
:return: exit status and combined stdout and stderr
|
||||
:rtype: tuple[int, str]
|
||||
"""
|
||||
return self.client.cmd_output(args)
|
||||
|
||||
def network_cmd(self, args):
|
||||
"""
|
||||
Runs a command for a node that is used to configure and setup network interfaces.
|
||||
|
||||
:param list[str]|str args: command to run
|
||||
:param str args: command to run
|
||||
:param bool wait: True to wait for status, False otherwise
|
||||
:return: combined stdout and stderr
|
||||
:rtype: str
|
||||
:raises CoreCommandError: when a non-zero exit status occurs
|
||||
"""
|
||||
return self.check_cmd(args)
|
||||
|
||||
def check_cmd(self, args):
|
||||
"""
|
||||
Runs shell command on node.
|
||||
|
||||
:param list[str]|str args: command to run
|
||||
:return: combined stdout and stderr
|
||||
:rtype: str
|
||||
:raises CoreCommandError: when a non-zero exit status occurs
|
||||
"""
|
||||
return self.client.check_cmd(args)
|
||||
if self.server is None:
|
||||
return self.client.check_cmd(args, wait=wait)
|
||||
else:
|
||||
args = self.client.create_cmd(args)
|
||||
return self.server.remote_cmd(args, wait=wait)
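# Illustrative sketch, not from the change itself: running commands inside a node now that
# check_cmd/cmd_output are collapsed into cmd(). "node" is an existing, booted CoreNode;
# the address is a placeholder.
output = node.cmd("ip -4 addr show")          # waits, raises CoreCommandError on failure
node.cmd("ping -c 5 10.0.0.2", wait=False)    # do not wait for status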
|
||||
|
||||
def termcmdstring(self, sh="/bin/sh"):
|
||||
"""
|
||||
|
@ -598,7 +588,11 @@ class CoreNode(CoreNodeBase):
|
|||
:param str sh: shell to execute command in
|
||||
:return: str
|
||||
"""
|
||||
return self.client.termcmdstring(sh)
|
||||
terminal = self.client.create_cmd(sh)
|
||||
if self.server is None:
|
||||
return terminal
|
||||
else:
|
||||
return f"ssh -X -f {self.server.host} xterm -e {terminal}"
|
||||
|
||||
def privatedir(self, path):
|
||||
"""
|
||||
|
@ -608,11 +602,11 @@ class CoreNode(CoreNodeBase):
|
|||
:return: nothing
|
||||
"""
|
||||
if path[0] != "/":
|
||||
raise ValueError("path not fully qualified: %s" % path)
|
||||
raise ValueError(f"path not fully qualified: {path}")
|
||||
hostpath = os.path.join(
|
||||
self.nodedir, os.path.normpath(path).strip("/").replace("/", ".")
|
||||
)
|
||||
os.mkdir(hostpath)
|
||||
self.host_cmd(f"mkdir -p {hostpath}")
|
||||
self.mount(hostpath, path)
|
||||
|
||||
def mount(self, source, target):
|
||||
|
@ -626,15 +620,8 @@ class CoreNode(CoreNodeBase):
|
|||
"""
|
||||
source = os.path.abspath(source)
|
||||
logging.debug("node(%s) mounting: %s at %s", self.name, source, target)
|
||||
cmd = 'mkdir -p "%s" && %s -n --bind "%s" "%s"' % (
|
||||
target,
|
||||
constants.MOUNT_BIN,
|
||||
source,
|
||||
target,
|
||||
)
|
||||
status, output = self.client.shcmd_result(cmd)
|
||||
if status:
|
||||
raise CoreCommandError(status, cmd, output)
|
||||
self.cmd(f"mkdir -p {target}")
|
||||
self.cmd(f"{MOUNT_BIN} -n --bind {source} {target}")
|
||||
self._mounts.append((source, target))
|
||||
|
||||
def newifindex(self):
|
||||
|
@ -661,54 +648,42 @@ class CoreNode(CoreNodeBase):
|
|||
ifindex = self.newifindex()
|
||||
|
||||
if ifname is None:
|
||||
ifname = "eth%d" % ifindex
|
||||
ifname = f"eth{ifindex}"
|
||||
|
||||
sessionid = self.session.short_session_id()
|
||||
|
||||
try:
|
||||
suffix = "%x.%s.%s" % (self.id, ifindex, sessionid)
|
||||
suffix = f"{self.id:x}.{ifindex}.{sessionid}"
|
||||
except TypeError:
|
||||
suffix = "%s.%s.%s" % (self.id, ifindex, sessionid)
|
||||
suffix = f"{self.id}.{ifindex}.{sessionid}"
|
||||
|
||||
localname = "veth" + suffix
|
||||
localname = f"veth{suffix}"
|
||||
if len(localname) >= 16:
|
||||
raise ValueError("interface local name (%s) too long" % localname)
|
||||
raise ValueError(f"interface local name ({localname}) too long")
|
||||
|
||||
name = localname + "p"
|
||||
if len(name) >= 16:
|
||||
raise ValueError("interface name (%s) too long" % name)
|
||||
raise ValueError(f"interface name ({name}) too long")
|
||||
|
||||
veth = Veth(
|
||||
node=self, name=name, localname=localname, net=net, start=self.up
|
||||
self.session, self, name, localname, start=self.up, server=self.server
|
||||
)
|
||||
|
||||
if self.up:
|
||||
utils.check_cmd(
|
||||
[constants.IP_BIN, "link", "set", veth.name, "netns", str(self.pid)]
|
||||
)
|
||||
self.network_cmd(
|
||||
[constants.IP_BIN, "link", "set", veth.name, "name", ifname]
|
||||
)
|
||||
self.network_cmd(
|
||||
[constants.ETHTOOL_BIN, "-K", ifname, "rx", "off", "tx", "off"]
|
||||
)
|
||||
self.net_client.device_ns(veth.name, str(self.pid))
|
||||
self.node_net_client.device_name(veth.name, ifname)
|
||||
self.node_net_client.checksums_off(ifname)
|
||||
|
||||
veth.name = ifname
|
||||
|
||||
if self.up:
|
||||
# TODO: potentially find better way to query interface ID
|
||||
# retrieve interface information
|
||||
output = self.network_cmd([constants.IP_BIN, "link", "show", veth.name])
|
||||
logging.debug("interface command output: %s", output)
|
||||
output = output.split("\n")
|
||||
veth.flow_id = int(output[0].strip().split(":")[0]) + 1
|
||||
flow_id = self.node_net_client.get_ifindex(veth.name)
|
||||
veth.flow_id = int(flow_id)
|
||||
logging.debug("interface flow index: %s - %s", veth.name, veth.flow_id)
|
||||
# TODO: mimic packed hwaddr
|
||||
# veth.hwaddr = MacAddress.from_string(output[1].strip().split()[1])
|
||||
logging.debug("interface mac: %s - %s", veth.name, veth.hwaddr)
|
||||
|
||||
try:
|
||||
# add network interface to the node. If unsuccessful, destroy the network interface and raise exception.
|
||||
# add network interface to the node. If unsuccessful, destroy the
|
||||
# network interface and raise exception.
|
||||
self.addnetif(veth, ifindex)
|
||||
except ValueError as e:
|
||||
veth.shutdown()
|
||||
|
@ -732,14 +707,12 @@ class CoreNode(CoreNodeBase):
|
|||
ifindex = self.newifindex()
|
||||
|
||||
if ifname is None:
|
||||
ifname = "eth%d" % ifindex
|
||||
ifname = f"eth{ifindex}"
|
||||
|
||||
sessionid = self.session.short_session_id()
|
||||
localname = "tap%s.%s.%s" % (self.id, ifindex, sessionid)
|
||||
localname = f"tap{self.id}.{ifindex}.{sessionid}"
|
||||
name = ifname
|
||||
tuntap = TunTap(
|
||||
node=self, name=name, localname=localname, net=net, start=self.up
|
||||
)
|
||||
tuntap = TunTap(self.session, self, name, localname, start=self.up)
|
||||
|
||||
try:
|
||||
self.addnetif(tuntap, ifindex)
|
||||
|
@ -759,105 +732,47 @@ class CoreNode(CoreNodeBase):
|
|||
:return: nothing
|
||||
:raises CoreCommandError: when a non-zero exit status occurs
|
||||
"""
|
||||
self._netif[ifindex].sethwaddr(addr)
|
||||
interface = self._netif[ifindex]
|
||||
interface.sethwaddr(addr)
|
||||
if self.up:
|
||||
args = [
|
||||
constants.IP_BIN,
|
||||
"link",
|
||||
"set",
|
||||
"dev",
|
||||
self.ifname(ifindex),
|
||||
"address",
|
||||
str(addr),
|
||||
]
|
||||
self.network_cmd(args)
|
||||
self.node_net_client.device_mac(interface.name, str(addr))
|
||||
|
||||
def addaddr(self, ifindex, addr):
|
||||
"""
|
||||
Add interface address.
|
||||
|
||||
:param int ifindex: index of interface to add address to
|
||||
:param str addr: address to add to interface
|
||||
:param core.nodes.ipaddress.IpAddress addr: address to add to interface
|
||||
:return: nothing
|
||||
"""
|
||||
interface = self._netif[ifindex]
|
||||
interface.addaddr(addr)
|
||||
if self.up:
|
||||
# check if addr is ipv6
|
||||
if ":" in str(addr):
|
||||
args = [
|
||||
constants.IP_BIN,
|
||||
"addr",
|
||||
"add",
|
||||
str(addr),
|
||||
"dev",
|
||||
self.ifname(ifindex),
|
||||
]
|
||||
self.network_cmd(args)
|
||||
else:
|
||||
args = [
|
||||
constants.IP_BIN,
|
||||
"addr",
|
||||
"add",
|
||||
str(addr),
|
||||
"broadcast",
|
||||
"+",
|
||||
"dev",
|
||||
self.ifname(ifindex),
|
||||
]
|
||||
self.network_cmd(args)
|
||||
|
||||
self._netif[ifindex].addaddr(addr)
|
||||
address = str(addr)
|
||||
# ipv6 check
|
||||
broadcast = None
|
||||
if ":" not in address:
|
||||
broadcast = "+"
|
||||
self.node_net_client.create_address(interface.name, address, broadcast)
|
||||
|
||||
def deladdr(self, ifindex, addr):
"""
Delete address from an interface.

:param int ifindex: index of interface to delete address from
:param str addr: address to delete from interface
:param core.nodes.ipaddress.IpAddress addr: address to delete from interface
:return: nothing
:raises CoreCommandError: when a non-zero exit status occurs
"""
interface = self._netif[ifindex]

try:
self._netif[ifindex].deladdr(addr)
interface.deladdr(addr)
except ValueError:
logging.exception("trying to delete unknown address: %s" % addr)
logging.exception("trying to delete unknown address: %s", addr)

if self.up:
self.network_cmd(
[
constants.IP_BIN,
"addr",
"del",
str(addr),
"dev",
self.ifname(ifindex),
]
)

def delalladdr(self, ifindex, address_types=None):
"""
Delete all addresses from an interface.

:param int ifindex: index of interface to delete address types from
:param tuple[str] address_types: address types to delete
:return: nothing
:raises CoreCommandError: when a non-zero exit status occurs
"""
if not address_types:
address_types = self.valid_address_types

interface_name = self.ifname(ifindex)
addresses = self.client.getaddr(interface_name, rescan=True)

for address_type in address_types:
if address_type not in self.valid_address_types:
raise ValueError(
"addr type must be in: %s" % " ".join(self.valid_address_types)
)
for address in addresses[address_type]:
self.deladdr(ifindex, address)

# update cached information
self.client.getaddr(interface_name, rescan=True)
self.node_net_client.delete_address(interface.name, str(addr))

def ifup(self, ifindex):
"""
@@ -867,9 +782,8 @@ class CoreNode(CoreNodeBase):
:return: nothing
"""
if self.up:
self.network_cmd(
[constants.IP_BIN, "link", "set", self.ifname(ifindex), "up"]
)
interface_name = self.ifname(ifindex)
self.node_net_client.device_up(interface_name)

def newnetif(self, net=None, addrlist=None, hwaddr=None, ifindex=None, ifname=None):
"""
@@ -915,53 +829,6 @@ class CoreNode(CoreNodeBase):
self.ifup(ifindex)
return ifindex

def connectnode(self, ifname, othernode, otherifname):
"""
Connect a node.

:param str ifname: name of interface to connect
:param core.nodes.CoreNodeBase othernode: node to connect to
:param str otherifname: interface name to connect to
:return: nothing
"""
tmplen = 8
tmp1 = "tmp." + "".join(
[random.choice(string.ascii_lowercase) for _ in range(tmplen)]
)
tmp2 = "tmp." + "".join(
[random.choice(string.ascii_lowercase) for _ in range(tmplen)]
)
utils.check_cmd(
[
constants.IP_BIN,
"link",
"add",
"name",
tmp1,
"type",
"veth",
"peer",
"name",
tmp2,
]
)

utils.check_cmd([constants.IP_BIN, "link", "set", tmp1, "netns", str(self.pid)])
self.network_cmd([constants.IP_BIN, "link", "set", tmp1, "name", ifname])
interface = CoreInterface(node=self, name=ifname, mtu=_DEFAULT_MTU)
self.addnetif(interface, self.newifindex())

utils.check_cmd(
[constants.IP_BIN, "link", "set", tmp2, "netns", str(othernode.pid)]
)
othernode.network_cmd(
[constants.IP_BIN, "link", "set", tmp2, "name", otherifname]
)
other_interface = CoreInterface(
node=othernode, name=otherifname, mtu=_DEFAULT_MTU
)
othernode.addnetif(other_interface, othernode.newifindex())

def addfile(self, srcname, filename):
"""
Add a file.
@@ -973,11 +840,13 @@ class CoreNode(CoreNodeBase):
"""
logging.info("adding file from %s to %s", srcname, filename)
directory = os.path.dirname(filename)

cmd = 'mkdir -p "%s" && mv "%s" "%s" && sync' % (directory, srcname, filename)
status, output = self.client.shcmd_result(cmd)
if status:
raise CoreCommandError(status, cmd, output)
if self.server is None:
self.client.check_cmd(f"mkdir -p {directory}")
self.client.check_cmd(f"mv {srcname} {filename}")
self.client.check_cmd("sync")
else:
self.host_cmd(f"mkdir -p {directory}")
self.server.remote_put(srcname, filename)
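The local-versus-remote split above recurs throughout this change: with self.server set to None the node lives on this host and the vcmd-backed client moves the file, otherwise the directory is created via host_cmd and the file is pushed to the DistributedServer. A condensed restatement of that dispatch, using only the calls shown above:

    import os

    def push_file(node, srcname, filename):
        directory = os.path.dirname(filename)
        if node.server is None:
            # local node: move the file into place inside the node
            node.client.check_cmd(f"mkdir -p {directory}")
            node.client.check_cmd(f"mv {srcname} {filename}")
            node.client.check_cmd("sync")
        else:
            # distributed node: stage the file on the remote server
            node.host_cmd(f"mkdir -p {directory}")
            node.server.remote_put(srcname, filename)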
def hostfilename(self, filename):
"""
@@ -988,43 +857,37 @@ class CoreNode(CoreNodeBase):
"""
dirname, basename = os.path.split(filename)
if not basename:
raise ValueError("no basename for filename: %s" % filename)
raise ValueError(f"no basename for filename: {filename}")
if dirname and dirname[0] == "/":
dirname = dirname[1:]
dirname = dirname.replace("/", ".")
dirname = os.path.join(self.nodedir, dirname)
return os.path.join(dirname, basename)
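hostfilename flattens the node-relative directory into a single dotted directory under nodedir, so the node's private copies land in one flat level on the host. A standalone restatement with a worked example (the node directory shown is hypothetical):

    import os

    def hostfilename(nodedir, filename):
        dirname, basename = os.path.split(filename)
        if not basename:
            raise ValueError(f"no basename for filename: {filename}")
        if dirname and dirname[0] == "/":
            dirname = dirname[1:]
        dirname = dirname.replace("/", ".")
        return os.path.join(nodedir, dirname, basename)

    # hostfilename("/tmp/pycore.12345/n1.conf", "/usr/local/etc/quagga/Quagga.conf")
    #   -> "/tmp/pycore.12345/n1.conf/usr.local.etc.quagga/Quagga.conf"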
def opennodefile(self, filename, mode="w"):
"""
Open a node file, within its directory.

:param str filename: file name to open
:param str mode: mode to open file in
:return: open file
:rtype: file
"""
hostfilename = self.hostfilename(filename)
dirname, _basename = os.path.split(hostfilename)
if not os.path.isdir(dirname):
os.makedirs(dirname, mode=0o755)
return open(hostfilename, mode)

def nodefile(self, filename, contents, mode=0o644):
"""
Create a node file with a given mode.

:param str filename: name of file to create
:param contents: contents of file
:param str contents: contents of file
:param int mode: mode for file
:return: nothing
"""
with self.opennodefile(filename, "w") as open_file:
open_file.write(contents)
os.chmod(open_file.name, mode)
logging.debug(
"node(%s) added file: %s; mode: 0%o", self.name, open_file.name, mode
)
hostfilename = self.hostfilename(filename)
dirname, _basename = os.path.split(hostfilename)
if self.server is None:
if not os.path.isdir(dirname):
os.makedirs(dirname, mode=0o755)
with open(hostfilename, "w") as open_file:
open_file.write(contents)
os.chmod(open_file.name, mode)
else:
self.host_cmd(f"mkdir -m {0o755:o} -p {dirname}")
self.server.remote_put_temp(hostfilename, contents)
self.host_cmd(f"chmod {mode:o} {hostfilename}")
logging.debug(
"node(%s) added file: %s; mode: 0%o", self.name, hostfilename, mode
)
def nodefilecopy(self, filename, srcfilename, mode=None):
"""
@@ -1037,9 +900,12 @@ class CoreNode(CoreNodeBase):
:return: nothing
"""
hostfilename = self.hostfilename(filename)
shutil.copy2(srcfilename, hostfilename)
if self.server is None:
shutil.copy2(srcfilename, hostfilename)
else:
self.server.remote_put(srcfilename, hostfilename)
if mode is not None:
os.chmod(hostfilename, mode)
self.host_cmd(f"chmod {mode:o} {hostfilename}")
logging.info(
"node(%s) copied file: %s; mode: %s", self.name, hostfilename, mode
)
@@ -1053,7 +919,7 @@ class CoreNetworkBase(NodeBase):
linktype = LinkTypes.WIRED.value
is_emane = False

def __init__(self, session, _id, name, start=True):
def __init__(self, session, _id, name, start=True, server=None):
"""
Create a CoreNetworkBase instance.

@@ -1061,14 +927,12 @@ class CoreNetworkBase(NodeBase):
:param int _id: object id
:param str name: object name
:param bool start: should object start
:param core.emulator.distributed.DistributedServer server: remote server node
will run on, default is None for localhost
"""
super(CoreNetworkBase, self).__init__(session, _id, name, start=start)
super(CoreNetworkBase, self).__init__(session, _id, name, start, server)
self._linked = {}
self._linked_lock = threading.Lock()
if session.options.get_config("ovs") == "True":
self.net_client = OvsNetClient()
else:
self.net_client = LinuxNetClient()

def startup(self):
"""
@@ -4,11 +4,8 @@ over a control channel to the vnoded process running in a network namespace.
The control channel can be accessed via calls using the vcmd shell.
"""

import logging
import os
from subprocess import PIPE, Popen

from core import CoreCommandError, constants, utils
from core import utils
from core.constants import VCMD_BIN


class VnodeClient(object):
@@ -25,7 +22,6 @@ class VnodeClient(object):
"""
self.name = name
self.ctrlchnlname = ctrlchnlname
self._addr = {}

def _verify_connection(self):
"""
@@ -54,271 +50,19 @@ class VnodeClient(object):
"""
pass

def _cmd_args(self):
return [constants.VCMD_BIN, "-c", self.ctrlchnlname, "--"]
def create_cmd(self, args):
return f"{VCMD_BIN} -c {self.ctrlchnlname} -- {args}"
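create_cmd replaces the old argument-list plumbing with a single shell string that the surviving helpers (cmd and check_cmd below) hand to utils.cmd. For a hypothetical control channel path, the string it builds looks like this:

    ctrlchnlname = "/tmp/pycore.12345/n1"        # hypothetical control channel path
    cmd = "ip addr show dev eth0"
    full_cmd = f"vcmd -c {ctrlchnlname} -- {cmd}"
    # -> "vcmd -c /tmp/pycore.12345/n1 -- ip addr show dev eth0"
    # check_cmd() passes this string to utils.cmd(full_cmd, wait=True), which
    # raises CoreCommandError on a non-zero exit status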
|
||||
def cmd(self, args, wait=True):
|
||||
"""
|
||||
Execute a command on a node and return the status (return code).
|
||||
|
||||
:param list[str]|str args: command arguments
|
||||
:param bool wait: wait for command to end or not
|
||||
:return: command status
|
||||
:rtype: int
|
||||
"""
|
||||
self._verify_connection()
|
||||
args = utils.split_args(args)
|
||||
|
||||
# run command, return process when not waiting
|
||||
cmd = self._cmd_args() + args
|
||||
logging.debug("cmd wait(%s): %s", wait, cmd)
|
||||
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
|
||||
if not wait:
|
||||
return 0
|
||||
|
||||
# wait for and return exit status
|
||||
return p.wait()
|
||||
|
||||
def cmd_output(self, args):
|
||||
"""
|
||||
Execute a command on a node and return a tuple containing the
|
||||
exit status and result string. stderr output
|
||||
is folded into the stdout result string.
|
||||
|
||||
:param list[str]|str args: command to run
|
||||
:return: command status and combined stdout and stderr output
|
||||
:rtype: tuple[int, str]
|
||||
"""
|
||||
p, stdin, stdout, stderr = self.popen(args)
|
||||
stdin.close()
|
||||
output = stdout.read() + stderr.read()
|
||||
stdout.close()
|
||||
stderr.close()
|
||||
status = p.wait()
|
||||
return status, output.decode("utf-8").strip()
|
||||
|
||||
def check_cmd(self, args):
|
||||
def check_cmd(self, args, wait=True):
|
||||
"""
|
||||
Run command and return exit status and combined stdout and stderr.
|
||||
|
||||
:param list[str]|str args: command to run
|
||||
:param str args: command to run
|
||||
:param bool wait: True to wait for command status, False otherwise
|
||||
:return: combined stdout and stderr
|
||||
:rtype: str
|
||||
:raises core.CoreCommandError: when there is a non-zero exit status
|
||||
"""
|
||||
status, output = self.cmd_output(args)
|
||||
if status != 0:
|
||||
raise CoreCommandError(status, args, output)
|
||||
return output.strip()
|
||||
|
||||
def popen(self, args):
|
||||
"""
|
||||
Execute a popen command against the node.
|
||||
|
||||
:param list[str]|str args: command arguments
|
||||
:return: popen object, stdin, stdout, and stderr
|
||||
:rtype: tuple
|
||||
"""
|
||||
self._verify_connection()
|
||||
args = utils.split_args(args)
|
||||
cmd = self._cmd_args() + args
|
||||
logging.debug("popen: %s", cmd)
|
||||
p = Popen(cmd, stdout=PIPE, stderr=PIPE, stdin=PIPE)
|
||||
return p, p.stdin, p.stdout, p.stderr
|
||||
|
||||
def icmd(self, args):
|
||||
"""
|
||||
Execute an icmd against a node.
|
||||
|
||||
:param list[str]|str args: command arguments
|
||||
:return: command result
|
||||
:rtype: int
|
||||
"""
|
||||
args = utils.split_args(args)
|
||||
return os.spawnlp(
|
||||
os.P_WAIT,
|
||||
constants.VCMD_BIN,
|
||||
constants.VCMD_BIN,
|
||||
"-c",
|
||||
self.ctrlchnlname,
|
||||
"--",
|
||||
*args
|
||||
)
|
||||
|
||||
def redircmd(self, infd, outfd, errfd, args, wait=True):
|
||||
"""
|
||||
Execute a command on a node with standard input, output, and
|
||||
error redirected according to the given file descriptors.
|
||||
|
||||
:param infd: stdin file descriptor
|
||||
:param outfd: stdout file descriptor
|
||||
:param errfd: stderr file descriptor
|
||||
:param list[str]|str args: command arguments
|
||||
:param bool wait: wait flag
|
||||
:return: command status
|
||||
:rtype: int
|
||||
"""
|
||||
self._verify_connection()
|
||||
|
||||
# run command, return process when not waiting
|
||||
args = utils.split_args(args)
|
||||
cmd = self._cmd_args() + args
|
||||
logging.debug("redircmd: %s", cmd)
|
||||
p = Popen(cmd, stdin=infd, stdout=outfd, stderr=errfd)
|
||||
|
||||
if not wait:
|
||||
return p
|
||||
|
||||
# wait for and return exit status
|
||||
status = p.wait()
|
||||
if status:
|
||||
logging.warning("cmd exited with status %s: %s", status, args)
|
||||
return status
|
||||
|
||||
def term(self, sh="/bin/sh"):
|
||||
"""
|
||||
Open a terminal on a node.
|
||||
|
||||
:param str sh: shell to open terminal with
|
||||
:return: terminal command result
|
||||
:rtype: int
|
||||
"""
|
||||
args = (
|
||||
"xterm",
|
||||
"-ut",
|
||||
"-title",
|
||||
self.name,
|
||||
"-e",
|
||||
constants.VCMD_BIN,
|
||||
"-c",
|
||||
self.ctrlchnlname,
|
||||
"--",
|
||||
sh,
|
||||
)
|
||||
if "SUDO_USER" in os.environ:
|
||||
args = (
|
||||
"su",
|
||||
"-s",
|
||||
"/bin/sh",
|
||||
"-c",
|
||||
"exec " + " ".join(map(lambda x: "'%s'" % x, args)),
|
||||
os.environ["SUDO_USER"],
|
||||
)
|
||||
return os.spawnvp(os.P_NOWAIT, args[0], args)
|
||||
|
||||
def termcmdstring(self, sh="/bin/sh"):
|
||||
"""
|
||||
Create a terminal command string.
|
||||
|
||||
:param str sh: shell to execute command in
|
||||
:return: str
|
||||
"""
|
||||
return "%s -c %s -- %s" % (constants.VCMD_BIN, self.ctrlchnlname, sh)
|
||||
|
||||
def shcmd(self, cmd, sh="/bin/sh"):
|
||||
"""
|
||||
Execute a shell command.
|
||||
|
||||
:param str cmd: command string
|
||||
:param str sh: shell to run command in
|
||||
:return: command result
|
||||
:rtype: int
|
||||
"""
|
||||
return self.cmd([sh, "-c", cmd])
|
||||
|
||||
def shcmd_result(self, cmd, sh="/bin/sh"):
|
||||
"""
|
||||
Execute a shell command and return the exist status and combined output.
|
||||
|
||||
:param str cmd: shell command to run
|
||||
:param str sh: shell to run command in
|
||||
:return: exist status and combined output
|
||||
:rtype: tuple[int, str]
|
||||
"""
|
||||
return self.cmd_output([sh, "-c", cmd])
|
||||
|
||||
def getaddr(self, ifname, rescan=False):
|
||||
"""
|
||||
Get address for interface on node.
|
||||
|
||||
:param str ifname: interface name to get address for
|
||||
:param bool rescan: rescan flag
|
||||
:return: interface information
|
||||
:rtype: dict
|
||||
"""
|
||||
if ifname in self._addr and not rescan:
|
||||
return self._addr[ifname]
|
||||
|
||||
interface = {"ether": [], "inet": [], "inet6": [], "inet6link": []}
|
||||
args = [constants.IP_BIN, "addr", "show", "dev", ifname]
|
||||
p, stdin, stdout, stderr = self.popen(args)
|
||||
stdin.close()
|
||||
|
||||
for line in stdout:
|
||||
line = line.strip().split()
|
||||
if line[0] == "link/ether":
|
||||
interface["ether"].append(line[1])
|
||||
elif line[0] == "inet":
|
||||
interface["inet"].append(line[1])
|
||||
elif line[0] == "inet6":
|
||||
if line[3] == "global":
|
||||
interface["inet6"].append(line[1])
|
||||
elif line[3] == "link":
|
||||
interface["inet6link"].append(line[1])
|
||||
else:
|
||||
logging.warning("unknown scope: %s" % line[3])
|
||||
|
||||
err = stderr.read()
|
||||
stdout.close()
|
||||
stderr.close()
|
||||
status = p.wait()
|
||||
if status:
|
||||
logging.warning("nonzero exist status (%s) for cmd: %s", status, args)
|
||||
if err:
|
||||
logging.warning("error output: %s", err)
|
||||
self._addr[ifname] = interface
|
||||
return interface
|
||||
|
||||
def netifstats(self, ifname=None):
|
||||
"""
|
||||
Retrieve network interface state.
|
||||
|
||||
:param str ifname: name of interface to get state for
|
||||
:return: interface state information
|
||||
:rtype: dict
|
||||
"""
|
||||
stats = {}
|
||||
args = ["cat", "/proc/net/dev"]
|
||||
p, stdin, stdout, stderr = self.popen(args)
|
||||
stdin.close()
|
||||
# ignore first line
|
||||
stdout.readline()
|
||||
# second line has count names
|
||||
tmp = stdout.readline().decode("utf-8").strip().split("|")
|
||||
rxkeys = tmp[1].split()
|
||||
txkeys = tmp[2].split()
|
||||
for line in stdout:
|
||||
line = line.decode("utf-8").strip().split()
|
||||
devname, tmp = line[0].split(":")
|
||||
if tmp:
|
||||
line.insert(1, tmp)
|
||||
stats[devname] = {"rx": {}, "tx": {}}
|
||||
field = 1
|
||||
for count in rxkeys:
|
||||
stats[devname]["rx"][count] = int(line[field])
|
||||
field += 1
|
||||
for count in txkeys:
|
||||
stats[devname]["tx"][count] = int(line[field])
|
||||
field += 1
|
||||
err = stderr.read()
|
||||
stdout.close()
|
||||
stderr.close()
|
||||
status = p.wait()
|
||||
if status:
|
||||
logging.warning("nonzero exist status (%s) for cmd: %s", status, args)
|
||||
if err:
|
||||
logging.warning("error output: %s", err)
|
||||
if ifname is not None:
|
||||
return stats[ifname]
|
||||
else:
|
||||
return stats
|
||||
args = self.create_cmd(args)
|
||||
return utils.cmd(args, wait=wait)
|
||||
|
|
|
@@ -1,38 +1,36 @@
import json
import logging
import os
from tempfile import NamedTemporaryFile

from core import CoreCommandError, utils
from core import utils
from core.emulator.enumerations import NodeTypes
from core.errors import CoreCommandError
from core.nodes.base import CoreNode
from core.nodes.netclient import get_net_client


class DockerClient(object):
def __init__(self, name, image):
def __init__(self, name, image, run):
self.name = name
self.image = image
self.run = run
self.pid = None
self._addr = {}

def create_container(self):
utils.check_cmd(
"docker run -td --init --net=none --hostname {name} --name {name} "
"--sysctl net.ipv6.conf.all.disable_ipv6=0 "
"{image} /bin/bash".format(
name=self.name,
image=self.image
))
self.run(
f"docker run -td --init --net=none --hostname {self.name} --name {self.name} "
f"--sysctl net.ipv6.conf.all.disable_ipv6=0 {self.image} /bin/bash"
)
self.pid = self.get_pid()
return self.pid
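DockerClient no longer shells out through utils.check_cmd itself; it is handed a run callable (DockerNode passes self.host_cmd), so the same client works whether the node is local or on a DistributedServer. A sketch of the contract that callable satisfies, for illustration only (utils here is core's utils module):

    def make_runner(server=None):
        # mirrors host_cmd: run locally when no server is set, otherwise on the
        # distributed server; both return the command output as a string
        def run(args, wait=True):
            if server is None:
                return utils.cmd(args, wait=wait)
            return server.remote_cmd(args, wait=wait)
        return run

    # client = DockerClient(name, image, make_runner(server))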
def get_info(self):
|
||||
args = "docker inspect {name}".format(name=self.name)
|
||||
status, output = utils.cmd_output(args)
|
||||
if status:
|
||||
raise CoreCommandError(status, args, output)
|
||||
args = f"docker inspect {self.name}"
|
||||
output = self.run(args)
|
||||
data = json.loads(output)
|
||||
if not data:
|
||||
raise CoreCommandError(status, args, "docker({name}) not present".format(name=self.name))
|
||||
raise CoreCommandError(-1, args, f"docker({self.name}) not present")
|
||||
return data[0]
|
||||
|
||||
def is_alive(self):
|
||||
|
@ -43,96 +41,45 @@ class DockerClient(object):
|
|||
return False
|
||||
|
||||
def stop_container(self):
|
||||
utils.check_cmd("docker rm -f {name}".format(
|
||||
name=self.name
|
||||
))
|
||||
self.run(f"docker rm -f {self.name}")
|
||||
|
||||
def cmd(self, cmd, wait=True):
|
||||
if isinstance(cmd, list):
|
||||
cmd = " ".join(cmd)
|
||||
logging.info("docker cmd wait(%s): %s", wait, cmd)
|
||||
return utils.cmd("docker exec {name} {cmd}".format(
|
||||
name=self.name,
|
||||
cmd=cmd
|
||||
), wait)
|
||||
|
||||
def cmd_output(self, cmd):
|
||||
if isinstance(cmd, list):
|
||||
cmd = " ".join(cmd)
|
||||
def check_cmd(self, cmd):
|
||||
logging.info("docker cmd output: %s", cmd)
|
||||
return utils.cmd_output("docker exec {name} {cmd}".format(
|
||||
name=self.name,
|
||||
cmd=cmd
|
||||
))
|
||||
return utils.cmd(f"docker exec {self.name} {cmd}")
|
||||
|
||||
def ns_cmd(self, cmd):
|
||||
if isinstance(cmd, list):
|
||||
cmd = " ".join(cmd)
|
||||
args = "nsenter -t {pid} -u -i -p -n {cmd}".format(
|
||||
pid=self.pid,
|
||||
cmd=cmd
|
||||
)
|
||||
logging.info("ns cmd: %s", args)
|
||||
return utils.cmd_output(args)
|
||||
def create_ns_cmd(self, cmd):
|
||||
return f"nsenter -t {self.pid} -u -i -p -n {cmd}"
|
||||
|
||||
def ns_cmd(self, cmd, wait):
|
||||
args = f"nsenter -t {self.pid} -u -i -p -n {cmd}"
|
||||
return utils.cmd(args, wait=wait)
|
||||
|
||||
def get_pid(self):
|
||||
args = "docker inspect -f '{{{{.State.Pid}}}}' {name}".format(name=self.name)
|
||||
status, output = utils.cmd_output(args)
|
||||
if status:
|
||||
raise CoreCommandError(status, args, output)
|
||||
args = f"docker inspect -f '{{{{.State.Pid}}}}' {self.name}"
|
||||
output = self.run(args)
|
||||
self.pid = output
|
||||
logging.debug("node(%s) pid: %s", self.name, self.pid)
|
||||
return output
|
||||
|
||||
def copy_file(self, source, destination):
|
||||
args = "docker cp {source} {name}:{destination}".format(
|
||||
source=source,
|
||||
name=self.name,
|
||||
destination=destination
|
||||
)
|
||||
status, output = utils.cmd_output(args)
|
||||
if status:
|
||||
raise CoreCommandError(status, args, output)
|
||||
|
||||
def getaddr(self, ifname, rescan=False):
|
||||
"""
|
||||
Get address for interface on node.
|
||||
|
||||
:param str ifname: interface name to get address for
|
||||
:param bool rescan: rescan flag
|
||||
:return: interface information
|
||||
:rtype: dict
|
||||
"""
|
||||
if ifname in self._addr and not rescan:
|
||||
return self._addr[ifname]
|
||||
|
||||
interface = {"ether": [], "inet": [], "inet6": [], "inet6link": []}
|
||||
args = ["ip", "addr", "show", "dev", ifname]
|
||||
status, output = self.ns_cmd(args)
|
||||
for line in output:
|
||||
line = line.strip().split()
|
||||
if line[0] == "link/ether":
|
||||
interface["ether"].append(line[1])
|
||||
elif line[0] == "inet":
|
||||
interface["inet"].append(line[1])
|
||||
elif line[0] == "inet6":
|
||||
if line[3] == "global":
|
||||
interface["inet6"].append(line[1])
|
||||
elif line[3] == "link":
|
||||
interface["inet6link"].append(line[1])
|
||||
else:
|
||||
logging.warning("unknown scope: %s" % line[3])
|
||||
|
||||
if status:
|
||||
logging.warning("nonzero exist status (%s) for cmd: %s", status, args)
|
||||
self._addr[ifname] = interface
|
||||
return interface
|
||||
args = f"docker cp {source} {self.name}:{destination}"
|
||||
return self.run(args)
|
||||
|
||||
|
||||
class DockerNode(CoreNode):
|
||||
apitype = NodeTypes.DOCKER.value
|
||||
|
||||
def __init__(self, session, _id=None, name=None, nodedir=None, bootsh="boot.sh", start=True, image=None):
|
||||
def __init__(
|
||||
self,
|
||||
session,
|
||||
_id=None,
|
||||
name=None,
|
||||
nodedir=None,
|
||||
bootsh="boot.sh",
|
||||
start=True,
|
||||
server=None,
|
||||
image=None
|
||||
):
|
||||
"""
|
||||
Create a DockerNode instance.
|
||||
|
||||
|
@ -142,12 +89,26 @@ class DockerNode(CoreNode):
|
|||
:param str nodedir: node directory
|
||||
:param str bootsh: boot shell to use
|
||||
:param bool start: start flag
|
||||
:param core.emulator.distributed.DistributedServer server: remote server node
|
||||
will run on, default is None for localhost
|
||||
:param str image: image to start container with
|
||||
"""
|
||||
if image is None:
|
||||
image = "ubuntu"
|
||||
self.image = image
|
||||
super(DockerNode, self).__init__(session, _id, name, nodedir, bootsh, start)
|
||||
super(DockerNode, self).__init__(
|
||||
session, _id, name, nodedir, bootsh, start, server
|
||||
)
|
||||
|
||||
def create_node_net_client(self, use_ovs):
|
||||
"""
|
||||
Create node network client for running network commands within the nodes
|
||||
container.
|
||||
|
||||
:param bool use_ovs: True for OVS bridges, False for Linux bridges
|
||||
:return:node network client
|
||||
"""
|
||||
return get_net_client(use_ovs, self.nsenter_cmd)
|
||||
|
||||
def alive(self):
|
||||
"""
|
||||
|
@ -170,7 +131,7 @@ class DockerNode(CoreNode):
|
|||
if self.up:
|
||||
raise ValueError("starting a node that is already up")
|
||||
self.makenodedir()
|
||||
self.client = DockerClient(self.name, self.image)
|
||||
self.client = DockerClient(self.name, self.image, self.host_cmd)
|
||||
self.pid = self.client.create_container()
|
||||
self.up = True
|
||||
|
||||
|
@ -189,50 +150,13 @@ class DockerNode(CoreNode):
|
|||
self.client.stop_container()
|
||||
self.up = False
|
||||
|
||||
def cmd(self, args, wait=True):
|
||||
"""
|
||||
Runs shell command on node, with option to not wait for a result.
|
||||
|
||||
:param list[str]|str args: command to run
|
||||
:param bool wait: wait for command to exit, defaults to True
|
||||
:return: exit status for command
|
||||
:rtype: int
|
||||
"""
|
||||
return self.client.cmd(args, wait)
|
||||
|
||||
def cmd_output(self, args):
|
||||
"""
|
||||
Runs shell command on node and get exit status and output.
|
||||
|
||||
:param list[str]|str args: command to run
|
||||
:return: exit status and combined stdout and stderr
|
||||
:rtype: tuple[int, str]
|
||||
"""
|
||||
return self.client.cmd_output(args)
|
||||
|
||||
def check_cmd(self, args):
|
||||
"""
|
||||
Runs shell command on node.
|
||||
|
||||
:param list[str]|str args: command to run
|
||||
:return: combined stdout and stderr
|
||||
:rtype: str
|
||||
:raises CoreCommandError: when a non-zero exit status occurs
|
||||
"""
|
||||
status, output = self.client.cmd_output(args)
|
||||
if status:
|
||||
raise CoreCommandError(status, args, output)
|
||||
return output
|
||||
|
||||
def network_cmd(self, args):
|
||||
if not self.up:
|
||||
logging.debug("node down, not running network command: %s", args)
|
||||
return 0
|
||||
|
||||
status, output = self.client.ns_cmd(args)
|
||||
if status:
|
||||
raise CoreCommandError(status, args, output)
|
||||
return output
|
||||
def nsenter_cmd(self, args, wait=True):
|
||||
if self.server is None:
|
||||
args = self.client.create_ns_cmd(args)
|
||||
return utils.cmd(args, wait=wait)
|
||||
else:
|
||||
args = self.client.create_ns_cmd(args)
|
||||
return self.server.remote_cmd(args, wait=wait)
|
||||
|
||||
def termcmdstring(self, sh="/bin/sh"):
|
||||
"""
|
||||
|
@ -241,7 +165,7 @@ class DockerNode(CoreNode):
|
|||
:param str sh: shell to execute command in
|
||||
:return: str
|
||||
"""
|
||||
return "docker exec -it {name} bash".format(name=self.name)
|
||||
return f"docker exec -it {self.name} bash"
|
||||
|
||||
def privatedir(self, path):
|
||||
"""
|
||||
|
@ -251,8 +175,8 @@ class DockerNode(CoreNode):
|
|||
:return: nothing
|
||||
"""
|
||||
logging.debug("creating node dir: %s", path)
|
||||
args = "mkdir -p {path}".format(path=path)
|
||||
self.check_cmd(args)
|
||||
args = f"mkdir -p {path}"
|
||||
self.cmd(args)
|
||||
|
||||
def mount(self, source, target):
|
||||
"""
|
||||
|
@ -275,13 +199,24 @@ class DockerNode(CoreNode):
|
|||
:param int mode: mode for file
|
||||
:return: nothing
|
||||
"""
|
||||
logging.debug("node dir(%s) ctrlchannel(%s)", self.nodedir, self.ctrlchnlname)
|
||||
logging.debug("nodefile filename(%s) mode(%s)", filename, mode)
|
||||
file_path = os.path.join(self.nodedir, filename)
|
||||
with open(file_path, "w") as f:
|
||||
os.chmod(f.name, mode)
|
||||
f.write(contents)
|
||||
self.client.copy_file(file_path, filename)
|
||||
directory = os.path.dirname(filename)
|
||||
temp = NamedTemporaryFile(delete=False)
|
||||
temp.write(contents.encode("utf-8"))
|
||||
temp.close()
|
||||
|
||||
if directory:
|
||||
self.cmd(f"mkdir -m {0o755:o} -p {directory}")
|
||||
if self.server is not None:
|
||||
self.server.remote_put(temp.name, temp.name)
|
||||
self.client.copy_file(temp.name, filename)
|
||||
self.cmd(f"chmod {mode:o} {filename}")
|
||||
if self.server is not None:
|
||||
self.host_cmd(f"rm -f {temp.name}")
|
||||
os.unlink(temp.name)
|
||||
logging.debug(
|
||||
"node(%s) added file: %s; mode: 0%o", self.name, filename, mode
|
||||
)
|
||||
|
||||
def nodefilecopy(self, filename, srcfilename, mode=None):
|
||||
"""
|
||||
|
@ -293,5 +228,18 @@ class DockerNode(CoreNode):
|
|||
:param int mode: mode to copy to
|
||||
:return: nothing
|
||||
"""
|
||||
logging.info("node file copy file(%s) source(%s) mode(%s)", filename, srcfilename, mode)
|
||||
raise Exception("not supported")
|
||||
logging.info(
|
||||
"node file copy file(%s) source(%s) mode(%s)", filename, srcfilename, mode
|
||||
)
|
||||
directory = os.path.dirname(filename)
|
||||
self.cmd(f"mkdir -p {directory}")
|
||||
|
||||
if self.server is None:
|
||||
source = srcfilename
|
||||
else:
|
||||
temp = NamedTemporaryFile(delete=False)
|
||||
source = temp.name
|
||||
self.server.remote_put(source, temp.name)
|
||||
|
||||
self.client.copy_file(source, filename)
|
||||
self.cmd(f"chmod {mode:o} {filename}")
|
||||
|
|
|
@@ -4,11 +4,10 @@ virtual ethernet classes that implement the interfaces available under Linux.

import logging
import time
from builtins import int, range

from core import CoreCommandError, constants, utils

utils.check_executables([constants.IP_BIN])
from core import utils
from core.errors import CoreCommandError
from core.nodes.netclient import get_net_client


class CoreInterface(object):
@@ -16,15 +15,18 @@ class CoreInterface(object):
Base class for network interfaces.
"""

def __init__(self, node, name, mtu):
def __init__(self, session, node, name, mtu, server=None):
"""
Creates a PyCoreNetIf instance.
Creates a CoreInterface instance.

:param core.emulator.session.Session session: core session instance
:param core.nodes.base.CoreNode node: node for interface
:param str name: interface name
:param mtu: mtu value
:param int mtu: mtu value
:param core.emulator.distributed.DistributedServer server: remote server node
will run on, default is None for localhost
"""

self.session = session
self.node = node
self.name = name
if not isinstance(mtu, int):
@@ -42,6 +44,27 @@ class CoreInterface(object):
self.netindex = None
# index used to find flow data
self.flow_id = None
self.server = server
use_ovs = session.options.get_config("ovs") == "True"
self.net_client = get_net_client(use_ovs, self.host_cmd)

def host_cmd(self, args, env=None, cwd=None, wait=True, shell=False):
"""
Runs a command on the host system or distributed server.

:param str args: command to run
:param dict env: environment to run command with
:param str cwd: directory to run command in
:param bool wait: True to wait for status, False otherwise
:param bool shell: True to use shell, False otherwise
:return: combined stdout and stderr
:rtype: str
:raises CoreCommandError: when a non-zero exit status occurs
"""
if self.server is None:
return utils.cmd(args, env, cwd, wait, shell)
else:
return self.server.remote_cmd(args, env, cwd, wait)
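With the net client wired up through host_cmd above, Veth startup further down in this file reduces to two client calls (create_veth and device_up); the underlying ip commands are the ones the removed code ran through utils.check_cmd. A minimal sketch, assuming create_veth issues the classic veth-pair invocation:

    def create_veth_pair(run, localname, name):
        # create the veth pair, then bring the host-side endpoint up
        run(f"ip link add name {localname} type veth peer name {name}")
        run(f"ip link set {localname} up")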
def startup(self):
|
||||
"""
|
||||
|
@ -190,21 +213,24 @@ class Veth(CoreInterface):
|
|||
Provides virtual ethernet functionality for core nodes.
|
||||
"""
|
||||
|
||||
# TODO: network is not used, why was it needed?
|
||||
def __init__(self, node, name, localname, mtu=1500, net=None, start=True):
|
||||
def __init__(
|
||||
self, session, node, name, localname, mtu=1500, server=None, start=True
|
||||
):
|
||||
"""
|
||||
Creates a VEth instance.
|
||||
|
||||
:param core.emulator.session.Session session: core session instance
|
||||
:param core.nodes.base.CoreNode node: related core node
|
||||
:param str name: interface name
|
||||
:param str localname: interface local name
|
||||
:param mtu: interface mtu
|
||||
:param net: network
|
||||
:param int mtu: interface mtu
|
||||
:param core.emulator.distributed.DistributedServer server: remote server node
|
||||
will run on, default is None for localhost
|
||||
:param bool start: start flag
|
||||
:raises CoreCommandError: when there is a command exception
|
||||
"""
|
||||
# note that net arg is ignored
|
||||
CoreInterface.__init__(self, node=node, name=name, mtu=mtu)
|
||||
CoreInterface.__init__(self, session, node, name, mtu, server)
|
||||
self.localname = localname
|
||||
self.up = False
|
||||
if start:
|
||||
|
@ -217,21 +243,8 @@ class Veth(CoreInterface):
|
|||
:return: nothing
|
||||
:raises CoreCommandError: when there is a command exception
|
||||
"""
|
||||
utils.check_cmd(
|
||||
[
|
||||
constants.IP_BIN,
|
||||
"link",
|
||||
"add",
|
||||
"name",
|
||||
self.localname,
|
||||
"type",
|
||||
"veth",
|
||||
"peer",
|
||||
"name",
|
||||
self.name,
|
||||
]
|
||||
)
|
||||
utils.check_cmd([constants.IP_BIN, "link", "set", self.localname, "up"])
|
||||
self.net_client.create_veth(self.localname, self.name)
|
||||
self.net_client.device_up(self.localname)
|
||||
self.up = True
|
||||
|
||||
def shutdown(self):
|
||||
|
@ -245,15 +258,13 @@ class Veth(CoreInterface):
|
|||
|
||||
if self.node:
|
||||
try:
|
||||
self.node.network_cmd(
|
||||
[constants.IP_BIN, "-6", "addr", "flush", "dev", self.name]
|
||||
)
|
||||
self.node.node_net_client.device_flush(self.name)
|
||||
except CoreCommandError:
|
||||
logging.exception("error shutting down interface")
|
||||
|
||||
if self.localname:
|
||||
try:
|
||||
utils.check_cmd([constants.IP_BIN, "link", "delete", self.localname])
|
||||
self.net_client.delete_device(self.localname)
|
||||
except CoreCommandError:
|
||||
logging.info("link already removed: %s", self.localname)
|
||||
|
||||
|
@ -265,19 +276,22 @@ class TunTap(CoreInterface):
|
|||
TUN/TAP virtual device in TAP mode
|
||||
"""
|
||||
|
||||
# TODO: network is not used, why was it needed?
|
||||
def __init__(self, node, name, localname, mtu=1500, net=None, start=True):
|
||||
def __init__(
|
||||
self, session, node, name, localname, mtu=1500, server=None, start=True
|
||||
):
|
||||
"""
|
||||
Create a TunTap instance.
|
||||
|
||||
:param core.emulator.session.Session session: core session instance
|
||||
:param core.nodes.base.CoreNode node: related core node
|
||||
:param str name: interface name
|
||||
:param str localname: local interface name
|
||||
:param mtu: interface mtu
|
||||
:param core.nodes.base.CoreNetworkBase net: related network
|
||||
:param int mtu: interface mtu
|
||||
:param core.emulator.distributed.DistributedServer server: remote server node
|
||||
will run on, default is None for localhost
|
||||
:param bool start: start flag
|
||||
"""
|
||||
CoreInterface.__init__(self, node=node, name=name, mtu=mtu)
|
||||
CoreInterface.__init__(self, session, node, name, mtu, server)
|
||||
self.localname = localname
|
||||
self.up = False
|
||||
self.transport_type = "virtual"
|
||||
|
@ -308,9 +322,7 @@ class TunTap(CoreInterface):
|
|||
return
|
||||
|
||||
try:
|
||||
self.node.network_cmd(
|
||||
[constants.IP_BIN, "-6", "addr", "flush", "dev", self.name]
|
||||
)
|
||||
self.node.node_net_client.device_flush(self.name)
|
||||
except CoreCommandError:
|
||||
logging.exception("error shutting down tunnel tap")
|
||||
|
||||
|
@ -333,7 +345,7 @@ class TunTap(CoreInterface):
|
|||
if r == 0:
|
||||
result = True
|
||||
break
|
||||
msg = "attempt %s failed with nonzero exit status %s" % (i, r)
|
||||
msg = f"attempt {i} failed with nonzero exit status {r}"
|
||||
if i < attempts + 1:
|
||||
msg += ", retrying..."
|
||||
logging.info(msg)
|
||||
|
@ -358,8 +370,11 @@ class TunTap(CoreInterface):
|
|||
logging.debug("waiting for device local: %s", self.localname)
|
||||
|
||||
def localdevexists():
|
||||
args = [constants.IP_BIN, "link", "show", self.localname]
|
||||
return utils.cmd(args)
|
||||
try:
|
||||
self.net_client.device_show(self.localname)
|
||||
return 0
|
||||
except CoreCommandError:
|
||||
return 1
|
||||
|
||||
self.waitfor(localdevexists)
|
||||
|
||||
|
@ -372,9 +387,8 @@ class TunTap(CoreInterface):
|
|||
logging.debug("waiting for device node: %s", self.name)
|
||||
|
||||
def nodedevexists():
|
||||
args = [constants.IP_BIN, "link", "show", self.name]
|
||||
try:
|
||||
self.node.network_cmd(args)
|
||||
self.node.node_net_client.device_show(self.name)
|
||||
return 0
|
||||
except CoreCommandError:
|
||||
return 1
|
||||
|
@ -407,13 +421,9 @@ class TunTap(CoreInterface):
|
|||
"""
|
||||
self.waitfordevicelocal()
|
||||
netns = str(self.node.pid)
|
||||
utils.check_cmd(
|
||||
[constants.IP_BIN, "link", "set", self.localname, "netns", netns]
|
||||
)
|
||||
self.node.network_cmd(
|
||||
[constants.IP_BIN, "link", "set", self.localname, "name", self.name]
|
||||
)
|
||||
self.node.network_cmd([constants.IP_BIN, "link", "set", self.name, "up"])
|
||||
self.net_client.device_ns(self.localname, netns)
|
||||
self.node.node_net_client.device_name(self.localname, self.name)
|
||||
self.node.node_net_client.device_up(self.name)
|
||||
|
||||
def setaddrs(self):
|
||||
"""
|
||||
|
@ -423,9 +433,7 @@ class TunTap(CoreInterface):
|
|||
"""
|
||||
self.waitfordevicenode()
|
||||
for addr in self.addrlist:
|
||||
self.node.network_cmd(
|
||||
[constants.IP_BIN, "addr", "add", str(addr), "dev", self.name]
|
||||
)
|
||||
self.node.node_net_client.create_address(self.name, str(addr))
|
||||
|
||||
|
||||
class GreTap(CoreInterface):
|
||||
|
@ -447,6 +455,7 @@ class GreTap(CoreInterface):
|
|||
ttl=255,
|
||||
key=None,
|
||||
start=True,
|
||||
server=None,
|
||||
):
|
||||
"""
|
||||
Creates a GreTap instance.
|
||||
|
@ -454,24 +463,25 @@ class GreTap(CoreInterface):
|
|||
:param core.nodes.base.CoreNode node: related core node
|
||||
:param str name: interface name
|
||||
:param core.emulator.session.Session session: core session instance
|
||||
:param mtu: interface mtu
|
||||
:param int mtu: interface mtu
|
||||
:param str remoteip: remote address
|
||||
:param int _id: object id
|
||||
:param str localip: local address
|
||||
:param ttl: ttl value
|
||||
:param key: gre tap key
|
||||
:param int ttl: ttl value
|
||||
:param int key: gre tap key
|
||||
:param bool start: start flag
|
||||
:param core.emulator.distributed.DistributedServer server: remote server node
|
||||
will run on, default is None for localhost
|
||||
:raises CoreCommandError: when there is a command exception
|
||||
"""
|
||||
CoreInterface.__init__(self, node=node, name=name, mtu=mtu)
|
||||
self.session = session
|
||||
CoreInterface.__init__(self, session, node, name, mtu, server)
|
||||
if _id is None:
|
||||
# from PyCoreObj
|
||||
_id = ((id(self) >> 16) ^ (id(self) & 0xFFFF)) & 0xFFFF
|
||||
self.id = _id
|
||||
sessionid = self.session.short_session_id()
|
||||
# interface name on the local host machine
|
||||
self.localname = "gt.%s.%s" % (self.id, sessionid)
|
||||
self.localname = f"gt.{self.id}.{sessionid}"
|
||||
self.transport_type = "raw"
|
||||
if not start:
|
||||
self.up = False
|
||||
|
@ -479,25 +489,9 @@ class GreTap(CoreInterface):
|
|||
|
||||
if remoteip is None:
|
||||
raise ValueError("missing remote IP required for GRE TAP device")
|
||||
args = [
|
||||
constants.IP_BIN,
|
||||
"link",
|
||||
"add",
|
||||
self.localname,
|
||||
"type",
|
||||
"gretap",
|
||||
"remote",
|
||||
str(remoteip),
|
||||
]
|
||||
if localip:
|
||||
args += ["local", str(localip)]
|
||||
if ttl:
|
||||
args += ["ttl", str(ttl)]
|
||||
if key:
|
||||
args += ["key", str(key)]
|
||||
utils.check_cmd(args)
|
||||
args = [constants.IP_BIN, "link", "set", self.localname, "up"]
|
||||
utils.check_cmd(args)
|
||||
|
||||
self.net_client.create_gretap(self.localname, remoteip, localip, ttl, key)
|
||||
self.net_client.device_up(self.localname)
|
||||
self.up = True
|
||||
|
||||
def shutdown(self):
|
||||
|
@ -508,10 +502,8 @@ class GreTap(CoreInterface):
|
|||
"""
|
||||
if self.localname:
|
||||
try:
|
||||
args = [constants.IP_BIN, "link", "set", self.localname, "down"]
|
||||
utils.check_cmd(args)
|
||||
args = [constants.IP_BIN, "link", "del", self.localname]
|
||||
utils.check_cmd(args)
|
||||
self.net_client.device_down(self.localname)
|
||||
self.net_client.delete_device(self.localname)
|
||||
except CoreCommandError:
|
||||
logging.exception("error during shutdown")
|
||||
|
||||
|
|
|
@@ -6,7 +6,6 @@ import logging
import random
import socket
import struct
from builtins import bytes, int, range
from socket import AF_INET, AF_INET6

@@ -19,7 +18,7 @@ class MacAddress(object):
"""
Creates a MacAddress instance.

:param str address: mac address
:param bytes address: mac address
"""
self.addr = address

@@ -30,7 +29,7 @@ class MacAddress(object):
:return: string representation
:rtype: str
"""
return ":".join("%02x" % x for x in bytearray(self.addr))
return ":".join(f"{x:02x}" for x in bytearray(self.addr))

def to_link_local(self):
"""
@@ -42,7 +41,7 @@ class MacAddress(object):
"""
if not self.addr:
return IpAddress.from_string("::")
tmp = struct.unpack("!Q", "\x00\x00" + self.addr)[0]
tmp = struct.unpack("!Q", b"\x00\x00" + self.addr)[0]
nic = int(tmp) & 0x000000FFFFFF
oui = int(tmp) & 0xFFFFFF000000
# toggle U/L bit
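The hunk above stops at the U/L-bit toggle, so the rest of to_link_local is not visible here. Assuming the standard modified EUI-64 construction (toggle the universal/local bit, insert ff:fe between OUI and NIC, prefix with fe80::/64), a worked example of the value being computed:

    # MAC 00:11:22:33:44:55 -> toggle U/L bit (00 -> 02), insert ff:fe
    #   -> 02:11:22:ff:fe:33:44:55 -> link-local fe80::211:22ff:fe33:4455
    mac = bytes.fromhex("001122334455")
    tmp = int.from_bytes(b"\x00\x00" + mac, "big")   # same value struct.unpack("!Q", ...) yields
    nic = tmp & 0x000000FFFFFF
    oui = (tmp & 0xFFFFFF000000) ^ 0x020000000000    # toggle the U/L bit
    eui64 = (oui << 16) | 0x000000FFFE000000 | nic
    assert f"{eui64:016x}" == "021122fffe334455"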
@@ -88,7 +87,7 @@ class IpAddress(object):
Create a IpAddress instance.

:param int af: address family
:param str address: ip address
:param bytes address: ip address
:return:
"""
# check if (af, addr) is valid
@@ -218,14 +217,14 @@ class IpPrefix(object):
# prefixstr format: address/prefixlen
tmp = prefixstr.split("/")
if len(tmp) > 2:
raise ValueError("invalid prefix: %s" % prefixstr)
raise ValueError(f"invalid prefix: {prefixstr}")
self.af = af
if self.af == AF_INET:
self.addrlen = 32
elif self.af == AF_INET6:
self.addrlen = 128
else:
raise ValueError("invalid address family: %s" % self.af)
raise ValueError(f"invalid address family: {self.af}")
if len(tmp) == 2:
self.prefixlen = int(tmp[1])
else:
@@ -248,7 +247,8 @@ class IpPrefix(object):
:return: string representation
:rtype: str
"""
return "%s/%s" % (socket.inet_ntop(self.af, self.prefix), self.prefixlen)
address = socket.inet_ntop(self.af, self.prefix)
return f"{address}/{self.prefixlen}"

def __eq__(self, other):
"""
@@ -284,7 +284,7 @@ class IpPrefix(object):
return NotImplemented

a = IpAddress(self.af, self.prefix) + (tmp << (self.addrlen - self.prefixlen))
prefixstr = "%s/%s" % (a, self.prefixlen)
prefixstr = f"{a}/{self.prefixlen}"
if self.__class__ == IpPrefix:
return self.__class__(self.af, prefixstr)
else:
@@ -325,7 +325,7 @@ class IpPrefix(object):
self.af == AF_INET and tmp == (1 << (self.addrlen - self.prefixlen)) - 1
)
):
raise ValueError("invalid hostid for prefix %s: %s" % (self, hostid))
raise ValueError(f"invalid hostid for prefix {self}: {hostid}")

addr = bytes(b"")
prefix_endpoint = -1
@@ -375,7 +375,7 @@ class IpPrefix(object):
:return: prefix string
:rtype: str
"""
return "%s" % socket.inet_ntop(self.af, self.prefix)
return socket.inet_ntop(self.af, self.prefix)

def netmask_str(self):
"""
|
@ -2,37 +2,33 @@ import json
|
|||
import logging
|
||||
import os
|
||||
import time
|
||||
from tempfile import NamedTemporaryFile
|
||||
|
||||
from core import CoreCommandError, utils
|
||||
from core import utils
|
||||
from core.emulator.enumerations import NodeTypes
|
||||
from core.errors import CoreCommandError
|
||||
from core.nodes.base import CoreNode
|
||||
|
||||
|
||||
class LxdClient(object):
|
||||
def __init__(self, name, image):
|
||||
def __init__(self, name, image, run):
|
||||
self.name = name
|
||||
self.image = image
|
||||
self.run = run
|
||||
self.pid = None
|
||||
self._addr = {}
|
||||
|
||||
def create_container(self):
|
||||
utils.check_cmd(
|
||||
"lxc launch {image} {name}".format(name=self.name, image=self.image)
|
||||
)
|
||||
self.run(f"lxc launch {self.image} {self.name}")
|
||||
data = self.get_info()
|
||||
self.pid = data["state"]["pid"]
|
||||
return self.pid
|
||||
|
||||
def get_info(self):
|
||||
args = "lxc list {name} --format json".format(name=self.name)
|
||||
status, output = utils.cmd_output(args)
|
||||
if status:
|
||||
raise CoreCommandError(status, args, output)
|
||||
args = f"lxc list {self.name} --format json"
|
||||
output = self.run(args)
|
||||
data = json.loads(output)
|
||||
if not data:
|
||||
raise CoreCommandError(
|
||||
status, args, "LXC({name}) not present".format(name=self.name)
|
||||
)
|
||||
raise CoreCommandError(-1, args, f"LXC({self.name}) not present")
|
||||
return data[0]
|
||||
|
||||
def is_alive(self):
|
||||
|
@ -43,86 +39,24 @@ class LxdClient(object):
|
|||
return False
|
||||
|
||||
def stop_container(self):
|
||||
utils.check_cmd("lxc delete --force {name}".format(name=self.name))
|
||||
self.run(f"lxc delete --force {self.name}")
|
||||
|
||||
def _cmd_args(self, cmd):
|
||||
return "lxc exec -nT {name} -- {cmd}".format(name=self.name, cmd=cmd)
|
||||
def create_cmd(self, cmd):
|
||||
return f"lxc exec -nT {self.name} -- {cmd}"
|
||||
|
||||
def cmd_output(self, cmd):
|
||||
if isinstance(cmd, list):
|
||||
cmd = " ".join(cmd)
|
||||
args = self._cmd_args(cmd)
|
||||
logging.info("lxc cmd output: %s", args)
|
||||
return utils.cmd_output(args)
|
||||
def create_ns_cmd(self, cmd):
|
||||
return f"nsenter -t {self.pid} -m -u -i -p -n {cmd}"
|
||||
|
||||
def cmd(self, cmd, wait=True):
|
||||
if isinstance(cmd, list):
|
||||
cmd = " ".join(cmd)
|
||||
args = self._cmd_args(cmd)
|
||||
logging.info("lxc cmd: %s", args)
|
||||
return utils.cmd(args, wait)
|
||||
|
||||
def _ns_args(self, cmd):
|
||||
return "nsenter -t {pid} -m -u -i -p -n {cmd}".format(pid=self.pid, cmd=cmd)
|
||||
|
||||
def ns_cmd_output(self, cmd):
|
||||
if isinstance(cmd, list):
|
||||
cmd = " ".join(cmd)
|
||||
args = self._ns_args(cmd)
|
||||
logging.info("ns cmd: %s", args)
|
||||
return utils.cmd_output(args)
|
||||
|
||||
def ns_cmd(self, cmd, wait=True):
|
||||
if isinstance(cmd, list):
|
||||
cmd = " ".join(cmd)
|
||||
args = self._ns_args(cmd)
|
||||
logging.info("ns cmd: %s", args)
|
||||
return utils.cmd(args, wait)
|
||||
def check_cmd(self, cmd, wait=True):
|
||||
args = self.create_cmd(cmd)
|
||||
return utils.cmd(args, wait=wait)
|
||||
|
||||
def copy_file(self, source, destination):
|
||||
if destination[0] != "/":
|
||||
destination = os.path.join("/root/", destination)
|
||||
|
||||
args = "lxc file push {source} {name}/{destination}".format(
|
||||
source=source, name=self.name, destination=destination
|
||||
)
|
||||
status, output = utils.cmd_output(args)
|
||||
if status:
|
||||
raise CoreCommandError(status, args, output)
|
||||
|
||||
def getaddr(self, ifname, rescan=False):
|
||||
"""
|
||||
Get address for interface on node.
|
||||
|
||||
:param str ifname: interface name to get address for
|
||||
:param bool rescan: rescan flag
|
||||
:return: interface information
|
||||
:rtype: dict
|
||||
"""
|
||||
if ifname in self._addr and not rescan:
|
||||
return self._addr[ifname]
|
||||
|
||||
interface = {"ether": [], "inet": [], "inet6": [], "inet6link": []}
|
||||
args = ["ip", "addr", "show", "dev", ifname]
|
||||
status, output = self.ns_cmd_output(args)
|
||||
for line in output:
|
||||
line = line.strip().split()
|
||||
if line[0] == "link/ether":
|
||||
interface["ether"].append(line[1])
|
||||
elif line[0] == "inet":
|
||||
interface["inet"].append(line[1])
|
||||
elif line[0] == "inet6":
|
||||
if line[3] == "global":
|
||||
interface["inet6"].append(line[1])
|
||||
elif line[3] == "link":
|
||||
interface["inet6link"].append(line[1])
|
||||
else:
|
||||
logging.warning("unknown scope: %s" % line[3])
|
||||
|
||||
if status:
|
||||
logging.warning("nonzero exist status (%s) for cmd: %s", status, args)
|
||||
self._addr[ifname] = interface
|
||||
return interface
|
||||
args = f"lxc file push {source} {self.name}/{destination}"
|
||||
self.run(args)
|
||||
|
||||
|
||||
class LxcNode(CoreNode):
|
||||
|
@ -136,6 +70,7 @@ class LxcNode(CoreNode):
|
|||
nodedir=None,
|
||||
bootsh="boot.sh",
|
||||
start=True,
|
||||
server=None,
|
||||
image=None,
|
||||
):
|
||||
"""
|
||||
|
@ -147,12 +82,16 @@ class LxcNode(CoreNode):
|
|||
:param str nodedir: node directory
|
||||
:param str bootsh: boot shell to use
|
||||
:param bool start: start flag
|
||||
:param core.emulator.distributed.DistributedServer server: remote server node
|
||||
will run on, default is None for localhost
|
||||
:param str image: image to start container with
|
||||
"""
|
||||
if image is None:
|
||||
image = "ubuntu"
|
||||
self.image = image
|
||||
super(LxcNode, self).__init__(session, _id, name, nodedir, bootsh, start)
|
||||
super(LxcNode, self).__init__(
|
||||
session, _id, name, nodedir, bootsh, start, server
|
||||
)
|
||||
|
||||
def alive(self):
|
||||
"""
|
||||
|
@ -173,7 +112,7 @@ class LxcNode(CoreNode):
|
|||
if self.up:
|
||||
raise ValueError("starting a node that is already up")
|
||||
self.makenodedir()
|
||||
self.client = LxdClient(self.name, self.image)
|
||||
self.client = LxdClient(self.name, self.image, self.host_cmd)
|
||||
self.pid = self.client.create_container()
|
||||
self.up = True
|
||||
|
||||
|
@ -192,47 +131,6 @@ class LxcNode(CoreNode):
|
|||
self.client.stop_container()
|
||||
self.up = False
|
||||
|
||||
def cmd(self, args, wait=True):
|
||||
"""
|
||||
Runs shell command on node, with option to not wait for a result.
|
||||
|
||||
:param list[str]|str args: command to run
|
||||
:param bool wait: wait for command to exit, defaults to True
|
||||
:return: exit status for command
|
||||
:rtype: int
|
||||
"""
|
||||
return self.client.cmd(args, wait)
|
||||
|
||||
def cmd_output(self, args):
|
||||
"""
|
||||
Runs shell command on node and get exit status and output.
|
||||
|
||||
:param list[str]|str args: command to run
|
||||
:return: exit status and combined stdout and stderr
|
||||
:rtype: tuple[int, str]
|
||||
"""
|
||||
return self.client.cmd_output(args)
|
||||
|
||||
def check_cmd(self, args):
|
||||
"""
|
||||
Runs shell command on node.
|
||||
|
||||
:param list[str]|str args: command to run
|
||||
:return: combined stdout and stderr
|
||||
:rtype: str
|
||||
:raises CoreCommandError: when a non-zero exit status occurs
|
||||
"""
|
||||
status, output = self.client.cmd_output(args)
|
||||
if status:
|
||||
raise CoreCommandError(status, args, output)
|
||||
return output
|
||||
|
||||
def network_cmd(self, args):
|
||||
if not self.up:
|
||||
logging.debug("node down, not running network command: %s", args)
|
||||
return 0
|
||||
return self.check_cmd(args)
|
||||
|
||||
def termcmdstring(self, sh="/bin/sh"):
|
||||
"""
|
||||
Create a terminal command string.
|
||||
|
@ -240,7 +138,7 @@ class LxcNode(CoreNode):
|
|||
:param str sh: shell to execute command in
|
||||
:return: str
|
||||
"""
|
||||
return "lxc exec {name} -- bash".format(name=self.name)
|
||||
return f"lxc exec {self.name} -- {sh}"
|
||||
|
||||
def privatedir(self, path):
|
||||
"""
|
||||
|
@ -250,8 +148,8 @@ class LxcNode(CoreNode):
|
|||
:return: nothing
|
||||
"""
|
||||
logging.info("creating node dir: %s", path)
|
||||
args = "mkdir -p {path}".format(path=path)
|
||||
self.check_cmd(args)
|
||||
args = f"mkdir -p {path}"
|
||||
return self.cmd(args)
|
||||
|
||||
def mount(self, source, target):
|
||||
"""
|
||||
|
@ -274,13 +172,23 @@ class LxcNode(CoreNode):
|
|||
:param int mode: mode for file
|
||||
:return: nothing
|
||||
"""
|
||||
logging.debug("node dir(%s) ctrlchannel(%s)", self.nodedir, self.ctrlchnlname)
|
||||
logging.debug("nodefile filename(%s) mode(%s)", filename, mode)
|
||||
file_path = os.path.join(self.nodedir, filename)
|
||||
with open(file_path, "w") as f:
|
||||
os.chmod(f.name, mode)
|
||||
f.write(contents)
|
||||
self.client.copy_file(file_path, filename)
|
||||
|
||||
directory = os.path.dirname(filename)
|
||||
temp = NamedTemporaryFile(delete=False)
|
||||
temp.write(contents.encode("utf-8"))
|
||||
temp.close()
|
||||
|
||||
if directory:
|
||||
self.cmd(f"mkdir -m {0o755:o} -p {directory}")
|
||||
if self.server is not None:
|
||||
self.server.remote_put(temp.name, temp.name)
|
||||
self.client.copy_file(temp.name, filename)
|
||||
self.cmd(f"chmod {mode:o} {filename}")
|
||||
if self.server is not None:
|
||||
self.host_cmd(f"rm -f {temp.name}")
|
||||
os.unlink(temp.name)
|
||||
logging.debug("node(%s) added file: %s; mode: 0%o", self.name, filename, mode)
|
||||
|
||||
def nodefilecopy(self, filename, srcfilename, mode=None):
|
||||
"""
|
||||
|
@ -295,7 +203,18 @@ class LxcNode(CoreNode):
|
|||
logging.info(
|
||||
"node file copy file(%s) source(%s) mode(%s)", filename, srcfilename, mode
|
||||
)
|
||||
raise Exception("not supported")
|
||||
directory = os.path.dirname(filename)
|
||||
self.cmd(f"mkdir -p {directory}")
|
||||
|
||||
if self.server is None:
|
||||
source = srcfilename
|
||||
else:
|
||||
temp = NamedTemporaryFile(delete=False)
|
||||
source = temp.name
|
||||
self.server.remote_put(source, temp.name)
|
||||
|
||||
self.client.copy_file(source, filename)
|
||||
self.cmd(f"chmod {mode:o} {filename}")
|
||||
|
||||
def addnetif(self, netif, ifindex):
|
||||
super(LxcNode, self).addnetif(netif, ifindex)
|
||||
|
|
|
@@ -2,87 +2,225 @@
Clients for dealing with bridge/interface commands.
"""

import abc
import os

from future.utils import with_metaclass

from core.constants import BRCTL_BIN, IP_BIN, OVS_BIN
from core.utils import check_cmd
from core.constants import BRCTL_BIN, ETHTOOL_BIN, IP_BIN, OVS_BIN, TC_BIN


class NetClientBase(with_metaclass(abc.ABCMeta)):
def get_net_client(use_ovs, run):
"""
Base client for running command line bridge/interface commands.
Retrieve desired net client for running network commands.

:param bool use_ovs: True for OVS bridges, False for Linux bridges
:param func run: function used to run net client commands
:return: net client class
"""

@abc.abstractmethod
def create_bridge(self, name):
"""
Create a network bridge to connect interfaces to.

:param str name: bridge name
:return: nothing
"""
pass

@abc.abstractmethod
def delete_bridge(self, name):
"""
Delete a network bridge.

:param str name: bridge name
:return: nothing
"""
pass

@abc.abstractmethod
def create_interface(self, bridge_name, interface_name):
"""
Create an interface associated with a network bridge.

:param str bridge_name: bridge name
:param str interface_name: interface name
:return: nothing
"""
pass

@abc.abstractmethod
def delete_interface(self, bridge_name, interface_name):
"""
Delete an interface associated with a network bridge.

:param str bridge_name: bridge name
:param str interface_name: interface name
:return: nothing
"""
pass

@abc.abstractmethod
def existing_bridges(self, _id):
"""
Checks if there are any existing bridges for a node.

:param _id: node id to check bridges for
"""
pass

@abc.abstractmethod
def disable_mac_learning(self, name):
"""
Disable mac learning for a bridge.

:param str name: bridge name
:return: nothing
"""
pass
if use_ovs:
return OvsNetClient(run)
else:
return LinuxNetClient(run)
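The factory is what callers elsewhere in this change use; the runner they inject decides where commands execute (CoreInterface passes its host_cmd, DockerNode passes nsenter_cmd). A short usage sketch, with an illustrative device name:

    use_ovs = session.options.get_config("ovs") == "True"

    # host-side commands, local or via a DistributedServer depending on host_cmd
    net_client = get_net_client(use_ovs, self.host_cmd)

    # commands inside a Docker node's namespaces
    node_net_client = get_net_client(use_ovs, self.nsenter_cmd)

    net_client.device_up("veth0.0.1")   # runs: ip link set veth0.0.1 up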
class LinuxNetClient(NetClientBase):
class LinuxNetClient(object):
"""
Client for creating Linux bridges and ip interfaces for nodes.
"""

def __init__(self, run):
"""
Create LinuxNetClient instance.

:param run: function to run commands with
"""
self.run = run

def set_hostname(self, name):
"""
Set network hostname.

:param str name: name for hostname
:return: nothing
"""
self.run(f"hostname {name}")

def create_route(self, route, device):
"""
Create a new route for a device.

:param str route: route to create
:param str device: device to add route to
:return: nothing
"""
self.run(f"{IP_BIN} route add {route} dev {device}")

def device_up(self, device):
"""
Bring a device up.

:param str device: device to bring up
:return: nothing
"""
self.run(f"{IP_BIN} link set {device} up")

def device_down(self, device):
"""
Bring a device down.

:param str device: device to bring down
:return: nothing
"""
self.run(f"{IP_BIN} link set {device} down")

def device_name(self, device, name):
"""
Set a device name.

:param str device: device to set name for
:param str name: name to set
:return: nothing
"""
self.run(f"{IP_BIN} link set {device} name {name}")

def device_show(self, device):
"""
Show information for a device.

:param str device: device to get information for
:return: device information
:rtype: str
"""
return self.run(f"{IP_BIN} link show {device}")

def get_mac(self, device):
"""
Retrieve MAC address for a given device.

:param str device: device to get mac for
:return: MAC address
:rtype: str
"""
return self.run(f"cat /sys/class/net/{device}/address")

def get_ifindex(self, device):
"""
Retrieve ifindex for a given device.

:param str device: device to get ifindex for
:return: ifindex
:rtype: str
"""
return self.run(f"cat /sys/class/net/{device}/ifindex")

def device_ns(self, device, namespace):
"""
Set netns for a device.

:param str device: device to setns for
:param str namespace: namespace to set device to
:return: nothing
"""
self.run(f"{IP_BIN} link set {device} netns {namespace}")

def device_flush(self, device):
"""
Flush device addresses.

:param str device: device to flush
:return: nothing
"""
self.run(f"{IP_BIN} -6 address flush dev {device}")

def device_mac(self, device, mac):
"""
Set MAC address for a device.

:param str device: device to set mac for
:param str mac: mac to set
:return: nothing
"""
self.run(f"{IP_BIN} link set dev {device} address {mac}")

def delete_device(self, device):
"""
Delete device.

:param str device: device to delete
:return: nothing
"""
self.run(f"{IP_BIN} link delete {device}")

def delete_tc(self, device):
"""
Remove traffic control settings for a device.

:param str device: device to remove tc
:return: nothing
"""
self.run(f"{TC_BIN} qdisc delete dev {device} root")
def checksums_off(self, interface_name):
|
||||
"""
|
||||
Turns interface checksums off.
|
||||
|
||||
:param str interface_name: interface to update
|
||||
:return: nothing
|
||||
"""
|
||||
self.run(f"{ETHTOOL_BIN} -K {interface_name} rx off tx off")
|
||||
|
||||
def create_address(self, device, address, broadcast=None):
|
||||
"""
|
||||
Create address for a device.
|
||||
|
||||
:param str device: device to add address to
|
||||
:param str address: address to add
|
||||
:param str broadcast: broadcast address to use, default is None
|
||||
:return: nothing
|
||||
"""
|
||||
if broadcast is not None:
|
||||
self.run(
|
||||
f"{IP_BIN} address add {address} broadcast {broadcast} dev {device}"
|
||||
)
|
||||
else:
|
||||
self.run(f"{IP_BIN} address add {address} dev {device}")
|
||||
|
||||
def delete_address(self, device, address):
|
||||
"""
|
||||
Delete an address from a device.
|
||||
|
||||
:param str device: targeted device
|
||||
:param str address: address to remove
|
||||
:return: nothing
|
||||
"""
|
||||
self.run(f"{IP_BIN} address delete {address} dev {device}")
|
||||
|
||||
def create_veth(self, name, peer):
|
||||
"""
|
||||
Create a veth pair.
|
||||
|
||||
:param str name: veth name
|
||||
:param str peer: peer name
|
||||
:return: nothing
|
||||
"""
|
||||
self.run(f"{IP_BIN} link add name {name} type veth peer name {peer}")
|
||||
|
||||
def create_gretap(self, device, address, local, ttl, key):
|
||||
"""
|
||||
Create a GRE tap on a device.
|
||||
|
||||
:param str device: device to add tap to
|
||||
:param str address: address to add tap for
|
||||
:param str local: local address to tie to
|
||||
:param int ttl: time to live value
|
||||
:param int key: key for tap
|
||||
:return: nothing
|
||||
"""
|
||||
cmd = f"{IP_BIN} link add {device} type gretap remote {address}"
|
||||
if local is not None:
|
||||
cmd += f" local {local}"
|
||||
if ttl is not None:
|
||||
cmd += f" ttl {ttl}"
|
||||
if key is not None:
|
||||
cmd += f" key {key}"
|
||||
self.run(cmd)
|
||||
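For a concrete sense of what create_gretap composes, here is the same string-building with invented device names and addresses; the optional pieces are only appended when given:

# Hypothetical values; IP_BIN normally comes from core.constants.
IP_BIN = "ip"
device, address, local, ttl, key = "gt.1.abcd", "10.0.0.2", "10.0.0.1", 255, 1
cmd = f"{IP_BIN} link add {device} type gretap remote {address}"
if local is not None:
    cmd += f" local {local}"
if ttl is not None:
    cmd += f" ttl {ttl}"
if key is not None:
    cmd += f" key {key}"
# ip link add gt.1.abcd type gretap remote 10.0.0.2 local 10.0.0.1 ttl 255 key 1
print(cmd)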
|
||||
def create_bridge(self, name):
|
||||
"""
|
||||
Create a Linux bridge and bring it up.
|
||||
|
@ -90,16 +228,16 @@ class LinuxNetClient(NetClientBase):
|
|||
:param str name: bridge name
|
||||
:return: nothing
|
||||
"""
|
||||
check_cmd([BRCTL_BIN, "addbr", name])
|
||||
check_cmd([BRCTL_BIN, "stp", name, "off"])
|
||||
check_cmd([BRCTL_BIN, "setfd", name, "0"])
|
||||
check_cmd([IP_BIN, "link", "set", name, "up"])
|
||||
self.run(f"{BRCTL_BIN} addbr {name}")
|
||||
self.run(f"{BRCTL_BIN} stp {name} off")
|
||||
self.run(f"{BRCTL_BIN} setfd {name} 0")
|
||||
self.device_up(name)
|
||||
|
||||
# turn off multicast snooping so forwarding occurs w/o IGMP joins
|
||||
snoop = "/sys/devices/virtual/net/%s/bridge/multicast_snooping" % name
|
||||
if os.path.exists(snoop):
|
||||
with open(snoop, "w") as f:
|
||||
f.write("0")
|
||||
snoop_file = "multicast_snooping"
|
||||
snoop = f"/sys/devices/virtual/net/{name}/bridge/{snoop_file}"
|
||||
self.run(f"echo 0 > /tmp/{snoop_file}", shell=True)
|
||||
self.run(f"cp /tmp/{snoop_file} {snoop}")
|
||||
|
||||
def delete_bridge(self, name):
|
||||
"""
|
||||
|
@ -108,8 +246,8 @@ class LinuxNetClient(NetClientBase):
|
|||
:param str name: bridge name
|
||||
:return: nothing
|
||||
"""
|
||||
check_cmd([IP_BIN, "link", "set", name, "down"])
|
||||
check_cmd([BRCTL_BIN, "delbr", name])
|
||||
self.device_down(name)
|
||||
self.run(f"{BRCTL_BIN} delbr {name}")
|
||||
|
||||
def create_interface(self, bridge_name, interface_name):
|
||||
"""
|
||||
|
@ -119,8 +257,8 @@ class LinuxNetClient(NetClientBase):
|
|||
:param str interface_name: interface name
|
||||
:return: nothing
|
||||
"""
|
||||
check_cmd([BRCTL_BIN, "addif", bridge_name, interface_name])
|
||||
check_cmd([IP_BIN, "link", "set", interface_name, "up"])
|
||||
self.run(f"{BRCTL_BIN} addif {bridge_name} {interface_name}")
|
||||
self.device_up(interface_name)
|
||||
|
||||
def delete_interface(self, bridge_name, interface_name):
|
||||
"""
|
||||
|
@ -130,7 +268,7 @@ class LinuxNetClient(NetClientBase):
|
|||
:param str interface_name: interface name
|
||||
:return: nothing
|
||||
"""
|
||||
check_cmd([BRCTL_BIN, "delif", bridge_name, interface_name])
|
||||
self.run(f"{BRCTL_BIN} delif {bridge_name} {interface_name}")
|
||||
|
||||
def existing_bridges(self, _id):
|
||||
"""
|
||||
|
@ -138,7 +276,7 @@ class LinuxNetClient(NetClientBase):
|
|||
|
||||
:param _id: node id to check bridges for
|
||||
"""
|
||||
output = check_cmd([BRCTL_BIN, "show"])
|
||||
output = self.run(f"{BRCTL_BIN} show")
|
||||
lines = output.split("\n")
|
||||
for line in lines[1:]:
|
||||
columns = line.split()
|
||||
|
@ -157,10 +295,10 @@ class LinuxNetClient(NetClientBase):
|
|||
:param str name: bridge name
|
||||
:return: nothing
|
||||
"""
|
||||
check_cmd([BRCTL_BIN, "setageing", name, "0"])
|
||||
self.run(f"{BRCTL_BIN} setageing {name} 0")
|
||||
|
||||
|
||||
class OvsNetClient(NetClientBase):
|
||||
class OvsNetClient(LinuxNetClient):
|
||||
"""
|
||||
Client for creating OVS bridges and ip interfaces for nodes.
|
||||
"""
|
||||
|
@ -172,11 +310,11 @@ class OvsNetClient(NetClientBase):
|
|||
:param str name: bridge name
|
||||
:return: nothing
|
||||
"""
|
||||
check_cmd([OVS_BIN, "add-br", name])
|
||||
check_cmd([OVS_BIN, "set", "bridge", name, "stp_enable=false"])
|
||||
check_cmd([OVS_BIN, "set", "bridge", name, "other_config:stp-max-age=6"])
|
||||
check_cmd([OVS_BIN, "set", "bridge", name, "other_config:stp-forward-delay=4"])
|
||||
check_cmd([IP_BIN, "link", "set", name, "up"])
|
||||
self.run(f"{OVS_BIN} add-br {name}")
|
||||
self.run(f"{OVS_BIN} set bridge {name} stp_enable=false")
|
||||
self.run(f"{OVS_BIN} set bridge {name} other_config:stp-max-age=6")
|
||||
self.run(f"{OVS_BIN} set bridge {name} other_config:stp-forward-delay=4")
|
||||
self.device_up(name)
|
||||
|
||||
def delete_bridge(self, name):
|
||||
"""
|
||||
|
@ -185,8 +323,8 @@ class OvsNetClient(NetClientBase):
|
|||
:param str name: bridge name
|
||||
:return: nothing
|
||||
"""
|
||||
check_cmd([IP_BIN, "link", "set", name, "down"])
|
||||
check_cmd([OVS_BIN, "del-br", name])
|
||||
self.device_down(name)
|
||||
self.run(f"{OVS_BIN} del-br {name}")
|
||||
|
||||
def create_interface(self, bridge_name, interface_name):
|
||||
"""
|
||||
|
@ -196,8 +334,8 @@ class OvsNetClient(NetClientBase):
|
|||
:param str interface_name: interface name
|
||||
:return: nothing
|
||||
"""
|
||||
check_cmd([OVS_BIN, "add-port", bridge_name, interface_name])
|
||||
check_cmd([IP_BIN, "link", "set", interface_name, "up"])
|
||||
self.run(f"{OVS_BIN} add-port {bridge_name} {interface_name}")
|
||||
self.device_up(interface_name)
|
||||
|
||||
def delete_interface(self, bridge_name, interface_name):
|
||||
"""
|
||||
|
@ -207,7 +345,7 @@ class OvsNetClient(NetClientBase):
|
|||
:param str interface_name: interface name
|
||||
:return: nothing
|
||||
"""
|
||||
check_cmd([OVS_BIN, "del-port", bridge_name, interface_name])
|
||||
self.run(f"{OVS_BIN} del-port {bridge_name} {interface_name}")
|
||||
|
||||
def existing_bridges(self, _id):
|
||||
"""
|
||||
|
@ -215,7 +353,7 @@ class OvsNetClient(NetClientBase):
|
|||
|
||||
:param _id: node id to check bridges for
|
||||
"""
|
||||
output = check_cmd([OVS_BIN, "list-br"])
|
||||
output = self.run(f"{OVS_BIN} list-br")
|
||||
if output:
|
||||
for line in output.split("\n"):
|
||||
fields = line.split(".")
|
||||
|
@ -230,4 +368,4 @@ class OvsNetClient(NetClientBase):
|
|||
:param str name: bridge name
|
||||
:return: nothing
|
||||
"""
|
||||
check_cmd([OVS_BIN, "set", "bridge", name, "other_config:mac-aging-time=0"])
|
||||
self.run(f"{OVS_BIN} set bridge {name} other_config:mac-aging-time=0")
|
||||
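A rough usage sketch for the client classes above, assuming utils.cmd as the run function the way the control network code in this change does; the bridge and veth names are invented:

from core import utils
from core.nodes.netclient import get_net_client

# False selects LinuxNetClient, True selects OvsNetClient.
net_client = get_net_client(use_ovs=False, run=utils.cmd)
net_client.create_bridge("b.1.abcd")
net_client.create_veth("veth1.0.abcd", "eth0.1.abcd")
net_client.create_interface("b.1.abcd", "veth1.0.abcd")
net_client.delete_interface("b.1.abcd", "veth1.0.abcd")
net_client.delete_bridge("b.1.abcd")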
|
|
|
@ -3,22 +3,20 @@ Defines network nodes used within core.
|
|||
"""
|
||||
|
||||
import logging
|
||||
import os
|
||||
import socket
|
||||
import threading
|
||||
import time
|
||||
from socket import AF_INET, AF_INET6
|
||||
|
||||
from core import CoreCommandError, CoreError, constants, utils
|
||||
from core import utils
|
||||
from core.constants import EBTABLES_BIN, TC_BIN
|
||||
from core.emulator.data import LinkData
|
||||
from core.emulator.enumerations import LinkTypes, NodeTypes, RegisterTlvs
|
||||
from core.errors import CoreCommandError, CoreError
|
||||
from core.nodes import ipaddress
|
||||
from core.nodes.base import CoreNetworkBase
|
||||
from core.nodes.interface import GreTap, Veth
|
||||
|
||||
utils.check_executables(
|
||||
[constants.BRCTL_BIN, constants.IP_BIN, constants.EBTABLES_BIN, constants.TC_BIN]
|
||||
)
|
||||
from core.nodes.netclient import get_net_client
|
||||
|
||||
ebtables_lock = threading.Lock()
|
||||
|
||||
|
@ -95,14 +93,11 @@ class EbtablesQueue(object):
|
|||
"""
|
||||
Helper for building ebtables atomic file command list.
|
||||
|
||||
:param list[str] cmd: ebtable command
|
||||
:param str cmd: ebtables command
|
||||
:return: ebtables atomic command
|
||||
:rtype: str
|
||||
"""
|
||||
r = [constants.EBTABLES_BIN, "--atomic-file", self.atomic_file]
|
||||
if cmd:
|
||||
r.extend(cmd)
|
||||
return r
|
||||
return f"{EBTABLES_BIN} --atomic-file {self.atomic_file} {cmd}"
|
||||
|
||||
def lastupdate(self, wlan):
|
||||
"""
|
||||
|
@ -166,22 +161,22 @@ class EbtablesQueue(object):
|
|||
:return: nothing
|
||||
"""
|
||||
# save kernel ebtables snapshot to a file
|
||||
args = self.ebatomiccmd(["--atomic-save"])
|
||||
utils.check_cmd(args)
|
||||
args = self.ebatomiccmd("--atomic-save")
|
||||
wlan.host_cmd(args)
|
||||
|
||||
# modify the table file using queued ebtables commands
|
||||
for c in self.cmds:
|
||||
args = self.ebatomiccmd(c)
|
||||
utils.check_cmd(args)
|
||||
wlan.host_cmd(args)
|
||||
self.cmds = []
|
||||
|
||||
# commit the table file to the kernel
|
||||
args = self.ebatomiccmd(["--atomic-commit"])
|
||||
utils.check_cmd(args)
|
||||
args = self.ebatomiccmd("--atomic-commit")
|
||||
wlan.host_cmd(args)
|
||||
|
||||
try:
|
||||
os.unlink(self.atomic_file)
|
||||
except OSError:
|
||||
wlan.host_cmd(f"rm -f {self.atomic_file}")
|
||||
except CoreCommandError:
|
||||
logging.exception("error removing atomic file: %s", self.atomic_file)
|
||||
|
||||
def ebchange(self, wlan):
|
||||
|
@ -203,58 +198,22 @@ class EbtablesQueue(object):
|
|||
"""
|
||||
with wlan._linked_lock:
|
||||
# flush the chain
|
||||
self.cmds.extend([["-F", wlan.brname]])
|
||||
self.cmds.append(f"-F {wlan.brname}")
|
||||
# rebuild the chain
|
||||
for netif1, v in wlan._linked.items():
|
||||
for netif2, linked in v.items():
|
||||
if wlan.policy == "DROP" and linked:
|
||||
self.cmds.extend(
|
||||
[
|
||||
[
|
||||
"-A",
|
||||
wlan.brname,
|
||||
"-i",
|
||||
netif1.localname,
|
||||
"-o",
|
||||
netif2.localname,
|
||||
"-j",
|
||||
"ACCEPT",
|
||||
],
|
||||
[
|
||||
"-A",
|
||||
wlan.brname,
|
||||
"-o",
|
||||
netif1.localname,
|
||||
"-i",
|
||||
netif2.localname,
|
||||
"-j",
|
||||
"ACCEPT",
|
||||
],
|
||||
f"-A {wlan.brname} -i {netif1.localname} -o {netif2.localname} -j ACCEPT",
|
||||
f"-A {wlan.brname} -o {netif1.localname} -i {netif2.localname} -j ACCEPT",
|
||||
]
|
||||
)
|
||||
elif wlan.policy == "ACCEPT" and not linked:
|
||||
self.cmds.extend(
|
||||
[
|
||||
[
|
||||
"-A",
|
||||
wlan.brname,
|
||||
"-i",
|
||||
netif1.localname,
|
||||
"-o",
|
||||
netif2.localname,
|
||||
"-j",
|
||||
"DROP",
|
||||
],
|
||||
[
|
||||
"-A",
|
||||
wlan.brname,
|
||||
"-o",
|
||||
netif1.localname,
|
||||
"-i",
|
||||
netif2.localname,
|
||||
"-j",
|
||||
"DROP",
|
||||
],
|
||||
f"-A {wlan.brname} -i {netif1.localname} -o {netif2.localname} -j DROP",
|
||||
f"-A {wlan.brname} -o {netif1.localname} -i {netif2.localname} -j DROP",
|
||||
]
|
||||
)
|
||||
|
||||
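The queued update above reduces to an atomic save/modify/commit sequence against a scratch ebtables table file. A sketch of the command strings for a hypothetical DROP-policy bridge (names invented; a real session runs them through wlan.host_cmd):

EBTABLES_BIN = "ebtables"
atomic_file = "/tmp/pycore.ebtables.atomic"
brname = "b.5.abcd"
queued = [
    f"-F {brname}",
    f"-A {brname} -i veth1.0.abcd -o veth2.0.abcd -j ACCEPT",
    f"-A {brname} -o veth1.0.abcd -i veth2.0.abcd -j ACCEPT",
]
commands = (
    [f"{EBTABLES_BIN} --atomic-file {atomic_file} --atomic-save"]
    + [f"{EBTABLES_BIN} --atomic-file {atomic_file} {cmd}" for cmd in queued]
    + [f"{EBTABLES_BIN} --atomic-file {atomic_file} --atomic-commit"]
)
print("\n".join(commands))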
|
@ -284,7 +243,9 @@ class CoreNetwork(CoreNetworkBase):
|
|||
|
||||
policy = "DROP"
|
||||
|
||||
def __init__(self, session, _id=None, name=None, start=True, policy=None):
|
||||
def __init__(
|
||||
self, session, _id=None, name=None, start=True, server=None, policy=None
|
||||
):
|
||||
"""
|
||||
Creates a CoreNetwork instance.
|
||||
|
||||
|
@ -292,21 +253,42 @@ class CoreNetwork(CoreNetworkBase):
|
|||
:param int _id: object id
|
||||
:param str name: object name
|
||||
:param bool start: start flag
|
||||
:param core.emulator.distributed.DistributedServer server: remote server node
|
||||
will run on, default is None for localhost
|
||||
:param policy: network policy
|
||||
"""
|
||||
CoreNetworkBase.__init__(self, session, _id, name, start)
|
||||
CoreNetworkBase.__init__(self, session, _id, name, start, server)
|
||||
if name is None:
|
||||
name = str(self.id)
|
||||
if policy is not None:
|
||||
self.policy = policy
|
||||
self.name = name
|
||||
sessionid = self.session.short_session_id()
|
||||
self.brname = "b.%s.%s" % (str(self.id), sessionid)
|
||||
self.brname = f"b.{self.id}.{sessionid}"
|
||||
self.up = False
|
||||
if start:
|
||||
self.startup()
|
||||
ebq.startupdateloop(self)
|
||||
|
||||
def host_cmd(self, args, env=None, cwd=None, wait=True, shell=False):
|
||||
"""
|
||||
Runs a command that is used to configure and set up the network on the host
|
||||
system and all configured distributed servers.
|
||||
|
||||
:param str args: command to run
|
||||
:param dict env: environment to run command with
|
||||
:param str cwd: directory to run command in
|
||||
:param bool wait: True to wait for status, False otherwise
|
||||
:param bool shell: True to use shell, False otherwise
|
||||
:return: combined stdout and stderr
|
||||
:rtype: str
|
||||
:raises CoreCommandError: when a non-zero exit status occurs
|
||||
"""
|
||||
logging.info("network node(%s) cmd", self.name)
|
||||
output = utils.cmd(args, env, cwd, wait, shell)
|
||||
self.session.distributed.execute(lambda x: x.remote_cmd(args, env, cwd, wait))
|
||||
return output
|
||||
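In short, host_cmd runs the command on the local host and then mirrors it on every configured distributed server. A stripped-down sketch of that behavior, with the runner and server objects as stand-ins:

def run_everywhere(args, local_run, servers):
    # Run locally first, then repeat the same command on each remote server.
    output = local_run(args)
    for server in servers:
        server.remote_cmd(args)
    return output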
|
||||
def startup(self):
|
||||
"""
|
||||
Linux bridge startup logic.
|
||||
|
@ -317,21 +299,11 @@ class CoreNetwork(CoreNetworkBase):
|
|||
self.net_client.create_bridge(self.brname)
|
||||
|
||||
# create a new ebtables chain for this bridge
|
||||
ebtablescmds(
|
||||
utils.check_cmd,
|
||||
[
|
||||
[constants.EBTABLES_BIN, "-N", self.brname, "-P", self.policy],
|
||||
[
|
||||
constants.EBTABLES_BIN,
|
||||
"-A",
|
||||
"FORWARD",
|
||||
"--logical-in",
|
||||
self.brname,
|
||||
"-j",
|
||||
self.brname,
|
||||
],
|
||||
],
|
||||
)
|
||||
cmds = [
|
||||
f"{EBTABLES_BIN} -N {self.brname} -P {self.policy}",
|
||||
f"{EBTABLES_BIN} -A FORWARD --logical-in {self.brname} -j {self.brname}",
|
||||
]
|
||||
ebtablescmds(self.host_cmd, cmds)
|
||||
|
||||
self.up = True
|
||||
|
||||
|
@ -348,21 +320,11 @@ class CoreNetwork(CoreNetworkBase):
|
|||
|
||||
try:
|
||||
self.net_client.delete_bridge(self.brname)
|
||||
ebtablescmds(
|
||||
utils.check_cmd,
|
||||
[
|
||||
[
|
||||
constants.EBTABLES_BIN,
|
||||
"-D",
|
||||
"FORWARD",
|
||||
"--logical-in",
|
||||
self.brname,
|
||||
"-j",
|
||||
self.brname,
|
||||
],
|
||||
[constants.EBTABLES_BIN, "-X", self.brname],
|
||||
],
|
||||
)
|
||||
cmds = [
|
||||
f"{EBTABLES_BIN} -D FORWARD --logical-in {self.brname} -j {self.brname}",
|
||||
f"{EBTABLES_BIN} -X {self.brname}",
|
||||
]
|
||||
ebtablescmds(self.host_cmd, cmds)
|
||||
except CoreCommandError:
|
||||
logging.exception("error during shutdown")
|
||||
|
||||
|
@ -381,11 +343,11 @@ class CoreNetwork(CoreNetworkBase):
|
|||
"""
|
||||
Attach a network interface.
|
||||
|
||||
:param core.netns.vnode.VEth netif: network interface to attach
|
||||
:param core.nodes.interface.Veth netif: network interface to attach
|
||||
:return: nothing
|
||||
"""
|
||||
if self.up:
|
||||
self.net_client.create_interface(self.brname, netif.localname)
|
||||
netif.net_client.create_interface(self.brname, netif.localname)
|
||||
|
||||
CoreNetworkBase.attach(self, netif)
|
||||
|
||||
|
@ -397,7 +359,7 @@ class CoreNetwork(CoreNetworkBase):
|
|||
:return: nothing
|
||||
"""
|
||||
if self.up:
|
||||
self.net_client.delete_interface(self.brname, netif.localname)
|
||||
netif.net_client.delete_interface(self.brname, netif.localname)
|
||||
|
||||
CoreNetworkBase.detach(self, netif)
|
||||
|
||||
|
@ -412,10 +374,10 @@ class CoreNetwork(CoreNetworkBase):
|
|||
"""
|
||||
# check if the network interfaces are attached to this network
|
||||
if self._netif[netif1.netifi] != netif1:
|
||||
raise ValueError("inconsistency for netif %s" % netif1.name)
|
||||
raise ValueError(f"inconsistency for netif {netif1.name}")
|
||||
|
||||
if self._netif[netif2.netifi] != netif2:
|
||||
raise ValueError("inconsistency for netif %s" % netif2.name)
|
||||
raise ValueError(f"inconsistency for netif {netif2.name}")
|
||||
|
||||
try:
|
||||
linked = self._linked[netif1][netif2]
|
||||
|
@ -425,7 +387,7 @@ class CoreNetwork(CoreNetworkBase):
|
|||
elif self.policy == "DROP":
|
||||
linked = False
|
||||
else:
|
||||
raise Exception("unknown policy: %s" % self.policy)
|
||||
raise Exception(f"unknown policy: {self.policy}")
|
||||
self._linked[netif1][netif2] = linked
|
||||
|
||||
return linked
|
||||
|
@ -488,8 +450,8 @@ class CoreNetwork(CoreNetworkBase):
|
|||
"""
|
||||
if devname is None:
|
||||
devname = netif.localname
|
||||
tc = [constants.TC_BIN, "qdisc", "replace", "dev", devname]
|
||||
parent = ["root"]
|
||||
tc = f"{TC_BIN} qdisc replace dev {devname}"
|
||||
parent = "root"
|
||||
changed = False
|
||||
if netif.setparam("bw", bw):
|
||||
# from tc-tbf(8): minimum value for burst is rate / kernel_hz
|
||||
|
@ -497,27 +459,24 @@ class CoreNetwork(CoreNetworkBase):
|
|||
burst = max(2 * netif.mtu, bw / 1000)
|
||||
# max IP payload
|
||||
limit = 0xFFFF
|
||||
tbf = ["tbf", "rate", str(bw), "burst", str(burst), "limit", str(limit)]
|
||||
tbf = f"tbf rate {bw} burst {burst} limit {limit}"
|
||||
if bw > 0:
|
||||
if self.up:
|
||||
logging.debug(
|
||||
"linkconfig: %s" % ([tc + parent + ["handle", "1:"] + tbf],)
|
||||
)
|
||||
utils.check_cmd(tc + parent + ["handle", "1:"] + tbf)
|
||||
cmd = f"{tc} {parent} handle 1: {tbf}"
|
||||
netif.host_cmd(cmd)
|
||||
netif.setparam("has_tbf", True)
|
||||
changed = True
|
||||
elif netif.getparam("has_tbf") and bw <= 0:
|
||||
tcd = [] + tc
|
||||
tcd[2] = "delete"
|
||||
if self.up:
|
||||
utils.check_cmd(tcd + parent)
|
||||
cmd = f"{TC_BIN} qdisc delete dev {devname} {parent}"
|
||||
netif.host_cmd(cmd)
|
||||
netif.setparam("has_tbf", False)
|
||||
# removing the parent removes the child
|
||||
netif.setparam("has_netem", False)
|
||||
changed = True
|
||||
if netif.getparam("has_tbf"):
|
||||
parent = ["parent", "1:1"]
|
||||
netem = ["netem"]
|
||||
parent = "parent 1:1"
|
||||
netem = "netem"
|
||||
changed = max(changed, netif.setparam("delay", delay))
|
||||
if loss is not None:
|
||||
loss = float(loss)
|
||||
|
@ -530,17 +489,17 @@ class CoreNetwork(CoreNetworkBase):
|
|||
return
|
||||
# jitter and delay use the same delay statement
|
||||
if delay is not None:
|
||||
netem += ["delay", "%sus" % delay]
|
||||
netem += f" delay {delay}us"
|
||||
if jitter is not None:
|
||||
if delay is None:
|
||||
netem += ["delay", "0us", "%sus" % jitter, "25%"]
|
||||
netem += f" delay 0us {jitter}us 25%"
|
||||
else:
|
||||
netem += ["%sus" % jitter, "25%"]
|
||||
netem += f" {jitter}us 25%"
|
||||
|
||||
if loss is not None and loss > 0:
|
||||
netem += ["loss", "%s%%" % min(loss, 100)]
|
||||
netem += f" loss {min(loss, 100)}%"
|
||||
if duplicate is not None and duplicate > 0:
|
||||
netem += ["duplicate", "%s%%" % min(duplicate, 100)]
|
||||
netem += f" duplicate {min(duplicate, 100)}%"
|
||||
|
||||
delay_check = delay is None or delay <= 0
|
||||
jitter_check = jitter is None or jitter <= 0
|
||||
|
@ -550,17 +509,16 @@ class CoreNetwork(CoreNetworkBase):
|
|||
# possibly remove netem if it exists and parent queue wasn't removed
|
||||
if not netif.getparam("has_netem"):
|
||||
return
|
||||
tc[2] = "delete"
|
||||
if self.up:
|
||||
logging.debug("linkconfig: %s" % ([tc + parent + ["handle", "10:"]],))
|
||||
utils.check_cmd(tc + parent + ["handle", "10:"])
|
||||
cmd = f"{TC_BIN} qdisc delete dev {devname} {parent} handle 10:"
|
||||
netif.host_cmd(cmd)
|
||||
netif.setparam("has_netem", False)
|
||||
elif len(netem) > 1:
|
||||
if self.up:
|
||||
logging.debug(
|
||||
"linkconfig: %s" % ([tc + parent + ["handle", "10:"] + netem],)
|
||||
cmd = (
|
||||
f"{TC_BIN} qdisc replace dev {devname} {parent} handle 10: {netem}"
|
||||
)
|
||||
utils.check_cmd(tc + parent + ["handle", "10:"] + netem)
|
||||
netif.host_cmd(cmd)
|
||||
netif.setparam("has_netem", True)
|
||||
|
||||
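To make the shaping concrete, this is roughly what linkconfig ends up running for a link with bandwidth, delay, and loss set; the device name and values are invented:

TC_BIN = "tc"
devname, bw, delay, loss, mtu = "veth2.0.abcd", 512000, 20000, 1.5, 1500
burst = max(2 * mtu, bw / 1000)
tbf = f"tbf rate {bw} burst {burst} limit {0xFFFF}"
netem = f"netem delay {delay}us loss {min(loss, 100)}%"
print(f"{TC_BIN} qdisc replace dev {devname} root handle 1: {tbf}")
print(f"{TC_BIN} qdisc replace dev {devname} parent 1:1 handle 10: {netem}")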
def linknet(self, net):
|
||||
|
@ -574,30 +532,28 @@ class CoreNetwork(CoreNetworkBase):
|
|||
"""
|
||||
sessionid = self.session.short_session_id()
|
||||
try:
|
||||
_id = "%x" % self.id
|
||||
_id = f"{self.id:x}"
|
||||
except TypeError:
|
||||
_id = "%s" % self.id
|
||||
_id = str(self.id)
|
||||
|
||||
try:
|
||||
net_id = "%x" % net.id
|
||||
net_id = f"{net.id:x}"
|
||||
except TypeError:
|
||||
net_id = "%s" % net.id
|
||||
net_id = str(net.id)
|
||||
|
||||
localname = "veth%s.%s.%s" % (_id, net_id, sessionid)
|
||||
localname = f"veth{_id}.{net_id}.{sessionid}"
|
||||
if len(localname) >= 16:
|
||||
raise ValueError("interface local name %s too long" % localname)
|
||||
raise ValueError(f"interface local name {localname} too long")
|
||||
|
||||
name = "veth%s.%s.%s" % (net_id, _id, sessionid)
|
||||
name = f"veth{net_id}.{_id}.{sessionid}"
|
||||
if len(name) >= 16:
|
||||
raise ValueError("interface name %s too long" % name)
|
||||
raise ValueError(f"interface name {name} too long")
|
||||
|
||||
netif = Veth(
|
||||
node=None, name=name, localname=localname, mtu=1500, net=self, start=self.up
|
||||
)
|
||||
netif = Veth(self.session, None, name, localname, start=self.up)
|
||||
self.attach(netif)
|
||||
if net.up:
|
||||
# this is similar to net.attach() but uses netif.name instead of localname
|
||||
self.net_client.create_interface(net.brname, netif.name)
|
||||
netif.net_client.create_interface(net.brname, netif.name)
|
||||
i = net.newifindex()
|
||||
net._netif[i] = netif
|
||||
with net._linked_lock:
|
||||
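The two length checks above guard the Linux interface-name limit (IFNAMSIZ is 16 bytes, leaving 15 usable characters), which the session id plus node ids can exceed. A quick sanity sketch with made-up ids:

sessionid, _id, net_id = "abcd", "2", "3"
localname = f"veth{_id}.{net_id}.{sessionid}"
name = f"veth{net_id}.{_id}.{sessionid}"
for ifname in (localname, name):
    if len(ifname) >= 16:
        raise ValueError(f"interface name {ifname} too long")
print(localname, name)  # veth2.3.abcd veth3.2.abcd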
|
@ -632,9 +588,7 @@ class CoreNetwork(CoreNetworkBase):
|
|||
return
|
||||
|
||||
for addr in addrlist:
|
||||
utils.check_cmd(
|
||||
[constants.IP_BIN, "addr", "add", str(addr), "dev", self.brname]
|
||||
)
|
||||
self.net_client.create_address(self.brname, str(addr))
|
||||
|
||||
|
||||
class GreTapBridge(CoreNetwork):
|
||||
|
@ -654,6 +608,7 @@ class GreTapBridge(CoreNetwork):
|
|||
ttl=255,
|
||||
key=None,
|
||||
start=True,
|
||||
server=None,
|
||||
):
|
||||
"""
|
||||
Create a GreTapBridge instance.
|
||||
|
@ -667,10 +622,10 @@ class GreTapBridge(CoreNetwork):
|
|||
:param ttl: ttl value
|
||||
:param key: gre tap key
|
||||
:param bool start: start flag
|
||||
:param core.emulator.distributed.DistributedServer server: remote server node
|
||||
will run on, default is None for localhost
|
||||
"""
|
||||
CoreNetwork.__init__(
|
||||
self, session=session, _id=_id, name=name, policy=policy, start=False
|
||||
)
|
||||
CoreNetwork.__init__(self, session, _id, name, False, server, policy)
|
||||
self.grekey = key
|
||||
if self.grekey is None:
|
||||
self.grekey = self.session.id ^ self.id
|
||||
|
@ -726,7 +681,7 @@ class GreTapBridge(CoreNetwork):
|
|||
:return: nothing
|
||||
"""
|
||||
if self.gretap:
|
||||
raise ValueError("gretap already exists for %s" % self.name)
|
||||
raise ValueError(f"gretap already exists for {self.name}")
|
||||
remoteip = addrlist[0].split("/")[0]
|
||||
localip = None
|
||||
if len(addrlist) > 1:
|
||||
|
@ -769,11 +724,12 @@ class CtrlNet(CoreNetwork):
|
|||
def __init__(
|
||||
self,
|
||||
session,
|
||||
_id="ctrlnet",
|
||||
_id=None,
|
||||
name=None,
|
||||
prefix=None,
|
||||
hostid=None,
|
||||
start=True,
|
||||
server=None,
|
||||
assign_address=True,
|
||||
updown_script=None,
|
||||
serverintf=None,
|
||||
|
@ -787,6 +743,8 @@ class CtrlNet(CoreNetwork):
|
|||
:param prefix: control network ipv4 prefix
|
||||
:param hostid: host id
|
||||
:param bool start: start flag
|
||||
:param core.emulator.distributed.DistributedServer server: remote server node
|
||||
will run on, default is None for localhost
|
||||
:param str assign_address: assigned address
|
||||
:param str updown_script: updown script
|
||||
:param serverintf: server interface
|
||||
|
@ -797,7 +755,26 @@ class CtrlNet(CoreNetwork):
|
|||
self.assign_address = assign_address
|
||||
self.updown_script = updown_script
|
||||
self.serverintf = serverintf
|
||||
CoreNetwork.__init__(self, session, _id=_id, name=name, start=start)
|
||||
CoreNetwork.__init__(self, session, _id, name, start, server)
|
||||
|
||||
def add_addresses(self, address):
|
||||
"""
|
||||
Add addresses used for created control networks.
|
||||
|
||||
:param core.nodes.ipaddress.IpAddress address: starting address to use
|
||||
:return: nothing
|
||||
"""
|
||||
use_ovs = self.session.options.get_config("ovs") == "True"
|
||||
current = f"{address}/{self.prefix.prefixlen}"
|
||||
net_client = get_net_client(use_ovs, utils.cmd)
|
||||
net_client.create_address(self.brname, current)
|
||||
servers = self.session.distributed.servers
|
||||
for name in servers:
|
||||
server = servers[name]
|
||||
address -= 1
|
||||
current = f"{address}/{self.prefix.prefixlen}"
|
||||
net_client = get_net_client(use_ovs, server.remote_cmd)
|
||||
net_client.create_address(self.brname, current)
|
||||
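add_addresses gives the local host the starting address and walks one address down for each distributed server. A small illustration with an invented /24 prefix and two servers:

prefixlen = 24
host_octet = 254  # e.g. the host address picked from prefix.max_addr()
servers = ["core2", "core3"]
assignments = {"localhost": f"172.16.0.{host_octet}/{prefixlen}"}
for name in servers:
    host_octet -= 1
    assignments[name] = f"172.16.0.{host_octet}/{prefixlen}"
# {'localhost': '172.16.0.254/24', 'core2': '172.16.0.253/24', 'core3': '172.16.0.252/24'}
print(assignments)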
|
||||
def startup(self):
|
||||
"""
|
||||
|
@ -807,21 +784,18 @@ class CtrlNet(CoreNetwork):
|
|||
:raises CoreCommandError: when there is a command exception
|
||||
"""
|
||||
if self.net_client.existing_bridges(self.id):
|
||||
raise CoreError("old bridges exist for node: %s" % self.id)
|
||||
raise CoreError(f"old bridges exist for node: {self.id}")
|
||||
|
||||
CoreNetwork.startup(self)
|
||||
|
||||
if self.hostid:
|
||||
addr = self.prefix.addr(self.hostid)
|
||||
else:
|
||||
addr = self.prefix.max_addr()
|
||||
|
||||
logging.info("added control network bridge: %s %s", self.brname, self.prefix)
|
||||
|
||||
if self.assign_address:
|
||||
addrlist = ["%s/%s" % (addr, self.prefix.prefixlen)]
|
||||
self.addrconfig(addrlist=addrlist)
|
||||
logging.info("address %s", addr)
|
||||
if self.hostid and self.assign_address:
|
||||
address = self.prefix.addr(self.hostid)
|
||||
self.add_addresses(address)
|
||||
elif self.assign_address:
|
||||
address = self.prefix.max_addr()
|
||||
self.add_addresses(address)
|
||||
|
||||
if self.updown_script:
|
||||
logging.info(
|
||||
|
@ -829,7 +803,7 @@ class CtrlNet(CoreNetwork):
|
|||
self.brname,
|
||||
self.updown_script,
|
||||
)
|
||||
utils.check_cmd([self.updown_script, self.brname, "startup"])
|
||||
self.host_cmd(f"{self.updown_script} {self.brname} startup")
|
||||
|
||||
if self.serverintf:
|
||||
self.net_client.create_interface(self.brname, self.serverintf)
|
||||
|
@ -857,7 +831,7 @@ class CtrlNet(CoreNetwork):
|
|||
self.brname,
|
||||
self.updown_script,
|
||||
)
|
||||
utils.check_cmd([self.updown_script, self.brname, "shutdown"])
|
||||
self.host_cmd(f"{self.updown_script} {self.brname} shutdown")
|
||||
except CoreCommandError:
|
||||
logging.exception("error issuing shutdown script shutdown")
|
||||
|
||||
|
@ -1033,7 +1007,7 @@ class HubNode(CoreNetwork):
|
|||
policy = "ACCEPT"
|
||||
type = "hub"
|
||||
|
||||
def __init__(self, session, _id=None, name=None, start=True):
|
||||
def __init__(self, session, _id=None, name=None, start=True, server=None):
|
||||
"""
|
||||
Creates a HubNode instance.
|
||||
|
||||
|
@ -1041,9 +1015,11 @@ class HubNode(CoreNetwork):
|
|||
:param int _id: node id
|
||||
:param str name: node name
|
||||
:param bool start: start flag
|
||||
:param core.emulator.distributed.DistributedServer server: remote server node
|
||||
will run on, default is None for localhost
|
||||
:raises CoreCommandError: when there is a command exception
|
||||
"""
|
||||
CoreNetwork.__init__(self, session, _id, name, start)
|
||||
CoreNetwork.__init__(self, session, _id, name, start, server)
|
||||
|
||||
# TODO: move to startup method
|
||||
if start:
|
||||
|
@ -1060,7 +1036,9 @@ class WlanNode(CoreNetwork):
|
|||
policy = "DROP"
|
||||
type = "wlan"
|
||||
|
||||
def __init__(self, session, _id=None, name=None, start=True, policy=None):
|
||||
def __init__(
|
||||
self, session, _id=None, name=None, start=True, server=None, policy=None
|
||||
):
|
||||
"""
|
||||
Create a WlanNode instance.
|
||||
|
||||
|
@ -1068,9 +1046,11 @@ class WlanNode(CoreNetwork):
|
|||
:param int _id: node id
|
||||
:param str name: node name
|
||||
:param bool start: start flag
|
||||
:param core.emulator.distributed.DistributedServer server: remote server node
|
||||
will run on, default is None for localhost
|
||||
:param policy: wlan policy
|
||||
"""
|
||||
CoreNetwork.__init__(self, session, _id, name, start, policy)
|
||||
CoreNetwork.__init__(self, session, _id, name, start, server, policy)
|
||||
# wireless model such as basic range
|
||||
self.model = None
|
||||
# mobility model such as scripted
|
||||
|
|
|
@ -4,19 +4,24 @@ PhysicalNode class for including real systems in the emulated network.
|
|||
|
||||
import logging
|
||||
import os
|
||||
import subprocess
|
||||
import threading
|
||||
|
||||
from core import CoreCommandError, constants, utils
|
||||
from core import utils
|
||||
from core.constants import MOUNT_BIN, UMOUNT_BIN
|
||||
from core.emulator.enumerations import NodeTypes
|
||||
from core.errors import CoreCommandError, CoreError
|
||||
from core.nodes.base import CoreNodeBase
|
||||
from core.nodes.interface import CoreInterface
|
||||
from core.nodes.network import CoreNetwork, GreTap
|
||||
|
||||
|
||||
class PhysicalNode(CoreNodeBase):
|
||||
def __init__(self, session, _id=None, name=None, nodedir=None, start=True):
|
||||
CoreNodeBase.__init__(self, session, _id, name, start=start)
|
||||
def __init__(
|
||||
self, session, _id=None, name=None, nodedir=None, start=True, server=None
|
||||
):
|
||||
CoreNodeBase.__init__(self, session, _id, name, start, server)
|
||||
if not self.server:
|
||||
raise CoreError("physical nodes must be assigned to a remote server")
|
||||
self.nodedir = nodedir
|
||||
self.up = start
|
||||
self.lock = threading.RLock()
|
||||
|
@ -51,119 +56,52 @@ class PhysicalNode(CoreNodeBase):
|
|||
"""
|
||||
return sh
|
||||
|
||||
def cmd(self, args, wait=True):
|
||||
"""
|
||||
Runs shell command on node, with option to not wait for a result.
|
||||
|
||||
:param list[str]|str args: command to run
|
||||
:param bool wait: wait for command to exit, defaults to True
|
||||
:return: exit status for command
|
||||
:rtype: int
|
||||
"""
|
||||
os.chdir(self.nodedir)
|
||||
status = utils.cmd(args, wait)
|
||||
return status
|
||||
|
||||
def cmd_output(self, args):
|
||||
"""
|
||||
Runs shell command on node and get exit status and output.
|
||||
|
||||
:param list[str]|str args: command to run
|
||||
:return: exit status and combined stdout and stderr
|
||||
:rtype: tuple[int, str]
|
||||
"""
|
||||
os.chdir(self.nodedir)
|
||||
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
|
||||
stdout, _ = p.communicate()
|
||||
status = p.wait()
|
||||
return status, stdout.strip()
|
||||
|
||||
def check_cmd(self, args):
|
||||
"""
|
||||
Runs shell command on node.
|
||||
|
||||
:param list[str]|str args: command to run
|
||||
:return: combined stdout and stderr
|
||||
:rtype: str
|
||||
:raises CoreCommandError: when a non-zero exit status occurs
|
||||
"""
|
||||
status, output = self.cmd_output(args)
|
||||
if status:
|
||||
raise CoreCommandError(status, args, output)
|
||||
return output.strip()
|
||||
|
||||
def shcmd(self, cmdstr, sh="/bin/sh"):
|
||||
return self.cmd([sh, "-c", cmdstr])
|
||||
|
||||
def sethwaddr(self, ifindex, addr):
|
||||
"""
|
||||
Set hardware address for an interface.
|
||||
"""
|
||||
self._netif[ifindex].sethwaddr(addr)
|
||||
ifname = self.ifname(ifindex)
|
||||
interface = self._netif[ifindex]
|
||||
interface.sethwaddr(addr)
|
||||
if self.up:
|
||||
self.check_cmd(
|
||||
[constants.IP_BIN, "link", "set", "dev", ifname, "address", str(addr)]
|
||||
)
|
||||
self.net_client.device_mac(interface.name, str(addr))
|
||||
|
||||
def addaddr(self, ifindex, addr):
|
||||
"""
|
||||
Add an address to an interface.
|
||||
"""
|
||||
interface = self._netif[ifindex]
|
||||
if self.up:
|
||||
self.check_cmd(
|
||||
[
|
||||
constants.IP_BIN,
|
||||
"addr",
|
||||
"add",
|
||||
str(addr),
|
||||
"dev",
|
||||
self.ifname(ifindex),
|
||||
]
|
||||
)
|
||||
|
||||
self._netif[ifindex].addaddr(addr)
|
||||
self.net_client.create_address(interface.name, str(addr))
|
||||
interface.addaddr(addr)
|
||||
|
||||
def deladdr(self, ifindex, addr):
|
||||
"""
|
||||
Delete an address from an interface.
|
||||
"""
|
||||
interface = self._netif[ifindex]
|
||||
|
||||
try:
|
||||
self._netif[ifindex].deladdr(addr)
|
||||
interface.deladdr(addr)
|
||||
except ValueError:
|
||||
logging.exception("trying to delete unknown address: %s", addr)
|
||||
|
||||
if self.up:
|
||||
self.check_cmd(
|
||||
[
|
||||
constants.IP_BIN,
|
||||
"addr",
|
||||
"del",
|
||||
str(addr),
|
||||
"dev",
|
||||
self.ifname(ifindex),
|
||||
]
|
||||
)
|
||||
self.net_client.delete_address(interface.name, str(addr))
|
||||
|
||||
def adoptnetif(self, netif, ifindex, hwaddr, addrlist):
|
||||
"""
|
||||
The broker builds a GreTap tunnel device to this physical node.
|
||||
When a link message is received linking this node to another part of
|
||||
the emulation, no new interface is created; instead, adopt the
|
||||
GreTap netif as the node interface.
|
||||
"""
|
||||
netif.name = "gt%d" % ifindex
|
||||
netif.name = f"gt{ifindex}"
|
||||
netif.node = self
|
||||
self.addnetif(netif, ifindex)
|
||||
|
||||
# use a more reasonable name, e.g. "gt0" instead of "gt.56286.150"
|
||||
if self.up:
|
||||
self.check_cmd(
|
||||
[constants.IP_BIN, "link", "set", "dev", netif.localname, "down"]
|
||||
)
|
||||
self.check_cmd(
|
||||
[constants.IP_BIN, "link", "set", netif.localname, "name", netif.name]
|
||||
)
|
||||
self.net_client.device_down(netif.localname)
|
||||
self.net_client.device_name(netif.localname, netif.name)
|
||||
|
||||
netif.localname = netif.name
|
||||
|
||||
|
@ -174,9 +112,7 @@ class PhysicalNode(CoreNodeBase):
|
|||
self.addaddr(ifindex, addr)
|
||||
|
||||
if self.up:
|
||||
self.check_cmd(
|
||||
[constants.IP_BIN, "link", "set", "dev", netif.localname, "up"]
|
||||
)
|
||||
self.net_client.device_up(netif.localname)
|
||||
|
||||
def linkconfig(
|
||||
self,
|
||||
|
@ -224,30 +160,24 @@ class PhysicalNode(CoreNodeBase):
|
|||
if ifindex is None:
|
||||
ifindex = self.newifindex()
|
||||
|
||||
if ifname is None:
|
||||
ifname = f"gt{ifindex}"
|
||||
|
||||
if self.up:
|
||||
# this is reached when this node is linked to a network node
|
||||
# tunnel to net not built yet, so build it now and adopt it
|
||||
gt = self.session.broker.addnettunnel(net.id)
|
||||
if gt is None or len(gt) != 1:
|
||||
raise ValueError(
|
||||
"error building tunnel from adding a new network interface: %s" % gt
|
||||
)
|
||||
gt = gt[0]
|
||||
net.detach(gt)
|
||||
self.adoptnetif(gt, ifindex, hwaddr, addrlist)
|
||||
_, remote_tap = self.session.distributed.create_gre_tunnel(net, self.server)
|
||||
self.adoptnetif(remote_tap, ifindex, hwaddr, addrlist)
|
||||
return ifindex
|
||||
else:
|
||||
# this is reached when configuring services (self.up=False)
|
||||
netif = GreTap(node=self, name=ifname, session=self.session, start=False)
|
||||
self.adoptnetif(netif, ifindex, hwaddr, addrlist)
|
||||
return ifindex
|
||||
|
||||
# this is reached when configuring services (self.up=False)
|
||||
if ifname is None:
|
||||
ifname = "gt%d" % ifindex
|
||||
|
||||
netif = GreTap(node=self, name=ifname, session=self.session, start=False)
|
||||
self.adoptnetif(netif, ifindex, hwaddr, addrlist)
|
||||
return ifindex
|
||||
|
||||
def privatedir(self, path):
|
||||
if path[0] != "/":
|
||||
raise ValueError("path not fully qualified: %s" % path)
|
||||
raise ValueError(f"path not fully qualified: {path}")
|
||||
hostpath = os.path.join(
|
||||
self.nodedir, os.path.normpath(path).strip("/").replace("/", ".")
|
||||
)
|
||||
|
@ -258,13 +188,13 @@ class PhysicalNode(CoreNodeBase):
|
|||
source = os.path.abspath(source)
|
||||
logging.info("mounting %s at %s", source, target)
|
||||
os.makedirs(target)
|
||||
self.check_cmd([constants.MOUNT_BIN, "--bind", source, target])
|
||||
self.host_cmd(f"{MOUNT_BIN} --bind {source} {target}", cwd=self.nodedir)
|
||||
self._mounts.append((source, target))
|
||||
|
||||
def umount(self, target):
|
||||
logging.info("unmounting '%s'" % target)
|
||||
logging.info("unmounting '%s'", target)
|
||||
try:
|
||||
self.check_cmd([constants.UMOUNT_BIN, "-l", target])
|
||||
self.host_cmd(f"{UMOUNT_BIN} -l {target}", cwd=self.nodedir)
|
||||
except CoreCommandError:
|
||||
logging.exception("unmounting failed for %s", target)
|
||||
|
||||
|
@ -290,6 +220,9 @@ class PhysicalNode(CoreNodeBase):
|
|||
os.chmod(node_file.name, mode)
|
||||
logging.info("created nodefile: '%s'; mode: 0%o", node_file.name, mode)
|
||||
|
||||
def cmd(self, args, wait=True):
|
||||
return self.host_cmd(args, wait=wait)
|
||||
|
||||
|
||||
class Rj45Node(CoreNodeBase, CoreInterface):
|
||||
"""
|
||||
|
@ -300,7 +233,7 @@ class Rj45Node(CoreNodeBase, CoreInterface):
|
|||
apitype = NodeTypes.RJ45.value
|
||||
type = "rj45"
|
||||
|
||||
def __init__(self, session, _id=None, name=None, mtu=1500, start=True):
|
||||
def __init__(self, session, _id=None, name=None, mtu=1500, start=True, server=None):
|
||||
"""
|
||||
Create an RJ45Node instance.
|
||||
|
||||
|
@ -309,10 +242,11 @@ class Rj45Node(CoreNodeBase, CoreInterface):
|
|||
:param str name: node name
|
||||
:param mtu: rj45 mtu
|
||||
:param bool start: start flag
|
||||
:return:
|
||||
:param core.emulator.distributed.DistributedServer server: remote server node
|
||||
will run on, default is None for localhost
|
||||
"""
|
||||
CoreNodeBase.__init__(self, session, _id, name, start=start)
|
||||
CoreInterface.__init__(self, node=self, name=name, mtu=mtu)
|
||||
CoreNodeBase.__init__(self, session, _id, name, start, server)
|
||||
CoreInterface.__init__(self, session, self, name, mtu, server)
|
||||
self.up = False
|
||||
self.lock = threading.RLock()
|
||||
self.ifindex = None
|
||||
|
@ -334,7 +268,7 @@ class Rj45Node(CoreNodeBase, CoreInterface):
|
|||
"""
|
||||
# interface will also be marked up during net.attach()
|
||||
self.savestate()
|
||||
utils.check_cmd([constants.IP_BIN, "link", "set", self.localname, "up"])
|
||||
self.net_client.device_up(self.localname)
|
||||
self.up = True
|
||||
|
||||
def shutdown(self):
|
||||
|
@ -348,18 +282,17 @@ class Rj45Node(CoreNodeBase, CoreInterface):
|
|||
return
|
||||
|
||||
try:
|
||||
utils.check_cmd([constants.IP_BIN, "link", "set", self.localname, "down"])
|
||||
utils.check_cmd([constants.IP_BIN, "addr", "flush", "dev", self.localname])
|
||||
utils.check_cmd(
|
||||
[constants.TC_BIN, "qdisc", "del", "dev", self.localname, "root"]
|
||||
)
|
||||
self.net_client.device_down(self.localname)
|
||||
self.net_client.device_flush(self.localname)
|
||||
self.net_client.delete_tc(self.localname)
|
||||
except CoreCommandError:
|
||||
logging.exception("error shutting down")
|
||||
|
||||
self.up = False
|
||||
self.restorestate()
|
||||
|
||||
# TODO: issue in that both classes inherited from provide the same method with different signatures
|
||||
# TODO: issue in that both classes inherited from provide the same method with
|
||||
# different signatures
|
||||
def attachnet(self, net):
|
||||
"""
|
||||
Attach a network.
|
||||
|
@ -369,7 +302,8 @@ class Rj45Node(CoreNodeBase, CoreInterface):
|
|||
"""
|
||||
CoreInterface.attachnet(self, net)
|
||||
|
||||
# TODO: issue in that both classes inherited from provide the same method with different signatures
|
||||
# TODO: issue in that both classes inherited from provide the same method with
|
||||
# different signatures
|
||||
def detachnet(self):
|
||||
"""
|
||||
Detach a network.
|
||||
|
@ -429,7 +363,7 @@ class Rj45Node(CoreNodeBase, CoreInterface):
|
|||
if ifindex == self.ifindex:
|
||||
self.shutdown()
|
||||
else:
|
||||
raise ValueError("ifindex %s does not exist" % ifindex)
|
||||
raise ValueError(f"ifindex {ifindex} does not exist")
|
||||
|
||||
def netif(self, ifindex, net=None):
|
||||
"""
|
||||
|
@ -475,9 +409,7 @@ class Rj45Node(CoreNodeBase, CoreInterface):
|
|||
:raises CoreCommandError: when there is a command exception
|
||||
"""
|
||||
if self.up:
|
||||
utils.check_cmd(
|
||||
[constants.IP_BIN, "addr", "add", str(addr), "dev", self.name]
|
||||
)
|
||||
self.net_client.create_address(self.name, str(addr))
|
||||
|
||||
CoreInterface.addaddr(self, addr)
|
||||
|
||||
|
@ -490,9 +422,7 @@ class Rj45Node(CoreNodeBase, CoreInterface):
|
|||
:raises CoreCommandError: when there is a command exception
|
||||
"""
|
||||
if self.up:
|
||||
utils.check_cmd(
|
||||
[constants.IP_BIN, "addr", "del", str(addr), "dev", self.name]
|
||||
)
|
||||
self.net_client.delete_address(self.name, str(addr))
|
||||
|
||||
CoreInterface.deladdr(self, addr)
|
||||
|
||||
|
@ -506,14 +436,13 @@ class Rj45Node(CoreNodeBase, CoreInterface):
|
|||
"""
|
||||
self.old_up = False
|
||||
self.old_addrs = []
|
||||
args = [constants.IP_BIN, "addr", "show", "dev", self.localname]
|
||||
output = utils.check_cmd(args)
|
||||
output = self.net_client.device_show(self.localname)
|
||||
for line in output.split("\n"):
|
||||
items = line.split()
|
||||
if len(items) < 2:
|
||||
continue
|
||||
|
||||
if items[1] == "%s:" % self.localname:
|
||||
if items[1] == f"{self.localname}:":
|
||||
flags = items[2][1:-1].split(",")
|
||||
if "UP" in flags:
|
||||
self.old_up = True
|
||||
|
@ -533,25 +462,14 @@ class Rj45Node(CoreNodeBase, CoreInterface):
|
|||
"""
|
||||
for addr in self.old_addrs:
|
||||
if addr[1] is None:
|
||||
utils.check_cmd(
|
||||
[constants.IP_BIN, "addr", "add", addr[0], "dev", self.localname]
|
||||
)
|
||||
self.net_client.create_address(self.localname, addr[0])
|
||||
else:
|
||||
utils.check_cmd(
|
||||
[
|
||||
constants.IP_BIN,
|
||||
"addr",
|
||||
"add",
|
||||
addr[0],
|
||||
"brd",
|
||||
addr[1],
|
||||
"dev",
|
||||
self.localname,
|
||||
]
|
||||
self.net_client.create_address(
|
||||
self.localname, addr[0], broadcast=addr[1]
|
||||
)
|
||||
|
||||
if self.old_up:
|
||||
utils.check_cmd([constants.IP_BIN, "link", "set", self.localname, "up"])
|
||||
self.net_client.device_up(self.localname)
|
||||
|
||||
def setposition(self, x=None, y=None, z=None):
|
||||
"""
|
||||
|
@ -567,38 +485,6 @@ class Rj45Node(CoreNodeBase, CoreInterface):
|
|||
CoreInterface.setposition(self, x, y, z)
|
||||
return result
|
||||
|
||||
def check_cmd(self, args):
|
||||
"""
|
||||
Runs shell command on node.
|
||||
|
||||
:param list[str]|str args: command to run
|
||||
:return: exist status and combined stdout and stderr
|
||||
:rtype: tuple[int, str]
|
||||
:raises CoreCommandError: when a non-zero exit status occurs
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def cmd(self, args, wait=True):
|
||||
"""
|
||||
Runs shell command on node, with option to not wait for a result.
|
||||
|
||||
:param list[str]|str args: command to run
|
||||
:param bool wait: wait for command to exit, defaults to True
|
||||
:return: exit status for command
|
||||
:rtype: int
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def cmd_output(self, args):
|
||||
"""
|
||||
Runs shell command on node and get exit status and output.
|
||||
|
||||
:param list[str]|str args: command to run
|
||||
:return: exit status and combined stdout and stderr
|
||||
:rtype: tuple[int, str]
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def termcmdstring(self, sh):
|
||||
"""
|
||||
Create a terminal command string.
|
||||
|
|
|
@ -4,11 +4,11 @@ sdt.py: Scripted Display Tool (SDT3D) helper
|
|||
|
||||
import logging
|
||||
import socket
|
||||
from urllib.parse import urlparse
|
||||
|
||||
from future.moves.urllib.parse import urlparse
|
||||
|
||||
from core import CoreError, constants
|
||||
from core.emane.nodes import EmaneNode
|
||||
from core import constants
|
||||
from core.constants import CORE_DATA_DIR
|
||||
from core.emane.nodes import EmaneNet
|
||||
from core.emulator.enumerations import (
|
||||
EventTypes,
|
||||
LinkTlvs,
|
||||
|
@ -18,6 +18,7 @@ from core.emulator.enumerations import (
|
|||
NodeTlvs,
|
||||
NodeTypes,
|
||||
)
|
||||
from core.errors import CoreError
|
||||
from core.nodes.base import CoreNetworkBase, NodeBase
|
||||
from core.nodes.network import WlanNode
|
||||
|
||||
|
@ -75,7 +76,6 @@ class Sdt(object):
|
|||
# node information for remote nodes not in session._objs
|
||||
# local nodes also appear here since their obj may not exist yet
|
||||
self.remotes = {}
|
||||
session.broker.handlers.add(self.handle_distributed)
|
||||
|
||||
# add handler for node updates
|
||||
self.session.node_handlers.append(self.handle_node_update)
|
||||
|
@ -162,7 +162,7 @@ class Sdt(object):
|
|||
return False
|
||||
|
||||
self.seturl()
|
||||
logging.info("connecting to SDT at %s://%s" % (self.protocol, self.address))
|
||||
logging.info("connecting to SDT at %s://%s", self.protocol, self.address)
|
||||
if self.sock is None:
|
||||
try:
|
||||
if self.protocol.lower() == "udp":
|
||||
|
@ -193,14 +193,14 @@ class Sdt(object):
|
|||
:return: initialize command status
|
||||
:rtype: bool
|
||||
"""
|
||||
if not self.cmd('path "%s/icons/normal"' % constants.CORE_DATA_DIR):
|
||||
if not self.cmd(f'path "{CORE_DATA_DIR}/icons/normal"'):
|
||||
return False
|
||||
# send node type to icon mappings
|
||||
for node_type, icon in self.DEFAULT_SPRITES:
|
||||
if not self.cmd("sprite %s image %s" % (node_type, icon)):
|
||||
if not self.cmd(f"sprite {node_type} image {icon}"):
|
||||
return False
|
||||
lat, long = self.session.location.refgeo[:2]
|
||||
return self.cmd("flyto %.6f,%.6f,%d" % (long, lat, self.DEFAULT_ALT))
|
||||
return self.cmd(f"flyto {long:.6f},{lat:.6f},{self.DEFAULT_ALT}")
|
||||
|
||||
def disconnect(self):
|
||||
"""
|
||||
|
@ -241,8 +241,8 @@ class Sdt(object):
|
|||
if self.sock is None:
|
||||
return False
|
||||
try:
|
||||
logging.info("sdt: %s" % cmdstr)
|
||||
self.sock.sendall("%s\n" % cmdstr)
|
||||
logging.info("sdt: %s", cmdstr)
|
||||
self.sock.sendall(f"{cmdstr}\n")
|
||||
return True
|
||||
except IOError:
|
||||
logging.exception("SDT connection error")
|
||||
|
@ -267,23 +267,21 @@ class Sdt(object):
|
|||
if not self.connect():
|
||||
return
|
||||
if flags & MessageFlags.DELETE.value:
|
||||
self.cmd("delete node,%d" % nodenum)
|
||||
self.cmd(f"delete node,{nodenum}")
|
||||
return
|
||||
if x is None or y is None:
|
||||
return
|
||||
lat, lon, alt = self.session.location.getgeo(x, y, z)
|
||||
pos = "pos %.6f,%.6f,%.6f" % (lon, lat, alt)
|
||||
pos = f"pos {lon:.6f},{lat:.6f},{alt:.6f}"
|
||||
if flags & MessageFlags.ADD.value:
|
||||
if icon is not None:
|
||||
node_type = name
|
||||
icon = icon.replace("$CORE_DATA_DIR", constants.CORE_DATA_DIR)
|
||||
icon = icon.replace("$CORE_CONF_DIR", constants.CORE_CONF_DIR)
|
||||
self.cmd("sprite %s image %s" % (type, icon))
|
||||
self.cmd(
|
||||
'node %d type %s label on,"%s" %s' % (nodenum, node_type, name, pos)
|
||||
)
|
||||
self.cmd(f"sprite {node_type} image {icon}")
|
||||
self.cmd(f'node {nodenum} type {node_type} label on,"{name}" {pos}')
|
||||
else:
|
||||
self.cmd("node %d %s" % (nodenum, pos))
|
||||
self.cmd(f"node {nodenum} {pos}")
|
||||
|
||||
def updatenodegeo(self, nodenum, lat, long, alt):
|
||||
"""
|
||||
|
@ -299,8 +297,8 @@ class Sdt(object):
|
|||
# TODO: received Node Message with lat/long/alt.
|
||||
if not self.connect():
|
||||
return
|
||||
pos = "pos %.6f,%.6f,%.6f" % (long, lat, alt)
|
||||
self.cmd("node %d %s" % (nodenum, pos))
|
||||
pos = f"pos {long:.6f},{lat:.6f},{alt:.6f}"
|
||||
self.cmd(f"node {nodenum} {pos}")
|
||||
|
||||
def updatelink(self, node1num, node2num, flags, wireless=False):
|
||||
"""
|
||||
|
@ -317,14 +315,13 @@ class Sdt(object):
|
|||
if not self.connect():
|
||||
return
|
||||
if flags & MessageFlags.DELETE.value:
|
||||
self.cmd("delete link,%s,%s" % (node1num, node2num))
|
||||
self.cmd(f"delete link,{node1num},{node2num}")
|
||||
elif flags & MessageFlags.ADD.value:
|
||||
attr = ""
|
||||
if wireless:
|
||||
attr = " line green,2"
|
||||
else:
|
||||
attr = " line red,2"
|
||||
self.cmd("link %s,%s%s" % (node1num, node2num, attr))
|
||||
self.cmd(f"link {node1num},{node2num}{attr}")
|
||||
|
||||
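The Sdt helper drives SDT3D with small plain-text commands; a few representative lines it would emit, with made-up ids, names, and coordinates:

nodenum, node_type, name = 1, "router", "n1"
lon, lat, alt = -77.123456, 39.123456, 2.0
pos = f"pos {lon:.6f},{lat:.6f},{alt:.6f}"
print(f'node {nodenum} type {node_type} label on,"{name}" {pos}')
print("link 1,2 line red,2")    # wired links are drawn red
print("link 1,3 line green,2")  # wireless links are drawn green
print("delete node,1")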
def sendobjs(self):
|
||||
"""
|
||||
|
@ -365,7 +362,7 @@ class Sdt(object):
|
|||
for net in nets:
|
||||
all_links = net.all_link_data(flags=MessageFlags.ADD.value)
|
||||
for link_data in all_links:
|
||||
is_wireless = isinstance(net, (WlanNode, EmaneNode))
|
||||
is_wireless = isinstance(net, (WlanNode, EmaneNet))
|
||||
wireless_link = link_data.message_type == LinkTypes.WIRELESS.value
|
||||
if is_wireless and link_data.node1_id == net.id:
|
||||
continue
|
||||
|
@ -493,7 +490,7 @@ class Sdt(object):
|
|||
|
||||
def wlancheck(self, nodenum):
|
||||
"""
|
||||
Helper returns True if a node number corresponds to a WlanNode or EmaneNode.
|
||||
Helper returns True if a node number corresponds to a WLAN or EMANE node.
|
||||
|
||||
:param int nodenum: node id to check
|
||||
:return: True if node is wlan or emane, False otherwise
|
||||
|
@ -508,6 +505,6 @@ class Sdt(object):
|
|||
n = self.session.get_node(nodenum)
|
||||
except CoreError:
|
||||
return False
|
||||
if isinstance(n, (WlanNode, EmaneNode)):
|
||||
if isinstance(n, (WlanNode, EmaneNet)):
|
||||
return True
|
||||
return False
|
||||
|
|
|
@ -12,10 +12,11 @@ import logging
|
|||
import time
|
||||
from multiprocessing.pool import ThreadPool
|
||||
|
||||
from core import CoreCommandError, utils
|
||||
from core import utils
|
||||
from core.constants import which
|
||||
from core.emulator.data import FileData
|
||||
from core.emulator.enumerations import MessageFlags, RegisterTlvs
|
||||
from core.errors import CoreCommandError
|
||||
|
||||
|
||||
class ServiceBootError(Exception):
|
||||
|
@ -248,6 +249,7 @@ class ServiceManager(object):
|
|||
|
||||
:param CoreService service: service to add
|
||||
:return: nothing
|
||||
:raises ValueError: when service cannot be loaded
|
||||
"""
|
||||
name = service.name
|
||||
logging.debug("loading service: class(%s) name(%s)", service.__name__, name)
|
||||
|
@ -258,13 +260,14 @@ class ServiceManager(object):
|
|||
|
||||
# validate dependent executables are present
|
||||
for executable in service.executables:
|
||||
if not which(executable):
|
||||
logging.debug(
|
||||
"service(%s) missing executable: %s", service.name, executable
|
||||
)
|
||||
raise ValueError(
|
||||
"service(%s) missing executable: %s" % (service.name, executable)
|
||||
)
|
||||
which(executable, required=True)
|
||||
|
||||
# validate service on load succeeds
|
||||
try:
|
||||
service.on_load()
|
||||
except Exception as e:
|
||||
logging.exception("error during service(%s) on load", service.name)
|
||||
raise ValueError(e)
|
||||
|
||||
# make service available
|
||||
cls.services[name] = service
|
||||
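For reference, the object ServiceManager.add validates is a CoreService subclass declaring a name, any required executables, and its startup/shutdown commands. A minimal hypothetical example, not one of the bundled services:

from core.services.coreservices import CoreService

class EchoService(CoreService):
    # Invented service, only to illustrate the fields add() checks.
    name = "Echo"
    executables = ("echo",)
    startup = ("echo starting",)
    shutdown = ("echo stopping",)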
|
@ -294,13 +297,12 @@ class ServiceManager(object):
|
|||
for service in services:
|
||||
if not service.name:
|
||||
continue
|
||||
service.on_load()
|
||||
|
||||
try:
|
||||
cls.add(service)
|
||||
except ValueError as e:
|
||||
service_errors.append(service.name)
|
||||
logging.debug("not loading service: %s", e)
|
||||
logging.debug("not loading service(%s): %s", service.name, e)
|
||||
return service_errors
|
||||
|
||||
|
||||
|
@ -596,7 +598,7 @@ class CoreServices(object):
|
|||
for cmd in cmds:
|
||||
logging.debug("validating service(%s) using: %s", service.name, cmd)
|
||||
try:
|
||||
node.check_cmd(cmd)
|
||||
node.cmd(cmd)
|
||||
except CoreCommandError as e:
|
||||
logging.debug(
|
||||
"node(%s) service(%s) validate failed", node.name, service.name
|
||||
|
@ -629,7 +631,7 @@ class CoreServices(object):
|
|||
status = 0
|
||||
for args in service.shutdown:
|
||||
try:
|
||||
node.check_cmd(args)
|
||||
node.cmd(args)
|
||||
except CoreCommandError:
|
||||
logging.exception("error running stop command %s", args)
|
||||
status = -1
|
||||
|
@ -727,10 +729,7 @@ class CoreServices(object):
|
|||
status = 0
|
||||
for cmd in cmds:
|
||||
try:
|
||||
if wait:
|
||||
node.check_cmd(cmd)
|
||||
else:
|
||||
node.cmd(cmd, wait=False)
|
||||
node.cmd(cmd, wait)
|
||||
except CoreCommandError:
|
||||
logging.exception("error starting command")
|
||||
status = -1
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
from core.emane.nodes import EmaneNode
|
||||
from core.emane.nodes import EmaneNet
|
||||
from core.services.coreservices import CoreService
|
||||
from core.xml import emanexml
|
||||
|
||||
|
@ -21,7 +21,7 @@ class EmaneTransportService(CoreService):
|
|||
transport_commands = []
|
||||
for interface in node.netifs(sort=True):
|
||||
network_node = node.session.get_node(interface.net.id)
|
||||
if isinstance(network_node, EmaneNode):
|
||||
if isinstance(network_node, EmaneNet):
|
||||
config = node.session.emane.get_configs(
|
||||
network_node.id, network_node.model.name
|
||||
)
|
||||
|
|
|
@ -92,7 +92,7 @@ class NrlNhdp(NrlService):
|
|||
cmd += " -flooding ecds"
|
||||
cmd += " -smfClient %s_smf" % node.name
|
||||
|
||||
netifs = filter(lambda x: not getattr(x, "control", False), node.netifs())
|
||||
netifs = list(filter(lambda x: not getattr(x, "control", False), node.netifs()))
|
||||
if len(netifs) > 0:
|
||||
interfacenames = map(lambda x: x.name, netifs)
|
||||
cmd += " -i "
|
||||
|
@ -126,7 +126,7 @@ class NrlSmf(NrlService):
|
|||
cmd = "nrlsmf instance %s_smf" % node.name
|
||||
|
||||
servicenames = map(lambda x: x.name, node.services)
|
||||
netifs = filter(lambda x: not getattr(x, "control", False), node.netifs())
|
||||
netifs = list(filter(lambda x: not getattr(x, "control", False), node.netifs()))
|
||||
if len(netifs) == 0:
|
||||
return ""
|
||||
|
||||
|
@ -216,7 +216,7 @@ class NrlOlsrv2(NrlService):
|
|||
|
||||
cmd += " -p olsr"
|
||||
|
||||
netifs = filter(lambda x: not getattr(x, "control", False), node.netifs())
|
||||
netifs = list(filter(lambda x: not getattr(x, "control", False), node.netifs()))
|
||||
if len(netifs) > 0:
|
||||
interfacenames = map(lambda x: x.name, netifs)
|
||||
cmd += " -i "
|
||||
|
@ -244,7 +244,7 @@ class OlsrOrg(NrlService):
|
|||
Generate the appropriate command-line based on node interfaces.
|
||||
"""
|
||||
cmd = cls.startup[0]
|
||||
netifs = filter(lambda x: not getattr(x, "control", False), node.netifs())
|
||||
netifs = list(filter(lambda x: not getattr(x, "control", False), node.netifs()))
|
||||
if len(netifs) > 0:
|
||||
interfacenames = map(lambda x: x.name, netifs)
|
||||
cmd += " -i "
|
||||
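The repeated list(filter(...)) changes above are a Python 3 fix: filter() now returns a lazy iterator, so the len(netifs) checks in these services would fail without the wrapper. A standalone illustration:

# Python 3: filter() is lazy, so len() on it raises TypeError.
names = ["eth0", "ctrl0", "eth1"]
lazy = filter(lambda name: not name.startswith("ctrl"), names)
# len(lazy)  # TypeError: object of type 'filter' has no len()
netifs = list(filter(lambda name: not name.startswith("ctrl"), names))
assert len(netifs) == 2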
|
|
|
@ -3,7 +3,7 @@ quagga.py: defines routing services provided by Quagga.
|
|||
"""
|
||||
|
||||
from core import constants
|
||||
from core.emane.nodes import EmaneNode
|
||||
from core.emane.nodes import EmaneNet
|
||||
from core.emulator.enumerations import LinkTypes
|
||||
from core.nodes import ipaddress
|
||||
from core.nodes.network import PtpNet, WlanNode
|
||||
|
@ -460,7 +460,7 @@ class Ospfv3mdr(Ospfv3):
|
|||
cfg = cls.mtucheck(ifc)
|
||||
# Uncomment the following line to use Address Family Translation for IPv4
|
||||
cfg += " ipv6 ospf6 instance-id 65\n"
|
||||
if ifc.net is not None and isinstance(ifc.net, (WlanNode, EmaneNode)):
|
||||
if ifc.net is not None and isinstance(ifc.net, (WlanNode, EmaneNet)):
|
||||
return (
|
||||
cfg
|
||||
+ """\
|
||||
|
|
|
@ -4,7 +4,8 @@ utility.py: defines miscellaneous utility services.
|
|||
|
||||
import os
|
||||
|
||||
from core import CoreCommandError, constants, utils
|
||||
from core import constants, utils
|
||||
from core.errors import CoreCommandError
|
||||
from core.nodes.ipaddress import Ipv4Prefix, Ipv6Prefix
|
||||
from core.services.coreservices import CoreService
|
||||
|
||||
|
@ -414,9 +415,11 @@ class HttpService(UtilService):
|
|||
Detect the apache2 version using the 'a2query' command.
|
||||
"""
|
||||
try:
|
||||
status, result = utils.cmd_output(["a2query", "-v"])
|
||||
except CoreCommandError:
|
||||
status = -1
|
||||
result = utils.cmd("a2query -v")
|
||||
status = 0
|
||||
except CoreCommandError as e:
|
||||
status = e.returncode
|
||||
result = e.stderr
|
||||
|
||||
if status == 0 and result[:3] == "2.4":
|
||||
return cls.APACHEVER24
|
||||
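The hunk above reflects the new command API: utils.cmd() returns stdout directly and signals failure by raising CoreCommandError, which carries the return code and stderr. A hedged restatement of that pattern as a standalone function (the function name is ours; the imports and attribute names are taken from the hunks above):

from core import utils
from core.errors import CoreCommandError

def apache_version_string():
    # returns the a2query output on success, None on failure
    try:
        result = utils.cmd("a2query -v")
        status = 0
    except CoreCommandError as e:
        status = e.returncode
        result = e.stderr
    return result if status == 0 else None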
|
|
|
@ -312,13 +312,6 @@ class XorpRipng(XorpService):
|
|||
continue
|
||||
cfg += "\tinterface %s {\n" % ifc.name
|
||||
cfg += "\t vif %s {\n" % ifc.name
|
||||
# for a in ifc.addrlist:
|
||||
# if a.find(":") < 0:
|
||||
# continue
|
||||
# addr = a.split("/")[0]
|
||||
# cfg += "\t\taddress %s {\n" % addr
|
||||
# cfg += "\t\t disable: false\n"
|
||||
# cfg += "\t\t}\n"
|
||||
cfg += "\t\taddress %s {\n" % ifc.hwaddr.tolinklocal()
|
||||
cfg += "\t\t disable: false\n"
|
||||
cfg += "\t\t}\n"
|
||||
|
|
|
@ -6,15 +6,15 @@ import fcntl
|
|||
import hashlib
|
||||
import importlib
|
||||
import inspect
|
||||
import json
|
||||
import logging
|
||||
import logging.config
|
||||
import os
|
||||
import shlex
|
||||
import subprocess
|
||||
import sys
|
||||
from subprocess import PIPE, STDOUT, Popen
|
||||
|
||||
from past.builtins import basestring
|
||||
|
||||
from core import CoreCommandError
|
||||
from core.errors import CoreCommandError
|
||||
|
||||
DEVNULL = open(os.devnull, "wb")
|
||||
|
||||
|
@ -109,17 +109,6 @@ def _is_class(module, member, clazz):
|
|||
return True
|
||||
|
||||
|
||||
def _is_exe(file_path):
|
||||
"""
|
||||
Check if a given file path exists and is an executable file.
|
||||
|
||||
:param str file_path: file path to check
|
||||
:return: True if the file is considered and executable file, False otherwise
|
||||
:rtype: bool
|
||||
"""
|
||||
return os.path.isfile(file_path) and os.access(file_path, os.X_OK)
|
||||
|
||||
|
||||
def close_onexec(fd):
|
||||
"""
|
||||
Close on execution of a shell process.
|
||||
|
@ -131,17 +120,26 @@ def close_onexec(fd):
|
|||
fcntl.fcntl(fd, fcntl.F_SETFD, fdflags | fcntl.FD_CLOEXEC)
|
||||
|
||||
|
||||
def check_executables(executables):
|
||||
def which(command, required):
|
||||
"""
|
||||
Check executables, verify they exist and are executable.
|
||||
Find location of desired executable within current PATH.
|
||||
|
||||
:param list[str] executables: executable to check
|
||||
:return: nothing
|
||||
:raises EnvironmentError: when an executable doesn't exist or is not executable
|
||||
:param str command: command to find location for
|
||||
:param bool required: command is required to be found, false otherwise
|
||||
:return: command location or None
|
||||
:raises ValueError: when not found and required
|
||||
"""
|
||||
for executable in executables:
|
||||
if not _is_exe(executable):
|
||||
raise EnvironmentError("executable not found: %s" % executable)
|
||||
found_path = None
|
||||
for path in os.environ["PATH"].split(os.pathsep):
|
||||
command_path = os.path.join(path, command)
|
||||
if os.path.isfile(command_path) and os.access(command_path, os.X_OK):
|
||||
found_path = command_path
|
||||
break
|
||||
|
||||
if found_path is None and required:
|
||||
raise ValueError(f"failed to find required executable({command}) in path")
|
||||
|
||||
return found_path
|
||||
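A quick usage sketch for the which() helper added above. The enclosing module path is not shown in this hunk, so the calls are written against the function directly, and results depend on the local PATH:

# Raises ValueError if "ip" cannot be found anywhere on PATH.
ip_path = which("ip", required=True)

# Returns None instead of raising when the command is optional.
optional = which("nonexistent-tool", required=False)
assert optional is None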
|
||||
|
||||
def make_tuple(obj):
|
||||
|
@ -167,7 +165,8 @@ def make_tuple_fromstr(s, value_type):
|
|||
:return: tuple from string
|
||||
:rtype: tuple
|
||||
"""
|
||||
# remove tuple braces and strip commands and space from all values in the tuple string
|
||||
# remove tuple braces and strip commands and space from all values in the tuple
|
||||
# string
|
||||
values = []
|
||||
for x in s.strip("(), ").split(","):
|
||||
x = x.strip("' ")
|
||||
|
@ -176,19 +175,6 @@ def make_tuple_fromstr(s, value_type):
|
|||
return tuple(value_type(i) for i in values)
|
||||
|
||||
|
||||
def split_args(args):
|
||||
"""
|
||||
Convenience method for splitting potential string commands into a shell-like syntax list.
|
||||
|
||||
:param list/str args: command list or string
|
||||
:return: shell-like syntax list
|
||||
:rtype: list
|
||||
"""
|
||||
if isinstance(args, basestring):
|
||||
args = shlex.split(args)
|
||||
return args
|
||||
|
||||
|
||||
def mute_detach(args, **kwargs):
|
||||
"""
|
||||
Run a muted detached process by forking it.
|
||||
|
@ -198,76 +184,41 @@ def mute_detach(args, **kwargs):
|
|||
:return: process id of the command
|
||||
:rtype: int
|
||||
"""
|
||||
args = split_args(args)
|
||||
args = shlex.split(args)
|
||||
kwargs["preexec_fn"] = _detach_init
|
||||
kwargs["stdout"] = DEVNULL
|
||||
kwargs["stderr"] = subprocess.STDOUT
|
||||
return subprocess.Popen(args, **kwargs).pid
|
||||
kwargs["stderr"] = STDOUT
|
||||
return Popen(args, **kwargs).pid
|
||||
|
||||
|
||||
def cmd(args, wait=True):
|
||||
def cmd(args, env=None, cwd=None, wait=True, shell=False):
|
||||
"""
|
||||
Runs a command on and returns the exit status.
|
||||
Execute a command on the host and return a tuple containing the exit status and
|
||||
result string. stderr output is folded into the stdout result string.
|
||||
|
||||
:param list[str]|str args: command arguments
|
||||
:param bool wait: wait for command to end or not
|
||||
:return: command status
|
||||
:rtype: int
|
||||
"""
|
||||
args = split_args(args)
|
||||
logging.debug("command: %s", args)
|
||||
try:
|
||||
p = subprocess.Popen(args)
|
||||
if not wait:
|
||||
return 0
|
||||
return p.wait()
|
||||
except OSError:
|
||||
raise CoreCommandError(-1, args)
|
||||
|
||||
|
||||
def cmd_output(args):
|
||||
"""
|
||||
Execute a command on the host and return a tuple containing the exit status and result string. stderr output
|
||||
is folded into the stdout result string.
|
||||
|
||||
:param list[str]|str args: command arguments
|
||||
:return: command status and stdout
|
||||
:rtype: tuple[int, str]
|
||||
:raises CoreCommandError: when the file to execute is not found
|
||||
"""
|
||||
args = split_args(args)
|
||||
logging.debug("command: %s", args)
|
||||
try:
|
||||
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
|
||||
stdout, _ = p.communicate()
|
||||
status = p.wait()
|
||||
return status, stdout.decode("utf-8").strip()
|
||||
except OSError:
|
||||
raise CoreCommandError(-1, args)
|
||||
|
||||
|
||||
def check_cmd(args, **kwargs):
|
||||
"""
|
||||
Execute a command on the host and return a tuple containing the exit status and result string. stderr output
|
||||
is folded into the stdout result string.
|
||||
|
||||
:param list[str]|str args: command arguments
|
||||
:param dict kwargs: keyword arguments to pass to subprocess.Popen
|
||||
:param str args: command arguments
|
||||
:param dict env: environment to run command with
|
||||
:param str cwd: directory to run command in
|
||||
:param bool wait: True to wait for status, False otherwise
|
||||
:param bool shell: True to use shell, False otherwise
|
||||
:return: combined stdout and stderr
|
||||
:rtype: str
|
||||
:raises CoreCommandError: when there is a non-zero exit status or the file to execute is not found
|
||||
:raises CoreCommandError: when there is a non-zero exit status or the file to
|
||||
execute is not found
|
||||
"""
|
||||
kwargs["stdout"] = subprocess.PIPE
|
||||
kwargs["stderr"] = subprocess.STDOUT
|
||||
args = split_args(args)
|
||||
logging.debug("command: %s", args)
|
||||
logging.info("command cwd(%s) wait(%s): %s", cwd, wait, args)
|
||||
if shell is False:
|
||||
args = shlex.split(args)
|
||||
try:
|
||||
p = subprocess.Popen(args, **kwargs)
|
||||
stdout, _ = p.communicate()
|
||||
status = p.wait()
|
||||
if status != 0:
|
||||
raise CoreCommandError(status, args, stdout)
|
||||
return stdout.decode("utf-8").strip()
|
||||
p = Popen(args, stdout=PIPE, stderr=PIPE, env=env, cwd=cwd, shell=shell)
|
||||
if wait:
|
||||
stdout, stderr = p.communicate()
|
||||
status = p.wait()
|
||||
if status != 0:
|
||||
raise CoreCommandError(status, args, stdout, stderr)
|
||||
return stdout.decode("utf-8").strip()
|
||||
else:
|
||||
return ""
|
||||
except OSError:
|
||||
raise CoreCommandError(-1, args)
|
||||
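The hunk above merges the old cmd, cmd_output, and check_cmd helpers into a single cmd(args, env=None, cwd=None, wait=True, shell=False). Hedged usage examples against that signature, with the import path taken from the service hunks earlier in this diff:

from core import utils
from core.errors import CoreCommandError

output = utils.cmd("echo hello")      # waits and returns stripped stdout
utils.cmd("sleep 5", wait=False)      # process is launched, "" returned immediately
utils.cmd("echo $HOME", shell=True)   # string handed to the shell unsplit
try:
    utils.cmd("false")                # non-zero exit raises
except CoreCommandError as e:
    print(e.returncode, e.stderr)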
|
||||
|
@ -289,12 +240,13 @@ def hex_dump(s, bytes_per_word=2, words_per_line=8):
|
|||
line = s[:total_bytes]
|
||||
s = s[total_bytes:]
|
||||
tmp = map(
|
||||
lambda x: ("%02x" * bytes_per_word) % x,
|
||||
lambda x: (f"{bytes_per_word:02x}" * bytes_per_word) % x,
|
||||
zip(*[iter(map(ord, line))] * bytes_per_word),
|
||||
)
|
||||
if len(line) % 2:
|
||||
tmp.append("%x" % ord(line[-1]))
|
||||
dump += "0x%08x: %s\n" % (count, " ".join(tmp))
|
||||
tmp.append(f"{ord(line[-1]):x}")
|
||||
tmp = " ".join(tmp)
|
||||
dump += f"0x{count:08x}: {tmp}\n"
|
||||
count += len(line)
|
||||
return dump[:-1]
|
||||
|
||||
|
@ -312,9 +264,9 @@ def file_munge(pathname, header, text):
|
|||
file_demunge(pathname, header)
|
||||
|
||||
with open(pathname, "a") as append_file:
|
||||
append_file.write("# BEGIN %s\n" % header)
|
||||
append_file.write(f"# BEGIN {header}\n")
|
||||
append_file.write(text)
|
||||
append_file.write("# END %s\n" % header)
|
||||
append_file.write(f"# END {header}\n")
|
||||
|
||||
|
||||
def file_demunge(pathname, header):
|
||||
|
@ -332,9 +284,9 @@ def file_demunge(pathname, header):
|
|||
end = None
|
||||
|
||||
for i, line in enumerate(lines):
|
||||
if line == "# BEGIN %s\n" % header:
|
||||
if line == f"# BEGIN {header}\n":
|
||||
start = i
|
||||
elif line == "# END %s\n" % header:
|
||||
elif line == f"# END {header}\n":
|
||||
end = i + 1
|
||||
|
||||
if start is None or end is None:
|
||||
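For orientation, the two helpers touched above manage a marker-delimited block in a config file; the f-string changes do not alter the markers. An illustrative round trip, where the path and header are placeholders and file_demunge is assumed to strip the block it finds:

file_munge("/tmp/example.conf", "core-demo", "option value\n")
# /tmp/example.conf now ends with:
#   # BEGIN core-demo
#   option value
#   # END core-demo
file_demunge("/tmp/example.conf", "core-demo")  # removes that block again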
|
@ -350,13 +302,13 @@ def expand_corepath(pathname, session=None, node=None):
|
|||
Expand a file path given session information.
|
||||
|
||||
:param str pathname: file path to expand
|
||||
:param core.emulator.session.Session session: core session object to expand path with
|
||||
:param core.emulator.session.Session session: core session object to expand path
|
||||
:param core.nodes.base.CoreNode node: node to expand path with
|
||||
:return: expanded path
|
||||
:rtype: str
|
||||
"""
|
||||
if session is not None:
|
||||
pathname = pathname.replace("~", "/home/%s" % session.user)
|
||||
pathname = pathname.replace("~", f"/home/{session.user}")
|
||||
pathname = pathname.replace("%SESSION%", str(session.id))
|
||||
pathname = pathname.replace("%SESSION_DIR%", session.session_dir)
|
||||
pathname = pathname.replace("%SESSION_USER%", session.user)
|
||||
|
@ -383,7 +335,8 @@ def sysctl_devname(devname):
|
|||
|
||||
def load_config(filename, d):
|
||||
"""
|
||||
Read key=value pairs from a file, into a dict. Skip comments; strip newline characters and spacing.
|
||||
Read key=value pairs from a file, into a dict. Skip comments; strip newline
|
||||
characters and spacing.
|
||||
|
||||
:param str filename: file to read into a dictionary
|
||||
:param dict d: dictionary to read file into
|
||||
|
@ -414,7 +367,7 @@ def load_classes(path, clazz):
|
|||
# validate path exists
|
||||
logging.debug("attempting to load modules from path: %s", path)
|
||||
if not os.path.isdir(path):
|
||||
logging.warning("invalid custom module directory specified" ": %s" % path)
|
||||
logging.warning("invalid custom module directory specified" ": %s", path)
|
||||
# check if path is in sys.path
|
||||
parent_path = os.path.dirname(path)
|
||||
if parent_path not in sys.path:
|
||||
|
@ -430,7 +383,7 @@ def load_classes(path, clazz):
|
|||
# import and add all service modules in the path
|
||||
classes = []
|
||||
for module_name in module_names:
|
||||
import_statement = "%s.%s" % (base_module, module_name)
|
||||
import_statement = f"{base_module}.{module_name}"
|
||||
logging.debug("importing custom module: %s", import_statement)
|
||||
try:
|
||||
module = importlib.import_module(import_statement)
|
||||
|
@ -444,3 +397,15 @@ def load_classes(path, clazz):
|
|||
)
|
||||
|
||||
return classes
|
||||
|
||||
|
||||
def load_logging_config(config_path):
|
||||
"""
|
||||
Load CORE logging configuration file.
|
||||
|
||||
:param str config_path: path to logging config file
|
||||
:return: nothing
|
||||
"""
|
||||
with open(config_path, "r") as log_config_file:
|
||||
log_config = json.load(log_config_file)
|
||||
logging.config.dictConfig(log_config)
|
||||
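load_logging_config() above simply feeds a JSON file to logging.config.dictConfig, so any standard dictConfig document works. A minimal hedged example of such a file and the call (the path is a placeholder):

import json
import logging.config

config = {
    "version": 1,
    "formatters": {"plain": {"format": "%(levelname)s %(name)s: %(message)s"}},
    "handlers": {"console": {"class": "logging.StreamHandler", "formatter": "plain"}},
    "root": {"level": "INFO", "handlers": ["console"]},
}
with open("/tmp/logging.json", "w") as f:
    json.dump(config, f)

load_logging_config("/tmp/logging.json")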
|
|
|
@ -4,7 +4,7 @@ from lxml import etree
|
|||
|
||||
import core.nodes.base
|
||||
import core.nodes.physical
|
||||
from core.emane.nodes import EmaneNode
|
||||
from core.emane.nodes import EmaneNet
|
||||
from core.emulator.emudata import InterfaceData, LinkOptions, NodeOptions
|
||||
from core.emulator.enumerations import NodeTypes
|
||||
from core.nodes.base import CoreNetworkBase
|
||||
|
@ -458,7 +458,7 @@ class CoreXmlWriter(object):
|
|||
interface_name = node_interface.name
|
||||
|
||||
# check if emane interface
|
||||
if isinstance(node_interface.net, EmaneNode):
|
||||
if isinstance(node_interface.net, EmaneNet):
|
||||
nem = node_interface.net.getnemid(node_interface)
|
||||
add_attribute(interface, "nem", nem)
|
||||
|
||||
|
@ -600,7 +600,7 @@ class CoreXmlReader(object):
|
|||
name = hook.get("name")
|
||||
state = hook.get("state")
|
||||
data = hook.text
|
||||
hook_type = "hook:%s" % state
|
||||
hook_type = f"hook:{state}"
|
||||
logging.info("reading hook: state(%s) name(%s)", state, name)
|
||||
self.session.set_hook(
|
||||
hook_type, file_name=name, source_name=None, data=data
|
||||
|
|
|
@ -3,8 +3,9 @@ import socket
|
|||
|
||||
from lxml import etree
|
||||
|
||||
from core import constants, utils
|
||||
from core.emane.nodes import EmaneNode
|
||||
from core import utils
|
||||
from core.constants import IP_BIN
|
||||
from core.emane.nodes import EmaneNet
|
||||
from core.nodes import ipaddress
|
||||
from core.nodes.base import CoreNodeBase
|
||||
|
||||
|
@ -30,20 +31,20 @@ def add_emane_interface(host_element, netif, platform_name="p1", transport_name=
|
|||
host_id = host_element.get("id")
|
||||
|
||||
# platform data
|
||||
platform_id = "%s/%s" % (host_id, platform_name)
|
||||
platform_id = f"{host_id}/{platform_name}"
|
||||
platform_element = etree.SubElement(
|
||||
host_element, "emanePlatform", id=platform_id, name=platform_name
|
||||
)
|
||||
|
||||
# transport data
|
||||
transport_id = "%s/%s" % (host_id, transport_name)
|
||||
transport_id = f"{host_id}/{transport_name}"
|
||||
etree.SubElement(
|
||||
platform_element, "transport", id=transport_id, name=transport_name
|
||||
)
|
||||
|
||||
# nem data
|
||||
nem_name = "nem%s" % nem_id
|
||||
nem_element_id = "%s/%s" % (host_id, nem_name)
|
||||
nem_name = f"nem{nem_id}"
|
||||
nem_element_id = f"{host_id}/{nem_name}"
|
||||
nem_element = etree.SubElement(
|
||||
platform_element, "nem", id=nem_element_id, name=nem_name
|
||||
)
|
||||
|
@ -67,8 +68,8 @@ def get_address_type(address):
|
|||
def get_ipv4_addresses(hostname):
|
||||
if hostname == "localhost":
|
||||
addresses = []
|
||||
args = [constants.IP_BIN, "-o", "-f", "inet", "addr", "show"]
|
||||
output = utils.check_cmd(args)
|
||||
args = f"{IP_BIN} -o -f inet address show"
|
||||
output = utils.cmd(args)
|
||||
for line in output.split(os.linesep):
|
||||
split = line.split()
|
||||
if not split:
|
||||
|
@ -93,23 +94,18 @@ class CoreXmlDeployment(object):
|
|||
self.add_deployment()
|
||||
|
||||
def find_device(self, name):
|
||||
device = self.scenario.find("devices/device[@name='%s']" % name)
|
||||
device = self.scenario.find(f"devices/device[@name='{name}']")
|
||||
return device
|
||||
|
||||
def find_interface(self, device, name):
|
||||
interface = self.scenario.find(
|
||||
"devices/device[@name='%s']/interfaces/interface[@name='%s']"
|
||||
% (device.name, name)
|
||||
f"devices/device[@name='{device.name}']/interfaces/interface[@name='{name}']"
|
||||
)
|
||||
return interface
|
||||
|
||||
def add_deployment(self):
|
||||
physical_host = self.add_physical_host(socket.gethostname())
|
||||
|
||||
# TODO: handle other servers
|
||||
# servers = self.session.broker.getservernames()
|
||||
# servers.remove("localhost")
|
||||
|
||||
for node_id in self.session.nodes:
|
||||
node = self.session.nodes[node_id]
|
||||
if isinstance(node, CoreNodeBase):
|
||||
|
@ -117,7 +113,8 @@ class CoreXmlDeployment(object):
|
|||
|
||||
def add_physical_host(self, name):
|
||||
# add host
|
||||
host_id = "%s/%s" % (self.root.get("id"), name)
|
||||
root_id = self.root.get("id")
|
||||
host_id = f"{root_id}/{name}"
|
||||
host_element = etree.SubElement(self.root, "testHost", id=host_id, name=name)
|
||||
|
||||
# add type element
|
||||
|
@ -131,10 +128,11 @@ class CoreXmlDeployment(object):
|
|||
|
||||
def add_virtual_host(self, physical_host, node):
|
||||
if not isinstance(node, CoreNodeBase):
|
||||
raise TypeError("invalid node type: %s" % node)
|
||||
raise TypeError(f"invalid node type: {node}")
|
||||
|
||||
# create virtual host element
|
||||
host_id = "%s/%s" % (physical_host.get("id"), node.name)
|
||||
phys_id = physical_host.get("id")
|
||||
host_id = f"{phys_id}/{node.name}"
|
||||
host_element = etree.SubElement(
|
||||
physical_host, "testHost", id=host_id, name=node.name
|
||||
)
|
||||
|
@ -144,7 +142,7 @@ class CoreXmlDeployment(object):
|
|||
|
||||
for netif in node.netifs():
|
||||
emane_element = None
|
||||
if isinstance(netif.net, EmaneNode):
|
||||
if isinstance(netif.net, EmaneNet):
|
||||
emane_element = add_emane_interface(host_element, netif)
|
||||
|
||||
parent_element = host_element
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
import logging
|
||||
import os
|
||||
from tempfile import NamedTemporaryFile
|
||||
|
||||
from lxml import etree
|
||||
|
||||
|
@ -44,20 +45,28 @@ def _value_to_params(value):
|
|||
return None
|
||||
|
||||
|
||||
def create_file(xml_element, doc_name, file_path):
|
||||
def create_file(xml_element, doc_name, file_path, server=None):
|
||||
"""
|
||||
Create xml file.
|
||||
|
||||
:param lxml.etree.Element xml_element: root element to write to file
|
||||
:param str doc_name: name to use in the emane doctype
|
||||
:param str file_path: file path to write xml file to
|
||||
:param core.emulator.distributed.DistributedServer server: remote server node
|
||||
will run on, default is None for localhost
|
||||
:return: nothing
|
||||
"""
|
||||
doctype = (
|
||||
'<!DOCTYPE %(doc_name)s SYSTEM "file:///usr/share/emane/dtd/%(doc_name)s.dtd">'
|
||||
% {"doc_name": doc_name}
|
||||
f'<!DOCTYPE {doc_name} SYSTEM "file:///usr/share/emane/dtd/{doc_name}.dtd">'
|
||||
)
|
||||
corexml.write_xml_file(xml_element, file_path, doctype=doctype)
|
||||
if server is not None:
|
||||
temp = NamedTemporaryFile(delete=False)
|
||||
create_file(xml_element, doc_name, temp.name)
|
||||
temp.close()
|
||||
server.remote_put(temp.name, file_path)
|
||||
os.unlink(temp.name)
|
||||
else:
|
||||
corexml.write_xml_file(xml_element, file_path, doctype=doctype)
|
||||
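With the change above, create_file() gains distributed support: when a DistributedServer is passed, the XML is written to a local NamedTemporaryFile and pushed with server.remote_put(); otherwise it is written in place. A hedged call sketch (element and paths are illustrative):

from lxml import etree

element = etree.Element("platform")
create_file(element, "platform", "/tmp/platform.xml")          # local write
# With a DistributedServer instance, the same call pushes to that host:
# create_file(element, "platform", "/tmp/platform.xml", server)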
|
||||
|
||||
def add_param(xml_element, name, value):
|
||||
|
@ -103,9 +112,11 @@ def build_node_platform_xml(emane_manager, control_net, node, nem_id, platform_x
|
|||
"""
|
||||
Create platform xml for a specific node.
|
||||
|
||||
:param core.emane.emanemanager.EmaneManager emane_manager: emane manager with emane configurations
|
||||
:param core.nodes.network.CtrlNet control_net: control net node for this emane network
|
||||
:param core.emane.nodes.EmaneNode node: node to write platform xml for
|
||||
:param core.emane.emanemanager.EmaneManager emane_manager: emane manager with emane
|
||||
configurations
|
||||
:param core.nodes.network.CtrlNet control_net: control net node for this emane
|
||||
network
|
||||
:param core.emane.nodes.EmaneNet node: node to write platform xml for
|
||||
:param int nem_id: nem id to use for interfaces for this node
|
||||
:param dict platform_xmls: stores platform xml elements to append nem entries to
|
||||
:return: the next nem id that can be used for creating platform xml files
|
||||
|
@ -120,7 +131,7 @@ def build_node_platform_xml(emane_manager, control_net, node, nem_id, platform_x
|
|||
nem_entries = {}
|
||||
|
||||
if node.model is None:
|
||||
logging.warning("warning: EmaneNode %s has no associated model", node.name)
|
||||
logging.warning("warning: EMANE network %s has no associated model", node.name)
|
||||
return nem_entries
|
||||
|
||||
for netif in node.netifs():
|
||||
|
@ -133,7 +144,8 @@ def build_node_platform_xml(emane_manager, control_net, node, nem_id, platform_x
|
|||
"nem", id=str(nem_id), name=netif.localname, definition=nem_definition
|
||||
)
|
||||
|
||||
# check if this is an external transport, get default config if an interface specific one does not exist
|
||||
# check if this is an external transport, get default config if an interface
|
||||
# specific one does not exist
|
||||
config = emane_manager.getifcconfig(node.model.id, netif, node.model.name)
|
||||
|
||||
if is_external(config):
|
||||
|
@ -195,23 +207,24 @@ def build_node_platform_xml(emane_manager, control_net, node, nem_id, platform_x
|
|||
|
||||
node.setnemid(netif, nem_id)
|
||||
macstr = _hwaddr_prefix + ":00:00:"
|
||||
macstr += "%02X:%02X" % ((nem_id >> 8) & 0xFF, nem_id & 0xFF)
|
||||
macstr += f"{(nem_id >> 8) & 0xFF:02X}:{nem_id & 0xFF:02X}"
|
||||
netif.sethwaddr(MacAddress.from_string(macstr))
|
||||
|
||||
# increment nem id
|
||||
nem_id += 1
|
||||
|
||||
doc_name = "platform"
|
||||
for key in sorted(platform_xmls.keys()):
|
||||
platform_element = platform_xmls[key]
|
||||
if key == "host":
|
||||
file_name = "platform.xml"
|
||||
file_path = os.path.join(emane_manager.session.session_dir, file_name)
|
||||
create_file(platform_element, doc_name, file_path)
|
||||
else:
|
||||
file_name = "platform%d.xml" % key
|
||||
|
||||
platform_element = platform_xmls[key]
|
||||
|
||||
doc_name = "platform"
|
||||
file_path = os.path.join(emane_manager.session.session_dir, file_name)
|
||||
create_file(platform_element, doc_name, file_path)
|
||||
file_name = f"platform{key}.xml"
|
||||
file_path = os.path.join(emane_manager.session.session_dir, file_name)
|
||||
linked_node = emane_manager.session.nodes[key]
|
||||
create_file(platform_element, doc_name, file_path, linked_node.server)
|
||||
|
||||
return nem_id
|
||||
|
||||
|
@ -220,8 +233,9 @@ def build_xml_files(emane_manager, node):
|
|||
"""
|
||||
Generate emane xml files required for node.
|
||||
|
||||
:param core.emane.emanemanager.EmaneManager emane_manager: emane manager with emane configurations
|
||||
:param core.emane.nodes.EmaneNode node: node to write platform xml for
|
||||
:param core.emane.emanemanager.EmaneManager emane_manager: emane manager with emane
|
||||
configurations
|
||||
:param core.emane.nodes.EmaneNet node: node to write platform xml for
|
||||
:return: nothing
|
||||
"""
|
||||
logging.debug("building all emane xml for node(%s): %s", node, node.name)
|
||||
|
@ -233,7 +247,7 @@ def build_xml_files(emane_manager, node):
|
|||
if not config:
|
||||
return
|
||||
|
||||
# build XML for overall network (EmaneNode) configs
|
||||
# build XML for overall network EMANE configs
|
||||
node.model.build_xml_files(config)
|
||||
|
||||
# build XML for specific interface (NEM) configs
|
||||
|
@ -243,7 +257,7 @@ def build_xml_files(emane_manager, node):
|
|||
rtype = "raw"
|
||||
|
||||
for netif in node.netifs():
|
||||
# check for interface specific emane configuration and write xml files, if needed
|
||||
# check for interface specific emane configuration and write xml files
|
||||
config = emane_manager.getifcconfig(node.model.id, netif, node.model.name)
|
||||
if config:
|
||||
node.model.build_xml_files(config, netif)
|
||||
|
@ -267,15 +281,16 @@ def build_transport_xml(emane_manager, node, transport_type):
|
|||
"""
|
||||
Build transport xml file for node and transport type.
|
||||
|
||||
:param core.emane.emanemanager.EmaneManager emane_manager: emane manager with emane configurations
|
||||
:param core.emane.nodes.EmaneNode node: node to write platform xml for
|
||||
:param core.emane.emanemanager.EmaneManager emane_manager: emane manager with emane
|
||||
configurations
|
||||
:param core.emane.nodes.EmaneNet node: node to write platform xml for
|
||||
:param str transport_type: transport type to build xml for
|
||||
:return: nothing
|
||||
"""
|
||||
transport_element = etree.Element(
|
||||
"transport",
|
||||
name="%s Transport" % transport_type.capitalize(),
|
||||
library="trans%s" % transport_type.lower(),
|
||||
name=f"{transport_type.capitalize()} Transport",
|
||||
library=f"trans{transport_type.lower()}",
|
||||
)
|
||||
|
||||
# add bitrate
|
||||
|
@ -298,18 +313,23 @@ def build_transport_xml(emane_manager, node, transport_type):
|
|||
file_name = transport_file_name(node.id, transport_type)
|
||||
file_path = os.path.join(emane_manager.session.session_dir, file_name)
|
||||
create_file(transport_element, doc_name, file_path)
|
||||
emane_manager.session.distributed.execute(
|
||||
lambda x: create_file(transport_element, doc_name, file_path, x)
|
||||
)
|
||||
|
||||
|
||||
def create_phy_xml(emane_model, config, file_path):
|
||||
def create_phy_xml(emane_model, config, file_path, server):
|
||||
"""
|
||||
Create the phy xml document.
|
||||
|
||||
:param core.emane.emanemodel.EmaneModel emane_model: emane model to create phy xml for
|
||||
:param core.emane.emanemodel.EmaneModel emane_model: emane model to create xml
|
||||
:param dict config: all current configuration values
|
||||
:param str file_path: path to write file to
|
||||
:param core.emulator.distributed.DistributedServer server: remote server node
|
||||
will run on, default is None for localhost
|
||||
:return: nothing
|
||||
"""
|
||||
phy_element = etree.Element("phy", name="%s PHY" % emane_model.name)
|
||||
phy_element = etree.Element("phy", name=f"{emane_model.name} PHY")
|
||||
if emane_model.phy_library:
|
||||
phy_element.set("library", emane_model.phy_library)
|
||||
|
||||
|
@ -317,54 +337,84 @@ def create_phy_xml(emane_model, config, file_path):
|
|||
phy_element, emane_model.phy_config, config, emane_model.config_ignore
|
||||
)
|
||||
create_file(phy_element, "phy", file_path)
|
||||
if server is not None:
|
||||
create_file(phy_element, "phy", file_path, server)
|
||||
else:
|
||||
create_file(phy_element, "phy", file_path)
|
||||
emane_model.session.distributed.execute(
|
||||
lambda x: create_file(phy_element, "phy", file_path, x)
|
||||
)
|
||||
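The phy, mac, and nem builders in these hunks now follow one pattern: write to the named server when one is given, otherwise write locally and replicate to every remote server via session.distributed.execute(). A hedged sketch of that pattern factored into a standalone helper; the helper name is ours, and execute() is assumed to invoke the callable once per configured remote server:

def write_emane_xml(element, doc_name, file_path, session, server=None):
    if server is not None:
        create_file(element, doc_name, file_path, server)
    else:
        create_file(element, doc_name, file_path)
        session.distributed.execute(
            lambda remote: create_file(element, doc_name, file_path, remote)
        )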
|
||||
|
||||
def create_mac_xml(emane_model, config, file_path):
|
||||
def create_mac_xml(emane_model, config, file_path, server):
|
||||
"""
|
||||
Create the mac xml document.
|
||||
|
||||
:param core.emane.emanemodel.EmaneModel emane_model: emane model to create phy xml for
|
||||
:param core.emane.emanemodel.EmaneModel emane_model: emane model to create xml
|
||||
:param dict config: all current configuration values
|
||||
:param str file_path: path to write file to
|
||||
:param core.emulator.distributed.DistributedServer server: remote server node
|
||||
will run on, default is None for localhost
|
||||
:return: nothing
|
||||
"""
|
||||
if not emane_model.mac_library:
|
||||
raise ValueError("must define emane model library")
|
||||
|
||||
mac_element = etree.Element(
|
||||
"mac", name="%s MAC" % emane_model.name, library=emane_model.mac_library
|
||||
"mac", name=f"{emane_model.name} MAC", library=emane_model.mac_library
|
||||
)
|
||||
add_configurations(
|
||||
mac_element, emane_model.mac_config, config, emane_model.config_ignore
|
||||
)
|
||||
create_file(mac_element, "mac", file_path)
|
||||
if server is not None:
|
||||
create_file(mac_element, "mac", file_path, server)
|
||||
else:
|
||||
create_file(mac_element, "mac", file_path)
|
||||
emane_model.session.distributed.execute(
|
||||
lambda x: create_file(mac_element, "mac", file_path, x)
|
||||
)
|
||||
|
||||
|
||||
def create_nem_xml(
|
||||
emane_model, config, nem_file, transport_definition, mac_definition, phy_definition
|
||||
emane_model,
|
||||
config,
|
||||
nem_file,
|
||||
transport_definition,
|
||||
mac_definition,
|
||||
phy_definition,
|
||||
server,
|
||||
):
|
||||
"""
|
||||
Create the nem xml document.
|
||||
|
||||
:param core.emane.emanemodel.EmaneModel emane_model: emane model to create phy xml for
|
||||
:param core.emane.emanemodel.EmaneModel emane_model: emane model to create xml
|
||||
:param dict config: all current configuration values
|
||||
:param str nem_file: nem file path to write
|
||||
:param str transport_definition: transport file definition path
|
||||
:param str mac_definition: mac file definition path
|
||||
:param str phy_definition: phy file definition path
|
||||
:param core.emulator.distributed.DistributedServer server: remote server node
|
||||
will run on, default is None for localhost
|
||||
:return: nothing
|
||||
"""
|
||||
nem_element = etree.Element("nem", name="%s NEM" % emane_model.name)
|
||||
nem_element = etree.Element("nem", name=f"{emane_model.name} NEM")
|
||||
if is_external(config):
|
||||
nem_element.set("type", "unstructured")
|
||||
else:
|
||||
etree.SubElement(nem_element, "transport", definition=transport_definition)
|
||||
etree.SubElement(nem_element, "mac", definition=mac_definition)
|
||||
etree.SubElement(nem_element, "phy", definition=phy_definition)
|
||||
create_file(nem_element, "nem", nem_file)
|
||||
if server is not None:
|
||||
create_file(nem_element, "nem", nem_file, server)
|
||||
else:
|
||||
create_file(nem_element, "nem", nem_file)
|
||||
emane_model.session.distributed.execute(
|
||||
lambda x: create_file(nem_element, "nem", nem_file, x)
|
||||
)
|
||||
|
||||
|
||||
def create_event_service_xml(group, port, device, file_directory):
|
||||
def create_event_service_xml(group, port, device, file_directory, server=None):
|
||||
"""
|
||||
Create a emane event service xml file.
|
||||
|
||||
|
@ -372,6 +422,8 @@ def create_event_service_xml(group, port, device, file_directory):
|
|||
:param str port: event port
|
||||
:param str device: event device
|
||||
:param str file_directory: directory to create file in
|
||||
:param core.emulator.distributed.DistributedServer server: remote server node
|
||||
will run on, default is None for localhost
|
||||
:return: nothing
|
||||
"""
|
||||
event_element = etree.Element("emaneeventmsgsvc")
|
||||
|
@ -386,7 +438,7 @@ def create_event_service_xml(group, port, device, file_directory):
|
|||
sub_element.text = value
|
||||
file_name = "libemaneeventservice.xml"
|
||||
file_path = os.path.join(file_directory, file_name)
|
||||
create_file(event_element, "emaneeventmsgsvc", file_path)
|
||||
create_file(event_element, "emaneeventmsgsvc", file_path, server)
|
||||
|
||||
|
||||
def transport_file_name(node_id, transport_type):
|
||||
|
@ -397,7 +449,7 @@ def transport_file_name(node_id, transport_type):
|
|||
:param str transport_type: transport type to generate transport file
|
||||
:return:
|
||||
"""
|
||||
return "n%strans%s.xml" % (node_id, transport_type)
|
||||
return f"n{node_id}trans{transport_type}.xml"
|
||||
|
||||
|
||||
def _basename(emane_model, interface=None):
|
||||
|
@ -408,21 +460,21 @@ def _basename(emane_model, interface=None):
|
|||
:return: basename used for file creation
|
||||
:rtype: str
|
||||
"""
|
||||
name = "n%s" % emane_model.id
|
||||
name = f"n{emane_model.id}"
|
||||
|
||||
if interface:
|
||||
node_id = interface.node.id
|
||||
if emane_model.session.emane.getifcconfig(node_id, interface, emane_model.name):
|
||||
name = interface.localname.replace(".", "_")
|
||||
|
||||
return "%s%s" % (name, emane_model.name)
|
||||
return f"{name}{emane_model.name}"
|
||||
|
||||
|
||||
def nem_file_name(emane_model, interface=None):
|
||||
"""
|
||||
Return the string name for the NEM XML file, e.g. "n3rfpipenem.xml"
|
||||
|
||||
:param core.emane.emanemodel.EmaneModel emane_model: emane model to create phy xml for
|
||||
:param core.emane.emanemodel.EmaneModel emane_model: emane model to create file
|
||||
:param interface: interface for this model
|
||||
:return: nem xml filename
|
||||
:rtype: str
|
||||
|
@ -431,40 +483,43 @@ def nem_file_name(emane_model, interface=None):
|
|||
append = ""
|
||||
if interface and interface.transport_type == "raw":
|
||||
append = "_raw"
|
||||
return "%snem%s.xml" % (basename, append)
|
||||
return f"{basename}nem{append}.xml"
|
||||
|
||||
|
||||
def shim_file_name(emane_model, interface=None):
|
||||
"""
|
||||
Return the string name for the SHIM XML file, e.g. "commeffectshim.xml"
|
||||
|
||||
:param core.emane.emanemodel.EmaneModel emane_model: emane model to create phy xml for
|
||||
:param core.emane.emanemodel.EmaneModel emane_model: emane model to create file
|
||||
:param interface: interface for this model
|
||||
:return: shim xml filename
|
||||
:rtype: str
|
||||
"""
|
||||
return "%sshim.xml" % _basename(emane_model, interface)
|
||||
name = _basename(emane_model, interface)
|
||||
return f"{name}shim.xml"
|
||||
|
||||
|
||||
def mac_file_name(emane_model, interface=None):
|
||||
"""
|
||||
Return the string name for the MAC XML file, e.g. "n3rfpipemac.xml"
|
||||
|
||||
:param core.emane.emanemodel.EmaneModel emane_model: emane model to create phy xml for
|
||||
:param core.emane.emanemodel.EmaneModel emane_model: emane model to create file
|
||||
:param interface: interface for this model
|
||||
:return: mac xml filename
|
||||
:rtype: str
|
||||
"""
|
||||
return "%smac.xml" % _basename(emane_model, interface)
|
||||
name = _basename(emane_model, interface)
|
||||
return f"{name}mac.xml"
|
||||
|
||||
|
||||
def phy_file_name(emane_model, interface=None):
|
||||
"""
|
||||
Return the string name for the PHY XML file, e.g. "n3rfpipephy.xml"
|
||||
|
||||
:param core.emane.emanemodel.EmaneModel emane_model: emane model to create phy xml for
|
||||
:param core.emane.emanemodel.EmaneModel emane_model: emane model to create file
|
||||
:param interface: interface for this model
|
||||
:return: phy xml filename
|
||||
:rtype: str
|
||||
"""
|
||||
return "%sphy.xml" % _basename(emane_model, interface)
|
||||
name = _basename(emane_model, interface)
|
||||
return f"{name}phy.xml"
|
||||
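A worked example of the filenames the helpers above generate, matching the samples quoted in their docstrings (model id 3, model name "rfpipe", no interface-specific config):

basename = f"n{3}" + "rfpipe"                  # _basename() result: "n3rfpipe"
assert f"{basename}nem.xml" == "n3rfpipenem.xml"
assert f"{basename}mac.xml" == "n3rfpipemac.xml"
assert f"{basename}phy.xml" == "n3rfpipephy.xml"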
|
|
|
@ -1,14 +1,9 @@
|
|||
# Configuration file for CORE (core-gui, core-daemon)
|
||||
|
||||
### GUI configuration options ###
|
||||
[core-gui]
|
||||
# no options are presently defined; see the ~/.core preferences file
|
||||
|
||||
### core-daemon configuration options ###
|
||||
[core-daemon]
|
||||
xmlfilever = 1.0
|
||||
#distributed_address = 127.0.0.1
|
||||
listenaddr = localhost
|
||||
port = 4038
|
||||
grpcaddress = localhost
|
||||
grpcport = 50051
|
||||
numthreads = 1
|
||||
quagga_bin_search = "/usr/local/bin /usr/bin /usr/lib/quagga"
|
||||
quagga_sbin_search = "/usr/local/sbin /usr/sbin /usr/lib/quagga"
|
||||
|
@ -16,15 +11,14 @@ frr_bin_search = "/usr/local/bin /usr/bin /usr/lib/frr"
|
|||
frr_sbin_search = "/usr/local/sbin /usr/sbin /usr/lib/frr"
|
||||
|
||||
# uncomment the following line to load custom services from the specified dir
|
||||
# this may be a comma-separated list, and directory names should be unique
|
||||
# and not named 'services'
|
||||
# this may be a comma-separated list, and directory names should be unique
|
||||
# and not named 'services'
|
||||
#custom_services_dir = /home/username/.core/myservices
|
||||
#
|
||||
|
||||
# uncomment to establish a standalone control backchannel for accessing nodes
|
||||
# (overriden by the session option of the same name)
|
||||
#controlnet = 172.16.0.0/24
|
||||
#
|
||||
#
|
||||
|
||||
# uncomment and edit to establish a distributed control backchannel
|
||||
#controlnet = core1:172.16.1.0/24 core2:172.16.2.0/24 core3:172.16.3.0/24 core4:172.16.4.0/24 core5:172.16.5.0/24
|
||||
|
||||
|
|
|
@ -1,51 +0,0 @@
|
|||
import argparse
|
||||
|
||||
DEFAULT_NODES = 2
|
||||
DEFAULT_TIME = 10
|
||||
DEFAULT_STEP = 1
|
||||
|
||||
|
||||
def parse_options(name):
|
||||
parser = argparse.ArgumentParser(description="Run %s example" % name)
|
||||
parser.add_argument(
|
||||
"-n",
|
||||
"--nodes",
|
||||
type=int,
|
||||
default=DEFAULT_NODES,
|
||||
help="number of nodes to create in this example",
|
||||
)
|
||||
parser.add_argument(
|
||||
"-t",
|
||||
"--time",
|
||||
type=int,
|
||||
default=DEFAULT_TIME,
|
||||
help="example iperf run time in seconds",
|
||||
)
|
||||
|
||||
options = parser.parse_args()
|
||||
|
||||
# usagestr = "usage: %prog [-h] [options] [args]"
|
||||
# parser = optparse.OptionParser(usage=usagestr)
|
||||
#
|
||||
# parser.add_option("-n", "--nodes", dest="nodes", type=int, default=DEFAULT_NODES,
|
||||
# help="number of nodes to create in this example")
|
||||
#
|
||||
# parser.add_option("-t", "--time", dest="time", type=int, default=DEFAULT_TIME,
|
||||
# help="example iperf run time in seconds")
|
||||
|
||||
# def usage(msg=None, err=0):
|
||||
# print
|
||||
# if msg:
|
||||
# print "%s\n" % msg
|
||||
# parser.print_help()
|
||||
# sys.exit(err)
|
||||
|
||||
# parse command line options
|
||||
# options, args = parser.parse_args()
|
||||
|
||||
if options.nodes < 2:
|
||||
parser.error("invalid min number of nodes: %s" % options.nodes)
|
||||
if options.time < 1:
|
||||
parser.error("invalid test time: %s" % options.time)
|
||||
|
||||
return options
|
|
@ -1,44 +0,0 @@
|
|||
import logging
|
||||
import time
|
||||
|
||||
from core.location.event import EventLoop
|
||||
|
||||
|
||||
def main():
|
||||
loop = EventLoop()
|
||||
|
||||
def msg(arg):
|
||||
delta = time.time() - loop.start
|
||||
logging.debug("%s arg: %s", delta, arg)
|
||||
|
||||
def repeat(interval, count):
|
||||
count -= 1
|
||||
msg("repeat: interval: %s; remaining: %s" % (interval, count))
|
||||
if count > 0:
|
||||
loop.add_event(interval, repeat, interval, count)
|
||||
|
||||
def sleep(delay):
|
||||
msg("sleep %s" % delay)
|
||||
time.sleep(delay)
|
||||
msg("sleep done")
|
||||
|
||||
def stop(arg):
|
||||
msg(arg)
|
||||
loop.stop()
|
||||
|
||||
loop.add_event(0, msg, "start")
|
||||
loop.add_event(0, msg, "time zero")
|
||||
|
||||
for delay in 5, 4, 10, -1, 0, 9, 3, 7, 3.14:
|
||||
loop.add_event(delay, msg, "time %s" % delay)
|
||||
|
||||
loop.run()
|
||||
|
||||
loop.add_event(0, repeat, 1, 5)
|
||||
loop.add_event(12, sleep, 10)
|
||||
|
||||
loop.add_event(15.75, stop, "stop time: 15.75")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
86

daemon/examples/grpc/distributed_switch.py
Normal file
|
@ -0,0 +1,86 @@
|
|||
import argparse
|
||||
import logging
|
||||
|
||||
from core.api.grpc import client, core_pb2
|
||||
|
||||
|
||||
def log_event(event):
|
||||
logging.info("event: %s", event)
|
||||
|
||||
|
||||
def main(args):
|
||||
core = client.CoreGrpcClient()
|
||||
|
||||
with core.context_connect():
|
||||
# create session
|
||||
response = core.create_session()
|
||||
session_id = response.session_id
|
||||
logging.info("created session: %s", response)
|
||||
|
||||
# add distributed server
|
||||
server_name = "core2"
|
||||
response = core.add_session_server(session_id, server_name, args.server)
|
||||
logging.info("added session server: %s", response)
|
||||
|
||||
# handle events session may broadcast
|
||||
core.events(session_id, log_event)
|
||||
|
||||
# change session state
|
||||
response = core.set_session_state(
|
||||
session_id, core_pb2.SessionState.CONFIGURATION
|
||||
)
|
||||
logging.info("set session state: %s", response)
|
||||
|
||||
# create switch node
|
||||
switch = core_pb2.Node(type=core_pb2.NodeType.SWITCH)
|
||||
response = core.add_node(session_id, switch)
|
||||
logging.info("created switch: %s", response)
|
||||
switch_id = response.node_id
|
||||
|
||||
# helper to create interfaces
|
||||
interface_helper = client.InterfaceHelper(ip4_prefix="10.83.0.0/16")
|
||||
|
||||
# create node one
|
||||
position = core_pb2.Position(x=100, y=50)
|
||||
node = core_pb2.Node(position=position)
|
||||
response = core.add_node(session_id, node)
|
||||
logging.info("created node one: %s", response)
|
||||
node_one_id = response.node_id
|
||||
|
||||
# create link
|
||||
interface_one = interface_helper.create_interface(node_one_id, 0)
|
||||
response = core.add_link(session_id, node_one_id, switch_id, interface_one)
|
||||
logging.info("created link from node one to switch: %s", response)
|
||||
|
||||
# create node two
|
||||
position = core_pb2.Position(x=200, y=50)
|
||||
node = core_pb2.Node(position=position, server=server_name)
|
||||
response = core.add_node(session_id, node)
|
||||
logging.info("created node two: %s", response)
|
||||
node_two_id = response.node_id
|
||||
|
||||
# create link
|
||||
interface_one = interface_helper.create_interface(node_two_id, 0)
|
||||
response = core.add_link(session_id, node_two_id, switch_id, interface_one)
|
||||
logging.info("created link from node two to switch: %s", response)
|
||||
|
||||
# change session state
|
||||
response = core.set_session_state(
|
||||
session_id, core_pb2.SessionState.INSTANTIATION
|
||||
)
|
||||
logging.info("set session state: %s", response)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
logging.basicConfig(level=logging.DEBUG)
|
||||
parser = argparse.ArgumentParser(description="Run distributed_switch example")
|
||||
parser.add_argument(
|
||||
"-a",
|
||||
"--address",
|
||||
help="local address that distributed servers will use for gre tunneling",
|
||||
)
|
||||
parser.add_argument(
|
||||
"-s", "--server", help="distributed server to use for creating nodes"
|
||||
)
|
||||
args = parser.parse_args()
|
||||
main(args)
|
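For reference, the new example is driven by the -a/--address and -s/--server flags defined by its argparse parser, so it would be launched along the lines of python3 distributed_switch.py -a <local address> -s <remote server> (addresses are placeholders); the remote host is the one registered through core.add_session_server() in the script.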
|
@ -1,5 +1,4 @@
|
|||
import logging
|
||||
from builtins import range
|
||||
|
||||
from core.api.grpc import client, core_pb2
|
||||
|
||||
|
|
|
@ -5,7 +5,7 @@ from core.emulator.emudata import IpPrefixes, NodeOptions
|
|||
from core.emulator.enumerations import EventTypes, NodeTypes
|
||||
|
||||
if __name__ == "__main__":
|
||||
logging.basicConfig(level=logging.DEBUG)
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
|
||||
coreemu = CoreEmu()
|
||||
session = coreemu.create_session()
|
||||
|
@ -14,7 +14,7 @@ if __name__ == "__main__":
|
|||
# create nodes and interfaces
|
||||
try:
|
||||
prefixes = IpPrefixes(ip4_prefix="10.83.0.0/16")
|
||||
options = NodeOptions(image="ubuntu")
|
||||
options = NodeOptions(image="ubuntu:18.04")
|
||||
|
||||
# create node one
|
||||
node_one = session.add_node(_type=NodeTypes.LXC, node_options=options)
|
||||
|
|
|
@ -37,7 +37,7 @@ class MyService(CoreService):
|
|||
dependencies = ()
|
||||
dirs = ()
|
||||
configs = ("myservice1.sh", "myservice2.sh")
|
||||
startup = ("sh %s" % configs[0], "sh %s" % configs[1])
|
||||
startup = tuple(f"sh {x}" for x in configs)
|
||||
validate = ()
|
||||
validation_mode = ServiceMode.NON_BLOCKING
|
||||
validation_timer = 5
|
||||
|
@ -81,7 +81,7 @@ class MyService(CoreService):
|
|||
if filename == cls.configs[0]:
|
||||
cfg += "# auto-generated by MyService (sample.py)\n"
|
||||
for ifc in node.netifs():
|
||||
cfg += 'echo "Node %s has interface %s"\n' % (node.name, ifc.name)
|
||||
cfg += f'echo "Node {node.name} has interface {ifc.name}"\n'
|
||||
elif filename == cls.configs[1]:
|
||||
cfg += "echo hello"
|
||||
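The generator-based rewrite above keeps startup in sync with configs automatically; evaluated, it is equivalent to the old hand-written tuple:

configs = ("myservice1.sh", "myservice2.sh")
startup = tuple(f"sh {x}" for x in configs)
assert startup == ("sh myservice1.sh", "sh myservice2.sh")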
|
||||
|
|
|
@ -1,212 +0,0 @@
|
|||
#!/usr/bin/python -i
|
||||
|
||||
# Copyright (c)2010-2013 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
|
||||
# A distributed example where CORE API messaging is used to create a session
|
||||
# on a daemon server. The daemon server defaults to 127.0.0.1:4038
|
||||
# to target a remote machine specify "-d <ip address>" parameter, it needs to be
|
||||
# running the daemon with listenaddr=0.0.0.0 in the core.conf file.
|
||||
# This script creates no nodes locally and therefore can be run as an
|
||||
# unprivileged user.
|
||||
|
||||
import datetime
|
||||
import optparse
|
||||
import sys
|
||||
from builtins import range
|
||||
|
||||
import core.nodes.base
|
||||
import core.nodes.network
|
||||
from core.api.tlv import coreapi, dataconversion
|
||||
from core.api.tlv.coreapi import CoreExecuteTlv
|
||||
from core.emulator.enumerations import (
|
||||
CORE_API_PORT,
|
||||
EventTlvs,
|
||||
EventTypes,
|
||||
ExecuteTlvs,
|
||||
LinkTlvs,
|
||||
LinkTypes,
|
||||
MessageFlags,
|
||||
MessageTypes,
|
||||
)
|
||||
from core.emulator.session import Session
|
||||
from core.nodes import ipaddress
|
||||
|
||||
# declare classes for use with Broker
|
||||
|
||||
# node list (count from 1)
|
||||
n = [None]
|
||||
exec_num = 1
|
||||
|
||||
|
||||
def cmd(node, exec_cmd):
|
||||
"""
|
||||
:param node: The node the command should be issued too
|
||||
:param exec_cmd: A string with the command to be run
|
||||
:return: Returns the result of the command
|
||||
"""
|
||||
global exec_num
|
||||
|
||||
# Set up the command api message
|
||||
tlvdata = CoreExecuteTlv.pack(ExecuteTlvs.NODE.value, node.id)
|
||||
tlvdata += CoreExecuteTlv.pack(ExecuteTlvs.NUMBER.value, exec_num)
|
||||
tlvdata += CoreExecuteTlv.pack(ExecuteTlvs.COMMAND.value, exec_cmd)
|
||||
msg = coreapi.CoreExecMessage.pack(
|
||||
MessageFlags.STRING.value | MessageFlags.TEXT.value, tlvdata
|
||||
)
|
||||
node.session.broker.handlerawmsg(msg)
|
||||
exec_num += 1
|
||||
|
||||
# Now wait for the response
|
||||
server = node.session.broker.servers["localhost"]
|
||||
server.sock.settimeout(50.0)
|
||||
|
||||
# receive messages until we get our execute response
|
||||
result = None
|
||||
while True:
|
||||
msghdr = server.sock.recv(coreapi.CoreMessage.header_len)
|
||||
msgtype, msgflags, msglen = coreapi.CoreMessage.unpack_header(msghdr)
|
||||
msgdata = server.sock.recv(msglen)
|
||||
|
||||
# If we get the right response return the results
|
||||
print("received response message: %s" % MessageTypes(msgtype))
|
||||
if msgtype == MessageTypes.EXECUTE.value:
|
||||
msg = coreapi.CoreExecMessage(msgflags, msghdr, msgdata)
|
||||
result = msg.get_tlv(ExecuteTlvs.RESULT.value)
|
||||
break
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def main():
|
||||
usagestr = "usage: %prog [-n] number of nodes [-d] daemon address"
|
||||
parser = optparse.OptionParser(usage=usagestr)
|
||||
parser.set_defaults(numnodes=5, daemon="127.0.0.1:" + str(CORE_API_PORT))
|
||||
|
||||
parser.add_option(
|
||||
"-n", "--numnodes", dest="numnodes", type=int, help="number of nodes"
|
||||
)
|
||||
parser.add_option(
|
||||
"-d",
|
||||
"--daemon-server",
|
||||
dest="daemon",
|
||||
type=str,
|
||||
help="daemon server IP address",
|
||||
)
|
||||
|
||||
def usage(msg=None, err=0):
|
||||
sys.stdout.write("\n")
|
||||
if msg:
|
||||
sys.stdout.write(msg + "\n\n")
|
||||
parser.print_help()
|
||||
sys.exit(err)
|
||||
|
||||
# parse command line options
|
||||
(options, args) = parser.parse_args()
|
||||
|
||||
if options.numnodes < 1:
|
||||
usage("invalid number of nodes: %s" % options.numnodes)
|
||||
if not options.daemon:
|
||||
usage("daemon server IP address (-d) is a required argument")
|
||||
|
||||
for a in args:
|
||||
sys.stderr.write("ignoring command line argument: %s\n" % a)
|
||||
|
||||
start = datetime.datetime.now()
|
||||
|
||||
prefix = ipaddress.Ipv4Prefix("10.83.0.0/16")
|
||||
session = Session(1)
|
||||
server = globals().get("server")
|
||||
if server:
|
||||
server.addsession(session)
|
||||
|
||||
# distributed setup - connect to daemon server
|
||||
daemonport = options.daemon.split(":")
|
||||
daemonip = daemonport[0]
|
||||
|
||||
# Localhost is already set in the session but we change it to be the remote daemon
|
||||
# This stops the remote daemon trying to build a tunnel back which would fail
|
||||
daemon = "localhost"
|
||||
if len(daemonport) > 1:
|
||||
port = int(daemonport[1])
|
||||
else:
|
||||
port = CORE_API_PORT
|
||||
print("connecting to daemon at %s:%d" % (daemon, port))
|
||||
session.broker.addserver(daemon, daemonip, port)
|
||||
|
||||
# Set the local session id to match the port.
|
||||
# Not necessary but seems neater.
|
||||
session.broker.setupserver(daemon)
|
||||
|
||||
# We do not want the recvloop running as we will deal ourselves
|
||||
session.broker.dorecvloop = False
|
||||
|
||||
# Change to configuration state on both machines
|
||||
session.set_state(EventTypes.CONFIGURATION_STATE)
|
||||
tlvdata = coreapi.CoreEventTlv.pack(
|
||||
EventTlvs.TYPE.value, EventTypes.CONFIGURATION_STATE.value
|
||||
)
|
||||
session.broker.handlerawmsg(coreapi.CoreEventMessage.pack(0, tlvdata))
|
||||
|
||||
flags = MessageFlags.ADD.value
|
||||
switch = core.nodes.network.SwitchNode(session=session, name="switch", start=False)
|
||||
switch.setposition(x=80, y=50)
|
||||
switch.server = daemon
|
||||
switch_data = switch.data(flags)
|
||||
switch_message = dataconversion.convert_node(switch_data)
|
||||
session.broker.handlerawmsg(switch_message)
|
||||
|
||||
number_of_nodes = options.numnodes
|
||||
|
||||
print(
|
||||
"creating %d remote nodes with addresses from %s" % (options.numnodes, prefix)
|
||||
)
|
||||
|
||||
# create remote nodes via API
|
||||
for i in range(1, number_of_nodes + 1):
|
||||
node = core.nodes.base.CoreNode(
|
||||
session=session, _id=i, name="n%d" % i, start=False
|
||||
)
|
||||
node.setposition(x=150 * i, y=150)
|
||||
node.server = daemon
|
||||
node_data = node.data(flags)
|
||||
node_message = dataconversion.convert_node(node_data)
|
||||
session.broker.handlerawmsg(node_message)
|
||||
n.append(node)
|
||||
|
||||
# create remote links via API
|
||||
for i in range(1, number_of_nodes + 1):
|
||||
tlvdata = coreapi.CoreLinkTlv.pack(LinkTlvs.N1_NUMBER.value, switch.id)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.N2_NUMBER.value, i)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.TYPE.value, LinkTypes.WIRED.value)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.INTERFACE2_NUMBER.value, 0)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(
|
||||
LinkTlvs.INTERFACE2_IP4.value, prefix.addr(i)
|
||||
)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(
|
||||
LinkTlvs.INTERFACE2_IP4_MASK.value, prefix.prefixlen
|
||||
)
|
||||
msg = coreapi.CoreLinkMessage.pack(flags, tlvdata)
|
||||
session.broker.handlerawmsg(msg)
|
||||
|
||||
# We change the daemon to Instantiation state
|
||||
# We do not change the local session as it would try and build a tunnel and fail
|
||||
tlvdata = coreapi.CoreEventTlv.pack(
|
||||
EventTlvs.TYPE.value, EventTypes.INSTANTIATION_STATE.value
|
||||
)
|
||||
msg = coreapi.CoreEventMessage.pack(0, tlvdata)
|
||||
session.broker.handlerawmsg(msg)
|
||||
|
||||
# Get the ip or last node and ping it from the first
|
||||
print("Pinging from the first to the last node")
|
||||
pingip = cmd(n[-1], "ip -4 -o addr show dev eth0").split()[3].split("/")[0]
|
||||
print(cmd(n[1], "ping -c 5 " + pingip))
|
||||
print("elapsed time: %s" % (datetime.datetime.now() - start))
|
||||
print(
|
||||
"To stop this session, use the core-cleanup script on the remote daemon server."
|
||||
)
|
||||
input("press enter to exit")
|
||||
|
||||
|
||||
if __name__ == "__main__" or __name__ == "__builtin__":
|
||||
main()
|
|
@ -1,151 +0,0 @@
|
|||
#!/usr/bin/python -i
|
||||
|
||||
# Copyright (c)2010-2013 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
|
||||
# A distributed example where CORE API messaging is used to create a session
|
||||
# distributed across the local server and one slave server. The slave server
|
||||
# must be specified using the '-s <ip address>' parameter, and needs to be
|
||||
# running the daemon with listenaddr=0.0.0.0 in the core.conf file.
|
||||
#
|
||||
|
||||
import datetime
|
||||
import optparse
|
||||
import sys
|
||||
from builtins import range
|
||||
|
||||
import core.nodes.base
|
||||
import core.nodes.network
|
||||
from core import constants
|
||||
from core.api.tlv import coreapi, dataconversion
|
||||
from core.emulator.enumerations import (
|
||||
CORE_API_PORT,
|
||||
EventTlvs,
|
||||
EventTypes,
|
||||
LinkTlvs,
|
||||
LinkTypes,
|
||||
MessageFlags,
|
||||
)
|
||||
from core.emulator.session import Session
|
||||
from core.nodes import ipaddress
|
||||
|
||||
# node list (count from 1)
|
||||
n = [None]
|
||||
|
||||
|
||||
def main():
|
||||
usagestr = "usage: %prog [-h] [options] [args]"
|
||||
parser = optparse.OptionParser(usage=usagestr)
|
||||
parser.set_defaults(numnodes=5, slave=None)
|
||||
|
||||
parser.add_option(
|
||||
"-n", "--numnodes", dest="numnodes", type=int, help="number of nodes"
|
||||
)
|
||||
parser.add_option(
|
||||
"-s", "--slave-server", dest="slave", type=str, help="slave server IP address"
|
||||
)
|
||||
|
||||
def usage(msg=None, err=0):
|
||||
sys.stdout.write("\n")
|
||||
if msg:
|
||||
sys.stdout.write(msg + "\n\n")
|
||||
parser.print_help()
|
||||
sys.exit(err)
|
||||
|
||||
# parse command line options
|
||||
(options, args) = parser.parse_args()
|
||||
|
||||
if options.numnodes < 1:
|
||||
usage("invalid number of nodes: %s" % options.numnodes)
|
||||
if not options.slave:
|
||||
usage("slave server IP address (-s) is a required argument")
|
||||
|
||||
for a in args:
|
||||
sys.stderr.write("ignoring command line argument: '%s'\n" % a)
|
||||
|
||||
start = datetime.datetime.now()
|
||||
|
||||
prefix = ipaddress.Ipv4Prefix("10.83.0.0/16")
|
||||
session = Session(1)
|
||||
server = globals().get("server")
|
||||
if server is not None:
|
||||
server.addsession(session)
|
||||
|
||||
# distributed setup - connect to slave server
|
||||
slaveport = options.slave.split(":")
|
||||
slave = slaveport[0]
|
||||
if len(slaveport) > 1:
|
||||
port = int(slaveport[1])
|
||||
else:
|
||||
port = CORE_API_PORT
|
||||
print("connecting to slave at %s:%d" % (slave, port))
|
||||
session.broker.addserver(slave, slave, port)
|
||||
session.broker.setupserver(slave)
|
||||
session.set_state(EventTypes.CONFIGURATION_STATE)
|
||||
tlvdata = coreapi.CoreEventTlv.pack(
|
||||
EventTlvs.TYPE.value, EventTypes.CONFIGURATION_STATE.value
|
||||
)
|
||||
session.broker.handlerawmsg(coreapi.CoreEventMessage.pack(0, tlvdata))
|
||||
|
||||
switch = session.create_node(cls=core.nodes.network.SwitchNode, name="switch")
|
||||
switch.setposition(x=80, y=50)
|
||||
num_local = options.numnodes / 2
|
||||
num_remote = options.numnodes / 2 + options.numnodes % 2
|
||||
print(
|
||||
"creating %d (%d local / %d remote) nodes with addresses from %s"
|
||||
% (options.numnodes, num_local, num_remote, prefix)
|
||||
)
|
||||
for i in range(1, num_local + 1):
|
||||
node = session.create_node(cls=core.nodes.base.CoreNode, name="n%d" % i, _id=i)
|
||||
node.newnetif(switch, ["%s/%s" % (prefix.addr(i), prefix.prefixlen)])
|
||||
node.cmd([constants.SYSCTL_BIN, "net.ipv4.icmp_echo_ignore_broadcasts=0"])
|
||||
node.setposition(x=150 * i, y=150)
|
||||
n.append(node)
|
||||
|
||||
flags = MessageFlags.ADD.value
|
||||
session.broker.handlerawmsg(switch.tonodemsg(flags=flags))
|
||||
|
||||
# create remote nodes via API
|
||||
for i in range(num_local + 1, options.numnodes + 1):
|
||||
node = core.nodes.base.CoreNode(
|
||||
session=session, _id=i, name="n%d" % i, start=False
|
||||
)
|
||||
node.setposition(x=150 * i, y=150)
|
||||
node.server = slave
|
||||
n.append(node)
|
||||
node_data = node.data(flags)
|
||||
node_message = dataconversion.convert_node(node_data)
|
||||
session.broker.handlerawmsg(node_message)
|
||||
|
||||
# create remote links via API
|
||||
for i in range(num_local + 1, options.numnodes + 1):
|
||||
tlvdata = coreapi.CoreLinkTlv.pack(LinkTlvs.N1_NUMBER.value, switch.id)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.N2_NUMBER.value, i)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.TYPE.value, LinkTypes.WIRED.value)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(LinkTlvs.INTERFACE2_NUMBER.value, 0)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(
|
||||
LinkTlvs.INTERFACE2_IP4.value, prefix.addr(i)
|
||||
)
|
||||
tlvdata += coreapi.CoreLinkTlv.pack(
|
||||
LinkTlvs.INTERFACE2_IP4_MASK.value, prefix.prefixlen
|
||||
)
|
||||
msg = coreapi.CoreLinkMessage.pack(flags, tlvdata)
|
||||
session.broker.handlerawmsg(msg)
|
||||
|
||||
session.instantiate()
|
||||
tlvdata = coreapi.CoreEventTlv.pack(
|
||||
EventTlvs.TYPE.value, EventTypes.INSTANTIATION_STATE.value
|
||||
)
|
||||
msg = coreapi.CoreEventMessage.pack(0, tlvdata)
|
||||
session.broker.handlerawmsg(msg)
|
||||
|
||||
# start a shell on node 1
|
||||
n[1].client.term("bash")
|
||||
|
||||
print("elapsed time: %s" % (datetime.datetime.now() - start))
|
||||
print("To stop this session, use the 'core-cleanup' script on this server")
|
||||
print("and on the remote slave server.")
|
||||
|
||||
|
||||
if __name__ == "__main__" or __name__ == "__builtin__":
|
||||
main()
|
|
@ -1,247 +0,0 @@
|
|||
#!/usr/bin/python
|
||||
|
||||
# Copyright (c)2010-2012 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
#
|
||||
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
|
||||
#
|
||||
|
||||
"""
|
||||
howmanynodes.py - This is a CORE script that creates network namespace nodes
|
||||
having one virtual Ethernet interface connected to a bridge. It continues to
|
||||
add nodes until an exception occurs. The number of nodes per bridge can be
|
||||
specified.
|
||||
"""
|
||||
|
||||
import datetime
|
||||
import optparse
|
||||
import shutil
|
||||
import sys
|
||||
import time
|
||||
|
||||
import core.nodes.base
|
||||
import core.nodes.network
|
||||
from core import constants
|
||||
from core.emulator.session import Session
|
||||
from core.nodes import ipaddress
|
||||
|
||||
GBD = 1024.0 * 1024.0
|
||||
|
||||
|
||||
def linuxversion():
|
||||
""" Return a string having the Linux kernel version.
|
||||
"""
|
||||
f = open("/proc/version", "r")
|
||||
v = f.readline().split()
|
||||
version_str = " ".join(v[:3])
|
||||
f.close()
|
||||
return version_str
|
||||
|
||||
|
||||
MEMKEYS = ("total", "free", "buff", "cached", "stotal", "sfree")
|
||||
|
||||
|
||||
def memfree():
|
||||
""" Returns kilobytes memory [total, free, buff, cached, stotal, sfree].
|
||||
useful stats are:
|
||||
free memory = free + buff + cached
|
||||
swap used = stotal - sfree
|
||||
"""
|
||||
f = open("/proc/meminfo", "r")
|
||||
lines = f.readlines()
|
||||
f.close()
|
||||
kbs = {}
|
||||
for k in MEMKEYS:
|
||||
kbs[k] = 0
|
||||
for l in lines:
|
||||
if l[:9] == "MemTotal:":
|
||||
kbs["total"] = int(l.split()[1])
|
||||
elif l[:8] == "MemFree:":
|
||||
kbs["free"] = int(l.split()[1])
|
||||
elif l[:8] == "Buffers:":
|
||||
kbs["buff"] = int(l.split()[1])
|
||||
elif l[:8] == "Cached:":
|
||||
kbs["cache"] = int(l.split()[1])
|
||||
elif l[:10] == "SwapTotal:":
|
||||
kbs["stotal"] = int(l.split()[1])
|
||||
elif l[:9] == "SwapFree:":
|
||||
kbs["sfree"] = int(l.split()[1])
|
||||
break
|
||||
return kbs
|
||||
|
||||
|
||||
# node list (count from 1)
|
||||
nodelist = [None]
|
||||
switchlist = []
|
||||
|
||||
|
||||
def main():
|
||||
usagestr = "usage: %prog [-h] [options] [args]"
|
||||
parser = optparse.OptionParser(usage=usagestr)
|
||||
parser.set_defaults(
|
||||
waittime=0.2, numnodes=0, bridges=0, retries=0, logfile=None, services=None
|
||||
)
|
||||
|
||||
parser.add_option(
|
||||
"-w",
|
||||
"--waittime",
|
||||
dest="waittime",
|
||||
type=float,
|
||||
help="number of seconds to wait between node creation"
|
||||
" (default = %s)" % parser.defaults["waittime"],
|
||||
)
|
||||
parser.add_option(
|
||||
"-n",
|
||||
"--numnodes",
|
||||
dest="numnodes",
|
||||
type=int,
|
||||
help="number of nodes (default = unlimited)",
|
||||
)
|
||||
parser.add_option(
|
||||
"-b",
|
||||
"--bridges",
|
||||
dest="bridges",
|
||||
type=int,
|
||||
help="number of nodes per bridge; 0 = one bridge "
|
||||
"(def. = %s)" % parser.defaults["bridges"],
|
||||
)
|
||||
parser.add_option(
|
||||
"-r",
|
||||
"--retry",
|
||||
dest="retries",
|
||||
type=int,
|
||||
help="number of retries on error (default = %s)" % parser.defaults["retries"],
|
||||
)
|
||||
parser.add_option(
|
||||
"-l",
|
||||
"--log",
|
||||
dest="logfile",
|
||||
type=str,
|
||||
help="log memory usage to this file (default = %s)"
|
||||
% parser.defaults["logfile"],
|
||||
)
|
||||
parser.add_option(
|
||||
"-s",
|
||||
"--services",
|
||||
dest="services",
|
||||
type=str,
|
||||
help="pipe-delimited list of services added to each "
|
||||
"node (default = %s)\n(Example: zebra|OSPFv2|OSPFv3|"
|
||||
"IPForward)" % parser.defaults["services"],
|
||||
)
|
||||
|
||||
def usage(msg=None, err=0):
|
||||
sys.stdout.write("\n")
|
||||
if msg:
|
||||
sys.stdout.write(msg + "\n\n")
|
||||
parser.print_help()
|
||||
sys.exit(err)
|
||||
|
||||
options, args = parser.parse_args()
|
||||
|
||||
for a in args:
|
||||
sys.stderr.write("ignoring command line argument: %s\n" % a)
|
||||
|
||||
start = datetime.datetime.now()
|
||||
prefix = ipaddress.Ipv4Prefix("10.83.0.0/16")
|
||||
|
||||
print("Testing how many network namespace nodes this machine can create.")
|
||||
print(" - %s" % linuxversion())
|
||||
mem = memfree()
|
||||
print(
|
||||
" - %.02f GB total memory (%.02f GB swap)"
|
||||
% (mem["total"] / GBD, mem["stotal"] / GBD)
|
||||
)
|
||||
print(" - using IPv4 network prefix %s" % prefix)
|
||||
print(" - using wait time of %s" % options.waittime)
|
||||
print(" - using %d nodes per bridge" % options.bridges)
|
||||
print(" - will retry %d times on failure" % options.retries)
|
||||
print(" - adding these services to each node: %s" % options.services)
|
||||
print(" ")
|
||||
|
||||
lfp = None
|
||||
if options.logfile is not None:
|
||||
# initialize a csv log file header
|
||||
lfp = open(options.logfile, "a")
|
||||
lfp.write("# log from howmanynodes.py %s\n" % time.ctime())
|
||||
lfp.write("# options = %s\n#\n" % options)
|
||||
lfp.write("# numnodes,%s\n" % ",".join(MEMKEYS))
|
||||
lfp.flush()
|
||||
|
||||
session = Session(1)
|
||||
switch = session.create_node(cls=core.nodes.network.SwitchNode)
|
||||
switchlist.append(switch)
|
||||
print("Added bridge %s (%d)." % (switch.brname, len(switchlist)))
|
||||
|
||||
i = 0
|
||||
retry_count = options.retries
|
||||
while True:
|
||||
i += 1
|
||||
# optionally add a bridge (options.bridges nodes per bridge)
|
||||
try:
|
||||
if 0 < options.bridges <= switch.numnetif():
|
||||
switch = session.create_node(cls=core.nodes.network.SwitchNode)
|
||||
switchlist.append(switch)
|
||||
print(
|
||||
"\nAdded bridge %s (%d) for node %d."
|
||||
% (switch.brname, len(switchlist), i)
|
||||
)
|
||||
except Exception as e:
|
||||
print(
|
||||
"At %d bridges (%d nodes) caught exception:\n%s\n"
|
||||
% (len(switchlist), i - 1, e)
|
||||
)
|
||||
break
|
||||
|
||||
# create a node
|
||||
try:
|
||||
n = session.create_node(cls=core.nodes.base.CoreNode, name="n%d" % i)
|
||||
n.newnetif(switch, ["%s/%s" % (prefix.addr(i), prefix.prefixlen)])
|
||||
n.cmd([constants.SYSCTL_BIN, "net.ipv4.icmp_echo_ignore_broadcasts=0"])
|
||||
if options.services is not None:
|
||||
session.services.add_services(n, "", options.services.split("|"))
|
||||
session.services.boot_services(n)
|
||||
nodelist.append(n)
|
||||
if i % 25 == 0:
|
||||
print("\n%s nodes created " % i)
|
||||
mem = memfree()
|
||||
free = mem["free"] + mem["buff"] + mem["cached"]
|
||||
swap = mem["stotal"] - mem["sfree"]
|
||||
print("(%.02f/%.02f GB free/swap)" % (free / GBD, swap / GBD))
|
||||
if lfp:
|
||||
lfp.write("%d," % i)
|
||||
lfp.write("%s\n" % ",".join(str(mem[x]) for x in MEMKEYS))
|
||||
lfp.flush()
|
||||
else:
|
||||
sys.stdout.write(".")
|
||||
sys.stdout.flush()
|
||||
time.sleep(options.waittime)
|
||||
except Exception as e:
|
||||
print("At %d nodes caught exception:\n" % i, e)
|
||||
if retry_count > 0:
|
||||
print("\nWill retry creating node %d." % i)
|
||||
shutil.rmtree(n.nodedir, ignore_errors=True)
|
||||
retry_count -= 1
|
||||
i -= 1
|
||||
time.sleep(options.waittime)
|
||||
continue
|
||||
else:
|
||||
print("Stopping at %d nodes!" % i)
|
||||
break
|
||||
|
||||
if i == options.numnodes:
|
||||
print("Stopping at %d nodes due to numnodes option." % i)
|
||||
break
|
||||
# node creation was successful at this point
|
||||
retry_count = options.retries
|
||||
|
||||
if lfp:
|
||||
lfp.flush()
|
||||
lfp.close()
|
||||
|
||||
print("elapsed time: %s" % (datetime.datetime.now() - start))
|
||||
print("Use the core-cleanup script to remove nodes and bridges.")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
|
@ -1,637 +0,0 @@
|
|||
#!/usr/bin/python
|
||||
|
||||
# Copyright (c)2011-2014 the Boeing Company.
|
||||
# See the LICENSE file included in this distribution.
|
||||
|
||||
# create a random topology running OSPFv3 MDR, wait and then check
|
||||
# that all neighbor states are either full or two-way, and check the routes
|
||||
# in zebra vs those installed in the kernel.
|
||||
|
||||
import datetime
|
||||
import optparse
|
||||
import os
|
||||
import random
|
||||
import sys
|
||||
import time
|
||||
from builtins import range
|
||||
from string import Template
|
||||
|
||||
import core.nodes.base
|
||||
import core.nodes.network
|
||||
from core.constants import QUAGGA_STATE_DIR
|
||||
from core.emulator.session import Session
|
||||
from core.nodes import ipaddress
|
||||
from core.utils import check_cmd
|
||||
|
||||
quagga_sbin_search = ("/usr/local/sbin", "/usr/sbin", "/usr/lib/quagga")
|
||||
quagga_path = "zebra"
|
||||
|
||||
# sanity check that zebra is installed
|
||||
try:
|
||||
for p in quagga_sbin_search:
|
||||
if os.path.exists(os.path.join(p, "zebra")):
|
||||
quagga_path = p
|
||||
break
|
||||
check_cmd([os.path.join(quagga_path, "zebra"), "-u", "root", "-g", "root", "-v"])
|
||||
except OSError:
|
||||
sys.stderr.write("ERROR: running zebra failed\n")
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
class ManetNode(core.nodes.base.CoreNode):
|
||||
""" An Lxc namespace node configured for Quagga OSPFv3 MANET MDR
|
||||
"""
|
||||
|
||||
conftemp = Template(
|
||||
"""\
|
||||
interface eth0
|
||||
ip address $ipaddr
|
||||
ipv6 ospf6 instance-id 65
|
||||
ipv6 ospf6 hello-interval 2
|
||||
ipv6 ospf6 dead-interval 6
|
||||
ipv6 ospf6 retransmit-interval 5
|
||||
ipv6 ospf6 network manet-designated-router
|
||||
ipv6 ospf6 diffhellos
|
||||
ipv6 ospf6 adjacencyconnectivity biconnected
|
||||
ipv6 ospf6 lsafullness mincostlsa
|
||||
!
|
||||
router ospf6
|
||||
router-id $routerid
|
||||
interface eth0 area 0.0.0.0
|
||||
!
|
||||
ip forwarding
|
||||
"""
|
||||
)
|
||||
|
||||
confdir = "/usr/local/etc/quagga"
|
||||
|
||||
def __init__(self, core, ipaddr, routerid=None, _id=None, name=None, nodedir=None):
|
||||
if routerid is None:
|
||||
routerid = ipaddr.split("/")[0]
|
||||
self.ipaddr = ipaddr
|
||||
self.routerid = routerid
|
||||
core.nodes.base.CoreBaseNode.__init__(self, core, _id, name, nodedir)
|
||||
self.privatedir(self.confdir)
|
||||
self.privatedir(QUAGGA_STATE_DIR)
|
||||
|
||||
def qconf(self):
|
||||
return self.conftemp.substitute(ipaddr=self.ipaddr, routerid=self.routerid)
|
||||
|
||||
def config(self):
|
||||
filename = os.path.join(self.confdir, "Quagga.conf")
|
||||
f = self.opennodefile(filename, "w")
|
||||
f.write(self.qconf())
|
||||
f.close()
|
||||
tmp = self.bootscript()
|
||||
if tmp:
|
||||
self.nodefile(self.bootsh, tmp, mode=0o755)
|
||||
|
||||
def boot(self):
|
||||
self.config()
|
||||
self.session.services.boot_services(self)
|
||||
|
||||
def bootscript(self):
|
||||
return """\
|
||||
#!/bin/sh -e
|
||||
|
||||
STATEDIR=%s
|
||||
|
||||
waitfile()
|
||||
{
|
||||
fname=$1
|
||||
|
||||
i=0
|
||||
until [ -e $fname ]; do
|
||||
i=$(($i + 1))
|
||||
if [ $i -eq 10 ]; then
|
||||
echo "file not found: $fname" >&2
|
||||
exit 1
|
||||
fi
|
||||
sleep 0.1
|
||||
done
|
||||
}
|
||||
|
||||
mkdir -p $STATEDIR
|
||||
|
||||
%s/zebra -d -u root -g root
|
||||
waitfile $STATEDIR/zebra.vty
|
||||
|
||||
%s/ospf6d -d -u root -g root
|
||||
waitfile $STATEDIR/ospf6d.vty
|
||||
|
||||
vtysh -b
|
||||
""" % (
|
||||
QUAGGA_STATE_DIR,
|
||||
quagga_path,
|
||||
quagga_path,
|
||||
)
|
||||
|
||||
|
||||
class Route(object):
|
||||
""" Helper class for organzing routing table entries. """
|
||||
|
||||
def __init__(self, prefix=None, gw=None, metric=None):
|
||||
try:
|
||||
self.prefix = ipaddress.Ipv4Prefix(prefix)
|
||||
except Exception as e:
|
||||
raise ValueError(
|
||||
"Invalid prefix given to Route object: %s\n%s" % (prefix, e)
|
||||
)
|
||||
self.gw = gw
|
||||
self.metric = metric
|
||||
|
||||
def __eq__(self, other):
|
||||
try:
|
||||
return (
|
||||
self.prefix == other.prefix
|
||||
and self.gw == other.gw
|
||||
and self.metric == other.metric
|
||||
)
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
def __str__(self):
|
||||
return "(%s,%s,%s)" % (self.prefix, self.gw, self.metric)
|
||||
|
||||
@staticmethod
|
||||
def key(r):
|
||||
if not r.prefix:
|
||||
return 0
|
||||
return r.prefix.prefix
|
||||
|
||||
|
||||
class ManetExperiment(object):
|
||||
""" A class for building an MDR network and checking and logging its state.
|
||||
"""
|
||||
|
||||
def __init__(self, options, start):
|
||||
""" Initialize with options and start time. """
|
||||
self.session = None
|
||||
# node list
|
||||
self.nodes = []
|
||||
# WLAN network
|
||||
self.net = None
|
||||
self.verbose = options.verbose
|
||||
# dict from OptionParser
|
||||
self.options = options
|
||||
self.start = start
|
||||
self.logbegin()
|
||||
|
||||
def info(self, msg):
|
||||
""" Utility method for writing output to stdout. """
|
||||
print(msg)
|
||||
sys.stdout.flush()
|
||||
self.log(msg)
|
||||
|
||||
def warn(self, msg):
|
||||
""" Utility method for writing output to stderr. """
|
||||
sys.stderr.write(msg)
|
||||
sys.stderr.flush()
|
||||
self.log(msg)
|
||||
|
||||
def logbegin(self):
|
||||
""" Start logging. """
|
||||
self.logfp = None
|
||||
if not self.options.logfile:
|
||||
return
|
||||
self.logfp = open(self.options.logfile, "w")
|
||||
self.log("ospfmanetmdrtest begin: %s\n" % self.start.ctime())
|
||||
|
||||
def logend(self):
|
||||
""" End logging. """
|
||||
if not self.logfp:
|
||||
return
|
||||
end = datetime.datetime.now()
|
||||
self.log("ospfmanetmdrtest end: %s (%s)\n" % (end.ctime(), end - self.start))
|
||||
self.logfp.flush()
|
||||
self.logfp.close()
|
||||
self.logfp = None
|
||||
|
||||
def log(self, msg):
|
||||
""" Write to the log file, if any. """
|
||||
if not self.logfp:
|
||||
return
|
||||
self.logfp.write(msg)
|
||||
|
||||
def logdata(self, nbrs, mdrs, lsdbs, krs, zrs):
|
||||
""" Dump experiment parameters and data to the log file. """
|
||||
self.log("ospfmantetmdrtest data:")
|
||||
self.log("----- parameters -----")
|
||||
self.log("%s" % self.options)
|
||||
self.log("----- neighbors -----")
|
||||
for rtrid in sorted(nbrs.keys()):
|
||||
self.log("%s: %s" % (rtrid, nbrs[rtrid]))
|
||||
self.log("----- mdr levels -----")
|
||||
self.log(mdrs)
|
||||
self.log("----- link state databases -----")
|
||||
for rtrid in sorted(lsdbs.keys()):
|
||||
self.log("%s lsdb:" % rtrid)
|
||||
for line in lsdbs[rtrid].split("\n"):
|
||||
self.log(line)
|
||||
self.log("----- kernel routes -----")
|
||||
for rtrid in sorted(krs.keys()):
|
||||
msg = rtrid + ": "
|
||||
for rt in krs[rtrid]:
|
||||
msg += "%s" % rt
|
||||
self.log(msg)
|
||||
self.log("----- zebra routes -----")
|
||||
for rtrid in sorted(zrs.keys()):
|
||||
msg = rtrid + ": "
|
||||
for rt in zrs[rtrid]:
|
||||
msg += "%s" % rt
|
||||
self.log(msg)
|
||||
|
||||
def topology(self, numnodes, linkprob, verbose=False):
|
||||
""" Build a topology consisting of the given number of ManetNodes
|
||||
connected to a WLAN and probabilty of links and set
|
||||
the session, WLAN, and node list objects.
|
||||
"""
|
||||
# IP subnet
|
||||
prefix = ipaddress.Ipv4Prefix("10.14.0.0/16")
|
||||
self.session = Session(1)
|
||||
# emulated network
|
||||
self.net = self.session.create_node(cls=core.nodes.network.WlanNode)
|
||||
for i in range(1, numnodes + 1):
|
||||
addr = "%s/%s" % (prefix.addr(i), 32)
|
||||
tmp = self.session.create_node(
|
||||
cls=ManetNode, ipaddr=addr, _id="%d" % i, name="n%d" % i
|
||||
)
|
||||
tmp.newnetif(self.net, [addr])
|
||||
self.nodes.append(tmp)
|
||||
# connect nodes with probability linkprob
|
||||
for i in range(numnodes):
|
||||
for j in range(i + 1, numnodes):
|
||||
r = random.random()
|
||||
if r < linkprob:
|
||||
if self.verbose:
|
||||
self.info("linking (%d,%d)" % (i, j))
|
||||
self.net.link(self.nodes[i].netif(0), self.nodes[j].netif(0))
|
||||
# force one link to avoid partitions (should check if this is needed)
|
||||
j = i
|
||||
while j == i:
|
||||
j = random.randint(0, numnodes - 1)
|
||||
if self.verbose:
|
||||
self.info("linking (%d,%d)" % (i, j))
|
||||
self.net.link(self.nodes[i].netif(0), self.nodes[j].netif(0))
|
||||
self.nodes[i].boot()
|
||||
# run the boot.sh script on all nodes to start Quagga
|
||||
for i in range(numnodes):
|
||||
self.nodes[i].cmd(["./%s" % self.nodes[i].bootsh])
|
||||
|
||||
def compareroutes(self, node, kr, zr):
|
||||
""" Compare two lists of Route objects.
|
||||
"""
|
||||
kr.sort(key=Route.key)
|
||||
zr.sort(key=Route.key)
|
||||
if kr != zr:
|
||||
self.warn("kernel and zebra routes differ")
|
||||
if self.verbose:
|
||||
msg = "kernel: "
|
||||
for r in kr:
|
||||
msg += "%s " % r
|
||||
msg += "\nzebra: "
|
||||
for r in zr:
|
||||
msg += "%s " % r
|
||||
self.warn(msg)
|
||||
else:
|
||||
self.info(" kernel and zebra routes match")
|
||||
|
||||
def comparemdrlevels(self, nbrs, mdrs):
|
||||
""" Check that all routers form a connected dominating set, i.e. all
|
||||
routers are either MDR, BMDR, or adjacent to one.
|
||||
"""
|
||||
msg = "All routers form a CDS"
|
||||
for n in self.nodes:
|
||||
if mdrs[n.routerid] != "OTHER":
|
||||
continue
|
||||
connected = False
|
||||
for nbr in nbrs[n.routerid]:
|
||||
if mdrs[nbr] == "MDR" or mdrs[nbr] == "BMDR":
|
||||
connected = True
|
||||
break
|
||||
if not connected:
|
||||
msg = "All routers do not form a CDS"
|
||||
self.warn(
|
||||
"XXX %s: not in CDS; neighbors: %s" % (n.routerid, nbrs[n.routerid])
|
||||
)
|
||||
if self.verbose:
|
||||
self.info(msg)
|
||||
|
||||
def comparelsdbs(self, lsdbs):
|
||||
""" Check LSDBs for consistency.
|
||||
"""
|
||||
msg = "LSDBs of all routers are consistent"
|
||||
prev = self.nodes[0]
|
||||
for n in self.nodes:
|
||||
db = lsdbs[n.routerid]
|
||||
if lsdbs[prev.routerid] != db:
|
||||
msg = "LSDBs of all routers are not consistent"
|
||||
self.warn(
|
||||
"XXX LSDBs inconsistent for %s and %s" % (n.routerid, prev.routerid)
|
||||
)
|
||||
i = 0
|
||||
for entry in lsdbs[n.routerid].split("\n"):
|
||||
preventries = lsdbs[prev.routerid].split("\n")
|
||||
try:
|
||||
preventry = preventries[i]
|
||||
except IndexError:
|
||||
preventry = None
|
||||
if entry != preventry:
|
||||
self.warn("%s: %s" % (n.routerid, entry))
|
||||
self.warn("%s: %s" % (prev.routerid, preventry))
|
||||
i += 1
|
||||
prev = n
|
||||
if self.verbose:
|
||||
self.info(msg)
|
||||
|
||||
def checknodes(self):
|
||||
""" Check the neighbor state and routing tables of all nodes. """
|
||||
nbrs = {}
|
||||
mdrs = {}
|
||||
lsdbs = {}
|
||||
krs = {}
|
||||
zrs = {}
|
||||
v = self.verbose
|
||||
for n in self.nodes:
|
||||
self.info("checking %s" % n.name)
|
||||
nbrs[n.routerid] = Ospf6NeighState(n, verbose=v).run()
|
||||
krs[n.routerid] = KernelRoutes(n, verbose=v).run()
|
||||
zrs[n.routerid] = ZebraRoutes(n, verbose=v).run()
|
||||
self.compareroutes(n, krs[n.routerid], zrs[n.routerid])
|
||||
mdrs[n.routerid] = Ospf6MdrLevel(n, verbose=v).run()
|
||||
lsdbs[n.routerid] = Ospf6Database(n, verbose=v).run()
|
||||
self.comparemdrlevels(nbrs, mdrs)
|
||||
self.comparelsdbs(lsdbs)
|
||||
self.logdata(nbrs, mdrs, lsdbs, krs, zrs)
|
||||
|
||||
|
||||
class Cmd:
|
||||
""" Helper class for running a command on a node and parsing the result. """
|
||||
|
||||
args = ""
|
||||
|
||||
def __init__(self, node, verbose=False):
|
||||
""" Initialize with a CoreNode (LxcNode) """
|
||||
self.id = None
|
||||
self.stdin = None
|
||||
self.out = None
|
||||
self.node = node
|
||||
self.verbose = verbose
|
||||
|
||||
def info(self, msg):
|
||||
""" Utility method for writing output to stdout."""
|
||||
print(msg)
|
||||
sys.stdout.flush()
|
||||
|
||||
def warn(self, msg):
|
||||
""" Utility method for writing output to stderr. """
|
||||
sys.stderr.write("XXX %s:" % self.node.routerid, msg)
|
||||
sys.stderr.flush()
|
||||
|
||||
def run(self):
|
||||
""" This is the primary method used for running this command. """
|
||||
self.open()
|
||||
r = self.parse()
|
||||
self.cleanup()
|
||||
return r
|
||||
|
||||
def open(self):
|
||||
""" Exceute call to node.popen(). """
|
||||
self.id, self.stdin, self.out, self.err = self.node.client.popen(self.args)
|
||||
|
||||
def parse(self):
|
||||
""" This method is overloaded by child classes and should return some
|
||||
result.
|
||||
"""
|
||||
return None
|
||||
|
||||
def cleanup(self):
|
||||
""" Close the Popen channels."""
|
||||
self.stdin.close()
|
||||
self.out.close()
|
||||
self.err.close()
|
||||
tmp = self.id.wait()
|
||||
if tmp:
|
||||
self.warn("nonzero exit status:", tmp)
|
||||
|
||||
|
||||
class VtyshCmd(Cmd):
|
||||
""" Runs a vtysh command. """
|
||||
|
||||
def open(self):
|
||||
args = ("vtysh", "-c", self.args)
|
||||
self.id, self.stdin, self.out, self.err = self.node.client.popen(args)
|
||||
|
||||
|
||||
class Ospf6NeighState(VtyshCmd):
|
||||
""" Check a node for OSPFv3 neighbors in the full/two-way states. """
|
||||
|
||||
args = "show ipv6 ospf6 neighbor"
|
||||
|
||||
def parse(self):
|
||||
# skip first line
|
||||
self.out.readline()
|
||||
nbrlist = []
|
||||
for line in self.out:
|
||||
field = line.split()
|
||||
nbr = field[0]
|
||||
state = field[3].split("/")[0]
|
||||
if not state.lower() in ("full", "twoway"):
|
||||
self.warn("neighbor %s state: %s" % (nbr, state))
|
||||
nbrlist.append(nbr)
|
||||
|
||||
if len(nbrlist) == 0:
|
||||
self.warn("no neighbors")
|
||||
if self.verbose:
|
||||
self.info(" %s has %d neighbors" % (self.node.routerid, len(nbrlist)))
|
||||
return nbrlist
|
||||
|
||||
|
||||
class Ospf6MdrLevel(VtyshCmd):
|
||||
""" Retrieve the OSPFv3 MDR level for a node. """
|
||||
|
||||
args = "show ipv6 ospf6 mdrlevel"
|
||||
|
||||
def parse(self):
|
||||
line = self.out.readline()
|
||||
# TODO: handle multiple interfaces
|
||||
field = line.split()
|
||||
mdrlevel = field[4]
|
||||
if mdrlevel not in ("MDR", "BMDR", "OTHER"):
|
||||
self.warn("mdrlevel: %s" % mdrlevel)
|
||||
if self.verbose:
|
||||
self.info(" %s is %s" % (self.node.routerid, mdrlevel))
|
||||
return mdrlevel
|
||||
|
||||
|
||||
class Ospf6Database(VtyshCmd):
|
||||
""" Retrieve the OSPFv3 LSDB summary for a node. """
|
||||
|
||||
args = "show ipv6 ospf6 database"
|
||||
|
||||
def parse(self):
|
||||
db = ""
|
||||
for line in self.out:
|
||||
field = line.split()
|
||||
if len(field) < 8:
|
||||
continue
|
||||
# filter out Age and Duration columns
|
||||
filtered = field[:3] + field[4:7]
|
||||
db += " ".join(filtered) + "\n"
|
||||
return db
|
||||
|
||||
|
||||
class ZebraRoutes(VtyshCmd):
|
||||
""" Return a list of Route objects for a node based on its zebra
|
||||
routing table.
|
||||
"""
|
||||
|
||||
args = "show ip route"
|
||||
|
||||
def parse(self):
|
||||
for i in range(0, 3):
|
||||
# skip first three lines
|
||||
self.out.readline()
|
||||
r = []
|
||||
prefix = None
|
||||
for line in self.out:
|
||||
field = line.split()
|
||||
if len(field) < 1:
|
||||
continue
|
||||
# only use OSPFv3 selected FIB routes
|
||||
elif field[0][:2] == "o>":
|
||||
prefix = field[1]
|
||||
metric = field[2].split("/")[1][:-1]
|
||||
if field[0][2:] != "*":
|
||||
continue
|
||||
if field[3] == "via":
|
||||
gw = field[4][:-1]
|
||||
else:
|
||||
gw = field[6][:-1]
|
||||
r.append(Route(prefix, gw, metric))
|
||||
prefix = None
|
||||
elif prefix and field[0] == "*":
|
||||
# already have prefix and metric from previous line
|
||||
gw = field[2][:-1]
|
||||
r.append(Route(prefix, gw, metric))
|
||||
prefix = None
|
||||
|
||||
if len(r) == 0:
|
||||
self.warn("no zebra routes")
|
||||
if self.verbose:
|
||||
self.info(" %s has %d zebra routes" % (self.node.routerid, len(r)))
|
||||
return r
|
||||
|
||||
|
||||
class KernelRoutes(Cmd):
|
||||
""" Return a list of Route objects for a node based on its kernel
|
||||
routing table.
|
||||
"""
|
||||
|
||||
args = ("/sbin/ip", "route", "show")
|
||||
|
||||
def parse(self):
|
||||
r = []
|
||||
prefix = None
|
||||
for line in self.out:
|
||||
field = line.split()
|
||||
if field[0] == "nexthop":
|
||||
if not prefix:
|
||||
# this saves only the first nexthop entry if multiple exist
|
||||
continue
|
||||
else:
|
||||
prefix = field[0]
|
||||
metric = field[-1]
|
||||
tmp = prefix.split("/")
|
||||
if len(tmp) < 2:
|
||||
prefix += "/32"
|
||||
if field[1] == "proto":
|
||||
# nexthop entry is on the next line
|
||||
continue
|
||||
# nexthop IP or interface
|
||||
gw = field[2]
|
||||
r.append(Route(prefix, gw, metric))
|
||||
prefix = None
|
||||
|
||||
if len(r) == 0:
|
||||
self.warn("no kernel routes")
|
||||
if self.verbose:
|
||||
self.info(" %s has %d kernel routes" % (self.node.routerid, len(r)))
|
||||
return r
|
||||
|
||||
|
||||
def main():
|
||||
usagestr = "usage: %prog [-h] [options] [args]"
|
||||
parser = optparse.OptionParser(usage=usagestr)
|
||||
parser.set_defaults(numnodes=10, linkprob=0.35, delay=20, seed=None)
|
||||
|
||||
parser.add_option(
|
||||
"-n", "--numnodes", dest="numnodes", type=int, help="number of nodes"
|
||||
)
|
||||
parser.add_option(
|
||||
"-p", "--linkprob", dest="linkprob", type=float, help="link probabilty"
|
||||
)
|
||||
parser.add_option(
|
||||
"-d", "--delay", dest="delay", type=float, help="wait time before checking"
|
||||
)
|
||||
parser.add_option(
|
||||
"-s",
|
||||
"--seed",
|
||||
dest="seed",
|
||||
type=int,
|
||||
help="specify integer to use for random seed",
|
||||
)
|
||||
parser.add_option(
|
||||
"-v", "--verbose", dest="verbose", action="store_true", help="be more verbose"
|
||||
)
|
||||
parser.add_option(
|
||||
"-l",
|
||||
"--logfile",
|
||||
dest="logfile",
|
||||
type=str,
|
||||
help="log detailed output to the specified file",
|
||||
)
|
||||
|
||||
def usage(msg=None, err=0):
|
||||
sys.stdout.write("\n")
|
||||
if msg:
|
||||
sys.stdout.write(msg + "\n\n")
|
||||
parser.print_help()
|
||||
sys.exit(err)
|
||||
|
||||
# parse command line options
|
||||
(options, args) = parser.parse_args()
|
||||
|
||||
if options.numnodes < 2:
|
||||
usage("invalid numnodes: %s" % options.numnodes)
|
||||
if options.linkprob <= 0.0 or options.linkprob > 1.0:
|
||||
usage("invalid linkprob: %s" % options.linkprob)
|
||||
if options.delay < 0.0:
|
||||
usage("invalid delay: %s" % options.delay)
|
||||
|
||||
for a in args:
|
||||
sys.stderr.write("ignoring command line argument: '%s'\n" % a)
|
||||
|
||||
if options.seed:
|
||||
random.seed(options.seed)
|
||||
|
||||
me = ManetExperiment(options=options, start=datetime.datetime.now())
|
||||
me.info(
|
||||
"creating topology: numnodes = %s; linkprob = %s"
|
||||
% (options.numnodes, options.linkprob)
|
||||
)
|
||||
me.topology(options.numnodes, options.linkprob)
|
||||
|
||||
me.info("waiting %s sec" % options.delay)
|
||||
time.sleep(options.delay)
|
||||
me.info("checking neighbor state and routes")
|
||||
me.checknodes()
|
||||
me.info("done")
|
||||
me.info("elapsed time: %s" % (datetime.datetime.now() - me.start))
|
||||
me.logend()
|
||||
|
||||
return me
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
me = main()
|
59
daemon/examples/python/distributed_emane.py
Normal file
@@ -0,0 +1,59 @@
import logging

import distributed_parser
from core.emane.ieee80211abg import EmaneIeee80211abgModel
from core.emulator.coreemu import CoreEmu
from core.emulator.emudata import IpPrefixes, NodeOptions
from core.emulator.enumerations import EventTypes, NodeTypes


def main(args):
    # ip generator for example
    prefixes = IpPrefixes(ip4_prefix="10.83.0.0/16")

    # create emulator instance for creating sessions and utility methods
    coreemu = CoreEmu(
        {
            "controlnet": "core1:172.16.1.0/24 core2:172.16.2.0/24 core3:172.16.3.0/24 "
            "core4:172.16.4.0/24 core5:172.16.5.0/24",
            "distributed_address": args.address,
        }
    )
    session = coreemu.create_session()

    # initialize distributed
    server_name = "core2"
    session.distributed.add_server(server_name, args.server)

    # must be in configuration state for nodes to start, when using "node_add" below
    session.set_state(EventTypes.CONFIGURATION_STATE)

    # create local node, switch, and remote nodes
    options = NodeOptions(model="mdr")
    options.set_position(0, 0)
    node_one = session.add_node(node_options=options)
    emane_net = session.add_node(_type=NodeTypes.EMANE)
    session.emane.set_model(emane_net, EmaneIeee80211abgModel)
    options.emulation_server = server_name
    node_two = session.add_node(node_options=options)

    # create node interfaces and link
    interface_one = prefixes.create_interface(node_one)
    interface_two = prefixes.create_interface(node_two)
    session.add_link(node_one.id, emane_net.id, interface_one=interface_one)
    session.add_link(node_two.id, emane_net.id, interface_one=interface_two)

    # instantiate session
    session.instantiate()

    # pause script for verification
    input("press enter for shutdown")

    # shutdown session
    coreemu.shutdown()


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    args = distributed_parser.parse(__file__)
    main(args)
48
daemon/examples/python/distributed_lxd.py
Normal file
@@ -0,0 +1,48 @@
import logging

import distributed_parser
from core.emulator.coreemu import CoreEmu
from core.emulator.emudata import IpPrefixes, NodeOptions
from core.emulator.enumerations import EventTypes, NodeTypes


def main(args):
    # ip generator for example
    prefixes = IpPrefixes(ip4_prefix="10.83.0.0/16")

    # create emulator instance for creating sessions and utility methods
    coreemu = CoreEmu({"distributed_address": args.address})
    session = coreemu.create_session()

    # initialize distributed
    server_name = "core2"
    session.distributed.add_server(server_name, args.server)

    # must be in configuration state for nodes to start, when using "node_add" below
    session.set_state(EventTypes.CONFIGURATION_STATE)

    # create local node, switch, and remote nodes
    options = NodeOptions(image="ubuntu:18.04")
    node_one = session.add_node(_type=NodeTypes.LXC, node_options=options)
    options.emulation_server = server_name
    node_two = session.add_node(_type=NodeTypes.LXC, node_options=options)

    # create node interfaces and link
    interface_one = prefixes.create_interface(node_one)
    interface_two = prefixes.create_interface(node_two)
    session.add_link(node_one.id, node_two.id, interface_one, interface_two)

    # instantiate session
    session.instantiate()

    # pause script for verification
    input("press enter for shutdown")

    # shutdown session
    coreemu.shutdown()


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    args = distributed_parser.parse(__file__)
    main(args)
15
daemon/examples/python/distributed_parser.py
Normal file
@@ -0,0 +1,15 @@
import argparse


def parse(name):
    parser = argparse.ArgumentParser(description=f"Run {name} example")
    parser.add_argument(
        "-a",
        "--address",
        help="local address that distributed servers will use for gre tunneling",
    )
    parser.add_argument(
        "-s", "--server", help="distributed server to use for creating nodes"
    )
    options = parser.parse_args()
    return options
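Editor's note: the distributed_* examples in this directory all share this entry pattern: parse the -a/--address and -s/--server flags with distributed_parser.parse and hand the resulting namespace to main(). A minimal sketch of how those flags surface as attributes is shown below; the addresses are placeholders for illustration and are not taken from the repository, and in practice core-daemon must already be running on the remote host.

import sys

import distributed_parser

# hypothetical addresses: the local GRE tunnel address and the remote core-daemon host
sys.argv = ["distributed_ptp.py", "-a", "10.0.0.1", "-s", "10.0.0.2"]
args = distributed_parser.parse("distributed_ptp.py")
print(args.address, args.server)  # -> 10.0.0.1 10.0.0.2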
48
daemon/examples/python/distributed_ptp.py
Normal file
@@ -0,0 +1,48 @@
import logging

import distributed_parser
from core.emulator.coreemu import CoreEmu
from core.emulator.emudata import IpPrefixes, NodeOptions
from core.emulator.enumerations import EventTypes


def main(args):
    # ip generator for example
    prefixes = IpPrefixes(ip4_prefix="10.83.0.0/16")

    # create emulator instance for creating sessions and utility methods
    coreemu = CoreEmu({"distributed_address": args.address})
    session = coreemu.create_session()

    # initialize distributed
    server_name = "core2"
    session.distributed.add_server(server_name, args.server)

    # must be in configuration state for nodes to start, when using "node_add" below
    session.set_state(EventTypes.CONFIGURATION_STATE)

    # create local node, switch, and remote nodes
    options = NodeOptions()
    node_one = session.add_node(node_options=options)
    options.emulation_server = server_name
    node_two = session.add_node(node_options=options)

    # create node interfaces and link
    interface_one = prefixes.create_interface(node_one)
    interface_two = prefixes.create_interface(node_two)
    session.add_link(node_one.id, node_two.id, interface_one, interface_two)

    # instantiate session
    session.instantiate()

    # pause script for verification
    input("press enter for shutdown")

    # shutdown session
    coreemu.shutdown()


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    args = distributed_parser.parse(__file__)
    main(args)
52
daemon/examples/python/distributed_switch.py
Normal file
@@ -0,0 +1,52 @@
import logging

import distributed_parser
from core.emulator.coreemu import CoreEmu
from core.emulator.emudata import IpPrefixes, NodeOptions
from core.emulator.enumerations import EventTypes, NodeTypes


def main(args):
    # ip generator for example
    prefixes = IpPrefixes(ip4_prefix="10.83.0.0/16")

    # create emulator instance for creating sessions and utility methods
    coreemu = CoreEmu(
        {"controlnet": "172.16.0.0/24", "distributed_address": args.address}
    )
    session = coreemu.create_session()

    # initialize distributed
    server_name = "core2"
    session.distributed.add_server(server_name, args.server)

    # must be in configuration state for nodes to start, when using "node_add" below
    session.set_state(EventTypes.CONFIGURATION_STATE)

    # create local node, switch, and remote nodes
    node_one = session.add_node()
    switch = session.add_node(_type=NodeTypes.SWITCH)
    options = NodeOptions()
    options.emulation_server = server_name
    node_two = session.add_node(node_options=options)

    # create node interfaces and link
    interface_one = prefixes.create_interface(node_one)
    interface_two = prefixes.create_interface(node_two)
    session.add_link(node_one.id, switch.id, interface_one=interface_one)
    session.add_link(node_two.id, switch.id, interface_one=interface_two)

    # instantiate session
    session.instantiate()

    # pause script for verification
    input("press enter for shutdown")

    # shutdown session
    coreemu.shutdown()


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    args = distributed_parser.parse(__file__)
    main(args)
@@ -1,19 +1,12 @@
#!/usr/bin/python -i
#
# Example CORE Python script that attaches N nodes to an EMANE 802.11abg network.

import datetime
import logging
import parser
from builtins import range

from core import load_logging_config
from core.emane.ieee80211abg import EmaneIeee80211abgModel
from core.emulator.coreemu import CoreEmu
from core.emulator.emudata import IpPrefixes
from core.emulator.enumerations import EventTypes

load_logging_config()


def example(options):
    # ip generator for example

@@ -42,24 +35,20 @@ def example(options):
    # instantiate session
    session.instantiate()

    # start a shell on the first node
    node = session.get_node(2)
    node.client.term("bash")

    # shutdown session
    input("press enter to exit...")
    coreemu.shutdown()


def main():
    logging.basicConfig(level=logging.INFO)
    options = parser.parse_options("emane80211")
    start = datetime.datetime.now()
    print(
        "running emane 80211 example: nodes(%s) time(%s)"
        % (options.nodes, options.time)
    logging.info(
        "running emane 80211 example: nodes(%s) time(%s)", options.nodes, options.time
    )
    example(options)
    print("elapsed time: %s" % (datetime.datetime.now() - start))
    logging.info("elapsed time: %s", datetime.datetime.now() - start)


if __name__ == "__main__" or __name__ == "__builtin__":
32
daemon/examples/python/parser.py
Normal file
@@ -0,0 +1,32 @@
import argparse

DEFAULT_NODES = 2
DEFAULT_TIME = 10
DEFAULT_STEP = 1


def parse_options(name):
    parser = argparse.ArgumentParser(description=f"Run {name} example")
    parser.add_argument(
        "-n",
        "--nodes",
        type=int,
        default=DEFAULT_NODES,
        help="number of nodes to create in this example",
    )
    parser.add_argument(
        "-t",
        "--time",
        type=int,
        default=DEFAULT_TIME,
        help="example iperf run time in seconds",
    )

    options = parser.parse_args()

    if options.nodes < 2:
        parser.error(f"invalid min number of nodes: {options.nodes}")
    if options.time < 1:
        parser.error(f"invalid test time: {options.time}")

    return options
@@ -1,21 +1,11 @@
#!/usr/bin/python
#
# run iperf to measure the effective throughput between two nodes when
# n nodes are connected to a virtual wlan; run test for testsec
# and repeat for minnodes <= n <= maxnodes with a step size of
# nodestep

import datetime
import logging
import parser
from builtins import range

from core import load_logging_config
from core.emulator.coreemu import CoreEmu
from core.emulator.emudata import IpPrefixes
from core.emulator.enumerations import EventTypes, NodeTypes

load_logging_config()


def example(options):
    # ip generator for example

@@ -44,24 +34,27 @@ def example(options):
    first_node = session.get_node(2)
    last_node = session.get_node(options.nodes + 1)

    print("starting iperf server on node: %s" % first_node.name)
    first_node.cmd(["iperf", "-s", "-D"])
    logging.info("starting iperf server on node: %s", first_node.name)
    first_node.cmd("iperf -s -D")
    first_node_address = prefixes.ip4_address(first_node)
    print("node %s connecting to %s" % (last_node.name, first_node_address))
    last_node.client.icmd(["iperf", "-t", str(options.time), "-c", first_node_address])
    first_node.cmd(["killall", "-9", "iperf"])
    logging.info("node %s connecting to %s", last_node.name, first_node_address)
    output = last_node.cmd(f"iperf -t {options.time} -c {first_node_address}")
    logging.info(output)
    first_node.cmd("killall -9 iperf")

    # shutdown session
    coreemu.shutdown()


def main():
    logging.basicConfig(level=logging.INFO)
    options = parser.parse_options("switch")

    start = datetime.datetime.now()
    print("running switch example: nodes(%s) time(%s)" % (options.nodes, options.time))
    logging.info(
        "running switch example: nodes(%s) time(%s)", options.nodes, options.time
    )
    example(options)
    print("elapsed time: %s" % (datetime.datetime.now() - start))
    logging.info("elapsed time: %s", datetime.datetime.now() - start)


if __name__ == "__main__":
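Editor's note: the reworked example captures the iperf client output with last_node.cmd(...) and logs the raw text. If a numeric figure is wanted instead, a small parse step can pull the reported bandwidth out of that output. The sketch below assumes iperf's default human-readable report, where test lines end with a value and a bits/sec unit; it is illustrative and not part of the repository.

import re


def iperf_bandwidth(output: str) -> str:
    """Return the last reported bandwidth (e.g. '94.1 Mbits/sec') from iperf output."""
    # default iperf text reports end lines with "<value> <unit>bits/sec"
    matches = re.findall(r"([\d.]+\s+[KMG]?bits/sec)", output)
    return matches[-1] if matches else "unknown"


# example with a captured report line
sample = "[  3]  0.0-10.0 sec   112 MBytes  94.1 Mbits/sec"
print(iperf_bandwidth(sample))  # -> 94.1 Mbits/sec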
@@ -1,17 +1,8 @@
#!/usr/bin/python
#
# run iperf to measure the effective throughput between two nodes when
# n nodes are connected to a virtual wlan; run test for testsec
# and repeat for minnodes <= n <= maxnodes with a step size of
# nodestep
from builtins import range
import logging

from core import load_logging_config
from core.emulator.emudata import IpPrefixes
from core.emulator.enumerations import EventTypes, NodeTypes

load_logging_config()


def example(nodes):
    # ip generator for example

@@ -38,4 +29,5 @@ def example(nodes):


if __name__ in {"__main__", "__builtin__"}:
    logging.basicConfig(level=logging.INFO)
    example(2)
@@ -1,22 +1,12 @@
#!/usr/bin/python
#
# run iperf to measure the effective throughput between two nodes when
# n nodes are connected to a virtual wlan; run test for testsec
# and repeat for minnodes <= n <= maxnodes with a step size of
# nodestep

import datetime
import logging
import parser
from builtins import range

from core import load_logging_config
from core.emulator.coreemu import CoreEmu
from core.emulator.emudata import IpPrefixes, NodeOptions
from core.emulator.enumerations import EventTypes, NodeTypes
from core.location.mobility import BasicRangeModel

load_logging_config()


def example(options):
    # ip generator for example

@@ -48,24 +38,27 @@ def example(options):
    first_node = session.get_node(2)
    last_node = session.get_node(options.nodes + 1)

    print("starting iperf server on node: %s" % first_node.name)
    first_node.cmd(["iperf", "-s", "-D"])
    logging.info("starting iperf server on node: %s", first_node.name)
    first_node.cmd("iperf -s -D")
    address = prefixes.ip4_address(first_node)
    print("node %s connecting to %s" % (last_node.name, address))
    last_node.client.icmd(["iperf", "-t", str(options.time), "-c", address])
    first_node.cmd(["killall", "-9", "iperf"])
    logging.info("node %s connecting to %s", last_node.name, address)
    last_node.cmd(f"iperf -t {options.time} -c {address}")
    first_node.cmd("killall -9 iperf")

    # shutdown session
    coreemu.shutdown()


def main():
    logging.basicConfig(level=logging.INFO)
    options = parser.parse_options("wlan")

    start = datetime.datetime.now()
    print("running wlan example: nodes(%s) time(%s)" % (options.nodes, options.time))
    logging.info(
        "running wlan example: nodes(%s) time(%s)", options.nodes, options.time
    )
    example(options)
    print("elapsed time: %s" % (datetime.datetime.now() - start))
    logging.info("elapsed time: %s", datetime.datetime.now() - start)


if __name__ == "__main__":
@@ -1,50 +0,0 @@
#!/usr/bin/env python
# (c)2010-2012 the Boeing Company
# author: Jeff Ahrenholz <jeffrey.m.ahrenholz@boeing.com>
#
# List and stop CORE sessions from the command line.
#

import optparse
import socket

from core.api.tlv import coreapi
from core.emulator.enumerations import CORE_API_PORT, MessageFlags, SessionTlvs


def main():
    parser = optparse.OptionParser(usage="usage: %prog [-l] <sessionid>")
    parser.add_option(
        "-l", "--list", dest="list", action="store_true", help="list running sessions"
    )
    (options, args) = parser.parse_args()

    if options.list is True:
        num = "0"
        flags = MessageFlags.STRING.value
    else:
        num = args[0]
        flags = MessageFlags.DELETE.value
    tlvdata = coreapi.CoreSessionTlv.pack(SessionTlvs.NUMBER.value, num)
    message = coreapi.CoreSessionMessage.pack(flags, tlvdata)

    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect(("localhost", CORE_API_PORT))
    sock.send(message)

    # receive and print a session list
    if options.list is True:
        hdr = sock.recv(coreapi.CoreMessage.header_len)
        msgtype, msgflags, msglen = coreapi.CoreMessage.unpack_header(hdr)
        data = ""
        if msglen:
            data = sock.recv(msglen)
        message = coreapi.CoreMessage(msgflags, hdr, data)
        sessions = message.get_tlv(coreapi.SessionTlvs.NUMBER.value)
        print("sessions: {}".format(sessions))

    sock.close()


if __name__ == "__main__":
    main()
@@ -25,6 +25,8 @@ service CoreApi {
    }
    rpc SetSessionState (SetSessionStateRequest) returns (SetSessionStateResponse) {
    }
    rpc AddSessionServer (AddSessionServerRequest) returns (AddSessionServerResponse) {
    }

    // streams
    rpc Events (EventsRequest) returns (stream Event) {

@@ -119,6 +121,8 @@ service CoreApi {
    // utilities
    rpc GetInterfaces (GetInterfacesRequest) returns (GetInterfacesResponse) {
    }
    rpc EmaneLink (EmaneLinkRequest) returns (EmaneLinkResponse) {
    }
}

// rpc request/response messages

@@ -199,6 +203,16 @@ message SetSessionStateResponse {
    bool result = 1;
}

message AddSessionServerRequest {
    int32 session_id = 1;
    string name = 2;
    string host = 3;
}

message AddSessionServerResponse {
    bool result = 1;
}

message EventsRequest {
    int32 session_id = 1;
}

@@ -314,6 +328,7 @@ message EditNodeRequest {
    int32 session_id = 1;
    int32 node_id = 2;
    Position position = 3;
    string icon = 4;
}

message EditNodeResponse {

@@ -633,6 +648,17 @@ message GetInterfacesResponse {
    repeated string interfaces = 1;
}

message EmaneLinkRequest {
    int32 session_id = 1;
    int32 nem_one = 2;
    int32 nem_two = 3;
    bool linked = 4;
}

message EmaneLinkResponse {
    bool result = 1;
}

// data structures for messages below
message MessageType {
    enum Enum {

@@ -788,6 +814,7 @@ message Node {
    string icon = 8;
    string opaque = 9;
    string image = 10;
    string server = 11;
}

message Link {
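Editor's note: the new AddSessionServer RPC is the gRPC counterpart of session.distributed.add_server used in the Python examples above. A rough client-side sketch against the protoc-generated stub follows; the module names (core_pb2, core_pb2_grpc), the session id, and the daemon address are assumptions for illustration only.

import grpc

# assumed names for the protoc-generated modules built from core.proto
import core_pb2
import core_pb2_grpc

# the daemon's default gRPC listen address per the core-daemon defaults below
channel = grpc.insecure_channel("localhost:50051")
stub = core_pb2_grpc.CoreApiStub(channel)

# register host "core2" as a distributed server for an existing session
request = core_pb2.AddSessionServerRequest(session_id=1, name="core2", host="10.0.0.2")
response = stub.AddSessionServer(request)
print(response.result)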
@@ -1,7 +1,6 @@
configparser==4.0.2
future==0.17.1
fabric==2.5.0
grpcio==1.23.0
grpcio-tools==1.21.1
invoke==1.3.0
lxml==4.4.1
protobuf==3.9.1
six==1.12.0
@@ -7,17 +7,19 @@ message handlers are defined and some support for sending messages.

import argparse
import logging
import os
import sys
import threading
import time
from configparser import ConfigParser

from core import constants, load_logging_config
from core import constants
from core.api.grpc.server import CoreGrpcServer
from core.api.tlv.corehandlers import CoreHandler, CoreUdpHandler
from core.api.tlv.coreserver import CoreServer, CoreUdpServer
from core.emulator import enumerations
from core.utils import close_onexec
from core.constants import CORE_CONF_DIR, COREDPY_VERSION
from core.emulator.enumerations import CORE_API_PORT
from core.utils import close_onexec, load_logging_config


def banner():

@@ -64,12 +66,13 @@ def cored(cfg):
        sys.exit(1)

    # initialize grpc api
    if cfg["grpc"] == "True":
        grpc_server = CoreGrpcServer(server.coreemu)
        grpc_address = "%s:%s" % (cfg["grpcaddress"], cfg["grpcport"])
        grpc_thread = threading.Thread(target=grpc_server.listen, args=(grpc_address,))
        grpc_thread.daemon = True
        grpc_thread.start()
    grpc_server = CoreGrpcServer(server.coreemu)
    address_config = cfg["grpcaddress"]
    port_config = cfg["grpcport"]
    grpc_address = f"{address_config}:{port_config}"
    grpc_thread = threading.Thread(target=grpc_server.listen, args=(grpc_address,))
    grpc_thread.daemon = True
    grpc_thread.start()

    # start udp server
    start_udp(server, address)

@@ -90,31 +93,33 @@ def get_merged_config(filename):
    :rtype: dict
    """
    # these are the defaults used in the config file
    default_log = os.path.join(constants.CORE_CONF_DIR, "logging.conf")
    default_grpc_port = "50051"
    default_threads = "1"
    default_address = "localhost"
    defaults = {
        "port": "%d" % enumerations.CORE_API_PORT,
        "listenaddr": "localhost",
        "xmlfilever": "1.0",
        "numthreads": "1",
        "grpcport": "50051",
        "grpcaddress": "localhost",
        "logfile": ""
        "port": str(CORE_API_PORT),
        "listenaddr": default_address,
        "numthreads": default_threads,
        "grpcport": default_grpc_port,
        "grpcaddress": default_address,
        "logfile": default_log
    }

    parser = argparse.ArgumentParser(
        description="CORE daemon v.%s instantiates Linux network namespace nodes." % constants.COREDPY_VERSION)
        description=f"CORE daemon v.{COREDPY_VERSION} instantiates Linux network namespace nodes.")
    parser.add_argument("-f", "--configfile", dest="configfile",
                        help="read config from specified file; default = %s" % filename)
                        help=f"read config from specified file; default = {filename}")
    parser.add_argument("-p", "--port", dest="port", type=int,
                        help="port number to listen on; default = %s" % defaults["port"])
                        help=f"port number to listen on; default = {CORE_API_PORT}")
    parser.add_argument("-n", "--numthreads", dest="numthreads", type=int,
                        help="number of server threads; default = %s" % defaults["numthreads"])
                        help=f"number of server threads; default = {default_threads}")
    parser.add_argument("--ovs", action="store_true", help="enable experimental ovs mode, default is false")
    parser.add_argument("--grpc", action="store_true", help="enable grpc api, default is false")
    parser.add_argument("--grpc-port", dest="grpcport",
                        help="grpc port to listen on; default %s" % defaults["grpcport"])
                        help=f"grpc port to listen on; default {default_grpc_port}")
    parser.add_argument("--grpc-address", dest="grpcaddress",
                        help="grpc address to listen on; default %s" % defaults["grpcaddress"])
    parser.add_argument("-l", "--logfile", help="core logging configuration; default %s" % defaults["logfile"])
                        help=f"grpc address to listen on; default {default_address}")
    parser.add_argument("-l", "--logfile", help=f"core logging configuration; default {default_log}")

    # parse command line options
    args = parser.parse_args()

@@ -126,16 +131,13 @@ def get_merged_config(filename):
    cfg = ConfigParser(defaults)
    cfg.read(filename)

    # load logging configuration
    load_logging_config(args.logfile)

    section = "core-daemon"
    if not cfg.has_section(section):
        cfg.add_section(section)

    # merge command line with config file
    for opt in args.__dict__:
        val = args.__dict__[opt]
    # merge argparse with configparser
    for opt in vars(args):
        val = getattr(args, opt)
        if val is not None:
            cfg.set(section, opt, str(val))

@@ -149,7 +151,11 @@ def main():
    :return: nothing
    """
    # get a configuration merged from config file and command-line arguments
    cfg = get_merged_config("%s/core.conf" % constants.CORE_CONF_DIR)
    cfg = get_merged_config(f"{CORE_CONF_DIR}/core.conf")

    # load logging configuration
    load_logging_config(cfg["logfile"])

    banner()

    try:
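Editor's note: the get_merged_config change keeps the same precedence rule as before: values from core.conf act as ConfigParser defaults, and only options actually supplied on the command line override them via cfg.set. A self-contained sketch of that merge pattern, with made-up option names and values purely for illustration:

import argparse
from configparser import ConfigParser

defaults = {"port": "4038", "grpcport": "50051"}

parser = argparse.ArgumentParser()
parser.add_argument("-p", "--port", dest="port", type=int)
parser.add_argument("--grpc-port", dest="grpcport")
args = parser.parse_args(["-p", "5000"])

cfg = ConfigParser(defaults)
section = "core-daemon"
cfg.add_section(section)

# only options explicitly given on the command line override the file defaults
for opt in vars(args):
    val = getattr(args, opt)
    if val is not None:
        cfg.set(section, opt, str(val))

print(dict(cfg[section]))  # port overridden to 5000, grpcport keeps its default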
@@ -38,7 +38,7 @@ class FileUpdater(object):
        txt = "Updating"
        if self.action == "check":
            txt = "Checking"
        sys.stdout.write("%s file: %s\n" % (txt, self.filename))
        sys.stdout.write(f"{txt} file: {self.filename}\n")

        if self.target == "service":
            r = self.update_file(fn=self.update_services)

@@ -52,9 +52,9 @@ class FileUpdater(object):
        if not r:
            txt = "NOT "
        if self.action == "check":
            sys.stdout.write("String %sfound.\n" % txt)
            sys.stdout.write(f"String {txt} found.\n")
        else:
            sys.stdout.write("File %supdated.\n" % txt)
            sys.stdout.write(f"File {txt} updated.\n")

        return r


@@ -70,7 +70,7 @@ class FileUpdater(object):
        r = self.update_keyvals(key, vals)
        if self.action == "check":
            return r
        valstr = "%s" % r
        valstr = str(r)
        return "= ".join([key, valstr]) + "\n"

    def update_emane_models(self, line):

@@ -125,7 +125,7 @@ class FileUpdater(object):
        else:
            raise ValueError("unknown target")
        if not os.path.exists(filename):
            raise ValueError("file %s does not exist" % filename)
            raise ValueError(f"file {filename} does not exist")
        return search, filename

    def update_file(self, fn=None):

@@ -187,18 +187,17 @@ class FileUpdater(object):


def main():
    actions = ", ".join(FileUpdater.actions)
    targets = ", ".join(FileUpdater.targets)
    usagestr = "usage: %prog [-h] [options] <action> <target> <string>\n"
    usagestr += "\nHelper tool to add, remove, or check for "
    usagestr += "services, models, and node types\nin a CORE installation.\n"
    usagestr += "\nExamples:\n %prog add service newrouting"
    usagestr += "\n %prog -v check model RfPipe"
    usagestr += "\n %prog --userpath=\"$HOME/.core\" add nodetype \"{ftp ftp.gif ftp.gif {DefaultRoute FTP} netns {FTP server} }\" \n"
    usagestr += "\nArguments:\n <action> should be one of: %s" % \
        ", ".join(FileUpdater.actions)
    usagestr += "\n <target> should be one of: %s" % \
        ", ".join(FileUpdater.targets)
    usagestr += "\n <string> is the text to %s" % \
        ", ".join(FileUpdater.actions)
    usagestr += f"\nArguments:\n <action> should be one of: {actions}"
    usagestr += f"\n <target> should be one of: {targets}"
    usagestr += f"\n <string> is the text to {actions}"
    parser = optparse.OptionParser(usage=usagestr)
    parser.set_defaults(userpath=None, verbose=False, )


@@ -222,14 +221,14 @@ def main():

    action = args[0]
    if action not in FileUpdater.actions:
        usage("invalid action %s" % action, 1)
        usage(f"invalid action {action}", 1)

    target = args[1]
    if target not in FileUpdater.targets:
        usage("invalid target %s" % target, 1)
        usage(f"invalid target {target}", 1)

    if target == "nodetype" and not options.userpath:
        usage("user path option required for this target (%s)" % target)
        usage(f"user path option required for this target ({target})")

    data = args[2]

@@ -237,7 +236,7 @@ def main():
        up = FileUpdater(action, target, data, options)
        r = up.process()
    except Exception as e:
        sys.stderr.write("Exception: %s\n" % e)
        sys.stderr.write(f"Exception: {e}\n")
        sys.exit(1)
    if not r:
        sys.exit(1)
@ -21,9 +21,9 @@ def print_available_tlvs(t, tlv_class):
|
|||
"""
|
||||
Print a TLV list.
|
||||
"""
|
||||
print("TLVs available for %s message:" % t)
|
||||
print(f"TLVs available for {t} message:")
|
||||
for tlv in sorted([tlv for tlv in tlv_class.tlv_type_map], key=lambda x: x.name):
|
||||
print("%s:%s" % (tlv.value, tlv.name))
|
||||
print(f"{tlv.value}:{tlv.name}")
|
||||
|
||||
|
||||
def print_examples(name):
|
||||
|
@ -54,9 +54,9 @@ def print_examples(name):
|
|||
"srcname=\"./test.log\"",
|
||||
"move a test.log file from host to node 2"),
|
||||
]
|
||||
print("Example %s invocations:" % name)
|
||||
print(f"Example {name} invocations:")
|
||||
for cmd, descr in examples:
|
||||
print(" %s %s\n\t\t%s" % (name, cmd, descr))
|
||||
print(f" {name} {cmd}\n\t\t{descr}")
|
||||
|
||||
|
||||
def receive_message(sock):
|
||||
|
@ -86,11 +86,11 @@ def receive_message(sock):
|
|||
except KeyError:
|
||||
msg = coreapi.CoreMessage(msgflags, msghdr, msgdata)
|
||||
msg.message_type = msgtype
|
||||
print("unimplemented CORE message type: %s" % msg.type_str())
|
||||
print(f"unimplemented CORE message type: {msg.type_str()}")
|
||||
return msg
|
||||
if len(data) > msglen + coreapi.CoreMessage.header_len:
|
||||
print("received a message of type %d, dropping %d bytes of extra data" \
|
||||
% (msgtype, len(data) - (msglen + coreapi.CoreMessage.header_len)))
|
||||
data_size = len(data) - (msglen + coreapi.CoreMessage.header_len)
|
||||
print(f"received a message of type {msgtype}, dropping {data_size} bytes of extra data")
|
||||
return msgcls(msgflags, msghdr, msgdata)
|
||||
|
||||
|
||||
|
@ -132,7 +132,7 @@ def connect_to_session(sock, requested):
|
|||
print("requested session not found!")
|
||||
return False
|
||||
|
||||
print("joining session: %s" % session)
|
||||
print(f"joining session: {session}")
|
||||
tlvdata = coreapi.CoreSessionTlv.pack(SessionTlvs.NUMBER.value, session)
|
||||
flags = MessageFlags.ADD.value
|
||||
smsg = coreapi.CoreSessionMessage.pack(flags, tlvdata)
|
||||
|
@ -147,9 +147,9 @@ def receive_response(sock, opt):
|
|||
print("waiting for response...")
|
||||
msg = receive_message(sock)
|
||||
if msg is None:
|
||||
print("disconnected from %s:%s" % (opt.address, opt.port))
|
||||
print(f"disconnected from {opt.address}:{opt.port}")
|
||||
sys.exit(0)
|
||||
print("received message: %s" % msg)
|
||||
print(f"received message: {msg}")
|
||||
|
||||
|
||||
def main():
|
||||
|
@ -160,36 +160,36 @@ def main():
|
|||
flags = [flag.name for flag in MessageFlags]
|
||||
usagestr = "usage: %prog [-h|-H] [options] [message-type] [flags=flags] "
|
||||
usagestr += "[message-TLVs]\n\n"
|
||||
usagestr += "Supported message types:\n %s\n" % types
|
||||
usagestr += "Supported message flags (flags=f1,f2,...):\n %s" % flags
|
||||
usagestr += f"Supported message types:\n {types}\n"
|
||||
usagestr += f"Supported message flags (flags=f1,f2,...):\n {flags}"
|
||||
parser = optparse.OptionParser(usage=usagestr)
|
||||
default_address = "localhost"
|
||||
default_session = None
|
||||
default_tcp = False
|
||||
parser.set_defaults(
|
||||
port=CORE_API_PORT,
|
||||
address="localhost",
|
||||
session=None,
|
||||
address=default_address,
|
||||
session=default_session,
|
||||
listen=False,
|
||||
examples=False,
|
||||
tlvs=False,
|
||||
tcp=False
|
||||
tcp=default_tcp
|
||||
)
|
||||
|
||||
parser.add_option("-H", dest="examples", action="store_true",
|
||||
help="show example usage help message and exit")
|
||||
parser.add_option("-p", "--port", dest="port", type=int,
|
||||
help="TCP port to connect to, default: %d" % \
|
||||
parser.defaults["port"])
|
||||
help=f"TCP port to connect to, default: {CORE_API_PORT}")
|
||||
parser.add_option("-a", "--address", dest="address", type=str,
|
||||
help="Address to connect to, default: %s" % \
|
||||
parser.defaults["address"])
|
||||
help=f"Address to connect to, default: {default_address}")
|
||||
parser.add_option("-s", "--session", dest="session", type=str,
|
||||
help="Session to join, default: %s" % \
|
||||
parser.defaults["session"])
|
||||
help=f"Session to join, default: {default_session}")
|
||||
parser.add_option("-l", "--listen", dest="listen", action="store_true",
|
||||
help="Listen for a response message and print it.")
|
||||
parser.add_option("-t", "--list-tlvs", dest="tlvs", action="store_true",
|
||||
help="List TLVs for the specified message type.")
|
||||
parser.add_option("--tcp", dest="tcp", action="store_true",
|
||||
help="Use TCP instead of UDP and connect to a session default: %s" % parser.defaults["tcp"])
|
||||
help=f"Use TCP instead of UDP and connect to a session default: {default_tcp}")
|
||||
|
||||
def usage(msg=None, err=0):
|
||||
sys.stdout.write("\n")
|
||||
|
@ -209,7 +209,7 @@ def main():
|
|||
# given a message type t, determine the message and TLV classes
|
||||
t = args.pop(0)
|
||||
if t not in types:
|
||||
usage("Unknown message type requested: %s" % t)
|
||||
usage(f"Unknown message type requested: {t}")
|
||||
message_type = MessageTypes[t]
|
||||
msg_cls = coreapi.CLASS_MAP[message_type.value]
|
||||
tlv_cls = msg_cls.tlv_class
|
||||
|
@ -225,7 +225,7 @@ def main():
|
|||
for a in args:
|
||||
typevalue = a.split("=")
|
||||
if len(typevalue) < 2:
|
||||
usage("Use \"type=value\" syntax instead of \"%s\"." % a)
|
||||
usage(f"Use \"type=value\" syntax instead of \"{a}\".")
|
||||
tlv_typestr = typevalue[0]
|
||||
tlv_valstr = "=".join(typevalue[1:])
|
||||
if tlv_typestr == "flags":
|
||||
|
@ -237,7 +237,7 @@ def main():
|
|||
tlv_type = tlv_cls.tlv_type_map[tlv_name]
|
||||
tlvdata += tlv_cls.pack_string(tlv_type.value, tlv_valstr)
|
||||
except KeyError:
|
||||
usage("Unknown TLV: \"%s\"" % tlv_name)
|
||||
usage(f"Unknown TLV: \"{tlv_name}\"")
|
||||
|
||||
flags = 0
|
||||
for f in flagstr.split(","):
|
||||
|
@ -249,7 +249,7 @@ def main():
|
|||
n = flag_enum.value
|
||||
flags |= n
|
||||
except KeyError:
|
||||
usage("Invalid flag \"%s\"." % f)
|
||||
usage(f"Invalid flag \"{f}\".")
|
||||
|
||||
msg = msg_cls.pack(flags, tlvdata)
|
||||
|
||||
|
@ -264,7 +264,7 @@ def main():
|
|||
try:
|
||||
sock.connect((opt.address, opt.port))
|
||||
except Exception as e:
|
||||
print("Error connecting to %s:%s:\n\t%s" % (opt.address, opt.port, e))
|
||||
print(f"Error connecting to {opt.address}:{opt.port}:\n\t{e}")
|
||||
sys.exit(1)
|
||||
|
||||
if opt.tcp and not connect_to_session(sock, opt.session):
|
||||
|
|
|
@ -34,16 +34,12 @@ setup(
|
|||
version="@PACKAGE_VERSION@",
|
||||
packages=find_packages(),
|
||||
install_requires=[
|
||||
"configparser",
|
||||
"future",
|
||||
"fabric",
|
||||
"grpcio",
|
||||
"invoke",
|
||||
"lxml",
|
||||
"protobuf",
|
||||
],
|
||||
extra_require={
|
||||
":python_version<'3.2'": ["futures"],
|
||||
":python_version<'3.4'": ["enum34"],
|
||||
},
|
||||
tests_require=[
|
||||
"pytest",
|
||||
"mock",
|
||||
|
|
|
@ -58,7 +58,6 @@ class CoreServerTest(object):
|
|||
self.request_handler = CoreHandler(request_mock, "", self.server)
|
||||
self.request_handler.session = self.session
|
||||
self.request_handler.add_session_handlers()
|
||||
self.session.broker.session_clients.append(self.request_handler)
|
||||
|
||||
# have broker handle a configuration state change
|
||||
self.session.set_state(EventTypes.DEFINITION_STATE)
|
||||
|
@ -68,11 +67,7 @@ class CoreServerTest(object):
|
|||
self.request_handler.handle_message(message)
|
||||
|
||||
# add broker server for distributed core
|
||||
distributed = "%s:%s:%s" % (
|
||||
self.distributed_server,
|
||||
distributed_address,
|
||||
self.port,
|
||||
)
|
||||
distributed = f"{self.distributed_server}:{distributed_address}:{self.port}"
|
||||
message = CoreConfMessage.create(
|
||||
0,
|
||||
[
|
||||
|
|
|
@ -206,7 +206,7 @@ class TestDistributed:
|
|||
|
||||
# test a ping command
|
||||
node_one = cored.session.get_node(1)
|
||||
message = command_message(node_one, "ping -c 5 %s" % ip4_address)
|
||||
message = command_message(node_one, f"ping -c 5 {ip4_address}")
|
||||
cored.request_handler.dispatch_replies = validate_response
|
||||
cored.request_handler.handle_message(message)
|
||||
|
||||
|
@ -259,7 +259,7 @@ class TestDistributed:
|
|||
|
||||
# test a ping command
|
||||
node_one = cored.session.get_node(1)
|
||||
message = command_message(node_one, "ping -c 5 %s" % ip4_address)
|
||||
message = command_message(node_one, f"ping -c 5 {ip4_address}")
|
||||
cored.request_handler.dispatch_replies = validate_response
|
||||
cored.request_handler.handle_message(message)
|
||||
|
||||
|
@ -307,7 +307,7 @@ class TestDistributed:
|
|||
|
||||
# test a ping command
|
||||
node_one = cored.session.get_node(1)
|
||||
message = command_message(node_one, "ping -c 5 %s" % ip4_address)
|
||||
message = command_message(node_one, f"ping -c 5 {ip4_address}")
|
||||
cored.request_handler.dispatch_replies = validate_response
|
||||
cored.request_handler.handle_message(message)
|
||||
cored.request_handler.handle_message(message)
|
||||
|
|
|
@ -6,13 +6,13 @@ from xml.etree import ElementTree
|
|||
|
||||
import pytest
|
||||
|
||||
from core import CoreError
|
||||
from core.emane.bypass import EmaneBypassModel
|
||||
from core.emane.commeffect import EmaneCommEffectModel
|
||||
from core.emane.ieee80211abg import EmaneIeee80211abgModel
|
||||
from core.emane.rfpipe import EmaneRfPipeModel
|
||||
from core.emane.tdma import EmaneTdmaModel
|
||||
from core.emulator.emudata import NodeOptions
|
||||
from core.errors import CoreCommandError, CoreError
|
||||
|
||||
_EMANE_MODELS = [
|
||||
EmaneIeee80211abgModel,
|
||||
|
@ -26,7 +26,12 @@ _DIR = os.path.dirname(os.path.abspath(__file__))
|
|||
|
||||
def ping(from_node, to_node, ip_prefixes, count=3):
|
||||
address = ip_prefixes.ip4_address(to_node)
|
||||
return from_node.cmd(["ping", "-c", str(count), address])
|
||||
try:
|
||||
from_node.cmd(f"ping -c {count} {address}")
|
||||
status = 0
|
||||
except CoreCommandError as e:
|
||||
status = e.returncode
|
||||
return status
|
||||
|
||||
|
||||
class TestEmane:
|
||||
|
|
|
@ -3,43 +3,28 @@ Unit tests for testing basic CORE networks.
|
|||
"""
|
||||
|
||||
import os
|
||||
import stat
|
||||
import subprocess
|
||||
import threading
|
||||
|
||||
import pytest
|
||||
|
||||
from core.emulator.emudata import NodeOptions
|
||||
from core.emulator.enumerations import MessageFlags, NodeTypes
|
||||
from core.errors import CoreCommandError
|
||||
from core.location.mobility import BasicRangeModel, Ns2ScriptedMobility
|
||||
from core.nodes.client import VnodeClient
|
||||
|
||||
_PATH = os.path.abspath(os.path.dirname(__file__))
|
||||
_MOBILITY_FILE = os.path.join(_PATH, "mobility.scen")
|
||||
_WIRED = [NodeTypes.PEER_TO_PEER, NodeTypes.HUB, NodeTypes.SWITCH]
|
||||
|
||||
|
||||
def createclients(sessiondir, clientcls=VnodeClient, cmdchnlfilterfunc=None):
|
||||
"""
|
||||
Create clients
|
||||
|
||||
:param str sessiondir: session directory to create clients
|
||||
:param class clientcls: class to create clients from
|
||||
:param func cmdchnlfilterfunc: command channel filter function
|
||||
:return: list of created clients
|
||||
:rtype: list
|
||||
"""
|
||||
direntries = map(lambda x: os.path.join(sessiondir, x), os.listdir(sessiondir))
|
||||
cmdchnls = list(filter(lambda x: stat.S_ISSOCK(os.stat(x).st_mode), direntries))
|
||||
if cmdchnlfilterfunc:
|
||||
cmdchnls = list(filter(cmdchnlfilterfunc, cmdchnls))
|
||||
cmdchnls.sort()
|
||||
return map(lambda x: clientcls(os.path.basename(x), x), cmdchnls)
|
||||
|
||||
|
||||
def ping(from_node, to_node, ip_prefixes):
|
||||
address = ip_prefixes.ip4_address(to_node)
|
||||
return from_node.cmd(["ping", "-c", "3", address])
|
||||
try:
|
||||
from_node.cmd(f"ping -c 3 {address}")
|
||||
status = 0
|
||||
except CoreCommandError as e:
|
||||
status = e.returncode
|
||||
return status
|
||||
|
||||
|
||||
class TestCore:
|
||||
|
@ -101,34 +86,8 @@ class TestCore:
|
|||
# check we are connected
|
||||
assert client.connected()
|
||||
|
||||
# check various command using vcmd module
|
||||
command = ["ls"]
|
||||
assert not client.cmd(command)
|
||||
status, output = client.cmd_output(command)
|
||||
assert not status
|
||||
p, stdin, stdout, stderr = client.popen(command)
|
||||
assert not p.wait()
|
||||
assert not client.icmd(command)
|
||||
assert not client.redircmd(
|
||||
subprocess.PIPE, subprocess.PIPE, subprocess.PIPE, command
|
||||
)
|
||||
assert not client.shcmd(command[0])
|
||||
|
||||
# check various command using command line
|
||||
assert not client.cmd(command)
|
||||
status, output = client.cmd_output(command)
|
||||
assert not status
|
||||
p, stdin, stdout, stderr = client.popen(command)
|
||||
assert not p.wait()
|
||||
assert not client.icmd(command)
|
||||
assert not client.shcmd(command[0])
|
||||
|
||||
# check module methods
|
||||
assert createclients(session.session_dir)
|
||||
|
||||
# check convenience methods for interface information
|
||||
assert client.getaddr("eth0")
|
||||
assert client.netifstats()
|
||||
# validate command
|
||||
assert client.check_cmd("echo hello") == "hello"
|
||||
|
||||
def test_netif(self, session, ip_prefixes):
|
||||
"""
|
||||
|
|
|
@ -1,11 +1,9 @@
|
|||
import time
|
||||
from builtins import int
|
||||
from queue import Queue
|
||||
|
||||
import grpc
|
||||
import pytest
|
||||
|
||||
from core import CoreError
|
||||
from core.api.grpc import core_pb2
|
||||
from core.api.grpc.client import CoreGrpcClient
|
||||
from core.config import ConfigShim
|
||||
|
@ -18,6 +16,7 @@ from core.emulator.enumerations import (
|
|||
ExceptionLevels,
|
||||
NodeTypes,
|
||||
)
|
||||
from core.errors import CoreError
|
||||
from core.location.mobility import BasicRangeModel, Ns2ScriptedMobility
|
||||
|
||||
|
||||
|
@ -207,8 +206,7 @@ class TestGrpc:
|
|||
# then
|
||||
assert response.node.id == node.id
|
||||
|
||||
@pytest.mark.parametrize("node_id, expected", [(1, True), (2, False)])
|
||||
def test_edit_node(self, grpc_server, node_id, expected):
|
||||
def test_edit_node(self, grpc_server):
|
||||
# given
|
||||
client = CoreGrpcClient()
|
||||
session = grpc_server.coreemu.create_session()
|
||||
|
@ -218,13 +216,12 @@ class TestGrpc:
|
|||
x, y = 10, 10
|
||||
with client.context_connect():
|
||||
position = core_pb2.Position(x=x, y=y)
|
||||
response = client.edit_node(session.id, node_id, position)
|
||||
response = client.edit_node(session.id, node.id, position)
|
||||
|
||||
# then
|
||||
assert response.result is expected
|
||||
if expected is True:
|
||||
assert node.position.x == x
|
||||
assert node.position.y == y
|
||||
assert response.result is True
|
||||
assert node.position.x == x
|
||||
assert node.position.y == y
|
||||
|
||||
@pytest.mark.parametrize("node_id, expected", [(1, True), (2, False)])
|
||||
def test_delete_node(self, grpc_server, node_id, expected):
|
||||
|
@ -254,7 +251,7 @@ class TestGrpc:
|
|||
output = "hello world"
|
||||
|
||||
# then
|
||||
command = "echo %s" % output
|
||||
command = f"echo {output}"
|
||||
with client.context_connect():
|
||||
response = client.node_command(session.id, node.id, command)
|
||||
|
||||
|
@ -864,7 +861,7 @@ class TestGrpc:
|
|||
client.events(session.id, handle_event)
|
||||
time.sleep(0.1)
|
||||
event = EventData(
|
||||
event_type=EventTypes.RUNTIME_STATE.value, time="%s" % time.time()
|
||||
event_type=EventTypes.RUNTIME_STATE.value, time=str(time.time())
|
||||
)
|
||||
session.broadcast_event(event)
|
||||
|
||||
|
|
|
@ -7,7 +7,6 @@ import time
|
|||
import mock
|
||||
import pytest
|
||||
|
||||
from core import CoreError
|
||||
from core.api.tlv import coreapi
|
||||
from core.emane.ieee80211abg import EmaneIeee80211abgModel
|
||||
from core.emulator.enumerations import (
|
||||
|
@ -24,12 +23,13 @@ from core.emulator.enumerations import (
|
|||
RegisterTlvs,
|
||||
SessionTlvs,
|
||||
)
|
||||
from core.errors import CoreError
|
||||
from core.location.mobility import BasicRangeModel
|
||||
from core.nodes.ipaddress import Ipv4Prefix
|
||||
|
||||
|
||||
def dict_to_str(values):
|
||||
return "|".join("%s=%s" % (x, values[x]) for x in values)
|
||||
return "|".join(f"{x}={values[x]}" for x in values)
|
||||
|
||||
|
||||
class TestGui:
|
||||
|
@ -383,7 +383,7 @@ class TestGui:
|
|||
message = coreapi.CoreFileMessage.create(
|
||||
MessageFlags.ADD.value,
|
||||
[
|
||||
(FileTlvs.TYPE, "hook:%s" % state),
|
||||
(FileTlvs.TYPE, f"hook:{state}"),
|
||||
(FileTlvs.NAME, file_name),
|
||||
(FileTlvs.DATA, file_data),
|
||||
],
|
||||
|
@ -406,7 +406,7 @@ class TestGui:
|
|||
MessageFlags.ADD.value,
|
||||
[
|
||||
(FileTlvs.NODE, node.id),
|
||||
(FileTlvs.TYPE, "service:%s" % service),
|
||||
(FileTlvs.TYPE, f"service:{service}"),
|
||||
(FileTlvs.NAME, file_name),
|
||||
(FileTlvs.DATA, file_data),
|
||||
],
|
||||
|
@ -760,16 +760,14 @@ class TestGui:
|
|||
[
|
||||
(ConfigTlvs.OBJECT, "broker"),
|
||||
(ConfigTlvs.TYPE, ConfigFlags.UPDATE.value),
|
||||
(ConfigTlvs.VALUES, "%s:%s:%s" % (server, host, port)),
|
||||
(ConfigTlvs.VALUES, f"{server}:{host}:{port}"),
|
||||
],
|
||||
)
|
||||
coreserver.session.broker.addserver = mock.MagicMock()
|
||||
coreserver.session.broker.setupserver = mock.MagicMock()
|
||||
coreserver.session.distributed.add_server = mock.MagicMock()
|
||||
|
||||
coreserver.request_handler.handle_message(message)
|
||||
|
||||
coreserver.session.broker.addserver.assert_called_once_with(server, host, port)
|
||||
coreserver.session.broker.setupserver.assert_called_once_with(server)
|
||||
coreserver.session.distributed.add_server.assert_called_once_with(server, host)
|
||||
|
||||
def test_config_services_request_all(self, coreserver):
|
||||
message = coreapi.CoreConfMessage.create(
|
||||
|
@ -846,7 +844,7 @@ class TestGui:
|
|||
(ConfigTlvs.NODE, node.id),
|
||||
(ConfigTlvs.OBJECT, "services"),
|
||||
(ConfigTlvs.TYPE, ConfigFlags.UPDATE.value),
|
||||
(ConfigTlvs.OPAQUE, "service:%s" % service),
|
||||
(ConfigTlvs.OPAQUE, f"service:{service}"),
|
||||
(ConfigTlvs.VALUES, dict_to_str(values)),
|
||||
],
|
||||
)
|
||||
|
|
|
@ -3,9 +3,10 @@ import time
|
|||
|
||||
import pytest
|
||||
|
||||
from core import CoreError, utils
|
||||
from core import utils
|
||||
from core.emulator.emudata import NodeOptions
|
||||
from core.emulator.enumerations import NodeTypes
|
||||
from core.errors import CoreError
|
||||
|
||||
MODELS = ["router", "host", "PC", "mdr"]
|
||||
|
||||
|
@ -29,7 +30,7 @@ class TestNodes:
|
|||
assert os.path.exists(node.nodedir)
|
||||
assert node.alive()
|
||||
assert node.up
|
||||
assert node.check_cmd(["ip", "addr", "show", "lo"])
|
||||
assert node.cmd("ip address show lo")
|
||||
|
||||
def test_node_update(self, session):
|
||||
# given
|
||||
|
@ -66,4 +67,4 @@ class TestNodes:
|
|||
# then
|
||||
assert node
|
||||
assert node.up
|
||||
assert utils.check_cmd(["brctl", "show", node.brname])
|
||||
assert utils.cmd(f"brctl show {node.brname}")
|
||||
|
|
|
@ -2,9 +2,9 @@ from xml.etree import ElementTree
|
|||
|
||||
import pytest
|
||||
|
||||
from core import CoreError
|
||||
from core.emulator.emudata import LinkOptions, NodeOptions
|
||||
from core.emulator.enumerations import NodeTypes
|
||||
from core.errors import CoreError
|
||||
from core.location.mobility import BasicRangeModel
|
||||
from core.services.utility import SshService
|
||||
|
||||
|
|
|
@@ -9,39 +9,17 @@ A large emulation scenario can be deployed on multiple emulation servers and
controlled by a single GUI. The GUI, representing the entire topology, can be
run on one of the emulation servers or on a separate machine.

Each machine that will act as an emulation server would ideally have the
same version of CORE installed. It is not important to have the GUI component
but the CORE Python daemon **core-daemon** needs to be installed.

**NOTE: The server that the GUI connects with is referred to as
the master server.**
Each machine that will act as an emulation server will require the installation of a distributed CORE package and
some configuration to allow SSH as root.

## Configuring Listen Address
## Configuring SSH

First we need to configure the **core-daemon** on all servers to listen on an
interface over the network. The simplest way would be updating the core
configuration file to listen on all interfaces. Alternatively, configure it to
listen to the specific interface you desire by supplying the correct address.
Distributed CORE works using the python fabric library to run commands on remote servers over SSH.

The **listenaddr** configuration should be set to the address of the interface
that should receive CORE API control commands from the other servers;
setting **listenaddr = 0.0.0.0** causes the Python daemon to listen on all
interfaces. CORE uses TCP port **4038** by default to communicate from the
controlling machine (with GUI) to the emulation servers. Make sure that
firewall rules are configured as necessary to allow this traffic.
### Remote GUI Terminals

```shell
# open configuration file
vi /etc/core/core.conf

# within core.conf
[core-daemon]
listenaddr = 0.0.0.0
```

## Enabling Remote SSH Shells

### Update GUI Terminal Program
You need to have the same user defined on each server, since the user used
for these remote shells is the same user that is running the CORE GUI.

**Edit -> Preferences... -> Terminal program:**

@@ -54,31 +32,58 @@ May need to install xterm, if not already installed.
sudo apt install xterm
```

### Setup SSH
### Distributed Server SSH Configuration

In order to easily open shells on the emulation servers, the servers should be
running an SSH server, and public key login should be enabled. This is
accomplished by generating an SSH key for your user on all servers being used
for distributed emulation, if you do not already have one. Then copying your
master server public key to the authorized_keys file on all other servers that
will be used to help drive the distributed emulation. When double-clicking on a
node during runtime, instead of opening a local shell, the GUI will attempt to
SSH to the emulation server to run an interactive shell.
First the distributed servers must be configured to allow passwordless root login over SSH.

You need to have the same user defined on each server, since the user used
for these remote shells is the same user that is running the CORE GUI.

```shell
On distributed server:
```shell
# install openssh-server
sudo apt install openssh-server

# generate ssh if needed
ssh-keygen -o -t rsa -b 4096
# open sshd config
vi /etc/ssh/sshd_config

# verify these configurations in file
PermitRootLogin yes
PasswordAuthentication yes

# if desired add/modify the following line to allow SSH to
# accept all env variables
AcceptEnv *

# restart sshd
sudo systemctl restart sshd
```

On master server:
```shell
# install package if needed
sudo apt install openssh-client

# generate ssh key if needed
ssh-keygen -o -t rsa -b 4096 -f ~/.ssh/core

# copy public key to authorized_keys file
ssh-copy-id user@server
# or
scp ~/.ssh/id_rsa.pub username@server:~/.ssh/authorized_keys
ssh-copy-id -i ~/.ssh/core root@server

# configure fabric to use the core ssh key
sudo vi /etc/fabric.yml

# set configuration
connect_kwargs: {"key_filename": "/home/user/.ssh/core"}
```

On distributed server:
```shell
# open sshd config
vi /etc/ssh/sshd_config

# change configuration for root login to without password
PermitRootLogin without-password

# restart sshd
sudo systemctl restart sshd
```

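The fabric.yml setting above is consumed by the python fabric library that distributed CORE uses for its SSH connections. As a quick sanity check from the master server that the key and root login actually work, something along these lines can be run; the hostname and key path are placeholders, and this is only a sketch of a plain fabric call, not how CORE drives its servers internally.

```python
from fabric import Connection

# placeholders; substitute a real distributed server and the key configured above
server = "core2.example.com"
key = "/home/user/.ssh/core"

with Connection(f"root@{server}", connect_kwargs={"key_filename": key}) as remote:
    # run a harmless command on the remote server and print its output
    result = remote.run("ip link show", hide=True)
    print(result.stdout)
```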
## Add Emulation Servers in GUI
@@ -155,27 +160,16 @@ The names before the addresses need to match the servers configured in
controlnet = core1:172.16.1.0/24 core2:172.16.2.0/24 core3:172.16.3.0/24 core4:172.16.4.0/24 core5:172.16.5.0/24
```

EMANE appears to require location events for nodes to be sync'ed across
all EMANE instances for nodes to find each other. Using an EMANE eel file
for your scenario can help clear this up, which might be desired anyway;
a small illustrative sample is shown below.

* https://github.com/adjacentlink/emane/wiki/EEL-Generator

You can also move nodes within the GUI to help trigger location events from
CORE when the **core.conf** setting below is used, assuming the nodes
did not find each other by default and you are not using an eel file.

```shell
emane_event_generate = True
```
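For reference, an EEL file is just a text file of timestamped EMANE events; location entries look roughly like the lines below. The coordinates here are made up, and the authoritative syntax is documented on the EEL Generator wiki page linked above.

```
# illustrative only; see the EEL Generator wiki for the exact syntax
0.0  nem:1  location gps 40.031075,-74.523518,3.000000
0.0  nem:2  location gps 40.031165,-74.523412,3.000000
```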
## Distributed Checklist

1. Install the same version of the CORE daemon on all servers.
1. Set **listenaddr** configuration in all of the server's core.conf files,
   then start (or restart) the daemon.
1. Install CORE on master server
1. Install distributed CORE package on all servers needed
1. Install and configure public-key SSH access on all servers (if you want to use
   double-click shells or Widgets.)
   double-click shells or Widgets.) for both the GUI user (for terminals) and root for running CORE commands
1. Choose the servers that participate in distributed emulation.
1. Assign nodes to desired servers, empty for master server.
1. Press the **Start** button to launch the distributed emulation.
|
53 docs/grpc.md
@@ -1,60 +1,11 @@
# Using the gRPC API

The gRPC API is currently not turned on by default. There are a couple of ways that it can be enabled
for use.
gRPC is the main API for interfacing with CORE.

## Enabling gRPC

### HTTP Proxy
## HTTP Proxy

Since gRPC is HTTP2 based, proxy configurations can cause issues. Clear out your proxy when running if needed.

### Daemon Options

The gRPC API is enabled through options provided to the **core-daemon**.

```shell
usage: core-daemon [-h] [-f CONFIGFILE] [-p PORT] [-n NUMTHREADS] [--ovs]
                   [--grpc] [--grpc-port GRPCPORT]
                   [--grpc-address GRPCADDRESS]

CORE daemon v.5.3.0 instantiates Linux network namespace nodes.

optional arguments:
  -h, --help            show this help message and exit
  -f CONFIGFILE, --configfile CONFIGFILE
                        read config from specified file; default =
                        /etc/core/core.conf
  -p PORT, --port PORT  port number to listen on; default = 4038
  -n NUMTHREADS, --numthreads NUMTHREADS
                        number of server threads; default = 1
  --ovs                 enable experimental ovs mode, default is false
  --grpc                enable grpc api, default is false
  --grpc-port GRPCPORT  grpc port to listen on; default 50051
  --grpc-address GRPCADDRESS
                        grpc address to listen on; default localhost
```

### Enabling in Service Files

Modify service files to append the --grpc options as desired.

For sysv services /etc/init.d/core-daemon
```shell
CMD="PYTHONPATH=/usr/lib/python3.6/site-packages python3 /usr/bin/$NAME --grpc"
```

For systemd service /lib/systemd/system/core-daemon.service
```shell
ExecStart=@PYTHON@ @bindir@/core-daemon --grpc
```

### Enabling from Command Line

```shell
sudo core-daemon --grpc
```

## Python Client

A python client wrapper is provided at **core.api.grpc.client.CoreGrpcClient**.
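The snippet below is a minimal usage sketch for that wrapper, based only on the client calls exercised by this changeset's tests (**context_connect**, **edit_node**, **node_command**). It assumes a daemon running with gRPC enabled, and that the session and node ids refer to an existing scenario, e.g. one started from the GUI.

```python
from core.api.grpc import core_pb2
from core.api.grpc.client import CoreGrpcClient

# assumed to already exist in a running session; adjust to your scenario
session_id = 1
node_id = 1

client = CoreGrpcClient()
with client.context_connect():
    # move the node, then run a command inside it
    position = core_pb2.Position(x=100, y=100)
    client.edit_node(session_id, node_id, position)
    response = client.node_command(session_id, node_id, "ip addr show")
    print(response)
```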
|
@ -34,7 +34,7 @@ Install Path | Description
|
|||
/usr/bin/core-daemon|Daemon startup command
|
||||
/usr/bin/{core-cleanup, coresendmsg, core-manage}|Misc. helper commands/scripts
|
||||
/usr/lib/core|GUI files
|
||||
/usr/lib/python{2.7,3}/dist-packages/core|Python modules for daemon/scripts
|
||||
/usr/lib/python{3.6+}/dist-packages/core|Python modules for daemon/scripts
|
||||
/etc/core/|Daemon and log configuration files
|
||||
~/.core/|User-specific GUI preferences and scenario files
|
||||
/usr/share/core/|Example scripts and scenarios
|
||||
|
@ -42,6 +42,16 @@ Install Path | Description
|
|||
/etc/init.d/core-daemon|SysV startup script for daemon
|
||||
/etc/systemd/system/core-daemon.service|Systemd startup script for daemon
|
||||
|
||||
# Pre-Req Installing Python
|
||||
|
||||
You may already have these installed, and can ignore this step if so, but if
|
||||
needed you can run the following to install python and pip.
|
||||
|
||||
```shell
|
||||
sudo apt install python3
|
||||
sudo apt install python3-pip
|
||||
```
|
||||
|
||||
# Pre-Req Python Requirements
|
||||
|
||||
The newly added gRPC API, which depends on the python library grpcio, is not commonly found within system repos.
|
||||
|
@ -49,23 +59,9 @@ To account for this it would be recommended to install the python dependencies u
|
|||
the latest [CORE Release](https://github.com/coreemu/core/releases).
|
||||
|
||||
```shell
|
||||
# for python 2
|
||||
sudo python -m pip install -r requirements.txt
|
||||
# for python 3
|
||||
sudo python3 -m pip install -r requirements.txt
|
||||
```
|
||||
|
||||
## Ubuntu 19.04
|
||||
|
||||
Ubuntu 19.04 can provide all the packages needed at the system level and can be installed as follows:
|
||||
|
||||
```shell
|
||||
# python 2
|
||||
sudo apt install python-configparser python-enum34 python-future python-grpcio python-lxml
|
||||
# python 3
|
||||
sudo apt install python3-configparser python3-enum34 python3-future python3-grpcio python3-lxml
|
||||
```
|
||||
|
||||
# Pre-Req Installing OSPF MDR
|
||||
|
||||
Virtual networks generally require some form of routing in order to work (e.g. to automatically populate routing
|
||||
|
@ -91,7 +87,7 @@ Requires building from source, from the latest nightly snapshot.
|
|||
|
||||
```shell
|
||||
# packages needed beyond what's normally required to build core on ubuntu
|
||||
sudo apt install libtool libreadline-dev
|
||||
sudo apt install libtool libreadline-dev autoconf
|
||||
|
||||
wget https://downloads.pf.itd.nrl.navy.mil/ospf-manet/nightly_snapshots/quagga-svnsnap.tgz
|
||||
tar xzf quagga-svnsnap.tgz
|
||||
|
@@ -119,7 +115,7 @@ this is usually a sign that you have to run ```sudo ldconfig``` to refresh the
|
|||
# Installing from Packages
|
||||
|
||||
The easiest way to install CORE is using the pre-built packages. The package managers on Ubuntu or Fedora/CentOS
|
||||
will help in automatically installing most dependencies for you.
|
||||
will help in automatically installing most dependencies, except for the python ones described previously.
|
||||
|
||||
You can obtain the CORE packages from [CORE Releases](https://github.com/coreemu/core/releases).
|
||||
|
||||
|
@ -128,10 +124,9 @@ You can obtain the CORE packages from [CORE Releases](https://github.com/coreemu
|
|||
Ubuntu package defaults to using systemd for running as a service.
|
||||
|
||||
```shell
|
||||
# python2
|
||||
sudo apt install ./core_python_$VERSION_amd64.deb
|
||||
# python3
|
||||
sudo apt install ./core_python3_$VERSION_amd64.deb
|
||||
# $PYTHON and $VERSION represent the python and CORE
|
||||
# versions the package was built for
|
||||
sudo apt install ./core_$PYTHON_$VERSION_amd64.deb
|
||||
```
|
||||
|
||||
Run the CORE GUI as a normal user:
|
||||
|
@ -149,9 +144,6 @@ Messages will print out on the console about connecting to the CORE daemon.
|
|||
on CentOS <= 6, or build from source otherwise**
|
||||
|
||||
```shell
|
||||
# python2
|
||||
yum install ./core_python_$VERSION_x86_64.rpm
|
||||
# python3
|
||||
yum install ./core_python3_$VERSION_x86_64.rpm
|
||||
```
|
||||
|
||||
|
@ -219,10 +211,7 @@ You can obtain the CORE source from the [CORE GitHub](https://github.com/coreemu
|
|||
Python module grpcio-tools is currently needed to generate code from the CORE protobuf file during the build.
|
||||
|
||||
```shell
|
||||
# python2
|
||||
pip2 install grpcio-tools
|
||||
# python3
|
||||
pip3 install grpcio-tools
|
||||
python3 -m pip install grpcio-tools
|
||||
```
|
||||
|
||||
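If you want to run that code-generation step by hand (the build normally invokes it for you), the invocation looks roughly like the following; the `-I` path, output directories, and proto filename are placeholders for wherever the CORE proto file lives in your checkout.

```shell
# illustrative; adjust the include path and output directories to your source tree
python3 -m grpc_tools.protoc -I proto \
    --python_out=generated --grpc_python_out=generated \
    proto/core.proto
```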
## Distro Requirements
|
||||
|
@ -230,27 +219,26 @@ pip3 install grpcio-tools
|
|||
### Ubuntu 18.04 Requirements
|
||||
|
||||
```shell
|
||||
sudo apt install automake pkg-config gcc libev-dev bridge-utils ebtables python-dev python-setuptools tk libtk-img ethtool
|
||||
sudo apt install automake pkg-config gcc libev-dev bridge-utils ebtables python3-dev python3-setuptools tk libtk-img ethtool
|
||||
```
|
||||
|
||||
### Ubuntu 16.04 Requirements
|
||||
|
||||
```shell
|
||||
sudo apt-get install automake bridge-utils ebtables python-dev libev-dev python-setuptools libtk-img ethtool
|
||||
sudo apt-get install automake bridge-utils ebtables python3-dev libev-dev python3-setuptools libtk-img ethtool
|
||||
```
|
||||
|
||||
### CentOS 7 with Gnome Desktop Requirements
|
||||
|
||||
```shell
|
||||
sudo yum -y install automake gcc python-devel libev-devel tk ethtool
|
||||
sudo yum -y install automake gcc python3-devel python3-devel libev-devel tk ethtool
|
||||
```
|
||||
|
||||
## Build and Install
|
||||
|
||||
```shell
|
||||
./bootstrap.sh
|
||||
# $VERSION should be path to python2/3
|
||||
PYTHON=$VERSION ./configure
|
||||
PYTHON=python3 ./configure
|
||||
make
|
||||
sudo make install
|
||||
```
|
||||
|
@ -260,16 +248,11 @@ sudo make install
|
|||
Building documentation requires python-sphinx not noted above.
|
||||
|
||||
```shell
|
||||
# install python2 sphinx
|
||||
sudo apt install python-sphinx
|
||||
sudo yum install python-sphinx
|
||||
# install python3 sphinx
|
||||
sudo apt install python3-sphinx
|
||||
sudo yum install python3-sphinx
|
||||
|
||||
./bootstrap.sh
|
||||
# $VERSION should be path to python2/3
|
||||
PYTHON=$VERSION ./configure
|
||||
PYTHON=python3 ./configure
|
||||
make doc
|
||||
```
|
||||
|
||||
|
@ -282,10 +265,7 @@ Build package commands, DESTDIR is used to make install into and then for packag
|
|||
|
||||
```shell
|
||||
./bootstrap.sh
|
||||
# for python2
|
||||
PYTHON=python2 ./configure
|
||||
# for python3
|
||||
PYTHON=python3 ./configure --enable-python3
|
||||
PYTHON=python3 ./configure
|
||||
make
|
||||
mkdir /tmp/core-build
|
||||
make fpm DESTDIR=/tmp/core-build
|
||||
|
|
|
@ -19,7 +19,7 @@ array set g_node_types_default {
|
|||
4 {mdr mdr.gif mdr.gif {zebra OSPFv3MDR IPForward} \
|
||||
netns {built-in type for wireless routers}}
|
||||
5 {prouter router_green.gif router_green.gif \
|
||||
{zebra OSPFv2 OSPFv3 IPForward} \
|
||||
{} \
|
||||
physical {built-in type for physical nodes}}
|
||||
}
|
||||
|
||||
|
|
|
@ -9,8 +9,6 @@
|
|||
|
||||
if WANT_PYTHON
|
||||
|
||||
PYTHONLIBDIR=$(subst site-packages,dist-packages,$(pythondir))
|
||||
|
||||
SETUPPY = setup.py
|
||||
SETUPPYFLAGS = -v
|
||||
|
||||
|
@ -24,15 +22,15 @@ install-exec-hook:
|
|||
$(PYTHON) $(SETUPPY) $(SETUPPYFLAGS) install \
|
||||
--root=/$(DESTDIR) \
|
||||
--prefix=$(prefix) \
|
||||
--install-lib=$(PYTHONLIBDIR) \
|
||||
--install-lib=$(pythondir) \
|
||||
--single-version-externally-managed \
|
||||
--no-compile
|
||||
|
||||
# Python package uninstall
|
||||
uninstall-hook:
|
||||
-rm -rf core_ns3.egg-info
|
||||
-rm -rf $(DESTDIR)/$(PYTHONLIBDIR)/core_ns3-$(PACKAGE_VERSION)-py$(PYTHON_VERSION).egg-info
|
||||
-rm -rf $(DESTDIR)/$(PYTHONLIBDIR)/corens3
|
||||
-rm -rf $(DESTDIR)/$(pythondir)/core_ns3-$(PACKAGE_VERSION)-py$(PYTHON_VERSION).egg-info
|
||||
-rm -rf $(DESTDIR)/$(pythondir)/corens3
|
||||
-rm -rf $(DESTDIR)/$(datadir)/corens3
|
||||
|
||||
# Python package cleanup
|
||||
|
|
|
@ -117,8 +117,10 @@ class CoreNs3Net(CoreNetworkBase):
|
|||
# icon used
|
||||
type = "wlan"
|
||||
|
||||
def __init__(self, session, _id=None, name=None, start=True, policy=None):
|
||||
CoreNetworkBase.__init__(self, session, _id, name)
|
||||
def __init__(
|
||||
self, session, _id=None, name=None, start=True, server=None, policy=None
|
||||
):
|
||||
CoreNetworkBase.__init__(self, session, _id, name, start, server)
|
||||
self.tapbridge = ns.tap_bridge.TapBridgeHelper()
|
||||
self._ns3devs = {}
|
||||
self._tapdevs = {}
|
||||
|
|