Merge branch 'develop' into coredev-painttool

Huy Pham 2019-12-19 16:16:18 -08:00
commit 9a506fc35f
9 changed files with 114 additions and 83 deletions


@@ -5,7 +5,7 @@ verify_ssl = true
[scripts]
core = "python scripts/core-daemon -f data/core.conf -l data/logging.conf"
-coretk = "python core/gui/app.py"
+coretk = "python scripts/coretk-gui"
test = "pytest -v tests"
test-mock = "pytest -v --mock tests"
test-emane = "pytest -v tests/emane"
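For orientation (not part of this diff): entries under [scripts] are run through pipenv from the directory containing this Pipfile, so the renamed coretk entry above would be invoked roughly as follows.
```shell
# run the coretk script entry defined in the Pipfile above
# (assumes the working directory is the one containing the Pipfile)
python3 -m pipenv run coretk
```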


@@ -1,4 +1,3 @@
import logging
import tkinter as tk
from tkinter import ttk
@@ -96,12 +95,3 @@ class Application(tk.Frame):
    def close(self):
        self.master.destroy()
-if __name__ == "__main__":
-    log_format = "%(asctime)s - %(levelname)s - %(module)s:%(funcName)s - %(message)s"
-    logging.basicConfig(level=logging.DEBUG, format=log_format)
-    Images.load_all()
-    appconfig.check_directory()
-    app = Application()
-    app.mainloop()


@@ -357,7 +357,7 @@ class CanvasGraph(tk.Canvas):
return
# edge dst must be a node
-logging.debug(f"current selected: {self.selected}")
+logging.debug("current selected: %s", self.selected)
dst_node = self.nodes.get(self.selected)
if not dst_node:
edge.delete()
@@ -634,7 +634,7 @@ class CanvasGraph(tk.Canvas):
selected = self.get_selected(event)
canvas_node = self.nodes.get(selected)
if canvas_node:
-logging.debug(f"node context: {selected}")
+logging.debug("node context: %s", selected)
self.context = canvas_node.create_context()
self.context.post(event.x_root, event.y_root)
else:


@@ -4,13 +4,13 @@ Information on how Docker can be leveraged and included to create
nodes based on Docker containers and images to interface with
existing CORE nodes, when needed.
-# Installation
+## Installation
```shell
sudo apt install docker.io
```
-# Configuration
+## Configuration
Custom configuration is required to avoid iptables rules being added and to remove
the need for the default docker network, since CORE will be orchestrating
@@ -19,12 +19,28 @@ connections between nodes.
Place the file below in **/etc/docker/**
* daemon.json
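The diff does not include the contents of daemon.json itself; a minimal sketch consistent with the stated goal (no default docker bridge, no iptables rules) might look like the following, with the exact keys being an assumption rather than taken from this commit.
```shell
# assumed minimal /etc/docker/daemon.json disabling the default bridge and iptables rules
cat <<'EOF' | sudo tee /etc/docker/daemon.json
{
  "bridge": "none",
  "iptables": false
}
EOF
```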
-# Tools and Versions Tested With
+## Group Setup
+To use Docker nodes within the Python GUI, you will need to make sure the user running the GUI is a member of the
+docker group.
+```shell
+# add group if it does not exist
+sudo groupadd docker
+# add user to group
+sudo usermod -aG docker $USER
+# to get this change to take effect, log out and back in or run the following
+newgrp docker
+```
+## Tools and Versions Tested With
* Docker version 18.09.5, build e8ff056
* nsenter from util-linux 2.31.1
-# Examples
+## Examples
This directory provides a few small examples creating Docker nodes
and linking them to themselves or with standard CORE nodes.


@@ -4,13 +4,13 @@ Information on how LXD can be leveraged and included to create
nodes based on LXC containers and images to interface with
existing CORE nodes, when needed.
-# Installation
+## Installation
```shell
sudo snap install lxd
```
-# Configuration
+## Configuration
Initialize LXD and say no to adding a default bridge.
@@ -18,12 +18,28 @@ Initialize LXD and say no to adding a default bridge.
sudo lxd init
```
-# Tools and Versions Tested With
+## Group Setup
+To use LXC nodes within the Python GUI, you will need to make sure the user running the GUI is a member of the
+lxd group.
+```shell
+# add group if it does not exist
+sudo groupadd lxd
+# add user to group
+sudo usermod -aG lxd $USER
+# to get this change to take effect, log out and back in or run the following
+newgrp lxd
+```
+## Tools and Versions Tested With
* LXD 3.14
* nsenter from util-linux 2.31.1
-# Examples
+## Examples
This directory provides a few small examples creating LXC nodes
using LXD and linking them to themselves or with standard CORE nodes.

daemon/scripts/coretk-gui (new executable file)

@@ -0,0 +1,14 @@
#!/usr/bin/env python
import logging

from core.gui import appconfig
from core.gui.app import Application
from core.gui.images import Images

if __name__ == "__main__":
    log_format = "%(asctime)s - %(levelname)s - %(module)s:%(funcName)s - %(message)s"
    logging.basicConfig(level=logging.DEBUG, format=log_format)
    Images.load_all()
    appconfig.check_directory()
    app = Application()
    app.mainloop()


@@ -45,7 +45,6 @@ setup(
    ],
    tests_require=[
        "pytest",
-        "mock",
    ],
    data_files=data_files,
    scripts=glob.glob("scripts/*"),


@@ -10,7 +10,6 @@ Current development focuses on the Python modules and daemon. Here is a brief de
| Directory | Description |
|---|---|
|corefx|JavaFX based GUI using gRPC API to replace legacy GUI|
|daemon|Python CORE daemon code that handles receiving API calls and creating containers|
|docs|Markdown Documentation currently hosted on GitHub|
|gui|Tcl/Tk GUI|
@@ -24,6 +23,13 @@ Current development focuses on the Python modules and daemon. Here is a brief de
Overview for setting up the pipenv environment, building core, installing the GUI and netns, then running
the core-daemon for development.
+### Clone CORE Repo
+```shell
+git clone https://github.com/coreemu/core.git
+cd core
+```
### Setup Python Environment
To leverage the dev environment you need python 3.6+.
@@ -40,14 +46,13 @@ pip3 install pipenv
# setup a virtual environment and install all required development dependencies
python3 -m pipenv install --dev
# setup python variable using pipenv created python
export PYTHON=$(python3 -m pipenv --py)
```
### Setup pre-commit
-Install pre-commit hooks to help automate running tool checks against code.
+Install pre-commit hooks to help automate running tool checks against code. Once installed, every time a commit is made
+Python utilities will be run to check the validity of the code, potentially failing and backing out the commit. This allows
+one to review the changes being made by the tools or to fix the issues noted, then add the changes and commit again.
```shell
python3 -m pipenv run pre-commit install
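# not part of this diff: the installed hooks can also be run by hand across the
# whole repo using standard pre-commit usage, which is handy for a first pass
python3 -m pipenv run pre-commit run --all-files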
@@ -56,11 +61,6 @@ python3 -m pipenv run pre-commit install
### Build CORE
```shell
-# clone core
-git clone https://github.com/coreemu/core.git
-cd core
# build core
./bootstrap.sh
./configure --prefix=/usr
make
@@ -89,7 +89,7 @@ EMANE bindings are not available through pip, you will need to build and install
```shell
# after building emane above
# ./autogen.sh && ./configure --prefix=/usr && make
-python3 -m pipenv install --skip-lock $EMANEREPO/src/python
+python3 -m pipenv install $EMANEREPO/src/python
```
### Running CORE
@@ -97,20 +97,16 @@ python3 -m pipenv install --skip-lock $EMANEREPO/src/python
This will run the core-daemon server using the configuration files within the repo.
```shell
-python3 -m pipenv run coredev
+sudo python3 -m pipenv run core
```
-## The CORE API
+### Running CORE Python GUI
-The CORE API is used between different components of CORE for communication. The GUI communicates with the CORE daemon
-using the API. One emulation server communicates with another using the API. The API also allows other systems to
-interact with the CORE emulation. The API allows another system to add, remove, or modify nodes and links, and enables
-executing commands on the emulated systems. Wireless link parameters are updated on-the-fly based on node positions.
+Must be run after the daemon above or it will fail to connect.
-CORE listens on a local TCP port for API messages. The other system could be software running locally or another
-machine accessible across the network.
-The CORE API is currently specified in a separate document, available from the CORE website.
+```shell
+python3 -m pipenv run coretk
+```
## Linux Network Namespace Commands
@@ -169,43 +165,3 @@ tc qdisc show
# view the rules that make the wireless LAN work
ebtables -L
```
-### Example Command Usage
-Below is a transcript of creating two emulated nodes and connecting them together with a wired link:
-```shell
-# create node 1 namespace container
-vnoded -c /tmp/n1.ctl -l /tmp/n1.log -p /tmp/n1.pid
-# create a virtual Ethernet (veth) pair, installing one end into node 1
-ip link add name n1.0.1 type veth peer name n1.0
-ip link set n1.0 netns `cat /tmp/n1.pid`
-vcmd -c /tmp/n1.ctl -- ip link set lo up
-vcmd -c /tmp/n1.ctl -- ip link set n1.0 name eth0 up
-vcmd -c /tmp/n1.ctl -- ip addr add 10.0.0.1/24 dev eth0
-# create node 2 namespace container
-vnoded -c /tmp/n2.ctl -l /tmp/n2.log -p /tmp/n2.pid
-# create a virtual Ethernet (veth) pair, installing one end into node 2
-ip link add name n2.0.1 type veth peer name n2.0
-ip link set n2.0 netns `cat /tmp/n2.pid`
-vcmd -c /tmp/n2.ctl -- ip link set lo up
-vcmd -c /tmp/n2.ctl -- ip link set n2.0 name eth0 up
-vcmd -c /tmp/n2.ctl -- ip addr add 10.0.0.2/24 eth0
-# bridge together nodes 1 and 2 using the other end of each veth pair
-brctl addbr b.1.1
-brctl setfd b.1.1 0
-brctl addif b.1.1 n1.0.1
-brctl addif b.1.1 n2.0.1
-ip link set n1.0.1 up
-ip link set n2.0.1 up
-ip link set b.1.1 up
-# display connectivity and ping from node 1 to node 2
-brctl show
-vcmd -c /tmp/n1.ctl -- ping 10.0.0.2
-```
-The above example script can be found as *twonodes.sh* in the *examples/netns* directory. Use *core-cleanup* to clean
-up after the script.

install.sh (new executable file)

@@ -0,0 +1,40 @@
#!/bin/bash

# exit on error
set -e

# detect os/ver for install type
os=""
if [[ -f /etc/os-release ]]; then
    . /etc/os-release
    os=${NAME}
fi

# check install was found
if [[ ${os} == "Ubuntu" ]]; then
    # install system dependencies
    sudo apt install -y automake pkg-config gcc libev-dev bridge-utils ebtables \
        python3.6 python3.6-dev python3-pip python3-tk tk libtk-img ethtool libtool libreadline-dev autoconf
    # install python dependencies
    sudo python3 -m pip install -r daemon/requirements.txt
    # make and install ospf mdr
    git clone https://github.com/USNavalResearchLaboratory/ospf-mdr /tmp/ospf-mdr
    cd /tmp/ospf-mdr
    ./bootstrap.sh
    ./configure --disable-doc --enable-user=root --enable-group=root --with-cflags=-ggdb \
        --sysconfdir=/usr/local/etc/quagga --enable-vtysh \
        --localstatedir=/var/run/quagga
    make -j8
    sudo make install
    cd -
    # build and install core
    ./bootstrap.sh
    ./configure
    make -j8
    sudo make install
else
    echo "unknown os ${os} cannot install"
fi
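A usage note inferred from the script itself rather than stated in the diff: it expects to be run from the root of a cloned CORE checkout on Ubuntu, and it invokes sudo internally where needed.
```shell
# assumed invocation from a fresh clone
git clone https://github.com/coreemu/core.git
cd core
./install.sh
```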