Merge branch 'develop' into ovs

commit 06e145f508
487 changed files with 49691 additions and 30722 deletions
@@ -1,24 +1,7 @@
import json
import logging
import logging.config
import os
import subprocess

from core import constants

# setup default null handler
logging.getLogger(__name__).addHandler(logging.NullHandler())

# setup logging
log_config_path = os.path.join(constants.CORE_CONF_DIR, "logging.conf")
with open(log_config_path, "r") as log_config_file:
    log_config = json.load(log_config_file)
    logging.config.dictConfig(log_config)

logger = logging.getLogger()


class CoreCommandError(subprocess.CalledProcessError):
    """
    Used when encountering internal CORE command errors.
    """

    def __str__(self):
        return "Command(%s), Status(%s):\n%s" % (self.cmd, self.returncode, self.output)

# disable paramiko logging
logging.getLogger("paramiko").setLevel(logging.WARNING)

@@ -1,3 +0,0 @@
"""
Contains code specific to the legacy TCP API for interacting with the TCL based GUI.
"""

@@ -1,43 +0,0 @@
"""
Converts CORE data objects into legacy API messages.
"""

from core import logger
from core.api import coreapi
from core.enumerations import NodeTlvs
from core.misc import structutils


def convert_node(node_data):
    """
    Callback to handle an node broadcast out from a session.

    :param core.data.NodeData node_data: node data to handle
    :return: packed node message
    """
    logger.debug("converting node data to message: %s", node_data)

    tlv_data = structutils.pack_values(coreapi.CoreNodeTlv, [
        (NodeTlvs.NUMBER, node_data.id),
        (NodeTlvs.TYPE, node_data.node_type),
        (NodeTlvs.NAME, node_data.name),
        (NodeTlvs.IP_ADDRESS, node_data.ip_address),
        (NodeTlvs.MAC_ADDRESS, node_data.mac_address),
        (NodeTlvs.IP6_ADDRESS, node_data.ip6_address),
        (NodeTlvs.MODEL, node_data.model),
        (NodeTlvs.EMULATION_ID, node_data.emulation_id),
        (NodeTlvs.EMULATION_SERVER, node_data.emulation_server),
        (NodeTlvs.SESSION, node_data.session),
        (NodeTlvs.X_POSITION, node_data.x_position),
        (NodeTlvs.Y_POSITION, node_data.y_position),
        (NodeTlvs.CANVAS, node_data.canvas),
        (NodeTlvs.NETWORK_ID, node_data.network_id),
        (NodeTlvs.SERVICES, node_data.services),
        (NodeTlvs.LATITUDE, node_data.latitude),
        (NodeTlvs.LONGITUDE, node_data.longitude),
        (NodeTlvs.ALTITUDE, node_data.altitude),
        (NodeTlvs.ICON, node_data.icon),
        (NodeTlvs.OPAQUE, node_data.opaque)
    ])

    return coreapi.CoreNodeMessage.pack(node_data.message_type, tlv_data)
1241  daemon/core/api/grpc/client.py  Normal file
File diff suppressed because it is too large.
210  daemon/core/api/grpc/events.py  Normal file

@@ -0,0 +1,210 @@
import logging
from queue import Empty, Queue
from typing import Iterable

from core.api.grpc import core_pb2
from core.api.grpc.grpcutils import convert_link
from core.emulator.data import (
    ConfigData,
    EventData,
    ExceptionData,
    FileData,
    LinkData,
    NodeData,
)
from core.emulator.session import Session


def handle_node_event(event: NodeData) -> core_pb2.NodeEvent:
    """
    Handle node event when there is a node event

    :param event: node data
    :return: node event that contains node id, name, model, position, and services
    """
    position = core_pb2.Position(x=event.x_position, y=event.y_position)
    node_proto = core_pb2.Node(
        id=event.id,
        name=event.name,
        model=event.model,
        position=position,
        services=event.services,
    )
    return core_pb2.NodeEvent(node=node_proto, source=event.source)


def handle_link_event(event: LinkData) -> core_pb2.LinkEvent:
    """
    Handle link event when there is a link event

    :param event: link data
    :return: link event that has message type and link information
    """
    link = convert_link(event)
    return core_pb2.LinkEvent(message_type=event.message_type.value, link=link)


def handle_session_event(event: EventData) -> core_pb2.SessionEvent:
    """
    Handle session event when there is a session event

    :param event: event data
    :return: session event
    """
    event_time = event.time
    if event_time is not None:
        event_time = float(event_time)
    return core_pb2.SessionEvent(
        node_id=event.node,
        event=event.event_type.value,
        name=event.name,
        data=event.data,
        time=event_time,
    )


def handle_config_event(event: ConfigData) -> core_pb2.ConfigEvent:
    """
    Handle configuration event when there is configuration event

    :param event: configuration data
    :return: configuration event
    """
    return core_pb2.ConfigEvent(
        message_type=event.message_type,
        node_id=event.node,
        object=event.object,
        type=event.type,
        captions=event.captions,
        bitmap=event.bitmap,
        data_values=event.data_values,
        possible_values=event.possible_values,
        groups=event.groups,
        interface=event.interface_number,
        network_id=event.network_id,
        opaque=event.opaque,
        data_types=event.data_types,
    )


def handle_exception_event(event: ExceptionData) -> core_pb2.ExceptionEvent:
    """
    Handle exception event when there is exception event

    :param event: exception data
    :return: exception event
    """
    return core_pb2.ExceptionEvent(
        node_id=event.node,
        level=event.level.value,
        source=event.source,
        date=event.date,
        text=event.text,
        opaque=event.opaque,
    )


def handle_file_event(event: FileData) -> core_pb2.FileEvent:
    """
    Handle file event

    :param event: file data
    :return: file event
    """
    return core_pb2.FileEvent(
        message_type=event.message_type.value,
        node_id=event.node,
        name=event.name,
        mode=event.mode,
        number=event.number,
        type=event.type,
        source=event.source,
        data=event.data,
        compressed_data=event.compressed_data,
    )


class EventStreamer:
    """
    Processes session events to generate grpc events.
    """

    def __init__(
        self, session: Session, event_types: Iterable[core_pb2.EventType]
    ) -> None:
        """
        Create a EventStreamer instance.

        :param session: session to process events for
        :param event_types: types of events to process
        """
        self.session = session
        self.event_types = event_types
        self.queue = Queue()
        self.add_handlers()

    def add_handlers(self) -> None:
        """
        Add a session event handler for desired event types.

        :return: nothing
        """
        if core_pb2.EventType.NODE in self.event_types:
            self.session.node_handlers.append(self.queue.put)
        if core_pb2.EventType.LINK in self.event_types:
            self.session.link_handlers.append(self.queue.put)
        if core_pb2.EventType.CONFIG in self.event_types:
            self.session.config_handlers.append(self.queue.put)
        if core_pb2.EventType.FILE in self.event_types:
            self.session.file_handlers.append(self.queue.put)
        if core_pb2.EventType.EXCEPTION in self.event_types:
            self.session.exception_handlers.append(self.queue.put)
        if core_pb2.EventType.SESSION in self.event_types:
            self.session.event_handlers.append(self.queue.put)

    def process(self) -> core_pb2.Event:
        """
        Process the next event in the queue.

        :return: grpc event, or None when invalid event or queue timeout
        """
        event = core_pb2.Event(session_id=self.session.id)
        try:
            data = self.queue.get(timeout=1)
            if isinstance(data, NodeData):
                event.node_event.CopyFrom(handle_node_event(data))
            elif isinstance(data, LinkData):
                event.link_event.CopyFrom(handle_link_event(data))
            elif isinstance(data, EventData):
                event.session_event.CopyFrom(handle_session_event(data))
            elif isinstance(data, ConfigData):
                event.config_event.CopyFrom(handle_config_event(data))
            elif isinstance(data, ExceptionData):
                event.exception_event.CopyFrom(handle_exception_event(data))
            elif isinstance(data, FileData):
                event.file_event.CopyFrom(handle_file_event(data))
            else:
                logging.error("unknown event: %s", data)
                event = None
        except Empty:
            event = None
        return event

    def remove_handlers(self) -> None:
        """
        Remove session event handlers for events being watched.

        :return: nothing
        """
        if core_pb2.EventType.NODE in self.event_types:
            self.session.node_handlers.remove(self.queue.put)
        if core_pb2.EventType.LINK in self.event_types:
            self.session.link_handlers.remove(self.queue.put)
        if core_pb2.EventType.CONFIG in self.event_types:
            self.session.config_handlers.remove(self.queue.put)
        if core_pb2.EventType.FILE in self.event_types:
            self.session.file_handlers.remove(self.queue.put)
        if core_pb2.EventType.EXCEPTION in self.event_types:
            self.session.exception_handlers.remove(self.queue.put)
        if core_pb2.EventType.SESSION in self.event_types:
            self.session.event_handlers.remove(self.queue.put)
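Note (not part of the commit): the EventStreamer above registers its queue's put method as a session handler and converts queued data objects into protobuf events. A minimal sketch of how a caller might drive it, assuming an existing Session object; the helper name and stop condition below are illustrative only:

# --- illustrative example, not part of this diff ---
from core.api.grpc import core_pb2
from core.api.grpc.events import EventStreamer

def stream_session_events(session, should_continue):
    # Watch only node and link events for the given session.
    streamer = EventStreamer(session, [core_pb2.EventType.NODE, core_pb2.EventType.LINK])
    try:
        while should_continue():
            event = streamer.process()  # None on queue timeout or unknown event
            if event is not None:
                yield event
    finally:
        streamer.remove_handlers()  # detach the queue from the session handlers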
475  daemon/core/api/grpc/grpcutils.py  Normal file

@@ -0,0 +1,475 @@
import logging
import time
from typing import Any, Dict, List, Tuple, Type

import netaddr

from core import utils
from core.api.grpc import common_pb2, core_pb2
from core.api.grpc.services_pb2 import NodeServiceData, ServiceConfig
from core.config import ConfigurableOptions
from core.emane.nodes import EmaneNet
from core.emulator.data import LinkData
from core.emulator.emudata import InterfaceData, LinkOptions, NodeOptions
from core.emulator.enumerations import LinkTypes, NodeTypes
from core.emulator.session import Session
from core.nodes.base import NodeBase
from core.nodes.interface import CoreInterface
from core.services.coreservices import CoreService

WORKERS = 10


def add_node_data(node_proto: core_pb2.Node) -> Tuple[NodeTypes, int, NodeOptions]:
    """
    Convert node protobuf message to data for creating a node.

    :param node_proto: node proto message
    :return: node type, id, and options
    """
    _id = node_proto.id
    _type = NodeTypes(node_proto.type)
    options = NodeOptions(name=node_proto.name, model=node_proto.model)
    options.icon = node_proto.icon
    options.opaque = node_proto.opaque
    options.image = node_proto.image
    options.services = node_proto.services
    options.config_services = node_proto.config_services
    if node_proto.emane:
        options.emane = node_proto.emane
    if node_proto.server:
        options.server = node_proto.server

    position = node_proto.position
    options.set_position(position.x, position.y)
    if node_proto.HasField("geo"):
        geo = node_proto.geo
        options.set_location(geo.lat, geo.lon, geo.alt)
    return _type, _id, options


def link_interface(interface_proto: core_pb2.Interface) -> InterfaceData:
    """
    Create interface data from interface proto.

    :param interface_proto: interface proto
    :return: interface data
    """
    interface = None
    if interface_proto:
        name = interface_proto.name
        if name == "":
            name = None
        mac = interface_proto.mac
        if mac == "":
            mac = None
        interface = InterfaceData(
            _id=interface_proto.id,
            name=name,
            mac=mac,
            ip4=interface_proto.ip4,
            ip4_mask=interface_proto.ip4mask,
            ip6=interface_proto.ip6,
            ip6_mask=interface_proto.ip6mask,
        )
    return interface


def add_link_data(
    link_proto: core_pb2.Link
) -> Tuple[InterfaceData, InterfaceData, LinkOptions]:
    """
    Convert link proto to link interfaces and options data.

    :param link_proto: link proto
    :return: link interfaces and options
    """
    interface_one = link_interface(link_proto.interface_one)
    interface_two = link_interface(link_proto.interface_two)

    link_type = None
    link_type_value = link_proto.type
    if link_type_value is not None:
        link_type = LinkTypes(link_type_value)

    options = LinkOptions(_type=link_type)
    options_data = link_proto.options
    if options_data:
        options.delay = options_data.delay
        options.bandwidth = options_data.bandwidth
        options.per = options_data.per
        options.dup = options_data.dup
        options.jitter = options_data.jitter
        options.mer = options_data.mer
        options.burst = options_data.burst
        options.mburst = options_data.mburst
        options.unidirectional = options_data.unidirectional
        options.key = options_data.key
        options.opaque = options_data.opaque

    return interface_one, interface_two, options


def create_nodes(
    session: Session, node_protos: List[core_pb2.Node]
) -> Tuple[List[NodeBase], List[Exception]]:
    """
    Create nodes using a thread pool and wait for completion.

    :param session: session to create nodes in
    :param node_protos: node proto messages
    :return: results and exceptions for created nodes
    """
    funcs = []
    for node_proto in node_protos:
        _type, _id, options = add_node_data(node_proto)
        args = (_type, _id, options)
        funcs.append((session.add_node, args, {}))
    start = time.monotonic()
    results, exceptions = utils.threadpool(funcs)
    total = time.monotonic() - start
    logging.debug("grpc created nodes time: %s", total)
    return results, exceptions


def create_links(
    session: Session, link_protos: List[core_pb2.Link]
) -> Tuple[List[NodeBase], List[Exception]]:
    """
    Create links using a thread pool and wait for completion.

    :param session: session to create nodes in
    :param link_protos: link proto messages
    :return: results and exceptions for created links
    """
    funcs = []
    for link_proto in link_protos:
        node_one_id = link_proto.node_one_id
        node_two_id = link_proto.node_two_id
        interface_one, interface_two, options = add_link_data(link_proto)
        args = (node_one_id, node_two_id, interface_one, interface_two, options)
        funcs.append((session.add_link, args, {}))
    start = time.monotonic()
    results, exceptions = utils.threadpool(funcs)
    total = time.monotonic() - start
    logging.debug("grpc created links time: %s", total)
    return results, exceptions


def edit_links(
    session: Session, link_protos: List[core_pb2.Link]
) -> Tuple[List[None], List[Exception]]:
    """
    Edit links using a thread pool and wait for completion.

    :param session: session to create nodes in
    :param link_protos: link proto messages
    :return: results and exceptions for created links
    """
    funcs = []
    for link_proto in link_protos:
        node_one_id = link_proto.node_one_id
        node_two_id = link_proto.node_two_id
        interface_one, interface_two, options = add_link_data(link_proto)
        args = (node_one_id, node_two_id, interface_one.id, interface_two.id, options)
        funcs.append((session.update_link, args, {}))
    start = time.monotonic()
    results, exceptions = utils.threadpool(funcs)
    total = time.monotonic() - start
    logging.debug("grpc edit links time: %s", total)
    return results, exceptions


def convert_value(value: Any) -> str:
    """
    Convert value into string.

    :param value: value
    :return: string conversion of the value
    """
    if value is not None:
        value = str(value)
    return value


def get_config_options(
    config: Dict[str, str], configurable_options: Type[ConfigurableOptions]
) -> Dict[str, common_pb2.ConfigOption]:
    """
    Retrieve configuration options in a form that is used by the grpc server.

    :param config: configuration
    :param configurable_options: configurable options
    :return: mapping of configuration ids to configuration options
    """
    results = {}
    for configuration in configurable_options.configurations():
        value = config[configuration.id]
        config_option = common_pb2.ConfigOption(
            label=configuration.label,
            name=configuration.id,
            value=value,
            type=configuration.type.value,
            select=configuration.options,
        )
        results[configuration.id] = config_option
    for config_group in configurable_options.config_groups():
        start = config_group.start - 1
        stop = config_group.stop
        options = list(results.values())[start:stop]
        for option in options:
            option.group = config_group.name
    return results


def get_node_proto(session: Session, node: NodeBase) -> core_pb2.Node:
    """
    Convert CORE node to protobuf representation.

    :param session: session containing node
    :param node: node to convert
    :return: node proto
    """
    node_type = session.get_node_type(node.__class__)
    position = core_pb2.Position(
        x=node.position.x, y=node.position.y, z=node.position.z
    )
    services = getattr(node, "services", [])
    if services is None:
        services = []
    services = [x.name for x in services]
    config_services = getattr(node, "config_services", {})
    config_services = [x for x in config_services]
    emane_model = None
    if isinstance(node, EmaneNet):
        emane_model = node.model.name
    model = getattr(node, "type", None)
    node_dir = getattr(node, "nodedir", None)
    channel = getattr(node, "ctrlchnlname", None)
    image = getattr(node, "image", None)
    return core_pb2.Node(
        id=node.id,
        name=node.name,
        emane=emane_model,
        model=model,
        type=node_type.value,
        position=position,
        services=services,
        icon=node.icon,
        image=image,
        config_services=config_services,
        dir=node_dir,
        channel=channel,
    )


def get_links(node: NodeBase):
    """
    Retrieve a list of links for grpc to use.

    :param node: node to get links from
    :return: protobuf links
    """
    links = []
    for link_data in node.all_link_data():
        link = convert_link(link_data)
        links.append(link)
    return links


def get_emane_model_id(node_id: int, interface_id: int) -> int:
    """
    Get EMANE model id

    :param node_id: node id
    :param interface_id: interface id
    :return: EMANE model id
    """
    if interface_id >= 0:
        return node_id * 1000 + interface_id
    else:
        return node_id


def parse_emane_model_id(_id: int) -> Tuple[int, int]:
    """
    Parses EMANE model id to get true node id and interface id.

    :param _id: id to parse
    :return: node id and interface id
    """
    interface = -1
    node_id = _id
    if _id >= 1000:
        interface = _id % 1000
        node_id = int(_id / 1000)
    return node_id, interface
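Note (not part of the commit): the two helpers above encode a node id and an interface id into one integer by reserving the last three decimal digits for the interface; a quick round-trip check of that arithmetic:

# --- illustrative example, not part of this diff ---
from core.api.grpc.grpcutils import get_emane_model_id, parse_emane_model_id

assert get_emane_model_id(7, 2) == 7002          # 7 * 1000 + 2
assert parse_emane_model_id(7002) == (7, 2)
assert get_emane_model_id(7, -1) == 7            # no interface encodes to the bare node id
assert parse_emane_model_id(7) == (7, -1)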
def convert_link(link_data: LinkData) -> core_pb2.Link:
    """
    Convert link_data into core protobuf link.

    :param link_data: link to convert
    :return: core protobuf Link
    """
    interface_one = None
    if link_data.interface1_id is not None:
        interface_one = core_pb2.Interface(
            id=link_data.interface1_id,
            name=link_data.interface1_name,
            mac=convert_value(link_data.interface1_mac),
            ip4=convert_value(link_data.interface1_ip4),
            ip4mask=link_data.interface1_ip4_mask,
            ip6=convert_value(link_data.interface1_ip6),
            ip6mask=link_data.interface1_ip6_mask,
        )
    interface_two = None
    if link_data.interface2_id is not None:
        interface_two = core_pb2.Interface(
            id=link_data.interface2_id,
            name=link_data.interface2_name,
            mac=convert_value(link_data.interface2_mac),
            ip4=convert_value(link_data.interface2_ip4),
            ip4mask=link_data.interface2_ip4_mask,
            ip6=convert_value(link_data.interface2_ip6),
            ip6mask=link_data.interface2_ip6_mask,
        )
    options = core_pb2.LinkOptions(
        opaque=link_data.opaque,
        jitter=link_data.jitter,
        key=link_data.key,
        mburst=link_data.mburst,
        mer=link_data.mer,
        per=link_data.per,
        bandwidth=link_data.bandwidth,
        burst=link_data.burst,
        delay=link_data.delay,
        dup=link_data.dup,
        unidirectional=link_data.unidirectional,
    )
    return core_pb2.Link(
        type=link_data.link_type.value,
        node_one_id=link_data.node1_id,
        node_two_id=link_data.node2_id,
        interface_one=interface_one,
        interface_two=interface_two,
        options=options,
        network_id=link_data.network_id,
        label=link_data.label,
        color=link_data.color,
    )


def get_net_stats() -> Dict[str, Dict]:
    """
    Retrieve status about the current interfaces in the system

    :return: send and receive status of the interfaces in the system
    """
    with open("/proc/net/dev", "r") as f:
        data = f.readlines()[2:]

    stats = {}
    for line in data:
        line = line.strip()
        if not line:
            continue
        line = line.split()
        line[0] = line[0].strip(":")
        stats[line[0]] = {"rx": float(line[1]), "tx": float(line[9])}

    return stats


def session_location(session: Session, location: core_pb2.SessionLocation) -> None:
    """
    Set session location based on location proto.

    :param session: session for location
    :param location: location to set
    :return: nothing
    """
    session.location.refxyz = (location.x, location.y, location.z)
    session.location.setrefgeo(location.lat, location.lon, location.alt)
    session.location.refscale = location.scale


def service_configuration(session: Session, config: ServiceConfig) -> None:
    """
    Convenience method for setting a node service configuration.

    :param session: session for service configuration
    :param config: service configuration
    :return:
    """
    session.services.set_service(config.node_id, config.service)
    service = session.services.get_service(config.node_id, config.service)
    if config.files:
        service.configs = tuple(config.files)
    if config.directories:
        service.dirs = tuple(config.directories)
    if config.startup:
        service.startup = tuple(config.startup)
    if config.validate:
        service.validate = tuple(config.validate)
    if config.shutdown:
        service.shutdown = tuple(config.shutdown)


def get_service_configuration(service: Type[CoreService]) -> NodeServiceData:
    """
    Convenience for converting a service to service data proto.

    :param service: service to get proto data for
    :return: service proto data
    """
    return NodeServiceData(
        executables=service.executables,
        dependencies=service.dependencies,
        dirs=service.dirs,
        configs=service.configs,
        startup=service.startup,
        validate=service.validate,
        validation_mode=service.validation_mode.value,
        validation_timer=service.validation_timer,
        shutdown=service.shutdown,
        meta=service.meta,
    )


def interface_to_proto(interface: CoreInterface) -> core_pb2.Interface:
    """
    Convenience for converting a core interface to the protobuf representation.
    :param interface: interface to convert
    :return: interface proto
    """
    net_id = None
    if interface.net:
        net_id = interface.net.id
    ip4 = None
    ip4mask = None
    ip6 = None
    ip6mask = None
    for addr in interface.addrlist:
        network = netaddr.IPNetwork(addr)
        mask = network.prefixlen
        ip = str(network.ip)
        if netaddr.valid_ipv4(ip) and not ip4:
            ip4 = ip
            ip4mask = mask
        elif netaddr.valid_ipv6(ip) and not ip6:
            ip6 = ip
            ip6mask = mask
    return core_pb2.Interface(
        id=interface.netindex,
        netid=net_id,
        name=interface.name,
        mac=str(interface.hwaddr),
        mtu=interface.mtu,
        flowid=interface.flow_id,
        ip4=ip4,
        ip4mask=ip4mask,
        ip6=ip6,
        ip6mask=ip6mask,
    )
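Note (not part of the commit): create_nodes, create_links, and edit_links above queue work for core.utils.threadpool as (callable, args, kwargs) tuples and collect results and exceptions separately. A hedged sketch of the same fan-out pattern using only the standard library, for readers unfamiliar with that convention; run_all is an illustrative name, not CORE's API, and the default of 10 workers simply mirrors the WORKERS constant above:

# --- illustrative example, not part of this diff ---
from concurrent.futures import ThreadPoolExecutor

def run_all(funcs, workers=10):
    """Execute (callable, args, kwargs) tuples, returning results and exceptions."""
    results, exceptions = [], []
    with ThreadPoolExecutor(max_workers=workers) as pool:
        futures = [pool.submit(func, *args, **kwargs) for func, args, kwargs in funcs]
        for future in futures:
            try:
                results.append(future.result())
            except Exception as e:  # collect failures instead of aborting the batch
                exceptions.append(e)
    return results, exceptions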
1687  daemon/core/api/grpc/server.py  Normal file
File diff suppressed because it is too large.
@@ -5,30 +5,30 @@ types and objects used for parsing and building CORE API messages.
CORE API messaging is leveraged for communication with the GUI.
"""

import binascii
import socket
import struct

from enum import Enum

from core.enumerations import ConfigTlvs
from core.enumerations import EventTlvs
from core.enumerations import EventTypes
from core.enumerations import ExceptionTlvs
from core.enumerations import ExecuteTlvs
from core.enumerations import FileTlvs
from core.enumerations import InterfaceTlvs
from core.enumerations import LinkTlvs
from core.enumerations import MessageFlags
from core.enumerations import MessageTypes
from core.enumerations import NodeTlvs
from core.enumerations import RegisterTlvs
from core.enumerations import SessionTlvs
from core.misc import structutils
from core.misc.ipaddress import IpAddress
from core.misc.ipaddress import MacAddress
import netaddr

from core.api.tlv import structutils
from core.api.tlv.enumerations import (
    ConfigTlvs,
    EventTlvs,
    ExceptionTlvs,
    ExecuteTlvs,
    FileTlvs,
    InterfaceTlvs,
    LinkTlvs,
    MessageTypes,
    NodeTlvs,
    SessionTlvs,
)
from core.emulator.enumerations import MessageFlags, RegisterTlvs


class CoreTlvData(object):
class CoreTlvData:
    """
    Helper base class used for packing and unpacking values using struct.
    """

@@ -91,16 +91,16 @@ class CoreTlvDataObj(CoreTlvData):
    """

    @classmethod
    def pack(cls, obj):
    def pack(cls, value):
        """
        Convenience method for packing custom object data.

        :param obj: custom object to pack
        :param value: custom object to pack
        :return: length of data and the packed data itself
        :rtype: tuple
        """
        value = cls.get_value(obj)
        return super(CoreTlvDataObj, cls).pack(value)
        value = cls.get_value(value)
        return super().pack(value)

    @classmethod
    def unpack(cls, data):

@@ -110,7 +110,7 @@ class CoreTlvDataObj(CoreTlvData):
        :param data: data to unpack custom object from
        :return: unpacked custom object
        """
        data = super(CoreTlvDataObj, cls).unpack(data)
        data = super().unpack(data)
        return cls.new_obj(data)

    @staticmethod

@@ -138,6 +138,7 @@ class CoreTlvDataUint16(CoreTlvData):
    """
    Helper class for packing uint16 data.
    """

    data_format = "!H"
    data_type = int
    pad_len = 0

@@ -147,6 +148,7 @@ class CoreTlvDataUint32(CoreTlvData):
    """
    Helper class for packing uint32 data.
    """

    data_format = "!2xI"
    data_type = int
    pad_len = 2

@@ -156,8 +158,9 @@ class CoreTlvDataUint64(CoreTlvData):
    """
    Helper class for packing uint64 data.
    """

    data_format = "!2xQ"
    data_type = long
    data_type = int
    pad_len = 2


@@ -165,6 +168,7 @@ class CoreTlvDataString(CoreTlvData):
    """
    Helper class for packing string data.
    """

    data_type = str

    @classmethod

@@ -177,7 +181,8 @@ class CoreTlvDataString(CoreTlvData):
        :rtype: tuple
        """
        if not isinstance(value, str):
            raise ValueError("value not a string: %s" % value)
            raise ValueError(f"value not a string: {type(value)}")
        value = value.encode("utf-8")

        if len(value) < 256:
            header_len = CoreTlv.header_len

@@ -185,7 +190,7 @@ class CoreTlvDataString(CoreTlvData):
            header_len = CoreTlv.long_header_len

        pad_len = -(header_len + len(value)) % 4
        return len(value), value + "\0" * pad_len
        return len(value), value + b"\0" * pad_len

    @classmethod
    def unpack(cls, data):

@@ -195,13 +200,14 @@ class CoreTlvDataString(CoreTlvData):
        :param str data: unpack string data
        :return: unpacked string data
        """
        return data.rstrip("\0")
        return data.rstrip(b"\0").decode("utf-8")


class CoreTlvDataUint16List(CoreTlvData):
    """
    List of unsigned 16-bit values.
    """

    data_type = tuple
    data_format = "!H"

@@ -215,14 +221,14 @@ class CoreTlvDataUint16List(CoreTlvData):
        :rtype: tuple
        """
        if not isinstance(values, tuple):
            raise ValueError("value not a tuple: %s" % values)
            raise ValueError(f"value not a tuple: {values}")

        data = ""
        data = b""
        for value in values:
            data += struct.pack(cls.data_format, value)

        pad_len = -(CoreTlv.header_len + len(data)) % 4
        return len(data), data + "\0" * pad_len
        return len(data), data + b"\0" * pad_len

    @classmethod
    def unpack(cls, data):

@@ -232,7 +238,8 @@ class CoreTlvDataUint16List(CoreTlvData):
        :param data: data to unpack
        :return: unpacked data
        """
        data_format = "!%dH" % (len(data) / 2)
        size = int(len(data) / 2)
        data_format = f"!{size}H"
        return struct.unpack(data_format, data)

    @classmethod

@@ -241,17 +248,18 @@ class CoreTlvDataUint16List(CoreTlvData):
        Retrieves a unint 16 list from a string

        :param str value: string representation of a uint 16 list
        :return: unint 16 list
        :return: uint 16 list
        :rtype: list
        """
        return tuple(map(lambda (x): int(x), value.split()))
        return tuple(int(x) for x in value.split())


class CoreTlvDataIpv4Addr(CoreTlvDataObj):
    """
    Utility class for packing/unpacking Ipv4 addresses.
    """
    data_type = IpAddress.from_string

    data_type = str
    data_format = "!2x4s"
    pad_len = 2

@@ -260,29 +268,31 @@ class CoreTlvDataIpv4Addr(CoreTlvDataObj):
        """
        Retrieve Ipv4 address value from object.

        :param core.misc.ipaddress.IpAddress obj: ip address to get value from
        :return:
        :param str obj: ip address to get value from
        :return: packed address
        :rtype: bytes
        """
        return obj.addr
        return socket.inet_pton(socket.AF_INET, obj)

    @staticmethod
    def new_obj(value):
        """
        Retrieve Ipv4 address from a string representation.

        :param str value: value to get Ipv4 address from
        :param bytes value: value to get Ipv4 address from
        :return: Ipv4 address
        :rtype: core.misc.ipaddress.IpAddress
        :rtype: str
        """
        return IpAddress(af=socket.AF_INET, address=value)
        return socket.inet_ntop(socket.AF_INET, value)
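Note (not part of the commit): the TLV address classes now hold addresses as plain strings and pack them with the socket module instead of the removed core.misc.ipaddress helpers; the conversion round-trips as expected:

# --- illustrative example, not part of this diff ---
import socket

packed = socket.inet_pton(socket.AF_INET, "10.0.0.1")
assert len(packed) == 4
assert socket.inet_ntop(socket.AF_INET, packed) == "10.0.0.1"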
class CoreTlvDataIPv6Addr(CoreTlvDataObj):
    """
    Utility class for packing/unpacking Ipv6 addresses.
    """

    data_format = "!16s2x"
    data_type = IpAddress.from_string
    data_type = str
    pad_len = 2

    @staticmethod

@@ -290,29 +300,31 @@ class CoreTlvDataIPv6Addr(CoreTlvDataObj):
        """
        Retrieve Ipv6 address value from object.

        :param core.misc.ipaddress.IpAddress obj: ip address to get value from
        :return:
        :param str obj: ip address to get value from
        :return: packed address
        :rtype: bytes
        """
        return obj.addr
        return socket.inet_pton(socket.AF_INET6, obj)

    @staticmethod
    def new_obj(value):
        """
        Retrieve Ipv6 address from a string representation.

        :param str value: value to get Ipv4 address from
        :param bytes value: value to get Ipv4 address from
        :return: Ipv4 address
        :rtype: core.misc.ipaddress.IpAddress
        :rtype: str
        """
        return IpAddress(af=socket.AF_INET6, address=value)
        return socket.inet_ntop(socket.AF_INET6, value)


class CoreTlvDataMacAddr(CoreTlvDataObj):
    """
    Utility class for packing/unpacking mac addresses.
    """

    data_format = "!2x8s"
    data_type = MacAddress.from_string
    data_type = str
    pad_len = 2

    @staticmethod

@@ -320,29 +332,34 @@ class CoreTlvDataMacAddr(CoreTlvDataObj):
        """
        Retrieve Ipv6 address value from object.

        :param core.misc.ipaddress.MacAddress obj: mac address to get value from
        :return:
        :param str obj: mac address to get value from
        :return: packed mac address
        :rtype: bytes
        """
        # extend to 64 bits
        return "\0\0" + obj.addr
        return b"\0\0" + netaddr.EUI(obj).packed

    @staticmethod
    def new_obj(value):
        """
        Retrieve mac address from a string representation.

        :param str value: value to get Ipv4 address from
        :return: Ipv4 address
        :rtype: core.misc.ipaddress.MacAddress
        :param bytes value: value to get Ipv4 address from
        :return: mac address
        :rtype: str
        """
        # only use 48 bits
        return MacAddress(address=value[2:])
        value = binascii.hexlify(value[2:]).decode()
        mac = netaddr.EUI(value)
        mac.dialect = netaddr.mac_unix
        return str(mac)


class CoreTlv(object):
class CoreTlv:
    """
    Base class for representing CORE TLVs.
    """

    header_format = "!BB"
    header_len = struct.calcsize(header_format)


@@ -377,10 +394,12 @@ class CoreTlv(object):
        :param data: data to unpack
        :return: unpacked data class
        """
        tlv_type, tlv_len = struct.unpack(cls.header_format, data[:cls.header_len])
        tlv_type, tlv_len = struct.unpack(cls.header_format, data[: cls.header_len])
        header_len = cls.header_len
        if tlv_len == 0:
            tlv_type, zero, tlv_len = struct.unpack(cls.long_header_format, data[:cls.long_header_len])
            tlv_type, _zero, tlv_len = struct.unpack(
                cls.long_header_format, data[: cls.long_header_len]
            )
            header_len = cls.long_header_len
        tlv_size = header_len + tlv_len
        # for 32-bit alignment

@@ -397,12 +416,10 @@ class CoreTlv(object):
        :return: header and packed data
        """
        tlv_len, tlv_data = cls.tlv_data_class_map[tlv_type].pack(value)

        if tlv_len < 256:
            hdr = struct.pack(cls.header_format, tlv_type, tlv_len)
        else:
            hdr = struct.pack(cls.long_header_format, tlv_type, 0, tlv_len)

        return hdr + tlv_data

    @classmethod

@@ -426,7 +443,7 @@ class CoreTlv(object):
        try:
            return self.tlv_type_map(self.tlv_type).name
        except ValueError:
            return "unknown tlv type: %s" % str(self.tlv_type)
            return f"unknown tlv type: {self.tlv_type}"

    def __str__(self):
        """

@@ -435,7 +452,7 @@ class CoreTlv(object):
        :return: string representation
        :rtype: str
        """
        return "%s <tlvtype = %s, value = %s>" % (self.__class__.__name__, self.type_str(), self.value)
        return f"{self.__class__.__name__} <tlvtype = {self.type_str()}, value = {self.value}>"


class CoreNodeTlv(CoreTlv):

@@ -660,7 +677,7 @@ class CoreExceptionTlv(CoreTlv):
    }


class CoreMessage(object):
class CoreMessage:
    """
    Base class for representing CORE messages.
    """

@@ -686,14 +703,16 @@ class CoreMessage(object):
        :return: unpacked tuple
        :rtype: tuple
        """
        message_type, message_flags, message_len = struct.unpack(cls.header_format, data[:cls.header_len])
        message_type, message_flags, message_len = struct.unpack(
            cls.header_format, data[: cls.header_len]
        )
        return message_type, message_flags, message_len

    @classmethod
    def create(cls, flags, values):
        tlv_data = structutils.pack_values(cls.tlv_class, values)
        packed = cls.pack(flags, tlv_data)
        header_data = packed[:cls.header_len]
        header_data = packed[: cls.header_len]
        return cls(flags, header_data, tlv_data)

    @classmethod

@@ -705,7 +724,9 @@ class CoreMessage(object):
        :param tlv_data: data to get length from for packing
        :return: combined header and tlv data
        """
        header = struct.pack(cls.header_format, cls.message_type, message_flags, len(tlv_data))
        header = struct.pack(
            cls.header_format, cls.message_type, message_flags, len(tlv_data)
        )
        return header + tlv_data

    def add_tlv_data(self, key, value):

@@ -717,7 +738,7 @@ class CoreMessage(object):
        :return: nothing
        """
        if key in self.tlv_data:
            raise KeyError("key already exists: %s (val=%s)" % (key, value))
            raise KeyError(f"key already exists: {key} (val={value})")

        self.tlv_data[key] = value


@@ -748,13 +769,11 @@ class CoreMessage(object):
        :return: packed data
        :rtype: str
        """
        tlv_data = ""
        keys = sorted(self.tlv_data.keys())

        tlv_data = b""
        for key in keys:
            value = self.tlv_data[key]
            tlv_data += self.tlv_class.pack(key, value)

        return tlv_data

    def repack(self):

@@ -778,7 +797,7 @@ class CoreMessage(object):
        try:
            return MessageTypes(self.message_type).name
        except ValueError:
            return "unknown message type: %s" % str(self.message_type)
            return f"unknown message type: {self.message_type}"

    def flag_str(self):
        """

@@ -788,19 +807,20 @@ class CoreMessage(object):
        :rtype: str
        """
        message_flags = []
        flag = 1L
        flag = 1

        while True:
            if self.flags & flag:
                try:
                    message_flags.append(self.flag_map(flag).name)
                except ValueError:
                    message_flags.append("0x%x" % flag)
                    message_flags.append(f"0x{flag:x}")
            flag <<= 1
            if not (self.flags & ~(flag - 1)):
                break

        return "0x%x <%s>" % (self.flags, " | ".join(message_flags))
        message_flags = " | ".join(message_flags)
        return f"0x{self.flags:x} <{message_flags}>"

    def __str__(self):
        """

@@ -809,15 +829,16 @@ class CoreMessage(object):
        :return: string representation
        :rtype: str
        """
        result = "%s <msgtype = %s, flags = %s>" % (self.__class__.__name__, self.type_str(), self.flag_str())
        result = f"{self.__class__.__name__} <msgtype = {self.type_str()}, flags = {self.flag_str()}>"

        for key, value in self.tlv_data.iteritems():
        for key in self.tlv_data:
            value = self.tlv_data[key]
            try:
                tlv_type = self.tlv_class.tlv_type_map(key).name
            except ValueError:
                tlv_type = "tlv type %s" % key
                tlv_type = f"tlv type {key}"

            result += "\n %s: %s" % (tlv_type, value)
            result += f"\n {tlv_type}: {value}"

        return result


@@ -843,7 +864,7 @@ class CoreMessage(object):
        elif self.message_type == MessageTypes.INTERFACE.value:
            number1 = self.get_tlv(InterfaceTlvs.NODE.value)
        elif self.message_type == MessageTypes.EVENT.value:
            number1 = self.get_tlv(EventTlvs.NODE)
            number1 = self.get_tlv(EventTlvs.NODE.value)

        result = []


@@ -880,6 +901,7 @@ class CoreNodeMessage(CoreMessage):
    """
    CORE node message class.
    """

    message_type = MessageTypes.NODE.value
    tlv_class = CoreNodeTlv


@@ -888,6 +910,7 @@ class CoreLinkMessage(CoreMessage):
    """
    CORE link message class.
    """

    message_type = MessageTypes.LINK.value
    tlv_class = CoreLinkTlv


@@ -896,6 +919,7 @@ class CoreExecMessage(CoreMessage):
    """
    CORE execute message class.
    """

    message_type = MessageTypes.EXECUTE.value
    tlv_class = CoreExecuteTlv


@@ -904,6 +928,7 @@ class CoreRegMessage(CoreMessage):
    """
    CORE register message class.
    """

    message_type = MessageTypes.REGISTER.value
    tlv_class = CoreRegisterTlv


@@ -912,6 +937,7 @@ class CoreConfMessage(CoreMessage):
    """
    CORE configuration message class.
    """

    message_type = MessageTypes.CONFIG.value
    tlv_class = CoreConfigTlv


@@ -920,6 +946,7 @@ class CoreFileMessage(CoreMessage):
    """
    CORE file message class.
    """

    message_type = MessageTypes.FILE.value
    tlv_class = CoreFileTlv


@@ -928,6 +955,7 @@ class CoreIfaceMessage(CoreMessage):
    """
    CORE interface message class.
    """

    message_type = MessageTypes.INTERFACE.value
    tlv_class = CoreInterfaceTlv


@@ -936,6 +964,7 @@ class CoreEventMessage(CoreMessage):
    """
    CORE event message class.
    """

    message_type = MessageTypes.EVENT.value
    tlv_class = CoreEventTlv


@@ -944,6 +973,7 @@ class CoreSessionMessage(CoreMessage):
    """
    CORE session message class.
    """

    message_type = MessageTypes.SESSION.value
    tlv_class = CoreSessionTlv


@@ -952,6 +982,7 @@ class CoreExceptionMessage(CoreMessage):
    """
    CORE exception message class.
    """

    message_type = MessageTypes.EXCEPTION.value
    tlv_class = CoreExceptionTlv


@@ -984,20 +1015,3 @@ def str_to_list(value):
        return None

    return value.split("|")


def state_name(value):
    """
    Helper to convert state number into state name using event types.

    :param int value: state value to derive name from
    :return: state name
    :rtype: str
    """

    try:
        value = EventTypes(value).name
    except ValueError:
        value = "unknown"

    return value
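Note (not part of the commit): the TLV pack methods above pad every value so that header plus data ends on a 4-byte boundary; a quick check of the padding arithmetic, assuming the 2-byte "!BB" short header shown in CoreTlv:

# --- illustrative example, not part of this diff ---
import struct

header_len = struct.calcsize("!BB")  # short TLV header is 2 bytes
for value in (b"a", b"ab", b"abcd", b"abcdef"):
    pad_len = -(header_len + len(value)) % 4
    assert (header_len + len(value) + pad_len) % 4 == 0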
2091  daemon/core/api/tlv/corehandlers.py  Normal file
File diff suppressed because it is too large.
60  daemon/core/api/tlv/coreserver.py  Normal file

@@ -0,0 +1,60 @@
"""
Defines core server for handling TCP connections.
"""

import socketserver

from core.emulator.coreemu import CoreEmu


class CoreServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
    """
    TCP server class, manages sessions and spawns request handlers for
    incoming connections.
    """

    daemon_threads = True
    allow_reuse_address = True

    def __init__(self, server_address, handler_class, config=None):
        """
        Server class initialization takes configuration data and calls
        the socketserver constructor.

        :param tuple[str, int] server_address: server host and port to use
        :param class handler_class: request handler
        :param dict config: configuration setting
        """
        self.coreemu = CoreEmu(config)
        self.config = config
        socketserver.TCPServer.__init__(self, server_address, handler_class)


class CoreUdpServer(socketserver.ThreadingMixIn, socketserver.UDPServer):
    """
    UDP server class, manages sessions and spawns request handlers for
    incoming connections.
    """

    daemon_threads = True
    allow_reuse_address = True

    def __init__(self, server_address, handler_class, mainserver):
        """
        Server class initialization takes configuration data and calls
        the SocketServer constructor

        :param server_address:
        :param class handler_class: request handler
        :param mainserver:
        """
        self.mainserver = mainserver
        socketserver.UDPServer.__init__(self, server_address, handler_class)

    def start(self):
        """
        Thread target to run concurrently with the TCP server.

        :return: nothing
        """
        self.serve_forever()
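Note (not part of the commit): a minimal sketch of standing up the TCP server, assuming a socketserver-compatible request handler class; the helper name and the use of port 4038 (CORE_API_PORT from the TLV enumerations later in this diff) are illustrative only:

# --- illustrative example, not part of this diff ---
from core.api.tlv.coreserver import CoreServer

def start_tlv_server(handler_class, host="localhost", port=4038, config=None):
    # CoreServer creates its own CoreEmu from the provided config.
    server = CoreServer((host, port), handler_class, config)
    server.serve_forever()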
182  daemon/core/api/tlv/dataconversion.py  Normal file

@@ -0,0 +1,182 @@
"""
Converts CORE data objects into legacy API messages.
"""
import logging
from collections import OrderedDict
from typing import Dict, List

from core.api.tlv import coreapi, structutils
from core.api.tlv.enumerations import ConfigTlvs, NodeTlvs
from core.config import ConfigGroup, ConfigurableOptions
from core.emulator.data import ConfigData


def convert_node(node_data):
    """
    Convenience method for converting NodeData to a packed TLV message.

    :param core.emulator.data.NodeData node_data: node data to convert
    :return: packed node message
    """
    session = None
    if node_data.session is not None:
        session = str(node_data.session)
    services = None
    if node_data.services is not None:
        services = "|".join([x for x in node_data.services])
    tlv_data = structutils.pack_values(
        coreapi.CoreNodeTlv,
        [
            (NodeTlvs.NUMBER, node_data.id),
            (NodeTlvs.TYPE, node_data.node_type.value),
            (NodeTlvs.NAME, node_data.name),
            (NodeTlvs.IP_ADDRESS, node_data.ip_address),
            (NodeTlvs.MAC_ADDRESS, node_data.mac_address),
            (NodeTlvs.IP6_ADDRESS, node_data.ip6_address),
            (NodeTlvs.MODEL, node_data.model),
            (NodeTlvs.EMULATION_ID, node_data.emulation_id),
            (NodeTlvs.EMULATION_SERVER, node_data.server),
            (NodeTlvs.SESSION, session),
            (NodeTlvs.X_POSITION, int(node_data.x_position)),
            (NodeTlvs.Y_POSITION, int(node_data.y_position)),
            (NodeTlvs.CANVAS, node_data.canvas),
            (NodeTlvs.NETWORK_ID, node_data.network_id),
            (NodeTlvs.SERVICES, services),
            (NodeTlvs.LATITUDE, str(node_data.latitude)),
            (NodeTlvs.LONGITUDE, str(node_data.longitude)),
            (NodeTlvs.ALTITUDE, str(node_data.altitude)),
            (NodeTlvs.ICON, node_data.icon),
            (NodeTlvs.OPAQUE, node_data.opaque),
        ],
    )
    return coreapi.CoreNodeMessage.pack(node_data.message_type.value, tlv_data)


def convert_config(config_data):
    """
    Convenience method for converting ConfigData to a packed TLV message.

    :param core.emulator.data.ConfigData config_data: config data to convert
    :return: packed message
    """
    session = None
    if config_data.session is not None:
        session = str(config_data.session)
    tlv_data = structutils.pack_values(
        coreapi.CoreConfigTlv,
        [
            (ConfigTlvs.NODE, config_data.node),
            (ConfigTlvs.OBJECT, config_data.object),
            (ConfigTlvs.TYPE, config_data.type),
            (ConfigTlvs.DATA_TYPES, config_data.data_types),
            (ConfigTlvs.VALUES, config_data.data_values),
            (ConfigTlvs.CAPTIONS, config_data.captions),
            (ConfigTlvs.BITMAP, config_data.bitmap),
            (ConfigTlvs.POSSIBLE_VALUES, config_data.possible_values),
            (ConfigTlvs.GROUPS, config_data.groups),
            (ConfigTlvs.SESSION, session),
            (ConfigTlvs.INTERFACE_NUMBER, config_data.interface_number),
            (ConfigTlvs.NETWORK_ID, config_data.network_id),
            (ConfigTlvs.OPAQUE, config_data.opaque),
        ],
    )
    return coreapi.CoreConfMessage.pack(config_data.message_type, tlv_data)


class ConfigShim:
    """
    Provides helper methods for converting newer configuration values into TLV
    compatible formats.
    """

    @classmethod
    def str_to_dict(cls, key_values: str) -> Dict[str, str]:
        """
        Converts a TLV key/value string into an ordered mapping.

        :param key_values:
        :return: ordered mapping of key/value pairs
        """
        key_values = key_values.split("|")
        values = OrderedDict()
        for key_value in key_values:
            key, value = key_value.split("=", 1)
            values[key] = value
        return values

    @classmethod
    def groups_to_str(cls, config_groups: List[ConfigGroup]) -> str:
        """
        Converts configuration groups to a TLV formatted string.

        :param config_groups: configuration groups to format
        :return: TLV configuration group string
        """
        group_strings = []
        for config_group in config_groups:
            group_string = (
                f"{config_group.name}:{config_group.start}-{config_group.stop}"
            )
            group_strings.append(group_string)
        return "|".join(group_strings)

    @classmethod
    def config_data(
        cls,
        flags: int,
        node_id: int,
        type_flags: int,
        configurable_options: ConfigurableOptions,
        config: Dict[str, str],
    ) -> ConfigData:
        """
        Convert this class to a Config API message. Some TLVs are defined
        by the class, but node number, conf type flags, and values must
        be passed in.

        :param flags: message flags
        :param node_id: node id
        :param type_flags: type flags
        :param configurable_options: options to create config data for
        :param config: configuration values for options
        :return: configuration data object
        """
        key_values = None
        captions = None
        data_types = []
        possible_values = []
        logging.debug("configurable: %s", configurable_options)
        logging.debug("configuration options: %s", configurable_options.configurations)
        logging.debug("configuration data: %s", config)
        for configuration in configurable_options.configurations():
            if not captions:
                captions = configuration.label
            else:
                captions += f"|{configuration.label}"

            data_types.append(configuration.type.value)

            options = ",".join(configuration.options)
            possible_values.append(options)

            _id = configuration.id
            config_value = config.get(_id, configuration.default)
            key_value = f"{_id}={config_value}"
            if not key_values:
                key_values = key_value
            else:
                key_values += f"|{key_value}"

        groups_str = cls.groups_to_str(configurable_options.config_groups())
        return ConfigData(
            message_type=flags,
            node=node_id,
            object=configurable_options.name,
            type=type_flags,
            data_types=tuple(data_types),
            data_values=key_values,
            captions=captions,
            possible_values="|".join(possible_values),
            bitmap=configurable_options.bitmap,
            groups=groups_str,
        )
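Note (not part of the commit): ConfigShim round-trips configuration values through the "|" separated key=value strings the TLV API expects; a small illustrative check (the keys below are example strings only, not real CORE option names):

# --- illustrative example, not part of this diff ---
from core.api.tlv.dataconversion import ConfigShim

values = ConfigShim.str_to_dict("alpha=1|beta=two")
assert values["alpha"] == "1"
assert values["beta"] == "two"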
@@ -1,10 +1,8 @@
"""
Contains all legacy enumerations for interacting with legacy CORE code.
Enumerations specific to the CORE TLV API.
"""

from enum import Enum

CORE_API_VERSION = "1.23"
CORE_API_PORT = 4038


@@ -12,6 +10,7 @@ class MessageTypes(Enum):
    """
    CORE message types.
    """

    NODE = 0x01
    LINK = 0x02
    EXECUTE = 0x03

@@ -24,23 +23,11 @@ class MessageTypes(Enum):
    EXCEPTION = 0x0A


class MessageFlags(Enum):
    """
    CORE message flags.
    """
    ADD = 0x01
    DELETE = 0x02
    CRI = 0x04
    LOCAL = 0x08
    STRING = 0x10
    TEXT = 0x20
    TTY = 0x40


class NodeTlvs(Enum):
    """
    Node type, length, value enumerations.
    """

    NUMBER = 0x01
    TYPE = 0x02
    NAME = 0x03

@@ -63,40 +50,11 @@ class NodeTlvs(Enum):
    OPAQUE = 0x50


class NodeTypes(Enum):
    """
    Node types.
    """
    DEFAULT = 0
    PHYSICAL = 1
    TBD = 3
    SWITCH = 4
    HUB = 5
    WIRELESS_LAN = 6
    RJ45 = 7
    TUNNEL = 8
    KTUNNEL = 9
    EMANE = 10
    TAP_BRIDGE = 11
    PEER_TO_PEER = 12
    CONTROL_NET = 13
    EMANE_NET = 14


class Rj45Models(Enum):
    """
    RJ45 model types.
    """
    LINKED = 0
    WIRELESS = 1
    INSTALLED = 2


# Link Message TLV Types
class LinkTlvs(Enum):
    """
    Link type, length, value enumerations.
    """

    N1_NUMBER = 0x01
    N2_NUMBER = 0x02
    DELAY = 0x03

@@ -131,18 +89,11 @@ class LinkTlvs(Enum):
    OPAQUE = 0x50


class LinkTypes(Enum):
    """
    Link types.
    """
    WIRELESS = 0
    WIRED = 1


class ExecuteTlvs(Enum):
    """
    Execute type, length, value enumerations.
    """

    NODE = 0x01
    NUMBER = 0x02
    TIME = 0x03

@@ -152,23 +103,11 @@ class ExecuteTlvs(Enum):
    SESSION = 0x0A


class RegisterTlvs(Enum):
    """
    Register type, length, value enumerations.
    """
    WIRELESS = 0x01
    MOBILITY = 0x02
    UTILITY = 0x03
    EXECUTE_SERVER = 0x04
    GUI = 0x05
    EMULATION_SERVER = 0x06
    SESSION = 0x0A


class ConfigTlvs(Enum):
    """
    Configuration type, length, value enumerations.
    """

    NODE = 0x01
    OBJECT = 0x02
    TYPE = 0x03

@@ -188,33 +127,18 @@ class ConfigFlags(Enum):
    """
    Configuration flags.
    """

    NONE = 0x00
    REQUEST = 0x01
    UPDATE = 0x02
    RESET = 0x03


class ConfigDataTypes(Enum):
    """
    Configuration data types.
    """
    UINT8 = 0x01
    UINT16 = 0x02
    UINT32 = 0x03
    UINT64 = 0x04
    INT8 = 0x05
    INT16 = 0x06
    INT32 = 0x07
    INT64 = 0x08
    FLOAT = 0x09
    STRING = 0x0A
    BOOL = 0x0B


class FileTlvs(Enum):
    """
    File type, length, value enumerations.
    """

    NODE = 0x01
    NAME = 0x02
    MODE = 0x03

@@ -230,6 +154,7 @@ class InterfaceTlvs(Enum):
    """
    Interface type, length, value enumerations.
    """

    NODE = 0x01
    NUMBER = 0x02
    NAME = 0x03

@@ -249,6 +174,7 @@ class EventTlvs(Enum):
    """
    Event type, length, value enumerations.
    """

    NODE = 0x01
    TYPE = 0x02
    NAME = 0x03

@@ -257,32 +183,11 @@ class EventTlvs(Enum):
    SESSION = 0x0A


class EventTypes(Enum):
    """
    Event types.
    """
    NONE = 0
    DEFINITION_STATE = 1
    CONFIGURATION_STATE = 2
    INSTANTIATION_STATE = 3
    RUNTIME_STATE = 4
    DATACOLLECT_STATE = 5
    SHUTDOWN_STATE = 6
    START = 7
    STOP = 8
    PAUSE = 9
    RESTART = 10
    FILE_OPEN = 11
    FILE_SAVE = 12
    SCHEDULED = 13
    RECONFIGURE = 14
    INSTANTIATION_COMPLETE = 15


class SessionTlvs(Enum):
    """
    Session type, length, value enumerations.
    """

    NUMBER = 0x01
    NAME = 0x02
    FILE = 0x03

@@ -297,6 +202,7 @@ class ExceptionTlvs(Enum):
    """
    Exception type, length, value enumerations.
    """

    NODE = 0x01
    SESSION = 0x02
    LEVEL = 0x03

@@ -304,14 +210,3 @@ class ExceptionTlvs(Enum):
    DATE = 0x05
    TEXT = 0x06
    OPAQUE = 0x0A


class ExceptionLevels(Enum):
    """
    Exception levels.
    """
    NONE = 0
    FATAL = 1
    ERROR = 2
    WARNING = 3
    NOTICE = 4
|
@ -2,7 +2,7 @@
Utilities for working with python struct data.
"""

from core import logger
import logging


def pack_values(clazz, packers):

@ -15,7 +15,8 @@ def pack_values(clazz, packers):
"""

# iterate through tuples of values to pack
data = ""
logging.debug("packing: %s", packers)
data = b""
for packer in packers:
# check if a transformer was provided for valid values
transformer = None

@ -26,10 +27,6 @@ def pack_values(clazz, packers):
else:
raise RuntimeError("packer had more than 3 arguments")

# convert unicode to normal str for packing
if isinstance(value, unicode):
value = str(value)

# only pack actual values and avoid packing empty strings
# protobuf defaults to empty strings and does not imply a value to set
if value is None or (isinstance(value, str) and not value):

@ -40,7 +37,7 @@ def pack_values(clazz, packers):
value = transformer(value)

# pack and add to existing data
logger.debug("packing: %s - %s", tlv_type, value)
logging.debug("packing: %s - %s type(%s)", tlv_type, value, type(value))
data += clazz.pack(tlv_type.value, value)

return data
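To make the packer contract concrete: each packer is a (tlv_type, value) tuple with an optional third transformer element, None and empty-string values are skipped, and each surviving value is appended via clazz.pack(). A minimal self-contained sketch; DummyTlv and DummyTlvTypes are stand-ins invented for the example, and the core.misc.structutils import path is an assumption:

from enum import Enum

from core.misc.structutils import pack_values  # assumed module path


class DummyTlv:
    """Stand-in TLV class; pack_values only needs pack(type, value) -> bytes."""

    @classmethod
    def pack(cls, tlv_type, value):
        return bytes([tlv_type]) + str(value).encode()


class DummyTlvTypes(Enum):
    NAME = 0x01
    X_POSITION = 0x02


data = pack_values(DummyTlv, [
    (DummyTlvTypes.NAME, "n1"),
    (DummyTlvTypes.X_POSITION, 100, str),  # third element transforms the value first
    (DummyTlvTypes.NAME, None),            # None/empty values are not packed
])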
File diff suppressed because it is too large
|
@ -1,493 +0,0 @@
|
|||
"""
|
||||
Common support for configurable CORE objects.
|
||||
"""
|
||||
|
||||
import string
|
||||
|
||||
from core import logger
|
||||
from core.data import ConfigData
|
||||
from core.enumerations import ConfigDataTypes
|
||||
from core.enumerations import ConfigFlags
|
||||
|
||||
|
||||
class ConfigurableManager(object):
|
||||
"""
|
||||
A generic class for managing Configurables. This class can register
|
||||
with a session to receive Config Messages for setting some parameters
|
||||
for itself or for the Configurables that it manages.
|
||||
"""
|
||||
# name corresponds to configuration object field
|
||||
name = ""
|
||||
|
||||
# type corresponds with register message types
|
||||
config_type = None
|
||||
|
||||
def __init__(self):
|
||||
"""
|
||||
Creates a ConfigurableManager instance.
|
||||
"""
|
||||
# configurable key=values, indexed by node number
|
||||
self.configs = {}
|
||||
|
||||
# TODO: fix the need for this and isolate to the mobility class that wants it
|
||||
self._modelclsmap = {}
|
||||
|
||||
def configure(self, session, config_data):
|
||||
"""
|
||||
Handle configure messages. The configuration message sent to a
|
||||
ConfigurableManager usually is used to:
|
||||
1. Request a list of Configurables (request flag)
|
||||
2. Reset manager and clear configs (reset flag)
|
||||
3. Send values that configure the manager or one of its Configurables
|
||||
|
||||
:param core.session.Session session: CORE session object
|
||||
:param ConfigData config_data: configuration data for carrying out a configuration
|
||||
:return: response messages
|
||||
"""
|
||||
|
||||
if config_data.type == ConfigFlags.REQUEST.value:
|
||||
return self.configure_request(config_data)
|
||||
elif config_data.type == ConfigFlags.RESET.value:
|
||||
return self.configure_reset(config_data)
|
||||
else:
|
||||
return self.configure_values(config_data)
|
||||
|
||||
def configure_request(self, config_data):
|
||||
"""
|
||||
Request configuration data.
|
||||
|
||||
:param ConfigData config_data: configuration data for carrying out a configuration
|
||||
:return: nothing
|
||||
"""
|
||||
return None
|
||||
|
||||
def configure_reset(self, config_data):
|
||||
"""
|
||||
By default, resets this manager to clear configs.
|
||||
|
||||
:param ConfigData config_data: configuration data for carrying out a configuration
|
||||
:return: reset response messages, or None
|
||||
"""
|
||||
return self.reset()
|
||||
|
||||
def configure_values(self, config_data):
|
||||
"""
|
||||
Values have been sent to this manager.
|
||||
|
||||
:param ConfigData config_data: configuration data for carrying out a configuration
|
||||
:return: nothing
|
||||
"""
|
||||
return None
|
||||
|
||||
def configure_values_keyvalues(self, config_data, target, keys):
|
||||
"""
|
||||
Helper that can be used for configure_values for parsing in
|
||||
'key=value' strings from a values field. The key name must be
|
||||
in the keys list, and target.key=value is set.
|
||||
|
||||
:param ConfigData config_data: configuration data for carrying out a configuration
|
||||
:param target: target to set attribute values on
|
||||
:param keys: list of keys to verify validity
|
||||
:return: nothing
|
||||
"""
|
||||
values = config_data.data_values
|
||||
|
||||
if values is None:
|
||||
return None
|
||||
|
||||
kvs = values.split('|')
|
||||
for kv in kvs:
|
||||
try:
|
||||
key, value = kv.split('=', 1)
|
||||
if value is not None and not value.strip():
|
||||
value = None
|
||||
except ValueError:
|
||||
# value only
|
||||
key = keys[kvs.index(kv)]
|
||||
value = kv
|
||||
if key not in keys:
|
||||
raise ValueError("invalid key: %s" % key)
|
||||
if value is not None:
|
||||
setattr(target, key, value)
|
||||
|
||||
return None
|
||||
|
||||
def reset(self):
|
||||
"""
|
||||
Reset functionality for the configurable class.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
return None
|
||||
|
||||
def setconfig(self, nodenum, conftype, values):
|
||||
"""
|
||||
Add configuration values for a node to a dictionary; values are
|
||||
usually received from a Configuration Message, and may refer to a
|
||||
node for which no object exists yet
|
||||
|
||||
:param int nodenum: node id
|
||||
:param conftype: configuration types
|
||||
:param values: configuration values
|
||||
:return: nothing
|
||||
"""
|
||||
logger.info("setting config for node(%s): %s - %s", nodenum, conftype, values)
|
||||
conflist = []
|
||||
if nodenum in self.configs:
|
||||
oldlist = self.configs[nodenum]
|
||||
found = False
|
||||
for t, v in oldlist:
|
||||
if t == conftype:
|
||||
# replace existing config
|
||||
found = True
|
||||
conflist.append((conftype, values))
|
||||
else:
|
||||
conflist.append((t, v))
|
||||
if not found:
|
||||
conflist.append((conftype, values))
|
||||
else:
|
||||
conflist.append((conftype, values))
|
||||
self.configs[nodenum] = conflist
|
||||
|
||||
def getconfig(self, nodenum, conftype, defaultvalues):
|
||||
"""
|
||||
Get configuration values for a node; if the values don't exist in
|
||||
our dictionary then return the default values supplied
|
||||
|
||||
:param int nodenum: node id
|
||||
:param conftype: configuration type
|
||||
:param defaultvalues: default values
|
||||
:return: configuration type and default values
|
||||
:type: tuple
|
||||
"""
|
||||
logger.info("getting config for node(%s): %s - default(%s)",
|
||||
nodenum, conftype, defaultvalues)
|
||||
if nodenum in self.configs:
|
||||
# return configured values
|
||||
conflist = self.configs[nodenum]
|
||||
for t, v in conflist:
|
||||
if conftype is None or t == conftype:
|
||||
return t, v
|
||||
# return default values provided (may be None)
|
||||
return conftype, defaultvalues
|
||||
|
||||
def getallconfigs(self, use_clsmap=True):
|
||||
"""
|
||||
Return (nodenum, conftype, values) tuples for all stored configs.
|
||||
Used when reconnecting to a session.
|
||||
|
||||
:param bool use_clsmap: should a class map be used, default to True
|
||||
:return: list of all configurations
|
||||
:rtype: list
|
||||
"""
|
||||
r = []
|
||||
for nodenum in self.configs:
|
||||
for t, v in self.configs[nodenum]:
|
||||
if use_clsmap:
|
||||
t = self._modelclsmap[t]
|
||||
r.append((nodenum, t, v))
|
||||
return r
|
||||
|
||||
def clearconfig(self, nodenum):
|
||||
"""
|
||||
remove configuration values for the specified node;
|
||||
when nodenum is None, remove all configuration values
|
||||
|
||||
:param int nodenum: node id
|
||||
:return: nothing
|
||||
"""
|
||||
if nodenum is None:
|
||||
self.configs = {}
|
||||
return
|
||||
if nodenum in self.configs:
|
||||
self.configs.pop(nodenum)
|
||||
|
||||
def setconfig_keyvalues(self, nodenum, conftype, keyvalues):
|
||||
"""
|
||||
Key values list of tuples for a node.
|
||||
|
||||
:param int nodenum: node id
|
||||
:param conftype: configuration type
|
||||
:param keyvalues: key values
|
||||
:return: nothing
|
||||
"""
|
||||
if conftype not in self._modelclsmap:
|
||||
logger.warn("unknown model type '%s'", conftype)
|
||||
return
|
||||
model = self._modelclsmap[conftype]
|
||||
keys = model.getnames()
|
||||
# defaults are merged with supplied values here
|
||||
values = list(model.getdefaultvalues())
|
||||
for key, value in keyvalues:
|
||||
if key not in keys:
|
||||
logger.warn("Skipping unknown configuration key for %s: '%s'", conftype, key)
|
||||
continue
|
||||
i = keys.index(key)
|
||||
values[i] = value
|
||||
self.setconfig(nodenum, conftype, values)
|
||||
|
||||
def getmodels(self, n):
|
||||
"""
|
||||
Return a list of model classes and values for a net if one has been
|
||||
configured. This is invoked when exporting a session to XML.
|
||||
This assumes self.configs contains an iterable of (model-names, values)
|
||||
and a self._modelclsmap dict exists.
|
||||
|
||||
:param n: network node to get models for
|
||||
:return: list of model and values tuples for the network node
|
||||
:rtype: list
|
||||
"""
|
||||
r = []
|
||||
if n.objid in self.configs:
|
||||
v = self.configs[n.objid]
|
||||
for model in v:
|
||||
cls = self._modelclsmap[model[0]]
|
||||
vals = model[1]
|
||||
r.append((cls, vals))
|
||||
return r
|
||||
|
||||
|
||||
class Configurable(object):
|
||||
"""
|
||||
A generic class for managing configuration parameters.
|
||||
Parameters are sent via Configuration Messages, which allow the GUI
|
||||
to build dynamic dialogs depending on what is being configured.
|
||||
"""
|
||||
name = ""
|
||||
# Configuration items:
|
||||
# ('name', 'type', 'default', 'possible-value-list', 'caption')
|
||||
config_matrix = []
|
||||
config_groups = None
|
||||
bitmap = None
|
||||
|
||||
def __init__(self, session=None, object_id=None):
|
||||
"""
|
||||
Creates a Configurable instance.
|
||||
|
||||
:param core.session.Session session: session for this configurable
|
||||
:param object_id:
|
||||
"""
|
||||
self.session = session
|
||||
self.object_id = object_id
|
||||
|
||||
def reset(self):
|
||||
"""
|
||||
Reset method.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
pass
|
||||
|
||||
def register(self):
|
||||
"""
|
||||
Register method.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
pass
|
||||
|
||||
@classmethod
|
||||
def getdefaultvalues(cls):
|
||||
"""
|
||||
Retrieve default values from configuration matrix.
|
||||
|
||||
:return: tuple of default values
|
||||
:rtype: tuple
|
||||
"""
|
||||
return tuple(map(lambda x: x[2], cls.config_matrix))
|
||||
|
||||
@classmethod
|
||||
def getnames(cls):
|
||||
"""
|
||||
Retrieve name values from configuration matrix.
|
||||
|
||||
:return: tuple of name values
|
||||
:rtype: tuple
|
||||
"""
|
||||
return tuple(map(lambda x: x[0], cls.config_matrix))
|
||||
|
||||
@classmethod
|
||||
def configure(cls, manager, config_data):
|
||||
"""
|
||||
Handle configuration messages for this object.
|
||||
|
||||
:param ConfigurableManager manager: configuration manager
|
||||
:param config_data: configuration data
|
||||
:return: configuration data object
|
||||
:rtype: ConfigData
|
||||
"""
|
||||
reply = None
|
||||
node_id = config_data.node
|
||||
object_name = config_data.object
|
||||
config_type = config_data.type
|
||||
interface_id = config_data.interface_number
|
||||
values_str = config_data.data_values
|
||||
|
||||
if interface_id is not None:
|
||||
node_id = node_id * 1000 + interface_id
|
||||
|
||||
logger.debug("received configure message for %s nodenum:%s", cls.name, str(node_id))
|
||||
if config_type == ConfigFlags.REQUEST.value:
|
||||
logger.info("replying to configure request for %s model", cls.name)
|
||||
# when object name is "all", the reply to this request may be None
|
||||
# if this node has not been configured for this model; otherwise we
|
||||
# reply with the defaults for this model
|
||||
if object_name == "all":
|
||||
defaults = None
|
||||
typeflags = ConfigFlags.UPDATE.value
|
||||
else:
|
||||
defaults = cls.getdefaultvalues()
|
||||
typeflags = ConfigFlags.NONE.value
|
||||
values = manager.getconfig(node_id, cls.name, defaults)[1]
|
||||
if values is None:
|
||||
logger.warn("no active configuration for node (%s), ignoring request")
|
||||
# node has no active config for this model (don't send defaults)
|
||||
return None
|
||||
# reply with config options
|
||||
reply = cls.config_data(0, node_id, typeflags, values)
|
||||
elif config_type == ConfigFlags.RESET.value:
|
||||
if object_name == "all":
|
||||
manager.clearconfig(node_id)
|
||||
# elif conftype == coreapi.CONF_TYPE_FLAGS_UPDATE:
|
||||
else:
|
||||
# store the configuration values for later use, when the node
|
||||
# object has been created
|
||||
if object_name is None:
|
||||
logger.info("no configuration object for node %s", node_id)
|
||||
return None
|
||||
defaults = cls.getdefaultvalues()
|
||||
if values_str is None:
|
||||
# use default or preconfigured values
|
||||
values = manager.getconfig(node_id, cls.name, defaults)[1]
|
||||
else:
|
||||
# use new values supplied from the conf message
|
||||
values = values_str.split('|')
|
||||
# determine new or old style config
|
||||
new = cls.haskeyvalues(values)
|
||||
if new:
|
||||
new_values = list(defaults)
|
||||
keys = cls.getnames()
|
||||
for v in values:
|
||||
key, value = v.split('=', 1)
|
||||
try:
|
||||
new_values[keys.index(key)] = value
|
||||
except ValueError:
|
||||
logger.info("warning: ignoring invalid key '%s'" % key)
|
||||
values = new_values
|
||||
manager.setconfig(node_id, object_name, values)
|
||||
|
||||
return reply
|
||||
|
||||
@classmethod
|
||||
def config_data(cls, flags, node_id, type_flags, values):
|
||||
"""
|
||||
Convert this class to a Config API message. Some TLVs are defined
|
||||
by the class, but node number, conf type flags, and values must
|
||||
be passed in.
|
||||
|
||||
:param flags: message flags
|
||||
:param int node_id: node id
|
||||
:param type_flags: type flags
|
||||
:param values: values
|
||||
:return: configuration data object
|
||||
:rtype: ConfigData
|
||||
"""
|
||||
keys = cls.getnames()
|
||||
keyvalues = map(lambda a, b: "%s=%s" % (a, b), keys, values)
|
||||
values_str = string.join(keyvalues, '|')
|
||||
datatypes = tuple(map(lambda x: x[1], cls.config_matrix))
|
||||
captions = reduce(lambda a, b: a + '|' + b, map(lambda x: x[4], cls.config_matrix))
|
||||
possible_valuess = reduce(lambda a, b: a + '|' + b, map(lambda x: x[3], cls.config_matrix))
|
||||
|
||||
return ConfigData(
|
||||
message_type=flags,
|
||||
node=node_id,
|
||||
object=cls.name,
|
||||
type=type_flags,
|
||||
data_types=datatypes,
|
||||
data_values=values_str,
|
||||
captions=captions,
|
||||
possible_values=possible_valuess,
|
||||
bitmap=cls.bitmap,
|
||||
groups=cls.config_groups
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def booltooffon(value):
|
||||
"""
|
||||
Convenience helper turns bool into on (True) or off (False) string.
|
||||
|
||||
:param str value: value to retrieve on/off value for
|
||||
:return: on or off string
|
||||
:rtype: str
|
||||
"""
|
||||
if value == "1" or value == "true" or value == "on":
|
||||
return "on"
|
||||
else:
|
||||
return "off"
|
||||
|
||||
@staticmethod
|
||||
def offontobool(value):
|
||||
"""
|
||||
Convenience helper for converting an on/off string to a integer.
|
||||
|
||||
:param str value: on/off string
|
||||
:return: on/off integer value
|
||||
:rtype: int
|
||||
"""
|
||||
if type(value) == str:
|
||||
if value.lower() == "on":
|
||||
return 1
|
||||
elif value.lower() == "off":
|
||||
return 0
|
||||
return value
|
||||
|
||||
@classmethod
|
||||
def valueof(cls, name, values):
|
||||
"""
|
||||
Helper to return a value by the name defined in confmatrix.
|
||||
Checks if it is boolean
|
||||
|
||||
:param str name: name to get value of
|
||||
:param values: values to get value from
|
||||
:return: value for name
|
||||
"""
|
||||
i = cls.getnames().index(name)
|
||||
if cls.config_matrix[i][1] == ConfigDataTypes.BOOL.value and values[i] != "":
|
||||
return cls.booltooffon(values[i])
|
||||
else:
|
||||
return values[i]
|
||||
|
||||
@staticmethod
|
||||
def haskeyvalues(values):
|
||||
"""
|
||||
Helper to check for list of key=value pairs versus a plain old
|
||||
list of values. Returns True if all elements are "key=value".
|
||||
|
||||
:param values: items to check for key/value pairs
|
||||
:return: True if all values are key/value pairs, False otherwise
|
||||
:rtype: bool
|
||||
"""
|
||||
if len(values) == 0:
|
||||
return False
|
||||
for v in values:
|
||||
if "=" not in v:
|
||||
return False
|
||||
return True
|
||||
|
||||
def getkeyvaluelist(self):
|
||||
"""
|
||||
Helper to return a list of (key, value) tuples. Keys come from
|
||||
configuration matrix and values are instance attributes.
|
||||
|
||||
:return: tuples of key value pairs
|
||||
:rtype: list
|
||||
"""
|
||||
key_values = []
|
||||
|
||||
for name in self.getnames():
|
||||
if hasattr(self, name):
|
||||
value = getattr(self, name)
|
||||
key_values.append((name, value))
|
||||
|
||||
return key_values
|
343 daemon/core/config.py Normal file
|
@ -0,0 +1,343 @@
|
|||
"""
|
||||
Common support for configurable CORE objects.
|
||||
"""
|
||||
|
||||
import logging
|
||||
from collections import OrderedDict
|
||||
from typing import TYPE_CHECKING, Dict, List, Tuple, Type, Union
|
||||
|
||||
from core.emane.nodes import EmaneNet
|
||||
from core.emulator.enumerations import ConfigDataTypes
|
||||
from core.nodes.network import WlanNode
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from core.location.mobility import WirelessModel
|
||||
|
||||
WirelessModelType = Type[WirelessModel]
|
||||
|
||||
|
||||
class ConfigGroup:
|
||||
"""
|
||||
Defines configuration group tabs used for display by ConfigurationOptions.
|
||||
"""
|
||||
|
||||
def __init__(self, name: str, start: int, stop: int) -> None:
|
||||
"""
|
||||
Creates a ConfigGroup object.
|
||||
|
||||
:param name: configuration group display name
|
||||
:param start: configurations start index for this group
|
||||
:param stop: configurations stop index for this group
|
||||
"""
|
||||
self.name = name
|
||||
self.start = start
|
||||
self.stop = stop
|
||||
|
||||
|
||||
class Configuration:
|
||||
"""
|
||||
Represents a configuration option.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
_id: str,
|
||||
_type: ConfigDataTypes,
|
||||
label: str = None,
|
||||
default: str = "",
|
||||
options: List[str] = None,
|
||||
) -> None:
|
||||
"""
|
||||
Creates a Configuration object.
|
||||
|
||||
:param _id: unique name for configuration
|
||||
:param _type: configuration data type
|
||||
:param label: configuration label for display
|
||||
:param default: default value for configuration
|
||||
:param options: list options if this is a configuration with a combobox
|
||||
"""
|
||||
self.id = _id
|
||||
self.type = _type
|
||||
self.default = default
|
||||
if not options:
|
||||
options = []
|
||||
self.options = options
|
||||
if not label:
|
||||
label = _id
|
||||
self.label = label
|
||||
|
||||
def __str__(self):
|
||||
return f"{self.__class__.__name__}(id={self.id}, type={self.type}, default={self.default}, options={self.options})"
|
||||
|
||||
|
||||
class ConfigurableOptions:
|
||||
"""
|
||||
Provides a base for defining configuration options within CORE.
|
||||
"""
|
||||
|
||||
name = None
|
||||
bitmap = None
|
||||
options = []
|
||||
|
||||
@classmethod
|
||||
def configurations(cls) -> List[Configuration]:
|
||||
"""
|
||||
Provides the configurations for this class.
|
||||
|
||||
:return: configurations
|
||||
"""
|
||||
return cls.options
|
||||
|
||||
@classmethod
|
||||
def config_groups(cls) -> List[ConfigGroup]:
|
||||
"""
|
||||
Defines how configurations are grouped.
|
||||
|
||||
:return: configuration group definition
|
||||
"""
|
||||
return [ConfigGroup("Options", 1, len(cls.configurations()))]
|
||||
|
||||
@classmethod
|
||||
def default_values(cls) -> Dict[str, str]:
|
||||
"""
|
||||
Provides an ordered mapping of configuration keys to default values.
|
||||
|
||||
:return: ordered configuration mapping default values
|
||||
"""
|
||||
return OrderedDict(
|
||||
[(config.id, config.default) for config in cls.configurations()]
|
||||
)
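As a quick illustration of this API, a subclass only needs to declare its options list; TimerOptions below is hypothetical and exists purely for the example:

from core.config import Configuration, ConfigurableOptions
from core.emulator.enumerations import ConfigDataTypes


class TimerOptions(ConfigurableOptions):
    """Hypothetical options class used only to illustrate the API."""

    name = "timer"
    options = [
        Configuration(_id="delay", _type=ConfigDataTypes.FLOAT, default="1.0"),
        Configuration(_id="enabled", _type=ConfigDataTypes.BOOL, default="1"),
    ]


TimerOptions.default_values()   # OrderedDict([("delay", "1.0"), ("enabled", "1")])
TimerOptions.config_groups()    # a single "Options" group covering indexes 1-2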
|
||||
|
||||
|
||||
class ConfigurableManager:
|
||||
"""
|
||||
Provides convenience methods for storing and retrieving configuration options for
|
||||
nodes.
|
||||
"""
|
||||
|
||||
_default_node = -1
|
||||
_default_type = _default_node
|
||||
|
||||
def __init__(self) -> None:
|
||||
"""
|
||||
Creates a ConfigurableManager object.
|
||||
"""
|
||||
self.node_configurations = {}
|
||||
|
||||
def nodes(self) -> List[int]:
|
||||
"""
|
||||
Retrieves the ids of all node configurations known by this manager.
|
||||
|
||||
:return: list of node ids
|
||||
"""
|
||||
return [x for x in self.node_configurations if x != self._default_node]
|
||||
|
||||
def config_reset(self, node_id: int = None) -> None:
|
||||
"""
|
||||
Clears all configurations or configuration for a specific node.
|
||||
|
||||
:param node_id: node id to clear configurations for, default is None and clears all configurations
|
||||
:return: nothing
|
||||
"""
|
||||
if not node_id:
|
||||
self.node_configurations.clear()
|
||||
elif node_id in self.node_configurations:
|
||||
self.node_configurations.pop(node_id)
|
||||
|
||||
def set_config(
|
||||
self,
|
||||
_id: str,
|
||||
value: str,
|
||||
node_id: int = _default_node,
|
||||
config_type: str = _default_type,
|
||||
) -> None:
|
||||
"""
|
||||
Set a specific configuration value for a node and configuration type.
|
||||
|
||||
:param _id: configuration key
|
||||
:param value: configuration value
|
||||
:param node_id: node id to store configuration for
|
||||
:param config_type: configuration type to store configuration for
|
||||
:return: nothing
|
||||
"""
|
||||
node_configs = self.node_configurations.setdefault(node_id, OrderedDict())
|
||||
node_type_configs = node_configs.setdefault(config_type, OrderedDict())
|
||||
node_type_configs[_id] = value
|
||||
|
||||
def set_configs(
|
||||
self,
|
||||
config: Dict[str, str],
|
||||
node_id: int = _default_node,
|
||||
config_type: str = _default_type,
|
||||
) -> None:
|
||||
"""
|
||||
Set configurations for a node and configuration type.
|
||||
|
||||
:param config: configurations to set
|
||||
:param node_id: node id to store configuration for
|
||||
:param config_type: configuration type to store configuration for
|
||||
:return: nothing
|
||||
"""
|
||||
logging.debug(
|
||||
"setting config for node(%s) type(%s): %s", node_id, config_type, config
|
||||
)
|
||||
node_configs = self.node_configurations.setdefault(node_id, OrderedDict())
|
||||
node_configs[config_type] = config
|
||||
|
||||
def get_config(
|
||||
self,
|
||||
_id: str,
|
||||
node_id: int = _default_node,
|
||||
config_type: str = _default_type,
|
||||
default: str = None,
|
||||
) -> str:
|
||||
"""
|
||||
Retrieves a specific configuration for a node and configuration type.
|
||||
|
||||
:param _id: specific configuration to retrieve
|
||||
:param node_id: node id to store configuration for
|
||||
:param config_type: configuration type to store configuration for
|
||||
:param default: default value to return when value is not found
|
||||
:return: configuration value
|
||||
"""
|
||||
result = default
|
||||
node_type_configs = self.get_configs(node_id, config_type)
|
||||
if node_type_configs:
|
||||
result = node_type_configs.get(_id, default)
|
||||
return result
|
||||
|
||||
def get_configs(
|
||||
self, node_id: int = _default_node, config_type: str = _default_type
|
||||
) -> Dict[str, str]:
|
||||
"""
|
||||
Retrieve configurations for a node and configuration type.
|
||||
|
||||
:param node_id: node id to store configuration for
|
||||
:param config_type: configuration type to store configuration for
|
||||
:return: configurations
|
||||
"""
|
||||
result = None
|
||||
node_configs = self.node_configurations.get(node_id)
|
||||
if node_configs:
|
||||
result = node_configs.get(config_type)
|
||||
return result
|
||||
|
||||
def get_all_configs(self, node_id: int = _default_node) -> List[Dict[str, str]]:
|
||||
"""
|
||||
Retrieve all current configuration types for a node.
|
||||
|
||||
:param node_id: node id to retrieve configurations for
|
||||
:return: all configuration types for a node
|
||||
"""
|
||||
return self.node_configurations.get(node_id)
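A short usage sketch of the manager above; the "basic_range" type label and the values are arbitrary examples:

from core.config import ConfigurableManager

manager = ConfigurableManager()
manager.set_configs({"range": "275", "bandwidth": "54000000"}, node_id=1, config_type="basic_range")
manager.set_config("jitter", "0", node_id=1, config_type="basic_range")

manager.get_config("range", node_id=1, config_type="basic_range")   # "275"
manager.get_configs(node_id=1, config_type="basic_range")           # includes "jitter"
manager.nodes()                                                     # [1]
manager.config_reset(1)                                             # drop node 1 configs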
|
||||
|
||||
|
||||
class ModelManager(ConfigurableManager):
|
||||
"""
|
||||
Helps handle setting models for nodes and managing their model configurations.
|
||||
"""
|
||||
|
||||
def __init__(self) -> None:
|
||||
"""
|
||||
Creates a ModelManager object.
|
||||
"""
|
||||
super().__init__()
|
||||
self.models = {}
|
||||
self.node_models = {}
|
||||
|
||||
def set_model_config(
|
||||
self, node_id: int, model_name: str, config: Dict[str, str] = None
|
||||
) -> None:
|
||||
"""
|
||||
Set configuration data for a model.
|
||||
|
||||
:param node_id: node id to set model configuration for
|
||||
:param model_name: model to set configuration for
|
||||
:param config: configuration data to set for model
|
||||
:return: nothing
|
||||
"""
|
||||
# get model class to configure
|
||||
model_class = self.models.get(model_name)
|
||||
if not model_class:
|
||||
raise ValueError(f"{model_name} is an invalid model")
|
||||
|
||||
# retrieve default values
|
||||
model_config = self.get_model_config(node_id, model_name)
|
||||
if not config:
|
||||
config = {}
|
||||
for key in config:
|
||||
value = config[key]
|
||||
model_config[key] = value
|
||||
|
||||
# set as node model for startup
|
||||
self.node_models[node_id] = model_name
|
||||
|
||||
# set configuration
|
||||
self.set_configs(model_config, node_id=node_id, config_type=model_name)
|
||||
|
||||
def get_model_config(self, node_id: int, model_name: str) -> Dict[str, str]:
|
||||
"""
|
||||
Retrieve configuration data for a model.
|
||||
|
||||
:param node_id: node id to set model configuration for
|
||||
:param model_name: model to set configuration for
|
||||
:return: current model configuration for node
|
||||
"""
|
||||
# get model class to configure
|
||||
model_class = self.models.get(model_name)
|
||||
if not model_class:
|
||||
raise ValueError(f"{model_name} is an invalid model")
|
||||
|
||||
config = self.get_configs(node_id=node_id, config_type=model_name)
|
||||
if not config:
|
||||
# set default values, when not already set
|
||||
config = model_class.default_values()
|
||||
self.set_configs(config, node_id=node_id, config_type=model_name)
|
||||
|
||||
return config
|
||||
|
||||
def set_model(
|
||||
self,
|
||||
node: Union[WlanNode, EmaneNet],
|
||||
model_class: "WirelessModelType",
|
||||
config: Dict[str, str] = None,
|
||||
) -> None:
|
||||
"""
|
||||
Set model and model configuration for node.
|
||||
|
||||
:param node: node to set model for
|
||||
:param model_class: model class to set for node
|
||||
:param config: model configuration, None for default configuration
|
||||
:return: nothing
|
||||
"""
|
||||
logging.debug(
|
||||
"setting model(%s) for node(%s): %s", model_class.name, node.id, config
|
||||
)
|
||||
self.set_model_config(node.id, model_class.name, config)
|
||||
config = self.get_model_config(node.id, model_class.name)
|
||||
node.setmodel(model_class, config)
|
||||
|
||||
def get_models(
|
||||
self, node: Union[WlanNode, EmaneNet]
|
||||
) -> List[Tuple[Type, Dict[str, str]]]:
|
||||
"""
|
||||
Return a list of model classes and values for a net if one has been
|
||||
configured. This is invoked when exporting a session to XML.
|
||||
|
||||
:param node: network node to get models for
|
||||
:return: list of model and values tuples for the network node
|
||||
"""
|
||||
all_configs = self.get_all_configs(node.id)
|
||||
if not all_configs:
|
||||
all_configs = {}
|
||||
|
||||
models = []
|
||||
for model_name in all_configs:
|
||||
config = all_configs[model_name]
|
||||
if model_name == ModelManager._default_node:
|
||||
continue
|
||||
model_class = self.models[model_name]
|
||||
models.append((model_class, config))
|
||||
|
||||
logging.debug("models for node(%s): %s", node.id, models)
|
||||
return models
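And a sketch of how ModelManager ties a registered options class to a node; MyWirelessModel is hypothetical, and real wireless models also implement the behavior expected by node.setmodel:

from core.config import Configuration, ConfigurableOptions, ModelManager
from core.emulator.enumerations import ConfigDataTypes


class MyWirelessModel(ConfigurableOptions):
    """Hypothetical model options used only for this example."""

    name = "mymodel"
    options = [Configuration(_id="range", _type=ConfigDataTypes.UINT32, default="275")]


manager = ModelManager()
manager.models[MyWirelessModel.name] = MyWirelessModel
manager.set_model_config(1, MyWirelessModel.name, {"range": "300"})
manager.get_model_config(1, MyWirelessModel.name)  # {"range": "300"}, defaults merged with the override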
|
395 daemon/core/configservice/base.py Normal file
|
@ -0,0 +1,395 @@
|
|||
import abc
|
||||
import enum
|
||||
import inspect
|
||||
import logging
|
||||
import pathlib
|
||||
import time
|
||||
from typing import Any, Dict, List
|
||||
|
||||
from mako import exceptions
|
||||
from mako.lookup import TemplateLookup
|
||||
from mako.template import Template
|
||||
|
||||
from core.config import Configuration
|
||||
from core.errors import CoreCommandError, CoreError
|
||||
from core.nodes.base import CoreNode
|
||||
|
||||
TEMPLATES_DIR = "templates"
|
||||
|
||||
|
||||
class ConfigServiceMode(enum.Enum):
|
||||
BLOCKING = 0
|
||||
NON_BLOCKING = 1
|
||||
TIMER = 2
|
||||
|
||||
|
||||
class ConfigServiceBootError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class ConfigService(abc.ABC):
|
||||
"""
|
||||
Base class for creating configurable services.
|
||||
"""
|
||||
|
||||
# validation period in seconds, how frequent validation is attempted
|
||||
validation_period = 0.5
|
||||
|
||||
# time to wait in seconds for determining if service started successfully
|
||||
validation_timer = 5
|
||||
|
||||
def __init__(self, node: CoreNode) -> None:
|
||||
"""
|
||||
Create ConfigService instance.
|
||||
|
||||
:param node: node this service is assigned to
|
||||
"""
|
||||
self.node = node
|
||||
class_file = inspect.getfile(self.__class__)
|
||||
templates_path = pathlib.Path(class_file).parent.joinpath(TEMPLATES_DIR)
|
||||
self.templates = TemplateLookup(directories=templates_path)
|
||||
self.config = {}
|
||||
self.custom_templates = {}
|
||||
self.custom_config = {}
|
||||
configs = self.default_configs[:]
|
||||
self._define_config(configs)
|
||||
|
||||
@staticmethod
|
||||
def clean_text(text: str) -> str:
|
||||
"""
|
||||
Returns space stripped text for string literals, while keeping space
|
||||
indentations.
|
||||
|
||||
:param text: text to clean
|
||||
:return: cleaned text
|
||||
"""
|
||||
return inspect.cleandoc(text)
|
||||
|
||||
@property
|
||||
@abc.abstractmethod
|
||||
def name(self) -> str:
|
||||
raise NotImplementedError
|
||||
|
||||
@property
|
||||
@abc.abstractmethod
|
||||
def group(self) -> str:
|
||||
raise NotImplementedError
|
||||
|
||||
@property
|
||||
@abc.abstractmethod
|
||||
def directories(self) -> List[str]:
|
||||
raise NotImplementedError
|
||||
|
||||
@property
|
||||
@abc.abstractmethod
|
||||
def files(self) -> List[str]:
|
||||
raise NotImplementedError
|
||||
|
||||
@property
|
||||
@abc.abstractmethod
|
||||
def default_configs(self) -> List[Configuration]:
|
||||
raise NotImplementedError
|
||||
|
||||
@property
|
||||
@abc.abstractmethod
|
||||
def modes(self) -> Dict[str, Dict[str, str]]:
|
||||
raise NotImplementedError
|
||||
|
||||
@property
|
||||
@abc.abstractmethod
|
||||
def executables(self) -> List[str]:
|
||||
raise NotImplementedError
|
||||
|
||||
@property
|
||||
@abc.abstractmethod
|
||||
def dependencies(self) -> List[str]:
|
||||
raise NotImplementedError
|
||||
|
||||
@property
|
||||
@abc.abstractmethod
|
||||
def startup(self) -> List[str]:
|
||||
raise NotImplementedError
|
||||
|
||||
@property
|
||||
@abc.abstractmethod
|
||||
def validate(self) -> List[str]:
|
||||
raise NotImplementedError
|
||||
|
||||
@property
|
||||
@abc.abstractmethod
|
||||
def shutdown(self) -> List[str]:
|
||||
raise NotImplementedError
|
||||
|
||||
@property
|
||||
@abc.abstractmethod
|
||||
def validation_mode(self) -> ConfigServiceMode:
|
||||
raise NotImplementedError
|
||||
|
||||
def start(self) -> None:
|
||||
"""
|
||||
Creates services files/directories, runs startup, and validates based on
|
||||
validation mode.
|
||||
|
||||
:return: nothing
|
||||
:raises ConfigServiceBootError: when there is an error starting service
|
||||
"""
|
||||
logging.info("node(%s) service(%s) starting...", self.node.name, self.name)
|
||||
self.create_dirs()
|
||||
self.create_files()
|
||||
wait = self.validation_mode == ConfigServiceMode.BLOCKING
|
||||
self.run_startup(wait)
|
||||
if not wait:
|
||||
if self.validation_mode == ConfigServiceMode.TIMER:
|
||||
self.wait_validation()
|
||||
else:
|
||||
self.run_validation()
|
||||
|
||||
def stop(self) -> None:
|
||||
"""
|
||||
Stop service using shutdown commands.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
for cmd in self.shutdown:
|
||||
try:
|
||||
self.node.cmd(cmd)
|
||||
except CoreCommandError:
|
||||
logging.exception(
|
||||
f"node({self.node.name}) service({self.name}) "
|
||||
f"failed shutdown: {cmd}"
|
||||
)
|
||||
|
||||
def restart(self) -> None:
|
||||
"""
|
||||
Restarts service by running stop and then start.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
self.stop()
|
||||
self.start()
|
||||
|
||||
def create_dirs(self) -> None:
|
||||
"""
|
||||
Creates directories for service.
|
||||
|
||||
:return: nothing
|
||||
:raises CoreError: when there is a failure creating a directory
|
||||
"""
|
||||
for directory in self.directories:
|
||||
try:
|
||||
self.node.privatedir(directory)
|
||||
except (CoreCommandError, ValueError):
|
||||
raise CoreError(
|
||||
f"node({self.node.name}) service({self.name}) "
|
||||
f"failure to create service directory: {directory}"
|
||||
)
|
||||
|
||||
def data(self) -> Dict[str, Any]:
|
||||
"""
|
||||
Returns key/value data, used when rendering file templates.
|
||||
|
||||
:return: key/value template data
|
||||
"""
|
||||
return {}
|
||||
|
||||
def set_template(self, name: str, template: str) -> None:
|
||||
"""
|
||||
Store custom template to render for a given file.
|
||||
|
||||
:param name: file to store custom template for
|
||||
:param template: custom template to render
|
||||
:return: nothing
|
||||
"""
|
||||
self.custom_templates[name] = template
|
||||
|
||||
def get_text_template(self, name: str) -> str:
|
||||
"""
|
||||
Retrieves text based template for files that do not have a file based template.
|
||||
|
||||
:param name: name of file to get template for
|
||||
:return: template to render
|
||||
"""
|
||||
raise CoreError(f"service({self.name}) unknown template({name})")
|
||||
|
||||
def get_templates(self) -> Dict[str, str]:
|
||||
"""
|
||||
Retrieves mapping of file names to templates for all cases, which
|
||||
includes custom templates, file templates, and text templates.
|
||||
|
||||
:return: mapping of files to templates
|
||||
"""
|
||||
templates = {}
|
||||
for name in self.files:
|
||||
basename = pathlib.Path(name).name
|
||||
if name in self.custom_templates:
|
||||
template = self.custom_templates[name]
|
||||
template = self.clean_text(template)
|
||||
elif self.templates.has_template(basename):
|
||||
template = self.templates.get_template(basename).source
|
||||
else:
|
||||
template = self.get_text_template(name)
|
||||
template = self.clean_text(template)
|
||||
templates[name] = template
|
||||
return templates
|
||||
|
||||
def create_files(self) -> None:
|
||||
"""
|
||||
Creates service files inside associated node.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
data = self.data()
|
||||
for name in self.files:
|
||||
basename = pathlib.Path(name).name
|
||||
if name in self.custom_templates:
|
||||
text = self.custom_templates[name]
|
||||
rendered = self.render_text(text, data)
|
||||
elif self.templates.has_template(basename):
|
||||
rendered = self.render_template(basename, data)
|
||||
else:
|
||||
text = self.get_text_template(name)
|
||||
rendered = self.render_text(text, data)
|
||||
logging.debug(
|
||||
"node(%s) service(%s) template(%s): \n%s",
|
||||
self.node.name,
|
||||
self.name,
|
||||
name,
|
||||
rendered,
|
||||
)
|
||||
self.node.nodefile(name, rendered)
|
||||
|
||||
def run_startup(self, wait: bool) -> None:
|
||||
"""
|
||||
Run startup commands for service on node.
|
||||
|
||||
:param wait: wait for successful command exit status when True, ignore status otherwise
|
||||
:return: nothing
|
||||
:raises ConfigServiceBootError: when a command that waits fails
|
||||
"""
|
||||
for cmd in self.startup:
|
||||
try:
|
||||
self.node.cmd(cmd, wait=wait)
|
||||
except CoreCommandError as e:
|
||||
raise ConfigServiceBootError(
|
||||
f"node({self.node.name}) service({self.name}) failed startup: {e}"
|
||||
)
|
||||
|
||||
def wait_validation(self) -> None:
|
||||
"""
|
||||
Waits for a period of time to consider service started successfully.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
time.sleep(self.validation_timer)
|
||||
|
||||
def run_validation(self) -> None:
|
||||
"""
|
||||
Runs validation commands for service on node.
|
||||
|
||||
:return: nothing
|
||||
:raises ConfigServiceBootError: if there is a validation failure
|
||||
"""
|
||||
start = time.monotonic()
|
||||
cmds = self.validate[:]
|
||||
index = 0
|
||||
while cmds:
|
||||
cmd = cmds[index]
|
||||
try:
|
||||
self.node.cmd(cmd)
|
||||
del cmds[index]
|
||||
index += 1
|
||||
except CoreCommandError:
|
||||
logging.debug(
|
||||
f"node({self.node.name}) service({self.name}) "
|
||||
f"validate command failed: {cmd}"
|
||||
)
|
||||
time.sleep(self.validation_period)
|
||||
|
||||
if cmds and time.monotonic() - start > self.validation_timer:
|
||||
raise ConfigServiceBootError(
|
||||
f"node({self.node.name}) service({self.name}) failed to validate"
|
||||
)
|
||||
|
||||
def _render(self, template: Template, data: Dict[str, Any] = None) -> str:
|
||||
"""
|
||||
Renders template providing all associated data to template.
|
||||
|
||||
:param template: template to render
|
||||
:param data: service specific defined data for template
|
||||
:return: rendered template
|
||||
"""
|
||||
if data is None:
|
||||
data = {}
|
||||
return template.render_unicode(
|
||||
node=self.node, config=self.render_config(), **data
|
||||
)
|
||||
|
||||
def render_text(self, text: str, data: Dict[str, Any] = None) -> str:
|
||||
"""
|
||||
Renders text based template providing all associated data to template.
|
||||
|
||||
:param text: text to render
|
||||
:param data: service specific defined data for template
|
||||
:return: rendered template
|
||||
"""
|
||||
text = self.clean_text(text)
|
||||
try:
|
||||
template = Template(text)
|
||||
return self._render(template, data)
|
||||
except Exception:
|
||||
raise CoreError(
|
||||
f"node({self.node.name}) service({self.name}) "
|
||||
f"{exceptions.text_error_template().render_unicode()}"
|
||||
)
|
||||
|
||||
def render_template(self, basename: str, data: Dict[str, Any] = None) -> str:
|
||||
"""
|
||||
Renders file based template providing all associated data to template.
|
||||
|
||||
:param basename: base name for file to render
|
||||
:param data: service specific defined data for template
|
||||
:return: rendered template
|
||||
"""
|
||||
try:
|
||||
template = self.templates.get_template(basename)
|
||||
return self._render(template, data)
|
||||
except Exception:
|
||||
raise CoreError(
|
||||
f"node({self.node.name}) service({self.name}) "
|
||||
f"{exceptions.text_error_template().render_template()}"
|
||||
)
|
||||
|
||||
def _define_config(self, configs: List[Configuration]) -> None:
|
||||
"""
|
||||
Initializes default configuration data.
|
||||
|
||||
:param configs: configs to initialize
|
||||
:return: nothing
|
||||
"""
|
||||
for config in configs:
|
||||
self.config[config.id] = config
|
||||
|
||||
def render_config(self) -> Dict[str, str]:
|
||||
"""
|
||||
Returns configuration data key/value pairs for rendering a template.
|
||||
|
||||
:return: configuration key/value pairs
|
||||
"""
|
||||
if self.custom_config:
|
||||
return self.custom_config
|
||||
else:
|
||||
return {k: v.default for k, v in self.config.items()}
|
||||
|
||||
def set_config(self, data: Dict[str, str]) -> None:
|
||||
"""
|
||||
Set configuration data from key/value pairs.
|
||||
|
||||
:param data: configuration key/values to set
|
||||
:return: nothing
|
||||
:raise CoreError: when an unknown configuration value is given
|
||||
"""
|
||||
for key, value in data.items():
|
||||
if key not in self.config:
|
||||
raise CoreError(f"unknown config: {key}")
|
||||
self.custom_config[key] = value
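For orientation, a minimal concrete service might look like the sketch below. ExampleService and its paths are made up for illustration, and real services (see the FRR services later in this change) typically ship file-based Mako templates instead of text templates:

from core.configservice.base import ConfigService, ConfigServiceMode


class ExampleService(ConfigService):
    """Hypothetical service used only to show the required class attributes."""

    name = "Example"
    group = "Utility"
    directories = ["/etc/example"]
    files = ["/etc/example/example.conf"]
    executables = []
    dependencies = []
    startup = ["sh -c 'echo started'"]
    validate = []
    shutdown = []
    validation_mode = ConfigServiceMode.BLOCKING
    default_configs = []
    modes = {}

    def get_text_template(self, name: str) -> str:
        # used because no file-based template exists for example.conf
        return """
        # rendered on node ${node.name}
        greeting=${config.get("greeting", "hello")}
        """


# given a CoreNode instance, ExampleService(node).start() creates the directory,
# renders and writes example.conf, then runs the startup command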
|
123 daemon/core/configservice/dependencies.py Normal file
|
@ -0,0 +1,123 @@
|
|||
import logging
|
||||
from typing import TYPE_CHECKING, Dict, List
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from core.configservice.base import ConfigService
|
||||
|
||||
|
||||
class ConfigServiceDependencies:
|
||||
"""
|
||||
Generates sets of services to start in order of their dependencies.
|
||||
"""
|
||||
|
||||
def __init__(self, services: Dict[str, "ConfigService"]) -> None:
|
||||
"""
|
||||
Create a ConfigServiceDependencies instance.
|
||||
|
||||
:param services: services for determining dependency sets
|
||||
"""
|
||||
# helpers to check validity
|
||||
self.dependents = {}
|
||||
self.started = set()
|
||||
self.node_services = {}
|
||||
for service in services.values():
|
||||
self.node_services[service.name] = service
|
||||
for dependency in service.dependencies:
|
||||
dependents = self.dependents.setdefault(dependency, set())
|
||||
dependents.add(service.name)
|
||||
|
||||
# used to find paths
|
||||
self.path = []
|
||||
self.visited = set()
|
||||
self.visiting = set()
|
||||
|
||||
def startup_paths(self) -> List[List["ConfigService"]]:
|
||||
"""
|
||||
Find startup path sets based on service dependencies.
|
||||
|
||||
:return: lists of lists of services that can be started in parallel
|
||||
"""
|
||||
paths = []
|
||||
for name in self.node_services:
|
||||
service = self.node_services[name]
|
||||
if service.name in self.started:
|
||||
logging.debug(
|
||||
"skipping service that will already be started: %s", service.name
|
||||
)
|
||||
continue
|
||||
|
||||
path = self._start(service)
|
||||
if path:
|
||||
paths.append(path)
|
||||
|
||||
if self.started != set(self.node_services):
|
||||
raise ValueError(
|
||||
"failure to start all services: %s != %s"
|
||||
% (self.started, self.node_services.keys())
|
||||
)
|
||||
|
||||
return paths
|
||||
|
||||
def _reset(self) -> None:
|
||||
"""
|
||||
Clear out metadata used for finding service dependency sets.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
self.path = []
|
||||
self.visited.clear()
|
||||
self.visiting.clear()
|
||||
|
||||
def _start(self, service: "ConfigService") -> List["ConfigService"]:
|
||||
"""
|
||||
Starts a path for checking dependencies for a given service.
|
||||
|
||||
:param service: service to check dependencies for
|
||||
:return: list of config services to start in order
|
||||
"""
|
||||
logging.debug("starting service dependency check: %s", service.name)
|
||||
self._reset()
|
||||
return self._visit(service)
|
||||
|
||||
def _visit(self, current_service: "ConfigService") -> List["ConfigService"]:
|
||||
"""
|
||||
Visits a service when discovering dependency chains for service.
|
||||
|
||||
:param current_service: service being visited
|
||||
:return: list of dependent services for a visited service
|
||||
"""
|
||||
logging.debug("visiting service(%s): %s", current_service.name, self.path)
|
||||
self.visited.add(current_service.name)
|
||||
self.visiting.add(current_service.name)
|
||||
|
||||
# dive down
|
||||
for service_name in current_service.dependencies:
|
||||
if service_name not in self.node_services:
|
||||
raise ValueError(
|
||||
"required dependency was not included in node services: %s"
|
||||
% service_name
|
||||
)
|
||||
|
||||
if service_name in self.visiting:
|
||||
raise ValueError(
|
||||
"cyclic dependency at service(%s): %s"
|
||||
% (current_service.name, service_name)
|
||||
)
|
||||
|
||||
if service_name not in self.visited:
|
||||
service = self.node_services[service_name]
|
||||
self._visit(service)
|
||||
|
||||
# add service when bottom is found
|
||||
logging.debug("adding service to startup path: %s", current_service.name)
|
||||
self.started.add(current_service.name)
|
||||
self.path.append(current_service)
|
||||
self.visiting.remove(current_service.name)
|
||||
|
||||
# rise back up
|
||||
for service_name in self.dependents.get(current_service.name, []):
|
||||
if service_name not in self.visited:
|
||||
service = self.node_services[service_name]
|
||||
self._visit(service)
|
||||
|
||||
return self.path
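The dependency walker only reads each service's name and dependencies attributes, so a tiny sketch with stand-in objects shows the ordering it produces:

from types import SimpleNamespace

from core.configservice.dependencies import ConfigServiceDependencies

# stand-ins exposing just the attributes the walker inspects
zebra = SimpleNamespace(name="FRRzebra", dependencies=[])
ospf = SimpleNamespace(name="FRROSPFv2", dependencies=["FRRzebra"])

startups = ConfigServiceDependencies({s.name: s for s in (zebra, ospf)}).startup_paths()
# [[zebra, ospf]] -> zebra is booted before the service that depends on it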
|
82 daemon/core/configservice/manager.py Normal file
|
@ -0,0 +1,82 @@
|
|||
import logging
|
||||
import pathlib
|
||||
from typing import List, Type
|
||||
|
||||
from core import utils
|
||||
from core.configservice.base import ConfigService
|
||||
from core.errors import CoreError
|
||||
|
||||
|
||||
class ConfigServiceManager:
|
||||
"""
|
||||
Manager for configurable services.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
"""
|
||||
Create a ConfigServiceManager instance.
|
||||
"""
|
||||
self.services = {}
|
||||
|
||||
def get_service(self, name: str) -> Type[ConfigService]:
|
||||
"""
|
||||
Retrieve a service by name.
|
||||
|
||||
:param name: name of service
|
||||
:return: service class
|
||||
:raises CoreError: when service is not found
|
||||
"""
|
||||
service_class = self.services.get(name)
|
||||
if service_class is None:
|
||||
raise CoreError(f"service does not exit {name}")
|
||||
return service_class
|
||||
|
||||
def add(self, service: ConfigService) -> None:
|
||||
"""
|
||||
Add service to manager, checking service requirements have been met.
|
||||
|
||||
:param service: service to add to manager
|
||||
:return: nothing
|
||||
:raises CoreError: when service is a duplicate or has unmet executables
|
||||
"""
|
||||
name = service.name
|
||||
logging.debug("loading service: class(%s) name(%s)", service.__class__, name)
|
||||
|
||||
# avoid duplicate services
|
||||
if name in self.services:
|
||||
raise CoreError(f"duplicate service being added: {name}")
|
||||
|
||||
# validate dependent executables are present
|
||||
for executable in service.executables:
|
||||
try:
|
||||
utils.which(executable, required=True)
|
||||
except ValueError:
|
||||
raise CoreError(
|
||||
f"service({service.name}) missing executable {executable}"
|
||||
)
|
||||
|
||||
# make service available
|
||||
self.services[name] = service
|
||||
|
||||
def load(self, path: str) -> List[str]:
|
||||
"""
|
||||
Search path provided for configurable services and add them for being managed.
|
||||
|
||||
:param path: path to search configurable services
|
||||
:return: list of errors when loading and adding services
|
||||
"""
|
||||
path = pathlib.Path(path)
|
||||
subdirs = [x for x in path.iterdir() if x.is_dir()]
|
||||
subdirs.append(path)
|
||||
service_errors = []
|
||||
for subdir in subdirs:
|
||||
logging.debug("loading config services from: %s", subdir)
|
||||
services = utils.load_classes(str(subdir), ConfigService)
|
||||
for service in services:
|
||||
logging.debug("found service: %s", service)
|
||||
try:
|
||||
self.add(service)
|
||||
except CoreError as e:
|
||||
service_errors.append(service.name)
|
||||
logging.debug("not loading service(%s): %s", service.name, e)
|
||||
return service_errors
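Typical use is to point load() at a directory of service modules and then look classes up by name; the path and the "Example" service name below are placeholders assumed to exist in that directory:

from core.configservice.manager import ConfigServiceManager

manager = ConfigServiceManager()
errors = manager.load("/path/to/custom/configservices")  # placeholder path
service_class = manager.get_service("Example")           # raises CoreError if not loaded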
|
0 daemon/core/configservices/__init__.py Normal file
0 daemon/core/configservices/frrservices/__init__.py Normal file
391 daemon/core/configservices/frrservices/services.py Normal file
|
@ -0,0 +1,391 @@
|
|||
import abc
|
||||
from typing import Any, Dict
|
||||
|
||||
import netaddr
|
||||
|
||||
from core import constants
|
||||
from core.configservice.base import ConfigService, ConfigServiceMode
|
||||
from core.emane.nodes import EmaneNet
|
||||
from core.nodes.base import CoreNodeBase
|
||||
from core.nodes.interface import CoreInterface
|
||||
from core.nodes.network import WlanNode
|
||||
|
||||
GROUP = "FRR"
|
||||
|
||||
|
||||
def has_mtu_mismatch(ifc: CoreInterface) -> bool:
|
||||
"""
|
||||
Helper to detect MTU mismatch and add the appropriate FRR
|
||||
mtu-ignore command. This is needed when e.g. a node is linked via a
|
||||
GreTap device.
|
||||
"""
|
||||
if ifc.mtu != 1500:
|
||||
return True
|
||||
if not ifc.net:
|
||||
return False
|
||||
for i in ifc.net.netifs():
|
||||
if i.mtu != ifc.mtu:
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def get_min_mtu(ifc):
|
||||
"""
|
||||
Helper to discover the minimum MTU of interfaces linked with the
|
||||
given interface.
|
||||
"""
|
||||
mtu = ifc.mtu
|
||||
if not ifc.net:
|
||||
return mtu
|
||||
for i in ifc.net.netifs():
|
||||
if i.mtu < mtu:
|
||||
mtu = i.mtu
|
||||
return mtu
|
||||
|
||||
|
||||
def get_router_id(node: CoreNodeBase) -> str:
|
||||
"""
|
||||
Helper to return the first IPv4 address of a node as its router ID.
|
||||
"""
|
||||
for ifc in node.netifs():
|
||||
if getattr(ifc, "control", False):
|
||||
continue
|
||||
for a in ifc.addrlist:
|
||||
a = a.split("/")[0]
|
||||
if netaddr.valid_ipv4(a):
|
||||
return a
|
||||
return "0.0.0.0"
|
||||
|
||||
|
||||
class FRRZebra(ConfigService):
|
||||
name = "FRRzebra"
|
||||
group = GROUP
|
||||
directories = ["/usr/local/etc/frr", "/var/run/frr", "/var/log/frr"]
|
||||
files = [
|
||||
"/usr/local/etc/frr/frr.conf",
|
||||
"frrboot.sh",
|
||||
"/usr/local/etc/frr/vtysh.conf",
|
||||
"/usr/local/etc/frr/daemons",
|
||||
]
|
||||
executables = ["zebra"]
|
||||
dependencies = []
|
||||
startup = ["sh frrboot.sh zebra"]
|
||||
validate = ["pidof zebra"]
|
||||
shutdown = ["killall zebra"]
|
||||
validation_mode = ConfigServiceMode.BLOCKING
|
||||
default_configs = []
|
||||
modes = {}
|
||||
|
||||
def data(self) -> Dict[str, Any]:
|
||||
frr_conf = self.files[0]
|
||||
frr_bin_search = self.node.session.options.get_config(
|
||||
"frr_bin_search", default="/usr/local/bin /usr/bin /usr/lib/frr"
|
||||
).strip('"')
|
||||
frr_sbin_search = self.node.session.options.get_config(
|
||||
"frr_sbin_search", default="/usr/local/sbin /usr/sbin /usr/lib/frr"
|
||||
).strip('"')
|
||||
|
||||
services = []
|
||||
want_ip4 = False
|
||||
want_ip6 = False
|
||||
for service in self.node.config_services.values():
|
||||
if self.name not in service.dependencies:
|
||||
continue
|
||||
if service.ipv4_routing:
|
||||
want_ip4 = True
|
||||
if service.ipv6_routing:
|
||||
want_ip6 = True
|
||||
services.append(service)
|
||||
|
||||
interfaces = []
|
||||
for ifc in self.node.netifs():
|
||||
ip4s = []
|
||||
ip6s = []
|
||||
for x in ifc.addrlist:
|
||||
addr = x.split("/")[0]
|
||||
if netaddr.valid_ipv4(addr):
|
||||
ip4s.append(x)
|
||||
else:
|
||||
ip6s.append(x)
|
||||
is_control = getattr(ifc, "control", False)
|
||||
interfaces.append((ifc, ip4s, ip6s, is_control))
|
||||
|
||||
return dict(
|
||||
frr_conf=frr_conf,
|
||||
frr_sbin_search=frr_sbin_search,
|
||||
frr_bin_search=frr_bin_search,
|
||||
frr_state_dir=constants.FRR_STATE_DIR,
|
||||
interfaces=interfaces,
|
||||
want_ip4=want_ip4,
|
||||
want_ip6=want_ip6,
|
||||
services=services,
|
||||
)
|
||||
|
||||
|
||||
class FrrService(abc.ABC):
|
||||
group = GROUP
|
||||
directories = []
|
||||
files = []
|
||||
executables = []
|
||||
dependencies = ["FRRzebra"]
|
||||
startup = []
|
||||
validate = []
|
||||
shutdown = []
|
||||
validation_mode = ConfigServiceMode.BLOCKING
|
||||
default_configs = []
|
||||
modes = {}
|
||||
ipv4_routing = False
|
||||
ipv6_routing = False
|
||||
|
||||
@abc.abstractmethod
|
||||
def frr_interface_config(self, ifc: CoreInterface) -> str:
|
||||
raise NotImplementedError
|
||||
|
||||
@abc.abstractmethod
|
||||
def frr_config(self) -> str:
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
class FRROspfv2(FrrService, ConfigService):
|
||||
"""
|
||||
The OSPFv2 service provides IPv4 routing for wired networks. It does
|
||||
not build its own configuration file but has hooks for adding to the
|
||||
unified frr.conf file.
|
||||
"""
|
||||
|
||||
name = "FRROSPFv2"
|
||||
startup = ()
|
||||
shutdown = ["killall ospfd"]
|
||||
validate = ["pidof ospfd"]
|
||||
ipv4_routing = True
|
||||
|
||||
def frr_config(self) -> str:
|
||||
router_id = get_router_id(self.node)
|
||||
addresses = []
|
||||
for ifc in self.node.netifs():
|
||||
if getattr(ifc, "control", False):
|
||||
continue
|
||||
for a in ifc.addrlist:
|
||||
addr = a.split("/")[0]
|
||||
if netaddr.valid_ipv4(addr):
|
||||
addresses.append(a)
|
||||
data = dict(router_id=router_id, addresses=addresses)
|
||||
text = """
|
||||
router ospf
|
||||
router-id ${router_id}
|
||||
% for addr in addresses:
|
||||
network ${addr} area 0
|
||||
% endfor
|
||||
!
|
||||
"""
|
||||
return self.render_text(text, data)
|
||||
|
||||
def frr_interface_config(self, ifc: CoreInterface) -> str:
|
||||
if has_mtu_mismatch(ifc):
|
||||
return "ip ospf mtu-ignore"
|
||||
else:
|
||||
return ""
|
||||
|
||||
|
||||
class FRROspfv3(FrrService, ConfigService):
|
||||
"""
|
||||
The OSPFv3 service provides IPv6 routing for wired networks. It does
|
||||
not build its own configuration file but has hooks for adding to the
|
||||
unified frr.conf file.
|
||||
"""
|
||||
|
||||
name = "FRROSPFv3"
|
||||
shutdown = ["killall ospf6d"]
|
||||
validate = ["pidof ospf6d"]
|
||||
ipv4_routing = True
|
||||
ipv6_routing = True
|
||||
|
||||
def frr_config(self) -> str:
|
||||
router_id = get_router_id(self.node)
|
||||
ifnames = []
|
||||
for ifc in self.node.netifs():
|
||||
if getattr(ifc, "control", False):
|
||||
continue
|
||||
ifnames.append(ifc.name)
|
||||
data = dict(router_id=router_id, ifnames=ifnames)
|
||||
text = """
|
||||
router ospf6
|
||||
router-id ${router_id}
|
||||
% for ifname in ifnames:
|
||||
interface ${ifname} area 0.0.0.0
|
||||
% endfor
|
||||
!
|
||||
"""
|
||||
return self.render_text(text, data)
|
||||
|
||||
def frr_interface_config(self, ifc: CoreInterface) -> str:
|
||||
mtu = get_min_mtu(ifc)
|
||||
if mtu < ifc.mtu:
|
||||
return f"ipv6 ospf6 ifmtu {mtu}"
|
||||
else:
|
||||
return ""
|
||||
|
||||
|
||||
class FRRBgp(FrrService, ConfigService):
|
||||
"""
|
||||
The BGP service provides interdomain routing.
|
||||
Peers must be manually configured, with a full mesh for those
|
||||
having the same AS number.
|
||||
"""
|
||||
|
||||
name = "FRRBGP"
|
||||
shutdown = ["killall bgpd"]
|
||||
validate = ["pidof bgpd"]
|
||||
custom_needed = True
|
||||
ipv4_routing = True
|
||||
ipv6_routing = True
|
||||
|
||||
def frr_config(self) -> str:
|
||||
router_id = get_router_id(self.node)
|
||||
text = f"""
|
||||
! BGP configuration
|
||||
! You should configure the AS number below
|
||||
! along with this router's peers.
|
||||
router bgp {self.node.id}
|
||||
bgp router-id {router_id}
|
||||
redistribute connected
|
||||
!neighbor 1.2.3.4 remote-as 555
|
||||
!
|
||||
"""
|
||||
return self.clean_text(text)
|
||||
|
||||
def frr_interface_config(self, ifc: CoreInterface) -> str:
|
||||
return ""
|
||||
|
||||
|
||||
class FRRRip(FrrService, ConfigService):
|
||||
"""
|
||||
The RIP service provides IPv4 routing for wired networks.
|
||||
"""
|
||||
|
||||
name = "FRRRIP"
|
||||
shutdown = ["killall ripd"]
|
||||
validate = ["pidof ripd"]
|
||||
ipv4_routing = True
|
||||
|
||||
def frr_config(self) -> str:
|
||||
text = """
|
||||
router rip
|
||||
redistribute static
|
||||
redistribute connected
|
||||
redistribute ospf
|
||||
network 0.0.0.0/0
|
||||
!
|
||||
"""
|
||||
return self.clean_text(text)
|
||||
|
||||
def frr_interface_config(self, ifc: CoreInterface) -> str:
|
||||
return ""
|
||||
|
||||
|
||||
class FRRRipng(FrrService, ConfigService):
|
||||
"""
|
||||
The RIP NG service provides IPv6 routing for wired networks.
|
||||
"""
|
||||
|
||||
name = "FRRRIPNG"
|
||||
shutdown = ["killall ripngd"]
|
||||
validate = ["pidof ripngd"]
|
||||
ipv6_routing = True
|
||||
|
||||
def frr_config(self) -> str:
|
||||
text = """
|
||||
router ripng
|
||||
redistribute static
|
||||
redistribute connected
|
||||
redistribute ospf6
|
||||
network ::/0
|
||||
!
|
||||
"""
|
||||
return self.clean_text(text)
|
||||
|
||||
def frr_interface_config(self, ifc: CoreInterface) -> str:
|
||||
return ""
|
||||
|
||||
|
||||
class FRRBabel(FrrService, ConfigService):
|
||||
"""
|
||||
The Babel service provides a loop-avoiding distance-vector routing
|
||||
protocol for IPv6 and IPv4 with fast convergence properties.
|
||||
"""
|
||||
|
||||
name = "FRRBabel"
|
||||
shutdown = ["killall babeld"]
|
||||
validate = ["pidof babeld"]
|
||||
ipv6_routing = True
|
||||
|
||||
def frr_config(self) -> str:
|
||||
ifnames = []
|
||||
for ifc in self.node.netifs():
|
||||
if getattr(ifc, "control", False):
|
||||
continue
|
||||
ifnames.append(ifc.name)
|
||||
text = """
|
||||
router babel
|
||||
% for ifname in ifnames:
|
||||
network ${ifname}
|
||||
% endfor
|
||||
redistribute static
|
||||
redistribute ipv4 connected
|
||||
!
|
||||
"""
|
||||
data = dict(ifnames=ifnames)
|
||||
return self.render_text(text, data)
|
||||
|
||||
def frr_interface_config(self, ifc: CoreInterface) -> str:
|
||||
if isinstance(ifc.net, (WlanNode, EmaneNet)):
|
||||
text = """
|
||||
babel wireless
|
||||
no babel split-horizon
|
||||
"""
|
||||
else:
|
||||
text = """
|
||||
babel wired
|
||||
babel split-horizon
|
||||
"""
|
||||
return self.clean_text(text)
|
||||
|
||||
|
||||
class FRRpimd(FrrService, ConfigService):
|
||||
"""
|
||||
PIM multicast routing using FRR's pimd daemon.
|
||||
"""
|
||||
|
||||
name = "FRRpimd"
|
||||
shutdown = ["killall pimd"]
|
||||
validate = ["pidof pimd"]
|
||||
ipv4_routing = True
|
||||
|
||||
def frr_config(self) -> str:
|
||||
ifname = "eth0"
|
||||
for ifc in self.node.netifs():
|
||||
if ifc.name != "lo":
|
||||
ifname = ifc.name
|
||||
break
|
||||
|
||||
text = f"""
|
||||
router mfea
|
||||
!
|
||||
router igmp
|
||||
!
|
||||
router pim
|
||||
!ip pim rp-address 10.0.0.1
|
||||
ip pim bsr-candidate {ifname}
|
||||
ip pim rp-candidate {ifname}
|
||||
!ip pim spt-threshold interval 10 bytes 80000
|
||||
!
|
||||
"""
|
||||
return self.clean_text(text)
|
||||
|
||||
def frr_interface_config(self, ifc: CoreInterface) -> str:
|
||||
text = """
|
||||
ip mfea
|
||||
ip igmp
|
||||
ip pim
|
||||
"""
|
||||
return self.clean_text(text)
|
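All of the FRR sub-services above follow the same two-hook pattern: the zebra service owns the unified frr.conf, and each sub-service only contributes a frr_config() stanza plus an optional per-interface frr_interface_config() line. As a minimal sketch of that pattern (not part of this commit; the class name, NET value and IS-IS commands are purely illustrative), a new sub-service would look like:

class FRRIsis(FrrService, ConfigService):
    """
    Illustrative only: IS-IS routing contributed to the unified frr.conf.
    """
    name = "FRRISIS"
    shutdown = ["killall isisd"]
    validate = ["pidof isisd"]
    ipv4_routing = True
    ipv6_routing = True

    def frr_config(self) -> str:
        # global stanza collected by the frr.conf template below
        text = """
        router isis core
         net 49.0001.0000.0000.0001.00
        !
        """
        return self.clean_text(text)

    def frr_interface_config(self, ifc: CoreInterface) -> str:
        # emitted under the matching "interface <name>" block
        return "ip router isis core"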
59
daemon/core/configservices/frrservices/templates/daemons
Normal file
|
@ -0,0 +1,59 @@
|
|||
#
|
||||
# When activating a daemon for the first time, a config file, even if it is
|
||||
# empty, has to be present *and* be owned by the user and group "frr", else
|
||||
# the daemon will not be started by /etc/init.d/frr. The permissions should
|
||||
# be u=rw,g=r,o=.
|
||||
# When using "vtysh" such a config file is also needed. It should be owned by
|
||||
# group "frrvty" and set to ug=rw,o= though. Check /etc/pam.d/frr, too.
|
||||
#
|
||||
# The watchfrr and zebra daemons are always started.
|
||||
#
|
||||
bgpd=yes
|
||||
ospfd=yes
|
||||
ospf6d=yes
|
||||
ripd=yes
|
||||
ripngd=yes
|
||||
isisd=yes
|
||||
pimd=yes
|
||||
ldpd=yes
|
||||
nhrpd=yes
|
||||
eigrpd=yes
|
||||
babeld=yes
|
||||
sharpd=yes
|
||||
pbrd=yes
|
||||
bfdd=yes
|
||||
fabricd=yes
|
||||
|
||||
#
|
||||
# If this option is set the /etc/init.d/frr script automatically loads
|
||||
# the config via "vtysh -b" when the servers are started.
|
||||
# Check /etc/pam.d/frr if you intend to use "vtysh"!
|
||||
#
|
||||
vtysh_enable=yes
|
||||
zebra_options=" -A 127.0.0.1 -s 90000000"
|
||||
bgpd_options=" -A 127.0.0.1"
|
||||
ospfd_options=" -A 127.0.0.1"
|
||||
ospf6d_options=" -A ::1"
|
||||
ripd_options=" -A 127.0.0.1"
|
||||
ripngd_options=" -A ::1"
|
||||
isisd_options=" -A 127.0.0.1"
|
||||
pimd_options=" -A 127.0.0.1"
|
||||
ldpd_options=" -A 127.0.0.1"
|
||||
nhrpd_options=" -A 127.0.0.1"
|
||||
eigrpd_options=" -A 127.0.0.1"
|
||||
babeld_options=" -A 127.0.0.1"
|
||||
sharpd_options=" -A 127.0.0.1"
|
||||
pbrd_options=" -A 127.0.0.1"
|
||||
staticd_options="-A 127.0.0.1"
|
||||
bfdd_options=" -A 127.0.0.1"
|
||||
fabricd_options="-A 127.0.0.1"
|
||||
|
||||
# The list of daemons to watch is automatically generated by the init script.
|
||||
#watchfrr_options=""
|
||||
|
||||
# for debugging purposes, you can specify a "wrap" command to start instead
|
||||
# of starting the daemon directly, e.g. to use valgrind on ospfd:
|
||||
# ospfd_wrap="/usr/bin/valgrind"
|
||||
# or you can use "all_wrap" for all daemons, e.g. to use perf record:
|
||||
# all_wrap="/usr/bin/perf record --call-graph -"
|
||||
# the normal daemon command is added to this at the end.
|
25
daemon/core/configservices/frrservices/templates/frr.conf
Normal file
|
@ -0,0 +1,25 @@
|
|||
% for ifc, ip4s, ip6s, is_control in interfaces:
|
||||
interface ${ifc.name}
|
||||
% if want_ip4:
|
||||
% for addr in ip4s:
|
||||
ip address ${addr}
|
||||
% endfor
|
||||
% endif
|
||||
% if want_ip6:
|
||||
% for addr in ip6s:
|
||||
ipv6 address ${addr}
|
||||
% endfor
|
||||
% endif
|
||||
% if not is_control:
|
||||
% for service in services:
|
||||
% for line in service.frr_interface_config(ifc).split("\n"):
|
||||
${line}
|
||||
% endfor
|
||||
% endfor
|
||||
% endif
|
||||
!
|
||||
% endfor
|
||||
|
||||
% for service in services:
|
||||
${service.frr_config()}
|
||||
% endfor
|
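The template above is plain Mako: the zebra service's data() method supplies interfaces, want_ip4, want_ip6 and the list of dependent services, and each service's frr_interface_config()/frr_config() output is spliced in. A minimal standalone sketch of the same rendering mechanism (simplified data, illustrative only, not part of this commit):

from mako.template import Template

template = Template("""\
% for ifname, addrs in interfaces:
interface ${ifname}
% for addr in addrs:
 ip address ${addr}
% endfor
!
% endfor
""")
# prints an interface block comparable to what frr.conf would contain
print(template.render(interfaces=[("eth0", ["10.0.0.1/24", "10.0.0.2/24"])]))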
105
daemon/core/configservices/frrservices/templates/frrboot.sh
Normal file
|
@ -0,0 +1,105 @@
|
|||
#!/bin/sh
|
||||
# auto-generated by zebra service (frr.py)
|
||||
FRR_CONF="${frr_conf}"
|
||||
FRR_SBIN_SEARCH="${frr_sbin_search}"
|
||||
FRR_BIN_SEARCH="${frr_bin_search}"
|
||||
FRR_STATE_DIR="${frr_state_dir}"
|
||||
|
||||
searchforprog()
|
||||
{
|
||||
prog=$1
|
||||
searchpath=$@
|
||||
ret=
|
||||
for p in $searchpath; do
|
||||
if [ -x $p/$prog ]; then
|
||||
ret=$p
|
||||
break
|
||||
fi
|
||||
done
|
||||
echo $ret
|
||||
}
|
||||
|
||||
confcheck()
|
||||
{
|
||||
CONF_DIR=`dirname $FRR_CONF`
|
||||
# if /etc/frr exists, point /etc/frr/frr.conf -> CONF_DIR
|
||||
if [ "$CONF_DIR" != "/etc/frr" ] && [ -d /etc/frr ] && [ ! -e /etc/frr/frr.conf ]; then
|
||||
ln -s $CONF_DIR/frr.conf /etc/frr/frr.conf
|
||||
fi
|
||||
# if /etc/frr exists, point /etc/frr/vtysh.conf -> CONF_DIR
|
||||
if [ "$CONF_DIR" != "/etc/frr" ] && [ -d /etc/frr ] && [ ! -e /etc/frr/vtysh.conf ]; then
|
||||
ln -s $CONF_DIR/vtysh.conf /etc/frr/vtysh.conf
|
||||
fi
|
||||
}
|
||||
|
||||
bootdaemon()
|
||||
{
|
||||
FRR_SBIN_DIR=$(searchforprog $1 $FRR_SBIN_SEARCH)
|
||||
if [ "z$FRR_SBIN_DIR" = "z" ]; then
|
||||
echo "ERROR: FRR's '$1' daemon not found in search path:"
|
||||
echo " $FRR_SBIN_SEARCH"
|
||||
return 1
|
||||
fi
|
||||
|
||||
flags=""
|
||||
|
||||
if [ "$1" = "pimd" ] && \\
|
||||
grep -E -q '^[[:space:]]*router[[:space:]]+pim6[[:space:]]*$' $FRR_CONF; then
|
||||
flags="$flags -6"
|
||||
fi
|
||||
|
||||
#force FRR to use CORE generated conf file
|
||||
flags="$flags -d -f $FRR_CONF"
|
||||
$FRR_SBIN_DIR/$1 $flags
|
||||
|
||||
if [ "$?" != "0" ]; then
|
||||
echo "ERROR: FRR's '$1' daemon failed to start!:"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
bootfrr()
|
||||
{
|
||||
FRR_BIN_DIR=$(searchforprog 'vtysh' $FRR_BIN_SEARCH)
|
||||
if [ "z$FRR_BIN_DIR" = "z" ]; then
|
||||
echo "ERROR: FRR's 'vtysh' program not found in search path:"
|
||||
echo " $FRR_BIN_SEARCH"
|
||||
return 1
|
||||
fi
|
||||
|
||||
# fix /var/run/frr permissions
|
||||
id -u frr 2>/dev/null >/dev/null
|
||||
if [ "$?" = "0" ]; then
|
||||
chown frr $FRR_STATE_DIR
|
||||
fi
|
||||
|
||||
bootdaemon "zebra"
|
||||
if grep -q "^ip route " $FRR_CONF; then
|
||||
bootdaemon "staticd"
|
||||
fi
|
||||
for r in rip ripng ospf6 ospf bgp babel; do
|
||||
if grep -q "^router \\<$${}{r}\\>" $FRR_CONF; then
|
||||
bootdaemon "$${}{r}d"
|
||||
fi
|
||||
done
|
||||
|
||||
if grep -E -q '^[[:space:]]*router[[:space:]]+pim6?[[:space:]]*$' $FRR_CONF; then
|
||||
bootdaemon "pimd"
|
||||
fi
|
||||
|
||||
$FRR_BIN_DIR/vtysh -b
|
||||
}
|
||||
|
||||
if [ "$1" != "zebra" ]; then
|
||||
echo "WARNING: '$1': all FRR daemons are launched by the 'zebra' service!"
|
||||
exit 1
|
||||
fi
|
||||
confcheck
|
||||
bootfrr
|
||||
|
||||
# reset interfaces
|
||||
% for ifc, _, _ , _ in interfaces:
|
||||
ip link set dev ${ifc.name} down
|
||||
sleep 1
|
||||
ip link set dev ${ifc.name} up
|
||||
% endfor
|
|
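frrboot.sh decides which daemons to launch purely by grepping the generated frr.conf for the stanzas the sub-services emitted ("router ospf", "router bgp", "ip route", "router pim", ...). A rough Python equivalent of that selection logic, for illustration only (not part of this commit):

import re

def daemons_to_start(frr_conf_text: str) -> list:
    # mirrors the grep checks in bootfrr()/bootdaemon() above
    daemons = ["zebra"]
    if re.search(r"^ip route ", frr_conf_text, re.M):
        daemons.append("staticd")
    for proto in ("rip", "ripng", "ospf6", "ospf", "bgp", "babel"):
        if re.search(rf"^router {proto}\b", frr_conf_text, re.M):
            daemons.append(proto + "d")
    if re.search(r"^\s*router\s+pim6?\s*$", frr_conf_text, re.M):
        daemons.append("pimd")
    return daemons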
@ -0,0 +1 @@
|
|||
service integrated-vtysh-config
|
0
daemon/core/configservices/nrlservices/__init__.py
Normal file
212
daemon/core/configservices/nrlservices/services.py
Normal file
|
@ -0,0 +1,212 @@
|
|||
from typing import Any, Dict
|
||||
|
||||
import netaddr
|
||||
|
||||
from core import utils
|
||||
from core.configservice.base import ConfigService, ConfigServiceMode
|
||||
|
||||
GROUP = "ProtoSvc"
|
||||
|
||||
|
||||
class MgenSinkService(ConfigService):
|
||||
name = "MGEN_Sink"
|
||||
group = GROUP
|
||||
directories = []
|
||||
files = ["mgensink.sh", "sink.mgen"]
|
||||
executables = ["mgen"]
|
||||
dependencies = []
|
||||
startup = ["sh mgensink.sh"]
|
||||
validate = ["pidof mgen"]
|
||||
shutdown = ["killall mgen"]
|
||||
validation_mode = ConfigServiceMode.BLOCKING
|
||||
default_configs = []
|
||||
modes = {}
|
||||
|
||||
def data(self) -> Dict[str, Any]:
|
||||
ifnames = []
|
||||
for ifc in self.node.netifs():
|
||||
name = utils.sysctl_devname(ifc.name)
|
||||
ifnames.append(name)
|
||||
return dict(ifnames=ifnames)
|
||||
|
||||
|
||||
class NrlNhdp(ConfigService):
|
||||
name = "NHDP"
|
||||
group = GROUP
|
||||
directories = []
|
||||
files = ["nrlnhdp.sh"]
|
||||
executables = ["nrlnhdp"]
|
||||
dependencies = []
|
||||
startup = ["sh nrlnhdp.sh"]
|
||||
validate = ["pidof nrlnhdp"]
|
||||
shutdown = ["killall nrlnhdp"]
|
||||
validation_mode = ConfigServiceMode.BLOCKING
|
||||
default_configs = []
|
||||
modes = {}
|
||||
|
||||
def data(self) -> Dict[str, Any]:
|
||||
has_smf = "SMF" in self.node.config_services
|
||||
ifnames = []
|
||||
for ifc in self.node.netifs():
|
||||
if getattr(ifc, "control", False):
|
||||
continue
|
||||
ifnames.append(ifc.name)
|
||||
return dict(has_smf=has_smf, ifnames=ifnames)
|
||||
|
||||
|
||||
class NrlSmf(ConfigService):
|
||||
name = "SMF"
|
||||
group = GROUP
|
||||
directories = []
|
||||
files = ["startsmf.sh"]
|
||||
executables = ["nrlsmf", "killall"]
|
||||
dependencies = []
|
||||
startup = ["sh startsmf.sh"]
|
||||
validate = ["pidof nrlsmf"]
|
||||
shutdown = ["killall nrlsmf"]
|
||||
validation_mode = ConfigServiceMode.BLOCKING
|
||||
default_configs = []
|
||||
modes = {}
|
||||
|
||||
def data(self) -> Dict[str, Any]:
|
||||
has_arouted = "arouted" in self.node.config_services
|
||||
has_nhdp = "NHDP" in self.node.config_services
|
||||
has_olsr = "OLSR" in self.node.config_services
|
||||
ifnames = []
|
||||
ip4_prefix = None
|
||||
for ifc in self.node.netifs():
|
||||
if getattr(ifc, "control", False):
|
||||
continue
|
||||
ifnames.append(ifc.name)
|
||||
if ip4_prefix:
|
||||
continue
|
||||
for a in ifc.addrlist:
|
||||
a = a.split("/")[0]
|
||||
if netaddr.valid_ipv4(a):
|
||||
ip4_prefix = f"{a}/{24}"
|
||||
break
|
||||
return dict(
|
||||
has_arouted=has_arouted,
|
||||
has_nhdp=has_nhdp,
|
||||
has_olsr=has_olsr,
|
||||
ifnames=ifnames,
|
||||
ip4_prefix=ip4_prefix,
|
||||
)
|
||||
|
||||
|
||||
class NrlOlsr(ConfigService):
|
||||
name = "OLSR"
|
||||
group = GROUP
|
||||
directories = []
|
||||
files = ["nrlolsrd.sh"]
|
||||
executables = ["nrlolsrd"]
|
||||
dependencies = []
|
||||
startup = ["sh nrlolsrd.sh"]
|
||||
validate = ["pidof nrlolsrd"]
|
||||
shutdown = ["killall nrlolsrd"]
|
||||
validation_mode = ConfigServiceMode.BLOCKING
|
||||
default_configs = []
|
||||
modes = {}
|
||||
|
||||
def data(self) -> Dict[str, Any]:
|
||||
has_smf = "SMF" in self.node.config_services
|
||||
has_zebra = "zebra" in self.node.config_services
|
||||
ifname = None
|
||||
for ifc in self.node.netifs():
|
||||
if getattr(ifc, "control", False):
|
||||
continue
|
||||
ifname = ifc.name
|
||||
break
|
||||
return dict(has_smf=has_smf, has_zebra=has_zebra, ifname=ifname)
|
||||
|
||||
|
||||
class NrlOlsrv2(ConfigService):
|
||||
name = "OLSRv2"
|
||||
group = GROUP
|
||||
directories = []
|
||||
files = ["nrlolsrv2.sh"]
|
||||
executables = ["nrlolsrv2"]
|
||||
dependencies = []
|
||||
startup = ["sh nrlolsrv2.sh"]
|
||||
validate = ["pidof nrlolsrv2"]
|
||||
shutdown = ["killall nrlolsrv2"]
|
||||
validation_mode = ConfigServiceMode.BLOCKING
|
||||
default_configs = []
|
||||
modes = {}
|
||||
|
||||
def data(self) -> Dict[str, Any]:
|
||||
has_smf = "SMF" in self.node.config_services
|
||||
ifnames = []
|
||||
for ifc in self.node.netifs():
|
||||
if getattr(ifc, "control", False):
|
||||
continue
|
||||
ifnames.append(ifc.name)
|
||||
return dict(has_smf=has_smf, ifnames=ifnames)
|
||||
|
||||
|
||||
class OlsrOrg(ConfigService):
|
||||
name = "OLSRORG"
|
||||
group = GROUP
|
||||
directories = ["/etc/olsrd"]
|
||||
files = ["olsrd.sh", "/etc/olsrd/olsrd.conf"]
|
||||
executables = ["olsrd"]
|
||||
dependencies = []
|
||||
startup = ["sh olsrd.sh"]
|
||||
validate = ["pidof olsrd"]
|
||||
shutdown = ["killall olsrd"]
|
||||
validation_mode = ConfigServiceMode.BLOCKING
|
||||
default_configs = []
|
||||
modes = {}
|
||||
|
||||
def data(self) -> Dict[str, Any]:
|
||||
has_smf = "SMF" in self.node.config_services
|
||||
ifnames = []
|
||||
for ifc in self.node.netifs():
|
||||
if getattr(ifc, "control", False):
|
||||
continue
|
||||
ifnames.append(ifc.name)
|
||||
return dict(has_smf=has_smf, ifnames=ifnames)
|
||||
|
||||
|
||||
class MgenActor(ConfigService):
|
||||
name = "MgenActor"
|
||||
group = GROUP
|
||||
directories = []
|
||||
files = ["start_mgen_actor.sh"]
|
||||
executables = ["mgen"]
|
||||
dependencies = []
|
||||
startup = ["sh start_mgen_actor.sh"]
|
||||
validate = ["pidof mgen"]
|
||||
shutdown = ["killall mgen"]
|
||||
validation_mode = ConfigServiceMode.BLOCKING
|
||||
default_configs = []
|
||||
modes = {}
|
||||
|
||||
|
||||
class Arouted(ConfigService):
|
||||
name = "arouted"
|
||||
group = GROUP
|
||||
directories = []
|
||||
files = ["startarouted.sh"]
|
||||
executables = ["arouted"]
|
||||
dependencies = []
|
||||
startup = ["sh startarouted.sh"]
|
||||
validate = ["pidof arouted"]
|
||||
shutdown = ["pkill arouted"]
|
||||
validation_mode = ConfigServiceMode.BLOCKING
|
||||
default_configs = []
|
||||
modes = {}
|
||||
|
||||
def data(self) -> Dict[str, Any]:
|
||||
ip4_prefix = None
|
||||
for ifc in self.node.netifs():
|
||||
if getattr(ifc, "control", False):
|
||||
continue
|
||||
if ip4_prefix:
|
||||
continue
|
||||
for a in ifc.addrlist:
|
||||
a = a.split("/")[0]
|
||||
if netaddr.valid_ipv4(a):
|
||||
ip4_prefix = f"{a}/{24}"
|
||||
break
|
||||
return dict(ip4_prefix=ip4_prefix)
|
|
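Each service's data() dict above is exactly what the matching shell template consumes; NrlNhdp.data(), for instance, feeds the nrlnhdp.sh template a few hunks below. Purely for illustration (made-up node name and interface list, not part of this commit), the rendered command comes out roughly as:

data = dict(has_smf=True, ifnames=["eth0", "eth1"])
node_name = "n1"  # hypothetical node name
interfaces = "-i " + " -i ".join(data["ifnames"])
smf = f"-flooding ecds -smfClient {node_name}_smf" if data["has_smf"] else ""
print(f"nrlnhdp -l /var/log/nrlnhdp.log -rpipe {node_name}_nhdp {smf} {interfaces}")
# nrlnhdp -l /var/log/nrlnhdp.log -rpipe n1_nhdp -flooding ecds -smfClient n1_smf -i eth0 -i eth1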
@ -0,0 +1 @@
|
|||
mgen input sink.mgen output mgen_${node.name}.log
|
|
@ -0,0 +1,7 @@
|
|||
<%
|
||||
interfaces = "-i " + " -i ".join(ifnames)
|
||||
smf = ""
|
||||
if has_smf:
|
||||
smf = "-flooding ecds -smfClient %s_smf" % node.name
|
||||
%>
|
||||
nrlnhdp -l /var/log/nrlnhdp.log -rpipe ${node.name}_nhdp ${smf} ${interfaces}
|
|
@ -0,0 +1,9 @@
|
|||
<%
|
||||
smf = ""
|
||||
if has_smf:
|
||||
smf = "-flooding s-mpr -smfClient %s_smf" % node.name
|
||||
zebra = ""
|
||||
if has_zebra:
|
||||
zebra = "-z"
|
||||
%>
|
||||
nrlolsrd -i ${ifname} -l /var/log/nrlolsrd.log -rpipe ${node.name}_olsr ${smf} ${zebra}
|
|
@ -0,0 +1,7 @@
|
|||
<%
|
||||
interfaces = "-i " + " -i ".join(ifnames)
|
||||
smf = ""
|
||||
if has_smf:
|
||||
smf = "-flooding ecds -smfClient %s_smf" % node.name
|
||||
%>
|
||||
nrlolsrv2 -l /var/log/nrlolsrv2.log -rpipe ${node.name}_olsrv2 -p olsr ${smf} ${interfaces}
|
312
daemon/core/configservices/nrlservices/templates/olsrd.conf
Normal file
|
@ -0,0 +1,312 @@
|
|||
#
|
||||
# OLSR.org routing daemon config file
|
||||
# This file contains the usual options for an ETX based
|
||||
# stationary network without fisheye
|
||||
# (for other options see olsrd.conf.default.full)
|
||||
#
|
||||
# Lines starting with a # are discarded
|
||||
#
|
||||
|
||||
#### ATTENTION for IPv6 users ####
|
||||
# Because of limitations in the parser IPv6 addresses must NOT
|
||||
# begin with a ":", so please add a "0" as a prefix.
|
||||
|
||||
###########################
|
||||
### Basic configuration ###
|
||||
###########################
|
||||
# keep these settings at the beginning of your first configuration file
|
||||
|
||||
# Debug level (0-9)
|
||||
# If set to 0 the daemon runs in the background, unless "NoFork" is set to true
|
||||
# (Default is 1)
|
||||
|
||||
# DebugLevel 1
|
||||
|
||||
# IP version to use (4 or 6)
|
||||
# (Default is 4)
|
||||
|
||||
# IpVersion 4
|
||||
|
||||
#################################
|
||||
### OLSRd agent configuration ###
|
||||
#################################
|
||||
# these parameters control the settings of the routing agent which are not
|
||||
# related to the OLSR protocol and its extensions
|
||||
|
||||
# FIBMetric controls the metric value of the host-routes OLSRd sets.
|
||||
# - "flat" means that the metric value is always 2. This is the preferred value
|
||||
# because it helps the linux kernel routing to clean up older routes
|
||||
# - "correct" use the hopcount as the metric value.
|
||||
# - "approx" use the hopcount as the metric value too, but does only update the
|
||||
# hopcount if the nexthop changes too
|
||||
# (Default is "flat")
|
||||
|
||||
# FIBMetric "flat"
|
||||
|
||||
#######################################
|
||||
### Linux specific OLSRd extensions ###
|
||||
#######################################
|
||||
# these parameters only work on linux at the moment
|
||||
|
||||
# SrcIpRoutes tells OLSRd to set the Src flag of host routes to the originator-ip
|
||||
# of the node. In addition to this an additional localhost device is created
|
||||
# to make sure the returning traffic can be received.
|
||||
# (Default is "no")
|
||||
|
||||
# SrcIpRoutes no
|
||||
|
||||
# Specify the proto tag to be used for routes olsr inserts into kernel
|
||||
# currently only implemented for linux
|
||||
# valid values under linux are 1 .. 254
|
||||
# 1 gets remapped by olsrd to 0 UNSPECIFIED (1 is reserved for ICMP redirects)
|
||||
# 2 KERNEL routes (not very wise to use)
|
||||
# 3 BOOT (should in fact not be used by routing daemons)
|
||||
# 4 STATIC
|
||||
# 8 .. 15 various routing daemons (gated, zebra, bird, & co)
|
||||
# (defaults to 0, which gets replaced by an OS-specific default value;
|
||||
# under linux this is 3 (BOOT), for backward compatibility)
|
||||
|
||||
# RtProto 0
|
||||
|
||||
# Activates (in IPv6 mode) the automatic use of NIIT
|
||||
# (see README-Olsr-Extensions)
|
||||
# (default is "yes")
|
||||
|
||||
# UseNiit yes
|
||||
|
||||
# Activates the smartgateway ipip tunnel feature.
|
||||
# See README-Olsr-Extensions for a description of smartgateways.
|
||||
# (default is "no")
|
||||
|
||||
# SmartGateway no
|
||||
|
||||
# Signals that the server tunnel must always be removed on shutdown,
|
||||
# irrespective of the interface up/down state during startup.
|
||||
# (default is "no")
|
||||
|
||||
# SmartGatewayAlwaysRemoveServerTunnel no
|
||||
|
||||
# Determines the maximum number of gateways that can be in use at any given
|
||||
# time. This setting is used to mitigate the effects of breaking connections
|
||||
# (due to the selection of a new gateway) on a dynamic network.
|
||||
# (default is 1)
|
||||
|
||||
# SmartGatewayUseCount 1
|
||||
|
||||
# Determines the take-down percentage for a non-current smart gateway tunnel.
|
||||
# If the cost of the current smart gateway tunnel is less than this percentage
|
||||
# of the cost of the non-current smart gateway tunnel, then the non-current smart
|
||||
# gateway tunnel is taken down because it is then presumed to be 'too expensive'.
|
||||
# This setting is only relevant when SmartGatewayUseCount is larger than 1;
|
||||
# a value of 0 will result in the tunnels not being taken down proactively.
|
||||
# (default is 0)
|
||||
|
||||
# SmartGatewayTakeDownPercentage 0
|
||||
|
||||
# Determines the policy routing script that is executed during startup and
|
||||
# shutdown of olsrd. The script is only executed when SmartGatewayUseCount
|
||||
# is set to a value larger than 1. The script must setup policy routing
|
||||
# rules such that multi-gateway mode works. A sample script is included.
|
||||
# (default is not set)
|
||||
|
||||
# SmartGatewayPolicyRoutingScript ""
|
||||
|
||||
# Determines the egress interfaces that are part of the multi-gateway setup and
|
||||
# therefore only relevant when SmartGatewayUseCount is larger than 1 (in which
|
||||
# case it must be explicitly set).
|
||||
# (default is not set)
|
||||
|
||||
# SmartGatewayEgressInterfaces ""
|
||||
|
||||
# Determines the routing tables offset for multi-gateway policy routing tables
|
||||
# See the policy routing script for an explanation.
|
||||
# (default is 90)
|
||||
|
||||
# SmartGatewayTablesOffset 90
|
||||
|
||||
# Determines the policy routing rules offset for multi-gateway policy routing
|
||||
# rules. See the policy routing script for an explanation.
|
||||
# (default is 0, which indicates that the rules and tables should be aligned and
|
||||
# puts this value at SmartGatewayTablesOffset - # egress interfaces -
|
||||
# # olsr interfaces)
|
||||
|
||||
# SmartGatewayRulesOffset 87
|
||||
|
||||
# Allows the selection of a smartgateway with NAT (only for IPv4)
|
||||
# (default is "yes")
|
||||
|
||||
# SmartGatewayAllowNAT yes
|
||||
|
||||
# Determines the period (in milliseconds) on which a new smart gateway
|
||||
# selection is performed.
|
||||
# (default is 10000 milliseconds)
|
||||
|
||||
# SmartGatewayPeriod 10000
|
||||
|
||||
# Determines the number of times the link state database must be stable
|
||||
# before a new smart gateway is selected.
|
||||
# (default is 6)
|
||||
|
||||
# SmartGatewayStableCount 6
|
||||
|
||||
# When a gateway other than the current one has a cost of less than the cost
|
||||
# of the current gateway multiplied by SmartGatewayThreshold then the smart
|
||||
# gateway is switched to the other gateway. The unit is percentage.
|
||||
# (defaults to 0)
|
||||
|
||||
# SmartGatewayThreshold 0
|
||||
|
||||
# The weighing factor for the gateway uplink bandwidth (exit link, uplink).
|
||||
# See README-Olsr-Extensions for a description of smart gateways.
|
||||
# (default is 1)
|
||||
|
||||
# SmartGatewayWeightExitLinkUp 1
|
||||
|
||||
# The weighing factor for the gateway downlink bandwidth (exit link, downlink).
|
||||
# See README-Olsr-Extensions for a description of smart gateways.
|
||||
# (default is 1)
|
||||
|
||||
# SmartGatewayWeightExitLinkDown 1
|
||||
|
||||
# The weighing factor for the ETX costs.
|
||||
# See README-Olsr-Extensions for a description of smart gateways.
|
||||
# (default is 1)
|
||||
|
||||
# SmartGatewayWeightEtx 1
|
||||
|
||||
# The divider for the ETX costs.
|
||||
# See README-Olsr-Extensions for a description of smart gateways.
|
||||
# (default is 0)
|
||||
|
||||
# SmartGatewayDividerEtx 0
|
||||
|
||||
# Defines what kind of Uplink this node will publish as a
|
||||
# smartgateway. The existence of the uplink is detected by
|
||||
# a route to 0.0.0.0/0, ::ffff:0:0/96 and/or 2000::/3.
|
||||
# possible values are "none", "ipv4", "ipv6", "both"
|
||||
# (default is "both")
|
||||
|
||||
# SmartGatewayUplink "both"
|
||||
|
||||
# Specifies if the local ipv4 uplink use NAT
|
||||
# (default is "yes")
|
||||
|
||||
# SmartGatewayUplinkNAT yes
|
||||
|
||||
# Specifies the speed of the uplink in kilobit/s.
|
||||
# First parameter is upstream, second parameter is downstream
|
||||
# (default is 128/1024)
|
||||
|
||||
# SmartGatewaySpeed 128 1024
|
||||
|
||||
# Specifies the EXTERNAL ipv6 prefix of the uplink. A prefix
|
||||
# length of more than 64 is not allowed.
|
||||
# (default is 0::/0)
|
||||
|
||||
# SmartGatewayPrefix 0::/0
|
||||
|
||||
##############################
|
||||
### OLSR protocol settings ###
|
||||
##############################
|
||||
|
||||
# HNA (Host network association) allows the OLSR to announce
|
||||
# additional IPs or IP subnets to the net that are reachable
|
||||
# through this node.
|
||||
# Syntax for HNA4 is "network-address network-mask"
|
||||
# Syntax for HNA6 is "network-address prefix-length"
|
||||
# (default is no HNA)
|
||||
Hna4
|
||||
{
|
||||
# Internet gateway
|
||||
# 0.0.0.0 0.0.0.0
|
||||
# specific small networks reachable through this node
|
||||
# 15.15.0.0 255.255.255.0
|
||||
}
|
||||
Hna6
|
||||
{
|
||||
# Internet gateway
|
||||
# 0:: 0
|
||||
# specific small networks reachable through this node
|
||||
# fec0:2200:106:0:0:0:0:0 48
|
||||
}
|
||||
|
||||
################################
|
||||
### OLSR protocol extensions ###
|
||||
################################
|
||||
|
||||
# Link quality algorithm (only for lq level 2)
|
||||
# (see README-Olsr-Extensions)
|
||||
# - "etx_float", a floating point ETX with exponential aging
|
||||
# - "etx_fpm", same as ext_float, but with integer arithmetic
|
||||
# - "etx_ff" (ETX freifunk), an etx variant which use all OLSR
|
||||
# traffic (instead of only hellos) for ETX calculation
|
||||
# - "etx_ffeth", an incompatible variant of etx_ff that allows
|
||||
# ethernet links with ETX 0.1.
|
||||
# (defaults to "etx_ff")
|
||||
|
||||
# LinkQualityAlgorithm "etx_ff"
|
||||
|
||||
# Fisheye mechanism for TCs (0 means off, 1 means on)
|
||||
# (default is 1)
|
||||
|
||||
LinkQualityFishEye 0
|
||||
|
||||
#####################################
|
||||
### Example plugin configurations ###
|
||||
#####################################
|
||||
# Olsrd plugins to load
|
||||
# This must be the absolute path to the file
|
||||
# or the loader will use the following scheme:
|
||||
# - Try the paths in the LD_LIBRARY_PATH
|
||||
# environment variable.
|
||||
# - The list of libraries cached in /etc/ld.so.cache
|
||||
# - /lib, followed by /usr/lib
|
||||
#
|
||||
# the examples in this list are for linux, so check if the plugin is
|
||||
# available if you use windows.
|
||||
# each plugin should have a README file in its lib subfolder
|
||||
|
||||
# LoadPlugin "olsrd_txtinfo.dll"
|
||||
#LoadPlugin "olsrd_txtinfo.so.0.1"
|
||||
#{
|
||||
# the default port is 2006 but you can change it like this:
|
||||
#PlParam "port" "8080"
|
||||
|
||||
# You can set a "accept" single address to allow to connect to
|
||||
# txtinfo. If no address is specified, then localhost (127.0.0.1)
|
||||
# is allowed by default. txtinfo will only use the first "accept"
|
||||
# parameter specified and will ignore the rest.
|
||||
|
||||
# to allow a specific host:
|
||||
#PlParam "accept" "172.29.44.23"
|
||||
# if you set it to 0.0.0.0, it will accept all connections
|
||||
#PlParam "accept" "0.0.0.0"
|
||||
#}
|
||||
|
||||
#############################################
|
||||
### OLSRD default interface configuration ###
|
||||
#############################################
|
||||
# the default interface section can have the same values as the following
|
||||
# interface configuration. It will allow you to set common options for all
|
||||
# interfaces.
|
||||
|
||||
InterfaceDefaults {
|
||||
Ip4Broadcast 255.255.255.255
|
||||
}
|
||||
|
||||
######################################
|
||||
### OLSRd Interfaces configuration ###
|
||||
######################################
|
||||
# multiple interfaces can be specified for a single configuration block
|
||||
# multiple configuration blocks can be specified
|
||||
|
||||
# WARNING, don't forget to insert your interface names here !
|
||||
#Interface "<OLSRd-Interface1>" "<OLSRd-Interface2>"
|
||||
#{
|
||||
# Interface Mode is used to prevent unnecessary
|
||||
# packet forwarding on switched ethernet interfaces
|
||||
# valid Modes are "mesh" and "ether"
|
||||
# (default is "mesh")
|
||||
|
||||
# Mode "mesh"
|
||||
#}
|
|
@ -0,0 +1,4 @@
|
|||
<%
|
||||
interfaces = "-i " + " -i ".join(ifnames)
|
||||
%>
|
||||
olsrd ${interfaces}
|
|
@ -0,0 +1,4 @@
|
|||
0.0 LISTEN UDP 5000
|
||||
% for ifname in ifnames:
|
||||
0.0 Join 224.225.1.2 INTERFACE ${ifname}
|
||||
% endfor
|
|
@ -0,0 +1,3 @@
|
|||
#!/bin/sh
|
||||
# auto-generated by MgenActor service
|
||||
mgenBasicActor.py -n ${node.name} -a 0.0.0.0 < /dev/null > /dev/null 2>&1 &
|
|
@ -0,0 +1,15 @@
|
|||
#!/bin/sh
|
||||
for f in "/tmp/${node.name}_smf"; do
|
||||
count=1
|
||||
until [ -e "$f" ]; do
|
||||
if [ $count -eq 10 ]; then
|
||||
echo "ERROR: nrlmsf pipe not found: $f" >&2
|
||||
exit 1
|
||||
fi
|
||||
sleep 0.1
|
||||
count=$(($count + 1))
|
||||
done
|
||||
done
|
||||
|
||||
ip route add ${ip4_prefix} dev lo
|
||||
arouted instance ${node.name}_smf tap ${node.name}_tap stability 10 2>&1 > /var/log/arouted.log &
|
15
daemon/core/configservices/nrlservices/templates/startsmf.sh
Normal file
|
@ -0,0 +1,15 @@
|
|||
<%
|
||||
interfaces = ",".join(ifnames)
|
||||
arouted = ""
|
||||
if has_arouted:
|
||||
arouted = "tap %s_tap unicast %s push lo,%s resequence on" % (node.name, ip4_prefix, ifnames[0])
|
||||
if has_nhdp:
|
||||
flood = "ecds"
|
||||
elif has_olsr:
|
||||
flood = "smpr"
|
||||
else:
|
||||
flood = "cf"
|
||||
%>
|
||||
#!/bin/sh
|
||||
# auto-generated by NrlSmf service
|
||||
nrlsmf instance ${node.name}_smf ${interfaces} ${arouted} ${flood} hash MD5 log /var/log/nrlsmf.log < /dev/null > /dev/null 2>&1 &
|
0
daemon/core/configservices/quaggaservices/__init__.py
Normal file
424
daemon/core/configservices/quaggaservices/services.py
Normal file
|
@ -0,0 +1,424 @@
|
|||
import abc
|
||||
import logging
|
||||
from typing import Any, Dict
|
||||
|
||||
import netaddr
|
||||
|
||||
from core import constants
|
||||
from core.configservice.base import ConfigService, ConfigServiceMode
|
||||
from core.emane.nodes import EmaneNet
|
||||
from core.nodes.base import CoreNodeBase
|
||||
from core.nodes.interface import CoreInterface
|
||||
from core.nodes.network import WlanNode
|
||||
|
||||
GROUP = "Quagga"
|
||||
|
||||
|
||||
def has_mtu_mismatch(ifc: CoreInterface) -> bool:
|
||||
"""
|
||||
Helper to detect MTU mismatch and add the appropriate OSPF
|
||||
mtu-ignore command. This is needed when e.g. a node is linked via a
|
||||
GreTap device.
|
||||
"""
|
||||
if ifc.mtu != 1500:
|
||||
return True
|
||||
if not ifc.net:
|
||||
return False
|
||||
for i in ifc.net.netifs():
|
||||
if i.mtu != ifc.mtu:
|
||||
return True
|
||||
return False
|
||||
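A tiny usage illustration of the helper above (stand-in object, not part of this commit): any interface whose MTU differs from 1500, or from a peer interface on the same network, triggers the "ip ospf mtu-ignore" hook used by the OSPF services.

class _Ifc:
    mtu = 1400
    net = None  # not attached to a network

assert has_mtu_mismatch(_Ifc()) is True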
|
||||
|
||||
def get_min_mtu(ifc):
|
||||
"""
|
||||
Helper to discover the minimum MTU of interfaces linked with the
|
||||
given interface.
|
||||
"""
|
||||
mtu = ifc.mtu
|
||||
if not ifc.net:
|
||||
return mtu
|
||||
for i in ifc.net.netifs():
|
||||
if i.mtu < mtu:
|
||||
mtu = i.mtu
|
||||
return mtu
|
||||
|
||||
|
||||
def get_router_id(node: CoreNodeBase) -> str:
|
||||
"""
|
||||
Helper to return the first IPv4 address of a node as its router ID.
|
||||
"""
|
||||
for ifc in node.netifs():
|
||||
if getattr(ifc, "control", False):
|
||||
continue
|
||||
for a in ifc.addrlist:
|
||||
a = a.split("/")[0]
|
||||
if netaddr.valid_ipv4(a):
|
||||
return a
|
||||
return "0.0.0.0"
|
||||
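Similarly for get_router_id(): the first non-control IPv4 address found wins, with 0.0.0.0 as the fallback (stand-in objects, illustration only, not part of this commit):

class _FakeIfc:
    control = False
    addrlist = ["2001:db8::1/64", "10.0.0.1/24"]

class _FakeNode:
    def netifs(self):
        return [_FakeIfc()]

assert get_router_id(_FakeNode()) == "10.0.0.1"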
|
||||
|
||||
class Zebra(ConfigService):
|
||||
name = "zebra"
|
||||
group = GROUP
|
||||
directories = ["/usr/local/etc/quagga", "/var/run/quagga"]
|
||||
files = [
|
||||
"/usr/local/etc/quagga/Quagga.conf",
|
||||
"quaggaboot.sh",
|
||||
"/usr/local/etc/quagga/vtysh.conf",
|
||||
]
|
||||
executables = ["zebra"]
|
||||
dependencies = []
|
||||
startup = ["sh quaggaboot.sh zebra"]
|
||||
validate = ["pidof zebra"]
|
||||
shutdown = ["killall zebra"]
|
||||
validation_mode = ConfigServiceMode.BLOCKING
|
||||
default_configs = []
|
||||
modes = {}
|
||||
|
||||
def data(self) -> Dict[str, Any]:
|
||||
quagga_bin_search = self.node.session.options.get_config(
|
||||
"quagga_bin_search", default="/usr/local/bin /usr/bin /usr/lib/quagga"
|
||||
).strip('"')
|
||||
quagga_sbin_search = self.node.session.options.get_config(
|
||||
"quagga_sbin_search", default="/usr/local/sbin /usr/sbin /usr/lib/quagga"
|
||||
).strip('"')
|
||||
quagga_state_dir = constants.QUAGGA_STATE_DIR
|
||||
quagga_conf = self.files[0]
|
||||
|
||||
services = []
|
||||
want_ip4 = False
|
||||
want_ip6 = False
|
||||
for service in self.node.config_services.values():
|
||||
if self.name not in service.dependencies:
|
||||
continue
|
||||
if service.ipv4_routing:
|
||||
want_ip4 = True
|
||||
if service.ipv6_routing:
|
||||
want_ip6 = True
|
||||
services.append(service)
|
||||
|
||||
interfaces = []
|
||||
for ifc in self.node.netifs():
|
||||
ip4s = []
|
||||
ip6s = []
|
||||
for x in ifc.addrlist:
|
||||
addr = x.split("/")[0]
|
||||
if netaddr.valid_ipv4(addr):
|
||||
ip4s.append(x)
|
||||
else:
|
||||
ip6s.append(x)
|
||||
is_control = getattr(ifc, "control", False)
|
||||
interfaces.append((ifc, ip4s, ip6s, is_control))
|
||||
|
||||
return dict(
|
||||
quagga_bin_search=quagga_bin_search,
|
||||
quagga_sbin_search=quagga_sbin_search,
|
||||
quagga_state_dir=quagga_state_dir,
|
||||
quagga_conf=quagga_conf,
|
||||
interfaces=interfaces,
|
||||
want_ip4=want_ip4,
|
||||
want_ip6=want_ip6,
|
||||
services=services,
|
||||
)
|
||||
|
||||
|
||||
class QuaggaService(abc.ABC):
|
||||
group = GROUP
|
||||
directories = []
|
||||
files = []
|
||||
executables = []
|
||||
dependencies = ["zebra"]
|
||||
startup = []
|
||||
validate = []
|
||||
shutdown = []
|
||||
validation_mode = ConfigServiceMode.BLOCKING
|
||||
default_configs = []
|
||||
modes = {}
|
||||
ipv4_routing = False
|
||||
ipv6_routing = False
|
||||
|
||||
@abc.abstractmethod
|
||||
def quagga_interface_config(self, ifc: CoreInterface) -> str:
|
||||
raise NotImplementedError
|
||||
|
||||
@abc.abstractmethod
|
||||
def quagga_config(self) -> str:
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
class Ospfv2(QuaggaService, ConfigService):
|
||||
"""
|
||||
The OSPFv2 service provides IPv4 routing for wired networks. It does
|
||||
not build its own configuration file but has hooks for adding to the
|
||||
unified Quagga.conf file.
|
||||
"""
|
||||
|
||||
name = "OSPFv2"
|
||||
validate = ["pidof ospfd"]
|
||||
shutdown = ["killall ospfd"]
|
||||
ipv4_routing = True
|
||||
|
||||
def quagga_interface_config(self, ifc: CoreInterface) -> str:
|
||||
if has_mtu_mismatch(ifc):
|
||||
return "ip ospf mtu-ignore"
|
||||
else:
|
||||
return ""
|
||||
|
||||
def quagga_config(self) -> str:
|
||||
router_id = get_router_id(self.node)
|
||||
addresses = []
|
||||
for ifc in self.node.netifs():
|
||||
if getattr(ifc, "control", False):
|
||||
continue
|
||||
for a in ifc.addrlist:
|
||||
addr = a.split("/")[0]
|
||||
if netaddr.valid_ipv4(addr):
|
||||
addresses.append(a)
|
||||
data = dict(router_id=router_id, addresses=addresses)
|
||||
text = """
|
||||
router ospf
|
||||
router-id ${router_id}
|
||||
% for addr in addresses:
|
||||
network ${addr} area 0
|
||||
% endfor
|
||||
!
|
||||
"""
|
||||
return self.render_text(text, data)
|
||||
|
||||
|
||||
class Ospfv3(QuaggaService, ConfigService):
|
||||
"""
|
||||
The OSPFv3 service provides IPv6 routing for wired networks. It does
|
||||
not build its own configuration file but has hooks for adding to the
|
||||
unified Quagga.conf file.
|
||||
"""
|
||||
|
||||
name = "OSPFv3"
|
||||
shutdown = ("killall ospf6d",)
|
||||
validate = ("pidof ospf6d",)
|
||||
ipv4_routing = True
|
||||
ipv6_routing = True
|
||||
|
||||
def quagga_interface_config(self, ifc: CoreInterface) -> str:
|
||||
mtu = get_min_mtu(ifc)
|
||||
if mtu < ifc.mtu:
|
||||
return f"ipv6 ospf6 ifmtu {mtu}"
|
||||
else:
|
||||
return ""
|
||||
|
||||
def quagga_config(self) -> str:
|
||||
router_id = get_router_id(self.node)
|
||||
ifnames = []
|
||||
for ifc in self.node.netifs():
|
||||
if getattr(ifc, "control", False):
|
||||
continue
|
||||
ifnames.append(ifc.name)
|
||||
data = dict(router_id=router_id, ifnames=ifnames)
|
||||
text = """
|
||||
router ospf6
|
||||
instance-id 65
|
||||
router-id ${router_id}
|
||||
% for ifname in ifnames:
|
||||
interface ${ifname} area 0.0.0.0
|
||||
% endfor
|
||||
!
|
||||
"""
|
||||
return self.render_text(text, data)
|
||||
|
||||
|
||||
class Ospfv3mdr(Ospfv3):
|
||||
"""
|
||||
The OSPFv3 MANET Designated Router (MDR) service provides IPv6
|
||||
routing for wireless networks. It does not build its own
|
||||
configuration file but has hooks for adding to the
|
||||
unified Quagga.conf file.
|
||||
"""
|
||||
|
||||
name = "OSPFv3MDR"
|
||||
|
||||
def data(self) -> Dict[str, Any]:
|
||||
for ifc in self.node.netifs():
|
||||
is_wireless = isinstance(ifc.net, (WlanNode, EmaneNet))
|
||||
logging.info("MDR wireless: %s", is_wireless)
|
||||
return dict()
|
||||
|
||||
def quagga_interface_config(self, ifc: CoreInterface) -> str:
|
||||
config = super().quagga_interface_config(ifc)
|
||||
if isinstance(ifc.net, (WlanNode, EmaneNet)):
|
||||
config = self.clean_text(
|
||||
f"""
|
||||
{config}
|
||||
ipv6 ospf6 hello-interval 2
|
||||
ipv6 ospf6 dead-interval 6
|
||||
ipv6 ospf6 retransmit-interval 5
|
||||
ipv6 ospf6 network manet-designated-router
|
||||
ipv6 ospf6 twohoprefresh 3
|
||||
ipv6 ospf6 adjacencyconnectivity uniconnected
|
||||
ipv6 ospf6 lsafullness mincostlsa
|
||||
"""
|
||||
)
|
||||
return config
|
||||
|
||||
|
||||
class Bgp(QuaggaService, ConfigService):
|
||||
"""
|
||||
The BGP service provides interdomain routing.
|
||||
Peers must be manually configured, with a full mesh for those
|
||||
having the same AS number.
|
||||
"""
|
||||
|
||||
name = "BGP"
|
||||
shutdown = ["killall bgpd"]
|
||||
validate = ["pidof bgpd"]
|
||||
ipv4_routing = True
|
||||
ipv6_routing = True
|
||||
|
||||
def quagga_interface_config(self, ifc: CoreInterface) -> str:
|
||||
return ""
|
||||
|
||||
def quagga_config(self) -> str:
|
||||
router_id = get_router_id(self.node)
|
||||
text = f"""
|
||||
! BGP configuration
|
||||
! You should configure the AS number below
|
||||
! along with this router's peers.
|
||||
router bgp {self.node.id}
|
||||
bgp router-id {router_id}
|
||||
redistribute connected
|
||||
!neighbor 1.2.3.4 remote-as 555
|
||||
!
|
||||
"""
|
||||
return self.clean_text(text)
|
||||
|
||||
|
||||
class Rip(QuaggaService, ConfigService):
|
||||
"""
|
||||
The RIP service provides IPv4 routing for wired networks.
|
||||
"""
|
||||
|
||||
name = "RIP"
|
||||
shutdown = ["killall ripd"]
|
||||
validate = ["pidof ripd"]
|
||||
ipv4_routing = True
|
||||
|
||||
def quagga_config(self) -> str:
|
||||
text = """
|
||||
router rip
|
||||
redistribute static
|
||||
redistribute connected
|
||||
redistribute ospf
|
||||
network 0.0.0.0/0
|
||||
!
|
||||
"""
|
||||
return self.clean_text(text)
|
||||
|
||||
def quagga_interface_config(self, ifc: CoreInterface) -> str:
|
||||
return ""
|
||||
|
||||
|
||||
class Ripng(QuaggaService, ConfigService):
|
||||
"""
|
||||
The RIP NG service provides IPv6 routing for wired networks.
|
||||
"""
|
||||
|
||||
name = "RIPNG"
|
||||
shutdown = ["killall ripngd"]
|
||||
validate = ["pidof ripngd"]
|
||||
ipv6_routing = True
|
||||
|
||||
def quagga_config(self) -> str:
|
||||
text = """
|
||||
router ripng
|
||||
redistribute static
|
||||
redistribute connected
|
||||
redistribute ospf6
|
||||
network ::/0
|
||||
!
|
||||
"""
|
||||
return self.clean_text(text)
|
||||
|
||||
def quagga_interface_config(self, ifc: CoreInterface) -> str:
|
||||
return ""
|
||||
|
||||
|
||||
class Babel(QuaggaService, ConfigService):
|
||||
"""
|
||||
The Babel service provides a loop-avoiding distance-vector routing
|
||||
protocol for IPv6 and IPv4 with fast convergence properties.
|
||||
"""
|
||||
|
||||
name = "Babel"
|
||||
shutdown = ["killall babeld"]
|
||||
validate = ["pidof babeld"]
|
||||
ipv6_routing = True
|
||||
|
||||
def quagga_config(self) -> str:
|
||||
ifnames = []
|
||||
for ifc in self.node.netifs():
|
||||
if getattr(ifc, "control", False):
|
||||
continue
|
||||
ifnames.append(ifc.name)
|
||||
text = """
|
||||
router babel
|
||||
% for ifname in ifnames:
|
||||
network ${ifname}
|
||||
% endfor
|
||||
redistribute static
|
||||
redistribute connected
|
||||
!
|
||||
"""
|
||||
data = dict(ifnames=ifnames)
|
||||
return self.render_text(text, data)
|
||||
|
||||
def quagga_interface_config(self, ifc: CoreInterface) -> str:
|
||||
if isinstance(ifc.net, (WlanNode, EmaneNet)):
|
||||
text = """
|
||||
babel wireless
|
||||
no babel split-horizon
|
||||
"""
|
||||
else:
|
||||
text = """
|
||||
babel wired
|
||||
babel split-horizon
|
||||
"""
|
||||
return self.clean_text(text)
|
||||
|
||||
|
||||
class Xpimd(QuaggaService, ConfigService):
|
||||
"""
|
||||
PIM multicast routing based on XORP.
|
||||
"""
|
||||
|
||||
name = "Xpimd"
|
||||
shutdown = ["killall xpimd"]
|
||||
validate = ["pidof xpimd"]
|
||||
ipv4_routing = True
|
||||
|
||||
def quagga_config(self) -> str:
|
||||
ifname = "eth0"
|
||||
for ifc in self.node.netifs():
|
||||
if ifc.name != "lo":
|
||||
ifname = ifc.name
|
||||
break
|
||||
|
||||
text = f"""
|
||||
router mfea
|
||||
!
|
||||
router igmp
|
||||
!
|
||||
router pim
|
||||
!ip pim rp-address 10.0.0.1
|
||||
ip pim bsr-candidate {ifname}
|
||||
ip pim rp-candidate {ifname}
|
||||
!ip pim spt-threshold interval 10 bytes 80000
|
||||
!
|
||||
"""
|
||||
return self.clean_text(text)
|
||||
|
||||
def quagga_interface_config(self, ifc: CoreInterface) -> str:
|
||||
text = """
|
||||
ip mfea
|
||||
ip pim
|
||||
"""
|
||||
return self.clean_text(text)
|
|
@ -0,0 +1,25 @@
|
|||
% for ifc, ip4s, ip6s, is_control in interfaces:
|
||||
interface ${ifc.name}
|
||||
% if want_ip4:
|
||||
% for addr in ip4s:
|
||||
ip address ${addr}
|
||||
% endfor
|
||||
% endif
|
||||
% if want_ip6:
|
||||
% for addr in ip6s:
|
||||
ipv6 address ${addr}
|
||||
% endfor
|
||||
% endif
|
||||
% if not is_control:
|
||||
% for service in services:
|
||||
% for line in service.quagga_interface_config(ifc).split("\n"):
|
||||
${line}
|
||||
% endfor
|
||||
% endfor
|
||||
% endif
|
||||
!
|
||||
% endfor
|
||||
|
||||
% for service in services:
|
||||
${service.quagga_config()}
|
||||
% endfor
|
|
@ -0,0 +1,92 @@
|
|||
#!/bin/sh
|
||||
# auto-generated by zebra service (quagga.py)
|
||||
QUAGGA_CONF="${quagga_conf}"
|
||||
QUAGGA_SBIN_SEARCH="${quagga_sbin_search}"
|
||||
QUAGGA_BIN_SEARCH="${quagga_bin_search}"
|
||||
QUAGGA_STATE_DIR="${quagga_state_dir}"
|
||||
|
||||
searchforprog()
|
||||
{
|
||||
prog=$1
|
||||
searchpath=$@
|
||||
ret=
|
||||
for p in $searchpath; do
|
||||
if [ -x $p/$prog ]; then
|
||||
ret=$p
|
||||
break
|
||||
fi
|
||||
done
|
||||
echo $ret
|
||||
}
|
||||
|
||||
confcheck()
|
||||
{
|
||||
CONF_DIR=`dirname $QUAGGA_CONF`
|
||||
# if /etc/quagga exists, point /etc/quagga/Quagga.conf -> CONF_DIR
|
||||
if [ "$CONF_DIR" != "/etc/quagga" ] && [ -d /etc/quagga ] && [ ! -e /etc/quagga/Quagga.conf ]; then
|
||||
ln -s $CONF_DIR/Quagga.conf /etc/quagga/Quagga.conf
|
||||
fi
|
||||
# if /etc/quagga exists, point /etc/quagga/vtysh.conf -> CONF_DIR
|
||||
if [ "$CONF_DIR" != "/etc/quagga" ] && [ -d /etc/quagga ] && [ ! -e /etc/quagga/vtysh.conf ]; then
|
||||
ln -s $CONF_DIR/vtysh.conf /etc/quagga/vtysh.conf
|
||||
fi
|
||||
}
|
||||
|
||||
bootdaemon()
|
||||
{
|
||||
QUAGGA_SBIN_DIR=$(searchforprog $1 $QUAGGA_SBIN_SEARCH)
|
||||
if [ "z$QUAGGA_SBIN_DIR" = "z" ]; then
|
||||
echo "ERROR: Quagga's '$1' daemon not found in search path:"
|
||||
echo " $QUAGGA_SBIN_SEARCH"
|
||||
return 1
|
||||
fi
|
||||
|
||||
flags=""
|
||||
|
||||
if [ "$1" = "xpimd" ] && \\
|
||||
grep -E -q '^[[:space:]]*router[[:space:]]+pim6[[:space:]]*$' $QUAGGA_CONF; then
|
||||
flags="$flags -6"
|
||||
fi
|
||||
|
||||
$QUAGGA_SBIN_DIR/$1 $flags -d
|
||||
if [ "$?" != "0" ]; then
|
||||
echo "ERROR: Quagga's '$1' daemon failed to start!:"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
bootquagga()
|
||||
{
|
||||
QUAGGA_BIN_DIR=$(searchforprog 'vtysh' $QUAGGA_BIN_SEARCH)
|
||||
if [ "z$QUAGGA_BIN_DIR" = "z" ]; then
|
||||
echo "ERROR: Quagga's 'vtysh' program not found in search path:"
|
||||
echo " $QUAGGA_BIN_SEARCH"
|
||||
return 1
|
||||
fi
|
||||
|
||||
# fix /var/run/quagga permissions
|
||||
id -u quagga 2>/dev/null >/dev/null
|
||||
if [ "$?" = "0" ]; then
|
||||
chown quagga $QUAGGA_STATE_DIR
|
||||
fi
|
||||
|
||||
bootdaemon "zebra"
|
||||
for r in rip ripng ospf6 ospf bgp babel; do
|
||||
if grep -q "^router \\<$${}{r}\\>" $QUAGGA_CONF; then
|
||||
bootdaemon "$${}{r}d"
|
||||
fi
|
||||
done
|
||||
|
||||
if grep -E -q '^[[:space:]]*router[[:space:]]+pim6?[[:space:]]*$' $QUAGGA_CONF; then
|
||||
bootdaemon "xpimd"
|
||||
fi
|
||||
|
||||
$QUAGGA_BIN_DIR/vtysh -b
|
||||
}
|
||||
|
||||
if [ "$1" != "zebra" ]; then
|
||||
echo "WARNING: '$1': all Quagga daemons are launched by the 'zebra' service!"
|
||||
exit 1
|
||||
fi
|
||||
confcheck
|
||||
bootquagga
|
|
@ -0,0 +1 @@
|
|||
service integrated-vtysh-config
|
0
daemon/core/configservices/sercurityservices/__init__.py
Normal file
141
daemon/core/configservices/sercurityservices/services.py
Normal file
|
@ -0,0 +1,141 @@
|
|||
from typing import Any, Dict
|
||||
|
||||
import netaddr
|
||||
|
||||
from core.config import Configuration
|
||||
from core.configservice.base import ConfigService, ConfigServiceMode
|
||||
from core.emulator.enumerations import ConfigDataTypes
|
||||
|
||||
GROUP_NAME = "Security"
|
||||
|
||||
|
||||
class VpnClient(ConfigService):
|
||||
name = "VPNClient"
|
||||
group = GROUP_NAME
|
||||
directories = []
|
||||
files = ["vpnclient.sh"]
|
||||
executables = ["openvpn", "ip", "killall"]
|
||||
dependencies = []
|
||||
startup = ["sh vpnclient.sh"]
|
||||
validate = ["pidof openvpn"]
|
||||
shutdown = ["killall openvpn"]
|
||||
validation_mode = ConfigServiceMode.BLOCKING
|
||||
default_configs = [
|
||||
Configuration(
|
||||
_id="keydir",
|
||||
_type=ConfigDataTypes.STRING,
|
||||
label="Key Dir",
|
||||
default="/etc/core/keys",
|
||||
),
|
||||
Configuration(
|
||||
_id="keyname",
|
||||
_type=ConfigDataTypes.STRING,
|
||||
label="Key Name",
|
||||
default="client1",
|
||||
),
|
||||
Configuration(
|
||||
_id="server",
|
||||
_type=ConfigDataTypes.STRING,
|
||||
label="Server",
|
||||
default="10.0.2.10",
|
||||
),
|
||||
]
|
||||
modes = {}
|
||||
|
||||
|
||||
class VpnServer(ConfigService):
|
||||
name = "VPNServer"
|
||||
group = GROUP_NAME
|
||||
directories = []
|
||||
files = ["vpnserver.sh"]
|
||||
executables = ["openvpn", "ip", "killall"]
|
||||
dependencies = []
|
||||
startup = ["sh vpnserver.sh"]
|
||||
validate = ["pidof openvpn"]
|
||||
shutdown = ["killall openvpn"]
|
||||
validation_mode = ConfigServiceMode.BLOCKING
|
||||
default_configs = [
|
||||
Configuration(
|
||||
_id="keydir",
|
||||
_type=ConfigDataTypes.STRING,
|
||||
label="Key Dir",
|
||||
default="/etc/core/keys",
|
||||
),
|
||||
Configuration(
|
||||
_id="keyname",
|
||||
_type=ConfigDataTypes.STRING,
|
||||
label="Key Name",
|
||||
default="server",
|
||||
),
|
||||
Configuration(
|
||||
_id="subnet",
|
||||
_type=ConfigDataTypes.STRING,
|
||||
label="Subnet",
|
||||
default="10.0.200.0",
|
||||
),
|
||||
]
|
||||
modes = {}
|
||||
|
||||
def data(self) -> Dict[str, Any]:
|
||||
address = None
|
||||
for ifc in self.node.netifs():
|
||||
if getattr(ifc, "control", False):
|
||||
continue
|
||||
for x in ifc.addrlist:
|
||||
addr = x.split("/")[0]
|
||||
if netaddr.valid_ipv4(addr):
|
||||
address = addr
|
||||
return dict(address=address)
|
||||
|
||||
|
||||
class IPsec(ConfigService):
|
||||
name = "IPsec"
|
||||
group = GROUP_NAME
|
||||
directories = []
|
||||
files = ["ipsec.sh"]
|
||||
executables = ["racoon", "ip", "setkey", "killall"]
|
||||
dependencies = []
|
||||
startup = ["sh ipsec.sh"]
|
||||
validate = ["pidof racoon"]
|
||||
shutdown = ["killall racoon"]
|
||||
validation_mode = ConfigServiceMode.BLOCKING
|
||||
default_configs = []
|
||||
modes = {}
|
||||
|
||||
|
||||
class Firewall(ConfigService):
|
||||
name = "Firewall"
|
||||
group = GROUP_NAME
|
||||
directories = []
|
||||
files = ["firewall.sh"]
|
||||
executables = ["iptables"]
|
||||
dependencies = []
|
||||
startup = ["sh firewall.sh"]
|
||||
validate = []
|
||||
shutdown = []
|
||||
validation_mode = ConfigServiceMode.BLOCKING
|
||||
default_configs = []
|
||||
modes = {}
|
||||
|
||||
|
||||
class Nat(ConfigService):
|
||||
name = "NAT"
|
||||
group = GROUP_NAME
|
||||
directories = []
|
||||
files = ["nat.sh"]
|
||||
executables = ["iptables"]
|
||||
dependencies = []
|
||||
startup = ["sh nat.sh"]
|
||||
validate = []
|
||||
shutdown = []
|
||||
validation_mode = ConfigServiceMode.BLOCKING
|
||||
default_configs = []
|
||||
modes = {}
|
||||
|
||||
def data(self) -> Dict[str, Any]:
|
||||
ifnames = []
|
||||
for ifc in self.node.netifs():
|
||||
if getattr(ifc, "control", False):
|
||||
continue
|
||||
ifnames.append(ifc.name)
|
||||
return dict(ifnames=ifnames)
|
|
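The default_configs entries declared on VpnClient/VpnServer above surface in their templates as a plain config mapping, e.g. ${config["keydir"]} in vpnclient.sh below. A simplified illustration of that lookup with a hypothetical per-node override (values made up, not part of this commit):

defaults = {"keydir": "/etc/core/keys", "keyname": "client1", "server": "10.0.2.10"}
config = {**defaults, "server": "10.0.2.20"}  # hypothetical override of the default server
print(config["keydir"], config["server"])
# /etc/core/keys 10.0.2.20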
@ -0,0 +1,30 @@
|
|||
# -------- CUSTOMIZATION REQUIRED --------
|
||||
#
|
||||
# Below are sample iptables firewall rules that you can uncomment and edit.
|
||||
# You can also use ip6tables rules for IPv6.
|
||||
#
|
||||
|
||||
# start by flushing all firewall rules (so this script may be re-run)
|
||||
#iptables -F
|
||||
|
||||
# allow traffic related to established connections
|
||||
#iptables -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
|
||||
|
||||
# allow TCP packets from any source destined for 192.168.1.1
|
||||
#iptables -A INPUT -s 0/0 -i eth0 -d 192.168.1.1 -p TCP -j ACCEPT
|
||||
|
||||
# allow OpenVPN server traffic from eth0
|
||||
#iptables -A INPUT -p udp --dport 1194 -j ACCEPT
|
||||
#iptables -A INPUT -i eth0 -j DROP
|
||||
#iptables -A OUTPUT -p udp --sport 1194 -j ACCEPT
|
||||
#iptables -A OUTPUT -o eth0 -j DROP
|
||||
|
||||
# allow ICMP ping traffic
|
||||
#iptables -A OUTPUT -p icmp --icmp-type echo-request -j ACCEPT
|
||||
#iptables -A INPUT -p icmp --icmp-type echo-reply -j ACCEPT
|
||||
|
||||
# allow SSH traffic
|
||||
#iptables -A INPUT -p tcp -m state --state NEW -m tcp --dport 22 -j ACCEPT
|
||||
|
||||
# drop all other traffic coming in eth0
|
||||
#iptables -A INPUT -i eth0 -j DROP
|
114
daemon/core/configservices/sercurityservices/templates/ipsec.sh
Normal file
|
@ -0,0 +1,114 @@
|
|||
# -------- CUSTOMIZATION REQUIRED --------
|
||||
#
|
||||
# The IPsec service builds ESP tunnels between the specified peers using the
|
||||
# racoon IKE keying daemon. You need to provide keys and the addresses of
|
||||
# peers, along with subnets to tunnel.
|
||||
|
||||
# directory containing the certificate and key described below
|
||||
keydir=/etc/core/keys
|
||||
|
||||
# the name used for the "$certname.pem" x509 certificate and
|
||||
# "$certname.key" RSA private key, which can be generated using openssl
|
||||
certname=ipsec1
|
||||
|
||||
# list the public-facing IP addresses, starting with the localhost and followed
|
||||
# by each tunnel peer, separated with a single space
|
||||
tunnelhosts="172.16.0.1AND172.16.0.2 172.16.0.1AND172.16.2.1"
|
||||
|
||||
# Define T<i> where i is the index for each tunnel peer host from
|
||||
# the tunnel_hosts list above (0 is localhost).
|
||||
# T<i> is a list of IPsec tunnels with peer i, with a local subnet address
|
||||
# followed by the remote subnet address:
|
||||
# T<i>="<local>AND<remote> <local>AND<remote>"
|
||||
# For example, 172.16.0.0/24 is a local network (behind this node) to be
|
||||
# tunneled and 172.16.2.0/24 is a remote network (behind peer 1)
|
||||
T1="172.16.3.0/24AND172.16.5.0/24"
|
||||
T2="172.16.4.0/24AND172.16.5.0/24 172.16.4.0/24AND172.16.6.0/24"
|
||||
|
||||
# -------- END CUSTOMIZATION --------
|
||||
|
||||
echo "building config $PWD/ipsec.conf..."
|
||||
echo "building config $PWD/ipsec.conf..." > $PWD/ipsec.log
|
||||
|
||||
checkip=0
|
||||
if [ "$(dpkg -l | grep " sipcalc ")" = "" ]; then
|
||||
echo "WARNING: ip validation disabled because package sipcalc not installed
|
||||
" >> $PWD/ipsec.log
|
||||
checkip=1
|
||||
fi
|
||||
|
||||
echo "#!/usr/sbin/setkey -f
|
||||
# Flush the SAD and SPD
|
||||
flush;
|
||||
spdflush;
|
||||
|
||||
# Security policies " > $PWD/ipsec.conf
|
||||
i=0
|
||||
for hostpair in $tunnelhosts; do
|
||||
i=`expr $i + 1`
|
||||
# parse tunnel host IP
|
||||
thishost=$${}{hostpair%%AND*}
|
||||
peerhost=$${}{hostpair##*AND}
|
||||
if [ $checkip = "0" ] &&
|
||||
[ "$(sipcalc "$thishost" "$peerhost" | grep ERR)" != "" ]; then
|
||||
echo "ERROR: invalid host address $thishost or $peerhost " >> $PWD/ipsec.log
|
||||
fi
|
||||
# parse each tunnel addresses
|
||||
tunnel_list_var_name=T$i
|
||||
eval tunnels="$"$tunnel_list_var_name""
|
||||
for ttunnel in $tunnels; do
|
||||
lclnet=$${}{ttunnel%%AND*}
|
||||
rmtnet=$${}{ttunnel##*AND}
|
||||
if [ $checkip = "0" ] &&
|
||||
[ "$(sipcalc "$lclnet" "$rmtnet"| grep ERR)" != "" ]; then
|
||||
echo "ERROR: invalid tunnel address $lclnet and $rmtnet " >> $PWD/ipsec.log
|
||||
fi
|
||||
# add tunnel policies
|
||||
echo "
|
||||
spdadd $lclnet $rmtnet any -P out ipsec
|
||||
esp/tunnel/$thishost-$peerhost/require;
|
||||
spdadd $rmtnet $lclnet any -P in ipsec
|
||||
esp/tunnel/$peerhost-$thishost/require; " >> $PWD/ipsec.conf
|
||||
done
|
||||
done
|
||||
|
||||
echo "building config $PWD/racoon.conf..."
|
||||
if [ ! -e $keydir\/$certname.key ] || [ ! -e $keydir\/$certname.pem ]; then
|
||||
echo "ERROR: missing certification files under $keydir $certname.key or $certname.pem " >> $PWD/ipsec.log
|
||||
fi
|
||||
echo "
|
||||
path certificate \"$keydir\";
|
||||
listen {
|
||||
adminsock disabled;
|
||||
}
|
||||
remote anonymous
|
||||
{
|
||||
exchange_mode main;
|
||||
certificate_type x509 \"$certname.pem\" \"$certname.key\";
|
||||
ca_type x509 \"ca-cert.pem\";
|
||||
my_identifier asn1dn;
|
||||
peers_identifier asn1dn;
|
||||
|
||||
proposal {
|
||||
encryption_algorithm 3des ;
|
||||
hash_algorithm sha1;
|
||||
authentication_method rsasig ;
|
||||
dh_group modp768;
|
||||
}
|
||||
}
|
||||
sainfo anonymous
|
||||
{
|
||||
pfs_group modp768;
|
||||
lifetime time 1 hour ;
|
||||
encryption_algorithm 3des, blowfish 448, rijndael ;
|
||||
authentication_algorithm hmac_sha1, hmac_md5 ;
|
||||
compression_algorithm deflate ;
|
||||
}
|
||||
" > $PWD/racoon.conf
|
||||
|
||||
# the setkey program is required from the ipsec-tools package
|
||||
echo "running setkey -f $PWD/ipsec.conf..."
|
||||
setkey -f $PWD/ipsec.conf
|
||||
|
||||
echo "running racoon -d -f $PWD/racoon.conf..."
|
||||
racoon -d -f $PWD/racoon.conf -l racoon.log
|
|
@ -0,0 +1,14 @@
#!/bin/sh
# generated by security.py
# NAT out the first interface by default
% for index, ifname in enumerate(ifnames):
% if index == 0:
iptables -t nat -A POSTROUTING -o ${ifname} -j MASQUERADE
iptables -A FORWARD -i ${ifname} -m state --state RELATED,ESTABLISHED -j ACCEPT
iptables -A FORWARD -i ${ifname} -j DROP
% else:
# iptables -t nat -A POSTROUTING -o ${ifname} -j MASQUERADE
# iptables -A FORWARD -i ${ifname} -m state --state RELATED,ESTABLISHED -j ACCEPT
# iptables -A FORWARD -i ${ifname} -j DROP
% endif
% endfor
@ -0,0 +1,61 @@
|
|||
# -------- CUSTOMIZATION REQUIRED --------
|
||||
#
|
||||
# The VPNClient service builds a VPN tunnel to the specified VPN server using
|
||||
# OpenVPN software and a virtual TUN/TAP device.
|
||||
|
||||
# directory containing the certificate and key described below
|
||||
keydir=${config["keydir"]}
|
||||
|
||||
# the name used for a "$keyname.crt" certificate and "$keyname.key" private key.
|
||||
keyname=${config["keyname"]}
|
||||
|
||||
# the public IP address of the VPN server this client should connect with
|
||||
vpnserver=${config["server"]}
|
||||
|
||||
# optional next hop for adding a static route to reach the VPN server
|
||||
#nexthop="10.0.1.1"
|
||||
|
||||
# --------- END CUSTOMIZATION --------
|
||||
|
||||
# validate addresses
|
||||
if [ "$(dpkg -l | grep " sipcalc ")" = "" ]; then
|
||||
echo "WARNING: ip validation disabled because package sipcalc not installed
|
||||
" > $PWD/vpnclient.log
|
||||
else
|
||||
if [ "$(sipcalc "$vpnserver" "$nexthop" | grep ERR)" != "" ]; then
|
||||
echo "ERROR: invalide address $vpnserver or $nexthop " > $PWD/vpnclient.log
|
||||
fi
|
||||
fi
|
||||
|
||||
# validate key and certification files
|
||||
if [ ! -e $keydir\/$keyname.key ] || [ ! -e $keydir\/$keyname.crt ] \
|
||||
|| [ ! -e $keydir\/ca.crt ] || [ ! -e $keydir\/dh1024.pem ]; then
|
||||
echo "ERROR: missing certification or key files under $keydir $keyname.key or $keyname.crt or ca.crt or dh1024.pem" >> $PWD/vpnclient.log
|
||||
fi
|
||||
|
||||
# if necessary, add a static route for reaching the VPN server IP via the IF
|
||||
vpnservernet=$${}{vpnserver%.*}.0/24
|
||||
if [ "$nexthop" != "" ]; then
|
||||
ip route add $vpnservernet via $nexthop
|
||||
fi
|
||||
|
||||
# create openvpn client.conf
|
||||
(
|
||||
cat << EOF
|
||||
client
|
||||
dev tun
|
||||
proto udp
|
||||
remote $vpnserver 1194
|
||||
nobind
|
||||
ca $keydir/ca.crt
|
||||
cert $keydir/$keyname.crt
|
||||
key $keydir/$keyname.key
|
||||
dh $keydir/dh1024.pem
|
||||
cipher AES-256-CBC
|
||||
log $PWD/openvpn-client.log
|
||||
verb 4
|
||||
daemon
|
||||
EOF
|
||||
) > client.conf
|
||||
|
||||
openvpn --config client.conf
|
|
@ -0,0 +1,147 @@
|
|||
# -------- CUSTOMIZATION REQUIRED --------
|
||||
#
|
||||
# The VPNServer service sets up the OpenVPN server for building VPN tunnels
|
||||
# that allow access via TUN/TAP device to private networks.
|
||||
#
|
||||
# note that the IPForward and DefaultRoute services should be enabled
|
||||
|
||||
# directory containing the certificate and key described below, in addition to
|
||||
# a CA certificate and DH key
|
||||
keydir=${config["keydir"]}
|
||||
|
||||
# the name used for a "$keyname.crt" certificate and "$keyname.key" private key.
|
||||
keyname=${config["keyname"]}
|
||||
|
||||
# the VPN subnet address from which the client VPN IP (for the TUN/TAP)
|
||||
# will be allocated
|
||||
vpnsubnet=${config["subnet"]}
|
||||
|
||||
# public IP address of this vpn server (same as VPNClient vpnserver= setting)
|
||||
vpnserver=${address}
|
||||
|
||||
# optional list of private subnets reachable behind this VPN server
|
||||
# each subnet and next hop is separated by a space
|
||||
# "<subnet1>,<nexthop1> <subnet2>,<nexthop2> ..."
|
||||
#privatenets="10.0.11.0,10.0.10.1 10.0.12.0,10.0.10.1"
|
||||
|
||||
# optional list of VPN clients, for statically assigning IP addresses to
|
||||
# clients; also, an optional client subnet can be specified for adding static
|
||||
# routes via the client
|
||||
# Note: VPN addresses x.x.x.0-3 are reserved
|
||||
# "<keyname>,<vpnIP>,<subnetIP> <keyname>,<vpnIP>,<subnetIP> ..."
|
||||
#vpnclients="client1KeyFilename,10.0.200.5,10.0.0.0 client2KeyFilename,,"
|
||||
|
||||
# NOTE: you may need to enable the StaticRoutes service on nodes within the
|
||||
# private subnet, in order to have routes back to the client.
|
||||
# /sbin/ip ro add <vpnsubnet>/24 via <vpnServerRemoteInterface>
|
||||
# /sbin/ip ro add <vpnClientSubnet>/24 via <vpnServerRemoteInterface>
|
||||
|
||||
# -------- END CUSTOMIZATION --------
|
||||
|
||||
echo > $PWD/vpnserver.log
|
||||
rm -f -r $PWD/ccd
|
||||
|
||||
# validate key and certification files
|
||||
if [ ! -e $keydir\/$keyname.key ] || [ ! -e $keydir\/$keyname.crt ] \
|
||||
|| [ ! -e $keydir\/ca.crt ] || [ ! -e $keydir\/dh1024.pem ]; then
|
||||
echo "ERROR: missing certification or key files under $keydir \
|
||||
$keyname.key or $keyname.crt or ca.crt or dh1024.pem" >> $PWD/vpnserver.log
|
||||
fi
|
||||
|
||||
# validate configuration IP addresses
|
||||
checkip=0
|
||||
if [ "$(dpkg -l | grep " sipcalc ")" = "" ]; then
|
||||
echo "WARNING: ip validation disabled because package sipcalc not installed\
|
||||
" >> $PWD/vpnserver.log
|
||||
checkip=1
|
||||
else
|
||||
if [ "$(sipcalc "$vpnsubnet" "$vpnserver" | grep ERR)" != "" ]; then
|
||||
echo "ERROR: invalid vpn subnet or server address \
|
||||
$vpnsubnet or $vpnserver " >> $PWD/vpnserver.log
|
||||
fi
|
||||
fi
|
||||
|
||||
# create client vpn ip pool file
|
||||
(
|
||||
cat << EOF
|
||||
EOF
|
||||
)> $PWD/ippool.txt
|
||||
|
||||
# create server.conf file
|
||||
(
|
||||
cat << EOF
|
||||
# openvpn server config
|
||||
local $vpnserver
|
||||
server $vpnsubnet 255.255.255.0
|
||||
push "redirect-gateway def1"
|
||||
EOF
|
||||
)> $PWD/server.conf
|
||||
|
||||
# add routes to VPN server private subnets, and push these routes to clients
|
||||
for privatenet in $privatenets; do
|
||||
if [ $privatenet != "" ]; then
|
||||
net=$${}{privatenet%%,*}
|
||||
nexthop=$${}{privatenet##*,}
|
||||
if [ $checkip = "0" ] &&
|
||||
[ "$(sipcalc "$net" "$nexthop" | grep ERR)" != "" ]; then
|
||||
echo "ERROR: invalid vpn server private net address \
|
||||
$net or $nexthop " >> $PWD/vpnserver.log
|
||||
fi
|
||||
echo push route $net 255.255.255.0 >> $PWD/server.conf
|
||||
ip ro add $net/24 via $nexthop
|
||||
ip ro add $vpnsubnet/24 via $nexthop
|
||||
fi
|
||||
done
|
||||
|
||||
# allow subnet through this VPN, one route for each client subnet
|
||||
for client in $vpnclients; do
|
||||
if [ $client != "" ]; then
|
||||
cSubnetIP=$${}{client##*,}
|
||||
cVpnIP=$${}{client#*,}
|
||||
cVpnIP=$${}{cVpnIP%%,*}
|
||||
cKeyFilename=$${}{client%%,*}
|
||||
if [ "$cSubnetIP" != "" ]; then
|
||||
if [ $checkip = "0" ] &&
|
||||
[ "$(sipcalc "$cSubnetIP" "$cVpnIP" | grep ERR)" != "" ]; then
|
||||
echo "ERROR: invalid vpn client and subnet address \
|
||||
$cSubnetIP or $cVpnIP " >> $PWD/vpnserver.log
|
||||
fi
|
||||
echo route $cSubnetIP 255.255.255.0 >> $PWD/server.conf
|
||||
if ! test -d $PWD/ccd; then
|
||||
mkdir -p $PWD/ccd
|
||||
echo client-config-dir $PWD/ccd >> $PWD/server.conf
|
||||
fi
|
||||
if test -e $PWD/ccd/$cKeyFilename; then
|
||||
echo iroute $cSubnetIP 255.255.255.0 >> $PWD/ccd/$cKeyFilename
|
||||
else
|
||||
echo iroute $cSubnetIP 255.255.255.0 > $PWD/ccd/$cKeyFilename
|
||||
fi
|
||||
fi
|
||||
if [ "$cVpnIP" != "" ]; then
|
||||
echo $cKeyFilename,$cVpnIP >> $PWD/ippool.txt
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
(
|
||||
cat << EOF
|
||||
keepalive 10 120
|
||||
ca $keydir/ca.crt
|
||||
cert $keydir/$keyname.crt
|
||||
key $keydir/$keyname.key
|
||||
dh $keydir/dh1024.pem
|
||||
cipher AES-256-CBC
|
||||
status /var/log/openvpn-status.log
|
||||
log /var/log/openvpn-server.log
|
||||
ifconfig-pool-linear
|
||||
ifconfig-pool-persist $PWD/ippool.txt
|
||||
port 1194
|
||||
proto udp
|
||||
dev tun
|
||||
verb 4
|
||||
daemon
|
||||
EOF
|
||||
)>> $PWD/server.conf
|
||||
|
||||
# start vpn server
|
||||
openvpn --config server.conf
|
daemon/core/configservices/simpleservice.py | 47 (new file)
@ -0,0 +1,47 @@
from core.config import Configuration
from core.configservice.base import ConfigService, ConfigServiceMode
from core.emulator.enumerations import ConfigDataTypes


class SimpleService(ConfigService):
    name = "Simple"
    group = "SimpleGroup"
    directories = ["/etc/quagga", "/usr/local/lib"]
    files = ["test1.sh", "test2.sh"]
    executables = []
    dependencies = []
    startup = []
    validate = []
    shutdown = []
    validation_mode = ConfigServiceMode.BLOCKING
    default_configs = [
        Configuration(_id="value1", _type=ConfigDataTypes.STRING, label="Text"),
        Configuration(_id="value2", _type=ConfigDataTypes.BOOL, label="Boolean"),
        Configuration(
            _id="value3",
            _type=ConfigDataTypes.STRING,
            label="Multiple Choice",
            options=["value1", "value2", "value3"],
        ),
    ]
    modes = {
        "mode1": {"value1": "value1", "value2": "0", "value3": "value2"},
        "mode2": {"value1": "value2", "value2": "1", "value3": "value3"},
        "mode3": {"value1": "value3", "value2": "0", "value3": "value1"},
    }

    def get_text_template(self, name: str) -> str:
        if name == "test1.sh":
            return """
            # sample script 1
            # node id(${node.id}) name(${node.name})
            # config: ${config}
            echo hello
            """
        elif name == "test2.sh":
            return """
            # sample script 2
            # node id(${node.id}) name(${node.name})
            # config: ${config}
            echo hello2
            """
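Note: SimpleService above returns Mako-style template text (${...} substitutions and % control lines). A minimal sketch of rendering such a template outside the daemon, assuming only the mako package; the node/config stand-ins below are illustrative, not CORE's real objects:

from mako.template import Template

# hypothetical stand-ins for the node and config a ConfigService would supply
node = type("Node", (), {"id": 1, "name": "n1"})()
config = {"value1": "value1", "value2": "0", "value3": "value2"}

text = """
# sample script 1
# node id(${node.id}) name(${node.name})
# config: ${config}
echo hello
"""
print(Template(text).render(node=node, config=config))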
daemon/core/configservices/utilservices/__init__.py | 0 (new file)
daemon/core/configservices/utilservices/services.py | 307 (new file)
@ -0,0 +1,307 @@
|
|||
from typing import Any, Dict
|
||||
|
||||
import netaddr
|
||||
|
||||
from core import utils
|
||||
from core.configservice.base import ConfigService, ConfigServiceMode
|
||||
from core.nodes.base import CoreNode
|
||||
|
||||
GROUP_NAME = "Utility"
|
||||
|
||||
|
||||
class DefaultRouteService(ConfigService):
|
||||
name = "DefaultRoute"
|
||||
group = GROUP_NAME
|
||||
directories = []
|
||||
files = ["defaultroute.sh"]
|
||||
executables = ["ip"]
|
||||
dependencies = []
|
||||
startup = ["sh defaultroute.sh"]
|
||||
validate = []
|
||||
shutdown = []
|
||||
validation_mode = ConfigServiceMode.BLOCKING
|
||||
default_configs = []
|
||||
modes = {}
|
||||
|
||||
def data(self) -> Dict[str, Any]:
|
||||
# only add default routes for linked routing nodes
|
||||
routes = []
|
||||
for other_node in self.node.session.nodes.values():
|
||||
if not isinstance(other_node, CoreNode):
|
||||
continue
|
||||
if other_node.type not in ["router", "mdr"]:
|
||||
continue
|
||||
commonnets = self.node.commonnets(other_node)
|
||||
if commonnets:
|
||||
_, _, router_eth = commonnets[0]
|
||||
for x in router_eth.addrlist:
|
||||
addr, prefix = x.split("/")
|
||||
routes.append(addr)
|
||||
break
|
||||
return dict(routes=routes)
|
||||
|
||||
|
||||
class DefaultMulticastRouteService(ConfigService):
|
||||
name = "DefaultMulticastRoute"
|
||||
group = GROUP_NAME
|
||||
directories = []
|
||||
files = ["defaultmroute.sh"]
|
||||
executables = []
|
||||
dependencies = []
|
||||
startup = ["sh defaultmroute.sh"]
|
||||
validate = []
|
||||
shutdown = []
|
||||
validation_mode = ConfigServiceMode.BLOCKING
|
||||
default_configs = []
|
||||
modes = {}
|
||||
|
||||
def data(self) -> Dict[str, Any]:
|
||||
ifname = None
|
||||
for ifc in self.node.netifs():
|
||||
if getattr(ifc, "control", False):
|
||||
continue
|
||||
ifname = ifc.name
|
||||
break
|
||||
return dict(ifname=ifname)
|
||||
|
||||
|
||||
class StaticRouteService(ConfigService):
|
||||
name = "StaticRoute"
|
||||
group = GROUP_NAME
|
||||
directories = []
|
||||
files = ["staticroute.sh"]
|
||||
executables = []
|
||||
dependencies = []
|
||||
startup = ["sh staticroute.sh"]
|
||||
validate = []
|
||||
shutdown = []
|
||||
validation_mode = ConfigServiceMode.BLOCKING
|
||||
default_configs = []
|
||||
modes = {}
|
||||
|
||||
def data(self) -> Dict[str, Any]:
|
||||
routes = []
|
||||
for ifc in self.node.netifs():
|
||||
if getattr(ifc, "control", False):
|
||||
continue
|
||||
for x in ifc.addrlist:
|
||||
addr = x.split("/")[0]
|
||||
if netaddr.valid_ipv6(addr):
|
||||
dst = "3ffe:4::/64"
|
||||
else:
|
||||
dst = "10.9.8.0/24"
|
||||
net = netaddr.IPNetwork(x)
|
||||
if net[-2] != net[1]:
|
||||
routes.append((dst, net[1]))
|
||||
return dict(routes=routes)
|
||||
|
||||
|
||||
class IpForwardService(ConfigService):
|
||||
name = "IPForward"
|
||||
group = GROUP_NAME
|
||||
directories = []
|
||||
files = ["ipforward.sh"]
|
||||
executables = ["sysctl"]
|
||||
dependencies = []
|
||||
startup = ["sh ipforward.sh"]
|
||||
validate = []
|
||||
shutdown = []
|
||||
validation_mode = ConfigServiceMode.BLOCKING
|
||||
default_configs = []
|
||||
modes = {}
|
||||
|
||||
def data(self) -> Dict[str, Any]:
|
||||
devnames = []
|
||||
for ifc in self.node.netifs():
|
||||
devname = utils.sysctl_devname(ifc.name)
|
||||
devnames.append(devname)
|
||||
return dict(devnames=devnames)
|
||||
|
||||
|
||||
class SshService(ConfigService):
|
||||
name = "SSH"
|
||||
group = GROUP_NAME
|
||||
directories = ["/etc/ssh", "/var/run/sshd"]
|
||||
files = ["startsshd.sh", "/etc/ssh/sshd_config"]
|
||||
executables = ["sshd"]
|
||||
dependencies = []
|
||||
startup = ["sh startsshd.sh"]
|
||||
validate = []
|
||||
shutdown = ["killall sshd"]
|
||||
validation_mode = ConfigServiceMode.BLOCKING
|
||||
default_configs = []
|
||||
modes = {}
|
||||
|
||||
def data(self) -> Dict[str, Any]:
|
||||
return dict(
|
||||
sshcfgdir=self.directories[0],
|
||||
sshstatedir=self.directories[1],
|
||||
sshlibdir="/usr/lib/openssh",
|
||||
)
|
||||
|
||||
|
||||
class DhcpService(ConfigService):
|
||||
name = "DHCP"
|
||||
group = GROUP_NAME
|
||||
directories = ["/etc/dhcp", "/var/lib/dhcp"]
|
||||
files = ["/etc/dhcp/dhcpd.conf"]
|
||||
executables = ["dhcpd"]
|
||||
dependencies = []
|
||||
startup = ["touch /var/lib/dhcp/dhcpd.leases", "dhcpd"]
|
||||
validate = ["pidof dhcpd"]
|
||||
shutdown = ["killall dhcpd"]
|
||||
validation_mode = ConfigServiceMode.BLOCKING
|
||||
default_configs = []
|
||||
modes = {}
|
||||
|
||||
def data(self) -> Dict[str, Any]:
|
||||
subnets = []
|
||||
for ifc in self.node.netifs():
|
||||
if getattr(ifc, "control", False):
|
||||
continue
|
||||
for x in ifc.addrlist:
|
||||
addr = x.split("/")[0]
|
||||
if netaddr.valid_ipv4(addr):
|
||||
net = netaddr.IPNetwork(x)
|
||||
# divide the address space in half
|
||||
index = (net.size - 2) // 2
|
||||
rangelow = net[index]
|
||||
rangehigh = net[-2]
|
||||
subnets.append((net.ip, net.netmask, rangelow, rangehigh, addr))
|
||||
return dict(subnets=subnets)
|
||||
|
||||
|
||||
class DhcpClientService(ConfigService):
|
||||
name = "DHCPClient"
|
||||
group = GROUP_NAME
|
||||
directories = []
|
||||
files = ["startdhcpclient.sh"]
|
||||
executables = ["dhclient"]
|
||||
dependencies = []
|
||||
startup = ["sh startdhcpclient.sh"]
|
||||
validate = ["pidof dhclient"]
|
||||
shutdown = ["killall dhclient"]
|
||||
validation_mode = ConfigServiceMode.BLOCKING
|
||||
default_configs = []
|
||||
modes = {}
|
||||
|
||||
def data(self) -> Dict[str, Any]:
|
||||
ifnames = []
|
||||
for ifc in self.node.netifs():
|
||||
if getattr(ifc, "control", False):
|
||||
continue
|
||||
ifnames.append(ifc.name)
|
||||
return dict(ifnames=ifnames)
|
||||
|
||||
|
||||
class FtpService(ConfigService):
|
||||
name = "FTP"
|
||||
group = GROUP_NAME
|
||||
directories = ["/var/run/vsftpd/empty", "/var/ftp"]
|
||||
files = ["vsftpd.conf"]
|
||||
executables = ["vsftpd"]
|
||||
dependencies = []
|
||||
startup = ["vsftpd ./vsftpd.conf"]
|
||||
validate = ["pidof vsftpd"]
|
||||
shutdown = ["killall vsftpd"]
|
||||
validation_mode = ConfigServiceMode.BLOCKING
|
||||
default_configs = []
|
||||
modes = {}
|
||||
|
||||
|
||||
class PcapService(ConfigService):
|
||||
name = "pcap"
|
||||
group = GROUP_NAME
|
||||
directories = []
|
||||
files = ["pcap.sh"]
|
||||
executables = ["tcpdump"]
|
||||
dependencies = []
|
||||
startup = ["sh pcap.sh start"]
|
||||
validate = ["pidof tcpdump"]
|
||||
shutdown = ["sh pcap.sh stop"]
|
||||
validation_mode = ConfigServiceMode.BLOCKING
|
||||
default_configs = []
|
||||
modes = {}
|
||||
|
||||
def data(self) -> Dict[str, Any]:
|
||||
ifnames = []
|
||||
for ifc in self.node.netifs():
|
||||
if getattr(ifc, "control", False):
|
||||
continue
|
||||
ifnames.append(ifc.name)
|
||||
return dict(ifnames=ifnames)
|
||||
|
||||
|
||||
class RadvdService(ConfigService):
|
||||
name = "radvd"
|
||||
group = GROUP_NAME
|
||||
directories = ["/etc/radvd"]
|
||||
files = ["/etc/radvd/radvd.conf"]
|
||||
executables = ["radvd"]
|
||||
dependencies = []
|
||||
startup = ["radvd -C /etc/radvd/radvd.conf -m logfile -l /var/log/radvd.log"]
|
||||
validate = ["pidof radvd"]
|
||||
shutdown = ["pkill radvd"]
|
||||
validation_mode = ConfigServiceMode.BLOCKING
|
||||
default_configs = []
|
||||
modes = {}
|
||||
|
||||
def data(self) -> Dict[str, Any]:
|
||||
interfaces = []
|
||||
for ifc in self.node.netifs():
|
||||
if getattr(ifc, "control", False):
|
||||
continue
|
||||
prefixes = []
|
||||
for x in ifc.addrlist:
|
||||
addr = x.split("/")[0]
|
||||
if netaddr.valid_ipv6(addr):
|
||||
prefixes.append(x)
|
||||
if not prefixes:
|
||||
continue
|
||||
interfaces.append((ifc.name, prefixes))
|
||||
return dict(interfaces=interfaces)
|
||||
|
||||
|
||||
class AtdService(ConfigService):
|
||||
name = "atd"
|
||||
group = GROUP_NAME
|
||||
directories = ["/var/spool/cron/atjobs", "/var/spool/cron/atspool"]
|
||||
files = ["startatd.sh"]
|
||||
executables = ["atd"]
|
||||
dependencies = []
|
||||
startup = ["sh startatd.sh"]
|
||||
validate = ["pidof atd"]
|
||||
shutdown = ["pkill atd"]
|
||||
validation_mode = ConfigServiceMode.BLOCKING
|
||||
default_configs = []
|
||||
modes = {}
|
||||
|
||||
|
||||
class HttpService(ConfigService):
|
||||
name = "HTTP"
|
||||
group = GROUP_NAME
|
||||
directories = [
|
||||
"/etc/apache2",
|
||||
"/var/run/apache2",
|
||||
"/var/log/apache2",
|
||||
"/run/lock",
|
||||
"/var/lock/apache2",
|
||||
"/var/www",
|
||||
]
|
||||
files = ["/etc/apache2/apache2.conf", "/etc/apache2/envvars", "/var/www/index.html"]
|
||||
executables = ["apache2ctl"]
|
||||
dependencies = []
|
||||
startup = ["chown www-data /var/lock/apache2", "apache2ctl start"]
|
||||
validate = ["pidof apache2"]
|
||||
shutdown = ["apache2ctl stop"]
|
||||
validation_mode = ConfigServiceMode.BLOCKING
|
||||
default_configs = []
|
||||
modes = {}
|
||||
|
||||
def data(self) -> Dict[str, Any]:
|
||||
interfaces = []
|
||||
for ifc in self.node.netifs():
|
||||
if getattr(ifc, "control", False):
|
||||
continue
|
||||
interfaces.append(ifc)
|
||||
return dict(interfaces=interfaces)
|
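A quick worked example of the DhcpService pool arithmetic above (a sketch using only netaddr; the interface address is made up): for 10.0.0.15/24 the allocated range covers the upper half of the subnet.

from netaddr import IPNetwork

# hypothetical address as it would appear in ifc.addrlist
net = IPNetwork("10.0.0.15/24")
index = (net.size - 2) // 2   # 254 // 2 = 127
rangelow = net[index]         # 10.0.0.127
rangehigh = net[-2]           # 10.0.0.254
print(net.ip, net.netmask, rangelow, rangehigh)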
daemon/core/configservices/utilservices/templates/apache2.conf | 102 (new file)
@ -0,0 +1,102 @@
|
|||
# apache2.conf generated by utility.py:HttpService
|
||||
Mutex file:$APACHE_LOCK_DIR default
|
||||
|
||||
PidFile $APACHE_PID_FILE
|
||||
Timeout 300
|
||||
KeepAlive On
|
||||
MaxKeepAliveRequests 100
|
||||
KeepAliveTimeout 5
|
||||
|
||||
LoadModule mpm_worker_module /usr/lib/apache2/modules/mod_mpm_worker.so
|
||||
|
||||
<IfModule mpm_prefork_module>
|
||||
StartServers 5
|
||||
MinSpareServers 5
|
||||
MaxSpareServers 10
|
||||
MaxClients 150
|
||||
MaxRequestsPerChild 0
|
||||
</IfModule>
|
||||
|
||||
<IfModule mpm_worker_module>
|
||||
StartServers 2
|
||||
MinSpareThreads 25
|
||||
MaxSpareThreads 75
|
||||
ThreadLimit 64
|
||||
ThreadsPerChild 25
|
||||
MaxClients 150
|
||||
MaxRequestsPerChild 0
|
||||
</IfModule>
|
||||
|
||||
<IfModule mpm_event_module>
|
||||
StartServers 2
|
||||
MinSpareThreads 25
|
||||
MaxSpareThreads 75
|
||||
ThreadLimit 64
|
||||
ThreadsPerChild 25
|
||||
MaxClients 150
|
||||
MaxRequestsPerChild 0
|
||||
</IfModule>
|
||||
|
||||
User $APACHE_RUN_USER
|
||||
Group $APACHE_RUN_GROUP
|
||||
|
||||
AccessFileName .htaccess
|
||||
|
||||
<Files ~ "^\\.ht">
|
||||
Require all denied
|
||||
</Files>
|
||||
|
||||
DefaultType None
|
||||
|
||||
HostnameLookups Off
|
||||
|
||||
ErrorLog $APACHE_LOG_DIR/error.log
|
||||
LogLevel warn
|
||||
|
||||
#Include mods-enabled/*.load
|
||||
#Include mods-enabled/*.conf
|
||||
LoadModule alias_module /usr/lib/apache2/modules/mod_alias.so
|
||||
LoadModule auth_basic_module /usr/lib/apache2/modules/mod_auth_basic.so
|
||||
LoadModule authz_core_module /usr/lib/apache2/modules/mod_authz_core.so
|
||||
LoadModule authz_host_module /usr/lib/apache2/modules/mod_authz_host.so
|
||||
LoadModule authz_user_module /usr/lib/apache2/modules/mod_authz_user.so
|
||||
LoadModule autoindex_module /usr/lib/apache2/modules/mod_autoindex.so
|
||||
LoadModule dir_module /usr/lib/apache2/modules/mod_dir.so
|
||||
LoadModule env_module /usr/lib/apache2/modules/mod_env.so
|
||||
|
||||
NameVirtualHost *:80
|
||||
Listen 80
|
||||
|
||||
<IfModule mod_ssl.c>
|
||||
Listen 443
|
||||
</IfModule>
|
||||
<IfModule mod_gnutls.c>
|
||||
Listen 443
|
||||
</IfModule>
|
||||
|
||||
LogFormat "%v:%p %h %l %u %t \\"%r\\" %>s %O \\"%{Referer}i\\" \\"%{User-Agent}i\\"" vhost_combined
|
||||
LogFormat "%h %l %u %t \\"%r\\" %>s %O \\"%{Referer}i\\" \\"%{User-Agent}i\\"" combined
|
||||
LogFormat "%h %l %u %t \\"%r\\" %>s %O" common
|
||||
LogFormat "%{Referer}i -> %U" referer
|
||||
LogFormat "%{User-agent}i" agent
|
||||
|
||||
ServerTokens OS
|
||||
ServerSignature On
|
||||
TraceEnable Off
|
||||
|
||||
<VirtualHost *:80>
|
||||
ServerAdmin webmaster@localhost
|
||||
DocumentRoot /var/www
|
||||
<Directory />
|
||||
Options FollowSymLinks
|
||||
AllowOverride None
|
||||
</Directory>
|
||||
<Directory /var/www/>
|
||||
Options Indexes FollowSymLinks MultiViews
|
||||
AllowOverride None
|
||||
Require all granted
|
||||
</Directory>
|
||||
ErrorLog $APACHE_LOG_DIR/error.log
|
||||
LogLevel warn
|
||||
CustomLog $APACHE_LOG_DIR/access.log combined
|
||||
</VirtualHost>
|
|
@ -0,0 +1,4 @@
#!/bin/sh
# auto-generated by DefaultMulticastRoute service (utility.py)
# the first interface is chosen below; please change it as needed
ip route add 224.0.0.0/4 dev ${ifname}
@ -0,0 +1,5 @@
#!/bin/sh
# auto-generated by DefaultRoute service
% for route in routes:
ip route add default via ${route}
% endfor
daemon/core/configservices/utilservices/templates/dhcpd.conf | 22 (new file)
@ -0,0 +1,22 @@
# auto-generated by DHCP service (utility.py)
# NOTE: move these option lines into the desired pool { } block(s) below
#option domain-name "test.com";
#option domain-name-servers 10.0.0.1;
#option routers 10.0.0.1;

log-facility local6;

default-lease-time 600;
max-lease-time 7200;

ddns-update-style none;

% for subnet, netmask, rangelow, rangehigh, addr in subnets:
subnet ${subnet} netmask ${netmask} {
  pool {
    range ${rangelow} ${rangehigh};
    default-lease-time 600;
    option routers ${addr};
  }
}
% endfor
daemon/core/configservices/utilservices/templates/envvars | 10 (new file)
@ -0,0 +1,10 @@
# this file is used by apache2ctl - generated by utility.py:HttpService
# these settings come from a default Ubuntu apache2 installation
export APACHE_RUN_USER=www-data
export APACHE_RUN_GROUP=www-data
export APACHE_PID_FILE=/var/run/apache2.pid
export APACHE_RUN_DIR=/var/run/apache2
export APACHE_LOCK_DIR=/var/lock/apache2
export APACHE_LOG_DIR=/var/log/apache2
export LANG=C
export LANG
daemon/core/configservices/utilservices/templates/index.html | 13 (new file)
@ -0,0 +1,13 @@
<!-- generated by utility.py:HttpService -->
<html>
<body>
<h1>${node.name} web server</h1>
<p>This is the default web page for this server.</p>
<p>The web server software is running but no content has been added, yet.</p>
<ul>
% for ifc in interfaces:
<li>${ifc.name} - ${ifc.addrlist}</li>
% endfor
</ul>
</body>
</html>
|
@ -0,0 +1,16 @@
#!/bin/sh
# auto-generated by IPForward service (utility.py)
sysctl -w net.ipv4.conf.all.forwarding=1
sysctl -w net.ipv4.conf.default.forwarding=1
sysctl -w net.ipv6.conf.all.forwarding=1
sysctl -w net.ipv6.conf.default.forwarding=1
sysctl -w net.ipv4.conf.all.send_redirects=0
sysctl -w net.ipv4.conf.default.send_redirects=0
sysctl -w net.ipv4.conf.all.rp_filter=0
sysctl -w net.ipv4.conf.default.rp_filter=0
# setup forwarding for node interfaces
% for devname in devnames:
sysctl -w net.ipv4.conf.${devname}.forwarding=1
sysctl -w net.ipv4.conf.${devname}.send_redirects=0
sysctl -w net.ipv4.conf.${devname}.rp_filter=0
% endfor
daemon/core/configservices/utilservices/templates/pcap.sh | 11 (new file)
@ -0,0 +1,11 @@
#!/bin/sh
# set tcpdump options here (see 'man tcpdump' for help)
# (-s snap length, -C limit pcap file length, -n disable name resolution)
if [ "x$1" = "xstart" ]; then
% for ifname in ifnames:
    tcpdump -s 12288 -C 10 -n -w ${node.name}.${ifname}.pcap -i ${ifname} < /dev/null &
% endfor
elif [ "x$1" = "xstop" ]; then
    mkdir -p $SESSION_DIR/pcap
    mv *.pcap $SESSION_DIR/pcap
fi;
daemon/core/configservices/utilservices/templates/radvd.conf | 19 (new file)
@ -0,0 +1,19 @@
# auto-generated by RADVD service (utility.py)
% for ifname, prefixes in interfaces:
interface ${ifname}
{
    AdvSendAdvert on;
    MinRtrAdvInterval 3;
    MaxRtrAdvInterval 10;
    AdvDefaultPreference low;
    AdvHomeAgentFlag off;
    % for prefix in prefixes:
    prefix ${prefix}
    {
        AdvOnLink on;
        AdvAutonomous on;
        AdvRouterAddr on;
    };
    % endfor
};
% endfor
|
@ -0,0 +1,37 @@
# auto-generated by SSH service (utility.py)
Port 22
Protocol 2
HostKey ${sshcfgdir}/ssh_host_rsa_key
UsePrivilegeSeparation yes
PidFile ${sshstatedir}/sshd.pid

KeyRegenerationInterval 3600
ServerKeyBits 768

SyslogFacility AUTH
LogLevel INFO

LoginGraceTime 120
PermitRootLogin yes
StrictModes yes

RSAAuthentication yes
PubkeyAuthentication yes

IgnoreRhosts yes
RhostsRSAAuthentication no
HostbasedAuthentication no

PermitEmptyPasswords no
ChallengeResponseAuthentication no

X11Forwarding yes
X11DisplayOffset 10
PrintMotd no
PrintLastLog yes
TCPKeepAlive yes

AcceptEnv LANG LC_*
Subsystem sftp ${sshlibdir}/sftp-server
UsePAM yes
UseDNS no
|
@ -0,0 +1,5 @@
#!/bin/sh
echo 00001 > /var/spool/cron/atjobs/.SEQ
chown -R daemon /var/spool/cron/*
chmod -R 700 /var/spool/cron/*
atd
|
@ -0,0 +1,8 @@
#!/bin/sh
# auto-generated by DHCPClient service (utility.py)
# uncomment the mkdir and symlink lines below to enable client-side DNS resolution based on the DHCP server response
#mkdir -p /var/run/resolvconf/interface
% for ifname in ifnames:
#ln -s /var/run/resolvconf/interface/${ifname}.dhclient /var/run/resolvconf/resolv.conf
dhclient -nw -pf /var/run/dhclient-${ifname}.pid -lf /var/run/dhclient-${ifname}.lease ${ifname}
% endfor
|
@ -0,0 +1,6 @@
#!/bin/sh
# auto-generated by SSH service (utility.py)
ssh-keygen -q -t rsa -N "" -f ${sshcfgdir}/ssh_host_rsa_key
chmod 655 ${sshstatedir}
# wait until RSA host key has been generated to launch sshd
$(which sshd) -f ${sshcfgdir}/sshd_config
|
@ -0,0 +1,7 @@
#!/bin/sh
# auto-generated by StaticRoute service (utility.py)
# NOTE: this service must be customized to be of any use
# Below are samples that you can uncomment and edit.
% for dest, addr in routes:
#ip route add ${dest} via ${addr}
% endfor
|
@ -0,0 +1,12 @@
# vsftpd.conf auto-generated by FTP service (utility.py)
listen=YES
anonymous_enable=YES
local_enable=YES
dirmessage_enable=YES
use_localtime=YES
xferlog_enable=YES
connect_from_port_20=YES
xferlog_file=/var/log/vsftpd.log
ftpd_banner=Welcome to the CORE FTP service
secure_chroot_dir=/var/run/vsftpd/empty
anon_root=/var/ftp
|
@ -1,27 +1,19 @@
|
|||
import os
|
||||
from core.utils import which
|
||||
|
||||
COREDPY_VERSION = "@PACKAGE_VERSION@"
|
||||
CORE_STATE_DIR = "@CORE_STATE_DIR@"
|
||||
COREDPY_VERSION = "@PACKAGE_VERSION@"
|
||||
CORE_CONF_DIR = "@CORE_CONF_DIR@"
|
||||
CORE_DATA_DIR = "@CORE_DATA_DIR@"
|
||||
QUAGGA_STATE_DIR = "@CORE_STATE_DIR@/run/quagga"
|
||||
FRR_STATE_DIR = "@CORE_STATE_DIR@/run/frr"
|
||||
|
||||
|
||||
def which(command):
|
||||
for path in os.environ["PATH"].split(os.pathsep):
|
||||
command_path = os.path.join(path, command)
|
||||
if os.path.isfile(command_path) and os.access(command_path, os.X_OK):
|
||||
return command_path
|
||||
|
||||
|
||||
VNODED_BIN = which("vnoded")
|
||||
VCMD_BIN = which("vcmd")
|
||||
BRCTL_BIN = which("brctl")
|
||||
SYSCTL_BIN = which("sysctl")
|
||||
IP_BIN = which("ip")
|
||||
TC_BIN = which("tc")
|
||||
EBTABLES_BIN = which("ebtables")
|
||||
MOUNT_BIN = which("mount")
|
||||
UMOUNT_BIN = which("umount")
|
||||
OVS_BIN = which("ovs-vsctl")
|
||||
OVS_FLOW_BIN = which("ovs-ofctl")
|
||||
VNODED_BIN = which("vnoded", required=True)
|
||||
VCMD_BIN = which("vcmd", required=True)
|
||||
SYSCTL_BIN = which("sysctl", required=True)
|
||||
IP_BIN = which("ip", required=True)
|
||||
ETHTOOL_BIN = which("ethtool", required=True)
|
||||
TC_BIN = which("tc", required=True)
|
||||
EBTABLES_BIN = which("ebtables", required=True)
|
||||
MOUNT_BIN = which("mount", required=True)
|
||||
UMOUNT_BIN = which("umount", required=True)
|
||||
OVS_BIN = which("ovs-vsctl", required=False)
|
||||
OVS_FLOW_BIN = which("ovs-ofctl", required=False)
|
||||
|
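The constants change above swaps the module-local PATH scan for core.utils.which with a required flag, so optional tools such as the OVS binaries no longer have to be present. A minimal sketch of what such a helper can look like, offered as an assumption for illustration rather than the exact core.utils implementation:

import shutil


def which(command, required):
    # locate an executable on PATH; only a missing *required* tool is an error
    found = shutil.which(command)
    if not found and required:
        raise ValueError(f"failed to find required executable: {command}")
    return found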
|
File diff suppressed because it is too large
|
@ -1,764 +0,0 @@
|
|||
"""
|
||||
Defines the basic objects for CORE emulation: the PyCoreObj base class, along with PyCoreNode,
|
||||
PyCoreNet, and PyCoreNetIf.
|
||||
"""
|
||||
|
||||
import os
|
||||
import shutil
|
||||
import socket
|
||||
import threading
|
||||
from socket import AF_INET
|
||||
from socket import AF_INET6
|
||||
|
||||
from core.data import NodeData, LinkData
|
||||
from core.enumerations import LinkTypes
|
||||
from core.misc import ipaddress
|
||||
|
||||
|
||||
class Position(object):
|
||||
"""
|
||||
Helper class for Cartesian coordinate position
|
||||
"""
|
||||
|
||||
def __init__(self, x=None, y=None, z=None):
|
||||
"""
|
||||
Creates a Position instance.
|
||||
|
||||
:param x: x position
|
||||
:param y: y position
|
||||
:param z: z position
|
||||
:return:
|
||||
"""
|
||||
self.x = x
|
||||
self.y = y
|
||||
self.z = z
|
||||
|
||||
def set(self, x=None, y=None, z=None):
|
||||
"""
|
||||
Returns True if the position has actually changed.
|
||||
|
||||
:param float x: x position
|
||||
:param float y: y position
|
||||
:param float z: z position
|
||||
:return: True if position changed, False otherwise
|
||||
:rtype: bool
|
||||
"""
|
||||
if self.x == x and self.y == y and self.z == z:
|
||||
return False
|
||||
self.x = x
|
||||
self.y = y
|
||||
self.z = z
|
||||
return True
|
||||
|
||||
def get(self):
|
||||
"""
|
||||
Retrieve x,y,z position.
|
||||
|
||||
:return: x,y,z position tuple
|
||||
:rtype: tuple
|
||||
"""
|
||||
return self.x, self.y, self.z
|
||||
|
||||
|
||||
class PyCoreObj(object):
|
||||
"""
|
||||
Base class for CORE objects (nodes and networks)
|
||||
"""
|
||||
apitype = None
|
||||
|
||||
# TODO: appears start has no usage, verify and remove
|
||||
def __init__(self, session, objid=None, name=None, start=True):
|
||||
"""
|
||||
Creates a PyCoreObj instance.
|
||||
|
||||
:param core.session.Session session: CORE session object
|
||||
:param int objid: object id
|
||||
:param str name: object name
|
||||
:param bool start: start value
|
||||
:return:
|
||||
"""
|
||||
|
||||
self.session = session
|
||||
if objid is None:
|
||||
objid = session.get_object_id()
|
||||
self.objid = objid
|
||||
if name is None:
|
||||
name = "o%s" % self.objid
|
||||
self.name = name
|
||||
# ifindex is key, PyCoreNetIf instance is value
|
||||
self._netif = {}
|
||||
self.ifindex = 0
|
||||
self.canvas = None
|
||||
self.icon = None
|
||||
self.opaque = None
|
||||
self.position = Position()
|
||||
|
||||
def startup(self):
|
||||
"""
|
||||
Each object implements its own startup method.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def shutdown(self):
|
||||
"""
|
||||
Each object implements its own shutdown method.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def setposition(self, x=None, y=None, z=None):
|
||||
"""
|
||||
Set the (x,y,z) position of the object.
|
||||
|
||||
:param float x: x position
|
||||
:param float y: y position
|
||||
:param float z: z position
|
||||
:return: True if position changed, False otherwise
|
||||
:rtype: bool
|
||||
"""
|
||||
return self.position.set(x=x, y=y, z=z)
|
||||
|
||||
def getposition(self):
|
||||
"""
|
||||
Return an (x,y,z) tuple representing this object's position.
|
||||
|
||||
:return: x,y,z position tuple
|
||||
:rtype: tuple
|
||||
"""
|
||||
return self.position.get()
|
||||
|
||||
def ifname(self, ifindex):
|
||||
"""
|
||||
Retrieve interface name for index.
|
||||
|
||||
:param int ifindex: interface index
|
||||
:return: interface name
|
||||
:rtype: str
|
||||
"""
|
||||
return self._netif[ifindex].name
|
||||
|
||||
def netifs(self, sort=False):
|
||||
"""
|
||||
Retrieve network interfaces, sorted if desired.
|
||||
|
||||
:param bool sort: boolean used to determine if interfaces should be sorted
|
||||
:return: network interfaces
|
||||
:rtype: list
|
||||
"""
|
||||
if sort:
|
||||
return map(lambda k: self._netif[k], sorted(self._netif.keys()))
|
||||
else:
|
||||
return self._netif.itervalues()
|
||||
|
||||
def numnetif(self):
|
||||
"""
|
||||
Return the attached interface count.
|
||||
|
||||
:return: number of network interfaces
|
||||
:rtype: int
|
||||
"""
|
||||
return len(self._netif)
|
||||
|
||||
def getifindex(self, netif):
|
||||
"""
|
||||
Retrieve index for an interface.
|
||||
|
||||
:param PyCoreNetIf netif: interface to get index for
|
||||
:return: interface index if found, -1 otherwise
|
||||
:rtype: int
|
||||
"""
|
||||
|
||||
for ifindex in self._netif:
|
||||
if self._netif[ifindex] is netif:
|
||||
return ifindex
|
||||
|
||||
return -1
|
||||
|
||||
def newifindex(self):
|
||||
"""
|
||||
Create a new interface index.
|
||||
|
||||
:return: interface index
|
||||
:rtype: int
|
||||
"""
|
||||
while self.ifindex in self._netif:
|
||||
self.ifindex += 1
|
||||
ifindex = self.ifindex
|
||||
self.ifindex += 1
|
||||
return ifindex
|
||||
|
||||
def data(self, message_type, lat=None, lon=None, alt=None):
|
||||
"""
|
||||
Build a data object for this node.
|
||||
|
||||
:param message_type: purpose for the data object we are creating
|
||||
:param float lat: latitude
|
||||
:param float lon: longitude
|
||||
:param float alt: altitude
|
||||
:return: node data object
|
||||
:rtype: core.data.NodeData
|
||||
"""
|
||||
if self.apitype is None:
|
||||
return None
|
||||
|
||||
x, y, _ = self.getposition()
|
||||
|
||||
model = None
|
||||
if hasattr(self, "type"):
|
||||
model = self.type
|
||||
|
||||
emulation_server = None
|
||||
if hasattr(self, "server"):
|
||||
emulation_server = self.server
|
||||
|
||||
services = None
|
||||
if hasattr(self, "services") and len(self.services) != 0:
|
||||
nodeservices = []
|
||||
for s in self.services:
|
||||
nodeservices.append(s._name)
|
||||
services = "|".join(nodeservices)
|
||||
|
||||
node_data = NodeData(
|
||||
message_type=message_type,
|
||||
id=self.objid,
|
||||
node_type=self.apitype,
|
||||
name=self.name,
|
||||
emulation_id=self.objid,
|
||||
canvas=self.canvas,
|
||||
icon=self.icon,
|
||||
opaque=self.opaque,
|
||||
x_position=x,
|
||||
y_position=y,
|
||||
latitude=lat,
|
||||
longitude=lon,
|
||||
altitude=alt,
|
||||
model=model,
|
||||
emulation_server=emulation_server,
|
||||
services=services
|
||||
)
|
||||
|
||||
return node_data
|
||||
|
||||
def all_link_data(self, flags):
|
||||
"""
|
||||
Build CORE Link data for this object. There is no default
|
||||
method for PyCoreObjs as PyCoreNodes do not implement this but
|
||||
PyCoreNets do.
|
||||
|
||||
:param flags: message flags
|
||||
:return: list of link data
|
||||
:rtype: core.data.LinkData
|
||||
"""
|
||||
return []
|
||||
|
||||
|
||||
class PyCoreNode(PyCoreObj):
|
||||
"""
|
||||
Base class for CORE nodes.
|
||||
"""
|
||||
|
||||
def __init__(self, session, objid=None, name=None, start=True):
|
||||
"""
|
||||
Create a PyCoreNode instance.
|
||||
|
||||
:param core.session.Session session: CORE session object
|
||||
:param int objid: object id
|
||||
:param str name: object name
|
||||
:param bool start: boolean for starting
|
||||
"""
|
||||
PyCoreObj.__init__(self, session, objid, name, start=start)
|
||||
self.services = []
|
||||
if not hasattr(self, "type"):
|
||||
self.type = None
|
||||
self.nodedir = None
|
||||
self.tmpnodedir = False
|
||||
|
||||
def addservice(self, service):
|
||||
"""
|
||||
Add a services to the service list.
|
||||
|
||||
:param core.service.CoreService service: service to add
|
||||
:return: nothing
|
||||
"""
|
||||
if service is not None:
|
||||
self.services.append(service)
|
||||
|
||||
def makenodedir(self):
|
||||
"""
|
||||
Create the node directory.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
if self.nodedir is None:
|
||||
self.nodedir = os.path.join(self.session.session_dir, self.name + ".conf")
|
||||
os.makedirs(self.nodedir)
|
||||
self.tmpnodedir = True
|
||||
else:
|
||||
self.tmpnodedir = False
|
||||
|
||||
def rmnodedir(self):
|
||||
"""
|
||||
Remove the node directory, unless preserve directory has been set.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
preserve = getattr(self.session.options, "preservedir", None)
|
||||
if preserve == "1":
|
||||
return
|
||||
|
||||
if self.tmpnodedir:
|
||||
shutil.rmtree(self.nodedir, ignore_errors=True)
|
||||
|
||||
def addnetif(self, netif, ifindex):
|
||||
"""
|
||||
Add network interface to node and set the network interface index if successful.
|
||||
|
||||
:param PyCoreNetIf netif: network interface to add
|
||||
:param int ifindex: interface index
|
||||
:return: nothing
|
||||
"""
|
||||
if ifindex in self._netif:
|
||||
raise ValueError("ifindex %s already exists" % ifindex)
|
||||
self._netif[ifindex] = netif
|
||||
# TODO: this should have probably been set ahead, seems bad to me, check for failure and fix
|
||||
netif.netindex = ifindex
|
||||
|
||||
def delnetif(self, ifindex):
|
||||
"""
|
||||
Delete a network interface
|
||||
|
||||
:param int ifindex: interface index to delete
|
||||
:return: nothing
|
||||
"""
|
||||
if ifindex not in self._netif:
|
||||
raise ValueError("ifindex %s does not exist" % ifindex)
|
||||
netif = self._netif.pop(ifindex)
|
||||
netif.shutdown()
|
||||
del netif
|
||||
|
||||
# TODO: net parameter is not used, remove
|
||||
def netif(self, ifindex, net=None):
|
||||
"""
|
||||
Retrieve network interface.
|
||||
|
||||
:param int ifindex: index of interface to retrieve
|
||||
:param PyCoreNetIf net: network node
|
||||
:return: network interface, or None if not found
|
||||
:rtype: PyCoreNetIf
|
||||
"""
|
||||
if ifindex in self._netif:
|
||||
return self._netif[ifindex]
|
||||
else:
|
||||
return None
|
||||
|
||||
def attachnet(self, ifindex, net):
|
||||
"""
|
||||
Attach a network.
|
||||
|
||||
:param int ifindex: interface of index to attach
|
||||
:param PyCoreNetIf net: network to attach
|
||||
:return:
|
||||
"""
|
||||
if ifindex not in self._netif:
|
||||
raise ValueError("ifindex %s does not exist" % ifindex)
|
||||
self._netif[ifindex].attachnet(net)
|
||||
|
||||
def detachnet(self, ifindex):
|
||||
"""
|
||||
Detach network interface.
|
||||
|
||||
:param int ifindex: interface index to detach
|
||||
:return: nothing
|
||||
"""
|
||||
if ifindex not in self._netif:
|
||||
raise ValueError("ifindex %s does not exist" % ifindex)
|
||||
self._netif[ifindex].detachnet()
|
||||
|
||||
def setposition(self, x=None, y=None, z=None):
|
||||
"""
|
||||
Set position.
|
||||
|
||||
:param x: x position
|
||||
:param y: y position
|
||||
:param z: z position
|
||||
:return: nothing
|
||||
"""
|
||||
changed = super(PyCoreNode, self).setposition(x, y, z)
|
||||
if changed:
|
||||
for netif in self.netifs(sort=True):
|
||||
netif.setposition(x, y, z)
|
||||
|
||||
def commonnets(self, obj, want_ctrl=False):
|
||||
"""
|
||||
Given another node or net object, return common networks between
|
||||
this node and that object. A list of tuples is returned, with each tuple
|
||||
consisting of (network, interface1, interface2).
|
||||
|
||||
:param obj: object to get common network with
|
||||
:param want_ctrl: flag set to determine if control network are wanted
|
||||
:return: tuples of common networks
|
||||
:rtype: list
|
||||
"""
|
||||
common = []
|
||||
for netif1 in self.netifs():
|
||||
if not want_ctrl and hasattr(netif1, "control"):
|
||||
continue
|
||||
for netif2 in obj.netifs():
|
||||
if netif1.net == netif2.net:
|
||||
common.append((netif1.net, netif1, netif2))
|
||||
|
||||
return common
|
||||
|
||||
def check_cmd(self, args):
|
||||
"""
|
||||
Runs shell command on node.
|
||||
|
||||
:param list[str]|str args: command to run
|
||||
:return: combined stdout and stderr
|
||||
:rtype: str
|
||||
:raises CoreCommandError: when a non-zero exit status occurs
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def cmd(self, args, wait=True):
|
||||
"""
|
||||
Runs shell command on node, with option to not wait for a result.
|
||||
|
||||
:param list[str]|str args: command to run
|
||||
:param bool wait: wait for command to exit, defaults to True
|
||||
:return: exit status for command
|
||||
:rtype: int
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def cmd_output(self, args):
|
||||
"""
|
||||
Runs shell command on node and get exit status and output.
|
||||
|
||||
:param list[str]|str args: command to run
|
||||
:return: exit status and combined stdout and stderr
|
||||
:rtype: tuple[int, str]
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def termcmdstring(self, sh):
|
||||
"""
|
||||
Create a terminal command string.
|
||||
|
||||
:param str sh: shell to execute command in
|
||||
:return: str
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
class PyCoreNet(PyCoreObj):
|
||||
"""
|
||||
Base class for networks
|
||||
"""
|
||||
linktype = LinkTypes.WIRED.value
|
||||
|
||||
def startup(self):
|
||||
"""
|
||||
Each object implements its own startup method.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def shutdown(self):
|
||||
"""
|
||||
Each object implements its own shutdown method.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def __init__(self, session, objid, name, start=True):
|
||||
"""
|
||||
Create a PyCoreNet instance.
|
||||
|
||||
:param core.session.Session session: CORE session object
|
||||
:param int objid: object id
|
||||
:param str name: object name
|
||||
:param bool start: should object start
|
||||
"""
|
||||
PyCoreObj.__init__(self, session, objid, name, start=start)
|
||||
self._linked = {}
|
||||
self._linked_lock = threading.Lock()
|
||||
|
||||
def attach(self, netif):
|
||||
"""
|
||||
Attach network interface.
|
||||
|
||||
:param PyCoreNetIf netif: network interface to attach
|
||||
:return: nothing
|
||||
"""
|
||||
i = self.newifindex()
|
||||
self._netif[i] = netif
|
||||
netif.netifi = i
|
||||
with self._linked_lock:
|
||||
self._linked[netif] = {}
|
||||
|
||||
def detach(self, netif):
|
||||
"""
|
||||
Detach network interface.
|
||||
|
||||
:param PyCoreNetIf netif: network interface to detach
|
||||
:return: nothing
|
||||
"""
|
||||
del self._netif[netif.netifi]
|
||||
netif.netifi = None
|
||||
with self._linked_lock:
|
||||
del self._linked[netif]
|
||||
|
||||
def all_link_data(self, flags):
|
||||
"""
|
||||
Build link data objects for this network. Each link object describes a link
|
||||
between this network and a node.
|
||||
"""
|
||||
all_links = []
|
||||
|
||||
# build a link message from this network node to each node having a
|
||||
# connected interface
|
||||
for netif in self.netifs(sort=True):
|
||||
if not hasattr(netif, "node"):
|
||||
continue
|
||||
otherobj = netif.node
|
||||
uni = False
|
||||
if otherobj is None:
|
||||
# two layer-2 switches/hubs linked together via linknet()
|
||||
if not hasattr(netif, "othernet"):
|
||||
continue
|
||||
otherobj = netif.othernet
|
||||
if otherobj.objid == self.objid:
|
||||
continue
|
||||
netif.swapparams('_params_up')
|
||||
upstream_params = netif.getparams()
|
||||
netif.swapparams('_params_up')
|
||||
if netif.getparams() != upstream_params:
|
||||
uni = True
|
||||
|
||||
unidirectional = 0
|
||||
if uni:
|
||||
unidirectional = 1
|
||||
|
||||
interface2_ip4 = None
|
||||
interface2_ip4_mask = None
|
||||
interface2_ip6 = None
|
||||
interface2_ip6_mask = None
|
||||
for address in netif.addrlist:
|
||||
ip, sep, mask = address.partition('/')
|
||||
mask = int(mask)
|
||||
if ipaddress.is_ipv4_address(ip):
|
||||
family = AF_INET
|
||||
ipl = socket.inet_pton(family, ip)
|
||||
interface2_ip4 = ipaddress.IpAddress(af=family, address=ipl)
|
||||
interface2_ip4_mask = mask
|
||||
else:
|
||||
family = AF_INET6
|
||||
ipl = socket.inet_pton(family, ip)
|
||||
interface2_ip6 = ipaddress.IpAddress(af=family, address=ipl)
|
||||
interface2_ip6_mask = mask
|
||||
|
||||
link_data = LinkData(
|
||||
message_type=flags,
|
||||
node1_id=self.objid,
|
||||
node2_id=otherobj.objid,
|
||||
link_type=self.linktype,
|
||||
unidirectional=unidirectional,
|
||||
interface2_id=otherobj.getifindex(netif),
|
||||
interface2_mac=netif.hwaddr,
|
||||
interface2_ip4=interface2_ip4,
|
||||
interface2_ip4_mask=interface2_ip4_mask,
|
||||
interface2_ip6=interface2_ip6,
|
||||
interface2_ip6_mask=interface2_ip6_mask,
|
||||
delay=netif.getparam("delay"),
|
||||
bandwidth=netif.getparam("bw"),
|
||||
dup=netif.getparam("duplicate"),
|
||||
jitter=netif.getparam("jitter")
|
||||
)
|
||||
|
||||
all_links.append(link_data)
|
||||
|
||||
if not uni:
|
||||
continue
|
||||
|
||||
netif.swapparams('_params_up')
|
||||
link_data = LinkData(
|
||||
message_type=0,
|
||||
node1_id=otherobj.objid,
|
||||
node2_id=self.objid,
|
||||
unidirectional=1,
|
||||
delay=netif.getparam("delay"),
|
||||
bandwidth=netif.getparam("bw"),
|
||||
dup=netif.getparam("duplicate"),
|
||||
jitter=netif.getparam("jitter")
|
||||
)
|
||||
netif.swapparams('_params_up')
|
||||
|
||||
all_links.append(link_data)
|
||||
|
||||
return all_links
|
||||
|
||||
|
||||
class PyCoreNetIf(object):
|
||||
"""
|
||||
Base class for network interfaces.
|
||||
"""
|
||||
|
||||
def __init__(self, node, name, mtu):
|
||||
"""
|
||||
Creates a PyCoreNetIf instance.
|
||||
|
||||
:param core.coreobj.PyCoreNode node: node for interface
|
||||
:param str name: interface name
|
||||
:param mtu: mtu value
|
||||
"""
|
||||
|
||||
self.node = node
|
||||
self.name = name
|
||||
if not isinstance(mtu, (int, long)):
|
||||
raise ValueError
|
||||
self.mtu = mtu
|
||||
self.net = None
|
||||
self._params = {}
|
||||
self.addrlist = []
|
||||
self.hwaddr = None
|
||||
self.poshook = None
|
||||
# used with EMANE
|
||||
self.transport_type = None
|
||||
# interface index on the network
|
||||
self.netindex = None
|
||||
# index used to find flow data
|
||||
self.flow_id = None
|
||||
|
||||
def startup(self):
|
||||
"""
|
||||
Startup method for the interface.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
pass
|
||||
|
||||
def shutdown(self):
|
||||
"""
|
||||
Shutdown method for the interface.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
pass
|
||||
|
||||
def attachnet(self, net):
|
||||
"""
|
||||
Attach network.
|
||||
|
||||
:param core.coreobj.PyCoreNet net: network to attach
|
||||
:return: nothing
|
||||
"""
|
||||
if self.net:
|
||||
self.detachnet()
|
||||
self.net = None
|
||||
|
||||
net.attach(self)
|
||||
self.net = net
|
||||
|
||||
def detachnet(self):
|
||||
"""
|
||||
Detach from a network.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
if self.net is not None:
|
||||
self.net.detach(self)
|
||||
|
||||
def addaddr(self, addr):
|
||||
"""
|
||||
Add address.
|
||||
|
||||
:param str addr: address to add
|
||||
:return: nothing
|
||||
"""
|
||||
|
||||
self.addrlist.append(addr)
|
||||
|
||||
def deladdr(self, addr):
|
||||
"""
|
||||
Delete address.
|
||||
|
||||
:param str addr: address to delete
|
||||
:return: nothing
|
||||
"""
|
||||
self.addrlist.remove(addr)
|
||||
|
||||
def sethwaddr(self, addr):
|
||||
"""
|
||||
Set hardware address.
|
||||
|
||||
:param core.misc.ipaddress.MacAddress addr: hardware address to set to.
|
||||
:return: nothing
|
||||
"""
|
||||
self.hwaddr = addr
|
||||
|
||||
def getparam(self, key):
|
||||
"""
|
||||
Retrieve a parameter from the, or None if the parameter does not exist.
|
||||
|
||||
:param key: parameter to get value for
|
||||
:return: parameter value
|
||||
"""
|
||||
return self._params.get(key)
|
||||
|
||||
def getparams(self):
|
||||
"""
|
||||
Return (key, value) pairs for parameters.
|
||||
"""
|
||||
parameters = []
|
||||
for k in sorted(self._params.keys()):
|
||||
parameters.append((k, self._params[k]))
|
||||
return parameters
|
||||
|
||||
def setparam(self, key, value):
|
||||
"""
|
||||
Set a parameter value, returns True if the parameter has changed.
|
||||
|
||||
:param key: parameter name to set
|
||||
:param value: parameter value
|
||||
:return: True if parameter changed, False otherwise
|
||||
"""
|
||||
# treat None and 0 as unchanged values
|
||||
current_value = self._params.get(key)
|
||||
if current_value == value or current_value <= 0 and value <= 0:
|
||||
return False
|
||||
|
||||
self._params[key] = value
|
||||
return True
|
||||
|
||||
def swapparams(self, name):
|
||||
"""
|
||||
Swap out parameters dict for name. If name does not exist,
|
||||
intialize it. This is for supporting separate upstream/downstream
|
||||
parameters when two layer-2 nodes are linked together.
|
||||
|
||||
:param str name: name of parameter to swap
|
||||
:return: nothing
|
||||
"""
|
||||
tmp = self._params
|
||||
if not hasattr(self, name):
|
||||
setattr(self, name, {})
|
||||
self._params = getattr(self, name)
|
||||
setattr(self, name, tmp)
|
||||
|
||||
def setposition(self, x, y, z):
|
||||
"""
|
||||
Dispatch position hook handler.
|
||||
|
||||
:param x: x position
|
||||
:param y: y position
|
||||
:param z: z position
|
||||
:return: nothing
|
||||
"""
|
||||
if self.poshook is not None:
|
||||
self.poshook(self, x, y, z)
|
|
@ -1,30 +0,0 @@
|
|||
"""
|
||||
Defines core server for handling TCP connections.
|
||||
"""
|
||||
|
||||
import SocketServer
|
||||
|
||||
from core.emulator.coreemu import CoreEmu
|
||||
|
||||
|
||||
class CoreServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
|
||||
"""
|
||||
TCP server class, manages sessions and spawns request handlers for
|
||||
incoming connections.
|
||||
"""
|
||||
daemon_threads = True
|
||||
allow_reuse_address = True
|
||||
|
||||
def __init__(self, server_address, handler_class, config=None):
|
||||
"""
|
||||
Server class initialization takes configuration data and calls
|
||||
the SocketServer constructor
|
||||
|
||||
:param tuple[str, int] server_address: server host and port to use
|
||||
:param class handler_class: request handler
|
||||
:param dict config: configuration setting
|
||||
:return:
|
||||
"""
|
||||
self.coreemu = CoreEmu(config)
|
||||
self.config = config
|
||||
SocketServer.TCPServer.__init__(self, server_address, handler_class)
|
|
@ -1,120 +0,0 @@
|
|||
"""
|
||||
CORE data objects.
|
||||
"""
|
||||
|
||||
import collections
|
||||
|
||||
ConfigData = collections.namedtuple("ConfigData", [
|
||||
"message_type",
|
||||
"node",
|
||||
"object",
|
||||
"type",
|
||||
"data_types",
|
||||
"data_values",
|
||||
"captions",
|
||||
"bitmap",
|
||||
"possible_values",
|
||||
"groups",
|
||||
"session",
|
||||
"interface_number",
|
||||
"network_id",
|
||||
"opaque"
|
||||
])
|
||||
ConfigData.__new__.__defaults__ = (None,) * len(ConfigData._fields)
|
||||
|
||||
EventData = collections.namedtuple("EventData", [
|
||||
"node",
|
||||
"event_type",
|
||||
"name",
|
||||
"data",
|
||||
"time",
|
||||
"session"
|
||||
])
|
||||
EventData.__new__.__defaults__ = (None,) * len(EventData._fields)
|
||||
|
||||
ExceptionData = collections.namedtuple("ExceptionData", [
|
||||
"node",
|
||||
"session",
|
||||
"level",
|
||||
"source",
|
||||
"date",
|
||||
"text",
|
||||
"opaque"
|
||||
])
|
||||
ExceptionData.__new__.__defaults__ = (None,) * len(ExceptionData._fields)
|
||||
|
||||
FileData = collections.namedtuple("FileData", [
|
||||
"message_type",
|
||||
"node",
|
||||
"name",
|
||||
"mode",
|
||||
"number",
|
||||
"type",
|
||||
"source",
|
||||
"session",
|
||||
"data",
|
||||
"compressed_data"
|
||||
])
|
||||
FileData.__new__.__defaults__ = (None,) * len(FileData._fields)
|
||||
|
||||
NodeData = collections.namedtuple("NodeData", [
|
||||
"message_type",
|
||||
"id",
|
||||
"node_type",
|
||||
"name",
|
||||
"ip_address",
|
||||
"mac_address",
|
||||
"ip6_address",
|
||||
"model",
|
||||
"emulation_id",
|
||||
"emulation_server",
|
||||
"session",
|
||||
"x_position",
|
||||
"y_position",
|
||||
"canvas",
|
||||
"network_id",
|
||||
"services",
|
||||
"latitude",
|
||||
"longitude",
|
||||
"altitude",
|
||||
"icon",
|
||||
"opaque"
|
||||
])
|
||||
NodeData.__new__.__defaults__ = (None,) * len(NodeData._fields)
|
||||
|
||||
LinkData = collections.namedtuple("LinkData", [
|
||||
"message_type",
|
||||
"node1_id",
|
||||
"node2_id",
|
||||
"delay",
|
||||
"bandwidth",
|
||||
"per",
|
||||
"dup",
|
||||
"jitter",
|
||||
"mer",
|
||||
"burst",
|
||||
"session",
|
||||
"mburst",
|
||||
"link_type",
|
||||
"gui_attributes",
|
||||
"unidirectional",
|
||||
"emulation_id",
|
||||
"network_id",
|
||||
"key",
|
||||
"interface1_id",
|
||||
"interface1_name",
|
||||
"interface1_ip4",
|
||||
"interface1_ip4_mask",
|
||||
"interface1_mac",
|
||||
"interface1_ip6",
|
||||
"interface1_ip6_mask",
|
||||
"interface2_id",
|
||||
"interface2_name",
|
||||
"interface2_ip4",
|
||||
"interface2_ip4_mask",
|
||||
"interface2_mac",
|
||||
"interface2_ip6",
|
||||
"interface2_ip6_mask",
|
||||
"opaque"
|
||||
])
|
||||
LinkData.__new__.__defaults__ = (None,) * len(LinkData._fields)
|
|
@@ -2,8 +2,9 @@
EMANE Bypass model for CORE
"""

from core.config import Configuration
from core.emane import emanemodel
from core.enumerations import ConfigDataTypes
from core.emulator.enumerations import ConfigDataTypes


class EmaneBypassModel(emanemodel.EmaneModel):

@@ -15,13 +16,19 @@ class EmaneBypassModel(emanemodel.EmaneModel):
    # mac definitions
    mac_library = "bypassmaclayer"
    mac_config = [
        ("none", ConfigDataTypes.BOOL.value, "0", "True,False",
         "There are no parameters for the bypass model."),
        Configuration(
            _id="none",
            _type=ConfigDataTypes.BOOL,
            default="0",
            label="There are no parameters for the bypass model.",
        )
    ]

    # phy definitions
    phy_library = "bypassphylayer"
    phy_config = []

    # override gui display tabs
    config_groups_override = "Bypass Parameters:1-1"
    @classmethod
    def load(cls, emane_prefix: str) -> None:
        # ignore default logic
        pass
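
The tuple entry above is replaced by a Configuration object carrying an id, type, default, and label. As a small illustration (not part of the diff), a model's Configuration list can be collapsed into an id-to-default mapping; the helper name is hypothetical:

    def config_defaults(configurations):
        # each Configuration exposes an id and a default value, as used above
        return {config.id: config.default for config in configurations}

    # for the bypass model this would simply yield {"none": "0"}
    defaults = config_defaults(EmaneBypassModel.mac_config)
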
@@ -2,9 +2,16 @@
commeffect.py: EMANE CommEffect model for CORE
"""

from core import logger
from core.emane import emanemanifest
from core.emane import emanemodel
import logging
import os
from typing import Dict, List

from lxml import etree

from core.config import ConfigGroup, Configuration
from core.emane import emanemanifest, emanemodel
from core.nodes.interface import CoreInterface
from core.xml import emanexml

try:
    from emane.events.commeffectevent import CommEffectEvent

@@ -12,14 +19,14 @@ except ImportError:
    try:
        from emanesh.events.commeffectevent import CommEffectEvent
    except ImportError:
        logger.warn("compatible emane python bindings not installed")
        logging.debug("compatible emane python bindings not installed")


def convert_none(x):
def convert_none(x: float) -> int:
    """
    Helper to use 0 for None values.
    """
    if type(x) is str:
    if isinstance(x, str):
        x = float(x)
    if x is None:
        return 0

@@ -31,92 +38,117 @@ class EmaneCommEffectModel(emanemodel.EmaneModel):
    name = "emane_commeffect"

    shim_library = "commeffectshim"
    shim_xml = "/usr/share/emane/manifest/commeffectshim.xml"
    shim_xml = "commeffectshim.xml"
    shim_defaults = {}
    config_shim = emanemanifest.parse(shim_xml, shim_defaults)
    config_shim = []

    config_groups_override = "CommEffect SHIM Parameters:1-%d" % len(config_shim)
    config_matrix_override = config_shim
    # comm effect does not need the default phy and external configurations
    phy_config = []
    external_config = []

    def build_xml_files(self, emane_manager, interface):
    @classmethod
    def load(cls, emane_prefix: str) -> None:
        shim_xml_path = os.path.join(emane_prefix, "share/emane/manifest", cls.shim_xml)
        cls.config_shim = emanemanifest.parse(shim_xml_path, cls.shim_defaults)

    @classmethod
    def configurations(cls) -> List[Configuration]:
        return cls.config_shim

    @classmethod
    def config_groups(cls) -> List[ConfigGroup]:
        return [ConfigGroup("CommEffect SHIM Parameters", 1, len(cls.configurations()))]

    def build_xml_files(
        self, config: Dict[str, str], interface: CoreInterface = None
    ) -> None:
        """
        Build the necessary nem and commeffect XMLs in the given path.
        If an individual NEM has a nonstandard config, we need to build
        that file also. Otherwise the WLAN-wide
        nXXemane_commeffectnem.xml, nXXemane_commeffectshim.xml are used.

        :param core.emane.emanemanager.EmaneManager emane_manager: core emane manager
        :param config: emane model configuration for the node and interface
        :param interface: interface for the emane node
        :return: nothing
        """
        values = emane_manager.getifcconfig(self.object_id, self.name, self.getdefaultvalues(), interface)
        if values is None:
            return

        # retrieve xml names
        nem_name = self.nem_name(interface)
        shim_name = self.shim_name(interface)
        nem_name = emanexml.nem_file_name(self, interface)
        shim_name = emanexml.shim_file_name(self, interface)

        nem_document = emane_manager.xmldoc("nem")
        nem_element = nem_document.getElementsByTagName("nem").pop()
        nem_element.setAttribute("name", "%s NEM" % self.name)
        nem_element.setAttribute("type", "unstructured")
        emane_manager.appendtransporttonem(nem_document, nem_element, self.object_id, interface)
        # create and write nem document
        nem_element = etree.Element("nem", name=f"{self.name} NEM", type="unstructured")
        transport_type = "virtual"
        if interface and interface.transport_type == "raw":
            transport_type = "raw"
        transport_file = emanexml.transport_file_name(self.id, transport_type)
        etree.SubElement(nem_element, "transport", definition=transport_file)

        shim_xml = emane_manager.xmlshimdefinition(nem_document, shim_name)
        nem_element.appendChild(shim_xml)
        emane_manager.xmlwrite(nem_document, nem_name)
        # set shim configuration
        etree.SubElement(nem_element, "shim", definition=shim_name)

        names = self.getnames()
        shim_names = list(names)
        shim_names.remove("filterfile")
        nem_file = os.path.join(self.session.session_dir, nem_name)
        emanexml.create_file(nem_element, "nem", nem_file)

        shim_document = emane_manager.xmldoc("shim")
        shim_element = shim_document.getElementsByTagName("shim").pop()
        shim_element.setAttribute("name", "%s SHIM" % self.name)
        shim_element.setAttribute("library", self.shim_library)
        # create and write shim document
        shim_element = etree.Element(
            "shim", name=f"{self.name} SHIM", library=self.shim_library
        )

        # append all shim options (except filterfile) to shimdoc
        for name in shim_names:
            value = self.valueof(name, values)
            param = emane_manager.xmlparam(shim_document, name, value)
            shim_element.appendChild(param)
        for configuration in self.config_shim:
            name = configuration.id
            if name == "filterfile":
                continue
            value = config[name]
            emanexml.add_param(shim_element, name, value)

        # empty filterfile is not allowed
        ff = self.valueof("filterfile", values)
        ff = config["filterfile"]
        if ff.strip() != "":
            shim_element.appendChild(emane_manager.xmlparam(shim_document, "filterfile", ff))
        emane_manager.xmlwrite(shim_document, shim_name)
            emanexml.add_param(shim_element, "filterfile", ff)

    def linkconfig(self, netif, bw=None, delay=None, loss=None, duplicate=None, jitter=None, netif2=None):
        shim_file = os.path.join(self.session.session_dir, shim_name)
        emanexml.create_file(shim_element, "shim", shim_file)

    def linkconfig(
        self,
        netif: CoreInterface,
        bw: float = None,
        delay: float = None,
        loss: float = None,
        duplicate: float = None,
        jitter: float = None,
        netif2: CoreInterface = None,
    ) -> None:
        """
        Generate CommEffect events when a Link Message is received having
        link parameters.
        """
        service = self.session.emane.service
        if service is None:
            logger.warn("%s: EMANE event service unavailable", self.name)
            logging.warning("%s: EMANE event service unavailable", self.name)
            return

        if netif is None or netif2 is None:
            logger.warn("%s: missing NEM information", self.name)
            logging.warning("%s: missing NEM information", self.name)
            return

        # TODO: batch these into multiple events per transmission
        # TODO: may want to split out seconds portion of delay and jitter
        event = CommEffectEvent()
        emane_node = self.session.get_object(self.object_id)
        emane_node = self.session.get_node(self.id)
        nemid = emane_node.getnemid(netif)
        nemid2 = emane_node.getnemid(netif2)
        mbw = bw
        logger.info("sending comm effect event")
        logging.info("sending comm effect event")
        event.append(
            nemid,
            latency=convert_none(delay),
            jitter=convert_none(jitter),
            loss=convert_none(loss),
            duplicate=convert_none(duplicate),
            unicast=long(convert_none(bw)),
            broadcast=long(convert_none(mbw))
            unicast=int(convert_none(bw)),
            broadcast=int(convert_none(mbw)),
        )
        service.publish(nemid2, event)
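
The rewritten build_xml_files drops minidom in favor of lxml: elements are created with etree.Element/SubElement and written out through the emanexml helpers. A standalone sketch of the shim construction, using plain lxml and made-up parameter values so it runs outside a CORE session:

    from lxml import etree

    # hypothetical CommEffect shim configuration values
    config = {"defaultconnectivitymode": "0", "filterfile": ""}

    shim = etree.Element("shim", name="emane_commeffect SHIM", library="commeffectshim")
    for name, value in config.items():
        # skip an empty filterfile, mirroring the check in build_xml_files above
        if name == "filterfile" and not value.strip():
            continue
        etree.SubElement(shim, "param", name=name, value=value)
    print(etree.tostring(shim, pretty_print=True).decode())
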
File diff suppressed because it is too large
@@ -1,5 +1,8 @@
from core import logger
from core.enumerations import ConfigDataTypes
import logging
from typing import Dict, List

from core.config import Configuration
from core.emulator.enumerations import ConfigDataTypes

manifest = None
try:

@@ -8,54 +11,50 @@ except ImportError:
    try:
        from emanesh import manifest
    except ImportError:
        logger.warn("compatible emane python bindings not installed")
        logging.debug("compatible emane python bindings not installed")


def _type_value(config_type):
def _type_value(config_type: str) -> ConfigDataTypes:
    """
    Convert emane configuration type to core configuration value.

    :param str config_type: emane configuration type
    :return:
    :param config_type: emane configuration type
    :return: core config type
    """
    config_type = config_type.upper()
    if config_type == "DOUBLE":
        config_type = "FLOAT"
    elif config_type == "INETADDR":
        config_type = "STRING"
    return ConfigDataTypes[config_type].value
    return ConfigDataTypes[config_type]


def _get_possible(config_type, config_regex):
def _get_possible(config_type: str, config_regex: str) -> List[str]:
    """
    Retrieve possible config value options based on emane regexes.

    :param str config_type: emane configuration type
    :param str config_regex: emane configuration regex
    :param config_type: emane configuration type
    :param config_regex: emane configuration regex
    :return: a string listing comma delimited values, if needed, empty string otherwise
    :rtype: str
    """
    if config_type == "bool":
        return "On,Off"
        return ["On", "Off"]

    if config_type == "string" and config_regex:
        possible = config_regex[2:-2]
        possible = possible.replace("|", ",")
        return possible
        return possible.split("|")

    return ""
    return []


def _get_default(config_type_name, config_value):
def _get_default(config_type_name: str, config_value: List[str]) -> str:
    """
    Convert default configuration values to one used by core.

    :param str config_type_name: emane configuration type name
    :param list config_value: emane configuration value list
    :param config_type_name: emane configuration type name
    :param config_value: emane configuration value list
    :return: default core config value
    :rtype: str
    """

    config_default = ""

    if config_type_name == "bool":

@@ -71,14 +70,13 @@ def _get_default(config_type_name, config_value):
    return config_default


def parse(manifest_path, defaults):
def parse(manifest_path: str, defaults: Dict[str, str]) -> List[Configuration]:
    """
    Parses a valid emane manifest file and converts the provided configuration values into ones used by core.

    :param str manifest_path: absolute manifest file path
    :param dict defaults: used to override default values for configurations
    :param manifest_path: absolute manifest file path
    :param defaults: used to override default values for configurations
    :return: list of core configuration values
    :rtype: list
    """

    # no results when emane bindings are not present

@@ -114,9 +112,15 @@ def parse(manifest_path, defaults):
        # define description and account for gui quirks
        config_descriptions = config_name
        if config_name.endswith("uri"):
            config_descriptions = "%s file" % config_descriptions
            config_descriptions = f"{config_descriptions} file"

        config_tuple = (config_name, config_type_value, config_default, possible, config_descriptions)
        configurations.append(config_tuple)
        configuration = Configuration(
            _id=config_name,
            _type=config_type_value,
            default=config_default,
            options=possible,
            label=config_descriptions,
        )
        configurations.append(configuration)

    return configurations
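
To summarize the mapping performed above: EMANE manifest types are folded onto core ConfigDataTypes (DOUBLE becomes FLOAT, INETADDR becomes STRING), and possible values now come back as a list rather than a comma string. A self-contained re-implementation for reference only (the regex is an illustrative example, not taken from a real manifest):

    def type_value(config_type: str) -> str:
        config_type = config_type.upper()
        if config_type == "DOUBLE":
            config_type = "FLOAT"
        elif config_type == "INETADDR":
            config_type = "STRING"
        return config_type

    def get_possible(config_type: str, config_regex: str) -> list:
        if config_type == "bool":
            return ["On", "Off"]
        if config_type == "string" and config_regex:
            return config_regex[2:-2].split("|")
        return []

    assert type_value("double") == "FLOAT"
    assert get_possible("string", "^(none|2ray|freespace)$") == ["none", "2ray", "freespace"]
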
@ -1,74 +1,17 @@
|
|||
"""
|
||||
Defines Emane Models used within CORE.
|
||||
"""
|
||||
import logging
|
||||
import os
|
||||
from typing import Dict, List
|
||||
|
||||
from core import logger
|
||||
from core.config import ConfigGroup, Configuration
|
||||
from core.emane import emanemanifest
|
||||
from core.misc import utils
|
||||
from core.mobility import WirelessModel
|
||||
from core.xml import xmlutils
|
||||
|
||||
|
||||
def value_to_params(doc, name, value):
|
||||
"""
|
||||
Helper to convert a parameter to a paramlist. Returns an XML paramlist, or None if the value does not expand to
|
||||
multiple values.
|
||||
|
||||
:param xml.dom.minidom.Document doc: xml document
|
||||
:param name: name of element for params
|
||||
:param str value: value string to convert to tuple
|
||||
:return: xml document with added params or None, when an invalid value has been provided
|
||||
"""
|
||||
try:
|
||||
values = utils.make_tuple_fromstr(value, str)
|
||||
except SyntaxError:
|
||||
logger.exception("error in value string to param list")
|
||||
return None
|
||||
|
||||
if not hasattr(values, "__iter__"):
|
||||
return None
|
||||
|
||||
if len(values) < 2:
|
||||
return None
|
||||
|
||||
return xmlutils.add_param_list_to_parent(doc, parent=None, name=name, values=values)
|
||||
|
||||
|
||||
class EmaneModelMetaClass(type):
|
||||
"""
|
||||
Hack into making class level properties to streamline emane model creation, until the Configurable class is
|
||||
removed or refactored.
|
||||
"""
|
||||
|
||||
@property
|
||||
def config_matrix(cls):
|
||||
"""
|
||||
Convenience method for creating the config matrix, allow for a custom override.
|
||||
|
||||
:param EmaneModel cls: emane class
|
||||
:return: config matrix value
|
||||
:rtype: list
|
||||
"""
|
||||
if cls.config_matrix_override:
|
||||
return cls.config_matrix_override
|
||||
else:
|
||||
return cls.mac_config + cls.phy_config
|
||||
|
||||
@property
|
||||
def config_groups(cls):
|
||||
"""
|
||||
Convenience method for creating the config groups, allow for a custom override.
|
||||
|
||||
:param EmaneModel cls: emane class
|
||||
:return: config groups value
|
||||
:rtype: str
|
||||
"""
|
||||
if cls.config_groups_override:
|
||||
return cls.config_groups_override
|
||||
else:
|
||||
mac_len = len(cls.mac_config)
|
||||
config_len = len(cls.config_matrix)
|
||||
return "MAC Parameters:1-%d|PHY Parameters:%d-%d" % (mac_len, mac_len + 1, config_len)
|
||||
from core.emulator.enumerations import ConfigDataTypes
|
||||
from core.errors import CoreError
|
||||
from core.location.mobility import WirelessModel
|
||||
from core.nodes.interface import CoreInterface
|
||||
from core.xml import emanexml
|
||||
|
||||
|
||||
class EmaneModel(WirelessModel):
|
||||
|
@ -77,7 +20,6 @@ class EmaneModel(WirelessModel):
|
|||
handling configuration messages based on the list of
|
||||
configurable parameters. Helper functions also live here.
|
||||
"""
|
||||
__metaclass__ = EmaneModelMetaClass
|
||||
|
||||
# default mac configuration settings
|
||||
mac_library = None
|
||||
|
@ -87,312 +29,152 @@ class EmaneModel(WirelessModel):
|
|||
|
||||
# default phy configuration settings, using the universal model
|
||||
phy_library = None
|
||||
phy_xml = "/usr/share/emane/manifest/emanephy.xml"
|
||||
phy_defaults = {
|
||||
"subid": "1",
|
||||
"propagationmodel": "2ray",
|
||||
"noisemode": "none"
|
||||
}
|
||||
phy_config = emanemanifest.parse(phy_xml, phy_defaults)
|
||||
phy_xml = "emanephy.xml"
|
||||
phy_defaults = {"subid": "1", "propagationmodel": "2ray", "noisemode": "none"}
|
||||
phy_config = []
|
||||
|
||||
# support for external configurations
|
||||
external_config = [
|
||||
Configuration("external", ConfigDataTypes.BOOL, default="0"),
|
||||
Configuration(
|
||||
"platformendpoint", ConfigDataTypes.STRING, default="127.0.0.1:40001"
|
||||
),
|
||||
Configuration(
|
||||
"transportendpoint", ConfigDataTypes.STRING, default="127.0.0.1:50002"
|
||||
),
|
||||
]
|
||||
|
||||
config_ignore = set()
|
||||
config_groups_override = None
|
||||
config_matrix_override = None
|
||||
|
||||
def __init__(self, session, object_id=None):
|
||||
WirelessModel.__init__(self, session, object_id)
|
||||
|
||||
def build_xml_files(self, emane_manager, interface):
|
||||
@classmethod
|
||||
def load(cls, emane_prefix: str) -> None:
|
||||
"""
|
||||
Builds xml files for emane. Includes a nem.xml file that points to both mac.xml and phy.xml definitions.
|
||||
Called after being loaded within the EmaneManager. Provides configured emane_prefix for
|
||||
parsing xml files.
|
||||
|
||||
:param core.emane.emanemanager.EmaneManager emane_manager: core emane manager
|
||||
:param emane_prefix: configured emane prefix path
|
||||
:return: nothing
|
||||
"""
|
||||
manifest_path = "share/emane/manifest"
|
||||
# load mac configuration
|
||||
mac_xml_path = os.path.join(emane_prefix, manifest_path, cls.mac_xml)
|
||||
cls.mac_config = emanemanifest.parse(mac_xml_path, cls.mac_defaults)
|
||||
|
||||
# load phy configuration
|
||||
phy_xml_path = os.path.join(emane_prefix, manifest_path, cls.phy_xml)
|
||||
cls.phy_config = emanemanifest.parse(phy_xml_path, cls.phy_defaults)
|
||||
|
||||
@classmethod
|
||||
def configurations(cls) -> List[Configuration]:
|
||||
"""
|
||||
Returns the combination all all configurations (mac, phy, and external).
|
||||
|
||||
:return: all configurations
|
||||
"""
|
||||
return cls.mac_config + cls.phy_config + cls.external_config
|
||||
|
||||
@classmethod
|
||||
def config_groups(cls) -> List[ConfigGroup]:
|
||||
"""
|
||||
Returns the defined configuration groups.
|
||||
|
||||
:return: list of configuration groups.
|
||||
"""
|
||||
mac_len = len(cls.mac_config)
|
||||
phy_len = len(cls.phy_config) + mac_len
|
||||
config_len = len(cls.configurations())
|
||||
return [
|
||||
ConfigGroup("MAC Parameters", 1, mac_len),
|
||||
ConfigGroup("PHY Parameters", mac_len + 1, phy_len),
|
||||
ConfigGroup("External Parameters", phy_len + 1, config_len),
|
||||
]
|
||||
|
||||
def build_xml_files(
|
||||
self, config: Dict[str, str], interface: CoreInterface = None
|
||||
) -> None:
|
||||
"""
|
||||
Builds xml files for this emane model. Creates a nem.xml file that points to
|
||||
both mac.xml and phy.xml definitions.
|
||||
|
||||
:param config: emane model configuration for the node and interface
|
||||
:param interface: interface for the emane node
|
||||
:return: nothing
|
||||
"""
|
||||
# retrieve configuration values
|
||||
values = emane_manager.getifcconfig(self.object_id, self.name, self.getdefaultvalues(), interface)
|
||||
if values is None:
|
||||
return
|
||||
nem_name = emanexml.nem_file_name(self, interface)
|
||||
mac_name = emanexml.mac_file_name(self, interface)
|
||||
phy_name = emanexml.phy_file_name(self, interface)
|
||||
|
||||
# create document and write to disk
|
||||
nem_name = self.nem_name(interface)
|
||||
nem_document = self.create_nem_doc(emane_manager, interface)
|
||||
emane_manager.xmlwrite(nem_document, nem_name)
|
||||
# remote server for file
|
||||
server = None
|
||||
if interface is not None:
|
||||
server = interface.node.server
|
||||
|
||||
# create mac document and write to disk
|
||||
mac_name = self.mac_name(interface)
|
||||
mac_document = self.create_mac_doc(emane_manager, values)
|
||||
emane_manager.xmlwrite(mac_document, mac_name)
|
||||
# check if this is external
|
||||
transport_type = "virtual"
|
||||
if interface and interface.transport_type == "raw":
|
||||
transport_type = "raw"
|
||||
transport_name = emanexml.transport_file_name(self.id, transport_type)
|
||||
|
||||
# create phy document and write to disk
|
||||
phy_name = self.phy_name(interface)
|
||||
phy_document = self.create_phy_doc(emane_manager, values)
|
||||
emane_manager.xmlwrite(phy_document, phy_name)
|
||||
# create nem xml file
|
||||
nem_file = os.path.join(self.session.session_dir, nem_name)
|
||||
emanexml.create_nem_xml(
|
||||
self, config, nem_file, transport_name, mac_name, phy_name, server
|
||||
)
|
||||
|
||||
def create_nem_doc(self, emane_manager, interface):
|
||||
"""
|
||||
Create the nem xml document.
|
||||
# create mac xml file
|
||||
mac_file = os.path.join(self.session.session_dir, mac_name)
|
||||
emanexml.create_mac_xml(self, config, mac_file, server)
|
||||
|
||||
:param core.emane.emanemanager.EmaneManager emane_manager: core emane manager
|
||||
:param interface: interface for the emane node
|
||||
:return: nem document
|
||||
:rtype: xml.dom.minidom.Document
|
||||
"""
|
||||
mac_name = self.mac_name(interface)
|
||||
phy_name = self.phy_name(interface)
|
||||
# create phy xml file
|
||||
phy_file = os.path.join(self.session.session_dir, phy_name)
|
||||
emanexml.create_phy_xml(self, config, phy_file, server)
|
||||
|
||||
nem_document = emane_manager.xmldoc("nem")
|
||||
nem_element = nem_document.getElementsByTagName("nem").pop()
|
||||
nem_element.setAttribute("name", "%s NEM" % self.name)
|
||||
emane_manager.appendtransporttonem(nem_document, nem_element, self.object_id, interface)
|
||||
|
||||
mac_element = nem_document.createElement("mac")
|
||||
mac_element.setAttribute("definition", mac_name)
|
||||
nem_element.appendChild(mac_element)
|
||||
|
||||
phy_element = nem_document.createElement("phy")
|
||||
phy_element.setAttribute("definition", phy_name)
|
||||
nem_element.appendChild(phy_element)
|
||||
|
||||
return nem_document
|
||||
|
||||
def create_mac_doc(self, emane_manager, values):
|
||||
"""
|
||||
Create the mac xml document.
|
||||
|
||||
:param core.emane.emanemanager.EmaneManager emane_manager: core emane manager
|
||||
:param tuple values: all current configuration values, mac + phy
|
||||
:return: nem document
|
||||
:rtype: xml.dom.minidom.Document
|
||||
"""
|
||||
names = list(self.getnames())
|
||||
mac_names = names[:len(self.mac_config)]
|
||||
|
||||
mac_document = emane_manager.xmldoc("mac")
|
||||
mac_element = mac_document.getElementsByTagName("mac").pop()
|
||||
mac_element.setAttribute("name", "%s MAC" % self.name)
|
||||
|
||||
if not self.mac_library:
|
||||
raise ValueError("must define emane model library")
|
||||
mac_element.setAttribute("library", self.mac_library)
|
||||
|
||||
for name in mac_names:
|
||||
# ignore custom configurations
|
||||
if name in self.config_ignore:
|
||||
continue
|
||||
|
||||
# check if value is a multi param
|
||||
value = self.valueof(name, values)
|
||||
param = value_to_params(mac_document, name, value)
|
||||
if not param:
|
||||
param = emane_manager.xmlparam(mac_document, name, value)
|
||||
|
||||
mac_element.appendChild(param)
|
||||
|
||||
return mac_document
|
||||
|
||||
def create_phy_doc(self, emane_manager, values):
|
||||
"""
|
||||
Create the phy xml document.
|
||||
|
||||
:param core.emane.emanemanager.EmaneManager emane_manager: core emane manager
|
||||
:param tuple values: all current configuration values, mac + phy
|
||||
:return: nem document
|
||||
:rtype: xml.dom.minidom.Document
|
||||
"""
|
||||
names = list(self.getnames())
|
||||
phy_names = names[len(self.mac_config):]
|
||||
|
||||
phy_document = emane_manager.xmldoc("phy")
|
||||
phy_element = phy_document.getElementsByTagName("phy").pop()
|
||||
phy_element.setAttribute("name", "%s PHY" % self.name)
|
||||
|
||||
if self.phy_library:
|
||||
phy_element.setAttribute("library", self.phy_library)
|
||||
|
||||
# append all phy options
|
||||
for name in phy_names:
|
||||
# ignore custom configurations
|
||||
if name in self.config_ignore:
|
||||
continue
|
||||
|
||||
# check if value is a multi param
|
||||
value = self.valueof(name, values)
|
||||
param = value_to_params(phy_document, name, value)
|
||||
if not param:
|
||||
param = emane_manager.xmlparam(phy_document, name, value)
|
||||
|
||||
phy_element.appendChild(param)
|
||||
|
||||
return phy_document
|
||||
|
||||
@classmethod
|
||||
def configure_emane(cls, session, config_data):
|
||||
"""
|
||||
Handle configuration messages for configuring an emane model.
|
||||
|
||||
:param core.session.Session session: session to configure emane
|
||||
:param core.conf.ConfigData config_data: configuration data for carrying out a configuration
|
||||
"""
|
||||
return cls.configure(session.emane, config_data)
|
||||
|
||||
def post_startup(self, emane_manager):
|
||||
def post_startup(self) -> None:
|
||||
"""
|
||||
Logic to execute after the emane manager is finished with startup.
|
||||
|
||||
:param core.emane.emanemanager.EmaneManager emane_manager: emane manager for the session
|
||||
:return: nothing
|
||||
"""
|
||||
logger.info("emane model(%s) has no post setup tasks", self.name)
|
||||
logging.debug("emane model(%s) has no post setup tasks", self.name)
|
||||
|
||||
def build_nem_xml(self, doc, emane_node, interface):
|
||||
"""
|
||||
Build the NEM definition that goes into the platform.xml file.
|
||||
|
||||
This returns an XML element that will be added to the <platform/> element.
|
||||
|
||||
This default method supports per-interface config (e.g. <nem definition="n2_0_63emane_rfpipe.xml" id="1">
|
||||
or per-EmaneNode config (e.g. <nem definition="n1emane_rfpipe.xml" id="1">.
|
||||
|
||||
This can be overriden by a model for NEM flexibility; n is the EmaneNode.
|
||||
|
||||
<nem name="NODE-001" definition="rfpipenem.xml">
|
||||
|
||||
:param xml.dom.minidom.Document doc: xml document
|
||||
:param core.emane.nodes.EmaneNode emane_node: emane node to get information from
|
||||
:param interface: interface for the emane node
|
||||
:return: created platform xml
|
||||
"""
|
||||
# if this netif contains a non-standard (per-interface) config,
|
||||
# then we need to use a more specific xml file here
|
||||
nem_name = self.nem_name(interface)
|
||||
nem = doc.createElement("nem")
|
||||
nem.setAttribute("name", interface.localname)
|
||||
nem.setAttribute("definition", nem_name)
|
||||
return nem
|
||||
|
||||
def build_transport_xml(self, doc, emane_node, interface):
|
||||
"""
|
||||
Build the transport definition that goes into the platform.xml file.
|
||||
This returns an XML element that will be added to the nem definition.
|
||||
This default method supports raw and virtual transport types, but may be
|
||||
overridden by a model to support the e.g. pluggable virtual transport.
|
||||
|
||||
<transport definition="transvirtual.xml" group="1">
|
||||
<param name="device" value="n1.0.158" />
|
||||
</transport>
|
||||
|
||||
:param xml.dom.minidom.Document doc: xml document
|
||||
:param core.emane.nodes.EmaneNode emane_node: emane node to get information from
|
||||
:param interface: interface for the emane node
|
||||
:return: created transport xml
|
||||
"""
|
||||
transport_type = interface.transport_type
|
||||
if not transport_type:
|
||||
logger.info("warning: %s interface type unsupported!", interface.name)
|
||||
transport_type = "raw"
|
||||
transport_name = emane_node.transportxmlname(transport_type)
|
||||
|
||||
transport = doc.createElement("transport")
|
||||
transport.setAttribute("definition", transport_name)
|
||||
|
||||
param = doc.createElement("param")
|
||||
param.setAttribute("name", "device")
|
||||
param.setAttribute("value", interface.name)
|
||||
|
||||
transport.appendChild(param)
|
||||
return transport
|
||||
|
||||
def _basename(self, interface=None):
|
||||
"""
|
||||
Create name that is leveraged for configuration file creation.
|
||||
|
||||
:param interface: interface for this model
|
||||
:return: basename used for file creation
|
||||
:rtype: str
|
||||
"""
|
||||
name = "n%s" % self.object_id
|
||||
emane_manager = self.session.emane
|
||||
|
||||
if interface:
|
||||
node_id = interface.node.objid
|
||||
if emane_manager.getifcconfig(node_id, self.name, None, interface) is not None:
|
||||
name = interface.localname.replace(".", "_")
|
||||
|
||||
return "%s%s" % (name, self.name)
|
||||
|
||||
def nem_name(self, interface=None):
|
||||
"""
|
||||
Return the string name for the NEM XML file, e.g. "n3rfpipenem.xml"
|
||||
|
||||
:param interface: interface for this model
|
||||
:return: nem xml filename
|
||||
:rtype: str
|
||||
"""
|
||||
basename = self._basename(interface)
|
||||
append = ""
|
||||
if interface and interface.transport_type == "raw":
|
||||
append = "_raw"
|
||||
return "%snem%s.xml" % (basename, append)
|
||||
|
||||
def shim_name(self, interface=None):
|
||||
"""
|
||||
Return the string name for the SHIM XML file, e.g. "commeffectshim.xml"
|
||||
|
||||
:param interface: interface for this model
|
||||
:return: shim xml filename
|
||||
:rtype: str
|
||||
"""
|
||||
return "%sshim.xml" % self._basename(interface)
|
||||
|
||||
def mac_name(self, interface=None):
|
||||
"""
|
||||
Return the string name for the MAC XML file, e.g. "n3rfpipemac.xml"
|
||||
|
||||
:param interface: interface for this model
|
||||
:return: mac xml filename
|
||||
:rtype: str
|
||||
"""
|
||||
return "%smac.xml" % self._basename(interface)
|
||||
|
||||
def phy_name(self, interface=None):
|
||||
"""
|
||||
Return the string name for the PHY XML file, e.g. "n3rfpipephy.xml"
|
||||
|
||||
:param interface: interface for this model
|
||||
:return: phy xml filename
|
||||
:rtype: str
|
||||
"""
|
||||
return "%sphy.xml" % self._basename(interface)
|
||||
|
||||
def update(self, moved, moved_netifs):
|
||||
def update(self, moved: bool, moved_netifs: List[CoreInterface]) -> None:
|
||||
"""
|
||||
Invoked from MobilityModel when nodes are moved; this causes
|
||||
emane location events to be generated for the nodes in the moved
|
||||
list, making EmaneModels compatible with Ns2ScriptedMobility.
|
||||
|
||||
:param bool moved: were nodes moved
|
||||
:param list moved_netifs: interfaces that were moved
|
||||
:return:
|
||||
:param moved: were nodes moved
|
||||
:param moved_netifs: interfaces that were moved
|
||||
:return: nothing
|
||||
"""
|
||||
try:
|
||||
wlan = self.session.get_object(self.object_id)
|
||||
wlan = self.session.get_node(self.id)
|
||||
wlan.setnempositions(moved_netifs)
|
||||
except KeyError:
|
||||
logger.exception("error during update")
|
||||
except CoreError:
|
||||
logging.exception("error during update")
|
||||
|
||||
def linkconfig(self, netif, bw=None, delay=None, loss=None, duplicate=None, jitter=None, netif2=None):
|
||||
def linkconfig(
|
||||
self,
|
||||
netif: CoreInterface,
|
||||
bw: float = None,
|
||||
delay: float = None,
|
||||
loss: float = None,
|
||||
duplicate: float = None,
|
||||
jitter: float = None,
|
||||
netif2: CoreInterface = None,
|
||||
) -> None:
|
||||
"""
|
||||
Invoked when a Link Message is received. Default is unimplemented.
|
||||
|
||||
:param core.netns.vif.Veth netif: interface one
|
||||
:param netif: interface one
|
||||
:param bw: bandwidth to set to
|
||||
:param delay: packet delay to set to
|
||||
:param loss: packet loss to set to
|
||||
:param duplicate: duplicate percentage to set to
|
||||
:param jitter: jitter to set to
|
||||
:param core.netns.vif.Veth netif2: interface two
|
||||
:param netif2: interface two
|
||||
:return: nothing
|
||||
"""
|
||||
logger.warn("emane model(%s) does not support link configuration", self.name)
|
||||
logging.warning(
|
||||
"emane model(%s) does not support link configuration", self.name
|
||||
)
|
||||
|
|
|
@@ -1,8 +1,8 @@
"""
ieee80211abg.py: EMANE IEEE 802.11abg model for CORE
"""
import os

from core.emane import emanemanifest
from core.emane import emanemodel


@@ -12,8 +12,11 @@ class EmaneIeee80211abgModel(emanemodel.EmaneModel):

    # mac configuration
    mac_library = "ieee80211abgmaclayer"
    mac_xml = "/usr/share/emane/manifest/ieee80211abgmaclayer.xml"
    mac_defaults = {
        "pcrcurveuri": "/usr/share/emane/xml/models/mac/ieee80211abg/ieee80211pcr.xml",
    }
    mac_config = emanemanifest.parse(mac_xml, mac_defaults)
    mac_xml = "ieee80211abgmaclayer.xml"

    @classmethod
    def load(cls, emane_prefix: str) -> None:
        cls.mac_defaults["pcrcurveuri"] = os.path.join(
            emane_prefix, "share/emane/xml/models/mac/ieee80211abg/ieee80211pcr.xml"
        )
        super().load(emane_prefix)
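
The per-model load() classmethod resolves manifest and pcr paths under the configured emane prefix at startup, instead of relying on hard-coded /usr/share paths. A hypothetical subclass following the same pattern (all names here are illustrative, not part of CORE):

    import os

    from core.emane import emanemodel


    class ExampleMacModel(emanemodel.EmaneModel):
        name = "emane_example"
        mac_library = "examplemaclayer"
        mac_xml = "examplemaclayer.xml"
        mac_defaults = {"pcrcurveuri": ""}

        @classmethod
        def load(cls, emane_prefix: str) -> None:
            # resolve the pcr curve under the emane prefix, then let the base
            # class parse the mac/phy manifests
            cls.mac_defaults["pcrcurveuri"] = os.path.join(
                emane_prefix, "share/emane/xml/models/mac/example/examplepcr.xml"
            )
            super().load(emane_prefix)
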
335 daemon/core/emane/linkmonitor.py (Normal file)

@@ -0,0 +1,335 @@
import logging
import sched
import threading
import time
from typing import TYPE_CHECKING, Dict, List, Tuple

import netaddr
from lxml import etree

from core.emulator.data import LinkData
from core.emulator.enumerations import LinkTypes, MessageFlags
from core.nodes.network import CtrlNet

try:
    from emane import shell
except ImportError:
    try:
        from emanesh import shell
    except ImportError:
        logging.debug("compatible emane python bindings not installed")

if TYPE_CHECKING:
    from core.emane.emanemanager import EmaneManager

DEFAULT_PORT = 47_000
MAC_COMPONENT_INDEX = 1
EMANE_RFPIPE = "rfpipemaclayer"
EMANE_80211 = "ieee80211abgmaclayer"
EMANE_TDMA = "tdmaeventschedulerradiomodel"
SINR_TABLE = "NeighborStatusTable"
NEM_SELF = 65535


class LossTable:
    def __init__(self, losses: Dict[float, float]) -> None:
        self.losses = losses
        self.sinrs = sorted(self.losses.keys())
        self.loss_lookup = {}
        for index, value in enumerate(self.sinrs):
            self.loss_lookup[index] = self.losses[value]
        self.mac_id = None

    def get_loss(self, sinr: float) -> float:
        index = self._get_index(sinr)
        loss = 100.0 - self.loss_lookup[index]
        return loss

    def _get_index(self, current_sinr: float) -> int:
        for index, sinr in enumerate(self.sinrs):
            if current_sinr <= sinr:
                return index
        return len(self.sinrs) - 1


class EmaneLink:
    def __init__(self, from_nem: int, to_nem: int, sinr: float) -> None:
        self.from_nem = from_nem
        self.to_nem = to_nem
        self.sinr = sinr
        self.last_seen = None
        self.updated = False
        self.touch()

    def update(self, sinr: float) -> None:
        self.updated = self.sinr != sinr
        self.sinr = sinr
        self.touch()

    def touch(self) -> None:
        self.last_seen = time.monotonic()

    def is_dead(self, timeout: int) -> bool:
        return (time.monotonic() - self.last_seen) >= timeout

    def __repr__(self) -> str:
        return f"EmaneLink({self.from_nem}, {self.to_nem}, {self.sinr})"


class EmaneClient:
    def __init__(self, address: str) -> None:
        self.address = address
        self.client = shell.ControlPortClient(self.address, DEFAULT_PORT)
        self.nems = {}
        self.setup()

    def setup(self) -> None:
        manifest = self.client.getManifest()
        for nem_id, components in manifest.items():
            # get mac config
            mac_id, _, emane_model = components[MAC_COMPONENT_INDEX]
            mac_config = self.client.getConfiguration(mac_id)
            logging.debug(
                "address(%s) nem(%s) emane(%s)", self.address, nem_id, emane_model
            )

            # create loss table based on current configuration
            if emane_model == EMANE_80211:
                loss_table = self.handle_80211(mac_config)
            elif emane_model == EMANE_RFPIPE:
                loss_table = self.handle_rfpipe(mac_config)
            else:
                logging.warning("unknown emane link model: %s", emane_model)
                continue
            logging.info("monitoring links nem(%s) model(%s)", nem_id, emane_model)
            loss_table.mac_id = mac_id
            self.nems[nem_id] = loss_table

    def check_links(
        self, links: Dict[Tuple[int, int], EmaneLink], loss_threshold: int
    ) -> None:
        for from_nem, loss_table in self.nems.items():
            tables = self.client.getStatisticTable(loss_table.mac_id, (SINR_TABLE,))
            table = tables[SINR_TABLE][1:][0]
            for row in table:
                row = row
                to_nem = row[0][0]
                sinr = row[5][0]
                age = row[-1][0]

                # exclude invalid links
                is_self = to_nem == NEM_SELF
                has_valid_age = 0 <= age <= 1
                if is_self or not has_valid_age:
                    continue

                # check if valid link loss
                link_key = (from_nem, to_nem)
                loss = loss_table.get_loss(sinr)
                if loss < loss_threshold:
                    link = links.get(link_key)
                    if link:
                        link.update(sinr)
                    else:
                        link = EmaneLink(from_nem, to_nem, sinr)
                        links[link_key] = link

    def handle_tdma(self, config: Dict[str, Tuple]):
        pcr = config["pcrcurveuri"][0][0]
        logging.debug("tdma pcr: %s", pcr)

    def handle_80211(self, config: Dict[str, Tuple]) -> LossTable:
        unicastrate = config["unicastrate"][0][0]
        pcr = config["pcrcurveuri"][0][0]
        logging.debug("80211 pcr: %s", pcr)
        tree = etree.parse(pcr)
        root = tree.getroot()
        table = root.find("table")
        losses = {}
        for rate in table.iter("datarate"):
            index = int(rate.get("index"))
            if index == unicastrate:
                for row in rate.iter("row"):
                    sinr = float(row.get("sinr"))
                    por = float(row.get("por"))
                    losses[sinr] = por
        return LossTable(losses)

    def handle_rfpipe(self, config: Dict[str, Tuple]) -> LossTable:
        pcr = config["pcrcurveuri"][0][0]
        logging.debug("rfpipe pcr: %s", pcr)
        tree = etree.parse(pcr)
        root = tree.getroot()
        table = root.find("table")
        losses = {}
        for row in table.iter("row"):
            sinr = float(row.get("sinr"))
            por = float(row.get("por"))
            losses[sinr] = por
        return LossTable(losses)

    def stop(self) -> None:
        self.client.stop()


class EmaneLinkMonitor:
    def __init__(self, emane_manager: "EmaneManager") -> None:
        self.emane_manager = emane_manager
        self.clients = []
        self.links = {}
        self.complete_links = set()
        self.loss_threshold = None
        self.link_interval = None
        self.link_timeout = None
        self.scheduler = None
        self.running = False

    def start(self) -> None:
        self.loss_threshold = int(self.emane_manager.get_config("loss_threshold"))
        self.link_interval = int(self.emane_manager.get_config("link_interval"))
        self.link_timeout = int(self.emane_manager.get_config("link_timeout"))
        self.initialize()
        if not self.clients:
            logging.info("no valid emane models to monitor links")
            return
        self.scheduler = sched.scheduler()
        self.scheduler.enter(0, 0, self.check_links)
        self.running = True
        thread = threading.Thread(target=self.scheduler.run, daemon=True)
        thread.start()

    def initialize(self) -> None:
        addresses = self.get_addresses()
        for address in addresses:
            client = EmaneClient(address)
            if client.nems:
                self.clients.append(client)

    def get_addresses(self) -> List[str]:
        addresses = []
        nodes = self.emane_manager.getnodes()
        for node in nodes:
            for netif in node.netifs():
                if isinstance(netif.net, CtrlNet):
                    ip4 = None
                    for x in netif.addrlist:
                        address, prefix = x.split("/")
                        if netaddr.valid_ipv4(address):
                            ip4 = address
                    if ip4:
                        addresses.append(ip4)
                    break
        return addresses

    def check_links(self) -> None:
        # check for new links
        previous_links = set(self.links.keys())
        for client in self.clients:
            try:
                client.check_links(self.links, self.loss_threshold)
            except shell.ControlPortException:
                if self.running:
                    logging.exception("link monitor error")

        # find new links
        current_links = set(self.links.keys())
        new_links = current_links - previous_links

        # find updated and dead links
        dead_links = []
        for link_id, link in self.links.items():
            complete_id = self.get_complete_id(link_id)
            if link.is_dead(self.link_timeout):
                dead_links.append(link_id)
            elif link.updated and complete_id in self.complete_links:
                link.updated = False
                self.send_link(MessageFlags.NONE, complete_id)

        # announce dead links
        for link_id in dead_links:
            complete_id = self.get_complete_id(link_id)
            if complete_id in self.complete_links:
                self.complete_links.remove(complete_id)
                self.send_link(MessageFlags.DELETE, complete_id)
            del self.links[link_id]

        # announce new links
        for link_id in new_links:
            complete_id = self.get_complete_id(link_id)
            if complete_id in self.complete_links:
                continue
            if self.is_complete_link(link_id):
                self.complete_links.add(complete_id)
                self.send_link(MessageFlags.ADD, complete_id)

        if self.running:
            self.scheduler.enter(self.link_interval, 0, self.check_links)

    def get_complete_id(self, link_id: Tuple[int, int]) -> Tuple[int, int]:
        value_one, value_two = link_id
        if value_one < value_two:
            return value_one, value_two
        else:
            return value_two, value_one

    def is_complete_link(self, link_id: Tuple[int, int]) -> bool:
        reverse_id = link_id[1], link_id[0]
        return link_id in self.links and reverse_id in self.links

    def get_link_label(self, link_id: Tuple[int, int]) -> str:
        source_id = tuple(sorted(link_id))
        source_link = self.links[source_id]
        dest_id = link_id[::-1]
        dest_link = self.links[dest_id]
        return f"{source_link.sinr:.1f} / {dest_link.sinr:.1f}"

    def send_link(self, message_type: MessageFlags, link_id: Tuple[int, int]) -> None:
        nem_one, nem_two = link_id
        emane_one, netif = self.emane_manager.nemlookup(nem_one)
        if not emane_one or not netif:
            logging.error("invalid nem: %s", nem_one)
            return
        node_one = netif.node
        emane_two, netif = self.emane_manager.nemlookup(nem_two)
        if not emane_two or not netif:
            logging.error("invalid nem: %s", nem_two)
            return
        node_two = netif.node
        logging.debug(
            "%s emane link from %s(%s) to %s(%s)",
            message_type.name,
            node_one.name,
            nem_one,
            node_two.name,
            nem_two,
        )
        label = self.get_link_label(link_id)
        self.send_message(message_type, label, node_one.id, node_two.id, emane_one.id)

    def send_message(
        self,
        message_type: MessageFlags,
        label: str,
        node_one: int,
        node_two: int,
        emane_id: int,
    ) -> None:
        color = self.emane_manager.session.get_link_color(emane_id)
        link_data = LinkData(
            message_type=message_type,
            label=label,
            node1_id=node_one,
            node2_id=node_two,
            network_id=emane_id,
            link_type=LinkTypes.WIRELESS,
            color=color,
        )
        self.emane_manager.session.broadcast_link(link_data)

    def stop(self) -> None:
        self.running = False
        for client in self.clients:
            client.stop()
        self.clients.clear()
        self.links.clear()
        self.complete_links.clear()
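
LossTable turns a pcr curve, parsed into a SINR-to-por mapping, into a loss percentage lookup: the first table entry whose SINR is greater than or equal to the observed SINR is used, and loss is 100 minus that entry's por. A quick usage sketch with made-up values:

    # illustrative mapping from SINR (dB) to probability of reception (%)
    table = LossTable({0.0: 10.0, 10.0: 50.0, 20.0: 90.0})

    assert table.get_loss(5.0) == 50.0   # falls into the 10.0 dB bucket (por 50)
    assert table.get_loss(25.0) == 10.0  # beyond the table, the last bucket applies (por 90)
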
@ -1,16 +1,21 @@
|
|||
"""
|
||||
nodes.py: definition of an EmaneNode class for implementing configuration
|
||||
control of an EMANE emulation. An EmaneNode has several attached NEMs that
|
||||
Provides an EMANE network node class, which has several attached NEMs that
|
||||
share the same MAC+PHY model.
|
||||
"""
|
||||
|
||||
import os
|
||||
import logging
|
||||
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Type
|
||||
|
||||
from core import logger
|
||||
from core.coreobj import PyCoreNet
|
||||
from core.enumerations import LinkTypes
|
||||
from core.enumerations import NodeTypes
|
||||
from core.enumerations import RegisterTlvs
|
||||
from core.emulator.distributed import DistributedServer
|
||||
from core.emulator.enumerations import LinkTypes, NodeTypes, RegisterTlvs
|
||||
from core.nodes.base import CoreNetworkBase
|
||||
from core.nodes.interface import CoreInterface
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from core.emulator.session import Session
|
||||
from core.location.mobility import WirelessModel
|
||||
|
||||
WirelessModelType = Type[WirelessModel]
|
||||
|
||||
try:
|
||||
from emane.events import LocationEvent
|
||||
|
@ -18,75 +23,103 @@ except ImportError:
|
|||
try:
|
||||
from emanesh.events import LocationEvent
|
||||
except ImportError:
|
||||
logger.warn("compatible emane python bindings not installed")
|
||||
logging.debug("compatible emane python bindings not installed")
|
||||
|
||||
|
||||
class EmaneNet(PyCoreNet):
|
||||
"""
|
||||
EMANE network base class.
|
||||
"""
|
||||
apitype = NodeTypes.EMANE.value
|
||||
linktype = LinkTypes.WIRELESS.value
|
||||
# icon used
|
||||
type = "wlan"
|
||||
|
||||
|
||||
class EmaneNode(EmaneNet):
|
||||
class EmaneNet(CoreNetworkBase):
|
||||
"""
|
||||
EMANE node contains NEM configuration and causes connected nodes
|
||||
to have TAP interfaces (instead of VEth). These are managed by the
|
||||
Emane controller object that exists in a session.
|
||||
"""
|
||||
|
||||
def __init__(self, session, objid=None, name=None, start=True):
|
||||
PyCoreNet.__init__(self, session, objid, name, start)
|
||||
apitype = NodeTypes.EMANE
|
||||
linktype = LinkTypes.WIRED
|
||||
type = "wlan"
|
||||
is_emane = True
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
session: "Session",
|
||||
_id: int = None,
|
||||
name: str = None,
|
||||
start: bool = True,
|
||||
server: DistributedServer = None,
|
||||
) -> None:
|
||||
super().__init__(session, _id, name, start, server)
|
||||
self.conf = ""
|
||||
self.up = False
|
||||
self.nemidmap = {}
|
||||
self.model = None
|
||||
self.mobility = None
|
||||
|
||||
def linkconfig(self, netif, bw=None, delay=None, loss=None, duplicate=None, jitter=None, netif2=None):
|
||||
def linkconfig(
|
||||
self,
|
||||
netif: CoreInterface,
|
||||
bw: float = None,
|
||||
delay: float = None,
|
||||
loss: float = None,
|
||||
duplicate: float = None,
|
||||
jitter: float = None,
|
||||
netif2: CoreInterface = None,
|
||||
) -> None:
|
||||
"""
|
||||
The CommEffect model supports link configuration.
|
||||
"""
|
||||
if not self.model:
|
||||
return
|
||||
return self.model.linkconfig(netif=netif, bw=bw, delay=delay, loss=loss,
|
||||
duplicate=duplicate, jitter=jitter, netif2=netif2)
|
||||
self.model.linkconfig(
|
||||
netif=netif,
|
||||
bw=bw,
|
||||
delay=delay,
|
||||
loss=loss,
|
||||
duplicate=duplicate,
|
||||
jitter=jitter,
|
||||
netif2=netif2,
|
||||
)
|
||||
|
||||
def config(self, conf):
|
||||
def config(self, conf: str) -> None:
|
||||
self.conf = conf
|
||||
|
||||
def shutdown(self):
|
||||
def shutdown(self) -> None:
|
||||
pass
|
||||
|
||||
def link(self, netif1, netif2):
|
||||
def link(self, netif1: CoreInterface, netif2: CoreInterface) -> None:
|
||||
pass
|
||||
|
||||
def unlink(self, netif1, netif2):
|
||||
def unlink(self, netif1: CoreInterface, netif2: CoreInterface) -> None:
|
||||
pass
|
||||
|
||||
def setmodel(self, model, config):
|
||||
def updatemodel(self, config: Dict[str, str]) -> None:
|
||||
if not self.model:
|
||||
raise ValueError("no model set to update for node(%s)", self.id)
|
||||
logging.info(
|
||||
"node(%s) updating model(%s): %s", self.id, self.model.name, config
|
||||
)
|
||||
self.model.set_configs(config, node_id=self.id)
|
||||
|
||||
def setmodel(self, model: "WirelessModelType", config: Dict[str, str]) -> None:
|
||||
"""
|
||||
set the EmaneModel associated with this node
|
||||
"""
|
||||
logger.info("adding model: %s", model.name)
|
||||
if model.config_type == RegisterTlvs.WIRELESS.value:
|
||||
logging.info("adding model: %s", model.name)
|
||||
if model.config_type == RegisterTlvs.WIRELESS:
|
||||
# EmaneModel really uses values from ConfigurableManager
|
||||
# when buildnemxml() is called, not during init()
|
||||
self.model = model(session=self.session, object_id=self.objid)
|
||||
elif model.config_type == RegisterTlvs.MOBILITY.value:
|
||||
self.mobility = model(session=self.session, object_id=self.objid, values=config)
|
||||
self.model = model(session=self.session, _id=self.id)
|
||||
self.model.update_config(config)
|
||||
elif model.config_type == RegisterTlvs.MOBILITY:
|
||||
self.mobility = model(session=self.session, _id=self.id)
|
||||
self.mobility.update_config(config)
|
||||
|
||||
def setnemid(self, netif, nemid):
|
||||
def setnemid(self, netif: CoreInterface, nemid: int) -> None:
|
||||
"""
|
||||
Record an interface to numerical ID mapping. The Emane controller
|
||||
object manages and assigns these IDs for all NEMs.
|
||||
"""
|
||||
self.nemidmap[netif] = nemid
|
||||
|
||||
def getnemid(self, netif):
|
||||
def getnemid(self, netif: CoreInterface) -> Optional[int]:
|
||||
"""
|
||||
Given an interface, return its numerical ID.
|
||||
"""
|
||||
|
@ -95,7 +128,7 @@ class EmaneNode(EmaneNet):
|
|||
else:
|
||||
return self.nemidmap[netif]
|
||||
|
||||
def getnemnetif(self, nemid):
|
||||
def getnemnetif(self, nemid: int) -> Optional[CoreInterface]:
|
||||
"""
|
||||
Given a numerical NEM ID, return its interface. This returns the
|
||||
first interface that matches the given NEM ID.
|
||||
|
@ -105,125 +138,43 @@ class EmaneNode(EmaneNet):
|
|||
return netif
|
||||
return None
|
||||
|
||||
def netifs(self, sort=True):
|
||||
def netifs(self, sort: bool = True) -> List[CoreInterface]:
|
||||
"""
|
||||
Retrieve list of linked interfaces sorted by node number.
|
||||
"""
|
||||
return sorted(self._netif.values(), key=lambda ifc: ifc.node.objid)
|
||||
return sorted(self._netif.values(), key=lambda ifc: ifc.node.id)
|
||||
|
||||
def buildplatformxmlentry(self, doc):
|
||||
"""
|
||||
Return a dictionary of XML elements describing the NEMs
|
||||
connected to this EmaneNode for inclusion in the platform.xml file.
|
||||
"""
|
||||
ret = {}
|
||||
if self.model is None:
|
||||
logger.info("warning: EmaneNode %s has no associated model", self.name)
|
||||
return ret
|
||||
|
||||
for netif in self.netifs():
|
||||
nementry = self.model.build_nem_xml(doc, self, netif)
|
||||
trans = self.model.build_transport_xml(doc, self, netif)
|
||||
nementry.appendChild(trans)
|
||||
ret[netif] = nementry
|
||||
|
||||
return ret
|
||||
|
||||
def build_xml_files(self, emane_manager):
|
||||
"""
|
||||
Let the configured model build the necessary nem, mac, and phy XMLs.
|
||||
|
||||
:param core.emane.emanemanager.EmaneManager emane_manager: core emane manager
|
||||
:return: nothing
|
||||
"""
|
||||
if self.model is None:
|
||||
return
|
||||
|
||||
# build XML for overall network (EmaneNode) configs
|
||||
self.model.build_xml_files(emane_manager, interface=None)
|
||||
|
||||
# build XML for specific interface (NEM) configs
|
||||
need_virtual = False
|
||||
need_raw = False
|
||||
vtype = "virtual"
|
||||
rtype = "raw"
|
||||
|
||||
for netif in self.netifs():
|
||||
self.model.build_xml_files(emane_manager, netif)
|
||||
if "virtual" in netif.transport_type:
|
||||
need_virtual = True
|
||||
vtype = netif.transport_type
|
||||
else:
|
||||
need_raw = True
|
||||
rtype = netif.transport_type
|
||||
|
||||
# build transport XML files depending on type of interfaces involved
|
||||
if need_virtual:
|
||||
self.buildtransportxml(emane_manager, vtype)
|
||||
|
||||
if need_raw:
|
||||
self.buildtransportxml(emane_manager, rtype)
|
||||
|
||||
def buildtransportxml(self, emane, transport_type):
|
||||
"""
|
||||
Write a transport XML file for the Virtual or Raw Transport.
|
||||
"""
|
||||
transdoc = emane.xmldoc("transport")
|
||||
trans = transdoc.getElementsByTagName("transport").pop()
|
||||
trans.setAttribute("name", "%s Transport" % transport_type.capitalize())
|
||||
trans.setAttribute("library", "trans%s" % transport_type.lower())
|
||||
trans.appendChild(emane.xmlparam(transdoc, "bitrate", "0"))
|
||||
|
||||
flowcontrol = False
|
||||
names = self.model.getnames()
|
||||
values = emane.getconfig(self.objid, self.model.name, self.model.getdefaultvalues())[1]
|
||||
|
||||
if "flowcontrolenable" in names and values:
|
||||
i = names.index("flowcontrolenable")
|
||||
if self.model.booltooffon(values[i]) == "on":
|
||||
flowcontrol = True
|
||||
|
||||
if "virtual" in transport_type.lower():
|
||||
if os.path.exists("/dev/net/tun_flowctl"):
|
||||
trans.appendChild(emane.xmlparam(transdoc, "devicepath", "/dev/net/tun_flowctl"))
|
||||
else:
|
||||
trans.appendChild(emane.xmlparam(transdoc, "devicepath", "/dev/net/tun"))
|
||||
if flowcontrol:
|
||||
trans.appendChild(emane.xmlparam(transdoc, "flowcontrolenable", "on"))
|
||||
|
||||
emane.xmlwrite(transdoc, self.transportxmlname(transport_type.lower()))
|
||||
|
||||
def transportxmlname(self, type):
|
||||
"""
|
||||
Return the string name for the Transport XML file, e.g. 'n3transvirtual.xml'
|
||||
"""
|
||||
return "n%strans%s.xml" % (self.objid, type)
|
||||
|
||||
def installnetifs(self, do_netns=True):
|
||||
def installnetifs(self) -> None:
|
||||
"""
|
||||
Install TAP devices into their namespaces. This is done after
|
||||
EMANE daemons have been started, because that is their only chance
|
||||
to bind to the TAPs.
|
||||
"""
|
||||
if self.session.emane.genlocationevents() and self.session.emane.service is None:
|
||||
if (
|
||||
self.session.emane.genlocationevents()
|
||||
and self.session.emane.service is None
|
||||
):
|
||||
warntxt = "unable to publish EMANE events because the eventservice "
|
||||
warntxt += "Python bindings failed to load"
|
||||
logger.error(warntxt)
|
||||
logging.error(warntxt)
|
||||
|
||||
for netif in self.netifs():
|
||||
if do_netns and "virtual" in netif.transport_type.lower():
|
||||
netif.install()
|
||||
netif.setaddrs()
|
||||
external = self.session.emane.get_config(
|
||||
"external", self.id, self.model.name
|
||||
)
|
||||
if external == "0":
|
||||
netif.setaddrs()
|
||||
|
||||
if not self.session.emane.genlocationevents():
|
||||
netif.poshook = None
|
||||
continue
|
||||
|
||||
# at this point we register location handlers for generating
|
||||
# EMANE location events
|
||||
netif.poshook = self.setnemposition
|
||||
x, y, z = netif.node.position.get()
|
||||
self.setnemposition(netif, x, y, z)
|
||||
netif.setposition()
|
||||
|
||||
def deinstallnetifs(self):
|
||||
def deinstallnetifs(self) -> None:
|
||||
"""
|
||||
Uninstall TAP devices. This invokes their shutdown method for
|
||||
any required cleanup; the device may be actually removed when
|
||||
|
@ -234,29 +185,47 @@ class EmaneNode(EmaneNet):
|
|||
netif.shutdown()
|
||||
netif.poshook = None
|
||||
|
||||
def setnemposition(self, netif, x, y, z):
|
||||
def _nem_position(
|
||||
self, netif: CoreInterface
|
||||
) -> Optional[Tuple[int, float, float, float]]:
|
||||
"""
|
||||
Publish a NEM location change event using the EMANE event service.
|
||||
Creates nem position for emane event for a given interface.
|
||||
|
||||
:param netif: interface to get nem emane position for
|
||||
:return: nem position tuple, None otherwise
|
||||
"""
|
||||
if self.session.emane.service is None:
|
||||
logger.info("position service not available")
|
||||
return
|
||||
nemid = self.getnemid(netif)
|
||||
ifname = netif.localname
|
||||
if nemid is None:
|
||||
logger.info("nemid for %s is unknown" % ifname)
|
||||
logging.info("nemid for %s is unknown", ifname)
|
||||
return
|
||||
lat, long, alt = self.session.location.getgeo(x, y, z)
|
||||
logger.info("setnemposition %s (%s) x,y,z=(%d,%d,%s)(%.6f,%.6f,%.6f)", ifname, nemid, x, y, z, lat, long, alt)
|
||||
event = LocationEvent()
|
||||
|
||||
node = netif.node
|
||||
x, y, z = node.getposition()
|
||||
lat, lon, alt = self.session.location.getgeo(x, y, z)
|
||||
if node.position.alt is not None:
|
||||
alt = node.position.alt
|
||||
# altitude must be an integer or warning is printed
|
||||
# unused: yaw, pitch, roll, azimuth, elevation, velocity
|
||||
alt = int(round(alt))
|
||||
event.append(nemid, latitude=lat, longitude=long, altitude=alt)
|
||||
self.session.emane.service.publish(0, event)
|
||||
return nemid, lon, lat, alt
|
||||
|
||||
def setnempositions(self, moved_netifs):
|
||||
def setnemposition(self, netif: CoreInterface) -> None:
|
||||
"""
|
||||
Publish a NEM location change event using the EMANE event service.
|
||||
|
||||
:param netif: interface to set nem position for
|
||||
"""
|
||||
if self.session.emane.service is None:
|
||||
logging.info("position service not available")
|
||||
return
|
||||
|
||||
position = self._nem_position(netif)
|
||||
if position:
|
||||
nemid, lon, lat, alt = position
|
||||
event = LocationEvent()
|
||||
event.append(nemid, latitude=lat, longitude=lon, altitude=alt)
|
||||
self.session.emane.service.publish(0, event)
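A hedged usage sketch of the split above: _nem_position() derives the NEM id and geo coordinates, while setnemposition() publishes the event. The emane_net and netif names below are assumptions for illustration, standing in for an EMANE network and one of its installed TAP interfaces.

# Illustrative only: assumes a running EMANE session where emane_net is an
# EmaneNet and netif one of its installed interfaces (see installnetifs above).
node = netif.node
node.setposition(150, 200, None)   # move the owning node in CORE coordinates
emane_net.setnemposition(netif)    # derives geo position and publishes a LocationEvent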
|
||||
|
||||
def setnempositions(self, moved_netifs: List[CoreInterface]) -> None:
|
||||
"""
|
||||
Several NEMs have moved, from e.g. a WaypointMobilityModel
|
||||
calculation. Generate an EMANE Location Event having several
|
||||
|
@ -266,24 +235,13 @@ class EmaneNode(EmaneNet):
|
|||
return
|
||||
|
||||
if self.session.emane.service is None:
|
||||
logger.info("position service not available")
|
||||
logging.info("position service not available")
|
||||
return
|
||||
|
||||
event = LocationEvent()
|
||||
i = 0
|
||||
for netif in moved_netifs:
|
||||
nemid = self.getnemid(netif)
|
||||
ifname = netif.localname
|
||||
if nemid is None:
|
||||
logger.info("nemid for %s is unknown" % ifname)
|
||||
continue
|
||||
x, y, z = netif.node.getposition()
|
||||
lat, long, alt = self.session.location.getgeo(x, y, z)
|
||||
logger.info("setnempositions %d %s (%s) x,y,z=(%d,%d,%s)(%.6f,%.6f,%.6f)",
|
||||
i, ifname, nemid, x, y, z, lat, long, alt)
|
||||
# altitude must be an integer or a warning is printed
|
||||
alt = int(round(alt))
|
||||
event.append(nemid, latitude=lat, longitude=long, altitude=alt)
|
||||
i += 1
|
||||
|
||||
position = self._nem_position(netif)
|
||||
if position:
|
||||
nemid, lon, lat, alt = position
|
||||
event.append(nemid, latitude=lat, longitude=lon, altitude=alt)
|
||||
self.session.emane.service.publish(0, event)
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
"""
|
||||
rfpipe.py: EMANE RF-PIPE model for CORE
|
||||
"""
|
||||
import os
|
||||
|
||||
from core.emane import emanemanifest
|
||||
from core.emane import emanemodel
|
||||
|
||||
|
||||
|
@ -12,8 +12,11 @@ class EmaneRfPipeModel(emanemodel.EmaneModel):
|
|||
|
||||
# mac configuration
|
||||
mac_library = "rfpipemaclayer"
|
||||
mac_xml = "/usr/share/emane/manifest/rfpipemaclayer.xml"
|
||||
mac_defaults = {
|
||||
"pcrcurveuri": "/usr/share/emane/xml/models/mac/rfpipe/rfpipepcr.xml",
|
||||
}
|
||||
mac_config = emanemanifest.parse(mac_xml, mac_defaults)
|
||||
mac_xml = "rfpipemaclayer.xml"
|
||||
|
||||
@classmethod
|
||||
def load(cls, emane_prefix: str) -> None:
|
||||
cls.mac_defaults["pcrcurveuri"] = os.path.join(
|
||||
emane_prefix, "share/emane/xml/models/mac/rfpipe/rfpipepcr.xml"
|
||||
)
|
||||
super().load(emane_prefix)
|
||||
|
|
|
@ -2,14 +2,13 @@
|
|||
tdma.py: EMANE TDMA model bindings for CORE
|
||||
"""
|
||||
|
||||
import logging
|
||||
import os
|
||||
|
||||
from core import constants
|
||||
from core import logger
|
||||
from core.emane import emanemanifest
|
||||
from core import constants, utils
|
||||
from core.config import Configuration
|
||||
from core.emane import emanemodel
|
||||
from core.enumerations import ConfigDataTypes
|
||||
from core.misc import utils
|
||||
from core.emulator.enumerations import ConfigDataTypes
|
||||
|
||||
|
||||
class EmaneTdmaModel(emanemodel.EmaneModel):
|
||||
|
@ -18,36 +17,50 @@ class EmaneTdmaModel(emanemodel.EmaneModel):
|
|||
|
||||
# mac configuration
|
||||
mac_library = "tdmaeventschedulerradiomodel"
|
||||
mac_xml = "/usr/share/emane/manifest/tdmaeventschedulerradiomodel.xml"
|
||||
mac_defaults = {
|
||||
"pcrcurveuri": "/usr/share/emane/xml/models/mac/tdmaeventscheduler/tdmabasemodelpcr.xml",
|
||||
}
|
||||
mac_config = emanemanifest.parse(mac_xml, mac_defaults)
|
||||
mac_xml = "tdmaeventschedulerradiomodel.xml"
|
||||
|
||||
# add custom schedule options and ignore it when writing emane xml
|
||||
schedule_name = "schedule"
|
||||
default_schedule = os.path.join(constants.CORE_DATA_DIR, "examples", "tdma", "schedule.xml")
|
||||
mac_config.insert(
|
||||
0,
|
||||
(schedule_name, ConfigDataTypes.STRING.value, default_schedule, "", "TDMA schedule file (core)")
|
||||
default_schedule = os.path.join(
|
||||
constants.CORE_DATA_DIR, "examples", "tdma", "schedule.xml"
|
||||
)
|
||||
config_ignore = {schedule_name}
|
||||
|
||||
def post_startup(self, emane_manager):
|
||||
@classmethod
|
||||
def load(cls, emane_prefix: str) -> None:
|
||||
cls.mac_defaults["pcrcurveuri"] = os.path.join(
|
||||
emane_prefix,
|
||||
"share/emane/xml/models/mac/tdmaeventscheduler/tdmabasemodelpcr.xml",
|
||||
)
|
||||
super().load(emane_prefix)
|
||||
cls.mac_config.insert(
|
||||
0,
|
||||
Configuration(
|
||||
_id=cls.schedule_name,
|
||||
_type=ConfigDataTypes.STRING,
|
||||
default=cls.default_schedule,
|
||||
label="TDMA schedule file (core)",
|
||||
),
|
||||
)
|
||||
|
||||
def post_startup(self) -> None:
|
||||
"""
|
||||
Logic to execute after the emane manager is finished with startup.
|
||||
|
||||
:param core.emane.emanemanager.EmaneManager emane_manager: emane manager for the session
|
||||
:return: nothing
|
||||
"""
|
||||
# get configured schedule
|
||||
values = emane_manager.getconfig(self.object_id, self.name, self.getdefaultvalues())[1]
|
||||
if values is None:
|
||||
config = self.session.emane.get_configs(node_id=self.id, config_type=self.name)
|
||||
if not config:
|
||||
return
|
||||
schedule = self.valueof(self.schedule_name, values)
|
||||
schedule = config[self.schedule_name]
|
||||
|
||||
event_device = emane_manager.event_device
|
||||
# get the set event device
|
||||
event_device = self.session.emane.event_device
|
||||
|
||||
# initiate tdma schedule
|
||||
logger.info("setting up tdma schedule: schedule(%s) device(%s)", schedule, event_device)
|
||||
utils.check_cmd(["emaneevent-tdmaschedule", "-i", event_device, schedule])
|
||||
logging.info(
|
||||
"setting up tdma schedule: schedule(%s) device(%s)", schedule, event_device
|
||||
)
|
||||
args = f"emaneevent-tdmaschedule -i {event_device} {schedule}"
|
||||
utils.cmd(args)
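For orientation, the refactored post_startup() now reduces to reading the configured schedule and shelling out to the EMANE TDMA schedule tool. A rough standalone equivalent is sketched below; the device name and schedule path are placeholders, not values taken from this change.

# Sketch only: event_device and schedule are illustrative placeholders.
from core import utils

event_device = "ctrl0.1"
schedule = "/usr/local/share/core/examples/tdma/schedule.xml"
utils.cmd(f"emaneevent-tdmaschedule -i {event_device} {schedule}")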
|
||||
|
|
|
@ -1,34 +1,26 @@
|
|||
import atexit
|
||||
import logging
|
||||
import os
|
||||
import signal
|
||||
import sys
|
||||
from typing import Mapping, Type
|
||||
|
||||
import core.services
|
||||
from core import logger
|
||||
from core.coreobj import PyCoreNet
|
||||
from core.coreobj import PyCoreNode
|
||||
from core.data import NodeData
|
||||
from core.emulator.emudata import LinkOptions
|
||||
from core.emulator.emudata import NodeOptions
|
||||
from core.enumerations import EventTypes
|
||||
from core.enumerations import LinkTypes
|
||||
from core.enumerations import NodeTypes
|
||||
from core.misc import nodemaps
|
||||
from core.misc import nodeutils
|
||||
from core.session import Session
|
||||
from core.xml.xmlparser import core_document_parser
|
||||
from core.xml.xmlwriter import core_document_writer
|
||||
from core import configservices
|
||||
from core.configservice.manager import ConfigServiceManager
|
||||
from core.emulator.session import Session
|
||||
from core.services.coreservices import ServiceManager
|
||||
|
||||
|
||||
def signal_handler(signal_number, _):
|
||||
def signal_handler(signal_number: int, _) -> None:
|
||||
"""
|
||||
Handle signals and force an exit with cleanup.
|
||||
|
||||
:param int signal_number: signal number
|
||||
:param signal_number: signal number
|
||||
:param _: ignored
|
||||
:return: nothing
|
||||
"""
|
||||
logger.info("caught signal: %s", signal_number)
|
||||
logging.info("caught signal: %s", signal_number)
|
||||
sys.exit(signal_number)
|
||||
|
||||
|
||||
|
@ -39,895 +31,102 @@ signal.signal(signal.SIGUSR1, signal_handler)
|
|||
signal.signal(signal.SIGUSR2, signal_handler)
|
||||
|
||||
|
||||
def create_interface(node, network, interface_data):
|
||||
"""
|
||||
Create an interface for a node on a network using provided interface data.
|
||||
|
||||
:param node: node to create interface for
|
||||
:param network: network to associate interface with
|
||||
:param core.emulator.emudata.InterfaceData interface_data: interface data
|
||||
:return: created interface
|
||||
"""
|
||||
node.newnetif(
|
||||
network,
|
||||
addrlist=interface_data.get_addresses(),
|
||||
hwaddr=interface_data.mac,
|
||||
ifindex=interface_data.id,
|
||||
ifname=interface_data.name
|
||||
)
|
||||
return node.netif(interface_data.id, network)
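A hedged sketch of how create_interface() is typically fed: the InterfaceData usually comes from the IpPrefixes helper in core.emulator.emudata, and the node and network variables below are assumed to already exist in a session.

# Sketch: generate addressing for an existing node and attach it to a network.
from core.emulator.emudata import IpPrefixes

prefixes = IpPrefixes(ip4_prefix="10.0.0.0/24")
interface_data = prefixes.create_interface(node)            # id, name, mac, addresses
interface = create_interface(node, network, interface_data)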
|
||||
|
||||
|
||||
def link_config(network, interface, link_options, devname=None, interface_two=None):
|
||||
"""
|
||||
Convenience method for configuring a link.
|
||||
|
||||
:param network: network to configure link for
|
||||
:param interface: interface to configure
|
||||
:param core.emulator.emudata.LinkOptions link_options: data to configure link with
|
||||
:param str devname: device name, default is None
|
||||
:param interface_two: other interface associated, default is None
|
||||
:return: nothing
|
||||
"""
|
||||
config = {
|
||||
"netif": interface,
|
||||
"bw": link_options.bandwidth,
|
||||
"delay": link_options.delay,
|
||||
"loss": link_options.per,
|
||||
"duplicate": link_options.dup,
|
||||
"jitter": link_options.jitter,
|
||||
"netif2": interface_two
|
||||
}
|
||||
|
||||
# hacky check here, because physical and emane nodes do not conform to the same linkconfig interface
|
||||
if not nodeutils.is_node(network, [NodeTypes.EMANE, NodeTypes.PHYSICAL]):
|
||||
config["devname"] = devname
|
||||
|
||||
network.linkconfig(**config)
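A hedged sketch of calling link_config() directly; the network and interface variables are assumptions, and the option values are passed straight through to the underlying linkconfig() call.

# Sketch: shape an existing interface pair with bandwidth/delay options.
from core.emulator.emudata import LinkOptions

options = LinkOptions()
options.bandwidth = 1000000
options.delay = 20000
link_config(net, iface_a, options, interface_two=iface_b)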
|
||||
|
||||
|
||||
def is_net_node(node):
|
||||
"""
|
||||
Convenience method for testing if a legacy core node is considered a network node.
|
||||
|
||||
:param object node: object to test against
|
||||
:return: True if object is an instance of a network node, False otherwise
|
||||
:rtype: bool
|
||||
"""
|
||||
return isinstance(node, PyCoreNet)
|
||||
|
||||
|
||||
def is_core_node(node):
|
||||
"""
|
||||
Convenience method for testing if a legacy core node is considered a core node.
|
||||
|
||||
:param object node: object to test against
|
||||
:return: True if object is an instance of a core node, False otherwise
|
||||
:rtype: bool
|
||||
"""
|
||||
return isinstance(node, PyCoreNode)
|
||||
|
||||
|
||||
class IdGen(object):
|
||||
def __init__(self, _id=0):
|
||||
self.id = _id
|
||||
|
||||
def next(self):
|
||||
self.id += 1
|
||||
return self.id
|
||||
|
||||
|
||||
class EmuSession(Session):
|
||||
def __init__(self, session_id, config=None, mkdir=True):
|
||||
super(EmuSession, self).__init__(session_id, config, mkdir)
|
||||
|
||||
# object management
|
||||
self.node_id_gen = IdGen()
|
||||
|
||||
# set default services
|
||||
self.services.defaultservices = {
|
||||
"mdr": ("zebra", "OSPFv3MDR", "IPForward"),
|
||||
"PC": ("DefaultRoute",),
|
||||
"prouter": ("zebra", "OSPFv2", "OSPFv3", "IPForward"),
|
||||
"router": ("zebra", "OSPFv2", "OSPFv3", "IPForward"),
|
||||
"host": ("DefaultRoute", "SSH"),
|
||||
}
|
||||
|
||||
def _link_nodes(self, node_one_id, node_two_id):
|
||||
"""
|
||||
Convenience method for retrieving nodes within link data.
|
||||
|
||||
:param int node_one_id: node one id
|
||||
:param int node_two_id: node two id
|
||||
:return: nodes, network nodes if present, and tunnel if present
|
||||
:rtype: tuple
|
||||
"""
|
||||
logger.debug("link message between node1(%s) and node2(%s)", node_one_id, node_two_id)
|
||||
|
||||
# values to fill
|
||||
net_one = None
|
||||
net_two = None
|
||||
|
||||
# retrieve node one
|
||||
node_one = self.get_object(node_one_id)
|
||||
node_two = self.get_object(node_two_id)
|
||||
|
||||
# both node ids are provided
|
||||
tunnel = self.broker.gettunnel(node_one_id, node_two_id)
|
||||
logger.debug("tunnel between nodes: %s", tunnel)
|
||||
if nodeutils.is_node(tunnel, NodeTypes.TAP_BRIDGE):
|
||||
net_one = tunnel
|
||||
if tunnel.remotenum == node_one_id:
|
||||
node_one = None
|
||||
else:
|
||||
node_two = None
|
||||
# physical node connected via gre tap tunnel
|
||||
elif tunnel:
|
||||
if tunnel.remotenum == node_one_id:
|
||||
node_one = None
|
||||
else:
|
||||
node_two = None
|
||||
|
||||
if is_net_node(node_one):
|
||||
if not net_one:
|
||||
net_one = node_one
|
||||
else:
|
||||
net_two = node_one
|
||||
node_one = None
|
||||
|
||||
if is_net_node(node_two):
|
||||
if not net_one:
|
||||
net_one = node_two
|
||||
else:
|
||||
net_two = node_two
|
||||
node_two = None
|
||||
|
||||
logger.debug("link node types n1(%s) n2(%s) net1(%s) net2(%s) tunnel(%s)",
|
||||
node_one, node_two, net_one, net_two, tunnel)
|
||||
return node_one, node_two, net_one, net_two, tunnel
|
||||
|
||||
# TODO: this doesn't appear to ever be used, EMANE or basic wireless range
|
||||
def _link_wireless(self, objects, connect):
|
||||
"""
|
||||
Objects to deal with when connecting/disconnecting wireless links.
|
||||
|
||||
:param list objects: possible objects to deal with
|
||||
:param bool connect: link interfaces if True, unlink otherwise
|
||||
:return: nothing
|
||||
"""
|
||||
objects = [x for x in objects if x]
|
||||
if len(objects) < 2:
|
||||
raise ValueError("wireless link failure: %s", objects)
|
||||
logger.debug("handling wireless linking objects(%) connect(%s)", objects, connect)
|
||||
common_networks = objects[0].commonnets(objects[1])
|
||||
if not common_networks:
|
||||
raise ValueError("no common network found for wireless link/unlink")
|
||||
|
||||
for common_network, interface_one, interface_two in common_networks:
|
||||
if not nodeutils.is_node(common_network, [NodeTypes.WIRELESS_LAN, NodeTypes.EMANE]):
|
||||
logger.info("skipping common network that is not wireless/emane: %s", common_network)
|
||||
continue
|
||||
|
||||
logger.info("wireless linking connect(%s): %s - %s", connect, interface_one, interface_two)
|
||||
if connect:
|
||||
common_network.link(interface_one, interface_two)
|
||||
else:
|
||||
common_network.unlink(interface_one, interface_two)
|
||||
|
||||
def add_link(self, node_one_id, node_two_id, interface_one=None, interface_two=None, link_options=LinkOptions()):
|
||||
"""
|
||||
Add a link between nodes.
|
||||
|
||||
:param int node_one_id: node one id
|
||||
:param int node_two_id: node two id
|
||||
:param core.emulator.emudata.InterfaceData interface_one: node one interface data, defaults to none
|
||||
:param core.emulator.emudata.InterfaceData interface_two: node two interface data, defaults to none
|
||||
:param core.emulator.emudata.LinkOptions link_options: data for creating link, defaults to no options
|
||||
:return: nothing
|
||||
"""
|
||||
# get node objects identified by link data
|
||||
node_one, node_two, net_one, net_two, tunnel = self._link_nodes(node_one_id, node_two_id)
|
||||
|
||||
if node_one:
|
||||
node_one.lock.acquire()
|
||||
if node_two:
|
||||
node_two.lock.acquire()
|
||||
|
||||
try:
|
||||
# wireless link
|
||||
if link_options.type == LinkTypes.WIRELESS:
|
||||
objects = [node_one, node_two, net_one, net_two]
|
||||
self._link_wireless(objects, connect=True)
|
||||
# wired link
|
||||
else:
|
||||
# 2 nodes being linked, ptp network
|
||||
if all([node_one, node_two]) and not net_one:
|
||||
logger.info("adding link for peer to peer nodes: %s - %s", node_one.name, node_two.name)
|
||||
ptp_class = nodeutils.get_node_class(NodeTypes.PEER_TO_PEER)
|
||||
start = self.state > EventTypes.DEFINITION_STATE.value
|
||||
net_one = self.add_object(cls=ptp_class, start=start)
|
||||
|
||||
# node to network
|
||||
if node_one and net_one:
|
||||
logger.info("adding link from node to network: %s - %s", node_one.name, net_one.name)
|
||||
interface = create_interface(node_one, net_one, interface_one)
|
||||
link_config(net_one, interface, link_options)
|
||||
|
||||
# network to node
|
||||
if node_two and net_one:
|
||||
logger.info("adding link from network to node: %s - %s", node_two.name, net_one.name)
|
||||
interface = create_interface(node_two, net_one, interface_two)
|
||||
if not link_options.unidirectional:
|
||||
link_config(net_one, interface, link_options)
|
||||
|
||||
# network to network
|
||||
if net_one and net_two:
|
||||
logger.info("adding link from network to network: %s", net_one.name, net_two.name)
|
||||
if nodeutils.is_node(net_two, NodeTypes.RJ45):
|
||||
interface = net_two.linknet(net_one)
|
||||
else:
|
||||
interface = net_one.linknet(net_two)
|
||||
|
||||
link_config(net_one, interface, link_options)
|
||||
|
||||
if not link_options.unidirectional:
|
||||
interface.swapparams("_params_up")
|
||||
link_config(net_two, interface, link_options, devname=interface.name)
|
||||
interface.swapparams("_params_up")
|
||||
|
||||
# a tunnel node was found for the nodes
|
||||
addresses = []
|
||||
if not node_one and all([net_one, interface_one]):
|
||||
addresses.extend(interface_one.get_addresses())
|
||||
|
||||
if not node_two and all([net_two, interface_two]):
|
||||
addresses.extend(interface_two.get_addresses())
|
||||
|
||||
# tunnel node logic
|
||||
key = link_options.key
|
||||
if key and nodeutils.is_node(net_one, NodeTypes.TUNNEL):
|
||||
logger.info("setting tunnel key for: %s", net_one.name)
|
||||
net_one.setkey(key)
|
||||
if addresses:
|
||||
net_one.addrconfig(addresses)
|
||||
if key and nodeutils.is_node(net_two, NodeTypes.TUNNEL):
|
||||
logger.info("setting tunnel key for: %s", net_two.name)
|
||||
net_two.setkey(key)
|
||||
if addresses:
|
||||
net_two.addrconfig(addresses)
|
||||
|
||||
# physical node connected with tunnel
|
||||
if not net_one and not net_two and (node_one or node_two):
|
||||
if node_one and nodeutils.is_node(node_one, NodeTypes.PHYSICAL):
|
||||
logger.info("adding link for physical node: %s", node_one.name)
|
||||
addresses = interface_one.get_addresses()
|
||||
node_one.adoptnetif(tunnel, interface_one.id, interface_one.mac, addresses)
|
||||
link_config(node_one, tunnel, link_options)
|
||||
elif node_two and nodeutils.is_node(node_two, NodeTypes.PHYSICAL):
|
||||
logger.info("adding link for physical node: %s", node_two.name)
|
||||
addresses = interface_two.get_addresses()
|
||||
node_two.adoptnetif(tunnel, interface_two.id, interface_two.mac, addresses)
|
||||
link_config(node_two, tunnel, link_options)
|
||||
finally:
|
||||
if node_one:
|
||||
node_one.lock.release()
|
||||
if node_two:
|
||||
node_two.lock.release()
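A hedged end-to-end sketch of the add_link() path above, assuming a running EmuSession named session and the IpPrefixes helper from core.emulator.emudata.

# Sketch: two routers joined by a wired point-to-point link.
from core.emulator.emudata import IpPrefixes, NodeOptions

prefixes = IpPrefixes(ip4_prefix="10.0.0.0/24")
options = NodeOptions(model="router")
node_one = session.add_node(node_options=options)
node_two = session.add_node(node_options=options)
interface_one = prefixes.create_interface(node_one)
interface_two = prefixes.create_interface(node_two)
session.add_link(node_one.objid, node_two.objid, interface_one, interface_two)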
|
||||
|
||||
def delete_link(self, node_one_id, node_two_id, interface_one_id, interface_two_id, link_type=LinkTypes.WIRED):
|
||||
"""
|
||||
Delete a link between nodes.
|
||||
|
||||
:param int node_one_id: node one id
|
||||
:param int node_two_id: node two id
|
||||
:param int interface_one_id: interface id for node one
|
||||
:param int interface_two_id: interface id for node two
|
||||
:param core.enumerations.LinkTypes link_type: link type to delete
|
||||
:return: nothing
|
||||
"""
|
||||
# interface data
|
||||
# interface_one_data, interface_two_data = get_interfaces(link_data)
|
||||
|
||||
# get node objects identified by link data
|
||||
node_one, node_two, net_one, net_two, tunnel = self._link_nodes(node_one_id, node_two_id)
|
||||
|
||||
if node_one:
|
||||
node_one.lock.acquire()
|
||||
if node_two:
|
||||
node_two.lock.acquire()
|
||||
|
||||
try:
|
||||
# wireless link
|
||||
if link_type == LinkTypes.WIRELESS:
|
||||
objects = [node_one, node_two, net_one, net_two]
|
||||
self._link_wireless(objects, connect=False)
|
||||
# wired link
|
||||
else:
|
||||
if all([node_one, node_two]):
|
||||
# TODO: fix this for the case where ifindex[1,2] are not specified
|
||||
# a wired unlink event, delete the connecting bridge
|
||||
interface_one = node_one.netif(interface_one_id)
|
||||
interface_two = node_two.netif(interface_two_id)
|
||||
|
||||
# get interfaces from common network, if no network node
|
||||
# otherwise get interfaces between a node and network
|
||||
if not interface_one and not interface_two:
|
||||
common_networks = node_one.commonnets(node_two)
|
||||
for network, common_interface_one, common_interface_two in common_networks:
|
||||
if (net_one and network == net_one) or not net_one:
|
||||
interface_one = common_interface_one
|
||||
interface_two = common_interface_two
|
||||
break
|
||||
|
||||
if all([interface_one, interface_two]) and any([interface_one.net, interface_two.net]):
|
||||
if interface_one.net != interface_two.net and all([interface_one.up, interface_two.up]):
|
||||
raise ValueError("no common network found")
|
||||
|
||||
logger.info("deleting link node(%s):interface(%s) node(%s):interface(%s)",
|
||||
node_one.name, interface_one.name, node_two.name, interface_two.name)
|
||||
net_one = interface_one.net
|
||||
interface_one.detachnet()
|
||||
interface_two.detachnet()
|
||||
if net_one.numnetif() == 0:
|
||||
self.delete_object(net_one.objid)
|
||||
node_one.delnetif(interface_one.netindex)
|
||||
node_two.delnetif(interface_two.netindex)
|
||||
finally:
|
||||
if node_one:
|
||||
node_one.lock.release()
|
||||
if node_two:
|
||||
node_two.lock.release()
|
||||
|
||||
def update_link(self, node_one_id, node_two_id, interface_one_id=None, interface_two_id=None, link_options=LinkOptions()):
|
||||
"""
|
||||
Update link information between nodes.
|
||||
|
||||
:param int node_one_id: node one id
|
||||
:param int node_two_id: node two id
|
||||
:param int interface_one_id: interface id for node one
|
||||
:param int interface_two_id: interface id for node two
|
||||
:param core.emulator.emudata.LinkOptions link_options: data to update link with
|
||||
:return: nothing
|
||||
"""
|
||||
# interface data
|
||||
# interface_one_data, interface_two_data = get_interfaces(link_data)
|
||||
|
||||
# get node objects identified by link data
|
||||
node_one, node_two, net_one, net_two, tunnel = self._link_nodes(node_one_id, node_two_id)
|
||||
|
||||
if node_one:
|
||||
node_one.lock.acquire()
|
||||
if node_two:
|
||||
node_two.lock.acquire()
|
||||
|
||||
try:
|
||||
# wireless link
|
||||
if link_options.type == LinkTypes.WIRELESS.value:
|
||||
raise ValueError("cannot update wireless link")
|
||||
else:
|
||||
if not node_one and not node_two:
|
||||
if net_one and net_two:
|
||||
# modify link between nets
|
||||
interface = net_one.getlinknetif(net_two)
|
||||
upstream = False
|
||||
|
||||
if not interface:
|
||||
upstream = True
|
||||
interface = net_two.getlinknetif(net_one)
|
||||
|
||||
if not interface:
|
||||
raise ValueError("modify unknown link between nets")
|
||||
|
||||
if upstream:
|
||||
interface.swapparams("_params_up")
|
||||
link_config(net_one, interface, link_options, devname=interface.name)
|
||||
interface.swapparams("_params_up")
|
||||
else:
|
||||
link_config(net_one, interface, link_options)
|
||||
|
||||
if not link_options.unidirectional:
|
||||
if upstream:
|
||||
link_config(net_two, interface, link_options)
|
||||
else:
|
||||
interface.swapparams("_params_up")
|
||||
link_config(net_two, interface, link_options, devname=interface.name)
|
||||
interface.swapparams("_params_up")
|
||||
else:
|
||||
raise ValueError("modify link for unknown nodes")
|
||||
elif not node_one:
|
||||
# node1 = layer 2 node, node2 = layer 3 node
|
||||
interface = node_two.netif(interface_two_id, net_one)
|
||||
link_config(net_one, interface, link_options)
|
||||
elif not node_two:
|
||||
# node2 = layer 2 node, node1 = layer 3 node
|
||||
interface = node_one.netif(interface_one_id, net_one)
|
||||
link_config(net_one, interface, link_options)
|
||||
else:
|
||||
common_networks = node_one.commonnets(node_two)
|
||||
if not common_networks:
|
||||
raise ValueError("no common network found")
|
||||
|
||||
for net_one, interface_one, interface_two in common_networks:
|
||||
if interface_one_id is not None and interface_one_id != node_one.getifindex(interface_one):
|
||||
continue
|
||||
|
||||
link_config(net_one, interface_one, link_options, interface_two=interface_two)
|
||||
if not link_options.unidirectional:
|
||||
link_config(net_one, interface_two, link_options, interface_two=interface_one)
|
||||
|
||||
finally:
|
||||
if node_one:
|
||||
node_one.lock.release()
|
||||
if node_two:
|
||||
node_two.lock.release()
|
||||
|
||||
def add_node(self, _type=NodeTypes.DEFAULT, _id=None, node_options=NodeOptions()):
|
||||
"""
|
||||
Add a node to the session, based on the provided node data.
|
||||
|
||||
:param core.enumerations.NodeTypes _type: type of node to create
|
||||
:param int _id: id for node, defaults to None for generated id
|
||||
:param core.emulator.emudata.NodeOptions node_options: data to create node with
|
||||
:return: created node
|
||||
"""
|
||||
|
||||
# retrieve node class for given node type
|
||||
try:
|
||||
node_class = nodeutils.get_node_class(_type)
|
||||
except KeyError:
|
||||
logger.error("invalid node type to create: %s", _type)
|
||||
return None
|
||||
|
||||
# set node start based on current session state, override and check when rj45
|
||||
start = self.state > EventTypes.DEFINITION_STATE.value
|
||||
enable_rj45 = getattr(self.options, "enablerj45", "0") == "1"
|
||||
if _type == NodeTypes.RJ45 and not enable_rj45:
|
||||
start = False
|
||||
|
||||
# determine node id
|
||||
if not _id:
|
||||
while True:
|
||||
_id = self.node_id_gen.next()
|
||||
if _id not in self.objects:
|
||||
break
|
||||
|
||||
# generate name if not provided
|
||||
name = node_options.name
|
||||
if not name:
|
||||
name = "%s%s" % (node_class.__name__, _id)
|
||||
|
||||
# create node
|
||||
logger.info("creating node(%s) id(%s) name(%s) start(%s)", node_class.__name__, _id, name, start)
|
||||
node = self.add_object(cls=node_class, objid=_id, name=name, start=start)
|
||||
|
||||
# set node attributes
|
||||
node.icon = node_options.icon
|
||||
node.canvas = node_options.canvas
|
||||
node.opaque = node_options.opaque
|
||||
|
||||
# set node position and broadcast it
|
||||
self.set_node_position(node, node_options)
|
||||
|
||||
# add services to default and physical nodes only
|
||||
if _type in [NodeTypes.DEFAULT, NodeTypes.PHYSICAL]:
|
||||
node.type = node_options.model
|
||||
logger.debug("set node type: %s", node.type)
|
||||
services = "|".join(node_options.services) or None
|
||||
self.services.addservicestonode(node, node.type, services)
|
||||
|
||||
# boot nodes if created after runtime, LxcNodes, Physical, and RJ45 are all PyCoreNodes
|
||||
is_boot_node = isinstance(node, PyCoreNode) and not nodeutils.is_node(node, NodeTypes.RJ45)
|
||||
if self.state == EventTypes.RUNTIME_STATE.value and is_boot_node:
|
||||
self.write_objects()
|
||||
self.add_remove_control_interface(node=node, remove=False)
|
||||
|
||||
# TODO: common method to both Physical and LxcNodes, but not the common PyCoreNode
|
||||
node.boot()
|
||||
|
||||
return node
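A hedged sketch of add_node() with explicit services; the service names below come from the defaults listed earlier, but the set that actually loads depends on the ServiceManager configuration.

# Sketch: a host node with explicit services and a fixed canvas position.
from core.emulator.emudata import NodeOptions
from core.enumerations import NodeTypes

options = NodeOptions(name="n1", model="host")
options.services = ["DefaultRoute", "SSH"]
options.set_position(100, 100)
node = session.add_node(_type=NodeTypes.DEFAULT, node_options=options)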
|
||||
|
||||
def update_node(self, node_id, node_options):
|
||||
"""
|
||||
Update node information.
|
||||
|
||||
:param int node_id: id of node to update
|
||||
:param core.emulator.emudata.NodeOptions node_options: data to update node with
|
||||
:return: True if node updated, False otherwise
|
||||
:rtype: bool
|
||||
"""
|
||||
result = False
|
||||
try:
|
||||
# get node to update
|
||||
node = self.get_object(node_id)
|
||||
|
||||
# set node position and broadcast it
|
||||
self.set_node_position(node, node_options)
|
||||
|
||||
# update attributes
|
||||
node.canvas = node_options.canvas
|
||||
node.icon = node_options.icon
|
||||
|
||||
# set node as updated successfully
|
||||
result = True
|
||||
except KeyError:
|
||||
logger.error("failure to update node that does not exist: %s", node_options.id)
|
||||
|
||||
return result
|
||||
|
||||
def delete_node(self, node_id):
|
||||
"""
|
||||
Delete a node from the session and check if the session should shut down when no nodes are left.
|
||||
|
||||
:param int node_id: id of node to delete
|
||||
:return: True if node deleted, False otherwise
|
||||
:rtype: bool
|
||||
"""
|
||||
# delete node and check for session shutdown if a node was removed
|
||||
result = self.custom_delete_object(node_id)
|
||||
if result:
|
||||
self.check_shutdown()
|
||||
return result
|
||||
|
||||
def set_node_position(self, node, node_options):
|
||||
"""
|
||||
Set position for a node, use lat/lon/alt if needed.
|
||||
|
||||
:param node: node to set position for
|
||||
:param core.emulator.emudata.NodeOptions node_options: data for node
|
||||
:return: nothing
|
||||
"""
|
||||
# extract location values
|
||||
x = node_options.x
|
||||
y = node_options.y
|
||||
lat = node_options.lat
|
||||
lon = node_options.lon
|
||||
alt = node_options.alt
|
||||
|
||||
# check if we need to generate position from lat/lon/alt
|
||||
has_empty_position = all(i is None for i in [x, y])
|
||||
has_lat_lon_alt = all(i is not None for i in [lat, lon, alt])
|
||||
using_lat_lon_alt = has_empty_position and has_lat_lon_alt
|
||||
if using_lat_lon_alt:
|
||||
x, y, _ = self.location.getxyz(lat, lon, alt)
|
||||
|
||||
# set position and broadcast
|
||||
node.setposition(x, y, None)
|
||||
|
||||
# broadcast updated location when using lat/lon/alt
|
||||
if using_lat_lon_alt:
|
||||
self.broadcast_node_location(node)
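When only geographic coordinates are supplied, the x/y pair is derived through the session location helper. A hedged sketch follows; the coordinates are illustrative and the result depends on the reference point set via setrefgeo() (as create_emane_network() does).

# Sketch: position a node by lat/lon/alt and let getxyz() derive x/y.
options = NodeOptions(name="n2")
options.set_location(47.57917, -122.13232, 2.0)
session.set_node_position(node, options)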
|
||||
|
||||
def broadcast_node_location(self, node):
|
||||
"""
|
||||
Broadcast node location to all listeners.
|
||||
|
||||
:param core.netns.nodes.PyCoreObj node: node to broadcast location for
|
||||
:return: nothing
|
||||
"""
|
||||
node_data = NodeData(
|
||||
message_type=0,
|
||||
id=node.objid,
|
||||
x_position=node.position.x,
|
||||
y_position=node.position.y
|
||||
)
|
||||
self.broadcast_node(node_data)
|
||||
|
||||
def start_mobility(self, node_ids=None):
|
||||
"""
|
||||
Start mobility for the provided node ids.
|
||||
|
||||
:param list[int] node_ids: nodes to start mobility for
|
||||
:return: nothing
|
||||
"""
|
||||
self.mobility.startup(node_ids)
|
||||
|
||||
def shutdown(self):
|
||||
"""
|
||||
Shutdown session.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
logger.info("session(%s) shutting down", self.session_id)
|
||||
self.set_state(EventTypes.DATACOLLECT_STATE, send_event=True)
|
||||
self.set_state(EventTypes.SHUTDOWN_STATE, send_event=True)
|
||||
super(EmuSession, self).shutdown()
|
||||
|
||||
def custom_delete_object(self, object_id):
|
||||
"""
|
||||
Remove an emulation object.
|
||||
|
||||
:param int object_id: object id to remove
|
||||
:return: True if object deleted, False otherwise
|
||||
"""
|
||||
result = False
|
||||
with self._objects_lock:
|
||||
if object_id in self.objects:
|
||||
obj = self.objects.pop(object_id)
|
||||
obj.shutdown()
|
||||
result = True
|
||||
return result
|
||||
|
||||
def is_active(self):
|
||||
"""
|
||||
Determine if this session is considered to be active. (Runtime or Data collect states)
|
||||
|
||||
:return: True if active, False otherwise
|
||||
"""
|
||||
result = self.state in {EventTypes.RUNTIME_STATE.value, EventTypes.DATACOLLECT_STATE.value}
|
||||
logger.info("session(%s) checking if active: %s", self.session_id, result)
|
||||
return result
|
||||
|
||||
def open_xml(self, file_name, start=False):
|
||||
"""
|
||||
Import a session from the EmulationScript XML format.
|
||||
|
||||
:param str file_name: xml file to load session from
|
||||
:param bool start: instantiate session if true, false otherwise
|
||||
:return: nothing
|
||||
"""
|
||||
# clear out existing session
|
||||
self.clear()
|
||||
|
||||
# set default node class when one is not provided
|
||||
node_class = nodeutils.get_node_class(NodeTypes.DEFAULT)
|
||||
options = {"start": start, "nodecls": node_class}
|
||||
core_document_parser(self, file_name, options)
|
||||
if start:
|
||||
self.name = os.path.basename(file_name)
|
||||
self.file_name = file_name
|
||||
self.instantiate()
|
||||
|
||||
def save_xml(self, file_name, version):
|
||||
"""
|
||||
Export a session to the EmulationScript XML format.
|
||||
|
||||
:param str file_name: file name to write session xml to
|
||||
:param str version: xml version type
|
||||
:return: nothing
|
||||
"""
|
||||
doc = core_document_writer(self, version)
|
||||
doc.writexml(file_name)
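A minimal round-trip sketch of the XML helpers above; the file name and version string are placeholders.

# Sketch: write the current session out, then rebuild and start it from file.
session.save_xml("/tmp/scenario.xml", "1.0")
session.open_xml("/tmp/scenario.xml", start=True)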
|
||||
|
||||
def add_hook(self, state, file_name, source_name, data):
|
||||
"""
|
||||
Store a hook from a received file message.
|
||||
|
||||
:param int state: when to run hook
|
||||
:param str file_name: file name for hook
|
||||
:param str source_name: source name
|
||||
:param data: hook data
|
||||
:return: nothing
|
||||
"""
|
||||
# hack to conform with old logic until updated
|
||||
state = ":%s" % state
|
||||
self.set_hook(state, file_name, source_name, data)
|
||||
|
||||
def add_node_service_file(self, node_id, service_name, file_name, source_name, data):
|
||||
"""
|
||||
Add a service file for a node.
|
||||
|
||||
:param int node_id: node to add service file to
|
||||
:param str service_name: service file to add
|
||||
:param str file_name: file name to use
|
||||
:param str source_name: source file
|
||||
:param str data: file data to save
|
||||
:return: nothing
|
||||
"""
|
||||
# hack to conform with old logic until updated
|
||||
service_name = ":%s" % service_name
|
||||
self.services.setservicefile(node_id, service_name, file_name, source_name, data)
|
||||
|
||||
def add_node_file(self, node_id, source_name, file_name, data):
|
||||
"""
|
||||
Add a file to a node.
|
||||
|
||||
:param int node_id: node to add file to
|
||||
:param str source_name: source file name
|
||||
:param str file_name: file name to add
|
||||
:param str data: file data
|
||||
:return: nothing
|
||||
"""
|
||||
|
||||
node = self.get_object(node_id)
|
||||
|
||||
if source_name is not None:
|
||||
node.addfile(source_name, file_name)
|
||||
elif data is not None:
|
||||
node.nodefile(file_name, data)
|
||||
|
||||
def clear(self):
|
||||
"""
|
||||
Clear all CORE session data. (objects, hooks, broker)
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
self.delete_objects()
|
||||
self.del_hooks()
|
||||
self.broker.reset()
|
||||
|
||||
def start_events(self):
|
||||
"""
|
||||
Start event loop.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
self.event_loop.run()
|
||||
|
||||
def services_event(self, event_data):
|
||||
"""
|
||||
Handle a service event.
|
||||
|
||||
:param core.data.EventData event_data: event data to handle
|
||||
:return: nothing
|
||||
"""
|
||||
self.services.handleevent(event_data)
|
||||
|
||||
def mobility_event(self, event_data):
|
||||
"""
|
||||
Handle a mobility event.
|
||||
|
||||
:param core.data.EventData event_data: event data to handle
|
||||
:return: nothing
|
||||
"""
|
||||
self.mobility.handleevent(event_data)
|
||||
|
||||
def create_wireless_node(self, _id=None, node_options=NodeOptions()):
|
||||
"""
|
||||
Create a wireless node for use within wireless/EMANE networks.
|
||||
|
||||
:param int _id: int for node, defaults to None and will be generated
|
||||
:param core.emulator.emudata.NodeOptions node_options: options for emane node, model will always be "mdr"
|
||||
:return: new emane node
|
||||
:rtype: core.netns.nodes.CoreNode
|
||||
"""
|
||||
node_options.model = "mdr"
|
||||
return self.add_node(_type=NodeTypes.DEFAULT, _id=_id, node_options=node_options)
|
||||
|
||||
def create_emane_network(self, model, geo_reference, geo_scale=None, node_options=NodeOptions()):
|
||||
"""
|
||||
Convenience method for creating an emane network.
|
||||
|
||||
:param model: emane model to use for emane network
|
||||
:param geo_reference: geo reference point to use for emane node locations
|
||||
:param geo_scale: geo scale to use for emane node locations, defaults to 1.0
|
||||
:param core.emulator.emudata.NodeOptions node_options: options for emane node being created
|
||||
:return: created emane network
|
||||
"""
|
||||
# required to be set for emane to function properly
|
||||
self.location.setrefgeo(*geo_reference)
|
||||
if geo_scale:
|
||||
self.location.refscale = geo_scale
|
||||
|
||||
# create and return network
|
||||
emane_network = self.add_node(_type=NodeTypes.EMANE, node_options=node_options)
|
||||
self.set_emane_model(emane_network, model)
|
||||
return emane_network
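A hedged sketch of standing up an EMANE network with the RF-PIPE model touched earlier in this change; the reference point and the model import path are assumptions based on the usual module layout.

# Sketch: EMANE network using the RF-PIPE model and an illustrative reference point.
from core.emane.rfpipe import EmaneRfPipeModel

geo_reference = (47.57917, -122.13232, 2.0)
emane_network = session.create_emane_network(EmaneRfPipeModel, geo_reference)
emane_network.setposition(80, 50, None)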
|
||||
|
||||
def set_emane_model(self, emane_node, emane_model):
|
||||
"""
|
||||
Set emane model for a given emane node.
|
||||
|
||||
:param emane_node: emane node to set model for
|
||||
:param emane_model: emane model to set
|
||||
:return: nothing
|
||||
"""
|
||||
values = list(emane_model.getdefaultvalues())
|
||||
self.emane.setconfig(emane_node.objid, emane_model.name, values)
|
||||
|
||||
def set_wireless_model(self, node, model):
|
||||
"""
|
||||
Convenience method for setting a wireless model.
|
||||
|
||||
:param node: node to set wireless model for
|
||||
:param core.mobility.WirelessModel model: wireless model to set node to
|
||||
:return: nothing
|
||||
"""
|
||||
values = list(model.getdefaultvalues())
|
||||
node.setmodel(model, values)
|
||||
|
||||
def wireless_link_all(self, network, nodes):
|
||||
"""
|
||||
Link all nodes to the provided wireless network.
|
||||
|
||||
:param network: wireless network to link nodes to
|
||||
:param nodes: nodes to link to wireless network
|
||||
:return: nothing
|
||||
"""
|
||||
for node in nodes:
|
||||
for common_network, interface_one, interface_two in node.commonnets(network):
|
||||
common_network.link(interface_one, interface_two)
|
||||
|
||||
|
||||
class CoreEmu(object):
|
||||
class CoreEmu:
|
||||
"""
|
||||
Provides logic for creating and configuring CORE sessions and the nodes within them.
|
||||
"""
|
||||
|
||||
def __init__(self, config=None):
|
||||
def __init__(self, config: Mapping[str, str] = None) -> None:
|
||||
"""
|
||||
Create a CoreEmu object.
|
||||
|
||||
:param dict config: configuration options
|
||||
:param config: configuration options
|
||||
"""
|
||||
# set umask 0
|
||||
os.umask(0)
|
||||
|
||||
# configuration
|
||||
if config is None:
|
||||
config = {}
|
||||
self.config = config
|
||||
|
||||
# session management
|
||||
self.session_id_gen = IdGen(_id=59999)
|
||||
self.sessions = {}
|
||||
|
||||
# set default nodes
|
||||
node_map = nodemaps.NODES
|
||||
nodeutils.set_node_map(node_map)
|
||||
# load services
|
||||
self.service_errors = []
|
||||
self.load_services()
|
||||
|
||||
# load default services
|
||||
core.services.load()
|
||||
# config services
|
||||
self.service_manager = ConfigServiceManager()
|
||||
config_services_path = os.path.abspath(os.path.dirname(configservices.__file__))
|
||||
self.service_manager.load(config_services_path)
|
||||
custom_dir = self.config.get("custom_config_services_dir")
|
||||
if custom_dir:
|
||||
self.service_manager.load(custom_dir)
|
||||
|
||||
# catch exit event
|
||||
atexit.register(self.shutdown)
|
||||
|
||||
def update_nodes(self, node_map):
|
||||
"""
|
||||
Updates node map used by core.
|
||||
def load_services(self) -> None:
|
||||
# load default services
|
||||
self.service_errors = core.services.load()
|
||||
|
||||
:param dict node_map: node map to update existing node map with
|
||||
:return: nothing
|
||||
"""
|
||||
nodeutils.update_node_map(node_map)
|
||||
# load custom services
|
||||
service_paths = self.config.get("custom_services_dir")
|
||||
logging.debug("custom service paths: %s", service_paths)
|
||||
if service_paths:
|
||||
for service_path in service_paths.split(","):
|
||||
service_path = service_path.strip()
|
||||
custom_service_errors = ServiceManager.add_services(service_path)
|
||||
self.service_errors.extend(custom_service_errors)
|
||||
|
||||
def shutdown(self):
|
||||
def shutdown(self) -> None:
|
||||
"""
|
||||
Shutdown all CORE sessions.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
logger.info("shutting down all sessions")
|
||||
logging.info("shutting down all sessions")
|
||||
sessions = self.sessions.copy()
|
||||
self.sessions.clear()
|
||||
for session in sessions.itervalues():
|
||||
for _id in sessions:
|
||||
session = sessions[_id]
|
||||
session.shutdown()
|
||||
|
||||
def create_session(self, _id=None, master=True):
|
||||
def create_session(self, _id: int = None, _cls: Type[Session] = Session) -> Session:
|
||||
"""
|
||||
Create a new CORE session, set to master if running standalone.
|
||||
Create a new CORE session.
|
||||
|
||||
:param int _id: session id for new session
|
||||
:param bool master: sets session to master
|
||||
:param _id: session id for new session
|
||||
:param _cls: Session class to use
|
||||
:return: created session
|
||||
:rtype: EmuSession
|
||||
"""
|
||||
|
||||
session_id = _id
|
||||
if not session_id:
|
||||
while True:
|
||||
session_id = self.session_id_gen.next()
|
||||
if session_id not in self.sessions:
|
||||
break
|
||||
|
||||
session = EmuSession(session_id, config=self.config)
|
||||
logger.info("created session: %s", session_id)
|
||||
if master:
|
||||
session.master = True
|
||||
|
||||
self.sessions[session_id] = session
|
||||
if not _id:
|
||||
_id = 1
|
||||
while _id in self.sessions:
|
||||
_id += 1
|
||||
session = _cls(_id, config=self.config)
|
||||
session.service_manager = self.service_manager
|
||||
logging.info("created session: %s", _id)
|
||||
self.sessions[_id] = session
|
||||
return session
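A hedged driver sketch for the refactored entry point; the import path assumes the module layout used by current CORE example scripts, and root privileges are required for the session to instantiate.

# Sketch: minimal CoreEmu driver for the new create_session() signature.
from core.emulator.coreemu import CoreEmu
from core.emulator.enumerations import EventTypes

coreemu = CoreEmu()
session = coreemu.create_session()
session.set_state(EventTypes.CONFIGURATION_STATE)
# ... add nodes and links here ...
session.instantiate()
coreemu.shutdown()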
|
||||
|
||||
def delete_session(self, _id):
|
||||
def delete_session(self, _id: int) -> bool:
|
||||
"""
|
||||
Shutdown and delete a CORE session.
|
||||
|
||||
:param int _id: session id to delete
|
||||
:param _id: session id to delete
|
||||
:return: True if deleted, False otherwise
|
||||
:rtype: bool
|
||||
"""
|
||||
logger.info("deleting session: %s", _id)
|
||||
logging.info("deleting session: %s", _id)
|
||||
session = self.sessions.pop(_id, None)
|
||||
result = False
|
||||
if session:
|
||||
logger.info("shutting session down: %s", _id)
|
||||
logging.info("shutting session down: %s", _id)
|
||||
session.shutdown()
|
||||
result = True
|
||||
else:
|
||||
logger.error("session to delete did not exist: %s", _id)
|
||||
logging.error("session to delete did not exist: %s", _id)
|
||||
|
||||
return result
|
||||
|
|
132
daemon/core/emulator/data.py
Normal file
|
@ -0,0 +1,132 @@
|
|||
"""
|
||||
CORE data objects.
|
||||
"""
|
||||
|
||||
from dataclasses import dataclass
|
||||
from typing import List, Tuple
|
||||
|
||||
from core.emulator.enumerations import (
|
||||
EventTypes,
|
||||
ExceptionLevels,
|
||||
LinkTypes,
|
||||
MessageFlags,
|
||||
NodeTypes,
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class ConfigData:
|
||||
message_type: int = None
|
||||
node: int = None
|
||||
object: str = None
|
||||
type: int = None
|
||||
data_types: Tuple[int] = None
|
||||
data_values: str = None
|
||||
captions: str = None
|
||||
bitmap: str = None
|
||||
possible_values: str = None
|
||||
groups: str = None
|
||||
session: int = None
|
||||
interface_number: int = None
|
||||
network_id: int = None
|
||||
opaque: str = None
|
||||
|
||||
|
||||
@dataclass
|
||||
class EventData:
|
||||
node: int = None
|
||||
event_type: EventTypes = None
|
||||
name: str = None
|
||||
data: str = None
|
||||
time: float = None
|
||||
session: int = None
|
||||
|
||||
|
||||
@dataclass
|
||||
class ExceptionData:
|
||||
node: int = None
|
||||
session: int = None
|
||||
level: ExceptionLevels = None
|
||||
source: str = None
|
||||
date: str = None
|
||||
text: str = None
|
||||
opaque: str = None
|
||||
|
||||
|
||||
@dataclass
|
||||
class FileData:
|
||||
message_type: MessageFlags = None
|
||||
node: int = None
|
||||
name: str = None
|
||||
mode: str = None
|
||||
number: int = None
|
||||
type: str = None
|
||||
source: str = None
|
||||
session: int = None
|
||||
data: str = None
|
||||
compressed_data: str = None
|
||||
|
||||
|
||||
@dataclass
|
||||
class NodeData:
|
||||
message_type: MessageFlags = None
|
||||
id: int = None
|
||||
node_type: NodeTypes = None
|
||||
name: str = None
|
||||
ip_address: str = None
|
||||
mac_address: str = None
|
||||
ip6_address: str = None
|
||||
model: str = None
|
||||
emulation_id: int = None
|
||||
server: str = None
|
||||
session: int = None
|
||||
x_position: float = None
|
||||
y_position: float = None
|
||||
canvas: int = None
|
||||
network_id: int = None
|
||||
services: List[str] = None
|
||||
latitude: float = None
|
||||
longitude: float = None
|
||||
altitude: float = None
|
||||
icon: str = None
|
||||
opaque: str = None
|
||||
source: str = None
|
||||
|
||||
|
||||
@dataclass
|
||||
class LinkData:
|
||||
message_type: MessageFlags = None
|
||||
label: str = None
|
||||
node1_id: int = None
|
||||
node2_id: int = None
|
||||
delay: float = None
|
||||
bandwidth: float = None
|
||||
per: float = None
|
||||
dup: float = None
|
||||
jitter: float = None
|
||||
mer: float = None
|
||||
burst: float = None
|
||||
session: int = None
|
||||
mburst: float = None
|
||||
link_type: LinkTypes = None
|
||||
gui_attributes: str = None
|
||||
unidirectional: int = None
|
||||
emulation_id: int = None
|
||||
network_id: int = None
|
||||
key: int = None
|
||||
interface1_id: int = None
|
||||
interface1_name: str = None
|
||||
interface1_ip4: str = None
|
||||
interface1_ip4_mask: int = None
|
||||
interface1_mac: str = None
|
||||
interface1_ip6: str = None
|
||||
interface1_ip6_mask: int = None
|
||||
interface2_id: int = None
|
||||
interface2_name: str = None
|
||||
interface2_ip4: str = None
|
||||
interface2_ip4_mask: int = None
|
||||
interface2_mac: str = None
|
||||
interface2_ip6: str = None
|
||||
interface2_ip6_mask: int = None
|
||||
opaque: str = None
|
||||
color: str = None
|
257
daemon/core/emulator/distributed.py
Normal file
|
@ -0,0 +1,257 @@
|
|||
"""
|
||||
Defines distributed server functionality.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import os
|
||||
import threading
|
||||
from collections import OrderedDict
|
||||
from tempfile import NamedTemporaryFile
|
||||
from typing import TYPE_CHECKING, Callable, Dict, Tuple
|
||||
|
||||
import netaddr
|
||||
from fabric import Connection
|
||||
from invoke import UnexpectedExit
|
||||
|
||||
from core import utils
|
||||
from core.errors import CoreCommandError
|
||||
from core.nodes.interface import GreTap
|
||||
from core.nodes.network import CoreNetwork, CtrlNet
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from core.emulator.session import Session
|
||||
|
||||
LOCK = threading.Lock()
|
||||
CMD_HIDE = True
|
||||
|
||||
|
||||
class DistributedServer:
|
||||
"""
|
||||
Provides distributed server interactions.
|
||||
"""
|
||||
|
||||
def __init__(self, name: str, host: str) -> None:
|
||||
"""
|
||||
Create a DistributedServer instance.
|
||||
|
||||
:param name: convenience name to associate with host
|
||||
:param host: host to connect to
|
||||
"""
|
||||
self.name = name
|
||||
self.host = host
|
||||
self.conn = Connection(host, user="root")
|
||||
self.lock = threading.Lock()
|
||||
|
||||
def remote_cmd(
|
||||
self, cmd: str, env: Dict[str, str] = None, cwd: str = None, wait: bool = True
|
||||
) -> str:
|
||||
"""
|
||||
Run command remotely using server connection.
|
||||
|
||||
:param cmd: command to run
|
||||
:param env: environment for remote command, default is None
|
||||
:param cwd: directory to run command in, defaults to None, which is the
|
||||
user's home directory
|
||||
:param wait: True to wait for status, False to background process
|
||||
:return: stdout when success
|
||||
:raises CoreCommandError: when a non-zero exit status occurs
|
||||
"""
|
||||
|
||||
replace_env = env is not None
|
||||
if not wait:
|
||||
cmd += " &"
|
||||
logging.debug(
|
||||
"remote cmd server(%s) cwd(%s) wait(%s): %s", self.host, cwd, wait, cmd
|
||||
)
|
||||
try:
|
||||
if cwd is None:
|
||||
result = self.conn.run(
|
||||
cmd, hide=CMD_HIDE, env=env, replace_env=replace_env
|
||||
)
|
||||
else:
|
||||
with self.conn.cd(cwd):
|
||||
result = self.conn.run(
|
||||
cmd, hide=CMD_HIDE, env=env, replace_env=replace_env
|
||||
)
|
||||
return result.stdout.strip()
|
||||
except UnexpectedExit as e:
|
||||
stdout, stderr = e.streams_for_display()
|
||||
raise CoreCommandError(e.result.exited, cmd, stdout, stderr)
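A hedged sketch of driving DistributedServer directly; the host must be reachable over passwordless SSH as root, exactly as the Fabric Connection above assumes, and the name/address values are placeholders.

# Sketch: run a command and push generated content to a remote CORE host.
server = DistributedServer("core2", "192.168.0.2")
hostname = server.remote_cmd("hostname")
server.remote_cmd("mkdir -p /tmp/core.demo")
server.remote_put_temp("/tmp/core.demo/hello.txt", "hello from the controller\n")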
|
||||
|
||||
def remote_put(self, source: str, destination: str) -> None:
|
||||
"""
|
||||
Push file to remote server.
|
||||
|
||||
:param source: source file to push
|
||||
:param destination: destination file location
|
||||
:return: nothing
|
||||
"""
|
||||
with self.lock:
|
||||
self.conn.put(source, destination)
|
||||
|
||||
def remote_put_temp(self, destination: str, data: str) -> None:
|
||||
"""
|
||||
Remote push file contents to a remote server, using a temp file as an
|
||||
intermediate step.
|
||||
|
||||
:param destination: file destination for data
|
||||
:param data: data to store in remote file
|
||||
:return: nothing
|
||||
"""
|
||||
with self.lock:
|
||||
temp = NamedTemporaryFile(delete=False)
|
||||
temp.write(data.encode("utf-8"))
|
||||
temp.close()
|
||||
self.conn.put(temp.name, destination)
|
||||
os.unlink(temp.name)
|
||||
|
||||
|
||||
class DistributedController:
|
||||
"""
|
||||
Provides logic for dealing with remote tunnels and distributed servers.
|
||||
"""
|
||||
|
||||
def __init__(self, session: "Session") -> None:
|
||||
"""
|
||||
Create a DistributedController instance.
|
||||
|
||||
:param session: session this controller is associated with
|
||||
"""
|
||||
self.session = session
|
||||
self.servers = OrderedDict()
|
||||
self.tunnels = {}
|
||||
self.address = self.session.options.get_config(
|
||||
"distributed_address", default=None
|
||||
)
|
||||
|
||||
def add_server(self, name: str, host: str) -> None:
|
||||
"""
|
||||
Add distributed server configuration.
|
||||
|
||||
:param name: distributed server name
|
||||
:param host: distributed server host address
|
||||
:return: nothing
|
||||
"""
|
||||
server = DistributedServer(name, host)
|
||||
self.servers[name] = server
|
||||
cmd = f"mkdir -p {self.session.session_dir}"
|
||||
server.remote_cmd(cmd)
|
||||
|
||||
def execute(self, func: Callable[[DistributedServer], None]) -> None:
|
||||
"""
|
||||
Convenience for executing logic against all distributed servers.
|
||||
|
||||
:param func: function to run, that takes a DistributedServer as a parameter
|
||||
:return: nothing
|
||||
"""
|
||||
for name in self.servers:
|
||||
server = self.servers[name]
|
||||
func(server)
|
||||
|
||||
def shutdown(self) -> None:
|
||||
"""
|
||||
Shutdown logic for dealing with distributed tunnels and server session
|
||||
directories.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
# shutdown all tunnels
|
||||
for key in self.tunnels:
|
||||
tunnels = self.tunnels[key]
|
||||
for tunnel in tunnels:
|
||||
tunnel.shutdown()
|
||||
|
||||
# remove all remote session directories
|
||||
for name in self.servers:
|
||||
server = self.servers[name]
|
||||
cmd = f"rm -rf {self.session.session_dir}"
|
||||
server.remote_cmd(cmd)
|
||||
|
||||
# clear tunnels
|
||||
self.tunnels.clear()
|
||||
|
||||
def start(self) -> None:
|
||||
"""
|
||||
Start distributed network tunnels.
|
||||
|
||||
:return: nothing
|
||||
"""
|
||||
for node_id in self.session.nodes:
|
||||
node = self.session.nodes[node_id]
|
||||
|
||||
if not isinstance(node, CoreNetwork):
|
||||
continue
|
||||
|
||||
if isinstance(node, CtrlNet) and node.serverintf is not None:
|
||||
continue
|
||||
|
||||
for name in self.servers:
|
||||
server = self.servers[name]
|
||||
self.create_gre_tunnel(node, server)
|
||||
|
||||
def create_gre_tunnel(
|
||||
self, node: CoreNetwork, server: DistributedServer
|
||||
) -> Tuple[GreTap, GreTap]:
|
||||
"""
|
||||
Create gre tunnel using a pair of gre taps between the local and remote server.
|
||||
|
||||
|
||||
:param node: node to create gre tunnel for
|
||||
:param server: server to create
|
||||
tunnel for
|
||||
:return: local and remote gre taps created for tunnel
|
||||
"""
|
||||
host = server.host
|
||||
key = self.tunnel_key(node.id, netaddr.IPAddress(host).value)
|
||||
tunnel = self.tunnels.get(key)
|
||||
if tunnel is not None:
|
||||
return tunnel
|
||||
|
||||
# local to server
|
||||
logging.info(
|
||||
"local tunnel node(%s) to remote(%s) key(%s)", node.name, host, key
|
||||
)
|
||||
local_tap = GreTap(session=self.session, remoteip=host, key=key)
|
||||
local_tap.net_client.create_interface(node.brname, local_tap.localname)
|
||||
|
||||
# server to local
|
||||
logging.info(
|
||||
"remote tunnel node(%s) to local(%s) key(%s)", node.name, self.address, key
|
||||
)
|
||||
remote_tap = GreTap(
|
||||
session=self.session, remoteip=self.address, key=key, server=server
|
||||
)
|
||||
remote_tap.net_client.create_interface(node.brname, remote_tap.localname)
|
||||
|
||||
# save tunnels for shutdown
|
||||
tunnel = (local_tap, remote_tap)
|
||||
self.tunnels[key] = tunnel
|
||||
return tunnel
|
||||
|
||||
def tunnel_key(self, n1_id: int, n2_id: int) -> int:
|
||||
"""
|
||||
Compute a 32-bit key used to uniquely identify a GRE tunnel.
|
||||
The hash(n1num), hash(n2num) values are used, so node numbers may be
|
||||
None or string values (used for e.g. "ctrlnet").
|
||||
|
||||
:param n1_id: node one id
|
||||
:param n2_id: node two id
|
||||
:return: tunnel key for the node pair
|
||||
"""
|
||||
logging.debug("creating tunnel key for: %s, %s", n1_id, n2_id)
|
||||
key = (
|
||||
(self.session.id << 16) ^ utils.hashkey(n1_id) ^ (utils.hashkey(n2_id) << 8)
|
||||
)
|
||||
return key & 0xFFFFFFFF
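A worked example of the bit layout only; utils.hashkey() is replaced with a stand-in so the numbers are reproducible outside a running session, which is an assumption and not the real hashing.

# Reproducible illustration of the key layout: session id in the high bits,
# the two (stand-in) node hashes XORed into the low bits, masked to 32 bits.
def fake_hashkey(value):
    return value if isinstance(value, int) else sum(ord(c) for c in str(value))

session_id, n1_id, n2_id = 1, 2, 3
key = (session_id << 16) ^ fake_hashkey(n1_id) ^ (fake_hashkey(n2_id) << 8)
key &= 0xFFFFFFFF
print(hex(key))  # 0x10302 for these inputs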
|
||||
|
||||
def get_tunnel(self, n1_id: int, n2_id: int) -> Tuple[GreTap, GreTap]:
|
||||
"""
|
||||
Return the GreTap between two nodes if it exists.
|
||||
|
||||
:param n1_id: node one id
|
||||
:param n2_id: node two id
|
||||
:return: gre tap between nodes or None
|
||||
"""
|
||||
key = self.tunnel_key(n1_id, n2_id)
|
||||
logging.debug("checking for tunnel key(%s) in: %s", key, self.tunnels)
|
||||
return self.tunnels.get(key)
|
|
@ -1,20 +1,73 @@
|
|||
from core.enumerations import LinkTypes
|
||||
from core.misc.ipaddress import Ipv4Prefix
|
||||
from core.misc.ipaddress import Ipv6Prefix
|
||||
from core.misc.ipaddress import MacAddress
|
||||
from typing import List, Optional
|
||||
|
||||
import netaddr
|
||||
|
||||
from core import utils
|
||||
from core.api.grpc.core_pb2 import LinkOptions
|
||||
from core.emane.nodes import EmaneNet
|
||||
from core.emulator.enumerations import LinkTypes
|
||||
from core.nodes.base import CoreNetworkBase, CoreNode
|
||||
from core.nodes.interface import CoreInterface
|
||||
from core.nodes.physical import PhysicalNode
|
||||
|
||||
|
||||
class NodeOptions(object):
|
||||
class IdGen:
|
||||
def __init__(self, _id: int = 0) -> None:
|
||||
self.id = _id
|
||||
|
||||
def next(self) -> int:
|
||||
self.id += 1
|
||||
return self.id
|
||||
|
||||
|
||||
def link_config(
|
||||
network: CoreNetworkBase,
|
||||
interface: CoreInterface,
|
||||
link_options: LinkOptions,
|
||||
devname: str = None,
|
||||
interface_two: CoreInterface = None,
|
||||
) -> None:
|
||||
"""
|
||||
Convenience method for configuring a link.
|
||||
|
||||
:param network: network to configure link for
|
||||
:param interface: interface to configure
|
||||
:param link_options: data to configure link with
|
||||
:param devname: device name, default is None
|
||||
:param interface_two: other interface associated, default is None
|
||||
:return: nothing
|
||||
"""
|
||||
config = {
|
||||
"netif": interface,
|
||||
"bw": link_options.bandwidth,
|
||||
"delay": link_options.delay,
|
||||
"loss": link_options.per,
|
||||
"duplicate": link_options.dup,
|
||||
"jitter": link_options.jitter,
|
||||
"netif2": interface_two,
|
||||
}
|
||||
|
||||
# hacky check here, because physical and emane nodes do not conform to the same
|
||||
# linkconfig interface
|
||||
if not isinstance(network, (EmaneNet, PhysicalNode)):
|
||||
config["devname"] = devname
|
||||
|
||||
network.linkconfig(**config)
|
||||
|
||||
|
||||
class NodeOptions:
|
||||
"""
|
||||
Options for creating and updating nodes within core.
|
||||
"""
|
||||
|
||||
def __init__(self, name=None, model="router"):
|
||||
def __init__(self, name: str = None, model: str = "PC", image: str = None) -> None:
|
||||
"""
|
||||
Create a NodeOptions object.
|
||||
|
||||
:param str name: name of node, defaults to node class name postfix with its id
|
||||
:param str model: defines services for default and physical nodes, defaults to "router"
|
||||
:param name: name of node, defaults to node class name postfix with its id
|
||||
:param model: defines services for default and physical nodes, defaults to
|
||||
"router"
|
||||
:param image: image to use for docker nodes
|
||||
"""
|
||||
self.name = name
|
||||
self.model = model
|
||||
|
@@ -22,32 +75,35 @@ class NodeOptions(object):
        self.icon = None
        self.opaque = None
        self.services = []
        self.config_services = []
        self.x = None
        self.y = None
        self.lat = None
        self.lon = None
        self.alt = None
        self.emulation_id = None
        self.emulation_server = None
        self.server = None
        self.image = image
        self.emane = None

    def set_position(self, x, y):
    def set_position(self, x: float, y: float) -> None:
        """
        Convenience method for setting position.

        :param int x: x position
        :param int y: y position
        :param x: x position
        :param y: y position
        :return: nothing
        """
        self.x = x
        self.y = y

    def set_location(self, lat, lon, alt):
    def set_location(self, lat: float, lon: float, alt: float) -> None:
        """
        Convenience method for setting location.

        :param float lat: latitude
        :param float lon: longitude
        :param float alt: altitude
        :param lat: latitude
        :param lon: longitude
        :param alt: altitude
        :return: nothing
        """
        self.lat = lat
@@ -55,16 +111,17 @@ class NodeOptions(object):
        self.alt = alt


class LinkOptions(object):
class LinkOptions:
    """
    Options for creating and updating links within core.
    """

    def __init__(self, _type=LinkTypes.WIRED):
    def __init__(self, _type: LinkTypes = LinkTypes.WIRED) -> None:
        """
        Create a LinkOptions object.

        :param core.enumerations.LinkTypes _type: type of link, defaults to wired
        :param _type: type of link, defaults to
            wired
        """
        self.type = _type
        self.session = None
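A short sketch of the option objects defined above, using illustrative values:

node_options = NodeOptions(name="n1", model="router")
node_options.set_position(100, 150)
node_options.set_location(47.57, -122.13, 2.0)
link_options = LinkOptions(_type=LinkTypes.WIRED)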
@@ -84,112 +141,31 @@ class LinkOptions(object):
        self.opaque = None


class IpPrefixes(object):
    """
    Convenience class to help generate IP4 and IP6 addresses for nodes within CORE.
    """

    def __init__(self, ip4_prefix=None, ip6_prefix=None):
        """
        Creates an IpPrefixes object.

        :param str ip4_prefix: ip4 prefix to use for generation
        :param str ip6_prefix: ip6 prefix to use for generation
        :raises ValueError: when both ip4 and ip6 prefixes have not been provided
        """
        if not ip4_prefix and not ip6_prefix:
            raise ValueError("ip4 or ip6 must be provided")

        self.ip4 = None
        if ip4_prefix:
            self.ip4 = Ipv4Prefix(ip4_prefix)
        self.ip6 = None
        if ip6_prefix:
            self.ip6 = Ipv6Prefix(ip6_prefix)

    def ip4_address(self, node):
        """
        Convenience method to return the IP4 address for a node.

        :param node: node to get IP4 address for
        :return: IP4 address or None
        :rtype: str
        """
        if not self.ip4:
            raise ValueError("ip4 prefixes have not been set")
        return str(self.ip4.addr(node.objid))

    def ip6_address(self, node):
        """
        Convenience method to return the IP6 address for a node.

        :param node: node to get IP6 address for
        :return: IP4 address or None
        :rtype: str
        """
        if not self.ip6:
            raise ValueError("ip6 prefixes have not been set")
        return str(self.ip6.addr(node.objid))

    def create_interface(self, node, name=None, mac=None):
        """
        Creates interface data for linking nodes, using the nodes unique id for generation, along with a random
        mac address, unless provided.

        :param core.coreobj.PyCoreNode node: node to create interface for
        :param str name: name to set for interface, default is eth{id}
        :param str mac: mac address to use for this interface, default is random generation
        :return: new interface data for the provided node
        :rtype: InterfaceData
        """
        # interface id
        inteface_id = node.newifindex()

        # generate ip4 data
        ip4 = None
        ip4_mask = None
        if self.ip4:
            ip4 = str(self.ip4.addr(node.objid))
            ip4_mask = self.ip4.prefixlen

        # generate ip6 data
        ip6 = None
        ip6_mask = None
        if self.ip6:
            ip6 = str(self.ip6.addr(node.objid))
            ip6_mask = self.ip6.prefixlen

        # random mac
        if not mac:
            mac = str(MacAddress.random())

        return InterfaceData(
            _id=inteface_id,
            name=name,
            ip4=ip4,
            ip4_mask=ip4_mask,
            ip6=ip6,
            ip6_mask=ip6_mask,
            mac=mac
        )


class InterfaceData(object):
class InterfaceData:
    """
    Convenience class for storing interface data.
    """

    def __init__(self, _id, name, mac, ip4, ip4_mask, ip6, ip6_mask):
    def __init__(
        self,
        _id: int,
        name: str,
        mac: str,
        ip4: str,
        ip4_mask: int,
        ip6: str,
        ip6_mask: int,
    ) -> None:
        """
        Creates an InterfaceData object.

        :param int _id:
        :param str name:
        :param str mac:
        :param str ip4:
        :param int ip4_mask:
        :param str ip6:
        :param int ip6_mask:
        :param _id: interface id
        :param name: name for interface
        :param mac: mac address
        :param ip4: ipv4 address
        :param ip4_mask: ipv4 bit mask
        :param ip6: ipv6 address
        :param ip6_mask: ipv6 bit mask
        """
        self.id = _id
        self.name = name
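A sketch constructing the container above directly, with illustrative values; in practice IpPrefixes.create_interface (next hunk) fills in these fields:

iface_data = InterfaceData(
    _id=0,
    name="eth0",
    mac="00:00:00:aa:00:01",
    ip4="10.0.0.1",
    ip4_mask=24,
    ip6="2001::1",
    ip6_mask=64,
)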
@@ -199,25 +175,161 @@ class InterfaceData(object):
        self.ip6 = ip6
        self.ip6_mask = ip6_mask

    def has_ip4(self):
    def has_ip4(self) -> bool:
        """
        Determines if interface has an ip4 address.

        :return: True if has ip4, False otherwise
        """
        return all([self.ip4, self.ip4_mask])

    def has_ip6(self):
    def has_ip6(self) -> bool:
        """
        Determines if interface has an ip6 address.

        :return: True if has ip6, False otherwise
        """
        return all([self.ip6, self.ip6_mask])

    def ip4_address(self):
    def ip4_address(self) -> Optional[str]:
        """
        Retrieve a string representation of the ip4 address and netmask.

        :return: ip4 string or None
        """
        if self.has_ip4():
            return "%s/%s" % (self.ip4, self.ip4_mask)
            return f"{self.ip4}/{self.ip4_mask}"
        else:
            return None

    def ip6_address(self):
    def ip6_address(self) -> Optional[str]:
        """
        Retrieve a string representation of the ip6 address and netmask.

        :return: ip4 string or None
        """
        if self.has_ip6():
            return "%s/%s" % (self.ip6, self.ip6_mask)
            return f"{self.ip6}/{self.ip6_mask}"
        else:
            return None

    def get_addresses(self):
    def get_addresses(self) -> List[str]:
        """
        Returns a list of ip4 and ip6 address when present.

        :return: list of addresses
        """
        ip4 = self.ip4_address()
        ip6 = self.ip6_address()
        return [i for i in [ip4, ip6] if i]


class IpPrefixes:
    """
    Convenience class to help generate IP4 and IP6 addresses for nodes within CORE.
    """

    def __init__(self, ip4_prefix: str = None, ip6_prefix: str = None) -> None:
        """
        Creates an IpPrefixes object.

        :param ip4_prefix: ip4 prefix to use for generation
        :param ip6_prefix: ip6 prefix to use for generation
        :raises ValueError: when both ip4 and ip6 prefixes have not been provided
        """
        if not ip4_prefix and not ip6_prefix:
            raise ValueError("ip4 or ip6 must be provided")

        self.ip4 = None
        if ip4_prefix:
            self.ip4 = netaddr.IPNetwork(ip4_prefix)
        self.ip6 = None
        if ip6_prefix:
            self.ip6 = netaddr.IPNetwork(ip6_prefix)

    def ip4_address(self, node: CoreNode) -> str:
        """
        Convenience method to return the IP4 address for a node.

        :param node: node to get IP4 address for
        :return: IP4 address or None
        """
        if not self.ip4:
            raise ValueError("ip4 prefixes have not been set")
        return str(self.ip4[node.id])

    def ip6_address(self, node: CoreNode) -> str:
        """
        Convenience method to return the IP6 address for a node.

        :param node: node to get IP6 address for
        :return: IP4 address or None
        """
        if not self.ip6:
            raise ValueError("ip6 prefixes have not been set")
        return str(self.ip6[node.id])

    def create_interface(
        self, node: CoreNode, name: str = None, mac: str = None
    ) -> InterfaceData:
        """
        Creates interface data for linking nodes, using the nodes unique id for
        generation, along with a random mac address, unless provided.

        :param node: node to create interface for
        :param name: name to set for interface, default is eth{id}
        :param mac: mac address to use for this interface, default is random
            generation
        :return: new interface data for the provided node
        """
        # interface id
        inteface_id = node.newifindex()

        # generate ip4 data
        ip4 = None
        ip4_mask = None
        if self.ip4:
            ip4 = self.ip4_address(node)
            ip4_mask = self.ip4.prefixlen

        # generate ip6 data
        ip6 = None
        ip6_mask = None
        if self.ip6:
            ip6 = self.ip6_address(node)
            ip6_mask = self.ip6.prefixlen

        # random mac
        if not mac:
            mac = utils.random_mac()

        return InterfaceData(
            _id=inteface_id,
            name=name,
            ip4=ip4,
            ip4_mask=ip4_mask,
            ip6=ip6,
            ip6_mask=ip6_mask,
            mac=mac,
        )


def create_interface(
    node: CoreNode, network: CoreNetworkBase, interface_data: InterfaceData
):
    """
    Create an interface for a node on a network using provided interface data.

    :param node: node to create interface for
    :param network: network to associate interface with
    :param interface_data: interface data
    :return: created interface
    """
    node.newnetif(
        network,
        addrlist=interface_data.get_addresses(),
        hwaddr=interface_data.mac,
        ifindex=interface_data.id,
        ifname=interface_data.name,
    )
    return node.netif(interface_data.id)
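A hedged end-to-end sketch of the helpers above; `node1` and `switch` are assumed to be nodes created by a CoreEmu session, and the prefixes are illustrative:

prefixes = IpPrefixes(ip4_prefix="10.0.0.0/24", ip6_prefix="2001::/64")
iface_data = prefixes.create_interface(node1, name="eth0")
# roughly ["10.0.0.<id>/24", "2001::<id>/64"] based on node1.id
addresses = iface_data.get_addresses()
iface = create_interface(node1, switch, iface_data)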
119  daemon/core/emulator/enumerations.py  Normal file
@@ -0,0 +1,119 @@
"""
Common enumerations used within CORE.
"""

from enum import Enum


class MessageFlags(Enum):
    """
    CORE message flags.
    """

    NONE = 0x00
    ADD = 0x01
    DELETE = 0x02
    CRI = 0x04
    LOCAL = 0x08
    STRING = 0x10
    TEXT = 0x20
    TTY = 0x40


class NodeTypes(Enum):
    """
    Node types.
    """

    DEFAULT = 0
    PHYSICAL = 1
    SWITCH = 4
    HUB = 5
    WIRELESS_LAN = 6
    RJ45 = 7
    TUNNEL = 8
    EMANE = 10
    TAP_BRIDGE = 11
    PEER_TO_PEER = 12
    CONTROL_NET = 13
    DOCKER = 15
    LXC = 16


class LinkTypes(Enum):
    """
    Link types.
    """

    WIRELESS = 0
    WIRED = 1


class RegisterTlvs(Enum):
    """
    Register type, length, value enumerations.
    """

    WIRELESS = 0x01
    MOBILITY = 0x02
    UTILITY = 0x03
    EXECUTE_SERVER = 0x04
    GUI = 0x05
    EMULATION_SERVER = 0x06
    SESSION = 0x0A


class ConfigDataTypes(Enum):
    """
    Configuration data types.
    """

    UINT8 = 0x01
    UINT16 = 0x02
    UINT32 = 0x03
    UINT64 = 0x04
    INT8 = 0x05
    INT16 = 0x06
    INT32 = 0x07
    INT64 = 0x08
    FLOAT = 0x09
    STRING = 0x0A
    BOOL = 0x0B


class EventTypes(Enum):
    """
    Event types.
    """

    NONE = 0
    DEFINITION_STATE = 1
    CONFIGURATION_STATE = 2
    INSTANTIATION_STATE = 3
    RUNTIME_STATE = 4
    DATACOLLECT_STATE = 5
    SHUTDOWN_STATE = 6
    START = 7
    STOP = 8
    PAUSE = 9
    RESTART = 10
    FILE_OPEN = 11
    FILE_SAVE = 12
    SCHEDULED = 13
    RECONFIGURE = 14
    INSTANTIATION_COMPLETE = 15

    def should_start(self) -> bool:
        return self.value > self.DEFINITION_STATE.value


class ExceptionLevels(Enum):
    """
    Exception levels.
    """

    NONE = 0
    FATAL = 1
    ERROR = 2
    WARNING = 3
    NOTICE = 4
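A small sketch showing how these enumerations are typically consumed; the values follow directly from the definitions above:

state = EventTypes.RUNTIME_STATE
assert state.should_start()          # RUNTIME_STATE(4) > DEFINITION_STATE(1)
assert LinkTypes.WIRED.value == 1
assert NodeTypes(10) is NodeTypes.EMANE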
1977  daemon/core/emulator/session.py  Normal file
File diff suppressed because it is too large
111  daemon/core/emulator/sessionconfig.py  Normal file
@@ -0,0 +1,111 @@
from typing import Any

from core.config import ConfigurableManager, ConfigurableOptions, Configuration
from core.emulator.enumerations import ConfigDataTypes, RegisterTlvs
from core.plugins.sdt import Sdt


class SessionConfig(ConfigurableManager, ConfigurableOptions):
    """
    Provides session configuration.
    """

    name = "session"
    options = [
        Configuration(
            _id="controlnet", _type=ConfigDataTypes.STRING, label="Control Network"
        ),
        Configuration(
            _id="controlnet0", _type=ConfigDataTypes.STRING, label="Control Network 0"
        ),
        Configuration(
            _id="controlnet1", _type=ConfigDataTypes.STRING, label="Control Network 1"
        ),
        Configuration(
            _id="controlnet2", _type=ConfigDataTypes.STRING, label="Control Network 2"
        ),
        Configuration(
            _id="controlnet3", _type=ConfigDataTypes.STRING, label="Control Network 3"
        ),
        Configuration(
            _id="controlnet_updown_script",
            _type=ConfigDataTypes.STRING,
            label="Control Network Script",
        ),
        Configuration(
            _id="enablerj45",
            _type=ConfigDataTypes.BOOL,
            default="1",
            label="Enable RJ45s",
        ),
        Configuration(
            _id="preservedir",
            _type=ConfigDataTypes.BOOL,
            default="0",
            label="Preserve session dir",
        ),
        Configuration(
            _id="enablesdt",
            _type=ConfigDataTypes.BOOL,
            default="0",
            label="Enable SDT3D output",
        ),
        Configuration(
            _id="sdturl",
            _type=ConfigDataTypes.STRING,
            default=Sdt.DEFAULT_SDT_URL,
            label="SDT3D URL",
        ),
    ]
    config_type = RegisterTlvs.UTILITY

    def __init__(self) -> None:
        super().__init__()
        self.set_configs(self.default_values())

    def get_config(
        self,
        _id: str,
        node_id: int = ConfigurableManager._default_node,
        config_type: str = ConfigurableManager._default_type,
        default: Any = None,
    ) -> str:
        """
        Retrieves a specific configuration for a node and configuration type.

        :param _id: specific configuration to retrieve
        :param node_id: node id to store configuration for
        :param config_type: configuration type to store configuration for
        :param default: default value to return when value is not found
        :return: configuration value
        """
        value = super().get_config(_id, node_id, config_type, default)
        if value == "":
            value = default
        return value

    def get_config_bool(self, name: str, default: Any = None) -> bool:
        """
        Get configuration value as a boolean.

        :param name: configuration name
        :param default: default value if not found
        :return: boolean for configuration value
        """
        value = self.get_config(name)
        if value is None:
            return default
        return value.lower() == "true"

    def get_config_int(self, name: str, default: Any = None) -> int:
        """
        Get configuration value as int.

        :param name: configuration name
        :param default: default value if not found
        :return: int for configuration value
        """
        value = self.get_config(name, default=default)
        if value is not None:
            value = int(value)
        return value
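A hedged sketch of reading values back out of the session configuration above, assuming the surrounding core.config machinery is available:

config = SessionConfig()
controlnet = config.get_config("controlnet", default="")
enable_sdt = config.get_config_bool("enablesdt", default=False)
preserve = config.get_config_int("preservedir", default=0)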
32  daemon/core/errors.py  Normal file
@@ -0,0 +1,32 @@
"""
Provides CORE specific errors.
"""
import subprocess


class CoreCommandError(subprocess.CalledProcessError):
    """
    Used when encountering internal CORE command errors.
    """

    def __str__(self) -> str:
        return (
            f"Command({self.cmd}), Status({self.returncode}):\n"
            f"stdout: {self.output}\nstderr: {self.stderr}"
        )


class CoreError(Exception):
    """
    Used for errors when dealing with CoreEmu and Sessions.
    """

    pass


class CoreXmlError(Exception):
    """
    Used when there was an error parsing a CORE xml file.
    """

    pass
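A sketch of how the command error above might be raised and rendered, with illustrative values:

try:
    # CalledProcessError signature: (returncode, cmd, output=None, stderr=None)
    raise CoreCommandError(1, "ip link show", "device output", "device error")
except CoreCommandError as e:
    print(e)  # Command(ip link show), Status(1): plus stdout/stderr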
0  daemon/core/gui/__init__.py  Normal file
119  daemon/core/gui/app.py  Normal file
@@ -0,0 +1,119 @@
import math
import tkinter as tk
from tkinter import font, ttk

from core.gui import appconfig, themes
from core.gui.coreclient import CoreClient
from core.gui.graph.graph import CanvasGraph
from core.gui.images import ImageEnum, Images
from core.gui.menubar import Menubar
from core.gui.nodeutils import NodeUtils
from core.gui.statusbar import StatusBar
from core.gui.toolbar import Toolbar
from core.gui.validation import InputValidation

WIDTH = 1000
HEIGHT = 800


class Application(tk.Frame):
    def __init__(self, proxy: bool):
        super().__init__(master=None)
        # load node icons
        NodeUtils.setup()

        # widgets
        self.menubar = None
        self.toolbar = None
        self.canvas = None
        self.statusbar = None
        self.validation = None

        # fonts
        self.fonts_size = None
        self.icon_text_font = None
        self.edge_font = None

        # setup
        self.guiconfig = appconfig.read()
        self.app_scale = self.guiconfig["scale"]
        self.setup_scaling()
        self.style = ttk.Style()
        self.setup_theme()
        self.core = CoreClient(self, proxy)
        self.setup_app()
        self.draw()
        self.core.setup()

    def setup_scaling(self):
        self.fonts_size = {name: font.nametofont(name)["size"] for name in font.names()}
        text_scale = self.app_scale if self.app_scale < 1 else math.sqrt(self.app_scale)
        themes.scale_fonts(self.fonts_size, self.app_scale)
        self.icon_text_font = font.Font(family="TkIconFont", size=int(12 * text_scale))
        self.edge_font = font.Font(family="TkDefaultFont", size=int(8 * text_scale))

    def setup_theme(self):
        themes.load(self.style)
        self.master.bind_class("Menu", "<<ThemeChanged>>", themes.theme_change_menu)
        self.master.bind("<<ThemeChanged>>", themes.theme_change)
        self.style.theme_use(self.guiconfig["preferences"]["theme"])

    def setup_app(self):
        self.master.title("CORE")
        self.center()
        self.master.protocol("WM_DELETE_WINDOW", self.on_closing)
        image = Images.get(ImageEnum.CORE, 16)
        self.master.tk.call("wm", "iconphoto", self.master._w, image)
        self.pack(fill=tk.BOTH, expand=True)
        self.validation = InputValidation(self)

    def center(self):
        screen_width = self.master.winfo_screenwidth()
        screen_height = self.master.winfo_screenheight()
        x = int((screen_width / 2) - (WIDTH * self.app_scale / 2))
        y = int((screen_height / 2) - (HEIGHT * self.app_scale / 2))
        self.master.geometry(
            f"{int(WIDTH * self.app_scale)}x{int(HEIGHT * self.app_scale)}+{x}+{y}"
        )

    def draw(self):
        self.master.option_add("*tearOff", tk.FALSE)
        self.toolbar = Toolbar(self, self)
        self.toolbar.pack(side=tk.LEFT, fill=tk.Y, ipadx=2, ipady=2)
        self.draw_canvas()
        self.draw_status()
        self.menubar = Menubar(self.master, self)

    def draw_canvas(self):
        width = self.guiconfig["preferences"]["width"]
        height = self.guiconfig["preferences"]["height"]
        self.canvas = CanvasGraph(self, self.core, width, height)
        self.canvas.pack(fill=tk.BOTH, expand=True)
        scroll_x = ttk.Scrollbar(
            self.canvas, orient=tk.HORIZONTAL, command=self.canvas.xview
        )
        scroll_x.pack(side=tk.BOTTOM, fill=tk.X)
        scroll_y = ttk.Scrollbar(self.canvas, command=self.canvas.yview)
        scroll_y.pack(side=tk.RIGHT, fill=tk.Y)
        self.canvas.configure(xscrollcommand=scroll_x.set)
        self.canvas.configure(yscrollcommand=scroll_y.set)

    def draw_status(self):
        self.statusbar = StatusBar(master=self, app=self)
        self.statusbar.pack(side=tk.BOTTOM, fill=tk.X)

    def on_closing(self):
        self.menubar.prompt_save_running_session(True)

    def save_config(self):
        appconfig.save(self.guiconfig)

    def joined_session_update(self):
        self.statusbar.progress_bar.stop()
        if self.core.is_runtime():
            self.toolbar.set_runtime()
        else:
            self.toolbar.set_design()

    def close(self):
        self.master.destroy()
129  daemon/core/gui/appconfig.py  Normal file
@@ -0,0 +1,129 @@
import os
import shutil
from pathlib import Path

import yaml

# gui home paths
from core.gui import themes

HOME_PATH = Path.home().joinpath(".coretk")
BACKGROUNDS_PATH = HOME_PATH.joinpath("backgrounds")
CUSTOM_EMANE_PATH = HOME_PATH.joinpath("custom_emane")
CUSTOM_SERVICE_PATH = HOME_PATH.joinpath("custom_services")
ICONS_PATH = HOME_PATH.joinpath("icons")
MOBILITY_PATH = HOME_PATH.joinpath("mobility")
XMLS_PATH = HOME_PATH.joinpath("xmls")
CONFIG_PATH = HOME_PATH.joinpath("gui.yaml")
LOG_PATH = HOME_PATH.joinpath("gui.log")
SCRIPT_PATH = HOME_PATH.joinpath("scripts")

# local paths
DATA_PATH = Path(__file__).parent.joinpath("data")
LOCAL_ICONS_PATH = DATA_PATH.joinpath("icons").absolute()
LOCAL_BACKGROUND_PATH = DATA_PATH.joinpath("backgrounds").absolute()
LOCAL_XMLS_PATH = DATA_PATH.joinpath("xmls").absolute()
LOCAL_MOBILITY_PATH = DATA_PATH.joinpath("mobility").absolute()

# configuration data
TERMINALS = {
    "xterm": "xterm -e",
    "aterm": "aterm -e",
    "eterm": "eterm -e",
    "rxvt": "rxvt -e",
    "konsole": "konsole -e",
    "lxterminal": "lxterminal -e",
    "xfce4-terminal": "xfce4-terminal -x",
    "gnome-terminal": "gnome-terminal --window --",
}
EDITORS = ["$EDITOR", "vim", "emacs", "gedit", "nano", "vi"]
DEFAULT_IP4S = ["10.0.0.0", "192.168.0.0", "172.16.0.0"]
DEFAULT_IP4 = DEFAULT_IP4S[0]
DEFAULT_IP6S = ["2001::", "2002::", "a::"]
DEFAULT_IP6 = DEFAULT_IP6S[0]
DEFAULT_MAC = "00:00:00:aa:00:00"


class IndentDumper(yaml.Dumper):
    def increase_indent(self, flow=False, indentless=False):
        return super().increase_indent(flow, False)


def copy_files(current_path, new_path):
    for current_file in current_path.glob("*"):
        new_file = new_path.joinpath(current_file.name)
        shutil.copy(current_file, new_file)


def find_terminal():
    for term in sorted(TERMINALS):
        cmd = TERMINALS[term]
        if shutil.which(term):
            return cmd
    return None


def check_directory():
    if HOME_PATH.exists():
        return
    HOME_PATH.mkdir()
    BACKGROUNDS_PATH.mkdir()
    CUSTOM_EMANE_PATH.mkdir()
    CUSTOM_SERVICE_PATH.mkdir()
    ICONS_PATH.mkdir()
    MOBILITY_PATH.mkdir()
    XMLS_PATH.mkdir()
    SCRIPT_PATH.mkdir()

    copy_files(LOCAL_ICONS_PATH, ICONS_PATH)
    copy_files(LOCAL_BACKGROUND_PATH, BACKGROUNDS_PATH)
    copy_files(LOCAL_XMLS_PATH, XMLS_PATH)
    copy_files(LOCAL_MOBILITY_PATH, MOBILITY_PATH)

    terminal = find_terminal()
    if "EDITOR" in os.environ:
        editor = EDITORS[0]
    else:
        editor = EDITORS[1]
    config = {
        "preferences": {
            "theme": themes.THEME_DARK,
            "editor": editor,
            "terminal": terminal,
            "gui3d": "/usr/local/bin/std3d.sh",
            "width": 1000,
            "height": 750,
        },
        "location": {
            "x": 0.0,
            "y": 0.0,
            "z": 0.0,
            "lat": 47.5791667,
            "lon": -122.132322,
            "alt": 2.0,
            "scale": 150.0,
        },
        "servers": [],
        "nodes": [],
        "recentfiles": [],
        "observers": [],
        "scale": 1.0,
        "ips": {
            "ip4": DEFAULT_IP4,
            "ip6": DEFAULT_IP6,
            "ip4s": DEFAULT_IP4S,
            "ip6s": DEFAULT_IP6S,
        },
        "mac": DEFAULT_MAC,
    }
    save(config)


def read():
    with CONFIG_PATH.open("r") as f:
        return yaml.load(f, Loader=yaml.SafeLoader)


def save(config):
    with CONFIG_PATH.open("w") as f:
        yaml.dump(config, f, Dumper=IndentDumper, default_flow_style=False)
1062  daemon/core/gui/coreclient.py  Normal file
File diff suppressed because it is too large
BIN  daemon/core/gui/data/backgrounds/sample1-bg.gif  Normal file  (binary file not shown; 312 KiB)
BIN  daemon/core/gui/data/backgrounds/sample4-bg.jpg  Normal file  (binary file not shown; 196 KiB)
Some files were not shown because too many files have changed in this diff